aligned_vector.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_aligned_vector_h
18 #define dealii_aligned_vector_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/exceptions.h>
23 #include <deal.II/base/memory_consumption.h>
24 #include <deal.II/base/mpi.h>
25 #include <deal.II/base/parallel.h>
26 #include <deal.II/base/utilities.h>
27 
28 // boost::serialization::make_array used to be in array.hpp, but was
29 // moved to a different file in BOOST 1.64
30 #include <boost/version.hpp>
31 #if BOOST_VERSION >= 106400
32 # include <boost/serialization/array_wrapper.hpp>
33 #else
34 # include <boost/serialization/array.hpp>
35 #endif
36 #include <boost/serialization/split_member.hpp>
37 
38 #include <cstring>
39 #include <memory>
40 #include <type_traits>
41 
42 
43 
44 DEAL_II_NAMESPACE_OPEN
45 
46 
60 template <class T>
61 class AlignedVector
62 {
63 public:
68  using value_type = T;
69  using pointer = value_type *;
70  using const_pointer = const value_type *;
71  using iterator = value_type *;
72  using const_iterator = const value_type *;
73  using reference = value_type &;
74  using const_reference = const value_type &;
75  using size_type = std::size_t;
76 
81  AlignedVector();
88  explicit AlignedVector(const size_type size, const T &init = T());
89 
93  ~AlignedVector() = default;
94 
101  AlignedVector(const AlignedVector<T> &vec);
107  AlignedVector(AlignedVector<T> &&vec) noexcept;
113  AlignedVector &
114  operator=(const AlignedVector<T> &vec);
115 
119  AlignedVector &
120  operator=(AlignedVector<T> &&vec) noexcept;
121 
144  void
145  resize_fast(const size_type new_size);
146 
159  void
160  resize(const size_type new_size);
161 
177  void
178  resize(const size_type new_size, const T &init);
179 
200  void
201  reserve(const size_type new_allocated_size);
202 
207  void
208  clear();
209 
215  void
216  push_back(const T in_data);
217 
221  reference
222  back();
223 
227  const_reference
228  back() const;
229 
234  template <typename ForwardIterator>
235  void
236  insert_back(ForwardIterator begin, ForwardIterator end);
237 
247  void
248  fill();
249 
258  void
259  fill(const T &element);
260 
348  void
349  replicate_across_communicator(const MPI_Comm communicator,
350  const unsigned int root_process);
351 
355  void
356  swap(AlignedVector<T> &vec);
357 
361  bool
362  empty() const;
363 
367  size_type
368  size() const;
369 
374  size_type
375  capacity() const;
376 
380  reference
381  operator[](const size_type index);
382 
386  const_reference
387  operator[](const size_type index) const;
388 
392  pointer
393  data();
394 
398  const_pointer
399  data() const;
400 
404  iterator
405  begin();
406 
410  iterator
411  end();
412 
416  const_iterator
417  begin() const;
418 
422  const_iterator
423  end() const;
424 
430  size_type
431  memory_consumption() const;
432 
438  template <class Archive>
439  void
440  save(Archive &ar, const unsigned int version) const;
441 
447  template <class Archive>
448  void
449  load(Archive &ar, const unsigned int version);
450 
451 #ifdef DOXYGEN
457  template <class Archive>
458  void
459  serialize(Archive &archive, const unsigned int version);
460 #else
461  // This macro defines the serialize() method that is compatible with
462  // the templated save() and load() method that have been implemented.
463  BOOST_SERIALIZATION_SPLIT_MEMBER()
464 #endif
465 
466 private:
558  class Deleter
559  {
560  public:
566  Deleter(AlignedVector<T> *owning_object);
567 
568 #ifdef DEAL_II_WITH_MPI
576  Deleter(AlignedVector<T> *owning_object,
577  const bool is_shmem_root,
578  T * aligned_shmem_pointer,
579  MPI_Comm shmem_group_communicator,
580  MPI_Win shmem_window);
581 #endif
582 
588  void
589  operator()(T *ptr);
590 
598  void
599  reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);
600 
601  private:
605  class DeleterActionBase
606  {
607  public:
611  virtual ~DeleterActionBase() = default;
612 
618  virtual void
619  delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
620  };
621 
622 #ifdef DEAL_II_WITH_MPI
623 
628  class MPISharedMemDeleterAction : public DeleterActionBase
629  {
630  public:
635  MPISharedMemDeleterAction(const bool is_shmem_root,
636  T *      aligned_shmem_pointer,
637  MPI_Comm shmem_group_communicator,
638  MPI_Win shmem_window);
639 
645  virtual void
646  delete_array(const AlignedVector<T> *aligned_vector, T *ptr);
647 
648  private:
653  const bool is_shmem_root;
654  T *      aligned_shmem_pointer;
655  MPI_Comm shmem_group_communicator;
656  MPI_Win  shmem_window;
657  };
658 #endif
659 
664  std::unique_ptr<DeleterActionBase> deleter_action_object;
665 
670  const AlignedVector<T> *owning_aligned_vector;
671  };
672 
676  std::unique_ptr<T[], Deleter> elements;
677 
681  T *used_elements_end;
682 
686  T *allocated_elements_end;
687 };
688 
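// A minimal usage sketch for the class declared above (assuming a deal.II
// installation in which this header is available as
// <deal.II/base/aligned_vector.h>):
//
// @code
//   #include <deal.II/base/aligned_vector.h>
//
//   dealii::AlignedVector<double> v(4, 1.0); // four elements, all set to 1.0
//   v.push_back(2.0);                        // grows the allocation if needed
//   v.resize(8);                             // new elements are value-initialized
//   for (double &x : v)                      // begin()/end() allow range-for loops
//     x *= 2.0;
//   double *p = v.data();                    // pointer to 64-byte aligned storage
// @endcode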
689 
690 // ------------------------------- inline functions --------------------------
691 
697 namespace internal
698 {
717  template <typename T>
718  class AlignedVectorCopyConstruct
719    : private dealii::parallel::ParallelForInteger
720  {
721  static const std::size_t minimum_parallel_grain_size =
722  160000 / sizeof(T) + 1;
723 
724  public:
734  AlignedVectorCopyConstruct(const T *const source_begin,
735  const T *const source_end,
736  T *const destination)
737  : source_(source_begin)
738  , destination_(destination)
739  {
740  Assert(source_end >= source_begin, ExcInternalError());
741  Assert(source_end == source_begin || destination != nullptr,
742  ExcInternalError());
743  const std::size_t size = source_end - source_begin;
744  if (size < minimum_parallel_grain_size)
745  AlignedVectorCopyConstruct::apply_to_subrange(0, size);
746  else
747  apply_parallel(0, size, minimum_parallel_grain_size);
748  }
749 
754  virtual void
755  apply_to_subrange(const std::size_t begin,
756  const std::size_t end) const override
757  {
758  if (end == begin)
759  return;
760 
761  // For classes with trivial assignment we can use memcpy. Cast the
762  // elements to (void*) to silence a compiler warning for virtual classes
763  // (they will never arrive here because they are non-trivial).
764 
765  if (std::is_trivial<T>::value == true)
766  std::memcpy(static_cast<void *>(destination_ + begin),
767  static_cast<const void *>(source_ + begin),
768  (end - begin) * sizeof(T));
769  else
770  for (std::size_t i = begin; i < end; ++i)
771  new (&destination_[i]) T(source_[i]);
772  }
773 
774  private:
775  const T *const source_;
776  T *const destination_;
777  };
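// To make the grain size above concrete: for T = double (8 bytes),
// minimum_parallel_grain_size evaluates to 160000 / 8 + 1 = 20001, so copies
// of fewer than ~20k doubles run serially while larger ranges are forwarded
// to apply_parallel(). The same decision logic, condensed into a
// free-standing sketch (a hypothetical stand-in for the ParallelForInteger
// machinery, not the actual implementation):
//
// @code
//   #include <cstddef>
//   #include <new>
//
//   template <typename T>
//   void copy_construct_range(const T *source, T *dest, const std::size_t n)
//   {
//     const std::size_t grain = 160000 / sizeof(T) + 1;
//     if (n < grain)
//       for (std::size_t i = 0; i < n; ++i) // small ranges: serial loop
//         new (&dest[i]) T(source[i]);
//     else
//       {
//         // large ranges: split [0,n) into chunks of at least 'grain'
//         // elements and hand them to a task scheduler (omitted here)
//       }
//   }
// @endcode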
778 
779 
786  template <typename T>
787  class AlignedVectorMoveConstruct
788    : private dealii::parallel::ParallelForInteger
789  {
790  static const std::size_t minimum_parallel_grain_size =
791  160000 / sizeof(T) + 1;
792 
793  public:
803  AlignedVectorMoveConstruct(T *const source_begin,
804  T *const source_end,
805  T *const destination)
806  : source_(source_begin)
807  , destination_(destination)
808  {
809  Assert(source_end >= source_begin, ExcInternalError());
810  Assert(source_end == source_begin || destination != nullptr,
811  ExcInternalError());
812  const std::size_t size = source_end - source_begin;
813  if (size < minimum_parallel_grain_size)
814  AlignedVectorMoveConstruct::apply_to_subrange(0, size);
815  else
816  apply_parallel(0, size, minimum_parallel_grain_size);
817  }
818 
823  virtual void
824  apply_to_subrange(const std::size_t begin,
825  const std::size_t end) const override
826  {
827  if (end == begin)
828  return;
829 
830  // Classes with trivial assignment can use memcpy. Cast the elements to
831  // (void*) to silence a compiler warning for virtual classes (they will
832  // never arrive here because they are non-trivial).
833  if (std::is_trivial<T>::value == true)
834  std::memcpy(static_cast<void *>(destination_ + begin),
835  static_cast<void *>(source_ + begin),
836  (end - begin) * sizeof(T));
837  else
838  // For everything else just use the move constructor. The original
839  // object remains alive and will be destroyed elsewhere.
840  for (std::size_t i = begin; i < end; ++i)
841  new (&destination_[i]) T(std::move(source_[i]));
842  }
843 
844  private:
845  T *const source_;
846  T *const destination_;
847  };
848 
849 
867  template <typename T, bool initialize_memory>
868  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
869  {
870  static const std::size_t minimum_parallel_grain_size =
871  160000 / sizeof(T) + 1;
872 
873  public:
878  AlignedVectorInitialize(const std::size_t size,
879  const T & element,
880  T *const destination)
881  : element_(element)
882  , destination_(destination)
883  , trivial_element(false)
884  {
885  if (size == 0)
886  return;
887  Assert(destination != nullptr, ExcInternalError());
888 
889  // Do not use the memcmp shortcut for long double because on some systems
890  // its padding bytes are not completely filled, which may lead to false
891  // positives in tools such as valgrind.
892  if (std::is_trivial<T>::value == true &&
893  std::is_same<T, long double>::value == false)
894  {
895  const unsigned char zero[sizeof(T)] = {};
896  // cast element to (void*) to silence compiler warning for virtual
897  // classes (they will never arrive here because they are
898  // non-trivial).
899  if (std::memcmp(zero,
900  static_cast<const void *>(&element),
901  sizeof(T)) == 0)
902  trivial_element = true;
903  }
904  if (size < minimum_parallel_grain_size)
905  AlignedVectorInitialize::apply_to_subrange(0, size);
906  else
907  apply_parallel(0, size, minimum_parallel_grain_size);
908  }
909 
913  virtual void
914  apply_to_subrange(const std::size_t begin,
915  const std::size_t end) const override
916  {
917  // For classes with trivial assignment, a value of zero can be set with
918  // memset. Cast the elements to (void*) to silence a compiler warning for
919  // virtual classes (they will never arrive here because they are
920  // non-trivial).
921  if (std::is_trivial<T>::value == true && trivial_element)
922  std::memset(static_cast<void *>(destination_ + begin),
923  0,
924  (end - begin) * sizeof(T));
925  else
926  copy_construct_or_assign(
927  begin, end, std::integral_constant<bool, initialize_memory>());
928  }
929 
930  private:
931  const T & element_;
932  mutable T *destination_;
933  bool trivial_element;
934 
935  // copy assignment operation
936  void
937  copy_construct_or_assign(const std::size_t begin,
938  const std::size_t end,
939  std::integral_constant<bool, false>) const
940  {
941  for (std::size_t i = begin; i < end; ++i)
942  destination_[i] = element_;
943  }
944 
945  // copy constructor (memory initialization)
946  void
947  copy_construct_or_assign(const std::size_t begin,
948  const std::size_t end,
949  std::integral_constant<bool, true>) const
950  {
951  for (std::size_t i = begin; i < end; ++i)
952  new (&destination_[i]) T(element_);
953  }
954  };
955 
956 
957 
970  template <typename T, bool initialize_memory>
971  class AlignedVectorDefaultInitialize
972    : private dealii::parallel::ParallelForInteger
973  {
974  static const std::size_t minimum_parallel_grain_size =
975  160000 / sizeof(T) + 1;
976 
977  public:
982  AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
983  : destination_(destination)
984  {
985  if (size == 0)
986  return;
987  Assert(destination != nullptr, ExcInternalError());
988 
989  if (size < minimum_parallel_grain_size)
990  AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
991  else
992  apply_parallel(0, size, minimum_parallel_grain_size);
993  }
994 
998  virtual void
999  apply_to_subrange(const std::size_t begin,
1000  const std::size_t end) const override
1001  {
1002  // For classes with trivial assignment, a value of zero can be set with
1003  // memset. Cast the elements to (void*) to silence a compiler warning for
1004  // virtual classes (they will never arrive here because they are
1005  // non-trivial).
1006  if (std::is_trivial<T>::value == true)
1007  std::memset(static_cast<void *>(destination_ + begin),
1008  0,
1009  (end - begin) * sizeof(T));
1010  else
1011  default_construct_or_assign(
1012  begin, end, std::integral_constant<bool, initialize_memory>());
1013  }
1014 
1015  private:
1016  mutable T *destination_;
1017 
1018  // default assignment operation
1019  void
1020  default_construct_or_assign(const std::size_t begin,
1021  const std::size_t end,
1022  std::integral_constant<bool, false>) const
1023  {
1024  for (std::size_t i = begin; i < end; ++i)
1025  destination_[i] = std::move(T());
1026  }
1027 
1028  // default construction (memory initialization)
1029  void
1030  default_construct_or_assign(const std::size_t begin,
1031  const std::size_t end,
1032  std::integral_constant<bool, true>) const
1033  {
1034  for (std::size_t i = begin; i < end; ++i)
1035  new (&destination_[i]) T;
1036  }
1037  };
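// The 'initialize_memory' template argument of the two classes above
// distinguishes raw, freshly allocated storage (placement-new construction)
// from storage whose elements are already alive (plain assignment). A
// condensed illustration that replaces the std::integral_constant tag
// dispatch with a runtime flag (a hypothetical sketch, not the
// implementation used above):
//
// @code
//   #include <cstddef>
//   #include <new>
//
//   template <typename T>
//   void default_fill(T *storage, const std::size_t n, const bool memory_is_raw)
//   {
//     for (std::size_t i = 0; i < n; ++i)
//       if (memory_is_raw)
//         new (&storage[i]) T; // construct in place (initialize_memory == true)
//       else
//         storage[i] = T();    // assign over a live element (== false)
//   }
// @endcode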
1038 
1039 } // end of namespace internal
1040 
1041 
1042 #ifndef DOXYGEN
1043 
1044 
1045 
1046 template <typename T>
1047 inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
1048  : deleter_action_object(nullptr) // encode default action by using a nullptr
1049  , owning_aligned_vector(owning_object)
1050 {}
1051 
1052 
1053 # ifdef DEAL_II_WITH_MPI
1054 
1055 template <typename T>
1056 inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
1057  const bool is_shmem_root,
1058  T * aligned_shmem_pointer,
1059  MPI_Comm shmem_group_communicator,
1060  MPI_Win shmem_window)
1061  : deleter_action_object(
1062  std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
1063  aligned_shmem_pointer,
1064  shmem_group_communicator,
1065  shmem_window))
1066  , owning_aligned_vector(owning_object)
1067 {}
1068 # endif
1069 
1070 
1071 template <typename T>
1072 inline void
1073 AlignedVector<T>::Deleter::operator()(T *ptr)
1074 {
1075  // If no special action has been registered (i.e., if the action pointer is
1076  // nullptr), then just perform the default action right here.
1077  if (deleter_action_object == nullptr)
1078  {
1079  if (ptr != nullptr)
1080  {
1081  Assert(owning_aligned_vector->used_elements_end != nullptr,
1082  ExcInternalError());
1083 
1084  if (std::is_trivial<T>::value == false)
1085  for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
1086  --p)
1087  p->~T();
1088 
1089  std::free(ptr);
1090  }
1091  }
1092  else
1093  // Otherwise, let the action object do what is necessary
1094  deleter_action_object->delete_array(owning_aligned_vector, ptr);
1095 }
1096 
1097 
1098 
1099 template <typename T>
1100 inline void
1101 AlignedVector<T>::Deleter::reset_owning_object(
1102  const AlignedVector<T> *new_aligned_vector_ptr)
1103 {
1104  owning_aligned_vector = new_aligned_vector_ptr;
1105 }
1106 
1107 
1108 # ifdef DEAL_II_WITH_MPI
1109 
1110 template <typename T>
1111 inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
1112  MPISharedMemDeleterAction(const bool is_shmem_root,
1113  T * aligned_shmem_pointer,
1114  MPI_Comm shmem_group_communicator,
1115  MPI_Win shmem_window)
1116  : is_shmem_root(is_shmem_root)
1117  , aligned_shmem_pointer(aligned_shmem_pointer)
1118  , shmem_group_communicator(shmem_group_communicator)
1119  , shmem_window(shmem_window)
1120 {}
1121 
1122 
1123 
1124 template <typename T>
1125 inline void
1126 AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
1127  const AlignedVector<T> *aligned_vector,
1128  T * ptr)
1129 {
1130  (void)ptr;
1131  // It would be nice to assert that aligned_vector->elements.get() equals ptr,
1132  // but it is not guaranteed to work: clang, for example, sets elements.get()
1133  // to nullptr and then calls the deleter on a previously made copy. Hence we
1134  // must assume here that elements.get() (which is managed by the unique_ptr)
1135  // may be nullptr at this point.
1136  //
1137  // used_elements_end is a member variable of AlignedVector (i.e., we control
1138  // it, not unique_ptr) so it is still set to its correct value.
1139 
1140  if (is_shmem_root)
1141  if (std::is_trivial<T>::value == false)
1142  for (T *p = aligned_vector->used_elements_end - 1; p >= ptr; --p)
1143  p->~T();
1144 
1145  int ierr;
1146  ierr = MPI_Win_free(&shmem_window);
1147  AssertThrowMPI(ierr);
1148 
1149  Utilities::MPI::free_communicator(shmem_group_communicator);
1150 }
1151 
1152 # endif
1153 
1154 
1155 template <class T>
1156 inline AlignedVector<T>::AlignedVector()
1157  : elements(nullptr, Deleter(this))
1158  , used_elements_end(nullptr)
1159  , allocated_elements_end(nullptr)
1160 {}
1161 
1162 
1163 
1164 template <class T>
1165 inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
1166  : elements(nullptr, Deleter(this))
1167  , used_elements_end(nullptr)
1168  , allocated_elements_end(nullptr)
1169 {
1170  if (size > 0)
1171  resize(size, init);
1172 }
1173 
1174 
1175 
1176 template <class T>
1177 inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
1178  : elements(nullptr, Deleter(this))
1179  , used_elements_end(nullptr)
1180  , allocated_elements_end(nullptr)
1181 {
1182  // copy the data from vec
1183  reserve(vec.size());
1184  used_elements_end = allocated_elements_end;
1185  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
1186  vec.used_elements_end,
1187  elements.get());
1188 }
1189 
1190 
1191 
1192 template <class T>
1193 inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
1194  : AlignedVector<T>()
1195 {
1196  // forward to the move operator
1197  *this = std::move(vec);
1198 }
1199 
1200 
1201 
1202 template <class T>
1203 inline AlignedVector<T> &
1204 AlignedVector<T>::operator=(const AlignedVector<T> &vec)
1205 {
1206  const size_type new_size = vec.used_elements_end - vec.elements.get();
1207 
1208  // First throw away everything and re-allocate memory but leave that
1209  // memory uninitialized for now:
1210  resize(0);
1211  reserve(new_size);
1212 
1213  // Then copy the elements over by using the copy constructor on these
1214  // elements:
1215  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
1216  vec.used_elements_end,
1217  elements.get());
1218 
1219  // Finally adjust the pointer to the end of the elements that are used:
1220  used_elements_end = elements.get() + new_size;
1221 
1222  return *this;
1223 }
1224 
1225 
1226 
1227 template <class T>
1228 inline AlignedVector<T> &
1229 AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
1230 {
1231  clear();
1232 
1233  // Move the actual data in the 'elements' object. One problem is that this
1234  // also moves the deleter object, but the deleter object
1235  // references 'this' (i.e., the 'this' pointer of the *moved-from*
1236  // object). The way this is implemented is that we have to move the
1237  // deleter as well, and then reset the pointer inside the deleter
1238  // that references the outer object.
1239  elements = std::move(vec.elements);
1240  elements.get_deleter().reset_owning_object(this);
1241 
1242  // Then also steal the other pointers and clear them in the original object:
1243  used_elements_end = vec.used_elements_end;
1244  allocated_elements_end = vec.allocated_elements_end;
1245 
1246  vec.used_elements_end = nullptr;
1247  vec.allocated_elements_end = nullptr;
1248 
1249  return *this;
1250 }
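// The deleter book-keeping above matters in ordinary user code like the
// following (a hypothetical sketch):
//
// @code
//   #include <utility>
//
//   dealii::AlignedVector<int> a(1000, 42);
//   dealii::AlignedVector<int> b;
//   b = std::move(a); // b now owns a's memory; the moved Deleter is
//                     // re-pointed at b via reset_owning_object(this), so
//                     // when b eventually frees the array it reads
//                     // b.used_elements_end rather than a stale pointer
//                     // into the moved-from object a.
// @endcode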
1251 
1252 
1253 
1254 template <class T>
1255 inline void
1256 AlignedVector<T>::resize_fast(const size_type new_size)
1257 {
1258  const size_type old_size = size();
1259 
1260  if (new_size == 0)
1261  clear();
1262  else if (new_size == old_size)
1263  {} // nothing to do here
1264  else if (new_size < old_size)
1265  {
1266  // call destructor on fields that are released, if the type requires it.
1267  // doing it backward releases the elements in reverse order as compared to
1268  // how they were created
1269  if (std::is_trivial<T>::value == false)
1270  for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1271  p->~T();
1272  used_elements_end = elements.get() + new_size;
1273  }
1274  else // new_size > old_size
1275  {
1276  // Allocate more space, and claim that space as used
1277  reserve(new_size);
1278  used_elements_end = elements.get() + new_size;
1279 
1280  // We still need to construct the values in case the class is non-trivial,
1281  // because virtual classes etc. need to run their (default) constructor
1282  if (std::is_trivial<T>::value == false)
1283  internal::AlignedVectorDefaultInitialize<T, true>(
1284  new_size - old_size, elements.get() + old_size);
1285  }
1286 }
1287 
1288 
1289 
1290 template <class T>
1291 inline void
1292 AlignedVector<T>::resize(const size_type new_size)
1293 {
1294  const size_type old_size = size();
1295 
1296  if (new_size == 0)
1297  clear();
1298  else if (new_size == old_size)
1299  {} // nothing to do here
1300  else if (new_size < old_size)
1301  {
1302  // call destructor on fields that are released, if the type requires it.
1303  // doing it backward releases the elements in reverse order as compared to
1304  // how they were created
1305  if (std::is_trivial<T>::value == false)
1306  for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1307  p->~T();
1308  used_elements_end = elements.get() + new_size;
1309  }
1310  else // new_size > old_size
1311  {
1312  // Allocate more space, and claim that space as used
1313  reserve(new_size);
1314  used_elements_end = elements.get() + new_size;
1315 
1316  // finally set the values to the default initializer
1317  internal::AlignedVectorDefaultInitialize<T, true>(
1318  new_size - old_size, elements.get() + old_size);
1319  }
1320 }
1321 
1322 
1323 
1324 template <class T>
1325 inline void
1326 AlignedVector<T>::resize(const size_type new_size, const T &init)
1327 {
1328  const size_type old_size = size();
1329 
1330  if (new_size == 0)
1331  clear();
1332  else if (new_size == old_size)
1333  {} // nothing to do here
1334  else if (new_size < old_size)
1335  {
1336  // call destructor on fields that are released, if the type requires it.
1337  // doing it backward releases the elements in reverse order as compared to
1338  // how they were created
1339  if (std::is_trivial<T>::value == false)
1340  for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
1341  p->~T();
1342  used_elements_end = elements.get() + new_size;
1343  }
1344  else // new_size > old_size
1345  {
1346  // Allocate more space, and claim that space as used
1347  reserve(new_size);
1348  used_elements_end = elements.get() + new_size;
1349 
1350  // finally set the desired init values
1351  internal::AlignedVectorInitialize<T, true>(
1352  new_size - old_size, init, elements.get() + old_size);
1353  }
1354 }
1355 
1356 
1357 
1358 template <class T>
1359 inline void
1360 AlignedVector<T>::reserve(const size_type new_allocated_size)
1361 {
1362  const size_type old_size = used_elements_end - elements.get();
1363  const size_type old_allocated_size = allocated_elements_end - elements.get();
1364  if (new_allocated_size > old_allocated_size)
1365  {
1366  // if we continuously increase the size of the vector, we might be
1367  // reallocating a lot of times. therefore, try to increase the size more
1368  // aggressively
1369  const size_type new_size =
1370  std::max(new_allocated_size, 2 * old_allocated_size);
1371 
1372  // allocate and align along 64-byte boundaries (this is enough for all
1373  // levels of vectorization currently supported by deal.II)
1374  T *new_data_ptr;
1375  Utilities::posix_memalign(
1376  reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));
1377 
1378  // Now create a deleter that encodes what should happen when the object is
1379  // released: We need to destroy the objects that are currently alive (in
1380  // reverse order), and then release the memory. Note that we capture the
1381  // 'this' pointer because the number of elements currently alive might
1382  // change over time.
1383  Deleter deleter(this);
1384 
1385  // copy whatever elements we need to retain
1386  if (new_allocated_size > 0)
1387  internal::AlignedVectorMoveConstruct<T>(
1388  elements.get(), elements.get() + old_size, new_data_ptr);
1389 
1390  // Now reset all of the member variables of the current object
1391  // based on the allocation above. Assigning to a std::unique_ptr
1392  // object also releases the previously pointed to memory.
1393  //
1394  // Note that at the time of releasing the old memory, 'used_elements_end'
1395  // still points to its previous value, and this is important for the
1396  // deleter object of the previously allocated array (see how it loops over
1397  // the to-be-destroyed elements in the default action implemented in
1398  // Deleter::operator()).
1399  elements = decltype(elements)(new_data_ptr, std::move(deleter));
1400  used_elements_end = elements.get() + old_size;
1401  allocated_elements_end = elements.get() + new_size;
1402  }
1403  else if (new_allocated_size == 0)
1404  clear();
1405  else // new_allocated_size < old_allocated_size
1406  {} // nothing to do here
1407 }
1408 
1409 
1410 
1411 template <class T>
1412 inline void
1413 AlignedVector<T>::clear()
1414 {
1415  // Just release the memory (which also calls the destructor of the elements),
1416  // and then set the auxiliary pointers to invalid values.
1417  //
1418  // Note that at the time of releasing the old memory, 'used_elements_end'
1419  // still points to its previous value, and this is important for the
1420  // deleter object of the previously allocated array (see how it loops over
1421  // the to-be-destroyed elements a few lines above).
1422  elements.reset();
1423  used_elements_end = nullptr;
1424  allocated_elements_end = nullptr;
1425 }
1426 
1427 
1428 
1429 template <class T>
1430 inline void
1431 AlignedVector<T>::push_back(const T in_data)
1432 {
1433  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
1434  if (used_elements_end == allocated_elements_end)
1435  reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
1436  if (std::is_trivial<T>::value == false)
1437  new (used_elements_end++) T(in_data);
1438  else
1439  *used_elements_end++ = in_data;
1440 }
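// push_back() therefore grows the capacity geometrically: starting from an
// empty vector the capacity sequence is 16, 32, 64, ..., so appending n
// elements triggers only O(log n) reallocations. A hypothetical way to
// observe this (data() changes exactly when the storage was reallocated):
//
// @code
//   dealii::AlignedVector<int> v;
//   unsigned int reallocations = 0;
//   const int *  last = v.data();
//   for (int i = 0; i < 1000; ++i)
//     {
//       v.push_back(i);
//       if (v.data() != last)
//         {
//           ++reallocations;
//           last = v.data();
//         }
//     }
//   // reallocations ends up around log2(1000/16) + 1 = 7, not 1000
// @endcode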
1441 
1442 
1443 
1444 template <class T>
1445 inline typename AlignedVector<T>::reference
1446 AlignedVector<T>::back()
1447 {
1448  AssertIndexRange(0, size());
1449  T *field = used_elements_end - 1;
1450  return *field;
1451 }
1452 
1453 
1454 
1455 template <class T>
1456 inline typename AlignedVector<T>::const_reference
1457 AlignedVector<T>::back() const
1458 {
1459  AssertIndexRange(0, size());
1460  const T *field = used_elements_end - 1;
1461  return *field;
1462 }
1463 
1464 
1465 
1466 template <class T>
1467 template <typename ForwardIterator>
1468 inline void
1469 AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
1470 {
1471  const size_type old_size = size();
1472  reserve(old_size + (end - begin));
1473  for (; begin != end; ++begin, ++used_elements_end)
1474  {
1475  if (std::is_trivial<T>::value == false)
1476  new (used_elements_end) T;
1477  *used_elements_end = *begin;
1478  }
1479 }
1480 
1481 
1482 
1483 template <class T>
1484 inline void
1485 AlignedVector<T>::fill()
1486 {
1487  internal::AlignedVectorDefaultInitialize<T, false>(size(),
1488  elements.get());
1489 }
1490 
1491 
1492 
1493 template <class T>
1494 inline void
1495 AlignedVector<T>::fill(const T &value)
1496 {
1497  internal::AlignedVectorInitialize<T, false>(size(),
1498  value,
1499  elements.get());
1500 }
1501 
1502 
1503 
1504 template <class T>
1505 inline void
1506 AlignedVector<T>::replicate_across_communicator(const MPI_Comm communicator,
1507  const unsigned int root_process)
1508 {
1509 # ifdef DEAL_II_WITH_MPI
1510 
1511  // Let the root process broadcast its size. If it is zero, then all
1512  // processes just clear() their memory and reset themselves to a non-shared
1513  // empty object -- there is no point to run through complicated MPI
1514  // calls if the end result is an empty array. Otherwise, we continue on.
1515  const size_type new_size =
1516  Utilities::MPI::broadcast(communicator, size(), root_process);
1517  if (new_size == 0)
1518  {
1519  clear();
1520  return;
1521  }
1522 
1523 
1524  // **** Step 0 ****
1525  // All but the root process no longer need their data, so release the memory
1526  // used to store the previous elements.
1527  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
1528  {
1529  elements.reset();
1530  used_elements_end = nullptr;
1531  allocated_elements_end = nullptr;
1532  }
1533 
1534  // **** Step 1 ****
1535  // Create communicators for each group of processes that can use
1536  // shared memory areas. Within each of these groups, we don't care about
1537  // which rank each of the old processes gets except that we would like to
1538  // make sure that the (global) root process will have rank=0 within
1539  // its own sub-communicator. We can do that through the third argument of
1540  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
1541  // order of processes within the split communicators, and we should set it to
1542  // zero for the root processes and one for all others -- which means that
1543  // for all of these other processes, MPI can choose whatever order it
1544  // wants because they have the same key (MPI then documents that these ties
1545  // will be broken according to these processes' rank in the old group).
1546  //
1547  // At least that's the theory. In practice, the MPI implementation where
1548  // this function was developed on does not seem to do that. (Bug report
1549  // is here: https://github.com/open-mpi/ompi/issues/8854)
1550  // We work around this by letting MPI_Comm_split_type choose whatever
1551  // rank it wants, and then reshuffle with MPI_Comm_split in a second
1552  // step -- not elegant, nor efficient, but seems to work:
1553  MPI_Comm shmem_group_communicator;
1554  {
1555  MPI_Comm shmem_group_communicator_temp;
1556  int ierr = MPI_Comm_split_type(communicator,
1557  MPI_COMM_TYPE_SHARED,
1558  /* key */ 0,
1559  MPI_INFO_NULL,
1560  &shmem_group_communicator_temp);
1561  AssertThrowMPI(ierr);
1562 
1563  const int key =
1564  (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1565  ierr = MPI_Comm_split(shmem_group_communicator_temp,
1566  /* color */ 0,
1567  key,
1568  &shmem_group_communicator);
1569  AssertThrowMPI(ierr);
1570 
1571  // Verify the explanation from above
1572  if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1573  Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
1574  ExcInternalError());
1575 
1576  // And get rid of the temporary communicator
1577  Utilities::MPI::free_communicator(shmem_group_communicator_temp);
1578  }
1579  const bool is_shmem_root =
1580  Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;
1581 
1582  // **** Step 2 ****
1583  // We then have to send the state of the current object from the
1584  // root process to one exemplar in each shmem group. To this end,
1585  // we create another subcommunicator that includes the ranks zero
1586  // of all shmem groups, and because of the trick above, we know
1587  // that this also includes the original root process.
1588  //
1589  // There are different ways of creating a "shmem_roots_communicator".
1590  // The conceptually easiest way is to create an MPI_Group that only
1591  // includes the shmem roots and then create a communicator from this
1592  // via MPI_Comm_create or MPI_Comm_create_group. The problem
1593  // with this is that we would have to exchange among all processes
1594  // which ones are shmem roots and which are not. This is awkward.
1595  //
1596  // A simpler way is to use MPI_Comm_split that uses "colors" to
1597  // indicate which sub-communicator each process wants to be in.
1598  // We use color=0 to indicate the group of shmem roots, and color=1
1599  // for all other processes -- the latter will simply not ever do
1600  // anything among themselves with the communicator so created.
1601  //
1602  // Using MPI_Comm_split has the additional benefit that, just as above,
1603  // we can choose where each rank will end up in shmem_roots_communicator.
1604  // We again set key=0 for the original root_process, and key=1 for all other
1605  // ranks; then, the global root becomes rank=0 on the
1606  // shmem_roots_communicator. We don't care how the other processes are
1607  // ordered.
1608  MPI_Comm shmem_roots_communicator;
1609  {
1610  const int key =
1611  (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1612 
1613  const int ierr = MPI_Comm_split(communicator,
1614  /*color=*/
1615  (is_shmem_root ? 0 : 1),
1616  key,
1617  &shmem_roots_communicator);
1618  AssertThrowMPI(ierr);
1619 
1620  // Again verify the explanation from above
1621  if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1622  Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
1623  ExcInternalError());
1624  }
1625 
1626  const unsigned int shmem_roots_root_rank = 0;
1627  const bool is_shmem_roots_root =
1628  (is_shmem_root && (Utilities::MPI::this_mpi_process(
1629  shmem_roots_communicator) == shmem_roots_root_rank));
1630 
1631  // Now let the original root_process broadcast the current object to all
1632  // shmem roots. We know that rank zero of shmem_roots_communicator is the
1633  // original root process, which has all of the data.
1634  if (is_shmem_root)
1635  {
1636  if (std::is_trivial<T>::value)
1637  {
1638  // The data is "trivial", i.e., we can copy things directly without
1639  // having to go through the serialization/deserialization machinery of
1640  // Utilities::MPI::broadcast.
1641  //
1642  // In that case, first tell all of the other shmem roots how many
1643  // elements we will have to deal with, and let them resize their
1644  // (non-shared) arrays.
1645  const size_type new_size =
1646  Utilities::MPI::broadcast(shmem_roots_communicator,
1647  size(),
1648  shmem_roots_root_rank);
1649  if (is_shmem_roots_root == false)
1650  resize(new_size);
1651 
1652  // Then directly copy from the root process into these buffers
1653  int ierr = MPI_Bcast(elements.get(),
1654  sizeof(T) * new_size,
1655  MPI_CHAR,
1656  shmem_roots_root_rank,
1657  shmem_roots_communicator);
1658  AssertThrowMPI(ierr);
1659  }
1660  else
1661  {
1662  // The objects to be sent around are not "trivial", and so we have
1663  // to go through the serialization/deserialization machinery. On all
1664  // but the sending process, overwrite the current state with the
1665  // vector just broadcast.
1666  //
1667  // On the root rank, this would lead to resetting the 'entries'
1668  // pointer, which would trigger the deleter which would lead to a
1669  // deadlock. So we just send the result of the broadcast() call to
1670  // nirvana on the root process and keep our current state.
1671  if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
1672  Utilities::MPI::broadcast(shmem_roots_communicator,
1673  *this,
1674  shmem_roots_root_rank);
1675  else
1676  *this = Utilities::MPI::broadcast(shmem_roots_communicator,
1677  *this,
1678  shmem_roots_root_rank);
1679  }
1680  }
1681 
1682  // We no longer need the shmem roots communicator, so get rid of it
1683  Utilities::MPI::free_communicator(shmem_roots_communicator);
1684 
1685 
1686  // **** Step 3 ****
1687  // At this point, all shmem groups have one shmem root process that has
1688  // a copy of the data. This is the point where each shmem group should
1689  // establish a shmem area to put the data into. As mentioned above,
1690  // we know that the shmem roots are rank zero in their respective
1691  // shmem_group_communicator.
1692  //
1693  // The process for all of this works as follows: While all processes in
1694  // the shmem group participate in the generation of the shmem memory window,
1695  // only the shmem root actually allocates any memory -- the rest just
1696  // allocate zero bytes of their own. We allocate space for exactly
1697  // size() elements (computed on the shmem_root that already has the data)
1698  // and add however many bytes are necessary so that we know that we can align
1699  // things to 64-byte boundaries. The worst case happens if the memory system
1700  // gives us a pointer to an address one byte past a desired alignment
1701  // boundary, and in that case aligning the memory will require us to waste the
1702  // first (align_by-1) bytes. So we have to ask for
1703  // size() * sizeof(T) + (align_by - 1)
1704  // bytes.
1705  //
1706  // Before MPI 4.0, there was no way to specify that we want memory aligned to
1707  // a certain number of bytes. This is going to come back to bite us further
1708  // down below when we try to get a properly aligned pointer to our memory
1709  // region, see the commentary there. Starting with MPI 4.0, one can set a
1710  // flag in an MPI_Info structure that requests a desired alignment, so we do
1711  // this for forward compatibility; MPI implementations ignore flags they don't
1712  // know anything about, and so setting this flag is backward compatible also
1713  // to older MPI versions.
1714  MPI_Win shmem_window;
1715  void * base_ptr;
1716  const MPI_Aint align_by = 64;
1717  const MPI_Aint alloc_size =
1718  Utilities::MPI::broadcast(shmem_group_communicator,
1719  (size() * sizeof(T) + (align_by - 1)),
1720  0);
1721 
1722  {
1723  int ierr;
1724 
1725  MPI_Info mpi_info;
1726  ierr = MPI_Info_create(&mpi_info);
1727  AssertThrowMPI(ierr);
1728  ierr = MPI_Info_set(mpi_info,
1729  "mpi_minimum_memory_alignment",
1730  std::to_string(align_by).c_str());
1731  AssertThrowMPI(ierr);
1732  ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
1733  /* disp_unit = */ 1,
1734  mpi_info,
1735  shmem_group_communicator,
1736  &base_ptr,
1737  &shmem_window);
1738  AssertThrowMPI(ierr);
1739 
1740  ierr = MPI_Info_free(&mpi_info);
1741  AssertThrowMPI(ierr);
1742  }
1743 
1744 
1745  // **** Step 4 ****
1746  // The next step is to teach all non-shmem root processes what the pointer to
1747  // the array is that the shmem-root created. MPI has a nifty way for this
1748  // given that only a single process actually allocated memory in the window:
1749  // When calling MPI_Win_shared_query, the MPI documentation says that
1750  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
1751  // the pointer, disp_unit, and size of the memory segment belonging the lowest
1752  // rank that specified size > 0. If all processes in the group attached to the
1753  // window specified size = 0, then the call returns size = 0 and a baseptr as
1754  // if MPI_ALLOC_MEM was called with size = 0."
1755  //
1756  // This will allow us to obtain the pointer to the shmem root's memory area,
1757  // which is the only one we care about. (None of the other processes have
1758  // even allocated any memory.)
1759  //
1760  // We don't need to do this on the shmem root process: This process has
1761  // already gotten its base_ptr correctly set above, and we can determine the
1762  // array size by just calling size().
1763  if (is_shmem_root == false)
1764  {
1765  int disp_unit;
1766  MPI_Aint alloc_size; // not actually used
1767  const int ierr = MPI_Win_shared_query(
1768  shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
1769  AssertThrowMPI(ierr);
1770 
1771  // Make sure we actually got a pointer, and check that the disp_unit is
1772  // equal to 1 (as set above)
1773  Assert(base_ptr != nullptr, ExcInternalError());
1774  Assert(disp_unit == 1, ExcInternalError());
1775  }
1776 
1777 
1778  // **** Step 5 ****
1779  // Now that all processes know the address of the space that is visible to
1780  // everyone, we need to figure out whether it is properly aligned and if not,
1781  // find the next aligned address.
1782  //
1783  // std::align does that, but it also modifies its last two arguments. The
1784  // documentation of that function at
1785  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but I
1786  // *think* that the following should do given that we do not use base_ptr and
1787  // available_space any further after the call to std::align.
1788  std::size_t available_space = alloc_size;
1789  void * base_ptr_backup = base_ptr;
1790  T * aligned_shmem_pointer = static_cast<T *>(
1791  std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
1792  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());
1793 
1794  // There is one step to guard against. It is *conceivable* that the base_ptr
1795  // we have previously obtained from MPI_Win_shared_query is mapped so
1796  // awkwardly into the different MPI processes' memory spaces that it is
1797  // aligned in one memory space, but not another. In that case, different
1798  // processes would align base_ptr differently, and adjust available_space
1799  // differently. We can check that by making sure that the max (or min) over
1800  // all processes is equal to every process's value. If that's not the case,
1801  // then the whole idea of aligning above is wrong and we need to rethink what
1802  // it means to align data in a shared memory space.
1803  //
1804  // One might be tempted to think that this is not how MPI implementations
1805  // actually arrange things. Alas, when developing this functionality in 2021,
1806  // this is really how at least OpenMPI ends up doing things. (This is with an
1807  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
1808  // in the MPI_Info structure above when allocating the memory window.) Indeed,
1809  // when running this code on three processes, one ends up with base_ptr values
1810  // of
1811  // base_ptr=0x7f0842f02108
1812  // base_ptr=0x7fc0a47881d0
1813  // base_ptr=0x7f64872db108
1814  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so there
1815  // is no common offset std::align could find that leads to a 64-byte
1816  // aligned memory address in all three memory spaces. That's a tremendous
1817  // nuisance and there is really nothing we can do about this other than just
1818  // fall back on the (unaligned) base_ptr in that case.
1819  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
1820  Utilities::MPI::max(available_space, shmem_group_communicator))
1821  aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);
1822 
1823 
1824  // **** Step 6 ****
1825  // If this is the shmem root process, we need to copy the data into the
1826  // shared memory space.
1827  if (is_shmem_root)
1828  {
1829  if (std::is_trivial<T>::value == true)
1830  std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
1831  else
1832  for (std::size_t i = 0; i < size(); ++i)
1833  new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
1834  }
1835 
1836  // Make sure that the shared memory host has copied the data before we try to
1837  // access it.
1838  const int ierr = MPI_Barrier(shmem_group_communicator);
1839  AssertThrowMPI(ierr);
1840 
1841  // **** Step 7 ****
1842  // Finally, we need to set the pointers of this object to what we just
1843  // learned. This also releases all memory that may have been in use
1844  // previously.
1845  //
1846  // The part that is a bit tricky is how to write the deleter of this
1847  // shared memory object. When we want to get rid of it, we need to
1848  // also release the MPI_Win object along with the shmem_group_communicator
1849  // object. That's because as long as we use the shared memory, we still need
1850  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
1851  // communicator. (The former is definitely true, the latter is not quite clear
1852  // from the MPI documentation, but seems reasonable.) So we need to have a
1853  // deleter for the pointer that ensures that upon release of the memory, we
1854  // not only call the destructor of these memory elements (but only once, on
1855  // the shmem root!) but also destroy the MPI_Win and the communicator. All of
1856  // that is encapsulated in the following call where the deleter makes copies
1857  // of the arguments in the lambda capture.
1858  elements = decltype(elements)(aligned_shmem_pointer,
1859  Deleter(this,
1860  is_shmem_root,
1861  aligned_shmem_pointer,
1862  shmem_group_communicator,
1863  shmem_window));
1864 
1865  // We then also have to set the other two pointers that define the state of
1866  // the current object. Note that the new buffer size is exactly as large as
1867  // necessary, i.e., can store size() elements, regardless of the number of
1868  // allocated elements in the original objects.
1869  used_elements_end = elements.get() + new_size;
1870  allocated_elements_end = used_elements_end;
1871 
1872  // **** Consistency check ****
1873  // At this point, each process should have a copy of the data.
1874  // Verify this in some sort of round-about way
1875 # ifdef DEBUG
1876  const std::vector<char> packed_data = Utilities::pack(*this);
1877  const int hash =
1878  std::accumulate(packed_data.begin(), packed_data.end(), int(0));
1879  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
1880 # endif
1881 
1882 # else
1883  // No MPI -> nothing to replicate
1884  (void)communicator;
1885  (void)root_process;
1886 # endif
1887 }
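// A usage sketch for the function above (hypothetical driver code; it
// assumes deal.II was configured with MPI and runs between MPI_Init and
// MPI_Finalize):
//
// @code
//   // Rank 0 fills the vector; afterwards every rank sees the same data,
//   // with all ranks on one node reading from a single shared-memory
//   // window instead of each holding a private copy.
//   dealii::AlignedVector<double> table;
//   if (dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     table.resize(1000000, 3.14);
//   table.replicate_across_communicator(MPI_COMM_WORLD, /*root_process=*/0);
//   // On all ranks: table.size() == 1000000 and table[i] == 3.14.
// @endcode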
1888 
1889 
1890 
1891 template <class T>
1892 inline void
1893 AlignedVector<T>::swap(AlignedVector<T> &vec)
1894 {
1895  // Swap the data in the 'elements' objects. Then also make sure that
1896  // their respective deleter objects point to the right place.
1897  std::swap(elements, vec.elements);
1898  elements.get_deleter().reset_owning_object(this);
1899  vec.elements.get_deleter().reset_owning_object(&vec);
1900 
1901  // Now also swap the remaining members.
1902  std::swap(used_elements_end, vec.used_elements_end);
1903  std::swap(allocated_elements_end, vec.allocated_elements_end);
1904 }
1905 
1906 
1907 
1908 template <class T>
1909 inline bool
1910 AlignedVector<T>::empty() const
1911 {
1912  return used_elements_end == elements.get();
1913 }
1914 
1915 
1916 
1917 template <class T>
1918 inline typename AlignedVector<T>::size_type
1919 AlignedVector<T>::size() const
1920 {
1921  return used_elements_end - elements.get();
1922 }
1923 
1924 
1925 
1926 template <class T>
1927 inline typename AlignedVector<T>::size_type
1928 AlignedVector<T>::capacity() const
1929 {
1930  return allocated_elements_end - elements.get();
1931 }
1932 
1933 
1934 
1935 template <class T>
1936 inline typename AlignedVector<T>::reference
1937 AlignedVector<T>::operator[](const size_type index)
1938 {
1939  AssertIndexRange(index, size());
1940  return elements[index];
1941 }
1942 
1943 
1944 
1945 template <class T>
1946 inline typename AlignedVector<T>::const_reference
1947 AlignedVector<T>::operator[](const size_type index) const
1948 {
1949  AssertIndexRange(index, size());
1950  return elements[index];
1951 }
1952 
1953 
1954 
1955 template <typename T>
1956 inline typename AlignedVector<T>::pointer
1957 AlignedVector<T>::data()
1958 {
1959  return elements.get();
1960 }
1961 
1962 
1963 
1964 template <typename T>
1965 inline typename AlignedVector<T>::const_pointer
1966 AlignedVector<T>::data() const
1967 {
1968  return elements.get();
1969 }
1970 
1971 
1972 
1973 template <class T>
1974 inline typename AlignedVector<T>::iterator
1975 AlignedVector<T>::begin()
1976 {
1977  return elements.get();
1978 }
1979 
1980 
1981 
1982 template <class T>
1983 inline typename AlignedVector<T>::iterator
1984 AlignedVector<T>::end()
1985 {
1986  return used_elements_end;
1987 }
1988 
1989 
1990 
1991 template <class T>
1992 inline typename AlignedVector<T>::const_iterator
1993 AlignedVector<T>::begin() const
1994 {
1995  return elements.get();
1996 }
1997 
1998 
1999 
2000 template <class T>
2001 inline typename AlignedVector<T>::const_iterator
2002 AlignedVector<T>::end() const
2003 {
2004  return used_elements_end;
2005 }
2006 
2007 
2008 
2009 template <class T>
2010 template <class Archive>
2011 inline void
2012 AlignedVector<T>::save(Archive &ar, const unsigned int) const
2013 {
2014  size_type vec_size = size();
2015  ar & vec_size;
2016  if (vec_size > 0)
2017  ar &boost::serialization::make_array(elements.get(), vec_size);
2018 }
2019 
2020 
2021 
2022 template <class T>
2023 template <class Archive>
2024 inline void
2025 AlignedVector<T>::load(Archive &ar, const unsigned int)
2026 {
2027  size_type vec_size = 0;
2028  ar & vec_size;
2029 
2030  if (vec_size > 0)
2031  {
2032  reserve(vec_size);
2033  ar &boost::serialization::make_array(elements.get(), vec_size);
2034  used_elements_end = elements.get() + vec_size;
2035  }
2036 }
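// The save()/load() pair plugs into the usual BOOST serialization workflow
// (a sketch; the archive types and the file name are illustrative choices):
//
// @code
//   #include <boost/archive/text_iarchive.hpp>
//   #include <boost/archive/text_oarchive.hpp>
//
//   #include <fstream>
//
//   dealii::AlignedVector<double> v(16, 1.0);
//   {
//     std::ofstream                 f("vector.txt");
//     boost::archive::text_oarchive oa(f);
//     oa << v; // dispatches to save() via BOOST_SERIALIZATION_SPLIT_MEMBER
//   }
//
//   dealii::AlignedVector<double> w;
//   {
//     std::ifstream                 f("vector.txt");
//     boost::archive::text_iarchive ia(f);
//     ia >> w; // dispatches to load(); afterwards w == v
//   }
// @endcode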
2037 
2038 
2039 
2040 template <class T>
2041 inline typename AlignedVector<T>::size_type
2042 AlignedVector<T>::memory_consumption() const
2043 {
2044  size_type memory = sizeof(*this);
2045  for (const T *t = elements.get(); t != used_elements_end; ++t)
2046  memory += MemoryConsumption::memory_consumption(*t);
2047  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
2048  return memory;
2049 }
2050 
2051 
2052 #endif // ifndef DOXYGEN
2053 
2054 
2060 template <class T>
2061 bool
2062 operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
2063 {
2064  if (lhs.size() != rhs.size())
2065  return false;
2066  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
2067  rit = rhs.begin();
2068  lit != lhs.end();
2069  ++lit, ++rit)
2070  if (*lit != *rit)
2071  return false;
2072  return true;
2073 }
2074 
2075 
2076 
2082 template <class T>
2083 bool
2084 operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
2085 {
2086  return !(operator==(lhs, rhs));
2087 }
2088 
2089 
2090 DEAL_II_NAMESPACE_CLOSE
2091 
2092 #endif