// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN


/**
 * This class is a replacement for std::vector to be used in combination
 * with VectorizedArray and derived data types: it allocates memory aligned
 * to 64-byte boundaries, as required for vectorized access, and it runs
 * operations on large chunks of data in parallel. In other respects it
 * provides a subset of the interface of std::vector.
 */
template <class T>
class AlignedVector
{
public:
  /**
   * Declare standard types used in all containers.
   */
  using value_type = T;
  using pointer = value_type *;
  using const_pointer = const value_type *;
  using iterator = value_type *;
  using const_iterator = const value_type *;
  using reference = value_type &;
  using const_reference = const value_type &;
  using size_type = std::size_t;

  /**
   * Empty constructor. Sets the vector size to zero.
   */
  AlignedVector();

  /**
   * Set the vector size to the given size and initialize all elements with
   * T().
   */
  explicit AlignedVector(const size_type size, const T &init = T());

  /**
   * Destructor.
   */
  ~AlignedVector() = default;

  /**
   * Copy constructor.
   */
  AlignedVector(const AlignedVector<T> &vec);

  /**
   * Move constructor. Create a new aligned vector by stealing the contents
   * of @p vec.
   */
  AlignedVector(AlignedVector<T> &&vec) noexcept;

  /**
   * Assignment to the input vector @p vec.
   */
  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  /**
   * Move assignment operator.
   */
  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  /**
   * Change the size of the vector. If the new size is larger than the
   * previous one, newly added elements of trivial type are left
   * uninitialized (which is what makes this function faster than resize());
   * elements of non-trivial type are still default-constructed.
   */
  void
  resize_fast(const size_type new_size);

  /**
   * Change the size of the vector. Newly added elements are
   * default-initialized, i.e., set to zero for trivial types.
   */
  void
  resize(const size_type new_size);

  /**
   * Change the size of the vector. Newly added elements are set to
   * @p init.
   */
  void
  resize(const size_type new_size, const T &init);

  /**
   * Reserve memory space for @p new_allocated_size elements. When the
   * currently allocated memory is too small, the allocation grows at least
   * by a factor of two to amortize the cost of repeated calls.
   */
  void
  reserve(const size_type new_allocated_size);

  /**
   * Release memory that is no longer used.
   */
  void
  shrink_to_fit();

  /**
   * Release all previously allocated memory and leave the vector in a state
   * equivalent to the one after default construction.
   */
  void
  clear();

  /**
   * Insert an element at the end of the vector, increasing the vector size
   * by one and, if necessary, growing the allocation.
   */
  void
  push_back(const T in_data);

  /**
   * Return the last element of the vector (read and write access).
   */
  reference
  back();

  /**
   * Return the last element of the vector (read-only access).
   */
  const_reference
  back() const;

  /**
   * Insert several elements at the end of the vector.
   */
  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  /**
   * Fill the vector with size() copies of a default-constructed object,
   * without changing the vector size.
   */
  void
  fill();

  /**
   * Fill the vector with size() copies of the given element, without
   * changing the vector size.
   */
  void
  fill(const T &element);

  /**
   * Replicate the state of the current object on the given @p root_process
   * across all processes of the MPI @p communicator. Processes that can
   * share memory (i.e., that are located on the same machine) afterward
   * access a single, shared copy of the data through an MPI shared-memory
   * window rather than each holding their own copy. In debug mode, the
   * object may no longer be changed after this call.
   */
  void
  replicate_across_communicator(const MPI_Comm     communicator,
                                const unsigned int root_process);

  /**
   * Swap the contents of the given vector and the calling vector.
   */
  void
  swap(AlignedVector<T> &vec);

  /**
   * Return whether the vector is empty, i.e., its size is zero.
   */
  bool
  empty() const;

  /**
   * Return the size of the vector.
   */
  size_type
  size() const;

  /**
   * Return the capacity of the vector, i.e., the size this vector can hold
   * without reallocation.
   */
  size_type
  capacity() const;

  /**
   * Read-write access to entry @p index in the vector.
   */
  reference
  operator[](const size_type index);

  /**
   * Read-only access to entry @p index in the vector.
   */
  const_reference
  operator[](const size_type index) const;

  /**
   * Return a pointer to the underlying data buffer.
   */
  pointer
  data();

  /**
   * Return a const pointer to the underlying data buffer.
   */
  const_pointer
  data() const;

  /**
   * Return a read and write pointer to the beginning of the data array.
   */
  iterator
  begin();

  /**
   * Return a read and write pointer to the end of the data array.
   */
  iterator
  end();

  /**
   * Return a read-only pointer to the beginning of the data array.
   */
  const_iterator
  begin() const;

  /**
   * Return a read-only pointer to the end of the data array.
   */
  const_iterator
  end() const;

  /**
   * Return the memory consumption of this object in bytes.
   */
  size_type
  memory_consumption() const;

  /**
   * Write the data of this object to a stream for the purpose of
   * serialization using the BOOST serialization library.
   */
  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  /**
   * Read the data of this object from a stream for the purpose of
   * serialization using the BOOST serialization library.
   */
  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  /**
   * Write and read the data of this object from a stream for the purpose
   * of serialization using the BOOST serialization library.
   */
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() methods that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

  /**
   * Exception thrown when one tries to change the vector after a call to
   * replicate_across_communicator().
   */
  DeclExceptionMsg(ExcAlignedVectorChangeAfterReplication,
                   "Changing the vector after a call to "
                   "replicate_across_communicator() is not allowed.");

private:
  /**
   * Allocate a new array aligned to 64 bytes with room for @p new_size
   * elements, move the first @p old_size elements of the current array into
   * it, and install the new array (with its deleter) as the storage of this
   * object.
   */
  void
  allocate_and_move(const size_t old_size,
                    const size_t new_size,
                    const size_t new_allocated_size);

  /**
   * Deleter object for the unique_ptr that stores the elements. Because
   * releasing the memory does not by itself call destructors, the deleter
   * needs access to the owning AlignedVector object to know how many live
   * elements it has to destroy (in reverse order of construction) before
   * freeing the memory. If the memory is backed by an MPI shared-memory
   * window, releasing it additionally requires freeing the window and the
   * associated communicator; that special action is encapsulated in an
   * object of a class derived from DeleterActionBase.
   */
  class Deleter
  {
  public:
    /**
     * Constructor for the default action: memory was allocated via
     * posix_memalign() and must be released via std::free() after the
     * live elements have been destroyed.
     */
    Deleter(AlignedVector<T> *owning_object);

#ifdef DEAL_II_WITH_MPI
    /**
     * Constructor for the case that the memory is backed by an MPI
     * shared-memory window.
     */
    Deleter(AlignedVector<T> *owning_object,
            const bool is_shmem_root,
            T *aligned_shmem_pointer,
            MPI_Comm shmem_group_communicator,
            MPI_Win shmem_window);
#endif

    /**
     * The operator called by unique_ptr to release the memory.
     */
    void
    operator()(T *ptr);

    /**
     * Reset the pointer to the owning AlignedVector object. This is
     * necessary whenever that object is moved or swapped, because the
     * deleter travels along with the unique_ptr it belongs to.
     */
    void
    reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);

  private:
    /**
     * Base class for the action to be performed when releasing memory.
     */
    class DeleterActionBase
    {
    public:
      /**
       * Destructor, made virtual to allow destroying derived objects
       * through pointers to the base class.
       */
      virtual ~DeleterActionBase() = default;

      /**
       * The function that performs the deletion of the array.
       */
      virtual void
      delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
    };

#ifdef DEAL_II_WITH_MPI

    /**
     * A deleter action for arrays that live in MPI shared-memory windows.
     */
    class MPISharedMemDeleterAction : public DeleterActionBase
    {
    public:
      /**
       * Constructor. Store the pieces of information necessary to release
       * the MPI window and communicator at deletion time.
       */
      MPISharedMemDeleterAction(const bool is_shmem_root,
                                T *aligned_shmem_pointer,
                                MPI_Comm shmem_group_communicator,
                                MPI_Win shmem_window);

      /**
       * The function that performs the deletion of the array.
       */
      virtual void
      delete_array(const AlignedVector<T> *aligned_vector, T *ptr);

    private:
      /**
       * Variables necessary to release the MPI window and communicator.
       */
      const bool is_shmem_root;
      T *aligned_shmem_pointer;
      MPI_Comm shmem_group_communicator;
      MPI_Win shmem_window;
    };
#endif

    /**
     * A pointer to the object that encodes the action to be performed upon
     * deletion; a nullptr encodes the default std::free() action.
     */
    std::unique_ptr<DeleterActionBase> deleter_action_object;

    /**
     * A pointer to the AlignedVector object that owns this deleter.
     */
    const AlignedVector<T> *owning_aligned_vector;
  };

  /**
   * Pointer to the actual data array, together with the deleter that knows
   * how to destroy the live elements and release the memory.
   */
  std::unique_ptr<T[], Deleter> elements;

  /**
   * Pointer to one past the last valid value.
   */
  T *used_elements_end;

  /**
   * Pointer to one past the end of the allocated memory.
   */
  T *allocated_elements_end;

#ifdef DEBUG
  /**
   * Flag recording whether replicate_across_communicator() has been
   * called; the vector may no longer be changed afterward.
   */
  bool replicated_across_communicator;
#endif
};
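
// For illustration (not part of the interface above): typical use of this
// class mirrors std::vector. A minimal sketch:
//
// @code
//   AlignedVector<double> v(8, 1.0); // eight elements, all equal to 1.0
//   v.push_back(2.0);                // grows the allocation as needed
//   v.resize(16);                    // new elements are default-initialized
//
//   // The storage is aligned to 64-byte boundaries:
//   Assert(reinterpret_cast<std::uintptr_t>(v.data()) % 64 == 0,
//          ExcInternalError());
// @endcode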


// ------------------------------- inline functions --------------------------


/**
 * This namespace defines the copy and set functions used in AlignedVector.
 * These functions operate in parallel when there are enough elements in the
 * vector.
 */
namespace internal
{
  /**
   * A class that issues the actual copy commands in AlignedVector. It
   * copy-constructs objects of type T at the destination from a source
   * range, in parallel if there are enough elements to make that
   * worthwhile.
   */
  template <typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial. Copies the data from the
     * half-open interval between @p source_begin and @p source_end to the
     * array starting at @p destination.
     */
    AlignedVectorCopyConstruct(const T *const source_begin,
                               const T *const source_end,
                               T *const       destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This method copies elements from the source to the destination given
     * in the constructor on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // for classes with trivial assignment, we can use memcpy. Cast the
      // elements to (void*) to silence the compiler warning for virtual
      // classes (they will never end up here because they are non-trivial).
      if (std::is_trivial_v<T> == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<const void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(source_[i]);
    }

  private:
    const T *const source_;
    T *const       destination_;
  };
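
  // For illustration: the class above is a function object whose entire
  // work happens in the constructor. A hypothetical caller that wants to
  // copy-construct `n` elements from `src` into raw memory `dst` would
  // simply write
  //
  // @code
  //   dealii::internal::AlignedVectorCopyConstruct<T>(src, src + n, dst);
  // @endcode
  //
  // and the constructor decides between a serial loop and a parallel one
  // based on minimum_parallel_grain_size.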


  /**
   * Like AlignedVectorCopyConstruct, but move-constructs objects of type T
   * at the destination from the source range. The original objects remain
   * alive and are destroyed elsewhere.
   */
  template <typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial. Moves the data from the
     * half-open interval between @p source_begin and @p source_end to the
     * array starting at @p destination.
     */
    AlignedVectorMoveConstruct(T *const source_begin,
                               T *const source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This method moves elements from the source to the destination given in
     * the constructor on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy. Cast the elements
      // to (void*) to silence the compiler warning for virtual classes
      // (they will never end up here because they are non-trivial).
      if (std::is_trivial_v<T> == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(source_[i]));
    }

  private:
    T *const source_;
    T *const destination_;
  };


  /**
   * A class that sets a range of memory to a constant value. If
   * initialize_memory is true, elements are copy-constructed in place
   * (needed when the memory is freshly allocated); otherwise, the copy
   * assignment operator is used on already-initialized elements.
   */
  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial.
     */
    AlignedVectorInitialize(const std::size_t size,
                            const T          &element,
                            T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if (std::is_trivial_v<T> == true &&
          std::is_same_v<T, long double> == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          // cast element to (void*) to silence compiler warning for virtual
          // classes (they will never arrive here because they are
          // non-trivial).
          if (std::memcmp(zero,
                          static_cast<const void *>(&element),
                          sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This sets elements on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of a zero value can use memset.
      // Cast the elements to (void*) to silence the compiler warning for
      // virtual classes (they will never end up here because they are
      // non-trivial).
      if (std::is_trivial_v<T> == true && trivial_element)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        copy_construct_or_assign(begin,
                                 end,
                                 std::bool_constant<initialize_memory>());
    }

  private:
    const T   &element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::bool_constant<false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::bool_constant<true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };



  /**
   * Like AlignedVectorInitialize, but default-initializes a range of
   * memory. If initialize_memory is true, elements are constructed in place
   * via placement new; otherwise, already-initialized elements are assigned
   * a default-constructed T().
   */
  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    /**
     * Constructor. Issues a parallel call if there are sufficiently many
     * elements, otherwise works in serial.
     */
    AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    /**
     * This initializes elements on a subrange given by two integers.
     */
    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of a zero value can use memset.
      // Cast the elements to (void*) to silence the compiler warning for
      // virtual classes (they will never end up here because they are
      // non-trivial).
      if (std::is_trivial_v<T> == true)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        default_construct_or_assign(begin,
                                    end,
                                    std::bool_constant<initialize_memory>());
    }

  private:
    mutable T *destination_;

    // copy assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::bool_constant<false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // copy constructor (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::bool_constant<true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal


#ifndef DOXYGEN


template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
  : deleter_action_object(nullptr) // encode default action by using a nullptr
  , owning_aligned_vector(owning_object)
{}


# ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
                                          const bool is_shmem_root,
                                          T *aligned_shmem_pointer,
                                          MPI_Comm shmem_group_communicator,
                                          MPI_Win shmem_window)
  : deleter_action_object(
      std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
                                                  aligned_shmem_pointer,
                                                  shmem_group_communicator,
                                                  shmem_window))
  , owning_aligned_vector(owning_object)
{}
# endif


template <typename T>
inline void
AlignedVector<T>::Deleter::operator()(T *ptr)
{
  // If no special action has been registered (i.e., if the action pointer is
  // nullptr), then just perform the default action right here.
  if (deleter_action_object == nullptr)
    {
      if (ptr != nullptr)
        {
          Assert(owning_aligned_vector->used_elements_end != nullptr,
                 ExcInternalError());

          if (std::is_trivial_v<T> == false)
            for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
                 --p)
              p->~T();

          std::free(ptr);
        }
    }
  else
    // Otherwise, let the action object do what is necessary
    deleter_action_object->delete_array(owning_aligned_vector, ptr);
}


template <typename T>
inline void
AlignedVector<T>::Deleter::reset_owning_object(
  const AlignedVector<T> *new_aligned_vector_ptr)
{
  owning_aligned_vector = new_aligned_vector_ptr;
}


# ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
  MPISharedMemDeleterAction(const bool is_shmem_root,
                            T *aligned_shmem_pointer,
                            MPI_Comm shmem_group_communicator,
                            MPI_Win shmem_window)
  : is_shmem_root(is_shmem_root)
  , aligned_shmem_pointer(aligned_shmem_pointer)
  , shmem_group_communicator(shmem_group_communicator)
  , shmem_window(shmem_window)
{}


template <typename T>
inline void
AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
  const AlignedVector<T> *aligned_vector,
  T                      *ptr)
{
  (void)ptr;
  // It would be nice to assert that aligned_vector->elements.get() equals ptr,
  // but it is not guaranteed to work: clang, for example, sets elements.get()
  // to nullptr and then calls the deleter on a previously made copy. Hence we
  // must assume here that elements.get() (which is managed by the unique_ptr)
  // may be nullptr at this point.
  //
  // used_elements_end is a member variable of AlignedVector (i.e., we control
  // it, not unique_ptr) so it is still set to its correct value.

  if (is_shmem_root)
    if (std::is_trivial_v<T> == false)
      for (T *p = aligned_vector->used_elements_end - 1; p >= ptr; --p)
        p->~T();

  int ierr;
  ierr = MPI_Win_free(&shmem_window);
  AssertThrowMPI(ierr);

  Utilities::MPI::free_communicator(shmem_group_communicator);
}

# endif


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
# ifdef DEBUG
  , replicated_across_communicator(false)
# endif
{}



template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
# ifdef DEBUG
  , replicated_across_communicator(false)
# endif
{
  if (size > 0)
    resize(size, init);
}


template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
# ifdef DEBUG
  , replicated_across_communicator(false)
# endif
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());
}


template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector<T>()
{
  // forward to the move operator
  *this = std::move(vec);
}


template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  const size_type new_size = vec.used_elements_end - vec.elements.get();

  // First throw away everything and re-allocate memory, but leave that
  // memory uninitialized for now:
  resize(0);
  reserve(new_size);

  // Then copy the elements over by using the copy constructor on these
  // elements:
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());

  // Finally adjust the pointer to the end of the elements that are used:
  used_elements_end = elements.get() + new_size;

  return *this;
}


template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object
  // references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). The way this is implemented is that we have to move the
  // deleter as well, and then reset the pointer inside the deleter
  // that references the outer object.
  elements = std::move(vec.elements);
  elements.get_deleter().reset_owning_object(this);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}
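
// The need for reset_owning_object() above can be seen in a standalone
// sketch (with a simplified stand-in for the Deleter class, not the real
// one): when a std::unique_ptr with a stateful deleter is moved, the
// deleter is moved along with it and would otherwise keep pointing at the
// moved-from owner.
//
// @code
//   struct D
//   {
//     void *owner; // points back to the object holding the unique_ptr
//     void  operator()(int *p) const { std::free(p); }
//   };
//
//   std::unique_ptr<int, D> a(static_cast<int *>(std::malloc(sizeof(int))),
//                             D{/*owner=*/nullptr});
//   std::unique_ptr<int, D> b = std::move(a); // D travels along with 'a'
//   b.get_deleter().owner = &b;               // must be re-pointed by hand
// @endcode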


template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial_v<T> == false)
        dealii::internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}


template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      dealii::internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}


template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {
    } // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial_v<T> == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      dealii::internal::AlignedVectorInitialize<T, true>(
        new_size - old_size, init, elements.get() + old_size);
    }
}


template <class T>
inline void
AlignedVector<T>::allocate_and_move(const size_t old_size,
                                    const size_t new_size,
                                    const size_t new_allocated_size)
{
  // allocate and align along 64-byte boundaries (this is enough for all
  // levels of vectorization currently supported by deal.II)
  T *new_data_ptr;
  Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_data_ptr),
                                    64,
                                    new_size * sizeof(T));

  // Now create a deleter that encodes what should happen when the object is
  // released: We need to destroy the objects that are currently alive (in
  // reverse order), and then release the memory. Note that we catch the
  // 'this' pointer because the number of elements currently alive might
  // change over time.
  Deleter deleter(this);

  // copy whatever elements we need to retain
  if (new_allocated_size > 0)
    dealii::internal::AlignedVectorMoveConstruct<T>(
      elements.get(), elements.get() + old_size, new_data_ptr);

  // Now reset all the member variables of the current object
  // based on the allocation above. Assigning to a std::unique_ptr
  // object also releases the previously pointed-to memory.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how
  // Deleter::operator() loops over the to-be-destroyed elements).
  elements               = decltype(elements)(new_data_ptr, std::move(deleter));
  used_elements_end      = elements.get() + old_size;
  allocated_elements_end = elements.get() + new_size;
}
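
// For illustration, the alignment produced by the posix_memalign() call
// above can be verified directly. This is only a sketch, not part of the
// implementation:
//
// @code
//   void *p;
//   Utilities::System::posix_memalign(&p, 64, 1000 * sizeof(double));
//   Assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0, ExcInternalError());
//   std::free(p);
// @endcode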


template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      allocate_and_move(old_size, new_size, new_allocated_size);
    }
  else if (new_allocated_size == 0)
    clear();
  else // new_allocated_size <= old_allocated_size
    {
    } // nothing to do here
}
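
// The factor-of-two growth above is what makes repeated push_back() calls
// cheap in an amortized sense: growing from 1 to n elements passes through
// allocations of size 1, 2, 4, ..., and the total number of elements moved
// is at most 1 + 2 + 4 + ... + n < 2n, i.e., O(1) amortized work per
// appended element. A small sketch of the count for n = 1024:
//
// @code
//   std::size_t copies = 0;
//   for (std::size_t s = 1; s < 1024; s *= 2)
//     copies += s; // elements moved when growing from s to 2*s
//   // copies == 1023 < 2 * 1024
// @endcode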


template <class T>
inline void
AlignedVector<T>::shrink_to_fit()
{
# ifdef DEBUG
  Assert(replicated_across_communicator == false,
         ExcAlignedVectorChangeAfterReplication());
# endif
  const size_type used_size      = used_elements_end - elements.get();
  const size_type allocated_size = allocated_elements_end - elements.get();
  if (allocated_size > used_size)
    allocate_and_move(used_size, used_size, used_size);
}


template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements a few lines above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}
1500 
1501 
1502 
1503 template <class T>
1504 inline void
1505 AlignedVector<T>::push_back(const T in_data)
1506 {
1507  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
1508  if (used_elements_end == allocated_elements_end)
1509  reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
1510  if (std::is_trivial_v<T> == false)
1511  new (used_elements_end++) T(in_data);
1512  else
1513  *used_elements_end++ = in_data;
1514 }


template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}


template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}


template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const size_type old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial_v<T> == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}
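
// A short usage sketch for insert_back() (assuming the usual standard
// headers): any pair of forward iterators over elements convertible to T
// works, for example those of a std::vector:
//
// @code
//   std::vector<double>   source{1., 2., 3.};
//   AlignedVector<double> v;
//   v.insert_back(source.begin(), source.end());
//   // v.size() == 3
// @endcode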


template <class T>
inline void
AlignedVector<T>::fill()
{
  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                             elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  dealii::internal::AlignedVectorInitialize<T, false>(size(),
                                                      value,
                                                      elements.get());
}


template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm     communicator,
                                                const unsigned int root_process)
{
# ifdef DEAL_II_WITH_MPI

  // Let the root process broadcast its size. If it is zero, then all
  // processes just clear() their memory and reset themselves to a non-shared
  // empty object -- there is no point to run through complicated MPI
  // calls if the end result is an empty array. Otherwise, we continue on.
  const size_type new_size =
    Utilities::MPI::broadcast(communicator, size(), root_process);
  if (new_size == 0)
    {
      clear();
      return;
    }


  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation this
  // function was developed with does not seem to do that. (Bug report
  // is here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int      ierr = MPI_Comm_split_type(communicator,
                                        MPI_COMM_TYPE_SHARED,
                                        /* key */ 0,
                                        MPI_INFO_NULL,
                                        &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    Utilities::MPI::free_communicator(shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. By construction above, rank 0 of shmem_roots_communicator
  // is the original root process, which has all of the data.
  if (is_shmem_root)
    {
      if (std::is_trivial_v<T>)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery
          // of Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'elements'
          // pointer, which would trigger the deleter and would lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  Utilities::MPI::free_communicator(shmem_roots_communicator);


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots are rank 0 in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste
  // the first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they
  // don't know anything about, and so setting this flag is backward compatible
  // also to older MPI versions.
  MPI_Win        shmem_window;
  void          *base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   /* disp_unit = */ 1,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }
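
  // To illustrate the size computation above with concrete (made-up)
  // numbers: for size() == 100 elements of type double (sizeof(T) == 8)
  // and align_by == 64, we request
  //   100 * 8 + (64 - 1) = 863
  // bytes. If the window's base pointer happens to sit one byte past a
  // 64-byte boundary, std::align below skips the first 63 bytes, and the
  // remaining 800 bytes still hold all 100 elements.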


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the
  // lowest rank that specified size > 0. If all processes in the group
  // attached to the window specified size = 0, then the call returns size = 0
  // and a baseptr as if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.)
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  if (is_shmem_root == false)
    {
      int       disp_unit;
      MPI_Aint  alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and check that the disp_unit is
      // equal to 1 (as set above)
      Assert(base_ptr != nullptr, ExcInternalError());
      Assert(disp_unit == 1, ExcInternalError());
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but
  // I *think* that the following should do given that we do not use base_ptr
  // and available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void       *base_ptr_backup       = base_ptr;
  T          *aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink what
  // it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in 2021,
  // this is really how at least OpenMPI ends up doing things. (This is with an
  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
  // in the MPI_Info structure above when allocating the memory window.)
  // Indeed, when running this code on three processes, one ends up with
  // base_ptr values of
  //   base_ptr=0x7f0842f02108
  //   base_ptr=0x7fc0a47881d0
  //   base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so
  // there is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial_v<T> == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try
  // to access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite
  // clear from the MPI documentation, but seems reasonable.) So we need to
  // have a deleter for the pointer that ensures that upon release of the
  // memory, we not only call the destructor of these memory elements (but
  // only once, on the shmem root!) but also destroy the MPI_Win and the
  // communicator. All of that is encapsulated in the following call where
  // the Deleter object stores copies of the arguments it is given.
  elements = decltype(elements)(aligned_shmem_pointer,
                                Deleter(this,
                                        is_shmem_root,
                                        aligned_shmem_pointer,
                                        shmem_group_communicator,
                                        shmem_window));

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + new_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
# ifdef DEBUG
  replicated_across_communicator      = true;
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
# endif

# else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
# endif
}
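
// A usage sketch for the function above (assuming deal.II was configured
// with MPI and MPI has been initialized, e.g., via
// Utilities::MPI::MPI_InitFinalize in main()):
//
// @code
//   const MPI_Comm     comm = MPI_COMM_WORLD;
//   const unsigned int root = 0;
//
//   AlignedVector<double> v;
//   if (Utilities::MPI::this_mpi_process(comm) == root)
//     v.resize(1000, 3.14); // only the root needs to fill the data
//
//   v.replicate_across_communicator(comm, root);
//   // Now all processes can read v[0..999]; processes on the same machine
//   // share a single copy of the data.
// @endcode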


template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec)
{
  // Swap the data in the 'elements' objects. Then also make sure that
  // their respective deleter objects point to the right place.
  std::swap(elements, vec.elements);
  elements.get_deleter().reset_owning_object(this);
  vec.elements.get_deleter().reset_owning_object(&vec);

  // Now also swap the remaining members.
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar       &vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar       &vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}
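
// A serialization sketch for the save()/load() functions above, using
// BOOST text archives (assuming the corresponding BOOST headers and
// <sstream> are included):
//
// @code
//   AlignedVector<double> v(4, 2.5);
//
//   std::ostringstream            out;
//   boost::archive::text_oarchive oa(out);
//   oa << v; // calls save()
//
//   AlignedVector<double>         w;
//   std::istringstream            in(out.str());
//   boost::archive::text_iarchive ia(in);
//   ia >> w; // calls load(); now w == v
// @endcode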


template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += dealii::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


/**
 * Relational operator == for AlignedVector.
 *
 * @relatesalso AlignedVector
 */
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



/**
 * Relational operator != for AlignedVector.
 *
 * @relatesalso AlignedVector
 */
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif