aligned_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN
template <class T>
class AlignedVector
{
public:
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  AlignedVector();

  explicit AlignedVector(const size_type size, const T &init = T());

  ~AlignedVector() = default;

  AlignedVector(const AlignedVector<T> &vec);

  AlignedVector(AlignedVector<T> &&vec) noexcept;

  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  void
  resize_fast(const size_type new_size);

  void
  resize(const size_type new_size);

  void
  resize(const size_type new_size, const T &init);

  void
  reserve(const size_type new_allocated_size);

  void
  clear();

  void
  push_back(const T in_data);

  reference
  back();

  const_reference
  back() const;

  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  void
  fill();

  void
  fill(const T &element);

  void
  replicate_across_communicator(const MPI_Comm &   communicator,
                                const unsigned int root_process);

  void
  swap(AlignedVector<T> &vec);

  bool
  empty() const;

  size_type
  size() const;

  size_type
  capacity() const;

  reference
  operator[](const size_type index);

  const_reference
  operator[](const size_type index) const;

  pointer
  data();

  const_pointer
  data() const;

  iterator
  begin();

  iterator
  end();

  const_iterator
  begin() const;

  const_iterator
  end() const;

  size_type
  memory_consumption() const;

  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() methods that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

private:
  class Deleter
  {
  public:
    Deleter(AlignedVector<T> *owning_object);

#ifdef DEAL_II_WITH_MPI
    Deleter(AlignedVector<T> *owning_object,
            const bool        is_shmem_root,
            T *               aligned_shmem_pointer,
            MPI_Comm          shmem_group_communicator,
            MPI_Win           shmem_window);
#endif

    void
    operator()(T *ptr);

    void
    reset_owning_object(const AlignedVector<T> *new_aligned_vector_ptr);

  private:
    class DeleterActionBase
    {
    public:
      virtual ~DeleterActionBase() = default;

      virtual void
      delete_array(const AlignedVector<T> *owning_aligned_vector, T *ptr) = 0;
    };

#ifdef DEAL_II_WITH_MPI

    class MPISharedMemDeleterAction : public DeleterActionBase
    {
    public:
      MPISharedMemDeleterAction(const bool is_shmem_root,
                                T *        aligned_shmem_pointer,
                                MPI_Comm   shmem_group_communicator,
                                MPI_Win    shmem_window);

      virtual void
      delete_array(const AlignedVector<T> *aligned_vector, T *ptr);

    private:
      const bool is_shmem_root;
      T *        aligned_shmem_pointer;
      MPI_Comm   shmem_group_communicator;
      MPI_Win    shmem_window;
    };
#endif

    std::unique_ptr<DeleterActionBase> deleter_action_object;

    const AlignedVector<T> *owning_aligned_vector;
  };

  std::unique_ptr<T[], Deleter> elements;

  T *used_elements_end;

  T *allocated_elements_end;
};
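
// An illustrative sketch of how this container is meant to be used (the
// do_something() call below is a placeholder for user code):
//
//   AlignedVector<double> v(10, 1.0); // ten elements, all set to 1.0
//   v.push_back(2.0);                 // grows the allocation as needed
//   v[3] = 42.;                       // element access, checked in debug mode
//   for (const double d : v)          // begin()/end() support range-for loops
//     do_something(d);
//
// The memory returned by data() is aligned to 64-byte boundaries (see
// reserve() below), which is what makes this container suitable for
// vectorized data types such as deal.II's VectorizedArray.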

// ------------------------------- inline functions --------------------------

namespace internal
{
  template <typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorCopyConstruct(const T *const source_begin,
                               const T *const source_end,
                               T *const       destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopyConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy. Cast the elements
      // to (void*) to silence a compiler warning for virtual classes (they
      // will never end up here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<const void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(source_[i]);
    }

  private:
    const T *const source_;
    T *const       destination_;
  };

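  // A note on the grain size used by all of these helper classes:
  // minimum_parallel_grain_size = 160000 / sizeof(T) + 1 means that work is
  // only split into parallel tasks once a range exceeds roughly 160 kB. For
  // T = double (8 bytes), for example, this threshold is 20001 elements;
  // anything smaller runs serially in apply_to_subrange() on the calling
  // thread, avoiding the overhead of the tasking system.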

  template <typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorMoveConstruct(T *const source_begin,
                               T *const source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMoveConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy. Cast the elements
      // to (void*) to silence a compiler warning for virtual classes (they
      // will never end up here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(source_[i]));
    }

  private:
    T *const source_;
    T *const destination_;
  };


  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorInitialize(const std::size_t size,
                            const T &         element,
                            T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if (std::is_trivial<T>::value == true &&
          std::is_same<T, long double>::value == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          // cast element to (void*) to silence compiler warning for virtual
          // classes (they will never arrive here because they are
          // non-trivial).
          if (std::memcmp(zero,
                          static_cast<const void *>(&element),
                          sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment and a zero-valued element can use
      // memset. Cast the element to (void*) to silence a compiler warning
      // for virtual classes (they will never end up here because they are
      // non-trivial).
      if (std::is_trivial<T>::value == true && trivial_element)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        copy_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    const T &  element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };



  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment can be zeroed with memset. Cast the
      // element to (void*) to silence a compiler warning for virtual
      // classes (they will never end up here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        default_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    mutable T *destination_;

    // default assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // default construction (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal


#ifndef DOXYGEN



template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object)
  : deleter_action_object(nullptr) // encode default action by using a nullptr
  , owning_aligned_vector(owning_object)
{}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::Deleter(AlignedVector<T> *owning_object,
                                          const bool        is_shmem_root,
                                          T *aligned_shmem_pointer,
                                          MPI_Comm shmem_group_communicator,
                                          MPI_Win  shmem_window)
  : deleter_action_object(
      std::make_unique<MPISharedMemDeleterAction>(is_shmem_root,
                                                  aligned_shmem_pointer,
                                                  shmem_group_communicator,
                                                  shmem_window))
  , owning_aligned_vector(owning_object)
{}
#  endif


template <typename T>
inline void
AlignedVector<T>::Deleter::operator()(T *ptr)
{
  // If no special action has been registered (i.e., if the action pointer is
  // nullptr), then just perform the default action right here.
  if (deleter_action_object == nullptr)
    {
      if (ptr != nullptr)
        {
          Assert(owning_aligned_vector->used_elements_end != nullptr,
                 ExcInternalError());

          if (std::is_trivial<T>::value == false)
            for (T *p = owning_aligned_vector->used_elements_end - 1; p >= ptr;
                 --p)
              p->~T();

          std::free(ptr);
        }
    }
  else
    // Otherwise, let the action object do what is necessary
    deleter_action_object->delete_array(owning_aligned_vector, ptr);
}



template <typename T>
inline void
AlignedVector<T>::Deleter::reset_owning_object(
  const AlignedVector<T> *new_aligned_vector_ptr)
{
  owning_aligned_vector = new_aligned_vector_ptr;
}


#  ifdef DEAL_II_WITH_MPI

template <typename T>
inline AlignedVector<T>::Deleter::MPISharedMemDeleterAction::
  MPISharedMemDeleterAction(const bool is_shmem_root,
                            T *        aligned_shmem_pointer,
                            MPI_Comm   shmem_group_communicator,
                            MPI_Win    shmem_window)
  : is_shmem_root(is_shmem_root)
  , aligned_shmem_pointer(aligned_shmem_pointer)
  , shmem_group_communicator(shmem_group_communicator)
  , shmem_window(shmem_window)
{}



template <typename T>
inline void
AlignedVector<T>::Deleter::MPISharedMemDeleterAction::delete_array(
  const AlignedVector<T> *aligned_vector,
  T *                     ptr)
{
  (void)ptr;
  Assert(aligned_vector->elements.get() == ptr, ExcInternalError());

  if (is_shmem_root)
    if (std::is_trivial<T>::value == false)
      for (T *p = aligned_vector->used_elements_end - 1;
           p >= aligned_vector->elements.get();
           --p)
        p->~T();

  int ierr;
  ierr = MPI_Win_free(&shmem_window);
  AssertThrowMPI(ierr);

  Utilities::MPI::free_communicator(shmem_group_communicator);
}

#  endif


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{}



template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, Deleter(this))
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());
}



template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector<T>()
{
  // forward to the move operator
  *this = std::move(vec);
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  const size_type new_size = vec.used_elements_end - vec.elements.get();

  // First throw away everything and re-allocate memory but leave that
  // memory uninitialized for now:
  resize(0);
  reserve(new_size);

  // Then copy the elements over by using the copy constructor on these
  // elements:
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());

  // Finally adjust the pointer to the end of the elements that are used:
  used_elements_end = elements.get() + new_size;

  return *this;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object
  // references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). The way this is implemented is that we have to move the
  // deleter as well, and then reset the pointer inside the deleter
  // that references the outer object.
  elements = std::move(vec.elements);
  elements.get_deleter().reset_owning_object(this);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}



template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial<T>::value == false)
        dealii::internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}

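// An illustrative contrast between resize_fast() and resize() for a trivial
// element type (the values below are placeholders, not part of the class
// documentation):
//
//   AlignedVector<double> v;
//   v.resize_fast(1000); // elements 0..999 are left uninitialized
//   v.resize(2000);      // elements 1000..1999 are zero-initialized
//
// For non-trivial T, both functions run the default constructor on the new
// elements, so they behave identically.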


template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      dealii::internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      dealii::internal::AlignedVectorInitialize<T, true>(
        new_size - old_size, init, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      // allocate and align along 64-byte boundaries (this is enough for all
      // levels of vectorization currently supported by deal.II)
      T *new_data_ptr;
      Utilities::System::posix_memalign(
        reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));

      // Now create a deleter that encodes what should happen when the object
      // is released: We need to destroy the objects that are currently alive
      // (in reverse order), and then release the memory. Note that we catch
      // the 'this' pointer because the number of elements currently alive
      // might change over time.
      Deleter deleter(this);

      // copy whatever elements we need to retain
      if (new_allocated_size > 0)
        dealii::internal::AlignedVectorMoveConstruct<T>(
          elements.get(), elements.get() + old_size, new_data_ptr);

      // Now reset all of the member variables of the current object
      // based on the allocation above. Assigning to a std::unique_ptr
      // object also releases the previously pointed to memory.
      //
      // Note that at the time of releasing the old memory, 'used_elements_end'
      // still points to its previous value, and this is important for the
      // deleter object of the previously allocated array (see how it loops
      // over the to-be-destroyed elements in Deleter::operator() above).
      elements               = decltype(elements)(new_data_ptr, std::move(deleter));
      used_elements_end      = elements.get() + old_size;
      allocated_elements_end = elements.get() + new_size;
    }
  else if (new_allocated_size == 0)
    clear();
  else // size_alloc < allocated_size
    {} // nothing to do here
}
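
// A worked example of the growth policy implemented above: a sequence of
// push_back() calls on an empty vector first reserves 16 elements (see
// push_back() below) and then doubles the allocation to 32, 64, 128, ...,
// so that inserting n elements triggers only O(log n) reallocations.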


template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements a few lines above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}



template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  if (std::is_trivial<T>::value == false)
    new (used_elements_end++) T(in_data);
  else
    *used_elements_end++ = in_data;
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}



template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const size_type old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial<T>::value == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}



template <class T>
inline void
AlignedVector<T>::fill()
{
  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                             elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  dealii::internal::AlignedVectorInitialize<T, false>(size(),
                                                      value,
                                                      elements.get());
}



template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm &   communicator,
                                                const unsigned int root_process)
{
#  ifdef DEAL_II_WITH_MPI

  // Let the root process broadcast its size. If it is zero, then all
  // processes just clear() their memory and reset themselves to a non-shared
  // empty object -- there is no point to run through complicated MPI
  // calls if the end result is an empty array. Otherwise, we continue on.
  const size_type new_size =
    Utilities::MPI::broadcast(communicator, size(), root_process);
  if (new_size == 0)
    {
      clear();
      return;
    }


  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation this
  // function was developed with does not seem to do that. (Bug report
  // is here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int      ierr = MPI_Comm_split_type(communicator,
                                        MPI_COMM_TYPE_SHARED,
                                        /* key */ 0,
                                        MPI_INFO_NULL,
                                        &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    Utilities::MPI::free_communicator(shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. We know that rank zero of that communicator is the original
  // root process that has all of the data.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery
          // of Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'elements'
          // pointer, which would trigger the deleter which would lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  Utilities::MPI::free_communicator(shmem_roots_communicator);


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots are rank zero in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste
  // the first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they
  // don't know anything about, and so setting this flag is backward
  // compatible also to older MPI versions.
  MPI_Win        shmem_window;
  void *         base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   /* disp_unit = */ 1,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging to the
  // lowest rank that specified size > 0. If all processes in the group
  // attached to the window specified size = 0, then the call returns size = 0
  // and a baseptr as if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.)
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  if (is_shmem_root == false)
    {
      int      disp_unit;
      MPI_Aint alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and check that the disp_unit is
      // equal to 1 (as set above)
      Assert(base_ptr != nullptr, ExcInternalError());
      Assert(disp_unit == 1, ExcInternalError());
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but
  // I *think* that the following should do given that we do not use base_ptr
  // and available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void *      base_ptr_backup       = base_ptr;
  T *         aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, new_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink
  // what it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in
  // 2021, this is really how at least OpenMPI ends up doing things. (This is
  // with an OpenMPI implementation of MPI 3.1, so it does not support the
  // flag we set in the MPI_Info structure above when allocating the memory
  // window.) Indeed, when running this code on three processes, one ends up
  // with base_ptr values of
  //     base_ptr=0x7f0842f02108
  //     base_ptr=0x7fc0a47881d0
  //     base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so
  // there is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try
  // to access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite
  // clear from the MPI documentation, but seems reasonable.) So we need to
  // have a deleter for the pointer that ensures that upon release of the
  // memory, we not only call the destructor of these memory elements (but
  // only once, on the shmem root!) but also destroy the MPI_Win and the
  // communicator. All of that is encapsulated in the following call where the
  // deleter makes copies of the arguments in the lambda capture.
  elements = decltype(elements)(aligned_shmem_pointer,
                                Deleter(this,
                                        is_shmem_root,
                                        aligned_shmem_pointer,
                                        shmem_group_communicator,
                                        shmem_window));

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + new_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
#    ifdef DEBUG
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
#    endif

#  else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
#  endif
}
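
// An illustrative usage sketch (assumes deal.II was configured with MPI and
// that 'mpi_communicator' is a valid communicator; 'n_entries' is a
// placeholder):
//
//   AlignedVector<double> table;
//   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
//     table.resize(n_entries, 3.14); // only the root holds data initially
//   table.replicate_across_communicator(mpi_communicator,
//                                       /*root_process=*/0);
//   // Afterwards, every process can read table[i]; processes located on
//   // the same node share a single (64-byte aligned, if possible) memory
//   // allocation instead of each holding their own copy.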


template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec)
{
  // Swap the data in the 'elements' objects. Then also make sure that
  // their respective deleter objects point to the right place.
  std::swap(elements, vec.elements);
  elements.get_deleter().reset_owning_object(this);
  vec.elements.get_deleter().reset_owning_object(&vec);

  // Now also swap the remaining members.
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar &      vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar &      vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}
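
// A sketch of driving save() and load() through a BOOST archive (the
// archive headers below are standard BOOST serialization components; the
// snippet itself is illustrative, not part of the class documentation):
//
//   #include <boost/archive/text_oarchive.hpp>
//   #include <boost/archive/text_iarchive.hpp>
//   #include <sstream>
//
//   AlignedVector<double> v(4, 1.0), w;
//   std::ostringstream    out;
//   {
//     boost::archive::text_oarchive oa(out);
//     oa << v; // dispatches to save() via BOOST_SERIALIZATION_SPLIT_MEMBER
//   }
//   std::istringstream            in(out.str());
//   boost::archive::text_iarchive ia(in);
//   ia >> w; // dispatches to load(); afterwards w == v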


template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += dealii::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif