Reference documentation for deal.II version Git 39b7d3efb0 2021-05-07 15:49:09 -0400
aligned_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN

template <class T>
class AlignedVector
{
public:
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  AlignedVector();

  explicit AlignedVector(const size_type size, const T &init = T());

  ~AlignedVector();

  AlignedVector(const AlignedVector<T> &vec);

  AlignedVector(AlignedVector<T> &&vec) noexcept;

  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  void
  resize_fast(const size_type new_size);

  void
  resize(const size_type new_size);

  void
  resize(const size_type new_size, const T &init);

  void
  reserve(const size_type new_allocated_size);

  void
  clear();

  void
  push_back(const T in_data);

  reference
  back();

  const_reference
  back() const;

  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  void
  fill();

  void
  fill(const T &element);

  void
  replicate_across_communicator(const MPI_Comm &   communicator,
                                const unsigned int root_process);

  void
  swap(AlignedVector<T> &vec);

  bool
  empty() const;

  size_type
  size() const;

  size_type
  capacity() const;

  reference operator[](const size_type index);

  const_reference operator[](const size_type index) const;

  pointer
  data();

  const_pointer
  data() const;

  iterator
  begin();

  iterator
  end();

  const_iterator
  begin() const;

  const_iterator
  end() const;

  size_type
  memory_consumption() const;

  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() methods that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

private:
  // Pointer to the actual data array, together with its deleter.
  std::unique_ptr<T[], std::function<void(T *)>> elements;

  // Pointer to one past the last used element.
  T *used_elements_end;

  // Pointer to one past the end of the allocated memory.
  T *allocated_elements_end;
};
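
// A minimal usage sketch (illustrative only; variable names are not part of
// the library): AlignedVector mirrors the interface of std::vector, but
// guarantees 64-byte alignment of the underlying storage:
//
//   AlignedVector<double> v(8, 1.0); // eight elements, all set to 1.0
//   v.push_back(2.0);                // grows like std::vector
//   v.resize(16);                    // new elements are default-initialized
//   double *p = v.data();            // 64-byte aligned pointer, e.g. for SIMD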


// ------------------------------- inline functions --------------------------


namespace internal
{
  // Copy the elements of a source range to a destination, running in
  // parallel for ranges larger than minimum_parallel_grain_size.
  template <typename T>
  class AlignedVectorCopy : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorCopy(const T *const source_begin,
                      const T *const source_end,
                      T *const       destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopy::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // For classes with trivial assignment, we can use memcpy. Cast the
      // elements to (void*) to silence a compiler warning for virtual
      // classes (they never get here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<const void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(source_[i]);
    }

  private:
    const T *const source_;
    T *const       destination_;
  };


  // Move the elements of a source range to a destination, destroying the
  // source elements afterwards; runs in parallel for large ranges.
  template <typename T>
  class AlignedVectorMove : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorMove(T *const source_begin,
                      T *const source_end,
                      T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMove::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // For classes with trivial assignment, we can use memcpy. Cast the
      // elements to (void*) to silence a compiler warning for virtual
      // classes (they never get here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          {
            // initialize the memory (copy-construct by placement new), and
            // destruct the source
            new (&destination_[i]) T(std::move(source_[i]));
            source_[i].~T();
          }
    }

  private:
    T *const source_;
    T *const destination_;
  };


  // Set the elements of a range to a given value, either by copy assignment
  // (initialize_memory == false) or by copy construction via placement new
  // (initialize_memory == true); uses memset when the value has an all-zero
  // bit pattern and the type is trivial. Runs in parallel for large ranges.
  template <typename T, bool initialize_memory>
  class AlignedVectorSet : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorSet(const std::size_t size,
                     const T &         element,
                     T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if (std::is_trivial<T>::value == true &&
          std::is_same<T, long double>::value == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          // cast element to (void*) to silence a compiler warning for virtual
          // classes (they never get here because they are non-trivial)
          if (std::memcmp(zero,
                          static_cast<const void *>(&element),
                          sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorSet::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // For trivial classes whose target value is an all-zero bit pattern,
      // we can use memset. Cast the elements to (void*) to silence a
      // compiler warning for virtual classes (they never get here because
      // they are non-trivial).
      if (std::is_trivial<T>::value == true && trivial_element)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        copy_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    const T &  element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy construction (i.e., initialization of the memory via placement new)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };



  // Default-initialize the elements of a range, either by assigning a
  // default-constructed object (initialize_memory == false) or by default
  // construction via placement new (initialize_memory == true); uses memset
  // for trivial types. Runs in parallel for large ranges.
  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // For trivial classes, initialization amounts to setting the memory
      // to zero, so we can use memset. Cast the elements to (void*) to
      // silence a compiler warning for virtual classes (they never get here
      // because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        default_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    mutable T *destination_;

    // assignment of a default-constructed object
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // default construction (i.e., initialization of the memory via placement
    // new)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal
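
// The helper classes above are used by the member-function definitions
// below. Schematically (a hedged illustration, not additional library API;
// 'p', 'q', and 'n' are hypothetical):
//
//   // value-initialize n doubles at an already-allocated pointer p:
//   dealii::internal::AlignedVectorSet<double, true>(n, 0.0, p);
//
//   // move n doubles from p to q, destroying the originals:
//   dealii::internal::AlignedVectorMove<double>(p, p + n, q);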


#ifndef DOXYGEN


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{}



template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::~AlignedVector()
{
  clear();
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopy<T>(vec.elements.get(),
                                 vec.used_elements_end,
                                 elements.get());
}



template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : elements(std::move(vec.elements))
  , used_elements_end(vec.used_elements_end)
  , allocated_elements_end(vec.allocated_elements_end)
{
  vec.elements               = nullptr;
  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  resize(0);
  resize_fast(vec.used_elements_end - vec.elements.get());
  internal::AlignedVectorCopy<T>(vec.elements.get(),
                                 vec.used_elements_end,
                                 elements.get());
  return *this;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data
  elements = std::move(vec.elements);

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}



template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call the destructor on the fields that are released. doing this
      // backwards releases the elements in reverse order of how they were
      // created
      if (std::is_trivial<T>::value == false)
        {
          while (used_elements_end != elements.get() + new_size)
            (--used_elements_end)->~T();
        }
      else
        used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial<T>::value == false)
        dealii::internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call the destructor on the fields that are released. doing this
      // backwards releases the elements in reverse order of how they were
      // created
      if (std::is_trivial<T>::value == false)
        {
          while (used_elements_end != elements.get() + new_size)
            (--used_elements_end)->~T();
        }
      else
        used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      dealii::internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call the destructor on the fields that are released. doing this
      // backwards releases the elements in reverse order of how they were
      // created
      if (std::is_trivial<T>::value == false)
        {
          while (used_elements_end != elements.get() + new_size)
            (--used_elements_end)->~T();
        }
      else
        used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      dealii::internal::AlignedVectorSet<T, true>(new_size - old_size,
                                                  init,
                                                  elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      // allocate and align along 64-byte boundaries (this is enough for all
      // levels of vectorization currently supported by deal.II)
      T *new_data_ptr;
      Utilities::posix_memalign(
        reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));
      std::unique_ptr<T[], void (*)(T *)> new_data(new_data_ptr, [](T *ptr) {
        std::free(ptr);
      });

      // copy whatever elements we need to retain
      if (new_allocated_size > 0)
        dealii::internal::AlignedVectorMove<T>(elements.get(),
                                               elements.get() + old_size,
                                               new_data.get());

      // Now reset all of the member variables of the current object
      // based on the allocation above. Assigning to a std::unique_ptr
      // object also releases the previously pointed to memory.
      elements               = std::move(new_data);
      used_elements_end      = elements.get() + old_size;
      allocated_elements_end = elements.get() + new_size;
    }
  else if (new_allocated_size == 0)
    clear();
  else // new_allocated_size <= old_allocated_size
    {} // nothing to do here
}
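
// To illustrate the doubling strategy above (the capacities follow directly
// from the code; the loop itself is a hypothetical usage example):
//
//   AlignedVector<float> v;
//   for (unsigned int i = 0; i < 100; ++i)
//     v.push_back(i);
//   // push_back() calls reserve(std::max(2 * capacity(), 16)), so the
//   // allocation grows as 16 -> 32 -> 64 -> 128: only O(log n)
//   // reallocations (and element moves) for n insertions.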



template <class T>
inline void
AlignedVector<T>::clear()
{
  if (elements != nullptr)
    {
      if (std::is_trivial<T>::value == false)
        while (used_elements_end != elements.get())
          (--used_elements_end)->~T();
    }
  elements               = nullptr;
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}



template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  if (std::is_trivial<T>::value == false)
    new (used_elements_end++) T(in_data);
  else
    *used_elements_end++ = in_data;
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}



template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const unsigned int old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial<T>::value == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}



template <class T>
inline void
AlignedVector<T>::fill()
{
  dealii::internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                             elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &element)
{
  dealii::internal::AlignedVectorSet<T, false>(size(),
                                               element,
                                               elements.get());
}


template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm &   communicator,
                                                const unsigned int root_process)
{
#  ifdef DEAL_II_WITH_MPI
#    if DEAL_II_MPI_VERSION_GTE(3, 0)

  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }

  // **** Step 1 ****
  // Create communicators for each group of processes that can use
  // shared memory areas. Within each of these groups, we don't care about
  // which rank each of the old processes gets except that we would like to
  // make sure that the (global) root process will have rank=0 within
  // its own sub-communicator. We can do that through the third argument of
  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
  // order of processes within the split communicators, and we should set it to
  // zero for the root processes and one for all others -- which means that
  // for all of these other processes, MPI can choose whatever order it
  // wants because they have the same key (MPI then documents that these ties
  // will be broken according to these processes' rank in the old group).
  //
  // At least that's the theory. In practice, the MPI implementation this
  // function was developed with does not seem to do that. (Bug report is
  // here: https://github.com/open-mpi/ompi/issues/8854)
  // We work around this by letting MPI_Comm_split_type choose whatever
  // rank it wants, and then reshuffle with MPI_Comm_split in a second
  // step -- not elegant, nor efficient, but it seems to work:
  MPI_Comm shmem_group_communicator;
  {
    MPI_Comm shmem_group_communicator_temp;
    int      ierr = MPI_Comm_split_type(communicator,
                                   MPI_COMM_TYPE_SHARED,
                                   /* key */ 0,
                                   MPI_INFO_NULL,
                                   &shmem_group_communicator_temp);
    AssertThrowMPI(ierr);

    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
    ierr = MPI_Comm_split(shmem_group_communicator_temp,
                          /* color */ 0,
                          key,
                          &shmem_group_communicator);
    AssertThrowMPI(ierr);

    // Verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
             ExcInternalError());

    // And get rid of the temporary communicator
    ierr = MPI_Comm_free(&shmem_group_communicator_temp);
  }
  const bool is_shmem_root =
    Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split, which uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply never do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. By construction above, the original root process has rank
  // zero on the shmem roots communicator and has all of the data.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery
          // of Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'elements'
          // pointer, which would trigger the deleter and lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  {
    const int ierr = MPI_Comm_free(&shmem_roots_communicator);
    AssertThrowMPI(ierr);
  }


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots have rank zero in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste
  // the first (align_by-1) bytes. So we have to ask for
  //     size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they
  // don't know anything about, and so setting this flag is backward compatible
  // also to older MPI versions.
  //
  // There is one final piece we can already take care of here. At the
  // beginning of all of this, only the shmem_root knows how many elements
  // there are in the array. But at the end of it, all processes of course need
  // to know. We could put this information somewhere into the shmem area,
  // along with the other data, but that seems clumsy. It turns out that when
  // calling MPI_Win_allocate_shared, we are asked for the value of a parameter
  // called 'disp_unit' whose meaning is difficult to determine from the MPI
  // documentation, and that we do not actually need. So we "abuse" it a bit:
  // On the shmem root, we put the array size into it. Later on, the remaining
  // processes can query the shmem root's value of 'disp_unit', and so will be
  // able to learn about the array size that way.
  MPI_Win        shmem_window;
  void *         base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    const int disp_unit = (is_shmem_root ? size() : 1);

    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   disp_unit,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the
  // lowest rank that specified size > 0. If all processes in the group
  // attached to the window specified size = 0, then the call returns size = 0
  // and a baseptr as if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.) But this will also retrieve the shmem root's
  // disp_unit, which in step 3 above we have abused to pass along the number
  // of elements in the array.
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  unsigned int array_size =
    (is_shmem_root ? size() : numbers::invalid_unsigned_int);
  if (is_shmem_root == false)
    {
      int       disp_unit;
      MPI_Aint  alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and also unpack the array size
      // as discussed above.
      Assert(base_ptr != nullptr, ExcInternalError());

      array_size = disp_unit;
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but
  // I *think* that the following should do given that we do not use base_ptr
  // and available_space any further after the call to std::align.
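  //
  // To illustrate the std::align semantics relied upon here (the addresses
  // are made up): with base_ptr == 0x...008 and available_space == alloc_size,
  // the call
  //     std::align(align_by, array_size * sizeof(T), base_ptr,
  //                available_space)
  // advances base_ptr to the next 64-byte boundary (0x...040), decreases
  // available_space by the 56 bytes that were skipped, and returns the
  // adjusted pointer -- or nullptr if the remaining space would be too small
  // for array_size * sizeof(T) bytes.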
  std::size_t available_space       = alloc_size;
  void *      base_ptr_backup       = base_ptr;
  T *         aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, array_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one step to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink
  // what it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in
  // 2021, this is really how at least OpenMPI ends up doing things. (This is
  // with an OpenMPI implementation of MPI 3.1, so it does not support the
  // flag we set in the MPI_Info structure above when allocating the memory
  // window.) Indeed, when running this code on three processes, one ends up
  // with base_ptr values of
  //     base_ptr=0x7f0842f02108
  //     base_ptr=0x7fc0a47881d0
  //     base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to only 8 and 16 byte boundaries --
  // so there is no common offset std::align could find that leads to a
  // 64-byte aligned memory address in all three memory spaces. That's a
  // tremendous nuisance and there is really nothing we can do about this
  // other than just fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try
  // to access it.
  MPI_Barrier(shmem_group_communicator);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite
  // clear from the MPI documentation, but seems reasonable.) So we need to
  // have a deleter for the pointer that ensures that upon release of the
  // memory, we not only call the destructor of these memory elements (but
  // only once, on the shmem root!) but also destroy the MPI_Win and the
  // communicator. All of that is encapsulated in the following call where the
  // deleter makes copies of the arguments in the lambda capture.
  elements =
    decltype(elements)(aligned_shmem_pointer,
                       [is_shmem_root,
                        array_size,
                        aligned_shmem_pointer,
                        shmem_group_communicator,
                        shmem_window](T *) mutable {
                         if (is_shmem_root)
                           for (unsigned int i = 0; i < array_size; ++i)
                             aligned_shmem_pointer[i].~T();

                         int ierr;
                         ierr = MPI_Win_free(&shmem_window);
                         AssertThrowMPI(ierr);

                         ierr = MPI_Comm_free(&shmem_group_communicator);
                         AssertThrowMPI(ierr);
                       });

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + array_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in a somewhat round-about way by checking that all
  // processes compute the same hash of the packed object.
#      ifdef DEBUG
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
#      endif

#    else
  // If we only have MPI 2.x, then simply broadcast the current object to all
  // other processes and forego the idea of using shmem
  *this = Utilities::MPI::broadcast(communicator, *this, root_process);
#    endif
#  else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
#  endif
}
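
// A usage sketch for the function above (a hypothetical program context:
// MPI has been initialized, and 'comm' spans all participating processes):
//
//   AlignedVector<double> table;
//   if (Utilities::MPI::this_mpi_process(comm) == 0)
//     table.resize(1000000, 3.14); // only the root holds data so far
//   table.replicate_across_communicator(comm, /*root_process=*/0);
//   // Every process can now read table[i]; processes located on the same
//   // node transparently share a single copy of the underlying memory.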



template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec)
{
  std::swap(elements, vec.elements);
  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar &vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar &vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}
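
// A serialization sketch (illustrative only; the BOOST archive headers and
// <sstream> are not included by this file):
//
//   AlignedVector<int> v(4, 42);
//   std::ostringstream out;
//   {
//     boost::archive::text_oarchive oa(out);
//     oa << v; // invokes save() through the split-member macro
//   }
//
//   AlignedVector<int> w;
//   std::istringstream in(out.str());
//   {
//     boost::archive::text_iarchive ia(in);
//     ia >> w; // invokes load(), restoring both size and contents
//   }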



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += dealii::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


// Relational operator == for AlignedVector
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



// Relational operator != for AlignedVector
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif