aligned_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#ifndef dealii_aligned_vector_h
#define dealii_aligned_vector_h

#include <deal.II/base/config.h>

#include <deal.II/base/exceptions.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parallel.h>
#include <deal.II/base/utilities.h>

// boost::serialization::make_array used to be in array.hpp, but was
// moved to a different file in BOOST 1.64
#include <boost/version.hpp>
#if BOOST_VERSION >= 106400
#  include <boost/serialization/array_wrapper.hpp>
#else
#  include <boost/serialization/array.hpp>
#endif
#include <boost/serialization/split_member.hpp>

#include <cstring>
#include <memory>
#include <type_traits>


DEAL_II_NAMESPACE_OPEN


template <class T>
class AlignedVector
{
public:
  using value_type      = T;
  using pointer         = value_type *;
  using const_pointer   = const value_type *;
  using iterator        = value_type *;
  using const_iterator  = const value_type *;
  using reference       = value_type &;
  using const_reference = const value_type &;
  using size_type       = std::size_t;

  AlignedVector();

  explicit AlignedVector(const size_type size, const T &init = T());

  ~AlignedVector() = default;

  AlignedVector(const AlignedVector<T> &vec);

  AlignedVector(AlignedVector<T> &&vec) noexcept;

  AlignedVector &
  operator=(const AlignedVector<T> &vec);

  AlignedVector &
  operator=(AlignedVector<T> &&vec) noexcept;

  void
  resize_fast(const size_type new_size);

  void
  resize(const size_type new_size);

  void
  resize(const size_type new_size, const T &init);

  void
  reserve(const size_type new_allocated_size);

  void
  clear();

  void
  push_back(const T in_data);

  reference
  back();

  const_reference
  back() const;

  template <typename ForwardIterator>
  void
  insert_back(ForwardIterator begin, ForwardIterator end);

  void
  fill();

  void
  fill(const T &element);

  void
  replicate_across_communicator(const MPI_Comm &   communicator,
                                const unsigned int root_process);

  void
  swap(AlignedVector<T> &vec);

  bool
  empty() const;

  size_type
  size() const;

  size_type
  capacity() const;

  reference operator[](const size_type index);

  const_reference operator[](const size_type index) const;

  pointer
  data();

  const_pointer
  data() const;

  iterator
  begin();

  iterator
  end();

  const_iterator
  begin() const;

  const_iterator
  end() const;

  size_type
  memory_consumption() const;

  template <class Archive>
  void
  save(Archive &ar, const unsigned int version) const;

  template <class Archive>
  void
  load(Archive &ar, const unsigned int version);

#ifdef DOXYGEN
  /**
   * Write and read the data of this object from a stream for the purpose
   * of serialization.
   */
  template <class Archive>
  void
  serialize(Archive &archive, const unsigned int version);
#else
  // This macro defines the serialize() method that is compatible with
  // the templated save() and load() method that have been implemented.
  BOOST_SERIALIZATION_SPLIT_MEMBER()
#endif

private:
  /**
   * Pointer to the actual data array, together with the deleter that is used
   * to release the allocated memory.
   */
  std::unique_ptr<T[], std::function<void(T *)>> elements;

  /**
   * Pointer to one past the last used element.
   */
  T *used_elements_end;

  /**
   * Pointer to one past the end of the allocated memory.
   */
  T *allocated_elements_end;
};
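
// Editor's usage sketch (an illustrative addition, not upstream
// documentation): AlignedVector deliberately mimics the interface of
// std::vector, so typical use looks as follows.
//
// @code
//   dealii::AlignedVector<double> v(8, 1.0); // eight elements, all 1.0
//   v.push_back(2.0);
//   v.resize(16);           // grow; new elements are default-initialized
//   for (double &x : v)
//     x *= 2.0;
//   double *raw = v.data(); // pointer to 64-byte aligned storage
// @endcode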


// ------------------------------- inline functions --------------------------


namespace internal
{
  template <typename T>
  class AlignedVectorCopyConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorCopyConstruct(const T *const source_begin,
                               const T *const source_end,
                               T *const       destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorCopyConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // For classes with trivial assignment we can use memcpy. Cast the
      // elements to (void*) to silence the compiler warning for virtual
      // classes (they will never arrive here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<const void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(source_[i]);
    }

  private:
    const T *const source_;
    T *const       destination_;
  };
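
  // Editor's note (hedged sketch, not upstream documentation): the functor
  // does all of its work in its constructor, so copy-constructing a range is
  // a single statement; the work is parallelized once the range exceeds
  // 160000/sizeof(T)+1 elements, i.e. 20001 elements for T = double. Whether
  // user code should reach into namespace internal at all is questionable;
  // this is purely illustrative.
  //
  // @code
  //   std::vector<double> source(100000, 3.14);
  //   std::vector<double> target(100000);
  //   dealii::internal::AlignedVectorCopyConstruct<double>(
  //     source.data(), source.data() + source.size(), target.data());
  // @endcode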


  template <typename T>
  class AlignedVectorMoveConstruct
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorMoveConstruct(T *const source_begin,
                               T *const source_end,
                               T *const destination)
      : source_(source_begin)
      , destination_(destination)
    {
      Assert(source_end >= source_begin, ExcInternalError());
      Assert(source_end == source_begin || destination != nullptr,
             ExcInternalError());
      const std::size_t size = source_end - source_begin;
      if (size < minimum_parallel_grain_size)
        AlignedVectorMoveConstruct::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      if (end == begin)
        return;

      // Classes with trivial assignment can use memcpy. Cast the elements
      // to (void*) to silence the compiler warning for virtual classes
      // (they will never arrive here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memcpy(static_cast<void *>(destination_ + begin),
                    static_cast<void *>(source_ + begin),
                    (end - begin) * sizeof(T));
      else
        // For everything else just use the move constructor. The original
        // object remains alive and will be destroyed elsewhere.
        for (std::size_t i = begin; i < end; ++i)
          new (&destination_[i]) T(std::move(source_[i]));
    }

  private:
    T *const source_;
    T *const destination_;
  };


  template <typename T, bool initialize_memory>
  class AlignedVectorInitialize : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorInitialize(const std::size_t size,
                            const T &         element,
                            T *const          destination)
      : element_(element)
      , destination_(destination)
      , trivial_element(false)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      // do not use memcmp for long double because on some systems it does not
      // completely fill its memory and may lead to false positives in
      // e.g. valgrind
      if (std::is_trivial<T>::value == true &&
          std::is_same<T, long double>::value == false)
        {
          const unsigned char zero[sizeof(T)] = {};
          // Cast the element to (void*) to silence the compiler warning for
          // virtual classes (they will never arrive here because they are
          // non-trivial).
          if (std::memcmp(zero,
                          static_cast<const void *>(&element),
                          sizeof(T)) == 0)
            trivial_element = true;
        }
      if (size < minimum_parallel_grain_size)
        AlignedVectorInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of a zero element can use memset.
      // Cast the elements to (void*) to silence the compiler warning for
      // virtual classes (they will never arrive here because they are
      // non-trivial).
      if (std::is_trivial<T>::value == true && trivial_element)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        copy_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    const T &  element_;
    mutable T *destination_;
    bool       trivial_element;

    // copy assignment operation
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = element_;
    }

    // copy constructor (memory initialization)
    void
    copy_construct_or_assign(const std::size_t begin,
                             const std::size_t end,
                             std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T(element_);
    }
  };


  template <typename T, bool initialize_memory>
  class AlignedVectorDefaultInitialize
    : private dealii::parallel::ParallelForInteger
  {
    static const std::size_t minimum_parallel_grain_size =
      160000 / sizeof(T) + 1;

  public:
    AlignedVectorDefaultInitialize(const std::size_t size, T *const destination)
      : destination_(destination)
    {
      if (size == 0)
        return;
      Assert(destination != nullptr, ExcInternalError());

      if (size < minimum_parallel_grain_size)
        AlignedVectorDefaultInitialize::apply_to_subrange(0, size);
      else
        apply_parallel(0, size, minimum_parallel_grain_size);
    }

    virtual void
    apply_to_subrange(const std::size_t begin,
                      const std::size_t end) const override
    {
      // Classes with trivial assignment of zero can use memset. Cast the
      // elements to (void*) to silence the compiler warning for virtual
      // classes (they will never arrive here because they are non-trivial).
      if (std::is_trivial<T>::value == true)
        std::memset(static_cast<void *>(destination_ + begin),
                    0,
                    (end - begin) * sizeof(T));
      else
        default_construct_or_assign(
          begin, end, std::integral_constant<bool, initialize_memory>());
    }

  private:
    mutable T *destination_;

    // default assignment operation
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, false>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        destination_[i] = std::move(T());
    }

    // default constructor (memory initialization)
    void
    default_construct_or_assign(const std::size_t begin,
                                const std::size_t end,
                                std::integral_constant<bool, true>) const
    {
      for (std::size_t i = begin; i < end; ++i)
        new (&destination_[i]) T;
    }
  };

} // end of namespace internal


#ifndef DOXYGEN


template <class T>
inline AlignedVector<T>::AlignedVector()
  : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{}



template <class T>
inline AlignedVector<T>::AlignedVector(const size_type size, const T &init)
  : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  if (size > 0)
    resize(size, init);
}



template <class T>
inline AlignedVector<T>::AlignedVector(const AlignedVector<T> &vec)
  : elements(nullptr, [](T *) { Assert(false, ExcInternalError()); })
  , used_elements_end(nullptr)
  , allocated_elements_end(nullptr)
{
  // copy the data from vec
  reserve(vec.size());
  used_elements_end = allocated_elements_end;
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());
}



template <class T>
inline AlignedVector<T>::AlignedVector(AlignedVector<T> &&vec) noexcept
  : AlignedVector<T>()
{
  // forward to the move operator
  *this = std::move(vec);
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(const AlignedVector<T> &vec)
{
  resize(0);
  resize_fast(vec.used_elements_end - vec.elements.get());
  internal::AlignedVectorCopyConstruct<T>(vec.elements.get(),
                                          vec.used_elements_end,
                                          elements.get());
  return *this;
}



template <class T>
inline AlignedVector<T> &
AlignedVector<T>::operator=(AlignedVector<T> &&vec) noexcept
{
  clear();

  // Move the actual data in the 'elements' object. One problem is that this
  // also moves the deleter object, but the deleter object is a lambda function
  // that references 'this' (i.e., the 'this' pointer of the *moved-from*
  // object). So what we actually do is steal the pointer via
  // std::unique_ptr::release() and then install our own deleter object that
  // mirrors the one used in reserve() below.
  elements = decltype(elements)(vec.elements.release(), [this](T *ptr) {
    if (ptr != nullptr)
      {
        Assert(this->used_elements_end != nullptr, ExcInternalError());

        if (std::is_trivial<T>::value == false)
          for (T *p = this->used_elements_end - 1; p >= ptr; --p)
            p->~T();
      }

    std::free(ptr);
  });

  // Then also steal the other pointers and clear them in the original object:
  used_elements_end      = vec.used_elements_end;
  allocated_elements_end = vec.allocated_elements_end;

  vec.used_elements_end      = nullptr;
  vec.allocated_elements_end = nullptr;

  return *this;
}
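
// Editor's sketch (illustrative, not upstream documentation): move
// assignment transfers ownership of the allocation in O(1) instead of
// copying elements, and leaves the moved-from vector empty.
//
// @code
//   dealii::AlignedVector<int> a(1000, 42);
//   dealii::AlignedVector<int> b;
//   b = std::move(a); // steals a's storage; a is empty afterwards
// @endcode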



template <class T>
inline void
AlignedVector<T>::resize_fast(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // need to still set the values in case the class is non-trivial because
      // virtual classes etc. need to run their (default) constructor
      if (std::is_trivial<T>::value == false)
        internal::AlignedVectorDefaultInitialize<T, true>(
          new_size - old_size, elements.get() + old_size);
    }
}
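
// Editor's sketch (illustrative, not upstream documentation): the difference
// between resize() and resize_fast() is that for trivial types the latter
// leaves newly added elements uninitialized instead of zeroing them.
//
// @code
//   dealii::AlignedVector<double> v;
//   v.resize(4);      // elements 0..3 are 0.0
//   v.resize_fast(8); // elements 4..7 hold indeterminate values
// @endcode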



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the values to the default initializer
      internal::AlignedVectorDefaultInitialize<T, true>(
        new_size - old_size, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::resize(const size_type new_size, const T &init)
{
  const size_type old_size = size();

  if (new_size == 0)
    clear();
  else if (new_size == old_size)
    {} // nothing to do here
  else if (new_size < old_size)
    {
      // call destructor on fields that are released, if the type requires it.
      // doing it backward releases the elements in reverse order as compared
      // to how they were created
      if (std::is_trivial<T>::value == false)
        for (T *p = used_elements_end - 1; p >= elements.get() + new_size; --p)
          p->~T();
      used_elements_end = elements.get() + new_size;
    }
  else // new_size > old_size
    {
      // Allocate more space, and claim that space as used
      reserve(new_size);
      used_elements_end = elements.get() + new_size;

      // finally set the desired init values
      internal::AlignedVectorInitialize<T, true>(
        new_size - old_size, init, elements.get() + old_size);
    }
}



template <class T>
inline void
AlignedVector<T>::reserve(const size_type new_allocated_size)
{
  const size_type old_size           = used_elements_end - elements.get();
  const size_type old_allocated_size = allocated_elements_end - elements.get();
  if (new_allocated_size > old_allocated_size)
    {
      // if we continuously increase the size of the vector, we might be
      // reallocating a lot of times. therefore, try to increase the size more
      // aggressively
      const size_type new_size =
        std::max(new_allocated_size, 2 * old_allocated_size);

      // allocate and align along 64-byte boundaries (this is enough for all
      // levels of vectorization currently supported by deal.II)
      T *new_data_ptr;
      Utilities::System::posix_memalign(
        reinterpret_cast<void **>(&new_data_ptr), 64, new_size * sizeof(T));

      // Now create a deleter that encodes what should happen when the object
      // is released: We need to destroy the objects that are currently alive
      // (in reverse order), and then release the memory. Note that we catch
      // the 'this' pointer because the number of elements currently alive
      // might change over time.
      auto deleter = [this](T *ptr) {
        if (ptr != nullptr)
          {
            Assert(this->used_elements_end != nullptr, ExcInternalError());

            if (std::is_trivial<T>::value == false)
              for (T *p = this->used_elements_end - 1; p >= ptr; --p)
                p->~T();
          }

        std::free(ptr);
      };

      // move whatever elements we need to retain
      if (new_allocated_size > 0)
        internal::AlignedVectorMoveConstruct<T>(
          elements.get(), elements.get() + old_size, new_data_ptr);

      // Now reset all of the member variables of the current object
      // based on the allocation above. Assigning to a std::unique_ptr
      // object also releases the previously pointed to memory.
      //
      // Note that at the time of releasing the old memory, 'used_elements_end'
      // still points to its previous value, and this is important for the
      // deleter object of the previously allocated array (see how it loops over
      // the to-be-destroyed elements a few lines above).
      elements               = decltype(elements)(new_data_ptr, deleter);
      used_elements_end      = elements.get() + old_size;
      allocated_elements_end = elements.get() + new_size;
    }
  else if (new_allocated_size == 0)
    clear();
  else // new_allocated_size < old_allocated_size
    {} // nothing to do here
}
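
// Editor's sketch (illustrative, not upstream documentation): storage
// obtained through reserve() is aligned to 64-byte boundaries, which can be
// checked directly.
//
// @code
//   #include <cassert>
//   #include <cstdint>
//
//   dealii::AlignedVector<float> v;
//   v.reserve(3);
//   assert(reinterpret_cast<std::uintptr_t>(v.data()) % 64 == 0);
// @endcode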



template <class T>
inline void
AlignedVector<T>::clear()
{
  // Just release the memory (which also calls the destructor of the elements),
  // and then set the auxiliary pointers to invalid values.
  //
  // Note that at the time of releasing the old memory, 'used_elements_end'
  // still points to its previous value, and this is important for the
  // deleter object of the previously allocated array (see how it loops over
  // the to-be-destroyed elements in reserve() above).
  elements.reset();
  used_elements_end      = nullptr;
  allocated_elements_end = nullptr;
}



template <class T>
inline void
AlignedVector<T>::push_back(const T in_data)
{
  Assert(used_elements_end <= allocated_elements_end, ExcInternalError());
  if (used_elements_end == allocated_elements_end)
    reserve(std::max(2 * capacity(), static_cast<size_type>(16)));
  if (std::is_trivial<T>::value == false)
    new (used_elements_end++) T(in_data);
  else
    *used_elements_end++ = in_data;
}
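
// Editor's sketch (illustrative, not upstream documentation): capacity grows
// geometrically with a minimum of 16 elements, so repeated push_back() calls
// are amortized O(1).
//
// @code
//   dealii::AlignedVector<int> v;
//   v.push_back(0);   // capacity() == 16
//   for (int i = 1; i <= 16; ++i)
//     v.push_back(i); // one reallocation: capacity() == 32
// @endcode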



template <class T>
inline typename AlignedVector<T>::reference
AlignedVector<T>::back()
{
  AssertIndexRange(0, size());
  T *field = used_elements_end - 1;
  return *field;
}



template <class T>
inline typename AlignedVector<T>::const_reference
AlignedVector<T>::back() const
{
  AssertIndexRange(0, size());
  const T *field = used_elements_end - 1;
  return *field;
}



template <class T>
template <typename ForwardIterator>
inline void
AlignedVector<T>::insert_back(ForwardIterator begin, ForwardIterator end)
{
  const unsigned int old_size = size();
  reserve(old_size + (end - begin));
  for (; begin != end; ++begin, ++used_elements_end)
    {
      if (std::is_trivial<T>::value == false)
        new (used_elements_end) T;
      *used_elements_end = *begin;
    }
}
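
// Editor's sketch (illustrative, not upstream documentation): insert_back()
// appends a whole iterator range with a single reserve() call.
//
// @code
//   const std::vector<double>     src = {1., 2., 3.};
//   dealii::AlignedVector<double> v;
//   v.insert_back(src.begin(), src.end()); // v.size() == 3
// @endcode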



template <class T>
inline void
AlignedVector<T>::fill()
{
  internal::AlignedVectorDefaultInitialize<T, false>(size(),
                                                     elements.get());
}



template <class T>
inline void
AlignedVector<T>::fill(const T &value)
{
  internal::AlignedVectorInitialize<T, false>(size(),
                                              value,
                                              elements.get());
}


template <class T>
inline void
AlignedVector<T>::replicate_across_communicator(const MPI_Comm &   communicator,
                                                const unsigned int root_process)
{
#  ifdef DEAL_II_WITH_MPI
#    if DEAL_II_MPI_VERSION_GTE(3, 0)

  // **** Step 0 ****
  // All but the root process no longer need their data, so release the memory
  // used to store the previous elements.
  if (Utilities::MPI::this_mpi_process(communicator) != root_process)
    {
      elements.reset();
      used_elements_end      = nullptr;
      allocated_elements_end = nullptr;
    }
1212 
1213  // **** Step 1 ****
1214  // Create communicators for each group of processes that can use
1215  // shared memory areas. Within each of these groups, we don't care about
1216  // which rank each of the old processes gets except that we would like to
1217  // make sure that the (global) root process will have rank=0 within
1218  // its own sub-communicator. We can do that through the third argument of
1219  // MPI_Comm_split_type (the "key") which is an integer meant to indicate the
1220  // order of processes within the split communicators, and we should set it to
1221  // zero for the root processes and one for all others -- which means that
1222  // for all of these other processes, MPI can choose whatever order it
1223  // wants because they have the same key (MPI then documents that these ties
1224  // will be broken according to these processes' rank in the old group).
1225  //
1226  // At least that's the theory. In practice, the MPI implementation where
1227  // this function was developed on does not seem to do that. (Bug report
1228  // is here: https://github.com/open-mpi/ompi/issues/8854)
1229  // We work around this by letting MPI_Comm_split_type choose whatever
1230  // rank it wants, and then reshuffle with MPI_Comm_split in a second
1231  // step -- not elegant, nor efficient, but seems to work:
1232  MPI_Comm shmem_group_communicator;
1233  {
1234  MPI_Comm shmem_group_communicator_temp;
1235  int ierr = MPI_Comm_split_type(communicator,
1236  MPI_COMM_TYPE_SHARED,
1237  /* key */ 0,
1238  MPI_INFO_NULL,
1239  &shmem_group_communicator_temp);
1240  AssertThrowMPI(ierr);
1241 
1242  const int key =
1243  (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);
1244  ierr = MPI_Comm_split(shmem_group_communicator_temp,
1245  /* color */ 0,
1246  key,
1247  &shmem_group_communicator);
1248  AssertThrowMPI(ierr);
1249 
1250  // Verify the explanation from above
1251  if (Utilities::MPI::this_mpi_process(communicator) == root_process)
1252  Assert(Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0,
1253  ExcInternalError());
1254 
1255  // And get rid of the temporary communicator
1256  ierr = MPI_Comm_free(&shmem_group_communicator_temp);
1257  AssertThrowMPI(ierr);
1258  }
1259  const bool is_shmem_root =
1260  Utilities::MPI::this_mpi_process(shmem_group_communicator) == 0;

  // **** Step 2 ****
  // We then have to send the state of the current object from the
  // root process to one exemplar in each shmem group. To this end,
  // we create another subcommunicator that includes the ranks zero
  // of all shmem groups, and because of the trick above, we know
  // that this also includes the original root process.
  //
  // There are different ways of creating a "shmem_roots_communicator".
  // The conceptually easiest way is to create an MPI_Group that only
  // includes the shmem roots and then create a communicator from this
  // via MPI_Comm_create or MPI_Comm_create_group. The problem
  // with this is that we would have to exchange among all processes
  // which ones are shmem roots and which are not. This is awkward.
  //
  // A simpler way is to use MPI_Comm_split that uses "colors" to
  // indicate which sub-communicator each process wants to be in.
  // We use color=0 to indicate the group of shmem roots, and color=1
  // for all other processes -- the latter will simply not ever do
  // anything among themselves with the communicator so created.
  //
  // Using MPI_Comm_split has the additional benefit that, just as above,
  // we can choose where each rank will end up in shmem_roots_communicator.
  // We again set key=0 for the original root_process, and key=1 for all other
  // ranks; then, the global root becomes rank=0 on the
  // shmem_roots_communicator. We don't care how the other processes are
  // ordered.
  MPI_Comm shmem_roots_communicator;
  {
    const int key =
      (Utilities::MPI::this_mpi_process(communicator) == root_process ? 0 : 1);

    const int ierr = MPI_Comm_split(communicator,
                                    /*color=*/
                                    (is_shmem_root ? 0 : 1),
                                    key,
                                    &shmem_roots_communicator);
    AssertThrowMPI(ierr);

    // Again verify the explanation from above
    if (Utilities::MPI::this_mpi_process(communicator) == root_process)
      Assert(Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0,
             ExcInternalError());
  }

  const unsigned int shmem_roots_root_rank = 0;
  const bool         is_shmem_roots_root =
    (is_shmem_root && (Utilities::MPI::this_mpi_process(
                         shmem_roots_communicator) == shmem_roots_root_rank));

  // Now let the original root_process broadcast the current object to all
  // shmem roots. By the construction above, we know that rank zero of
  // shmem_roots_communicator is the original root process, which has all of
  // the data.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value)
        {
          // The data is "trivial", i.e., we can copy things directly without
          // having to go through the serialization/deserialization machinery
          // of Utilities::MPI::broadcast.
          //
          // In that case, first tell all of the other shmem roots how many
          // elements we will have to deal with, and let them resize their
          // (non-shared) arrays.
          const size_type new_size =
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      size(),
                                      shmem_roots_root_rank);
          if (is_shmem_roots_root == false)
            resize(new_size);

          // Then directly copy from the root process into these buffers
          int ierr = MPI_Bcast(elements.get(),
                               sizeof(T) * new_size,
                               MPI_CHAR,
                               shmem_roots_root_rank,
                               shmem_roots_communicator);
          AssertThrowMPI(ierr);
        }
      else
        {
          // The objects to be sent around are not "trivial", and so we have
          // to go through the serialization/deserialization machinery. On all
          // but the sending process, overwrite the current state with the
          // vector just broadcast.
          //
          // On the root rank, this would lead to resetting the 'elements'
          // pointer, which would trigger the deleter and thereby lead to a
          // deadlock. So we just send the result of the broadcast() call to
          // nirvana on the root process and keep our current state.
          if (Utilities::MPI::this_mpi_process(shmem_roots_communicator) == 0)
            Utilities::MPI::broadcast(shmem_roots_communicator,
                                      *this,
                                      shmem_roots_root_rank);
          else
            *this = Utilities::MPI::broadcast(shmem_roots_communicator,
                                              *this,
                                              shmem_roots_root_rank);
        }
    }

  // We no longer need the shmem roots communicator, so get rid of it
  {
    const int ierr = MPI_Comm_free(&shmem_roots_communicator);
    AssertThrowMPI(ierr);
  }


  // **** Step 3 ****
  // At this point, all shmem groups have one shmem root process that has
  // a copy of the data. This is the point where each shmem group should
  // establish a shmem area to put the data into. As mentioned above,
  // we know that the shmem roots are rank zero in their respective
  // shmem_group_communicator.
  //
  // The process for all of this works as follows: While all processes in
  // the shmem group participate in the generation of the shmem memory window,
  // only the shmem root actually allocates any memory -- the rest just
  // allocate zero bytes of their own. We allocate space for exactly
  // size() elements (computed on the shmem_root that already has the data)
  // and add however many bytes are necessary so that we know that we can align
  // things to 64-byte boundaries. The worst case happens if the memory system
  // gives us a pointer to an address one byte past a desired alignment
  // boundary, and in that case aligning the memory will require us to waste
  // the first (align_by-1) bytes. So we have to ask for
  //   size() * sizeof(T) + (align_by - 1)
  // bytes.
  //
  // Before MPI 4.0, there was no way to specify that we want memory aligned to
  // a certain number of bytes. This is going to come back to bite us further
  // down below when we try to get a properly aligned pointer to our memory
  // region, see the commentary there. Starting with MPI 4.0, one can set a
  // flag in an MPI_Info structure that requests a desired alignment, so we do
  // this for forward compatibility; MPI implementations ignore flags they
  // don't know anything about, and so setting this flag is backward compatible
  // also to older MPI versions.
  //
  // There is one final piece we can already take care of here. At the
  // beginning of all of this, only the shmem_root knows how many elements
  // there are in the array. But at the end of it, all processes of course need
  // to know. We could put this information somewhere into the shmem area,
  // along with the other data, but that seems clumsy. It turns out that when
  // calling MPI_Win_allocate_shared, we are asked for the value of a parameter
  // called 'disp_unit' whose meaning is difficult to determine from the MPI
  // documentation, and that we do not actually need. So we "abuse" it a bit:
  // On the shmem root, we put the array size into it. Later on, the remaining
  // processes can query the shmem root's value of 'disp_unit', and so will be
  // able to learn about the array size that way.
  MPI_Win        shmem_window;
  void *         base_ptr;
  const MPI_Aint align_by = 64;
  const MPI_Aint alloc_size =
    Utilities::MPI::broadcast(shmem_group_communicator,
                              (size() * sizeof(T) + (align_by - 1)),
                              0);

  {
    const int disp_unit = (is_shmem_root ? size() : 1);

    int ierr;

    MPI_Info mpi_info;
    ierr = MPI_Info_create(&mpi_info);
    AssertThrowMPI(ierr);
    ierr = MPI_Info_set(mpi_info,
                        "mpi_minimum_memory_alignment",
                        std::to_string(align_by).c_str());
    AssertThrowMPI(ierr);
    ierr = MPI_Win_allocate_shared((is_shmem_root ? alloc_size : 0),
                                   disp_unit,
                                   mpi_info,
                                   shmem_group_communicator,
                                   &base_ptr,
                                   &shmem_window);
    AssertThrowMPI(ierr);

    ierr = MPI_Info_free(&mpi_info);
    AssertThrowMPI(ierr);
  }


  // **** Step 4 ****
  // The next step is to teach all non-shmem root processes what the pointer to
  // the array is that the shmem-root created. MPI has a nifty way for this
  // given that only a single process actually allocated memory in the window:
  // When calling MPI_Win_shared_query, the MPI documentation says that
  // "When rank is MPI_PROC_NULL, the pointer, disp_unit, and size returned are
  // the pointer, disp_unit, and size of the memory segment belonging the
  // lowest rank that specified size > 0. If all processes in the group
  // attached to the window specified size = 0, then the call returns size = 0
  // and a baseptr as if MPI_ALLOC_MEM was called with size = 0."
  //
  // This will allow us to obtain the pointer to the shmem root's memory area,
  // which is the only one we care about. (None of the other processes have
  // even allocated any memory.) But this will also retrieve the shmem root's
  // disp_unit, which in step 3 above we have abused to pass along the number
  // of elements in the array.
  //
  // We don't need to do this on the shmem root process: This process has
  // already gotten its base_ptr correctly set above, and we can determine the
  // array size by just calling size().
  unsigned int array_size =
    (is_shmem_root ? size() : numbers::invalid_unsigned_int);
  if (is_shmem_root == false)
    {
      int       disp_unit;
      MPI_Aint  alloc_size; // not actually used
      const int ierr = MPI_Win_shared_query(
        shmem_window, MPI_PROC_NULL, &alloc_size, &disp_unit, &base_ptr);
      AssertThrowMPI(ierr);

      // Make sure we actually got a pointer, and also unpack the array size
      // as discussed above.
      Assert(base_ptr != nullptr, ExcInternalError());

      array_size = disp_unit;
    }


  // **** Step 5 ****
  // Now that all processes know the address of the space that is visible to
  // everyone, we need to figure out whether it is properly aligned and if not,
  // find the next aligned address.
  //
  // std::align does that, but it also modifies its last two arguments. The
  // documentation of that function at
  // https://en.cppreference.com/w/cpp/memory/align is not entirely clear, but
  // I *think* that the following should do given that we do not use base_ptr
  // and available_space any further after the call to std::align.
  std::size_t available_space       = alloc_size;
  void *      base_ptr_backup       = base_ptr;
  T *         aligned_shmem_pointer = static_cast<T *>(
    std::align(align_by, array_size * sizeof(T), base_ptr, available_space));
  Assert(aligned_shmem_pointer != nullptr, ExcInternalError());

  // There is one case to guard against. It is *conceivable* that the base_ptr
  // we have previously obtained from MPI_Win_shared_query is mapped so
  // awkwardly into the different MPI processes' memory spaces that it is
  // aligned in one memory space, but not another. In that case, different
  // processes would align base_ptr differently, and adjust available_space
  // differently. We can check that by making sure that the max (or min) over
  // all processes is equal to every process's value. If that's not the case,
  // then the whole idea of aligning above is wrong and we need to rethink what
  // it means to align data in a shared memory space.
  //
  // One might be tempted to think that this is not how MPI implementations
  // actually arrange things. Alas, when developing this functionality in 2021,
  // this is really how at least OpenMPI ends up doing things. (This is with an
  // OpenMPI implementation of MPI 3.1, so it does not support the flag we set
  // in the MPI_Info structure above when allocating the memory window.)
  // Indeed, when running this code on three processes, one ends up with
  // base_ptr values of
  //   base_ptr=0x7f0842f02108
  //   base_ptr=0x7fc0a47881d0
  //   base_ptr=0x7f64872db108
  // which, most annoyingly, are aligned to 8 and 16 byte boundaries -- so
  // there is no common offset std::align could find that leads to a 64-byte
  // aligned memory address in all three memory spaces. That's a tremendous
  // nuisance and there is really nothing we can do about this other than just
  // fall back on the (unaligned) base_ptr in that case.
  if (Utilities::MPI::min(available_space, shmem_group_communicator) !=
      Utilities::MPI::max(available_space, shmem_group_communicator))
    aligned_shmem_pointer = static_cast<T *>(base_ptr_backup);


  // **** Step 6 ****
  // If this is the shmem root process, we need to copy the data into the
  // shared memory space.
  if (is_shmem_root)
    {
      if (std::is_trivial<T>::value == true)
        std::memcpy(aligned_shmem_pointer, elements.get(), sizeof(T) * size());
      else
        for (std::size_t i = 0; i < size(); ++i)
          new (&aligned_shmem_pointer[i]) T(std::move(elements[i]));
    }

  // Make sure that the shared memory host has copied the data before we try
  // to access it.
  const int ierr = MPI_Barrier(shmem_group_communicator);
  AssertThrowMPI(ierr);

  // **** Step 7 ****
  // Finally, we need to set the pointers of this object to what we just
  // learned. This also releases all memory that may have been in use
  // previously.
  //
  // The part that is a bit tricky is how to write the deleter of this
  // shared memory object. When we want to get rid of it, we need to
  // also release the MPI_Win object along with the shmem_group_communicator
  // object. That's because as long as we use the shared memory, we still need
  // to hold on to the MPI_Win object, and the MPI_Win object is based on the
  // communicator. (The former is definitely true, the latter is not quite
  // clear from the MPI documentation, but seems reasonable.) So we need to
  // have a deleter for the pointer that ensures that upon release of the
  // memory, we not only call the destructor of these memory elements (but
  // only once, on the shmem root!) but also destroy the MPI_Win and the
  // communicator. All of that is encapsulated in the following call where the
  // deleter makes copies of the arguments in the lambda capture.
  elements =
    decltype(elements)(aligned_shmem_pointer,
                       [is_shmem_root,
                        array_size,
                        aligned_shmem_pointer,
                        shmem_group_communicator,
                        shmem_window](T *) mutable {
                         if (is_shmem_root)
                           for (unsigned int i = 0; i < array_size; ++i)
                             aligned_shmem_pointer[i].~T();

                         int ierr;
                         ierr = MPI_Win_free(&shmem_window);
                         AssertThrowMPI(ierr);

                         ierr = MPI_Comm_free(&shmem_group_communicator);
                         AssertThrowMPI(ierr);
                       });

  // We then also have to set the other two pointers that define the state of
  // the current object. Note that the new buffer size is exactly as large as
  // necessary, i.e., can store size() elements, regardless of the number of
  // allocated elements in the original objects.
  used_elements_end      = elements.get() + array_size;
  allocated_elements_end = used_elements_end;

  // **** Consistency check ****
  // At this point, each process should have a copy of the data.
  // Verify this in some sort of round-about way
#      ifdef DEBUG
  const std::vector<char> packed_data = Utilities::pack(*this);
  const int               hash =
    std::accumulate(packed_data.begin(), packed_data.end(), int(0));
  Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
#      endif

#    else
  // If we only have MPI 2.x, then simply broadcast the current object to all
  // other processes and forego the idea of using shmem
  *this = Utilities::MPI::broadcast(communicator, *this, root_process);
#    endif
#  else
  // No MPI -> nothing to replicate
  (void)communicator;
  (void)root_process;
#  endif
}
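
// Editor's sketch (illustrative, not upstream documentation): a typical call
// pattern for the function above. Afterwards, every process in the
// communicator can read the same elements, and processes on the same node
// share one physical copy of them.
//
// @code
//   dealii::AlignedVector<double> table;
//   if (dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
//     table.resize(1000, 1.0); // only the root process fills the data
//   table.replicate_across_communicator(MPI_COMM_WORLD, 0);
// @endcode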



template <class T>
inline void
AlignedVector<T>::swap(AlignedVector<T> &vec)
{
  // Swap the data in the 'elements' objects. One problem is that this
  // also swaps the deleter objects, but each deleter object is a lambda
  // function that references 'this' (i.e., the 'this' pointer of the *other*
  // object after the swap). So what we actually do is steal the pointers via
  // std::unique_ptr::release() and then install our own deleter objects that
  // mirror the one used in reserve() above.
  //
  // We have to do the same for the other object
  T *this_element_pointer = elements.release();

  elements = decltype(elements)(vec.elements.release(), [this](T *ptr) {
    if (ptr != nullptr)
      {
        Assert(this->used_elements_end != nullptr, ExcInternalError());

        if (std::is_trivial<T>::value == false)
          for (T *p = this->used_elements_end - 1; p >= ptr; --p)
            p->~T();
      }

    std::free(ptr);
  });

  vec.elements = decltype(vec.elements)(this_element_pointer, [&vec](T *ptr) {
    if (ptr != nullptr)
      {
        Assert(vec.used_elements_end != nullptr, ExcInternalError());

        if (std::is_trivial<T>::value == false)
          for (T *p = vec.used_elements_end - 1; p >= ptr; --p)
            p->~T();
      }

    std::free(ptr);
  });

  std::swap(used_elements_end, vec.used_elements_end);
  std::swap(allocated_elements_end, vec.allocated_elements_end);
}
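
// Editor's sketch (illustrative, not upstream documentation): swap()
// exchanges the contents of two vectors in O(1), without copying elements.
//
// @code
//   dealii::AlignedVector<int> a(5, 1);
//   dealii::AlignedVector<int> b(10, 2);
//   a.swap(b); // now a.size() == 10 and b.size() == 5
// @endcode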



template <class T>
inline bool
AlignedVector<T>::empty() const
{
  return used_elements_end == elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::size() const
{
  return used_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::capacity() const
{
  return allocated_elements_end - elements.get();
}



template <class T>
inline typename AlignedVector<T>::reference AlignedVector<T>::
operator[](const size_type index)
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <class T>
inline typename AlignedVector<T>::const_reference AlignedVector<T>::
operator[](const size_type index) const
{
  AssertIndexRange(index, size());
  return elements[index];
}



template <typename T>
inline typename AlignedVector<T>::pointer
AlignedVector<T>::data()
{
  return elements.get();
}



template <typename T>
inline typename AlignedVector<T>::const_pointer
AlignedVector<T>::data() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::begin()
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::iterator
AlignedVector<T>::end()
{
  return used_elements_end;
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::begin() const
{
  return elements.get();
}



template <class T>
inline typename AlignedVector<T>::const_iterator
AlignedVector<T>::end() const
{
  return used_elements_end;
}


template <class T>
template <class Archive>
inline void
AlignedVector<T>::save(Archive &ar, const unsigned int) const
{
  size_type vec_size = size();
  ar &      vec_size;
  if (vec_size > 0)
    ar &boost::serialization::make_array(elements.get(), vec_size);
}



template <class T>
template <class Archive>
inline void
AlignedVector<T>::load(Archive &ar, const unsigned int)
{
  size_type vec_size = 0;
  ar &      vec_size;

  if (vec_size > 0)
    {
      reserve(vec_size);
      ar &boost::serialization::make_array(elements.get(), vec_size);
      used_elements_end = elements.get() + vec_size;
    }
}
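
// Editor's sketch (illustrative, assuming the standard BOOST archive
// classes): the save()/load() pair above plugs into BOOST serialization,
// e.g. with text archives.
//
// @code
//   #include <boost/archive/text_iarchive.hpp>
//   #include <boost/archive/text_oarchive.hpp>
//
//   #include <sstream>
//
//   dealii::AlignedVector<double> v(4, 2.5);
//
//   std::ostringstream out;
//   {
//     boost::archive::text_oarchive oa(out);
//     oa << v; // calls AlignedVector::save()
//   }
//
//   dealii::AlignedVector<double> w;
//   std::istringstream            in(out.str());
//   boost::archive::text_iarchive ia(in);
//   ia >> w; // calls AlignedVector::load()
// @endcode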



template <class T>
inline typename AlignedVector<T>::size_type
AlignedVector<T>::memory_consumption() const
{
  size_type memory = sizeof(*this);
  for (const T *t = elements.get(); t != used_elements_end; ++t)
    memory += dealii::MemoryConsumption::memory_consumption(*t);
  memory += sizeof(T) * (allocated_elements_end - used_elements_end);
  return memory;
}


#endif // ifndef DOXYGEN


/**
 * Relational operator == for AlignedVector.
 *
 * @relatesalso AlignedVector
 */
template <class T>
bool
operator==(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  if (lhs.size() != rhs.size())
    return false;
  for (typename AlignedVector<T>::const_iterator lit = lhs.begin(),
                                                 rit = rhs.begin();
       lit != lhs.end();
       ++lit, ++rit)
    if (*lit != *rit)
      return false;
  return true;
}



/**
 * Relational operator != for AlignedVector.
 *
 * @relatesalso AlignedVector
 */
template <class T>
bool
operator!=(const AlignedVector<T> &lhs, const AlignedVector<T> &rhs)
{
  return !(operator==(lhs, rhs));
}


DEAL_II_NAMESPACE_CLOSE

#endif