Reference documentation for deal.II version Git ede8f93e86 2020-12-03 14:59:20 -0700
mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/numbers.h>
24 
25 #include <map>
26 #include <numeric>
27 #include <set>
28 #include <vector>
29 
30 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
31 // without MPI, we would still like to use
32 // some constructs with MPI data
33 // types. Therefore, create some dummies
34 using MPI_Comm = int;
35 using MPI_Request = int;
36 using MPI_Datatype = int;
37 using MPI_Op = int;
38 # ifndef MPI_COMM_WORLD
39 # define MPI_COMM_WORLD 0
40 # endif
41 # ifndef MPI_COMM_SELF
42 # define MPI_COMM_SELF 0
43 # endif
44 # ifndef MPI_REQUEST_NULL
45 # define MPI_REQUEST_NULL 0
46 # endif
47 # ifndef MPI_MIN
48 # define MPI_MIN 0
49 # endif
50 # ifndef MPI_MAX
51 # define MPI_MAX 0
52 # endif
53 # ifndef MPI_SUM
54 # define MPI_SUM 0
55 # endif
56 #endif
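To illustrate the comment above: with these dummy aliases, code that merely stores or passes around MPI handles compiles unchanged in a serial build. A minimal sketch (not part of the header; the struct name is made up), assuming <deal.II/base/mpi.h> has been included:

    // Compiles both with and without an MPI installation:
    struct SolverSettings
    {
      MPI_Comm communicator = MPI_COMM_WORLD; // plain int 0 without MPI
    };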
57 
58 
59 
73 #ifdef DEAL_II_WITH_MPI
74 # if DEAL_II_MPI_VERSION_GTE(3, 0)
75 
76 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
77 
78 # else
79 
80 # include <type_traits>
81 
82 # define DEAL_II_MPI_CONST_CAST(expr) \
83  const_cast<typename std::remove_const< \
84  typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
85 
86 # endif
87 #endif
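A usage sketch for the macro above (illustrative, not taken from the header): DEAL_II_MPI_CONST_CAST lets the same call site work with pre-MPI-3 implementations whose send buffers are not const-qualified. Buffer contents, destination, and tag are made up.

    const std::vector<double> data(10, 1.0);
    const int ierr = MPI_Send(DEAL_II_MPI_CONST_CAST(data.data()),
                              static_cast<int>(data.size()),
                              MPI_DOUBLE,
                              /*dest=*/0,
                              /*tag=*/0,
                              MPI_COMM_WORLD);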
88 
89 
90 
 91 DEAL_II_NAMESPACE_OPEN
 92 
93 
94 // Forward type declarations to allow MPI sums over tensorial types
95 #ifndef DOXYGEN
96 template <int rank, int dim, typename Number>
97 class Tensor;
98 template <int rank, int dim, typename Number>
99 class SymmetricTensor;
100 template <typename Number>
101 class SparseMatrix;
102 class IndexSet;
103 #endif
104 
105 namespace Utilities
106 {
119  IndexSet
120  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
121  const unsigned int n_partitions,
122  const IndexSet::size_type total_size);
123 
131  namespace MPI
132  {
141  unsigned int
142  n_mpi_processes(const MPI_Comm &mpi_communicator);
143 
152  unsigned int
153  this_mpi_process(const MPI_Comm &mpi_communicator);
154 
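A typical query of the two functions above (illustrative fragment, assuming the usual headers and using namespace dealii):

    const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
    const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    if (my_rank == 0)
      std::cout << "Running on " << n_ranks << " MPI process(es)" << std::endl;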
 159  const std::vector<unsigned int>
 160  mpi_processes_within_communicator(const MPI_Comm &comm_large,
 161  const MPI_Comm &comm_small);
162 
184  std::vector<unsigned int>
 185  compute_point_to_point_communication_pattern(
 186  const MPI_Comm & mpi_comm,
187  const std::vector<unsigned int> &destinations);
188 
208  unsigned int
 209  compute_n_point_to_point_communications(
 210  const MPI_Comm & mpi_comm,
211  const std::vector<unsigned int> &destinations);
212 
229  MPI_Comm
230  duplicate_communicator(const MPI_Comm &mpi_communicator);
231 
241  void
242  free_communicator(MPI_Comm &mpi_communicator);
243 
 256  class DuplicatedCommunicator
 257  {
258  public:
262  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
263  : comm(duplicate_communicator(communicator))
264  {}
265 
 269  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
 270 
 274  ~DuplicatedCommunicator()
 275  {
 276  free_communicator(comm);
 277  }
278 
282  const MPI_Comm &operator*() const
283  {
284  return comm;
285  }
286 
287 
 291  DuplicatedCommunicator &
 292  operator=(const DuplicatedCommunicator &) = delete;
293 
294  private:
 298  MPI_Comm comm;
 299  };
300 
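Illustrative RAII usage of DuplicatedCommunicator (not part of the header): the duplicated communicator is freed automatically when the object goes out of scope, and operator* gives access to it.

    {
      Utilities::MPI::DuplicatedCommunicator dup(MPI_COMM_WORLD);
      const unsigned int n = Utilities::MPI::n_mpi_processes(*dup);
      // ... use *dup for communication that needs its own context ...
    } // the duplicated communicator is freed here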
 330  class CollectiveMutex
 331  {
332  public:
 338  class ScopedLock
 339  {
340  public:
344  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
345  : mutex(mutex)
346  , comm(comm)
347  {
348  mutex.lock(comm);
349  }
350 
 354  ~ScopedLock()
 355  {
356  mutex.unlock(comm);
357  }
358 
359  private:
 363  CollectiveMutex &mutex;
 367  const MPI_Comm comm;
368  };
369 
373  explicit CollectiveMutex();
374 
378  ~CollectiveMutex();
379 
386  void
387  lock(const MPI_Comm &comm);
388 
395  void
396  unlock(const MPI_Comm &comm);
397 
398  private:
402  bool locked;
403 
 407  MPI_Request request;
 408  };
409 
410 
411 
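A sketch of the intended locking pattern, mirroring how the some_to_some() implementation further below uses the class; the communication inside the scope is only a placeholder:

    static Utilities::MPI::CollectiveMutex mutex;
    {
      Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_communicator);
      // point-to-point communication that must not interleave with other
      // communication rounds on the same communicator goes here
    } // unlock happens automatically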
439 #ifdef DEAL_II_WITH_MPI
440  int
441  create_group(const MPI_Comm & comm,
442  const MPI_Group &group,
443  const int tag,
444  MPI_Comm * new_comm);
445 #endif
446 
455  std::vector<IndexSet>
 456  create_ascending_partitioning(const MPI_Comm &comm,
 457  const IndexSet::size_type local_size);
458 
466  IndexSet
 467  create_evenly_distributed_partitioning(
 468  const MPI_Comm & comm,
469  const IndexSet::size_type total_size);
470 
471 #ifdef DEAL_II_WITH_MPI
472 
487  template <class Iterator, typename Number = long double>
488  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
489  mean_and_standard_deviation(const Iterator begin,
490  const Iterator end,
491  const MPI_Comm &comm);
492 #endif
493 
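A hedged example of calling mean_and_standard_deviation() on locally stored samples (the sample values and the communicator name are made up):

    std::vector<double> samples = {0.1, 0.4, 0.3}; // local contribution
    const auto stats =
      Utilities::MPI::mean_and_standard_deviation(samples.begin(),
                                                  samples.end(),
                                                  mpi_communicator);
    // stats.first is the mean, stats.second the standard deviation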
513  template <typename T>
514  T
515  sum(const T &t, const MPI_Comm &mpi_communicator);
516 
526  template <typename T, typename U>
527  void
528  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
529 
539  template <typename T>
540  void
 541  sum(const ArrayView<const T> &values,
 542  const MPI_Comm & mpi_communicator,
543  const ArrayView<T> & sums);
544 
550  template <int rank, int dim, typename Number>
 551  SymmetricTensor<rank, dim, Number>
 552  sum(const SymmetricTensor<rank, dim, Number> &local,
 553  const MPI_Comm & mpi_communicator);
554 
560  template <int rank, int dim, typename Number>
 561  Tensor<rank, dim, Number>
 562  sum(const Tensor<rank, dim, Number> &local,
563  const MPI_Comm & mpi_communicator);
564 
573  template <typename Number>
574  void
575  sum(const SparseMatrix<Number> &local,
576  const MPI_Comm & mpi_communicator,
577  SparseMatrix<Number> & global);
578 
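Illustrative use of the sum() overloads declared above; the variable names and values are hypothetical:

    double local_energy = 1.0; // contribution of this rank
    const double total_energy =
      Utilities::MPI::sum(local_energy, mpi_communicator);

    std::vector<unsigned int> local_counts = {1, 2, 3};
    std::vector<unsigned int> global_counts(local_counts.size());
    Utilities::MPI::sum(local_counts, mpi_communicator, global_counts);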
598  template <typename T>
599  T
600  max(const T &t, const MPI_Comm &mpi_communicator);
601 
611  template <typename T, typename U>
612  void
613  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
614 
624  template <typename T>
625  void
 626  max(const ArrayView<const T> &values,
 627  const MPI_Comm & mpi_communicator,
628  const ArrayView<T> & maxima);
629 
649  template <typename T>
650  T
651  min(const T &t, const MPI_Comm &mpi_communicator);
652 
662  template <typename T, typename U>
663  void
664  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
665 
675  template <typename T>
676  void
 677  min(const ArrayView<const T> &values,
 678  const MPI_Comm & mpi_communicator,
679  const ArrayView<T> & minima);
680 
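A common pattern with min()/max() (sketch; the time step value is made up): every rank computes a locally admissible quantity and all ranks then agree on the global extremum.

    const double local_dt  = 0.01; // locally admissible time step
    const double global_dt = Utilities::MPI::min(local_dt, mpi_communicator);
    const double largest   = Utilities::MPI::max(local_dt, mpi_communicator);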
695  struct MinMaxAvg
696  {
701  double sum;
702 
707  double min;
708 
713  double max;
714 
723  unsigned int min_index;
724 
733  unsigned int max_index;
734 
739  double avg;
740  };
741 
756  MinMaxAvg
757  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
758 
770  std::vector<MinMaxAvg>
771  min_max_avg(const std::vector<double> &my_value,
772  const MPI_Comm & mpi_communicator);
773 
774 
787  void
788  min_max_avg(const ArrayView<const double> &my_values,
789  const ArrayView<MinMaxAvg> & result,
790  const MPI_Comm & mpi_communicator);
791 
792 
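A sketch of reporting load-balance statistics with min_max_avg(); the timed quantity is hypothetical:

    const double wall_time = 1.23; // e.g. time spent in assembly on this rank
    const Utilities::MPI::MinMaxAvg stats =
      Utilities::MPI::min_max_avg(wall_time, mpi_communicator);
    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
      std::cout << "assembly: avg=" << stats.avg << " max=" << stats.max
                << " (on rank " << stats.max_index << ")" << std::endl;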
 836  class MPI_InitFinalize
 837  {
838  public:
 884  MPI_InitFinalize(
 885  int & argc,
886  char **& argv,
887  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
888 
893  ~MPI_InitFinalize();
894 
921  static void
922  register_request(MPI_Request &request);
923 
927  static void
928  unregister_request(MPI_Request &request);
929 
930  private:
934  static std::set<MPI_Request *> requests;
935  };
936 
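The canonical way to use this class is to create a single MPI_InitFinalize object at the top of main(), as the deal.II tutorial programs do. A minimal, self-contained sketch:

    #include <deal.II/base/mpi.h>

    int main(int argc, char **argv)
    {
      using namespace dealii;

      // Initializes MPI (and, if configured, libraries such as PETSc and
      // p4est) and limits each MPI process to one thread; everything is
      // finalized again when the object is destroyed at the end of main().
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      // ... parallel program goes here ...

      return 0;
    }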
 948  bool
 949  job_supports_mpi();
 950 
968  template <typename T>
969  std::map<unsigned int, T>
970  some_to_some(const MPI_Comm & comm,
971  const std::map<unsigned int, T> &objects_to_send);
972 
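Illustrative call to some_to_some(): each rank sends a (possibly empty) packet to a chosen set of ranks and receives whatever was addressed to it. The payload below is made up.

    const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
    const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

    std::map<unsigned int, std::vector<double>> to_send;
    if (n_ranks > 1)
      to_send[(my_rank + 1) % n_ranks] = {1.0 * my_rank, 2.0 * my_rank};

    const std::map<unsigned int, std::vector<double>> received =
      Utilities::MPI::some_to_some(comm, to_send);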
986  template <typename T>
987  std::vector<T>
988  all_gather(const MPI_Comm &comm, const T &object_to_send);
989 
1005  template <typename T>
1006  std::vector<T>
1007  gather(const MPI_Comm & comm,
1008  const T & object_to_send,
1009  const unsigned int root_process = 0);
1010 
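A sketch of gathering arbitrary (serializable) objects, assuming the obvious standard headers: all_gather() gives every rank the full set, gather() only the root. The object contents are illustrative.

    const std::string my_name =
      "rank " + std::to_string(Utilities::MPI::this_mpi_process(comm));

    // every process obtains the vector of all names:
    const std::vector<std::string> all_names =
      Utilities::MPI::all_gather(comm, my_name);

    // only the root process obtains a non-empty result here:
    const std::vector<std::string> names_on_root =
      Utilities::MPI::gather(comm, my_name, /*root_process=*/0);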
1053  std::vector<unsigned int>
1054  compute_index_owner(const IndexSet &owned_indices,
1055  const IndexSet &indices_to_look_up,
1056  const MPI_Comm &comm);
1057 
1065  template <typename T>
1066  std::vector<T>
1067  compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1068 
1072  template <typename T>
1073  std::set<T>
1074  compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1075 
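A hedged example of compute_set_union() on per-rank sets; the set contents are made up:

    const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
    std::set<unsigned int> local_set = {my_rank, my_rank + 1};

    // union over all ranks, known to every rank afterwards:
    const std::set<unsigned int> global_set =
      Utilities::MPI::compute_set_union(local_set, comm);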
1076 #ifndef DOXYGEN
1077  // declaration for an internal function that lives in mpi.templates.h
1078  namespace internal
1079  {
1080  template <typename T>
1081  void
1082  all_reduce(const MPI_Op & mpi_op,
1083  const ArrayView<const T> &values,
1084  const MPI_Comm & mpi_communicator,
1085  const ArrayView<T> & output);
1086  }
1087 
1088  // Since these depend on N they must live in the header file
1089  template <typename T, unsigned int N>
1090  void
1091  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1092  {
1093  internal::all_reduce(MPI_SUM,
 1094  ArrayView<const T>(values, N),
 1095  mpi_communicator,
1096  ArrayView<T>(sums, N));
1097  }
1098 
1099  template <typename T, unsigned int N>
1100  void
1101  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1102  {
1103  internal::all_reduce(MPI_MAX,
 1104  ArrayView<const T>(values, N),
 1105  mpi_communicator,
1106  ArrayView<T>(maxima, N));
1107  }
1108 
1109  template <typename T, unsigned int N>
1110  void
1111  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1112  {
1113  internal::all_reduce(MPI_MIN,
 1114  ArrayView<const T>(values, N),
 1115  mpi_communicator,
1116  ArrayView<T>(minima, N));
1117  }
1118 
1119  template <typename T>
1120  std::map<unsigned int, T>
1121  some_to_some(const MPI_Comm & comm,
1122  const std::map<unsigned int, T> &objects_to_send)
1123  {
1124 # ifndef DEAL_II_WITH_MPI
1125  (void)comm;
1126  Assert(objects_to_send.size() < 2,
1127  ExcMessage("Cannot send to more than one processor."));
1128  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1129  objects_to_send.size() == 0,
1130  ExcMessage("Can only send to myself or to nobody."));
1131  return objects_to_send;
1132 # else
1133  const auto my_proc = this_mpi_process(comm);
1134 
1135  std::map<unsigned int, T> received_objects;
1136 
1137  std::vector<unsigned int> send_to;
1138  send_to.reserve(objects_to_send.size());
1139  for (const auto &m : objects_to_send)
1140  if (m.first == my_proc)
1141  received_objects[my_proc] = m.second;
1142  else
1143  send_to.emplace_back(m.first);
1144 
1145  const unsigned int n_point_point_communications =
 1146  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
 1147 
1148  // Protect the following communication:
1149  static CollectiveMutex mutex;
1150  CollectiveMutex::ScopedLock lock(mutex, comm);
1151 
1152  // If we have something to send, or we expect something from other
1153  // processors, we need to visit one of the two scopes below. Otherwise,
1154  // no other action is required by this mpi process, and we can safely
1155  // return.
1156  if (send_to.size() == 0 && n_point_point_communications == 0)
1157  return received_objects;
1158 
1159  const int mpi_tag =
 1160  internal::Tags::compute_point_to_point_communication_pattern;
 1161 
1162  // Sending buffers
1163  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1164  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1165  {
1166  unsigned int i = 0;
1167  for (const auto &rank_obj : objects_to_send)
1168  if (rank_obj.first != my_proc)
1169  {
1170  const auto &rank = rank_obj.first;
1171  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1172  /*allow_compression=*/false);
1173  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1174  buffers_to_send[i].size(),
1175  MPI_CHAR,
1176  rank,
1177  mpi_tag,
1178  comm,
1179  &buffer_send_requests[i]);
1180  AssertThrowMPI(ierr);
1181  ++i;
1182  }
1183  }
1184 
1185  // Fill the output map
1186  {
1187  std::vector<char> buffer;
1188  // We do this on a first come/first served basis
1189  for (unsigned int i = 0; i < n_point_point_communications; ++i)
1190  {
1191  // Probe what's going on. Take data from the first available sender
1192  MPI_Status status;
1193  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1194  AssertThrowMPI(ierr);
1195 
1196  // Length of the message
1197  int len;
1198  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1199  AssertThrowMPI(ierr);
1200  buffer.resize(len);
1201 
1202  // Source rank
1203  const unsigned int rank = status.MPI_SOURCE;
1204 
1205  // Actually receive the message
1206  ierr = MPI_Recv(buffer.data(),
1207  len,
1208  MPI_CHAR,
1209  status.MPI_SOURCE,
1210  status.MPI_TAG,
1211  comm,
1212  MPI_STATUS_IGNORE);
1213  AssertThrowMPI(ierr);
1214  Assert(received_objects.find(rank) == received_objects.end(),
 1215  ExcInternalError(
 1216  "I should not receive again from this rank"));
1217  received_objects[rank] =
1218  Utilities::unpack<T>(buffer,
1219  /*allow_compression=*/false);
1220  }
1221  }
1222 
1223  // Wait to have sent all objects.
1224  const int ierr = MPI_Waitall(send_to.size(),
1225  buffer_send_requests.data(),
1226  MPI_STATUSES_IGNORE);
1227  AssertThrowMPI(ierr);
1228 
1229  return received_objects;
1230 # endif // deal.II with MPI
1231  }
1232 
1233  template <typename T>
1234  std::vector<T>
1235  all_gather(const MPI_Comm &comm, const T &object)
1236  {
1237  if (job_supports_mpi() == false)
1238  return {object};
1239 
1240 # ifndef DEAL_II_WITH_MPI
1241  (void)comm;
1242  std::vector<T> v(1, object);
1243  return v;
1244 # else
 1245  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
1246 
1247  std::vector<char> buffer = Utilities::pack(object);
1248 
1249  int n_local_data = buffer.size();
1250 
1251  // Vector to store the size of loc_data_array for every process
1252  std::vector<int> size_all_data(n_procs, 0);
1253 
1254  // Exchanging the size of each buffer
1255  MPI_Allgather(
1256  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1257 
1258  // Now computing the displacement, relative to recvbuf,
1259  // at which to store the incoming buffer
1260  std::vector<int> rdispls(n_procs);
1261  rdispls[0] = 0;
1262  for (unsigned int i = 1; i < n_procs; ++i)
1263  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1264 
1265  // Step 3: exchange the buffer:
1266  std::vector<char> received_unrolled_buffer(rdispls.back() +
1267  size_all_data.back());
1268 
1269  MPI_Allgatherv(buffer.data(),
1270  n_local_data,
1271  MPI_CHAR,
1272  received_unrolled_buffer.data(),
1273  size_all_data.data(),
1274  rdispls.data(),
1275  MPI_CHAR,
1276  comm);
1277 
1278  std::vector<T> received_objects(n_procs);
1279  for (unsigned int i = 0; i < n_procs; ++i)
1280  {
1281  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1282  rdispls[i],
1283  received_unrolled_buffer.begin() +
1284  rdispls[i] + size_all_data[i]);
1285  received_objects[i] = Utilities::unpack<T>(local_buffer);
1286  }
1287 
1288  return received_objects;
1289 # endif
1290  }
1291 
1292  template <typename T>
1293  std::vector<T>
1294  gather(const MPI_Comm & comm,
1295  const T & object_to_send,
1296  const unsigned int root_process)
1297  {
1298 # ifndef DEAL_II_WITH_MPI
1299  (void)comm;
1300  (void)root_process;
1301  std::vector<T> v(1, object_to_send);
1302  return v;
1303 # else
 1304  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
 1305  const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
1306 
1307  AssertIndexRange(root_process, n_procs);
1308 
1309  std::vector<char> buffer = Utilities::pack(object_to_send);
1310  int n_local_data = buffer.size();
1311 
1312  // Vector to store the size of loc_data_array for every process
1313  // only the root process needs to allocate memory for that purpose
1314  std::vector<int> size_all_data;
1315  if (my_rank == root_process)
1316  size_all_data.resize(n_procs, 0);
1317 
1318  // Exchanging the size of each buffer
1319  int ierr = MPI_Gather(&n_local_data,
1320  1,
1321  MPI_INT,
1322  size_all_data.data(),
1323  1,
1324  MPI_INT,
1325  root_process,
1326  comm);
1327  AssertThrowMPI(ierr);
1328 
1329  // Now computing the displacement, relative to recvbuf,
1330  // at which to store the incoming buffer; only for root
1331  std::vector<int> rdispls;
1332  if (my_rank == root_process)
1333  {
1334  rdispls.resize(n_procs, 0);
1335  for (unsigned int i = 1; i < n_procs; ++i)
1336  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1337  }
1338  // exchange the buffer:
1339  std::vector<char> received_unrolled_buffer;
1340  if (my_rank == root_process)
1341  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
1342 
1343  ierr = MPI_Gatherv(buffer.data(),
1344  n_local_data,
1345  MPI_CHAR,
1346  received_unrolled_buffer.data(),
1347  size_all_data.data(),
1348  rdispls.data(),
1349  MPI_CHAR,
1350  root_process,
1351  comm);
1352  AssertThrowMPI(ierr);
1353 
1354  std::vector<T> received_objects;
1355 
1356  if (my_rank == root_process)
1357  {
1358  received_objects.resize(n_procs);
1359 
1360  for (unsigned int i = 0; i < n_procs; ++i)
1361  {
1362  const std::vector<char> local_buffer(
1363  received_unrolled_buffer.begin() + rdispls[i],
1364  received_unrolled_buffer.begin() + rdispls[i] +
1365  size_all_data[i]);
1366  received_objects[i] = Utilities::unpack<T>(local_buffer);
1367  }
1368  }
1369  return received_objects;
1370 # endif
1371  }
1372 
1373 
1374 # ifdef DEAL_II_WITH_MPI
1375  template <class Iterator, typename Number>
1376  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
1377  mean_and_standard_deviation(const Iterator begin,
1378  const Iterator end,
1379  const MPI_Comm &comm)
1380  {
 1381  // below we use a simple and straightforward implementation. More elaborate
1382  // options are:
1383  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
1384  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
1385  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
1386  using Std = typename numbers::NumberTraits<Number>::real_type;
1387  const Number sum = std::accumulate(begin, end, Number(0.));
1388 
1389  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
1390  Assert(size > 0, ExcDivideByZero());
1391  const Number mean =
1392  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
1393  Std sq_sum = 0.;
1394  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
1395  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
1396  });
1397  sq_sum = Utilities::MPI::sum(sq_sum, comm);
1398  return std::make_pair(mean,
1399  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
1400  }
1401 # endif
1402 
1403 #endif
1404  } // end of namespace MPI
1405 } // end of namespace Utilities
1406 
1407 
 1408 DEAL_II_NAMESPACE_CLOSE
 1409 
1410 #endif