Reference documentation for deal.II version Git 71593a1301 2019-07-19 12:10:10 -0400
mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/array_view.h>
22 #include <deal.II/base/numbers.h>
23 
24 #include <map>
25 #include <numeric>
26 #include <set>
27 #include <vector>
28 
29 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
30 // without MPI, we would still like to use
31 // some constructs with MPI data
32 // types. Therefore, create some dummies
33 using MPI_Comm = int;
34 using MPI_Datatype = int;
35 using MPI_Op = int;
36 # ifndef MPI_COMM_WORLD
37 # define MPI_COMM_WORLD 0
38 # endif
39 # ifndef MPI_COMM_SELF
40 # define MPI_COMM_SELF 0
41 # endif
42 # ifndef MPI_MIN
43 # define MPI_MIN 0
44 # endif
45 # ifndef MPI_MAX
46 # define MPI_MAX 0
47 # endif
48 # ifndef MPI_SUM
49 # define MPI_SUM 0
50 # endif
51 #endif
52 
53 
54 
68 #ifdef DEAL_II_WITH_MPI
69 # if DEAL_II_MPI_VERSION_GTE(3, 0)
70 
71 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
72 
73 # else
74 
75 # include <type_traits>
76 
77 # define DEAL_II_MPI_CONST_CAST(expr) \
78  const_cast<typename std::remove_const< \
79  typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
80 
81 # endif
82 #endif
83 
84 
85 
86 DEAL_II_NAMESPACE_OPEN
87 
88 
89 // Forward type declarations to allow MPI sums over tensorial types
90 template <int rank, int dim, typename Number>
91 class Tensor;
92 template <int rank, int dim, typename Number>
93 class SymmetricTensor;
94 template <typename Number>
95 class SparseMatrix;
96 class IndexSet;
97 
98 namespace Utilities
99 {
107  namespace MPI
108  {
117  unsigned int
118  n_mpi_processes(const MPI_Comm &mpi_communicator);
119 
128  unsigned int
129  this_mpi_process(const MPI_Comm &mpi_communicator);
130 
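A minimal usage sketch for the two query functions above (it assumes MPI has already been initialized, e.g. through the MPI_InitFinalize class declared further down, and that user code has the usual `using namespace dealii;` and `<iostream>`):

  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  if (my_rank == 0)
    std::cout << "running on " << n_ranks << " MPI processes" << std::endl;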
152 std::vector<unsigned int>
153 compute_point_to_point_communication_pattern(
154  const MPI_Comm & mpi_comm,
155  const std::vector<unsigned int> &destinations);
156 
176 unsigned int
177 compute_n_point_to_point_communications(
178  const MPI_Comm & mpi_comm,
179  const std::vector<unsigned int> &destinations);
180 
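As an illustration of the two functions above (the destination ranks are made up, and mpi_comm stands for any valid communicator): a process that intends to send to ranks 3 and 7 can find out which ranks will in turn send to it, without any global knowledge:

  const std::vector<unsigned int> destinations = {3, 7};  // made-up targets
  const std::vector<unsigned int> sources =
    Utilities::MPI::compute_point_to_point_communication_pattern(mpi_comm,
                                                                 destinations);
  const unsigned int n_incoming =
    Utilities::MPI::compute_n_point_to_point_communications(mpi_comm,
                                                            destinations);
  // 'sources' lists the ranks that named this process among their
  // destinations; 'n_incoming' is only the number of such messages.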
194  MPI_Comm
195  duplicate_communicator(const MPI_Comm &mpi_communicator);
196 
224 #ifdef DEAL_II_WITH_MPI
225  int
226  create_group(const MPI_Comm & comm,
227  const MPI_Group &group,
228  const int tag,
229  MPI_Comm * new_comm);
230 #endif
231 
240  std::vector<IndexSet>
241  create_ascending_partitioning(const MPI_Comm & comm,
242  const IndexSet::size_type &local_size);
243 
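A sketch of how the partitioning function above might be used; local_size is whatever number of elements the calling process owns (for example, with two processes owning 10 and 5 elements, the result is [0,10) for rank 0 and [10,15) for rank 1, returned on every process):

  const std::vector<IndexSet> ranges =
    Utilities::MPI::create_ascending_partitioning(comm, local_size);
  const IndexSet &my_range = ranges[Utilities::MPI::this_mpi_process(comm)];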
244 #ifdef DEAL_II_WITH_MPI
245 
260  template <class Iterator, typename Number = long double>
261  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
262  mean_and_standard_deviation(const Iterator begin,
263  const Iterator end,
264  const MPI_Comm &comm);
265 #endif
266 
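A short usage sketch, assuming each process stores part of a distributed sample in a std::vector<double> and comm is the corresponding communicator:

  std::vector<double> local_samples = {1.0, 2.0, 3.0};  // made-up local data
  const auto stats =
    Utilities::MPI::mean_and_standard_deviation(local_samples.begin(),
                                                local_samples.end(),
                                                comm);
  // stats.first is the mean, stats.second the standard deviation, both
  // taken over the union of all processes' samples.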
286  template <typename T>
287  T
288  sum(const T &t, const MPI_Comm &mpi_communicator);
289 
299  template <typename T, typename U>
300  void
301  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
302 
312  template <typename T>
313  void
314  sum(const ArrayView<const T> &values,
315  const MPI_Comm & mpi_communicator,
316  const ArrayView<T> & sums);
317 
323 template <int rank, int dim, typename Number>
324 SymmetricTensor<rank, dim, Number>
325 sum(const SymmetricTensor<rank, dim, Number> &local,
326  const MPI_Comm & mpi_communicator);
327 
333 template <int rank, int dim, typename Number>
334 Tensor<rank, dim, Number>
335 sum(const Tensor<rank, dim, Number> &local,
336  const MPI_Comm & mpi_communicator);
337 
346  template <typename Number>
347  void
348  sum(const SparseMatrix<Number> &local,
349  const MPI_Comm & mpi_communicator,
350  SparseMatrix<Number> & global);
351 
371  template <typename T>
372  T
373  max(const T &t, const MPI_Comm &mpi_communicator);
374 
384  template <typename T, typename U>
385  void
386  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
387 
397  template <typename T>
398  void
399  max(const ArrayView<const T> &values,
400  const MPI_Comm & mpi_communicator,
401  const ArrayView<T> & maxima);
402 
422  template <typename T>
423  T
424  min(const T &t, const MPI_Comm &mpi_communicator);
425 
435  template <typename T, typename U>
436  void
437  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
438 
448  template <typename T>
449  void
450  min(const ArrayView<const T> &values,
451  const MPI_Comm & mpi_communicator,
452  const ArrayView<T> & minima);
453 
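A brief sketch of the scalar and fixed-size-array forms of these reductions (compute_local_error() is a hypothetical placeholder for whatever produces the local value; mpi_communicator is any valid communicator):

  const double local_error   = compute_local_error();   // hypothetical helper
  const double total_error   = Utilities::MPI::sum(local_error, mpi_communicator);
  const double largest_error = Utilities::MPI::max(local_error, mpi_communicator);

  // element-wise reduction of a C-style array
  double local_bounds[2] = {1.0, 2.0};
  double global_minima[2];
  Utilities::MPI::min(local_bounds, mpi_communicator, global_minima);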
468  struct MinMaxAvg
469  {
474  double sum;
475 
480  double min;
481 
486  double max;
487 
496  unsigned int min_index;
497 
506  unsigned int max_index;
507 
512  double avg;
513  };
514 
529  MinMaxAvg
530  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
531 
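For example, to summarize a per-process wall-clock time across all ranks (local_time is a made-up placeholder for the locally measured value):

  const Utilities::MPI::MinMaxAvg timing =
    Utilities::MPI::min_max_avg(local_time, mpi_communicator);
  // timing.min / timing.max hold the extremal values, timing.min_index /
  // timing.max_index the ranks owning them, and timing.avg and timing.sum
  // the average and the sum over all processes.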
575  class MPI_InitFinalize
576  {
577  public:
623  MPI_InitFinalize(
624  int & argc,
625  char **& argv,
626  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
627 
632  ~MPI_InitFinalize();
633  };
634 
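The usual pattern is to create exactly one object of this class at the top of main(); it initializes MPI and finalizes it again when the object is destroyed at the end of the program. A minimal sketch (limiting deal.II to one thread per process, a common but not mandatory choice):

  #include <deal.II/base/mpi.h>

  int main(int argc, char *argv[])
  {
    dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

    // ... the actual parallel program ...
  }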
646  bool
647  job_supports_mpi();
648 
665  template <typename T>
666  std::map<unsigned int, T>
667  some_to_some(const MPI_Comm & comm,
668  const std::map<unsigned int, T> &objects_to_send);
669 
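A sketch of point-to-point exchange with some_to_some(): every process sends a std::vector<double> (a made-up payload) to one chosen rank; my_rank, n_ranks, and comm are assumed to be set up as in the earlier examples:

  std::map<unsigned int, std::vector<double>> to_send;
  to_send[(my_rank + 1) % n_ranks] = {1.0, 2.0, 3.0};   // made-up payload

  const std::map<unsigned int, std::vector<double>> received =
    Utilities::MPI::some_to_some(comm, to_send);
  // 'received' maps each sending rank to the object that rank sent here.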
685  template <typename T>
686  std::vector<T>
687  all_gather(const MPI_Comm &comm, const T &object_to_send);
688 
706  template <typename T>
707  std::vector<T>
708  gather(const MPI_Comm & comm,
709  const T & object_to_send,
710  const unsigned int root_process = 0);
711 
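A sketch of the two gathering variants; the object can be any type that Utilities::pack() can serialize (a std::string here), and my_rank and comm are assumed as before:

  const std::string name = "rank " + std::to_string(my_rank);

  // every process receives the names of all processes
  const std::vector<std::string> all_names =
    Utilities::MPI::all_gather(comm, name);

  // only root_process = 0 receives a filled vector; it is empty elsewhere
  const std::vector<std::string> gathered_names =
    Utilities::MPI::gather(comm, name, 0);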
735  template <typename T1, typename T2>
736  class ConsensusAlgorithmProcess
737  {
738  public:
745  virtual std::vector<unsigned int>
746  compute_targets() = 0;
747 
756  virtual void
757  pack_recv_buffer(const int other_rank, std::vector<T1> &send_buffer);
758 
767  virtual void
768  prepare_recv_buffer(const int other_rank, std::vector<T2> &recv_buffer);
769 
782  virtual void
783  process_request(const unsigned int other_rank,
784  const std::vector<T1> &buffer_recv,
785  std::vector<T2> & request_buffer);
786 
794  virtual void
795  unpack_recv_buffer(const int other_rank,
796  const std::vector<T2> &recv_buffer);
797  };
798 
824  template <typename T1, typename T2>
825  class ConsensusAlgorithm
826  {
827  public:
828  ConsensusAlgorithm(ConsensusAlgorithmProcess<T1, T2> &process,
829  const MPI_Comm & comm);
830 
834  virtual ~ConsensusAlgorithm() = default;
835 
836  virtual void
837  run() = 0;
838 
839  protected:
843  ConsensusAlgorithmProcess<T1, T2> &process;
844 
848  const MPI_Comm &comm;
849 
853  const unsigned int my_rank;
854 
858  const unsigned int n_procs;
859  };
860 
877  template <typename T1, typename T2>
878  class ConsensusAlgorithm_NBX : public ConsensusAlgorithm<T1, T2>
879  {
880  public:
881  // Unique tags to be used during Isend and Irecv
882  static const unsigned int tag_request = 12;
883  static const unsigned int tag_delivery = 13;
884 
891  ConsensusAlgorithm_NBX(ConsensusAlgorithmProcess<T1, T2> &process,
892  const MPI_Comm & comm);
893 
897  virtual ~ConsensusAlgorithm_NBX() = default;
898 
902  virtual void
903  run() override;
904 
905  private:
906 #ifdef DEAL_II_WITH_MPI
907 
910  std::vector<unsigned int> targets;
911 
915  std::vector<std::vector<T1>> send_buffers;
916 
920  std::vector<MPI_Request> send_requests;
921 
925  std::vector<std::vector<T2>> recv_buffers;
926 
927 
931  std::vector<MPI_Request> recv_requests;
932 
936  std::vector<std::vector<T2>> request_buffers;
937 
941  std::vector<std::shared_ptr<MPI_Request>> request_requests;
942 
943  // request for barrier
944  MPI_Request barrier_request;
945 #endif
946 
947 #ifdef DEBUG
948 
951  std::set<unsigned int> requesting_processes;
952 #endif
953 
957  bool
958  check_own_state();
959 
964  void
965  signal_finish();
966 
971  bool
972  check_global_state();
973 
978  void
979  process_requests();
980 
985  void
986  start_communication();
987 
992  void
993  clean_up_and_end_communication();
994  };
995 
1019  template <typename T1, typename T2>
1020  class ConsensusAlgorithm_PEX : public ConsensusAlgorithm<T1, T2>
1021  {
1022  public:
1023  // Unique tags to be used during Isend and Irecv
1024  static const unsigned int tag_request = 14;
1025  static const unsigned int tag_delivery = 15;
1026 
1033  ConsensusAlgorithm_PEX(ConsensusAlgorithmProcess<T1, T2> &process,
1034  const MPI_Comm & comm);
1035 
1039  virtual ~ConsensusAlgorithm_PEX() = default;
1040 
1044  virtual void
1045  run() override;
1046 
1047  private:
1048 #ifdef DEAL_II_WITH_MPI
1049 
1052  std::vector<unsigned int> targets;
1053 
1057  std::vector<unsigned int> sources;
1058 
1059  // data structures to send and receive requests
1060 
1064  std::vector<std::vector<T1>> send_buffers;
1065 
1069  std::vector<std::vector<T2>> recv_buffers;
1070 
1074  std::vector<MPI_Request> send_and_recv_buffers;
1075 
1079  std::vector<std::vector<T2>> requests_buffers;
1080 
1084  std::vector<MPI_Request> requests_answers;
1085 #endif
1086 
1091  void
1092  process_requests(int index);
1093 
1098  unsigned int
1099  start_communication();
1100 
1105  void
1106  clean_up_and_end_communication();
1107  };
1108 
1122  template <typename T1, typename T2>
1123  class ConsensusAlgorithmSelector : public ConsensusAlgorithm<T1, T2>
1124  {
1125  public:
1132  ConsensusAlgorithmSelector(ConsensusAlgorithmProcess<T1, T2> &process,
1133  const MPI_Comm & comm);
1134 
1138  virtual ~ConsensusAlgorithmSelector() = default;
1139 
1144  virtual void
1145  run() override;
1146 
1147  private:
1148  // Pointer to the actual ConsensusAlgorithm implementation.
1149  std::shared_ptr<ConsensusAlgorithm<T1, T2>> consensus_algo;
1150  };
1151 
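A sketch of how the consensus-algorithm classes above might be used together (all types and payloads are made up): each process asks one other rank a question consisting of unsigned ints and expects one double as the answer. Only virtual functions declared in ConsensusAlgorithmProcess are overridden, and the ConsensusAlgorithmSelector picks a concrete implementation at run time.

  class QueryProcess
    : public Utilities::MPI::ConsensusAlgorithmProcess<unsigned int, double>
  {
  public:
    QueryProcess(const unsigned int target)
      : target(target)
    {}

    virtual std::vector<unsigned int>
    compute_targets() override
    {
      return {target};                        // the ranks this process asks
    }

    virtual void
    pack_recv_buffer(const int /*other_rank*/,
                     std::vector<unsigned int> &send_buffer) override
    {
      send_buffer = {42u};                    // made-up request payload
    }

    virtual void
    prepare_recv_buffer(const int /*other_rank*/,
                        std::vector<double> &recv_buffer) override
    {
      recv_buffer.resize(1);                  // we expect one double back
    }

    virtual void
    process_request(const unsigned int /*other_rank*/,
                    const std::vector<unsigned int> &buffer_recv,
                    std::vector<double> &request_buffer) override
    {
      request_buffer.assign(buffer_recv.size(), 3.14);  // made-up answer
    }

    virtual void
    unpack_recv_buffer(const int /*other_rank*/,
                       const std::vector<double> &recv_buffer) override
    {
      (void)recv_buffer;                      // consume the answer here
    }

  private:
    const unsigned int target;
  };

  // e.g. every process queries its right neighbor:
  QueryProcess process((my_rank + 1) % n_ranks);
  Utilities::MPI::ConsensusAlgorithmSelector<unsigned int, double>(process, comm)
    .run();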
1196  std::vector<unsigned int>
1197  compute_index_owner(const IndexSet &owned_indices,
1198  const IndexSet &indices_to_look_up,
1199  const MPI_Comm &comm);
1200 
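A sketch with made-up index sets (it assumes exactly 10 processes, so that the locally owned ranges below form a partition of [0,100)):

  IndexSet owned_indices(100);
  owned_indices.add_range(10 * my_rank, 10 * (my_rank + 1));

  IndexSet indices_to_look_up(100);
  indices_to_look_up.add_index(5);            // made-up index of interest

  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(owned_indices, indices_to_look_up, comm);
  // owners[i] is the rank owning the i-th element of indices_to_look_up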
1201 #ifndef DOXYGEN
1202  // declaration for an internal function that lives in mpi.templates.h
1203  namespace internal
1204  {
1205  template <typename T>
1206  void
1207  all_reduce(const MPI_Op & mpi_op,
1208  const ArrayView<const T> &values,
1209  const MPI_Comm & mpi_communicator,
1210  const ArrayView<T> & output);
1211  }
1212 
1213  // Since these depend on N they must live in the header file
1214  template <typename T, unsigned int N>
1215  void
1216  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1217  {
1218  internal::all_reduce(MPI_SUM,
1219  ArrayView<const T>(values, N),
1220  mpi_communicator,
1221  ArrayView<T>(sums, N));
1222  }
1223 
1224  template <typename T, unsigned int N>
1225  void
1226  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1227  {
1228  internal::all_reduce(MPI_MAX,
1229  ArrayView<const T>(values, N),
1230  mpi_communicator,
1231  ArrayView<T>(maxima, N));
1232  }
1233 
1234  template <typename T, unsigned int N>
1235  void
1236  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1237  {
1238  internal::all_reduce(MPI_MIN,
1239  ArrayView<const T>(values, N),
1240  mpi_communicator,
1241  ArrayView<T>(minima, N));
1242  }
1243 
1244  template <typename T>
1245  std::map<unsigned int, T>
1246  some_to_some(const MPI_Comm & comm,
1247  const std::map<unsigned int, T> &objects_to_send)
1248  {
1249 # ifndef DEAL_II_WITH_MPI
1250  (void)comm;
1251  Assert(objects_to_send.size() == 0,
1252  ExcMessage("Cannot send to more than one processor."));
1253  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1254  objects_to_send.size() == 0,
1255  ExcMessage("Can only send to myself or to nobody."));
1256  return objects_to_send;
1257 # else
1258 
1259  std::vector<unsigned int> send_to(objects_to_send.size());
1260  {
1261  unsigned int i = 0;
1262  for (const auto &m : objects_to_send)
1263  send_to[i++] = m.first;
1264  }
1265  AssertDimension(send_to.size(), objects_to_send.size());
1266 
1267  const auto receive_from =
1268  Utilities::MPI::compute_point_to_point_communication_pattern(comm,
1269  send_to);
1270 
1271  // Sending buffers
1272  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1273  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1274  {
1275  unsigned int i = 0;
1276  for (const auto &rank_obj : objects_to_send)
1277  {
1278  const auto &rank = rank_obj.first;
1279  buffers_to_send[i] = Utilities::pack(rank_obj.second);
1280  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1281  buffers_to_send[i].size(),
1282  MPI_CHAR,
1283  rank,
1284  21,
1285  comm,
1286  &buffer_send_requests[i]);
1287  AssertThrowMPI(ierr);
1288  ++i;
1289  }
1290  }
1291 
1292  // Receiving buffers
1293  std::map<unsigned int, T> received_objects;
1294  {
1295  std::vector<char> buffer;
1296  // We do this on a first come/first served basis
1297  for (unsigned int i = 0; i < receive_from.size(); ++i)
1298  {
1299  // Probe what's going on. Take data from the first available sender
1300  MPI_Status status;
1301  int ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
1302  AssertThrowMPI(ierr);
1303 
1304  // Length of the message
1305  int len;
1306  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1307  AssertThrowMPI(ierr);
1308  buffer.resize(len);
1309 
1310  // Source rank
1311  const unsigned int rank = status.MPI_SOURCE;
1312 
1313  // Actually receive the message
1314  ierr = MPI_Recv(
1315  buffer.data(), len, MPI_CHAR, rank, 21, comm, MPI_STATUS_IGNORE);
1316  AssertThrowMPI(ierr);
1317  Assert(received_objects.find(rank) == received_objects.end(),
1318  ExcInternalError(
1319  "I should not receive again from this rank"));
1320  received_objects[rank] = Utilities::unpack<T>(buffer);
1321  }
1322  }
1323 
1324  // Wait to have sent all objects.
1325  MPI_Waitall(send_to.size(),
1326  buffer_send_requests.data(),
1327  MPI_STATUSES_IGNORE);
1328 
1329  return received_objects;
1330 # endif // deal.II with MPI
1331  }
1332 
1333  template <typename T>
1334  std::vector<T>
1335  all_gather(const MPI_Comm &comm, const T &object)
1336  {
1337 # ifndef DEAL_II_WITH_MPI
1338  (void)comm;
1339  std::vector<T> v(1, object);
1340  return v;
1341 # else
1342  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
1343 
1344  std::vector<char> buffer = Utilities::pack(object);
1345 
1346  int n_local_data = buffer.size();
1347 
1348  // Vector to store the size of loc_data_array for every process
1349  std::vector<int> size_all_data(n_procs, 0);
1350 
1351  // Exchanging the size of each buffer
1352  MPI_Allgather(
1353  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1354 
1355  // Now computing the displacement, relative to recvbuf,
1356  // at which to store the incoming buffer
1357  std::vector<int> rdispls(n_procs);
1358  rdispls[0] = 0;
1359  for (unsigned int i = 1; i < n_procs; ++i)
1360  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1361 
1362  // Step 3: exchange the buffer:
1363  std::vector<char> received_unrolled_buffer(rdispls.back() +
1364  size_all_data.back());
1365 
1366  MPI_Allgatherv(buffer.data(),
1367  n_local_data,
1368  MPI_CHAR,
1369  received_unrolled_buffer.data(),
1370  size_all_data.data(),
1371  rdispls.data(),
1372  MPI_CHAR,
1373  comm);
1374 
1375  std::vector<T> received_objects(n_procs);
1376  for (unsigned int i = 0; i < n_procs; ++i)
1377  {
1378  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1379  rdispls[i],
1380  received_unrolled_buffer.begin() +
1381  rdispls[i] + size_all_data[i]);
1382  received_objects[i] = Utilities::unpack<T>(local_buffer);
1383  }
1384 
1385  return received_objects;
1386 # endif
1387  }
1388 
1389  template <typename T>
1390  std::vector<T>
1391  gather(const MPI_Comm & comm,
1392  const T & object_to_send,
1393  const unsigned int root_process)
1394  {
1395 # ifndef DEAL_II_WITH_MPI
1396  (void)comm;
1397  (void)root_process;
1398  std::vector<T> v(1, object_to_send);
1399  return v;
1400 # else
1401  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
1402  const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
1403 
1404  Assert(root_process < n_procs, ExcIndexRange(root_process, 0, n_procs));
1405 
1406  std::vector<char> buffer = Utilities::pack(object_to_send);
1407  int n_local_data = buffer.size();
1408 
1409  // Vector to store the size of loc_data_array for every process
1410  // only the root process needs to allocate memory for that purpose
1411  std::vector<int> size_all_data;
1412  if (my_rank == root_process)
1413  size_all_data.resize(n_procs, 0);
1414 
1415  // Exchanging the size of each buffer
1416  int ierr = MPI_Gather(&n_local_data,
1417  1,
1418  MPI_INT,
1419  size_all_data.data(),
1420  1,
1421  MPI_INT,
1422  root_process,
1423  comm);
1424  AssertThrowMPI(ierr);
1425 
1426  // Now computing the displacement, relative to recvbuf,
1427  // at which to store the incoming buffer; only for root
1428  std::vector<int> rdispls;
1429  if (my_rank == root_process)
1430  {
1431  rdispls.resize(n_procs, 0);
1432  for (unsigned int i = 1; i < n_procs; ++i)
1433  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1434  }
1435  // exchange the buffer:
1436  std::vector<char> received_unrolled_buffer;
1437  if (my_rank == root_process)
1438  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
1439 
1440  ierr = MPI_Gatherv(buffer.data(),
1441  n_local_data,
1442  MPI_CHAR,
1443  received_unrolled_buffer.data(),
1444  size_all_data.data(),
1445  rdispls.data(),
1446  MPI_CHAR,
1447  root_process,
1448  comm);
1449  AssertThrowMPI(ierr);
1450 
1451  std::vector<T> received_objects;
1452 
1453  if (my_rank == root_process)
1454  {
1455  received_objects.resize(n_procs);
1456 
1457  for (unsigned int i = 0; i < n_procs; ++i)
1458  {
1459  const std::vector<char> local_buffer(
1460  received_unrolled_buffer.begin() + rdispls[i],
1461  received_unrolled_buffer.begin() + rdispls[i] +
1462  size_all_data[i]);
1463  received_objects[i] = Utilities::unpack<T>(local_buffer);
1464  }
1465  }
1466  return received_objects;
1467 # endif
1468  }
1469 
1470 
1471 # ifdef DEAL_II_WITH_MPI
1472  template <class Iterator, typename Number>
1473  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
1474  mean_and_standard_deviation(const Iterator begin,
1475  const Iterator end,
1476  const MPI_Comm &comm)
1477  {
1478  // Below we use a simple and straightforward implementation; more elaborate
1479  // options are:
1480  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
1481  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
1482  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
1483  using Std = typename numbers::NumberTraits<Number>::real_type;
1484  const Number sum = std::accumulate(begin, end, Number(0.));
1485 
1486  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
1487  Assert(size > 0, ExcDivideByZero());
1488  const Number mean =
1489  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
1490  Std sq_sum = 0.;
1491  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
1492  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
1493  });
1494  sq_sum = Utilities::MPI::sum(sq_sum, comm);
1495  return std::make_pair(mean,
1496  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
1497  }
1498 # endif
1499 
1500 #endif
1501  } // end of namespace MPI
1502 } // end of namespace Utilities
1503 
1504 
1505 DEAL_II_NAMESPACE_CLOSE
1506 
1507 #endif