Reference documentation for deal.II version Git 082d75bebd 2019-10-16 19:44:02 +0200
mpi.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_mpi_h
#define dealii_mpi_h

#include <deal.II/base/config.h>

#include <deal.II/base/array_view.h>
#include <deal.II/base/numbers.h>

#include <map>
#include <numeric>
#include <set>
#include <vector>

#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// without MPI, we would still like to use
// some constructs with MPI data
// types. Therefore, create some dummies
using MPI_Comm     = int;
using MPI_Datatype = int;
using MPI_Op       = int;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_MIN
#    define MPI_MIN 0
#  endif
#  ifndef MPI_MAX
#    define MPI_MAX 0
#  endif
#  ifndef MPI_SUM
#    define MPI_SUM 0
#  endif
#endif


#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)

#    define DEAL_II_MPI_CONST_CAST(expr) (expr)

#  else

#    include <type_traits>

#    define DEAL_II_MPI_CONST_CAST(expr)     \
      const_cast<typename std::remove_const< \
        typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)

#  endif
#endif


DEAL_II_NAMESPACE_OPEN


// Forward type declarations to allow MPI sums over tensorial types
#ifndef DOXYGEN
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;
class IndexSet;
#endif

namespace Utilities
{
  namespace MPI
  {
    unsigned int
    n_mpi_processes(const MPI_Comm &mpi_communicator);

    unsigned int
    this_mpi_process(const MPI_Comm &mpi_communicator);

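    /**
     * A minimal usage sketch (an illustration, assuming @p comm is a valid
     * communicator):
     * @code
     *   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
     *   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
     * @endcode
     */
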
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

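    /**
     * A sketch of how the two functions above are typically used together
     * (an illustration; @p destinations is assumed to list the ranks this
     * process wants to send to):
     * @code
     *   const std::vector<unsigned int> origins =
     *     Utilities::MPI::compute_point_to_point_communication_pattern(
     *       comm, destinations);
     *   const unsigned int n_incoming_messages =
     *     Utilities::MPI::compute_n_point_to_point_communications(
     *       comm, destinations);
     * @endcode
     */
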
    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator);

#ifdef DEAL_II_WITH_MPI
    int
    create_group(const MPI_Comm & comm,
                 const MPI_Group &group,
                 const int        tag,
                 MPI_Comm *       new_comm);
#endif

    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &           comm,
                                  const IndexSet::size_type &local_size);

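    /**
     * A minimal usage sketch (an illustration): every process contributes
     * the number of indices it owns and receives the resulting contiguous,
     * ascending partition of the global index range:
     * @code
     *   const std::vector<IndexSet> partition =
     *     Utilities::MPI::create_ascending_partitioning(comm, local_size);
     *   const IndexSet &locally_owned =
     *     partition[Utilities::MPI::this_mpi_process(comm)];
     * @endcode
     */
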
#ifdef DEAL_II_WITH_MPI

    template <class Iterator, typename Number = long double>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm);
#endif

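    /**
     * A minimal usage sketch (an illustration; @p samples is assumed to be
     * a local std::vector<double>, possibly of different length on every
     * process):
     * @code
     *   const auto mean_and_sigma =
     *     Utilities::MPI::mean_and_standard_deviation(samples.begin(),
     *                                                 samples.end(),
     *                                                 comm);
     * @endcode
     */
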
    template <typename T>
    T
    sum(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);

    template <typename T>
    void
    sum(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      sums);

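    /**
     * A minimal usage sketch (an illustration; @p local_error is assumed
     * to have been computed on each process):
     * @code
     *   const double global_error = Utilities::MPI::sum(local_error, comm);
     * @endcode
     */
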
    template <int rank, int dim, typename Number>
    SymmetricTensor<rank, dim, Number>
    sum(const SymmetricTensor<rank, dim, Number> &local,
        const MPI_Comm &                           mpi_communicator);

    template <int rank, int dim, typename Number>
    Tensor<rank, dim, Number>
    sum(const Tensor<rank, dim, Number> &local,
        const MPI_Comm &                 mpi_communicator);

    template <typename Number>
    void
    sum(const SparseMatrix<Number> &local,
        const MPI_Comm &            mpi_communicator,
        SparseMatrix<Number> &      global);

    template <typename T>
    T
    max(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);

    template <typename T>
    void
    max(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      maxima);

    template <typename T>
    T
    min(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    min(const T &values, const MPI_Comm &mpi_communicator, U &minima);

    template <typename T>
    void
    min(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      minima);

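    /**
     * A minimal usage sketch for the reductions above (an illustration;
     * @p local_time is assumed to be a per-process value):
     * @code
     *   const double t_max = Utilities::MPI::max(local_time, comm);
     *   const double t_min = Utilities::MPI::min(local_time, comm);
     * @endcode
     */
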
    struct MinMaxAvg
    {
      double sum;

      double min;

      double max;

      unsigned int min_index;

      unsigned int max_index;

      double avg;
    };

    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);

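    /**
     * A minimal usage sketch (an illustration): obtain minimum, maximum,
     * and average of a per-process value in a single reduction, including
     * the ranks on which the extrema occur:
     * @code
     *   const Utilities::MPI::MinMaxAvg stats =
     *     Utilities::MPI::min_max_avg(wall_time, comm);
     *   if (Utilities::MPI::this_mpi_process(comm) == 0)
     *     std::cout << "avg: " << stats.avg
     *               << ", max on rank " << stats.max_index << std::endl;
     * @endcode
     */
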
    class MPI_InitFinalize
    {
    public:
      MPI_InitFinalize(
        int &              argc,
        char **&           argv,
        const unsigned int max_num_threads = numbers::invalid_unsigned_int);

      ~MPI_InitFinalize();
    };

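    /**
     * The usual way to use this class (a sketch): create the object at the
     * top of main() so that MPI is initialized on entry and finalized
     * automatically when the object goes out of scope:
     * @code
     *   int main(int argc, char *argv[])
     *   {
     *     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
     *     // ... parallel program ...
     *   }
     * @endcode
     */
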
    bool
    job_supports_mpi();

    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send);

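    /**
     * A minimal usage sketch (an illustration with hypothetical payloads):
     * each process sends a vector to its right neighbor and receives
     * whatever other processes addressed to it:
     * @code
     *   std::map<unsigned int, std::vector<double>> to_send;
     *   if (my_rank + 1 < n_ranks)
     *     to_send[my_rank + 1] = {1., 2., 3.};
     *   const std::map<unsigned int, std::vector<double>> received =
     *     Utilities::MPI::some_to_some(comm, to_send);
     * @endcode
     */
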
    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object_to_send);

    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process = 0);

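    /**
     * A minimal usage sketch (an illustration; @p my_name is assumed to be
     * a local std::string):
     * @code
     *   const std::vector<std::string> on_all_ranks =
     *     Utilities::MPI::all_gather(comm, my_name);
     *   const std::vector<std::string> on_root =
     *     Utilities::MPI::gather(comm, my_name, 0);
     * @endcode
     * On processes other than the root, gather() returns an empty vector
     * (see the implementation below).
     */
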
    template <typename T1, typename T2>
    class ConsensusAlgorithmProcess
    {
    public:
      virtual ~ConsensusAlgorithmProcess() = default;

      virtual std::vector<unsigned int>
      compute_targets() = 0;

      virtual void
      pack_recv_buffer(const int other_rank, std::vector<T1> &send_buffer);

      virtual void
      prepare_recv_buffer(const int other_rank, std::vector<T2> &recv_buffer);

      virtual void
      process_request(const unsigned int     other_rank,
                      const std::vector<T1> &buffer_recv,
                      std::vector<T2> &      request_buffer);

      virtual void
      unpack_recv_buffer(const int              other_rank,
                         const std::vector<T2> &recv_buffer);
    };

    template <typename T1, typename T2>
    class ConsensusAlgorithm
    {
    public:
      ConsensusAlgorithm(ConsensusAlgorithmProcess<T1, T2> &process,
                         const MPI_Comm &                   comm);

      virtual ~ConsensusAlgorithm() = default;

      virtual void
      run() = 0;

    protected:
      ConsensusAlgorithmProcess<T1, T2> &process;

      const MPI_Comm &comm;

      const unsigned int my_rank;

      const unsigned int n_procs;
    };

    template <typename T1, typename T2>
    class ConsensusAlgorithm_NBX : public ConsensusAlgorithm<T1, T2>
    {
    public:
      // Unique tags to be used during Isend and Irecv
      static const unsigned int tag_request  = 12;
      static const unsigned int tag_delivery = 13;

      ConsensusAlgorithm_NBX(ConsensusAlgorithmProcess<T1, T2> &process,
                             const MPI_Comm &                   comm);

      virtual ~ConsensusAlgorithm_NBX() = default;

      virtual void
      run() override;

    private:
#ifdef DEAL_II_WITH_MPI

      std::vector<unsigned int> targets;

      std::vector<std::vector<T1>> send_buffers;

      std::vector<MPI_Request> send_requests;

      std::vector<std::vector<T2>> recv_buffers;

      std::vector<MPI_Request> recv_requests;

      std::vector<std::vector<T2>> request_buffers;

      std::vector<std::shared_ptr<MPI_Request>> request_requests;

      // request for barrier
      MPI_Request barrier_request;
#endif

#ifdef DEBUG

      std::set<unsigned int> requesting_processes;
#endif

      bool
      check_own_state();

      void
      signal_finish();

      bool
      check_global_state();

      void
      process_requests();

      void
      start_communication();

      void
      clean_up_and_end_communication();
    };

    template <typename T1, typename T2>
    class ConsensusAlgorithm_PEX : public ConsensusAlgorithm<T1, T2>
    {
    public:
      // Unique tags to be used during Isend and Irecv
      static const unsigned int tag_request  = 14;
      static const unsigned int tag_delivery = 15;

      ConsensusAlgorithm_PEX(ConsensusAlgorithmProcess<T1, T2> &process,
                             const MPI_Comm &                   comm);

      virtual ~ConsensusAlgorithm_PEX() = default;

      virtual void
      run() override;

    private:
#ifdef DEAL_II_WITH_MPI

      std::vector<unsigned int> targets;

      std::vector<unsigned int> sources;

      // data structures to send and receive requests

      std::vector<std::vector<T1>> send_buffers;

      std::vector<std::vector<T2>> recv_buffers;

      std::vector<MPI_Request> send_and_recv_buffers;

      std::vector<std::vector<T2>> requests_buffers;

      std::vector<MPI_Request> requests_answers;
#endif

      void
      process_requests(int index);

      unsigned int
      start_communication();

      void
      clean_up_and_end_communication();
    };

    template <typename T1, typename T2>
    class ConsensusAlgorithmSelector : public ConsensusAlgorithm<T1, T2>
    {
    public:
      ConsensusAlgorithmSelector(ConsensusAlgorithmProcess<T1, T2> &process,
                                 const MPI_Comm &                   comm);

      virtual ~ConsensusAlgorithmSelector() = default;

      virtual void
      run() override;

    private:
      // Pointer to the actual ConsensusAlgorithm implementation.
      std::shared_ptr<ConsensusAlgorithm<T1, T2>> consensus_algo;
    };

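    /**
     * A usage sketch for the consensus-algorithm classes above (an
     * illustration; the class MyProcess and its target list are
     * hypothetical): derive from ConsensusAlgorithmProcess, state which
     * ranks have to be contacted in compute_targets(), and let
     * ConsensusAlgorithmSelector pick a suitable implementation:
     * @code
     *   class MyProcess : public ConsensusAlgorithmProcess<int, int>
     *   {
     *   public:
     *     virtual std::vector<unsigned int>
     *     compute_targets() override
     *     {
     *       // ranks this process needs an answer from; rank 0 serves as
     *       // a stand-in here
     *       return {0};
     *     }
     *     // ... override the pack/unpack hooks above as needed ...
     *   };
     *
     *   MyProcess process;
     *   ConsensusAlgorithmSelector<int, int>(process, comm).run();
     * @endcode
     */
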
    std::vector<unsigned int>
    compute_index_owner(const IndexSet &owned_indices,
                        const IndexSet &indices_to_look_up,
                        const MPI_Comm &comm);

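    /**
     * A minimal usage sketch (an illustration; the index sets are named
     * after the usual owned/ghost distinction): for every element of
     * @p indices_to_look_up, return the rank that owns it:
     * @code
     *   const std::vector<unsigned int> owners =
     *     Utilities::MPI::compute_index_owner(locally_owned_indices,
     *                                         ghost_indices,
     *                                         comm);
     * @endcode
     */
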
#ifndef DOXYGEN
    // declaration for an internal function that lives in mpi.templates.h
    namespace internal
    {
      template <typename T>
      void
      all_reduce(const MPI_Op &            mpi_op,
                 const ArrayView<const T> &values,
                 const MPI_Comm &          mpi_communicator,
                 const ArrayView<T> &      output);
    }

    // Since these depend on N they must live in the header file
    template <typename T, unsigned int N>
    void
    sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
    {
      internal::all_reduce(MPI_SUM,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(sums, N));
    }

    template <typename T, unsigned int N>
    void
    max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
    {
      internal::all_reduce(MPI_MAX,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(maxima, N));
    }

    template <typename T, unsigned int N>
    void
    min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
    {
      internal::all_reduce(MPI_MIN,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(minima, N));
    }

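    // A minimal usage sketch for the fixed-size array overloads above (an
    // illustration; local_a and local_b stand for per-process values):
    //
    //   double local_values[2] = {local_a, local_b};
    //   double global_values[2];
    //   Utilities::MPI::sum(local_values, comm, global_values);
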
    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      Assert(objects_to_send.size() == 0,
             ExcMessage("Cannot send to more than one processor."));
      Assert(objects_to_send.find(0) != objects_to_send.end() ||
               objects_to_send.size() == 0,
             ExcMessage("Can only send to myself or to nobody."));
      return objects_to_send;
#  else

      std::vector<unsigned int> send_to(objects_to_send.size());
      {
        unsigned int i = 0;
        for (const auto &m : objects_to_send)
          send_to[i++] = m.first;
      }
      AssertDimension(send_to.size(), objects_to_send.size());

      const auto receive_from =
        Utilities::MPI::compute_point_to_point_communication_pattern(comm,
                                                                     send_to);

      // Sending buffers
      std::vector<std::vector<char>> buffers_to_send(send_to.size());
      std::vector<MPI_Request>       buffer_send_requests(send_to.size());
      {
        unsigned int i = 0;
        for (const auto &rank_obj : objects_to_send)
          {
            const auto &rank   = rank_obj.first;
            buffers_to_send[i] = Utilities::pack(rank_obj.second);
            const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                       buffers_to_send[i].size(),
                                       MPI_CHAR,
                                       rank,
                                       21,
                                       comm,
                                       &buffer_send_requests[i]);
            AssertThrowMPI(ierr);
            ++i;
          }
      }

      // Receiving buffers
      std::map<unsigned int, T> received_objects;
      {
        std::vector<char> buffer;
        // We do this on a first come/first served basis
        for (unsigned int i = 0; i < receive_from.size(); ++i)
          {
            // Probe what's going on. Take data from the first available sender
            MPI_Status status;
            int        ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
            AssertThrowMPI(ierr);

            // Length of the message
            int len;
            ierr = MPI_Get_count(&status, MPI_CHAR, &len);
            AssertThrowMPI(ierr);
            buffer.resize(len);

            // Source rank
            const unsigned int rank = status.MPI_SOURCE;

            // Actually receive the message
            ierr = MPI_Recv(
              buffer.data(), len, MPI_CHAR, rank, 21, comm, MPI_STATUS_IGNORE);
            AssertThrowMPI(ierr);
            Assert(received_objects.find(rank) == received_objects.end(),
                   ExcInternalError(
                     "I should not receive again from this rank"));
            received_objects[rank] = Utilities::unpack<T>(buffer);
          }
      }

      // Wait to have sent all objects.
      MPI_Waitall(send_to.size(),
                  buffer_send_requests.data(),
                  MPI_STATUSES_IGNORE);

      return received_objects;
#  endif // deal.II with MPI
    }

    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      std::vector<T> v(1, object);
      return v;
#  else
      const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);

      std::vector<char> buffer = Utilities::pack(object);

      int n_local_data = buffer.size();

      // Vector to store the size of loc_data_array for every process
      std::vector<int> size_all_data(n_procs, 0);

      // Exchanging the size of each buffer
      MPI_Allgather(
        &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer
      std::vector<int> rdispls(n_procs);
      rdispls[0] = 0;
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

      // Step 3: exchange the buffer:
      std::vector<char> received_unrolled_buffer(rdispls.back() +
                                                 size_all_data.back());

      MPI_Allgatherv(buffer.data(),
                     n_local_data,
                     MPI_CHAR,
                     received_unrolled_buffer.data(),
                     size_all_data.data(),
                     rdispls.data(),
                     MPI_CHAR,
                     comm);

      std::vector<T> received_objects(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                           rdispls[i],
                                         received_unrolled_buffer.begin() +
                                           rdispls[i] + size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }

      return received_objects;
#  endif
    }

    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      (void)root_process;
      std::vector<T> v(1, object_to_send);
      return v;
#  else
      const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
      const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);

      Assert(root_process < n_procs, ExcIndexRange(root_process, 0, n_procs));

      std::vector<char> buffer       = Utilities::pack(object_to_send);
      int               n_local_data = buffer.size();

      // Vector to store the size of loc_data_array for every process
      // only the root process needs to allocate memory for that purpose
      std::vector<int> size_all_data;
      if (my_rank == root_process)
        size_all_data.resize(n_procs, 0);

      // Exchanging the size of each buffer
      int ierr = MPI_Gather(&n_local_data,
                            1,
                            MPI_INT,
                            size_all_data.data(),
                            1,
                            MPI_INT,
                            root_process,
                            comm);
      AssertThrowMPI(ierr);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer; only for root
      std::vector<int> rdispls;
      if (my_rank == root_process)
        {
          rdispls.resize(n_procs, 0);
          for (unsigned int i = 1; i < n_procs; ++i)
            rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
        }
      // exchange the buffer:
      std::vector<char> received_unrolled_buffer;
      if (my_rank == root_process)
        received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

      ierr = MPI_Gatherv(buffer.data(),
                         n_local_data,
                         MPI_CHAR,
                         received_unrolled_buffer.data(),
                         size_all_data.data(),
                         rdispls.data(),
                         MPI_CHAR,
                         root_process,
                         comm);
      AssertThrowMPI(ierr);

      std::vector<T> received_objects;

      if (my_rank == root_process)
        {
          received_objects.resize(n_procs);

          for (unsigned int i = 0; i < n_procs; ++i)
            {
              const std::vector<char> local_buffer(
                received_unrolled_buffer.begin() + rdispls[i],
                received_unrolled_buffer.begin() + rdispls[i] +
                  size_all_data[i]);
              received_objects[i] = Utilities::unpack<T>(local_buffer);
            }
        }
      return received_objects;
#  endif
    }


#  ifdef DEAL_II_WITH_MPI
    template <class Iterator, typename Number>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm)
    {
      // below we do a simple and straightforward implementation. More
      // elaborate options are:
      // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
      using Std        = typename numbers::NumberTraits<Number>::real_type;
      const Number sum = std::accumulate(begin, end, Number(0.));

      const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
      Assert(size > 0, ExcDivideByZero());
      const Number mean =
        Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
      Std sq_sum = 0.;
      std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
        sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
      });
      sq_sum = Utilities::MPI::sum(sq_sum, comm);
      return std::make_pair(mean,
                            std::sqrt(sq_sum / static_cast<Std>(size - 1)));
    }
#  endif

#endif
  } // end of namespace MPI
} // end of namespace Utilities


DEAL_II_NAMESPACE_CLOSE

#endif