mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
22 #include <deal.II/base/index_set.h>
23 #include <deal.II/base/mpi_tags.h>
24 #include <deal.II/base/numbers.h>
25 
26 #include <boost/signals2.hpp>
27 
28 #include <map>
29 #include <numeric>
30 #include <set>
31 #include <vector>
32 
33 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
34 // without MPI, we would still like to use
35 // some constructs with MPI data
36 // types. Therefore, create some dummies
37 using MPI_Comm = int;
38 using MPI_Request = int;
39 using MPI_Datatype = int;
40 using MPI_Op = int;
41 # ifndef MPI_COMM_WORLD
42 # define MPI_COMM_WORLD 0
43 # endif
44 # ifndef MPI_COMM_SELF
45 # define MPI_COMM_SELF 0
46 # endif
47 # ifndef MPI_REQUEST_NULL
48 # define MPI_REQUEST_NULL 0
49 # endif
50 # ifndef MPI_MIN
51 # define MPI_MIN 0
52 # endif
53 # ifndef MPI_MAX
54 # define MPI_MAX 0
55 # endif
56 # ifndef MPI_SUM
57 # define MPI_SUM 0
58 # endif
59 # ifndef MPI_LOR
60 # define MPI_LOR 0
61 # endif
62 #endif
63 
64 
65 
79 #ifdef DEAL_II_WITH_MPI
80 # if DEAL_II_MPI_VERSION_GTE(3, 0)
81 
82 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
83 
84 # else
85 
86 # include <type_traits>
87 
88 # define DEAL_II_MPI_CONST_CAST(expr) \
89  const_cast<typename std::remove_const< \
90  typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
91 
92 # endif
93 #endif
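// A minimal usage sketch (the buffer, destination, and tag below are
// placeholders, assuming an MPI build): the macro wraps pointer arguments to
// MPI routines whose pre-3.0 bindings lack 'const' qualifiers.
//
//   const std::vector<int> send_buffer(10, 42);
//   MPI_Send(DEAL_II_MPI_CONST_CAST(send_buffer.data()),
//            send_buffer.size(),
//            MPI_INT,
//            /*dest=*/0,
//            /*tag=*/0,
//            MPI_COMM_WORLD);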
94 
95 
96 
97 DEAL_II_NAMESPACE_OPEN
98 
99 
100 // Forward type declarations to allow MPI sums over tensorial types
101 #ifndef DOXYGEN
102 template <int rank, int dim, typename Number>
103 class Tensor;
104 template <int rank, int dim, typename Number>
105 class SymmetricTensor;
106 template <typename Number>
107 class SparseMatrix;
108 class IndexSet;
109 #endif
110 
111 namespace Utilities
112 {
125  IndexSet
126  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
127  const unsigned int n_partitions,
128  const IndexSet::size_type total_size);
129 
137  namespace MPI
138  {
147  unsigned int
148  n_mpi_processes(const MPI_Comm &mpi_communicator);
149 
158  unsigned int
159  this_mpi_process(const MPI_Comm &mpi_communicator);
160 
165  const std::vector<unsigned int>
166  mpi_processes_within_communicator(const MPI_Comm &comm_large,
167  const MPI_Comm &comm_small);
168 
190  std::vector<unsigned int>
191  compute_point_to_point_communication_pattern(
192  const MPI_Comm & mpi_comm,
193  const std::vector<unsigned int> &destinations);
194 
214  unsigned int
215  compute_n_point_to_point_communications(
216  const MPI_Comm & mpi_comm,
217  const std::vector<unsigned int> &destinations);
218 
235  MPI_Comm
236  duplicate_communicator(const MPI_Comm &mpi_communicator);
237 
247  void
248  free_communicator(MPI_Comm &mpi_communicator);
249 
262  class DuplicatedCommunicator
263  {
264  public:
268  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
269  : comm(duplicate_communicator(communicator))
270  {}
271 
276  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
280  ~DuplicatedCommunicator()
281  {
282  free_communicator(comm);
283  }
284 
288  const MPI_Comm &
289  operator*() const
290  {
291  return comm;
292  }
293 
294 
298  DuplicatedCommunicator &
299  operator=(const DuplicatedCommunicator &) = delete;
300 
301  private:
305  MPI_Comm comm;
306  };
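// A minimal usage sketch for the class above: duplicate a communicator for
// the lifetime of a scope and access it through operator*().
//
//   {
//     const DuplicatedCommunicator duplicated(MPI_COMM_WORLD);
//     const MPI_Comm comm = *duplicated;
//     // ... communicate on 'comm' without interfering with other
//     //     users of MPI_COMM_WORLD ...
//   } // the duplicated communicator is freed here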
307 
337  class CollectiveMutex
338  {
339  public:
345  class ScopedLock
346  {
347  public:
351  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
352  : mutex(mutex)
353  , comm(comm)
354  {
355  mutex.lock(comm);
356  }
357 
361  ~ScopedLock()
362  {
363  mutex.unlock(comm);
364  }
365 
366  private:
370  CollectiveMutex &mutex;
374  const MPI_Comm comm;
375  };
376 
380  explicit CollectiveMutex();
381 
385  ~CollectiveMutex();
386 
393  void
394  lock(const MPI_Comm &comm);
395 
402  void
403  unlock(const MPI_Comm &comm);
404 
405  private:
409  bool locked;
410 
414  MPI_Request request;
415  };
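// A minimal usage sketch for the mutex above ('comm' is a placeholder): the
// RAII ScopedLock guards a critical communication phase, exactly as done in
// some_to_some() further down in this file.
//
//   static CollectiveMutex      mutex;
//   CollectiveMutex::ScopedLock lock(mutex, comm);
//   // ... point-to-point communication that must not interleave with
//   //     other uses of the same tags on 'comm' ...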
416 
417 
418 
446 #ifdef DEAL_II_WITH_MPI
447  int
448  create_group(const MPI_Comm & comm,
449  const MPI_Group &group,
450  const int tag,
451  MPI_Comm * new_comm);
452 #endif
453 
462  std::vector<IndexSet>
463  create_ascending_partitioning(const MPI_Comm &comm,
464  const IndexSet::size_type locally_owned_size);
465 
473  IndexSet
474  create_evenly_distributed_partitioning(
475  const MPI_Comm & comm,
476  const IndexSet::size_type total_size);
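// A minimal usage sketch ('comm' is a placeholder, the total size of 16 is
// arbitrary): split an index range evenly among the ranks of a communicator,
// or build the complete contiguous partition from per-rank sizes.
//
//   const IndexSet locally_owned =
//     Utilities::MPI::create_evenly_distributed_partitioning(comm, 16);
//   const std::vector<IndexSet> partition =
//     Utilities::MPI::create_ascending_partitioning(
//       comm, locally_owned.n_elements());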
477 
478 #ifdef DEAL_II_WITH_MPI
479 
494  template <class Iterator, typename Number = long double>
495  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
496  mean_and_standard_deviation(const Iterator begin,
497  const Iterator end,
498  const MPI_Comm &comm);
499 #endif
500 
501 
549  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
550  create_mpi_data_type_n_bytes(const std::size_t n_bytes);
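// A minimal usage sketch ('buffer', 'dest', 'tag', and 'comm' are
// placeholders): send a buffer whose size may exceed the 'int' count limit
// of MPI as a single element of a custom datatype.
//
//   std::vector<char> buffer = /* ... */;
//   const auto bigtype =
//     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
//   const int ierr =
//     MPI_Send(buffer.data(), 1, *bigtype, dest, tag, comm);
//   AssertThrowMPI(ierr);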
551 
571  template <typename T>
572  T
573  sum(const T &t, const MPI_Comm &mpi_communicator);
574 
584  template <typename T, typename U>
585  void
586  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
587 
597  template <typename T>
598  void
599  sum(const ArrayView<const T> &values,
600  const MPI_Comm & mpi_communicator,
601  const ArrayView<T> & sums);
602 
608  template <int rank, int dim, typename Number>
609  SymmetricTensor<rank, dim, Number>
610  sum(const SymmetricTensor<rank, dim, Number> &local,
611  const MPI_Comm & mpi_communicator);
612 
618  template <int rank, int dim, typename Number>
619  Tensor<rank, dim, Number>
620  sum(const Tensor<rank, dim, Number> &local,
621  const MPI_Comm & mpi_communicator);
622 
631  template <typename Number>
632  void
633  sum(const SparseMatrix<Number> &local,
634  const MPI_Comm & mpi_communicator,
635  SparseMatrix<Number> & global);
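// A minimal usage sketch ('local_error' and 'comm' are placeholders): the
// scalar overload returns the sum over all ranks, while the two-argument
// variants work element-wise on containers such as std::vector.
//
//   const double global_error = Utilities::MPI::sum(local_error, comm);
//
//   std::vector<double> local_values(3, 1.0), global_sums(3);
//   Utilities::MPI::sum(local_values, comm, global_sums);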
636 
656  template <typename T>
657  T
658  max(const T &t, const MPI_Comm &mpi_communicator);
659 
669  template <typename T, typename U>
670  void
671  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
672 
682  template <typename T>
683  void
684  max(const ArrayView<const T> &values,
685  const MPI_Comm & mpi_communicator,
686  const ArrayView<T> & maxima);
687 
707  template <typename T>
708  T
709  min(const T &t, const MPI_Comm &mpi_communicator);
710 
720  template <typename T, typename U>
721  void
722  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
723 
733  template <typename T>
734  void
735  min(const ArrayView<const T> &values,
736  const MPI_Comm & mpi_communicator,
737  const ArrayView<T> & minima);
738 
762  template <typename T>
763  T
764  logical_or(const T &t, const MPI_Comm &mpi_communicator);
765 
780  template <typename T, typename U>
781  void
782  logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
783 
793  template <typename T>
794  void
795  logical_or(const ArrayView<const T> &values,
796  const MPI_Comm & mpi_communicator,
797  const ArrayView<T> & results);
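// A minimal usage sketch ('comm' is a placeholder): min(), max(), and
// logical_or() follow the same pattern as sum(); the fixed-size array
// overloads defined near the end of this file reduce several values in a
// single collective call.
//
//   double local_values[2]  = {1.0, 2.0};
//   double global_minima[2] = {};
//   Utilities::MPI::min(local_values, comm, global_minima);
//
//   const int found_locally  = 1;
//   const int found_anywhere = Utilities::MPI::logical_or(found_locally, comm);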
798 
813  struct MinMaxAvg
814  {
819  double sum;
820 
825  double min;
826 
831  double max;
832 
841  unsigned int min_index;
842 
851  unsigned int max_index;
852 
857  double avg;
858  };
859 
874  MinMaxAvg
875  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
876 
888  std::vector<MinMaxAvg>
889  min_max_avg(const std::vector<double> &my_value,
890  const MPI_Comm & mpi_communicator);
891 
892 
905  void
906  min_max_avg(const ArrayView<const double> &my_values,
907  const ArrayView<MinMaxAvg> & result,
908  const MPI_Comm & mpi_communicator);
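// A minimal usage sketch ('local_wall_time' and 'comm' are placeholders):
// collect statistics about a per-rank quantity in a single collective.
//
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(local_wall_time, comm);
//   // stats.avg holds the average; stats.max_index is the rank that owns
//   // the maximum value.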
909 
910 
954  class MPI_InitFinalize
955  {
956  public:
1002  MPI_InitFinalize(
1003  int & argc,
1004  char **& argv,
1005  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1006 
1011  ~MPI_InitFinalize();
1012 
1039  static void
1040  register_request(MPI_Request &request);
1041 
1045  static void
1046  unregister_request(MPI_Request &request);
1047 
1055  struct Signals
1056  {
1061  boost::signals2::signal<void()> at_mpi_init;
1062 
1069  boost::signals2::signal<void()> at_mpi_finalize;
1070  };
1071 
1072  static Signals signals;
1073 
1074  private:
1078  static std::set<MPI_Request *> requests;
1079  };
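// A minimal usage sketch: a typical deal.II main() creates one object of the
// class above so that MPI is initialized before and finalized after all other
// work; here limited to one thread per process.
//
//   int main(int argc, char *argv[])
//   {
//     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//     // ... run the parallel program ...
//   }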
1080 
1092  bool
1093  job_supports_mpi();
1094 
1112  template <typename T>
1113  std::map<unsigned int, T>
1114  some_to_some(const MPI_Comm & comm,
1115  const std::map<unsigned int, T> &objects_to_send);
1116 
1130  template <typename T>
1131  std::vector<T>
1132  all_gather(const MPI_Comm &comm, const T &object_to_send);
1133 
1149  template <typename T>
1150  std::vector<T>
1151  gather(const MPI_Comm & comm,
1152  const T & object_to_send,
1153  const unsigned int root_process = 0);
1154 
1173  template <typename T>
1174  T
1175  broadcast(const MPI_Comm & comm,
1176  const T & object_to_send,
1177  const unsigned int root_process = 0);
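// A minimal usage sketch ('comm' is a placeholder): these functions pack and
// unpack arbitrary serializable objects, so strings or user-defined structs
// work as well as plain numbers.
//
//   const std::string name =
//     "rank " + std::to_string(Utilities::MPI::this_mpi_process(comm));
//   const std::vector<std::string> all_names =
//     Utilities::MPI::all_gather(comm, name);
//   const std::string root_name = Utilities::MPI::broadcast(comm, name, 0);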
1178 
1191  template <typename T>
1192  T
1193  reduce(const T & local_value,
1194  const MPI_Comm & comm,
1195  const std::function<T(const T &, const T &)> &combiner,
1196  const unsigned int root_process = 0);
1197 
1207  template <typename T>
1208  T
1209  all_reduce(const T & local_value,
1210  const MPI_Comm & comm,
1211  const std::function<T(const T &, const T &)> &combiner);
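// A minimal usage sketch ('local_value' and 'comm' are placeholders): reduce
// with a user-defined combination function; spelling out the template
// argument lets the lambda convert to the std::function parameter.
//
//   const double global_max = Utilities::MPI::all_reduce<double>(
//     local_value, comm, [](const double &a, const double &b) {
//       return std::max(a, b);
//     });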
1212 
1255  std::vector<unsigned int>
1256  compute_index_owner(const IndexSet &owned_indices,
1257  const IndexSet &indices_to_look_up,
1258  const MPI_Comm &comm);
1259 
1267  template <typename T>
1268  std::vector<T>
1269  compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1270 
1274  template <typename T>
1275  std::set<T>
1276  compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1277 
1278 #ifndef DOXYGEN
1279  // declaration for an internal function that lives in mpi.templates.h
1280  namespace internal
1281  {
1282  template <typename T>
1283  void
1284  all_reduce(const MPI_Op & mpi_op,
1285  const ArrayView<const T> &values,
1286  const MPI_Comm & mpi_communicator,
1287  const ArrayView<T> & output);
1288  }
1289 
1290  // Since these depend on N they must live in the header file
1291  template <typename T, unsigned int N>
1292  void
1293  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1294  {
1295  internal::all_reduce(MPI_SUM,
1296  ArrayView<const T>(values, N),
1297  mpi_communicator,
1298  ArrayView<T>(sums, N));
1299  }
1300 
1301  template <typename T, unsigned int N>
1302  void
1303  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1304  {
1305  internal::all_reduce(MPI_MAX,
1306  ArrayView<const T>(values, N),
1307  mpi_communicator,
1308  ArrayView<T>(maxima, N));
1309  }
1310 
1311  template <typename T, unsigned int N>
1312  void
1313  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1314  {
1315  internal::all_reduce(MPI_MIN,
1316  ArrayView<const T>(values, N),
1317  mpi_communicator,
1318  ArrayView<T>(minima, N));
1319  }
1320 
1321  template <typename T, unsigned int N>
1322  void
1323  logical_or(const T (&values)[N],
1324  const MPI_Comm &mpi_communicator,
1325  T (&results)[N])
1326  {
1327  static_assert(std::is_integral<T>::value,
1328  "The MPI_LOR operation only allows integral data types.");
1329 
1330  internal::all_reduce(MPI_LOR,
1331  ArrayView<const T>(values, N),
1332  mpi_communicator,
1333  ArrayView<T>(results, N));
1334  }
1335 
1336  template <typename T>
1337  std::map<unsigned int, T>
1338  some_to_some(const MPI_Comm & comm,
1339  const std::map<unsigned int, T> &objects_to_send)
1340  {
1341 # ifndef DEAL_II_WITH_MPI
1342  (void)comm;
1343  Assert(objects_to_send.size() < 2,
1344  ExcMessage("Cannot send to more than one processor."));
1345  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1346  objects_to_send.size() == 0,
1347  ExcMessage("Can only send to myself or to nobody."));
1348  return objects_to_send;
1349 # else
1350  const auto my_proc = this_mpi_process(comm);
1351 
1352  std::map<unsigned int, T> received_objects;
1353 
1354  std::vector<unsigned int> send_to;
1355  send_to.reserve(objects_to_send.size());
1356  for (const auto &m : objects_to_send)
1357  if (m.first == my_proc)
1358  received_objects[my_proc] = m.second;
1359  else
1360  send_to.emplace_back(m.first);
1361 
1362  const unsigned int n_point_point_communications =
1363  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1364 
1365  // Protect the following communication:
1366  static CollectiveMutex mutex;
1367  CollectiveMutex::ScopedLock lock(mutex, comm);
1368 
1369  // If we have something to send, or we expect something from other
1370  // processors, we need to visit one of the two scopes below. Otherwise,
1371  // no other action is required by this mpi process, and we can safely
1372  // return.
1373  if (send_to.size() == 0 && n_point_point_communications == 0)
1374  return received_objects;
1375 
1376  const int mpi_tag =
1377  internal::Tags::compute_point_to_point_communication_pattern;
1378 
1379  // Sending buffers
1380  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1381  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1382  {
1383  unsigned int i = 0;
1384  for (const auto &rank_obj : objects_to_send)
1385  if (rank_obj.first != my_proc)
1386  {
1387  const auto &rank = rank_obj.first;
1388  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1389  /*allow_compression=*/false);
1390  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1391  buffers_to_send[i].size(),
1392  MPI_CHAR,
1393  rank,
1394  mpi_tag,
1395  comm,
1396  &buffer_send_requests[i]);
1397  AssertThrowMPI(ierr);
1398  ++i;
1399  }
1400  }
1401 
1402  // Fill the output map
1403  {
1404  std::vector<char> buffer;
1405  // We do this on a first come/first served basis
1406  for (unsigned int i = 0; i < n_point_point_communications; ++i)
1407  {
1408  // Probe what's going on. Take data from the first available sender
1409  MPI_Status status;
1410  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1411  AssertThrowMPI(ierr);
1412 
1413  // Length of the message
1414  int len;
1415  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1416  AssertThrowMPI(ierr);
1417  buffer.resize(len);
1418 
1419  // Source rank
1420  const unsigned int rank = status.MPI_SOURCE;
1421 
1422  // Actually receive the message
1423  ierr = MPI_Recv(buffer.data(),
1424  len,
1425  MPI_CHAR,
1426  status.MPI_SOURCE,
1427  status.MPI_TAG,
1428  comm,
1429  MPI_STATUS_IGNORE);
1430  AssertThrowMPI(ierr);
1431  Assert(received_objects.find(rank) == received_objects.end(),
1432  ExcInternalError(
1433  "I should not receive again from this rank"));
1434  received_objects[rank] =
1435  Utilities::unpack<T>(buffer,
1436  /*allow_compression=*/false);
1437  }
1438  }
1439 
1440  // Wait to have sent all objects.
1441  const int ierr = MPI_Waitall(send_to.size(),
1442  buffer_send_requests.data(),
1443  MPI_STATUSES_IGNORE);
1444  AssertThrowMPI(ierr);
1445 
1446  return received_objects;
1447 # endif // deal.II with MPI
1448  }
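// A minimal usage sketch for the function above ('comm' is a placeholder):
// every rank sends a small vector to its right neighbor and receives the
// corresponding vector from its left neighbor.
//
//   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
//   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
//   std::map<unsigned int, std::vector<double>> to_send;
//   to_send[(my_rank + 1) % n_ranks] = std::vector<double>(3, my_rank);
//   const std::map<unsigned int, std::vector<double>> received =
//     Utilities::MPI::some_to_some(comm, to_send);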
1449 
1450  template <typename T>
1451  std::vector<T>
1452  all_gather(const MPI_Comm &comm, const T &object)
1453  {
1454  if (job_supports_mpi() == false)
1455  return {object};
1456 
1457 # ifndef DEAL_II_WITH_MPI
1458  (void)comm;
1459  std::vector<T> v(1, object);
1460  return v;
1461 # else
1462  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1463 
1464  std::vector<char> buffer = Utilities::pack(object);
1465 
1466  int n_local_data = buffer.size();
1467 
1468  // Vector to store the size of loc_data_array for every process
1469  std::vector<int> size_all_data(n_procs, 0);
1470 
1471  // Exchanging the size of each buffer
1472  int ierr = MPI_Allgather(
1473  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1474  AssertThrowMPI(ierr);
1475 
1476  // Now computing the displacement, relative to recvbuf,
1477  // at which to store the incoming buffer
1478  std::vector<int> rdispls(n_procs);
1479  rdispls[0] = 0;
1480  for (unsigned int i = 1; i < n_procs; ++i)
1481  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1482 
1483  // Step 3: exchange the buffer:
1484  std::vector<char> received_unrolled_buffer(rdispls.back() +
1485  size_all_data.back());
1486 
1487  ierr = MPI_Allgatherv(buffer.data(),
1488  n_local_data,
1489  MPI_CHAR,
1490  received_unrolled_buffer.data(),
1491  size_all_data.data(),
1492  rdispls.data(),
1493  MPI_CHAR,
1494  comm);
1495  AssertThrowMPI(ierr);
1496 
1497  std::vector<T> received_objects(n_procs);
1498  for (unsigned int i = 0; i < n_procs; ++i)
1499  {
1500  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1501  rdispls[i],
1502  received_unrolled_buffer.begin() +
1503  rdispls[i] + size_all_data[i]);
1504  received_objects[i] = Utilities::unpack<T>(local_buffer);
1505  }
1506 
1507  return received_objects;
1508 # endif
1509  }
1510 
1511  template <typename T>
1512  std::vector<T>
1513  gather(const MPI_Comm & comm,
1514  const T & object_to_send,
1515  const unsigned int root_process)
1516  {
1517 # ifndef DEAL_II_WITH_MPI
1518  (void)comm;
1519  (void)root_process;
1520  std::vector<T> v(1, object_to_send);
1521  return v;
1522 # else
1523  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1524  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
1525 
1526  AssertIndexRange(root_process, n_procs);
1527 
1528  std::vector<char> buffer = Utilities::pack(object_to_send);
1529  int n_local_data = buffer.size();
1530 
1531  // Vector to store the size of loc_data_array for every process
1532  // only the root process needs to allocate memory for that purpose
1533  std::vector<int> size_all_data;
1534  if (my_rank == root_process)
1535  size_all_data.resize(n_procs, 0);
1536 
1537  // Exchanging the size of each buffer
1538  int ierr = MPI_Gather(&n_local_data,
1539  1,
1540  MPI_INT,
1541  size_all_data.data(),
1542  1,
1543  MPI_INT,
1544  root_process,
1545  comm);
1546  AssertThrowMPI(ierr);
1547 
1548  // Now computing the displacement, relative to recvbuf,
1549  // at which to store the incoming buffer; only for root
1550  std::vector<int> rdispls;
1551  if (my_rank == root_process)
1552  {
1553  rdispls.resize(n_procs, 0);
1554  for (unsigned int i = 1; i < n_procs; ++i)
1555  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1556  }
1557  // exchange the buffer:
1558  std::vector<char> received_unrolled_buffer;
1559  if (my_rank == root_process)
1560  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
1561 
1562  ierr = MPI_Gatherv(buffer.data(),
1563  n_local_data,
1564  MPI_CHAR,
1565  received_unrolled_buffer.data(),
1566  size_all_data.data(),
1567  rdispls.data(),
1568  MPI_CHAR,
1569  root_process,
1570  comm);
1571  AssertThrowMPI(ierr);
1572 
1573  std::vector<T> received_objects;
1574 
1575  if (my_rank == root_process)
1576  {
1577  received_objects.resize(n_procs);
1578 
1579  for (unsigned int i = 0; i < n_procs; ++i)
1580  {
1581  const std::vector<char> local_buffer(
1582  received_unrolled_buffer.begin() + rdispls[i],
1583  received_unrolled_buffer.begin() + rdispls[i] +
1584  size_all_data[i]);
1585  received_objects[i] = Utilities::unpack<T>(local_buffer);
1586  }
1587  }
1588  return received_objects;
1589 # endif
1590  }
1591 
1592 
1593 
1594  template <typename T>
1595  T
1596  broadcast(const MPI_Comm & comm,
1597  const T & object_to_send,
1598  const unsigned int root_process)
1599  {
1600 # ifndef DEAL_II_WITH_MPI
1601  (void)comm;
1602  (void)root_process;
1603  return object_to_send;
1604 # else
1605  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1606  AssertIndexRange(root_process, n_procs);
1607  (void)n_procs;
1608 
1609  std::vector<char> buffer;
1610  unsigned int buffer_size = numbers::invalid_unsigned_int;
1611 
1612  // On the root process, pack the data and determine what the
1613  // buffer size needs to be.
1614  if (this_mpi_process(comm) == root_process)
1615  {
1616  buffer = Utilities::pack(object_to_send, false);
1617  buffer_size = buffer.size();
1618  }
1619 
1620  // Exchange the size of buffer
1621  int ierr = MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED, root_process, comm);
1622  AssertThrowMPI(ierr);
1623 
1624  // If not on the root process, correctly size the buffer to
1625  // receive the data, then do exactly that.
1626  if (this_mpi_process(comm) != root_process)
1627  buffer.resize(buffer_size);
1628 
1629  ierr =
1630  MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, root_process, comm);
1631  AssertThrowMPI(ierr);
1632 
1633  if (Utilities::MPI::this_mpi_process(comm) == root_process)
1634  return object_to_send;
1635  else
1636  return Utilities::unpack<T>(buffer, false);
1637 # endif
1638  }
1639 
1640 
1641 # ifdef DEAL_II_WITH_MPI
1642  template <class Iterator, typename Number>
1643  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
1644  mean_and_standard_deviation(const Iterator begin,
1645  const Iterator end,
1646  const MPI_Comm &comm)
1647  {
1648  // Below is a simple and straightforward implementation; more elaborate
1649  // options are:
1650  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
1651  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
1652  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
1653  using Std = typename numbers::NumberTraits<Number>::real_type;
1654  const Number sum = std::accumulate(begin, end, Number(0.));
1655 
1656  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
1657  Assert(size > 0, ExcDivideByZero());
1658  const Number mean =
1659  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
1660  Std sq_sum = 0.;
1661  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
1662  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
1663  });
1664  sq_sum = Utilities::MPI::sum(sq_sum, comm);
1665  return std::make_pair(mean,
1666  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
1667  }
1668 # endif
1669 
1670 #endif
1671  } // end of namespace MPI
1672 } // end of namespace Utilities
1673 
1674 
1675 DEAL_II_NAMESPACE_CLOSE
1676 
1677 #endif