Reference documentation for deal.II version GIT d7aca55de5 2022-08-10 12:50:02+00:00
mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/numbers.h>
25 #include <deal.II/base/utilities.h>
26 
27 #include <boost/signals2.hpp>
28 
29 #include <complex>
30 #include <map>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
36 // without MPI, we would still like to use
37 // some constructs with MPI data
38 // types. Therefore, create some dummies
39 using MPI_Comm = int;
40 using MPI_Request = int;
41 using MPI_Datatype = int;
42 using MPI_Op = int;
43 # ifndef MPI_COMM_WORLD
44 # define MPI_COMM_WORLD 0
45 # endif
46 # ifndef MPI_COMM_SELF
47 # define MPI_COMM_SELF 0
48 # endif
49 # ifndef MPI_COMM_NULL
50 # define MPI_COMM_NULL 0
51 # endif
52 # ifndef MPI_REQUEST_NULL
53 # define MPI_REQUEST_NULL 0
54 # endif
55 # ifndef MPI_MIN
56 # define MPI_MIN 0
57 # endif
58 # ifndef MPI_MAX
59 # define MPI_MAX 0
60 # endif
61 # ifndef MPI_SUM
62 # define MPI_SUM 0
63 # endif
64 # ifndef MPI_LOR
65 # define MPI_LOR 0
66 # endif
67 #endif
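 // Usage sketch (illustrative only; the class name below is made up): the
 // dummy aliases above let user code that stores MPI objects compile
 // unchanged whether or not deal.II was configured with MPI.
 //
 // @code
 //   class ParallelData
 //   {
 //   public:
 //     explicit ParallelData(const MPI_Comm &communicator)
 //       : comm(communicator)
 //     {}
 //
 //   private:
 //     MPI_Comm comm; // a plain 'int' if neither MPI nor PETSc is enabled
 //   };
 //
 //   ParallelData data(MPI_COMM_WORLD); // MPI_COMM_WORLD is then simply 0
 // @endcode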
68 
69 
70 
84 #ifdef DEAL_II_WITH_MPI
85 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
86 #endif
87 
88 
89 
 90 DEAL_II_NAMESPACE_OPEN
 91 
92 
93 // Forward type declarations to allow MPI sums over tensorial types
94 #ifndef DOXYGEN
95 template <int rank, int dim, typename Number>
96 class Tensor;
97 template <int rank, int dim, typename Number>
98 class SymmetricTensor;
99 template <typename Number>
100 class SparseMatrix;
101 class IndexSet;
102 #endif
103 
104 namespace Utilities
105 {
118  IndexSet
 119  create_evenly_distributed_partitioning(
 120  const unsigned int my_partition_id,
121  const unsigned int n_partitions,
122  const types::global_dof_index total_size);
123 
131  namespace MPI
132  {
141  template <typename T>
142  constexpr bool is_mpi_type = is_same_as_any_of<T,
143  char,
144  signed short,
145  signed int,
146  signed long,
147  signed long long,
148  signed char,
149  unsigned char,
150  unsigned short,
151  unsigned int,
152  unsigned long int,
153  unsigned long long,
154  float,
155  double,
156  long double,
157  bool,
158  std::complex<float>,
159  std::complex<double>,
160  std::complex<long double>,
161  wchar_t>::value;
162 
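 // Usage sketch (illustrative only): is_mpi_type is a constexpr variable
 // template and can be used directly in static_assert or enable_if contexts.
 //
 // @code
 //   static_assert(Utilities::MPI::is_mpi_type<double>,
 //                 "double maps to a built-in MPI datatype");
 //   static_assert(!Utilities::MPI::is_mpi_type<std::string>,
 //                 "std::string must be packed/serialized instead");
 // @endcode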
171  unsigned int
172  n_mpi_processes(const MPI_Comm &mpi_communicator);
173 
182  unsigned int
183  this_mpi_process(const MPI_Comm &mpi_communicator);
184 
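 // Typical use of the two query functions above (illustrative only):
 //
 // @code
 //   const unsigned int n_ranks =
 //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
 //   const unsigned int my_rank =
 //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
 //   if (my_rank == 0)
 //     std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;
 // @endcode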
189  const std::vector<unsigned int>
190  mpi_processes_within_communicator(const MPI_Comm &comm_large,
191  const MPI_Comm &comm_small);
192 
214  std::vector<unsigned int>
 215  compute_point_to_point_communication_pattern(
 216  const MPI_Comm & mpi_comm,
217  const std::vector<unsigned int> &destinations);
218 
238  unsigned int
 239  compute_n_point_to_point_communications(
 240  const MPI_Comm & mpi_comm,
241  const std::vector<unsigned int> &destinations);
242 
259  MPI_Comm
260  duplicate_communicator(const MPI_Comm &mpi_communicator);
261 
271  void
272  free_communicator(MPI_Comm &mpi_communicator);
273 
 286  class DuplicatedCommunicator
 287  {
288  public:
292  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
293  : comm(duplicate_communicator(communicator))
294  {}
295 
 300  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
 304  ~DuplicatedCommunicator()
 305  {
 306  free_communicator(comm);
 307  }
308 
312  const MPI_Comm &
313  operator*() const
314  {
315  return comm;
316  }
317 
318 
 324  DuplicatedCommunicator &operator=(const DuplicatedCommunicator &) = delete;
325  private:
329  MPI_Comm comm;
330  };
331 
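 // Usage sketch (illustrative only; the surrounding class is made up): a
 // DuplicatedCommunicator member gives an object its own communicator that
 // is created once and freed automatically, so its messages cannot collide
 // with other communication on the original communicator.
 //
 // @code
 //   class MyOperator
 //   {
 //   public:
 //     explicit MyOperator(const MPI_Comm &comm)
 //       : duplicated_comm(comm)
 //     {}
 //
 //     unsigned int my_rank() const
 //     {
 //       return Utilities::MPI::this_mpi_process(*duplicated_comm);
 //     }
 //
 //   private:
 //     Utilities::MPI::DuplicatedCommunicator duplicated_comm;
 //   };
 // @endcode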
332 
333 
 363  class CollectiveMutex
 364  {
365  public:
 371  class ScopedLock
 372  {
373  public:
377  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
378  : mutex(mutex)
379  , comm(comm)
380  {
381  mutex.lock(comm);
382  }
383 
 387  ~ScopedLock()
 388  {
389  mutex.unlock(comm);
390  }
391 
392  private:
 396  CollectiveMutex &mutex;
 400  const MPI_Comm comm;
401  };
402 
406  explicit CollectiveMutex();
407 
 412  ~CollectiveMutex();
419  void
420  lock(const MPI_Comm &comm);
421 
428  void
429  unlock(const MPI_Comm &comm);
430 
431  private:
435  bool locked;
436 
440  MPI_Request request;
441  };
442 
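 // Usage sketch (illustrative only): protect a communication phase that uses
 // wildcard sources/tags so that two consecutive calls cannot mix up each
 // other's messages. This mirrors how some_to_some() below uses the class.
 //
 // @code
 //   void exchange_data(const MPI_Comm &comm)
 //   {
 //     static Utilities::MPI::CollectiveMutex      mutex;
 //     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
 //
 //     // ... point-to-point communication on 'comm' ...
 //   } // the lock is released when 'lock' goes out of scope
 // @endcode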
443 
444 
491  template <typename T>
492  class Future
493  {
494  public:
499  template <typename W, typename G>
500  Future(W &&wait_operation, G &&get_and_cleanup_operation);
501 
507  Future(const Future &) = delete;
508 
512  Future(Future &&) noexcept = default;
513 
517  ~Future();
518 
524  Future &
525  operator=(const Future &) = delete;
526 
530  Future &
531  operator=(Future &&) noexcept = default;
532 
540  void
541  wait();
542 
554  T
555  get();
556 
557  private:
561  std::function<void()> wait_function;
562  std::function<T()> get_and_cleanup_function;
563 
567  bool is_done;
568 
 572  bool get_was_called;
 573  };
574 
575 
576 
606 #ifdef DEAL_II_WITH_MPI
 607  int
 608  create_group(const MPI_Comm & comm,
609  const MPI_Group &group,
610  const int tag,
611  MPI_Comm * new_comm);
612 #endif
613 
622  std::vector<IndexSet>
 623  create_ascending_partitioning(
 624  const MPI_Comm & comm,
625  const types::global_dof_index locally_owned_size);
626 
634  IndexSet
 635  create_evenly_distributed_partitioning(
 636  const MPI_Comm & comm,
637  const types::global_dof_index total_size);
638 
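 // Usage sketch (illustrative only; the numbers are made up): distribute
 // 1000 global indices over the ranks of a communicator.
 //
 // @code
 //   const MPI_Comm comm = MPI_COMM_WORLD;
 //
 //   const IndexSet locally_owned =
 //     Utilities::MPI::create_evenly_distributed_partitioning(comm, 1000);
 //
 //   // Alternatively, build the contiguous range of every rank at once
 //   // from the local sizes:
 //   const std::vector<IndexSet> all_ranges =
 //     Utilities::MPI::create_ascending_partitioning(comm,
 //                                                   locally_owned.n_elements());
 // @endcode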
639 #ifdef DEAL_II_WITH_MPI
655  template <class Iterator, typename Number = long double>
656  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
 657  mean_and_standard_deviation(const Iterator begin,
 658  const Iterator end,
659  const MPI_Comm &comm);
660 #endif
661 
662 
710  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
711  create_mpi_data_type_n_bytes(const std::size_t n_bytes);
712 
732  template <typename T>
733  T
734  sum(const T &t, const MPI_Comm &mpi_communicator);
735 
745  template <typename T, typename U>
746  void
747  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
748 
758  template <typename T>
759  void
 760  sum(const ArrayView<const T> &values,
 761  const MPI_Comm & mpi_communicator,
762  const ArrayView<T> & sums);
763 
 769  template <int rank, int dim, typename Number>
 770  SymmetricTensor<rank, dim, Number>
 771  sum(const SymmetricTensor<rank, dim, Number> &local,
 772  const MPI_Comm & mpi_communicator);
773 
 779  template <int rank, int dim, typename Number>
 780  Tensor<rank, dim, Number>
 781  sum(const Tensor<rank, dim, Number> &local,
 782  const MPI_Comm & mpi_communicator);
783 
792  template <typename Number>
793  void
794  sum(const SparseMatrix<Number> &local,
795  const MPI_Comm & mpi_communicator,
796  SparseMatrix<Number> & global);
797 
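 // Usage sketch (illustrative only): the sum() overloads above reduce
 // scalars, fixed-size arrays, and tensorial objects entry by entry over
 // all ranks of the communicator.
 //
 // @code
 //   const double local_energy = 1.5;
 //   const double total_energy =
 //     Utilities::MPI::sum(local_energy, MPI_COMM_WORLD);
 //
 //   double local_data[3]  = {1., 2., 3.};
 //   double global_data[3] = {0., 0., 0.};
 //   Utilities::MPI::sum(local_data, MPI_COMM_WORLD, global_data);
 // @endcode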
817  template <typename T>
818  T
819  max(const T &t, const MPI_Comm &mpi_communicator);
820 
830  template <typename T, typename U>
831  void
832  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
833 
843  template <typename T>
844  void
 845  max(const ArrayView<const T> &values,
 846  const MPI_Comm & mpi_communicator,
847  const ArrayView<T> & maxima);
848 
868  template <typename T>
869  T
870  min(const T &t, const MPI_Comm &mpi_communicator);
871 
881  template <typename T, typename U>
882  void
883  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
884 
894  template <typename T>
895  void
 896  min(const ArrayView<const T> &values,
 897  const MPI_Comm & mpi_communicator,
898  const ArrayView<T> & minima);
899 
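 // Usage sketch (illustrative only): max() and min() mirror sum(), but use
 // MPI_MAX and MPI_MIN as the reduction operation.
 //
 // @code
 //   const double local_dt  = 0.01;
 //   const double global_dt =
 //     Utilities::MPI::min(local_dt, MPI_COMM_WORLD); // smallest admissible step
 //
 //   const unsigned int n_local_cells = 42;
 //   const unsigned int n_cells_of_largest_partition =
 //     Utilities::MPI::max(n_local_cells, MPI_COMM_WORLD);
 // @endcode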
923  template <typename T>
924  T
925  logical_or(const T &t, const MPI_Comm &mpi_communicator);
926 
941  template <typename T, typename U>
942  void
943  logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
944 
954  template <typename T>
955  void
 956  logical_or(const ArrayView<const T> &values,
 957  const MPI_Comm & mpi_communicator,
958  const ArrayView<T> & results);
959 
974  struct MinMaxAvg
975  {
980  double sum;
981 
986  double min;
987 
992  double max;
993 
1002  unsigned int min_index;
1003 
1012  unsigned int max_index;
1013 
1018  double avg;
1019  };
1020 
1035  MinMaxAvg
1036  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
1037 
1049  std::vector<MinMaxAvg>
1050  min_max_avg(const std::vector<double> &my_value,
1051  const MPI_Comm & mpi_communicator);
1052 
1053 
1066  void
1067  min_max_avg(const ArrayView<const double> &my_values,
1068  const ArrayView<MinMaxAvg> & result,
1069  const MPI_Comm & mpi_communicator);
1070 
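 // Usage sketch (illustrative only): gather simple statistics of a per-rank
 // quantity in a single collective operation.
 //
 // @code
 //   const double local_wall_time = 1.23; // seconds spent on this rank
 //   const Utilities::MPI::MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(local_wall_time, MPI_COMM_WORLD);
 //
 //   // stats.min, stats.max, stats.avg, stats.sum describe all ranks;
 //   // stats.min_index and stats.max_index are the ranks attaining them.
 // @endcode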
1071 
 1115  class MPI_InitFinalize
 1116  {
1117  public:
 1163  MPI_InitFinalize(
 1164  int & argc,
1165  char **& argv,
1166  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1167 
1172  ~MPI_InitFinalize();
1173 
1200  static void
1201  register_request(MPI_Request &request);
1202 
1206  static void
1207  unregister_request(MPI_Request &request);
1208 
1216  struct Signals
1217  {
1222  boost::signals2::signal<void()> at_mpi_init;
1223 
1230  boost::signals2::signal<void()> at_mpi_finalize;
1231  };
1232 
 1233  static Signals signals;
 1234 
1235  private:
1239  static std::set<MPI_Request *> requests;
1240  };
1241 
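 // Usage sketch (illustrative only): the usual way to employ this class is
 // as the first object created in an application's main() function, so that
 // MPI is finalized automatically when the object goes out of scope.
 //
 // @code
 //   int main(int argc, char **argv)
 //   {
 //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 //
 //     // ... run the parallel program ...
 //
 //     return 0;
 //   }
 // @endcode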
1253  bool
1254  job_supports_mpi();
1255 
1273  template <typename T>
1274  std::map<unsigned int, T>
1275  some_to_some(const MPI_Comm & comm,
1276  const std::map<unsigned int, T> &objects_to_send);
1277 
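 // Usage sketch (illustrative only): every rank sends a string to a rank of
 // its choosing and receives whatever the others addressed to it.
 //
 // @code
 //   const MPI_Comm     comm    = MPI_COMM_WORLD;
 //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
 //
 //   std::map<unsigned int, std::string> to_send;
 //   to_send[(my_rank + 1) % n_ranks] = "hello from " + std::to_string(my_rank);
 //
 //   const std::map<unsigned int, std::string> received =
 //     Utilities::MPI::some_to_some(comm, to_send);
 // @endcode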
1291  template <typename T>
1292  std::vector<T>
1293  all_gather(const MPI_Comm &comm, const T &object_to_send);
1294 
1310  template <typename T>
1311  std::vector<T>
1312  gather(const MPI_Comm & comm,
1313  const T & object_to_send,
1314  const unsigned int root_process = 0);
1315 
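 // Usage sketch (illustrative only): both functions work on arbitrary
 // serializable objects; all_gather() makes the result available everywhere,
 // gather() only on the chosen root process.
 //
 // @code
 //   const MPI_Comm    comm            = MPI_COMM_WORLD;
 //   const std::size_t n_locally_owned = 100; // made-up per-rank value
 //
 //   const std::vector<std::size_t> sizes_on_all_ranks =
 //     Utilities::MPI::all_gather(comm, n_locally_owned);
 //
 //   const std::vector<std::size_t> sizes_on_root =
 //     Utilities::MPI::gather(comm, n_locally_owned, /*root_process=*/0);
 // @endcode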
1351  template <typename T>
1352  std::enable_if_t<is_mpi_type<T> == false, T>
1353  broadcast(const MPI_Comm & comm,
1354  const T & object_to_send,
1355  const unsigned int root_process = 0);
1356 
1379  template <typename T>
1380  std::enable_if_t<is_mpi_type<T> == true, T>
1381  broadcast(const MPI_Comm & comm,
1382  const T & object_to_send,
1383  const unsigned int root_process = 0);
1384 
1401  template <typename T>
1402  void
1403  broadcast(T * buffer,
1404  const size_t count,
1405  const unsigned int root,
1406  const MPI_Comm & comm);
1407 
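 // Usage sketch (illustrative only): the first two overloads broadcast a
 // single object from the root process; types without a built-in MPI
 // datatype are packed on the root and unpacked on all other processes.
 //
 // @code
 //   const MPI_Comm comm = MPI_COMM_WORLD;
 //
 //   unsigned int n_time_steps = 0;
 //   if (Utilities::MPI::this_mpi_process(comm) == 0)
 //     n_time_steps = 100;
 //   n_time_steps = Utilities::MPI::broadcast(comm, n_time_steps, 0);
 //
 //   std::string parameters;
 //   if (Utilities::MPI::this_mpi_process(comm) == 0)
 //     parameters = "refinement = adaptive"; // known on rank 0 only
 //   parameters = Utilities::MPI::broadcast(comm, parameters, 0);
 // @endcode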
1420  template <typename T>
1421  T
1422  reduce(const T & local_value,
1423  const MPI_Comm & comm,
1424  const std::function<T(const T &, const T &)> &combiner,
1425  const unsigned int root_process = 0);
1426 
1436  template <typename T>
1437  T
1438  all_reduce(const T & local_value,
1439  const MPI_Comm & comm,
1440  const std::function<T(const T &, const T &)> &combiner);
1441 
1442 
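 // Usage sketch (illustrative only): reduce() and all_reduce() apply a
 // user-provided associative combiner, which allows reductions MPI has no
 // built-in operation for, such as the lexicographic maximum of pairs.
 //
 // @code
 //   const MPI_Comm     comm    = MPI_COMM_WORLD;
 //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 //
 //   const std::pair<unsigned int, unsigned int> local_value = {my_rank % 3,
 //                                                              my_rank};
 //   const std::pair<unsigned int, unsigned int> global_value =
 //     Utilities::MPI::all_reduce<std::pair<unsigned int, unsigned int>>(
 //       local_value, comm, [](const auto &a, const auto &b) {
 //         return std::max(a, b);
 //       });
 // @endcode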
1463  template <typename T>
1464  Future<void>
1465  isend(const T & object,
1466  MPI_Comm communicator,
1467  const unsigned int target_rank,
1468  const unsigned int mpi_tag = 0);
1469 
1470 
1487  template <typename T>
1488  Future<T>
1489  irecv(MPI_Comm communicator,
1490  const unsigned int source_rank,
1491  const unsigned int mpi_tag = 0);
1492 
1493 
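 // Usage sketch (illustrative only): a ring exchange in which every rank
 // sends a string to its right neighbor. The communication starts
 // immediately; wait()/get() are called on the returned Future objects later.
 //
 // @code
 //   const MPI_Comm     comm    = MPI_COMM_WORLD;
 //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
 //
 //   Utilities::MPI::Future<void> send_future =
 //     Utilities::MPI::isend(std::string("data"), comm, (my_rank + 1) % n_ranks);
 //   Utilities::MPI::Future<std::string> recv_future =
 //     Utilities::MPI::irecv<std::string>(comm, (my_rank + n_ranks - 1) % n_ranks);
 //
 //   const std::string received = recv_future.get(); // waits, then unpacks
 //   send_future.wait();                             // ensure the send completed
 // @endcode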
1536  std::vector<unsigned int>
1537  compute_index_owner(const IndexSet &owned_indices,
1538  const IndexSet &indices_to_look_up,
1539  const MPI_Comm &comm);
1540 
1548  template <typename T>
1549  std::vector<T>
1550  compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1551 
1555  template <typename T>
1556  std::set<T>
1557  compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1558 
1559 
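 // Usage sketch (illustrative only): every process contributes a few
 // entries, and afterwards all processes hold the union of all of them.
 //
 // @code
 //   const MPI_Comm     comm    = MPI_COMM_WORLD;
 //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 //
 //   const std::vector<unsigned int> local_indices = {my_rank, my_rank + 1};
 //   const std::vector<unsigned int> all_indices =
 //     Utilities::MPI::compute_set_union(local_indices, comm);
 // @endcode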
1560 
1561  /* --------------------------- inline functions ------------------------- */
1562 
1563  namespace internal
1564  {
1570  namespace MPIDataTypes
1571  {
1572 #ifdef DEAL_II_WITH_MPI
1573  inline MPI_Datatype
1574  mpi_type_id(const bool *)
1575  {
1576  return MPI_CXX_BOOL;
1577  }
1578 
1579 
1580 
1581  inline MPI_Datatype
1582  mpi_type_id(const char *)
1583  {
1584  return MPI_CHAR;
1585  }
1586 
1587 
1588 
1589  inline MPI_Datatype
1590  mpi_type_id(const signed char *)
1591  {
1592  return MPI_SIGNED_CHAR;
1593  }
1594 
1595 
1596 
1597  inline MPI_Datatype
1598  mpi_type_id(const wchar_t *)
1599  {
1600  return MPI_WCHAR;
1601  }
1602 
1603 
1604 
1605  inline MPI_Datatype
1606  mpi_type_id(const short *)
1607  {
1608  return MPI_SHORT;
1609  }
1610 
1611 
1612 
1613  inline MPI_Datatype
1614  mpi_type_id(const int *)
1615  {
1616  return MPI_INT;
1617  }
1618 
1619 
1620 
1621  inline MPI_Datatype
1622  mpi_type_id(const long int *)
1623  {
1624  return MPI_LONG;
1625  }
1626 
1627 
1628 
1629  inline MPI_Datatype
1630  mpi_type_id(const long long int *)
1631  {
1632  return MPI_LONG_LONG;
1633  }
1634 
1635 
1636 
1637  inline MPI_Datatype
1638  mpi_type_id(const unsigned char *)
1639  {
1640  return MPI_UNSIGNED_CHAR;
1641  }
1642 
1643 
1644 
1645  inline MPI_Datatype
1646  mpi_type_id(const unsigned short *)
1647  {
1648  return MPI_UNSIGNED_SHORT;
1649  }
1650 
1651 
1652 
1653  inline MPI_Datatype
1654  mpi_type_id(const unsigned int *)
1655  {
1656  return MPI_UNSIGNED;
1657  }
1658 
1659 
1660 
1661  inline MPI_Datatype
1662  mpi_type_id(const unsigned long int *)
1663  {
1664  return MPI_UNSIGNED_LONG;
1665  }
1666 
1667 
1668 
1669  inline MPI_Datatype
1670  mpi_type_id(const unsigned long long int *)
1671  {
1672  return MPI_UNSIGNED_LONG_LONG;
1673  }
1674 
1675 
1676 
1677  inline MPI_Datatype
1678  mpi_type_id(const float *)
1679  {
1680  return MPI_FLOAT;
1681  }
1682 
1683 
1684 
1685  inline MPI_Datatype
1686  mpi_type_id(const double *)
1687  {
1688  return MPI_DOUBLE;
1689  }
1690 
1691 
1692 
1693  inline MPI_Datatype
1694  mpi_type_id(const long double *)
1695  {
1696  return MPI_LONG_DOUBLE;
1697  }
1698 
1699 
1700 
1701  inline MPI_Datatype
1702  mpi_type_id(const std::complex<float> *)
1703  {
1704  return MPI_COMPLEX;
1705  }
1706 
1707 
1708 
1709  inline MPI_Datatype
1710  mpi_type_id(const std::complex<double> *)
1711  {
1712  return MPI_DOUBLE_COMPLEX;
1713  }
1714 #endif
1715  } // namespace MPIDataTypes
1716  } // namespace internal
1717 
1718 
1719 
1720 #ifdef DEAL_II_WITH_MPI
1738  template <typename T>
1739  const MPI_Datatype
 1740  mpi_type_id_for_type = internal::MPIDataTypes::mpi_type_id(
 1741  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1742 #endif
1743 
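 // Usage sketch (illustrative only): mpi_type_id_for_type<T> maps a C++ type
 // to the matching MPI_Datatype at compile time, which is how the function
 // templates below avoid spelling out MPI_DOUBLE, MPI_INT, etc. by hand. It
 // can also be used in hand-written MPI calls:
 //
 // @code
 //   double    local_value = 3.14, global_value = 0.;
 //   const int ierr        = MPI_Allreduce(&local_value,
 //                                         &global_value,
 //                                         1,
 //                                         Utilities::MPI::mpi_type_id_for_type<double>,
 //                                         MPI_SUM,
 //                                         MPI_COMM_WORLD);
 //   AssertThrowMPI(ierr);
 // @endcode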
1744 #ifndef DOXYGEN
1745  namespace internal
1746  {
1747  // declaration for an internal function that lives in mpi.templates.h
1748  template <typename T>
1749  void
1750  all_reduce(const MPI_Op & mpi_op,
1751  const ArrayView<const T> &values,
1752  const MPI_Comm & mpi_communicator,
1753  const ArrayView<T> & output);
1754  } // namespace internal
1755 
1756 
1757  template <typename T>
1758  template <typename W, typename G>
1759  Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1760  : wait_function(wait_operation)
1761  , get_and_cleanup_function(get_and_cleanup_operation)
1762  , is_done(false)
1763  , get_was_called(false)
1764  {}
1765 
1766 
1767 
1768  template <typename T>
1769  Future<T>::~Future()
1770  {
1771  // If there is a clean-up function, and if it has not been
1772  // called yet, then do so. Note that we may not have a
1773  // clean-up function (not even an empty one) if the current
1774  // object has been moved from, into another object, and as
1775  // a consequence the std::function objects are now empty
1776  // even though they were initialized in the constructor.
 1777  // (A std::function object whose object is an empty lambda
1778  // function, [](){}, is not an empty std::function object.)
1779  if ((get_was_called == false) && get_and_cleanup_function)
1780  get();
1781  }
1782 
1783 
1784 
1785  template <typename T>
1786  void
1787  Future<T>::wait()
1788  {
1789  if (is_done == false)
1790  {
1791  wait_function();
1792 
1793  is_done = true;
1794  }
1795  }
1796 
1797 
1798  template <typename T>
1799  T
1800  Future<T>::get()
1801  {
1802  Assert(get_was_called == false,
1803  ExcMessage(
1804  "You can't call get() more than once on a Future object."));
1805  get_was_called = true;
1806 
1807  wait();
1808  return get_and_cleanup_function();
1809  }
1810 
1811 
1812 
1813  template <typename T, unsigned int N>
1814  void
1815  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1816  {
1817  internal::all_reduce(MPI_SUM,
 1818  ArrayView<const T>(values, N),
 1819  mpi_communicator,
1820  ArrayView<T>(sums, N));
1821  }
1822 
1823 
1824 
1825  template <typename T, unsigned int N>
1826  void
1827  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1828  {
1829  internal::all_reduce(MPI_MAX,
 1830  ArrayView<const T>(values, N),
 1831  mpi_communicator,
1832  ArrayView<T>(maxima, N));
1833  }
1834 
1835 
1836 
1837  template <typename T, unsigned int N>
1838  void
1839  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1840  {
1841  internal::all_reduce(MPI_MIN,
 1842  ArrayView<const T>(values, N),
 1843  mpi_communicator,
1844  ArrayView<T>(minima, N));
1845  }
1846 
1847 
1848 
1849  template <typename T, unsigned int N>
1850  void
1851  logical_or(const T (&values)[N],
1852  const MPI_Comm &mpi_communicator,
1853  T (&results)[N])
1854  {
1855  static_assert(std::is_integral<T>::value,
1856  "The MPI_LOR operation only allows integral data types.");
1857 
1858  internal::all_reduce(MPI_LOR,
 1859  ArrayView<const T>(values, N),
 1860  mpi_communicator,
1861  ArrayView<T>(results, N));
1862  }
1863 
1864 
1865 
1866  template <typename T>
1867  std::map<unsigned int, T>
1868  some_to_some(const MPI_Comm & comm,
1869  const std::map<unsigned int, T> &objects_to_send)
1870  {
1871 # ifndef DEAL_II_WITH_MPI
1872  (void)comm;
1873  Assert(objects_to_send.size() < 2,
1874  ExcMessage("Cannot send to more than one processor."));
1875  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1876  objects_to_send.size() == 0,
1877  ExcMessage("Can only send to myself or to nobody."));
1878  return objects_to_send;
1879 # else
1880  const auto my_proc = this_mpi_process(comm);
1881 
1882  std::map<unsigned int, T> received_objects;
1883 
1884  std::vector<unsigned int> send_to;
1885  send_to.reserve(objects_to_send.size());
1886  for (const auto &m : objects_to_send)
1887  if (m.first == my_proc)
1888  received_objects[my_proc] = m.second;
1889  else
1890  send_to.emplace_back(m.first);
1891 
1892  const unsigned int n_expected_incoming_messages =
 1893  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
 1894 
1895  // Protect the following communication:
1896  static CollectiveMutex mutex;
1897  CollectiveMutex::ScopedLock lock(mutex, comm);
1898 
1899  // If we have something to send, or we expect something from other
1900  // processors, we need to visit one of the two scopes below. Otherwise,
1901  // no other action is required by this mpi process, and we can safely
1902  // return.
1903  if (send_to.size() == 0 && n_expected_incoming_messages == 0)
1904  return received_objects;
1905 
1906  const int mpi_tag =
 1907  internal::Tags::compute_point_to_point_communication_pattern;
 1908 
1909  // Sending buffers
1910  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1911  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1912  {
1913  unsigned int i = 0;
1914  for (const auto &rank_obj : objects_to_send)
1915  if (rank_obj.first != my_proc)
1916  {
1917  const auto &rank = rank_obj.first;
1918  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1919  /*allow_compression=*/false);
1920  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1921  buffers_to_send[i].size(),
1922  MPI_CHAR,
1923  rank,
1924  mpi_tag,
1925  comm,
1926  &buffer_send_requests[i]);
1927  AssertThrowMPI(ierr);
1928  ++i;
1929  }
1930  }
1931 
1932  // Fill the output map
1933  {
1934  std::vector<char> buffer;
1935  // We do this on a first come/first served basis
1936  for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1937  {
1938  // Probe what's going on. Take data from the first available sender
1939  MPI_Status status;
1940  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1941  AssertThrowMPI(ierr);
1942 
1943  // Length of the message
1944  int len;
1945  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1946  AssertThrowMPI(ierr);
1947  buffer.resize(len);
1948 
1949  // Source rank
1950  const unsigned int rank = status.MPI_SOURCE;
1951 
1952  // Actually receive the message
1953  ierr = MPI_Recv(buffer.data(),
1954  len,
1955  MPI_CHAR,
1956  status.MPI_SOURCE,
1957  status.MPI_TAG,
1958  comm,
1959  MPI_STATUS_IGNORE);
1960  AssertThrowMPI(ierr);
1961  Assert(received_objects.find(rank) == received_objects.end(),
 1962  ExcInternalError(
 1963  "I should not receive again from this rank"));
1964  received_objects[rank] =
1965  Utilities::unpack<T>(buffer,
1966  /*allow_compression=*/false);
1967  }
1968  }
1969 
1970  // Wait to have sent all objects.
1971  const int ierr = MPI_Waitall(send_to.size(),
1972  buffer_send_requests.data(),
1973  MPI_STATUSES_IGNORE);
1974  AssertThrowMPI(ierr);
1975 
1976  return received_objects;
1977 # endif // deal.II with MPI
1978  }
1979 
1980 
1981 
1982  template <typename T>
1983  std::vector<T>
1984  all_gather(const MPI_Comm &comm, const T &object)
1985  {
1986  if (job_supports_mpi() == false)
1987  return {object};
1988 
1989 # ifndef DEAL_II_WITH_MPI
1990  (void)comm;
1991  std::vector<T> v(1, object);
1992  return v;
1993 # else
1994  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1995 
1996  std::vector<char> buffer = Utilities::pack(object);
1997 
1998  int n_local_data = buffer.size();
1999 
2000  // Vector to store the size of loc_data_array for every process
2001  std::vector<int> size_all_data(n_procs, 0);
2002 
2003  // Exchanging the size of each buffer
2004  int ierr = MPI_Allgather(
2005  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
2006  AssertThrowMPI(ierr);
2007 
2008  // Now computing the displacement, relative to recvbuf,
2009  // at which to store the incoming buffer
2010  std::vector<int> rdispls(n_procs);
2011  rdispls[0] = 0;
2012  for (unsigned int i = 1; i < n_procs; ++i)
2013  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2014 
2015  // Step 3: exchange the buffer:
2016  std::vector<char> received_unrolled_buffer(rdispls.back() +
2017  size_all_data.back());
2018 
2019  ierr = MPI_Allgatherv(buffer.data(),
2020  n_local_data,
2021  MPI_CHAR,
2022  received_unrolled_buffer.data(),
2023  size_all_data.data(),
2024  rdispls.data(),
2025  MPI_CHAR,
2026  comm);
2027  AssertThrowMPI(ierr);
2028 
2029  std::vector<T> received_objects(n_procs);
2030  for (unsigned int i = 0; i < n_procs; ++i)
2031  {
2032  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
2033  rdispls[i],
2034  received_unrolled_buffer.begin() +
2035  rdispls[i] + size_all_data[i]);
2036  received_objects[i] = Utilities::unpack<T>(local_buffer);
2037  }
2038 
2039  return received_objects;
2040 # endif
2041  }
2042 
2043 
2044 
2045  template <typename T>
2046  std::vector<T>
2047  gather(const MPI_Comm & comm,
2048  const T & object_to_send,
2049  const unsigned int root_process)
2050  {
2051 # ifndef DEAL_II_WITH_MPI
2052  (void)comm;
2053  (void)root_process;
2054  std::vector<T> v(1, object_to_send);
2055  return v;
2056 # else
2057  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2058  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2059 
2060  AssertIndexRange(root_process, n_procs);
2061 
2062  std::vector<char> buffer = Utilities::pack(object_to_send);
2063  int n_local_data = buffer.size();
2064 
2065  // Vector to store the size of loc_data_array for every process
2066  // only the root process needs to allocate memory for that purpose
2067  std::vector<int> size_all_data;
2068  if (my_rank == root_process)
2069  size_all_data.resize(n_procs, 0);
2070 
2071  // Exchanging the size of each buffer
2072  int ierr = MPI_Gather(&n_local_data,
2073  1,
2074  MPI_INT,
2075  size_all_data.data(),
2076  1,
2077  MPI_INT,
2078  root_process,
2079  comm);
2080  AssertThrowMPI(ierr);
2081 
2082  // Now computing the displacement, relative to recvbuf,
2083  // at which to store the incoming buffer; only for root
2084  std::vector<int> rdispls;
2085  if (my_rank == root_process)
2086  {
2087  rdispls.resize(n_procs, 0);
2088  for (unsigned int i = 1; i < n_procs; ++i)
2089  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2090  }
2091  // exchange the buffer:
2092  std::vector<char> received_unrolled_buffer;
2093  if (my_rank == root_process)
2094  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2095 
2096  ierr = MPI_Gatherv(buffer.data(),
2097  n_local_data,
2098  MPI_CHAR,
2099  received_unrolled_buffer.data(),
2100  size_all_data.data(),
2101  rdispls.data(),
2102  MPI_CHAR,
2103  root_process,
2104  comm);
2105  AssertThrowMPI(ierr);
2106 
2107  std::vector<T> received_objects;
2108 
2109  if (my_rank == root_process)
2110  {
2111  received_objects.resize(n_procs);
2112 
2113  for (unsigned int i = 0; i < n_procs; ++i)
2114  {
2115  const std::vector<char> local_buffer(
2116  received_unrolled_buffer.begin() + rdispls[i],
2117  received_unrolled_buffer.begin() + rdispls[i] +
2118  size_all_data[i]);
2119  received_objects[i] = Utilities::unpack<T>(local_buffer);
2120  }
2121  }
2122  return received_objects;
2123 # endif
2124  }
2125 
2126 
2127 
2128  template <typename T>
2129  void
2130  broadcast(T * buffer,
2131  const size_t count,
2132  const unsigned int root,
2133  const MPI_Comm & comm)
2134  {
2135 # ifndef DEAL_II_WITH_MPI
2136  (void)buffer;
2137  (void)count;
2138  (void)root;
2139  (void)comm;
2140 # else
2141  Assert(root < n_mpi_processes(comm),
2142  ExcMessage("Invalid root rank specified."));
2143 
2144  // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2145  // iteration:
2146  const size_t max_send_count = std::numeric_limits<signed int>::max();
2147 
2148  size_t total_sent_count = 0;
2149  while (total_sent_count < count)
2150  {
2151  const size_t current_count =
2152  std::min(count - total_sent_count, max_send_count);
2153 
2154  const int ierr = MPI_Bcast(buffer + total_sent_count,
2155  current_count,
2156  mpi_type_id_for_type<decltype(*buffer)>,
2157  root,
2158  comm);
2159  AssertThrowMPI(ierr);
2160  total_sent_count += current_count;
2161  }
2162 # endif
2163  }
2164 
2165 
2166 
2167  template <typename T>
2168  std::enable_if_t<is_mpi_type<T> == false, T>
2169  broadcast(const MPI_Comm & comm,
2170  const T & object_to_send,
2171  const unsigned int root_process)
2172  {
2173 # ifndef DEAL_II_WITH_MPI
2174  (void)comm;
2175  (void)root_process;
2176  return object_to_send;
2177 # else
2178  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2179  AssertIndexRange(root_process, n_procs);
2180  (void)n_procs;
2181 
2182  std::vector<char> buffer;
2183  std::size_t buffer_size = numbers::invalid_size_type;
2184 
2185  // On the root process, pack the data and determine what the
2186  // buffer size needs to be.
2187  if (this_mpi_process(comm) == root_process)
2188  {
2189  buffer = Utilities::pack(object_to_send, false);
2190  buffer_size = buffer.size();
2191  }
2192 
2193  // Exchange the size of buffer
2194  int ierr = MPI_Bcast(&buffer_size,
2195  1,
2196  mpi_type_id_for_type<decltype(buffer_size)>,
2197  root_process,
2198  comm);
2199  AssertThrowMPI(ierr);
2200 
2201  // If not on the root process, correctly size the buffer to
2202  // receive the data, then do exactly that.
2203  if (this_mpi_process(comm) != root_process)
2204  buffer.resize(buffer_size);
2205 
2206  broadcast(buffer.data(), buffer_size, root_process, comm);
2207 
2208  if (Utilities::MPI::this_mpi_process(comm) == root_process)
2209  return object_to_send;
2210  else
2211  return Utilities::unpack<T>(buffer, false);
2212 # endif
2213  }
2214 
2215 
2216 
2217  template <typename T>
2218  std::enable_if_t<is_mpi_type<T> == true, T>
2219  broadcast(const MPI_Comm & comm,
2220  const T & object_to_send,
2221  const unsigned int root_process)
2222  {
2223 # ifndef DEAL_II_WITH_MPI
2224  (void)comm;
2225  (void)root_process;
2226  return object_to_send;
2227 # else
2228 
2229  T object = object_to_send;
2230  int ierr =
2231  MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2232  AssertThrowMPI(ierr);
2233 
2234  return object;
2235 # endif
2236  }
2237 
2238 
2239  template <typename T>
2240  Future<void>
2241  isend(const T & object,
2242  MPI_Comm communicator,
2243  const unsigned int target_rank,
2244  const unsigned int mpi_tag)
2245  {
2246 # ifndef DEAL_II_WITH_MPI
2247  Assert(false,
2248  ExcMessage(
2249  "This function is not useful when called without MPI."));
2250  (void)object;
2251  (void)communicator;
2252  (void)target_rank;
2253  (void)mpi_tag;
2254  return Future<void>([]() {}, []() {});
2255 # else
2256  // Create a pointer to a send buffer into which we pack the object
2257  // to be sent. The buffer will be released by the Future object once
2258  // the send has been verified to have succeeded.
2259  //
2260  // Conceptually, we would like this send buffer to be a
2261  // std::unique_ptr object whose ownership is later handed over
2262  // to the cleanup function. That has the disadvantage that the
2263  // cleanup object is a non-copyable lambda capture, leading to
2264  // awkward semantics. Instead, we use a std::shared_ptr; we move
2265  // this shared pointer into the cleanup function, which means
 2266  // that there is exactly one shared pointer that owns the buffer
2267  // at any given time, though the latter is not an important
2268  // optimization.
2269  std::shared_ptr<std::vector<char>> send_buffer =
2270  std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2271 
2272  // Now start the send, and store the result in a request object that
2273  // we can then wait for later:
2274  MPI_Request request;
2275  const int ierr =
2276  MPI_Isend(send_buffer->data(),
2277  send_buffer->size(),
2278  mpi_type_id_for_type<decltype(*send_buffer->data())>,
2279  target_rank,
2280  mpi_tag,
2281  communicator,
2282  &request);
2283  AssertThrowMPI(ierr);
2284 
2285  // Then return a std::future-like object that has a wait()
2286  // function one can use to wait for the communication to finish,
2287  // and that has a cleanup function to be called at some point
2288  // after that makes sure the send buffer gets deallocated. This
2289  // cleanup function takes over ownership of the send buffer.
2290  //
2291  // Note that the body of the lambda function of the clean-up
2292  // function could be left empty. If that were so, once the
2293  // lambda function object goes out of scope, the 'send_buffer'
2294  // member of the closure object goes out of scope as well and so
2295  // the send_buffer is destroyed. But we may want to release the
2296  // buffer itself as early as possible, and so we clear the
2297  // buffer when the Future::get() function is called.
2298  auto wait = [request]() mutable {
2299  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2300  AssertThrowMPI(ierr);
2301  };
2302  auto cleanup = [send_buffer = std::move(send_buffer)]() {
2303  send_buffer->clear();
2304  };
2305  return Future<void>(wait, cleanup);
2306 # endif
2307  }
2308 
2309 
2310 
2311  template <typename T>
2312  Future<T>
2313  irecv(MPI_Comm communicator,
2314  const unsigned int source_rank,
2315  const unsigned int mpi_tag)
2316  {
2317 # ifndef DEAL_II_WITH_MPI
2318  Assert(false,
2319  ExcMessage(
2320  "This function is not useful when called without MPI."));
2321  (void)communicator;
2322  (void)source_rank;
2323  (void)mpi_tag;
 2324  return Future<T>([]() {}, []() { return T{}; });
2325 # else
2326  // Use a 'probe' operation for the 'wait' operation of the
2327  // Future this function returns. It will trigger whenever we get
2328  // the incoming message. Later, once we have received the message, we
2329  // can query its size and allocate a receiver buffer.
2330  //
2331  // Since we may be waiting for multiple messages from the same
2332  // incoming process (with possibly the same tag -- we can't
2333  // know), we must make sure that the 'probe' operation we have
2334  // here (and which we use to determine the buffer size) matches
2335  // the 'recv' operation with which we actually get the data
2336  // later on. This is exactly what the 'MPI_Mprobe' function and
 2337  // its 'I'mmediate variant are there for, coupled with the
 2338  // 'MPI_Mrecv' call that we put into the clean-up function
2339  // below.
2340  std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2341  std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2342 
2343  auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2344  const int ierr = MPI_Mprobe(
2345  source_rank, mpi_tag, communicator, message.get(), status.get());
2346  AssertThrowMPI(ierr);
2347  };
2348 
2349 
2350  // Now also define the function that actually gets the data:
2351  auto get = [status, message]() {
2352  int number_amount;
2353  int ierr;
2354  ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2355  AssertThrowMPI(ierr);
2356 
2357  std::vector<char> receive_buffer(number_amount);
2358 
2359  // Then actually get the data, using the matching MPI_Mrecv to the above
2360  // MPI_Mprobe:
2361  ierr = MPI_Mrecv(receive_buffer.data(),
2362  number_amount,
2363  mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2364  message.get(),
2365  status.get());
2366  AssertThrowMPI(ierr);
2367 
2368  // Return the unpacked object:
2369  return Utilities::unpack<T>(receive_buffer, false);
2370  };
2371 
2372  return Future<T>(wait, get);
2373 # endif
2374  }
2375 
2376 
2377 
2378 # ifdef DEAL_II_WITH_MPI
2379  template <class Iterator, typename Number>
2380  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2381  mean_and_standard_deviation(const Iterator begin,
2382  const Iterator end,
2383  const MPI_Comm &comm)
2384  {
 2385  // below we use a simple and straightforward implementation. More elaborate
2386  // options are:
2387  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2388  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2389  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2390  using Std = typename numbers::NumberTraits<Number>::real_type;
2391  const Number sum = std::accumulate(begin, end, Number(0.));
2392 
2393  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2394  Assert(size > 0, ExcDivideByZero());
2395  const Number mean =
2396  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2397  Std sq_sum = 0.;
2398  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
 2399  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
 2400  });
2401  sq_sum = Utilities::MPI::sum(sq_sum, comm);
2402  return std::make_pair(mean,
2403  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2404  }
2405 # endif
2406 
2407 #endif
2408  } // end of namespace MPI
2409 } // end of namespace Utilities
2410 
2411 
 2412 DEAL_II_NAMESPACE_CLOSE
 2413 
2414 #endif