mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
 21 #include <deal.II/base/array_view.h>
 22 #include <deal.II/base/mpi_stub.h>
23 #include <deal.II/base/mpi_tags.h>
24 #include <deal.II/base/numbers.h>
 25 #include <deal.II/base/template_constraints.h>
 26 #include <deal.II/base/utilities.h>
27 
28 #include <boost/signals2.hpp>
29 
30 #include <complex>
31 #include <limits>
32 #include <map>
33 #include <numeric>
34 #include <set>
35 #include <vector>
36 
37 
38 
52 #ifdef DEAL_II_WITH_MPI
53 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
54 #endif
55 
56 
57 
 58 DEAL_II_NAMESPACE_OPEN
 59 
60 
61 // Forward type declarations to allow MPI sums over tensorial types
62 #ifndef DOXYGEN
63 template <int rank, int dim, typename Number>
64 class Tensor;
65 template <int rank, int dim, typename Number>
66 class SymmetricTensor;
67 template <typename Number>
68 class SparseMatrix;
69 class IndexSet;
70 #endif
71 
72 namespace Utilities
73 {
 86  IndexSet
 87  create_evenly_distributed_partitioning(
 88  const unsigned int my_partition_id,
 89  const unsigned int n_partitions,
 90  const types::global_dof_index total_size);
91 
99  namespace MPI
100  {
109  template <typename T>
110  constexpr bool is_mpi_type = is_same_as_any_of<T,
111  char,
112  signed short,
113  signed int,
114  signed long,
115  signed long long,
116  signed char,
117  unsigned char,
118  unsigned short,
119  unsigned int,
120  unsigned long int,
121  unsigned long long,
122  float,
123  double,
124  long double,
125  bool,
126  std::complex<float>,
127  std::complex<double>,
128  std::complex<long double>,
129  wchar_t>::value;
130 
139  unsigned int
140  n_mpi_processes(const MPI_Comm &mpi_communicator);
141 
150  unsigned int
151  this_mpi_process(const MPI_Comm &mpi_communicator);
152 
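  // A minimal usage sketch for the two query functions above; the choice of
  // MPI_COMM_WORLD and the variable names are illustrative assumptions:
  //
  // @code
  //   const unsigned int n_ranks =
  //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  //   const unsigned int my_rank =
  //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //   if (my_rank == 0)
  //     std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;
  // @endcode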
157  const std::vector<unsigned int>
158  mpi_processes_within_communicator(const MPI_Comm &comm_large,
159  const MPI_Comm &comm_small);
160 
182  std::vector<unsigned int>
 183  compute_point_to_point_communication_pattern(
 184  const MPI_Comm & mpi_comm,
185  const std::vector<unsigned int> &destinations);
186 
206  unsigned int
 207  compute_n_point_to_point_communications(
 208  const MPI_Comm & mpi_comm,
209  const std::vector<unsigned int> &destinations);
210 
227  MPI_Comm
228  duplicate_communicator(const MPI_Comm &mpi_communicator);
229 
239  void
240  free_communicator(MPI_Comm &mpi_communicator);
241 
 254  class DuplicatedCommunicator
 255  {
256  public:
260  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
261  : comm(duplicate_communicator(communicator))
262  {}
263 
 268  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
 272  ~DuplicatedCommunicator()
 273  {
 274  free_communicator(comm);
 275  }
276 
280  const MPI_Comm &
281  operator*() const
282  {
283  return comm;
284  }
285 
286 
 290  DuplicatedCommunicator &
 291  operator=(const DuplicatedCommunicator &) = delete;
 292 
293  private:
297  MPI_Comm comm;
298  };
299 
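  // A sketch of the RAII pattern provided by DuplicatedCommunicator; the use
  // of MPI_COMM_WORLD and the some_library_call() function are illustrative
  // assumptions:
  //
  // @code
  //   {
  //     Utilities::MPI::DuplicatedCommunicator duplicate(MPI_COMM_WORLD);
  //     some_library_call(*duplicate); // communicate on the private duplicate
  //   }                                // the duplicate is freed here
  // @endcode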
300 
301 
 331  class CollectiveMutex
 332  {
333  public:
 339  class ScopedLock
 340  {
341  public:
345  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
346  : mutex(mutex)
347  , comm(comm)
348  {
349  mutex.lock(comm);
350  }
351 
 355  ~ScopedLock()
 356  {
357  mutex.unlock(comm);
358  }
359 
360  private:
 364  CollectiveMutex &mutex;
 368  const MPI_Comm comm;
369  };
370 
374  explicit CollectiveMutex();
375 
 379  ~CollectiveMutex();
 380 
387  void
388  lock(const MPI_Comm &comm);
389 
396  void
397  unlock(const MPI_Comm &comm);
398 
399  private:
403  bool locked;
404 
408  MPI_Request request;
409  };
410 
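  // A sketch of the locking pattern CollectiveMutex is meant for; the same
  // pattern appears in the some_to_some() implementation further down in
  // this file ('comm' is an illustrative name):
  //
  // @code
  //   static Utilities::MPI::CollectiveMutex      mutex;
  //   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
  //   // ... point-to-point messages that must not interleave with other
  //   //     communication rounds on 'comm' ...
  //   // the mutex is released when 'lock' goes out of scope
  // @endcode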
411 
412 
459  template <typename T>
460  class Future
461  {
462  public:
467  template <typename W, typename G>
468  Future(W &&wait_operation, G &&get_and_cleanup_operation);
469 
475  Future(const Future &) = delete;
476 
480  Future(Future &&) noexcept = default;
481 
485  ~Future();
486 
492  Future &
493  operator=(const Future &) = delete;
494 
498  Future &
499  operator=(Future &&) noexcept = default;
500 
508  void
509  wait();
510 
522  T
523  get();
524 
525  private:
529  std::function<void()> wait_function;
530  std::function<T()> get_and_cleanup_function;
531 
535  bool is_done;
536 
 540  bool get_was_called;
 541  };
542 
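  // A sketch of the wait()/get() contract of the Future class, using the
  // irecv() function declared further down in this file; 'comm', the source
  // rank, and the payload type are illustrative assumptions:
  //
  // @code
  //   Utilities::MPI::Future<std::vector<double>> future =
  //     Utilities::MPI::irecv<std::vector<double>>(comm, source_rank);
  //   // ... do unrelated work while the message is in transit ...
  //   future.wait();                                  // optional: get() also waits
  //   const std::vector<double> data = future.get();  // may be called only once
  // @endcode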
543 
544 
574 #ifdef DEAL_II_WITH_MPI
 575  int
 576  create_group(const MPI_Comm & comm,
577  const MPI_Group &group,
578  const int tag,
579  MPI_Comm * new_comm);
580 #endif
581 
590  std::vector<IndexSet>
 591  create_ascending_partitioning(
 592  const MPI_Comm & comm,
593  const types::global_dof_index locally_owned_size);
594 
602  IndexSet
 603  create_evenly_distributed_partitioning(
 604  const MPI_Comm & comm,
605  const types::global_dof_index total_size);
606 
607 #ifdef DEAL_II_WITH_MPI
623  template <class Iterator, typename Number = long double>
624  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
 625  mean_and_standard_deviation(const Iterator begin,
 626  const Iterator end,
627  const MPI_Comm &comm);
628 #endif
629 
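  // A sketch of calling mean_and_standard_deviation() on locally stored
  // samples; note that the 'Number' template parameter defaults to
  // long double ('samples' and 'comm' are illustrative names, and
  // gather_local_samples() is a hypothetical helper):
  //
  // @code
  //   const std::vector<double> samples = gather_local_samples();
  //   const auto stats = Utilities::MPI::mean_and_standard_deviation(
  //     samples.begin(), samples.end(), comm);
  //   // stats.first is the mean, stats.second the standard deviation
  // @endcode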
630 
678  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
679  create_mpi_data_type_n_bytes(const std::size_t n_bytes);
680 
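  // A sketch of the intended use of create_mpi_data_type_n_bytes(): sending a
  // buffer whose size may exceed the 'int' count argument of plain MPI calls
  // by describing the whole buffer as a single datatype ('dest', 'tag',
  // 'comm', and build_large_buffer() are illustrative assumptions):
  //
  // @code
  //   std::vector<char> buffer = build_large_buffer();
  //   const auto bigtype =
  //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
  //   const int ierr = MPI_Send(buffer.data(), 1, *bigtype, dest, tag, comm);
  //   AssertThrowMPI(ierr);
  // @endcode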
700  template <typename T>
701  T
702  sum(const T &t, const MPI_Comm &mpi_communicator);
703 
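  // The most common use of sum() is to reduce a locally computed scalar over
  // all ranks; compute_local_error() and 'mpi_communicator' are illustrative
  // assumptions:
  //
  // @code
  //   const double local_error  = compute_local_error();
  //   const double global_error = Utilities::MPI::sum(local_error, mpi_communicator);
  // @endcode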
713  template <typename T, typename U>
714  void
715  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
716 
726  template <typename T>
727  void
 728  sum(const ArrayView<const T> &values,
 729  const MPI_Comm & mpi_communicator,
730  const ArrayView<T> & sums);
731 
737  template <int rank, int dim, typename Number>
 738  SymmetricTensor<rank, dim, Number>
 739  sum(const SymmetricTensor<rank, dim, Number> &local,
 740  const MPI_Comm & mpi_communicator);
741 
747  template <int rank, int dim, typename Number>
 748  Tensor<rank, dim, Number>
 749  sum(const Tensor<rank, dim, Number> &local,
 750  const MPI_Comm & mpi_communicator);
751 
760  template <typename Number>
761  void
762  sum(const SparseMatrix<Number> &local,
763  const MPI_Comm & mpi_communicator,
764  SparseMatrix<Number> & global);
765 
785  template <typename T>
786  T
787  max(const T &t, const MPI_Comm &mpi_communicator);
788 
798  template <typename T, typename U>
799  void
800  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
801 
811  template <typename T>
812  void
 813  max(const ArrayView<const T> &values,
 814  const MPI_Comm & mpi_communicator,
815  const ArrayView<T> & maxima);
816 
836  template <typename T>
837  T
838  min(const T &t, const MPI_Comm &mpi_communicator);
839 
849  template <typename T, typename U>
850  void
851  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
852 
862  template <typename T>
863  void
 864  min(const ArrayView<const T> &values,
 865  const MPI_Comm & mpi_communicator,
866  const ArrayView<T> & minima);
867 
891  template <typename T>
892  T
893  logical_or(const T &t, const MPI_Comm &mpi_communicator);
894 
909  template <typename T, typename U>
910  void
911  logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
912 
922  template <typename T>
923  void
 924  logical_or(const ArrayView<const T> &values,
 925  const MPI_Comm & mpi_communicator,
926  const ArrayView<T> & results);
927 
942  struct MinMaxAvg
943  {
948  double sum;
949 
954  double min;
955 
960  double max;
961 
970  unsigned int min_index;
971 
980  unsigned int max_index;
981 
986  double avg;
987  };
988 
1003  MinMaxAvg
1004  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
1005 
1017  std::vector<MinMaxAvg>
1018  min_max_avg(const std::vector<double> &my_value,
1019  const MPI_Comm & mpi_communicator);
1020 
1021 
1034  void
1035  min_max_avg(const ArrayView<const double> &my_values,
1036  const ArrayView<MinMaxAvg> & result,
1037  const MPI_Comm & mpi_communicator);
1038 
1039 
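  // A sketch of using min_max_avg() to summarize a per-rank quantity such as
  // a wall time ('my_wall_time' and 'mpi_communicator' are illustrative
  // names):
  //
  // @code
  //   const Utilities::MPI::MinMaxAvg stats =
  //     Utilities::MPI::min_max_avg(my_wall_time, mpi_communicator);
  //   // stats.min, stats.max, stats.avg, and stats.sum are identical on all
  //   // ranks; stats.min_index and stats.max_index identify the ranks that
  //   // attained the extremes.
  // @endcode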
 1083  class MPI_InitFinalize
 1084  {
1085  public:
 1131  MPI_InitFinalize(
 1132  int & argc,
1133  char **& argv,
1134  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1135 
1140  ~MPI_InitFinalize();
1141 
1168  static void
1169  register_request(MPI_Request &request);
1170 
1174  static void
1175  unregister_request(MPI_Request &request);
1176 
1184  struct Signals
1185  {
1190  boost::signals2::signal<void()> at_mpi_init;
1191 
1198  boost::signals2::signal<void()> at_mpi_finalize;
1199  };
1200 
 1201  static Signals signals;
 1202 
1203  private:
1207  static std::set<MPI_Request *> requests;
1208  };
1209 
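  // This class is usually the first object created in main(); the thread
  // limit of 1 passed below is an illustrative choice:
  //
  // @code
  //   int main(int argc, char **argv)
  //   {
  //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  //     // ... all MPI communication happens while this object is alive ...
  //   }
  // @endcode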
1221  bool
1222  job_supports_mpi();
1223 
1241  template <typename T>
1242  std::map<unsigned int, T>
1243  some_to_some(const MPI_Comm & comm,
1244  const std::map<unsigned int, T> &objects_to_send);
1245 
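  // A sketch of a some_to_some() exchange in which the map keys are the
  // ranks the local process wants to send to; 'my_neighbor_ranks' and
  // data_for() are illustrative assumptions:
  //
  // @code
  //   std::map<unsigned int, std::vector<double>> to_send;
  //   for (const unsigned int neighbor : my_neighbor_ranks)
  //     to_send[neighbor] = data_for(neighbor);
  //
  //   const std::map<unsigned int, std::vector<double>> received =
  //     Utilities::MPI::some_to_some(comm, to_send);
  //   // received[p] now holds what process p sent to the local process
  // @endcode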
1259  template <typename T>
1260  std::vector<T>
1261  all_gather(const MPI_Comm &comm, const T &object_to_send);
1262 
1278  template <typename T>
1279  std::vector<T>
1280  gather(const MPI_Comm & comm,
1281  const T & object_to_send,
1282  const unsigned int root_process = 0);
1283 
1298  template <typename T>
1299  T
1300  scatter(const MPI_Comm & comm,
1301  const std::vector<T> &objects_to_send,
1302  const unsigned int root_process = 0);
1303 
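  // A sketch contrasting the three collectives declared above; 'comm',
  // 'my_name', and 'objects_prepared_on_root' are illustrative names:
  //
  // @code
  //   // every rank contributes one object and receives all of them:
  //   const std::vector<std::string> all_names =
  //     Utilities::MPI::all_gather(comm, my_name);
  //
  //   // only the root process (rank 0) receives the full vector:
  //   const std::vector<std::string> names_on_root =
  //     Utilities::MPI::gather(comm, my_name, 0);
  //
  //   // the inverse operation: rank 0 provides one object per rank
  //   // (non-root ranks must pass an empty vector):
  //   const std::string my_share =
  //     Utilities::MPI::scatter(comm, objects_prepared_on_root, 0);
  // @endcode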
1339  template <typename T>
1340  std::enable_if_t<is_mpi_type<T> == false, T>
1341  broadcast(const MPI_Comm & comm,
1342  const T & object_to_send,
1343  const unsigned int root_process = 0);
1344 
1367  template <typename T>
1368  std::enable_if_t<is_mpi_type<T> == true, T>
1369  broadcast(const MPI_Comm & comm,
1370  const T & object_to_send,
1371  const unsigned int root_process = 0);
1372 
1389  template <typename T>
1390  void
1391  broadcast(T * buffer,
1392  const size_t count,
1393  const unsigned int root,
1394  const MPI_Comm & comm);
1395 
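  // A sketch of the object-based broadcast() overloads above: rank 0 creates
  // an object and every rank ends up with a copy; read_parameters() is a
  // hypothetical helper:
  //
  // @code
  //   std::string parameters;
  //   if (Utilities::MPI::this_mpi_process(comm) == 0)
  //     parameters = read_parameters();
  //   parameters = Utilities::MPI::broadcast(comm, parameters, 0);
  //   // every rank now holds the same string
  // @endcode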
1408  template <typename T>
1409  T
1410  reduce(const T & local_value,
1411  const MPI_Comm & comm,
1412  const std::function<T(const T &, const T &)> &combiner,
1413  const unsigned int root_process = 0);
1414 
1424  template <typename T>
1425  T
1426  all_reduce(const T & local_value,
1427  const MPI_Comm & comm,
1428  const std::function<T(const T &, const T &)> &combiner);
1429 
1430 
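  // A sketch of all_reduce() with a user-provided combiner; the explicit
  // template argument sidesteps deducing T through the std::function
  // parameter ('local_indicator' and 'comm' are illustrative names):
  //
  // @code
  //   const double global_max = Utilities::MPI::all_reduce<double>(
  //     local_indicator, comm, [](const double a, const double b) {
  //       return std::max(a, b);
  //     });
  // @endcode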
1451  template <typename T>
1452  Future<void>
1453  isend(const T & object,
1454  MPI_Comm communicator,
1455  const unsigned int target_rank,
1456  const unsigned int mpi_tag = 0);
1457 
1458 
1475  template <typename T>
1476  Future<T>
1477  irecv(MPI_Comm communicator,
1478  const unsigned int source_rank,
1479  const unsigned int mpi_tag = 0);
1480 
1481 
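  // A sketch of pairing isend() on one rank with irecv() on another; the
  // ranks, tag, and payload are illustrative assumptions:
  //
  // @code
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  //   if (my_rank == 0)
  //     {
  //       const std::vector<double> payload = {1., 2., 3.};
  //       Utilities::MPI::Future<void> sent =
  //         Utilities::MPI::isend(payload, comm, 1, 42);
  //       sent.wait(); // the send buffer is released once the send completed
  //     }
  //   else if (my_rank == 1)
  //     {
  //       Utilities::MPI::Future<std::vector<double>> incoming =
  //         Utilities::MPI::irecv<std::vector<double>>(comm, 0, 42);
  //       const std::vector<double> payload = incoming.get();
  //     }
  // @endcode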
1524  std::vector<unsigned int>
1525  compute_index_owner(const IndexSet &owned_indices,
1526  const IndexSet &indices_to_look_up,
1527  const MPI_Comm &comm);
1528 
1536  template <typename T>
1537  std::vector<T>
1538  compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1539 
1543  template <typename T>
1544  std::set<T>
1545  compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1546 
1547 
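  // A sketch of compute_set_union(): each rank contributes the values it
  // knows about and all ranks receive the global union ('locally_relevant'
  // and 'comm' are illustrative names):
  //
  // @code
  //   const std::vector<types::global_dof_index> global_union =
  //     Utilities::MPI::compute_set_union(locally_relevant, comm);
  // @endcode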
1548 
1549  /* --------------------------- inline functions ------------------------- */
1550 
1551  namespace internal
1552  {
1558  namespace MPIDataTypes
1559  {
1560 #ifdef DEAL_II_WITH_MPI
1561  inline MPI_Datatype
1562  mpi_type_id(const bool *)
1563  {
1564  return MPI_CXX_BOOL;
1565  }
1566 
1567 
1568 
1569  inline MPI_Datatype
1570  mpi_type_id(const char *)
1571  {
1572  return MPI_CHAR;
1573  }
1574 
1575 
1576 
1577  inline MPI_Datatype
1578  mpi_type_id(const signed char *)
1579  {
1580  return MPI_SIGNED_CHAR;
1581  }
1582 
1583 
1584 
1585  inline MPI_Datatype
1586  mpi_type_id(const wchar_t *)
1587  {
1588  return MPI_WCHAR;
1589  }
1590 
1591 
1592 
1593  inline MPI_Datatype
1594  mpi_type_id(const short *)
1595  {
1596  return MPI_SHORT;
1597  }
1598 
1599 
1600 
1601  inline MPI_Datatype
1602  mpi_type_id(const int *)
1603  {
1604  return MPI_INT;
1605  }
1606 
1607 
1608 
1609  inline MPI_Datatype
1610  mpi_type_id(const long int *)
1611  {
1612  return MPI_LONG;
1613  }
1614 
1615 
1616 
1617  inline MPI_Datatype
1618  mpi_type_id(const long long int *)
1619  {
1620  return MPI_LONG_LONG;
1621  }
1622 
1623 
1624 
1625  inline MPI_Datatype
1626  mpi_type_id(const unsigned char *)
1627  {
1628  return MPI_UNSIGNED_CHAR;
1629  }
1630 
1631 
1632 
1633  inline MPI_Datatype
1634  mpi_type_id(const unsigned short *)
1635  {
1636  return MPI_UNSIGNED_SHORT;
1637  }
1638 
1639 
1640 
1641  inline MPI_Datatype
1642  mpi_type_id(const unsigned int *)
1643  {
1644  return MPI_UNSIGNED;
1645  }
1646 
1647 
1648 
1649  inline MPI_Datatype
1650  mpi_type_id(const unsigned long int *)
1651  {
1652  return MPI_UNSIGNED_LONG;
1653  }
1654 
1655 
1656 
1657  inline MPI_Datatype
1658  mpi_type_id(const unsigned long long int *)
1659  {
1660  return MPI_UNSIGNED_LONG_LONG;
1661  }
1662 
1663 
1664 
1665  inline MPI_Datatype
1666  mpi_type_id(const float *)
1667  {
1668  return MPI_FLOAT;
1669  }
1670 
1671 
1672 
1673  inline MPI_Datatype
1674  mpi_type_id(const double *)
1675  {
1676  return MPI_DOUBLE;
1677  }
1678 
1679 
1680 
1681  inline MPI_Datatype
1682  mpi_type_id(const long double *)
1683  {
1684  return MPI_LONG_DOUBLE;
1685  }
1686 
1687 
1688 
1689  inline MPI_Datatype
1690  mpi_type_id(const std::complex<float> *)
1691  {
1692  return MPI_COMPLEX;
1693  }
1694 
1695 
1696 
1697  inline MPI_Datatype
1698  mpi_type_id(const std::complex<double> *)
1699  {
1700  return MPI_DOUBLE_COMPLEX;
1701  }
1702 #endif
1703  } // namespace MPIDataTypes
1704  } // namespace internal
1705 
1706 
1707 
1708 #ifdef DEAL_II_WITH_MPI
1726  template <typename T>
1727  const MPI_Datatype
 1728  mpi_type_id_for_type = internal::MPIDataTypes::mpi_type_id(
 1729  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1730 #endif
1731 
1732 #ifndef DOXYGEN
1733  namespace internal
1734  {
1735  // declaration for an internal function that lives in mpi.templates.h
1736  template <typename T>
1737  void
1738  all_reduce(const MPI_Op & mpi_op,
1739  const ArrayView<const T> &values,
1740  const MPI_Comm & mpi_communicator,
1741  const ArrayView<T> & output);
1742  } // namespace internal
1743 
1744 
1745  template <typename T>
1746  template <typename W, typename G>
1747  Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1748  : wait_function(wait_operation)
1749  , get_and_cleanup_function(get_and_cleanup_operation)
1750  , is_done(false)
1751  , get_was_called(false)
1752  {}
1753 
1754 
1755 
1756  template <typename T>
1757  Future<T>::~Future()
1758  {
1759  // If there is a clean-up function, and if it has not been
1760  // called yet, then do so. Note that we may not have a
1761  // clean-up function (not even an empty one) if the current
1762  // object has been moved from, into another object, and as
1763  // a consequence the std::function objects are now empty
1764  // even though they were initialized in the constructor.
 1765  // (A std::function object whose object is an empty lambda
1766  // function, [](){}, is not an empty std::function object.)
1767  if ((get_was_called == false) && get_and_cleanup_function)
1768  get();
1769  }
1770 
1771 
1772 
1773  template <typename T>
1774  void
1775  Future<T>::wait()
1776  {
1777  if (is_done == false)
1778  {
1779  wait_function();
1780 
1781  is_done = true;
1782  }
1783  }
1784 
1785 
1786  template <typename T>
1787  T
1788  Future<T>::get()
1789  {
1790  Assert(get_was_called == false,
1791  ExcMessage(
1792  "You can't call get() more than once on a Future object."));
1793  get_was_called = true;
1794 
1795  wait();
1796  return get_and_cleanup_function();
1797  }
1798 
1799 
1800 
1801  template <typename T, unsigned int N>
1802  void
1803  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1804  {
1805  internal::all_reduce(MPI_SUM,
 1806  ArrayView<const T>(values, N),
 1807  mpi_communicator,
1808  ArrayView<T>(sums, N));
1809  }
1810 
1811 
1812 
1813  template <typename T, unsigned int N>
1814  void
1815  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1816  {
1817  internal::all_reduce(MPI_MAX,
 1818  ArrayView<const T>(values, N),
 1819  mpi_communicator,
1820  ArrayView<T>(maxima, N));
1821  }
1822 
1823 
1824 
1825  template <typename T, unsigned int N>
1826  void
1827  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1828  {
1829  internal::all_reduce(MPI_MIN,
 1830  ArrayView<const T>(values, N),
 1831  mpi_communicator,
1832  ArrayView<T>(minima, N));
1833  }
1834 
1835 
1836 
1837  template <typename T, unsigned int N>
1838  void
1839  logical_or(const T (&values)[N],
1840  const MPI_Comm &mpi_communicator,
1841  T (&results)[N])
1842  {
1843  static_assert(std::is_integral<T>::value,
1844  "The MPI_LOR operation only allows integral data types.");
1845 
1846  internal::all_reduce(MPI_LOR,
 1847  ArrayView<const T>(values, N),
 1848  mpi_communicator,
1849  ArrayView<T>(results, N));
1850  }
1851 
1852 
1853 
1854  template <typename T>
1855  std::map<unsigned int, T>
1856  some_to_some(const MPI_Comm & comm,
1857  const std::map<unsigned int, T> &objects_to_send)
1858  {
1859 # ifndef DEAL_II_WITH_MPI
1860  (void)comm;
1861  Assert(objects_to_send.size() < 2,
1862  ExcMessage("Cannot send to more than one processor."));
1863  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1864  objects_to_send.size() == 0,
1865  ExcMessage("Can only send to myself or to nobody."));
1866  return objects_to_send;
1867 # else
1868  const auto my_proc = this_mpi_process(comm);
1869 
1870  std::map<unsigned int, T> received_objects;
1871 
1872  std::vector<unsigned int> send_to;
1873  send_to.reserve(objects_to_send.size());
1874  for (const auto &m : objects_to_send)
1875  if (m.first == my_proc)
1876  received_objects[my_proc] = m.second;
1877  else
1878  send_to.emplace_back(m.first);
1879 
1880  const unsigned int n_expected_incoming_messages =
 1881  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
 1882 
1883  // Protect the following communication:
1884  static CollectiveMutex mutex;
1885  CollectiveMutex::ScopedLock lock(mutex, comm);
1886 
1887  // If we have something to send, or we expect something from other
1888  // processors, we need to visit one of the two scopes below. Otherwise,
1889  // no other action is required by this mpi process, and we can safely
1890  // return.
1891  if (send_to.size() == 0 && n_expected_incoming_messages == 0)
1892  return received_objects;
1893 
1894  const int mpi_tag =
 1895  internal::Tags::mpi_some_to_some;
 1896 
1897  // Sending buffers
1898  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1899  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1900  {
1901  unsigned int i = 0;
1902  for (const auto &rank_obj : objects_to_send)
1903  if (rank_obj.first != my_proc)
1904  {
1905  const auto &rank = rank_obj.first;
1906  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1907  /*allow_compression=*/false);
1908  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1909  buffers_to_send[i].size(),
1910  MPI_CHAR,
1911  rank,
1912  mpi_tag,
1913  comm,
1914  &buffer_send_requests[i]);
1915  AssertThrowMPI(ierr);
1916  ++i;
1917  }
1918  }
1919 
1920  // Fill the output map
1921  {
1922  std::vector<char> buffer;
1923  // We do this on a first come/first served basis
1924  for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1925  {
1926  // Probe what's going on. Take data from the first available sender
1927  MPI_Status status;
1928  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1929  AssertThrowMPI(ierr);
1930 
1931  // Length of the message
1932  int len;
1933  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1934  AssertThrowMPI(ierr);
1935  buffer.resize(len);
1936 
1937  // Source rank
1938  const unsigned int rank = status.MPI_SOURCE;
1939 
1940  // Actually receive the message
1941  ierr = MPI_Recv(buffer.data(),
1942  len,
1943  MPI_CHAR,
1944  status.MPI_SOURCE,
1945  status.MPI_TAG,
1946  comm,
1947  MPI_STATUS_IGNORE);
1948  AssertThrowMPI(ierr);
1949  Assert(received_objects.find(rank) == received_objects.end(),
 1950  ExcInternalError(
 1951  "I should not receive again from this rank"));
1952  received_objects[rank] =
1953  Utilities::unpack<T>(buffer,
1954  /*allow_compression=*/false);
1955  }
1956  }
1957 
1958  // Wait to have sent all objects.
1959  const int ierr = MPI_Waitall(send_to.size(),
1960  buffer_send_requests.data(),
1961  MPI_STATUSES_IGNORE);
1962  AssertThrowMPI(ierr);
1963 
1964  return received_objects;
1965 # endif // deal.II with MPI
1966  }
1967 
1968 
1969 
1970  template <typename T>
1971  std::vector<T>
1972  all_gather(const MPI_Comm &comm, const T &object)
1973  {
1974  if (job_supports_mpi() == false)
1975  return {object};
1976 
1977 # ifndef DEAL_II_WITH_MPI
1978  (void)comm;
1979  std::vector<T> v(1, object);
1980  return v;
1981 # else
1982  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1983 
1984  std::vector<char> buffer = Utilities::pack(object);
1985 
1986  int n_local_data = buffer.size();
1987 
1988  // Vector to store the size of loc_data_array for every process
1989  std::vector<int> size_all_data(n_procs, 0);
1990 
1991  // Exchanging the size of each buffer
1992  int ierr = MPI_Allgather(
1993  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1994  AssertThrowMPI(ierr);
1995 
1996  // Now computing the displacement, relative to recvbuf,
1997  // at which to store the incoming buffer
1998  std::vector<int> rdispls(n_procs);
1999  rdispls[0] = 0;
2000  for (unsigned int i = 1; i < n_procs; ++i)
2001  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2002 
2003  // Step 3: exchange the buffer:
2004  std::vector<char> received_unrolled_buffer(rdispls.back() +
2005  size_all_data.back());
2006 
2007  ierr = MPI_Allgatherv(buffer.data(),
2008  n_local_data,
2009  MPI_CHAR,
2010  received_unrolled_buffer.data(),
2011  size_all_data.data(),
2012  rdispls.data(),
2013  MPI_CHAR,
2014  comm);
2015  AssertThrowMPI(ierr);
2016 
2017  std::vector<T> received_objects(n_procs);
2018  for (unsigned int i = 0; i < n_procs; ++i)
2019  {
2020  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
2021  rdispls[i],
2022  received_unrolled_buffer.begin() +
2023  rdispls[i] + size_all_data[i]);
2024  received_objects[i] = Utilities::unpack<T>(local_buffer);
2025  }
2026 
2027  return received_objects;
2028 # endif
2029  }
2030 
2031 
2032 
2033  template <typename T>
2034  std::vector<T>
2035  gather(const MPI_Comm & comm,
2036  const T & object_to_send,
2037  const unsigned int root_process)
2038  {
2039 # ifndef DEAL_II_WITH_MPI
2040  (void)comm;
2041  (void)root_process;
2042  std::vector<T> v(1, object_to_send);
2043  return v;
2044 # else
2045  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2046  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2047 
2048  AssertIndexRange(root_process, n_procs);
2049 
2050  std::vector<char> buffer = Utilities::pack(object_to_send);
2051  int n_local_data = buffer.size();
2052 
2053  // Vector to store the size of loc_data_array for every process
2054  // only the root process needs to allocate memory for that purpose
2055  std::vector<int> size_all_data;
2056  if (my_rank == root_process)
2057  size_all_data.resize(n_procs, 0);
2058 
2059  // Exchanging the size of each buffer
2060  int ierr = MPI_Gather(&n_local_data,
2061  1,
2062  MPI_INT,
2063  size_all_data.data(),
2064  1,
2065  MPI_INT,
2066  root_process,
2067  comm);
2068  AssertThrowMPI(ierr);
2069 
2070  // Now computing the displacement, relative to recvbuf,
2071  // at which to store the incoming buffer; only for root
2072  std::vector<int> rdispls;
2073  if (my_rank == root_process)
2074  {
2075  rdispls.resize(n_procs, 0);
2076  for (unsigned int i = 1; i < n_procs; ++i)
2077  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2078  }
2079  // exchange the buffer:
2080  std::vector<char> received_unrolled_buffer;
2081  if (my_rank == root_process)
2082  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2083 
2084  ierr = MPI_Gatherv(buffer.data(),
2085  n_local_data,
2086  MPI_CHAR,
2087  received_unrolled_buffer.data(),
2088  size_all_data.data(),
2089  rdispls.data(),
2090  MPI_CHAR,
2091  root_process,
2092  comm);
2093  AssertThrowMPI(ierr);
2094 
2095  std::vector<T> received_objects;
2096 
2097  if (my_rank == root_process)
2098  {
2099  received_objects.resize(n_procs);
2100 
2101  for (unsigned int i = 0; i < n_procs; ++i)
2102  {
2103  const std::vector<char> local_buffer(
2104  received_unrolled_buffer.begin() + rdispls[i],
2105  received_unrolled_buffer.begin() + rdispls[i] +
2106  size_all_data[i]);
2107  received_objects[i] = Utilities::unpack<T>(local_buffer);
2108  }
2109  }
2110  return received_objects;
2111 # endif
2112  }
2113 
2114 
2115 
2116  template <typename T>
2117  T
2118  scatter(const MPI_Comm & comm,
2119  const std::vector<T> &objects_to_send,
2120  const unsigned int root_process)
2121  {
2122 # ifndef DEAL_II_WITH_MPI
2123  (void)comm;
2124  (void)root_process;
2125 
2126  AssertDimension(objects_to_send.size(), 1);
2127 
2128  return objects_to_send[0];
2129 # else
2130  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2131  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2132 
2133  AssertIndexRange(root_process, n_procs);
2134  AssertThrow(
2135  (my_rank != root_process && objects_to_send.size() == 0) ||
2136  objects_to_send.size() == n_procs,
2137  ExcMessage(
 2138  "The number of objects to be scattered must correspond to the number of processes."));
2139 
2140  std::vector<char> send_buffer;
2141  std::vector<int> send_counts;
2142  std::vector<int> send_displacements;
2143 
2144  if (my_rank == root_process)
2145  {
2146  send_counts.resize(n_procs, 0);
2147  send_displacements.resize(n_procs + 1, 0);
2148 
2149  for (unsigned int i = 0; i < n_procs; ++i)
2150  {
2151  const auto packed_data = Utilities::pack(objects_to_send[i]);
2152  send_buffer.insert(send_buffer.end(),
2153  packed_data.begin(),
2154  packed_data.end());
2155  send_counts[i] = packed_data.size();
2156  }
2157 
2158  for (unsigned int i = 0; i < n_procs; ++i)
2159  send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2160  }
2161 
2162  int n_local_data;
2163  int ierr = MPI_Scatter(send_counts.data(),
2164  1,
2165  MPI_INT,
2166  &n_local_data,
2167  1,
2168  MPI_INT,
2169  root_process,
2170  comm);
2171  AssertThrowMPI(ierr);
2172 
2173  std::vector<char> recv_buffer(n_local_data);
2174 
2175  ierr = MPI_Scatterv(send_buffer.data(),
2176  send_counts.data(),
2177  send_displacements.data(),
2178  MPI_CHAR,
2179  recv_buffer.data(),
2180  n_local_data,
2181  MPI_CHAR,
2182  root_process,
2183  comm);
2184  AssertThrowMPI(ierr);
2185 
2186  return Utilities::unpack<T>(recv_buffer);
2187 # endif
2188  }
2189 
2190 
2191  template <typename T>
2192  void
2193  broadcast(T * buffer,
2194  const size_t count,
2195  const unsigned int root,
2196  const MPI_Comm & comm)
2197  {
2198 # ifndef DEAL_II_WITH_MPI
2199  (void)buffer;
2200  (void)count;
2201  (void)root;
2202  (void)comm;
2203 # else
2204  Assert(root < n_mpi_processes(comm),
2205  ExcMessage("Invalid root rank specified."));
2206 
2207  // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2208  // iteration:
2209  const size_t max_send_count = std::numeric_limits<signed int>::max();
2210 
2211  size_t total_sent_count = 0;
2212  while (total_sent_count < count)
2213  {
2214  const size_t current_count =
2215  std::min(count - total_sent_count, max_send_count);
2216 
2217  const int ierr = MPI_Bcast(buffer + total_sent_count,
2218  current_count,
2219  mpi_type_id_for_type<decltype(*buffer)>,
2220  root,
2221  comm);
2222  AssertThrowMPI(ierr);
2223  total_sent_count += current_count;
2224  }
2225 # endif
2226  }
2227 
2228 
2229 
2230  template <typename T>
2231  std::enable_if_t<is_mpi_type<T> == false, T>
2232  broadcast(const MPI_Comm & comm,
2233  const T & object_to_send,
2234  const unsigned int root_process)
2235  {
2236 # ifndef DEAL_II_WITH_MPI
2237  (void)comm;
2238  (void)root_process;
2239  return object_to_send;
2240 # else
2241  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2242  AssertIndexRange(root_process, n_procs);
2243  (void)n_procs;
2244 
2245  std::vector<char> buffer;
2246  std::size_t buffer_size = numbers::invalid_size_type;
2247 
2248  // On the root process, pack the data and determine what the
2249  // buffer size needs to be.
2250  if (this_mpi_process(comm) == root_process)
2251  {
2252  buffer = Utilities::pack(object_to_send, false);
2253  buffer_size = buffer.size();
2254  }
2255 
2256  // Exchange the size of buffer
2257  int ierr = MPI_Bcast(&buffer_size,
2258  1,
2259  mpi_type_id_for_type<decltype(buffer_size)>,
2260  root_process,
2261  comm);
2262  AssertThrowMPI(ierr);
2263 
2264  // If not on the root process, correctly size the buffer to
2265  // receive the data, then do exactly that.
2266  if (this_mpi_process(comm) != root_process)
2267  buffer.resize(buffer_size);
2268 
2269  broadcast(buffer.data(), buffer_size, root_process, comm);
2270 
2271  if (Utilities::MPI::this_mpi_process(comm) == root_process)
2272  return object_to_send;
2273  else
2274  return Utilities::unpack<T>(buffer, false);
2275 # endif
2276  }
2277 
2278 
2279 
2280  template <typename T>
2281  std::enable_if_t<is_mpi_type<T> == true, T>
2282  broadcast(const MPI_Comm & comm,
2283  const T & object_to_send,
2284  const unsigned int root_process)
2285  {
2286 # ifndef DEAL_II_WITH_MPI
2287  (void)comm;
2288  (void)root_process;
2289  return object_to_send;
2290 # else
2291 
2292  T object = object_to_send;
2293  int ierr =
2294  MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2295  AssertThrowMPI(ierr);
2296 
2297  return object;
2298 # endif
2299  }
2300 
2301 
2302  template <typename T>
2303  Future<void>
2304  isend(const T & object,
2305  MPI_Comm communicator,
2306  const unsigned int target_rank,
2307  const unsigned int mpi_tag)
2308  {
2309 # ifndef DEAL_II_WITH_MPI
2310  Assert(false, ExcNeedsMPI());
2311  (void)object;
2312  (void)communicator;
2313  (void)target_rank;
2314  (void)mpi_tag;
2315  return Future<void>([]() {}, []() {});
2316 # else
2317  // Create a pointer to a send buffer into which we pack the object
2318  // to be sent. The buffer will be released by the Future object once
2319  // the send has been verified to have succeeded.
2320  //
2321  // Conceptually, we would like this send buffer to be a
2322  // std::unique_ptr object whose ownership is later handed over
2323  // to the cleanup function. That has the disadvantage that the
2324  // cleanup object is a non-copyable lambda capture, leading to
2325  // awkward semantics. Instead, we use a std::shared_ptr; we move
2326  // this shared pointer into the cleanup function, which means
 2327  // that there is exactly one shared pointer that owns the buffer
2328  // at any given time, though the latter is not an important
2329  // optimization.
2330  std::shared_ptr<std::vector<char>> send_buffer =
2331  std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2332 
2333  // Now start the send, and store the result in a request object that
2334  // we can then wait for later:
2335  MPI_Request request;
2336  const int ierr =
2337  MPI_Isend(send_buffer->data(),
2338  send_buffer->size(),
2339  mpi_type_id_for_type<decltype(*send_buffer->data())>,
2340  target_rank,
2341  mpi_tag,
2342  communicator,
2343  &request);
2344  AssertThrowMPI(ierr);
2345 
2346  // Then return a std::future-like object that has a wait()
2347  // function one can use to wait for the communication to finish,
2348  // and that has a cleanup function to be called at some point
2349  // after that makes sure the send buffer gets deallocated. This
2350  // cleanup function takes over ownership of the send buffer.
2351  //
2352  // Note that the body of the lambda function of the clean-up
2353  // function could be left empty. If that were so, once the
2354  // lambda function object goes out of scope, the 'send_buffer'
2355  // member of the closure object goes out of scope as well and so
2356  // the send_buffer is destroyed. But we may want to release the
2357  // buffer itself as early as possible, and so we clear the
2358  // buffer when the Future::get() function is called.
2359  auto wait = [request]() mutable {
2360  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2361  AssertThrowMPI(ierr);
2362  };
2363  auto cleanup = [send_buffer = std::move(send_buffer)]() {
2364  send_buffer->clear();
2365  };
2366  return Future<void>(wait, cleanup);
2367 # endif
2368  }
2369 
2370 
2371 
2372  template <typename T>
2373  Future<T>
2374  irecv(MPI_Comm communicator,
2375  const unsigned int source_rank,
2376  const unsigned int mpi_tag)
2377  {
2378 # ifndef DEAL_II_WITH_MPI
2379  Assert(false, ExcNeedsMPI());
2380  (void)communicator;
2381  (void)source_rank;
2382  (void)mpi_tag;
 2383  return Future<T>([]() {}, []() { return T{}; });
2384 # else
2385  // Use a 'probe' operation for the 'wait' operation of the
2386  // Future this function returns. It will trigger whenever we get
2387  // the incoming message. Later, once we have received the message, we
2388  // can query its size and allocate a receiver buffer.
2389  //
2390  // Since we may be waiting for multiple messages from the same
2391  // incoming process (with possibly the same tag -- we can't
2392  // know), we must make sure that the 'probe' operation we have
2393  // here (and which we use to determine the buffer size) matches
2394  // the 'recv' operation with which we actually get the data
2395  // later on. This is exactly what the 'MPI_Mprobe' function and
 2396  // its 'I'mmediate variant are there for, coupled with the
 2397  // 'MPI_Mrecv' call that we put into the clean-up function
2398  // below.
2399  std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2400  std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2401 
2402  auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2403  const int ierr = MPI_Mprobe(
2404  source_rank, mpi_tag, communicator, message.get(), status.get());
2405  AssertThrowMPI(ierr);
2406  };
2407 
2408 
2409  // Now also define the function that actually gets the data:
2410  auto get = [status, message]() {
2411  int number_amount;
2412  int ierr;
2413  ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2414  AssertThrowMPI(ierr);
2415 
2416  std::vector<char> receive_buffer(number_amount);
2417 
2418  // Then actually get the data, using the matching MPI_Mrecv to the above
2419  // MPI_Mprobe:
2420  ierr = MPI_Mrecv(receive_buffer.data(),
2421  number_amount,
2422  mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2423  message.get(),
2424  status.get());
2425  AssertThrowMPI(ierr);
2426 
2427  // Return the unpacked object:
2428  return Utilities::unpack<T>(receive_buffer, false);
2429  };
2430 
2431  return Future<T>(wait, get);
2432 # endif
2433  }
2434 
2435 
2436 
2437 # ifdef DEAL_II_WITH_MPI
2438  template <class Iterator, typename Number>
2439  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2440  mean_and_standard_deviation(const Iterator begin,
2441  const Iterator end,
2442  const MPI_Comm &comm)
2443  {
 2444  // Below we use a simple and straightforward implementation. More elaborate
2445  // options are:
2446  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2447  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2448  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2449  using Std = typename numbers::NumberTraits<Number>::real_type;
2450  const Number sum = std::accumulate(begin, end, Number(0.));
2451 
2452  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2453  Assert(size > 0, ExcDivideByZero());
2454  const Number mean =
2455  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2456  Std sq_sum = 0.;
2457  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
 2458  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
 2459  });
2460  sq_sum = Utilities::MPI::sum(sq_sum, comm);
2461  return std::make_pair(mean,
2462  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2463  }
2464 # endif
2465 
2466 #endif
2467  } // end of namespace MPI
2468 } // end of namespace Utilities
2469 
2470 
 2471 DEAL_II_NAMESPACE_CLOSE
 2472 
2473 #endif