mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2023 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
 21 #include <deal.II/base/array_view.h>
 22 #include <deal.II/base/mpi_stub.h>
23 #include <deal.II/base/mpi_tags.h>
24 #include <deal.II/base/numbers.h>
 25 #include <deal.II/base/template_constraints.h>
 26 #include <deal.II/base/utilities.h>
27 
28 #include <boost/signals2.hpp>
29 
30 #include <complex>
31 #include <limits>
32 #include <map>
33 #include <numeric>
34 #include <set>
35 #include <vector>
36 
37 
38 
52 #ifdef DEAL_II_WITH_MPI
53 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
54 #endif
55 
56 
57 
 58 DEAL_II_NAMESPACE_OPEN
 59 
60 
61 // Forward type declarations to allow MPI sums over tensorial types
62 #ifndef DOXYGEN
63 template <int rank, int dim, typename Number>
64 class Tensor;
65 template <int rank, int dim, typename Number>
66 class SymmetricTensor;
67 template <typename Number>
68 class SparseMatrix;
69 class IndexSet;
70 #endif
71 
72 namespace Utilities
73 {
 86  IndexSet
 87  create_evenly_distributed_partitioning(
88  const unsigned int my_partition_id,
89  const unsigned int n_partitions,
90  const types::global_dof_index total_size);
91 
99  namespace MPI
100  {
109  template <typename T>
110  constexpr bool is_mpi_type = is_same_as_any_of<T,
111  char,
112  signed short,
113  signed int,
114  signed long,
115  signed long long,
116  signed char,
117  unsigned char,
118  unsigned short,
119  unsigned int,
120  unsigned long int,
121  unsigned long long,
122  float,
123  double,
124  long double,
125  bool,
126  std::complex<float>,
127  std::complex<double>,
128  std::complex<long double>,
129  wchar_t>::value;
130 
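  // A minimal sketch of how the is_mpi_type variable template above can be
  // used (the two asserted types are merely illustrative):
  //
  // @code
  //   static_assert(Utilities::MPI::is_mpi_type<double>,
  //                 "'double' maps directly onto an MPI_Datatype.");
  //   static_assert(!Utilities::MPI::is_mpi_type<std::string>,
  //                 "'std::string' needs to be packed/serialized instead.");
  // @endcode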
139  unsigned int
140  n_mpi_processes(const MPI_Comm mpi_communicator);
141 
150  unsigned int
151  this_mpi_process(const MPI_Comm mpi_communicator);
152 
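  // A minimal usage sketch for the two query functions above, assuming MPI
  // has been initialized (e.g., via MPI_InitFinalize further down):
  //
  // @code
  //   const unsigned int n_ranks =
  //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  //   const unsigned int my_rank =
  //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //   if (my_rank == 0)
  //     std::cout << "Running on " << n_ranks << " processes." << std::endl;
  // @endcode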
157  std::vector<unsigned int>
158  mpi_processes_within_communicator(const MPI_Comm comm_large,
159  const MPI_Comm comm_small);
160 
182  std::vector<unsigned int>
 183  compute_point_to_point_communication_pattern(
 184  const MPI_Comm mpi_comm,
185  const std::vector<unsigned int> &destinations);
186 
206  unsigned int
 207  compute_n_point_to_point_communications(
 208  const MPI_Comm mpi_comm,
209  const std::vector<unsigned int> &destinations);
210 
227  MPI_Comm
228  duplicate_communicator(const MPI_Comm mpi_communicator);
229 
239  void
240  free_communicator(MPI_Comm mpi_communicator);
241 
 254  class DuplicatedCommunicator
 255  {
256  public:
260  explicit DuplicatedCommunicator(const MPI_Comm communicator)
261  : comm(duplicate_communicator(communicator))
262  {}
263 
 268  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
 272  ~DuplicatedCommunicator()
 273  {
 274  free_communicator(comm);
 275  }
276 
280  MPI_Comm
281  operator*() const
282  {
283  return comm;
284  }
285 
286 
 291  DuplicatedCommunicator &
 292  operator=(const DuplicatedCommunicator &) = delete;
293  private:
297  MPI_Comm comm;
298  };
299 
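  // A usage sketch for the DuplicatedCommunicator class above: the
  // constructor duplicates the given communicator and the destructor frees
  // the duplicate again, so communication on *dup cannot collide with
  // unrelated messages on the original communicator. The function shown is
  // hypothetical:
  //
  // @code
  //   void run_algorithm(const MPI_Comm communicator)
  //   {
  //     Utilities::MPI::DuplicatedCommunicator dup(communicator);
  //     const unsigned int my_rank = Utilities::MPI::this_mpi_process(*dup);
  //     // ... communicate on *dup ...
  //   }  // the duplicated communicator is freed here
  // @endcode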
300 
301 
 331  class CollectiveMutex
 332  {
333  public:
 339  class ScopedLock
 340  {
341  public:
345  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
346  : mutex(mutex)
347  , comm(comm)
348  {
349  mutex.lock(comm);
350  }
351 
 355  ~ScopedLock()
 356  {
357  mutex.unlock(comm);
358  }
359 
360  private:
 364  CollectiveMutex &mutex;
 368  const MPI_Comm comm;
369  };
370 
374  explicit CollectiveMutex();
375 
 380  ~CollectiveMutex();
387  void
388  lock(const MPI_Comm comm);
389 
396  void
397  unlock(const MPI_Comm comm);
398 
399  private:
403  bool locked;
404 
408  MPI_Request request;
409  };
410 
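  // A usage sketch for CollectiveMutex/ScopedLock: all processes of the
  // communicator acquire the lock collectively, which keeps the
  // point-to-point messages of one loop iteration from being mistaken for
  // those of the next. The static mutex, the loop bound n_iterations, and
  // `comm` are assumptions of this sketch:
  //
  // @code
  //   static Utilities::MPI::CollectiveMutex mutex;
  //   for (unsigned int iteration = 0; iteration < n_iterations; ++iteration)
  //     {
  //       Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
  //       // ... point-to-point communication that must not mix with
  //       //     messages from other iterations ...
  //     }
  // @endcode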
411 
412 
459  template <typename T>
460  class Future
461  {
462  public:
467  template <typename W, typename G>
468  Future(W &&wait_operation, G &&get_and_cleanup_operation);
469 
475  Future(const Future &) = delete;
476 
480  Future(Future &&) noexcept = default;
481 
485  ~Future();
486 
492  Future &
493  operator=(const Future &) = delete;
494 
498  Future &
499  operator=(Future &&) noexcept = default;
500 
508  void
509  wait();
510 
522  T
523  get();
524 
525  private:
529  std::function<void()> wait_function;
530  std::function<T()> get_and_cleanup_function;
531 
535  bool is_done;
536 
 540  bool get_was_called;
 541  };
542 
543 
544 
574 #ifdef DEAL_II_WITH_MPI
 575  int
 576  create_group(const MPI_Comm comm,
577  const MPI_Group &group,
578  const int tag,
579  MPI_Comm *new_comm);
580 #endif
581 
590  std::vector<IndexSet>
 591  create_ascending_partitioning(
 592  const MPI_Comm comm,
593  const types::global_dof_index locally_owned_size);
594 
602  IndexSet
 603  create_evenly_distributed_partitioning(
 604  const MPI_Comm comm,
605  const types::global_dof_index total_size);
606 
607 #ifdef DEAL_II_WITH_MPI
623  template <class Iterator, typename Number = long double>
624  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
 625  mean_and_standard_deviation(const Iterator begin,
 626  const Iterator end,
627  const MPI_Comm comm);
628 #endif
629 
630 
678  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
679  create_mpi_data_type_n_bytes(const std::size_t n_bytes);
680 
700  template <typename T>
701  T
702  sum(const T &t, const MPI_Comm mpi_communicator);
703 
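  // A minimal sketch of the scalar reduction declared above, assuming
  // `comm` is a valid communicator and compute_local_error() is a
  // hypothetical user function: every process contributes its local value
  // and all processes obtain the global sum.
  //
  // @code
  //   const double local_error  = compute_local_error();
  //   const double global_error = Utilities::MPI::sum(local_error, comm);
  //
  //   const unsigned int n_local_cells  = 42;
  //   const unsigned int n_global_cells =
  //     Utilities::MPI::sum(n_local_cells, comm);
  // @endcode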
713  template <typename T, typename U>
714  void
715  sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
716 
726  template <typename T>
727  void
 728  sum(const ArrayView<const T> &values,
 729  const MPI_Comm mpi_communicator,
730  const ArrayView<T> &sums);
731 
737  template <int rank, int dim, typename Number>
 738  Tensor<rank, dim, Number>
 739  sum(const Tensor<rank, dim, Number> &local,
 740  const MPI_Comm mpi_communicator);
741 
747  template <int rank, int dim, typename Number>
 748  SymmetricTensor<rank, dim, Number>
 749  sum(const SymmetricTensor<rank, dim, Number> &local,
 750  const MPI_Comm mpi_communicator);
751 
760  template <typename Number>
761  void
762  sum(const SparseMatrix<Number> &local,
763  const MPI_Comm mpi_communicator,
764  SparseMatrix<Number> &global);
765 
785  template <typename T>
786  T
787  max(const T &t, const MPI_Comm mpi_communicator);
788 
798  template <typename T, typename U>
799  void
800  max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
801 
811  template <typename T>
812  void
 813  max(const ArrayView<const T> &values,
 814  const MPI_Comm mpi_communicator,
815  const ArrayView<T> &maxima);
816 
836  template <typename T>
837  T
838  min(const T &t, const MPI_Comm mpi_communicator);
839 
849  template <typename T, typename U>
850  void
851  min(const T &values, const MPI_Comm mpi_communicator, U &minima);
852 
862  template <typename T>
863  void
 864  min(const ArrayView<const T> &values,
 865  const MPI_Comm mpi_communicator,
866  const ArrayView<T> &minima);
867 
891  template <typename T>
892  T
893  logical_or(const T &t, const MPI_Comm mpi_communicator);
894 
909  template <typename T, typename U>
910  void
911  logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
912 
922  template <typename T>
923  void
 924  logical_or(const ArrayView<const T> &values,
 925  const MPI_Comm mpi_communicator,
926  const ArrayView<T> &results);
927 
942  struct MinMaxAvg
943  {
948  double sum;
949 
954  double min;
955 
960  double max;
961 
970  unsigned int min_index;
971 
980  unsigned int max_index;
981 
986  double avg;
987  };
988 
1003  MinMaxAvg
1004  min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
1005 
1017  std::vector<MinMaxAvg>
1018  min_max_avg(const std::vector<double> &my_value,
1019  const MPI_Comm mpi_communicator);
1020 
1021 
1034  void
1035  min_max_avg(const ArrayView<const double> &my_values,
1036  const ArrayView<MinMaxAvg> &result,
1037  const MPI_Comm mpi_communicator);
1038 
1039 
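  // A usage sketch for min_max_avg(), for example to report the load
  // balance of a locally measured wall time (`local_time` and `comm` are
  // assumptions of this sketch):
  //
  // @code
  //   const Utilities::MPI::MinMaxAvg stats =
  //     Utilities::MPI::min_max_avg(local_time, comm);
  //
  //   if (Utilities::MPI::this_mpi_process(comm) == 0)
  //     std::cout << "time min/avg/max: " << stats.min << ' ' << stats.avg
  //               << ' ' << stats.max << " (max on rank " << stats.max_index
  //               << ')' << std::endl;
  // @endcode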
 1083  class MPI_InitFinalize
 1084  {
1085  public:
 1131  MPI_InitFinalize(
 1132  int &argc,
1133  char **&argv,
1134  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1135 
1140  ~MPI_InitFinalize();
1141 
1168  static void
1169  register_request(MPI_Request &request);
1170 
1174  static void
1175  unregister_request(MPI_Request &request);
1176 
1184  struct Signals
1185  {
1190  boost::signals2::signal<void()> at_mpi_init;
1191 
1198  boost::signals2::signal<void()> at_mpi_finalize;
1199  };
1200 
 1201  static Signals signals;
 1202 
1203  private:
1207  static std::set<MPI_Request *> requests;
1208 
1209 #ifdef DEAL_II_WITH_PETSC
1211 #endif
1212  };
1213 
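  // A usage sketch for MPI_InitFinalize: create exactly one object of this
  // class at the top of main(); the constructor initializes MPI (and the
  // other parallel libraries deal.II was configured with) and the
  // destructor finalizes them. Restricting each process to one thread is
  // just the choice made in this sketch.
  //
  // @code
  //   int main(int argc, char *argv[])
  //   {
  //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  //
  //     // ... the parallel part of the program ...
  //   }
  // @endcode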
1225  bool
1226  job_supports_mpi();
1227 
1245  template <typename T>
1246  std::map<unsigned int, T>
1247  some_to_some(const MPI_Comm comm,
1248  const std::map<unsigned int, T> &objects_to_send);
1249 
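  // A sketch of some_to_some(): each process lists the objects it wants to
  // send, keyed by destination rank, and receives the objects addressed to
  // it. The ring-shaped pattern below is purely illustrative, and `comm` is
  // assumed to be a valid communicator:
  //
  // @code
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
  //
  //   std::map<unsigned int, std::vector<double>> to_send;
  //   to_send[(my_rank + 1) % n_ranks] = {1.0 * my_rank, 2.0 * my_rank};
  //
  //   const std::map<unsigned int, std::vector<double>> received =
  //     Utilities::MPI::some_to_some(comm, to_send);
  //   // 'received' holds the vector sent by the left neighbor.
  // @endcode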
1263  template <typename T>
1264  std::vector<T>
1265  all_gather(const MPI_Comm comm, const T &object_to_send);
1266 
1282  template <typename T>
1283  std::vector<T>
1284  gather(const MPI_Comm comm,
1285  const T &object_to_send,
1286  const unsigned int root_process = 0);
1287 
1302  template <typename T>
1303  T
1304  scatter(const MPI_Comm comm,
1305  const std::vector<T> &objects_to_send,
1306  const unsigned int root_process = 0);
1307 
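  // A sketch showing gather() and scatter() as complementary operations:
  // the root collects one object from every rank, and later hands one
  // object back to every rank. Only the root passes a non-empty vector to
  // scatter(). `comm` is assumed to be a valid communicator:
  //
  // @code
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  //
  //   const std::vector<std::string> all_names =
  //     Utilities::MPI::gather(comm, "rank " + std::to_string(my_rank), 0);
  //
  //   std::vector<std::string> replies;
  //   if (my_rank == 0)
  //     replies.resize(Utilities::MPI::n_mpi_processes(comm), "hello");
  //   const std::string my_reply = Utilities::MPI::scatter(comm, replies, 0);
  // @endcode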
1343  template <typename T>
1344  std::enable_if_t<is_mpi_type<T> == false, T>
1345  broadcast(const MPI_Comm comm,
1346  const T &object_to_send,
1347  const unsigned int root_process = 0);
1348 
1371  template <typename T>
1372  std::enable_if_t<is_mpi_type<T> == true, T>
1373  broadcast(const MPI_Comm comm,
1374  const T &object_to_send,
1375  const unsigned int root_process = 0);
1376 
1393  template <typename T>
1394  void
1395  broadcast(T *buffer,
1396  const size_t count,
1397  const unsigned int root,
1398  const MPI_Comm comm);
1399 
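  // A sketch of broadcast() for a type that is not a native MPI type and
  // therefore gets serialized internally; the parameter list and `comm`
  // are purely illustrative:
  //
  // @code
  //   std::vector<std::string> parameters;
  //   if (Utilities::MPI::this_mpi_process(comm) == 0)
  //     parameters = {"-ksp_type", "cg", "-pc_type", "jacobi"};
  //
  //   parameters = Utilities::MPI::broadcast(comm, parameters, 0);
  //   // All ranks now hold the same four strings.
  // @endcode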
1412  template <typename T>
1413  T
1414  reduce(const T &local_value,
1415  const MPI_Comm comm,
1416  const std::function<T(const T &, const T &)> &combiner,
1417  const unsigned int root_process = 0);
1418 
1428  template <typename T>
1429  T
1430  all_reduce(const T &local_value,
1431  const MPI_Comm comm,
1432  const std::function<T(const T &, const T &)> &combiner);
1433 
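  // A sketch of all_reduce() with a user-defined combiner, here computing
  // the lexicographically largest of a set of strings (something the
  // predefined MPI operations cannot do); reduce() works the same way but
  // only the root process obtains the result. `comm` is an assumption of
  // this sketch:
  //
  // @code
  //   const std::string local_name =
  //     "rank " + std::to_string(Utilities::MPI::this_mpi_process(comm));
  //
  //   const std::string largest_name =
  //     Utilities::MPI::all_reduce<std::string>(
  //       local_name, comm, [](const std::string &a, const std::string &b) {
  //         return std::max(a, b);
  //       });
  // @endcode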
1434 
1455  template <typename T>
1456  Future<void>
1457  isend(const T &object,
1458  MPI_Comm communicator,
1459  const unsigned int target_rank,
1460  const unsigned int mpi_tag = 0);
1461 
1462 
1479  template <typename T>
1480  Future<T>
1481  irecv(MPI_Comm communicator,
1482  const unsigned int source_rank,
1483  const unsigned int mpi_tag = 0);
1484 
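  // A sketch pairing isend() and irecv(): both return Future objects, so
  // the communication can overlap with other work until wait()/get() is
  // called. The ring exchange, the tag value, and `comm` are illustrative:
  //
  // @code
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
  //   const unsigned int right   = (my_rank + 1) % n_ranks;
  //   const unsigned int left    = (my_rank + n_ranks - 1) % n_ranks;
  //
  //   const std::vector<double> payload(10, 1.0 * my_rank);
  //
  //   Utilities::MPI::Future<void> send_done =
  //     Utilities::MPI::isend(payload, comm, right, /*mpi_tag=*/17);
  //   Utilities::MPI::Future<std::vector<double>> incoming =
  //     Utilities::MPI::irecv<std::vector<double>>(comm, left, /*mpi_tag=*/17);
  //
  //   // ... do unrelated work ...
  //
  //   const std::vector<double> received = incoming.get();
  //   send_done.wait();
  // @endcode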
1485 
1528  std::vector<unsigned int>
1529  compute_index_owner(const IndexSet &owned_indices,
1530  const IndexSet &indices_to_look_up,
1531  const MPI_Comm comm);
1532 
1540  template <typename T>
1541  std::vector<T>
1542  compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1543 
1547  template <typename T>
1548  std::set<T>
1549  compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1550 
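  // A sketch of compute_set_union(): each process contributes the entries
  // it knows about and every process obtains the union. The sets used here
  // and `comm` are illustrative:
  //
  // @code
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  //
  //   const std::set<unsigned int> local_set  = {my_rank, my_rank + 1};
  //   const std::set<unsigned int> global_set =
  //     Utilities::MPI::compute_set_union(local_set, comm);
  //   // global_set == {0, 1, ..., n_ranks} on every process.
  // @endcode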
1551 
1552 
1553  /* --------------------------- inline functions ------------------------- */
1554 
1555  namespace internal
1556  {
1562  namespace MPIDataTypes
1563  {
1564 #ifdef DEAL_II_WITH_MPI
1565  inline MPI_Datatype
1566  mpi_type_id(const bool *)
1567  {
1568  return MPI_CXX_BOOL;
1569  }
1570 
1571 
1572 
1573  inline MPI_Datatype
1574  mpi_type_id(const char *)
1575  {
1576  return MPI_CHAR;
1577  }
1578 
1579 
1580 
1581  inline MPI_Datatype
1582  mpi_type_id(const signed char *)
1583  {
1584  return MPI_SIGNED_CHAR;
1585  }
1586 
1587 
1588 
1589  inline MPI_Datatype
1590  mpi_type_id(const wchar_t *)
1591  {
1592  return MPI_WCHAR;
1593  }
1594 
1595 
1596 
1597  inline MPI_Datatype
1598  mpi_type_id(const short *)
1599  {
1600  return MPI_SHORT;
1601  }
1602 
1603 
1604 
1605  inline MPI_Datatype
1606  mpi_type_id(const int *)
1607  {
1608  return MPI_INT;
1609  }
1610 
1611 
1612 
1613  inline MPI_Datatype
1614  mpi_type_id(const long int *)
1615  {
1616  return MPI_LONG;
1617  }
1618 
1619 
1620 
1621  inline MPI_Datatype
1622  mpi_type_id(const long long int *)
1623  {
1624  return MPI_LONG_LONG;
1625  }
1626 
1627 
1628 
1629  inline MPI_Datatype
1630  mpi_type_id(const unsigned char *)
1631  {
1632  return MPI_UNSIGNED_CHAR;
1633  }
1634 
1635 
1636 
1637  inline MPI_Datatype
1638  mpi_type_id(const unsigned short *)
1639  {
1640  return MPI_UNSIGNED_SHORT;
1641  }
1642 
1643 
1644 
1645  inline MPI_Datatype
1646  mpi_type_id(const unsigned int *)
1647  {
1648  return MPI_UNSIGNED;
1649  }
1650 
1651 
1652 
1653  inline MPI_Datatype
1654  mpi_type_id(const unsigned long int *)
1655  {
1656  return MPI_UNSIGNED_LONG;
1657  }
1658 
1659 
1660 
1661  inline MPI_Datatype
1662  mpi_type_id(const unsigned long long int *)
1663  {
1664  return MPI_UNSIGNED_LONG_LONG;
1665  }
1666 
1667 
1668 
1669  inline MPI_Datatype
1670  mpi_type_id(const float *)
1671  {
1672  return MPI_FLOAT;
1673  }
1674 
1675 
1676 
1677  inline MPI_Datatype
1678  mpi_type_id(const double *)
1679  {
1680  return MPI_DOUBLE;
1681  }
1682 
1683 
1684 
1685  inline MPI_Datatype
1686  mpi_type_id(const long double *)
1687  {
1688  return MPI_LONG_DOUBLE;
1689  }
1690 
1691 
1692 
1693  inline MPI_Datatype
1694  mpi_type_id(const std::complex<float> *)
1695  {
1696  return MPI_COMPLEX;
1697  }
1698 
1699 
1700 
1701  inline MPI_Datatype
1702  mpi_type_id(const std::complex<double> *)
1703  {
1704  return MPI_DOUBLE_COMPLEX;
1705  }
1706 #endif
1707  } // namespace MPIDataTypes
1708  } // namespace internal
1709 
1710 
1711 
1712 #ifdef DEAL_II_WITH_MPI
1730  template <typename T>
1731  const MPI_Datatype mpi_type_id_for_type =
 1732  internal::MPIDataTypes::mpi_type_id(
 1733  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1734 #endif
1735 
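  // A sketch of how mpi_type_id_for_type can be handed to a raw MPI call,
  // assuming `comm` is a valid communicator:
  //
  // @code
  //   std::vector<std::uint64_t> data(100, 0);
  //   const int ierr = MPI_Bcast(data.data(),
  //                              data.size(),
  //                              Utilities::MPI::mpi_type_id_for_type<std::uint64_t>,
  //                              /*root=*/0,
  //                              comm);
  //   AssertThrowMPI(ierr);
  // @endcode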
1736 #ifndef DOXYGEN
1737  namespace internal
1738  {
1739  // declaration for an internal function that lives in mpi.templates.h
1740  template <typename T>
1741  void
1742  all_reduce(const MPI_Op &mpi_op,
1743  const ArrayView<const T> &values,
1744  const MPI_Comm mpi_communicator,
1745  const ArrayView<T> &output);
1746  } // namespace internal
1747 
1748 
1749  template <typename T>
1750  template <typename W, typename G>
1751  Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1752  : wait_function(wait_operation)
1753  , get_and_cleanup_function(get_and_cleanup_operation)
1754  , is_done(false)
1755  , get_was_called(false)
1756  {}
1757 
1758 
1759 
1760  template <typename T>
1761  Future<T>::~Future()
1762  {
1763  // If there is a clean-up function, and if it has not been
1764  // called yet, then do so. Note that we may not have a
1765  // clean-up function (not even an empty one) if the current
1766  // object has been moved from, into another object, and as
1767  // a consequence the std::function objects are now empty
1768  // even though they were initialized in the constructor.
 1769  // (A std::function object whose object is an empty lambda
1770  // function, [](){}, is not an empty std::function object.)
1771  if ((get_was_called == false) && get_and_cleanup_function)
1772  get();
1773  }
1774 
1775 
1776 
1777  template <typename T>
1778  void
1779  Future<T>::wait()
1780  {
1781  if (is_done == false)
1782  {
1783  wait_function();
1784 
1785  is_done = true;
1786  }
1787  }
1788 
1789 
1790  template <typename T>
1791  T
1792  Future<T>::get()
1793  {
1794  Assert(get_was_called == false,
1795  ExcMessage(
1796  "You can't call get() more than once on a Future object."));
1797  get_was_called = true;
1798 
1799  wait();
1800  return get_and_cleanup_function();
1801  }
1802 
1803 
1804 
1805  template <typename T, unsigned int N>
1806  void
1807  sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1808  {
1809  internal::all_reduce(MPI_SUM,
 1810  ArrayView<const T>(values, N),
 1811  mpi_communicator,
1812  ArrayView<T>(sums, N));
1813  }
1814 
1815 
1816 
1817  template <typename T, unsigned int N>
1818  void
1819  max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1820  {
1821  internal::all_reduce(MPI_MAX,
 1822  ArrayView<const T>(values, N),
 1823  mpi_communicator,
1824  ArrayView<T>(maxima, N));
1825  }
1826 
1827 
1828 
1829  template <typename T, unsigned int N>
1830  void
1831  min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1832  {
1833  internal::all_reduce(MPI_MIN,
 1834  ArrayView<const T>(values, N),
 1835  mpi_communicator,
1836  ArrayView<T>(minima, N));
1837  }
1838 
1839 
1840 
1841  template <typename T, unsigned int N>
1842  void
1843  logical_or(const T (&values)[N],
1844  const MPI_Comm mpi_communicator,
1845  T (&results)[N])
1846  {
1847  static_assert(std::is_integral_v<T>,
1848  "The MPI_LOR operation only allows integral data types.");
1849 
1850  internal::all_reduce(MPI_LOR,
 1851  ArrayView<const T>(values, N),
 1852  mpi_communicator,
1853  ArrayView<T>(results, N));
1854  }
1855 
1856 
1857 
1858  template <typename T>
1859  std::map<unsigned int, T>
1860  some_to_some(const MPI_Comm comm,
1861  const std::map<unsigned int, T> &objects_to_send)
1862  {
1863 # ifndef DEAL_II_WITH_MPI
1864  (void)comm;
1865  Assert(objects_to_send.size() < 2,
1866  ExcMessage("Cannot send to more than one processor."));
1867  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1868  objects_to_send.empty(),
1869  ExcMessage("Can only send to myself or to nobody."));
1870  return objects_to_send;
1871 # else
1872  const auto my_proc = this_mpi_process(comm);
1873 
1874  std::map<unsigned int, T> received_objects;
1875 
1876  std::vector<unsigned int> send_to;
1877  send_to.reserve(objects_to_send.size());
1878  for (const auto &m : objects_to_send)
1879  if (m.first == my_proc)
1880  received_objects[my_proc] = m.second;
1881  else
1882  send_to.emplace_back(m.first);
1883 
1884  const unsigned int n_expected_incoming_messages =
 1885  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
 1886 
1887  // Protect the following communication:
1888  static CollectiveMutex mutex;
1889  CollectiveMutex::ScopedLock lock(mutex, comm);
1890 
1891  // If we have something to send, or we expect something from other
1892  // processors, we need to visit one of the two scopes below. Otherwise,
1893  // no other action is required by this mpi process, and we can safely
1894  // return.
1895  if (send_to.empty() && n_expected_incoming_messages == 0)
1896  return received_objects;
1897 
1898  const int mpi_tag =
1900 
1901  // Sending buffers
1902  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1903  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1904  {
1905  unsigned int i = 0;
1906  for (const auto &rank_obj : objects_to_send)
1907  if (rank_obj.first != my_proc)
1908  {
1909  const auto &rank = rank_obj.first;
1910  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1911  /*allow_compression=*/false);
1912  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1913  buffers_to_send[i].size(),
1914  MPI_CHAR,
1915  rank,
1916  mpi_tag,
1917  comm,
1918  &buffer_send_requests[i]);
1919  AssertThrowMPI(ierr);
1920  ++i;
1921  }
1922  }
1923 
1924  // Fill the output map
1925  {
1926  std::vector<char> buffer;
1927  // We do this on a first come/first served basis
1928  for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1929  {
1930  // Probe what's going on. Take data from the first available sender
1931  MPI_Status status;
1932  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1933  AssertThrowMPI(ierr);
1934 
1935  // Length of the message
1936  int len;
1937  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1938  AssertThrowMPI(ierr);
1939  buffer.resize(len);
1940 
1941  // Source rank
1942  const unsigned int rank = status.MPI_SOURCE;
1943 
1944  // Actually receive the message
1945  ierr = MPI_Recv(buffer.data(),
1946  len,
1947  MPI_CHAR,
1948  status.MPI_SOURCE,
1949  status.MPI_TAG,
1950  comm,
1951  MPI_STATUS_IGNORE);
1952  AssertThrowMPI(ierr);
1953  Assert(received_objects.find(rank) == received_objects.end(),
 1954  ExcInternalError(
 1955  "I should not receive again from this rank"));
1956  received_objects[rank] =
1957  Utilities::unpack<T>(buffer,
1958  /*allow_compression=*/false);
1959  }
1960  }
1961 
1962  // Wait to have sent all objects.
1963  const int ierr = MPI_Waitall(send_to.size(),
1964  buffer_send_requests.data(),
1965  MPI_STATUSES_IGNORE);
1966  AssertThrowMPI(ierr);
1967 
1968  return received_objects;
1969 # endif // deal.II with MPI
1970  }
1971 
1972 
1973 
1974  template <typename T>
1975  std::vector<T>
1976  all_gather(const MPI_Comm comm, const T &object)
1977  {
1978  if (job_supports_mpi() == false)
1979  return {object};
1980 
1981 # ifndef DEAL_II_WITH_MPI
1982  (void)comm;
1983  std::vector<T> v(1, object);
1984  return v;
1985 # else
1986  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1987 
1988  std::vector<char> buffer = Utilities::pack(object);
1989 
1990  int n_local_data = buffer.size();
1991 
1992  // Vector to store the size of loc_data_array for every process
1993  std::vector<int> size_all_data(n_procs, 0);
1994 
1995  // Exchanging the size of each buffer
1996  int ierr = MPI_Allgather(
1997  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1998  AssertThrowMPI(ierr);
1999 
2000  // Now computing the displacement, relative to recvbuf,
2001  // at which to store the incoming buffer
2002  std::vector<int> rdispls(n_procs);
2003  rdispls[0] = 0;
2004  for (unsigned int i = 1; i < n_procs; ++i)
2005  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2006 
2007  // Step 3: exchange the buffer:
2008  std::vector<char> received_unrolled_buffer(rdispls.back() +
2009  size_all_data.back());
2010 
2011  ierr = MPI_Allgatherv(buffer.data(),
2012  n_local_data,
2013  MPI_CHAR,
2014  received_unrolled_buffer.data(),
2015  size_all_data.data(),
2016  rdispls.data(),
2017  MPI_CHAR,
2018  comm);
2019  AssertThrowMPI(ierr);
2020 
2021  std::vector<T> received_objects(n_procs);
2022  for (unsigned int i = 0; i < n_procs; ++i)
2023  {
2024  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
2025  rdispls[i],
2026  received_unrolled_buffer.begin() +
2027  rdispls[i] + size_all_data[i]);
2028  received_objects[i] = Utilities::unpack<T>(local_buffer);
2029  }
2030 
2031  return received_objects;
2032 # endif
2033  }
2034 
2035 
2036 
2037  template <typename T>
2038  std::vector<T>
2039  gather(const MPI_Comm comm,
2040  const T &object_to_send,
2041  const unsigned int root_process)
2042  {
2043 # ifndef DEAL_II_WITH_MPI
2044  (void)comm;
2045  (void)root_process;
2046  std::vector<T> v(1, object_to_send);
2047  return v;
2048 # else
2049  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2050  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2051 
2052  AssertIndexRange(root_process, n_procs);
2053 
2054  std::vector<char> buffer = Utilities::pack(object_to_send);
2055  int n_local_data = buffer.size();
2056 
2057  // Vector to store the size of loc_data_array for every process
2058  // only the root process needs to allocate memory for that purpose
2059  std::vector<int> size_all_data;
2060  if (my_rank == root_process)
2061  size_all_data.resize(n_procs, 0);
2062 
2063  // Exchanging the size of each buffer
2064  int ierr = MPI_Gather(&n_local_data,
2065  1,
2066  MPI_INT,
2067  size_all_data.data(),
2068  1,
2069  MPI_INT,
2070  root_process,
2071  comm);
2072  AssertThrowMPI(ierr);
2073 
2074  // Now computing the displacement, relative to recvbuf,
2075  // at which to store the incoming buffer; only for root
2076  std::vector<int> rdispls;
2077  if (my_rank == root_process)
2078  {
2079  rdispls.resize(n_procs, 0);
2080  for (unsigned int i = 1; i < n_procs; ++i)
2081  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2082  }
2083  // exchange the buffer:
2084  std::vector<char> received_unrolled_buffer;
2085  if (my_rank == root_process)
2086  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2087 
2088  ierr = MPI_Gatherv(buffer.data(),
2089  n_local_data,
2090  MPI_CHAR,
2091  received_unrolled_buffer.data(),
2092  size_all_data.data(),
2093  rdispls.data(),
2094  MPI_CHAR,
2095  root_process,
2096  comm);
2097  AssertThrowMPI(ierr);
2098 
2099  std::vector<T> received_objects;
2100 
2101  if (my_rank == root_process)
2102  {
2103  received_objects.resize(n_procs);
2104 
2105  for (unsigned int i = 0; i < n_procs; ++i)
2106  {
2107  const std::vector<char> local_buffer(
2108  received_unrolled_buffer.begin() + rdispls[i],
2109  received_unrolled_buffer.begin() + rdispls[i] +
2110  size_all_data[i]);
2111  received_objects[i] = Utilities::unpack<T>(local_buffer);
2112  }
2113  }
2114  return received_objects;
2115 # endif
2116  }
2117 
2118 
2119 
2120  template <typename T>
2121  T
2122  scatter(const MPI_Comm comm,
2123  const std::vector<T> &objects_to_send,
2124  const unsigned int root_process)
2125  {
2126 # ifndef DEAL_II_WITH_MPI
2127  (void)comm;
2128  (void)root_process;
2129 
2130  AssertDimension(objects_to_send.size(), 1);
2131 
2132  return objects_to_send[0];
2133 # else
2134  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2135  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2136 
2137  AssertIndexRange(root_process, n_procs);
2138  AssertThrow(
2139  (my_rank != root_process && objects_to_send.empty()) ||
2140  objects_to_send.size() == n_procs,
2141  ExcMessage(
 2142  "The number of objects to be scattered must correspond to the number of processes."));
2143 
2144  std::vector<char> send_buffer;
2145  std::vector<int> send_counts;
2146  std::vector<int> send_displacements;
2147 
2148  if (my_rank == root_process)
2149  {
2150  send_counts.resize(n_procs, 0);
2151  send_displacements.resize(n_procs + 1, 0);
2152 
2153  for (unsigned int i = 0; i < n_procs; ++i)
2154  {
2155  const auto packed_data = Utilities::pack(objects_to_send[i]);
2156  send_buffer.insert(send_buffer.end(),
2157  packed_data.begin(),
2158  packed_data.end());
2159  send_counts[i] = packed_data.size();
2160  }
2161 
2162  for (unsigned int i = 0; i < n_procs; ++i)
2163  send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2164  }
2165 
2166  int n_local_data;
2167  int ierr = MPI_Scatter(send_counts.data(),
2168  1,
2169  MPI_INT,
2170  &n_local_data,
2171  1,
2172  MPI_INT,
2173  root_process,
2174  comm);
2175  AssertThrowMPI(ierr);
2176 
2177  std::vector<char> recv_buffer(n_local_data);
2178 
2179  ierr = MPI_Scatterv(send_buffer.data(),
2180  send_counts.data(),
2181  send_displacements.data(),
2182  MPI_CHAR,
2183  recv_buffer.data(),
2184  n_local_data,
2185  MPI_CHAR,
2186  root_process,
2187  comm);
2188  AssertThrowMPI(ierr);
2189 
2190  return Utilities::unpack<T>(recv_buffer);
2191 # endif
2192  }
2193 
2194 
2195  template <typename T>
2196  void
2197  broadcast(T *buffer,
2198  const size_t count,
2199  const unsigned int root,
2200  const MPI_Comm comm)
2201  {
2202 # ifndef DEAL_II_WITH_MPI
2203  (void)buffer;
2204  (void)count;
2205  (void)root;
2206  (void)comm;
2207 # else
2208  Assert(root < n_mpi_processes(comm),
2209  ExcMessage("Invalid root rank specified."));
2210 
2211  // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2212  // iteration:
2213  const size_t max_send_count = std::numeric_limits<signed int>::max();
2214 
2215  size_t total_sent_count = 0;
2216  while (total_sent_count < count)
2217  {
2218  const size_t current_count =
2219  std::min(count - total_sent_count, max_send_count);
2220 
2221  const int ierr = MPI_Bcast(buffer + total_sent_count,
2222  current_count,
2223  mpi_type_id_for_type<decltype(*buffer)>,
2224  root,
2225  comm);
2226  AssertThrowMPI(ierr);
2227  total_sent_count += current_count;
2228  }
2229 # endif
2230  }
2231 
2232 
2233 
2234  template <typename T>
2235  std::enable_if_t<is_mpi_type<T> == false, T>
2236  broadcast(const MPI_Comm comm,
2237  const T &object_to_send,
2238  const unsigned int root_process)
2239  {
2240 # ifndef DEAL_II_WITH_MPI
2241  (void)comm;
2242  (void)root_process;
2243  return object_to_send;
2244 # else
2245  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2246  AssertIndexRange(root_process, n_procs);
2247  (void)n_procs;
2248 
2249  std::vector<char> buffer;
2250  std::size_t buffer_size = numbers::invalid_size_type;
2251 
2252  // On the root process, pack the data and determine what the
2253  // buffer size needs to be.
2254  if (this_mpi_process(comm) == root_process)
2255  {
2256  buffer = Utilities::pack(object_to_send, false);
2257  buffer_size = buffer.size();
2258  }
2259 
2260  // Exchange the size of buffer
2261  int ierr = MPI_Bcast(&buffer_size,
2262  1,
2263  mpi_type_id_for_type<decltype(buffer_size)>,
2264  root_process,
2265  comm);
2266  AssertThrowMPI(ierr);
2267 
2268  // If not on the root process, correctly size the buffer to
2269  // receive the data, then do exactly that.
2270  if (this_mpi_process(comm) != root_process)
2271  buffer.resize(buffer_size);
2272 
2273  broadcast(buffer.data(), buffer_size, root_process, comm);
2274 
2275  if (Utilities::MPI::this_mpi_process(comm) == root_process)
2276  return object_to_send;
2277  else
2278  return Utilities::unpack<T>(buffer, false);
2279 # endif
2280  }
2281 
2282 
2283 
2284  template <typename T>
2285  std::enable_if_t<is_mpi_type<T> == true, T>
2286  broadcast(const MPI_Comm comm,
2287  const T &object_to_send,
2288  const unsigned int root_process)
2289  {
2290 # ifndef DEAL_II_WITH_MPI
2291  (void)comm;
2292  (void)root_process;
2293  return object_to_send;
2294 # else
2295 
2296  T object = object_to_send;
2297  int ierr =
2298  MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2299  AssertThrowMPI(ierr);
2300 
2301  return object;
2302 # endif
2303  }
2304 
2305 
2306  template <typename T>
2307  Future<void>
2308  isend(const T &object,
2309  MPI_Comm communicator,
2310  const unsigned int target_rank,
2311  const unsigned int mpi_tag)
2312  {
2313 # ifndef DEAL_II_WITH_MPI
2314  Assert(false, ExcNeedsMPI());
2315  (void)object;
2316  (void)communicator;
2317  (void)target_rank;
2318  (void)mpi_tag;
2319  return Future<void>([]() {}, []() {});
2320 # else
2321  // Create a pointer to a send buffer into which we pack the object
2322  // to be sent. The buffer will be released by the Future object once
2323  // the send has been verified to have succeeded.
2324  //
2325  // Conceptually, we would like this send buffer to be a
2326  // std::unique_ptr object whose ownership is later handed over
2327  // to the cleanup function. That has the disadvantage that the
2328  // cleanup object is a non-copyable lambda capture, leading to
2329  // awkward semantics. Instead, we use a std::shared_ptr; we move
2330  // this shared pointer into the cleanup function, which means
 2331  // that there is exactly one shared pointer that owns the buffer
2332  // at any given time, though the latter is not an important
2333  // optimization.
2334  std::shared_ptr<std::vector<char>> send_buffer =
2335  std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2336 
2337  // Now start the send, and store the result in a request object that
2338  // we can then wait for later:
2339  MPI_Request request;
2340  const int ierr =
2341  MPI_Isend(send_buffer->data(),
2342  send_buffer->size(),
2343  mpi_type_id_for_type<decltype(*send_buffer->data())>,
2344  target_rank,
2345  mpi_tag,
2346  communicator,
2347  &request);
2348  AssertThrowMPI(ierr);
2349 
2350  // Then return a std::future-like object that has a wait()
2351  // function one can use to wait for the communication to finish,
2352  // and that has a cleanup function to be called at some point
2353  // after that makes sure the send buffer gets deallocated. This
2354  // cleanup function takes over ownership of the send buffer.
2355  //
2356  // Note that the body of the lambda function of the clean-up
2357  // function could be left empty. If that were so, once the
2358  // lambda function object goes out of scope, the 'send_buffer'
2359  // member of the closure object goes out of scope as well and so
2360  // the send_buffer is destroyed. But we may want to release the
2361  // buffer itself as early as possible, and so we clear the
2362  // buffer when the Future::get() function is called.
2363  auto wait = [request]() mutable {
2364  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2365  AssertThrowMPI(ierr);
2366  };
2367  auto cleanup = [send_buffer = std::move(send_buffer)]() {
2368  send_buffer->clear();
2369  };
2370  return Future<void>(wait, cleanup);
2371 # endif
2372  }
2373 
2374 
2375 
2376  template <typename T>
2377  Future<T>
2378  irecv(MPI_Comm communicator,
2379  const unsigned int source_rank,
2380  const unsigned int mpi_tag)
2381  {
2382 # ifndef DEAL_II_WITH_MPI
2383  Assert(false, ExcNeedsMPI());
2384  (void)communicator;
2385  (void)source_rank;
2386  (void)mpi_tag;
 2387  return Future<T>([]() {}, []() { return T{}; });
2388 # else
2389  // Use a 'probe' operation for the 'wait' operation of the
2390  // Future this function returns. It will trigger whenever we get
2391  // the incoming message. Later, once we have received the message, we
2392  // can query its size and allocate a receiver buffer.
2393  //
2394  // Since we may be waiting for multiple messages from the same
2395  // incoming process (with possibly the same tag -- we can't
2396  // know), we must make sure that the 'probe' operation we have
2397  // here (and which we use to determine the buffer size) matches
2398  // the 'recv' operation with which we actually get the data
2399  // later on. This is exactly what the 'MPI_Mprobe' function and
 2400  // its 'I'mmediate variant are there for, coupled with the
 2401  // 'MPI_Mrecv' call that we put into the clean-up function
2402  // below.
2403  std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2404  std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2405 
2406  auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2407  const int ierr = MPI_Mprobe(
2408  source_rank, mpi_tag, communicator, message.get(), status.get());
2409  AssertThrowMPI(ierr);
2410  };
2411 
2412 
2413  // Now also define the function that actually gets the data:
2414  auto get = [status, message]() {
2415  int number_amount;
2416  int ierr;
2417  ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2418  AssertThrowMPI(ierr);
2419 
2420  std::vector<char> receive_buffer(number_amount);
2421 
2422  // Then actually get the data, using the matching MPI_Mrecv to the above
2423  // MPI_Mprobe:
2424  ierr = MPI_Mrecv(receive_buffer.data(),
2425  number_amount,
2426  mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2427  message.get(),
2428  status.get());
2429  AssertThrowMPI(ierr);
2430 
2431  // Return the unpacked object:
2432  return Utilities::unpack<T>(receive_buffer, false);
2433  };
2434 
2435  return Future<T>(wait, get);
2436 # endif
2437  }
2438 
2439 
2440 
2441 # ifdef DEAL_II_WITH_MPI
2442  template <class Iterator, typename Number>
2443  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2444  mean_and_standard_deviation(const Iterator begin,
2445  const Iterator end,
2446  const MPI_Comm comm)
2447  {
 2448  // Below we use a simple and straightforward implementation. More elaborate
2449  // options are:
2450  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2451  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2452  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2453  using Std = typename numbers::NumberTraits<Number>::real_type;
2454  const Number sum = std::accumulate(begin, end, Number(0.));
2455 
2456  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2457  Assert(size > 0, ExcDivideByZero());
2458  const Number mean =
2459  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2460  Std sq_sum = 0.;
2461  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
 2462  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
 2463  });
2464  sq_sum = Utilities::MPI::sum(sq_sum, comm);
2465  return std::make_pair(mean,
2466  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2467  }
2468 # endif
2469 
2470 #endif
2471  } // end of namespace MPI
2472 } // end of namespace Utilities
2473 
2474 
 2475 DEAL_II_NAMESPACE_CLOSE
 2476 
2477 #endif