Reference documentation for deal.II version GIT relicensing-422-gb369f187d8 2024-04-17 18:10:02+00:00
mpi.h
Go to the documentation of this file.
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_h
16#define dealii_mpi_h
17
18#include <deal.II/base/config.h>
19
20#include <deal.II/base/array_view.h>
21#include <deal.II/base/exceptions.h>
22#include <deal.II/base/mpi_stub.h>
23#include <deal.II/base/mpi_tags.h>
24#include <deal.II/base/numbers.h>
25#include <deal.II/base/template_constraints.h>
26#include <deal.II/base/utilities.h>
27
28#include <complex>
29#include <limits>
30#include <map>
31#include <numeric>
32#include <vector>
33
34
35
49#ifdef DEAL_II_WITH_MPI
50# define DEAL_II_MPI_CONST_CAST(expr) (expr)
51#endif
52
53
54
55DEAL_II_NAMESPACE_OPEN
56
57
58// Forward type declarations to allow MPI sums over tensorial types
59#ifndef DOXYGEN
60template <int rank, int dim, typename Number>
61class Tensor;
62template <int rank, int dim, typename Number>
63class SymmetricTensor;
64template <typename Number>
65class SparseMatrix;
66class IndexSet;
67#endif
68
69namespace Utilities
70{
83 IndexSet
84 create_evenly_distributed_partitioning(
85 const unsigned int my_partition_id,
86 const unsigned int n_partitions,
87 const types::global_dof_index total_size);
88
96 namespace MPI
97 {
106 template <typename T>
107 constexpr bool is_mpi_type = is_same_as_any_of<T,
108 char,
109 signed short,
110 signed int,
111 signed long,
112 signed long long,
113 signed char,
114 unsigned char,
115 unsigned short,
116 unsigned int,
117 unsigned long int,
118 unsigned long long,
119 float,
120 double,
121 long double,
122 bool,
123 std::complex<float>,
124 std::complex<double>,
125 std::complex<long double>,
126 wchar_t>::value;
127
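// Illustrative sketch: is_mpi_type can be queried at compile time, for
// example to check whether a type maps to a built-in MPI datatype or has to
// be packed/serialized instead.
//
//   static_assert(Utilities::MPI::is_mpi_type<double>, "built-in MPI type");
//   static_assert(!Utilities::MPI::is_mpi_type<std::string>,
//                 "needs to be serialized");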
136 unsigned int
137 n_mpi_processes(const MPI_Comm mpi_communicator);
138
147 unsigned int
148 this_mpi_process(const MPI_Comm mpi_communicator);
149
154 std::vector<unsigned int>
155 mpi_processes_within_communicator(const MPI_Comm comm_large,
156 const MPI_Comm comm_small);
157
179 std::vector<unsigned int>
180 compute_point_to_point_communication_pattern(
181 const MPI_Comm mpi_comm,
182 const std::vector<unsigned int> &destinations);
183
203 unsigned int
204 compute_n_point_to_point_communications(
205 const MPI_Comm mpi_comm,
206 const std::vector<unsigned int> &destinations);
207
224 MPI_Comm
225 duplicate_communicator(const MPI_Comm mpi_communicator);
226
236 void
237 free_communicator(MPI_Comm mpi_communicator);
238
251 class DuplicatedCommunicator
252 {
253 public:
257 explicit DuplicatedCommunicator(const MPI_Comm communicator)
258 : comm(duplicate_communicator(communicator))
259 {}
260
264 DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
265
269 ~DuplicatedCommunicator();
273
277 MPI_Comm
278 operator*() const
279 {
280 return comm;
281 }
282
283
287 DuplicatedCommunicator &
288 operator=(const DuplicatedCommunicator &) = delete;
289
290 private:
294 MPI_Comm comm;
295 };
296
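// Illustrative sketch, assuming an MPI job on MPI_COMM_WORLD: the wrapper
// duplicates the communicator on construction and frees the duplicate again
// when it goes out of scope.
//
//   {
//     Utilities::MPI::DuplicatedCommunicator duplicated(MPI_COMM_WORLD);
//     const MPI_Comm comm = *duplicated; // private duplicate for this scope
//     // ... communicate on 'comm' without interfering with other users ...
//   }                                    // duplicate freed here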
297
298
328 class CollectiveMutex
329 {
330 public:
336 class ScopedLock
337 {
338 public:
342 ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
343 : mutex(mutex)
344 , comm(comm)
345 {
346 mutex.lock(comm);
347 }
348
352 ~ScopedLock()
353 {
354 mutex.unlock(comm);
355 }
356
357 private:
361 CollectiveMutex &mutex;
365 const MPI_Comm comm;
366 };
367
371 CollectiveMutex();
372
376 ~CollectiveMutex();
377
384 void
385 lock(const MPI_Comm comm);
386
393 void
394 unlock(const MPI_Comm comm);
395
396 private:
400 bool locked;
401
405 MPI_Request request;
406 };
407
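// Illustrative sketch, assuming 'comm' is a valid communicator: a ScopedLock
// acquires the mutex on all processes of 'comm' for the duration of a
// critical communication section and releases it when the scope ends.
//
//   static Utilities::MPI::CollectiveMutex mutex;
//   {
//     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
//     // ... point-to-point communication that must not get mixed up with
//     //     messages from other code paths using the same communicator ...
//   }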
408
409
456 template <typename T>
457 class Future
458 {
459 public:
464 template <typename W, typename G>
465 Future(W &&wait_operation, G &&get_and_cleanup_operation);
466
472 Future(const Future &) = delete;
473
477 Future(Future &&) noexcept = default;
478
482 ~Future();
483
489 Future &
490 operator=(const Future &) = delete;
491
495 Future &
496 operator=(Future &&) noexcept = default;
497
505 void
506 wait();
507
519 T
520 get();
521
522 private:
526 std::function<void()> wait_function;
527 std::function<T()> get_and_cleanup_function;
528
532 bool is_done;
533
537 bool get_was_called;
538 };
539
540
541
571#ifdef DEAL_II_WITH_MPI
572 DEAL_II_DEPRECATED int
573 create_group(const MPI_Comm comm,
574 const MPI_Group &group,
575 const int tag,
576 MPI_Comm *new_comm);
577#endif
578
587 std::vector<IndexSet>
588 create_ascending_partitioning(
589 const MPI_Comm comm,
590 const types::global_dof_index locally_owned_size);
591
599 IndexSet
600 create_evenly_distributed_partitioning(
601 const MPI_Comm comm,
602 const types::global_dof_index total_size);
603
604#ifdef DEAL_II_WITH_MPI
620 template <class Iterator, typename Number = long double>
621 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
622 mean_and_standard_deviation(const Iterator begin,
623 const Iterator end,
624 const MPI_Comm comm);
625#endif
626
627
675 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
676 create_mpi_data_type_n_bytes(const std::size_t n_bytes);
677
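// Illustrative sketch, assuming 'comm', 'dest_rank', and 'tag' exist: the
// returned datatype describes a contiguous block of n_bytes bytes, so a
// single element of it can be communicated even if the byte count exceeds
// what MPI's int-typed count arguments can express; the custom deleter frees
// the datatype automatically.
//
//   std::vector<char> buffer(std::size_t(1) << 32); // larger than INT_MAX
//   const auto chunk_type =
//     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
//   // MPI_Send(buffer.data(), 1, *chunk_type, dest_rank, tag, comm);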
697 template <typename T>
698 T
699 sum(const T &t, const MPI_Comm mpi_communicator);
700
710 template <typename T, typename U>
711 void
712 sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
713
723 template <typename T>
724 void
725 sum(const ArrayView<const T> &values,
726 const MPI_Comm mpi_communicator,
727 const ArrayView<T> &sums);
728
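// Illustrative sketch, assuming an MPI job on MPI_COMM_WORLD: every process
// passes its local contribution and receives the sum over all processes,
// either for a single value or element-wise for a container.
//
//   const double local_value =
//     1.0 * Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
//   const double global_sum =
//     Utilities::MPI::sum(local_value, MPI_COMM_WORLD);
//
//   std::vector<unsigned int> local_counts(3, 1);
//   std::vector<unsigned int> global_counts(3);
//   Utilities::MPI::sum(local_counts, MPI_COMM_WORLD, global_counts);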
734 template <int rank, int dim, typename Number>
735 SymmetricTensor<rank, dim, Number>
736 sum(const SymmetricTensor<rank, dim, Number> &local,
737 const MPI_Comm mpi_communicator);
738
744 template <int rank, int dim, typename Number>
745 Tensor<rank, dim, Number>
746 sum(const Tensor<rank, dim, Number> &local,
747 const MPI_Comm mpi_communicator);
748
757 template <typename Number>
758 void
759 sum(const SparseMatrix<Number> &local,
760 const MPI_Comm mpi_communicator,
761 SparseMatrix<Number> &global);
762
782 template <typename T>
783 T
784 max(const T &t, const MPI_Comm mpi_communicator);
785
795 template <typename T, typename U>
796 void
797 max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
798
808 template <typename T>
809 void
810 max(const ArrayView<const T> &values,
811 const MPI_Comm mpi_communicator,
812 const ArrayView<T> &maxima);
813
833 template <typename T>
834 T
835 min(const T &t, const MPI_Comm mpi_communicator);
836
846 template <typename T, typename U>
847 void
848 min(const T &values, const MPI_Comm mpi_communicator, U &minima);
849
859 template <typename T>
860 void
861 min(const ArrayView<const T> &values,
862 const MPI_Comm mpi_communicator,
863 const ArrayView<T> &minima);
864
888 template <typename T>
889 T
890 logical_or(const T &t, const MPI_Comm mpi_communicator);
891
906 template <typename T, typename U>
907 void
908 logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
909
919 template <typename T>
920 void
921 logical_or(const ArrayView<const T> &values,
922 const MPI_Comm mpi_communicator,
923 const ArrayView<T> &results);
924
939 struct MinMaxAvg
940 {
945 double sum;
946
951 double min;
952
957 double max;
958
967 unsigned int min_index;
968
977 unsigned int max_index;
978
983 double avg;
984 };
985
1000 MinMaxAvg
1001 min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
1002
1014 std::vector<MinMaxAvg>
1015 min_max_avg(const std::vector<double> &my_value,
1016 const MPI_Comm mpi_communicator);
1017
1018
1031 void
1032 min_max_avg(const ArrayView<const double> &my_values,
1033 const ArrayView<MinMaxAvg> &result,
1034 const MPI_Comm mpi_communicator);
1035
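// Illustrative sketch, assuming an MPI job on MPI_COMM_WORLD: a single call
// yields sum, minimum, maximum, average, and the ranks holding the extrema
// of a per-process number.
//
//   const double local_value =
//     1.0 * Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(local_value, MPI_COMM_WORLD);
//   // stats.avg, stats.min, stats.max, stats.max_index, ... are now
//   // identical on all processes.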
1036
1080 class MPI_InitFinalize
1081 {
1082 public:
1128 MPI_InitFinalize(
1129 int &argc,
1130 char **&argv,
1131 const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1132
1137 ~MPI_InitFinalize();
1138 };
1139
1151 bool
1152 job_supports_mpi();
1153
1171 template <typename T>
1172 std::map<unsigned int, T>
1173 some_to_some(const MPI_Comm comm,
1174 const std::map<unsigned int, T> &objects_to_send);
1175
1189 template <typename T>
1190 std::vector<T>
1191 all_gather(const MPI_Comm comm, const T &object_to_send);
1192
1208 template <typename T>
1209 std::vector<T>
1210 gather(const MPI_Comm comm,
1211 const T &object_to_send,
1212 const unsigned int root_process = 0);
1213
1228 template <typename T>
1229 T
1230 scatter(const MPI_Comm comm,
1231 const std::vector<T> &objects_to_send,
1232 const unsigned int root_process = 0);
1233
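// Illustrative sketch, assuming an MPI job on MPI_COMM_WORLD and a type that
// Utilities::pack() can serialize: gather() collects one object per process
// on the root (the returned vector is empty elsewhere), and scatter() hands
// one object per process back out.
//
//   const std::string local =
//     "rank " + std::to_string(Utilities::MPI::this_mpi_process(MPI_COMM_WORLD));
//   const std::vector<std::string> all =
//     Utilities::MPI::gather(MPI_COMM_WORLD, local, /*root_process=*/0);
//   const std::string mine =
//     Utilities::MPI::scatter(MPI_COMM_WORLD, all, /*root_process=*/0);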
1269 template <typename T>
1270 std::enable_if_t<is_mpi_type<T> == false, T>
1271 broadcast(const MPI_Comm comm,
1272 const T &object_to_send,
1273 const unsigned int root_process = 0);
1274
1297 template <typename T>
1298 std::enable_if_t<is_mpi_type<T> == true, T>
1299 broadcast(const MPI_Comm comm,
1300 const T &object_to_send,
1301 const unsigned int root_process = 0);
1302
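// Illustrative sketch, assuming 'comm' is a valid communicator: the root
// process fills the object and broadcast() returns the root's copy on every
// process, serializing it if T is not a built-in MPI type.
//
//   std::map<std::string, double> parameters;
//   if (Utilities::MPI::this_mpi_process(comm) == 0)
//     parameters["viscosity"] = 1e-3;
//   parameters = Utilities::MPI::broadcast(comm, parameters, /*root_process=*/0);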
1319 template <typename T>
1320 void
1321 broadcast(T *buffer,
1322 const size_t count,
1323 const unsigned int root,
1324 const MPI_Comm comm);
1325
1338 template <typename T>
1339 T
1340 reduce(const T &local_value,
1341 const MPI_Comm comm,
1342 const std::function<T(const T &, const T &)> &combiner,
1343 const unsigned int root_process = 0);
1344
1345
1357 template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
1358 std::pair<T, T>
1359 partial_and_total_sum(const T &value, const MPI_Comm comm);
1360
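// Illustrative sketch, assuming 'comm' is a valid communicator: the call
// returns the exclusive prefix sum over the ranks (zero on rank 0) together
// with the total sum, which is a common way to compute index offsets.
//
//   const unsigned int n_locally_owned = 10; // e.g. locally owned elements
//   const auto [my_offset, n_global] =
//     Utilities::MPI::partial_and_total_sum(n_locally_owned, comm);
//   // this process is responsible for [my_offset, my_offset + n_locally_owned)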
1361
1371 template <typename T>
1372 T
1373 all_reduce(const T &local_value,
1374 const MPI_Comm comm,
1375 const std::function<T(const T &, const T &)> &combiner);
1376
1377
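// Illustrative sketch, assuming 'comm' is a valid communicator and that all
// processes pass vectors of equal size: all_reduce() applies a user-defined
// associative combiner, here an element-wise maximum, and returns the result
// on every process.
//
//   std::vector<int> local = {1, 2, 3};
//   const std::vector<int> elementwise_max =
//     Utilities::MPI::all_reduce<std::vector<int>>(
//       local, comm, [](const std::vector<int> &a, const std::vector<int> &b) {
//         std::vector<int> c(a.size());
//         for (unsigned int i = 0; i < a.size(); ++i)
//           c[i] = std::max(a[i], b[i]);
//         return c;
//       });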
1398 template <typename T>
1399 Future<void>
1400 isend(const T &object,
1401 MPI_Comm communicator,
1402 const unsigned int target_rank,
1403 const unsigned int mpi_tag = 0);
1404
1405
1422 template <typename T>
1423 Future<T>
1424 irecv(MPI_Comm communicator,
1425 const unsigned int source_rank,
1426 const unsigned int mpi_tag = 0);
1427
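// Illustrative sketch, assuming 'comm' is a valid communicator with at least
// two processes: both calls return immediately; Future::wait() blocks until
// the operation has completed, and Future::get() additionally returns the
// received object.
//
//   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
//   if (my_rank == 0)
//     {
//       auto future = Utilities::MPI::isend(std::vector<double>{1., 2., 3.},
//                                           comm,
//                                           /*target_rank=*/1);
//       // ... do other work ...
//       future.wait(); // send has completed, buffer may be released
//     }
//   else if (my_rank == 1)
//     {
//       auto future =
//         Utilities::MPI::irecv<std::vector<double>>(comm, /*source_rank=*/0);
//       const std::vector<double> data = future.get();
//     }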
1428
1471 std::vector<unsigned int>
1472 compute_index_owner(const IndexSet &owned_indices,
1473 const IndexSet &indices_to_look_up,
1474 const MPI_Comm comm);
1475
1483 template <typename T>
1484 std::vector<T>
1485 compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1486
1490 template <typename T>
1491 std::set<T>
1492 compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1493
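// Illustrative sketch, assuming 'comm' is a valid communicator: every
// process contributes a local collection and all processes receive the union
// of all contributions.
//
//   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
//   const std::vector<unsigned int> local = {my_rank, 42u};
//   const std::vector<unsigned int> global =
//     Utilities::MPI::compute_set_union(local, comm);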
1494
1495
1496 /* --------------------------- inline functions ------------------------- */
1497
1498 namespace internal
1499 {
1505 namespace MPIDataTypes
1506 {
1507#ifdef DEAL_II_WITH_MPI
1508 inline MPI_Datatype
1509 mpi_type_id(const bool *)
1510 {
1511 return MPI_CXX_BOOL;
1512 }
1513
1514
1515
1516 inline MPI_Datatype
1517 mpi_type_id(const char *)
1518 {
1519 return MPI_CHAR;
1520 }
1521
1522
1523
1524 inline MPI_Datatype
1525 mpi_type_id(const signed char *)
1526 {
1527 return MPI_SIGNED_CHAR;
1528 }
1529
1530
1531
1532 inline MPI_Datatype
1533 mpi_type_id(const wchar_t *)
1534 {
1535 return MPI_WCHAR;
1536 }
1537
1538
1539
1540 inline MPI_Datatype
1541 mpi_type_id(const short *)
1542 {
1543 return MPI_SHORT;
1544 }
1545
1546
1547
1548 inline MPI_Datatype
1549 mpi_type_id(const int *)
1550 {
1551 return MPI_INT;
1552 }
1553
1554
1555
1556 inline MPI_Datatype
1557 mpi_type_id(const long int *)
1558 {
1559 return MPI_LONG;
1560 }
1561
1562
1563
1564 inline MPI_Datatype
1565 mpi_type_id(const long long int *)
1566 {
1567 return MPI_LONG_LONG;
1568 }
1569
1570
1571
1572 inline MPI_Datatype
1573 mpi_type_id(const unsigned char *)
1574 {
1575 return MPI_UNSIGNED_CHAR;
1576 }
1577
1578
1579
1580 inline MPI_Datatype
1581 mpi_type_id(const unsigned short *)
1582 {
1583 return MPI_UNSIGNED_SHORT;
1584 }
1585
1586
1587
1588 inline MPI_Datatype
1589 mpi_type_id(const unsigned int *)
1590 {
1591 return MPI_UNSIGNED;
1592 }
1593
1594
1595
1596 inline MPI_Datatype
1597 mpi_type_id(const unsigned long int *)
1598 {
1599 return MPI_UNSIGNED_LONG;
1600 }
1601
1602
1603
1604 inline MPI_Datatype
1605 mpi_type_id(const unsigned long long int *)
1606 {
1607 return MPI_UNSIGNED_LONG_LONG;
1608 }
1609
1610
1611
1612 inline MPI_Datatype
1613 mpi_type_id(const float *)
1614 {
1615 return MPI_FLOAT;
1616 }
1617
1618
1619
1620 inline MPI_Datatype
1621 mpi_type_id(const double *)
1622 {
1623 return MPI_DOUBLE;
1624 }
1625
1626
1627
1628 inline MPI_Datatype
1629 mpi_type_id(const long double *)
1630 {
1631 return MPI_LONG_DOUBLE;
1632 }
1633
1634
1635
1636 inline MPI_Datatype
1637 mpi_type_id(const std::complex<float> *)
1638 {
1639 return MPI_COMPLEX;
1640 }
1641
1642
1643
1644 inline MPI_Datatype
1645 mpi_type_id(const std::complex<double> *)
1646 {
1647 return MPI_DOUBLE_COMPLEX;
1648 }
1649#endif
1650 } // namespace MPIDataTypes
1651 } // namespace internal
1652
1653
1654
1655#ifdef DEAL_II_WITH_MPI
1673 template <typename T>
1674 inline const MPI_Datatype mpi_type_id_for_type =
1675 internal::MPIDataTypes::mpi_type_id(
1676 static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1677#endif
1678
1679#ifndef DOXYGEN
1680 namespace internal
1681 {
1682 // declaration for an internal function that lives in mpi.templates.h
1683 template <typename T>
1684 void
1685 all_reduce(const MPI_Op &mpi_op,
1686 const ArrayView<const T> &values,
1687 const MPI_Comm mpi_communicator,
1688 const ArrayView<T> &output);
1689 } // namespace internal
1690
1691
1692 template <typename T>
1693 template <typename W, typename G>
1694 Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1695 : wait_function(wait_operation)
1696 , get_and_cleanup_function(get_and_cleanup_operation)
1697 , is_done(false)
1698 , get_was_called(false)
1699 {}
1700
1701
1702
1703 template <typename T>
1704 Future<T>::~Future()
1705 {
1706 // If there is a clean-up function, and if it has not been
1707 // called yet, then do so. Note that we may not have a
1708 // clean-up function (not even an empty one) if the current
1709 // object has been moved from, into another object, and as
1710 // a consequence the std::function objects are now empty
1711 // even though they were initialized in the constructor.
1712 // (A std::function object whose object is an empty lambda
1713 // function, [](){}, is not an empty std::function object.)
1714 if ((get_was_called == false) && get_and_cleanup_function)
1715 get();
1716 }
1717
1718
1719
1720 template <typename T>
1721 void
1722 Future<T>::wait()
1723 {
1724 if (is_done == false)
1725 {
1726 wait_function();
1727
1728 is_done = true;
1729 }
1730 }
1731
1732
1733 template <typename T>
1734 T
1735 Future<T>::get()
1736 {
1737 Assert(get_was_called == false,
1738 ExcMessage(
1739 "You can't call get() more than once on a Future object."));
1740 get_was_called = true;
1741
1742 wait();
1743 return get_and_cleanup_function();
1744 }
1745
1746
1747
1748 template <typename T, unsigned int N>
1749 void
1750 sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1751 {
1752 internal::all_reduce(MPI_SUM,
1753 ArrayView<const T>(values, N),
1754 mpi_communicator,
1755 ArrayView<T>(sums, N));
1756 }
1757
1758
1759
1760 template <typename T, unsigned int N>
1761 void
1762 max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1763 {
1764 internal::all_reduce(MPI_MAX,
1765 ArrayView<const T>(values, N),
1766 mpi_communicator,
1767 ArrayView<T>(maxima, N));
1768 }
1769
1770
1771
1772 template <typename T, unsigned int N>
1773 void
1774 min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1775 {
1776 internal::all_reduce(MPI_MIN,
1777 ArrayView<const T>(values, N),
1778 mpi_communicator,
1779 ArrayView<T>(minima, N));
1780 }
1781
1782
1783
1784 template <typename T, unsigned int N>
1785 void
1786 logical_or(const T (&values)[N],
1787 const MPI_Comm mpi_communicator,
1788 T (&results)[N])
1789 {
1790 static_assert(std::is_integral_v<T>,
1791 "The MPI_LOR operation only allows integral data types.");
1792
1793 internal::all_reduce(MPI_LOR,
1794 ArrayView<const T>(values, N),
1795 mpi_communicator,
1796 ArrayView<T>(results, N));
1797 }
1798
1799
1800
1801 template <typename T>
1802 std::map<unsigned int, T>
1803 some_to_some(const MPI_Comm comm,
1804 const std::map<unsigned int, T> &objects_to_send)
1805 {
1806# ifndef DEAL_II_WITH_MPI
1807 (void)comm;
1808 Assert(objects_to_send.size() < 2,
1809 ExcMessage("Cannot send to more than one processor."));
1810 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1811 objects_to_send.empty(),
1812 ExcMessage("Can only send to myself or to nobody."));
1813 return objects_to_send;
1814# else
1815 const auto my_proc = this_mpi_process(comm);
1816
1817 std::map<unsigned int, T> received_objects;
1818
1819 std::vector<unsigned int> send_to;
1820 send_to.reserve(objects_to_send.size());
1821 for (const auto &m : objects_to_send)
1822 if (m.first == my_proc)
1823 received_objects[my_proc] = m.second;
1824 else
1825 send_to.emplace_back(m.first);
1826
1827 const unsigned int n_expected_incoming_messages =
1828 Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1829
1830 // Protect the following communication:
1831 static CollectiveMutex mutex;
1832 CollectiveMutex::ScopedLock lock(mutex, comm);
1833
1834 // If we have something to send, or we expect something from other
1835 // processors, we need to visit one of the two scopes below. Otherwise,
1836 // no other action is required by this mpi process, and we can safely
1837 // return.
1838 if (send_to.empty() && n_expected_incoming_messages == 0)
1839 return received_objects;
1840
1841 const int mpi_tag =
1842 internal::Tags::compute_point_to_point_communication_pattern;
1843
1844 // Sending buffers
1845 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1846 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1847 {
1848 unsigned int i = 0;
1849 for (const auto &rank_obj : objects_to_send)
1850 if (rank_obj.first != my_proc)
1851 {
1852 const auto &rank = rank_obj.first;
1853 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1854 /*allow_compression=*/false);
1855 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1856 buffers_to_send[i].size(),
1857 MPI_CHAR,
1858 rank,
1859 mpi_tag,
1860 comm,
1861 &buffer_send_requests[i]);
1862 AssertThrowMPI(ierr);
1863 ++i;
1864 }
1865 }
1866
1867 // Fill the output map
1868 {
1869 std::vector<char> buffer;
1870 // We do this on a first come/first served basis
1871 for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1872 {
1873 // Probe what's going on. Take data from the first available sender
1874 MPI_Status status;
1875 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1876 AssertThrowMPI(ierr);
1877
1878 // Length of the message
1879 int len;
1880 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1881 AssertThrowMPI(ierr);
1882 buffer.resize(len);
1883
1884 // Source rank
1885 const unsigned int rank = status.MPI_SOURCE;
1886
1887 // Actually receive the message
1888 ierr = MPI_Recv(buffer.data(),
1889 len,
1890 MPI_CHAR,
1891 status.MPI_SOURCE,
1892 status.MPI_TAG,
1893 comm,
1894 MPI_STATUS_IGNORE);
1895 AssertThrowMPI(ierr);
1896 Assert(received_objects.find(rank) == received_objects.end(),
1898 "I should not receive again from this rank"));
1899 received_objects[rank] =
1900 Utilities::unpack<T>(buffer,
1901 /*allow_compression=*/false);
1902 }
1903 }
1904
1905 // Wait to have sent all objects.
1906 const int ierr = MPI_Waitall(send_to.size(),
1907 buffer_send_requests.data(),
1908 MPI_STATUSES_IGNORE);
1909 AssertThrowMPI(ierr);
1910
1911 return received_objects;
1912# endif // deal.II with MPI
1913 }
1914
1915
1916
1917 template <typename T, typename>
1918 std::pair<T, T>
1919 partial_and_total_sum(const T &value, const MPI_Comm comm)
1920 {
1921# ifndef DEAL_II_WITH_MPI
1922 (void)comm;
1923 return {0, value};
1924# else
1925 if (n_mpi_processes(comm) == 1)
1926 return {0, value};
1927 else
1928 {
1929 T prefix = {};
1930
1931 // First obtain every process's prefix sum:
1932 int ierr =
1933 MPI_Exscan(&value,
1934 &prefix,
1935 1,
1936 Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
1937 MPI_SUM,
1938 comm);
1939 AssertThrowMPI(ierr);
1940
1941 // Then we also need the total sum. We could obtain it by
1942 // calling Utilities::MPI::sum(), but it is cheaper if we
1943 // broadcast it from the last process, which can compute it
1944 // from its own prefix sum plus its own value.
1945 const T sum = Utilities::MPI::broadcast(
1946 comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
1947
1948 return {prefix, sum};
1949 }
1950# endif
1951 }
1952
1953
1954
1955 template <typename T>
1956 std::vector<T>
1957 all_gather(const MPI_Comm comm, const T &object)
1958 {
1959 if (job_supports_mpi() == false)
1960 return {object};
1961
1962# ifndef DEAL_II_WITH_MPI
1963 (void)comm;
1964 std::vector<T> v(1, object);
1965 return v;
1966# else
1967 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1968
1969 std::vector<char> buffer = Utilities::pack(object);
1970
1971 int n_local_data = buffer.size();
1972
1973 // Vector to store the size of the packed local buffer of every process
1974 std::vector<int> size_all_data(n_procs, 0);
1975
1976 // Exchanging the size of each buffer
1977 int ierr = MPI_Allgather(
1978 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1979 AssertThrowMPI(ierr);
1980
1981 // Now computing the displacement, relative to recvbuf,
1982 // at which to store the incoming buffer
1983 std::vector<int> rdispls(n_procs);
1984 rdispls[0] = 0;
1985 for (unsigned int i = 1; i < n_procs; ++i)
1986 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1987
1988 // Step 3: exchange the buffer:
1989 std::vector<char> received_unrolled_buffer(rdispls.back() +
1990 size_all_data.back());
1991
1992 ierr = MPI_Allgatherv(buffer.data(),
1993 n_local_data,
1994 MPI_CHAR,
1995 received_unrolled_buffer.data(),
1996 size_all_data.data(),
1997 rdispls.data(),
1998 MPI_CHAR,
1999 comm);
2000 AssertThrowMPI(ierr);
2001
2002 std::vector<T> received_objects(n_procs);
2003 for (unsigned int i = 0; i < n_procs; ++i)
2004 {
2005 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
2006 rdispls[i],
2007 received_unrolled_buffer.begin() +
2008 rdispls[i] + size_all_data[i]);
2009 received_objects[i] = Utilities::unpack<T>(local_buffer);
2010 }
2011
2012 return received_objects;
2013# endif
2014 }
2015
2016
2017
2018 template <typename T>
2019 std::vector<T>
2020 gather(const MPI_Comm comm,
2021 const T &object_to_send,
2022 const unsigned int root_process)
2023 {
2024# ifndef DEAL_II_WITH_MPI
2025 (void)comm;
2026 (void)root_process;
2027 std::vector<T> v(1, object_to_send);
2028 return v;
2029# else
2030 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2031 const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2032
2033 AssertIndexRange(root_process, n_procs);
2034
2035 std::vector<char> buffer = Utilities::pack(object_to_send);
2036 int n_local_data = buffer.size();
2037
2038 // Vector to store the size of the packed local buffer of every process;
2039 // only the root process needs to allocate memory for that purpose
2040 std::vector<int> size_all_data;
2041 if (my_rank == root_process)
2042 size_all_data.resize(n_procs, 0);
2043
2044 // Exchanging the size of each buffer
2045 int ierr = MPI_Gather(&n_local_data,
2046 1,
2047 MPI_INT,
2048 size_all_data.data(),
2049 1,
2050 MPI_INT,
2051 root_process,
2052 comm);
2053 AssertThrowMPI(ierr);
2054
2055 // Now computing the displacement, relative to recvbuf,
2056 // at which to store the incoming buffer; only for root
2057 std::vector<int> rdispls;
2058 if (my_rank == root_process)
2059 {
2060 rdispls.resize(n_procs, 0);
2061 for (unsigned int i = 1; i < n_procs; ++i)
2062 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2063 }
2064 // exchange the buffer:
2065 std::vector<char> received_unrolled_buffer;
2066 if (my_rank == root_process)
2067 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2068
2069 ierr = MPI_Gatherv(buffer.data(),
2070 n_local_data,
2071 MPI_CHAR,
2072 received_unrolled_buffer.data(),
2073 size_all_data.data(),
2074 rdispls.data(),
2075 MPI_CHAR,
2076 root_process,
2077 comm);
2078 AssertThrowMPI(ierr);
2079
2080 std::vector<T> received_objects;
2081
2082 if (my_rank == root_process)
2083 {
2084 received_objects.resize(n_procs);
2085
2086 for (unsigned int i = 0; i < n_procs; ++i)
2087 {
2088 const std::vector<char> local_buffer(
2089 received_unrolled_buffer.begin() + rdispls[i],
2090 received_unrolled_buffer.begin() + rdispls[i] +
2091 size_all_data[i]);
2092 received_objects[i] = Utilities::unpack<T>(local_buffer);
2093 }
2094 }
2095 return received_objects;
2096# endif
2097 }
2098
2099
2100
2101 template <typename T>
2102 T
2103 scatter(const MPI_Comm comm,
2104 const std::vector<T> &objects_to_send,
2105 const unsigned int root_process)
2106 {
2107# ifndef DEAL_II_WITH_MPI
2108 (void)comm;
2109 (void)root_process;
2110
2111 AssertDimension(objects_to_send.size(), 1);
2112
2113 return objects_to_send[0];
2114# else
2115 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2116 const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2117
2118 AssertIndexRange(root_process, n_procs);
2119 AssertThrow(
2120 (my_rank != root_process && objects_to_send.empty()) ||
2121 objects_to_send.size() == n_procs,
2122 ExcMessage(
2123 "The number of objects to be scattered must correspond to the number of processes."));
2124
2125 std::vector<char> send_buffer;
2126 std::vector<int> send_counts;
2127 std::vector<int> send_displacements;
2128
2129 if (my_rank == root_process)
2130 {
2131 send_counts.resize(n_procs, 0);
2132 send_displacements.resize(n_procs + 1, 0);
2133
2134 for (unsigned int i = 0; i < n_procs; ++i)
2135 {
2136 const auto packed_data = Utilities::pack(objects_to_send[i]);
2137 send_buffer.insert(send_buffer.end(),
2138 packed_data.begin(),
2139 packed_data.end());
2140 send_counts[i] = packed_data.size();
2141 }
2142
2143 for (unsigned int i = 0; i < n_procs; ++i)
2144 send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2145 }
2146
2147 int n_local_data;
2148 int ierr = MPI_Scatter(send_counts.data(),
2149 1,
2150 MPI_INT,
2151 &n_local_data,
2152 1,
2153 MPI_INT,
2154 root_process,
2155 comm);
2156 AssertThrowMPI(ierr);
2157
2158 std::vector<char> recv_buffer(n_local_data);
2159
2160 ierr = MPI_Scatterv(send_buffer.data(),
2161 send_counts.data(),
2162 send_displacements.data(),
2163 MPI_CHAR,
2164 recv_buffer.data(),
2165 n_local_data,
2166 MPI_CHAR,
2167 root_process,
2168 comm);
2169 AssertThrowMPI(ierr);
2170
2171 return Utilities::unpack<T>(recv_buffer);
2172# endif
2173 }
2174
2175
2176 template <typename T>
2177 void
2178 broadcast(T *buffer,
2179 const size_t count,
2180 const unsigned int root,
2181 const MPI_Comm comm)
2182 {
2183# ifndef DEAL_II_WITH_MPI
2184 (void)buffer;
2185 (void)count;
2186 (void)root;
2187 (void)comm;
2188# else
2189 Assert(root < n_mpi_processes(comm),
2190 ExcMessage("Invalid root rank specified."));
2191
2192 // MPI_Bcast's count is a signed int, so send at most 2^31-1 objects in
2193 // each iteration:
2194 const size_t max_send_count = std::numeric_limits<signed int>::max();
2195
2196 size_t total_sent_count = 0;
2197 while (total_sent_count < count)
2198 {
2199 const size_t current_count =
2200 std::min(count - total_sent_count, max_send_count);
2201
2202 const int ierr = MPI_Bcast(buffer + total_sent_count,
2203 current_count,
2204 mpi_type_id_for_type<decltype(*buffer)>,
2205 root,
2206 comm);
2207 AssertThrowMPI(ierr);
2208 total_sent_count += current_count;
2209 }
2210# endif
2211 }
2212
2213
2214
2215 template <typename T>
2216 std::enable_if_t<is_mpi_type<T> == false, T>
2217 broadcast(const MPI_Comm comm,
2218 const T &object_to_send,
2219 const unsigned int root_process)
2220 {
2221# ifndef DEAL_II_WITH_MPI
2222 (void)comm;
2223 (void)root_process;
2224 return object_to_send;
2225# else
2226 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2227 AssertIndexRange(root_process, n_procs);
2228 (void)n_procs;
2229
2230 std::vector<char> buffer;
2231 std::size_t buffer_size = numbers::invalid_size_type;
2232
2233 // On the root process, pack the data and determine what the
2234 // buffer size needs to be.
2235 if (this_mpi_process(comm) == root_process)
2236 {
2237 buffer = Utilities::pack(object_to_send, false);
2238 buffer_size = buffer.size();
2239 }
2240
2241 // Exchange the size of buffer
2242 int ierr = MPI_Bcast(&buffer_size,
2243 1,
2244 mpi_type_id_for_type<decltype(buffer_size)>,
2245 root_process,
2246 comm);
2247 AssertThrowMPI(ierr);
2248
2249 // If not on the root process, correctly size the buffer to
2250 // receive the data, then do exactly that.
2251 if (this_mpi_process(comm) != root_process)
2252 buffer.resize(buffer_size);
2253
2254 broadcast(buffer.data(), buffer_size, root_process, comm);
2255
2256 if (Utilities::MPI::this_mpi_process(comm) == root_process)
2257 return object_to_send;
2258 else
2259 return Utilities::unpack<T>(buffer, false);
2260# endif
2261 }
2262
2263
2264
2265 template <typename T>
2266 std::enable_if_t<is_mpi_type<T> == true, T>
2267 broadcast(const MPI_Comm comm,
2268 const T &object_to_send,
2269 const unsigned int root_process)
2270 {
2271# ifndef DEAL_II_WITH_MPI
2272 (void)comm;
2273 (void)root_process;
2274 return object_to_send;
2275# else
2276
2277 T object = object_to_send;
2278 int ierr =
2279 MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2280 AssertThrowMPI(ierr);
2281
2282 return object;
2283# endif
2284 }
2285
2286
2287 template <typename T>
2288 Future<void>
2289 isend(const T &object,
2290 MPI_Comm communicator,
2291 const unsigned int target_rank,
2292 const unsigned int mpi_tag)
2293 {
2294# ifndef DEAL_II_WITH_MPI
2295 Assert(false, ExcNeedsMPI());
2296 (void)object;
2297 (void)communicator;
2298 (void)target_rank;
2299 (void)mpi_tag;
2300 return Future<void>([]() {}, []() {});
2301# else
2302 // Create a pointer to a send buffer into which we pack the object
2303 // to be sent. The buffer will be released by the Future object once
2304 // the send has been verified to have succeeded.
2305 //
2306 // Conceptually, we would like this send buffer to be a
2307 // std::unique_ptr object whose ownership is later handed over
2308 // to the cleanup function. That has the disadvantage that the
2309 // cleanup object is a non-copyable lambda capture, leading to
2310 // awkward semantics. Instead, we use a std::shared_ptr; we move
2311 // this shared pointer into the cleanup function, which means
2312 // that there is exactly one shared pointer who owns the buffer
2313 // at any given time, though the latter is not an important
2314 // optimization.
2315 std::shared_ptr<std::vector<char>> send_buffer =
2316 std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2317
2318 // Now start the send, and store the result in a request object that
2319 // we can then wait for later:
2320 MPI_Request request;
2321 const int ierr =
2322 MPI_Isend(send_buffer->data(),
2323 send_buffer->size(),
2324 mpi_type_id_for_type<decltype(*send_buffer->data())>,
2325 target_rank,
2326 mpi_tag,
2327 communicator,
2328 &request);
2329 AssertThrowMPI(ierr);
2330
2331 // Then return a std::future-like object that has a wait()
2332 // function one can use to wait for the communication to finish,
2333 // and that has a cleanup function to be called at some point
2334 // after that makes sure the send buffer gets deallocated. This
2335 // cleanup function takes over ownership of the send buffer.
2336 //
2337 // Note that the body of the lambda function of the clean-up
2338 // function could be left empty. If that were so, once the
2339 // lambda function object goes out of scope, the 'send_buffer'
2340 // member of the closure object goes out of scope as well and so
2341 // the send_buffer is destroyed. But we may want to release the
2342 // buffer itself as early as possible, and so we clear the
2343 // buffer when the Future::get() function is called.
2344 auto wait = [request]() mutable {
2345 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2346 AssertThrowMPI(ierr);
2347 };
2348 auto cleanup = [send_buffer = std::move(send_buffer)]() {
2349 send_buffer->clear();
2350 };
2351 return Future<void>(wait, cleanup);
2352# endif
2353 }
2354
2355
2356
2357 template <typename T>
2358 Future<T>
2359 irecv(MPI_Comm communicator,
2360 const unsigned int source_rank,
2361 const unsigned int mpi_tag)
2362 {
2363# ifndef DEAL_II_WITH_MPI
2364 Assert(false, ExcNeedsMPI());
2365 (void)communicator;
2366 (void)source_rank;
2367 (void)mpi_tag;
2368 return Future<T>([]() {}, []() { return T{}; });
2369# else
2370 // Use a 'probe' operation for the 'wait' operation of the
2371 // Future this function returns. It will trigger whenever we get
2372 // the incoming message. Later, once we have received the message, we
2373 // can query its size and allocate a receiver buffer.
2374 //
2375 // Since we may be waiting for multiple messages from the same
2376 // incoming process (with possibly the same tag -- we can't
2377 // know), we must make sure that the 'probe' operation we have
2378 // here (and which we use to determine the buffer size) matches
2379 // the 'recv' operation with which we actually get the data
2380 // later on. This is exactly what the 'MPI_Mprobe' function and
2381 // its 'I'mmediate variant is there for, coupled with the
2382 // 'MPI_Mrecv' call that we put into the clean-up function
2383 // below.
2384 std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2385 std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2386
2387 auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2388 const int ierr = MPI_Mprobe(
2389 source_rank, mpi_tag, communicator, message.get(), status.get());
2390 AssertThrowMPI(ierr);
2391 };
2392
2393
2394 // Now also define the function that actually gets the data:
2395 auto get = [status, message]() {
2396 int number_amount;
2397 int ierr;
2398 ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2399 AssertThrowMPI(ierr);
2400
2401 std::vector<char> receive_buffer(number_amount);
2402
2403 // Then actually get the data, using the matching MPI_Mrecv to the above
2404 // MPI_Mprobe:
2405 ierr = MPI_Mrecv(receive_buffer.data(),
2406 number_amount,
2407 mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2408 message.get(),
2409 status.get());
2410 AssertThrowMPI(ierr);
2411
2412 // Return the unpacked object:
2413 return Utilities::unpack<T>(receive_buffer, false);
2414 };
2415
2416 return Future<T>(wait, get);
2417# endif
2418 }
2419
2420
2421
2422# ifdef DEAL_II_WITH_MPI
2423 template <class Iterator, typename Number>
2424 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2425 mean_and_standard_deviation(const Iterator begin,
2426 const Iterator end,
2427 const MPI_Comm comm)
2428 {
2429 // below we do a simple and straightforward implementation. More elaborate
2430 // options are:
2431 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2432 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2433 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2434 using Std = typename numbers::NumberTraits<Number>::real_type;
2435 const Number sum = std::accumulate(begin, end, Number(0.));
2436
2437 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2438 Assert(size > 0, ExcDivideByZero());
2439 const Number mean =
2440 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2441 Std sq_sum = 0.;
2442 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
2443 sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2444 });
2445 sq_sum = Utilities::MPI::sum(sq_sum, comm);
2446 return std::make_pair(mean,
2447 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2448 }
2449# endif
2450
2451#endif
2452 } // end of namespace MPI
2453} // end of namespace Utilities
2454
2455
2456DEAL_II_NAMESPACE_CLOSE
2457
2458#endif