mpi.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_h
16#define dealii_mpi_h
17
18#include <deal.II/base/config.h>
19
27
28#include <complex>
29#include <limits>
30#include <map>
31#include <numeric>
32#include <vector>
33
34
35
49#ifdef DEAL_II_WITH_MPI
50# define DEAL_II_MPI_CONST_CAST(expr) (expr)
51#endif
52
53
54
55DEAL_II_NAMESPACE_OPEN
56
57
58// Forward type declarations to allow MPI sums over tensorial types
59#ifndef DOXYGEN
60template <int rank, int dim, typename Number>
61class Tensor;
62template <int rank, int dim, typename Number>
63class SymmetricTensor;
64template <typename Number>
65class SparseMatrix;
66class IndexSet;
67#endif
68
69namespace Utilities
70{
83 IndexSet
84 create_evenly_distributed_partitioning(
85 const unsigned int my_partition_id,
86 const unsigned int n_partitions,
87 const types::global_dof_index total_size);
88
96 namespace MPI
97 {
106 template <typename T>
107 constexpr bool is_mpi_type = is_same_as_any_of<T,
108 char,
109 signed short,
110 signed int,
111 signed long,
112 signed long long,
113 signed char,
114 unsigned char,
115 unsigned short,
116 unsigned int,
117 unsigned long int,
118 unsigned long long,
119 float,
120 double,
121 long double,
122 bool,
123 std::complex<float>,
124 std::complex<double>,
125 std::complex<long double>,
126 wchar_t>::value;
127
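// A minimal sketch of how this trait can be used (the checks below are
// illustrative, not taken from the library's documentation):
//
// @code
//   static_assert(Utilities::MPI::is_mpi_type<double>,
//                 "double maps directly to an MPI datatype");
//   static_assert(!Utilities::MPI::is_mpi_type<std::string>,
//                 "std::string must be packed/serialized before sending");
// @endcode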
136 unsigned int
137 n_mpi_processes(const MPI_Comm mpi_communicator);
138
147 unsigned int
148 this_mpi_process(const MPI_Comm mpi_communicator);
149
154 std::vector<unsigned int>
155 mpi_processes_within_communicator(const MPI_Comm comm_large,
156 const MPI_Comm comm_small);
157
179 std::vector<unsigned int>
180 compute_point_to_point_communication_pattern(
181 const MPI_Comm mpi_comm,
182 const std::vector<unsigned int> &destinations);
183
203 unsigned int
204 compute_n_point_to_point_communications(
205 const MPI_Comm mpi_comm,
206 const std::vector<unsigned int> &destinations);
207
224 MPI_Comm
225 duplicate_communicator(const MPI_Comm mpi_communicator);
226
236 void
237 free_communicator(MPI_Comm mpi_communicator);
238
251 class DuplicatedCommunicator
252 {
253 public:
257 explicit DuplicatedCommunicator(const MPI_Comm communicator)
258 : comm(duplicate_communicator(communicator))
259 {}
260
264 DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
265
269 ~DuplicatedCommunicator()
270 {
271 free_communicator(comm);
272 }
273
277 MPI_Comm
278 operator*() const
279 {
280 return comm;
281 }
282
283
287 DuplicatedCommunicator &
288 operator=(const DuplicatedCommunicator &) = delete;
289
290 private:
294 const MPI_Comm comm;
295 };
296
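// A minimal usage sketch (assuming MPI has been initialized and
// MPI_COMM_WORLD is available); the duplicated communicator is freed
// automatically when 'dup' goes out of scope:
//
// @code
//   {
//     Utilities::MPI::DuplicatedCommunicator dup(MPI_COMM_WORLD);
//     const unsigned int rank = Utilities::MPI::this_mpi_process(*dup);
//     // ... communicate on *dup without interfering with other users
//     //     of MPI_COMM_WORLD ...
//   }   // the destructor calls free_communicator() here
// @endcode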
297
298
328 class CollectiveMutex
329 {
330 public:
336 class ScopedLock
337 {
338 public:
342 ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
343 : mutex(mutex)
344 , comm(comm)
345 {
346 mutex.lock(comm);
347 }
348
352 ~ScopedLock()
353 {
354 mutex.unlock(comm);
355 }
356
357 private:
361 CollectiveMutex &mutex;
365 const MPI_Comm comm;
366 };
367
371 CollectiveMutex();
372
376 ~CollectiveMutex();
377
384 void
385 lock(const MPI_Comm comm);
386
393 void
394 unlock(const MPI_Comm comm);
395
396 private:
400 bool locked;
401
405 MPI_Request request;
406 };
407
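// A minimal usage sketch: guard a communication phase on a communicator
// 'comm' (an assumed variable) so that concurrent uses of the same
// communicator from different callers do not interleave their messages:
//
// @code
//   static Utilities::MPI::CollectiveMutex      mutex;
//   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
//   // ... point-to-point communication on 'comm' ...
//   // the mutex is released again when 'lock' goes out of scope
// @endcode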
408
409
456 template <typename T>
457 class Future
458 {
459 public:
464 template <typename W, typename G>
465 Future(W &&wait_operation, G &&get_and_cleanup_operation);
466
472 Future(const Future &) = delete;
473
477 Future(Future &&) noexcept = default;
478
482 ~Future();
483
489 Future &
490 operator=(const Future &) = delete;
491
495 Future &
496 operator=(Future &&) noexcept = default;
497
505 void
506 wait();
507
519 T
520 get();
521
522 private:
526 std::function<void()> wait_function;
527 std::function<T()> get_and_cleanup_function;
528
532 bool is_done;
533
537 bool get_was_called;
538 };
539
548 std::vector<IndexSet>
549 create_ascending_partitioning(
550 const MPI_Comm comm,
551 const types::global_dof_index locally_owned_size);
552
560 IndexSet
561 create_evenly_distributed_partitioning(
562 const MPI_Comm comm,
563 const types::global_dof_index total_size);
564
565#ifdef DEAL_II_WITH_MPI
581 template <class Iterator, typename Number = long double>
582 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
583 mean_and_standard_deviation(const Iterator begin,
584 const Iterator end,
585 const MPI_Comm comm);
586#endif
587
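// A minimal usage sketch, assuming each process stores part of a sampled
// quantity in a std::vector<double> named 'samples' (a hypothetical name):
//
// @code
//   const auto stats =
//     Utilities::MPI::mean_and_standard_deviation(samples.begin(),
//                                                 samples.end(),
//                                                 mpi_communicator);
//   // stats.first is the global mean, stats.second the standard deviation
// @endcode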
588
636 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
637 create_mpi_data_type_n_bytes(const std::size_t n_bytes);
638
658 template <typename T>
659 T
660 sum(const T &t, const MPI_Comm mpi_communicator);
661
671 template <typename T, typename U>
672 void
673 sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
674
684 template <typename T>
685 void
686 sum(const ArrayView<const T> &values,
687 const MPI_Comm mpi_communicator,
688 const ArrayView<T> &sums);
689
695 template <int rank, int dim, typename Number>
696 SymmetricTensor<rank, dim, Number>
697 sum(const SymmetricTensor<rank, dim, Number> &local,
698 const MPI_Comm mpi_communicator);
699
705 template <int rank, int dim, typename Number>
706 Tensor<rank, dim, Number>
707 sum(const Tensor<rank, dim, Number> &local,
708 const MPI_Comm mpi_communicator);
709
718 template <typename Number>
719 void
720 sum(const SparseMatrix<Number> &local,
721 const MPI_Comm mpi_communicator,
722 SparseMatrix<Number> &global);
723
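// A minimal usage sketch for the scalar and container overloads of sum()
// declared above ('local_integral' and 'local_values' are hypothetical
// names):
//
// @code
//   const double global_integral =
//     Utilities::MPI::sum(local_integral, mpi_communicator);
//
//   std::vector<double> local_values(10), global_values(10);
//   Utilities::MPI::sum(local_values, mpi_communicator, global_values);
// @endcode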
743 template <typename T>
744 T
745 max(const T &t, const MPI_Comm mpi_communicator);
746
756 template <typename T, typename U>
757 void
758 max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
759
769 template <typename T>
770 void
771 max(const ArrayView<const T> &values,
772 const MPI_Comm mpi_communicator,
773 const ArrayView<T> &maxima);
774
794 template <typename T>
795 T
796 min(const T &t, const MPI_Comm mpi_communicator);
797
807 template <typename T, typename U>
808 void
809 min(const T &values, const MPI_Comm mpi_communicator, U &minima);
810
820 template <typename T>
821 void
822 min(const ArrayView<const T> &values,
823 const MPI_Comm mpi_communicator,
824 const ArrayView<T> &minima);
825
849 template <typename T>
850 T
851 logical_or(const T &t, const MPI_Comm mpi_communicator);
852
867 template <typename T, typename U>
868 void
869 logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
870
880 template <typename T>
881 void
882 logical_or(const ArrayView<const T> &values,
883 const MPI_Comm mpi_communicator,
884 const ArrayView<T> &results);
885
900 struct MinMaxAvg
901 {
906 double sum;
907
912 double min;
913
918 double max;
919
928 unsigned int min_index;
929
938 unsigned int max_index;
939
944 double avg;
945 };
946
961 MinMaxAvg
962 min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
963
975 std::vector<MinMaxAvg>
976 min_max_avg(const std::vector<double> &my_value,
977 const MPI_Comm mpi_communicator);
978
979
992 void
993 min_max_avg(const ArrayView<const double> &my_values,
994 const ArrayView<MinMaxAvg> &result,
995 const MPI_Comm mpi_communicator);
996
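// A minimal usage sketch, e.g. for reporting how well a quantity such as
// the local wall time is balanced across processes ('local_wall_time' is
// a hypothetical name):
//
// @code
//   const Utilities::MPI::MinMaxAvg data =
//     Utilities::MPI::min_max_avg(local_wall_time, mpi_communicator);
//   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
//     std::cout << "min=" << data.min << " max=" << data.max
//               << " avg=" << data.avg << std::endl;
// @endcode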
997
1041 class MPI_InitFinalize
1042 {
1043 public:
1089 MPI_InitFinalize(
1090 int &argc,
1091 char **&argv,
1092 const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1093
1099 };
1100
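// A minimal sketch of the canonical use of this class at the beginning of
// a program's main() function (here limiting each MPI process to a single
// thread):
//
// @code
//   int main(int argc, char *argv[])
//   {
//     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//     // ... run the parallel program ...
//   }   // MPI is finalized automatically when the object is destroyed
// @endcode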
1112 bool
1113 job_supports_mpi();
1114
1132 template <typename T>
1133 std::map<unsigned int, T>
1134 some_to_some(const MPI_Comm comm,
1135 const std::map<unsigned int, T> &objects_to_send);
1136
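// A minimal usage sketch: every process sends a (serializable) object to a
// hand-picked set of destination ranks and receives whatever was addressed
// to it ('destination_rank' and 'Payload' are hypothetical names):
//
// @code
//   std::map<unsigned int, Payload> to_send;
//   to_send[destination_rank] = Payload{/*...*/};
//
//   const std::map<unsigned int, Payload> received =
//     Utilities::MPI::some_to_some(mpi_communicator, to_send);
//   // received.at(p) holds the object sent to us by process p (if any)
// @endcode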
1150 template <typename T>
1151 std::vector<T>
1152 all_gather(const MPI_Comm comm, const T &object_to_send);
1153
1169 template <typename T>
1170 std::vector<T>
1171 gather(const MPI_Comm comm,
1172 const T &object_to_send,
1173 const unsigned int root_process = 0);
1174
1189 template <typename T>
1190 T
1191 scatter(const MPI_Comm comm,
1192 const std::vector<T> &objects_to_send,
1193 const unsigned int root_process = 0);
1194
1233 template <typename T>
1234 T
1235 broadcast(const MPI_Comm comm,
1236 const T &object_to_send,
1237 const unsigned int root_process = 0);
1238
1255 template <typename T>
1256 void
1257 broadcast(T *buffer,
1258 const size_t count,
1259 const unsigned int root,
1260 const MPI_Comm comm);
1261
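// A minimal usage sketch for the two broadcast() variants above: the first
// broadcasts an arbitrary (packable) object, the second a raw buffer whose
// size is known on all processes ('parameters' and 'coefficients' are
// hypothetical names):
//
// @code
//   // Object variant: 'parameters' only needs a valid value on rank 0.
//   parameters = Utilities::MPI::broadcast(mpi_communicator, parameters, 0);
//
//   // Buffer variant: all ranks provide a buffer of the same size.
//   std::vector<double> coefficients(100);
//   Utilities::MPI::broadcast(coefficients.data(),
//                             coefficients.size(),
//                             0,
//                             mpi_communicator);
// @endcode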
1274 template <typename T>
1275 T
1276 reduce(const T &local_value,
1277 const MPI_Comm comm,
1278 const std::function<T(const T &, const T &)> &combiner,
1279 const unsigned int root_process = 0);
1280
1281
1293 template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
1294 std::pair<T, T>
1295 partial_and_total_sum(const T &value, const MPI_Comm comm);
1296
1297
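// A minimal usage sketch: compute where the locally owned elements of the
// current process start within a globally contiguous numbering
// ('n_locally_owned' is a hypothetical name):
//
// @code
//   const auto [my_offset, total_size] =
//     Utilities::MPI::partial_and_total_sum(n_locally_owned,
//                                           mpi_communicator);
//   // this process owns indices [my_offset, my_offset + n_locally_owned)
//   // out of a total of total_size indices
// @endcode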
1307 template <typename T>
1308 T
1309 all_reduce(const T &local_value,
1310 const MPI_Comm comm,
1311 const std::function<T(const T &, const T &)> &combiner);
1312
1313
1334 template <typename T>
1335 Future<void>
1336 isend(const T &object,
1337 MPI_Comm communicator,
1338 const unsigned int target_rank,
1339 const unsigned int mpi_tag = 0);
1340
1341
1358 template <typename T>
1359 Future<T>
1360 irecv(MPI_Comm communicator,
1361 const unsigned int source_rank,
1362 const unsigned int mpi_tag = 0);
1363
1364
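// A minimal sketch of how isend() and irecv() cooperate: rank 0 sends an
// object to rank 1 without blocking, and both sides use the returned
// Future to wait for completion ('Payload' is a hypothetical type):
//
// @code
//   const unsigned int my_rank =
//     Utilities::MPI::this_mpi_process(mpi_communicator);
//
//   if (my_rank == 0)
//     {
//       Utilities::MPI::Future<void> f =
//         Utilities::MPI::isend(Payload{/*...*/}, mpi_communicator, 1);
//       // ... do other work ...
//       f.wait();   // ensure the send buffer can be released
//     }
//   else if (my_rank == 1)
//     {
//       Utilities::MPI::Future<Payload> f =
//         Utilities::MPI::irecv<Payload>(mpi_communicator, 0);
//       const Payload p = f.get();   // blocks until the message arrived
//     }
// @endcode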
1407 std::vector<unsigned int>
1408 compute_index_owner(const IndexSet &owned_indices,
1409 const IndexSet &indices_to_look_up,
1410 const MPI_Comm comm);
1411
1419 template <typename T>
1420 std::vector<T>
1421 compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1422
1426 template <typename T>
1427 std::set<T>
1428 compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1429
1430
1431
1432 /* --------------------------- inline functions ------------------------- */
1433
1434 namespace internal
1435 {
1441 namespace MPIDataTypes
1442 {
1443#ifdef DEAL_II_WITH_MPI
1444 inline MPI_Datatype
1445 mpi_type_id(const bool *)
1446 {
1447 return MPI_CXX_BOOL;
1448 }
1449
1450
1451
1452 inline MPI_Datatype
1453 mpi_type_id(const char *)
1454 {
1455 return MPI_CHAR;
1456 }
1457
1458
1459
1460 inline MPI_Datatype
1461 mpi_type_id(const signed char *)
1462 {
1463 return MPI_SIGNED_CHAR;
1464 }
1465
1466
1467
1468 inline MPI_Datatype
1469 mpi_type_id(const wchar_t *)
1470 {
1471 return MPI_WCHAR;
1472 }
1473
1474
1475
1476 inline MPI_Datatype
1477 mpi_type_id(const short *)
1478 {
1479 return MPI_SHORT;
1480 }
1481
1482
1483
1484 inline MPI_Datatype
1485 mpi_type_id(const int *)
1486 {
1487 return MPI_INT;
1488 }
1489
1490
1491
1492 inline MPI_Datatype
1493 mpi_type_id(const long int *)
1494 {
1495 return MPI_LONG;
1496 }
1497
1498
1499
1500 inline MPI_Datatype
1501 mpi_type_id(const long long int *)
1502 {
1503 return MPI_LONG_LONG;
1504 }
1505
1506
1507
1508 inline MPI_Datatype
1509 mpi_type_id(const unsigned char *)
1510 {
1511 return MPI_UNSIGNED_CHAR;
1512 }
1513
1514
1515
1516 inline MPI_Datatype
1517 mpi_type_id(const unsigned short *)
1518 {
1519 return MPI_UNSIGNED_SHORT;
1520 }
1521
1522
1523
1524 inline MPI_Datatype
1525 mpi_type_id(const unsigned int *)
1526 {
1527 return MPI_UNSIGNED;
1528 }
1529
1530
1531
1532 inline MPI_Datatype
1533 mpi_type_id(const unsigned long int *)
1534 {
1535 return MPI_UNSIGNED_LONG;
1536 }
1537
1538
1539
1540 inline MPI_Datatype
1541 mpi_type_id(const unsigned long long int *)
1542 {
1543 return MPI_UNSIGNED_LONG_LONG;
1544 }
1545
1546
1547
1548 inline MPI_Datatype
1549 mpi_type_id(const float *)
1550 {
1551 return MPI_FLOAT;
1552 }
1553
1554
1555
1556 inline MPI_Datatype
1557 mpi_type_id(const double *)
1558 {
1559 return MPI_DOUBLE;
1560 }
1561
1562
1563
1564 inline MPI_Datatype
1565 mpi_type_id(const long double *)
1566 {
1567 return MPI_LONG_DOUBLE;
1568 }
1569
1570
1571
1572 inline MPI_Datatype
1573 mpi_type_id(const std::complex<float> *)
1574 {
1575 return MPI_COMPLEX;
1576 }
1577
1578
1579
1580 inline MPI_Datatype
1581 mpi_type_id(const std::complex<double> *)
1582 {
1583 return MPI_DOUBLE_COMPLEX;
1584 }
1585#endif
1586 } // namespace MPIDataTypes
1587 } // namespace internal
1588
1589
1590
1591#ifdef DEAL_II_WITH_MPI
1609 template <typename T>
1610 inline const MPI_Datatype mpi_type_id_for_type =
1611 internal::MPIDataTypes::mpi_type_id(
1612 static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1613#endif
1614
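// A minimal sketch of using this variable template in a raw MPI call so
// that the correct MPI_Datatype is deduced from the C++ element type of
// the buffer ('local_sizes' is a hypothetical name):
//
// @code
//   std::vector<std::uint64_t> local_sizes(10, 0);
//   MPI_Allreduce(MPI_IN_PLACE,
//                 local_sizes.data(),
//                 local_sizes.size(),
//                 Utilities::MPI::mpi_type_id_for_type<std::uint64_t>,
//                 MPI_SUM,
//                 mpi_communicator);
// @endcode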
1615#ifndef DOXYGEN
1616 namespace internal
1617 {
1618 // declaration for an internal function that lives in mpi.templates.h
1619 template <typename T>
1620 void
1621 all_reduce(const MPI_Op &mpi_op,
1622 const ArrayView<const T> &values,
1623 const MPI_Comm mpi_communicator,
1624 const ArrayView<T> &output);
1625 } // namespace internal
1626
1627
1628 template <typename T>
1629 template <typename W, typename G>
1630 Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1631 : wait_function(wait_operation)
1632 , get_and_cleanup_function(get_and_cleanup_operation)
1633 , is_done(false)
1634 , get_was_called(false)
1635 {}
1636
1637
1638
1639 template <typename T>
1640 Future<T>::~Future()
1641 {
1642 // If there is a clean-up function, and if it has not been
1643 // called yet, then do so. Note that we may not have a
1644 // clean-up function (not even an empty one) if the current
1645 // object has been moved from, into another object, and as
1646 // a consequence the std::function objects are now empty
1647 // even though they were initialized in the constructor.
1648 // (A std::function object that wraps an empty lambda
1649 // function, [](){}, is not an empty std::function object.)
1650 if ((get_was_called == false) && get_and_cleanup_function)
1651 get();
1652 }
1653
1654
1655
1656 template <typename T>
1657 void
1658 Future<T>::wait()
1659 {
1660 if (is_done == false)
1661 {
1662 wait_function();
1663
1664 is_done = true;
1665 }
1666 }
1667
1668
1669 template <typename T>
1670 T
1671 Future<T>::get()
1672 {
1673 Assert(get_was_called == false,
1674 ExcMessage(
1675 "You can't call get() more than once on a Future object."));
1676 get_was_called = true;
1677
1678 wait();
1679 return get_and_cleanup_function();
1680 }
1681
1682
1683
1684 template <typename T, unsigned int N>
1685 void
1686 sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1687 {
1688 internal::all_reduce(MPI_SUM,
1689 ArrayView<const T>(values, N),
1690 mpi_communicator,
1691 ArrayView<T>(sums, N));
1692 }
1693
1694
1695
1696 template <typename T, unsigned int N>
1697 void
1698 max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1699 {
1700 internal::all_reduce(MPI_MAX,
1701 ArrayView<const T>(values, N),
1702 mpi_communicator,
1703 ArrayView<T>(maxima, N));
1704 }
1705
1706
1707
1708 template <typename T, unsigned int N>
1709 void
1710 min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1711 {
1712 internal::all_reduce(MPI_MIN,
1713 ArrayView<const T>(values, N),
1714 mpi_communicator,
1715 ArrayView<T>(minima, N));
1716 }
1717
1718
1719
1720 template <typename T, unsigned int N>
1721 void
1722 logical_or(const T (&values)[N],
1723 const MPI_Comm mpi_communicator,
1724 T (&results)[N])
1725 {
1726 static_assert(std::is_integral_v<T>,
1727 "The MPI_LOR operation only allows integral data types.");
1728
1729 internal::all_reduce(MPI_LOR,
1730 ArrayView<const T>(values, N),
1731 mpi_communicator,
1732 ArrayView<T>(results, N));
1733 }
1734
1735
1736
1737 template <typename T>
1738 std::map<unsigned int, T>
1739 some_to_some(const MPI_Comm comm,
1740 const std::map<unsigned int, T> &objects_to_send)
1741 {
1742# ifndef DEAL_II_WITH_MPI
1743 (void)comm;
1744 Assert(objects_to_send.size() < 2,
1745 ExcMessage("Cannot send to more than one processor."));
1746 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1747 objects_to_send.empty(),
1748 ExcMessage("Can only send to myself or to nobody."));
1749 return objects_to_send;
1750# else
1751 const auto my_proc = this_mpi_process(comm);
1752
1753 std::map<unsigned int, T> received_objects;
1754
1755 std::vector<unsigned int> send_to;
1756 send_to.reserve(objects_to_send.size());
1757 for (const auto &m : objects_to_send)
1758 if (m.first == my_proc)
1759 received_objects[my_proc] = m.second;
1760 else
1761 send_to.emplace_back(m.first);
1762
1763 const unsigned int n_expected_incoming_messages =
1764 Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1765
1766 // Protect the following communication:
1767 static CollectiveMutex mutex;
1768 CollectiveMutex::ScopedLock lock(mutex, comm);
1769
1770 // If we have something to send, or we expect something from other
1771 // processors, we need to visit one of the two scopes below. Otherwise,
1772 // no other action is required by this mpi process, and we can safely
1773 // return.
1774 if (send_to.empty() && n_expected_incoming_messages == 0)
1775 return received_objects;
1776
1777 const int mpi_tag =
1778 internal::Tags::compute_point_to_point_communication_pattern;
1779
1780 // Sending buffers
1781 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1782 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1783 {
1784 unsigned int i = 0;
1785 for (const auto &rank_obj : objects_to_send)
1786 if (rank_obj.first != my_proc)
1787 {
1788 const auto &rank = rank_obj.first;
1789 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1790 /*allow_compression=*/false);
1791 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1792 buffers_to_send[i].size(),
1793 MPI_CHAR,
1794 rank,
1795 mpi_tag,
1796 comm,
1797 &buffer_send_requests[i]);
1798 AssertThrowMPI(ierr);
1799 ++i;
1800 }
1801 }
1802
1803 // Fill the output map
1804 {
1805 std::vector<char> buffer;
1806 // We do this on a first come/first served basis
1807 for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1808 {
1809 // Probe what's going on. Take data from the first available sender
1810 MPI_Status status;
1811 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1812 AssertThrowMPI(ierr);
1813
1814 // Length of the message
1815 int len;
1816 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1817 AssertThrowMPI(ierr);
1818 buffer.resize(len);
1819
1820 // Source rank
1821 const unsigned int rank = status.MPI_SOURCE;
1822
1823 // Actually receive the message
1824 ierr = MPI_Recv(buffer.data(),
1825 len,
1826 MPI_CHAR,
1827 status.MPI_SOURCE,
1828 status.MPI_TAG,
1829 comm,
1830 MPI_STATUS_IGNORE);
1831 AssertThrowMPI(ierr);
1832 Assert(received_objects.find(rank) == received_objects.end(),
1834 "I should not receive again from this rank"));
1835 received_objects[rank] =
1836 Utilities::unpack<T>(buffer,
1837 /*allow_compression=*/false);
1838 }
1839 }
1840
1841 // Wait to have sent all objects.
1842 const int ierr = MPI_Waitall(send_to.size(),
1843 buffer_send_requests.data(),
1844 MPI_STATUSES_IGNORE);
1845 AssertThrowMPI(ierr);
1846
1847 return received_objects;
1848# endif // deal.II with MPI
1849 }
1850
1851
1852
1853 template <typename T, typename>
1854 std::pair<T, T>
1855 partial_and_total_sum(const T &value, const MPI_Comm comm)
1856 {
1857# ifndef DEAL_II_WITH_MPI
1858 (void)comm;
1859 return {0, value};
1860# else
1861 if (Utilities::MPI::n_mpi_processes(comm) == 1)
1862 return {0, value};
1863 else
1864 {
1865 T prefix = {};
1866
1867 // First obtain every process's prefix sum:
1868 int ierr =
1869 MPI_Exscan(&value,
1870 &prefix,
1871 1,
1872 Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
1873 MPI_SUM,
1874 comm);
1875 AssertThrowMPI(ierr);
1876
1877 // Then we also need the total sum. We could obtain it by
1878 // calling Utilities::MPI::sum(), but it is cheaper if we
1879 // broadcast it from the last process, which can compute it
1880 // from its own prefix sum plus its own value.
1881 const T sum = Utilities::MPI::broadcast(
1882 comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
1883
1884 return {prefix, sum};
1885 }
1886# endif
1887 }
1888
1889
1890
1891 template <typename T>
1892 std::vector<T>
1893 all_gather(const MPI_Comm comm, const T &object)
1894 {
1895 if (job_supports_mpi() == false)
1896 return {object};
1897
1898# ifndef DEAL_II_WITH_MPI
1899 (void)comm;
1900 std::vector<T> v(1, object);
1901 return v;
1902# else
1903 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1904
1905 std::vector<char> buffer = Utilities::pack(object);
1906
1907 int n_local_data = buffer.size();
1908
1909 // Vector to store the size of loc_data_array for every process
1910 std::vector<int> size_all_data(n_procs, 0);
1911
1912 // Exchanging the size of each buffer
1913 int ierr = MPI_Allgather(
1914 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1915 AssertThrowMPI(ierr);
1916
1917 // Now computing the displacement, relative to recvbuf,
1918 // at which to store the incoming buffer
1919 std::vector<int> rdispls(n_procs);
1920 rdispls[0] = 0;
1921 for (unsigned int i = 1; i < n_procs; ++i)
1922 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1923
1924 // Step 3: exchange the buffer:
1925 std::vector<char> received_unrolled_buffer(rdispls.back() +
1926 size_all_data.back());
1927
1928 ierr = MPI_Allgatherv(buffer.data(),
1929 n_local_data,
1930 MPI_CHAR,
1931 received_unrolled_buffer.data(),
1932 size_all_data.data(),
1933 rdispls.data(),
1934 MPI_CHAR,
1935 comm);
1936 AssertThrowMPI(ierr);
1937
1938 std::vector<T> received_objects(n_procs);
1939 for (unsigned int i = 0; i < n_procs; ++i)
1940 {
1941 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1942 rdispls[i],
1943 received_unrolled_buffer.begin() +
1944 rdispls[i] + size_all_data[i]);
1945 received_objects[i] = Utilities::unpack<T>(local_buffer);
1946 }
1947
1948 return received_objects;
1949# endif
1950 }
1951
1952
1953
1954 template <typename T>
1955 std::vector<T>
1956 gather(const MPI_Comm comm,
1957 const T &object_to_send,
1958 const unsigned int root_process)
1959 {
1960# ifndef DEAL_II_WITH_MPI
1961 (void)comm;
1962 (void)root_process;
1963 std::vector<T> v(1, object_to_send);
1964 return v;
1965# else
1966 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1967 const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
1968
1969 AssertIndexRange(root_process, n_procs);
1970
1971 std::vector<char> buffer = Utilities::pack(object_to_send);
1972 int n_local_data = buffer.size();
1973
1974 // Vector to store the size of loc_data_array for every process
1975 // only the root process needs to allocate memory for that purpose
1976 std::vector<int> size_all_data;
1977 if (my_rank == root_process)
1978 size_all_data.resize(n_procs, 0);
1979
1980 // Exchanging the size of each buffer
1981 int ierr = MPI_Gather(&n_local_data,
1982 1,
1983 MPI_INT,
1984 size_all_data.data(),
1985 1,
1986 MPI_INT,
1987 root_process,
1988 comm);
1989 AssertThrowMPI(ierr);
1990
1991 // Now computing the displacement, relative to recvbuf,
1992 // at which to store the incoming buffer; only for root
1993 std::vector<int> rdispls;
1994 if (my_rank == root_process)
1995 {
1996 rdispls.resize(n_procs, 0);
1997 for (unsigned int i = 1; i < n_procs; ++i)
1998 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1999 }
2000 // exchange the buffer:
2001 std::vector<char> received_unrolled_buffer;
2002 if (my_rank == root_process)
2003 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2004
2005 ierr = MPI_Gatherv(buffer.data(),
2006 n_local_data,
2007 MPI_CHAR,
2008 received_unrolled_buffer.data(),
2009 size_all_data.data(),
2010 rdispls.data(),
2011 MPI_CHAR,
2012 root_process,
2013 comm);
2014 AssertThrowMPI(ierr);
2015
2016 std::vector<T> received_objects;
2017
2018 if (my_rank == root_process)
2019 {
2020 received_objects.resize(n_procs);
2021
2022 for (unsigned int i = 0; i < n_procs; ++i)
2023 {
2024 const std::vector<char> local_buffer(
2025 received_unrolled_buffer.begin() + rdispls[i],
2026 received_unrolled_buffer.begin() + rdispls[i] +
2027 size_all_data[i]);
2028 received_objects[i] = Utilities::unpack<T>(local_buffer);
2029 }
2030 }
2031 return received_objects;
2032# endif
2033 }
2034
2035
2036
2037 template <typename T>
2038 T
2039 scatter(const MPI_Comm comm,
2040 const std::vector<T> &objects_to_send,
2041 const unsigned int root_process)
2042 {
2043# ifndef DEAL_II_WITH_MPI
2044 (void)comm;
2045 (void)root_process;
2046
2047 AssertDimension(objects_to_send.size(), 1);
2048
2049 return objects_to_send[0];
2050# else
2051 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2052 const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2053
2054 AssertIndexRange(root_process, n_procs);
2055 AssertThrow(
2056 (my_rank != root_process && objects_to_send.empty()) ||
2057 objects_to_send.size() == n_procs,
2058 ExcMessage(
2059 "The number of objects to be scattered must correspond to the number processes."));
2060
2061 std::vector<char> send_buffer;
2062 std::vector<int> send_counts;
2063 std::vector<int> send_displacements;
2064
2065 if (my_rank == root_process)
2066 {
2067 send_counts.resize(n_procs, 0);
2068 send_displacements.resize(n_procs + 1, 0);
2069
2070 for (unsigned int i = 0; i < n_procs; ++i)
2071 {
2072 const auto packed_data = Utilities::pack(objects_to_send[i]);
2073 send_buffer.insert(send_buffer.end(),
2074 packed_data.begin(),
2075 packed_data.end());
2076 send_counts[i] = packed_data.size();
2077 }
2078
2079 for (unsigned int i = 0; i < n_procs; ++i)
2080 send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2081 }
2082
2083 int n_local_data;
2084 int ierr = MPI_Scatter(send_counts.data(),
2085 1,
2086 MPI_INT,
2087 &n_local_data,
2088 1,
2089 MPI_INT,
2090 root_process,
2091 comm);
2092 AssertThrowMPI(ierr);
2093
2094 std::vector<char> recv_buffer(n_local_data);
2095
2096 ierr = MPI_Scatterv(send_buffer.data(),
2097 send_counts.data(),
2098 send_displacements.data(),
2099 MPI_CHAR,
2100 recv_buffer.data(),
2101 n_local_data,
2102 MPI_CHAR,
2103 root_process,
2104 comm);
2105 AssertThrowMPI(ierr);
2106
2107 return Utilities::unpack<T>(recv_buffer);
2108# endif
2109 }
2110
2111
2112 template <typename T>
2113 void
2114 broadcast(T *buffer,
2115 const size_t count,
2116 const unsigned int root,
2117 const MPI_Comm comm)
2118 {
2119# ifndef DEAL_II_WITH_MPI
2120 (void)buffer;
2121 (void)count;
2122 (void)root;
2123 (void)comm;
2124# else
2125 Assert(root < n_mpi_processes(comm),
2126 ExcMessage("Invalid root rank specified."));
2127
2128 // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2129 // iteration:
2130 const size_t max_send_count = std::numeric_limits<signed int>::max();
2131
2132 size_t total_sent_count = 0;
2133 while (total_sent_count < count)
2134 {
2135 const size_t current_count =
2136 std::min(count - total_sent_count, max_send_count);
2137
2138 const int ierr = MPI_Bcast(buffer + total_sent_count,
2139 current_count,
2140 mpi_type_id_for_type<decltype(*buffer)>,
2141 root,
2142 comm);
2143 AssertThrowMPI(ierr);
2144 total_sent_count += current_count;
2145 }
2146# endif
2147 }
2148
2149
2150
2151 template <typename T>
2152 T
2153 broadcast(const MPI_Comm comm,
2154 const T &object_to_send,
2155 const unsigned int root_process)
2156 {
2157# ifndef DEAL_II_WITH_MPI
2158 (void)comm;
2159 (void)root_process;
2160 return object_to_send;
2161# else
2162 const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2163 AssertIndexRange(root_process, n_procs);
2164 (void)n_procs;
2165
2166 if constexpr (is_mpi_type<T>)
2167 {
2168 T object = object_to_send;
2169 int ierr =
2170 MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2171 AssertThrowMPI(ierr);
2172
2173 return object;
2174 }
2175 else
2176 {
2177 std::vector<char> buffer;
2178 std::size_t buffer_size = numbers::invalid_size_type;
2179
2180 // On the root process, pack the data and determine what the
2181 // buffer size needs to be.
2182 if (this_mpi_process(comm) == root_process)
2183 {
2184 buffer = Utilities::pack(object_to_send, false);
2185 buffer_size = buffer.size();
2186 }
2187
2188 // Exchange the size of buffer
2189 int ierr = MPI_Bcast(&buffer_size,
2190 1,
2191 mpi_type_id_for_type<decltype(buffer_size)>,
2192 root_process,
2193 comm);
2194 AssertThrowMPI(ierr);
2195
2196 // If not on the root process, correctly size the buffer to
2197 // receive the data, then do exactly that.
2198 if (this_mpi_process(comm) != root_process)
2199 buffer.resize(buffer_size);
2200
2201 broadcast(buffer.data(), buffer_size, root_process, comm);
2202
2203 if (Utilities::MPI::this_mpi_process(comm) == root_process)
2204 return object_to_send;
2205 else
2206 return Utilities::unpack<T>(buffer, false);
2207 }
2208# endif
2209 }
2210
2211
2212
2213 template <typename T>
2214 Future<void>
2215 isend(const T &object,
2216 MPI_Comm communicator,
2217 const unsigned int target_rank,
2218 const unsigned int mpi_tag)
2219 {
2220# ifndef DEAL_II_WITH_MPI
2221 Assert(false, ExcNeedsMPI());
2222 (void)object;
2223 (void)communicator;
2224 (void)target_rank;
2225 (void)mpi_tag;
2226 return Future<void>([]() {}, []() {});
2227# else
2228 // Create a pointer to a send buffer into which we pack the object
2229 // to be sent. The buffer will be released by the Future object once
2230 // the send has been verified to have succeeded.
2231 //
2232 // Conceptually, we would like this send buffer to be a
2233 // std::unique_ptr object whose ownership is later handed over
2234 // to the cleanup function. That has the disadvantage that the
2235 // cleanup object is a non-copyable lambda capture, leading to
2236 // awkward semantics. Instead, we use a std::shared_ptr; we move
2237 // this shared pointer into the cleanup function, which means
2238 // that there is exactly one shared pointer who owns the buffer
2239 // at any given time, though the latter is not an important
2240 // optimization.
2241 std::shared_ptr<std::vector<char>> send_buffer =
2242 std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2243
2244 // Now start the send, and store the result in a request object that
2245 // we can then wait for later:
2246 MPI_Request request;
2247 const int ierr =
2248 MPI_Isend(send_buffer->data(),
2249 send_buffer->size(),
2250 mpi_type_id_for_type<decltype(*send_buffer->data())>,
2251 target_rank,
2252 mpi_tag,
2253 communicator,
2254 &request);
2255 AssertThrowMPI(ierr);
2256
2257 // Then return a std::future-like object that has a wait()
2258 // function one can use to wait for the communication to finish,
2259 // and that has a cleanup function to be called at some point
2260 // after that makes sure the send buffer gets deallocated. This
2261 // cleanup function takes over ownership of the send buffer.
2262 //
2263 // Note that the body of the lambda function of the clean-up
2264 // function could be left empty. If that were so, once the
2265 // lambda function object goes out of scope, the 'send_buffer'
2266 // member of the closure object goes out of scope as well and so
2267 // the send_buffer is destroyed. But we may want to release the
2268 // buffer itself as early as possible, and so we clear the
2269 // buffer when the Future::get() function is called.
2270 auto wait = [request]() mutable {
2271 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2272 AssertThrowMPI(ierr);
2273 };
2274 auto cleanup = [send_buffer = std::move(send_buffer)]() {
2275 send_buffer->clear();
2276 };
2277 return Future<void>(wait, cleanup);
2278# endif
2279 }
2280
2281
2282
2283 template <typename T>
2284 Future<T>
2285 irecv(MPI_Comm communicator,
2286 const unsigned int source_rank,
2287 const unsigned int mpi_tag)
2288 {
2289# ifndef DEAL_II_WITH_MPI
2290 Assert(false, ExcNeedsMPI());
2291 (void)communicator;
2292 (void)source_rank;
2293 (void)mpi_tag;
2294 return Future<T>([]() {}, []() { return T{}; });
2295# else
2296 // Use a 'probe' operation for the 'wait' operation of the
2297 // Future this function returns. It will trigger whenever we get
2298 // the incoming message. Later, once we have received the message, we
2299 // can query its size and allocate a receiver buffer.
2300 //
2301 // Since we may be waiting for multiple messages from the same
2302 // incoming process (with possibly the same tag -- we can't
2303 // know), we must make sure that the 'probe' operation we have
2304 // here (and which we use to determine the buffer size) matches
2305 // the 'recv' operation with which we actually get the data
2306 // later on. This is exactly what the 'MPI_Mprobe' function and
2307 // its 'I'mmediate variant are there for, coupled with the
2308 // 'MPI_Mrecv' call that we put into the clean-up function
2309 // below.
2310 std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2311 std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2312
2313 auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2314 const int ierr = MPI_Mprobe(
2315 source_rank, mpi_tag, communicator, message.get(), status.get());
2316 AssertThrowMPI(ierr);
2317 };
2318
2319
2320 // Now also define the function that actually gets the data:
2321 auto get = [status, message]() {
2322 int number_amount;
2323 int ierr;
2324 ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2325 AssertThrowMPI(ierr);
2326
2327 std::vector<char> receive_buffer(number_amount);
2328
2329 // Then actually get the data, using the matching MPI_Mrecv to the above
2330 // MPI_Mprobe:
2331 ierr = MPI_Mrecv(receive_buffer.data(),
2332 number_amount,
2333 mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2334 message.get(),
2335 status.get());
2336 AssertThrowMPI(ierr);
2337
2338 // Return the unpacked object:
2339 return Utilities::unpack<T>(receive_buffer, false);
2340 };
2341
2342 return Future<T>(wait, get);
2343# endif
2344 }
2345
2346
2347
2348# ifdef DEAL_II_WITH_MPI
2349 template <class Iterator, typename Number>
2350 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2351 mean_and_standard_deviation(const Iterator begin,
2352 const Iterator end,
2353 const MPI_Comm comm)
2354 {
2355 // Below we use a simple and straightforward implementation. More elaborate
2356 // options are:
2357 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2358 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2359 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2360 using Std = typename numbers::NumberTraits<Number>::real_type;
2361 const Number sum = std::accumulate(begin, end, Number(0.));
2362
2363 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2364 Assert(size > 0, ExcDivideByZero());
2365 const Number mean =
2366 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2367 Std sq_sum = 0.;
2368 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
2369 sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2370 });
2371 sq_sum = Utilities::MPI::sum(sq_sum, comm);
2372 return std::make_pair(mean,
2373 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2374 }
2375# endif
2376
2377#endif
2378 } // end of namespace MPI
2379} // end of namespace Utilities
2380
2381
2382DEAL_II_NAMESPACE_CLOSE
2383
2384#endif