mpi_consensus_algorithms.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2020 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_consensus_algorithm_h
17 #define dealii_mpi_consensus_algorithm_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/mpi.h>
22 #include <deal.II/base/mpi.templates.h>
23 #include <deal.II/base/mpi_tags.h>
24 
26 DEAL_II_NAMESPACE_OPEN
27 
28 namespace Utilities
29 {
30  namespace MPI
31  {
132  namespace ConsensusAlgorithms
133  {
158  template <typename RequestType, typename AnswerType>
159  class Process
160  {
161  public:
166  virtual ~Process() = default;
167 
174  virtual std::vector<unsigned int>
175  compute_targets() = 0;
176 
186  virtual void
187  create_request(const unsigned int other_rank, RequestType &send_buffer);
188 
201  virtual void
202  answer_request(const unsigned int other_rank,
203  const RequestType &buffer_recv,
204  AnswerType & request_buffer);
205 
213  virtual void
214  read_answer(const unsigned int other_rank,
215  const AnswerType & recv_buffer);
216  };
217 
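 // As an illustration of the interface above (a sketch only; the payload
 // type std::vector<int> and the class name EchoProcess are placeholders
 // chosen for this example, not part of the library):
 //
 //   class EchoProcess : public Process<std::vector<int>, std::vector<int>>
 //   {
 //   public:
 //     std::vector<unsigned int>
 //     compute_targets() override
 //     {
 //       return {/* ranks this process wants to send a request to */};
 //     }
 //
 //     void
 //     create_request(const unsigned int other_rank,
 //                    std::vector<int> &send_buffer) override
 //     {
 //       send_buffer = {1, 2, 3}; // payload destined for 'other_rank'
 //     }
 //
 //     void
 //     answer_request(const unsigned int      other_rank,
 //                    const std::vector<int> &buffer_recv,
 //                    std::vector<int> &      request_buffer) override
 //     {
 //       request_buffer = buffer_recv; // echo the request back as the answer
 //     }
 //
 //     void
 //     read_answer(const unsigned int      other_rank,
 //                 const std::vector<int> &recv_buffer) override
 //     {
 //       // consume the answer received from 'other_rank'
 //     }
 //   };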
218 
219 
233  template <typename RequestType, typename AnswerType>
234  class Interface
235  {
236  public:
241  Interface();
255  Interface(Process<RequestType, AnswerType> &process,
256  const MPI_Comm comm);
257 
262  virtual ~Interface() = default;
263 
274  std::vector<unsigned int>
275  run();
276 
285  std::vector<unsigned int>
286  run(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
287 
311  virtual std::vector<unsigned int>
312  run(
313  const std::vector<unsigned int> & targets,
314  const std::function<RequestType(const unsigned int)> &create_request,
315  const std::function<AnswerType(const unsigned int,
316  const RequestType &)> &answer_request,
317  const std::function<void(const unsigned int, const AnswerType &)>
318  & process_answer,
319  const MPI_Comm comm) = 0;
320 
321  private:
331  Process<RequestType, AnswerType> *process;
339  MPI_Comm comm;
340  };
341 
342 
356  template <typename RequestType, typename AnswerType>
357  class NBX : public Interface<RequestType, AnswerType>
358  {
359  public:
363  NBX() = default;
364 
379  NBX(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
383  virtual ~NBX() = default;
384 
385  // Import the declarations from the base class.
386  using Interface<RequestType, AnswerType>::run;
387 
391  virtual std::vector<unsigned int>
392  run(
393  const std::vector<unsigned int> & targets,
394  const std::function<RequestType(const unsigned int)> &create_request,
395  const std::function<AnswerType(const unsigned int,
396  const RequestType &)> &answer_request,
397  const std::function<void(const unsigned int, const AnswerType &)>
398  & process_answer,
399  const MPI_Comm comm) override;
400 
401  private:
402 #ifdef DEAL_II_WITH_MPI
406  std::vector<std::vector<char>> send_buffers;
407 
411  std::vector<MPI_Request> send_requests;
412 
420  std::vector<std::unique_ptr<std::vector<char>>> request_buffers;
421 
425  std::vector<std::unique_ptr<MPI_Request>> request_requests;
426 
430  unsigned int n_outstanding_answers;
431 
432  // request for barrier
433  MPI_Request barrier_request;
434 #endif
435 
439  std::set<unsigned int> requesting_processes;
440 
446  bool
447  all_locally_originated_receives_are_completed(
448  const std::function<void(const unsigned int, const AnswerType &)>
449  & process_answer,
450  const MPI_Comm comm);
451 
456  void
457  signal_finish(const MPI_Comm comm);
458 
464  bool
465  all_remotely_originated_receives_are_completed();
466 
472  void
473  maybe_answer_one_request(
474  const std::function<AnswerType(const unsigned int,
475  const RequestType &)> &answer_request,
476  const MPI_Comm comm);
477 
482  void
483  start_communication(
484  const std::vector<unsigned int> & targets,
485  const std::function<RequestType(const unsigned int)> &create_request,
486  const MPI_Comm comm);
487 
492  void
493  clean_up_and_end_communication(const MPI_Comm comm);
494  };
495 
496 
541  template <typename RequestType, typename AnswerType>
542  std::vector<unsigned int>
543  nbx(const std::vector<unsigned int> & targets,
544  const std::function<RequestType(const unsigned int)> &create_request,
545  const std::function<AnswerType(const unsigned int,
546  const RequestType &)> &answer_request,
547  const std::function<void(const unsigned int, const AnswerType &)>
548  & process_answer,
549  const MPI_Comm comm);
550 
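 // An illustrative call of the function declared above (a sketch: the
 // payload type std::vector<double> and the variables 'targets' and
 // 'mpi_communicator' are placeholders; requests and answers are packed
 // into MPI messages internally via Utilities::pack/unpack):
 //
 //   const std::vector<unsigned int> requesting_ranks =
 //     Utilities::MPI::ConsensusAlgorithms::nbx<std::vector<double>,
 //                                              std::vector<double>>(
 //       targets,
 //       /* create_request: */
 //       [](const unsigned int target) {
 //         return std::vector<double>{/* data for 'target' */};
 //       },
 //       /* answer_request: */
 //       [](const unsigned int source, const std::vector<double> &request) {
 //         return std::vector<double>{/* answer for 'source' */};
 //       },
 //       /* process_answer: */
 //       [](const unsigned int target, const std::vector<double> &answer) {
 //         /* use the answer received from 'target' */
 //       },
 //       mpi_communicator);
 //
 //   // 'requesting_ranks' now lists the ranks that sent requests to the
 //   // calling process.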
588  template <typename RequestType>
589  std::vector<unsigned int>
590  nbx(const std::vector<unsigned int> & targets,
591  const std::function<RequestType(const unsigned int)> &create_request,
592  const std::function<void(const unsigned int, const RequestType &)>
593  & process_request,
594  const MPI_Comm comm);
595 
621  template <typename RequestType, typename AnswerType>
622  class PEX : public Interface<RequestType, AnswerType>
623  {
624  public:
628  PEX() = default;
629 
630 
645  PEX(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
649  virtual ~PEX() = default;
650 
651  // Import the declarations from the base class.
652  using Interface<RequestType, AnswerType>::run;
653 
657  virtual std::vector<unsigned int>
658  run(
659  const std::vector<unsigned int> & targets,
660  const std::function<RequestType(const unsigned int)> &create_request,
661  const std::function<AnswerType(const unsigned int,
662  const RequestType &)> &answer_request,
663  const std::function<void(const unsigned int, const AnswerType &)>
664  & process_answer,
665  const MPI_Comm comm) override;
666 
667  private:
668 #ifdef DEAL_II_WITH_MPI
672  std::vector<std::vector<char>> send_buffers;
673 
677  std::vector<std::vector<char>> recv_buffers;
678 
682  std::vector<MPI_Request> send_request_requests;
683 
687  std::vector<std::vector<char>> requests_buffers;
688 
692  std::vector<MPI_Request> send_answer_requests;
693 #endif
697  std::set<unsigned int> requesting_processes;
698 
703  unsigned int
704  start_communication(
705  const std::vector<unsigned int> & targets,
706  const std::function<RequestType(const unsigned int)> &create_request,
707  const MPI_Comm comm);
708 
713  void
714  answer_one_request(
715  const unsigned int index,
716  const std::function<AnswerType(const unsigned int,
717  const RequestType &)> &answer_request,
718  const MPI_Comm comm);
719 
724  void
725  process_incoming_answers(
726  const unsigned int n_targets,
727  const std::function<void(const unsigned int, const AnswerType &)>
728  & process_answer,
729  const MPI_Comm comm);
730 
735  void
736  clean_up_and_end_communication();
737  };
738 
739 
740 
797  template <typename RequestType, typename AnswerType>
798  std::vector<unsigned int>
799  pex(const std::vector<unsigned int> & targets,
800  const std::function<RequestType(const unsigned int)> &create_request,
801  const std::function<AnswerType(const unsigned int,
802  const RequestType &)> &answer_request,
803  const std::function<void(const unsigned int, const AnswerType &)>
804  & process_answer,
805  const MPI_Comm comm);
806 
844  template <typename RequestType>
845  std::vector<unsigned int>
846  pex(const std::vector<unsigned int> & targets,
847  const std::function<RequestType(const unsigned int)> &create_request,
848  const std::function<void(const unsigned int, const RequestType &)>
849  & process_request,
850  const MPI_Comm comm);
851 
852 
857  template <typename RequestType, typename AnswerType>
858  class Serial : public Interface<RequestType, AnswerType>
859  {
860  public:
864  Serial() = default;
865 
880  Serial(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
881  // Import the declarations from the base class.
882  using Interface<RequestType, AnswerType>::run;
883 
887  virtual std::vector<unsigned int>
888  run(
889  const std::vector<unsigned int> & targets,
890  const std::function<RequestType(const unsigned int)> &create_request,
891  const std::function<AnswerType(const unsigned int,
892  const RequestType &)> &answer_request,
893  const std::function<void(const unsigned int, const AnswerType &)>
894  & process_answer,
895  const MPI_Comm comm) override;
896  };
897 
898 
899 
932  template <typename RequestType, typename AnswerType>
933  std::vector<unsigned int>
934  serial(
935  const std::vector<unsigned int> & targets,
936  const std::function<RequestType(const unsigned int)> &create_request,
937  const std::function<AnswerType(const unsigned int, const RequestType &)>
938  &answer_request,
939  const std::function<void(const unsigned int, const AnswerType &)>
940  & process_answer,
941  const MPI_Comm comm);
942 
972  template <typename RequestType>
973  std::vector<unsigned int>
974  serial(
975  const std::vector<unsigned int> & targets,
976  const std::function<RequestType(const unsigned int)> &create_request,
977  const std::function<void(const unsigned int, const RequestType &)>
978  & process_request,
979  const MPI_Comm comm);
980 
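 // A minimal illustrative call of the first of the two overloads declared
 // above, on a single-process communicator (the payload type 'int' and the
 // literal data are placeholders for this sketch):
 //
 //   const std::vector<unsigned int> requesting_ranks =
 //     Utilities::MPI::ConsensusAlgorithms::serial<int, int>(
 //       /* targets:        */ {0},
 //       /* create_request: */ [](const unsigned int) { return 42; },
 //       /* answer_request: */
 //       [](const unsigned int, const int &request) { return request; },
 //       /* process_answer: */
 //       [](const unsigned int, const int &answer) { (void)answer; },
 //       MPI_COMM_SELF);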
981 
982 
995  template <typename RequestType, typename AnswerType>
996  class Selector : public Interface<RequestType, AnswerType>
997  {
998  public:
1002  Selector() = default;
1003 
1017  Selector(Process<RequestType, AnswerType> &process,
1018  const MPI_Comm comm);
1019 
1023  virtual ~Selector() = default;
1024 
1025  // Import the declarations from the base class.
1026  using Interface<RequestType, AnswerType>::run;
1027 
1033  virtual std::vector<unsigned int>
1034  run(
1035  const std::vector<unsigned int> & targets,
1036  const std::function<RequestType(const unsigned int)> &create_request,
1037  const std::function<AnswerType(const unsigned int,
1038  const RequestType &)> &answer_request,
1039  const std::function<void(const unsigned int, const AnswerType &)>
1040  & process_answer,
1041  const MPI_Comm comm) override;
1042 
1043  private:
1044  // Pointer to the actual ConsensusAlgorithms::Interface implementation.
1045  std::shared_ptr<Interface<RequestType, AnswerType>> consensus_algo;
1046  };
1047 
1048 
1049 
1094  template <typename RequestType, typename AnswerType>
1095  std::vector<unsigned int>
1096  selector(
1097  const std::vector<unsigned int> & targets,
1098  const std::function<RequestType(const unsigned int)> &create_request,
1099  const std::function<AnswerType(const unsigned int, const RequestType &)>
1100  &answer_request,
1101  const std::function<void(const unsigned int, const AnswerType &)>
1102  & process_answer,
1103  const MPI_Comm comm);
1104 
1142  template <typename RequestType>
1143  std::vector<unsigned int>
1144  selector(
1145  const std::vector<unsigned int> & targets,
1146  const std::function<RequestType(const unsigned int)> &create_request,
1147  const std::function<void(const unsigned int, const RequestType &)>
1148  & process_request,
1149  const MPI_Comm comm);
1150 
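 // For the common case in which requests need no answer, the overload just
 // declared can be used directly. An illustrative sketch (the payload type
 // and the variables 'targets' and 'mpi_communicator' are placeholders):
 //
 //   const std::vector<unsigned int> requesting_ranks =
 //     Utilities::MPI::ConsensusAlgorithms::selector<std::vector<unsigned int>>(
 //       targets,
 //       /* create_request: */
 //       [](const unsigned int target) {
 //         return std::vector<unsigned int>{/* indices requested from 'target' */};
 //       },
 //       /* process_request: */
 //       [](const unsigned int source, const std::vector<unsigned int> &request) {
 //         /* record what 'source' asked this process for */
 //       },
 //       mpi_communicator);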
1151 
1158  template <typename RequestType, typename AnswerType>
1159  class AnonymousProcess
1160  : public Process<RequestType, AnswerType>
1161  {
1162  public:
1172  AnonymousProcess(
1173  const std::function<std::vector<unsigned int>()>
1174  &function_compute_targets,
1175  const std::function<void(const unsigned int, RequestType &)>
1176  & function_create_request = {},
1177  const std::function<void(const unsigned int,
1178  const RequestType &,
1179  AnswerType &)> &function_answer_request = {},
1180  const std::function<void(const unsigned int, const AnswerType &)>
1181  &function_read_answer = {});
1182 
1186  std::vector<unsigned int>
1187  compute_targets() override;
1188 
1192  void
1193  create_request(const unsigned int other_rank,
1194  RequestType & send_buffer) override;
1195 
1199  void
1200  answer_request(const unsigned int other_rank,
1201  const RequestType &buffer_recv,
1202  AnswerType & request_buffer) override;
1203 
1207  void
1208  read_answer(const unsigned int other_rank,
1209  const AnswerType & recv_buffer) override;
1210 
1211  private:
1212  const std::function<std::vector<unsigned int>()>
1213  function_compute_targets;
1214  const std::function<void(const int, RequestType &)>
1215  function_create_request;
1216  const std::function<
1217  void(const unsigned int, const RequestType &, AnswerType &)>
1218  function_answer_request;
1219  const std::function<void(const int, const AnswerType &)>
1220  function_read_answer;
1221  };
1222 
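 // An illustrative way of using the helper class above together with the
 // run(process, comm) overload (a sketch; the lambdas and the variable
 // 'mpi_communicator' are placeholders, and the free functions nbx(),
 // pex(), and selector() declared above offer the same functionality
 // without a helper class):
 //
 //   AnonymousProcess<std::vector<char>, std::vector<char>> process(
 //     /* function_compute_targets: */
 //     []() { return std::vector<unsigned int>{/* target ranks */}; },
 //     /* function_create_request: */
 //     [](const unsigned int target, std::vector<char> &send_buffer) {
 //       /* fill 'send_buffer' with the request for 'target' */
 //     });
 //
 //   NBX<std::vector<char>, std::vector<char>>().run(process, mpi_communicator);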
1223 
1224 #ifndef DOXYGEN
1225  // Implementation of the functions in this namespace.
1226 
1227  template <typename RequestType, typename AnswerType>
1228  std::vector<unsigned int>
1229  nbx(const std::vector<unsigned int> & targets,
1230  const std::function<RequestType(const unsigned int)> &create_request,
1231  const std::function<AnswerType(const unsigned int,
1232  const RequestType &)> &answer_request,
1233  const std::function<void(const unsigned int, const AnswerType &)>
1234  & process_answer,
1235  const MPI_Comm comm)
1236  {
1237  return NBX<RequestType, AnswerType>().run(
1238  targets, create_request, answer_request, process_answer, comm);
1239  }
1240 
1241 
1242 
1243  template <typename RequestType>
1244  std::vector<unsigned int>
1245  nbx(const std::vector<unsigned int> & targets,
1246  const std::function<RequestType(const unsigned int)> &create_request,
1247  const std::function<void(const unsigned int, const RequestType &)>
1248  & process_request,
1249  const MPI_Comm comm)
1250  {
1251  // TODO: For the moment, simply implement this special case by
1252  // forwarding to the other function with rewritten function
1253  // objects and using an empty type as answer type. This way,
1254  // we have the interface in place and can provide a more
1255  // efficient implementation later on.
1256  using EmptyType = std::tuple<>;
1257 
1258  return nbx<RequestType, EmptyType>(
1259  targets,
1260  create_request,
1261  // answer_request:
1262  [&process_request](const unsigned int source_rank,
1263  const RequestType &request) -> EmptyType {
1264  process_request(source_rank, request);
1265  // Return something. What it is is arbitrary here, except that
1266  // we want it to be as small an object as possible. Using
1267  // std::tuple<> is interpreted as an empty object that is packed
1268  // down to a zero-length char array.
1269  return {};
1270  },
1271  // process_answer:
1272  [](const unsigned int /*target_rank */,
1273  const EmptyType & /*answer*/) {},
1274  comm);
1275  }
1276 
1277 
1278 
1279  template <typename RequestType, typename AnswerType>
1280  std::vector<unsigned int>
1281  pex(const std::vector<unsigned int> & targets,
1282  const std::function<RequestType(const unsigned int)> &create_request,
1283  const std::function<AnswerType(const unsigned int,
1284  const RequestType &)> &answer_request,
1285  const std::function<void(const unsigned int, const AnswerType &)>
1286  & process_answer,
1287  const MPI_Comm comm)
1288  {
1289  return PEX<RequestType, AnswerType>().run(
1290  targets, create_request, answer_request, process_answer, comm);
1291  }
1292 
1293 
1294 
1295  template <typename RequestType>
1296  std::vector<unsigned int>
1297  pex(const std::vector<unsigned int> & targets,
1298  const std::function<RequestType(const unsigned int)> &create_request,
1299  const std::function<void(const unsigned int, const RequestType &)>
1300  & process_request,
1301  const MPI_Comm comm)
1302  {
1303  // TODO: For the moment, simply implement this special case by
1304  // forwarding to the other function with rewritten function
1305  // objects and using an empty type as answer type. This way,
1306  // we have the interface in place and can provide a more
1307  // efficient implementation later on.
1308  using EmptyType = std::tuple<>;
1309 
1310  return pex<RequestType, EmptyType>(
1311  targets,
1312  create_request,
1313  // answer_request:
1314  [&process_request](const unsigned int source_rank,
1315  const RequestType &request) -> EmptyType {
1316  process_request(source_rank, request);
1317  // Return something. What it is is arbitrary here, except that
1318  // we want it to be as small an object as possible. Using
1319  // std::tuple<> is interpreted as an empty object that is packed
1320  // down to a zero-length char array.
1321  return {};
1322  },
1323  // process_answer:
1324  [](const unsigned int /*target_rank */,
1325  const EmptyType & /*answer*/) {},
1326  comm);
1327  }
1328 
1329 
1330 
1331  template <typename RequestType, typename AnswerType>
1332  std::vector<unsigned int>
1333  serial(
1334  const std::vector<unsigned int> & targets,
1335  const std::function<RequestType(const unsigned int)> &create_request,
1336  const std::function<AnswerType(const unsigned int, const RequestType &)>
1337  &answer_request,
1338  const std::function<void(const unsigned int, const AnswerType &)>
1339  & process_answer,
1340  const MPI_Comm comm)
1341  {
1342  return Serial<RequestType, AnswerType>().run(
1343  targets, create_request, answer_request, process_answer, comm);
1344  }
1345 
1346 
1347 
1348  template <typename RequestType>
1349  std::vector<unsigned int>
1350  serial(
1351  const std::vector<unsigned int> & targets,
1352  const std::function<RequestType(const unsigned int)> &create_request,
1353  const std::function<void(const unsigned int, const RequestType &)>
1354  & process_request,
1355  const MPI_Comm comm)
1356  {
1357  // TODO: For the moment, simply implement this special case by
1358  // forwarding to the other function with rewritten function
1359  // objects and using an empty type as answer type. This way,
1360  // we have the interface in place and can provide a more
1361  // efficient implementation later on.
1362  using EmptyType = std::tuple<>;
1363 
1364  return serial<RequestType, EmptyType>(
1365  targets,
1366  create_request,
1367  // answer_request:
1368  [&process_request](const unsigned int source_rank,
1369  const RequestType &request) -> EmptyType {
1370  process_request(source_rank, request);
1371  // Return something. What it is is arbitrary here, except that
1372  // we want it to be as small an object as possible. Using
1373  // std::tuple<> is interpreted as an empty object that is packed
1374  // down to a zero-length char array.
1375  return {};
1376  },
1377  // process_answer:
1378  [](const unsigned int /*target_rank */,
1379  const EmptyType & /*answer*/) {},
1380  comm);
1381  }
1382 
1383 
1384 
1385  template <typename RequestType, typename AnswerType>
1386  std::vector<unsigned int>
1387  selector(
1388  const std::vector<unsigned int> & targets,
1389  const std::function<RequestType(const unsigned int)> &create_request,
1390  const std::function<AnswerType(const unsigned int, const RequestType &)>
1391  &answer_request,
1392  const std::function<void(const unsigned int, const AnswerType &)>
1393  & process_answer,
1394  const MPI_Comm comm)
1395  {
1396  return Selector<RequestType, AnswerType>().run(
1397  targets, create_request, answer_request, process_answer, comm);
1398  }
1399 
1400 
1401 
1402  template <typename RequestType>
1403  std::vector<unsigned int>
1404  selector(
1405  const std::vector<unsigned int> & targets,
1406  const std::function<RequestType(const unsigned int)> &create_request,
1407  const std::function<void(const unsigned int, const RequestType &)>
1408  & process_request,
1409  const MPI_Comm comm)
1410  {
1411  // TODO: For the moment, simply implement this special case by
1412  // forwarding to the other function with rewritten function
1413  // objects and using an empty type as answer type. This way,
1414  // we have the interface in place and can provide a more
1415  // efficient implementation later on.
1416  using EmptyType = std::tuple<>;
1417 
1418  return selector<RequestType, EmptyType>(
1419  targets,
1420  create_request,
1421  // answer_request:
1422  [&process_request](const unsigned int source_rank,
1423  const RequestType &request) -> EmptyType {
1424  process_request(source_rank, request);
1425  // Return something. What it is is arbitrary here, except that
1426  // we want it to be as small an object as possible. Using
1427  // std::tuple<> is interpreted as an empty object that is packed
1428  // down to a zero-length char array.
1429  return {};
1430  },
1431  // process_answer:
1432  [](const unsigned int /*target_rank */,
1433  const EmptyType & /*answer*/) {},
1434  comm);
1435  }
1436 
1437 
1438 
1439  template <typename RequestType, typename AnswerType>
1440  AnonymousProcess<RequestType, AnswerType>::AnonymousProcess(
1441  const std::function<std::vector<unsigned int>()>
1442  &function_compute_targets,
1443  const std::function<void(const unsigned int, RequestType &)>
1444  & function_create_request,
1445  const std::function<void(const unsigned int,
1446  const RequestType &,
1447  AnswerType &)> &function_answer_request,
1448  const std::function<void(const unsigned int, const AnswerType &)>
1449  &function_read_answer)
1450  : function_compute_targets(function_compute_targets)
1451  , function_create_request(function_create_request)
1452  , function_answer_request(function_answer_request)
1453  , function_read_answer(function_read_answer)
1454  {}
1455 
1456 
1457 
1458  template <typename RequestType, typename AnswerType>
1459  std::vector<unsigned int>
1460  AnonymousProcess<RequestType, AnswerType>::compute_targets()
1461  {
1462  return function_compute_targets();
1463  }
1464 
1465 
1466 
1467  template <typename RequestType, typename AnswerType>
1468  void
1469  AnonymousProcess<RequestType, AnswerType>::create_request(
1470  const unsigned int other_rank,
1471  RequestType & send_buffer)
1472  {
1473  if (function_create_request)
1474  function_create_request(other_rank, send_buffer);
1475  }
1476 
1477 
1478 
1479  template <typename RequestType, typename AnswerType>
1480  void
1481  AnonymousProcess<RequestType, AnswerType>::answer_request(
1482  const unsigned int other_rank,
1483  const RequestType &buffer_recv,
1484  AnswerType & request_buffer)
1485  {
1486  if (function_answer_request)
1487  function_answer_request(other_rank, buffer_recv, request_buffer);
1488  }
1489 
1490 
1491 
1492  template <typename RequestType, typename AnswerType>
1493  void
1494  AnonymousProcess<RequestType, AnswerType>::read_answer(
1495  const unsigned int other_rank,
1496  const AnswerType & recv_buffer)
1497  {
1498  if (function_read_answer)
1499  function_read_answer(other_rank, recv_buffer);
1500  }
1501 
1502 #endif
1503 
1504 
1505  } // namespace ConsensusAlgorithms
1506  } // end of namespace MPI
1507 } // end of namespace Utilities
1508 
1509 
1510 
1511 #ifndef DOXYGEN
1512 
1513 // ----------------- Implementation of template functions
1514 
1515 namespace Utilities
1516 {
1517  namespace MPI
1518  {
1519  namespace ConsensusAlgorithms
1520  {
1521  namespace
1522  {
1538 # ifndef DEAL_II_MSVC
1539  [[gnu::unused]]
1540 # endif
1541  inline bool
1542  has_unique_elements(const std::vector<unsigned int> &targets)
1543  {
1544  std::vector<unsigned int> my_destinations = targets;
1545  std::sort(my_destinations.begin(), my_destinations.end());
1546  return (std::adjacent_find(my_destinations.begin(),
1547  my_destinations.end()) ==
1548  my_destinations.end());
1549  }
1550 
1551 
1552 
1556  inline void
1557  handle_exception(std::exception_ptr &&exception, const MPI_Comm comm)
1558  {
1559 # ifdef DEAL_II_WITH_MPI
1560  // an exception within a ConsensusAlgorithm likely causes an
1561  // MPI deadlock. Abort with a reasonable error message instead.
1562  try
1563  {
1564  std::rethrow_exception(exception);
1565  }
1566  catch (ExceptionBase &exc)
1567  {
1568  // report name of the deal.II exception:
1569  std::cerr
1570  << std::endl
1571  << std::endl
1572  << "----------------------------------------------------"
1573  << std::endl;
1574  std::cerr
1575  << "Exception '" << exc.get_exc_name() << "'"
1576  << " on rank " << Utilities::MPI::this_mpi_process(comm)
1577  << " on processing: " << std::endl
1578  << exc.what() << std::endl
1579  << "Aborting!" << std::endl
1580  << "----------------------------------------------------"
1581  << std::endl;
1582 
1583  // Then bring down the whole MPI world
1584  MPI_Abort(comm, 255);
1585  }
1586  catch (std::exception &exc)
1587  {
1588  std::cerr
1589  << std::endl
1590  << std::endl
1591  << "----------------------------------------------------"
1592  << std::endl;
1593  std::cerr
1594  << "Exception within ConsensusAlgorithm"
1595  << " on rank " << Utilities::MPI::this_mpi_process(comm)
1596  << " on processing: " << std::endl
1597  << exc.what() << std::endl
1598  << "Aborting!" << std::endl
1599  << "----------------------------------------------------"
1600  << std::endl;
1601 
1602  // Then bring down the whole MPI world
1603  MPI_Abort(comm, 255);
1604  }
1605  catch (...)
1606  {
1607  std::cerr
1608  << std::endl
1609  << std::endl
1610  << "----------------------------------------------------"
1611  << std::endl;
1612  std::cerr
1613  << "Unknown exception within ConsensusAlgorithm!" << std::endl
1614  << "Aborting!" << std::endl
1615  << "----------------------------------------------------"
1616  << std::endl;
1617 
1618  // Then bring down the whole MPI world
1619  MPI_Abort(comm, 255);
1620  }
1621 # else
1622  (void)comm;
1623 
1624  // No need to be concerned about deadlocks without MPI.
1625  // Defer to exception handling further up the callstack.
1626  std::rethrow_exception(exception);
1627 # endif
1628  }
1629  } // namespace
1630 
1631 
1632 
1633  template <typename RequestType, typename AnswerType>
1634  void
1635  Process<RequestType, AnswerType>::answer_request(const unsigned int,
1636  const RequestType &,
1637  AnswerType &)
1638  {
1639  // nothing to do
1640  }
1641 
1642 
1643 
1644  template <typename RequestType, typename AnswerType>
1645  void
1646  Process<RequestType, AnswerType>::create_request(const unsigned int,
1647  RequestType &)
1648  {
1649  // nothing to do
1650  }
1651 
1652 
1653 
1654  template <typename RequestType, typename AnswerType>
1655  void
1656  Process<RequestType, AnswerType>::read_answer(const unsigned int,
1657  const AnswerType &)
1658  {
1659  // nothing to do
1660  }
1661 
1662 
1663 
1664  template <typename RequestType, typename AnswerType>
1665  Interface<RequestType, AnswerType>::Interface(
1666  Process<RequestType, AnswerType> &process,
1667  const MPI_Comm comm)
1668  : process(&process)
1669  , comm(comm)
1670  {}
1671 
1672 
1673 
1674  template <typename RequestType, typename AnswerType>
1675  Interface<RequestType, AnswerType>::Interface()
1676  : process(nullptr)
1677  , comm(MPI_COMM_NULL)
1678  {}
1679 
1680 
1681 
1682  template <typename RequestType, typename AnswerType>
1683  std::vector<unsigned int>
1684  Interface<RequestType, AnswerType>::run()
1685  {
1686  Assert(process != nullptr,
1687  ExcMessage("This function can only be called if the "
1688  "deprecated non-default constructor of this class "
1689  "has previously been called to set the Process "
1690  "object and a communicator."));
1691  return run(*process, comm);
1692  }
1693 
1694 
1695 
1696  template <typename RequestType, typename AnswerType>
1697  std::vector<unsigned int>
1698  Interface<RequestType, AnswerType>::run(
1699  Process<RequestType, AnswerType> &process,
1700  const MPI_Comm comm)
1701  {
1702  // Unpack the 'process' object and call the function that takes
1703  // function objects for all operations.
1704  return run(
1705  process.compute_targets(),
1706  /* create_request: */
1707  [&process](const unsigned int target) {
1708  RequestType request;
1709  process.create_request(target, request);
1710  return request;
1711  },
1712  /* answer_request: */
1713  [&process](const unsigned int source, const RequestType &request) {
1714  AnswerType answer;
1715  process.answer_request(source, request, answer);
1716  return answer;
1717  },
1718  /* process_answer: */
1719  [&process](const unsigned int target, const AnswerType &answer) {
1720  process.read_answer(target, answer);
1721  },
1722  comm);
1723  }
1724 
1725 
1726 
1727  template <typename RequestType, typename AnswerType>
1728  NBX<RequestType, AnswerType>::NBX(
1729  Process<RequestType, AnswerType> &process,
1730  const MPI_Comm comm)
1731  : Interface<RequestType, AnswerType>(process, comm)
1732  {}
1733 
1734 
1735 
1736  template <typename RequestType, typename AnswerType>
1737  std::vector<unsigned int>
1738  NBX<RequestType, AnswerType>::run(
1739  const std::vector<unsigned int> & targets,
1740  const std::function<RequestType(const unsigned int)> &create_request,
1741  const std::function<AnswerType(const unsigned int, const RequestType &)>
1742  &answer_request,
1743  const std::function<void(const unsigned int, const AnswerType &)>
1744  & process_answer,
1745  const MPI_Comm comm)
1746  {
1747  Assert(has_unique_elements(targets),
1748  ExcMessage("The consensus algorithms expect that each process "
1749  "only sends a single message to another process, "
1750  "but the targets provided include duplicates."));
1751 
1752  static CollectiveMutex mutex;
1753  CollectiveMutex::ScopedLock lock(mutex, comm);
1754 
1755  try
1756  {
1757  // 1) Send data to identified targets and start receiving
1758  // the answers from these very same processes.
1759  start_communication(targets, create_request, comm);
1760 
1761  // 2) Until all posted receive operations are known to have
1762  // completed, answer requests and keep checking whether all
1763  // requests of this process have been answered.
1764  //
1765  // The requests that we catch in the maybe_answer_one_request()
1766  // function originate elsewhere, that is, they are not in
1767  // response to our own messages
1768  //
1769  // Note also that we may not catch all incoming requests in
1770  // the following two lines: our own requests may have been
1771  // satisfied before we've dealt with all incoming requests.
1772  // That's ok: We will get around to dealing with all
1773  // remaining messages later. We just want to move on to the
1774  // next step as early as possible.
1775  while (all_locally_originated_receives_are_completed(process_answer,
1776  comm) == false)
1777  maybe_answer_one_request(answer_request, comm);
1778 
1779  // 3) Signal to all other processes that all requests of this
1780  // process have been answered
1781  signal_finish(comm);
1782 
1783  // 4) Nevertheless, this process has to keep on answering
1784  // (potential) incoming requests until all processes have
1785  // received the answer to all requests
1786  while (all_remotely_originated_receives_are_completed() == false)
1787  maybe_answer_one_request(answer_request, comm);
1788 
1789  // 5) process the answer to all requests
1790  clean_up_and_end_communication(comm);
1791  }
1792  catch (...)
1793  {
1794  handle_exception(std::current_exception(), comm);
1795  }
1796 
1797  return std::vector<unsigned int>(requesting_processes.begin(),
1798  requesting_processes.end());
1799  }
1800 
1801 
1802 
1803  template <typename RequestType, typename AnswerType>
1804  void
1805  NBX<RequestType, AnswerType>::start_communication(
1806  const std::vector<unsigned int> & targets,
1807  const std::function<RequestType(const unsigned int)> &create_request,
1808  const MPI_Comm comm)
1809  {
1810 # ifdef DEAL_II_WITH_MPI
1811  // 1)
1812  const auto n_targets = targets.size();
1813 
1814  const int tag_request = Utilities::MPI::internal::Tags::
1815  consensus_algorithm_nbx_answer_request;
1816 
1817  // 2) allocate memory
1818  send_requests.resize(n_targets);
1819  send_buffers.resize(n_targets);
1820 
1821  {
1822  // 4) send and receive
1823  for (unsigned int index = 0; index < n_targets; ++index)
1824  {
1825  const unsigned int rank = targets[index];
1826  AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
1827 
1828  auto &send_buffer = send_buffers[index];
1829  send_buffer =
1830  (create_request ? Utilities::pack(create_request(rank), false) :
1831  std::vector<char>());
1832 
1833  // Post a request to send data
1834  auto ierr = MPI_Isend(send_buffer.data(),
1835  send_buffer.size(),
1836  MPI_CHAR,
1837  rank,
1838  tag_request,
1839  comm,
1840  &send_requests[index]);
1841  AssertThrowMPI(ierr);
1842  }
1843 
1844  // Also record that we expect an answer from each target we sent
1845  // a request to:
1846  n_outstanding_answers = n_targets;
1847  }
1848 # else
1849  (void)targets;
1850  (void)create_request;
1851  (void)comm;
1852 # endif
1853  }
1854 
1855 
1856 
1857  template <typename RequestType, typename AnswerType>
1858  bool
1859  NBX<RequestType, AnswerType>::
1860  all_locally_originated_receives_are_completed(
1861  const std::function<void(const unsigned int, const AnswerType &)>
1862  & process_answer,
1863  const MPI_Comm comm)
1864  {
1865 # ifdef DEAL_II_WITH_MPI
1866  // We know that all answers to our requests have come in when we have pending
1867  // messages from all targets with the right tag (some of which we may
1868  // have already taken care of below, after discovering their existence).
1869  // We can check for pending messages with MPI_IProbe, which returns
1870  // immediately with a return code that indicates whether
1871  // it has found a message from any process with a given
1872  // tag.
1873  if (n_outstanding_answers == 0)
1874  return true;
1875  else
1876  {
1877  const int tag_deliver = Utilities::MPI::internal::Tags::
1878  consensus_algorithm_nbx_process_deliver;
1879 
1880  int request_is_pending;
1881  MPI_Status status;
1882  const auto ierr = MPI_Iprobe(
1883  MPI_ANY_SOURCE, tag_deliver, comm, &request_is_pending, &status);
1884  AssertThrowMPI(ierr);
1885 
1886  // If there is no pending message with this tag,
1887  // then we are clearly not done receiving everything
1888  // yet -- so return false.
1889  if (request_is_pending == 0)
1890  return false;
1891  else
1892  {
1893  // OK, so we have gotten a reply to our request from
1894  // one rank. Let us process it.
1895  const auto target = status.MPI_SOURCE;
1896 
1897  // Then query the size of the message, allocate enough memory,
1898  // receive the data, and process it.
1899  int message_size;
1900  {
1901  const int ierr =
1902  MPI_Get_count(&status, MPI_CHAR, &message_size);
1903  AssertThrowMPI(ierr);
1904  }
1905  std::vector<char> recv_buffer(message_size);
1906 
1907  {
1908  const int tag_deliver = Utilities::MPI::internal::Tags::
1909  consensus_algorithm_nbx_process_deliver;
1910 
1911  const int ierr = MPI_Recv(recv_buffer.data(),
1912  recv_buffer.size(),
1913  MPI_CHAR,
1914  target,
1915  tag_deliver,
1916  comm,
1917  MPI_STATUS_IGNORE);
1918  AssertThrowMPI(ierr);
1919  }
1920 
1921  if (process_answer)
1922  process_answer(target,
1923  Utilities::unpack<AnswerType>(recv_buffer,
1924  false));
1925 
1926  // Finally, remove this rank from the list of outstanding
1927  // targets:
1928  --n_outstanding_answers;
1929 
1930  // We could do another go-around from the top of this
1931  // else-branch to see whether there are actually other messages
1932  // that are currently pending. But that would mean spending
1933  // substantial time in receiving answers while we should also be
1934  // sending answers to requests we have received from other
1935  // places. So let it be enough for now. If there are outstanding
1936  // answers, we will get back to this function before long and
1937  // can take care of them then.
1938  return (n_outstanding_answers == 0);
1939  }
1940  }
1941 
1942 # else
1943  (void)process_answer;
1944  (void)comm;
1945 
1946  return true;
1947 # endif
1948  }
1949 
1950 
1951 
1952  template <typename RequestType, typename AnswerType>
1953  void
1954  NBX<RequestType, AnswerType>::maybe_answer_one_request(
1955  const std::function<AnswerType(const unsigned int, const RequestType &)>
1956  & answer_request,
1957  const MPI_Comm comm)
1958  {
1959 # ifdef DEAL_II_WITH_MPI
1960 
1961  const int tag_request = Utilities::MPI::internal::Tags::
1962  consensus_algorithm_nbx_answer_request;
1963  const int tag_deliver = Utilities::MPI::internal::Tags::
1964  consensus_algorithm_nbx_process_deliver;
1965 
1966  // Check if there is a request pending. By selecting the
1967  // tag_request tag, these are other processes asking for
1968  // our own replies, not these other processes' replies
1969  // to our own requests.
1970  //
1971  // There may be multiple such pending messages. We
1972  // only answer one.
1973  MPI_Status status;
1974  int request_is_pending;
1975  const auto ierr = MPI_Iprobe(
1976  MPI_ANY_SOURCE, tag_request, comm, &request_is_pending, &status);
1977  AssertThrowMPI(ierr);
1978 
1979  if (request_is_pending != 0)
1980  {
1981  // Get the rank of the requesting process and add it to the
1982  // list of requesting processes (which must not already contain it).
1983  const auto other_rank = status.MPI_SOURCE;
1984 
1985  Assert(requesting_processes.find(other_rank) ==
1986  requesting_processes.end(),
1987  ExcMessage("Process is requesting a second time!"));
1988  requesting_processes.insert(other_rank);
1989 
1990  // get size of incoming message
1991  int number_amount;
1992  auto ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
1993  AssertThrowMPI(ierr);
1994 
1995  // allocate memory for incoming message
1996  std::vector<char> buffer_recv(number_amount);
1997  ierr = MPI_Recv(buffer_recv.data(),
1998  number_amount,
1999  MPI_CHAR,
2000  other_rank,
2001  tag_request,
2002  comm,
2003  MPI_STATUS_IGNORE);
2004  AssertThrowMPI(ierr);
2005 
2006  // Allocate memory for an answer message to the current request,
2007  // and ask the 'process' object to produce an answer:
2008  request_buffers.emplace_back(std::make_unique<std::vector<char>>());
2009  auto &request_buffer = *request_buffers.back();
2010  if (answer_request)
2011  request_buffer =
2012  Utilities::pack(answer_request(other_rank,
2013  Utilities::unpack<RequestType>(
2014  buffer_recv, false)),
2015  false);
2016 
2017  // Then initiate sending the answer back to the requester.
2018  request_requests.emplace_back(std::make_unique<MPI_Request>());
2019  ierr = MPI_Isend(request_buffer.data(),
2020  request_buffer.size(),
2021  MPI_CHAR,
2022  other_rank,
2023  tag_deliver,
2024  comm,
2025  request_requests.back().get());
2026  AssertThrowMPI(ierr);
2027  }
2028 # else
2029  (void)answer_request;
2030  (void)comm;
2031 # endif
2032  }
2033 
2034 
2035 
2036  template <typename RequestType, typename AnswerType>
2037  void
2038  NBX<RequestType, AnswerType>::signal_finish(const MPI_Comm comm)
2039  {
2040 # ifdef DEAL_II_WITH_MPI
2041  const auto ierr = MPI_Ibarrier(comm, &barrier_request);
2042  AssertThrowMPI(ierr);
2043 # else
2044  (void)comm;
2045 # endif
2046  }
2047 
2048 
2049 
2050  template <typename RequestType, typename AnswerType>
2051  bool
2052  NBX<RequestType,
2053  AnswerType>::all_remotely_originated_receives_are_completed()
2054  {
2055 # ifdef DEAL_II_WITH_MPI
2056  int all_ranks_reached_barrier;
2057  const auto ierr = MPI_Test(&barrier_request,
2058  &all_ranks_reached_barrier,
2059  MPI_STATUS_IGNORE);
2060  AssertThrowMPI(ierr);
2061  return all_ranks_reached_barrier != 0;
2062 # else
2063  return true;
2064 # endif
2065  }
2066 
2067 
2068 
2069  template <typename RequestType, typename AnswerType>
2070  void
2071  NBX<RequestType, AnswerType>::clean_up_and_end_communication(
2072  const MPI_Comm comm)
2073  {
2074  (void)comm;
2075 # ifdef DEAL_II_WITH_MPI
2076  // clean up
2077  {
2078  if (send_requests.size() > 0)
2079  {
2080  const int ierr = MPI_Waitall(send_requests.size(),
2081  send_requests.data(),
2082  MPI_STATUSES_IGNORE);
2083  AssertThrowMPI(ierr);
2084  }
2085 
2086  int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
2087  AssertThrowMPI(ierr);
2088 
2089  for (auto &i : request_requests)
2090  {
2091  ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
2092  AssertThrowMPI(ierr);
2093  }
2094 
2095 # ifdef DEBUG
2096  // note: MPI_Ibarrier seems to cause problems during testing; this
2097  // additional barrier seems to help
2098  ierr = MPI_Barrier(comm);
2099  AssertThrowMPI(ierr);
2100 # endif
2101  }
2102 # endif
2103  }
2104 
2105 
2106 
2107  template <typename RequestType, typename AnswerType>
2108  PEX<RequestType, AnswerType>::PEX(
2109  Process<RequestType, AnswerType> &process,
2110  const MPI_Comm comm)
2111  : Interface<RequestType, AnswerType>(process, comm)
2112  {}
2113 
2114 
2115 
2116  template <typename RequestType, typename AnswerType>
2117  std::vector<unsigned int>
2118  PEX<RequestType, AnswerType>::run(
2119  const std::vector<unsigned int> & targets,
2120  const std::function<RequestType(const unsigned int)> &create_request,
2121  const std::function<AnswerType(const unsigned int, const RequestType &)>
2122  &answer_request,
2123  const std::function<void(const unsigned int, const AnswerType &)>
2124  & process_answer,
2125  const MPI_Comm comm)
2126  {
2127  Assert(has_unique_elements(targets),
2128  ExcMessage("The consensus algorithms expect that each process "
2129  "only sends a single message to another process, "
2130  "but the targets provided include duplicates."));
2131 
2132  static CollectiveMutex mutex;
2133  CollectiveMutex::ScopedLock lock(mutex, comm);
2134 
2135  try
2136  {
2137  // 1) Send requests and start receiving the answers.
2138  // In particular, determine how many requests we should expect
2139  // on the current process.
2140  const unsigned int n_requests =
2141  start_communication(targets, create_request, comm);
2142 
2143  // 2) Answer requests:
2144  for (unsigned int request = 0; request < n_requests; ++request)
2145  answer_one_request(request, answer_request, comm);
2146 
2147  // 3) Process answers:
2148  process_incoming_answers(targets.size(), process_answer, comm);
2149 
2150  // 4) Make sure all sends have successfully terminated:
2151  clean_up_and_end_communication();
2152  }
2153  catch (...)
2154  {
2155  handle_exception(std::current_exception(), comm);
2156  }
2157 
2158  return std::vector<unsigned int>(requesting_processes.begin(),
2159  requesting_processes.end());
2160  }
2161 
2162 
2163 
2164  template <typename RequestType, typename AnswerType>
2165  unsigned int
2166  PEX<RequestType, AnswerType>::start_communication(
2167  const std::vector<unsigned int> & targets,
2168  const std::function<RequestType(const unsigned int)> &create_request,
2169  const MPI_Comm comm)
2170  {
2171 # ifdef DEAL_II_WITH_MPI
2172  const int tag_request = Utilities::MPI::internal::Tags::
2173  consensus_algorithm_pex_answer_request;
2174 
2175  // 1) determine with which processes this process wants to
2176  // communicate
2177  const unsigned int n_targets = targets.size();
2178 
2179  // 2) determine who wants to communicate with this process
2180  const unsigned int n_sources =
2181  compute_n_point_to_point_communications(comm, targets);
2182 
2183  // 2) allocate memory
2184  recv_buffers.resize(n_targets);
2185  send_buffers.resize(n_targets);
2186  send_request_requests.resize(n_targets);
2187 
2188  send_answer_requests.resize(n_sources);
2189  requests_buffers.resize(n_sources);
2190 
2191  // 4) send and receive
2192  for (unsigned int i = 0; i < n_targets; ++i)
2193  {
2194  const unsigned int rank = targets[i];
2195  AssertIndexRange(rank, Utilities::MPI::n_mpi_processes(comm));
2196 
2197  // pack data which should be sent
2198  auto &send_buffer = send_buffers[i];
2199  if (create_request)
2200  send_buffer = Utilities::pack(create_request(rank), false);
2201 
2202  // start to send data
2203  auto ierr = MPI_Isend(send_buffer.data(),
2204  send_buffer.size(),
2205  MPI_CHAR,
2206  rank,
2207  tag_request,
2208  comm,
2209  &send_request_requests[i]);
2210  AssertThrowMPI(ierr);
2211  }
2212 
2213  return n_sources;
2214 # else
2215  (void)targets;
2216  (void)create_request;
2217  (void)comm;
2218  return 0;
2219 # endif
2220  }
2221 
2222 
2223 
2224  template <typename RequestType, typename AnswerType>
2225  void
2226  PEX<RequestType, AnswerType>::answer_one_request(
2227  const unsigned int index,
2228  const std::function<AnswerType(const unsigned int, const RequestType &)>
2229  & answer_request,
2230  const MPI_Comm comm)
2231  {
2232 # ifdef DEAL_II_WITH_MPI
2233  const int tag_request = Utilities::MPI::internal::Tags::
2234  consensus_algorithm_pex_answer_request;
2235  const int tag_deliver = Utilities::MPI::internal::Tags::
2236  consensus_algorithm_pex_process_deliver;
2237 
2238  // Wait until we have a message ready for retrieval, though we don't
2239  // care which process it is from.
2240  MPI_Status status;
2241  int ierr = MPI_Probe(MPI_ANY_SOURCE, tag_request, comm, &status);
2242  AssertThrowMPI(ierr);
2243 
2244  // Get rank of incoming message and verify that it makes sense
2245  const unsigned int other_rank = status.MPI_SOURCE;
2246 
2247  Assert(requesting_processes.find(other_rank) ==
2248  requesting_processes.end(),
2249  ExcMessage(
2250  "A process is sending a request after a request from "
2251  "the same process has previously already been "
2252  "received. This algorithm does not expect this to happen."));
2253  requesting_processes.insert(other_rank);
2254 
2255  // Actually get the incoming message:
2256  int number_amount;
2257  ierr = MPI_Get_count(&status, MPI_CHAR, &number_amount);
2258  AssertThrowMPI(ierr);
2259 
2260  std::vector<char> buffer_recv(number_amount);
2261  ierr = MPI_Recv(buffer_recv.data(),
2262  number_amount,
2263  MPI_CHAR,
2264  other_rank,
2265  tag_request,
2266  comm,
2267  &status);
2268  AssertThrowMPI(ierr);
2269 
2270  // Process request by asking the user-provided function for
2271  // the answer and post a send for it.
2272  auto &request_buffer = requests_buffers[index];
2273  request_buffer =
2274  (answer_request ?
2275  Utilities::pack(answer_request(other_rank,
2276  Utilities::unpack<RequestType>(
2277  buffer_recv, false)),
2278  false) :
2279  std::vector<char>());
2280 
2281  ierr = MPI_Isend(request_buffer.data(),
2282  request_buffer.size(),
2283  MPI_CHAR,
2284  other_rank,
2285  tag_deliver,
2286  comm,
2287  &send_answer_requests[index]);
2288  AssertThrowMPI(ierr);
2289 # else
2290  (void)answer_request;
2291  (void)comm;
2292  (void)index;
2293 # endif
2294  }
2295 
2296 
2297 
2298  template <typename RequestType, typename AnswerType>
2299  void
2300  PEX<RequestType, AnswerType>::process_incoming_answers(
2301  const unsigned int n_targets,
2302  const std::function<void(const unsigned int, const AnswerType &)>
2303  & process_answer,
2304  const MPI_Comm comm)
2305  {
2306 # ifdef DEAL_II_WITH_MPI
2307  const int tag_deliver = Utilities::MPI::internal::Tags::
2308  consensus_algorithm_pex_process_deliver;
2309 
2310  // We know how many targets we have sent requests to. These
2311  // targets will all eventually send us their responses, but
2312  // we need not process them in order -- rather, just see what
2313  // comes in and then look at message originators' ranks and
2314  // message sizes
2315  for (unsigned int i = 0; i < n_targets; ++i)
2316  {
2317  MPI_Status status;
2318  {
2319  const int ierr =
2320  MPI_Probe(MPI_ANY_SOURCE, tag_deliver, comm, &status);
2321  AssertThrowMPI(ierr);
2322  }
2323 
2324  const auto other_rank = status.MPI_SOURCE;
2325  int message_size;
2326  {
2327  const int ierr = MPI_Get_count(&status, MPI_CHAR, &message_size);
2328  AssertThrowMPI(ierr);
2329  }
2330  std::vector<char> recv_buffer(message_size);
2331 
2332  // Now actually receive the answer. Because the MPI_Probe
2333  // above blocks until we have a message, we know that the
2334  // following MPI_Recv call will immediately succeed.
2335  {
2336  const int ierr = MPI_Recv(recv_buffer.data(),
2337  recv_buffer.size(),
2338  MPI_CHAR,
2339  other_rank,
2340  tag_deliver,
2341  comm,
2342  MPI_STATUS_IGNORE);
2343  AssertThrowMPI(ierr);
2344  }
2345 
2346  if (process_answer)
2347  process_answer(other_rank,
2348  Utilities::unpack<AnswerType>(recv_buffer, false));
2349  }
2350 # else
2351  (void)n_targets;
2352  (void)process_answer;
2353  (void)comm;
2354 # endif
2355  }
2356 
2357 
2358 
2359  template <typename RequestType, typename AnswerType>
2360  void
2361  PEX<RequestType, AnswerType>::clean_up_and_end_communication()
2362  {
2363 # ifdef DEAL_II_WITH_MPI
2364  // Finalize all MPI_Request objects for both the
2365  // send-request and receive-answer operations.
2366  if (send_request_requests.size() > 0)
2367  {
2368  const int ierr = MPI_Waitall(send_request_requests.size(),
2369  send_request_requests.data(),
2370  MPI_STATUSES_IGNORE);
2371  AssertThrowMPI(ierr);
2372  }
2373 
2374  // Then also check the send-answer requests.
2375  if (send_answer_requests.size() > 0)
2376  {
2377  const int ierr = MPI_Waitall(send_answer_requests.size(),
2378  send_answer_requests.data(),
2379  MPI_STATUSES_IGNORE);
2380  AssertThrowMPI(ierr);
2381  }
2382 # endif
2383  }
2384 
2385 
2386 
2387  template <typename RequestType, typename AnswerType>
2388  Serial<RequestType, AnswerType>::Serial(
2389  Process<RequestType, AnswerType> &process,
2390  const MPI_Comm comm)
2391  : Interface<RequestType, AnswerType>(process, comm)
2392  {}
2393 
2394 
2395 
2396  template <typename RequestType, typename AnswerType>
2397  std::vector<unsigned int>
2398  Serial<RequestType, AnswerType>::run(
2399  const std::vector<unsigned int> & targets,
2400  const std::function<RequestType(const unsigned int)> &create_request,
2401  const std::function<AnswerType(const unsigned int, const RequestType &)>
2402  &answer_request,
2403  const std::function<void(const unsigned int, const AnswerType &)>
2404  & process_answer,
2405  const MPI_Comm comm)
2406  {
2407  (void)comm;
2408  Assert((Utilities::MPI::job_supports_mpi() == false) ||
2409  (Utilities::MPI::n_mpi_processes(comm) == 1),
2410  ExcMessage("You shouldn't use the 'Serial' class on "
2411  "communicators that have more than one process "
2412  "associated with it."));
2413 
2414  // The only valid target for a serial program is itself.
2415  if (targets.size() != 0)
2416  {
2417  Assert(targets.size() == 1,
2418  ExcMessage(
2419  "On a single process, the only valid target "
2420  "is process zero (the process itself), which can only be "
2421  "listed once."));
2422  AssertDimension(targets[0], 0);
2423 
2424  // Since the caller indicates that there is a target, and since we
2425  // know that it is the current process, let the process send
2426  // something to itself.
2427  const RequestType request =
2428  (create_request ? create_request(0) : RequestType());
2429  const AnswerType answer =
2430  (answer_request ? answer_request(0, request) : AnswerType());
2431 
2432  if (process_answer)
2433  process_answer(0, answer);
2434  }
2435 
2436  return targets; // nothing to do
2437  }
2438 
2439 
2440 
2441  template <typename RequestType, typename AnswerType>
2442  Selector<RequestType, AnswerType>::Selector(
2443  Process<RequestType, AnswerType> &process,
2444  const MPI_Comm comm)
2445  : Interface<RequestType, AnswerType>(process, comm)
2446  {}
2447 
2448 
2449 
2450  template <typename RequestType, typename AnswerType>
2451  std::vector<unsigned int>
2452  Selector<RequestType, AnswerType>::run(
2453  const std::vector<unsigned int> & targets,
2454  const std::function<RequestType(const unsigned int)> &create_request,
2455  const std::function<AnswerType(const unsigned int, const RequestType &)>
2456  &answer_request,
2457  const std::function<void(const unsigned int, const AnswerType &)>
2458  & process_answer,
2459  const MPI_Comm comm)
2460  {
2461  // Depending on the number of processes we switch between
2462  // implementations. We reduce the threshold for debug mode to be
2463  // able to test also the non-blocking implementation. This feature
2464  // is tested by:
2465  // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=10.output
2466 
2467  const unsigned int n_procs = (Utilities::MPI::job_supports_mpi() ?
2468  Utilities::MPI::n_mpi_processes(comm) :
2469  1);
2470 # ifdef DEAL_II_WITH_MPI
2471 # ifdef DEBUG
2472  if (n_procs > 10)
2473 # else
2474  if (n_procs > 99)
2475 # endif
2476  consensus_algo.reset(new NBX<RequestType, AnswerType>());
2477  else
2478 # endif
2479  if (n_procs > 1)
2480  consensus_algo.reset(new PEX<RequestType, AnswerType>());
2481  else
2482  consensus_algo.reset(new Serial<RequestType, AnswerType>());
2483 
2484  return consensus_algo->run(
2485  targets, create_request, answer_request, process_answer, comm);
2486  }
2487 
2488 
2489  } // namespace ConsensusAlgorithms
2490  } // end of namespace MPI
2491 } // end of namespace Utilities
2492 
2493 #endif // DOXYGEN
2494 
2495 
2497 DEAL_II_NAMESPACE_CLOSE
2498 #endif