Reference documentation for deal.II version Git 34859fde61 2019-12-10 11:26:22 -0700
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/utilities.h>
25 
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/vector_memory.h>
29 
30 #include <iostream>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #ifdef DEAL_II_WITH_TRILINOS
36 # ifdef DEAL_II_WITH_MPI
37 # include <deal.II/lac/trilinos_parallel_block_vector.h>
38 # include <deal.II/lac/trilinos_vector.h>
39 # include <deal.II/lac/vector_memory.h>
40 
41 # include <Epetra_MpiComm.h>
42 # endif
43 #endif
44 
45 #ifdef DEAL_II_WITH_PETSC
46 # include <deal.II/lac/petsc_block_vector.h>
47 # include <deal.II/lac/petsc_vector.h>
48 
49 # include <petscsys.h>
50 #endif
51 
52 #ifdef DEAL_II_WITH_SLEPC
53 # include <deal.II/lac/slepc_solver.h>
54 
55 # include <slepcsys.h>
56 #endif
57 
58 #ifdef DEAL_II_WITH_P4EST
59 # include <p4est_bits.h>
60 #endif
61 
62 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
63 # include <zoltan_cpp.h>
64 #endif
65 
66 DEAL_II_NAMESPACE_OPEN
67 
68 
69 namespace Utilities
70 {
71  namespace MPI
72  {
73 #ifdef DEAL_II_WITH_MPI
74  unsigned int
75  n_mpi_processes(const MPI_Comm &mpi_communicator)
76  {
77  int n_jobs = 1;
78  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
79  AssertThrowMPI(ierr);
80 
81  return n_jobs;
82  }
83 
84 
85  unsigned int
86  this_mpi_process(const MPI_Comm &mpi_communicator)
87  {
88  int rank = 0;
89  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
90  AssertThrowMPI(ierr);
91 
92  return rank;
93  }
94 
95 
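// Illustrative usage sketch (not part of mpi.cc): calling the two query
// functions above from user code; report() is a hypothetical helper name.

#include <deal.II/base/mpi.h>

#include <iostream>

void report(const MPI_Comm &comm)
{
  const unsigned int rank    = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  // only one process prints, to avoid interleaved output
  if (rank == 0)
    std::cout << "Running with " << n_ranks << " MPI process(es)" << std::endl;
}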
96  MPI_Comm
97  duplicate_communicator(const MPI_Comm &mpi_communicator)
98  {
99  MPI_Comm new_communicator;
100  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
101  AssertThrowMPI(ierr);
102  return new_communicator;
103  }
104 
105 
106 
107  void
108  free_communicator(MPI_Comm &mpi_communicator)
109  {
110  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
111  const int ierr = MPI_Comm_free(&mpi_communicator);
112  AssertThrowMPI(ierr);
113  }
114 
115 
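// Illustrative usage sketch (not part of mpi.cc) of the usual duplicate/free
// pairing, so that messages posted by a library component cannot collide with
// the caller's own communication; do_some_communication() is a hypothetical
// function name.

#include <deal.II/base/mpi.h>

void do_some_communication(const MPI_Comm &comm)
{
  // work on a private copy of the communicator
  MPI_Comm my_comm = dealii::Utilities::MPI::duplicate_communicator(comm);

  // ... post sends/receives on my_comm ...

  // hand the duplicated communicator back to MPI
  dealii::Utilities::MPI::free_communicator(my_comm);
}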
116 
117  int
118  create_group(const MPI_Comm & comm,
119  const MPI_Group &group,
120  const int tag,
121  MPI_Comm * new_comm)
122  {
123 # if DEAL_II_MPI_VERSION_GTE(3, 0)
124  return MPI_Comm_create_group(comm, group, tag, new_comm);
125 # else
126  int rank;
127  int ierr = MPI_Comm_rank(comm, &rank);
128  AssertThrowMPI(ierr);
129 
130  int grp_rank;
131  ierr = MPI_Group_rank(group, &grp_rank);
132  AssertThrowMPI(ierr);
133  if (grp_rank == MPI_UNDEFINED)
134  {
135  *new_comm = MPI_COMM_NULL;
136  return MPI_SUCCESS;
137  }
138 
139  int grp_size;
140  ierr = MPI_Group_size(group, &grp_size);
141  AssertThrowMPI(ierr);
142 
143  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
144  AssertThrowMPI(ierr);
145 
146  MPI_Group parent_grp;
147  ierr = MPI_Comm_group(comm, &parent_grp);
148  AssertThrowMPI(ierr);
149 
150  std::vector<int> pids(grp_size);
151  std::vector<int> grp_pids(grp_size);
152  std::iota(grp_pids.begin(), grp_pids.end(), 0);
153  ierr = MPI_Group_translate_ranks(
154  group, grp_size, grp_pids.data(), parent_grp, pids.data());
155  AssertThrowMPI(ierr);
156  ierr = MPI_Group_free(&parent_grp);
157  AssertThrowMPI(ierr);
158 
159  MPI_Comm comm_old = *new_comm;
160  MPI_Comm ic;
161  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
162  {
163  const int gid = grp_rank / merge_sz;
164  comm_old = *new_comm;
165  if (gid % 2 == 0)
166  {
167  if ((gid + 1) * merge_sz < grp_size)
168  {
169  ierr = (MPI_Intercomm_create(
170  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
171  AssertThrowMPI(ierr);
172  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
173  AssertThrowMPI(ierr);
174  }
175  }
176  else
177  {
178  ierr = MPI_Intercomm_create(
179  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
180  AssertThrowMPI(ierr);
181  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
182  AssertThrowMPI(ierr);
183  }
184  if (*new_comm != comm_old)
185  {
186  ierr = MPI_Comm_free(&ic);
187  AssertThrowMPI(ierr);
188  ierr = MPI_Comm_free(&comm_old);
189  AssertThrowMPI(ierr);
190  }
191  }
192 
193  return MPI_SUCCESS;
194 # endif
195  }
196 
197 
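// Illustrative usage sketch (not part of mpi.cc): building a communicator that
// contains only the even ranks of `comm`. Every rank calls create_group();
// ranks not in the group receive MPI_COMM_NULL. The tag value 55 and the name
// make_even_comm() are arbitrary/hypothetical.

#include <deal.II/base/mpi.h>

#include <vector>

MPI_Comm make_even_comm(const MPI_Comm &comm)
{
  MPI_Group world_group;
  MPI_Comm_group(comm, &world_group);

  std::vector<int> even_ranks;
  const int n_ranks =
    static_cast<int>(dealii::Utilities::MPI::n_mpi_processes(comm));
  for (int r = 0; r < n_ranks; r += 2)
    even_ranks.push_back(r);

  MPI_Group even_group;
  MPI_Group_incl(world_group,
                 static_cast<int>(even_ranks.size()),
                 even_ranks.data(),
                 &even_group);

  MPI_Comm even_comm;
  dealii::Utilities::MPI::create_group(comm, even_group, 55, &even_comm);

  MPI_Group_free(&even_group);
  MPI_Group_free(&world_group);

  return even_comm; // MPI_COMM_NULL on odd ranks; free it when done
}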
198 
199  std::vector<IndexSet>
200  create_ascending_partitioning(const MPI_Comm & comm,
201  const IndexSet::size_type &local_size)
202  {
203  const unsigned int n_proc = n_mpi_processes(comm);
204  const std::vector<IndexSet::size_type> sizes =
205  all_gather(comm, local_size);
206  const auto total_size =
207  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
208 
209  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
210 
211  IndexSet::size_type begin = 0;
212  for (unsigned int i = 0; i < n_proc; ++i)
213  {
214  res[i].add_range(begin, begin + sizes[i]);
215  begin = begin + sizes[i];
216  }
217 
218  return res;
219  }
220 
221 
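// Illustrative usage sketch (not part of mpi.cc): every process passes the
// number of elements it owns and picks its own slice of the resulting
// contiguous 1:1 partition; my_owned_range() is a hypothetical helper name.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

dealii::IndexSet my_owned_range(const MPI_Comm &                  comm,
                                const dealii::IndexSet::size_type n_local)
{
  const std::vector<dealii::IndexSet> partition =
    dealii::Utilities::MPI::create_ascending_partitioning(comm, n_local);

  return partition[dealii::Utilities::MPI::this_mpi_process(comm)];
}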
222 
 227  class ConsensusAlgorithmProcessTargets
 228  : public ConsensusAlgorithmProcess<int, int>
229  {
230  public:
231  ConsensusAlgorithmProcessTargets(const std::vector<unsigned int> &target)
232  : target(target)
233  {}
234 
235  using T1 = int;
236  using T2 = int;
237 
238  virtual void
239  process_request(const unsigned int other_rank,
240  const std::vector<T1> &,
241  std::vector<T2> &) override
242  {
243  this->sources.push_back(other_rank);
244  }
245 
251  virtual std::vector<unsigned int>
252  compute_targets() override
253  {
254  return target;
255  }
256 
262  std::vector<unsigned int>
 263  get_result()
 264  {
265  std::sort(sources.begin(), sources.end());
266  return sources;
267  }
268 
269  private:
273  const std::vector<unsigned int> &target;
274 
278  std::vector<unsigned int> sources;
279  };
280 
281 
282 
283  std::vector<unsigned int>
 284  compute_point_to_point_communication_pattern(
 285  const MPI_Comm & mpi_comm,
286  const std::vector<unsigned int> &destinations)
287  {
288  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
289  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
290  (void)myid;
291  (void)n_procs;
292 
293  for (const unsigned int destination : destinations)
294  {
295  (void)destination;
296  Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
297  Assert(destination != myid,
298  ExcMessage(
299  "There is no point in communicating with ourselves."));
300  }
301 
302 # if DEAL_II_MPI_VERSION_GTE(3, 0)
303 
304  ConsensusAlgorithmProcessTargets process(destinations);
305  ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
306  ConsensusAlgorithmProcessTargets::T2>
307  consensus_algorithm(process, mpi_comm);
308  consensus_algorithm.run();
309  return process.get_result();
310 
311 # elif DEAL_II_MPI_VERSION_GTE(2, 2)
312 
313  static CollectiveMutex mutex;
314  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
315 
316  const int mpi_tag =
 317  internal::Tags::compute_point_to_point_communication_pattern;
 318 
319  // Calculate the number of messages to send to each process
320  std::vector<unsigned int> dest_vector(n_procs);
321  for (const auto &el : destinations)
322  ++dest_vector[el];
323 
324  // Find how many processes will send to this one
325  // by reducing with sum and then scattering the
326  // results over all processes
327  unsigned int n_recv_from;
328  const int ierr = MPI_Reduce_scatter_block(
329  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
330 
331  AssertThrowMPI(ierr);
332 
333  // Send myid to every process in `destinations` vector...
334  std::vector<MPI_Request> send_requests(destinations.size());
335  for (const auto &el : destinations)
336  {
337  const int ierr =
338  MPI_Isend(&myid,
339  1,
340  MPI_UNSIGNED,
341  el,
342  mpi_tag,
343  mpi_comm,
344  send_requests.data() + (&el - destinations.data()));
345  AssertThrowMPI(ierr);
346  }
347 
348 
349  // Receive `n_recv_from` times from the processes
350  // who communicate with this one. Store the obtained id's
351  // in the resulting vector
352  std::vector<unsigned int> origins(n_recv_from);
353  for (auto &el : origins)
354  {
355  const int ierr = MPI_Recv(&el,
356  1,
357  MPI_UNSIGNED,
358  MPI_ANY_SOURCE,
359  mpi_tag,
360  mpi_comm,
361  MPI_STATUS_IGNORE);
362  AssertThrowMPI(ierr);
363  }
364 
365  if (destinations.size() > 0)
366  {
367  const int ierr = MPI_Waitall(destinations.size(),
368  send_requests.data(),
369  MPI_STATUSES_IGNORE);
370  AssertThrowMPI(ierr);
371  }
372 
373  return origins;
374 # else
375  // let all processors communicate the maximal number of destinations
376  // they have
377  const unsigned int max_n_destinations =
378  Utilities::MPI::max(destinations.size(), mpi_comm);
379 
380  if (max_n_destinations == 0)
381  // all processes have nothing to send/receive:
382  return std::vector<unsigned int>();
383 
384  // now that we know the number of data packets every processor wants to
385  // send, set up a buffer with the maximal size and copy our destinations
386  // in there, padded with -1's
387  std::vector<unsigned int> my_destinations(max_n_destinations,
 388  numbers::invalid_unsigned_int);
 389  std::copy(destinations.begin(),
390  destinations.end(),
391  my_destinations.begin());
392 
393  // now exchange these (we could communicate less data if we used
394  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
395  // processors in this case, which is more expensive than the reduction
396  // operation above in MPI_Allreduce)
397  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
398  const int ierr = MPI_Allgather(my_destinations.data(),
399  max_n_destinations,
400  MPI_UNSIGNED,
401  all_destinations.data(),
402  max_n_destinations,
403  MPI_UNSIGNED,
404  mpi_comm);
405  AssertThrowMPI(ierr);
406 
407  // now we know who is going to communicate with whom. collect who is
408  // going to communicate with us!
409  std::vector<unsigned int> origins;
410  for (unsigned int i = 0; i < n_procs; ++i)
411  for (unsigned int j = 0; j < max_n_destinations; ++j)
412  if (all_destinations[i * max_n_destinations + j] == myid)
413  origins.push_back(i);
414  else if (all_destinations[i * max_n_destinations + j] ==
 415  numbers::invalid_unsigned_int)
 416  break;
417 
418  return origins;
419 # endif
420  }
421 
422 
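// Illustrative usage sketch (not part of mpi.cc) using a simple ring pattern:
// every process wants to send to its right neighbor, so the function returns
// the left neighbor as the single origin; who_sends_to_me() is a hypothetical
// helper name.

#include <deal.II/base/mpi.h>

#include <vector>

std::vector<unsigned int> who_sends_to_me(const MPI_Comm &comm)
{
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  std::vector<unsigned int> destinations;
  if (n_ranks > 1)
    destinations.push_back((my_rank + 1) % n_ranks);

  return dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
    comm, destinations);
}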
423 
424  unsigned int
 425  compute_n_point_to_point_communications(
 426  const MPI_Comm & mpi_comm,
427  const std::vector<unsigned int> &destinations)
428  {
429  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
430 
431  for (const unsigned int destination : destinations)
432  {
433  (void)destination;
434  Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
435  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
436  ExcMessage(
437  "There is no point in communicating with ourselves."));
438  }
439 
440  // Calculate the number of messages to send to each process
441  std::vector<unsigned int> dest_vector(n_procs);
442  for (const auto &el : destinations)
443  ++dest_vector[el];
444 
445 # if DEAL_II_MPI_VERSION_GTE(2, 2)
446  // Find out how many processes will send to this one
447  // MPI_Reduce_scatter(_block) does exactly this
448  unsigned int n_recv_from = 0;
449 
450  const int ierr = MPI_Reduce_scatter_block(
451  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
452 
453  AssertThrowMPI(ierr);
454 
455  return n_recv_from;
456 # else
457  // Find out how many processes will send to this one
458  // by reducing with sum and then scattering the
459  // results over all processes
460  std::vector<unsigned int> buffer(dest_vector.size());
461  unsigned int n_recv_from = 0;
462 
463  MPI_Reduce(dest_vector.data(),
464  buffer.data(),
465  dest_vector.size(),
466  MPI_UNSIGNED,
467  MPI_SUM,
468  0,
469  mpi_comm);
470  MPI_Scatter(buffer.data(),
471  1,
472  MPI_UNSIGNED,
473  &n_recv_from,
474  1,
475  MPI_UNSIGNED,
476  0,
477  mpi_comm);
478 
479  return n_recv_from;
480 # endif
481  }
482 
483 
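// Illustrative sketch (not part of mpi.cc): the same ring pattern as in the
// sketch above, but asking only for the number of processes that will send to
// us; n_incoming_messages() is a hypothetical helper name.

#include <deal.II/base/mpi.h>

#include <vector>

unsigned int n_incoming_messages(const MPI_Comm &comm)
{
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  std::vector<unsigned int> destinations;
  if (n_ranks > 1)
    destinations.push_back((my_rank + 1) % n_ranks);

  return dealii::Utilities::MPI::compute_n_point_to_point_communications(
    comm, destinations);
}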
484 
485  namespace
486  {
 487  // custom MPI_Op for min_max_avg()
488  void
489  max_reduce(const void *in_lhs_,
490  void * inout_rhs_,
491  int * len,
492  MPI_Datatype *)
493  {
494  (void)len;
495  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
496  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
497 
498  Assert(*len == 1, ExcInternalError());
499 
500  inout_rhs->sum += in_lhs->sum;
501  if (inout_rhs->min > in_lhs->min)
502  {
503  inout_rhs->min = in_lhs->min;
504  inout_rhs->min_index = in_lhs->min_index;
505  }
506  else if (inout_rhs->min == in_lhs->min)
507  {
508  // choose lower cpu index when tied to make operator commutative
509  if (inout_rhs->min_index > in_lhs->min_index)
510  inout_rhs->min_index = in_lhs->min_index;
511  }
512 
513  if (inout_rhs->max < in_lhs->max)
514  {
515  inout_rhs->max = in_lhs->max;
516  inout_rhs->max_index = in_lhs->max_index;
517  }
518  else if (inout_rhs->max == in_lhs->max)
519  {
520  // choose lower cpu index when tied to make operator commutative
521  if (inout_rhs->max_index > in_lhs->max_index)
522  inout_rhs->max_index = in_lhs->max_index;
523  }
524  }
525  } // namespace
526 
527 
528 
529  MinMaxAvg
530  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
531  {
532  // If MPI was not started, we have a serial computation and cannot run
533  // the other MPI commands
534  if (job_supports_mpi() == false)
535  {
536  MinMaxAvg result;
537  result.sum = my_value;
538  result.avg = my_value;
539  result.min = my_value;
540  result.max = my_value;
541  result.min_index = 0;
542  result.max_index = 0;
543 
544  return result;
545  }
546 
547  // To avoid uninitialized values on some MPI implementations, provide
548  // result with a default value already...
549  MinMaxAvg result = {0.,
550  std::numeric_limits<double>::max(),
551  -std::numeric_limits<double>::max(),
552  0,
553  0,
554  0.};
555 
556  const unsigned int my_id =
557  ::Utilities::MPI::this_mpi_process(mpi_communicator);
558  const unsigned int numproc =
559  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
560 
561  MPI_Op op;
562  int ierr =
563  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
564  true,
565  &op);
566  AssertThrowMPI(ierr);
567 
568  MinMaxAvg in;
569  in.sum = in.min = in.max = my_value;
570  in.min_index = in.max_index = my_id;
571 
572  MPI_Datatype type;
573  int lengths[] = {3, 2};
574  MPI_Aint displacements[] = {0, offsetof(MinMaxAvg, min_index)};
575  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT};
576 
577  ierr = MPI_Type_create_struct(2, lengths, displacements, types, &type);
578  AssertThrowMPI(ierr);
579 
580  ierr = MPI_Type_commit(&type);
581  AssertThrowMPI(ierr);
582  ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
583  AssertThrowMPI(ierr);
584 
585  ierr = MPI_Type_free(&type);
586  AssertThrowMPI(ierr);
587 
588  ierr = MPI_Op_free(&op);
589  AssertThrowMPI(ierr);
590 
591  result.avg = result.sum / numproc;
592 
593  return result;
594  }
595 
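// Illustrative usage sketch (not part of mpi.cc): gathering statistics of a
// per-process value, e.g. a local wall time; t_local and
// print_timing_statistics() are hypothetical names.

#include <deal.II/base/mpi.h>

#include <iostream>

void print_timing_statistics(const double t_local, const MPI_Comm &comm)
{
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(t_local, comm);

  if (dealii::Utilities::MPI::this_mpi_process(comm) == 0)
    std::cout << "min " << stats.min << " (rank " << stats.min_index << "), "
              << "max " << stats.max << " (rank " << stats.max_index << "), "
              << "avg " << stats.avg << std::endl;
}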
596 #else
597 
598  unsigned int
599  n_mpi_processes(const MPI_Comm &)
600  {
601  return 1;
602  }
603 
604 
605 
606  unsigned int
607  this_mpi_process(const MPI_Comm &)
608  {
609  return 0;
610  }
611 
612 
613 
614  std::vector<IndexSet>
615  create_ascending_partitioning(const MPI_Comm & /*comm*/,
616  const IndexSet::size_type &local_size)
617  {
618  return std::vector<IndexSet>(1, complete_index_set(local_size));
619  }
620 
621 
622 
623  MPI_Comm
624  duplicate_communicator(const MPI_Comm &mpi_communicator)
625  {
626  return mpi_communicator;
627  }
628 
629 
630 
631  void
632  free_communicator(MPI_Comm & /*mpi_communicator*/)
633  {}
634 
635 
636 
637  MinMaxAvg
638  min_max_avg(const double my_value, const MPI_Comm &)
639  {
640  MinMaxAvg result;
641 
642  result.sum = my_value;
643  result.avg = my_value;
644  result.min = my_value;
645  result.max = my_value;
646  result.min_index = 0;
647  result.max_index = 0;
648 
649  return result;
650  }
651 
652 #endif
653 
654 
655 
 656  MPI_InitFinalize::MPI_InitFinalize(int & argc,
 657  char **& argv,
658  const unsigned int max_num_threads)
659  {
660  static bool constructor_has_already_run = false;
661  (void)constructor_has_already_run;
662  Assert(constructor_has_already_run == false,
663  ExcMessage("You can only create a single object of this class "
664  "in a program since it initializes the MPI system."));
665 
666 
667  int ierr = 0;
668 #ifdef DEAL_II_WITH_MPI
669  // if we have PETSc, we will initialize it and let it handle MPI.
670  // Otherwise, we will do it.
671  int MPI_has_been_started = 0;
672  ierr = MPI_Initialized(&MPI_has_been_started);
673  AssertThrowMPI(ierr);
674  AssertThrow(MPI_has_been_started == 0,
675  ExcMessage("MPI error. You can only start MPI once!"));
676 
677  int provided;
678  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
679  // we might use several threads but never call two MPI functions at the
 680  // same time. For an explanation of why we do this, see
681  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
682  int wanted = MPI_THREAD_SERIALIZED;
683  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
684  AssertThrowMPI(ierr);
685 
686  // disable for now because at least some implementations always return
687  // MPI_THREAD_SINGLE.
688  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
689  // ExcMessage("MPI reports that we are not allowed to use multiple
690  // threads."));
691 #else
692  // make sure the compiler doesn't warn about these variables
693  (void)argc;
694  (void)argv;
695  (void)ierr;
696 #endif
697 
698  // we are allowed to call MPI_Init ourselves and PETScInitialize will
699  // detect this. This allows us to use MPI_Init_thread instead.
700 #ifdef DEAL_II_WITH_PETSC
701 # ifdef DEAL_II_WITH_SLEPC
702  // Initialize SLEPc (with PETSc):
 703  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 704  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
705 # else
706  // or just initialize PETSc alone:
707  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
708  AssertThrow(ierr == 0, ExcPETScError(ierr));
709 # endif
710 
711  // Disable PETSc exception handling. This just prints a large wall
712  // of text that is not particularly helpful for what we do:
713  PetscPopSignalHandler();
714 #endif
715 
716  // Initialize zoltan
717 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
718  float version;
719  Zoltan_Initialize(argc, argv, &version);
720 #endif
721 
722 #ifdef DEAL_II_WITH_P4EST
723  // Initialize p4est and libsc components
724 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
725 # else
726  // This feature is broken in version 2.0.0 for calls to
727  // MPI_Comm_create_group (see cburstedde/p4est#30).
728  // Disabling it leads to more verbose p4est error messages
729  // which should be fine.
730  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
731 # endif
732  p4est_init(nullptr, SC_LP_SILENT);
733 #endif
734 
735  constructor_has_already_run = true;
736 
737 
738  // Now also see how many threads we'd like to run
739  if (max_num_threads != numbers::invalid_unsigned_int)
740  {
741  // set maximum number of threads (also respecting the environment
742  // variable that the called function evaluates) based on what the
743  // user asked
744  MultithreadInfo::set_thread_limit(max_num_threads);
745  }
746  else
747  // user wants automatic choice
748  {
749 #ifdef DEAL_II_WITH_MPI
750  // we need to figure out how many MPI processes there are on the
751  // current node, as well as how many CPU cores we have. for the
752  // first task, check what get_hostname() returns and then do an
753  // allgather so each processor gets the answer
754  //
755  // in calculating the length of the string, don't forget the
756  // terminating \0 on C-style strings
757  const std::string hostname = Utilities::System::get_hostname();
758  const unsigned int max_hostname_size =
759  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
760  std::vector<char> hostname_array(max_hostname_size);
761  std::copy(hostname.c_str(),
762  hostname.c_str() + hostname.size() + 1,
763  hostname_array.begin());
764 
765  std::vector<char> all_hostnames(max_hostname_size *
766  MPI::n_mpi_processes(MPI_COMM_WORLD));
767  const int ierr = MPI_Allgather(hostname_array.data(),
768  max_hostname_size,
769  MPI_CHAR,
770  all_hostnames.data(),
771  max_hostname_size,
772  MPI_CHAR,
773  MPI_COMM_WORLD);
774  AssertThrowMPI(ierr);
775 
 776  // count how often our own hostname appears and determine which of
 777  // these instances the current process is
778  unsigned int n_local_processes = 0;
779  unsigned int nth_process_on_host = 0;
780  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
781  ++i)
782  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
783  hostname)
784  {
785  ++n_local_processes;
786  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
787  ++nth_process_on_host;
788  }
789  Assert(nth_process_on_host > 0, ExcInternalError());
790 
791 
792  // compute how many cores each process gets. if the number does not
793  // divide evenly, then we get one more core if we are among the
794  // first few processes
795  //
796  // if the number would be zero, round up to one since every process
797  // needs to have at least one thread
798  const unsigned int n_threads =
799  std::max(MultithreadInfo::n_cores() / n_local_processes +
800  (nth_process_on_host <=
801  MultithreadInfo::n_cores() % n_local_processes ?
802  1 :
803  0),
804  1U);
805 #else
806  const unsigned int n_threads = MultithreadInfo::n_cores();
807 #endif
808 
809  // finally set this number of threads
 810  MultithreadInfo::set_thread_limit(n_threads);
 811  }
812  }
813 
814 
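// Illustrative usage sketch (not part of mpi.cc) of the intended use of this
// class: create exactly one object at the beginning of main(); everything
// initialized above is finalized automatically when the object goes out of
// scope. The thread limit of 1 is just an example value.

#include <deal.II/base/mpi.h>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // ... the actual parallel program ...

  return 0;
}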
815 
816  void
 817  MPI_InitFinalize::register_request(MPI_Request &request)
 818  {
819  // insert if it is not in the set already:
820  requests.insert(&request);
821  }
822 
823 
824 
825  void
 826  MPI_InitFinalize::unregister_request(MPI_Request &request)
 827  {
828  Assert(
829  requests.find(&request) != requests.end(),
830  ExcMessage(
831  "You tried to call unregister_request() with an invalid request."));
832 
833  requests.erase(&request);
834  }
835 
836 
837 
838  std::set<MPI_Request *> MPI_InitFinalize::requests;
839 
840 
841 
 842  MPI_InitFinalize::~MPI_InitFinalize()
 843  {
844  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
845  // are no longer used at this point. this is relevant because the static
846  // object destructors run for these vectors at the end of the program
847  // would run after MPI_Finalize is called, leading to errors
848 
849 #ifdef DEAL_II_WITH_MPI
850  // Before exiting, wait for nonblocking communication to complete:
851  for (auto request : requests)
852  {
853  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
854  AssertThrowMPI(ierr);
855  }
856 
857  // Start with deal.II MPI vectors and delete vectors from the pools:
 858  GrowingVectorMemory<
 859  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
 860  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
 861  release_unused_memory();
 862  GrowingVectorMemory<
 863  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
 864  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
 865  release_unused_memory();
866 
867  // Next with Trilinos:
868 # if defined(DEAL_II_WITH_TRILINOS)
 869  GrowingVectorMemory<
 870  TrilinosWrappers::MPI::Vector>::release_unused_memory();
 871  GrowingVectorMemory<
 872  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
873 # endif
874 #endif
875 
876 
877  // Now deal with PETSc (with or without MPI). Only delete the vectors if
878  // finalize hasn't been called yet, otherwise this will lead to errors.
879 #ifdef DEAL_II_WITH_PETSC
880  if ((PetscInitializeCalled == PETSC_TRUE) &&
881  (PetscFinalizeCalled == PETSC_FALSE))
882  {
 883  GrowingVectorMemory<
 884  PETScWrappers::MPI::Vector>::release_unused_memory();
 885  GrowingVectorMemory<
 886  PETScWrappers::MPI::BlockVector>::release_unused_memory();
887 
888 # ifdef DEAL_II_WITH_SLEPC
889  // and now end SLEPc (with PETSc)
890  SlepcFinalize();
891 # else
892  // or just end PETSc.
893  PetscFinalize();
894 # endif
895  }
896 #endif
897 
898 // There is a similar issue with CUDA: The destructor of static objects might
899 // run after the CUDA driver is unloaded. Hence, also release all memory
900 // related to CUDA vectors.
901 #ifdef DEAL_II_WITH_CUDA
 902  GrowingVectorMemory<
 903  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
 904  release_unused_memory();
 905  GrowingVectorMemory<
 906  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
 907  release_unused_memory();
908 #endif
909 
910 #ifdef DEAL_II_WITH_P4EST
911  // now end p4est and libsc
912  // Note: p4est has no finalize function
913  sc_finalize();
914 #endif
915 
916 
917  // only MPI_Finalize if we are running with MPI. We also need to do this
918  // when running PETSc, because we initialize MPI ourselves before
919  // calling PetscInitialize
920 #ifdef DEAL_II_WITH_MPI
921  if (job_supports_mpi() == true)
922  {
923 # if __cpp_lib_uncaught_exceptions >= 201411
924  // std::uncaught_exception() is deprecated in c++17
925  if (std::uncaught_exceptions() > 0)
926 # else
927  if (std::uncaught_exception() == true)
928 # endif
929  {
930  std::cerr
931  << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
932  << this_mpi_process(MPI_COMM_WORLD)
933  << ". Skipping MPI_Finalize() to avoid a deadlock."
934  << std::endl;
935  }
936  else
937  {
938  const int ierr = MPI_Finalize();
939  (void)ierr;
940  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
941  }
942  }
943 #endif
944  }
945 
946 
947 
948  bool
 949  job_supports_mpi()
 950  {
951 #ifdef DEAL_II_WITH_MPI
952  int MPI_has_been_started = 0;
953  const int ierr = MPI_Initialized(&MPI_has_been_started);
954  AssertThrowMPI(ierr);
955 
956  return (MPI_has_been_started > 0);
957 #else
958  return false;
959 #endif
960  }
961 
962  template <typename T1, typename T2>
963  void
 964  ConsensusAlgorithmProcess<T1, T2>::process_request(const unsigned int,
 965  const std::vector<T1> &,
966  std::vector<T2> &)
967  {
968  // nothing to do
969  }
970 
971 
972 
973  template <typename T1, typename T2>
974  void
 975  ConsensusAlgorithmProcess<T1, T2>::pack_recv_buffer(const int,
 976  std::vector<T1> &)
977  {
978  // nothing to do
979  }
980 
981 
982 
983  template <typename T1, typename T2>
984  void
 985  ConsensusAlgorithmProcess<T1, T2>::prepare_recv_buffer(const int,
 986  std::vector<T2> &)
987  {
988  // nothing to do
989  }
990 
991 
992 
993  template <typename T1, typename T2>
994  void
 995  ConsensusAlgorithmProcess<T1, T2>::unpack_recv_buffer(
 996  const int,
997  const std::vector<T2> &)
998  {
999  // nothing to do
1000  }
1001 
1002 
1003 
1004  template <typename T1, typename T2>
 1005  ConsensusAlgorithm<T1, T2>::ConsensusAlgorithm(
 1006  ConsensusAlgorithmProcess<T1, T2> &process,
 1007  const MPI_Comm & comm)
1008  : process(process)
1009  , comm(comm)
1010  , my_rank(this_mpi_process(comm))
1011  , n_procs(n_mpi_processes(comm))
1012  {}
1013 
1014 
1015 
1016  template <typename T1, typename T2>
 1017  ConsensusAlgorithm_NBX<T1, T2>::ConsensusAlgorithm_NBX(
 1018  ConsensusAlgorithmProcess<T1, T2> &process,
 1019  const MPI_Comm & comm)
1020  : ConsensusAlgorithm<T1, T2>(process, comm)
1021  {}
1022 
1023 
1024 
1025  template <typename T1, typename T2>
1026  void
 1027  ConsensusAlgorithm_NBX<T1, T2>::run()
 1028  {
1029  static CollectiveMutex mutex;
1030  CollectiveMutex::ScopedLock lock(mutex, this->comm);
1031 
1032  // 1) send requests and start receiving the answers
 1033  start_communication();
 1034 
1035  // 2) answer requests and check if all requests of this process have been
1036  // answered
1037  while (!check_own_state())
1038  process_requests();
1039 
1040  // 3) signal to all other processes that all requests of this process have
1041  // been answered
1042  signal_finish();
1043 
1044  // 4) nevertheless, this process has to keep on answering (potential)
1045  // incoming requests until all processes have received the
1046  // answer to all requests
1047  while (!check_global_state())
1048  process_requests();
1049 
1050  // 5) process the answer to all requests
 1051  clean_up_and_end_communication();
 1052  }
1053 
1054 
1055 
1056  template <typename T1, typename T2>
1057  bool
 1058  ConsensusAlgorithm_NBX<T1, T2>::check_own_state()
 1059  {
1060 #ifdef DEAL_II_WITH_MPI
1061  int all_receive_requests_are_done;
1062  const auto ierr = MPI_Testall(recv_requests.size(),
1063  recv_requests.data(),
1064  &all_receive_requests_are_done,
1065  MPI_STATUSES_IGNORE);
1066  AssertThrowMPI(ierr);
1067 
1068  return all_receive_requests_are_done;
1069 #else
1070  return true;
1071 #endif
1072  }
1073 
1074 
1075 
1076  template <typename T1, typename T2>
1077  void
 1078  ConsensusAlgorithm_NBX<T1, T2>::signal_finish()
 1079  {
1080 #ifdef DEAL_II_WITH_MPI
1081 # if DEAL_II_MPI_VERSION_GTE(3, 0)
1082  const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
1083  AssertThrowMPI(ierr);
1084 # else
1085  AssertThrow(
1086  false,
1087  ExcMessage(
1088  "ConsensusAlgorithm_NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
1089 # endif
1090 #endif
1091  }
1092 
1093 
1094 
1095  template <typename T1, typename T2>
1096  bool
 1097  ConsensusAlgorithm_NBX<T1, T2>::check_global_state()
 1098  {
1099 #ifdef DEAL_II_WITH_MPI
1100  int all_ranks_reached_barrier;
1101  const auto ierr = MPI_Test(&barrier_request,
1102  &all_ranks_reached_barrier,
1103  MPI_STATUSES_IGNORE);
1104  AssertThrowMPI(ierr);
1105  return all_ranks_reached_barrier;
1106 #else
1107  return true;
1108 #endif
1109  }
1110 
1111 
1112 
1113  template <typename T1, typename T2>
1114  void
 1115  ConsensusAlgorithm_NBX<T1, T2>::process_requests()
 1116  {
1117 #ifdef DEAL_II_WITH_MPI
1118 
1119  const int tag_request =
 1120  internal::Tags::consensus_algorithm_nbx_answer_request;
 1121  const int tag_deliver =
 1122  internal::Tags::consensus_algorithm_nbx_process_deliver;
 1123 
1124  // check if there is a request pending
1125  MPI_Status status;
1126  int request_is_pending;
1127  const auto ierr = MPI_Iprobe(
1128  MPI_ANY_SOURCE, tag_request, this->comm, &request_is_pending, &status);
1129  AssertThrowMPI(ierr);
1130 
1131  if (request_is_pending) // request is pending
1132  {
1133  // get rank of requesting process
1134  const auto other_rank = status.MPI_SOURCE;
1135 
1136 # ifdef DEBUG
1137  Assert(requesting_processes.find(other_rank) ==
1138  requesting_processes.end(),
1139  ExcMessage("Process is requesting a second time!"));
1140  requesting_processes.insert(other_rank);
1141 # endif
1142 
1143  std::vector<T1> buffer_recv;
 1144  // get size of incoming message
1145  int number_amount;
1146  auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
1147  AssertThrowMPI(ierr);
1148 
1149  // allocate memory for incoming message
1150  Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
1151  buffer_recv.resize(number_amount / sizeof(T1));
1152  ierr = MPI_Recv(buffer_recv.data(),
1153  number_amount,
1154  MPI_BYTE,
1155  other_rank,
1156  tag_request,
1157  this->comm,
1158  &status);
1159  AssertThrowMPI(ierr);
1160 
1161  // allocate memory for answer message
1162  request_buffers.emplace_back(
1163  std_cxx14::make_unique<std::vector<T2>>());
1164  request_requests.emplace_back(std_cxx14::make_unique<MPI_Request>());
1165 
1166  // process request
1167  auto &request_buffer = *request_buffers.back();
1168  this->process.process_request(other_rank,
1169  buffer_recv,
1170  request_buffer);
1171 
1172  // start to send answer back
1173  ierr = MPI_Isend(request_buffer.data(),
1174  request_buffer.size() * sizeof(T2),
1175  MPI_BYTE,
1176  other_rank,
1177  tag_deliver,
1178  this->comm,
1179  request_requests.back().get());
1180  AssertThrowMPI(ierr);
1181  }
1182 #endif
1183  }
1184 
1185 
1186 
1187  template <typename T1, typename T2>
1188  void
 1189  ConsensusAlgorithm_NBX<T1, T2>::start_communication()
 1190  {
1191 #ifdef DEAL_II_WITH_MPI
1192  // 1)
1193  targets = this->process.compute_targets();
1194  const auto n_targets = targets.size();
1195 
1196  const int tag_request =
 1197  internal::Tags::consensus_algorithm_nbx_answer_request;
 1198  const int tag_deliver =
 1199  internal::Tags::consensus_algorithm_nbx_process_deliver;
 1200 
1201  // 2) allocate memory
1202  recv_buffers.resize(n_targets);
1203  recv_requests.resize(n_targets);
1204  send_requests.resize(n_targets);
1205  send_buffers.resize(n_targets);
1206 
1207  {
1208  // 4) send and receive
1209  for (unsigned int i = 0; i < n_targets; i++)
1210  {
1211  const unsigned int rank = targets[i];
1212  const unsigned int index = i;
1213 
1214  // translate index set to a list of pairs
1215  auto &send_buffer = send_buffers[index];
1216  this->process.pack_recv_buffer(rank, send_buffer);
1217 
1218  // start to send data
1219  auto ierr = MPI_Isend(send_buffer.data(),
1220  send_buffer.size() * sizeof(T1),
1221  MPI_BYTE,
1222  rank,
1223  tag_request,
1224  this->comm,
1225  &send_requests[index]);
1226  AssertThrowMPI(ierr);
1227 
1228  // start to receive data
1229  auto &recv_buffer = recv_buffers[index];
1230  this->process.prepare_recv_buffer(rank, recv_buffer);
1231  ierr = MPI_Irecv(recv_buffer.data(),
1232  recv_buffer.size() * sizeof(T2),
1233  MPI_BYTE,
1234  rank,
1235  tag_deliver,
1236  this->comm,
1237  &recv_requests[index]);
1238  AssertThrowMPI(ierr);
1239  }
1240  }
1241 #endif
1242  }
1243 
1244 
1245 
1246  template <typename T1, typename T2>
1247  void
 1248  ConsensusAlgorithm_NBX<T1, T2>::clean_up_and_end_communication()
 1249  {
1250 #ifdef DEAL_II_WITH_MPI
1251  // clean up
1252  {
1253  if (send_requests.size() > 0)
1254  {
1255  const int ierr = MPI_Waitall(send_requests.size(),
1256  send_requests.data(),
1257  MPI_STATUSES_IGNORE);
1258  AssertThrowMPI(ierr);
1259  }
1260 
1261  if (recv_requests.size() > 0)
1262  {
1263  const int ierr = MPI_Waitall(recv_requests.size(),
1264  recv_requests.data(),
1265  MPI_STATUSES_IGNORE);
1266  AssertThrowMPI(ierr);
1267  }
1268 
1269 
1270  const int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
1271  AssertThrowMPI(ierr);
1272 
1273  for (auto &i : request_requests)
1274  {
1275  const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
1276  AssertThrowMPI(ierr);
1277  }
1278 
1279 # ifdef DEBUG
 1280  // note: MPI_Ibarrier seems to cause problems during testing; this
 1281  // additional barrier seems to help
1282  MPI_Barrier(this->comm);
1283 # endif
1284  }
1285 
1286  // unpack data
1287  {
1288  for (unsigned int i = 0; i < targets.size(); i++)
1289  this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
1290  }
1291 #endif
1292  }
1293 
1294 
1295 
1296  template <typename T1, typename T2>
 1297  ConsensusAlgorithm_PEX<T1, T2>::ConsensusAlgorithm_PEX(
 1298  ConsensusAlgorithmProcess<T1, T2> &process,
 1299  const MPI_Comm & comm)
1300  : ConsensusAlgorithm<T1, T2>(process, comm)
1301  {}
1302 
1303 
1304 
1305  template <typename T1, typename T2>
1306  void
 1307  ConsensusAlgorithm_PEX<T1, T2>::run()
 1308  {
1309  static CollectiveMutex mutex;
1310  CollectiveMutex::ScopedLock lock(mutex, this->comm);
1311 
1312  // 1) send requests and start receiving the answers
1313  // especially determine how many requests are expected
1314  const unsigned int n_requests = start_communication();
1315 
1316  // 2) answer requests
1317  for (unsigned int request = 0; request < n_requests; request++)
1318  process_requests(request);
1319 
1320  // 3) process answers
 1321  clean_up_and_end_communication();
 1322  }
1323 
1324 
1325 
1326  template <typename T1, typename T2>
1327  void
 1328  ConsensusAlgorithm_PEX<T1, T2>::process_requests(const unsigned int index)
 1329  {
1330 #ifdef DEAL_II_WITH_MPI
1331  const int tag_request =
 1332  internal::Tags::consensus_algorithm_pex_answer_request;
 1333  const int tag_deliver =
 1334  internal::Tags::consensus_algorithm_pex_process_deliver;
 1335 
1336  MPI_Status status;
1337  MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
1338 
1339  // get rank of incoming message
1340  const auto other_rank = status.MPI_SOURCE;
1341 
1342  std::vector<T1> buffer_recv;
1343 
1344  // get size of incoming message
1345  int number_amount;
1346  auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
1347  AssertThrowMPI(ierr);
1348 
1349  // allocate memory for incoming message
1350  Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
1351  buffer_recv.resize(number_amount / sizeof(T1));
1352  ierr = MPI_Recv(buffer_recv.data(),
1353  number_amount,
1354  MPI_BYTE,
1355  other_rank,
1356  tag_request,
1357  this->comm,
1358  &status);
1359  AssertThrowMPI(ierr);
1360 
1361  // process request
1362  auto &request_buffer = requests_buffers[index];
1363  this->process.process_request(other_rank, buffer_recv, request_buffer);
1364 
1365  // start to send answer back
1366  ierr = MPI_Isend(request_buffer.data(),
1367  request_buffer.size() * sizeof(T2),
1368  MPI_BYTE,
1369  other_rank,
1370  tag_deliver,
1371  this->comm,
1372  &requests_answers[index]);
1373  AssertThrowMPI(ierr);
1374 #else
1375  (void)index;
1376 #endif
1377  }
1378 
1379 
1380 
1381  template <typename T1, typename T2>
1382  unsigned int
 1383  ConsensusAlgorithm_PEX<T1, T2>::start_communication()
 1384  {
1385 #ifdef DEAL_II_WITH_MPI
1386  // 1) determine with which processes this process wants to communicate
1387  targets = this->process.compute_targets();
1388 
1389  const int tag_request =
 1390  internal::Tags::consensus_algorithm_pex_answer_request;
 1391  const int tag_deliver =
 1392  internal::Tags::consensus_algorithm_pex_process_deliver;
 1393 
1394  // 2) determine who wants to communicate with this process
1395  sources =
 1396  compute_point_to_point_communication_pattern(this->comm, targets);
 1397 
1398  const auto n_targets = targets.size();
1399  const auto n_sources = sources.size();
1400 
1401  // 2) allocate memory
1402  recv_buffers.resize(n_targets);
1403  send_buffers.resize(n_targets);
1404  send_and_recv_buffers.resize(2 * n_targets);
1405 
1406  requests_answers.resize(n_sources);
1407  requests_buffers.resize(n_sources);
1408 
1409  // 4) send and receive
1410  for (unsigned int i = 0; i < n_targets; i++)
1411  {
1412  const unsigned int rank = targets[i];
1413 
1414  // pack data which should be sent
1415  auto &send_buffer = send_buffers[i];
1416  this->process.pack_recv_buffer(rank, send_buffer);
1417 
1418  // start to send data
1419  auto ierr = MPI_Isend(send_buffer.data(),
1420  send_buffer.size() * sizeof(T1),
1421  MPI_BYTE,
1422  rank,
1423  tag_request,
1424  this->comm,
1425  &send_and_recv_buffers[n_targets + i]);
1426  AssertThrowMPI(ierr);
1427 
1428  // start to receive data
1429  auto &recv_buffer = recv_buffers[i];
1430  this->process.prepare_recv_buffer(rank, recv_buffer);
1431  ierr = MPI_Irecv(recv_buffer.data(),
1432  recv_buffer.size() * sizeof(T2),
1433  MPI_BYTE,
1434  rank,
1435  tag_deliver,
1436  this->comm,
1437  &send_and_recv_buffers[i]);
1438  AssertThrowMPI(ierr);
1439  }
1440 
1441  return sources.size();
1442 #else
1443  return 0;
1444 #endif
1445  }
1446 
1447 
1448 
1449  template <typename T1, typename T2>
1450  void
 1451  ConsensusAlgorithm_PEX<T1, T2>::clean_up_and_end_communication()
 1452  {
1453 #ifdef DEAL_II_WITH_MPI
1454  // finalize all MPI_Requests
1455  if (send_and_recv_buffers.size() > 0)
1456  {
1457  auto ierr = MPI_Waitall(send_and_recv_buffers.size(),
1458  send_and_recv_buffers.data(),
1459  MPI_STATUSES_IGNORE);
1460  AssertThrowMPI(ierr);
1461  }
1462 
1463  if (requests_answers.size() > 0)
1464  {
1465  auto ierr = MPI_Waitall(requests_answers.size(),
1466  requests_answers.data(),
1467  MPI_STATUSES_IGNORE);
1468  AssertThrowMPI(ierr);
1469  }
1470 
1471  // unpack received data
1472  for (unsigned int i = 0; i < targets.size(); i++)
1473  this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
1474 #endif
1475  }
1476 
1477 
1478 
1479  template <typename T1, typename T2>
 1480  ConsensusAlgorithmSelector<T1, T2>::ConsensusAlgorithmSelector(
 1481  ConsensusAlgorithmProcess<T1, T2> &process,
 1482  const MPI_Comm & comm)
1483  : ConsensusAlgorithm<T1, T2>(process, comm)
1484  {
 1485  // Depending on the number of processes, we switch between implementations.
 1486  // We reduce the threshold in debug mode to be able to also test the
1487  // non-blocking implementation. This feature is tested by:
1488  // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=15.output
1489 #ifdef DEAL_II_WITH_MPI
1490 # if DEAL_II_MPI_VERSION_GTE(3, 0)
1491 # ifdef DEBUG
1492  if (Utilities::MPI::n_mpi_processes(comm) > 14)
1493 # else
1494  if (Utilities::MPI::n_mpi_processes(comm) > 99)
1495 # endif
1496  consensus_algo.reset(new ConsensusAlgorithm_NBX<T1, T2>(process, comm));
1497  else
1498 # endif
1499 #endif
1500  consensus_algo.reset(new ConsensusAlgorithm_PEX<T1, T2>(process, comm));
1501  }
1502 
1503 
1504 
1505  template <typename T1, typename T2>
1506  void
 1507  ConsensusAlgorithmSelector<T1, T2>::run()
 1508  {
1509  consensus_algo->run();
1510  }
1511 
1512 
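// Illustrative sketch (not part of mpi.cc) of how the ConsensusAlgorithmProcess
// interface used above can be implemented by user code. The type pair
// <std::pair<global_dof_index, global_dof_index>, unsigned int> is chosen
// because it is the combination explicitly instantiated at the end of this
// file; the names RangeQueryProcess and run_consensus() are hypothetical.

#include <deal.II/base/mpi.h>
#include <deal.II/base/types.h>

#include <utility>
#include <vector>

using RequestType = std::pair<dealii::types::global_dof_index,
                              dealii::types::global_dof_index>;

class RangeQueryProcess
  : public dealii::Utilities::MPI::ConsensusAlgorithmProcess<RequestType,
                                                             unsigned int>
{
public:
  // `targets` must not contain the rank of the calling process itself
  RangeQueryProcess(const std::vector<unsigned int> &targets)
    : targets(targets)
  {}

  virtual std::vector<unsigned int>
  compute_targets() override
  {
    return targets;
  }

  virtual void
  pack_recv_buffer(const int, std::vector<RequestType> &send_buffer) override
  {
    send_buffer.emplace_back(0, 42); // one (dummy) index range as the request
  }

  virtual void
  prepare_recv_buffer(const int,
                      std::vector<unsigned int> &recv_buffer) override
  {
    recv_buffer.resize(1); // we expect exactly one number as the answer
  }

  virtual void
  process_request(const unsigned int other_rank,
                  const std::vector<RequestType> &,
                  std::vector<unsigned int> &request_buffer) override
  {
    request_buffer.push_back(other_rank); // a dummy answer of matching size
  }

private:
  const std::vector<unsigned int> targets;
};

void run_consensus(const MPI_Comm &                 comm,
                   const std::vector<unsigned int> &targets)
{
  RangeQueryProcess process(targets);
  dealii::Utilities::MPI::ConsensusAlgorithmSelector<RequestType, unsigned int>
    algorithm(process, comm);
  algorithm.run();
}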
1513 
1514  std::vector<unsigned int>
1515  compute_index_owner(const IndexSet &owned_indices,
1516  const IndexSet &indices_to_look_up,
1517  const MPI_Comm &comm)
1518  {
1519  Assert(owned_indices.size() == indices_to_look_up.size(),
1520  ExcMessage("IndexSets have to have the same sizes."));
1521 
1522  Assert(
1523  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1524  ExcMessage("IndexSets have to have the same size on all processes."));
1525 
1526  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1527 
1528  // Step 1: setup dictionary
1529  // The input owned_indices can be partitioned arbitrarily. In the
1530  // dictionary, the index set is statically repartitioned among the
 1531  // processes again and extended with information about the actual owner
 1532  // of that index.
 1533  internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
 1534  owned_indices, indices_to_look_up, comm, owning_ranks);
1535 
1536  // Step 2: read dictionary
1537  // Communicate with the process who owns the index in the static
1538  // partition (i.e. in the dictionary). This process returns the actual
1539  // owner of the index.
 1540  ConsensusAlgorithmSelector<
 1541  std::pair<types::global_dof_index, types::global_dof_index>,
1542  unsigned int>
1543  consensus_algorithm(process, comm);
1544  consensus_algorithm.run();
1545 
1546  return owning_ranks;
1547  }
1548 
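// Illustrative usage sketch (not part of mpi.cc): given the locally owned
// index set of every process and a set of (ghost) indices we are interested
// in, the function returns the owning rank of every requested index. Both
// index sets must have the same global size on all processes;
// owners_of_ghosts() is a hypothetical helper name.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

std::vector<unsigned int>
owners_of_ghosts(const dealii::IndexSet &locally_owned,
                 const dealii::IndexSet &ghost_indices,
                 const MPI_Comm &        comm)
{
  return dealii::Utilities::MPI::compute_index_owner(locally_owned,
                                                     ghost_indices,
                                                     comm);
}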
1549  template class ConsensusAlgorithmSelector<
1550  std::pair<types::global_dof_index, types::global_dof_index>,
1551  unsigned int>;
1552 
1553 
1554 
 1555  CollectiveMutex::CollectiveMutex()
 1556  : locked(false)
 1557  , request(MPI_REQUEST_NULL)
 1558  {
 1559  Utilities::MPI::MPI_InitFinalize::register_request(request);
 1560  }
1561 
1562 
1563 
 1564  CollectiveMutex::~CollectiveMutex()
 1565  {
1566  Assert(
1567  !locked,
1568  ExcMessage(
1569  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1570 
 1571  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
 1572  }
1573 
1574 
1575 
1576  void
1577  CollectiveMutex::lock(MPI_Comm comm)
1578  {
1579  (void)comm;
1580 
1581  Assert(
1582  !locked,
1583  ExcMessage(
1584  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1585 
1586 #ifdef DEAL_II_WITH_MPI
1587 
1588  // TODO: For now, we implement this mutex with a blocking barrier
 1589  // in the lock and unlock. It remains to be tested whether we can move
1590  // to a nonblocking barrier (code disabled below).
1591 
1592  const int ierr = MPI_Barrier(comm);
1593  AssertThrowMPI(ierr);
1594 
1595 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1596  // wait for non-blocking barrier to finish. This is a noop the
1597  // first time we lock().
1598  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1599  AssertThrowMPI(ierr);
1600 # else
1601  // nothing to do as blocking barrier already completed
1602 # endif
1603 #endif
1604 
1605  locked = true;
1606  }
1607 
1608 
1609 
1610  void
 1611  CollectiveMutex::unlock(MPI_Comm comm)
 1612  {
1613  (void)comm;
1614 
1615  Assert(
1616  locked,
1617  ExcMessage(
1618  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1619 
1620 #ifdef DEAL_II_WITH_MPI
1621 
1622  // TODO: For now, we implement this mutex with a blocking barrier
 1623  // in the lock and unlock. It remains to be tested whether we can move
1624  // to a nonblocking barrier (code disabled below):
1625 
1626 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1627  const int ierr = MPI_Ibarrier(comm, &request);
1628  AssertThrowMPI(ierr);
1629 # else
1630  const int ierr = MPI_Barrier(comm);
1631  AssertThrowMPI(ierr);
1632 # endif
1633 #endif
1634 
1635  locked = false;
1636  }
1637 
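// Illustrative usage sketch (not part of mpi.cc), following the ScopedLock
// pattern used by the consensus algorithms above to keep separate
// communication rounds on the same communicator from interleaving. All
// processes of the communicator must enter the critical section;
// guarded_communication() is a hypothetical function name.

#include <deal.II/base/mpi.h>

void guarded_communication(const MPI_Comm &comm)
{
  static dealii::Utilities::MPI::CollectiveMutex mutex;
  dealii::Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);

  // ... point-to-point communication that must not mix with other rounds ...

} // the mutex is released (collectively) when `lock` goes out of scope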
1638 #include "mpi.inst"
1639  } // end of namespace MPI
1640 } // end of namespace Utilities
1641 
1642 DEAL_II_NAMESPACE_CLOSE