Reference documentation for deal.II version Git c4a4607bd1 2020-01-22 19:33:00 -0500
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/utilities.h>
25 
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/vector_memory.h>
29 
30 #include <iostream>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #ifdef DEAL_II_WITH_TRILINOS
36 # ifdef DEAL_II_WITH_MPI
37 # include <deal.II/lac/trilinos_parallel_block_vector.h>
38 # include <deal.II/lac/trilinos_vector.h>
39 # include <deal.II/lac/vector_memory.h>
40 
41 # include <Epetra_MpiComm.h>
42 # endif
43 #endif
44 
45 #ifdef DEAL_II_WITH_PETSC
46 # include <deal.II/lac/petsc_block_vector.h>
47 # include <deal.II/lac/petsc_vector.h>
48 
49 # include <petscsys.h>
50 #endif
51 
52 #ifdef DEAL_II_WITH_SLEPC
53 # include <deal.II/lac/slepc_solver.h>
54 
55 # include <slepcsys.h>
56 #endif
57 
58 #ifdef DEAL_II_WITH_P4EST
59 # include <p4est_bits.h>
60 #endif
61 
62 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
63 # include <zoltan_cpp.h>
64 #endif
65 
66 DEAL_II_NAMESPACE_OPEN
67 
68 
69 namespace Utilities
70 {
71  namespace MPI
72  {
73 #ifdef DEAL_II_WITH_MPI
74  unsigned int
75  n_mpi_processes(const MPI_Comm &mpi_communicator)
76  {
77  int n_jobs = 1;
78  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
79  AssertThrowMPI(ierr);
80 
81  return n_jobs;
82  }
83 
84 
85  unsigned int
86  this_mpi_process(const MPI_Comm &mpi_communicator)
87  {
88  int rank = 0;
89  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
90  AssertThrowMPI(ierr);
91 
92  return rank;
93  }
94 
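To illustrate how these two wrappers are typically used together, here is a minimal sketch (not part of mpi.cc; the function name report_job_layout is made up) assuming deal.II's <deal.II/base/mpi.h> header:

// Illustrative sketch: query rank and size of MPI_COMM_WORLD through the
// deal.II wrappers and let only rank 0 print a summary.
#include <deal.II/base/mpi.h>
#include <iostream>

void report_job_layout()
{
  const unsigned int rank =
    dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_ranks =
    dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  if (rank == 0)
    std::cout << "Running with " << n_ranks << " MPI processes." << std::endl;
}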
95 
96  MPI_Comm
97  duplicate_communicator(const MPI_Comm &mpi_communicator)
98  {
99  MPI_Comm new_communicator;
100  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
101  AssertThrowMPI(ierr);
102  return new_communicator;
103  }
104 
105 
106 
107  void
108  free_communicator(MPI_Comm &mpi_communicator)
109  {
110  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
111  const int ierr = MPI_Comm_free(&mpi_communicator);
112  AssertThrowMPI(ierr);
113  }
114 
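A hedged usage sketch (not part of mpi.cc; use_private_communicator is a made-up name) showing duplicate_communicator() and free_communicator() used as a pair, so that a component communicates on its own duplicate rather than on the caller's communicator:

#include <deal.II/base/mpi.h>

void use_private_communicator(const MPI_Comm &user_comm)
{
  // work on a private duplicate so our messages cannot collide with
  // communication the caller issues on user_comm
  MPI_Comm my_comm = dealii::Utilities::MPI::duplicate_communicator(user_comm);

  // ... point-to-point or collective communication on my_comm ...

  // MPI_Comm_free() is called internally; my_comm becomes MPI_COMM_NULL
  dealii::Utilities::MPI::free_communicator(my_comm);
}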
115 
116 
117  int
118  create_group(const MPI_Comm & comm,
119  const MPI_Group &group,
120  const int tag,
121  MPI_Comm * new_comm)
122  {
123 # if DEAL_II_MPI_VERSION_GTE(3, 0)
124  return MPI_Comm_create_group(comm, group, tag, new_comm);
125 # else
126  int rank;
127  int ierr = MPI_Comm_rank(comm, &rank);
128  AssertThrowMPI(ierr);
129 
130  int grp_rank;
131  ierr = MPI_Group_rank(group, &grp_rank);
132  AssertThrowMPI(ierr);
133  if (grp_rank == MPI_UNDEFINED)
134  {
135  *new_comm = MPI_COMM_NULL;
136  return MPI_SUCCESS;
137  }
138 
139  int grp_size;
140  ierr = MPI_Group_size(group, &grp_size);
141  AssertThrowMPI(ierr);
142 
143  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
144  AssertThrowMPI(ierr);
145 
146  MPI_Group parent_grp;
147  ierr = MPI_Comm_group(comm, &parent_grp);
148  AssertThrowMPI(ierr);
149 
150  std::vector<int> pids(grp_size);
151  std::vector<int> grp_pids(grp_size);
152  std::iota(grp_pids.begin(), grp_pids.end(), 0);
153  ierr = MPI_Group_translate_ranks(
154  group, grp_size, grp_pids.data(), parent_grp, pids.data());
155  AssertThrowMPI(ierr);
156  ierr = MPI_Group_free(&parent_grp);
157  AssertThrowMPI(ierr);
158 
159  MPI_Comm comm_old = *new_comm;
160  MPI_Comm ic;
161  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
162  {
163  const int gid = grp_rank / merge_sz;
164  comm_old = *new_comm;
165  if (gid % 2 == 0)
166  {
167  if ((gid + 1) * merge_sz < grp_size)
168  {
169  ierr = (MPI_Intercomm_create(
170  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
171  AssertThrowMPI(ierr);
172  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
173  AssertThrowMPI(ierr);
174  }
175  }
176  else
177  {
178  ierr = MPI_Intercomm_create(
179  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
180  AssertThrowMPI(ierr);
181  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
182  AssertThrowMPI(ierr);
183  }
184  if (*new_comm != comm_old)
185  {
186  ierr = MPI_Comm_free(&ic);
187  AssertThrowMPI(ierr);
188  ierr = MPI_Comm_free(&comm_old);
189  AssertThrowMPI(ierr);
190  }
191  }
192 
193  return MPI_SUCCESS;
194 # endif
195  }
196 
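As a sketch of how create_group() can be used (illustrative only, assuming deal.II was configured with MPI; make_even_rank_communicator is a made-up name), the following builds a communicator containing the even ranks of a parent communicator. Per the MPI standard, ranks that are not members of the group obtain MPI_COMM_NULL:

#include <deal.II/base/mpi.h>
#include <vector>

MPI_Comm make_even_rank_communicator(const MPI_Comm &comm)
{
  MPI_Group world_group;
  MPI_Comm_group(comm, &world_group);

  std::vector<int> even_ranks;
  const int n_ranks =
    static_cast<int>(dealii::Utilities::MPI::n_mpi_processes(comm));
  for (int r = 0; r < n_ranks; r += 2)
    even_ranks.push_back(r);

  MPI_Group even_group;
  MPI_Group_incl(world_group,
                 static_cast<int>(even_ranks.size()),
                 even_ranks.data(),
                 &even_group);

  MPI_Comm even_comm; // becomes MPI_COMM_NULL on odd ranks
  dealii::Utilities::MPI::create_group(comm, even_group, /*tag=*/55, &even_comm);

  MPI_Group_free(&even_group);
  MPI_Group_free(&world_group);
  return even_comm;
}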
197 
198 
199  std::vector<IndexSet>
200  create_ascending_partitioning(const MPI_Comm & comm,
201  const IndexSet::size_type &local_size)
202  {
203  const unsigned int n_proc = n_mpi_processes(comm);
204  const std::vector<IndexSet::size_type> sizes =
205  all_gather(comm, local_size);
206  const auto total_size =
207  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
208 
209  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
210 
211  IndexSet::size_type begin = 0;
212  for (unsigned int i = 0; i < n_proc; ++i)
213  {
214  res[i].add_range(begin, begin + sizes[i]);
215  begin = begin + sizes[i];
216  }
217 
218  return res;
219  }
220 
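A worked example (illustrative, not part of mpi.cc; locally_owned_range is a made-up name): on three ranks with local sizes 3, 0, and 5 the call returns, on every rank, the partition {[0,3), {}, [3,8)} of the global range [0,8):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

dealii::IndexSet locally_owned_range(const MPI_Comm &   comm,
                                     const unsigned int n_locally_owned)
{
  const std::vector<dealii::IndexSet> partition =
    dealii::Utilities::MPI::create_ascending_partitioning(comm,
                                                          n_locally_owned);

  // every entry describes the same global size (the sum of all local sizes);
  // the contiguous range owned by this process is the one at its own rank
  return partition[dealii::Utilities::MPI::this_mpi_process(comm)];
}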
221 
222 
227  class ConsensusAlgorithmProcessTargets
228  : public ConsensusAlgorithmProcess<int, int>
229  {
230  public:
231  ConsensusAlgorithmProcessTargets(const std::vector<unsigned int> &target)
232  : target(target)
233  {}
234 
235  using T1 = int;
236  using T2 = int;
237 
238  virtual void
239  answer_request(const unsigned int other_rank,
240  const std::vector<T1> &,
241  std::vector<T2> &) override
242  {
243  this->sources.push_back(other_rank);
244  }
245 
251  virtual std::vector<unsigned int>
252  compute_targets() override
253  {
254  return target;
255  }
256 
262  std::vector<unsigned int>
263  get_result()
264  {
265  std::sort(sources.begin(), sources.end());
266  return sources;
267  }
268 
269  private:
273  const std::vector<unsigned int> &target;
274 
278  std::vector<unsigned int> sources;
279  };
280 
281 
282 
283  std::vector<unsigned int>
284  compute_point_to_point_communication_pattern(
285  const MPI_Comm & mpi_comm,
286  const std::vector<unsigned int> &destinations)
287  {
288  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
289  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
290  (void)myid;
291  (void)n_procs;
292 
293  for (const unsigned int destination : destinations)
294  {
295  (void)destination;
296  AssertIndexRange(destination, n_procs);
297  Assert(destination != myid,
298  ExcMessage(
299  "There is no point in communicating with ourselves."));
300  }
301 
302 # if DEAL_II_MPI_VERSION_GTE(3, 0)
303 
304  ConsensusAlgorithmProcessTargets process(destinations);
305  ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
306  ConsensusAlgorithmProcessTargets::T2>
307  consensus_algorithm(process, mpi_comm);
308  consensus_algorithm.run();
309  return process.get_result();
310 
311 # elif DEAL_II_MPI_VERSION_GTE(2, 2)
312 
313  static CollectiveMutex mutex;
314  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
315 
316  const int mpi_tag =
317  internal::Tags::compute_point_to_point_communication_pattern;
318 
319  // Calculate the number of messages to send to each process
320  std::vector<unsigned int> dest_vector(n_procs);
321  for (const auto &el : destinations)
322  ++dest_vector[el];
323 
324  // Find how many processes will send to this one
325  // by reducing with sum and then scattering the
326  // results over all processes
327  unsigned int n_recv_from;
328  const int ierr = MPI_Reduce_scatter_block(
329  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
330 
331  AssertThrowMPI(ierr);
332 
333  // Send myid to every process in `destinations` vector...
334  std::vector<MPI_Request> send_requests(destinations.size());
335  for (const auto &el : destinations)
336  {
337  const int ierr =
338  MPI_Isend(&myid,
339  1,
340  MPI_UNSIGNED,
341  el,
342  mpi_tag,
343  mpi_comm,
344  send_requests.data() + (&el - destinations.data()));
345  AssertThrowMPI(ierr);
346  }
347 
348 
349  // Receive `n_recv_from` times from the processes
350  // who communicate with this one. Store the obtained id's
351  // in the resulting vector
352  std::vector<unsigned int> origins(n_recv_from);
353  for (auto &el : origins)
354  {
355  const int ierr = MPI_Recv(&el,
356  1,
357  MPI_UNSIGNED,
358  MPI_ANY_SOURCE,
359  mpi_tag,
360  mpi_comm,
361  MPI_STATUS_IGNORE);
362  AssertThrowMPI(ierr);
363  }
364 
365  if (destinations.size() > 0)
366  {
367  const int ierr = MPI_Waitall(destinations.size(),
368  send_requests.data(),
369  MPI_STATUSES_IGNORE);
370  AssertThrowMPI(ierr);
371  }
372 
373  return origins;
374 # else
375  // let all processors communicate the maximal number of destinations
376  // they have
377  const unsigned int max_n_destinations =
378  Utilities::MPI::max(destinations.size(), mpi_comm);
379 
380  if (max_n_destinations == 0)
381  // all processes have nothing to send/receive:
382  return std::vector<unsigned int>();
383 
384  // now that we know the number of data packets every processor wants to
385  // send, set up a buffer with the maximal size and copy our destinations
386  // in there, padded with -1's
387  std::vector<unsigned int> my_destinations(max_n_destinations,
388  numbers::invalid_unsigned_int);
389  std::copy(destinations.begin(),
390  destinations.end(),
391  my_destinations.begin());
392 
393  // now exchange these (we could communicate less data if we used
394  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
395  // processors in this case, which is more expensive than the reduction
396  // operation above in MPI_Allreduce)
397  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
398  const int ierr = MPI_Allgather(my_destinations.data(),
399  max_n_destinations,
400  MPI_UNSIGNED,
401  all_destinations.data(),
402  max_n_destinations,
403  MPI_UNSIGNED,
404  mpi_comm);
405  AssertThrowMPI(ierr);
406 
407  // now we know who is going to communicate with whom. collect who is
408  // going to communicate with us!
409  std::vector<unsigned int> origins;
410  for (unsigned int i = 0; i < n_procs; ++i)
411  for (unsigned int j = 0; j < max_n_destinations; ++j)
412  if (all_destinations[i * max_n_destinations + j] == myid)
413  origins.push_back(i);
414  else if (all_destinations[i * max_n_destinations + j] ==
415  numbers::invalid_unsigned_int)
416  break;
417 
418  return origins;
419 # endif
420  }
421 
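A usage sketch (illustrative, not part of mpi.cc; who_will_contact_me is a made-up name): every rank declares that it wants to send to its right neighbor, and the pattern computation tells each rank that its left neighbor will contact it:

#include <deal.II/base/mpi.h>
#include <vector>

std::vector<unsigned int> who_will_contact_me(const MPI_Comm &comm)
{
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  std::vector<unsigned int> destinations;
  if (n_ranks > 1) // sending to ourselves is not allowed
    destinations.push_back((my_rank + 1) % n_ranks);

  // on rank r this returns {(r + n_ranks - 1) % n_ranks} for n_ranks > 1
  return dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
    comm, destinations);
}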
422 
423 
424  unsigned int
425  compute_n_point_to_point_communications(
426  const MPI_Comm & mpi_comm,
427  const std::vector<unsigned int> &destinations)
428  {
429  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
430 
431  for (const unsigned int destination : destinations)
432  {
433  (void)destination;
434  AssertIndexRange(destination, n_procs);
435  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
436  ExcMessage(
437  "There is no point in communicating with ourselves."));
438  }
439 
440  // Calculate the number of messages to send to each process
441  std::vector<unsigned int> dest_vector(n_procs);
442  for (const auto &el : destinations)
443  ++dest_vector[el];
444 
445 # if DEAL_II_MPI_VERSION_GTE(2, 2)
446  // Find out how many processes will send to this one
447  // MPI_Reduce_scatter(_block) does exactly this
448  unsigned int n_recv_from = 0;
449 
450  const int ierr = MPI_Reduce_scatter_block(
451  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
452 
453  AssertThrowMPI(ierr);
454 
455  return n_recv_from;
456 # else
457  // Find out how many processes will send to this one
458  // by reducing with sum and then scattering the
459  // results over all processes
460  std::vector<unsigned int> buffer(dest_vector.size());
461  unsigned int n_recv_from = 0;
462 
463  MPI_Reduce(dest_vector.data(),
464  buffer.data(),
465  dest_vector.size(),
466  MPI_UNSIGNED,
467  MPI_SUM,
468  0,
469  mpi_comm);
470  MPI_Scatter(buffer.data(),
471  1,
472  MPI_UNSIGNED,
473  &n_recv_from,
474  1,
475  MPI_UNSIGNED,
476  0,
477  mpi_comm);
478 
479  return n_recv_from;
480 # endif
481  }
482 
483 
484 
485  namespace
486  {
487  // custom MPI_Op for min_max_avg
488  void
489  max_reduce(const void *in_lhs_,
490  void * inout_rhs_,
491  int * len,
492  MPI_Datatype *)
493  {
494  (void)len;
495  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
496  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
497 
498  Assert(*len == 1, ExcInternalError());
499 
500  inout_rhs->sum += in_lhs->sum;
501  if (inout_rhs->min > in_lhs->min)
502  {
503  inout_rhs->min = in_lhs->min;
504  inout_rhs->min_index = in_lhs->min_index;
505  }
506  else if (inout_rhs->min == in_lhs->min)
507  {
508  // choose lower cpu index when tied to make operator commutative
509  if (inout_rhs->min_index > in_lhs->min_index)
510  inout_rhs->min_index = in_lhs->min_index;
511  }
512 
513  if (inout_rhs->max < in_lhs->max)
514  {
515  inout_rhs->max = in_lhs->max;
516  inout_rhs->max_index = in_lhs->max_index;
517  }
518  else if (inout_rhs->max == in_lhs->max)
519  {
520  // choose lower cpu index when tied to make operator commutative
521  if (inout_rhs->max_index > in_lhs->max_index)
522  inout_rhs->max_index = in_lhs->max_index;
523  }
524  }
525  } // namespace
526 
527 
528 
529  MinMaxAvg
530  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
531  {
532  // If MPI was not started, we have a serial computation and cannot run
533  // the other MPI commands
534  if (job_supports_mpi() == false)
535  {
536  MinMaxAvg result;
537  result.sum = my_value;
538  result.avg = my_value;
539  result.min = my_value;
540  result.max = my_value;
541  result.min_index = 0;
542  result.max_index = 0;
543 
544  return result;
545  }
546 
547  // To avoid uninitialized values on some MPI implementations, provide
548  // result with a default value already...
549  MinMaxAvg result = {0.,
550  std::numeric_limits<double>::max(),
551  -std::numeric_limits<double>::max(),
552  0,
553  0,
554  0.};
555 
556  const unsigned int my_id =
557  ::Utilities::MPI::this_mpi_process(mpi_communicator);
558  const unsigned int numproc =
559  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
560 
561  MPI_Op op;
562  int ierr =
563  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
564  true,
565  &op);
566  AssertThrowMPI(ierr);
567 
568  MinMaxAvg in;
569  in.sum = in.min = in.max = my_value;
570  in.min_index = in.max_index = my_id;
571 
572  MPI_Datatype type;
573  int lengths[] = {3, 2};
574  MPI_Aint displacements[] = {0, offsetof(MinMaxAvg, min_index)};
575  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT};
576 
577  ierr = MPI_Type_create_struct(2, lengths, displacements, types, &type);
578  AssertThrowMPI(ierr);
579 
580  ierr = MPI_Type_commit(&type);
581  AssertThrowMPI(ierr);
582  ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
583  AssertThrowMPI(ierr);
584 
585  ierr = MPI_Type_free(&type);
586  AssertThrowMPI(ierr);
587 
588  ierr = MPI_Op_free(&op);
589  AssertThrowMPI(ierr);
590 
591  result.avg = result.sum / numproc;
592 
593  return result;
594  }
595 
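A usage sketch (illustrative, not part of mpi.cc; print_timing_statistics is a made-up name) that reduces a per-process timing into min/max/average statistics:

#include <deal.II/base/mpi.h>
#include <iostream>

void print_timing_statistics(const double local_wall_time, const MPI_Comm &comm)
{
  const dealii::Utilities::MPI::MinMaxAvg data =
    dealii::Utilities::MPI::min_max_avg(local_wall_time, comm);

  if (dealii::Utilities::MPI::this_mpi_process(comm) == 0)
    std::cout << "wall time: min " << data.min << " (rank " << data.min_index
              << "), max " << data.max << " (rank " << data.max_index
              << "), avg " << data.avg << std::endl;
}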
596 #else
597 
598  unsigned int
599  n_mpi_processes(const MPI_Comm &)
600  {
601  return 1;
602  }
603 
604 
605 
606  unsigned int
607  this_mpi_process(const MPI_Comm &)
608  {
609  return 0;
610  }
611 
612 
613 
614  std::vector<IndexSet>
615  create_ascending_partitioning(const MPI_Comm & /*comm*/,
616  const IndexSet::size_type &local_size)
617  {
618  return std::vector<IndexSet>(1, complete_index_set(local_size));
619  }
620 
621 
622 
623  MPI_Comm
624  duplicate_communicator(const MPI_Comm &mpi_communicator)
625  {
626  return mpi_communicator;
627  }
628 
629 
630 
631  void
632  free_communicator(MPI_Comm & /*mpi_communicator*/)
633  {}
634 
635 
636 
637  MinMaxAvg
638  min_max_avg(const double my_value, const MPI_Comm &)
639  {
640  MinMaxAvg result;
641 
642  result.sum = my_value;
643  result.avg = my_value;
644  result.min = my_value;
645  result.max = my_value;
646  result.min_index = 0;
647  result.max_index = 0;
648 
649  return result;
650  }
651 
652 #endif
653 
654 
655 
656  MPI_InitFinalize::MPI_InitFinalize(int &argc,
657  char **& argv,
658  const unsigned int max_num_threads)
659  {
660  static bool constructor_has_already_run = false;
661  (void)constructor_has_already_run;
662  Assert(constructor_has_already_run == false,
663  ExcMessage("You can only create a single object of this class "
664  "in a program since it initializes the MPI system."));
665 
666 
667  int ierr = 0;
668 #ifdef DEAL_II_WITH_MPI
669  // if we have PETSc, we will initialize it and let it handle MPI.
670  // Otherwise, we will do it.
671  int MPI_has_been_started = 0;
672  ierr = MPI_Initialized(&MPI_has_been_started);
673  AssertThrowMPI(ierr);
674  AssertThrow(MPI_has_been_started == 0,
675  ExcMessage("MPI error. You can only start MPI once!"));
676 
677  int provided;
678  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
679  // we might use several threads but never call two MPI functions at the
680  // same time. For an explanation of why we do this, see
681  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
682  int wanted = MPI_THREAD_SERIALIZED;
683  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
684  AssertThrowMPI(ierr);
685 
686  // disable for now because at least some implementations always return
687  // MPI_THREAD_SINGLE.
688  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
689  // ExcMessage("MPI reports that we are not allowed to use multiple
690  // threads."));
691 #else
692  // make sure the compiler doesn't warn about these variables
693  (void)argc;
694  (void)argv;
695  (void)ierr;
696 #endif
697 
698  // we are allowed to call MPI_Init ourselves and PETScInitialize will
699  // detect this. This allows us to use MPI_Init_thread instead.
700 #ifdef DEAL_II_WITH_PETSC
701 # ifdef DEAL_II_WITH_SLEPC
702  // Initialize SLEPc (with PETSc):
703  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
704  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
705 # else
706  // or just initialize PETSc alone:
707  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
708  AssertThrow(ierr == 0, ExcPETScError(ierr));
709 # endif
710 
711  // Disable PETSc exception handling. This just prints a large wall
712  // of text that is not particularly helpful for what we do:
713  PetscPopSignalHandler();
714 #endif
715 
716  // Initialize zoltan
717 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
718  float version;
719  Zoltan_Initialize(argc, argv, &version);
720 #endif
721 
722 #ifdef DEAL_II_WITH_P4EST
723  // Initialize p4est and libsc components
724 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
725 # else
726  // This feature is broken in version 2.0.0 for calls to
727  // MPI_Comm_create_group (see cburstedde/p4est#30).
728  // Disabling it leads to more verbose p4est error messages
729  // which should be fine.
730  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
731 # endif
732  p4est_init(nullptr, SC_LP_SILENT);
733 #endif
734 
735  constructor_has_already_run = true;
736 
737 
738  // Now also see how many threads we'd like to run
739  if (max_num_threads != numbers::invalid_unsigned_int)
740  {
741  // set maximum number of threads (also respecting the environment
742  // variable that the called function evaluates) based on what the
743  // user asked
744  MultithreadInfo::set_thread_limit(max_num_threads);
745  }
746  else
747  // user wants automatic choice
748  {
749 #ifdef DEAL_II_WITH_MPI
750  // we need to figure out how many MPI processes there are on the
751  // current node, as well as how many CPU cores we have. for the
752  // first task, check what get_hostname() returns and then do an
753  // allgather so each processor gets the answer
754  //
755  // in calculating the length of the string, don't forget the
756  // terminating \0 on C-style strings
757  const std::string hostname = Utilities::System::get_hostname();
758  const unsigned int max_hostname_size =
759  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
760  std::vector<char> hostname_array(max_hostname_size);
761  std::copy(hostname.c_str(),
762  hostname.c_str() + hostname.size() + 1,
763  hostname_array.begin());
764 
765  std::vector<char> all_hostnames(max_hostname_size *
766  MPI::n_mpi_processes(MPI_COMM_WORLD));
767  const int ierr = MPI_Allgather(hostname_array.data(),
768  max_hostname_size,
769  MPI_CHAR,
770  all_hostnames.data(),
771  max_hostname_size,
772  MPI_CHAR,
773  MPI_COMM_WORLD);
774  AssertThrowMPI(ierr);
775 
776  // count how often our own hostname appears and determine which
777  // instance among these the current process is
778  unsigned int n_local_processes = 0;
779  unsigned int nth_process_on_host = 0;
780  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
781  ++i)
782  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
783  hostname)
784  {
785  ++n_local_processes;
786  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
787  ++nth_process_on_host;
788  }
789  Assert(nth_process_on_host > 0, ExcInternalError());
790 
791 
792  // compute how many cores each process gets. if the number does not
793  // divide evenly, then we get one more core if we are among the
794  // first few processes
795  //
796  // if the number would be zero, round up to one since every process
797  // needs to have at least one thread
798  const unsigned int n_threads =
799  std::max(MultithreadInfo::n_cores() / n_local_processes +
800  (nth_process_on_host <=
801  MultithreadInfo::n_cores() % n_local_processes ?
802  1 :
803  0),
804  1U);
805 #else
806  const unsigned int n_threads = MultithreadInfo::n_cores();
807 #endif
808 
809  // finally set this number of threads
810  MultithreadInfo::set_thread_limit(n_threads);
811  }
812  }
813 
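The usual way to drive this class is from a user program's main() function; a minimal sketch (not part of mpi.cc), here limiting every process to a single thread:

#include <deal.II/base/mpi.h>

int main(int argc, char *argv[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // ... run the parallel program; MPI (and PETSc/SLEPc/p4est, if enabled)
  //     is torn down automatically when this object goes out of scope ...

  return 0;
}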
814 
815 
816  void
817  MPI_InitFinalize::register_request(MPI_Request &request)
818  {
819  // insert if it is not in the set already:
820  requests.insert(&request);
821  }
822 
823 
824 
825  void
826  MPI_InitFinalize::unregister_request(MPI_Request &request)
827  {
828  Assert(
829  requests.find(&request) != requests.end(),
830  ExcMessage(
831  "You tried to call unregister_request() with an invalid request."));
832 
833  requests.erase(&request);
834  }
835 
836 
837 
838  std::set<MPI_Request *> MPI_InitFinalize::requests;
839 
840 
841 
842  MPI_InitFinalize::~MPI_InitFinalize()
843  {
844  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
845  // are no longer used at this point. this is relevant because the static
846  // object destructors run for these vectors at the end of the program
847  // would run after MPI_Finalize is called, leading to errors
848 
849 #ifdef DEAL_II_WITH_MPI
850  // Before exiting, wait for nonblocking communication to complete:
851  for (auto request : requests)
852  {
853  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
854  AssertThrowMPI(ierr);
855  }
856 
857  // Start with deal.II MPI vectors and delete vectors from the pools:
858  GrowingVectorMemory<
859  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
860  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
861  release_unused_memory();
862  GrowingVectorMemory<
863  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
864  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
865  release_unused_memory();
866 
867  // Next with Trilinos:
868 # if defined(DEAL_II_WITH_TRILINOS)
869  GrowingVectorMemory<
870  TrilinosWrappers::MPI::Vector>::release_unused_memory();
871  GrowingVectorMemory<
872  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
873 # endif
874 #endif
875 
876 
877  // Now deal with PETSc (with or without MPI). Only delete the vectors if
878  // finalize hasn't been called yet, otherwise this will lead to errors.
879 #ifdef DEAL_II_WITH_PETSC
880  if ((PetscInitializeCalled == PETSC_TRUE) &&
881  (PetscFinalizeCalled == PETSC_FALSE))
882  {
883  GrowingVectorMemory<
884  PETScWrappers::MPI::Vector>::release_unused_memory();
885  GrowingVectorMemory<
886  PETScWrappers::MPI::BlockVector>::release_unused_memory();
887 
888 # ifdef DEAL_II_WITH_SLEPC
889  // and now end SLEPc (with PETSc)
890  SlepcFinalize();
891 # else
892  // or just end PETSc.
893  PetscFinalize();
894 # endif
895  }
896 #endif
897 
898 // There is a similar issue with CUDA: The destructor of static objects might
899 // run after the CUDA driver is unloaded. Hence, also release all memory
900 // related to CUDA vectors.
901 #ifdef DEAL_II_WITH_CUDA
902  GrowingVectorMemory<
903  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
904  release_unused_memory();
905  GrowingVectorMemory<
906  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
907  release_unused_memory();
908 #endif
909 
910 #ifdef DEAL_II_WITH_P4EST
911  // now end p4est and libsc
912  // Note: p4est has no finalize function
913  sc_finalize();
914 #endif
915 
916 
917  // only MPI_Finalize if we are running with MPI. We also need to do this
918  // when running PETSc, because we initialize MPI ourselves before
919  // calling PetscInitialize
920 #ifdef DEAL_II_WITH_MPI
921  if (job_supports_mpi() == true)
922  {
923 # if __cpp_lib_uncaught_exceptions >= 201411
924  // std::uncaught_exception() is deprecated in c++17
925  if (std::uncaught_exceptions() > 0)
926 # else
927  if (std::uncaught_exception() == true)
928 # endif
929  {
930  std::cerr
931  << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
932  << this_mpi_process(MPI_COMM_WORLD)
933  << ". Skipping MPI_Finalize() to avoid a deadlock."
934  << std::endl;
935  }
936  else
937  {
938  const int ierr = MPI_Finalize();
939  (void)ierr;
940  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
941  }
942  }
943 #endif
944  }
945 
946 
947 
948  bool
949  job_supports_mpi()
950  {
951 #ifdef DEAL_II_WITH_MPI
952  int MPI_has_been_started = 0;
953  const int ierr = MPI_Initialized(&MPI_has_been_started);
954  AssertThrowMPI(ierr);
955 
956  return (MPI_has_been_started > 0);
957 #else
958  return false;
959 #endif
960  }
961 
962  template <typename T1, typename T2>
963  void
964  ConsensusAlgorithmProcess<T1, T2>::answer_request(const unsigned int,
965  const std::vector<T1> &,
966  std::vector<T2> &)
967  {
968  // nothing to do
969  }
970 
971 
972 
973  template <typename T1, typename T2>
974  void
975  ConsensusAlgorithmProcess<T1, T2>::create_request(const int,
976  std::vector<T1> &)
977  {
978  // nothing to do
979  }
980 
981 
982 
983  template <typename T1, typename T2>
984  void
985  ConsensusAlgorithmProcess<T1, T2>::prepare_buffer_for_answer(
986  const int,
987  std::vector<T2> &)
988  {
989  // nothing to do
990  }
991 
992 
993 
994  template <typename T1, typename T2>
995  void
996  ConsensusAlgorithmProcess<T1, T2>::read_answer(const int,
997  const std::vector<T2> &)
998  {
999  // nothing to do
1000  }
1001 
1002 
1003 
1004  template <typename T1, typename T2>
1005  ConsensusAlgorithm<T1, T2>::ConsensusAlgorithm(
1006  ConsensusAlgorithmProcess<T1, T2> &process,
1007  const MPI_Comm & comm)
1008  : process(process)
1009  , comm(comm)
1010  , my_rank(this_mpi_process(comm))
1011  , n_procs(n_mpi_processes(comm))
1012  {}
1013 
1014 
1015 
1016  template <typename T1, typename T2>
1017  ConsensusAlgorithm_NBX<T1, T2>::ConsensusAlgorithm_NBX(
1018  ConsensusAlgorithmProcess<T1, T2> &process,
1019  const MPI_Comm & comm)
1020  : ConsensusAlgorithm<T1, T2>(process, comm)
1021  {}
1022 
1023 
1024 
1025  template <typename T1, typename T2>
1026  void
1027  ConsensusAlgorithm_NBX<T1, T2>::run()
1028  {
1029  static CollectiveMutex mutex;
1030  CollectiveMutex::ScopedLock lock(mutex, this->comm);
1031 
1032  // 1) send requests and start receiving the answers
1033  start_communication();
1034 
1035  // 2) answer requests and check if all requests of this process have been
1036  // answered
1037  while (!check_own_state())
1038  answer_requests();
1039 
1040  // 3) signal to all other processes that all requests of this process have
1041  // been answered
1042  signal_finish();
1043 
1044  // 4) nevertheless, this process has to keep on answering (potential)
1045  // incoming requests until all processes have received the
1046  // answer to all requests
1047  while (!check_global_state())
1048  answer_requests();
1049 
1050  // 5) process the answer to all requests
1051  clean_up_and_end_communication();
1052  }
1053 
1054 
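To make the division of labor between a ConsensusAlgorithm and its ConsensusAlgorithmProcess concrete, here is a minimal user-side sketch (not part of mpi.cc; ExampleProcess, payload, and answers are made-up names) that sends one integer to each target rank and records one integer answer per target. It relies only on the virtual interface defined above:

#include <deal.II/base/mpi.h>

#include <map>
#include <vector>

class ExampleProcess
  : public dealii::Utilities::MPI::ConsensusAlgorithmProcess<int, int>
{
public:
  ExampleProcess(const std::vector<unsigned int> &targets, const int payload)
    : targets(targets)
    , payload(payload)
  {}

  // the ranks this process wants to send a request to
  virtual std::vector<unsigned int> compute_targets() override
  {
    return targets;
  }

  // pack the request for one particular target rank
  virtual void create_request(const int, std::vector<int> &send_buffer) override
  {
    send_buffer = {payload};
  }

  // called on the receiving side: produce the answer to a request
  virtual void answer_request(const unsigned int      other_rank,
                              const std::vector<int> &buffer_recv,
                              std::vector<int> &      request_buffer) override
  {
    request_buffer = {buffer_recv[0] + static_cast<int>(other_rank)};
  }

  // the answer consists of exactly one integer
  virtual void prepare_buffer_for_answer(const int,
                                         std::vector<int> &recv_buffer) override
  {
    recv_buffer.resize(1);
  }

  // store the answer received from one target rank
  virtual void read_answer(const int               other_rank,
                           const std::vector<int> &recv_buffer) override
  {
    answers[other_rank] = recv_buffer[0];
  }

private:
  const std::vector<unsigned int> targets;
  const int                       payload;
  std::map<int, int>              answers;
};

// usage: ExampleProcess process(targets, 42);
//        dealii::Utilities::MPI::ConsensusAlgorithmSelector<int, int>(
//          process, comm).run();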
1055 
1056  template <typename T1, typename T2>
1057  bool
1058  ConsensusAlgorithm_NBX<T1, T2>::check_own_state()
1059  {
1060 #ifdef DEAL_II_WITH_MPI
1061  int all_receive_requests_are_done;
1062  const auto ierr = MPI_Testall(recv_requests.size(),
1063  recv_requests.data(),
1064  &all_receive_requests_are_done,
1065  MPI_STATUSES_IGNORE);
1066  AssertThrowMPI(ierr);
1067 
1068  return all_receive_requests_are_done;
1069 #else
1070  return true;
1071 #endif
1072  }
1073 
1074 
1075 
1076  template <typename T1, typename T2>
1077  void
1078  ConsensusAlgorithm_NBX<T1, T2>::signal_finish()
1079  {
1080 #ifdef DEAL_II_WITH_MPI
1081 # if DEAL_II_MPI_VERSION_GTE(3, 0)
1082  const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
1083  AssertThrowMPI(ierr);
1084 # else
1085  AssertThrow(
1086  false,
1087  ExcMessage(
1088  "ConsensusAlgorithm_NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
1089 # endif
1090 #endif
1091  }
1092 
1093 
1094 
1095  template <typename T1, typename T2>
1096  bool
1097  ConsensusAlgorithm_NBX<T1, T2>::check_global_state()
1098  {
1099 #ifdef DEAL_II_WITH_MPI
1100  int all_ranks_reached_barrier;
1101  const auto ierr = MPI_Test(&barrier_request,
1102  &all_ranks_reached_barrier,
1103  MPI_STATUSES_IGNORE);
1104  AssertThrowMPI(ierr);
1105  return all_ranks_reached_barrier;
1106 #else
1107  return true;
1108 #endif
1109  }
1110 
1111 
1112 
1113  template <typename T1, typename T2>
1114  void
1115  ConsensusAlgorithm_NBX<T1, T2>::answer_requests()
1116  {
1117 #ifdef DEAL_II_WITH_MPI
1118 
1119  const int tag_request =
1120  internal::Tags::consensus_algorithm_nbx_answer_request;
1121  const int tag_deliver =
1122  internal::Tags::consensus_algorithm_nbx_process_deliver;
1123 
1124  // check if there is a request pending
1125  MPI_Status status;
1126  int request_is_pending;
1127  const auto ierr = MPI_Iprobe(
1128  MPI_ANY_SOURCE, tag_request, this->comm, &request_is_pending, &status);
1129  AssertThrowMPI(ierr);
1130 
1131  if (request_is_pending) // request is pending
1132  {
1133  // get rank of requesting process
1134  const auto other_rank = status.MPI_SOURCE;
1135 
1136 # ifdef DEBUG
1137  Assert(requesting_processes.find(other_rank) ==
1138  requesting_processes.end(),
1139  ExcMessage("Process is requesting a second time!"));
1140  requesting_processes.insert(other_rank);
1141 # endif
1142 
1143  std::vector<T1> buffer_recv;
1144  // get size of incoming message
1145  int number_amount;
1146  auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
1147  AssertThrowMPI(ierr);
1148 
1149  // allocate memory for incoming message
1150  Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
1151  buffer_recv.resize(number_amount / sizeof(T1));
1152  ierr = MPI_Recv(buffer_recv.data(),
1153  number_amount,
1154  MPI_BYTE,
1155  other_rank,
1156  tag_request,
1157  this->comm,
1158  &status);
1159  AssertThrowMPI(ierr);
1160 
1161  // allocate memory for answer message
1162  request_buffers.emplace_back(
1163  std_cxx14::make_unique<std::vector<T2>>());
1164  request_requests.emplace_back(std_cxx14::make_unique<MPI_Request>());
1165 
1166  // process request
1167  auto &request_buffer = *request_buffers.back();
1168  this->process.answer_request(other_rank, buffer_recv, request_buffer);
1169 
1170  // start to send answer back
1171  ierr = MPI_Isend(request_buffer.data(),
1172  request_buffer.size() * sizeof(T2),
1173  MPI_BYTE,
1174  other_rank,
1175  tag_deliver,
1176  this->comm,
1177  request_requests.back().get());
1178  AssertThrowMPI(ierr);
1179  }
1180 #endif
1181  }
1182 
1183 
1184 
1185  template <typename T1, typename T2>
1186  void
1187  ConsensusAlgorithm_NBX<T1, T2>::start_communication()
1188  {
1189 #ifdef DEAL_II_WITH_MPI
1190  // 1)
1191  targets = this->process.compute_targets();
1192  const auto n_targets = targets.size();
1193 
1194  const int tag_request =
1195  internal::Tags::consensus_algorithm_nbx_answer_request;
1196  const int tag_deliver =
1197  internal::Tags::consensus_algorithm_nbx_process_deliver;
1198 
1199  // 2) allocate memory
1200  recv_buffers.resize(n_targets);
1201  recv_requests.resize(n_targets);
1202  send_requests.resize(n_targets);
1203  send_buffers.resize(n_targets);
1204 
1205  {
1206  // 4) send and receive
1207  for (unsigned int i = 0; i < n_targets; i++)
1208  {
1209  const unsigned int rank = targets[i];
1210  const unsigned int index = i;
1211 
1212  // translate index set to a list of pairs
1213  auto &send_buffer = send_buffers[index];
1214  this->process.create_request(rank, send_buffer);
1215 
1216  // start to send data
1217  auto ierr = MPI_Isend(send_buffer.data(),
1218  send_buffer.size() * sizeof(T1),
1219  MPI_BYTE,
1220  rank,
1221  tag_request,
1222  this->comm,
1223  &send_requests[index]);
1224  AssertThrowMPI(ierr);
1225 
1226  // start to receive data
1227  auto &recv_buffer = recv_buffers[index];
1228  this->process.prepare_buffer_for_answer(rank, recv_buffer);
1229  ierr = MPI_Irecv(recv_buffer.data(),
1230  recv_buffer.size() * sizeof(T2),
1231  MPI_BYTE,
1232  rank,
1233  tag_deliver,
1234  this->comm,
1235  &recv_requests[index]);
1236  AssertThrowMPI(ierr);
1237  }
1238  }
1239 #endif
1240  }
1241 
1242 
1243 
1244  template <typename T1, typename T2>
1245  void
1246  ConsensusAlgorithm_NBX<T1, T2>::clean_up_and_end_communication()
1247  {
1248 #ifdef DEAL_II_WITH_MPI
1249  // clean up
1250  {
1251  if (send_requests.size() > 0)
1252  {
1253  const int ierr = MPI_Waitall(send_requests.size(),
1254  send_requests.data(),
1255  MPI_STATUSES_IGNORE);
1256  AssertThrowMPI(ierr);
1257  }
1258 
1259  if (recv_requests.size() > 0)
1260  {
1261  const int ierr = MPI_Waitall(recv_requests.size(),
1262  recv_requests.data(),
1263  MPI_STATUSES_IGNORE);
1264  AssertThrowMPI(ierr);
1265  }
1266 
1267 
1268  const int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
1269  AssertThrowMPI(ierr);
1270 
1271  for (auto &i : request_requests)
1272  {
1273  const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
1274  AssertThrowMPI(ierr);
1275  }
1276 
1277 # ifdef DEBUG
1278  // note: the Ibarrier above seems to cause problems during testing;
1279  // this additional Barrier seems to help
1280  MPI_Barrier(this->comm);
1281 # endif
1282  }
1283 
1284  // unpack data
1285  {
1286  for (unsigned int i = 0; i < targets.size(); i++)
1287  this->process.read_answer(targets[i], recv_buffers[i]);
1288  }
1289 #endif
1290  }
1291 
1292 
1293 
1294  template <typename T1, typename T2>
1295  ConsensusAlgorithm_PEX<T1, T2>::ConsensusAlgorithm_PEX(
1296  ConsensusAlgorithmProcess<T1, T2> &process,
1297  const MPI_Comm & comm)
1298  : ConsensusAlgorithm<T1, T2>(process, comm)
1299  {}
1300 
1301 
1302 
1303  template <typename T1, typename T2>
1304  void
1305  ConsensusAlgorithm_PEX<T1, T2>::run()
1306  {
1307  static CollectiveMutex mutex;
1308  CollectiveMutex::ScopedLock lock(mutex, this->comm);
1309 
1310  // 1) send requests and start receiving the answers
1311  // especially determine how many requests are expected
1312  const unsigned int n_requests = start_communication();
1313 
1314  // 2) answer requests
1315  for (unsigned int request = 0; request < n_requests; request++)
1316  answer_requests(request);
1317 
1318  // 3) process answers
1319  clean_up_and_end_communication();
1320  }
1321 
1322 
1323 
1324  template <typename T1, typename T2>
1325  void
1326  ConsensusAlgorithm_PEX<T1, T2>::answer_requests(int index)
1327  {
1328 #ifdef DEAL_II_WITH_MPI
1329  const int tag_request =
1330  internal::Tags::consensus_algorithm_pex_answer_request;
1331  const int tag_deliver =
1332  internal::Tags::consensus_algorithm_pex_process_deliver;
1333 
1334  MPI_Status status;
1335  MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
1336 
1337  // get rank of incoming message
1338  const auto other_rank = status.MPI_SOURCE;
1339 
1340  std::vector<T1> buffer_recv;
1341 
1342  // get size of incoming message
1343  int number_amount;
1344  auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
1345  AssertThrowMPI(ierr);
1346 
1347  // allocate memory for incoming message
1348  Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
1349  buffer_recv.resize(number_amount / sizeof(T1));
1350  ierr = MPI_Recv(buffer_recv.data(),
1351  number_amount,
1352  MPI_BYTE,
1353  other_rank,
1354  tag_request,
1355  this->comm,
1356  &status);
1357  AssertThrowMPI(ierr);
1358 
1359  // process request
1360  auto &request_buffer = requests_buffers[index];
1361  this->process.answer_request(other_rank, buffer_recv, request_buffer);
1362 
1363  // start to send answer back
1364  ierr = MPI_Isend(request_buffer.data(),
1365  request_buffer.size() * sizeof(T2),
1366  MPI_BYTE,
1367  other_rank,
1368  tag_deliver,
1369  this->comm,
1370  &requests_answers[index]);
1371  AssertThrowMPI(ierr);
1372 #else
1373  (void)index;
1374 #endif
1375  }
1376 
1377 
1378 
1379  template <typename T1, typename T2>
1380  unsigned int
1381  ConsensusAlgorithm_PEX<T1, T2>::start_communication()
1382  {
1383 #ifdef DEAL_II_WITH_MPI
1384  // 1) determine with which processes this process wants to communicate
1385  targets = this->process.compute_targets();
1386 
1387  const int tag_request =
1388  internal::Tags::consensus_algorithm_pex_answer_request;
1389  const int tag_deliver =
1390  internal::Tags::consensus_algorithm_pex_process_deliver;
1391 
1392  // 2) determine who wants to communicate with this process
1393  sources =
1394  compute_point_to_point_communication_pattern(this->comm, targets);
1395 
1396  const auto n_targets = targets.size();
1397  const auto n_sources = sources.size();
1398 
1399  // 2) allocate memory
1400  recv_buffers.resize(n_targets);
1401  send_buffers.resize(n_targets);
1402  send_and_recv_buffers.resize(2 * n_targets);
1403 
1404  requests_answers.resize(n_sources);
1405  requests_buffers.resize(n_sources);
1406 
1407  // 4) send and receive
1408  for (unsigned int i = 0; i < n_targets; i++)
1409  {
1410  const unsigned int rank = targets[i];
1411 
1412  // pack data which should be sent
1413  auto &send_buffer = send_buffers[i];
1414  this->process.create_request(rank, send_buffer);
1415 
1416  // start to send data
1417  auto ierr = MPI_Isend(send_buffer.data(),
1418  send_buffer.size() * sizeof(T1),
1419  MPI_BYTE,
1420  rank,
1421  tag_request,
1422  this->comm,
1423  &send_and_recv_buffers[n_targets + i]);
1424  AssertThrowMPI(ierr);
1425 
1426  // start to receive data
1427  auto &recv_buffer = recv_buffers[i];
1428  this->process.prepare_buffer_for_answer(rank, recv_buffer);
1429  ierr = MPI_Irecv(recv_buffer.data(),
1430  recv_buffer.size() * sizeof(T2),
1431  MPI_BYTE,
1432  rank,
1433  tag_deliver,
1434  this->comm,
1435  &send_and_recv_buffers[i]);
1436  AssertThrowMPI(ierr);
1437  }
1438 
1439  return sources.size();
1440 #else
1441  return 0;
1442 #endif
1443  }
1444 
1445 
1446 
1447  template <typename T1, typename T2>
1448  void
1449  ConsensusAlgorithm_PEX<T1, T2>::clean_up_and_end_communication()
1450  {
1451 #ifdef DEAL_II_WITH_MPI
1452  // finalize all MPI_Requests
1453  if (send_and_recv_buffers.size() > 0)
1454  {
1455  auto ierr = MPI_Waitall(send_and_recv_buffers.size(),
1456  send_and_recv_buffers.data(),
1457  MPI_STATUSES_IGNORE);
1458  AssertThrowMPI(ierr);
1459  }
1460 
1461  if (requests_answers.size() > 0)
1462  {
1463  auto ierr = MPI_Waitall(requests_answers.size(),
1464  requests_answers.data(),
1465  MPI_STATUSES_IGNORE);
1466  AssertThrowMPI(ierr);
1467  }
1468 
1469  // unpack received data
1470  for (unsigned int i = 0; i < targets.size(); i++)
1471  this->process.read_answer(targets[i], recv_buffers[i]);
1472 #endif
1473  }
1474 
1475 
1476 
1477  template <typename T1, typename T2>
1478  ConsensusAlgorithmSelector<T1, T2>::ConsensusAlgorithmSelector(
1479  ConsensusAlgorithmProcess<T1, T2> &process,
1480  const MPI_Comm & comm)
1481  : ConsensusAlgorithm<T1, T2>(process, comm)
1482  {
1483  // Depending on the number of processes we switch between implementations.
1484  // We reduce the threshold for debug mode to be able to test also the
1485  // non-blocking implementation. This feature is tested by:
1486  // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=15.output
1487 #ifdef DEAL_II_WITH_MPI
1488 # if DEAL_II_MPI_VERSION_GTE(3, 0)
1489 # ifdef DEBUG
1490  if (Utilities::MPI::n_mpi_processes(comm) > 14)
1491 # else
1492  if (Utilities::MPI::n_mpi_processes(comm) > 99)
1493 # endif
1494  consensus_algo.reset(new ConsensusAlgorithm_NBX<T1, T2>(process, comm));
1495  else
1496 # endif
1497 #endif
1498  consensus_algo.reset(new ConsensusAlgorithm_PEX<T1, T2>(process, comm));
1499  }
1500 
1501 
1502 
1503  template <typename T1, typename T2>
1504  void
1505  ConsensusAlgorithmSelector<T1, T2>::run()
1506  {
1507  consensus_algo->run();
1508  }
1509 
1510 
1511 
1512  std::vector<unsigned int>
1513  compute_index_owner(const IndexSet &owned_indices,
1514  const IndexSet &indices_to_look_up,
1515  const MPI_Comm &comm)
1516  {
1517  Assert(owned_indices.size() == indices_to_look_up.size(),
1518  ExcMessage("IndexSets have to have the same sizes."));
1519 
1520  Assert(
1521  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1522  ExcMessage("IndexSets have to have the same size on all processes."));
1523 
1524  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1525 
1526  // Step 1: setup dictionary
1527  // The input owned_indices can be partitioned arbitrarily. In the
1528  // dictionary, the index set is statically repartitioned among the
1529  // processes again and extended with information about the actual
1530  // owner of that index.
1531  internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
1532  owned_indices, indices_to_look_up, comm, owning_ranks);
1533 
1534  // Step 2: read dictionary
1535  // Communicate with the process who owns the index in the static
1536  // partition (i.e. in the dictionary). This process returns the actual
1537  // owner of the index.
1538  ConsensusAlgorithmSelector<
1539  std::pair<types::global_dof_index, types::global_dof_index>,
1540  unsigned int>
1541  consensus_algorithm(process, comm);
1542  consensus_algorithm.run();
1543 
1544  return owning_ranks;
1545  }
1546 
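A usage sketch (illustrative, not part of mpi.cc; owners_of_ghost_indices is a made-up name): given the locally owned index set on every process, determine which rank owns each ghost index this process needs:

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

std::vector<unsigned int>
owners_of_ghost_indices(const dealii::IndexSet &locally_owned,
                        const dealii::IndexSet &ghost_indices,
                        const MPI_Comm &        comm)
{
  // both IndexSets must have the same global size on all processes;
  // the result contains one owning rank per element of ghost_indices
  return dealii::Utilities::MPI::compute_index_owner(locally_owned,
                                                     ghost_indices,
                                                     comm);
}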
1547  template class ConsensusAlgorithmSelector<
1548  std::pair<types::global_dof_index, types::global_dof_index>,
1549  unsigned int>;
1550 
1551 
1552 
1553  CollectiveMutex::CollectiveMutex()
1554  : locked(false)
1555  , request(MPI_REQUEST_NULL)
1556  {
1557  MPI_InitFinalize::register_request(request);
1558  }
1559 
1560 
1561 
1562  CollectiveMutex::~CollectiveMutex()
1563  {
1564  Assert(
1565  !locked,
1566  ExcMessage(
1567  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1568 
1569  MPI_InitFinalize::unregister_request(request);
1570  }
1571 
1572 
1573 
1574  void
1575  CollectiveMutex::lock(MPI_Comm comm)
1576  {
1577  (void)comm;
1578 
1579  Assert(
1580  !locked,
1581  ExcMessage(
1582  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1583 
1584 #ifdef DEAL_II_WITH_MPI
1585 
1586  // TODO: For now, we implement this mutex with a blocking barrier
1587  // in the lock and unlock. It needs to be tested whether we can move
1588  // to a non-blocking barrier (code disabled below).
1589 
1590  const int ierr = MPI_Barrier(comm);
1591  AssertThrowMPI(ierr);
1592 
1593 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1594  // wait for non-blocking barrier to finish. This is a noop the
1595  // first time we lock().
1596  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1597  AssertThrowMPI(ierr);
1598 # else
1599  // nothing to do as blocking barrier already completed
1600 # endif
1601 #endif
1602 
1603  locked = true;
1604  }
1605 
1606 
1607 
1608  void
1609  CollectiveMutex::unlock(MPI_Comm comm)
1610  {
1611  (void)comm;
1612 
1613  Assert(
1614  locked,
1615  ExcMessage(
1616  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1617 
1618 #ifdef DEAL_II_WITH_MPI
1619 
1620  // TODO: For now, we implement this mutex with a blocking barrier
1621  // in the lock and unlock. It needs to be tested whether we can move
1622  // to a non-blocking barrier (code disabled below):
1623 
1624 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1625  const int ierr = MPI_Ibarrier(comm, &request);
1626  AssertThrowMPI(ierr);
1627 # else
1628  const int ierr = MPI_Barrier(comm);
1629  AssertThrowMPI(ierr);
1630 # endif
1631 #endif
1632 
1633  locked = false;
1634  }
1635 
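A usage sketch (illustrative, not part of mpi.cc; guarded_communication is a made-up name) showing the intended RAII pattern with CollectiveMutex::ScopedLock, which calls unlock() automatically at the end of the scope:

#include <deal.II/base/mpi.h>

void guarded_communication(const MPI_Comm &comm)
{
  static dealii::Utilities::MPI::CollectiveMutex mutex;

  {
    dealii::Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);

    // ... communication that must not interleave with other phases
    //     on `comm` (for example, probes with MPI_ANY_SOURCE) ...
  } // the lock goes out of scope and unlock(comm) is called
}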
1636 #include "mpi.inst"
1637  } // end of namespace MPI
1638 } // end of namespace Utilities
1639 
1640 DEAL_II_NAMESPACE_CLOSE