mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/utilities.h>
25 
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/vector_memory.h>
29 
30 #include <iostream>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #ifdef DEAL_II_WITH_TRILINOS
36 # ifdef DEAL_II_WITH_MPI
37 #    include <deal.II/lac/trilinos_parallel_block_vector.h>
38 #    include <deal.II/lac/trilinos_vector.h>
39 
40 # include <Epetra_MpiComm.h>
41 # endif
42 #endif
43 
44 #ifdef DEAL_II_WITH_PETSC
45 #  include <deal.II/lac/petsc_block_vector.h>
46 #  include <deal.II/lac/petsc_vector.h>
47 
48 # include <petscsys.h>
49 #endif
50 
51 #ifdef DEAL_II_WITH_SLEPC
52 #  include <deal.II/lac/slepc_solver.h>
53 
54 # include <slepcsys.h>
55 #endif
56 
57 #ifdef DEAL_II_WITH_P4EST
58 # include <p4est_bits.h>
59 #endif
60 
61 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
62 # include <zoltan_cpp.h>
63 #endif
64 
66 DEAL_II_NAMESPACE_OPEN
67 
68 namespace Utilities
69 {
70  IndexSet
71  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
72  const unsigned int n_partitions,
73  const IndexSet::size_type total_size)
74  {
75  const unsigned int remain = total_size % n_partitions;
76 
77  const IndexSet::size_type min_size = total_size / n_partitions;
78 
79  const IndexSet::size_type begin =
80  min_size * my_partition_id + std::min(my_partition_id, remain);
81  const IndexSet::size_type end =
82  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
83  IndexSet result(total_size);
84  result.add_range(begin, end);
85  return result;
86  }
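  // A minimal illustration (a sketch, not part of this file's interface):
  // the remainder `total_size % n_partitions` goes to the first partitions,
  // so the ranges are contiguous and differ in size by at most one.
  //
  // @code
  //   // total_size = 10, n_partitions = 3:
  //   //   partition 0 owns [0,4), partition 1 owns [4,7), partition 2 owns [7,10)
  //   const IndexSet locally_owned =
  //     Utilities::create_evenly_distributed_partitioning(/*my_partition_id=*/1,
  //                                                       /*n_partitions=*/3,
  //                                                       /*total_size=*/10);
  //   // locally_owned now contains the half-open range [4,7)
  // @endcode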
87 
88  namespace MPI
89  {
90  MinMaxAvg
91  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
92  {
93  MinMaxAvg result;
94  min_max_avg(ArrayView<const double>(my_value),
95  ArrayView<MinMaxAvg>(result),
96  mpi_communicator);
97 
98  return result;
99  }
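  // A minimal usage sketch (assuming MPI has already been initialized, e.g.
  // via MPI_InitFinalize; compute_local_error() is a hypothetical helper):
  // each process contributes one value and receives the statistics over all
  // contributions.
  //
  // @code
  //   const double local_error = compute_local_error();
  //   const Utilities::MPI::MinMaxAvg stats =
  //     Utilities::MPI::min_max_avg(local_error, MPI_COMM_WORLD);
  //   // stats.min/.max/.avg/.sum describe all ranks; stats.min_index and
  //   // stats.max_index are the ranks that attained the extrema.
  // @endcode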
100 
101 
102 
103  std::vector<MinMaxAvg>
104  min_max_avg(const std::vector<double> &my_values,
105  const MPI_Comm & mpi_communicator)
106  {
107  std::vector<MinMaxAvg> results(my_values.size());
108  min_max_avg(my_values, results, mpi_communicator);
109 
110  return results;
111  }
112 
113 
114 
115 #ifdef DEAL_II_WITH_MPI
116  unsigned int
117  n_mpi_processes(const MPI_Comm &mpi_communicator)
118  {
119  int n_jobs = 1;
120  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
121  AssertThrowMPI(ierr);
122 
123  return n_jobs;
124  }
125 
126 
127  unsigned int
128  this_mpi_process(const MPI_Comm &mpi_communicator)
129  {
130  int rank = 0;
131  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
132  AssertThrowMPI(ierr);
133 
134  return rank;
135  }
136 
137 
138 
139  const std::vector<unsigned int>
140  mpi_processes_within_communicator(const MPI_Comm &comm_large,
141  const MPI_Comm &comm_small)
142  {
143  if (Utilities::MPI::job_supports_mpi() == false)
144  return std::vector<unsigned int>{0};
145 
146  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
147  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
148 
149  std::vector<unsigned int> ranks(size);
150  const int ierr = MPI_Allgather(
151  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
152  AssertThrowMPI(ierr);
153 
154  return ranks;
155  }
156 
157 
158 
159  MPI_Comm
160  duplicate_communicator(const MPI_Comm &mpi_communicator)
161  {
162  MPI_Comm new_communicator;
163  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
164  AssertThrowMPI(ierr);
165  return new_communicator;
166  }
167 
168 
169 
170  void
171  free_communicator(MPI_Comm &mpi_communicator)
172  {
173  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
174  const int ierr = MPI_Comm_free(&mpi_communicator);
175  AssertThrowMPI(ierr);
176  }
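  // A short sketch of the intended pairing of the two functions above:
  // duplicate a communicator for exclusive use by one object or algorithm,
  // and free it again when done.
  //
  // @code
  //   MPI_Comm my_comm = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  //   // ... communicate on my_comm without interfering with other users
  //   //     of MPI_COMM_WORLD ...
  //   Utilities::MPI::free_communicator(my_comm); // my_comm becomes MPI_COMM_NULL
  // @endcode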
177 
178 
179 
180  int
181  create_group(const MPI_Comm &comm,
182  const MPI_Group &group,
183  const int tag,
184  MPI_Comm * new_comm)
185  {
186 # if DEAL_II_MPI_VERSION_GTE(3, 0)
187  return MPI_Comm_create_group(comm, group, tag, new_comm);
188 # else
189  int rank;
190  int ierr = MPI_Comm_rank(comm, &rank);
191  AssertThrowMPI(ierr);
192 
193  int grp_rank;
194  ierr = MPI_Group_rank(group, &grp_rank);
195  AssertThrowMPI(ierr);
196  if (grp_rank == MPI_UNDEFINED)
197  {
198  *new_comm = MPI_COMM_NULL;
199  return MPI_SUCCESS;
200  }
201 
202  int grp_size;
203  ierr = MPI_Group_size(group, &grp_size);
204  AssertThrowMPI(ierr);
205 
206  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
207  AssertThrowMPI(ierr);
208 
209  MPI_Group parent_grp;
210  ierr = MPI_Comm_group(comm, &parent_grp);
211  AssertThrowMPI(ierr);
212 
213  std::vector<int> pids(grp_size);
214  std::vector<int> grp_pids(grp_size);
215  std::iota(grp_pids.begin(), grp_pids.end(), 0);
216  ierr = MPI_Group_translate_ranks(
217  group, grp_size, grp_pids.data(), parent_grp, pids.data());
218  AssertThrowMPI(ierr);
219  ierr = MPI_Group_free(&parent_grp);
220  AssertThrowMPI(ierr);
221 
222  MPI_Comm comm_old = *new_comm;
223  MPI_Comm ic;
224  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
225  {
226  const int gid = grp_rank / merge_sz;
227  comm_old = *new_comm;
228  if (gid % 2 == 0)
229  {
230  if ((gid + 1) * merge_sz < grp_size)
231  {
232  ierr = (MPI_Intercomm_create(
233  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
234  AssertThrowMPI(ierr);
235  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
236  AssertThrowMPI(ierr);
237  }
238  }
239  else
240  {
241  ierr = MPI_Intercomm_create(
242  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
243  AssertThrowMPI(ierr);
244  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
245  AssertThrowMPI(ierr);
246  }
247  if (*new_comm != comm_old)
248  {
249  ierr = MPI_Comm_free(&ic);
250  AssertThrowMPI(ierr);
251  ierr = MPI_Comm_free(&comm_old);
252  AssertThrowMPI(ierr);
253  }
254  }
255 
256  return MPI_SUCCESS;
257 # endif
258  }
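  // A sketch of typical use of create_group() (assuming plain MPI calls to
  // build the group; the chosen ranks 0, 2, 4 are purely illustrative):
  // collect a subset of ranks into an MPI_Group and create a communicator
  // containing only those ranks.
  //
  // @code
  //   MPI_Group world_group;
  //   MPI_Comm_group(MPI_COMM_WORLD, &world_group);
  //
  //   const int subset_ranks[] = {0, 2, 4};
  //   MPI_Group subset_group;
  //   MPI_Group_incl(world_group, 3, subset_ranks, &subset_group);
  //
  //   MPI_Comm subset_comm;
  //   Utilities::MPI::create_group(MPI_COMM_WORLD, subset_group, /*tag=*/0, &subset_comm);
  //   // subset_comm is MPI_COMM_NULL on ranks that are not in subset_group
  // @endcode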
259 
260 
261 
262  std::vector<IndexSet>
263  create_ascending_partitioning(const MPI_Comm &comm,
264  const IndexSet::size_type locally_owned_size)
265  {
266  const unsigned int n_proc = n_mpi_processes(comm);
267  const std::vector<IndexSet::size_type> sizes =
268  all_gather(comm, locally_owned_size);
269  const auto total_size =
270  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
271 
272  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
273 
274  IndexSet::size_type begin = 0;
275  for (unsigned int i = 0; i < n_proc; ++i)
276  {
277  res[i].add_range(begin, begin + sizes[i]);
278  begin = begin + sizes[i];
279  }
280 
281  return res;
282  }
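  // A minimal sketch of what the function above produces (assuming three
  // processes with local sizes 4, 2, and 3): every process obtains the full
  // partitioning [0,4), [4,6), [6,9) as a vector of IndexSets ordered by rank.
  //
  // @code
  //   const IndexSet::size_type n_locally_owned = 4; // differs per rank
  //   const std::vector<IndexSet> partition =
  //     Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD,
  //                                                   n_locally_owned);
  //   // partition[r] holds the contiguous index range owned by rank r
  // @endcode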
283 
284  IndexSet
285  create_evenly_distributed_partitioning(const MPI_Comm &comm,
286  const IndexSet::size_type total_size)
287  {
288  const unsigned int this_proc = this_mpi_process(comm);
289  const unsigned int n_proc = n_mpi_processes(comm);
290 
291  return Utilities::create_evenly_distributed_partitioning(this_proc,
292  n_proc,
293  total_size);
294  }
295 
296 
297 
298  std::vector<unsigned int>
299  compute_point_to_point_communication_pattern(
300  const MPI_Comm & mpi_comm,
301  const std::vector<unsigned int> &destinations)
302  {
303  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
304  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
305  (void)myid;
306  (void)n_procs;
307 
308  for (const unsigned int destination : destinations)
309  {
310  (void)destination;
311  AssertIndexRange(destination, n_procs);
312  }
313 
314 # if DEAL_II_MPI_VERSION_GTE(3, 0)
315 
316  ConsensusAlgorithms::AnonymousProcess<char, char> process(
317  [&]() { return destinations; });
318  ConsensusAlgorithms::NBX<char, char> consensus_algorithm(process,
319  mpi_comm);
320  return consensus_algorithm.run();
321 
322 # elif DEAL_II_MPI_VERSION_GTE(2, 2)
323 
324  static CollectiveMutex mutex;
325  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
326 
327  const int mpi_tag =
328  internal::Tags::compute_point_to_point_communication_pattern;
329 
330  // Calculate the number of messages to send to each process
331  std::vector<unsigned int> dest_vector(n_procs);
332  for (const auto &el : destinations)
333  ++dest_vector[el];
334 
335  // Find how many processes will send to this one
336  // by reducing with sum and then scattering the
337  // results over all processes
338  unsigned int n_recv_from;
339  const int ierr = MPI_Reduce_scatter_block(
340  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
341 
342  AssertThrowMPI(ierr);
343 
344  // Send myid to every process in `destinations` vector...
345  std::vector<MPI_Request> send_requests(destinations.size());
346  for (const auto &el : destinations)
347  {
348  const int ierr =
349  MPI_Isend(&myid,
350  1,
351  MPI_UNSIGNED,
352  el,
353  mpi_tag,
354  mpi_comm,
355  send_requests.data() + (&el - destinations.data()));
356  AssertThrowMPI(ierr);
357  }
358 
359 
360  // Receive `n_recv_from` times from the processes
361  // who communicate with this one. Store the obtained id's
362  // in the resulting vector
363  std::vector<unsigned int> origins(n_recv_from);
364  for (auto &el : origins)
365  {
366  const int ierr = MPI_Recv(&el,
367  1,
368  MPI_UNSIGNED,
369  MPI_ANY_SOURCE,
370  mpi_tag,
371  mpi_comm,
372  MPI_STATUS_IGNORE);
373  AssertThrowMPI(ierr);
374  }
375 
376  if (destinations.size() > 0)
377  {
378  const int ierr = MPI_Waitall(destinations.size(),
379  send_requests.data(),
380  MPI_STATUSES_IGNORE);
381  AssertThrowMPI(ierr);
382  }
383 
384  return origins;
385 # else
386  // let all processors communicate the maximal number of destinations
387  // they have
388  const unsigned int max_n_destinations =
389  Utilities::MPI::max(destinations.size(), mpi_comm);
390 
391  if (max_n_destinations == 0)
392  // all processes have nothing to send/receive:
393  return std::vector<unsigned int>();
394 
395  // now that we know the number of data packets every processor wants to
396  // send, set up a buffer with the maximal size and copy our destinations
397  // in there, padded with -1's
398  std::vector<unsigned int> my_destinations(max_n_destinations,
399  numbers::invalid_unsigned_int);
400  std::copy(destinations.begin(),
401  destinations.end(),
402  my_destinations.begin());
403 
404  // now exchange these (we could communicate less data if we used
405  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
406  // processors in this case, which is more expensive than the reduction
407  // operation above in MPI_Allreduce)
408  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
409  const int ierr = MPI_Allgather(my_destinations.data(),
410  max_n_destinations,
411  MPI_UNSIGNED,
412  all_destinations.data(),
413  max_n_destinations,
414  MPI_UNSIGNED,
415  mpi_comm);
416  AssertThrowMPI(ierr);
417 
418  // now we know who is going to communicate with whom. collect who is
419  // going to communicate with us!
420  std::vector<unsigned int> origins;
421  for (unsigned int i = 0; i < n_procs; ++i)
422  for (unsigned int j = 0; j < max_n_destinations; ++j)
423  if (all_destinations[i * max_n_destinations + j] == myid)
424  origins.push_back(i);
425  else if (all_destinations[i * max_n_destinations + j] ==
426  numbers::invalid_unsigned_int)
427  break;
428 
429  return origins;
430 # endif
431  }
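  // A usage sketch (assuming MPI is initialized): each rank lists the ranks
  // it wants to send to, and learns from which ranks it will receive. The
  // "send to the right neighbor" pattern below is purely illustrative.
  //
  // @code
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  //   const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};
  //
  //   const std::vector<unsigned int> origins =
  //     Utilities::MPI::compute_point_to_point_communication_pattern(
  //       MPI_COMM_WORLD, destinations);
  //   // here, origins contains exactly one entry: the left neighbor's rank
  // @endcode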
432 
433 
434 
435  unsigned int
436  compute_n_point_to_point_communications(
437  const MPI_Comm & mpi_comm,
438  const std::vector<unsigned int> &destinations)
439  {
440  return compute_point_to_point_communication_pattern(mpi_comm,
441  destinations)
442  .size();
443  }
444 
445 
446 
447  namespace
448  {
449  // custom MPI_Op for calculate_collective_mpi_min_max_avg
450  void
451  max_reduce(const void *in_lhs_,
452  void * inout_rhs_,
453  int * len,
454  MPI_Datatype *)
455  {
456  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
457  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
458 
459  for (int i = 0; i < *len; i++)
460  {
461  inout_rhs[i].sum += in_lhs[i].sum;
462  if (inout_rhs[i].min > in_lhs[i].min)
463  {
464  inout_rhs[i].min = in_lhs[i].min;
465  inout_rhs[i].min_index = in_lhs[i].min_index;
466  }
467  else if (inout_rhs[i].min == in_lhs[i].min)
468  {
469  // choose lower cpu index when tied to make operator commutative
470  if (inout_rhs[i].min_index > in_lhs[i].min_index)
471  inout_rhs[i].min_index = in_lhs[i].min_index;
472  }
473 
474  if (inout_rhs[i].max < in_lhs[i].max)
475  {
476  inout_rhs[i].max = in_lhs[i].max;
477  inout_rhs[i].max_index = in_lhs[i].max_index;
478  }
479  else if (inout_rhs[i].max == in_lhs[i].max)
480  {
481  // choose lower cpu index when tied to make operator commutative
482  if (inout_rhs[i].max_index > in_lhs[i].max_index)
483  inout_rhs[i].max_index = in_lhs[i].max_index;
484  }
485  }
486  }
487  } // namespace
488 
489 
490 
491  void
492  min_max_avg(const ArrayView<const double> &my_values,
493  const ArrayView<MinMaxAvg> & result,
494  const MPI_Comm & mpi_communicator)
495  {
496  // If MPI was not started, we have a serial computation and cannot run
497  // the other MPI commands
498  if (job_supports_mpi() == false ||
499  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
500  {
501  for (unsigned int i = 0; i < my_values.size(); i++)
502  {
503  result[i].sum = my_values[i];
504  result[i].avg = my_values[i];
505  result[i].min = my_values[i];
506  result[i].max = my_values[i];
507  result[i].min_index = 0;
508  result[i].max_index = 0;
509  }
510  return;
511  }
512 
513  /*
514  * A custom MPI datatype handle describing the memory layout of the
515  * MinMaxAvg struct. It is initialized the first time control reaches
516  * this static variable, i.e., not before it is actually needed.
517  */
518  static MPI_Datatype type = []() {
519  MPI_Datatype type;
520 
521  int lengths[] = {3, 2, 1};
522 
523  MPI_Aint displacements[] = {0,
524  offsetof(MinMaxAvg, min_index),
525  offsetof(MinMaxAvg, avg)};
526 
527  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
528 
529  int ierr =
530  MPI_Type_create_struct(3, lengths, displacements, types, &type);
531  AssertThrowMPI(ierr);
532 
533  ierr = MPI_Type_commit(&type);
534  AssertThrowMPI(ierr);
535 
536  /* Ensure that we free the allocated datatype again at the end of
537  * the program run just before we call MPI_Finalize():*/
538  MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
539  int ierr = MPI_Type_free(&type);
540  AssertThrowMPI(ierr);
541  });
542 
543  return type;
544  }();
545 
546  /*
547  * A custom MPI op handle for our max_reduce function.
548  * It is initialized the first time control reaches the static
549  * variable, i.e., not before it is actually needed.
550  */
551  static MPI_Op op = []() {
552  MPI_Op op;
553 
554  int ierr =
555  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
556  true,
557  &op);
558  AssertThrowMPI(ierr);
559 
560  /* Ensure that we free the allocated op again at the end of the
561  * program run just before we call MPI_Finalize():*/
562  MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
563  int ierr = MPI_Op_free(&op);
564  AssertThrowMPI(ierr);
565  });
566 
567  return op;
568  }();
569 
570  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
571  Utilities::MPI::max(my_values.size(), mpi_communicator));
572 
573  AssertDimension(my_values.size(), result.size());
574 
575  // To avoid uninitialized values on some MPI implementations, provide
576  // result with a default value already...
577  MinMaxAvg dummy = {0.,
578  std::numeric_limits<double>::max(),
579  std::numeric_limits<double>::lowest(),
580  0,
581  0,
582  0.};
583 
584  for (auto &i : result)
585  i = dummy;
586 
587  const unsigned int my_id =
588  dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
589  const unsigned int numproc =
590  dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
591 
592  std::vector<MinMaxAvg> in(my_values.size());
593 
594  for (unsigned int i = 0; i < my_values.size(); i++)
595  {
596  in[i].sum = in[i].min = in[i].max = my_values[i];
597  in[i].min_index = in[i].max_index = my_id;
598  }
599 
600  int ierr = MPI_Allreduce(
601  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
602  AssertThrowMPI(ierr);
603 
604  for (auto &r : result)
605  r.avg = r.sum / numproc;
606  }
607 
608 
609 #else
610 
611  unsigned int
612  n_mpi_processes(const MPI_Comm &)
613  {
614  return 1;
615  }
616 
617 
618 
619  unsigned int
620  this_mpi_process(const MPI_Comm &)
621  {
622  return 0;
623  }
624 
625 
626 
627  const std::vector<unsigned int>
628  mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
629  {
630  return std::vector<unsigned int>{0};
631  }
632 
633 
634 
635  std::vector<IndexSet>
636  create_ascending_partitioning(const MPI_Comm & /*comm*/,
637  const IndexSet::size_type locally_owned_size)
638  {
639  return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
640  }
641 
642  IndexSet
643  create_evenly_distributed_partitioning(const MPI_Comm & /*comm*/,
644  const IndexSet::size_type total_size)
645  {
646  return complete_index_set(total_size);
647  }
648 
649 
650 
651  MPI_Comm
652  duplicate_communicator(const MPI_Comm &mpi_communicator)
653  {
654  return mpi_communicator;
655  }
656 
657 
658 
659  void
660  free_communicator(MPI_Comm & /*mpi_communicator*/)
661  {}
662 
663 
664 
665  void
666  min_max_avg(const ArrayView<const double> &my_values,
667  const ArrayView<MinMaxAvg> & result,
668  const MPI_Comm &)
669  {
670  AssertDimension(my_values.size(), result.size());
671 
672  for (unsigned int i = 0; i < my_values.size(); i++)
673  {
674  result[i].sum = my_values[i];
675  result[i].avg = my_values[i];
676  result[i].min = my_values[i];
677  result[i].max = my_values[i];
678  result[i].min_index = 0;
679  result[i].max_index = 0;
680  }
681  }
682 
683 #endif
684 
685  /* Force initialization of static struct: */
686  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
687  MPI_InitFinalize::Signals();
688 
689 
690  MPI_InitFinalize::MPI_InitFinalize(int & argc,
691  char **& argv,
692  const unsigned int max_num_threads)
693  {
694  static bool constructor_has_already_run = false;
695  (void)constructor_has_already_run;
696  Assert(constructor_has_already_run == false,
697  ExcMessage("You can only create a single object of this class "
698  "in a program since it initializes the MPI system."));
699 
700 
701  int ierr = 0;
702 #ifdef DEAL_II_WITH_MPI
703  // if we have PETSc, we will initialize it and let it handle MPI.
704  // Otherwise, we will do it.
705  int MPI_has_been_started = 0;
706  ierr = MPI_Initialized(&MPI_has_been_started);
707  AssertThrowMPI(ierr);
708  AssertThrow(MPI_has_been_started == 0,
709  ExcMessage("MPI error. You can only start MPI once!"));
710 
711  int provided;
712  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
713  // we might use several threads but never call two MPI functions at the
714  // same time. For an explanation of why we do this, see
715  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
716  int wanted = MPI_THREAD_SERIALIZED;
717  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
718  AssertThrowMPI(ierr);
719 
720  // disable for now because at least some implementations always return
721  // MPI_THREAD_SINGLE.
722  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
723  // ExcMessage("MPI reports that we are not allowed to use multiple
724  // threads."));
725 #else
726  // make sure the compiler doesn't warn about these variables
727  (void)argc;
728  (void)argv;
729  (void)ierr;
730 #endif
731 
732  // we are allowed to call MPI_Init ourselves and PETScInitialize will
733  // detect this. This allows us to use MPI_Init_thread instead.
734 #ifdef DEAL_II_WITH_PETSC
735 # ifdef DEAL_II_WITH_SLEPC
736  // Initialize SLEPc (with PETSc):
737  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
738  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
739 # else
740  // or just initialize PETSc alone:
741  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
742  AssertThrow(ierr == 0, ExcPETScError(ierr));
743 # endif
744 
745  // Disable PETSc exception handling. This just prints a large wall
746  // of text that is not particularly helpful for what we do:
747  PetscPopSignalHandler();
748 #endif
749 
750  // Initialize zoltan
751 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
752  float version;
753  Zoltan_Initialize(argc, argv, &version);
754 #endif
755 
756 #ifdef DEAL_II_WITH_P4EST
757  // Initialize p4est and libsc components
758 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
759 # else
760  // This feature is broken in version 2.0.0 for calls to
761  // MPI_Comm_create_group (see cburstedde/p4est#30).
762  // Disabling it leads to more verbose p4est error messages
763  // which should be fine.
764  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
765 # endif
766  p4est_init(nullptr, SC_LP_SILENT);
767 #endif
768 
769  constructor_has_already_run = true;
770 
771 
772  // Now also see how many threads we'd like to run
773  if (max_num_threads != numbers::invalid_unsigned_int)
774  {
775  // set maximum number of threads (also respecting the environment
776  // variable that the called function evaluates) based on what the
777  // user asked
778  MultithreadInfo::set_thread_limit(max_num_threads);
779  }
780  else
781  // user wants automatic choice
782  {
783 #ifdef DEAL_II_WITH_MPI
784  // we need to figure out how many MPI processes there are on the
785  // current node, as well as how many CPU cores we have. for the
786  // first task, check what get_hostname() returns and then do an
787  // allgather so each processor gets the answer
788  //
789  // in calculating the length of the string, don't forget the
790  // terminating \0 on C-style strings
791  const std::string hostname = Utilities::System::get_hostname();
792  const unsigned int max_hostname_size =
793  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
794  std::vector<char> hostname_array(max_hostname_size);
795  std::copy(hostname.c_str(),
796  hostname.c_str() + hostname.size() + 1,
797  hostname_array.begin());
798 
799  std::vector<char> all_hostnames(max_hostname_size *
800  MPI::n_mpi_processes(MPI_COMM_WORLD));
801  const int ierr = MPI_Allgather(hostname_array.data(),
802  max_hostname_size,
803  MPI_CHAR,
804  all_hostnames.data(),
805  max_hostname_size,
806  MPI_CHAR,
807  MPI_COMM_WORLD);
808  AssertThrowMPI(ierr);
809 
810  // count how often our own hostname appears and determine which
811  // instance among these the current process is
812  unsigned int n_local_processes = 0;
813  unsigned int nth_process_on_host = 0;
814  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
815  ++i)
816  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
817  hostname)
818  {
819  ++n_local_processes;
820  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
821  ++nth_process_on_host;
822  }
823  Assert(nth_process_on_host > 0, ExcInternalError());
824 
825 
826  // compute how many cores each process gets. if the number does not
827  // divide evenly, then we get one more core if we are among the
828  // first few processes
829  //
830  // if the number would be zero, round up to one since every process
831  // needs to have at least one thread
832  const unsigned int n_threads =
833  std::max(MultithreadInfo::n_cores() / n_local_processes +
834  (nth_process_on_host <=
835  MultithreadInfo::n_cores() % n_local_processes ?
836  1 :
837  0),
838  1U);
839 #else
840  const unsigned int n_threads = MultithreadInfo::n_cores();
841 #endif
842 
843  // finally set this number of threads
844  MultithreadInfo::set_thread_limit(n_threads);
845  }
846 
847  // As a final step call the at_mpi_init() signal handler.
848  signals.at_mpi_init();
849  }
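  // The canonical way to use this class is near the top of main(), so that
  // MPI (and, where configured, PETSc/SLEPc, p4est, and Zoltan) is
  // initialized before any other deal.II object and finalized after all of
  // them have been destroyed. A minimal sketch:
  //
  // @code
  //   #include <deal.II/base/mpi.h>
  //
  //   int main(int argc, char **argv)
  //   {
  //     dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  //     // ... parallel program ...
  //   }   // the destructor calls MPI_Finalize() (and PetscFinalize() etc.)
  // @endcode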
850 
851 
852 
853  void
854  MPI_InitFinalize::register_request(MPI_Request &request)
855  {
856  // insert if it is not in the set already:
857  requests.insert(&request);
858  }
859 
860 
861 
862  void
863  MPI_InitFinalize::unregister_request(MPI_Request &request)
864  {
865  Assert(
866  requests.find(&request) != requests.end(),
867  ExcMessage(
868  "You tried to call unregister_request() with an invalid request."));
869 
870  requests.erase(&request);
871  }
872 
873 
874 
875  std::set<MPI_Request *> MPI_InitFinalize::requests;
876 
877 
878 
879  MPI_InitFinalize::~MPI_InitFinalize()
880  {
881  // First, call the at_mpi_finalize() signal handler.
882  signals.at_mpi_finalize();
883 
884  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
885  // are no longer used at this point. this is relevant because the static
886  // object destructors run for these vectors at the end of the program
887  // would run after MPI_Finalize is called, leading to errors
888 
889 #ifdef DEAL_II_WITH_MPI
890  // Before exiting, wait for nonblocking communication to complete:
891  for (auto request : requests)
892  {
893  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
894  AssertThrowMPI(ierr);
895  }
896 
897  // Start with deal.II MPI vectors and delete vectors from the pools:
898  GrowingVectorMemory<
899  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
900  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
901  release_unused_memory();
902  GrowingVectorMemory<
903  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
904  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
905  release_unused_memory();
906 
907  // Next with Trilinos:
908 # ifdef DEAL_II_WITH_TRILINOS
909  GrowingVectorMemory<
910  TrilinosWrappers::MPI::Vector>::release_unused_memory();
911  GrowingVectorMemory<
912  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
913 # endif
914 #endif
915 
916 
917  // Now deal with PETSc (with or without MPI). Only delete the vectors if
918  // finalize hasn't been called yet, otherwise this will lead to errors.
919 #ifdef DEAL_II_WITH_PETSC
920  if ((PetscInitializeCalled == PETSC_TRUE) &&
921  (PetscFinalizeCalled == PETSC_FALSE))
922  {
923  GrowingVectorMemory<
924  PETScWrappers::MPI::Vector>::release_unused_memory();
925  GrowingVectorMemory<
926  PETScWrappers::MPI::BlockVector>::release_unused_memory();
927 
928 # ifdef DEAL_II_WITH_SLEPC
929  // and now end SLEPc (with PETSc)
930  SlepcFinalize();
931 # else
932  // or just end PETSc.
933  PetscFinalize();
934 # endif
935  }
936 #endif
937 
938 // There is a similar issue with CUDA: The destructor of static objects might
939 // run after the CUDA driver is unloaded. Hence, also release all memory
940 // related to CUDA vectors.
941 #ifdef DEAL_II_WITH_CUDA
942  GrowingVectorMemory<
943  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
944  release_unused_memory();
945  GrowingVectorMemory<
946  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
947  release_unused_memory();
948 #endif
949 
950 #ifdef DEAL_II_WITH_P4EST
951  // now end p4est and libsc
952  // Note: p4est has no finalize function
953  sc_finalize();
954 #endif
955 
956 
957  // only MPI_Finalize if we are running with MPI. We also need to do this
958  // when running PETSc, because we initialize MPI ourselves before
959  // calling PetscInitialize
960 #ifdef DEAL_II_WITH_MPI
961  if (job_supports_mpi() == true)
962  {
963 # if __cpp_lib_uncaught_exceptions >= 201411
964  // std::uncaught_exception() is deprecated in C++17
965  if (std::uncaught_exceptions() > 0)
966 # else
967  if (std::uncaught_exception() == true)
968 # endif
969  {
970  // do not try to call MPI_Finalize to avoid a deadlock.
971  }
972  else
973  {
974  const int ierr = MPI_Finalize();
975  (void)ierr;
976  AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
977  }
978  }
979 #endif
980  }
981 
982 
983 
984  bool
985  job_supports_mpi()
986  {
987 #ifdef DEAL_II_WITH_MPI
988  int MPI_has_been_started = 0;
989  const int ierr = MPI_Initialized(&MPI_has_been_started);
990  AssertThrowMPI(ierr);
991 
992  return (MPI_has_been_started > 0);
993 #else
994  return false;
995 #endif
996  }
997 
998 
999 
1000  std::vector<unsigned int>
1001  compute_index_owner(const IndexSet &owned_indices,
1002  const IndexSet &indices_to_look_up,
1003  const MPI_Comm &comm)
1004  {
1005  Assert(owned_indices.size() == indices_to_look_up.size(),
1006  ExcMessage("IndexSets have to have the same sizes."));
1007 
1008  Assert(
1009  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1010  ExcMessage("IndexSets have to have the same size on all processes."));
1011 
1012  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1013 
1014  // Step 1: setup dictionary
1015  // The input owned_indices can be partitioned arbitrarily. In the
1016  // dictionary, the index set is statically repartitioned among the
1017  // processes again and extended with information about the actual
1018  // owner of that index.
1019  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1020  owned_indices, indices_to_look_up, comm, owning_ranks);
1021 
1022  // Step 2: read dictionary
1023  // Communicate with the process who owns the index in the static
1024  // partition (i.e. in the dictionary). This process returns the actual
1025  // owner of the index.
1026  ConsensusAlgorithms::Selector<
1027  std::pair<types::global_dof_index, types::global_dof_index>,
1028  unsigned int>
1029  consensus_algorithm(process, comm);
1030  consensus_algorithm.run();
1031 
1032  return owning_ranks;
1033  }
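  // A minimal sketch (assuming two globally consistent IndexSets,
  // `locally_owned` and `ghost_indices`, of the same global size have been
  // set up; the names are illustrative): for every element of
  // `ghost_indices` the call returns the rank that owns that element.
  //
  // @code
  //   const std::vector<unsigned int> owners =
  //     Utilities::MPI::compute_index_owner(locally_owned,
  //                                         ghost_indices,
  //                                         MPI_COMM_WORLD);
  //   // owners[k] is the owning rank of the k-th element of ghost_indices
  // @endcode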
1034 
1035 
1036 
1037  CollectiveMutex::CollectiveMutex()
1038  : locked(false)
1039  , request(MPI_REQUEST_NULL)
1040  {
1041  Utilities::MPI::MPI_InitFinalize::register_request(request);
1042  }
1043 
1044 
1045 
1046  CollectiveMutex::~CollectiveMutex()
1047  {
1048  Assert(
1049  !locked,
1050  ExcMessage(
1051  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1052 
1053  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1054  }
1055 
1056 
1057 
1058  void
1059  CollectiveMutex::lock(const MPI_Comm &comm)
1060  {
1061  (void)comm;
1062 
1063  Assert(
1064  !locked,
1065  ExcMessage(
1066  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1067 
1068 #ifdef DEAL_II_WITH_MPI
1069 
1070  // TODO: For now, we implement this mutex with a blocking barrier
1071  // in lock() and unlock(). It remains to be tested whether we can move
1072  // to a nonblocking barrier (code disabled below).
1073 
1074  const int ierr = MPI_Barrier(comm);
1075  AssertThrowMPI(ierr);
1076 
1077 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1078  // wait for non-blocking barrier to finish. This is a noop the
1079  // first time we lock().
1080  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1081  AssertThrowMPI(ierr);
1082 # else
1083  // nothing to do as blocking barrier already completed
1084 # endif
1085 #endif
1086 
1087  locked = true;
1088  }
1089 
1090 
1091 
1092  void
1093  CollectiveMutex::unlock(const MPI_Comm &comm)
1094  {
1095  (void)comm;
1096 
1097  Assert(
1098  locked,
1099  ExcMessage(
1100  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1101 
1102 #ifdef DEAL_II_WITH_MPI
1103 
1104  // TODO: For now, we implement this mutex with a blocking barrier
1105  // in lock() and unlock(). It remains to be tested whether we can move
1106  // to a nonblocking barrier (code disabled below):
1107 
1108 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1109  const int ierr = MPI_Ibarrier(comm, &request);
1110  AssertThrowMPI(ierr);
1111 # else
1112  const int ierr = MPI_Barrier(comm);
1113  AssertThrowMPI(ierr);
1114 # endif
1115 #endif
1116 
1117  locked = false;
1118  }
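  // A usage sketch: the mutex is normally not locked by hand but through the
  // RAII helper CollectiveMutex::ScopedLock, as done in
  // compute_point_to_point_communication_pattern() above.
  //
  // @code
  //   static Utilities::MPI::CollectiveMutex mutex;
  //   {
  //     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
  //     // ... point-to-point communication that must not interleave with
  //     //     other critical sections on the same communicator ...
  //   } // unlock() is called automatically here
  // @endcode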
1119 
1120 
1121 #ifndef DOXYGEN
1122  // explicit instantiations
1123  template bool
1124  logical_or<bool>(const bool &, const MPI_Comm &);
1125 
1126 
1127  template void
1128  logical_or<bool>(const ArrayView<const bool> &,
1129  const MPI_Comm &,
1130  const ArrayView<bool> &);
1131 
1132 
1133  template std::vector<unsigned int>
1134  compute_set_union(const std::vector<unsigned int> &vec,
1135  const MPI_Comm & comm);
1136 
1137 
1138  template std::set<unsigned int>
1139  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1140 #endif
1141 
1142 #include "mpi.inst"
1143  } // end of namespace MPI
1144 } // end of namespace Utilities
1145 
1146 DEAL_II_NAMESPACE_CLOSE