mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
23 #include <deal.II/base/mpi_tags.h>
25 #include <deal.II/base/utilities.h>
26 
30 
31 #include <boost/serialization/utility.hpp>
32 
33 #include <iostream>
34 #include <numeric>
35 #include <set>
36 #include <vector>
37 
38 #ifdef DEAL_II_WITH_TRILINOS
39 # ifdef DEAL_II_WITH_MPI
42 
43 # include <Epetra_MpiComm.h>
44 # endif
45 #endif
46 
47 #ifdef DEAL_II_WITH_PETSC
50 
51 # include <petscsys.h>
52 #endif
53 
54 #ifdef DEAL_II_WITH_SLEPC
56 
57 # include <slepcsys.h>
58 #endif
59 
60 #ifdef DEAL_II_WITH_P4EST
61 # include <p4est_bits.h>
62 #endif
63 
64 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
65 # include <zoltan_cpp.h>
66 #endif
67 
68 DEAL_II_NAMESPACE_OPEN
69 
70 
71 namespace Utilities
72 {
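 // Split the index range [0, total_size) into n_partitions contiguous
 // chunks and return the chunk that belongs to my_partition_id. If the
 // division is uneven, the first (total_size % n_partitions) partitions
 // receive one extra element each.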
73  IndexSet
74  create_evenly_distributed_partitioning(
75  const unsigned int my_partition_id,
76  const unsigned int n_partitions,
77  const types::global_dof_index total_size)
78  {
79  static_assert(
80  std::is_same<types::global_dof_index, IndexSet::size_type>::value,
81  "IndexSet::size_type must match types::global_dof_index for "
82  "using this function");
83  const unsigned int remain = total_size % n_partitions;
84 
85  const IndexSet::size_type min_size = total_size / n_partitions;
86 
87  const IndexSet::size_type begin =
88  min_size * my_partition_id + std::min(my_partition_id, remain);
89  const IndexSet::size_type end =
90  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
91  IndexSet result(total_size);
92  result.add_range(begin, end);
93  return result;
94  }
95 
96  namespace MPI
97  {
98 #ifdef DEAL_II_WITH_MPI
99  // Provide definitions of template variables for all valid instantiations.
100  template const MPI_Datatype mpi_type_id_for_type<bool>;
101  template const MPI_Datatype mpi_type_id_for_type<char>;
102  template const MPI_Datatype mpi_type_id_for_type<signed char>;
103  template const MPI_Datatype mpi_type_id_for_type<short>;
104  template const MPI_Datatype mpi_type_id_for_type<int>;
105  template const MPI_Datatype mpi_type_id_for_type<long int>;
106  template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
107  template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
108  template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
109  template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
110  template const MPI_Datatype mpi_type_id_for_type<float>;
111  template const MPI_Datatype mpi_type_id_for_type<double>;
112  template const MPI_Datatype mpi_type_id_for_type<long double>;
113  template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
114  template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
115 #endif
116 
117 
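 // Compute minimum, maximum, sum, and average of a single number over all
 // ranks of the given communicator, together with the ranks at which the
 // minimum and maximum are attained. This simply forwards to the
 // ArrayView-based overload further down; a typical call would look like
 //   const MinMaxAvg stats = Utilities::MPI::min_max_avg(local_value, comm);
 // where `local_value` and `comm` stand for the caller's own variables.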
118  MinMaxAvg
119  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
120  {
121  MinMaxAvg result;
122  min_max_avg(ArrayView<const double>(my_value),
123  ArrayView<MinMaxAvg>(result),
124  mpi_communicator);
125 
126  return result;
127  }
128 
129 
130 
131  std::vector<MinMaxAvg>
132  min_max_avg(const std::vector<double> &my_values,
133  const MPI_Comm & mpi_communicator)
134  {
135  std::vector<MinMaxAvg> results(my_values.size());
136  min_max_avg(my_values, results, mpi_communicator);
137 
138  return results;
139  }
140 
141 
142 
143 #ifdef DEAL_II_WITH_MPI
144  unsigned int
145  n_mpi_processes(const MPI_Comm &mpi_communicator)
146  {
147  int n_jobs = 1;
148  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
149  AssertThrowMPI(ierr);
150 
151  return n_jobs;
152  }
153 
154 
155  unsigned int
156  this_mpi_process(const MPI_Comm &mpi_communicator)
157  {
158  int rank = 0;
159  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
160  AssertThrowMPI(ierr);
161 
162  return rank;
163  }
164 
165 
166 
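 // For every process in comm_small, report its rank within comm_large:
 // each process contributes its comm_large rank, and the result is
 // gathered across comm_small.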
167  const std::vector<unsigned int>
168  mpi_processes_within_communicator(const MPI_Comm &comm_large,
169  const MPI_Comm &comm_small)
170  {
171  if (Utilities::MPI::job_supports_mpi() == false)
172  return std::vector<unsigned int>{0};
173 
174  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
175  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
176 
177  std::vector<unsigned int> ranks(size);
178  const int ierr = MPI_Allgather(
179  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
180  AssertThrowMPI(ierr);
181 
182  return ranks;
183  }
184 
185 
186 
187  MPI_Comm
188  duplicate_communicator(const MPI_Comm &mpi_communicator)
189  {
190  MPI_Comm new_communicator;
191  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
192  AssertThrowMPI(ierr);
193  return new_communicator;
194  }
195 
196 
197 
198  void
199  free_communicator(MPI_Comm &mpi_communicator)
200  {
201  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
202  const int ierr = MPI_Comm_free(&mpi_communicator);
203  AssertThrowMPI(ierr);
204  }
205 
206 
207 
208  int
209  create_group(const MPI_Comm & comm,
210  const MPI_Group &group,
211  const int tag,
212  MPI_Comm * new_comm)
213  {
214  const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
215  AssertThrowMPI(ierr);
216  return ierr;
217  }
218 
219 
220 
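 // Gather the locally owned sizes of all processes and build, for every
 // rank, a contiguous index range whose offset equals the sum of the
 // sizes owned by all lower ranks.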
221  std::vector<IndexSet>
222  create_ascending_partitioning(
223  const MPI_Comm & comm,
224  const types::global_dof_index locally_owned_size)
225  {
226  static_assert(
227  std::is_same<types::global_dof_index, IndexSet::size_type>::value,
228  "IndexSet::size_type must match types::global_dof_index for "
229  "using this function");
230  const unsigned int n_proc = n_mpi_processes(comm);
231  const std::vector<IndexSet::size_type> sizes =
232  all_gather(comm, locally_owned_size);
233  const auto total_size =
234  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
235 
236  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
237 
238  IndexSet::size_type begin = 0;
239  for (unsigned int i = 0; i < n_proc; ++i)
240  {
241  res[i].add_range(begin, begin + sizes[i]);
242  begin = begin + sizes[i];
243  }
244 
245  return res;
246  }
247 
248 
249 
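 // Convenience overload: determine rank and size of the communicator and
 // forward to the version above that takes an explicit partition id and
 // partition count.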
250  IndexSet
251  create_evenly_distributed_partitioning(
252  const MPI_Comm & comm,
253  const types::global_dof_index total_size)
254  {
255  const unsigned int this_proc = this_mpi_process(comm);
256  const unsigned int n_proc = n_mpi_processes(comm);
257 
258  return Utilities::create_evenly_distributed_partitioning(this_proc,
259  n_proc,
260  total_size);
261  }
262 
263 
264 
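 // Build and commit a contiguous MPI datatype that spans n_bytes bytes,
 // which allows describing messages whose size does not fit into a signed
 // int. The returned unique_ptr frees the datatype again via its custom
 // deleter.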
265  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
266  create_mpi_data_type_n_bytes(const std::size_t n_bytes)
267  {
268  MPI_Datatype result;
269  int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
270  AssertThrowMPI(ierr);
271  ierr = MPI_Type_commit(&result);
272  AssertThrowMPI(ierr);
273 
274 # ifdef DEBUG
275  MPI_Count size64;
276  ierr = MPI_Type_size_x(result, &size64);
277  AssertThrowMPI(ierr);
278 
279  Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
280 # endif
281 
282  // Now put the new data type into a std::unique_ptr with a custom
283  // deleter. We call the std::unique_ptr constructor that takes as first
284  // argument a pointer (here, a pointer to a copy of the `result`
285  // object) and as second argument a pointer-to-function, for which
286  // we here use a captureless lambda function that acts as the
287  // 'deleter' object: it calls `MPI_Type_free` and then deletes the
288  // pointer. To avoid a compiler warning about a null 'this' pointer
289  // in the lambda (which doesn't make sense: the lambda doesn't store
290  // anything), we create the deleter first.
291  auto deleter = [](MPI_Datatype *p) {
292  if (p != nullptr)
293  {
294  const int ierr = MPI_Type_free(p);
295  (void)ierr;
296  AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
297 
298  delete p;
299  }
300  };
301 
302  return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
303  new MPI_Datatype(result), deleter);
304  }
305 
306 
307 
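 // Given the list of ranks this process wants to send to, return the list
 // of ranks that are going to send to this process.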
308  std::vector<unsigned int>
309  compute_point_to_point_communication_pattern(
310  const MPI_Comm & mpi_comm,
311  const std::vector<unsigned int> &destinations)
312  {
313  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
314  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
315  (void)myid;
316  (void)n_procs;
317 
318  for (const unsigned int destination : destinations)
319  {
320  (void)destination;
321  AssertIndexRange(destination, n_procs);
322  }
323 
324 
325  // Have a little function that checks if destinations provided
326  // to the current process are unique. The way it does this is
327  // to create a sorted list of destinations and then walk through
328  // the list and look at successive elements -- if we find the
329  // same number twice, we know that the destinations were not
330  // unique
331  const bool my_destinations_are_unique = [destinations]() {
332  if (destinations.size() == 0)
333  return true;
334  else
335  {
336  std::vector<unsigned int> my_destinations = destinations;
337  std::sort(my_destinations.begin(), my_destinations.end());
338  return (std::adjacent_find(my_destinations.begin(),
339  my_destinations.end()) ==
340  my_destinations.end());
341  }
342  }();
343 
344  // If all processes report that they have unique destinations,
345  // then we can short-cut the process using a consensus algorithm (which
346  // is implemented only for the case of unique destinations):
347  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
348  1)
349  {
350  return ConsensusAlgorithms::nbx<char, char>(
351  destinations, {}, {}, {}, mpi_comm);
352  }
353 
354  // So we need to run a different algorithm, specifically one that
355  // requires more memory -- MPI_Reduce_scatter_block will require memory
356  // proportional to the number of processes involved; that function is
357  // available for MPI 2.2 or later:
358  static CollectiveMutex mutex;
359  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
360 
361  const int mpi_tag =
362  internal::Tags::compute_point_to_point_communication_pattern;
363 
364  // Calculate the number of messages to send to each process
365  std::vector<unsigned int> dest_vector(n_procs);
366  for (const auto &el : destinations)
367  ++dest_vector[el];
368 
369  // Find how many processes will send to this one
370  // by reducing with sum and then scattering the
371  // results over all processes
372  unsigned int n_recv_from;
373  const int ierr = MPI_Reduce_scatter_block(
374  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
375 
376  AssertThrowMPI(ierr);
377 
378  // Send myid to every process in `destinations` vector...
379  std::vector<MPI_Request> send_requests(destinations.size());
380  for (const auto &el : destinations)
381  {
382  const int ierr =
383  MPI_Isend(&myid,
384  1,
385  MPI_UNSIGNED,
386  el,
387  mpi_tag,
388  mpi_comm,
389  send_requests.data() + (&el - destinations.data()));
390  AssertThrowMPI(ierr);
391  }
392 
393 
394  // Receive `n_recv_from` times from the processes
395  // who communicate with this one. Store the obtained id's
396  // in the resulting vector
397  std::vector<unsigned int> origins(n_recv_from);
398  for (auto &el : origins)
399  {
400  const int ierr = MPI_Recv(&el,
401  1,
402  MPI_UNSIGNED,
403  MPI_ANY_SOURCE,
404  mpi_tag,
405  mpi_comm,
406  MPI_STATUS_IGNORE);
407  AssertThrowMPI(ierr);
408  }
409 
410  if (destinations.size() > 0)
411  {
412  const int ierr = MPI_Waitall(destinations.size(),
413  send_requests.data(),
414  MPI_STATUSES_IGNORE);
415  AssertThrowMPI(ierr);
416  }
417 
418  return origins;
419  }
420 
421 
422 
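 // Same as above, but only return the number of processes that are going
 // to send to the current process, not their ranks.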
423  unsigned int
424  compute_n_point_to_point_communications(
425  const MPI_Comm & mpi_comm,
426  const std::vector<unsigned int> &destinations)
427  {
428  // Have a little function that checks if destinations provided
429  // to the current process are unique:
430  const bool my_destinations_are_unique = [destinations]() {
431  std::vector<unsigned int> my_destinations = destinations;
432  const unsigned int n_destinations = my_destinations.size();
433  std::sort(my_destinations.begin(), my_destinations.end());
434  my_destinations.erase(std::unique(my_destinations.begin(),
435  my_destinations.end()),
436  my_destinations.end());
437  return (my_destinations.size() == n_destinations);
438  }();
439 
440  // If all processes report that they have unique destinations,
441  // then we can short-cut the process using a consensus algorithm:
442 
443  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
444  1)
445  {
446  return ConsensusAlgorithms::nbx<char, char>(
447  destinations, {}, {}, {}, mpi_comm)
448  .size();
449  }
450  else
451  {
452  const unsigned int n_procs =
453  Utilities::MPI::n_mpi_processes(mpi_comm);
454 
455  for (const unsigned int destination : destinations)
456  {
457  (void)destination;
458  AssertIndexRange(destination, n_procs);
459  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
460  ExcMessage(
461  "There is no point in communicating with ourselves."));
462  }
463 
464  // Calculate the number of messages to send to each process
465  std::vector<unsigned int> dest_vector(n_procs);
466  for (const auto &el : destinations)
467  ++dest_vector[el];
468 
469  // Find out how many processes will send to this one
470  // MPI_Reduce_scatter(_block) does exactly this
471  unsigned int n_recv_from = 0;
472 
473  const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
474  &n_recv_from,
475  1,
476  MPI_UNSIGNED,
477  MPI_SUM,
478  mpi_comm);
479 
480  AssertThrowMPI(ierr);
481 
482  return n_recv_from;
483  }
484  }
485 
486 
487 
488  namespace
489  {
490  // Custom MPI_Op for the min_max_avg() reduction below.
491  void
492  max_reduce(const void *in_lhs_,
493  void * inout_rhs_,
494  int * len,
495  MPI_Datatype *)
496  {
497  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
498  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
499 
500  for (int i = 0; i < *len; ++i)
501  {
502  inout_rhs[i].sum += in_lhs[i].sum;
503  if (inout_rhs[i].min > in_lhs[i].min)
504  {
505  inout_rhs[i].min = in_lhs[i].min;
506  inout_rhs[i].min_index = in_lhs[i].min_index;
507  }
508  else if (inout_rhs[i].min == in_lhs[i].min)
509  {
510  // choose lower cpu index when tied to make operator commutative
511  if (inout_rhs[i].min_index > in_lhs[i].min_index)
512  inout_rhs[i].min_index = in_lhs[i].min_index;
513  }
514 
515  if (inout_rhs[i].max < in_lhs[i].max)
516  {
517  inout_rhs[i].max = in_lhs[i].max;
518  inout_rhs[i].max_index = in_lhs[i].max_index;
519  }
520  else if (inout_rhs[i].max == in_lhs[i].max)
521  {
522  // choose lower cpu index when tied to make operator commutative
523  if (inout_rhs[i].max_index > in_lhs[i].max_index)
524  inout_rhs[i].max_index = in_lhs[i].max_index;
525  }
526  }
527  }
528  } // namespace
529 
530 
531 
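 // Element-wise version: for each entry of my_values, compute minimum,
 // maximum, sum, and average over all ranks (plus the ranks attaining the
 // minimum and maximum) and store them in the corresponding entry of
 // result.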
532  void
533  min_max_avg(const ArrayView<const double> &my_values,
534  const ArrayView<MinMaxAvg> & result,
535  const MPI_Comm & mpi_communicator)
536  {
537  // If MPI was not started, we have a serial computation and cannot run
538  // the other MPI commands
539  if (job_supports_mpi() == false ||
540  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
541  {
542  for (unsigned int i = 0; i < my_values.size(); ++i)
543  {
544  result[i].sum = my_values[i];
545  result[i].avg = my_values[i];
546  result[i].min = my_values[i];
547  result[i].max = my_values[i];
548  result[i].min_index = 0;
549  result[i].max_index = 0;
550  }
551  return;
552  }
553 
554  /*
555  * A custom MPI datatype handle describing the memory layout of the
556  * MinMaxAvg struct. It is initialized the first time control reaches
557  * this static variable, so hopefully not too early.
558  */
559  static MPI_Datatype type = []() {
560  MPI_Datatype type;
561 
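 // The three blocks below mirror the member layout of MinMaxAvg: three
 // doubles (sum, min, max), two integers (min_index, max_index), and one
 // trailing double (avg).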
562  int lengths[] = {3, 2, 1};
563 
564  MPI_Aint displacements[] = {0,
565  offsetof(MinMaxAvg, min_index),
566  offsetof(MinMaxAvg, avg)};
567 
568  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
569 
570  int ierr =
571  MPI_Type_create_struct(3, lengths, displacements, types, &type);
572  AssertThrowMPI(ierr);
573 
574  ierr = MPI_Type_commit(&type);
575  AssertThrowMPI(ierr);
576 
577  /* Ensure that we free the allocated datatype again at the end of
578  * the program run just before we call MPI_Finalize():*/
579  MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
580  int ierr = MPI_Type_free(&type);
581  AssertThrowMPI(ierr);
582  });
583 
584  return type;
585  }();
586 
587  /*
588  * A custom MPI op handle for our max_reduce function.
589  * It is initialized the first time control reaches the static
590  * variable, so hopefully not too early.
591  */
592  static MPI_Op op = []() {
593  MPI_Op op;
594 
595  int ierr =
596  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
597  static_cast<int>(true),
598  &op);
599  AssertThrowMPI(ierr);
600 
601  /* Ensure that we free the allocated op again at the end of the
602  * program run just before we call MPI_Finalize():*/
603  MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
604  int ierr = MPI_Op_free(&op);
605  AssertThrowMPI(ierr);
606  });
607 
608  return op;
609  }();
610 
611  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
612  Utilities::MPI::max(my_values.size(), mpi_communicator));
613 
614  AssertDimension(my_values.size(), result.size());
615 
616  // To avoid uninitialized values on some MPI implementations, provide
617  // result with a default value already...
618  MinMaxAvg dummy = {0.,
619  std::numeric_limits<double>::max(),
620  std::numeric_limits<double>::lowest(),
621  0,
622  0,
623  0.};
624 
625  for (auto &i : result)
626  i = dummy;
627 
628  const unsigned int my_id =
629  dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
630  const unsigned int numproc =
631  dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
632 
633  std::vector<MinMaxAvg> in(my_values.size());
634 
635  for (unsigned int i = 0; i < my_values.size(); ++i)
636  {
637  in[i].sum = in[i].min = in[i].max = my_values[i];
638  in[i].min_index = in[i].max_index = my_id;
639  }
640 
641  int ierr = MPI_Allreduce(
642  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
643  AssertThrowMPI(ierr);
644 
645  for (auto &r : result)
646  r.avg = r.sum / numproc;
647  }
648 
649 
650 #else
651 
652  unsigned int
653  n_mpi_processes(const MPI_Comm &)
654  {
655  return 1;
656  }
657 
658 
659 
660  unsigned int
661  this_mpi_process(const MPI_Comm &)
662  {
663  return 0;
664  }
665 
666 
667 
668  const std::vector<unsigned int>
669  mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
670  {
671  return std::vector<unsigned int>{0};
672  }
673 
674 
675 
676  std::vector<IndexSet>
677  create_ascending_partitioning(
678  const MPI_Comm & /*comm*/,
679  const types::global_dof_index locally_owned_size)
680  {
681  return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
682  }
683 
684  IndexSet
685  create_evenly_distributed_partitioning(
686  const MPI_Comm & /*comm*/,
687  const types::global_dof_index total_size)
688  {
689  return complete_index_set(total_size);
690  }
691 
692 
693 
694  MPI_Comm
695  duplicate_communicator(const MPI_Comm &mpi_communicator)
696  {
697  return mpi_communicator;
698  }
699 
700 
701 
702  void
703  free_communicator(MPI_Comm & /*mpi_communicator*/)
704  {}
705 
706 
707 
708  void
709  min_max_avg(const ArrayView<const double> &my_values,
710  const ArrayView<MinMaxAvg> & result,
711  const MPI_Comm &)
712  {
713  AssertDimension(my_values.size(), result.size());
714 
715  for (unsigned int i = 0; i < my_values.size(); ++i)
716  {
717  result[i].sum = my_values[i];
718  result[i].avg = my_values[i];
719  result[i].min = my_values[i];
720  result[i].max = my_values[i];
721  result[i].min_index = 0;
722  result[i].max_index = 0;
723  }
724  }
725 
726 #endif
727 
728  /* Force initialization of static struct: */
729  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
730  MPI_InitFinalize::Signals();
731 
732 
733  MPI_InitFinalize::MPI_InitFinalize(int & argc,
734  char **& argv,
735  const unsigned int max_num_threads)
736  {
737  static bool constructor_has_already_run = false;
738  (void)constructor_has_already_run;
739  Assert(constructor_has_already_run == false,
740  ExcMessage("You can only create a single object of this class "
741  "in a program since it initializes the MPI system."));
742 
743 
744  int ierr = 0;
745 #ifdef DEAL_II_WITH_MPI
746  // if we have PETSc, we will initialize it and let it handle MPI.
747  // Otherwise, we will do it.
748  int MPI_has_been_started = 0;
749  ierr = MPI_Initialized(&MPI_has_been_started);
750  AssertThrowMPI(ierr);
751  AssertThrow(MPI_has_been_started == 0,
752  ExcMessage("MPI error. You can only start MPI once!"));
753 
754  int provided;
755  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
756  // we might use several threads but never call two MPI functions at the
757  // same time. For an explanation of why we do this, see
758  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
759  int wanted = MPI_THREAD_SERIALIZED;
760  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
761  AssertThrowMPI(ierr);
762 
763  // disable for now because at least some implementations always return
764  // MPI_THREAD_SINGLE.
765  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
766  // ExcMessage("MPI reports that we are not allowed to use multiple
767  // threads."));
768 #else
769  // make sure the compiler doesn't warn about these variables
770  (void)argc;
771  (void)argv;
772  (void)ierr;
773 #endif
774 
775  // we are allowed to call MPI_Init ourselves and PETScInitialize will
776  // detect this. This allows us to use MPI_Init_thread instead.
777 #ifdef DEAL_II_WITH_PETSC
778 # ifdef DEAL_II_WITH_SLEPC
779  // Initialize SLEPc (with PETSc):
780  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
781  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
782 # else
783  // or just initialize PETSc alone:
784  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
785  AssertThrow(ierr == 0, ExcPETScError(ierr));
786 # endif
787 
788  // Disable PETSc exception handling. This just prints a large wall
789  // of text that is not particularly helpful for what we do:
790  PetscPopSignalHandler();
791 #endif
792 
793  // Initialize zoltan
794 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
795  float version;
796  Zoltan_Initialize(argc, argv, &version);
797 #endif
798 
799 #ifdef DEAL_II_WITH_P4EST
800  // Initialize p4est and libsc components
801 # if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
802  // This feature is broken in version 2.0.0 for calls to
803  // MPI_Comm_create_group (see cburstedde/p4est#30).
804  // Disabling it leads to more verbose p4est error messages
805  // which should be fine.
806  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
807 # endif
808  p4est_init(nullptr, SC_LP_SILENT);
809 #endif
810 
811  constructor_has_already_run = true;
812 
813 
814  // Now also see how many threads we'd like to run
815  if (max_num_threads != numbers::invalid_unsigned_int)
816  {
817  // set maximum number of threads (also respecting the environment
818  // variable that the called function evaluates) based on what the
819  // user asked
820  MultithreadInfo::set_thread_limit(max_num_threads);
821  }
822  else
823  // user wants automatic choice
824  {
825 #ifdef DEAL_II_WITH_MPI
826  // we need to figure out how many MPI processes there are on the
827  // current node, as well as how many CPU cores we have. for the
828  // first task, check what get_hostname() returns and then do an
829  // allgather so each processor gets the answer
830  //
831  // in calculating the length of the string, don't forget the
832  // terminating \0 on C-style strings
833  const std::string hostname = Utilities::System::get_hostname();
834  const unsigned int max_hostname_size =
835  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
836  std::vector<char> hostname_array(max_hostname_size);
837  std::copy(hostname.c_str(),
838  hostname.c_str() + hostname.size() + 1,
839  hostname_array.begin());
840 
841  std::vector<char> all_hostnames(max_hostname_size *
842  MPI::n_mpi_processes(MPI_COMM_WORLD));
843  const int ierr = MPI_Allgather(hostname_array.data(),
844  max_hostname_size,
845  MPI_CHAR,
846  all_hostnames.data(),
847  max_hostname_size,
848  MPI_CHAR,
849  MPI_COMM_WORLD);
850  AssertThrowMPI(ierr);
851 
852  // count how often our own hostname appears and determine which
853  // instance among these the current process is
854  unsigned int n_local_processes = 0;
855  unsigned int nth_process_on_host = 0;
856  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
857  ++i)
858  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
859  hostname)
860  {
861  ++n_local_processes;
862  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
863  ++nth_process_on_host;
864  }
865  Assert(nth_process_on_host > 0, ExcInternalError());
866 
867 
868  // compute how many cores each process gets. if the number does not
869  // divide evenly, then we get one more core if we are among the
870  // first few processes
871  //
872  // if the number would be zero, round up to one since every process
873  // needs to have at least one thread
874  const unsigned int n_threads =
875  std::max(MultithreadInfo::n_cores() / n_local_processes +
876  (nth_process_on_host <=
877  MultithreadInfo::n_cores() % n_local_processes ?
878  1 :
879  0),
880  1U);
881 #else
882  const unsigned int n_threads = MultithreadInfo::n_cores();
883 #endif
884 
885  // finally set this number of threads
886  MultithreadInfo::set_thread_limit(n_threads);
887  }
888 
889  // As a final step call the at_mpi_init() signal handler.
890  signals.at_mpi_init();
891  }
892 
893 
894 
895  void
896  MPI_InitFinalize::register_request(MPI_Request &request)
897  {
898  // insert if it is not in the set already:
899  requests.insert(&request);
900  }
901 
902 
903 
904  void
905  MPI_InitFinalize::unregister_request(MPI_Request &request)
906  {
907  Assert(
908  requests.find(&request) != requests.end(),
909  ExcMessage(
910  "You tried to call unregister_request() with an invalid request."));
911 
912  requests.erase(&request);
913  }
914 
915 
916 
917  std::set<MPI_Request *> MPI_InitFinalize::requests;
918 
919 
920 
921  MPI_InitFinalize::~MPI_InitFinalize()
922  {
923  // First, call the at_mpi_finalize() signal handler.
924  signals.at_mpi_finalize();
925 
926  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
927  // are no longer used at this point. This is relevant because the
928  // destructors of static objects for these vectors would otherwise run
929  // after MPI_Finalize is called at the end of the program, leading to errors
930 
931 #ifdef DEAL_II_WITH_MPI
932  // Before exiting, wait for nonblocking communication to complete:
933  for (auto request : requests)
934  {
935  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
936  AssertThrowMPI(ierr);
937  }
938 
939  // Start with deal.II MPI vectors and delete vectors from the pools:
940  GrowingVectorMemory<
941  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
942  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
943  release_unused_memory();
944  GrowingVectorMemory<
945  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
946  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
947  release_unused_memory();
948 
949  // Next with Trilinos:
950 # ifdef DEAL_II_WITH_TRILINOS
951  GrowingVectorMemory<
952  TrilinosWrappers::MPI::Vector>::release_unused_memory();
953  GrowingVectorMemory<
954  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
955 # endif
956 #endif
957 
958 
959  // Now deal with PETSc (with or without MPI). Only delete the vectors if
960  // finalize hasn't been called yet, otherwise this will lead to errors.
961 #ifdef DEAL_II_WITH_PETSC
962  if ((PetscInitializeCalled == PETSC_TRUE) &&
963  (PetscFinalizeCalled == PETSC_FALSE))
964  {
965  GrowingVectorMemory<
966  PETScWrappers::MPI::Vector>::release_unused_memory();
967  GrowingVectorMemory<
968  PETScWrappers::MPI::BlockVector>::release_unused_memory();
969 
970 # ifdef DEAL_II_WITH_SLEPC
971  // and now end SLEPc (with PETSc)
972  SlepcFinalize();
973 # else
974  // or just end PETSc.
975  PetscFinalize();
976 # endif
977  }
978 #endif
979 
980 // There is a similar issue with CUDA: The destructor of static objects might
981 // run after the CUDA driver is unloaded. Hence, also release all memory
982 // related to CUDA vectors.
983 #ifdef DEAL_II_WITH_CUDA
984  GrowingVectorMemory<
985  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
986  release_unused_memory();
987  GrowingVectorMemory<
988  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
989  release_unused_memory();
990 #endif
991 
992 #ifdef DEAL_II_WITH_P4EST
993  // now end p4est and libsc
994  // Note: p4est has no finalize function
995  sc_finalize();
996 #endif
997 
998 
999  // only MPI_Finalize if we are running with MPI. We also need to do this
1000  // when running PETSc, because we initialize MPI ourselves before
1001  // calling PetscInitialize
1002 #ifdef DEAL_II_WITH_MPI
1003  if (job_supports_mpi() == true)
1004  {
1005 # if __cpp_lib_uncaught_exceptions >= 201411
1006  // std::uncaught_exception() is deprecated in c++17
1007  if (std::uncaught_exceptions() > 0)
1008 # else
1009  if (std::uncaught_exception() == true)
1010 # endif
1011  {
1012  // do not try to call MPI_Finalize to avoid a deadlock.
1013  }
1014  else
1015  {
1016  const int ierr = MPI_Finalize();
1017  (void)ierr;
1018  AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
1019  }
1020  }
1021 #endif
1022  }
1023 
1024 
1025 
1026  bool
1027  job_supports_mpi()
1028  {
1029 #ifdef DEAL_II_WITH_MPI
1030  int MPI_has_been_started = 0;
1031  const int ierr = MPI_Initialized(&MPI_has_been_started);
1032  AssertThrowMPI(ierr);
1033 
1034  return (MPI_has_been_started > 0);
1035 #else
1036  return false;
1037 #endif
1038  }
1039 
1040 
1041 
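 // For every index in indices_to_look_up, determine the rank that owns it
 // in the partition described by owned_indices. This happens in two steps
 // via a statically partitioned dictionary, as described in the comments
 // below.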
1042  std::vector<unsigned int>
1043  compute_index_owner(const IndexSet &owned_indices,
1044  const IndexSet &indices_to_look_up,
1045  const MPI_Comm &comm)
1046  {
1047  Assert(owned_indices.size() == indices_to_look_up.size(),
1048  ExcMessage("IndexSets have to have the same sizes."));
1049 
1050  Assert(
1051  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1052  ExcMessage("IndexSets have to have the same size on all processes."));
1053 
1054  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1055 
1056  // Step 1: setup dictionary
1057  // The input owned_indices can be partitioned arbitrarily. In the
1058  // dictionary, the index set is statically repartitioned among the
1059  // processes again and extended with information about the actual
1060  // owner of each index.
1061  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1062  owned_indices, indices_to_look_up, comm, owning_ranks);
1063 
1064  // Step 2: read dictionary
1065  // Communicate with the process who owns the index in the static
1066  // partition (i.e. in the dictionary). This process returns the actual
1067  // owner of the index.
1068  ConsensusAlgorithms::Selector<
1069  std::vector<
1070  std::pair<types::global_dof_index, types::global_dof_index>>,
1071  std::vector<unsigned int>>
1072  consensus_algorithm(process, comm);
1073  consensus_algorithm.run();
1074 
1075  return owning_ranks;
1076  }
1077 
1078 
1079 
1080  CollectiveMutex::CollectiveMutex()
1081  : locked(false)
1082  , request(MPI_REQUEST_NULL)
1083  {
1084  MPI_InitFinalize::register_request(request);
1085  }
1086 
1087 
1088 
1089  CollectiveMutex::~CollectiveMutex()
1090  {
1091  Assert(
1092  !locked,
1093  ExcMessage(
1094  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1095 
1096  MPI_InitFinalize::unregister_request(request);
1097  }
1098 
1099 
1100 
1101  void
1102  CollectiveMutex::lock(const MPI_Comm &comm)
1103  {
1104  (void)comm;
1105 
1106  Assert(
1107  !locked,
1108  ExcMessage(
1109  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1110 
1111 #ifdef DEAL_II_WITH_MPI
1112 
1113  // TODO: For now, we implement this mutex with a blocking barrier
1114  // in the lock and unlock. It remains to be tested whether we can
1115  // move to a non-blocking barrier (code disabled below).
1116 
1117  const int ierr = MPI_Barrier(comm);
1118  AssertThrowMPI(ierr);
1119 
1120 # if 0
1121  // wait for non-blocking barrier to finish. This is a noop the
1122  // first time we lock().
1123  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1124  AssertThrowMPI(ierr);
1125 # else
1126  // nothing to do as blocking barrier already completed
1127 # endif
1128 #endif
1129 
1130  locked = true;
1131  }
1132 
1133 
1134 
1135  void
1136  CollectiveMutex::unlock(const MPI_Comm &comm)
1137  {
1138  (void)comm;
1139 
1140  Assert(
1141  locked,
1142  ExcMessage(
1143  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1144 
1145 #ifdef DEAL_II_WITH_MPI
1146 
1147  // TODO: For now, we implement this mutex with a blocking barrier
1148  // in the lock and unlock. It remains to be tested whether we can
1149  // move to a non-blocking barrier (code disabled below):
1150 # if 0
1151  const int ierr = MPI_Ibarrier(comm, &request);
1152  AssertThrowMPI(ierr);
1153 # else
1154  const int ierr = MPI_Barrier(comm);
1155  AssertThrowMPI(ierr);
1156 # endif
1157 #endif
1158 
1159  locked = false;
1160  }
1161 
1162 
1163 #ifndef DOXYGEN
1164  // explicit instantiations
1165 
1166  // booleans aren't in MPI_SCALARS
1167  template bool
1168  reduce(const bool &,
1169  const MPI_Comm &,
1170  const std::function<bool(const bool &, const bool &)> &,
1171  const unsigned int);
1172 
1173  template std::vector<bool>
1174  reduce(const std::vector<bool> &,
1175  const MPI_Comm &,
1176  const std::function<std::vector<bool>(const std::vector<bool> &,
1177  const std::vector<bool> &)> &,
1178  const unsigned int);
1179 
1180  template bool
1181  all_reduce(const bool &,
1182  const MPI_Comm &,
1183  const std::function<bool(const bool &, const bool &)> &);
1184 
1185  template std::vector<bool>
1186  all_reduce(
1187  const std::vector<bool> &,
1188  const MPI_Comm &,
1189  const std::function<std::vector<bool>(const std::vector<bool> &,
1190  const std::vector<bool> &)> &);
1191 
1192  // We need an explicit instantiation of this for the same reason as the
1193  // other types described in mpi.inst.in
1194  template void
1195  internal::all_reduce<bool>(const MPI_Op &,
1196  const ArrayView<const bool> &,
1197  const MPI_Comm &,
1198  const ArrayView<bool> &);
1199 
1200 
1201  template bool
1202  logical_or<bool>(const bool &, const MPI_Comm &);
1203 
1204 
1205  template void
1206  logical_or<bool>(const ArrayView<const bool> &,
1207  const MPI_Comm &,
1208  const ArrayView<bool> &);
1209 
1210 
1211  template std::vector<unsigned int>
1212  compute_set_union(const std::vector<unsigned int> &vec,
1213  const MPI_Comm & comm);
1214 
1215 
1216  template std::set<unsigned int>
1217  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1218 #endif
1219 
1220 #include "mpi.inst"
1221  } // end of namespace MPI
1222 } // end of namespace Utilities
1223 
1224 DEAL_II_NAMESPACE_CLOSE