mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
23 #include <deal.II/base/mpi_tags.h>
25 #include <deal.II/base/utilities.h>
26 
32 
33 #include <boost/serialization/utility.hpp>
34 
35 #include <Kokkos_Core.hpp>
36 
37 #include <iostream>
38 #include <limits>
39 #include <numeric>
40 #include <set>
41 #include <vector>
42 
43 #ifdef DEAL_II_WITH_TRILINOS
44 # ifdef DEAL_II_WITH_MPI
47 
48 # include <Epetra_MpiComm.h>
49 # endif
50 #endif
51 
52 #ifdef DEAL_II_WITH_PETSC
55 
56 # include <petscsys.h>
57 #endif
58 
59 #ifdef DEAL_II_WITH_SLEPC
61 
62 # include <slepcsys.h>
63 #endif
64 
65 #ifdef DEAL_II_WITH_P4EST
66 # include <p4est_bits.h>
67 #endif
68 
69 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
70 # include <zoltan_cpp.h>
71 #endif
72 
73 DEAL_II_NAMESPACE_OPEN
74 
75 
76 namespace Utilities
77 {
78  IndexSet
79  create_evenly_distributed_partitioning(
80  const unsigned int my_partition_id,
81  const unsigned int n_partitions,
82  const types::global_dof_index total_size)
83  {
84  static_assert(
85  std::is_same<types::global_dof_index, IndexSet::size_type>::value,
86  "IndexSet::size_type must match types::global_dof_index for "
87  "using this function");
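  // The arithmetic below distributes total_size indices as evenly as
  // possible: every partition receives at least total_size/n_partitions
  // indices, and the first (total_size % n_partitions) partitions receive
  // one extra index each, which shifts their ranges accordingly.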
88  const unsigned int remain = total_size % n_partitions;
89 
90  const IndexSet::size_type min_size = total_size / n_partitions;
91 
92  const IndexSet::size_type begin =
93  min_size * my_partition_id + std::min(my_partition_id, remain);
94  const IndexSet::size_type end =
95  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
96  IndexSet result(total_size);
97  result.add_range(begin, end);
98  return result;
99  }
100 
101  namespace MPI
102  {
103 #ifdef DEAL_II_WITH_MPI
104  // Provide definitions of template variables for all valid instantiations.
105  template const MPI_Datatype mpi_type_id_for_type<bool>;
106  template const MPI_Datatype mpi_type_id_for_type<char>;
107  template const MPI_Datatype mpi_type_id_for_type<signed char>;
108  template const MPI_Datatype mpi_type_id_for_type<short>;
109  template const MPI_Datatype mpi_type_id_for_type<int>;
110  template const MPI_Datatype mpi_type_id_for_type<long int>;
111  template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
112  template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
113  template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
114  template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
115  template const MPI_Datatype mpi_type_id_for_type<float>;
116  template const MPI_Datatype mpi_type_id_for_type<double>;
117  template const MPI_Datatype mpi_type_id_for_type<long double>;
118  template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
119  template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
120 #endif
121 
122 
123  MinMaxAvg
124  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
125  {
126  MinMaxAvg result;
127  min_max_avg(ArrayView<const double>(my_value),
128  ArrayView<MinMaxAvg>(result),
129  mpi_communicator);
130 
131  return result;
132  }
133 
134 
135 
136  std::vector<MinMaxAvg>
137  min_max_avg(const std::vector<double> &my_values,
138  const MPI_Comm & mpi_communicator)
139  {
140  std::vector<MinMaxAvg> results(my_values.size());
141  min_max_avg(my_values, results, mpi_communicator);
142 
143  return results;
144  }
145 
146 
147 
148 #ifdef DEAL_II_WITH_MPI
149  unsigned int
150  n_mpi_processes(const MPI_Comm &mpi_communicator)
151  {
152  int n_jobs = 1;
153  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
154  AssertThrowMPI(ierr);
155 
156  return n_jobs;
157  }
158 
159 
160  unsigned int
161  this_mpi_process(const MPI_Comm &mpi_communicator)
162  {
163  int rank = 0;
164  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
165  AssertThrowMPI(ierr);
166 
167  return rank;
168  }
169 
170 
171 
172  const std::vector<unsigned int>
173  mpi_processes_within_communicator(const MPI_Comm &comm_large,
174  const MPI_Comm &comm_small)
175  {
176  if (Utilities::MPI::job_supports_mpi() == false)
177  return std::vector<unsigned int>{0};
178 
179  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
180  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
181 
182  std::vector<unsigned int> ranks(size);
183  const int ierr = MPI_Allgather(
184  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
185  AssertThrowMPI(ierr);
186 
187  return ranks;
188  }
189 
190 
191 
192  MPI_Comm
193  duplicate_communicator(const MPI_Comm &mpi_communicator)
194  {
195  MPI_Comm new_communicator;
196  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
197  AssertThrowMPI(ierr);
198  return new_communicator;
199  }
200 
201 
202 
203  void
204  free_communicator(MPI_Comm &mpi_communicator)
205  {
206  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
207  const int ierr = MPI_Comm_free(&mpi_communicator);
208  AssertThrowMPI(ierr);
209  }
210 
211 
212 
213  int
214  create_group(const MPI_Comm & comm,
215  const MPI_Group &group,
216  const int tag,
217  MPI_Comm * new_comm)
218  {
219  const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
220  AssertThrowMPI(ierr);
221  return ierr;
222  }
223 
224 
225 
226  std::vector<IndexSet>
227  create_ascending_partitioning(
228  const MPI_Comm & comm,
229  const types::global_dof_index locally_owned_size)
230  {
231  static_assert(
232  std::is_same<types::global_dof_index, IndexSet::size_type>::value,
233  "IndexSet::size_type must match types::global_dof_index for "
234  "using this function");
235  const unsigned int n_proc = n_mpi_processes(comm);
236  const std::vector<IndexSet::size_type> sizes =
237  all_gather(comm, locally_owned_size);
238  const auto total_size =
239  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
240 
241  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
242 
243  IndexSet::size_type begin = 0;
244  for (unsigned int i = 0; i < n_proc; ++i)
245  {
246  res[i].add_range(begin, begin + sizes[i]);
247  begin = begin + sizes[i];
248  }
249 
250  return res;
251  }
252 
253 
254 
255  IndexSet
256  create_evenly_distributed_partitioning(
257  const MPI_Comm & comm,
258  const types::global_dof_index total_size)
259  {
260  const unsigned int this_proc = this_mpi_process(comm);
261  const unsigned int n_proc = n_mpi_processes(comm);
262 
263  return Utilities::create_evenly_distributed_partitioning(this_proc,
264  n_proc,
265  total_size);
266  }
267 
268 
269 
270  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
271  create_mpi_data_type_n_bytes(const std::size_t n_bytes)
272  {
273  MPI_Datatype result;
274  int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
275  AssertThrowMPI(ierr);
276  ierr = MPI_Type_commit(&result);
277  AssertThrowMPI(ierr);
278 
279 # ifdef DEBUG
280  MPI_Count size64;
281  ierr = MPI_Type_size_x(result, &size64);
282  AssertThrowMPI(ierr);
283 
284  Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
285 # endif
286 
287  // Now put the new data type into a std::unique_ptr with a custom
288  // deleter. We call the std::unique_ptr constructor that takes as
289  // first argument a pointer (here, a pointer to a copy of the `result`
290  // object) and as second argument a pointer-to-function, for which
291  // we here use a lambda function without captures that acts as the
292  // 'deleter' object: it calls `MPI_Type_free` and then deletes the
293  // pointer. To avoid a compiler warning about a null this pointer
294  // in the lambda (which doesn't make sense: the lambda doesn't store
295  // anything), we create the deleter first.
296  auto deleter = [](MPI_Datatype *p) {
297  if (p != nullptr)
298  {
299  const int ierr = MPI_Type_free(p);
300  (void)ierr;
301  AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
302 
303  delete p;
304  }
305  };
306 
307  return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
308  new MPI_Datatype(result), deleter);
309  }
310 
311 
312 
313  std::vector<unsigned int>
314  compute_point_to_point_communication_pattern(
315  const MPI_Comm & mpi_comm,
316  const std::vector<unsigned int> &destinations)
317  {
318  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
319  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
320  (void)myid;
321  (void)n_procs;
322 
323  for (const unsigned int destination : destinations)
324  {
325  (void)destination;
326  AssertIndexRange(destination, n_procs);
327  }
328 
329 
330  // Have a little function that checks if destinations provided
331  // to the current process are unique. The way it does this is
332  // to create a sorted list of destinations and then walk through
333  // the list and look at successive elements -- if we find the
334  // same number twice, we know that the destinations were not
335  // unique
336  const bool my_destinations_are_unique = [destinations]() {
337  if (destinations.size() == 0)
338  return true;
339  else
340  {
341  std::vector<unsigned int> my_destinations = destinations;
342  std::sort(my_destinations.begin(), my_destinations.end());
343  return (std::adjacent_find(my_destinations.begin(),
344  my_destinations.end()) ==
345  my_destinations.end());
346  }
347  }();
348 
349  // If all processes report that they have unique destinations,
350  // then we can short-cut the process using a consensus algorithm (which
351  // is implemented only for the case of unique destinations):
352  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
353  1)
354  {
355  return ConsensusAlgorithms::nbx<char, char>(
356  destinations, {}, {}, {}, mpi_comm);
357  }
358 
359  // So we need to run a different algorithm, specifically one that
360  // requires more memory -- MPI_Reduce_scatter_block will require memory
361  // proportional to the number of processes involved; that function is
362  // available for MPI 2.2 or later:
363  static CollectiveMutex mutex;
364  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
365 
366  const int mpi_tag =
367  internal::Tags::compute_point_to_point_communication_pattern;
368 
369  // Calculate the number of messages to send to each process
370  std::vector<unsigned int> dest_vector(n_procs);
371  for (const auto &el : destinations)
372  ++dest_vector[el];
373 
374  // Find how many processes will send to this one
375  // by reducing with sum and then scattering the
376  // results over all processes
377  unsigned int n_recv_from;
378  const int ierr = MPI_Reduce_scatter_block(
379  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
380 
381  AssertThrowMPI(ierr);
382 
383  // Send myid to every process in `destinations` vector...
384  std::vector<MPI_Request> send_requests(destinations.size());
385  for (const auto &el : destinations)
386  {
387  const int ierr =
388  MPI_Isend(&myid,
389  1,
390  MPI_UNSIGNED,
391  el,
392  mpi_tag,
393  mpi_comm,
394  send_requests.data() + (&el - destinations.data()));
395  AssertThrowMPI(ierr);
396  }
397 
398 
399  // Receive `n_recv_from` times from the processes
400  // who communicate with this one. Store the obtained id's
401  // in the resulting vector
402  std::vector<unsigned int> origins(n_recv_from);
403  for (auto &el : origins)
404  {
405  const int ierr = MPI_Recv(&el,
406  1,
407  MPI_UNSIGNED,
408  MPI_ANY_SOURCE,
409  mpi_tag,
410  mpi_comm,
411  MPI_STATUS_IGNORE);
412  AssertThrowMPI(ierr);
413  }
414 
415  if (destinations.size() > 0)
416  {
417  const int ierr = MPI_Waitall(destinations.size(),
418  send_requests.data(),
419  MPI_STATUSES_IGNORE);
420  AssertThrowMPI(ierr);
421  }
422 
423  return origins;
424  }
425 
426 
427 
428  unsigned int
429  compute_n_point_to_point_communications(
430  const MPI_Comm & mpi_comm,
431  const std::vector<unsigned int> &destinations)
432  {
433  // Have a little function that checks if destinations provided
434  // to the current process are unique:
435  const bool my_destinations_are_unique = [destinations]() {
436  std::vector<unsigned int> my_destinations = destinations;
437  const unsigned int n_destinations = my_destinations.size();
438  std::sort(my_destinations.begin(), my_destinations.end());
439  my_destinations.erase(std::unique(my_destinations.begin(),
440  my_destinations.end()),
441  my_destinations.end());
442  return (my_destinations.size() == n_destinations);
443  }();
444 
445  // If all processes report that they have unique destinations,
446  // then we can short-cut the process using a consensus algorithm:
447 
448  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
449  1)
450  {
451  return ConsensusAlgorithms::nbx<char, char>(
452  destinations, {}, {}, {}, mpi_comm)
453  .size();
454  }
455  else
456  {
457  const unsigned int n_procs =
458  Utilities::MPI::n_mpi_processes(mpi_comm);
459 
460  for (const unsigned int destination : destinations)
461  {
462  (void)destination;
463  AssertIndexRange(destination, n_procs);
464  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
465  ExcMessage(
466  "There is no point in communicating with ourselves."));
467  }
468 
469  // Calculate the number of messages to send to each process
470  std::vector<unsigned int> dest_vector(n_procs);
471  for (const auto &el : destinations)
472  ++dest_vector[el];
473 
474  // Find out how many processes will send to this one
475  // MPI_Reduce_scatter(_block) does exactly this
476  unsigned int n_recv_from = 0;
477 
478  const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
479  &n_recv_from,
480  1,
481  MPI_UNSIGNED,
482  MPI_SUM,
483  mpi_comm);
484 
485  AssertThrowMPI(ierr);
486 
487  return n_recv_from;
488  }
489  }
490 
491 
492 
493  namespace
494  {
495  // custom MPI_Op for the min_max_avg() reduction
496  void
497  max_reduce(const void *in_lhs_,
498  void * inout_rhs_,
499  int * len,
500  MPI_Datatype *)
501  {
502  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
503  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
504 
505  for (int i = 0; i < *len; ++i)
506  {
507  inout_rhs[i].sum += in_lhs[i].sum;
508  if (inout_rhs[i].min > in_lhs[i].min)
509  {
510  inout_rhs[i].min = in_lhs[i].min;
511  inout_rhs[i].min_index = in_lhs[i].min_index;
512  }
513  else if (inout_rhs[i].min == in_lhs[i].min)
514  {
515  // choose lower cpu index when tied to make operator commutative
516  if (inout_rhs[i].min_index > in_lhs[i].min_index)
517  inout_rhs[i].min_index = in_lhs[i].min_index;
518  }
519 
520  if (inout_rhs[i].max < in_lhs[i].max)
521  {
522  inout_rhs[i].max = in_lhs[i].max;
523  inout_rhs[i].max_index = in_lhs[i].max_index;
524  }
525  else if (inout_rhs[i].max == in_lhs[i].max)
526  {
527  // choose lower cpu index when tied to make operator commutative
528  if (inout_rhs[i].max_index > in_lhs[i].max_index)
529  inout_rhs[i].max_index = in_lhs[i].max_index;
530  }
531  }
532  }
533  } // namespace
534 
535 
536 
537  void
538  min_max_avg(const ArrayView<const double> &my_values,
539  const ArrayView<MinMaxAvg> & result,
540  const MPI_Comm & mpi_communicator)
541  {
542  // If MPI was not started, we have a serial computation and cannot run
543  // the other MPI commands
544  if (job_supports_mpi() == false ||
545  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
546  {
547  for (unsigned int i = 0; i < my_values.size(); ++i)
548  {
549  result[i].sum = my_values[i];
550  result[i].avg = my_values[i];
551  result[i].min = my_values[i];
552  result[i].max = my_values[i];
553  result[i].min_index = 0;
554  result[i].max_index = 0;
555  }
556  return;
557  }
558 
559  /*
560  * A custom MPI datatype handle describing the memory layout of the
561  * MinMaxAvg struct. It is initialized the first time control reaches
562  * this static variable, and so is hopefully not initialized too early.
563  */
564  static MPI_Datatype type = []() {
565  MPI_Datatype type;
566 
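  // MinMaxAvg is laid out as three doubles (sum, min, max), followed by
  // two integers (min_index, max_index), and a final double (avg); the
  // block lengths, displacements, and member types below mirror exactly
  // that layout.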
567  int lengths[] = {3, 2, 1};
568 
569  MPI_Aint displacements[] = {0,
570  offsetof(MinMaxAvg, min_index),
571  offsetof(MinMaxAvg, avg)};
572 
573  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
574 
575  int ierr =
576  MPI_Type_create_struct(3, lengths, displacements, types, &type);
577  AssertThrowMPI(ierr);
578 
579  ierr = MPI_Type_commit(&type);
580  AssertThrowMPI(ierr);
581 
582  /* Ensure that we free the allocated datatype again at the end of
583  * the program run just before we call MPI_Finalize():*/
584  MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
585  int ierr = MPI_Type_free(&type);
586  AssertThrowMPI(ierr);
587  });
588 
589  return type;
590  }();
591 
592  /*
593  * A custom MPI op handle for our max_reduce function.
594  * It is initialized the first time control reaches this static
595  * variable, and so is hopefully not initialized too early.
596  */
597  static MPI_Op op = []() {
598  MPI_Op op;
599 
600  int ierr =
601  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
602  static_cast<int>(true),
603  &op);
604  AssertThrowMPI(ierr);
605 
606  /* Ensure that we free the allocated op again at the end of the
607  * program run just before we call MPI_Finalize():*/
608  MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
609  int ierr = MPI_Op_free(&op);
610  AssertThrowMPI(ierr);
611  });
612 
613  return op;
614  }();
615 
616  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
617  Utilities::MPI::max(my_values.size(), mpi_communicator));
618 
619  AssertDimension(my_values.size(), result.size());
620 
621  // To avoid uninitialized values on some MPI implementations, provide
622  // result with a default value already...
623  MinMaxAvg dummy = {0.,
624  std::numeric_limits<double>::max(),
625  std::numeric_limits<double>::lowest(),
626  0,
627  0,
628  0.};
629 
630  for (auto &i : result)
631  i = dummy;
632 
633  const unsigned int my_id =
634  ::Utilities::MPI::this_mpi_process(mpi_communicator);
635  const unsigned int numproc =
636  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
637 
638  std::vector<MinMaxAvg> in(my_values.size());
639 
640  for (unsigned int i = 0; i < my_values.size(); ++i)
641  {
642  in[i].sum = in[i].min = in[i].max = my_values[i];
643  in[i].min_index = in[i].max_index = my_id;
644  }
645 
646  int ierr = MPI_Allreduce(
647  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
648  AssertThrowMPI(ierr);
649 
650  for (auto &r : result)
651  r.avg = r.sum / numproc;
652  }
653 
654 
655 #else
656 
657  unsigned int
658  n_mpi_processes(const MPI_Comm &)
659  {
660  return 1;
661  }
662 
663 
664 
665  unsigned int
666  this_mpi_process(const MPI_Comm &)
667  {
668  return 0;
669  }
670 
671 
672 
673  const std::vector<unsigned int>
674  mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
675  {
676  return std::vector<unsigned int>{0};
677  }
678 
679 
680 
681  std::vector<IndexSet>
682  create_ascending_partitioning(
683  const MPI_Comm & /*comm*/,
684  const types::global_dof_index locally_owned_size)
685  {
686  return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
687  }
688 
689  IndexSet
690  create_evenly_distributed_partitioning(
691  const MPI_Comm & /*comm*/,
692  const types::global_dof_index total_size)
693  {
694  return complete_index_set(total_size);
695  }
696 
697 
698 
699  MPI_Comm
700  duplicate_communicator(const MPI_Comm &mpi_communicator)
701  {
702  return mpi_communicator;
703  }
704 
705 
706 
707  void
708  free_communicator(MPI_Comm & /*mpi_communicator*/)
709  {}
710 
711 
712 
713  void
714  min_max_avg(const ArrayView<const double> &my_values,
715  const ArrayView<MinMaxAvg> & result,
716  const MPI_Comm &)
717  {
718  AssertDimension(my_values.size(), result.size());
719 
720  for (unsigned int i = 0; i < my_values.size(); ++i)
721  {
722  result[i].sum = my_values[i];
723  result[i].avg = my_values[i];
724  result[i].min = my_values[i];
725  result[i].max = my_values[i];
726  result[i].min_index = 0;
727  result[i].max_index = 0;
728  }
729  }
730 
731 #endif
732 
733  /* Force initialization of static struct: */
734  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
735  MPI_InitFinalize::Signals();
736 
737 
738  MPI_InitFinalize::MPI_InitFinalize(int & argc,
739  char **& argv,
740  const unsigned int max_num_threads)
741  {
742  static bool constructor_has_already_run = false;
743  (void)constructor_has_already_run;
744  Assert(constructor_has_already_run == false,
745  ExcMessage("You can only create a single object of this class "
746  "in a program since it initializes the MPI system."));
747 
748 
749  int ierr = 0;
750 #ifdef DEAL_II_WITH_MPI
751  // if we have PETSc, we will initialize it and let it handle MPI.
752  // Otherwise, we will do it.
753  int MPI_has_been_started = 0;
754  ierr = MPI_Initialized(&MPI_has_been_started);
755  AssertThrowMPI(ierr);
756  AssertThrow(MPI_has_been_started == 0,
757  ExcMessage("MPI error. You can only start MPI once!"));
758 
759  int provided;
760  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
761  // we might use several threads but never call two MPI functions at the
762  // same time. For an explanation of why we do this, see
763  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
764  int wanted = MPI_THREAD_SERIALIZED;
765  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
766  AssertThrowMPI(ierr);
767 
768  // disable for now because at least some implementations always return
769  // MPI_THREAD_SINGLE.
770  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
771  // ExcMessage("MPI reports that we are not allowed to use multiple
772  // threads."));
773 #else
774  // make sure the compiler doesn't warn about these variables
775  (void)argc;
776  (void)argv;
777  (void)ierr;
778 #endif
779 
780  // Initialize Kokkos
781  {
782  // argv has argc+1 elements and the last one is a nullptr. To append
783  // one element we thus create a new argv by copying the first argc
784  // elements, then appending the new option and a terminating nullptr.
785  std::vector<char *> argv_new(argc + 2);
786  for (int i = 0; i < argc; ++i)
787  argv_new[i] = argv[i];
788  std::stringstream threads_flag;
789 #if KOKKOS_VERSION >= 30700
790  threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
791 #else
792  threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
793 #endif
794  std::string threads_flag_string = threads_flag.str();
795  argv_new[argc] = const_cast<char *>(threads_flag_string.c_str());
796  argv_new[argc + 1] = nullptr;
797  // The first argument of Kokkos::initialize is of type int&. Hence, we
798  // need to define a new variable to pass to it (instead of using argc+1
799  // inline).
800  int argc_new = argc + 1;
801  Kokkos::initialize(argc_new, argv_new.data());
802  }
803 
804  // we are allowed to call MPI_Init ourselves and PETScInitialize will
805  // detect this. This allows us to use MPI_Init_thread instead.
806 #ifdef DEAL_II_WITH_PETSC
807 # ifdef DEAL_II_WITH_SLEPC
808  // Initialize SLEPc (with PETSc):
809  finalize_petscslepc = SlepcInitializeCalled ? false : true;
810  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
811  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
812 # else
813  // or just initialize PETSc alone:
814  finalize_petscslepc = PetscInitializeCalled ? false : true;
815  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
816  AssertThrow(ierr == 0, ExcPETScError(ierr));
817 # endif
818 
819  // Disable PETSc exception handling. This just prints a large wall
820  // of text that is not particularly helpful for what we do:
821  PetscPopSignalHandler();
822 #endif
823 
824  // Initialize zoltan
825 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
826  float version;
827  Zoltan_Initialize(argc, argv, &version);
828 #endif
829 
830 #ifdef DEAL_II_WITH_P4EST
831  // Initialize p4est and libsc components
832 # if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
833  // This feature is broken in version 2.0.0 for calls to
834  // MPI_Comm_create_group (see cburstedde/p4est#30).
835  // Disabling it leads to more verbose p4est error messages
836  // which should be fine.
837  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
838 # endif
839  p4est_init(nullptr, SC_LP_SILENT);
840 #endif
841 
842  constructor_has_already_run = true;
843 
844 
845  // Now also see how many threads we'd like to run
846  if (max_num_threads != numbers::invalid_unsigned_int)
847  {
848  // set maximum number of threads (also respecting the environment
849  // variable that the called function evaluates) based on what the
850  // user asked
851  MultithreadInfo::set_thread_limit(max_num_threads);
852  }
853  else
854  // user wants automatic choice
855  {
856 #ifdef DEAL_II_WITH_MPI
857  // we need to figure out how many MPI processes there are on the
858  // current node, as well as how many CPU cores we have. for the
859  // first task, check what get_hostname() returns and then do an
860  // allgather so each processor gets the answer
861  //
862  // in calculating the length of the string, don't forget the
863  // terminating \0 on C-style strings
864  const std::string hostname = Utilities::System::get_hostname();
865  const unsigned int max_hostname_size =
866  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
867  std::vector<char> hostname_array(max_hostname_size);
868  std::copy(hostname.c_str(),
869  hostname.c_str() + hostname.size() + 1,
870  hostname_array.begin());
871 
872  std::vector<char> all_hostnames(max_hostname_size *
873  MPI::n_mpi_processes(MPI_COMM_WORLD));
874  const int ierr = MPI_Allgather(hostname_array.data(),
875  max_hostname_size,
876  MPI_CHAR,
877  all_hostnames.data(),
878  max_hostname_size,
879  MPI_CHAR,
880  MPI_COMM_WORLD);
881  AssertThrowMPI(ierr);
882 
883  // count how often our own hostname appears and determine which
884  // instance among these the current process is
885  unsigned int n_local_processes = 0;
886  unsigned int nth_process_on_host = 0;
887  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
888  ++i)
889  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
890  hostname)
891  {
892  ++n_local_processes;
893  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
894  ++nth_process_on_host;
895  }
896  Assert(nth_process_on_host > 0, ExcInternalError());
897 
898 
899  // compute how many cores each process gets. if the number does not
900  // divide evenly, then we get one more core if we are among the
901  // first few processes
902  //
903  // if the number would be zero, round up to one since every process
904  // needs to have at least one thread
905  const unsigned int n_threads =
906  std::max(MultithreadInfo::n_cores() / n_local_processes +
907  (nth_process_on_host <=
908  MultithreadInfo::n_cores() % n_local_processes ?
909  1 :
910  0),
911  1U);
912 #else
913  const unsigned int n_threads = MultithreadInfo::n_cores();
914 #endif
915 
916  // finally set this number of threads
917  MultithreadInfo::set_thread_limit(n_threads);
918  }
919 
920  // As a final step call the at_mpi_init() signal handler.
921  signals.at_mpi_init();
922  }
923 
924 
925 
926  void
927  MPI_InitFinalize::register_request(MPI_Request &request)
928  {
929  // insert if it is not in the set already:
930  requests.insert(&request);
931  }
932 
933 
934 
935  void
936  MPI_InitFinalize::unregister_request(MPI_Request &request)
937  {
938  Assert(
939  requests.find(&request) != requests.end(),
940  ExcMessage(
941  "You tried to call unregister_request() with an invalid request."));
942 
943  requests.erase(&request);
944  }
945 
946 
947 
948  std::set<MPI_Request *> MPI_InitFinalize::requests;
949 
950 
951 
952  MPI_InitFinalize::~MPI_InitFinalize()
953  {
954  // First, call the at_mpi_finalize() signal handler.
955  signals.at_mpi_finalize();
956 
957  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
958  // are no longer used at this point. this is relevant because the static
959  // object destructors run for these vectors at the end of the program
960  // would run after MPI_Finalize is called, leading to errors
961 
962 #ifdef DEAL_II_WITH_MPI
963  // Before exiting, wait for nonblocking communication to complete:
964  for (auto request : requests)
965  {
966  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
967  AssertThrowMPI(ierr);
968  }
969 
970  // Start with deal.II MPI vectors and delete vectors from the pools:
971  GrowingVectorMemory<
972  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
973  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
974  release_unused_memory();
975  GrowingVectorMemory<
976  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
977  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
978  release_unused_memory();
979 
980  // Next with Trilinos:
981 # ifdef DEAL_II_WITH_TRILINOS
982  GrowingVectorMemory<
983  TrilinosWrappers::MPI::Vector>::release_unused_memory();
984  GrowingVectorMemory<
985  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
986 # endif
987 #endif
988 
989 
990  // Now deal with PETSc (with or without MPI). Only delete the vectors if
991  // finalize hasn't been called yet, otherwise this will lead to errors.
992 #ifdef DEAL_II_WITH_PETSC
993  if (!PetscFinalizeCalled)
994  {
995  GrowingVectorMemory<
996  PETScWrappers::MPI::Vector>::release_unused_memory();
997  GrowingVectorMemory<
998  PETScWrappers::MPI::BlockVector>::release_unused_memory();
999  }
1000 # ifdef DEAL_II_WITH_SLEPC
1001  // and now end SLEPc with PETSc if we did so
1002  if (finalize_petscslepc)
1003  SlepcFinalize();
1004 # else
1005  // or just end PETSc if we did so
1006  if (finalize_petscslepc)
1007  PetscFinalize();
1008 # endif
1009 #endif
1010 
1011 #ifdef DEAL_II_WITH_P4EST
1012  // now end p4est and libsc
1013  // Note: p4est has no finalize function
1014  sc_finalize();
1015 #endif
1016 
1017 
1018  // Finalize Kokkos
1019  Kokkos::finalize();
1020 
1021  // only MPI_Finalize if we are running with MPI. We also need to do this
1022  // when running PETSc, because we initialize MPI ourselves before
1023  // calling PetscInitialize
1024 #ifdef DEAL_II_WITH_MPI
1025  if (job_supports_mpi() == true)
1026  {
1027 # if __cpp_lib_uncaught_exceptions >= 201411
1028  // std::uncaught_exception() is deprecated in c++17
1029  if (std::uncaught_exceptions() > 0)
1030 # else
1031  if (std::uncaught_exception() == true)
1032 # endif
1033  {
1034  // do not try to call MPI_Finalize to avoid a deadlock.
1035  }
1036  else
1037  {
1038  const int ierr = MPI_Finalize();
1039  (void)ierr;
1040  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1041  }
1042  }
1043 #endif
1044  }
1045 
1046 
1047 
1048  bool
1049  job_supports_mpi()
1050  {
1051 #ifdef DEAL_II_WITH_MPI
1052  int MPI_has_been_started = 0;
1053  const int ierr = MPI_Initialized(&MPI_has_been_started);
1054  AssertThrowMPI(ierr);
1055 
1056  return (MPI_has_been_started > 0);
1057 #else
1058  return false;
1059 #endif
1060  }
1061 
1062 
1063 
1064  std::vector<unsigned int>
1065  compute_index_owner(const IndexSet &owned_indices,
1066  const IndexSet &indices_to_look_up,
1067  const MPI_Comm &comm)
1068  {
1069  Assert(owned_indices.size() == indices_to_look_up.size(),
1070  ExcMessage("IndexSets have to have the same sizes."));
1071 
1072  Assert(
1073  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1074  ExcMessage("IndexSets have to have the same size on all processes."));
1075 
1076  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1077 
1078  // Step 1: setup dictionary
1079  // The input owned_indices can be partitioned arbitrarily. In the
1080  // dictionary, the index set is statically repartitioned among the
1081  // processes again and extended with information about the actual
1082  // owner of the index.
1083  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1084  owned_indices, indices_to_look_up, comm, owning_ranks);
1085 
1086  // Step 2: read dictionary
1087  // Communicate with the process who owns the index in the static
1088  // partition (i.e. in the dictionary). This process returns the actual
1089  // owner of the index.
1090  ConsensusAlgorithms::Selector<
1091  std::vector<
1092  std::pair<types::global_dof_index, types::global_dof_index>>,
1093  std::vector<unsigned int>>
1094  consensus_algorithm;
1095  consensus_algorithm.run(process, comm);
1096 
1097  return owning_ranks;
1098  }
1099 
1100 
1101 
1102  namespace internal
1103  {
1104  namespace CollectiveMutexImplementation
1105  {
1110  void
1111  check_exception()
1112  {
1113 #ifdef DEAL_II_WITH_MPI
1114 # if __cpp_lib_uncaught_exceptions >= 201411
1115  // std::uncaught_exception() is deprecated in c++17
1116  if (std::uncaught_exceptions() != 0)
1117 # else
1118  if (std::uncaught_exception() == true)
1119 # endif
1120  {
1121  std::cerr
1122  << "---------------------------------------------------------\n"
1123  << "An exception was thrown inside a section of the program\n"
1124  << "guarded by a CollectiveMutex.\n"
1125  << "Because a CollectiveMutex guards critical communication,\n"
1126  << "handling the exception would likely\n"
1127  << "deadlock because only the current process is aware of the\n"
1128  << "exception. To prevent this deadlock, the program will be\n"
1129  << "aborted.\n"
1130  << "---------------------------------------------------------"
1131  << std::endl;
1132 
1133  MPI_Abort(MPI_COMM_WORLD, 1);
1134  }
1135 #endif
1136  }
1137  } // namespace CollectiveMutexImplementation
1138  } // namespace internal
1139 
1140 
1141 
1142  CollectiveMutex::CollectiveMutex()
1143  : locked(false)
1144  , request(MPI_REQUEST_NULL)
1145  {
1146  Utilities::MPI::MPI_InitFinalize::register_request(request);
1147  }
1148 
1149 
1150 
1151  CollectiveMutex::~CollectiveMutex()
1152  {
1153  // First check if this destructor is called during exception handling;
1154  // if so, abort.
1155  internal::CollectiveMutexImplementation::check_exception();
1156 
1157  Assert(
1158  !locked,
1159  ExcMessage(
1160  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1161 
1161 
1162  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1163  }
1164 
1165 
1166 
1167  void
1168  CollectiveMutex::lock(const MPI_Comm &comm)
1169  {
1170  (void)comm;
1171 
1172  Assert(
1173  !locked,
1174  ExcMessage(
1175  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1176 
1177 #ifdef DEAL_II_WITH_MPI
1178 
1179  // TODO: For now, we implement this mutex with a blocking barrier
1180  // in the lock and unlock. It needs to be tested whether we can move
1181  // to a non-blocking barrier (code disabled below).
1182 
1183  const int ierr = MPI_Barrier(comm);
1184  AssertThrowMPI(ierr);
1185 
1186 # if 0
1187  // wait for non-blocking barrier to finish. This is a noop the
1188  // first time we lock().
1189  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1190  AssertThrowMPI(ierr);
1191 # else
1192  // nothing to do as blocking barrier already completed
1193 # endif
1194 #endif
1195 
1196  locked = true;
1197  }
1198 
1199 
1200 
1201  void
1202  CollectiveMutex::unlock(const MPI_Comm &comm)
1203  {
1204  (void)comm;
1205 
1206  // First check if this function is called during exception handling
1207  // if so, abort. This can happen if a ScopedLock is destroyed.
1208  internal::CollectiveMutexImplementation::check_exception();
1209 
1210  Assert(
1211  locked,
1212  ExcMessage(
1213  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1214 
1215 #ifdef DEAL_II_WITH_MPI
1216 
1217  // TODO: For now, we implement this mutex with a blocking barrier
1218  // in the lock and unlock. It needs to be tested whether we can move
1219  // to a non-blocking barrier (code disabled below):
1220 # if 0
1221  const int ierr = MPI_Ibarrier(comm, &request);
1222  AssertThrowMPI(ierr);
1223 # else
1224  const int ierr = MPI_Barrier(comm);
1225  AssertThrowMPI(ierr);
1226 # endif
1227 #endif
1228 
1229  locked = false;
1230  }
1231 
1232 
1233 #ifndef DOXYGEN
1234  // explicit instantiations
1235 
1236  // booleans aren't in MPI_SCALARS
1237  template bool
1238  reduce(const bool &,
1239  const MPI_Comm &,
1240  const std::function<bool(const bool &, const bool &)> &,
1241  const unsigned int);
1242 
1243  template std::vector<bool>
1244  reduce(const std::vector<bool> &,
1245  const MPI_Comm &,
1246  const std::function<std::vector<bool>(const std::vector<bool> &,
1247  const std::vector<bool> &)> &,
1248  const unsigned int);
1249 
1250  template bool
1251  all_reduce(const bool &,
1252  const MPI_Comm &,
1253  const std::function<bool(const bool &, const bool &)> &);
1254 
1255  template std::vector<bool>
1256  all_reduce(
1257  const std::vector<bool> &,
1258  const MPI_Comm &,
1259  const std::function<std::vector<bool>(const std::vector<bool> &,
1260  const std::vector<bool> &)> &);
1261 
1262  // We need an explicit instantiation of this for the same reason as the
1263  // other types described in mpi.inst.in
1264  template void
1265  internal::all_reduce<bool>(const MPI_Op &,
1266  const ArrayView<const bool> &,
1267  const MPI_Comm &,
1268  const ArrayView<bool> &);
1269 
1270 
1271  template bool
1272  logical_or<bool>(const bool &, const MPI_Comm &);
1273 
1274 
1275  template void
1276  logical_or<bool>(const ArrayView<const bool> &,
1277  const MPI_Comm &,
1278  const ArrayView<bool> &);
1279 
1280 
1281  template std::vector<unsigned int>
1282  compute_set_union(const std::vector<unsigned int> &vec,
1283  const MPI_Comm & comm);
1284 
1285 
1286  template std::set<unsigned int>
1287  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1288 #endif
1289 
1290 #include "mpi.inst"
1291  } // end of namespace MPI
1292 } // end of namespace Utilities
1293 
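As a quick orientation for readers of this listing, here is a minimal usage sketch of the utilities defined above. It is not part of mpi.cc; the program, the communicator choice, and the constants (such as the 1000 indices) are illustrative assumptions only.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <iostream>

int main(int argc, char *argv[])
{
  using namespace dealii;

  // Set up MPI (plus PETSc/SLEPc, p4est, Zoltan, and Kokkos if deal.II was
  // configured with them); everything is finalized again when this object
  // goes out of scope.
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const MPI_Comm     comm   = MPI_COMM_WORLD;
  const unsigned int rank   = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_proc = Utilities::MPI::n_mpi_processes(comm);

  // Collective min/max/average of one value per process, including the
  // ranks on which the extrema are attained.
  const Utilities::MPI::MinMaxAvg stats =
    Utilities::MPI::min_max_avg(static_cast<double>(rank), comm);

  // Evenly partition 1000 indices among all processes; each rank obtains a
  // contiguous IndexSet describing the indices it owns.
  const IndexSet locally_owned =
    Utilities::MPI::create_evenly_distributed_partitioning(comm, 1000);

  if (rank == 0)
    std::cout << "processes: " << n_proc << ", average rank: " << stats.avg
              << ", maximum attained on rank " << stats.max_index
              << ", locally owned size: " << locally_owned.n_elements()
              << std::endl;
}

Note that, as the assertion in the MPI_InitFinalize constructor above enforces, only a single object of that class may be created per program run.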