Reference documentation for deal.II version Git 71165045b9 2021-01-23 20:21:06 -0500
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/utilities.h>
25 
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/vector_memory.h>
29 
30 #include <iostream>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #ifdef DEAL_II_WITH_TRILINOS
36 # ifdef DEAL_II_WITH_MPI
37 # include <deal.II/lac/trilinos_parallel_block_vector.h>
38 # include <deal.II/lac/trilinos_vector.h>
39 
40 # include <Epetra_MpiComm.h>
41 # endif
42 #endif
43 
44 #ifdef DEAL_II_WITH_PETSC
45 # include <deal.II/lac/petsc_block_vector.h>
46 # include <deal.II/lac/petsc_vector.h>
47 
48 # include <petscsys.h>
49 #endif
50 
51 #ifdef DEAL_II_WITH_SLEPC
52 # include <deal.II/lac/slepc_solver.h>
53 
54 # include <slepcsys.h>
55 #endif
56 
57 #ifdef DEAL_II_WITH_P4EST
58 # include <p4est_bits.h>
59 #endif
60 
61 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
62 # include <zoltan_cpp.h>
63 #endif
64 
65 DEAL_II_NAMESPACE_OPEN
66 
67 
68 namespace Utilities
69 {
70  IndexSet
71  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
72  const unsigned int n_partitions,
73  const IndexSet::size_type total_size)
74  {
75  const unsigned int remain = total_size % n_partitions;
76 
77  const IndexSet::size_type min_size = total_size / n_partitions;
78 
79  const IndexSet::size_type begin =
80  min_size * my_partition_id + std::min(my_partition_id, remain);
81  const IndexSet::size_type end =
82  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
83  IndexSet result(total_size);
84  result.add_range(begin, end);
85  return result;
86  }
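 // Worked example (illustrative): for total_size = 10 and n_partitions = 3
 // we get remain = 1 and min_size = 3, so the resulting ranges are
 //   partition 0: [0,4), partition 1: [4,7), partition 2: [7,10),
 // i.e. the remainder is absorbed by the first partition(s).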
87 
88  namespace MPI
89  {
90  MinMaxAvg
91  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
92  {
93  MinMaxAvg result;
94  min_max_avg(ArrayView<const double>(my_value),
95  ArrayView<MinMaxAvg>(result),
96  mpi_communicator);
97 
98  return result;
99  }
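 // Usage sketch (assuming an initialized MPI run; `local_error` is a
 // hypothetical per-process value):
 //
 // @code
 //   const MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(local_error, MPI_COMM_WORLD);
 //   // stats.min/.max/.avg are the same on every rank; stats.min_index
 //   // and stats.max_index hold the ranks owning the extrema.
 // @endcode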
100 
101 
102 
103  std::vector<MinMaxAvg>
104  min_max_avg(const std::vector<double> &my_values,
105  const MPI_Comm & mpi_communicator)
106  {
107  std::vector<MinMaxAvg> results(my_values.size());
108  min_max_avg(my_values, results, mpi_communicator);
109 
110  return results;
111  }
112 
113 
114 
115 #ifdef DEAL_II_WITH_MPI
116  unsigned int
117  n_mpi_processes(const MPI_Comm &mpi_communicator)
118  {
119  int n_jobs = 1;
120  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
121  AssertThrowMPI(ierr);
122 
123  return n_jobs;
124  }
125 
126 
127  unsigned int
128  this_mpi_process(const MPI_Comm &mpi_communicator)
129  {
130  int rank = 0;
131  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
132  AssertThrowMPI(ierr);
133 
134  return rank;
135  }
136 
137 
138 
139  const std::vector<unsigned int>
140  mpi_processes_within_communicator(const MPI_Comm &comm_large,
141  const MPI_Comm &comm_small)
142  {
143  if (Utilities::MPI::job_supports_mpi() == false)
144  return std::vector<unsigned int>{0};
145 
146  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
147  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
148 
149  std::vector<unsigned int> ranks(size);
150  MPI_Allgather(
151  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
152 
153  return ranks;
154  }
155 
156 
157 
158  MPI_Comm
159  duplicate_communicator(const MPI_Comm &mpi_communicator)
160  {
161  MPI_Comm new_communicator;
162  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
163  AssertThrowMPI(ierr);
164  return new_communicator;
165  }
166 
167 
168 
169  void
170  free_communicator(MPI_Comm &mpi_communicator)
171  {
172  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
173  const int ierr = MPI_Comm_free(&mpi_communicator);
174  AssertThrowMPI(ierr);
175  }
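 // Note: a communicator obtained from duplicate_communicator() should be
 // released again with free_communicator() once it is no longer needed:
 //
 // @code
 //   MPI_Comm dup = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
 //   // ... use `dup` for separate communication ...
 //   Utilities::MPI::free_communicator(dup);
 // @endcode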
176 
177 
178 
179  int
180  create_group(const MPI_Comm & comm,
181  const MPI_Group &group,
182  const int tag,
183  MPI_Comm * new_comm)
184  {
185 # if DEAL_II_MPI_VERSION_GTE(3, 0)
186  return MPI_Comm_create_group(comm, group, tag, new_comm);
187 # else
188  int rank;
189  int ierr = MPI_Comm_rank(comm, &rank);
190  AssertThrowMPI(ierr);
191 
192  int grp_rank;
193  ierr = MPI_Group_rank(group, &grp_rank);
194  AssertThrowMPI(ierr);
195  if (grp_rank == MPI_UNDEFINED)
196  {
197  *new_comm = MPI_COMM_NULL;
198  return MPI_SUCCESS;
199  }
200 
201  int grp_size;
202  ierr = MPI_Group_size(group, &grp_size);
203  AssertThrowMPI(ierr);
204 
205  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
206  AssertThrowMPI(ierr);
207 
208  MPI_Group parent_grp;
209  ierr = MPI_Comm_group(comm, &parent_grp);
210  AssertThrowMPI(ierr);
211 
212  std::vector<int> pids(grp_size);
213  std::vector<int> grp_pids(grp_size);
214  std::iota(grp_pids.begin(), grp_pids.end(), 0);
215  ierr = MPI_Group_translate_ranks(
216  group, grp_size, grp_pids.data(), parent_grp, pids.data());
217  AssertThrowMPI(ierr);
218  ierr = MPI_Group_free(&parent_grp);
219  AssertThrowMPI(ierr);
220 
221  MPI_Comm comm_old = *new_comm;
222  MPI_Comm ic;
223  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
224  {
225  const int gid = grp_rank / merge_sz;
226  comm_old = *new_comm;
227  if (gid % 2 == 0)
228  {
229  if ((gid + 1) * merge_sz < grp_size)
230  {
231  ierr = (MPI_Intercomm_create(
232  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
233  AssertThrowMPI(ierr);
234  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
235  AssertThrowMPI(ierr);
236  }
237  }
238  else
239  {
240  ierr = MPI_Intercomm_create(
241  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
242  AssertThrowMPI(ierr);
243  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
244  AssertThrowMPI(ierr);
245  }
246  if (*new_comm != comm_old)
247  {
248  ierr = MPI_Comm_free(&ic);
249  AssertThrowMPI(ierr);
250  ierr = MPI_Comm_free(&comm_old);
251  AssertThrowMPI(ierr);
252  }
253  }
254 
255  return MPI_SUCCESS;
256 # endif
257  }
258 
259 
260 
261  std::vector<IndexSet>
262  create_ascending_partitioning(const MPI_Comm & comm,
263  const IndexSet::size_type local_size)
264  {
265  const unsigned int n_proc = n_mpi_processes(comm);
266  const std::vector<IndexSet::size_type> sizes =
267  all_gather(comm, local_size);
268  const auto total_size =
269  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
270 
271  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
272 
273  IndexSet::size_type begin = 0;
274  for (unsigned int i = 0; i < n_proc; ++i)
275  {
276  res[i].add_range(begin, begin + sizes[i]);
277  begin = begin + sizes[i];
278  }
279 
280  return res;
281  }
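 // Example (illustrative): with three processes contributing local_size
 // values 2, 3 and 5, every process receives the same vector of IndexSets
 // describing the contiguous, ascending ranges [0,2), [2,5) and [5,10),
 // ordered by rank.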
282 
283  IndexSet
284  create_evenly_distributed_partitioning(const MPI_Comm & comm,
285  const IndexSet::size_type total_size)
286  {
287  const unsigned int this_proc = this_mpi_process(comm);
288  const unsigned int n_proc = n_mpi_processes(comm);
289 
290  return Utilities::create_evenly_distributed_partitioning(this_proc,
291  n_proc,
292  total_size);
293  }
294 
295 
296 
301  class ConsensusAlgorithmsProcessTargets
302  : public ConsensusAlgorithms::Process<unsigned int, unsigned int>
303  {
304  public:
305  ConsensusAlgorithmsProcessTargets(const std::vector<unsigned int> &target)
306  : target(target)
307  {}
308 
309  using T1 = unsigned int;
310  using T2 = unsigned int;
311 
312  virtual void
313  answer_request(const unsigned int other_rank,
314  const std::vector<T1> &,
315  std::vector<T2> &) override
316  {
317  this->sources.push_back(other_rank);
318  }
319 
325  virtual std::vector<unsigned int>
326  compute_targets() override
327  {
328  return target;
329  }
330 
336  std::vector<unsigned int>
337  get_result()
338  {
339  std::sort(sources.begin(), sources.end());
340  return sources;
341  }
342 
343  private:
347  const std::vector<unsigned int> &target;
348 
352  std::vector<unsigned int> sources;
353  };
354 
355 
356 
357  std::vector<unsigned int>
358  compute_point_to_point_communication_pattern(
359  const MPI_Comm & mpi_comm,
360  const std::vector<unsigned int> &destinations)
361  {
362  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
363  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
364  (void)myid;
365  (void)n_procs;
366 
367  for (const unsigned int destination : destinations)
368  {
369  (void)destination;
370  AssertIndexRange(destination, n_procs);
371  Assert(destination != myid,
372  ExcMessage(
373  "There is no point in communicating with ourselves."));
374  }
375 
376 # if DEAL_II_MPI_VERSION_GTE(3, 0)
377 
378  ConsensusAlgorithmsProcessTargets process(destinations);
379  ConsensusAlgorithms::NBX<ConsensusAlgorithmsProcessTargets::T1,
380  ConsensusAlgorithmsProcessTargets::T2>
381  consensus_algorithm(process, mpi_comm);
382  consensus_algorithm.run();
383  return process.get_result();
384 
385 # elif DEAL_II_MPI_VERSION_GTE(2, 2)
386 
387  static CollectiveMutex mutex;
388  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
389 
390  const int mpi_tag =
391  internal::Tags::compute_point_to_point_communication_pattern;
392 
393  // Calculate the number of messages to send to each process
394  std::vector<unsigned int> dest_vector(n_procs);
395  for (const auto &el : destinations)
396  ++dest_vector[el];
397 
398  // Find how many processes will send to this one
399  // by reducing with sum and then scattering the
400  // results over all processes
401  unsigned int n_recv_from;
402  const int ierr = MPI_Reduce_scatter_block(
403  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
404 
405  AssertThrowMPI(ierr);
406 
407  // Send myid to every process in `destinations` vector...
408  std::vector<MPI_Request> send_requests(destinations.size());
409  for (const auto &el : destinations)
410  {
411  const int ierr =
412  MPI_Isend(&myid,
413  1,
414  MPI_UNSIGNED,
415  el,
416  mpi_tag,
417  mpi_comm,
418  send_requests.data() + (&el - destinations.data()));
419  AssertThrowMPI(ierr);
420  }
421 
422 
423  // Receive `n_recv_from` times from the processes
424  // who communicate with this one. Store the obtained id's
425  // in the resulting vector
426  std::vector<unsigned int> origins(n_recv_from);
427  for (auto &el : origins)
428  {
429  const int ierr = MPI_Recv(&el,
430  1,
431  MPI_UNSIGNED,
432  MPI_ANY_SOURCE,
433  mpi_tag,
434  mpi_comm,
435  MPI_STATUS_IGNORE);
436  AssertThrowMPI(ierr);
437  }
438 
439  if (destinations.size() > 0)
440  {
441  const int ierr = MPI_Waitall(destinations.size(),
442  send_requests.data(),
443  MPI_STATUSES_IGNORE);
444  AssertThrowMPI(ierr);
445  }
446 
447  return origins;
448 # else
449  // let all processors communicate the maximal number of destinations
450  // they have
451  const unsigned int max_n_destinations =
452  Utilities::MPI::max(destinations.size(), mpi_comm);
453 
454  if (max_n_destinations == 0)
455  // all processes have nothing to send/receive:
456  return std::vector<unsigned int>();
457 
458  // now that we know the number of data packets every processor wants to
459  // send, set up a buffer with the maximal size and copy our destinations
460  // in there, padded with -1's
461  std::vector<unsigned int> my_destinations(max_n_destinations,
462  numbers::invalid_unsigned_int);
463  std::copy(destinations.begin(),
464  destinations.end(),
465  my_destinations.begin());
466 
467  // now exchange these (we could communicate less data if we used
468  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
469  // processors in this case, which is more expensive than the reduction
470  // operation above in MPI_Allreduce)
471  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
472  const int ierr = MPI_Allgather(my_destinations.data(),
473  max_n_destinations,
474  MPI_UNSIGNED,
475  all_destinations.data(),
476  max_n_destinations,
477  MPI_UNSIGNED,
478  mpi_comm);
479  AssertThrowMPI(ierr);
480 
481  // now we know who is going to communicate with whom. collect who is
482  // going to communicate with us!
483  std::vector<unsigned int> origins;
484  for (unsigned int i = 0; i < n_procs; ++i)
485  for (unsigned int j = 0; j < max_n_destinations; ++j)
486  if (all_destinations[i * max_n_destinations + j] == myid)
487  origins.push_back(i);
488  else if (all_destinations[i * max_n_destinations + j] ==
489  numbers::invalid_unsigned_int)
490  break;
491 
492  return origins;
493 # endif
494  }
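 // Example (illustrative): on three ranks, if rank 0 passes
 // destinations = {1}, rank 1 passes {0, 2}, and rank 2 passes {1}, then
 // the returned origins are {1} on rank 0, {0, 2} on rank 1, and {1} on
 // rank 2 (possibly in a different order, depending on which of the
 // branches above was taken).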
495 
496 
497 
498  unsigned int
499  compute_n_point_to_point_communications(
500  const MPI_Comm & mpi_comm,
501  const std::vector<unsigned int> &destinations)
502  {
503  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
504 
505  for (const unsigned int destination : destinations)
506  {
507  (void)destination;
508  AssertIndexRange(destination, n_procs);
509  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
510  ExcMessage(
511  "There is no point in communicating with ourselves."));
512  }
513 
514  // Calculate the number of messages to send to each process
515  std::vector<unsigned int> dest_vector(n_procs);
516  for (const auto &el : destinations)
517  ++dest_vector[el];
518 
519 # if DEAL_II_MPI_VERSION_GTE(2, 2)
520  // Find out how many processes will send to this one
521  // MPI_Reduce_scatter(_block) does exactly this
522  unsigned int n_recv_from = 0;
523 
524  const int ierr = MPI_Reduce_scatter_block(
525  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
526 
527  AssertThrowMPI(ierr);
528 
529  return n_recv_from;
530 # else
531  // Find out how many processes will send to this one
532  // by reducing with sum and then scattering the
533  // results over all processes
534  std::vector<unsigned int> buffer(dest_vector.size());
535  unsigned int n_recv_from = 0;
536 
537  MPI_Reduce(dest_vector.data(),
538  buffer.data(),
539  dest_vector.size(),
540  MPI_UNSIGNED,
541  MPI_SUM,
542  0,
543  mpi_comm);
544  MPI_Scatter(buffer.data(),
545  1,
546  MPI_UNSIGNED,
547  &n_recv_from,
548  1,
549  MPI_UNSIGNED,
550  0,
551  mpi_comm);
552 
553  return n_recv_from;
554 # endif
555  }
556 
557 
558 
559  namespace
560  {
561  // custom MPI_Op for the min_max_avg() reduction below
562  void
563  max_reduce(const void *in_lhs_,
564  void * inout_rhs_,
565  int * len,
566  MPI_Datatype *)
567  {
568  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
569  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
570 
571  for (int i = 0; i < *len; i++)
572  {
573  inout_rhs[i].sum += in_lhs[i].sum;
574  if (inout_rhs[i].min > in_lhs[i].min)
575  {
576  inout_rhs[i].min = in_lhs[i].min;
577  inout_rhs[i].min_index = in_lhs[i].min_index;
578  }
579  else if (inout_rhs[i].min == in_lhs[i].min)
580  {
581  // choose lower cpu index when tied to make operator commutative
582  if (inout_rhs[i].min_index > in_lhs[i].min_index)
583  inout_rhs[i].min_index = in_lhs[i].min_index;
584  }
585 
586  if (inout_rhs[i].max < in_lhs[i].max)
587  {
588  inout_rhs[i].max = in_lhs[i].max;
589  inout_rhs[i].max_index = in_lhs[i].max_index;
590  }
591  else if (inout_rhs[i].max == in_lhs[i].max)
592  {
593  // choose lower cpu index when tied to make operator commutative
594  if (inout_rhs[i].max_index > in_lhs[i].max_index)
595  inout_rhs[i].max_index = in_lhs[i].max_index;
596  }
597  }
598  }
599  } // namespace
600 
601 
602 
603  void
604  min_max_avg(const ArrayView<const double> &my_values,
605  const ArrayView<MinMaxAvg> & result,
606  const MPI_Comm & mpi_communicator)
607  {
608  // If MPI was not started, we have a serial computation and cannot run
609  // the other MPI commands
610  if (job_supports_mpi() == false ||
611  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
612  {
613  for (unsigned int i = 0; i < my_values.size(); i++)
614  {
615  result[i].sum = my_values[i];
616  result[i].avg = my_values[i];
617  result[i].min = my_values[i];
618  result[i].max = my_values[i];
619  result[i].min_index = 0;
620  result[i].max_index = 0;
621  }
622  return;
623  }
624 
625  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
626  Utilities::MPI::max(my_values.size(), mpi_communicator));
627 
628  AssertDimension(my_values.size(), result.size());
629 
630 
631 
632  // To avoid uninitialized values on some MPI implementations, provide
633  // result with a default value already...
634  MinMaxAvg dummy = {0.,
635  std::numeric_limits<double>::max(),
636  -std::numeric_limits<double>::max(),
637  0,
638  0,
639  0.};
640 
641  for (auto &i : result)
642  i = dummy;
643 
644  const unsigned int my_id =
645  dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
646  const unsigned int numproc =
647  dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
648 
649  MPI_Op op;
650  int ierr =
651  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
652  true,
653  &op);
654  AssertThrowMPI(ierr);
655 
656  std::vector<MinMaxAvg> in(my_values.size());
657 
658  for (unsigned int i = 0; i < my_values.size(); i++)
659  {
660  in[i].sum = in[i].min = in[i].max = my_values[i];
661  in[i].min_index = in[i].max_index = my_id;
662  }
663 
664  MPI_Datatype type;
665  int lengths[] = {3, 2, 1};
666  MPI_Aint displacements[] = {0,
667  offsetof(MinMaxAvg, min_index),
668  offsetof(MinMaxAvg, avg)};
669  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
670 
671  ierr = MPI_Type_create_struct(3, lengths, displacements, types, &type);
672  AssertThrowMPI(ierr);
673 
674  ierr = MPI_Type_commit(&type);
675  AssertThrowMPI(ierr);
676  ierr = MPI_Allreduce(
677  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
678  AssertThrowMPI(ierr);
679 
680  ierr = MPI_Type_free(&type);
681  AssertThrowMPI(ierr);
682 
683  ierr = MPI_Op_free(&op);
684  AssertThrowMPI(ierr);
685 
686  for (auto &r : result)
687  r.avg = r.sum / numproc;
688  }
689 
690 
691 #else
692 
693  unsigned int
694  n_mpi_processes(const MPI_Comm &)
695  {
696  return 1;
697  }
698 
699 
700 
701  unsigned int
702  this_mpi_process(const MPI_Comm &)
703  {
704  return 0;
705  }
706 
707 
708 
709  const std::vector<unsigned int>
710  mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
711  {
712  return std::vector<unsigned int>{0};
713  }
714 
715 
716 
717  std::vector<IndexSet>
718  create_ascending_partitioning(const MPI_Comm & /*comm*/,
719  const IndexSet::size_type local_size)
720  {
721  return std::vector<IndexSet>(1, complete_index_set(local_size));
722  }
723 
724  IndexSet
725  create_evenly_distributed_partitioning(const MPI_Comm & /*comm*/,
726  const IndexSet::size_type total_size)
727  {
728  return complete_index_set(total_size);
729  }
730 
731 
732 
733  MPI_Comm
734  duplicate_communicator(const MPI_Comm &mpi_communicator)
735  {
736  return mpi_communicator;
737  }
738 
739 
740 
741  void
742  free_communicator(MPI_Comm & /*mpi_communicator*/)
743  {}
744 
745 
746 
747  void
748  min_max_avg(const ArrayView<const double> &my_values,
749  const ArrayView<MinMaxAvg> & result,
750  const MPI_Comm &)
751  {
752  AssertDimension(my_values.size(), result.size());
753 
754  for (unsigned int i = 0; i < my_values.size(); i++)
755  {
756  result[i].sum = my_values[i];
757  result[i].avg = my_values[i];
758  result[i].min = my_values[i];
759  result[i].max = my_values[i];
760  result[i].min_index = 0;
761  result[i].max_index = 0;
762  }
763  }
764 
765 #endif
766 
767 
768 
769  MPI_InitFinalize::MPI_InitFinalize(int & argc,
770  char **& argv,
771  const unsigned int max_num_threads)
772  {
773  static bool constructor_has_already_run = false;
774  (void)constructor_has_already_run;
775  Assert(constructor_has_already_run == false,
776  ExcMessage("You can only create a single object of this class "
777  "in a program since it initializes the MPI system."));
778 
779 
780  int ierr = 0;
781 #ifdef DEAL_II_WITH_MPI
782  // if we have PETSc, we will initialize it and let it handle MPI.
783  // Otherwise, we will do it.
784  int MPI_has_been_started = 0;
785  ierr = MPI_Initialized(&MPI_has_been_started);
786  AssertThrowMPI(ierr);
787  AssertThrow(MPI_has_been_started == 0,
788  ExcMessage("MPI error. You can only start MPI once!"));
789 
790  int provided;
791  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
792  // we might use several threads but never call two MPI functions at the
793  // same time. For an explanation of why we do this, see
794  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
795  int wanted = MPI_THREAD_SERIALIZED;
796  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
797  AssertThrowMPI(ierr);
798 
799  // disable for now because at least some implementations always return
800  // MPI_THREAD_SINGLE.
801  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
802  // ExcMessage("MPI reports that we are not allowed to use multiple
803  // threads."));
804 #else
805  // make sure the compiler doesn't warn about these variables
806  (void)argc;
807  (void)argv;
808  (void)ierr;
809 #endif
810 
811  // we are allowed to call MPI_Init ourselves and PETScInitialize will
812  // detect this. This allows us to use MPI_Init_thread instead.
813 #ifdef DEAL_II_WITH_PETSC
814 # ifdef DEAL_II_WITH_SLEPC
815  // Initialize SLEPc (with PETSc):
816  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
817  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
818 # else
819  // or just initialize PETSc alone:
820  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
821  AssertThrow(ierr == 0, ExcPETScError(ierr));
822 # endif
823 
824  // Disable PETSc exception handling. This just prints a large wall
825  // of text that is not particularly helpful for what we do:
826  PetscPopSignalHandler();
827 #endif
828 
829  // Initialize zoltan
830 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
831  float version;
832  Zoltan_Initialize(argc, argv, &version);
833 #endif
834 
835 #ifdef DEAL_II_WITH_P4EST
836  // Initialize p4est and libsc components
837 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
838 # else
839  // This feature is broken in version 2.0.0 for calls to
840  // MPI_Comm_create_group (see cburstedde/p4est#30).
841  // Disabling it leads to more verbose p4est error messages
842  // which should be fine.
843  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
844 # endif
845  p4est_init(nullptr, SC_LP_SILENT);
846 #endif
847 
848  constructor_has_already_run = true;
849 
850 
851  // Now also see how many threads we'd like to run
852  if (max_num_threads != numbers::invalid_unsigned_int)
853  {
854  // set maximum number of threads (also respecting the environment
855  // variable that the called function evaluates) based on what the
856  // user asked
857  MultithreadInfo::set_thread_limit(max_num_threads);
858  }
859  else
860  // user wants automatic choice
861  {
862 #ifdef DEAL_II_WITH_MPI
863  // we need to figure out how many MPI processes there are on the
864  // current node, as well as how many CPU cores we have. for the
865  // first task, check what get_hostname() returns and then do an
866  // allgather so each processor gets the answer
867  //
868  // in calculating the length of the string, don't forget the
869  // terminating \0 on C-style strings
870  const std::string hostname = Utilities::System::get_hostname();
871  const unsigned int max_hostname_size =
872  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
873  std::vector<char> hostname_array(max_hostname_size);
874  std::copy(hostname.c_str(),
875  hostname.c_str() + hostname.size() + 1,
876  hostname_array.begin());
877 
878  std::vector<char> all_hostnames(max_hostname_size *
879  MPI::n_mpi_processes(MPI_COMM_WORLD));
880  const int ierr = MPI_Allgather(hostname_array.data(),
881  max_hostname_size,
882  MPI_CHAR,
883  all_hostnames.data(),
884  max_hostname_size,
885  MPI_CHAR,
886  MPI_COMM_WORLD);
887  AssertThrowMPI(ierr);
888 
889  // count how often our own hostname appears and determine which
890  // instance of it the current process represents
891  unsigned int n_local_processes = 0;
892  unsigned int nth_process_on_host = 0;
893  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
894  ++i)
895  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
896  hostname)
897  {
898  ++n_local_processes;
899  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
900  ++nth_process_on_host;
901  }
902  Assert(nth_process_on_host > 0, ExcInternalError());
903 
904 
905  // compute how many cores each process gets. if the number does not
906  // divide evenly, then we get one more core if we are among the
907  // first few processes
908  //
909  // if the number would be zero, round up to one since every process
910  // needs to have at least one thread
911  const unsigned int n_threads =
912  std::max(MultithreadInfo::n_cores() / n_local_processes +
913  (nth_process_on_host <=
914  MultithreadInfo::n_cores() % n_local_processes ?
915  1 :
916  0),
917  1U);
918 #else
919  const unsigned int n_threads = MultithreadInfo::n_cores();
920 #endif
921 
922  // finally set this number of threads
923  MultithreadInfo::set_thread_limit(n_threads);
924  }
925  }
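 // Usage sketch (the usual pattern in deal.II programs; the `main()`
 // shown here is of course not part of this file):
 //
 // @code
 //   int main(int argc, char **argv)
 //   {
 //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 //     // ... run the parallel program. MPI (and PETSc/SLEPc/p4est, if
 //     // enabled) is finalized automatically when this object goes out
 //     // of scope.
 //   }
 // @endcode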
926 
927 
928 
929  void
930  MPI_InitFinalize::register_request(MPI_Request &request)
931  {
932  // insert if it is not in the set already:
933  requests.insert(&request);
934  }
935 
936 
937 
938  void
939  MPI_InitFinalize::unregister_request(MPI_Request &request)
940  {
941  Assert(
942  requests.find(&request) != requests.end(),
943  ExcMessage(
944  "You tried to call unregister_request() with an invalid request."));
945 
946  requests.erase(&request);
947  }
948 
949 
950 
951  std::set<MPI_Request *> MPI_InitFinalize::requests;
952 
953 
954 
955  MPI_InitFinalize::~MPI_InitFinalize()
956  {
957  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
958  // are no longer used at this point. this is relevant because the static
959  // object destructors run for these vectors at the end of the program
960  // would run after MPI_Finalize is called, leading to errors
961 
962 #ifdef DEAL_II_WITH_MPI
963  // Before exiting, wait for nonblocking communication to complete:
964  for (auto request : requests)
965  {
966  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
967  AssertThrowMPI(ierr);
968  }
969 
970  // Start with deal.II MPI vectors and delete vectors from the pools:
971  GrowingVectorMemory<
972  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
973  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
974  release_unused_memory();
975  GrowingVectorMemory<
976  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
977  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
978  release_unused_memory();
979 
980  // Next with Trilinos:
981 # if defined(DEAL_II_WITH_TRILINOS)
982  GrowingVectorMemory<
983  TrilinosWrappers::MPI::Vector>::release_unused_memory();
984  GrowingVectorMemory<
985  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
986 # endif
987 #endif
988 
989 
990  // Now deal with PETSc (with or without MPI). Only delete the vectors if
991  // finalize hasn't been called yet, otherwise this will lead to errors.
992 #ifdef DEAL_II_WITH_PETSC
993  if ((PetscInitializeCalled == PETSC_TRUE) &&
994  (PetscFinalizeCalled == PETSC_FALSE))
995  {
996  GrowingVectorMemory<
997  PETScWrappers::MPI::Vector>::release_unused_memory();
998  GrowingVectorMemory<
999  PETScWrappers::MPI::BlockVector>::release_unused_memory();
1000 
1001 # ifdef DEAL_II_WITH_SLEPC
1002  // and now end SLEPc (with PETSc)
1003  SlepcFinalize();
1004 # else
1005  // or just end PETSc.
1006  PetscFinalize();
1007 # endif
1008  }
1009 #endif
1010 
1011 // There is a similar issue with CUDA: The destructor of static objects might
1012 // run after the CUDA driver is unloaded. Hence, also release all memory
1013 // related to CUDA vectors.
1014 #ifdef DEAL_II_WITH_CUDA
1015  GrowingVectorMemory<
1016  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
1017  release_unused_memory();
1018  GrowingVectorMemory<
1019  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
1020  release_unused_memory();
1021 #endif
1022 
1023 #ifdef DEAL_II_WITH_P4EST
1024  // now end p4est and libsc
1025  // Note: p4est has no finalize function
1026  sc_finalize();
1027 #endif
1028 
1029 
1030  // only MPI_Finalize if we are running with MPI. We also need to do this
1031  // when running PETSc, because we initialize MPI ourselves before
1032  // calling PetscInitialize
1033 #ifdef DEAL_II_WITH_MPI
1034  if (job_supports_mpi() == true)
1035  {
1036 # if __cpp_lib_uncaught_exceptions >= 201411
1037  // std::uncaught_exception() is deprecated in c++17
1038  if (std::uncaught_exceptions() > 0)
1039 # else
1040  if (std::uncaught_exception() == true)
1041 # endif
1042  {
1043  // do not try to call MPI_Finalize to avoid a deadlock.
1044  }
1045  else
1046  {
1047  const int ierr = MPI_Finalize();
1048  (void)ierr;
1049  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1050  }
1051  }
1052 #endif
1053  }
1054 
1055 
1056 
1057  bool
1058  job_supports_mpi()
1059  {
1060 #ifdef DEAL_II_WITH_MPI
1061  int MPI_has_been_started = 0;
1062  const int ierr = MPI_Initialized(&MPI_has_been_started);
1063  AssertThrowMPI(ierr);
1064 
1065  return (MPI_has_been_started > 0);
1066 #else
1067  return false;
1068 #endif
1069  }
1070 
1071 
1072 
1073  std::vector<unsigned int>
1074  compute_index_owner(const IndexSet &owned_indices,
1075  const IndexSet &indices_to_look_up,
1076  const MPI_Comm &comm)
1077  {
1078  Assert(owned_indices.size() == indices_to_look_up.size(),
1079  ExcMessage("IndexSets have to have the same sizes."));
1080 
1081  Assert(
1082  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1083  ExcMessage("IndexSets have to have the same size on all processes."));
1084 
1085  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1086 
1087  // Step 1: setup dictionary
1088  // The input owned_indices can be partitioned arbitrarily. In the
1089  // dictionary, the index set is statically repartitioned among the
1090  // processes again and extended with information about the actual owner
1091  // of that index.
1092  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1093  owned_indices, indices_to_look_up, comm, owning_ranks);
1094 
1095  // Step 2: read dictionary
1096  // Communicate with the process who owns the index in the static
1097  // partition (i.e. in the dictionary). This process returns the actual
1098  // owner of the index.
1099  ConsensusAlgorithms::Selector<
1100  std::pair<types::global_dof_index, types::global_dof_index>,
1101  unsigned int>
1102  consensus_algorithm(process, comm);
1103  consensus_algorithm.run();
1104 
1105  return owning_ranks;
1106  }
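 // Example (illustrative): if rank 0 owns indices [0,5) and rank 1 owns
 // [5,10), and rank 0 asks for indices_to_look_up = {7}, then the vector
 // returned on rank 0 is {1}: one owning rank per requested index, in the
 // order of the requested indices.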
1107 
1108 
1109 
1110  CollectiveMutex::CollectiveMutex()
1111  : locked(false)
1112  , request(MPI_REQUEST_NULL)
1113  {
1114  Utilities::MPI::MPI_InitFinalize::register_request(request);
1115  }
1116 
1117 
1118 
1119  CollectiveMutex::~CollectiveMutex()
1120  {
1121  Assert(
1122  !locked,
1123  ExcMessage(
1124  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1125 
1126  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1127  }
1128 
1129 
1130 
1131  void
1132  CollectiveMutex::lock(const MPI_Comm &comm)
1133  {
1134  (void)comm;
1135 
1136  Assert(
1137  !locked,
1138  ExcMessage(
1139  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1140 
1141 #ifdef DEAL_II_WITH_MPI
1142 
1143  // TODO: For now, we implement this mutex with a blocking barrier
1144  // in the lock and unlock. It needs to be tested whether we can move
1145  // to a non-blocking barrier (code disabled below).
1146 
1147  const int ierr = MPI_Barrier(comm);
1148  AssertThrowMPI(ierr);
1149 
1150 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1151  // wait for non-blocking barrier to finish. This is a noop the
1152  // first time we lock().
1153  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1154  AssertThrowMPI(ierr);
1155 # else
1156  // nothing to do as blocking barrier already completed
1157 # endif
1158 #endif
1159 
1160  locked = true;
1161  }
1162 
1163 
1164 
1165  void
1166  CollectiveMutex::unlock(const MPI_Comm &comm)
1167  {
1168  (void)comm;
1169 
1170  Assert(
1171  locked,
1172  ExcMessage(
1173  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1174 
1175 #ifdef DEAL_II_WITH_MPI
1176 
1177  // TODO: For now, we implement this mutex with a blocking barrier
1178  // in the lock and unlock. It needs to be tested whether we can move
1179  // to a non-blocking barrier (code disabled below):
1180 
1181 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1182  const int ierr = MPI_Ibarrier(comm, &request);
1183  AssertThrowMPI(ierr);
1184 # else
1185  const int ierr = MPI_Barrier(comm);
1186  AssertThrowMPI(ierr);
1187 # endif
1188 #endif
1189 
1190  locked = false;
1191  }
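 // Usage sketch: the mutex is meant to be acquired collectively by all
 // ranks of a communicator, typically via the scoped-lock idiom also used
 // in this file:
 //
 // @code
 //   static CollectiveMutex      mutex;
 //   CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 //   // ... point-to-point communication that must not interleave with
 //   // other critical sections using the same communicator ...
 // @endcode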
1192 
1193 
1194  template std::vector<unsigned int>
1195  compute_set_union(const std::vector<unsigned int> &vec,
1196  const MPI_Comm & comm);
1197 
1198 
1199  template std::set<unsigned int>
1200  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1201 
1202 #include "mpi.inst"
1203  } // end of namespace MPI
1204 } // end of namespace Utilities
1205 
1206 DEAL_II_NAMESPACE_CLOSE