mpi.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi.templates.h>
20#include <deal.II/base/mpi_compute_index_owner_internal.h>
21#include <deal.II/base/mpi_consensus_algorithms.h>
22#include <deal.II/base/mpi_large_count.h>
23#include <deal.II/base/mpi_tags.h>
24#include <deal.II/base/multithread_info.h>
25#include <deal.II/base/utilities.h>
26
27#include <deal.II/lac/la_parallel_block_vector.h>
28#include <deal.II/lac/la_parallel_vector.h>
29#include <deal.II/lac/vector_memory.h>
30
31
32#include <boost/serialization/utility.hpp>
33
34#include <Kokkos_Core.hpp>
35
36#include <iostream>
37#include <limits>
38#include <numeric>
39#include <set>
40#include <vector>
41
42#ifdef DEAL_II_WITH_TRILINOS
43# ifdef DEAL_II_WITH_MPI
44#  include <deal.II/lac/trilinos_parallel_block_vector.h>
45#  include <deal.II/lac/trilinos_vector.h>
46
47# include <Epetra_MpiComm.h>
48# endif
49#endif
50
51#ifdef DEAL_II_WITH_PETSC
52# include <deal.II/lac/petsc_block_vector.h>
53# include <deal.II/lac/petsc_vector.h>
54
55# include <petscsys.h>
56#endif
57
58#ifdef DEAL_II_WITH_SLEPC
59# include <deal.II/lac/slepc_solver.h>
60
61# include <slepcsys.h>
62#endif
63
64#ifdef DEAL_II_WITH_P4EST
65# include <p4est_bits.h>
66#endif
67
68#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
69# include <zoltan_cpp.h>
70#endif
71
72DEAL_II_NAMESPACE_OPEN
73
74
75namespace Utilities
76{
77 IndexSet
78 create_evenly_distributed_partitioning(
79 const unsigned int my_partition_id,
80 const unsigned int n_partitions,
81 const types::global_dof_index total_size)
82 {
83 static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
84 "IndexSet::size_type must match types::global_dof_index for "
85 "using this function");
86 const unsigned int remain = total_size % n_partitions;
87
88 const IndexSet::size_type min_size = total_size / n_partitions;
89
90 const IndexSet::size_type begin =
91 min_size * my_partition_id + std::min(my_partition_id, remain);
92 const IndexSet::size_type end =
93 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
94 IndexSet result(total_size);
95 result.add_range(begin, end);
96 return result;
97 }
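  // Usage sketch (illustrative only, not part of the library source):
  // distributing total_size=10 indices over n_partitions=3 yields the
  // half-open ranges [0,4), [4,7), and [7,10). Partition 1 therefore
  // receives three indices:
  //
  //   const IndexSet my_indices =
  //     Utilities::create_evenly_distributed_partitioning(
  //       /*my_partition_id=*/1, /*n_partitions=*/3, /*total_size=*/10);
  //   Assert(my_indices.n_elements() == 3, ExcInternalError());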
98
99 namespace MPI
100 {
101 MinMaxAvg
102 min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
103 {
104 MinMaxAvg result;
105 min_max_avg(ArrayView<const double>(my_value),
106 ArrayView<MinMaxAvg>(result),
107 mpi_communicator);
108
109 return result;
110 }
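    // Usage sketch (values are made up for illustration): every process
    // contributes one number and all processes receive the global
    // min/max/sum/average plus the ranks attaining the extrema:
    //
    //   const double my_value = 0.5 * this_mpi_process(mpi_communicator);
    //   const MinMaxAvg stats = min_max_avg(my_value, mpi_communicator);
    //   if (this_mpi_process(mpi_communicator) == stats.max_index)
    //     std::cout << "largest value: " << stats.max << std::endl;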
111
112
113
114 std::vector<MinMaxAvg>
115 min_max_avg(const std::vector<double> &my_values,
116 const MPI_Comm mpi_communicator)
117 {
118 std::vector<MinMaxAvg> results(my_values.size());
119 min_max_avg(my_values, results, mpi_communicator);
120
121 return results;
122 }
123
124
125
126#ifdef DEAL_II_WITH_MPI
127 unsigned int
128 n_mpi_processes(const MPI_Comm mpi_communicator)
129 {
130 if (job_supports_mpi())
131 {
132 int n_jobs = 1;
133 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
134 AssertThrowMPI(ierr);
135 return n_jobs;
136 }
137 else
138 return 1;
139 }
140
141
142 unsigned int
143 this_mpi_process(const MPI_Comm mpi_communicator)
144 {
145 if (job_supports_mpi())
146 {
147 int rank = 0;
148 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
149 AssertThrowMPI(ierr);
150 return rank;
151 }
152 else
153 return 0;
154 }
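    // Usage sketch (assuming `mpi_communicator` is a valid communicator):
    // the common pattern of letting only rank 0 produce screen output:
    //
    //   if (this_mpi_process(mpi_communicator) == 0)
    //     std::cout << "Running on "
    //               << n_mpi_processes(mpi_communicator)
    //               << " MPI processes." << std::endl;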
155
156
157
158 std::vector<unsigned int>
159 mpi_processes_within_communicator(const MPI_Comm comm_large,
160 const MPI_Comm comm_small)
161 {
162 if (Utilities::MPI::job_supports_mpi() == false)
163 return std::vector<unsigned int>{0};
164
165 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
166 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
167
168 std::vector<unsigned int> ranks(size);
169 const int ierr = MPI_Allgather(
170 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
171 AssertThrowMPI(ierr);
172
173 return ranks;
174 }
175
176
177
178 MPI_Comm
179 duplicate_communicator(const MPI_Comm mpi_communicator)
180 {
181 MPI_Comm new_communicator;
182 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
183 AssertThrowMPI(ierr);
184 return new_communicator;
185 }
186
187
188
189 void
190 free_communicator(MPI_Comm mpi_communicator)
191 {
192 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
193 const int ierr = MPI_Comm_free(&mpi_communicator);
194 AssertThrowMPI(ierr);
195 }
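    // Usage sketch (illustrative): duplicate_communicator() hands ownership
    // of the new communicator to the caller, who must eventually pass it
    // back to free_communicator():
    //
    //   MPI_Comm my_comm = duplicate_communicator(MPI_COMM_WORLD);
    //   // ... perform library-internal communication on my_comm ...
    //   free_communicator(my_comm);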
196
197
198
199 int
200 create_group(const MPI_Comm comm,
201 const MPI_Group &group,
202 const int tag,
203 MPI_Comm *new_comm)
204 {
205 const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
206 AssertThrowMPI(ierr);
207 return ierr;
208 }
209
210
211
212 std::vector<IndexSet>
213 create_ascending_partitioning(
214 const MPI_Comm comm,
215 const types::global_dof_index locally_owned_size)
216 {
217 static_assert(
218 std::is_same_v<types::global_dof_index, IndexSet::size_type>,
219 "IndexSet::size_type must match types::global_dof_index for "
220 "using this function");
221 const unsigned int n_proc = n_mpi_processes(comm);
222 const std::vector<IndexSet::size_type> sizes =
223 all_gather(comm, locally_owned_size);
224 const auto total_size =
225 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
226
227 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
228
229 IndexSet::size_type begin = 0;
230 for (unsigned int i = 0; i < n_proc; ++i)
231 {
232 res[i].add_range(begin, begin + sizes[i]);
233 begin = begin + sizes[i];
234 }
235
236 return res;
237 }
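    // Usage sketch (sizes are made up): if three processes own 2, 3, and 1
    // elements, respectively, every process obtains the three IndexSets
    // [0,2), [2,5), and [5,6):
    //
    //   const std::vector<IndexSet> partition =
    //     create_ascending_partitioning(comm, locally_owned_size);
    //   const IndexSet &my_range = partition[this_mpi_process(comm)];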
238
239
240
241 IndexSet
242 create_evenly_distributed_partitioning(
243 const MPI_Comm comm,
244 const types::global_dof_index total_size)
245 {
246 const unsigned int this_proc = this_mpi_process(comm);
247 const unsigned int n_proc = n_mpi_processes(comm);
248
249 return Utilities::create_evenly_distributed_partitioning(this_proc,
250 n_proc,
251 total_size);
252 }
253
254
255
256 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
257 create_mpi_data_type_n_bytes(const std::size_t n_bytes)
258 {
259 MPI_Datatype result;
260 int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
261 AssertThrowMPI(ierr);
262 ierr = MPI_Type_commit(&result);
263 AssertThrowMPI(ierr);
264
265# ifdef DEBUG
266 MPI_Count size64;
267 ierr = MPI_Type_size_x(result, &size64);
268 AssertThrowMPI(ierr);
269
270 Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
271# endif
272
273 // Now put the new data type into a std::unique_ptr with a custom
274 // deleter. We call the std::unique_ptr constructor whose first
275 // argument is a pointer (here, a pointer to a copy of the `result`
276 // object) and whose second argument is a pointer-to-function, for
277 // which we here use a captureless lambda function that acts as the
278 // 'deleter' object: it calls `MPI_Type_free` and then deletes the
279 // pointer. To avoid a compiler warning about a null 'this' pointer
280 // in the lambda (which doesn't make sense: the lambda doesn't store
281 // anything), we create the deleter first.
282 auto deleter = [](MPI_Datatype *p) {
283 if (p != nullptr)
284 {
285 const int ierr = MPI_Type_free(p);
286 (void)ierr;
287 AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
288
289 delete p;
290 }
291 };
292
293 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
294 new MPI_Datatype(result), deleter);
295 }
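    // Usage sketch (buffer size and peer rank are placeholders): the
    // returned datatype allows sending buffers whose byte count exceeds
    // the range of 'int' with a count of one:
    //
    //   std::vector<char> buffer(n_bytes);   // n_bytes is hypothetical
    //   const auto data_type = create_mpi_data_type_n_bytes(buffer.size());
    //   const int ierr = MPI_Send(
    //     buffer.data(), 1, *data_type, /*dest=*/1, /*tag=*/0, comm);
    //   AssertThrowMPI(ierr);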
296
297
298
299 std::vector<unsigned int>
300 compute_point_to_point_communication_pattern(
301 const MPI_Comm mpi_comm,
302 const std::vector<unsigned int> &destinations)
303 {
304 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
305 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
306 (void)myid;
307 (void)n_procs;
308
309 for (const unsigned int destination : destinations)
310 {
311 (void)destination;
312 AssertIndexRange(destination, n_procs);
313 }
314
315
316 // Have a little function that checks if destinations provided
317 // to the current process are unique. The way it does this is
318 // to create a sorted list of destinations and then walk through
319 // the list and look at successive elements -- if we find the
320 // same number twice, we know that the destinations were not
321 // unique
322 const bool my_destinations_are_unique = [destinations]() {
323 if (destinations.empty())
324 return true;
325 else
326 {
327 std::vector<unsigned int> my_destinations = destinations;
328 std::sort(my_destinations.begin(), my_destinations.end());
329 return (std::adjacent_find(my_destinations.begin(),
330 my_destinations.end()) ==
331 my_destinations.end());
332 }
333 }();
334
335 // If all processes report that they have unique destinations,
336 // then we can short-cut the process using a consensus algorithm (which
337 // is implemented only for the case of unique destinations):
338 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
339 1)
340 {
341 return ConsensusAlgorithms::nbx<char, char>(
342 destinations, {}, {}, {}, mpi_comm);
343 }
344
345 // So we need to run a different algorithm, specifically one that
346 // requires more memory -- MPI_Reduce_scatter_block will require memory
347 // proportional to the number of processes involved; that function is
348 // available for MPI 2.2 or later:
349 static CollectiveMutex mutex;
350 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
351
352 const int mpi_tag =
353 internal::Tags::compute_point_to_point_communication_pattern;
354
355 // Calculate the number of messages to send to each process
356 std::vector<unsigned int> dest_vector(n_procs);
357 for (const auto &el : destinations)
358 ++dest_vector[el];
359
360 // Find how many processes will send to this one
361 // by reducing with sum and then scattering the
362 // results over all processes
363 unsigned int n_recv_from;
364 const int ierr = MPI_Reduce_scatter_block(
365 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
366
367 AssertThrowMPI(ierr);
368
369 // Send myid to every process in `destinations` vector...
370 std::vector<MPI_Request> send_requests(destinations.size());
371 for (const auto &el : destinations)
372 {
373 const int ierr =
374 MPI_Isend(&myid,
375 1,
376 MPI_UNSIGNED,
377 el,
378 mpi_tag,
379 mpi_comm,
380 send_requests.data() + (&el - destinations.data()));
381 AssertThrowMPI(ierr);
382 }
383
384
385 // Receive `n_recv_from` times from the processes
386 // who communicate with this one. Store the obtained id's
387 // in the resulting vector
388 std::vector<unsigned int> origins(n_recv_from);
389 for (auto &el : origins)
390 {
391 const int ierr = MPI_Recv(&el,
392 1,
393 MPI_UNSIGNED,
394 MPI_ANY_SOURCE,
395 mpi_tag,
396 mpi_comm,
397 MPI_STATUS_IGNORE);
398 AssertThrowMPI(ierr);
399 }
400
401 if (destinations.size() > 0)
402 {
403 const int ierr = MPI_Waitall(destinations.size(),
404 send_requests.data(),
405 MPI_STATUSES_IGNORE);
406 AssertThrowMPI(ierr);
407 }
408
409 return origins;
410 }
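    // Usage sketch (destinations chosen arbitrarily): every process lists
    // the ranks it wants to send to; the call returns the ranks it will
    // receive from:
    //
    //   std::vector<unsigned int> destinations = {
    //     (this_mpi_process(mpi_comm) + 1) % n_mpi_processes(mpi_comm)};
    //   const std::vector<unsigned int> origins =
    //     compute_point_to_point_communication_pattern(mpi_comm,
    //                                                  destinations);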
411
412
413
414 unsigned int
415 compute_n_point_to_point_communications(
416 const MPI_Comm mpi_comm,
417 const std::vector<unsigned int> &destinations)
418 {
419 // Have a little function that checks if destinations provided
420 // to the current process are unique:
421 const bool my_destinations_are_unique = [destinations]() {
422 std::vector<unsigned int> my_destinations = destinations;
423 const unsigned int n_destinations = my_destinations.size();
424 std::sort(my_destinations.begin(), my_destinations.end());
425 my_destinations.erase(std::unique(my_destinations.begin(),
426 my_destinations.end()),
427 my_destinations.end());
428 return (my_destinations.size() == n_destinations);
429 }();
430
431 // If all processes report that they have unique destinations,
432 // then we can short-cut the process using a consensus algorithm:
433
434 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
435 1)
436 {
437 return ConsensusAlgorithms::nbx<char, char>(
438 destinations, {}, {}, {}, mpi_comm)
439 .size();
440 }
441 else
442 {
443 const unsigned int n_procs =
444 Utilities::MPI::n_mpi_processes(mpi_comm);
445
446 for (const unsigned int destination : destinations)
447 {
448 (void)destination;
449 AssertIndexRange(destination, n_procs);
450 Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
452 "There is no point in communicating with ourselves."));
453 }
454
455 // Calculate the number of messages to send to each process
456 std::vector<unsigned int> dest_vector(n_procs);
457 for (const auto &el : destinations)
458 ++dest_vector[el];
459
460 // Find out how many processes will send to this one
461 // MPI_Reduce_scatter(_block) does exactly this
462 unsigned int n_recv_from = 0;
463
464 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
465 &n_recv_from,
466 1,
467 MPI_UNSIGNED,
468 MPI_SUM,
469 mpi_comm);
470
471 AssertThrowMPI(ierr);
472
473 return n_recv_from;
474 }
475 }
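    // Companion sketch to the function above (same hypothetical
    // `destinations` vector): only the number of incoming messages is
    // returned, which suffices to post the matching receives:
    //
    //   const unsigned int n_incoming =
    //     compute_n_point_to_point_communications(mpi_comm, destinations);
    //   for (unsigned int i = 0; i < n_incoming; ++i)
    //     {
    //       // post one MPI_Probe/MPI_Recv here
    //     }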
476
477
478
479 namespace
480 {
481 // custom MPI_Op used by the min_max_avg() functions below
482 void
483 max_reduce(const void *in_lhs_,
484 void *inout_rhs_,
485 int *len,
486 MPI_Datatype *)
487 {
488 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
489 MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
490
491 for (int i = 0; i < *len; ++i)
492 {
493 inout_rhs[i].sum += in_lhs[i].sum;
494 if (inout_rhs[i].min > in_lhs[i].min)
495 {
496 inout_rhs[i].min = in_lhs[i].min;
497 inout_rhs[i].min_index = in_lhs[i].min_index;
498 }
499 else if (inout_rhs[i].min == in_lhs[i].min)
500 {
501 // choose lower cpu index when tied to make operator commutative
502 if (inout_rhs[i].min_index > in_lhs[i].min_index)
503 inout_rhs[i].min_index = in_lhs[i].min_index;
504 }
505
506 if (inout_rhs[i].max < in_lhs[i].max)
507 {
508 inout_rhs[i].max = in_lhs[i].max;
509 inout_rhs[i].max_index = in_lhs[i].max_index;
510 }
511 else if (inout_rhs[i].max == in_lhs[i].max)
512 {
513 // choose lower cpu index when tied to make operator commutative
514 if (inout_rhs[i].max_index > in_lhs[i].max_index)
515 inout_rhs[i].max_index = in_lhs[i].max_index;
516 }
517 }
518 }
519 } // namespace
520
521
522
523 void
524 min_max_avg(const ArrayView<const double> &my_values,
525 const ArrayView<MinMaxAvg> &result,
526 const MPI_Comm mpi_communicator)
527 {
528 // If MPI was not started, we have a serial computation and cannot run
529 // the other MPI commands
530 if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
531 {
532 for (unsigned int i = 0; i < my_values.size(); ++i)
533 {
534 result[i].sum = my_values[i];
535 result[i].avg = my_values[i];
536 result[i].min = my_values[i];
537 result[i].max = my_values[i];
538 result[i].min_index = 0;
539 result[i].max_index = 0;
540 }
541 return;
542 }
543
544 /*
545 * A custom MPI datatype handle describing the memory layout of the
546 * MinMaxAvg struct. It is initialized the first time control reaches
547 * this static variable, i.e., not before it is actually needed.
548 */
549 static MPI_Datatype type = []() {
550 MPI_Datatype type;
551
552 int lengths[] = {3, 2, 1};
553
554 MPI_Aint displacements[] = {0,
555 offsetof(MinMaxAvg, min_index),
556 offsetof(MinMaxAvg, avg)};
557
558 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
559
560 int ierr =
561 MPI_Type_create_struct(3, lengths, displacements, types, &type);
562 AssertThrowMPI(ierr);
563
564 ierr = MPI_Type_commit(&type);
565 AssertThrowMPI(ierr);
566
567 /* Ensure that we free the allocated datatype again at the end of
568 * the program run just before we call MPI_Finalize():*/
569 MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
570 int ierr = MPI_Type_free(&type);
571 AssertThrowMPI(ierr);
572 });
573
574 return type;
575 }();
576
577 /*
578 * A custom MPI op handle for our max_reduce function.
579 * It is initialized the first time control reaches the static
580 * variable, i.e., not before it is actually needed.
581 */
582 static MPI_Op op = []() {
583 MPI_Op op;
584
585 int ierr =
586 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
587 static_cast<int>(true),
588 &op);
589 AssertThrowMPI(ierr);
590
591 /* Ensure that we free the allocated op again at the end of the
592 * program run just before we call MPI_Finalize():*/
593 MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
594 int ierr = MPI_Op_free(&op);
595 AssertThrowMPI(ierr);
596 });
597
598 return op;
599 }();
600
601 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
602 Utilities::MPI::max(my_values.size(), mpi_communicator));
603
604 AssertDimension(my_values.size(), result.size());
605
606 // To avoid uninitialized values on some MPI implementations, provide
607 // result with a default value already...
608 MinMaxAvg dummy = {0.,
609 std::numeric_limits<double>::max(),
610 std::numeric_limits<double>::lowest(),
611 0,
612 0,
613 0.};
614
615 for (auto &i : result)
616 i = dummy;
617
618 const unsigned int my_id =
619 ::Utilities::MPI::this_mpi_process(mpi_communicator);
620 const unsigned int numproc =
621 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
622
623 std::vector<MinMaxAvg> in(my_values.size());
624
625 for (unsigned int i = 0; i < my_values.size(); ++i)
626 {
627 in[i].sum = in[i].min = in[i].max = my_values[i];
628 in[i].min_index = in[i].max_index = my_id;
629 }
630
631 int ierr = MPI_Allreduce(
632 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
633 AssertThrowMPI(ierr);
634
635 for (auto &r : result)
636 r.avg = r.sum / numproc;
637 }
638
639
640#else
641
642 unsigned int
643 n_mpi_processes(const MPI_Comm)
644 {
645 return 1;
646 }
647
648
649
650 unsigned int
651 this_mpi_process(const MPI_Comm)
652 {
653 return 0;
654 }
655
656
657
658 std::vector<unsigned int>
659 mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
660 {
661 return std::vector<unsigned int>{0};
662 }
663
664
665
666 std::vector<IndexSet>
667 create_ascending_partitioning(
668 const MPI_Comm /*comm*/,
669 const types::global_dof_index locally_owned_size)
670 {
671 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
672 }
673
674 IndexSet
675 create_evenly_distributed_partitioning(
676 const MPI_Comm /*comm*/,
677 const types::global_dof_index total_size)
678 {
679 return complete_index_set(total_size);
680 }
681
682
683
684 MPI_Comm
685 duplicate_communicator(const MPI_Comm mpi_communicator)
686 {
687 return mpi_communicator;
688 }
689
690
691
692 void
693 free_communicator(MPI_Comm /*mpi_communicator*/)
694 {}
695
696
697
698 void
699 min_max_avg(const ArrayView<const double> &my_values,
700 const ArrayView<MinMaxAvg> &result,
701 const MPI_Comm)
702 {
703 AssertDimension(my_values.size(), result.size());
704
705 for (unsigned int i = 0; i < my_values.size(); ++i)
706 {
707 result[i].sum = my_values[i];
708 result[i].avg = my_values[i];
709 result[i].min = my_values[i];
710 result[i].max = my_values[i];
711 result[i].min_index = 0;
712 result[i].max_index = 0;
713 }
714 }
715
716#endif
717
718 /* Force initialization of static struct: */
719 MPI_InitFinalize::Signals MPI_InitFinalize::signals =
720 MPI_InitFinalize::Signals();
721
722
723 MPI_InitFinalize::MPI_InitFinalize(int &argc,
724 char **&argv,
725 const unsigned int max_num_threads)
726 {
727 static bool constructor_has_already_run = false;
728 (void)constructor_has_already_run;
729 Assert(constructor_has_already_run == false,
730 ExcMessage("You can only create a single object of this class "
731 "in a program since it initializes the MPI system."));
732
733
734 int ierr = 0;
735#ifdef DEAL_II_WITH_MPI
736 // if we have PETSc, we will initialize it and let it handle MPI.
737 // Otherwise, we will do it.
738 int MPI_has_been_started = 0;
739 ierr = MPI_Initialized(&MPI_has_been_started);
740 AssertThrowMPI(ierr);
741 AssertThrow(MPI_has_been_started == 0,
742 ExcMessage("MPI error. You can only start MPI once!"));
743
744 int provided;
745 // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
746 // we might use several threads but never call two MPI functions at the
747 // same time. For an explanation of why we do this, see
748 // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
749 int wanted = MPI_THREAD_SERIALIZED;
750 ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
751 AssertThrowMPI(ierr);
752
753 // disable for now because at least some implementations always return
754 // MPI_THREAD_SINGLE.
755 // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
756 // ExcMessage("MPI reports that we are not allowed to use multiple
757 // threads."));
758#else
759 // make sure the compiler doesn't warn about these variables
760 (void)argc;
761 (void)argv;
762 (void)ierr;
763#endif
764
765 // Initialize Kokkos
766 {
767 // argv has argc+1 elements and the last one is a nullptr. For appending
768 // one element we thus create a new argv by copying the first argc
769 // elements, append the new option, and then a nullptr.
770 //
771 // We do get in trouble, though, if a user program is called with
772 // '--help' as a command line argument. This '--help' gets passed on to
773 // Kokkos, which promptly responds with a lengthy message that the user
774 // likely did not intend. As a consequence, filter out this specific
775 // flag.
776 std::vector<char *> argv_new;
777 for (auto *const arg : make_array_view(&argv[0], &argv[0] + argc))
778 if (strcmp(arg, "--help") != 0)
779 argv_new.push_back(arg);
780
781 std::stringstream threads_flag;
782#if KOKKOS_VERSION >= 30700
783 threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
784#else
785 threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
786#endif
787 const std::string threads_flag_string = threads_flag.str();
788 argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
789 argv_new.push_back(nullptr);
790
791 // The first argument in Kokkos::initialize is of type int&. Hence, we
792 // need to define a new variable to pass to it (instead of using argc+1
793 // inline).
794 int argc_new = argv_new.size() - 1;
795 Kokkos::initialize(argc_new, argv_new.data());
796 }
797
798 // we are allowed to call MPI_Init ourselves and PETScInitialize will
799 // detect this. This allows us to use MPI_Init_thread instead.
800#ifdef DEAL_II_WITH_PETSC
801 PetscErrorCode pierr;
802# ifdef DEAL_II_WITH_SLEPC
803 // Initialize SLEPc (with PETSc):
804 finalize_petscslepc = SlepcInitializeCalled ? false : true;
805 pierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
806 AssertThrow(pierr == 0, ExcSLEPcError(pierr));
807# else
808 // or just initialize PETSc alone:
809 finalize_petscslepc = PetscInitializeCalled ? false : true;
810 pierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
811 AssertThrow(pierr == 0, ExcPETScError(pierr));
812# endif
813
814 // Disable PETSc exception handling. This just prints a large wall
815 // of text that is not particularly helpful for what we do:
816 pierr = PetscPopSignalHandler();
817 AssertThrow(pierr == 0, ExcPETScError(pierr));
818#endif
819
820 // Initialize zoltan
821#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
822 float version;
823 Zoltan_Initialize(argc, argv, &version);
824#endif
825
826#ifdef DEAL_II_WITH_P4EST
827 // Initialize p4est and libsc components
828# if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
829 // This feature is broken in version 2.0.0 for calls to
830 // MPI_Comm_create_group (see cburstedde/p4est#30).
831 // Disabling it leads to more verbose p4est error messages
832 // which should be fine.
833 sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
834# endif
835 p4est_init(nullptr, SC_LP_SILENT);
836#endif
837
838 constructor_has_already_run = true;
839
840
841 // Now also see how many threads we'd like to run
842 if (max_num_threads != numbers::invalid_unsigned_int)
843 {
844 // set maximum number of threads (also respecting the environment
845 // variable that the called function evaluates) based on what the
846 // user asked
847 MultithreadInfo::set_thread_limit(max_num_threads);
848 }
849 else
850 // user wants automatic choice
851 {
852#ifdef DEAL_II_WITH_MPI
853 // we need to figure out how many MPI processes there are on the
854 // current node, as well as how many CPU cores we have. for the
855 // first task, check what get_hostname() returns and then do an
856 // allgather so each processor gets the answer
857 //
858 // in calculating the length of the string, don't forget the
859 // terminating \0 on C-style strings
860 const std::string hostname = Utilities::System::get_hostname();
861 const unsigned int max_hostname_size =
862 Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
863 std::vector<char> hostname_array(max_hostname_size);
864 std::copy(hostname.c_str(),
865 hostname.c_str() + hostname.size() + 1,
866 hostname_array.begin());
867
868 std::vector<char> all_hostnames(max_hostname_size *
869 MPI::n_mpi_processes(MPI_COMM_WORLD));
870 const int ierr = MPI_Allgather(hostname_array.data(),
871 max_hostname_size,
872 MPI_CHAR,
873 all_hostnames.data(),
874 max_hostname_size,
875 MPI_CHAR,
876 MPI_COMM_WORLD);
877 AssertThrowMPI(ierr);
878
879 // count how often our own hostname appears, and determine which of
880 // these instances the current process represents
881 unsigned int n_local_processes = 0;
882 unsigned int nth_process_on_host = 0;
883 for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
884 ++i)
885 if (std::string(all_hostnames.data() + i * max_hostname_size) ==
886 hostname)
887 {
888 ++n_local_processes;
889 if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
890 ++nth_process_on_host;
891 }
892 Assert(nth_process_on_host > 0, ExcInternalError());
893
894
895 // compute how many cores each process gets. if the number does not
896 // divide evenly, then we get one more core if we are among the
897 // first few processes
898 //
899 // if the number would be zero, round up to one since every process
900 // needs to have at least one thread
901 const unsigned int n_threads =
902 std::max(MultithreadInfo::n_cores() / n_local_processes +
903 (nth_process_on_host <=
904 MultithreadInfo::n_cores() % n_local_processes ?
905 1 :
906 0),
907 1U);
908#else
909 const unsigned int n_threads = MultithreadInfo::n_cores();
910#endif
911
912 // finally set this number of threads
913 MultithreadInfo::set_thread_limit(n_threads);
914 }
915
916 // As a final step call the at_mpi_init() signal handler.
917 signals.at_mpi_init();
918 }
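  // Usage sketch: the canonical pattern is to create exactly one object of
  // this class at the top of main(); MPI (and Kokkos, PETSc/SLEPc, p4est,
  // ...) are then finalized automatically when it goes out of scope:
  //
  //   int main(int argc, char **argv)
  //   {
  //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  //     // ... run the parallel program ...
  //   }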
919
920
921
922 void
923 MPI_InitFinalize::register_request(MPI_Request &request)
924 {
925 // insert if it is not in the set already:
926 requests.insert(&request);
927 }
928
929
930
931 void
932 MPI_InitFinalize::unregister_request(MPI_Request &request)
933 {
934 Assert(
935 requests.find(&request) != requests.end(),
937 "You tried to call unregister_request() with an invalid request."));
938
939 requests.erase(&request);
940 }
941
942
943
944 std::set<MPI_Request *> MPI_InitFinalize::requests;
945
946
947
948 MPI_InitFinalize::~MPI_InitFinalize()
949 {
950 // First, call the at_mpi_finalize() signal handler.
951 signals.at_mpi_finalize();
952
953 // make memory pool release all PETSc/Trilinos/MPI-based vectors that
954 // are no longer used at this point. This is relevant because the static
955 // destructors of these vector objects would otherwise run at the end of
956 // the program, i.e., after MPI_Finalize has been called, leading to errors.
957
958#ifdef DEAL_II_WITH_MPI
959 // Before exiting, wait for nonblocking communication to complete:
960 for (auto *request : requests)
961 {
962 const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
963 AssertThrowMPI(ierr);
964 }
965
966 // Start with deal.II MPI vectors and delete vectors from the pools:
967 GrowingVectorMemory<
968 LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
969 GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
970 release_unused_memory();
971 GrowingVectorMemory<
972 LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
973 GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
974 release_unused_memory();
975
976 // Next with Trilinos:
977# ifdef DEAL_II_WITH_TRILINOS
978 GrowingVectorMemory<
979 TrilinosWrappers::MPI::Vector>::release_unused_memory();
980 GrowingVectorMemory<
981 TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
982# endif
983#endif
984
985
986 // Now deal with PETSc (with or without MPI). Only delete the vectors if
987 // finalize hasn't been called yet, otherwise this will lead to errors.
988#ifdef DEAL_II_WITH_PETSC
989 if (!PetscFinalizeCalled)
990 {
991 GrowingVectorMemory<
992 PETScWrappers::MPI::Vector>::release_unused_memory();
993 GrowingVectorMemory<
994 PETScWrappers::MPI::BlockVector>::release_unused_memory();
995 }
996# ifdef DEAL_II_WITH_SLEPC
997 // and now end SLEPc with PETSc if we did so
998 if (finalize_petscslepc)
999 {
1000 PetscErrorCode ierr = SlepcFinalize();
1001 AssertThrow(ierr == 0,
1002 ExcSLEPcError(ierr));
1003 }
1004# else
1005 // or just end PETSc if we did so
1006 if (finalize_petscslepc)
1007 {
1008 PetscErrorCode ierr = PetscFinalize();
1009 AssertThrow(ierr == 0, ExcPETScError(ierr));
1010 }
1011# endif
1012#endif
1013
1014#ifdef DEAL_II_WITH_P4EST
1015 // now end p4est and libsc
1016 // Note: p4est has no finalize function
1017 sc_finalize();
1018#endif
1019
1020
1021 // Finalize Kokkos
1022 Kokkos::finalize();
1023
1024 // only MPI_Finalize if we are running with MPI. We also need to do this
1025 // when running PETSc, because we initialize MPI ourselves before
1026 // calling PetscInitialize
1027#ifdef DEAL_II_WITH_MPI
1028 if (job_supports_mpi() == true)
1029 {
1030# if __cpp_lib_uncaught_exceptions >= 201411
1031 // std::uncaught_exception() is deprecated in c++17
1032 if (std::uncaught_exceptions() > 0)
1033# else
1034 if (std::uncaught_exception() == true)
1035# endif
1036 {
1037 // do not try to call MPI_Finalize to avoid a deadlock.
1038 }
1039 else
1040 {
1041 const int ierr = MPI_Finalize();
1042 (void)ierr;
1043 AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1044 }
1045 }
1046#endif
1047 }
1048
1049
1050
1051 bool
1052 job_supports_mpi()
1053 {
1054#ifdef DEAL_II_WITH_MPI
1055 int MPI_has_been_started = 0;
1056 const int ierr = MPI_Initialized(&MPI_has_been_started);
1057 AssertThrowMPI(ierr);
1058
1059 return (MPI_has_been_started > 0);
1060#else
1061 return false;
1062#endif
1063 }
1064
1065
1066
1067 std::vector<unsigned int>
1068 compute_index_owner(const IndexSet &owned_indices,
1069 const IndexSet &indices_to_look_up,
1070 const MPI_Comm comm)
1071 {
1072 Assert(owned_indices.size() == indices_to_look_up.size(),
1073 ExcMessage("IndexSets have to have the same sizes."));
1074
1075 Assert(
1076 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1077 ExcMessage("IndexSets have to have the same size on all processes."));
1078
1079 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1080
1081 // Step 1: setup dictionary
1082 // The input owned_indices can be partitioned arbitrarily. In the
1083 // dictionary, the index set is statically repartitioned among the
1084 // processes again and extended with information about the actual owner
1085 // of each index.
1086 internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1087 owned_indices, indices_to_look_up, comm, owning_ranks);
1088
1089 // Step 2: read dictionary
1090 // Communicate with the process who owns the index in the static
1091 // partition (i.e. in the dictionary). This process returns the actual
1092 // owner of the index.
1093 ConsensusAlgorithms::Selector<
1094 std::vector<
1095 std::pair<types::global_dof_index, types::global_dof_index>>,
1096 std::vector<unsigned int>>
1097 consensus_algorithm;
1098 consensus_algorithm.run(process, comm);
1099
1100 return owning_ranks;
1101 }
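    // Usage sketch (index sets are made up for illustration): each process
    // owns a contiguous block of ten indices and asks who owns one ghost
    // index outside its own block:
    //
    //   const unsigned int my_rank = this_mpi_process(comm);
    //   const unsigned int n_ranks = n_mpi_processes(comm);
    //   IndexSet owned(10 * n_ranks), ghosts(10 * n_ranks);
    //   owned.add_range(10 * my_rank, 10 * (my_rank + 1));
    //   ghosts.add_index((10 * my_rank + 15) % (10 * n_ranks));
    //   const std::vector<unsigned int> owners =
    //     compute_index_owner(owned, ghosts, comm);
    //   // owners[0] is the rank owning the single ghost index.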
1102
1103
1104
1105 namespace internal
1106 {
1107 namespace CollectiveMutexImplementation
1108 {
1113 void
1114 check_exception()
1115 {
1116#ifdef DEAL_II_WITH_MPI
1117# if __cpp_lib_uncaught_exceptions >= 201411
1118 // std::uncaught_exception() is deprecated in c++17
1119 if (std::uncaught_exceptions() != 0)
1120# else
1121 if (std::uncaught_exception() == true)
1122# endif
1123 {
1124 std::cerr
1125 << "---------------------------------------------------------\n"
1126 << "An exception was thrown inside a section of the program\n"
1127 << "guarded by a CollectiveMutex.\n"
1128 << "Because a CollectiveMutex guards critical communication\n"
1129 << "handling the exception would likely\n"
1130 << "deadlock because only the current process is aware of the\n"
1131 << "exception. To prevent this deadlock, the program will be\n"
1132 << "aborted.\n"
1133 << "---------------------------------------------------------"
1134 << std::endl;
1135
1136 MPI_Abort(MPI_COMM_WORLD, 1);
1137 }
1138#endif
1139 }
1140 } // namespace CollectiveMutexImplementation
1141 } // namespace internal
1142
1143
1144
1145 CollectiveMutex::CollectiveMutex()
1146 : locked(false)
1147 , request(MPI_REQUEST_NULL)
1148 {
1149 MPI_InitFinalize::register_request(request);
1150 }
1151
1152
1153
1154 CollectiveMutex::~CollectiveMutex()
1155 {
1156 // First check if this destructor is called during exception handling
1157 // if so, abort.
1158 internal::CollectiveMutexImplementation::check_exception();
1159
1160 Assert(
1161 !locked,
1162 ExcMessage(
1163 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1164
1165 MPI_InitFinalize::unregister_request(request);
1166 }
1167
1168
1169
1170 void
1171 CollectiveMutex::lock(const MPI_Comm comm)
1172 {
1173 (void)comm;
1174
1175 Assert(
1176 !locked,
1177 ExcMessage(
1178 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1179
1180#ifdef DEAL_II_WITH_MPI
1181
1182 if (job_supports_mpi())
1183 {
1184 // TODO: For now, we implement this mutex with a blocking barrier in
1185 // the lock and unlock. It needs to be tested, if we can move to a
1186 // nonblocking barrier (code disabled below).
1187
1188 const int ierr = MPI_Barrier(comm);
1189 AssertThrowMPI(ierr);
1190
1191# if 0
1192 // wait for non-blocking barrier to finish. This is a noop the
1193 // first time we lock().
1194 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1195 AssertThrowMPI(ierr);
1196# else
1197 // nothing to do as blocking barrier already completed
1198# endif
1199 }
1200#endif
1201
1202 locked = true;
1203 }
1204
1205
1206
1207 void
1208 CollectiveMutex::unlock(const MPI_Comm comm)
1209 {
1210 (void)comm;
1211
1212 // First check if this function is called during exception handling
1213 // if so, abort. This can happen if a ScopedLock is destroyed.
1214 internal::CollectiveMutexImplementation::check_exception();
1215
1216 Assert(
1217 locked,
1218 ExcMessage(
1219 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1220
1221#ifdef DEAL_II_WITH_MPI
1222
1223 if (job_supports_mpi())
1224 {
1225 // TODO: For now, we implement this mutex with a blocking barrier
1226 // in the lock and unlock. It needs to be tested, if we can move
1227 // to a nonblocking barrier (code disabled below):
1228# if 0
1229 const int ierr = MPI_Ibarrier(comm, &request);
1230 AssertThrowMPI(ierr);
1231# else
1232 const int ierr = MPI_Barrier(comm);
1233 AssertThrowMPI(ierr);
1234# endif
1235 }
1236#endif
1237
1238 locked = false;
1239 }
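    // Usage sketch: as in compute_point_to_point_communication_pattern()
    // above, the mutex is normally used through the RAII helper
    // CollectiveMutex::ScopedLock, so that unlock() runs on all processes
    // even if the guarded code returns early:
    //
    //   static CollectiveMutex mutex;
    //   {
    //     CollectiveMutex::ScopedLock lock(mutex, comm);
    //     // ... critical, communicator-wide communication ...
    //   }   // unlocked here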
1240
1241
1242#ifndef DOXYGEN
1243 // explicit instantiations
1244
1245 // booleans aren't in MPI_SCALARS
1246 template bool
1247 reduce(const bool &,
1248 const MPI_Comm,
1249 const std::function<bool(const bool &, const bool &)> &,
1250 const unsigned int);
1251
1252 template std::vector<bool>
1253 reduce(const std::vector<bool> &,
1254 const MPI_Comm,
1255 const std::function<std::vector<bool>(const std::vector<bool> &,
1256 const std::vector<bool> &)> &,
1257 const unsigned int);
1258
1259 template bool
1260 all_reduce(const bool &,
1261 const MPI_Comm,
1262 const std::function<bool(const bool &, const bool &)> &);
1263
1264 template std::vector<bool>
1265 all_reduce(
1266 const std::vector<bool> &,
1267 const MPI_Comm,
1268 const std::function<std::vector<bool>(const std::vector<bool> &,
1269 const std::vector<bool> &)> &);
1270
1271 // We need an explicit instantiation of this for the same reason as the
1272 // other types described in mpi.inst.in
1273 template void
1274 internal::all_reduce<bool>(const MPI_Op &,
1275 const ArrayView<const bool> &,
1276 const MPI_Comm,
1277 const ArrayView<bool> &);
1278
1279
1280 template bool
1281 logical_or<bool>(const bool &, const MPI_Comm);
1282
1283
1284 template void
1285 logical_or<bool>(const ArrayView<const bool> &,
1286 const MPI_Comm,
1287 const ArrayView<bool> &);
1288
1289
1290 template std::vector<unsigned int>
1291 compute_set_union(const std::vector<unsigned int> &vec,
1292 const MPI_Comm comm);
1293
1294
1295 template std::set<unsigned int>
1296 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
1297#endif
1298
1299#include "mpi.inst"
1300 } // end of namespace MPI
1301} // end of namespace Utilities
1302
1303DEAL_II_NAMESPACE_CLOSE