mpi.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi.templates.h>
24
27
28#include <boost/serialization/utility.hpp>
29
30#include <iostream>
31#include <limits>
32#include <numeric>
33#include <set>
34#include <vector>
35
36DEAL_II_NAMESPACE_OPEN
37
38
39namespace Utilities
40{
41 IndexSet
42 create_evenly_distributed_partitioning(
43 const unsigned int my_partition_id,
44 const unsigned int n_partitions,
45 const types::global_dof_index total_size)
46 {
47 static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
48 "IndexSet::size_type must match types::global_dof_index for "
49 "using this function");
50 const unsigned int remain = total_size % n_partitions;
51
52 const IndexSet::size_type min_size = total_size / n_partitions;
53
54 const IndexSet::size_type begin =
55 min_size * my_partition_id + std::min(my_partition_id, remain);
56 const IndexSet::size_type end =
57 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
58 IndexSet result(total_size);
59 result.add_range(begin, end);
60 return result;
61 }
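// A worked example of the split above (illustrative, not part of the
// library): for total_size = 10 and n_partitions = 4, min_size = 2 and
// remain = 2, so the partitions own the ranges [0,3), [3,6), [6,8), [8,10)
// -- the first `remain` partitions receive one extra element each.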
62
63 namespace MPI
64 {
65 MinMaxAvg
66 min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
67 {
68 MinMaxAvg result;
69 min_max_avg(ArrayView<const double>(&my_value, 1),
70 ArrayView<MinMaxAvg>(&result, 1),
71 mpi_communicator);
72
73 return result;
74 }
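// Usage sketch (illustrative, assuming an initialized MPI run): every rank
// passes one value and all ranks receive identical statistics, e.g.
//
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(local_timing, MPI_COMM_WORLD);
//
// where `local_timing` is a hypothetical per-rank double; stats.min, .max,
// .sum, .avg and the ranks stats.min_index / stats.max_index are then the
// same on all processes.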
75
76
77
78 std::vector<MinMaxAvg>
79 min_max_avg(const std::vector<double> &my_values,
80 const MPI_Comm mpi_communicator)
81 {
82 std::vector<MinMaxAvg> results(my_values.size());
83 min_max_avg(my_values, results, mpi_communicator);
84
85 return results;
86 }
87
88
89
90#ifdef DEAL_II_WITH_MPI
91 unsigned int
92 n_mpi_processes(const MPI_Comm mpi_communicator)
93 {
94 if (job_supports_mpi())
95 {
96 int n_jobs = 1;
97 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
98 AssertThrowMPI(ierr);
99 return n_jobs;
100 }
101 else
102 return 1;
103 }
104
105
106 unsigned int
107 this_mpi_process(const MPI_Comm mpi_communicator)
108 {
109 if (job_supports_mpi())
110 {
111 int rank = 0;
112 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
113 AssertThrowMPI(ierr);
114 return rank;
115 }
116 else
117 return 0;
118 }
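// Usage sketch (illustrative): the two queries above are typically used
// together to identify a process within a parallel program, e.g.
//
//   const unsigned int n_ranks =
//     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
//   const unsigned int my_rank =
//     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
//
// Without MPI support (or before MPI_Init) they return 1 and 0, respectively.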
119
120
121
122 std::vector<unsigned int>
123 mpi_processes_within_communicator(const MPI_Comm comm_large,
124 const MPI_Comm comm_small)
125 {
126 if (Utilities::MPI::job_supports_mpi() == false)
127 return std::vector<unsigned int>{0};
128
129 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
130 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
131
132 std::vector<unsigned int> ranks(size);
133 const int ierr = MPI_Allgather(
134 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
135 AssertThrowMPI(ierr);
136
137 return ranks;
138 }
139
140
141
142 MPI_Comm
143 duplicate_communicator(const MPI_Comm mpi_communicator)
144 {
145 MPI_Comm new_communicator;
146 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
147 AssertThrowMPI(ierr);
148 return new_communicator;
149 }
150
151
152
153 void
154 free_communicator(MPI_Comm mpi_communicator)
155 {
156 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
157 const int ierr = MPI_Comm_free(&mpi_communicator);
158 AssertThrowMPI(ierr);
159 }
160
161
162
163 std::vector<IndexSet>
164 create_ascending_partitioning(
165 const MPI_Comm comm,
166 const types::global_dof_index locally_owned_size)
167 {
168 static_assert(
169 std::is_same_v<types::global_dof_index, IndexSet::size_type>,
170 "IndexSet::size_type must match types::global_dof_index for "
171 "using this function");
172 const unsigned int n_proc = n_mpi_processes(comm);
173 const std::vector<IndexSet::size_type> sizes =
174 all_gather(comm, locally_owned_size);
175 const auto total_size =
176 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
177
178 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
179
180 IndexSet::size_type begin = 0;
181 for (unsigned int i = 0; i < n_proc; ++i)
182 {
183 res[i].add_range(begin, begin + sizes[i]);
184 begin = begin + sizes[i];
185 }
186
187 return res;
188 }
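// Example (illustrative): with three ranks owning 4, 2, and 5 elements,
// every rank receives the same vector of IndexSets describing the ranges
// [0,4), [4,6), [6,11), i.e. the locally owned ranges of all ranks laid
// out in ascending rank order.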
189
190
191
192 IndexSet
193 create_evenly_distributed_partitioning(
194 const MPI_Comm comm,
195 const types::global_dof_index total_size)
196 {
197 const unsigned int this_proc = this_mpi_process(comm);
198 const unsigned int n_proc = n_mpi_processes(comm);
199
200 return Utilities::create_evenly_distributed_partitioning(this_proc,
201 n_proc,
202 total_size);
203 }
204
205
206
207 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
208 create_mpi_data_type_n_bytes(const std::size_t n_bytes)
209 {
210 MPI_Datatype result;
211 int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
212 AssertThrowMPI(ierr);
213 ierr = MPI_Type_commit(&result);
214 AssertThrowMPI(ierr);
215
216# ifdef DEBUG
217 MPI_Count size64;
218 ierr = MPI_Type_size_x(result, &size64);
219 AssertThrowMPI(ierr);
220
221 Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
222# endif
223
224 // Now put the new data type into a std::unique_ptr with a custom
225 // deleter. We call the std::unique_ptr constructor that takes as its
226 // first argument a pointer (here, a pointer to a copy of the `result`
227 // object) and as its second argument a pointer-to-function, for which
228 // we here use a lambda function without captures that acts as the
229 // 'deleter' object: it calls `MPI_Type_free` and then deletes the
230 // pointer. To avoid a compiler warning about a null this pointer
231 // in the lambda (which doesn't make sense: the lambda doesn't store
232 // anything), we create the deleter first.
233 auto deleter = [](MPI_Datatype *p) {
234 if (p != nullptr)
235 {
236 [[maybe_unused]] const int ierr = MPI_Type_free(p);
237
238 AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
239
240 delete p;
241 }
242 };
243
244 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
245 new MPI_Datatype(result), deleter);
246 }
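// Usage sketch (illustrative, with a hypothetical buffer, destination rank,
// tag, and communicator): the returned handle lets a single MPI call move a
// buffer whose size in bytes does not fit into an `int` count:
//
//   const auto bigtype =
//     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
//   const int ierr =
//     MPI_Send(buffer.data(), 1, *bigtype, dest_rank, tag, comm);
//   AssertThrowMPI(ierr);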
247
248
249
250 std::vector<unsigned int>
251 compute_point_to_point_communication_pattern(
252 const MPI_Comm mpi_comm,
253 const std::vector<unsigned int> &destinations)
254 {
255 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
256 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
257
258# ifdef DEBUG
259 for (const unsigned int destination : destinations)
260 AssertIndexRange(destination, n_procs);
261# endif
262
263 // Have a little function that checks if destinations provided
264 // to the current process are unique. The way it does this is
265 // to create a sorted list of destinations and then walk through
266 // the list and look at successive elements -- if we find the
267 // same number twice, we know that the destinations were not
268 // unique
269 const bool my_destinations_are_unique = [destinations]() {
270 if (destinations.empty())
271 return true;
272 else
273 {
274 std::vector<unsigned int> my_destinations = destinations;
275 std::sort(my_destinations.begin(), my_destinations.end());
276 return (std::adjacent_find(my_destinations.begin(),
277 my_destinations.end()) ==
278 my_destinations.end());
279 }
280 }();
281
282 // If all processes report that they have unique destinations,
283 // then we can short-cut the process using a consensus algorithm (which
284 // is implemented only for the case of unique destinations):
285 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
286 1)
287 {
288 return ConsensusAlgorithms::nbx<char, char>(
289 destinations, {}, {}, {}, mpi_comm);
290 }
291
292 // So we need to run a different algorithm, specifically one that
293 // requires more memory -- MPI_Reduce_scatter_block will require memory
294 // proportional to the number of processes involved; that function is
295 // available for MPI 2.2 or later:
296 static CollectiveMutex mutex;
297 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
298
299 const int mpi_tag =
300 internal::Tags::compute_point_to_point_communication_pattern;
301
302 // Calculate the number of messages to send to each process
303 std::vector<unsigned int> dest_vector(n_procs);
304 for (const auto &el : destinations)
305 ++dest_vector[el];
306
307 // Find how many processes will send to this one
308 // by reducing with sum and then scattering the
309 // results over all processes
310 unsigned int n_recv_from;
311 const int ierr = MPI_Reduce_scatter_block(
312 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
313
314 AssertThrowMPI(ierr);
315
316 // Send myid to every process in `destinations` vector...
317 std::vector<MPI_Request> send_requests(destinations.size());
318 for (const auto &el : destinations)
319 {
320 const int ierr =
321 MPI_Isend(&myid,
322 1,
323 MPI_UNSIGNED,
324 el,
325 mpi_tag,
326 mpi_comm,
327 send_requests.data() + (&el - destinations.data()));
328 AssertThrowMPI(ierr);
329 }
330
331
332 // Receive `n_recv_from` times from the processes
333 // that communicate with this one. Store the obtained ids
334 // in the resulting vector.
335 std::vector<unsigned int> origins(n_recv_from);
336 for (auto &el : origins)
337 {
338 const int ierr = MPI_Recv(&el,
339 1,
340 MPI_UNSIGNED,
341 MPI_ANY_SOURCE,
342 mpi_tag,
343 mpi_comm,
344 MPI_STATUS_IGNORE);
345 AssertThrowMPI(ierr);
346 }
347
348 if (destinations.size() > 0)
349 {
350 const int ierr = MPI_Waitall(destinations.size(),
351 send_requests.data(),
352 MPI_STATUSES_IGNORE);
353 AssertThrowMPI(ierr);
354 }
355
356 return origins;
357 }
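// Usage sketch (illustrative): each process passes the ranks it intends to
// send to, and obtains the ranks that will send to it, e.g.
//
//   const std::vector<unsigned int> origins =
//     Utilities::MPI::compute_point_to_point_communication_pattern(
//       comm, destinations);
//
// so that one receive can then be posted for each entry of `origins`; here
// `comm` and `destinations` stand for the caller's communicator and send
// targets.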
358
359
360
361 unsigned int
362 compute_n_point_to_point_communications(
363 const MPI_Comm mpi_comm,
364 const std::vector<unsigned int> &destinations)
365 {
366 // Have a little function that checks if destinations provided
367 // to the current process are unique:
368 const bool my_destinations_are_unique = [destinations]() {
369 std::vector<unsigned int> my_destinations = destinations;
370 const unsigned int n_destinations = my_destinations.size();
371 std::sort(my_destinations.begin(), my_destinations.end());
372 my_destinations.erase(std::unique(my_destinations.begin(),
373 my_destinations.end()),
374 my_destinations.end());
375 return (my_destinations.size() == n_destinations);
376 }();
377
378 // If all processes report that they have unique destinations,
379 // then we can short-cut the process using a consensus algorithm:
380
381 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
382 1)
383 {
384 return ConsensusAlgorithms::nbx<char, char>(
385 destinations, {}, {}, {}, mpi_comm)
386 .size();
387 }
388 else
389 {
390 const unsigned int n_procs =
391 Utilities::MPI::n_mpi_processes(mpi_comm);
392
393# ifdef DEBUG
394 for (const unsigned int destination : destinations)
395 {
396 AssertIndexRange(destination, n_procs);
397 Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
399 "There is no point in communicating with ourselves."));
400 }
401# endif
402
403 // Calculate the number of messages to send to each process
404 std::vector<unsigned int> dest_vector(n_procs);
405 for (const auto &el : destinations)
406 ++dest_vector[el];
407
408 // Find out how many processes will send to this one
409 // MPI_Reduce_scatter(_block) does exactly this
410 unsigned int n_recv_from = 0;
411
412 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
413 &n_recv_from,
414 1,
415 MPI_UNSIGNED,
416 MPI_SUM,
417 mpi_comm);
418
419 AssertThrowMPI(ierr);
420
421 return n_recv_from;
422 }
423 }
424
425
426
427 namespace
428 {
429 // custom MPI_Op used for the min_max_avg() reduction below
430 void
431 max_reduce(const void *in_lhs_,
432 void *inout_rhs_,
433 int *len,
434 MPI_Datatype *)
435 {
436 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
437 MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
438
439 for (int i = 0; i < *len; ++i)
440 {
441 inout_rhs[i].sum += in_lhs[i].sum;
442 if (inout_rhs[i].min > in_lhs[i].min)
443 {
444 inout_rhs[i].min = in_lhs[i].min;
445 inout_rhs[i].min_index = in_lhs[i].min_index;
446 }
447 else if (inout_rhs[i].min == in_lhs[i].min)
448 {
449 // choose lower cpu index when tied to make operator commutative
450 if (inout_rhs[i].min_index > in_lhs[i].min_index)
451 inout_rhs[i].min_index = in_lhs[i].min_index;
452 }
453
454 if (inout_rhs[i].max < in_lhs[i].max)
455 {
456 inout_rhs[i].max = in_lhs[i].max;
457 inout_rhs[i].max_index = in_lhs[i].max_index;
458 }
459 else if (inout_rhs[i].max == in_lhs[i].max)
460 {
461 // choose lower cpu index when tied to make operator commutative
462 if (inout_rhs[i].max_index > in_lhs[i].max_index)
463 inout_rhs[i].max_index = in_lhs[i].max_index;
464 }
465 }
466 }
467 } // namespace
468
469
470
471 void
472 min_max_avg(const ArrayView<const double> &my_values,
473 const ArrayView<MinMaxAvg> &result,
474 const MPI_Comm mpi_communicator)
475 {
476 // If MPI was not started, we have a serial computation and cannot run
477 // the other MPI commands
478 if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
479 {
480 for (unsigned int i = 0; i < my_values.size(); ++i)
481 {
482 result[i].sum = my_values[i];
483 result[i].avg = my_values[i];
484 result[i].min = my_values[i];
485 result[i].max = my_values[i];
486 result[i].min_index = 0;
487 result[i].max_index = 0;
488 }
489 return;
490 }
491
492 /*
493 * A custom MPI datatype handle describing the memory layout of the
494 * MinMaxAvg struct. It is initialized the first time control reaches
495 * this static variable, so it is not initialized too early.
496 */
497 static MPI_Datatype type = []() {
498 MPI_Datatype type;
499
500 int lengths[] = {3, 2, 1};
501
502 MPI_Aint displacements[] = {0,
503 offsetof(MinMaxAvg, min_index),
504 offsetof(MinMaxAvg, avg)};
505
506 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
507
508 int ierr =
509 MPI_Type_create_struct(3, lengths, displacements, types, &type);
510 AssertThrowMPI(ierr);
511
512 ierr = MPI_Type_commit(&type);
513 AssertThrowMPI(ierr);
514
515 /* Ensure that we free the allocated datatype again at the end of
516 * the program run just before we call MPI_Finalize():*/
517 InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
518 int ierr = MPI_Type_free(&type);
519 AssertThrowMPI(ierr);
520 });
521
522 return type;
523 }();
524
525 /*
526 * A custom MPI op handle for our max_reduce function. It is
527 * initialized the first time control reaches this static variable,
528 * so it is not initialized too early.
529 */
530 static MPI_Op op = []() {
531 MPI_Op op;
532
533 int ierr =
534 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
535 static_cast<int>(true),
536 &op);
537 AssertThrowMPI(ierr);
538
539 /* Ensure that we free the allocated op again at the end of the
540 * program run just before we call MPI_Finalize():*/
541 InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
542 int ierr = MPI_Op_free(&op);
543 AssertThrowMPI(ierr);
544 });
545
546 return op;
547 }();
548
549 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
550 Utilities::MPI::max(my_values.size(), mpi_communicator));
551
552 AssertDimension(my_values.size(), result.size());
553
554 // To avoid uninitialized values on some MPI implementations, provide
555 // result with a default value already...
556 MinMaxAvg dummy = {0.,
557 std::numeric_limits<double>::max(),
558 std::numeric_limits<double>::lowest(),
559 0,
560 0,
561 0.};
562
563 for (auto &i : result)
564 i = dummy;
565
566 const unsigned int my_id =
567 ::Utilities::MPI::this_mpi_process(mpi_communicator);
568 const unsigned int numproc =
569 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
570
571 std::vector<MinMaxAvg> in(my_values.size());
572
573 for (unsigned int i = 0; i < my_values.size(); ++i)
574 {
575 in[i].sum = in[i].min = in[i].max = my_values[i];
576 in[i].min_index = in[i].max_index = my_id;
577 }
578
579 int ierr = MPI_Allreduce(
580 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
581 AssertThrowMPI(ierr);
582
583 for (auto &r : result)
584 r.avg = r.sum / numproc;
585 }
586
587
588#else
589
590 unsigned int
591 n_mpi_processes(const MPI_Comm)
592 {
593 return 1;
594 }
595
596
597
598 unsigned int
599 this_mpi_process(const MPI_Comm)
600 {
601 return 0;
602 }
603
604
605
606 std::vector<unsigned int>
607 mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
608 {
609 return std::vector<unsigned int>{0};
610 }
611
612
613
614 std::vector<IndexSet>
615 create_ascending_partitioning(
616 const MPI_Comm /*comm*/,
617 const types::global_dof_index locally_owned_size)
618 {
619 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
620 }
621
622 IndexSet
623 create_evenly_distributed_partitioning(
624 const MPI_Comm /*comm*/,
625 const types::global_dof_index total_size)
626 {
627 return complete_index_set(total_size);
628 }
629
630
631
632 MPI_Comm
633 duplicate_communicator(const MPI_Comm mpi_communicator)
634 {
635 return mpi_communicator;
636 }
637
638
639
640 void
641 free_communicator(MPI_Comm /*mpi_communicator*/)
642 {}
643
644
645
646 void
647 min_max_avg(const ArrayView<const double> &my_values,
648 const ArrayView<MinMaxAvg> &result,
649 const MPI_Comm)
650 {
651 AssertDimension(my_values.size(), result.size());
652
653 for (unsigned int i = 0; i < my_values.size(); ++i)
654 {
655 result[i].sum = my_values[i];
656 result[i].avg = my_values[i];
657 result[i].min = my_values[i];
658 result[i].max = my_values[i];
659 result[i].min_index = 0;
660 result[i].max_index = 0;
661 }
662 }
663
664#endif
665
666
667 MPI_InitFinalize::MPI_InitFinalize(int &argc,
668 char **&argv,
669 const unsigned int max_num_threads)
670 : InitFinalize(argc,
671 argv,
672 InitializeLibrary::MPI | InitializeLibrary::Kokkos |
673 InitializeLibrary::SLEPc | InitializeLibrary::PETSc |
674 InitializeLibrary::P4EST | InitializeLibrary::Zoltan,
675 max_num_threads)
676 {}
677
678
679
680 bool
681 job_supports_mpi()
682 {
683#ifdef DEAL_II_WITH_MPI
684 int MPI_has_been_started = 0;
685 const int ierr = MPI_Initialized(&MPI_has_been_started);
686 AssertThrowMPI(ierr);
687
688 return (MPI_has_been_started > 0);
689#else
690 return false;
691#endif
692 }
693
694
695
696 std::vector<unsigned int>
697 compute_index_owner(const IndexSet &owned_indices,
698 const IndexSet &indices_to_look_up,
699 const MPI_Comm comm)
700 {
701 Assert(owned_indices.size() == indices_to_look_up.size(),
702 ExcMessage("IndexSets have to have the same sizes."));
703
704 Assert(
705 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
706 ExcMessage("IndexSets have to have the same size on all processes."));
707
708 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
709
710 // Step 1: set up the dictionary
711 // The input owned_indices can be partitioned arbitrarily. In the
712 // dictionary, the index set is statically repartitioned among the
713 // processes again and extended with information about the actual
714 // owner of each index.
715 internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
716 owned_indices,
717 indices_to_look_up,
718 comm,
719 owning_ranks,
720 /* keep track of requesters = */ false);
721
722 // Step 2: read dictionary
723 // Communicate with the process that owns the index in the static
724 // partition (i.e., in the dictionary). This process returns the actual
725 // owner of the index.
726 ConsensusAlgorithms::Selector<
727 std::vector<
728 std::pair<types::global_dof_index, types::global_dof_index>>,
729 std::vector<unsigned int>>
730 consensus_algorithm;
731 consensus_algorithm.run(process, comm);
732
733 return owning_ranks;
734 }
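// Usage sketch (illustrative, with hypothetical index sets): the i-th entry
// of the result names the rank owning the i-th element of
// `indices_to_look_up` on the calling process, e.g.
//
//   const std::vector<unsigned int> owners =
//     Utilities::MPI::compute_index_owner(locally_owned_indices,
//                                         ghost_indices,
//                                         comm);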
735
736
737
738 namespace internal
739 {
740 namespace CollectiveMutexImplementation
741 {
746 void
748 {
749#ifdef DEAL_II_WITH_MPI
750 if (std::uncaught_exceptions() > 0)
751 {
752 std::cerr
753 << "---------------------------------------------------------\n"
754 << "An exception was thrown inside a section of the program\n"
755 << "guarded by a CollectiveMutex.\n"
756 << "Because a CollectiveMutex guards critical communication\n"
757 << "handling the exception would likely\n"
758 << "deadlock because only the current process is aware of the\n"
759 << "exception. To prevent this deadlock, the program will be\n"
760 << "aborted.\n"
761 << "---------------------------------------------------------"
762 << std::endl;
763
764 MPI_Abort(MPI_COMM_WORLD, 1);
765 }
766#endif
767 }
768 } // namespace CollectiveMutexImplementation
769 } // namespace internal
770
771
772
773 CollectiveMutex::CollectiveMutex()
774 : locked(false)
775 , request(MPI_REQUEST_NULL)
776 {
777 InitFinalize::register_request(request);
778 }
779
780
781
782 CollectiveMutex::~CollectiveMutex()
783 {
784 // First check if this destructor is called during exception handling
785 // if so, abort.
786 internal::CollectiveMutexImplementation::check_exception();
787
788 Assert(
789 !locked,
791 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
792
793 InitFinalize::unregister_request(request);
794 }
795
796
797
798 void
799 CollectiveMutex::lock(const MPI_Comm comm)
800 {
801 Assert(
802 !locked,
804 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
805
806#ifdef DEAL_II_WITH_MPI
807
808 if (job_supports_mpi())
809 {
810 // TODO: For now, we implement this mutex with a blocking barrier in
811 // the lock and unlock. It remains to be tested whether we can move
812 // to a non-blocking barrier (code disabled below).
813
814 const int ierr = MPI_Barrier(comm);
815 AssertThrowMPI(ierr);
816
817# if 0
818 // wait for non-blocking barrier to finish. This is a noop the
819 // first time we lock().
820 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
821 AssertThrowMPI(ierr);
822# else
823 // nothing to do as blocking barrier already completed
824# endif
825 }
826#else
827 (void)comm;
828#endif
829
830 locked = true;
831 }
832
833
834
835 void
836 CollectiveMutex::unlock(const MPI_Comm comm)
837 {
838 // First check if this function is called during exception handling
839 // if so, abort. This can happen if a ScopedLock is destroyed.
840 internal::CollectiveMutexImplementation::check_exception();
841
842 Assert(
843 locked,
845 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
846
847#ifdef DEAL_II_WITH_MPI
848
849 if (job_supports_mpi())
850 {
851 // TODO: For now, we implement this mutex with a blocking barrier
852 // in the lock and unlock. It remains to be tested whether we can
853 // move to a non-blocking barrier (code disabled below):
854# if 0
855 const int ierr = MPI_Ibarrier(comm, &request);
856 AssertThrowMPI(ierr);
857# else
858 const int ierr = MPI_Barrier(comm);
859 AssertThrowMPI(ierr);
860# endif
861 }
862#else
863 (void)comm;
864#endif
865
866 locked = false;
867 }
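// Usage sketch (illustrative): the mutex is normally used through its
// ScopedLock helper, exactly as in
// compute_point_to_point_communication_pattern() above, so that unlock()
// runs even on early returns:
//
//   static CollectiveMutex      mutex;
//   CollectiveMutex::ScopedLock lock(mutex, comm);
//   // ...critical communication that must not interleave with other
//   // users of the same tags on this communicator...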
868
869
870#ifndef DOXYGEN
871 // explicit instantiations
872
873 // booleans aren't in MPI_SCALARS
874 template bool
875 reduce(const bool &,
876 const MPI_Comm,
877 const std::function<bool(const bool &, const bool &)> &,
878 const unsigned int);
879
880 template std::vector<bool>
881 reduce(const std::vector<bool> &,
882 const MPI_Comm,
883 const std::function<std::vector<bool>(const std::vector<bool> &,
884 const std::vector<bool> &)> &,
885 const unsigned int);
886
887 template bool
888 all_reduce(const bool &,
889 const MPI_Comm,
890 const std::function<bool(const bool &, const bool &)> &);
891
892 template std::vector<bool>
893 all_reduce(
894 const std::vector<bool> &,
895 const MPI_Comm,
896 const std::function<std::vector<bool>(const std::vector<bool> &,
897 const std::vector<bool> &)> &);
898
899 // We need an explicit instantiation of this for the same reason as the
900 // other types described in mpi.inst.in
901 template void
902 internal::all_reduce<bool>(const MPI_Op &,
903 const ArrayView<const bool> &,
904 const MPI_Comm,
905 const ArrayView<bool> &);
906
907
908 template bool
909 logical_or<bool>(const bool &, const MPI_Comm);
910
911
912 template void
913 logical_or<bool>(const ArrayView<const bool> &,
914 const MPI_Comm,
915 const ArrayView<bool> &);
916
917
918 template std::vector<unsigned int>
919 compute_set_union(const std::vector<unsigned int> &vec,
920 const MPI_Comm comm);
921
922
923 template std::set<unsigned int>
924 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
925#endif
926
927#include "mpi.inst"
928 } // end of namespace MPI
929} // end of namespace Utilities
930
931DEAL_II_NAMESPACE_CLOSE