#include <deal.II/base/mpi.templates.h>

#include <boost/serialization/utility.hpp>

#include <Kokkos_Core.hpp>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <petscsys.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <slepcsys.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif
  IndexSet
  create_evenly_distributed_partitioning(
    const unsigned int            my_partition_id,
    const unsigned int            n_partitions,
    const types::global_dof_index total_size)
  {
    static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
                  "IndexSet::size_type must match types::global_dof_index for "
                  "using this function");

    // Distribute total_size indices as evenly as possible: the first
    // 'remain' partitions receive one extra index each.
    const unsigned int        remain   = total_size % n_partitions;
    const IndexSet::size_type min_size = total_size / n_partitions;

    const IndexSet::size_type begin =
      min_size * my_partition_id + std::min(my_partition_id, remain);
    const IndexSet::size_type end =
      min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
    IndexSet result(total_size);
    result.add_range(begin, end);
    return result;
  }
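  // A usage sketch (not part of this source file): with total_size = 10 and
  // n_partitions = 3, the remainder 10 % 3 == 1 means the first partition
  // receives one extra index, so the three ranks own [0,4), [4,7), [7,10).
  //
  //   const IndexSet owned =
  //     Utilities::MPI::create_evenly_distributed_partitioning(
  //       /*my_partition_id=*/0, /*n_partitions=*/3, /*total_size=*/10);
  //   // owned.n_elements() == 4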
  std::vector<MinMaxAvg>
  min_max_avg(const std::vector<double> &my_values,
              const MPI_Comm             mpi_communicator)
  {
    std::vector<MinMaxAvg> results(my_values.size());
    min_max_avg(my_values, results, mpi_communicator);

    return results;
  }



#ifdef DEAL_II_WITH_MPI
  unsigned int
  n_mpi_processes(const MPI_Comm mpi_communicator)
  {
    int       n_jobs = 1;
    const int ierr   = MPI_Comm_size(mpi_communicator, &n_jobs);
    AssertThrowMPI(ierr);

    return n_jobs;
  }


  unsigned int
  this_mpi_process(const MPI_Comm mpi_communicator)
  {
    int       rank = 0;
    const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
    AssertThrowMPI(ierr);

    return rank;
  }
  std::vector<unsigned int>
  mpi_processes_within_communicator(const MPI_Comm comm_large,
                                    const MPI_Comm comm_small)
  {
    if (Utilities::MPI::job_supports_mpi() == false)
      return std::vector<unsigned int>{0};

    const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
    const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);

    std::vector<unsigned int> ranks(size);
    const int                 ierr = MPI_Allgather(
      &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
    AssertThrowMPI(ierr);

    return ranks;
  }
  MPI_Comm
  duplicate_communicator(const MPI_Comm mpi_communicator)
  {
    MPI_Comm  new_communicator;
    const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
    AssertThrowMPI(ierr);
    return new_communicator;
  }


  void
  free_communicator(MPI_Comm mpi_communicator)
  {
    const int ierr = MPI_Comm_free(&mpi_communicator);
    AssertThrowMPI(ierr);
  }


  int
  create_group(const MPI_Comm   comm,
               const MPI_Group &group,
               const int        tag,
               MPI_Comm        *new_comm)
  {
    const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
    AssertThrowMPI(ierr);
    return ierr;
  }
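  // A usage sketch (not part of this source file): a duplicated communicator
  // must be released again with free_communicator(), e.g.
  //
  //   MPI_Comm my_comm =
  //     Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  //   // ... communicate on my_comm without clashing with other libraries ...
  //   Utilities::MPI::free_communicator(my_comm);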
  std::vector<IndexSet>
  create_ascending_partitioning(
    const MPI_Comm                comm,
    const types::global_dof_index locally_owned_size)
  {
    static_assert(
      std::is_same_v<types::global_dof_index, IndexSet::size_type>,
      "IndexSet::size_type must match types::global_dof_index for "
      "using this function");
    const unsigned int                     n_proc = n_mpi_processes(comm);
    const std::vector<IndexSet::size_type> sizes =
      all_gather(comm, locally_owned_size);
    const auto total_size =
      std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));

    std::vector<IndexSet> res(n_proc, IndexSet(total_size));

    // Each rank owns a contiguous range; the ranges ascend with rank number:
    IndexSet::size_type begin = 0;
    for (unsigned int i = 0; i < n_proc; ++i)
      {
        res[i].add_range(begin, begin + sizes[i]);
        begin = begin + sizes[i];
      }

    return res;
  }
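  // A usage sketch (not part of this source file): if three ranks pass
  // locally_owned_size values 4, 2, and 5, then every rank receives the same
  // vector of contiguous IndexSets covering [0,4), [4,6), and [6,11).
  //
  //   const std::vector<IndexSet> partition =
  //     Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);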
  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
  create_mpi_data_type_n_bytes(const std::size_t n_bytes)
  {
    MPI_Datatype result;
    int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_CHAR, &result);
    AssertThrowMPI(ierr);
    ierr = MPI_Type_commit(&result);
    AssertThrowMPI(ierr);

#  ifdef DEBUG
    MPI_Count size64;
    ierr = MPI_Type_size_x(result, &size64);
    AssertThrowMPI(ierr);

    Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
#  endif

    // Put the new datatype into a std::unique_ptr whose custom deleter
    // frees the datatype again once the pointer goes out of scope:
    auto deleter = [](MPI_Datatype *p) {
      if (p != nullptr)
        {
          const int ierr = MPI_Type_free(p);
          AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));

          delete p;
        }
    };

    return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
      new MPI_Datatype(result), deleter);
  }
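  // A usage sketch (not part of this source file): the returned datatype
  // lets one send buffers whose size exceeds the 2^31 limit of an 'int'
  // count by sending a single element of the new type:
  //
  //   std::vector<char> buffer(n_bytes);  // possibly > 2 GB
  //   const auto        type =
  //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
  //   const int ierr =
  //     MPI_Send(buffer.data(), 1, *type, dest_rank, tag, comm);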
  std::vector<unsigned int>
  compute_point_to_point_communication_pattern(
    const MPI_Comm                   mpi_comm,
    const std::vector<unsigned int> &destinations)
  {
    const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
    const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

    for (const unsigned int destination : destinations)
      AssertIndexRange(destination, n_procs);

    // Check whether the destinations provided to the current process are
    // unique, by sorting them and looking for adjacent duplicates:
    const bool my_destinations_are_unique = [destinations]() {
      if (destinations.empty())
        return true;
      else
        {
          std::vector<unsigned int> my_destinations = destinations;
          std::sort(my_destinations.begin(), my_destinations.end());
          return (std::adjacent_find(my_destinations.begin(),
                                     my_destinations.end()) ==
                  my_destinations.end());
        }
    }();

    // If all processes report unique destinations, we can use the NBX
    // consensus algorithm to find the sources:
    if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
        1)
      {
        return ConsensusAlgorithms::nbx<char, char>(
          destinations, {}, {}, {}, mpi_comm);
      }
    else
      {
        // Otherwise, count the messages this process sends to each rank ...
        std::vector<unsigned int> dest_vector(n_procs);
        for (const auto &el : destinations)
          ++dest_vector[el];

        // ... and let MPI_Reduce_scatter_block tell every rank how many
        // processes will send to it:
        unsigned int n_recv_from;
        const int    ierr = MPI_Reduce_scatter_block(
          dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
        AssertThrowMPI(ierr);

        // Send our rank to every destination ...
        std::vector<MPI_Request> send_requests(destinations.size());
        for (const auto &el : destinations)
          {
            const int ierr =
              MPI_Isend(&myid, 1, MPI_UNSIGNED, el, 32766, mpi_comm,
                        send_requests.data() + (&el - destinations.data()));
            AssertThrowMPI(ierr);
          }

        // ... and receive n_recv_from sender ranks in return:
        std::vector<unsigned int> origins(n_recv_from);
        for (auto &el : origins)
          {
            const int ierr = MPI_Recv(&el, 1, MPI_UNSIGNED, MPI_ANY_SOURCE,
                                      32766, mpi_comm, MPI_STATUS_IGNORE);
            AssertThrowMPI(ierr);
          }

        if (destinations.size() > 0)
          {
            const int ierr = MPI_Waitall(destinations.size(),
                                         send_requests.data(),
                                         MPI_STATUSES_IGNORE);
            AssertThrowMPI(ierr);
          }

        return origins;
      }
  }
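  // A usage sketch (not part of this source file): every rank passes the
  // ranks it wants to send to and gets back the ranks that will send to it;
  // `mpi_comm` and `destinations` are placeholders here.
  //
  //   const std::vector<unsigned int> sources =
  //     Utilities::MPI::compute_point_to_point_communication_pattern(
  //       mpi_comm, destinations);
  //   // post one receive per entry of `sources`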
  unsigned int
  compute_n_point_to_point_communications(
    const MPI_Comm                   mpi_comm,
    const std::vector<unsigned int> &destinations)
  {
    // Check whether the destinations provided to the current process are
    // unique:
    const bool my_destinations_are_unique = [destinations]() {
      std::vector<unsigned int> my_destinations = destinations;
      const unsigned int        n_destinations  = my_destinations.size();
      std::sort(my_destinations.begin(), my_destinations.end());
      my_destinations.erase(std::unique(my_destinations.begin(),
                                        my_destinations.end()),
                            my_destinations.end());
      return (my_destinations.size() == n_destinations);
    }();

    // If all processes report unique destinations, we can use the NBX
    // consensus algorithm and simply count the sources it reports:
    if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
        1)
      {
        return ConsensusAlgorithms::nbx<char, char>(
                 destinations, {}, {}, {}, mpi_comm)
          .size();
      }
    else
      {
        const unsigned int n_procs =
          Utilities::MPI::n_mpi_processes(mpi_comm);

        for (const unsigned int destination : destinations)
          {
            (void)destination;
            AssertIndexRange(destination, n_procs);
            Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
                   ExcMessage(
                     "There is no point in communicating with ourselves."));
          }

        // Count the number of messages to send to each process, then let
        // MPI_Reduce_scatter_block hand each rank the element-wise sum of
        // these vectors, i.e. the number of processes that will send to it:
        std::vector<unsigned int> dest_vector(n_procs);
        for (const auto &el : destinations)
          ++dest_vector[el];

        unsigned int n_recv_from = 0;
        const int    ierr        = MPI_Reduce_scatter_block(
          dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
        AssertThrowMPI(ierr);

        return n_recv_from;
      }
  }
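  // A usage sketch (not part of this source file): when only the number of
  // incoming messages is needed, e.g. to post that many receives:
  //
  //   const unsigned int n_sources =
  //     Utilities::MPI::compute_n_point_to_point_communications(mpi_comm,
  //                                                             destinations);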
  namespace
  {
    // Custom MPI reduction for MinMaxAvg: accumulate the sum, and track
    // min/max together with the rank on which they occur.
    void
    max_reduce(const void *in_lhs_,
               void       *inout_rhs_,
               int        *len,
               MPI_Datatype *)
    {
      const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
      MinMaxAvg       *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

      for (int i = 0; i < *len; ++i)
        {
          inout_rhs[i].sum += in_lhs[i].sum;
          if (inout_rhs[i].min > in_lhs[i].min)
            {
              inout_rhs[i].min       = in_lhs[i].min;
              inout_rhs[i].min_index = in_lhs[i].min_index;
            }
          else if (inout_rhs[i].min == in_lhs[i].min)
            {
              // choose the lower rank index if tied:
              if (inout_rhs[i].min_index > in_lhs[i].min_index)
                inout_rhs[i].min_index = in_lhs[i].min_index;
            }

          if (inout_rhs[i].max < in_lhs[i].max)
            {
              inout_rhs[i].max       = in_lhs[i].max;
              inout_rhs[i].max_index = in_lhs[i].max_index;
            }
          else if (inout_rhs[i].max == in_lhs[i].max)
            {
              // choose the lower rank index if tied:
              if (inout_rhs[i].max_index > in_lhs[i].max_index)
                inout_rhs[i].max_index = in_lhs[i].max_index;
            }
        }
    }
  } // namespace
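  // Note: because ties in min/max are always broken toward the smaller rank
  // index, the result of the reduction above does not depend on the order in
  // which MPI combines the per-rank contributions (up to floating-point
  // rounding in the sum).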
  void
  min_max_avg(const ArrayView<const double> &my_values,
              const ArrayView<MinMaxAvg>    &result,
              const MPI_Comm                 mpi_communicator)
  {
    // If running without MPI, or on a single process, the answer is
    // trivially the set of local values:
    if (job_supports_mpi() == false ||
        Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
      {
        for (unsigned int i = 0; i < my_values.size(); ++i)
          {
            result[i].sum       = my_values[i];
            result[i].avg       = my_values[i];
            result[i].min       = my_values[i];
            result[i].max       = my_values[i];
            result[i].min_index = 0;
            result[i].max_index = 0;
          }
        return;
      }
    // Create an MPI datatype matching the layout of the MinMaxAvg struct
    // exactly once, and free it again just before MPI_Finalize():
    static MPI_Datatype type = []() {
      MPI_Datatype type;

      int lengths[] = {3, 2, 1};

      MPI_Aint displacements[] = {0,
                                  offsetof(MinMaxAvg, min_index),
                                  offsetof(MinMaxAvg, avg)};

      MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

      int ierr =
        MPI_Type_create_struct(3, lengths, displacements, types, &type);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_commit(&type);
      AssertThrowMPI(ierr);

      MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
        int ierr = MPI_Type_free(&type);
        AssertThrowMPI(ierr);
      });

      return type;
    }();
    // Similarly, create the reduction operation exactly once, and free it
    // again at the end of the program run:
    static MPI_Op op = []() {
      MPI_Op op;

      int ierr =
        MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                      static_cast<int>(true),
                      &op);
      AssertThrowMPI(ierr);

      MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
        int ierr = MPI_Op_free(&op);
        AssertThrowMPI(ierr);
      });

      return op;
    }();
    AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
                    Utilities::MPI::max(my_values.size(), mpi_communicator));
    AssertDimension(my_values.size(), result.size());

    // To avoid uninitialized values on some MPI implementations, provide
    // result with a default value already:
    MinMaxAvg dummy = {0.,
                       std::numeric_limits<double>::max(),
                       std::numeric_limits<double>::lowest(),
                       0,
                       0,
                       0.};
    for (auto &i : result)
      i = dummy;

    const unsigned int my_id =
      dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
    const unsigned int numproc =
      dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);

    std::vector<MinMaxAvg> in(my_values.size());

    for (unsigned int i = 0; i < my_values.size(); ++i)
      {
        in[i].sum = in[i].min = in[i].max = my_values[i];
        in[i].min_index = in[i].max_index = my_id;
      }

    int ierr = MPI_Allreduce(
      in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
    AssertThrowMPI(ierr);

    for (auto &r : result)
      r.avg = r.sum / numproc;
  }
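  // A usage sketch (not part of this source file): the scalar overload wraps
  // the implementation above.
  //
  //   const Utilities::MPI::MinMaxAvg stats =
  //     Utilities::MPI::min_max_avg(local_value, mpi_communicator);
  //   // stats.min_index/max_index are the ranks attaining the extrema;
  //   // stats.avg equals the global sum divided by the number of ranks.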
#else

  std::vector<unsigned int>
  mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
  {
    return std::vector<unsigned int>{0};
  }

  std::vector<IndexSet>
  create_ascending_partitioning(
    const MPI_Comm /*comm*/,
    const types::global_dof_index locally_owned_size)
  {
    return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
  }

  MPI_Comm
  duplicate_communicator(const MPI_Comm mpi_communicator)
  {
    return mpi_communicator;
  }

  void
  min_max_avg(const ArrayView<const double> &my_values,
              const ArrayView<MinMaxAvg>    &result,
              const MPI_Comm)
  {
    AssertDimension(my_values.size(), result.size());

    for (unsigned int i = 0; i < my_values.size(); ++i)
      {
        result[i].sum       = my_values[i];
        result[i].avg       = my_values[i];
        result[i].min       = my_values[i];
        result[i].max       = my_values[i];
        result[i].min_index = 0;
        result[i].max_index = 0;
      }
  }
#endif
  /* Force initialization of the static signals struct: */
  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
    MPI_InitFinalize::Signals();


  MPI_InitFinalize::MPI_InitFinalize(int               &argc,
                                     char            **&argv,
                                     const unsigned int max_num_threads)
  {
    static bool constructor_has_already_run = false;
    (void)constructor_has_already_run;
    Assert(constructor_has_already_run == false,
           ExcMessage("You can only create a single object of this class "
                      "in a program since it initializes the MPI system."));
    int ierr = 0;
#ifdef DEAL_II_WITH_MPI
    // Initialize MPI unless someone has already done so:
    int MPI_has_been_started = 0;
    ierr = MPI_Initialized(&MPI_has_been_started);
    AssertThrowMPI(ierr);
    AssertThrow(MPI_has_been_started == 0,
                ExcMessage("MPI error. You can only start MPI once!"));

    int provided;
    // This works like ierr = MPI_Init(&argc, &argv), but additionally tells
    // MPI that we might use several threads but never call two MPI functions
    // at the same time:
    int wanted = MPI_THREAD_SERIALIZED;
    ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
    AssertThrowMPI(ierr);
#endif
    // Initialize Kokkos. Filter out --help so that Kokkos does not print its
    // own help text and stop the program, and append a flag limiting the
    // number of threads Kokkos may use:
    {
      std::vector<char *> argv_new;
      for (auto *const arg : make_array_view(&argv[0], &argv[0] + argc))
        if (strcmp(arg, "--help") != 0)
          argv_new.push_back(arg);

      std::stringstream threads_flag;
#if KOKKOS_VERSION >= 30700
      threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
#else
      threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
#endif
      const std::string threads_flag_string = threads_flag.str();
      argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
      argv_new.push_back(nullptr);

      // Kokkos::initialize takes 'int &' as its first argument, so we need a
      // named variable; argc itself must keep its original value:
      int argc_new = argv_new.size() - 1;
      Kokkos::initialize(argc_new, argv_new.data());
    }
#ifdef DEAL_II_WITH_PETSC
    PetscErrorCode pierr;
#  ifdef DEAL_II_WITH_SLEPC
    // Initialize SLEPc (which initializes PETSc as well):
    pierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
    AssertThrow(pierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(pierr));
#  else
    // or just initialize PETSc alone:
    pierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
    AssertThrow(pierr == 0, ExcPETScError(pierr));
#  endif

    // Disable PETSc's signal handler, which tends to produce long walls of
    // unhelpful text on errors:
    pierr = PetscPopSignalHandler();
    AssertThrow(pierr == 0, ExcPETScError(pierr));
#endif
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
    float version;
    Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
    // Initialize p4est and the underlying libsc components:
#  if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
    sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#  endif
    p4est_init(nullptr, SC_LP_SILENT);
#endif

    constructor_has_already_run = true;
    // Now also see how many threads we would like to run:
    if (max_num_threads != numbers::invalid_unsigned_int)
      {
        // Set the maximum number of threads based on what the user asked for:
        MultithreadInfo::set_thread_limit(max_num_threads);
      }
    else
      // The user wants an automatic choice:
      {
#ifdef DEAL_II_WITH_MPI
        // Figure out how many MPI processes share the current node: gather
        // all hostnames and count how often our own appears. In calculating
        // the length of the string, don't forget the terminating '\0':
        const std::string  hostname = Utilities::System::get_hostname();
        const unsigned int max_hostname_size =
          Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);

        std::vector<char> hostname_array(max_hostname_size);
        std::copy(hostname.c_str(),
                  hostname.c_str() + hostname.size() + 1,
                  hostname_array.begin());

        std::vector<char> all_hostnames(max_hostname_size *
                                        MPI::n_mpi_processes(MPI_COMM_WORLD));
        const int ierr = MPI_Allgather(hostname_array.data(),
                                       max_hostname_size, MPI_CHAR,
                                       all_hostnames.data(),
                                       max_hostname_size, MPI_CHAR,
                                       MPI_COMM_WORLD);
        AssertThrowMPI(ierr);

        // Search how often our own hostname appears, and which instance
        // among the processes on this host we are:
        unsigned int n_local_processes   = 0;
        unsigned int nth_process_on_host = 0;
        for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
          if (std::string(all_hostnames.data() + i * max_hostname_size) ==
              hostname)
            {
              ++n_local_processes;
              if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
                ++nth_process_on_host;
            }

        // Divide the cores of this host evenly among its processes: if the
        // division has a remainder, the first few processes get one core
        // more, and every process gets at least one thread:
        const unsigned int n_threads =
          std::max(MultithreadInfo::n_cores() / n_local_processes +
                     (nth_process_on_host <=
                          MultithreadInfo::n_cores() % n_local_processes ?
                        1 :
                        0),
                   1U);
#else
        const unsigned int n_threads = MultithreadInfo::n_cores();
#endif
        MultithreadInfo::set_thread_limit(n_threads);
      }

    // As a final step, call the at_mpi_init signal handler:
    signals.at_mpi_init();
  }
937 "You tried to call unregister_request() with an invalid request."));
  MPI_InitFinalize::~MPI_InitFinalize()
  {
    // First, call the at_mpi_finalize signal handler:
    signals.at_mpi_finalize();

#ifdef DEAL_II_WITH_MPI
    // Before exiting, wait for nonblocking communication to complete:
    for (auto *request : requests)
      {
        const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);
      }

    // Let the memory pools release all MPI-based vectors that are no longer
    // used at this point:
    GrowingVectorMemory<
      LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
    GrowingVectorMemory<
      LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
#  ifdef DEAL_II_WITH_TRILINOS
    GrowingVectorMemory<TrilinosWrappers::MPI::Vector>::release_unused_memory();
    GrowingVectorMemory<
      TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
#  endif
#endif

    // Now deal with PETSc (with or without MPI). Only delete the vectors if
    // finalize hasn't been called yet, otherwise this will lead to errors:
#ifdef DEAL_II_WITH_PETSC
    if (!PetscFinalizeCalled)
      {
        GrowingVectorMemory<
          PETScWrappers::MPI::Vector>::release_unused_memory();
        GrowingVectorMemory<
          PETScWrappers::MPI::BlockVector>::release_unused_memory();
      }
#  ifdef DEAL_II_WITH_SLEPC
    // ... and end SLEPc (which also finalizes PETSc):
    {
      PetscErrorCode ierr = SlepcFinalize();
      AssertThrow(ierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
    }
#  else
    // ... or just end PETSc:
    {
      PetscErrorCode ierr = PetscFinalize();
      AssertThrow(ierr == 0, ExcPETScError(ierr));
    }
#  endif
#endif
#ifdef DEAL_II_WITH_P4EST
    // Now end libsc; p4est itself has no finalize function:
    sc_finalize();
#endif

    // Only call MPI_Finalize if we are running with MPI, and only if no
    // exception is currently in flight (finalizing during stack unwinding
    // could deadlock the other ranks):
#ifdef DEAL_II_WITH_MPI
    if (job_supports_mpi() == true)
      {
#  if __cpp_lib_uncaught_exceptions >= 201411
        // std::uncaught_exception() is deprecated in C++17:
        if (std::uncaught_exceptions() > 0)
#  else
        if (std::uncaught_exception() == true)
#  endif
          {
            // do not try to call MPI_Finalize to avoid a deadlock
          }
        else
          {
            const int ierr = MPI_Finalize();
            (void)ierr;
            AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
          }
      }
#endif
  }
  bool
  job_supports_mpi()
  {
#ifdef DEAL_II_WITH_MPI
    int       MPI_has_been_started = 0;
    const int ierr                 = MPI_Initialized(&MPI_has_been_started);
    AssertThrowMPI(ierr);

    return (MPI_has_been_started > 0);
#else
    return false;
#endif
  }
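  // A usage sketch (not part of this source file): job_supports_mpi() lets
  // code run unchanged in serial and parallel contexts:
  //
  //   if (Utilities::MPI::job_supports_mpi())
  //     {
  //       const int ierr = MPI_Barrier(comm);
  //       AssertThrowMPI(ierr);
  //     }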
  std::vector<unsigned int>
  compute_index_owner(const IndexSet &owned_indices,
                      const IndexSet &indices_to_look_up,
                      const MPI_Comm  comm)
  {
    Assert(owned_indices.size() == indices_to_look_up.size(),
           ExcMessage("IndexSets have to have the same sizes."));

    Assert(
      owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
      ExcMessage("IndexSets have to have the same size on all processes."));

    std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

    // Step 1: set up a dictionary that is used to send messages with local
    // indices in a dense format:
    internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
      owned_indices, indices_to_look_up, comm, owning_ranks);

    // Step 2: read the dictionary by communicating with the process that
    // owns each index in the static partition; that process returns the
    // actual owner of the index:
    ConsensusAlgorithms::Selector<
      std::vector<
        std::pair<types::global_dof_index, types::global_dof_index>>,
      std::vector<unsigned int>>
      consensus_algorithm;
    consensus_algorithm.run(process, comm);

    return owning_ranks;
  }
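  // A usage sketch (not part of this source file): each rank passes the set
  // of indices it owns and the (possibly foreign) indices it needs, and
  // obtains the owner rank of each requested index; `owned`, `ghosted`, and
  // `comm` are placeholders.
  //
  //   const std::vector<unsigned int> owners =
  //     Utilities::MPI::compute_index_owner(owned, ghosted, comm);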
  namespace internal
  {
    namespace CollectiveMutexImplementation
    {
      /**
       * Abort the program if an exception is currently being processed,
       * since handling it inside a CollectiveMutex-guarded section would
       * likely deadlock.
       */
      void
      check_exception()
      {
#ifdef DEAL_II_WITH_MPI
#  if __cpp_lib_uncaught_exceptions >= 201411
        // std::uncaught_exception() is deprecated in C++17:
        if (std::uncaught_exceptions() != 0)
#  else
        if (std::uncaught_exception() == true)
#  endif
          {
            std::cerr
              << "---------------------------------------------------------\n"
              << "An exception was thrown inside a section of the program\n"
              << "guarded by a CollectiveMutex.\n"
              << "Because a CollectiveMutex guards critical communication\n"
              << "handling the exception would likely\n"
              << "deadlock because only the current process is aware of the\n"
              << "exception. To prevent this deadlock, the program will be\n"
              << "aborted.\n"
              << "---------------------------------------------------------"
              << std::endl;

            MPI_Abort(MPI_COMM_WORLD, 1);
          }
#endif
      }
    } // namespace CollectiveMutexImplementation
  }   // namespace internal
  CollectiveMutex::CollectiveMutex()
    : locked(false)
    , request(MPI_REQUEST_NULL)
  {
    Utilities::MPI::MPI_InitFinalize::register_request(request);
  }


  CollectiveMutex::~CollectiveMutex()
  {
    // If this destructor is called during exception handling, abort:
    internal::CollectiveMutexImplementation::check_exception();

    Assert(
      !locked,
      ExcMessage(
        "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

    Utilities::MPI::MPI_InitFinalize::unregister_request(request);
  }


  void
  CollectiveMutex::lock(const MPI_Comm comm)
  {
    Assert(
      !locked,
      ExcMessage(
        "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));

#ifdef DEAL_II_WITH_MPI
    if (job_supports_mpi())
      {
        // TODO: For now, lock() and unlock() are implemented with a blocking
        // barrier; a nonblocking implementation would instead wait on
        // `request` here:
        const int ierr = MPI_Barrier(comm);
        AssertThrowMPI(ierr);
      }
#endif

    locked = true;
  }


  void
  CollectiveMutex::unlock(const MPI_Comm comm)
  {
    // If this function is called during exception handling, abort:
    internal::CollectiveMutexImplementation::check_exception();

    Assert(
      locked,
      ExcMessage(
        "Error: MPI::CollectiveMutex needs to be locked before unlock()"));

#ifdef DEAL_II_WITH_MPI
    if (job_supports_mpi())
      {
        const int ierr = MPI_Barrier(comm);
        AssertThrowMPI(ierr);
      }
#endif

    locked = false;
  }
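  // A usage sketch (not part of this source file): the mutex is typically
  // used through its RAII helper so that unlock() also runs on early
  // returns:
  //
  //   static Utilities::MPI::CollectiveMutex      mutex;
  //   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
  //   // ... critical communication that must not interleave ...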
  // explicit instantiations

  template bool
  reduce(const bool &,
         const MPI_Comm,
         const std::function<bool(const bool &, const bool &)> &,
         const unsigned int);

  template std::vector<bool>
  reduce(const std::vector<bool> &,
         const MPI_Comm,
         const std::function<std::vector<bool>(const std::vector<bool> &,
                                               const std::vector<bool> &)> &,
         const unsigned int);

  template bool
  all_reduce(const bool &,
             const MPI_Comm,
             const std::function<bool(const bool &, const bool &)> &);

  template std::vector<bool>
  all_reduce(
    const std::vector<bool> &,
    const MPI_Comm,
    const std::function<std::vector<bool>(const std::vector<bool> &,
                                          const std::vector<bool> &)> &);

  template void
  internal::all_reduce<bool>(const MPI_Op &,
                             const ArrayView<const bool> &,
                             const MPI_Comm,
                             const ArrayView<bool> &);

  template bool
  logical_or<bool>(const bool &, const MPI_Comm);

  template std::vector<unsigned int>
  compute_set_union(const std::vector<unsigned int> &vec, const MPI_Comm comm);

  template std::set<unsigned int>
  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);