#include <deal.II/base/mpi.templates.h>

#include <boost/serialization/utility.hpp>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <petscsys.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <slepcsys.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif
    IndexSet
    create_evenly_distributed_partitioning(
      const unsigned int            my_partition_id,
      const unsigned int            n_partitions,
      const types::global_dof_index total_size)
    {
      static_assert(
        std::is_same<types::global_dof_index, IndexSet::size_type>::value,
        "IndexSet::size_type must match types::global_dof_index for "
        "using this function");

      const unsigned int        remain   = total_size % n_partitions;
      const IndexSet::size_type min_size = total_size / n_partitions;

      // The first 'remain' partitions receive one extra index each:
      const IndexSet::size_type begin =
        min_size * my_partition_id + std::min(my_partition_id, remain);
      const IndexSet::size_type end =
        min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);

      IndexSet result(total_size);
      result.add_range(begin, end);
      return result;
    }
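    // Illustrative sketch, not part of the original file: what the function
    // above computes for a small case. With total_size = 10 and
    // n_partitions = 3, the three partitions own [0,4), [4,7), and [7,10).
    // 'example_evenly_distributed_partitioning' is a hypothetical helper.
    void example_evenly_distributed_partitioning()
    {
      const IndexSet part0 = create_evenly_distributed_partitioning(0, 3, 10);
      Assert(part0.n_elements() == 4, ExcInternalError());
    }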
#ifdef DEAL_II_WITH_MPI
    // Out-of-class definitions of the mpi_type_id_for_type<T> variable
    // template declared in mpi.h:
    template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
    template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
#endif
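    // Illustrative sketch, not part of the original file: generic code can use
    // the variable template defined above to pick the matching MPI_Datatype
    // for a C++ type. 'example_sum_over_communicator' is a hypothetical helper.
    template <typename T>
    T example_sum_over_communicator(const T local, const MPI_Comm &comm)
    {
      T global = local;
#ifdef DEAL_II_WITH_MPI
      // mpi_type_id_for_type<T> resolves to MPI_DOUBLE, MPI_INT, ... for the
      // corresponding T.
      MPI_Allreduce(MPI_IN_PLACE,
                    &global,
                    1,
                    Utilities::MPI::mpi_type_id_for_type<T>,
                    MPI_SUM,
                    comm);
#endif
      return global;
    }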
    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
    {
      MinMaxAvg result;
      // delegate to the array-based overload implemented further down
      min_max_avg(ArrayView<const double>(my_value),
                  ArrayView<MinMaxAvg>(result),
                  mpi_communicator);
      return result;
    }

    std::vector<MinMaxAvg>
    min_max_avg(const std::vector<double> &my_values,
                const MPI_Comm &           mpi_communicator)
    {
      std::vector<MinMaxAvg> results(my_values.size());
      min_max_avg(my_values, results, mpi_communicator);
      return results;
    }
#ifdef DEAL_II_WITH_MPI
    // n_mpi_processes() and this_mpi_process() are thin wrappers around the
    // corresponding MPI queries:
    const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs); // n_mpi_processes()
    const int ierr = MPI_Comm_rank(mpi_communicator, &rank);   // this_mpi_process()
    const std::vector<unsigned int>
    mpi_processes_within_communicator(const MPI_Comm &comm_large,
                                      const MPI_Comm &comm_small)
    {
      if (Utilities::MPI::job_supports_mpi() == false)
        return std::vector<unsigned int>{0};

      // Gather the rank (within comm_large) of every process in comm_small:
      const unsigned int rank = this_mpi_process(comm_large);
      const unsigned int size = n_mpi_processes(comm_small);

      std::vector<unsigned int> ranks(size);
      const int                 ierr = MPI_Allgather(
        &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
      AssertThrowMPI(ierr);

      return ranks;
    }
    // Inside duplicate_communicator():
    MPI_Comm  new_communicator;
    const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
    return new_communicator;

    // Inside free_communicator():
    const int ierr = MPI_Comm_free(&mpi_communicator);
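    // Illustrative sketch, not part of the original file: a duplicated
    // communicator gives library code its own communication context and must
    // later be freed again. 'example_duplicate_and_free' is hypothetical.
    void example_duplicate_and_free(const MPI_Comm &user_comm)
    {
      MPI_Comm my_comm = duplicate_communicator(user_comm);

      // ... communicate on my_comm without interfering with user_comm ...

      free_communicator(my_comm);
    }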
    // Inside create_group(const MPI_Comm &comm, const MPI_Group &group,
    //                     const int tag, MPI_Comm *new_comm):
    const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
    AssertThrowMPI(ierr);
    std::vector<IndexSet>
    create_ascending_partitioning(
      const MPI_Comm &              comm,
      const types::global_dof_index locally_owned_size)
    {
      static_assert(
        std::is_same<types::global_dof_index, IndexSet::size_type>::value,
        "IndexSet::size_type must match types::global_dof_index for "
        "using this function");

      const unsigned int                     n_proc = n_mpi_processes(comm);
      const std::vector<IndexSet::size_type> sizes =
        all_gather(comm, locally_owned_size);
      const auto total_size =
        std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));

      // Each process owns the contiguous range following those of all lower
      // ranks:
      IndexSet::size_type begin = 0;
      for (unsigned int i = 0; i < n_proc; ++i)
        {
          res[i].add_range(begin, begin + sizes[i]);
          begin = begin + sizes[i];
        }

      return res;
    }
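    // Illustrative sketch, not part of the original file: on two processes
    // where rank 0 contributes 3 locally owned elements and rank 1 contributes
    // 5, every rank obtains the same partitioning { [0,3), [3,8) }.
    // 'example_ascending_partitioning' is a hypothetical helper.
    void example_ascending_partitioning(const MPI_Comm &comm)
    {
      const types::global_dof_index my_size =
        3 + 2 * this_mpi_process(comm); // 3 on rank 0, 5 on rank 1

      const std::vector<IndexSet> partition =
        create_ascending_partitioning(comm, my_size);

      Assert(partition.size() == n_mpi_processes(comm), ExcInternalError());
    }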
    IndexSet
    create_evenly_distributed_partitioning(
      const MPI_Comm &              comm,
      const types::global_dof_index total_size)
    // (delegates to the rank-based overload defined near the top of this file)
    std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
    create_mpi_data_type_n_bytes(const std::size_t n_bytes)
    {
      MPI_Datatype result;
      int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_CHAR, &result);
      ierr     = MPI_Type_commit(&result);

      MPI_Count size64;
      ierr = MPI_Type_size_x(result, &size64); // debug-mode size check

      // The deleter frees the MPI data type and the heap-allocated handle
      // once the unique_ptr goes out of scope:
      auto deleter = [](MPI_Datatype *p) {
        const int ierr = MPI_Type_free(p);
        (void)ierr;
        delete p;
      };

      return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
        new MPI_Datatype(result), deleter);
    }
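    // Illustrative sketch, not part of the original file: the data type
    // created above lets one send buffers whose size exceeds the range of the
    // 'int' count parameter by sending a single element of a type that spans
    // all bytes. 'example_send_large_buffer' and its parameters are
    // hypothetical.
    void example_send_large_buffer(const std::vector<char> &buffer,
                                   const int                dest,
                                   const int                tag,
                                   const MPI_Comm &         comm)
    {
#ifdef DEAL_II_WITH_MPI
      const auto bigtype = create_mpi_data_type_n_bytes(buffer.size());
      MPI_Send(buffer.data(), 1, *bigtype, dest, tag, comm);
#endif
    }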
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (const unsigned int destination : destinations)
        AssertIndexRange(destination, n_procs);

      // Check whether the destinations provided to the current process are
      // unique: sort a copy and look for adjacent duplicates.
      const bool my_destinations_are_unique = [destinations]() {
        if (destinations.size() == 0)
          return true;
        else
          {
            std::vector<unsigned int> my_destinations = destinations;
            std::sort(my_destinations.begin(), my_destinations.end());
            return (std::adjacent_find(my_destinations.begin(),
                                       my_destinations.end()) ==
                    my_destinations.end());
          }
      }();

      // If every process has unique destinations, a consensus algorithm
      // (which is only implemented for that case) answers the question
      // directly:
      if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
          1)
        return ConsensusAlgorithms::nbx<char, char>(
          destinations, {}, {}, {}, mpi_comm);
      // Otherwise fall back to a scheme based on MPI_Reduce_scatter_block.
      // First count how many messages this process sends to each rank ...
      const int mpi_tag =
        internal::Tags::compute_point_to_point_communication_pattern;

      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

      // ... then let every process learn how many ranks will send to it:
      unsigned int n_recv_from;
      const int    ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
      AssertThrowMPI(ierr);

      // Send this process's rank to every destination:
      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
        MPI_Isend(&myid, 1, MPI_UNSIGNED, el, mpi_tag, mpi_comm,
                  send_requests.data() + (&el - destinations.data()));

      // Receive n_recv_from messages and record who they came from:
      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
        {
          const int ierr = MPI_Recv(&el, 1, MPI_UNSIGNED, MPI_ANY_SOURCE,
                                    mpi_tag, mpi_comm, MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      if (destinations.size() > 0)
        {
          const int ierr = MPI_Waitall(destinations.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      return origins;
    }
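    // Illustrative sketch, not part of the original file: each process lists
    // the ranks it wants to send to, and the function returns the ranks that
    // will send to it -- exactly the information needed to post matching
    // receives. 'example_ring_pattern' is a hypothetical helper.
    void example_ring_pattern(const MPI_Comm &comm)
    {
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
      const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
      if (n_ranks == 1)
        return;

      // Everyone sends one message to the next rank in a ring:
      const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};

      const std::vector<unsigned int> origins =
        Utilities::MPI::compute_point_to_point_communication_pattern(
          comm, destinations);
      // 'origins' now contains exactly one entry: the previous rank.
      Assert(origins.size() == 1, ExcInternalError());
    }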
    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      // Check whether the destinations provided to the current process are
      // unique (sort, drop duplicates, compare sizes):
      const bool my_destinations_are_unique = [destinations]() {
        std::vector<unsigned int> my_destinations = destinations;
        const unsigned int        n_destinations  = my_destinations.size();
        std::sort(my_destinations.begin(), my_destinations.end());
        my_destinations.erase(std::unique(my_destinations.begin(),
                                          my_destinations.end()),
                              my_destinations.end());
        return (my_destinations.size() == n_destinations);
      }();

      // If they are unique on all processes, the consensus algorithm already
      // computes the set of origins; its size is the answer:
      if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
          1)
        {
          return ConsensusAlgorithms::nbx<char, char>(
                   destinations, {}, {}, {}, mpi_comm)
            .size();
        }
      else
        {
          const unsigned int n_procs =
            Utilities::MPI::n_mpi_processes(mpi_comm);

          for (const unsigned int destination : destinations)
            Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
                   ExcMessage(
                     "There is no point in communicating with ourselves."));

          // Count messages per destination, then reduce-scatter to find how
          // many processes will send to this one:
          std::vector<unsigned int> dest_vector(n_procs);
          for (const auto &el : destinations)
            ++dest_vector[el];

          unsigned int n_recv_from = 0;
          const int    ierr        = MPI_Reduce_scatter_block(
            dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
          AssertThrowMPI(ierr);

          return n_recv_from;
        }
    }
    namespace
    {
      void
      max_reduce(const void *in_lhs_,
                 void *      inout_rhs_,
                 int *       len,
                 MPI_Datatype *)
      {
        const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
        MinMaxAvg *      inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

        for (int i = 0; i < *len; ++i)
          {
            inout_rhs[i].sum += in_lhs[i].sum;

            if (inout_rhs[i].min > in_lhs[i].min)
              {
                inout_rhs[i].min       = in_lhs[i].min;
                inout_rhs[i].min_index = in_lhs[i].min_index;
              }
            else if (inout_rhs[i].min == in_lhs[i].min)
              {
                // choose the lower index if the minimum is attained twice
                if (inout_rhs[i].min_index > in_lhs[i].min_index)
                  inout_rhs[i].min_index = in_lhs[i].min_index;
              }

            if (inout_rhs[i].max < in_lhs[i].max)
              {
                inout_rhs[i].max       = in_lhs[i].max;
                inout_rhs[i].max_index = in_lhs[i].max_index;
              }
            else if (inout_rhs[i].max == in_lhs[i].max)
              {
                // choose the lower index if the maximum is attained twice
                if (inout_rhs[i].max_index > in_lhs[i].max_index)
                  inout_rhs[i].max_index = in_lhs[i].max_index;
              }
          }
      }
    } // namespace
    void
    min_max_avg(const ArrayView<const double> &my_values,
                const ArrayView<MinMaxAvg> &   result,
                const MPI_Comm &               mpi_communicator)
    {
      // If MPI was not started or there is only one process, this is a serial
      // computation and the result can be filled directly:
      if (job_supports_mpi() == false ||
          Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
        {
          for (unsigned int i = 0; i < my_values.size(); ++i)
            {
              result[i].sum       = my_values[i];
              result[i].avg       = my_values[i];
              result[i].min       = my_values[i];
              result[i].max       = my_values[i];
              result[i].min_index = 0;
              result[i].max_index = 0;
            }
          return;
        }
      // Create (once) an MPI data type that matches the layout of MinMaxAvg:
      // three doubles (sum, min, max), two ints (min_index, max_index), and a
      // trailing double (avg):
      static MPI_Datatype type = []() {
        MPI_Datatype type;

        int      lengths[]       = {3, 2, 1};
        MPI_Aint displacements[] = {0,
                                    offsetof(MinMaxAvg, min_index),
                                    offsetof(MinMaxAvg, avg)};
        MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

        int ierr =
          MPI_Type_create_struct(3, lengths, displacements, types, &type);
        ierr = MPI_Type_commit(&type);
        AssertThrowMPI(ierr);

        // Free the data type again just before MPI_Finalize():
        MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
          int ierr = MPI_Type_free(&type);
          AssertThrowMPI(ierr);
        });

        return type;
      }();
      // Create (once) the corresponding commutative reduction operation:
      static MPI_Op op = []() {
        MPI_Op op;

        int ierr =
          MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                        static_cast<int>(true),
                        &op);
        AssertThrowMPI(ierr);

        // Free the operation again just before MPI_Finalize():
        MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
          int ierr = MPI_Op_free(&op);
          AssertThrowMPI(ierr);
        });

        return op;
      }();
      // To avoid uninitialized values on some MPI implementations, seed the
      // result with a neutral element first ...
      MinMaxAvg dummy = {0.,
                         std::numeric_limits<double>::max(),
                         std::numeric_limits<double>::lowest(),
                         0,
                         0,
                         0.};
      for (auto &i : result)
        i = dummy;

      const unsigned int my_id =
        dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
      const unsigned int numproc =
        dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);

      // ... then fill the local contributions and reduce them with the custom
      // data type and operation created above:
      std::vector<MinMaxAvg> in(my_values.size());
      for (unsigned int i = 0; i < my_values.size(); ++i)
        {
          in[i].sum = in[i].min = in[i].max = my_values[i];
          in[i].min_index = in[i].max_index = my_id;
        }

      int ierr = MPI_Allreduce(
        in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
      AssertThrowMPI(ierr);

      for (auto &r : result)
        r.avg = r.sum / numproc;
    }
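    // Illustrative sketch, not part of the original file: the scalar overload
    // reduces one number per process and also reports on which ranks the
    // extrema were attained. 'example_min_max_avg' is a hypothetical helper.
    void example_min_max_avg(const MPI_Comm &comm)
    {
      const double local_value = this_mpi_process(comm);

      const MinMaxAvg stats = min_max_avg(local_value, comm);
      // On p processes: stats.min == 0, stats.max == p-1,
      // stats.sum == p*(p-1)/2, stats.avg == (p-1)/2.0,
      // stats.min_index == 0 and stats.max_index == p-1.
      (void)stats;
    }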
    // Fallback implementations used when deal.II is configured without MPI:

    // Inside mpi_processes_within_communicator():
    return std::vector<unsigned int>{0};

    // Inside create_ascending_partitioning(): a single IndexSet that covers
    // the whole locally owned range.

    // Inside duplicate_communicator():
    return mpi_communicator;
    // Inside the non-MPI variant of min_max_avg(): every entry is its own
    // minimum, maximum, sum, and average.
    for (unsigned int i = 0; i < my_values.size(); ++i)
      {
        result[i].sum       = my_values[i];
        result[i].avg       = my_values[i];
        result[i].min       = my_values[i];
        result[i].max       = my_values[i];
        result[i].min_index = 0;
        result[i].max_index = 0;
      }
    /* Definition (and forced initialization) of the static signals object: */
    MPI_InitFinalize::Signals MPI_InitFinalize::signals =
      MPI_InitFinalize::Signals();
    MPI_InitFinalize::MPI_InitFinalize(int &              argc,
                                       char **&           argv,
                                       const unsigned int max_num_threads)
    {
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert(constructor_has_already_run == false,
             ExcMessage("You can only create a single object of this class "
                        "in a program since it initializes the MPI system."));

      int ierr = 0;
      (void)ierr;
#ifdef DEAL_II_WITH_MPI
      // If MPI has not been started yet, do so now, asking for a threading
      // level in which only one thread at a time makes MPI calls:
      int MPI_has_been_started = 0;
      ierr                     = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      AssertThrow(MPI_has_been_started == 0,
                  ExcMessage("MPI error. You can only start MPI once!"));

      int provided;
      int wanted = MPI_THREAD_SERIALIZED;
      ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
      AssertThrowMPI(ierr);
      // ...
#endif

      // Initialize the dependent external libraries:
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      // Initialize SLEPc (which also initializes PETSc):
      ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(ierr == 0, ExcSLEPcError(ierr));
#  else
      // or just initialize PETSc alone:
      ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
#  endif

      // Disable PETSc's own signal handler; errors are reported through
      // deal.II's Assert mechanism instead:
      PetscPopSignalHandler();
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
      float version;
      Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
      // Initialize p4est and the sc library it is built on:
#  if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
      // ...
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#  endif
      p4est_init(nullptr, SC_LP_SILENT);
#endif

      constructor_has_already_run = true;
      // If no explicit thread limit was requested, determine how many MPI
      // processes share this node by gathering all hostnames:
#ifdef DEAL_II_WITH_MPI
      const std::string  hostname = Utilities::System::get_hostname();
      const unsigned int max_hostname_size =
        Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);

      std::vector<char> hostname_array(max_hostname_size);
      std::copy(hostname.c_str(),
                hostname.c_str() + hostname.size() + 1,
                hostname_array.begin());

      std::vector<char> all_hostnames(max_hostname_size *
                                      MPI::n_mpi_processes(MPI_COMM_WORLD));
      const int ierr = MPI_Allgather(hostname_array.data(),
                                     max_hostname_size, MPI_CHAR,
                                     all_hostnames.data(),
                                     max_hostname_size, MPI_CHAR,
                                     MPI_COMM_WORLD);
      AssertThrowMPI(ierr);

      // Count the processes on this host and determine which one we are:
      unsigned int n_local_processes   = 0;
      unsigned int nth_process_on_host = 0;
      for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
        if (std::string(all_hostnames.data() + i * max_hostname_size) ==
            hostname)
          {
            ++n_local_processes;
            if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
              ++nth_process_on_host;
          }

      // Split the cores of this node among its processes; the first few
      // processes get one extra core if the division is not even:
      const unsigned int n_threads =
        std::max(MultithreadInfo::n_cores() / n_local_processes +
                   (nth_process_on_host <=
                    MultithreadInfo::n_cores() % n_local_processes ? 1 : 0),
                 1U);
#else
      const unsigned int n_threads = MultithreadInfo::n_cores();
#endif
      MultithreadInfo::set_thread_limit(n_threads);
    }
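    // Illustrative sketch, not part of the original file: the intended usage
    // pattern of MPI_InitFinalize in an application. 'example_main' stands in
    // for the application's main(); all configured libraries are initialized
    // by the constructor and torn down again when the object goes out of
    // scope.
    int example_main(int argc, char **argv)
    {
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      const unsigned int rank =
        Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
      (void)rank;

      return 0; // <- MPI_Finalize() etc. happen in the destructor here
    }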
910 "You tried to call unregister_request() with an invalid request."));
931 #ifdef DEAL_II_WITH_MPI
935 const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
943 release_unused_memory();
947 release_unused_memory();
950 # ifdef DEAL_II_WITH_TRILINOS
961 #ifdef DEAL_II_WITH_PETSC
962 if ((PetscInitializeCalled == PETSC_TRUE) &&
963 (PetscFinalizeCalled == PETSC_FALSE))
970 # ifdef DEAL_II_WITH_SLEPC
983 #ifdef DEAL_II_WITH_CUDA
986 release_unused_memory();
989 release_unused_memory();
992 #ifdef DEAL_II_WITH_P4EST
1002 #ifdef DEAL_II_WITH_MPI
1005 # if __cpp_lib_uncaught_exceptions >= 201411
1007 if (std::uncaught_exceptions() > 0)
1009 if (std::uncaught_exception() ==
true)
    bool
    job_supports_mpi()
    {
#ifdef DEAL_II_WITH_MPI
      int       MPI_has_been_started = 0;
      const int ierr                 = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);

      return (MPI_has_been_started > 0);
#else
      return false;
#endif
    }
    std::vector<unsigned int>
    compute_index_owner(const IndexSet &owned_indices,
                        const IndexSet &indices_to_look_up,
                        const MPI_Comm &comm)
    {
      Assert(owned_indices.size() == indices_to_look_up.size(),
             ExcMessage("IndexSets have to have the same sizes."));

      Assert(
        owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
        ExcMessage("IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

      // Step 1: set up a dictionary in which the owned indices are
      // repartitioned statically among the processes:
      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
        owned_indices, indices_to_look_up, comm, owning_ranks);

      // Step 2: ask the dictionary owners for the actual owner of each index
      // we are looking up, using a consensus algorithm:
      ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
        consensus_algorithm(process, comm);
      consensus_algorithm.run();

      return owning_ranks;
    }
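    // Illustrative sketch, not part of the original file: assuming exactly two
    // processes where rank 0 owns indices {0,1} and rank 1 owns {2,3}, looking
    // up the owners of {1,2} yields {0,1} on both ranks.
    // 'example_compute_index_owner' is a hypothetical helper.
    void example_compute_index_owner(const MPI_Comm &comm)
    {
      const unsigned int rank = this_mpi_process(comm);

      IndexSet owned(4);
      owned.add_range(rank == 0 ? 0 : 2, rank == 0 ? 2 : 4);

      IndexSet to_look_up(4);
      to_look_up.add_range(1, 3); // indices 1 and 2

      const std::vector<unsigned int> owners =
        compute_index_owner(owned, to_look_up, comm);
      // owners == {0, 1} on every process
      (void)owners;
    }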
    // CollectiveMutex constructor: starts out unlocked, with no pending
    // request.
    CollectiveMutex::CollectiveMutex()
      : locked(false)
      , request(MPI_REQUEST_NULL)
    {
      Utilities::MPI::MPI_InitFinalize::register_request(request);
    }

    // Inside ~CollectiveMutex():
    Assert(!locked,
           ExcMessage(
             "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

    // Inside CollectiveMutex::lock(comm), currently implemented with a
    // blocking barrier:
    Assert(!locked,
           ExcMessage(
             "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
#ifdef DEAL_II_WITH_MPI
    const int ierr = MPI_Barrier(comm);
    // (a disabled non-blocking variant would instead wait on the stored
    // request: const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);)
#endif

    // Inside CollectiveMutex::unlock(comm):
    Assert(locked,
           ExcMessage(
             "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
#ifdef DEAL_II_WITH_MPI
    const int ierr = MPI_Barrier(comm);
#endif
    // explicit instantiations (booleans are not covered by the instantiations
    // generated for the MPI scalar types, so they are listed here by hand):
    template bool
    reduce(const bool &,
           const MPI_Comm &,
           const std::function<bool(const bool &, const bool &)> &,
           const unsigned int);

    template std::vector<bool>
    reduce(const std::vector<bool> &,
           const MPI_Comm &,
           const std::function<std::vector<bool>(const std::vector<bool> &,
                                                 const std::vector<bool> &)> &,
           const unsigned int);

    template bool
    all_reduce(const bool &,
               const MPI_Comm &,
               const std::function<bool(const bool &, const bool &)> &);

    template std::vector<bool>
    all_reduce(
      const std::vector<bool> &,
      const MPI_Comm &,
      const std::function<std::vector<bool>(const std::vector<bool> &,
                                            const std::vector<bool> &)> &);

    template void
    internal::all_reduce<bool>(const MPI_Op &,
                               const ArrayView<const bool> &,
                               const MPI_Comm &,
                               const ArrayView<bool> &);

    template bool
    logical_or<bool>(const bool &, const MPI_Comm &);

    template std::vector<unsigned int>
    compute_set_union(const std::vector<unsigned int> &vec,
                      const MPI_Comm &                 comm);

    template std::set<unsigned int>
    compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);