#include <deal.II/base/mpi.templates.h>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <petscsys.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <slepcsys.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif


IndexSet
create_evenly_distributed_partitioning(const unsigned int        my_partition_id,
                                       const unsigned int        n_partitions,
                                       const IndexSet::size_type total_size)
{
  const unsigned int remain = total_size % n_partitions;

  const IndexSet::size_type min_size = total_size / n_partitions;

  const IndexSet::size_type begin =
    min_size * my_partition_id + std::min(my_partition_id, remain);
  const IndexSet::size_type end = min_size * (my_partition_id + 1) +
                                  std::min(my_partition_id + 1, remain);
  IndexSet result(total_size);
  result.add_range(begin, end);
  return result;
}
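// Illustrative usage sketch, not part of the deal.II sources: how the
// function above is typically called. The helper name
// example_even_partitioning() is made up for exposition and assumes an
// MPI_InitFinalize object already exists.
inline IndexSet
example_even_partitioning(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // With total_size == 1000 and three ranks this yields the contiguous
  // ranges [0,334), [334,667), and [667,1000).
  return create_evenly_distributed_partitioning(my_rank, n_ranks, 1000);
}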
std::vector<MinMaxAvg>
min_max_avg(const std::vector<double> &my_values,
            const MPI_Comm &           mpi_communicator)
{
  std::vector<MinMaxAvg> results(my_values.size());
  min_max_avg(my_values, results, mpi_communicator);

  return results;
}


#ifdef DEAL_II_WITH_MPI
unsigned int
n_mpi_processes(const MPI_Comm &mpi_communicator)
{
  int       n_jobs = 1;
  const int ierr   = MPI_Comm_size(mpi_communicator, &n_jobs);
  AssertThrowMPI(ierr);

  return n_jobs;
}


unsigned int
this_mpi_process(const MPI_Comm &mpi_communicator)
{
  int       rank = 0;
  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
  AssertThrowMPI(ierr);

  return rank;
}
const std::vector<unsigned int>
mpi_processes_within_communicator(const MPI_Comm &comm_large,
                                  const MPI_Comm &comm_small)
{
  if (Utilities::MPI::job_supports_mpi() == false)
    return std::vector<unsigned int>{0};

  const unsigned int rank = this_mpi_process(comm_large);
  const unsigned int size = n_mpi_processes(comm_small);

  std::vector<unsigned int> ranks(size);
  const int                 ierr = MPI_Allgather(
    &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
  AssertThrowMPI(ierr);

  return ranks;
}
MPI_Comm
duplicate_communicator(const MPI_Comm &mpi_communicator)
{
  MPI_Comm  new_communicator;
  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
  AssertThrowMPI(ierr);
  return new_communicator;
}


void
free_communicator(MPI_Comm &mpi_communicator)
{
  // MPI_Comm_free resets the argument to MPI_COMM_NULL automatically.
  const int ierr = MPI_Comm_free(&mpi_communicator);
  AssertThrowMPI(ierr);
}
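// Illustrative usage sketch, not part of the deal.II sources: an object that
// wants its own communication context duplicates the communicator it is given
// and frees the duplicate when done. The helper name
// example_duplicate_and_free() is made up for exposition.
inline void
example_duplicate_and_free(const MPI_Comm &comm)
{
  // Work on a private copy of `comm` so that our messages can never be
  // confused with messages sent by the caller on the original communicator:
  MPI_Comm private_comm = duplicate_communicator(comm);

  // ... perform point-to-point or collective communication on private_comm ...

  // Return the duplicate to MPI; this also resets it to MPI_COMM_NULL.
  free_communicator(private_comm);
}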
int
create_group(const MPI_Comm & comm,
             const MPI_Group &group,
             const int        tag,
             MPI_Comm *       new_comm)
{
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
  return MPI_Comm_create_group(comm, group, tag, new_comm);
#  else
  int rank;
  int ierr = MPI_Comm_rank(comm, &rank);
  AssertThrowMPI(ierr);

  int grp_rank;
  ierr = MPI_Group_rank(group, &grp_rank);
  AssertThrowMPI(ierr);
  if (grp_rank == MPI_UNDEFINED)
    {
      *new_comm = MPI_COMM_NULL;
      return MPI_SUCCESS;
    }

  int grp_size;
  ierr = MPI_Group_size(group, &grp_size);
  AssertThrowMPI(ierr);

  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
  AssertThrowMPI(ierr);

  MPI_Group parent_grp;
  ierr = MPI_Comm_group(comm, &parent_grp);
  AssertThrowMPI(ierr);

  std::vector<int> pids(grp_size);
  std::vector<int> grp_pids(grp_size);
  std::iota(grp_pids.begin(), grp_pids.end(), 0);
  ierr = MPI_Group_translate_ranks(
    group, grp_size, grp_pids.data(), parent_grp, pids.data());
  AssertThrowMPI(ierr);
  ierr = MPI_Group_free(&parent_grp);
  AssertThrowMPI(ierr);

  MPI_Comm comm_old = *new_comm;
  MPI_Comm ic;
  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
    {
      const int gid = grp_rank / merge_sz;
      comm_old      = *new_comm;
      if (gid % 2 == 0)
        {
          if ((gid + 1) * merge_sz < grp_size)
            {
              ierr = (MPI_Intercomm_create(
                *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
              AssertThrowMPI(ierr);
              ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
              AssertThrowMPI(ierr);
            }
        }
      else
        {
          ierr = MPI_Intercomm_create(
            *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
          AssertThrowMPI(ierr);
          ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
          AssertThrowMPI(ierr);
        }
      if (*new_comm != comm_old)
        {
          ierr = MPI_Comm_free(&ic);
          AssertThrowMPI(ierr);
          ierr = MPI_Comm_free(&comm_old);
          AssertThrowMPI(ierr);
        }
    }

  return MPI_SUCCESS;
#  endif
}
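// Illustrative usage sketch, not part of the deal.II sources: building a
// communicator that contains only the even ranks of `comm` via create_group().
// The helper name example_even_rank_communicator() and the tag value are made
// up for exposition; MPI error codes are not checked.
inline MPI_Comm
example_even_rank_communicator(const MPI_Comm &comm)
{
  MPI_Group world_group;
  MPI_Comm_group(comm, &world_group);

  std::vector<int> even_ranks;
  for (int r = 0; r < static_cast<int>(n_mpi_processes(comm)); r += 2)
    even_ranks.push_back(r);

  MPI_Group even_group;
  MPI_Group_incl(world_group,
                 static_cast<int>(even_ranks.size()),
                 even_ranks.data(),
                 &even_group);

  // Only the members of `even_group` take part in creating the new
  // communicator; all other ranks keep MPI_COMM_NULL.
  MPI_Comm even_comm = MPI_COMM_NULL;
  if (this_mpi_process(comm) % 2 == 0)
    create_group(comm, even_group, /*tag=*/55, &even_comm);

  MPI_Group_free(&even_group);
  MPI_Group_free(&world_group);
  return even_comm;
}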
std::vector<IndexSet>
create_ascending_partitioning(const MPI_Comm &          comm,
                              const IndexSet::size_type local_size)
{
  const unsigned int n_proc = n_mpi_processes(comm);

  const std::vector<IndexSet::size_type> sizes =
    all_gather(comm, local_size);
  const auto total_size =
    std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));

  std::vector<IndexSet> res(n_proc, IndexSet(total_size));

  IndexSet::size_type begin = 0;
  for (unsigned int i = 0; i < n_proc; ++i)
    {
      res[i].add_range(begin, begin + sizes[i]);
      begin = begin + sizes[i];
    }

  return res;
}
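// Illustrative usage sketch, not part of the deal.II sources: every process
// contributes a locally chosen number of elements and receives the resulting
// global partition. The helper name example_ascending_partitioning() is made
// up for exposition.
inline void
example_ascending_partitioning(const MPI_Comm &comm)
{
  const unsigned int my_rank = this_mpi_process(comm);

  // With three ranks, local sizes {1, 2, 3} yield the contiguous ranges
  // [0,1), [1,3), and [3,6), stored as one IndexSet per rank.
  const std::vector<IndexSet> partition =
    create_ascending_partitioning(comm, my_rank + 1);

  const IndexSet &locally_owned = partition[my_rank];
  (void)locally_owned;
}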
class ConsensusAlgorithmsProcessTargets
  : public ConsensusAlgorithms::Process<unsigned int, unsigned int>
{
public:
  ConsensusAlgorithmsProcessTargets(const std::vector<unsigned int> &target)
    : target(target)
  {}

  using T1 = unsigned int;
  using T2 = unsigned int;

  virtual void
  answer_request(const unsigned int other_rank,
                 const std::vector<T1> &,
                 std::vector<T2> &) override
  {
    this->sources.push_back(other_rank);
  }

  // Simply return the user-provided list of destinations.
  virtual std::vector<unsigned int>
  compute_targets() override
  {
    return target;
  }

  // The result of the consensus algorithm: the ranks that contacted us.
  std::vector<unsigned int>
  get_result()
  {
    std::sort(sources.begin(), sources.end());
    return sources;
  }

private:
  const std::vector<unsigned int> &target;
  std::vector<unsigned int>        sources;
};
std::vector<unsigned int>
compute_point_to_point_communication_pattern(
  const MPI_Comm &                 mpi_comm,
  const std::vector<unsigned int> &destinations)
{
  const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
  (void)n_procs;

  for (const unsigned int destination : destinations)
    {
      (void)destination;
      AssertIndexRange(destination, n_procs);
      Assert(destination != myid,
             ExcMessage(
               "There is no point in communicating with ourselves."));
    }

#  if DEAL_II_MPI_VERSION_GTE(3, 0)
  // Get the ranks that will send to us out of the NBX consensus algorithm:
  ConsensusAlgorithmsProcessTargets process(destinations);
  ConsensusAlgorithms::NBX<ConsensusAlgorithmsProcessTargets::T1,
                           ConsensusAlgorithmsProcessTargets::T2>
    consensus_algorithm(process, mpi_comm);
  consensus_algorithm.run();
  return process.get_result();

#  elif DEAL_II_MPI_VERSION_GTE(2, 2)
  static CollectiveMutex      mutex;
  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);

  const int mpi_tag =
    internal::Tags::compute_point_to_point_communication_pattern;

  // Calculate the number of messages to send to each process:
  std::vector<unsigned int> dest_vector(n_procs);
  for (const auto &el : destinations)
    ++dest_vector[el];

  // Find out how many processes will send to this one by reducing with
  // sum and then scattering the results over all processes:
  unsigned int n_recv_from;
  const int    ierr = MPI_Reduce_scatter_block(
    dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
  AssertThrowMPI(ierr);

  // Send myid to every process in the `destinations` vector:
  std::vector<MPI_Request> send_requests(destinations.size());
  for (const auto &el : destinations)
    {
      const int ierr =
        MPI_Isend(&myid, 1, MPI_UNSIGNED, el, mpi_tag, mpi_comm,
                  send_requests.data() + (&el - destinations.data()));
      AssertThrowMPI(ierr);
    }

  // Receive `n_recv_from` times from the processes that communicate with
  // this one and store the obtained ids:
  std::vector<unsigned int> origins(n_recv_from);
  for (auto &el : origins)
    {
      const int ierr = MPI_Recv(&el, 1, MPI_UNSIGNED, MPI_ANY_SOURCE,
                                mpi_tag, mpi_comm, MPI_STATUS_IGNORE);
      AssertThrowMPI(ierr);
    }

  if (destinations.size() > 0)
    {
      const int ierr = MPI_Waitall(destinations.size(),
                                   send_requests.data(),
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
    }

  return origins;
#  else
  // Let all processors communicate the maximal number of destinations
  // they have:
  const unsigned int max_n_destinations =
    Utilities::MPI::max(destinations.size(), mpi_comm);

  if (max_n_destinations == 0)
    // all processes have nothing to send/receive:
    return std::vector<unsigned int>();

  // Set up a buffer of maximal size and copy our destinations into it,
  // padded with invalid entries:
  std::vector<unsigned int> my_destinations(max_n_destinations,
                                            numbers::invalid_unsigned_int);
  std::copy(destinations.begin(),
            destinations.end(),
            my_destinations.begin());

  // Now exchange these:
  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
  const int                 ierr = MPI_Allgather(my_destinations.data(),
                                 max_n_destinations,
                                 MPI_UNSIGNED,
                                 all_destinations.data(),
                                 max_n_destinations,
                                 MPI_UNSIGNED,
                                 mpi_comm);
  AssertThrowMPI(ierr);

  // Now we know who is going to communicate with whom; collect who is
  // going to communicate with us:
  std::vector<unsigned int> origins;
  for (unsigned int i = 0; i < n_procs; ++i)
    for (unsigned int j = 0; j < max_n_destinations; ++j)
      if (all_destinations[i * max_n_destinations + j] == myid)
        origins.push_back(i);
      else if (all_destinations[i * max_n_destinations + j] ==
               numbers::invalid_unsigned_int)
        break;

  return origins;
#  endif
}
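// Illustrative usage sketch, not part of the deal.II sources: each rank
// announces that it wants to send to its right neighbor and learns from the
// function who will send to it (its left neighbor). The helper name
// example_point_to_point_pattern() is made up for exposition and assumes at
// least two ranks.
inline void
example_point_to_point_pattern(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
  if (n_ranks == 1)
    return;

  const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};

  // On 4 ranks, rank 1 has destinations == {2} and receives origins == {0}.
  const std::vector<unsigned int> origins =
    compute_point_to_point_communication_pattern(comm, destinations);
  (void)origins;
}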
unsigned int
compute_n_point_to_point_communications(
  const MPI_Comm &                 mpi_comm,
  const std::vector<unsigned int> &destinations)
{
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

  for (const unsigned int destination : destinations)
    {
      (void)destination;
      AssertIndexRange(destination, n_procs);
      Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
             ExcMessage(
               "There is no point in communicating with ourselves."));
    }

  // Calculate the number of messages to send to each process:
  std::vector<unsigned int> dest_vector(n_procs);
  for (const auto &el : destinations)
    ++dest_vector[el];

#  if DEAL_II_MPI_VERSION_GTE(2, 2)
  // Find out how many processes will send to this one.
  // MPI_Reduce_scatter(_block) does exactly this.
  unsigned int n_recv_from = 0;

  const int ierr = MPI_Reduce_scatter_block(
    dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
  AssertThrowMPI(ierr);

  return n_recv_from;
#  else
  // Find out how many processes will send to this one by reducing with
  // sum and then scattering the results over all processes:
  std::vector<unsigned int> buffer(dest_vector.size());
  unsigned int              n_recv_from = 0;

  MPI_Reduce(dest_vector.data(),
             buffer.data(),
             dest_vector.size(),
             MPI_UNSIGNED,
             MPI_SUM,
             0,
             mpi_comm);
  MPI_Scatter(buffer.data(),
              1,
              MPI_UNSIGNED,
              &n_recv_from,
              1,
              MPI_UNSIGNED,
              0,
              mpi_comm);

  return n_recv_from;
#  endif
}
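// Illustrative usage sketch, not part of the deal.II sources: the same
// right-neighbor pattern as above, but only the number of incoming messages
// is computed, not the identity of the senders. The helper name
// example_count_incoming_messages() is made up for exposition.
inline unsigned int
example_count_incoming_messages(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
  if (n_ranks == 1)
    return 0;

  // Each rank addresses its right neighbor, so each rank is told that
  // exactly one message (from its left neighbor) will arrive.
  const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};
  return compute_n_point_to_point_communications(comm, destinations);
}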
// Custom MPI reduction operation for MinMaxAvg objects, used by
// min_max_avg() below:
void
max_reduce(const void *in_lhs_, void *inout_rhs_, int *len, MPI_Datatype *)
{
  const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
  MinMaxAvg *      inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

  for (int i = 0; i < *len; i++)
    {
      inout_rhs[i].sum += in_lhs[i].sum;
      if (inout_rhs[i].min > in_lhs[i].min)
        {
          inout_rhs[i].min       = in_lhs[i].min;
          inout_rhs[i].min_index = in_lhs[i].min_index;
        }
      else if (inout_rhs[i].min == in_lhs[i].min)
        {
          // choose the lower rank when tied to make the operator commutative
          if (inout_rhs[i].min_index > in_lhs[i].min_index)
            inout_rhs[i].min_index = in_lhs[i].min_index;
        }

      if (inout_rhs[i].max < in_lhs[i].max)
        {
          inout_rhs[i].max       = in_lhs[i].max;
          inout_rhs[i].max_index = in_lhs[i].max_index;
        }
      else if (inout_rhs[i].max == in_lhs[i].max)
        {
          // choose the lower rank when tied to make the operator commutative
          if (inout_rhs[i].max_index > in_lhs[i].max_index)
            inout_rhs[i].max_index = in_lhs[i].max_index;
        }
    }
}
void
min_max_avg(const ArrayView<const double> &my_values,
            const ArrayView<MinMaxAvg> &   result,
            const MPI_Comm &               mpi_communicator)
{
  // If MPI was not started, we have a serial computation and cannot run
  // the other MPI commands:
  if (job_supports_mpi() == false ||
      Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
    {
      for (unsigned int i = 0; i < my_values.size(); i++)
        {
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
        }
      return;
    }

  // To avoid uninitialized values on some MPI implementations, provide
  // `result` with default values already:
  MinMaxAvg dummy = {0.,
                     std::numeric_limits<double>::max(),
                     -std::numeric_limits<double>::max(),
                     0,
                     0,
                     0.};
  for (auto &i : result)
    i = dummy;

  const unsigned int my_id =
    dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
  const unsigned int numproc =
    dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);

  MPI_Op op;
  int    ierr =
    MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                  static_cast<int>(true),
                  &op);
  AssertThrowMPI(ierr);

  std::vector<MinMaxAvg> in(my_values.size());

  for (unsigned int i = 0; i < my_values.size(); i++)
    {
      in[i].sum = in[i].min = in[i].max = my_values[i];
      in[i].min_index = in[i].max_index = my_id;
    }

  // Describe the memory layout of MinMaxAvg as an MPI datatype: three
  // doubles (sum, min, max), two ints (min_index, max_index), one double
  // (avg).
  MPI_Datatype type;
  int          lengths[]       = {3, 2, 1};
  MPI_Aint     displacements[] = {0,
                              offsetof(MinMaxAvg, min_index),
                              offsetof(MinMaxAvg, avg)};
  MPI_Datatype types[]         = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

  ierr = MPI_Type_create_struct(3, lengths, displacements, types, &type);
  AssertThrowMPI(ierr);

  ierr = MPI_Type_commit(&type);
  AssertThrowMPI(ierr);

  ierr = MPI_Allreduce(
    in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
  AssertThrowMPI(ierr);

  ierr = MPI_Type_free(&type);
  AssertThrowMPI(ierr);

  ierr = MPI_Op_free(&op);
  AssertThrowMPI(ierr);

  for (auto &r : result)
    r.avg = r.sum / numproc;
}
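// Illustrative usage sketch, not part of the deal.II sources: reducing a
// single number across a communicator. The helper name example_min_max_avg()
// is made up for exposition.
inline void
example_min_max_avg(const MPI_Comm &comm)
{
  // Each process contributes its own rank; on four ranks the reduction
  // returns min = 0 (held by rank 0), max = 3 (held by rank 3), sum = 6,
  // and avg = 1.5, identical on every process.
  const double    my_value = Utilities::MPI::this_mpi_process(comm);
  const MinMaxAvg data     = Utilities::MPI::min_max_avg(my_value, comm);
  (void)data;
}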
#else // !DEAL_II_WITH_MPI: serial fall-back implementations

const std::vector<unsigned int>
mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
{
  return std::vector<unsigned int>{0};
}

std::vector<IndexSet>
create_ascending_partitioning(const MPI_Comm & /*comm*/,
                              const IndexSet::size_type local_size)
{
  return std::vector<IndexSet>(1, complete_index_set(local_size));
}

MPI_Comm
duplicate_communicator(const MPI_Comm &mpi_communicator)
{
  return mpi_communicator;
}
void
min_max_avg(const ArrayView<const double> &my_values,
            const ArrayView<MinMaxAvg> &   result,
            const MPI_Comm &)
{
  AssertDimension(my_values.size(), result.size());

  for (unsigned int i = 0; i < my_values.size(); i++)
    {
      result[i].sum       = my_values[i];
      result[i].avg       = my_values[i];
      result[i].min       = my_values[i];
      result[i].max       = my_values[i];
      result[i].min_index = 0;
      result[i].max_index = 0;
    }
}

#endif
MPI_InitFinalize::MPI_InitFinalize(int &              argc,
                                   char **&           argv,
                                   const unsigned int max_num_threads)
{
  static bool constructor_has_already_run = false;
  (void)constructor_has_already_run;
  Assert(constructor_has_already_run == false,
         ExcMessage("You can only create a single object of this class "
                    "in a program since it initializes the MPI system."));


  int ierr = 0;
#ifdef DEAL_II_WITH_MPI
  // If we have PETSc, we will initialize it and let it handle MPI.
  // Otherwise, we will do it:
  int MPI_has_been_started = 0;
  ierr                     = MPI_Initialized(&MPI_has_been_started);
  AssertThrowMPI(ierr);
  AssertThrow(MPI_has_been_started == 0,
              ExcMessage("MPI error. You can only start MPI once!"));

  int provided;
  // This works like MPI_Init(&argc, &argv), but additionally tells MPI
  // that we might use several threads but never call two MPI functions
  // at the same time:
  int wanted = MPI_THREAD_SERIALIZED;
  ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
  AssertThrowMPI(ierr);
#else
  // make sure the compiler doesn't warn about these variables
  (void)argc;
  (void)argv;
  (void)ierr;
#endif

  // We are allowed to call MPI_Init ourselves; PetscInitialize will detect
  // this, which lets us use MPI_Init_thread instead.
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
  // Initialize SLEPc (with PETSc):
  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
#  else
  // or just initialize PETSc alone:
  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
  AssertThrow(ierr == 0, ExcPETScError(ierr));
#  endif

  // Disable PETSc's own signal/exception handling:
  PetscPopSignalHandler();
#endif

  // Initialize zoltan
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
  float version;
  Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
  // Initialize p4est and libsc components
#  if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
#  else
  // This feature is broken in version 2.0.0 for calls to
  // MPI_Comm_create_group (see cburstedde/p4est#30). Disabling it leads
  // to more verbose p4est error messages, which should be fine.
  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#  endif
  p4est_init(nullptr, SC_LP_SILENT);
#endif

  constructor_has_already_run = true;

  // Now also see how many threads we'd like to run:
  if (max_num_threads != numbers::invalid_unsigned_int)
    {
      // set the maximum number of threads to what the user asked for
      MultithreadInfo::set_thread_limit(max_num_threads);
    }
  else
    // the user wants the automatic choice
    {
#ifdef DEAL_II_WITH_MPI
      // Figure out how many MPI processes share the current node and how
      // many CPU cores we have: gather all hostnames (including the
      // terminating \0 of the C-style string) on every process.
      const std::string  hostname = Utilities::System::get_hostname();
      const unsigned int max_hostname_size =
        Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
      std::vector<char> hostname_array(max_hostname_size);
      std::copy(hostname.c_str(),
                hostname.c_str() + hostname.size() + 1,
                hostname_array.begin());

      std::vector<char> all_hostnames(
        max_hostname_size * Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD));
      const int ierr = MPI_Allgather(hostname_array.data(),
                                     max_hostname_size,
                                     MPI_CHAR,
                                     all_hostnames.data(),
                                     max_hostname_size,
                                     MPI_CHAR,
                                     MPI_COMM_WORLD);
      AssertThrowMPI(ierr);

      // Search how often our own hostname appears and which instance
      // among these the current process represents:
      unsigned int n_local_processes   = 0;
      unsigned int nth_process_on_host = 0;
      for (unsigned int i = 0;
           i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
           ++i)
        if (std::string(all_hostnames.data() + i * max_hostname_size) ==
            hostname)
          {
            ++n_local_processes;
            if (i <= Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
              ++nth_process_on_host;
          }
      Assert(nth_process_on_host > 0, ExcInternalError());

      // Compute how many cores each process gets. If the number does not
      // divide evenly, the first few processes get one more core; round
      // up to at least one thread in any case:
      const unsigned int n_threads =
        std::max(MultithreadInfo::n_cores() / n_local_processes +
                   (nth_process_on_host <=
                        MultithreadInfo::n_cores() % n_local_processes ?
                      1 :
                      0),
                 1U);
#else
      const unsigned int n_threads = MultithreadInfo::n_cores();
#endif

      // finally set this number of threads
      MultithreadInfo::set_thread_limit(n_threads);
    }
}
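// Illustrative usage sketch, not part of the deal.II sources: the canonical
// way to initialize MPI (and, where configured, PETSc/SLEPc, p4est, and
// Zoltan) in a deal.II program. The function example_main() is made up for
// exposition; real programs would put this code into main().
inline int
example_main(int argc, char **argv)
{
  // Initialize MPI and limit each process to a single thread. Finalization
  // (including MPI_Finalize) happens automatically when the object goes out
  // of scope at the end of the function.
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // ... set up and solve the problem ...

  return 0;
}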
void
MPI_InitFinalize::register_request(MPI_Request &request)
{
  // insert if it is not in the set already:
  requests.insert(&request);
}


void
MPI_InitFinalize::unregister_request(MPI_Request &request)
{
  Assert(
    requests.find(&request) != requests.end(),
    ExcMessage(
      "You tried to call unregister_request() with an invalid request."));

  requests.erase(&request);
}
MPI_InitFinalize::~MPI_InitFinalize()
{
#ifdef DEAL_II_WITH_MPI
  // Before exiting, wait for all registered nonblocking requests to finish:
  for (auto request : requests)
    {
      const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
      AssertThrowMPI(ierr);
    }

  // Release the memory pools for MPI-based vectors now; their static
  // destructors would otherwise run only after MPI_Finalize():
  GrowingVectorMemory<
    LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
  GrowingVectorMemory<
    LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
  // ... likewise for the corresponding block vectors ...

#  if defined(DEAL_II_WITH_TRILINOS)
  // ... release the Trilinos vector pools as well ...
#  endif
#endif

  // Now deal with PETSc (with or without MPI). Only delete the vectors and
  // finalize if PetscFinalize has not been called yet:
#ifdef DEAL_II_WITH_PETSC
  if ((PetscInitializeCalled == PETSC_TRUE) &&
      (PetscFinalizeCalled == PETSC_FALSE))
    {
      // ... release the PETSc vector pools ...
#  ifdef DEAL_II_WITH_SLEPC
      SlepcFinalize();
#  else
      PetscFinalize();
#  endif
    }
#endif

  // The same issue exists for CUDA vectors, whose destructors might run
  // after the CUDA driver has been unloaded:
#ifdef DEAL_II_WITH_CUDA
  GrowingVectorMemory<LinearAlgebra::distributed::
                        Vector<float, MemorySpace::CUDA>>::release_unused_memory();
  GrowingVectorMemory<LinearAlgebra::distributed::
                        Vector<double, MemorySpace::CUDA>>::release_unused_memory();
#endif

#ifdef DEAL_II_WITH_P4EST
  // p4est has no finalize function; only libsc needs to be ended:
  sc_finalize();
#endif

  // Finally call MPI_Finalize(), but only if no exception is in flight,
  // since finalizing during stack unwinding can deadlock:
#ifdef DEAL_II_WITH_MPI
  if (job_supports_mpi() == true)
    {
#  if __cpp_lib_uncaught_exceptions >= 201411
      // std::uncaught_exception() is deprecated in C++17
      if (std::uncaught_exceptions() > 0)
#  else
      if (std::uncaught_exception() == true)
#  endif
        {
          // do not try to call MPI_Finalize to avoid a deadlock
        }
      else
        {
          const int ierr = MPI_Finalize();
          (void)ierr;
          AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
        }
    }
#endif
}
bool
job_supports_mpi()
{
#ifdef DEAL_II_WITH_MPI
  int       MPI_has_been_started = 0;
  const int ierr                 = MPI_Initialized(&MPI_has_been_started);
  AssertThrowMPI(ierr);

  return (MPI_has_been_started > 0);
#else
  return false;
#endif
}
std::vector<unsigned int>
compute_index_owner(const IndexSet &owned_indices,
                    const IndexSet &indices_to_look_up,
                    const MPI_Comm &comm)
{
  Assert(owned_indices.size() == indices_to_look_up.size(),
         ExcMessage("IndexSets have to have the same sizes."));

  Assert(
    owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
    ExcMessage("IndexSets have to have the same size on all processes."));

  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

  // Step 1: set up the dictionary. The input owned_indices can be
  // partitioned arbitrarily; in the dictionary, the index set is statically
  // repartitioned among the processes again and extended with information
  // on the actual owner of each index.
  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
    owned_indices, indices_to_look_up, comm, owning_ranks);

  // Step 2: read the dictionary. Communicate with the process that owns
  // the index in the static partition (i.e. in the dictionary); that
  // process returns the actual owner of the index.
  ConsensusAlgorithms::Selector<
    std::pair<types::global_dof_index, types::global_dof_index>,
    unsigned int>
    consensus_algorithm(process, comm);
  consensus_algorithm.run();
  return owning_ranks;
}
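// Illustrative usage sketch, not part of the deal.II sources: each rank owns
// ten consecutive indices and asks for the owner of global index 0. The
// helper name example_compute_index_owner() is made up for exposition.
inline void
example_compute_index_owner(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  const IndexSet owned = create_evenly_distributed_partitioning(
    my_rank, n_ranks, 10 * n_ranks);

  IndexSet indices_to_look_up(10 * n_ranks);
  indices_to_look_up.add_index(0);

  // Every process receives {0}, since global index 0 lives on rank 0.
  const std::vector<unsigned int> owners =
    compute_index_owner(owned, indices_to_look_up, comm);
  (void)owners;
}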
CollectiveMutex::CollectiveMutex()
  : locked(false)
  , request(MPI_REQUEST_NULL)
{
  Utilities::MPI::MPI_InitFinalize::register_request(request);
}


CollectiveMutex::~CollectiveMutex()
{
  Assert(
    !locked,
    ExcMessage(
      "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
}


void
CollectiveMutex::lock(const MPI_Comm &comm)
{
  (void)comm;

  Assert(
    !locked,
    ExcMessage(
      "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));

#ifdef DEAL_II_WITH_MPI
  // TODO: For now this mutex is implemented with a blocking barrier in
  // lock() and unlock(); moving to the nonblocking barrier below still
  // needs testing.
  const int ierr = MPI_Barrier(comm);
  AssertThrowMPI(ierr);

#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
  // wait for the nonblocking barrier to finish. This is a no-op the
  // first time we lock().
  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);
#  else
  // nothing to do as the blocking barrier already completed
#  endif
#endif

  locked = true;
}


void
CollectiveMutex::unlock(const MPI_Comm &comm)
{
  (void)comm;

  Assert(
    locked,
    ExcMessage(
      "Error: MPI::CollectiveMutex needs to be locked before unlock()"));

#ifdef DEAL_II_WITH_MPI
#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
  const int ierr = MPI_Ibarrier(comm, &request);
  AssertThrowMPI(ierr);
#  else
  const int ierr = MPI_Barrier(comm);
  AssertThrowMPI(ierr);
#  endif
#endif

  locked = false;
}
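// Illustrative usage sketch, not part of the deal.II sources: protecting a
// communication phase with a CollectiveMutex so that two such phases issued
// from different places in the program cannot interleave their messages on
// the same communicator. The helper name example_guarded_communication() is
// made up for exposition.
inline void
example_guarded_communication(const MPI_Comm &comm)
{
  // All processes must enter and leave the critical section collectively;
  // the ScopedLock releases the mutex automatically at the end of the scope.
  static CollectiveMutex      mutex;
  CollectiveMutex::ScopedLock lock(mutex, comm);

  // ... point-to-point communication on `comm` goes here ...
}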
template std::vector<unsigned int>
compute_set_union(const std::vector<unsigned int> &vec, const MPI_Comm &comm);

template std::set<unsigned int>
compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);