#include <deal.II/base/exceptions.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_tags.h>
#include <deal.II/base/multithread_info.h>
#include <deal.II/base/utilities.h>

#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_memory.h>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <deal.II/lac/trilinos_parallel_block_vector.h>
#    include <deal.II/lac/trilinos_vector.h>
#    include <deal.II/lac/vector_memory.h>

#    include <Epetra_MpiComm.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <deal.II/lac/petsc_block_vector.h>
#  include <deal.II/lac/petsc_vector.h>

#  include <petscsys.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <deal.II/lac/slepc_solver.h>

#  include <slepcsys.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif

DEAL_II_NAMESPACE_OPEN
namespace Utilities
{
  namespace MPI
  {
#ifdef DEAL_II_WITH_MPI
    unsigned int
    n_mpi_processes(const MPI_Comm &mpi_communicator)
    {
      int       n_jobs = 1;
      const int ierr   = MPI_Comm_size(mpi_communicator, &n_jobs);
      AssertThrowMPI(ierr);
      return n_jobs;
    }


    unsigned int
    this_mpi_process(const MPI_Comm &mpi_communicator)
    {
      int       rank = 0;
      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
      AssertThrowMPI(ierr);
      return rank;
    }


    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator)
    {
      MPI_Comm  new_communicator;
      const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
      AssertThrowMPI(ierr);
      return new_communicator;
    }


    void
    free_communicator(MPI_Comm &mpi_communicator)
    {
      // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
      const int ierr = MPI_Comm_free(&mpi_communicator);
      AssertThrowMPI(ierr);
    }
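

    /* Usage sketch (illustration only, not part of this file; identifiers
       such as `mpi_communicator` are placeholders): the functions above are
       the usual way for deal.II user code to query the parallel environment
       and to work on a private copy of a communicator.

         #include <deal.II/base/mpi.h>

         void example(const MPI_Comm mpi_communicator)
         {
           const unsigned int n_ranks =
             dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
           const unsigned int my_rank =
             dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
           (void)n_ranks;
           (void)my_rank;

           // communicate on a duplicate so that our messages cannot collide
           // with messages of the caller on the original communicator:
           MPI_Comm duplicate =
             dealii::Utilities::MPI::duplicate_communicator(mpi_communicator);
           // ... use the duplicate ...
           dealii::Utilities::MPI::free_communicator(duplicate);
         }
    */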
    int
    create_group(const MPI_Comm & comm,
                 const MPI_Group &group,
                 const int        tag,
                 MPI_Comm *       new_comm)
    {
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      return MPI_Comm_create_group(comm, group, tag, new_comm);
#  else
      int rank;
      int ierr = MPI_Comm_rank(comm, &rank);
      AssertThrowMPI(ierr);

      int grp_rank;
      ierr = MPI_Group_rank(group, &grp_rank);
      AssertThrowMPI(ierr);
      if (grp_rank == MPI_UNDEFINED)
        {
          *new_comm = MPI_COMM_NULL;
          return MPI_SUCCESS;
        }

      int grp_size;
      ierr = MPI_Group_size(group, &grp_size);
      AssertThrowMPI(ierr);

      ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
      AssertThrowMPI(ierr);

      MPI_Group parent_grp;
      ierr = MPI_Comm_group(comm, &parent_grp);
      AssertThrowMPI(ierr);

      std::vector<int> pids(grp_size);
      std::vector<int> grp_pids(grp_size);
      std::iota(grp_pids.begin(), grp_pids.end(), 0);
      ierr = MPI_Group_translate_ranks(
        group, grp_size, grp_pids.data(), parent_grp, pids.data());
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&parent_grp);
      AssertThrowMPI(ierr);

      MPI_Comm comm_old = *new_comm;
      MPI_Comm ic;
      for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
        {
          const int gid = grp_rank / merge_sz;
          comm_old      = *new_comm;
          if (gid % 2 == 0)
            {
              if ((gid + 1) * merge_sz < grp_size)
                {
                  ierr = (MPI_Intercomm_create(
                    *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
                  AssertThrowMPI(ierr);
                  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
                  AssertThrowMPI(ierr);
                }
            }
          else
            {
              ierr = MPI_Intercomm_create(
                *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
              AssertThrowMPI(ierr);
              ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
              AssertThrowMPI(ierr);
            }
          if (*new_comm != comm_old)
            {
              ierr = MPI_Comm_free(&ic);
              AssertThrowMPI(ierr);
              ierr = MPI_Comm_free(&comm_old);
              AssertThrowMPI(ierr);
            }
        }

      return MPI_SUCCESS;
#  endif
    }
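

    /* Usage sketch (illustration only; rank numbers and the tag are
       placeholders): create_group() mirrors MPI_Comm_create_group() and
       builds a communicator containing exactly the ranks of `group`, without
       a collective call over the parent communicator. Like the MPI function,
       it is meant to be called by the processes that are members of the
       group; assuming at least five ranks:

         MPI_Group world_group, even_group;
         MPI_Comm_group(MPI_COMM_WORLD, &world_group);

         const int even_ranks[3] = {0, 2, 4};
         MPI_Group_incl(world_group, 3, even_ranks, &even_group);

         MPI_Comm even_comm;
         dealii::Utilities::MPI::create_group(MPI_COMM_WORLD,
                                              even_group,
                                              55,
                                              &even_comm);
         // ... communicate on even_comm, then free it and the groups ...
    */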
    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &           comm,
                                  const IndexSet::size_type &local_size)
    {
      const unsigned int                     n_proc = n_mpi_processes(comm);
      const std::vector<IndexSet::size_type> sizes =
        all_gather(comm, local_size);
      const auto total_size =
        std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));

      IndexSet::size_type begin = 0;
      for (unsigned int i = 0; i < n_proc; ++i)
        {
          res[i].add_range(begin, begin + sizes[i]);
          begin = begin + sizes[i];
        }

      return res;
    }
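

    /* Usage sketch (illustration only; `mpi_communicator` and
       `n_locally_owned_elements` are placeholders): with three processes
       owning 3, 4, and 2 elements, every process obtains the same vector of
       IndexSets describing [0,3), [3,7), and [7,9).

         const std::vector<dealii::IndexSet> partition =
           dealii::Utilities::MPI::create_ascending_partitioning(
             mpi_communicator, n_locally_owned_elements);
         const dealii::IndexSet &my_range =
           partition[dealii::Utilities::MPI::this_mpi_process(
             mpi_communicator)];
    */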
    // Helper class used by compute_point_to_point_communication_pattern():
    // a request carries no payload, and answering it simply amounts to
    // recording which rank asked.
    class ConsensusAlgorithmProcessTargets
      : public ConsensusAlgorithmProcess<int, int>
    {
    public:
      ConsensusAlgorithmProcessTargets(const std::vector<unsigned int> &target)
        : target(target)
      {}

      using T1 = int;
      using T2 = int;

      virtual void
      process_request(const unsigned int other_rank,
                      const std::vector<T1> &,
                      std::vector<T2> &) override
      {
        this->sources.push_back(other_rank);
      }

      // Simply return the user-provided list of destinations.
      virtual std::vector<unsigned int>
      compute_targets() override
      {
        return target;
      }

      // The ranks that sent a request to this process.
      std::vector<unsigned int>
      get_result()
      {
        return sources;
      }

    private:
      const std::vector<unsigned int> &target;
      std::vector<unsigned int>        sources;
    };



    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (const unsigned int destination : destinations)
        {
          (void)destination;
          Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
          Assert(destination != myid,
                 ExcMessage(
                   "There is no point in communicating with ourselves."));
        }

#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      // Run a consensus algorithm: every process asks its destinations, and
      // the ranks that asked us are exactly the processes that will send to
      // this process.
      ConsensusAlgorithmProcessTargets process(destinations);
      ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
                             ConsensusAlgorithmProcessTargets::T2>
        consensus_algorithm(process, mpi_comm);
      consensus_algorithm.run();
      return process.get_result();
#  elif DEAL_II_MPI_VERSION_GTE(2, 2)

      // the tag value used below comes from the internal::Tags enum
      // (see mpi_tags.h)
      const int mpi_tag = /* ... */;

      // Calculate the number of messages to send to each process
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

      // Find out how many processes will send to this one by reducing with
      // sum and then scattering the results over all processes
      unsigned int n_recv_from;
      const int    ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
      AssertThrowMPI(ierr);

      // Send myid to every process in the `destinations` vector ...
      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
        {
          const int ierr =
            MPI_Isend(&myid,
                      1,
                      MPI_UNSIGNED,
                      el,
                      mpi_tag,
                      mpi_comm,
                      send_requests.data() + (&el - destinations.data()));
          AssertThrowMPI(ierr);
        }

      // ... and receive `n_recv_from` times from the processes that
      // communicate with this one, storing the obtained ids in `origins`.
      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
        {
          const int ierr = MPI_Recv(&el,
                                    1,
                                    MPI_UNSIGNED,
                                    MPI_ANY_SOURCE,
                                    mpi_tag,
                                    mpi_comm,
                                    MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      if (destinations.size() > 0)
        {
          const int ierr = MPI_Waitall(destinations.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      return origins;
#  else
      // With even older MPI versions, let all processors communicate the
      // maximal number of destinations they have
      const unsigned int max_n_destinations =
        Utilities::MPI::max(destinations.size(), mpi_comm);

      if (max_n_destinations == 0)
        // all processes have nothing to send/receive:
        return std::vector<unsigned int>();

      // now that we know the number of data packets every processor wants to
      // send, set up a buffer with the maximal size and copy our destinations
      // into it, padded with invalid entries
      std::vector<unsigned int> my_destinations(max_n_destinations,
                                                numbers::invalid_unsigned_int);
      std::copy(destinations.begin(),
                destinations.end(),
                my_destinations.begin());

      // now exchange these
      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
      const int ierr = MPI_Allgather(my_destinations.data(),
                                     max_n_destinations,
                                     MPI_UNSIGNED,
                                     all_destinations.data(),
                                     max_n_destinations,
                                     MPI_UNSIGNED,
                                     mpi_comm);
      AssertThrowMPI(ierr);

      // now we know who is going to communicate with whom. collect who is
      // going to communicate with us!
      std::vector<unsigned int> origins;
      for (unsigned int i = 0; i < n_procs; ++i)
        for (unsigned int j = 0; j < max_n_destinations; ++j)
          if (all_destinations[i * max_n_destinations + j] == myid)
            origins.push_back(i);
          else if (all_destinations[i * max_n_destinations + j] ==
                   numbers::invalid_unsigned_int)
            break;

      return origins;
#  endif
    }
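

    /* Usage sketch (illustration only; `mpi_communicator` is a placeholder):
       `destinations` lists the ranks this process wants to send data to, and
       the returned vector lists the ranks that will send data to this
       process, i.e., exactly the information needed to post the matching
       receives.

         const std::vector<unsigned int> destinations = ...;
         const std::vector<unsigned int> origins =
           dealii::Utilities::MPI::
             compute_point_to_point_communication_pattern(mpi_communicator,
                                                          destinations);
         // post one send per entry of `destinations` and one receive per
         // entry of `origins`
    */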
    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (const unsigned int destination : destinations)
        {
          (void)destination;
          Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
          Assert(destination != myid,
                 ExcMessage(
                   "There is no point in communicating with ourselves."));
        }

      // Calculate the number of messages to send to each process
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

#  if DEAL_II_MPI_VERSION_GTE(2, 2)
      // Find out how many processes will send to this one;
      // MPI_Reduce_scatter(_block) does exactly this
      unsigned int n_recv_from = 0;

      const int ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
      AssertThrowMPI(ierr);

      return n_recv_from;
#  else
      // Find out how many processes will send to this one by reducing with
      // sum and then scattering the results over all processes
      std::vector<unsigned int> buffer(dest_vector.size());
      unsigned int              n_recv_from = 0;

      MPI_Reduce(dest_vector.data(),
                 buffer.data(),
                 dest_vector.size(),
                 MPI_UNSIGNED,
                 MPI_SUM,
                 0,
                 mpi_comm);
      MPI_Scatter(buffer.data(),
                  1,
                  MPI_UNSIGNED,
                  &n_recv_from,
                  1,
                  MPI_UNSIGNED,
                  0,
                  mpi_comm);

      return n_recv_from;
#  endif
    }
    namespace
    {
      // custom MPI_Op used by min_max_avg(): combine two MinMaxAvg objects
      void
      max_reduce(const void *in_lhs_,
                 void *      inout_rhs_,
                 int *       len,
                 MPI_Datatype *)
      {
        const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
        MinMaxAvg *      inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

        (void)len;
        Assert(*len == 1, ExcInternalError());

        inout_rhs->sum += in_lhs->sum;
        if (inout_rhs->min > in_lhs->min)
          {
            inout_rhs->min       = in_lhs->min;
            inout_rhs->min_index = in_lhs->min_index;
          }
        else if (inout_rhs->min == in_lhs->min)
          {
            // choose the lower rank if tied, to keep the operator commutative
            if (inout_rhs->min_index > in_lhs->min_index)
              inout_rhs->min_index = in_lhs->min_index;
          }

        if (inout_rhs->max < in_lhs->max)
          {
            inout_rhs->max       = in_lhs->max;
            inout_rhs->max_index = in_lhs->max_index;
          }
        else if (inout_rhs->max == in_lhs->max)
          {
            // choose the lower rank if tied, to keep the operator commutative
            if (inout_rhs->max_index > in_lhs->max_index)
              inout_rhs->max_index = in_lhs->max_index;
          }
      }
    } // namespace
    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
    {
      // If MPI was not started, we have a serial computation and cannot run
      // the other MPI commands
      if (job_supports_mpi() == false)
        {
          MinMaxAvg result;
          result.sum       = my_value;
          result.avg       = my_value;
          result.min       = my_value;
          result.max       = my_value;
          result.min_index = 0;
          result.max_index = 0;

          return result;
        }

      // To avoid uninitialized values on some MPI implementations, provide
      // `result` with a default value already...
      MinMaxAvg result = {0.,
                          std::numeric_limits<double>::max(),
                          -std::numeric_limits<double>::max(),
                          0,
                          0,
                          0.};

      const unsigned int my_id =
        dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
      const unsigned int numproc =
        dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);

      MPI_Op op;
      int    ierr =
        MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                      true,
                      &op);
      AssertThrowMPI(ierr);

      MinMaxAvg in;
      in.sum = in.min = in.max = my_value;
      in.min_index = in.max_index = my_id;

      MPI_Datatype type;
      int          lengths[]       = {3, 2};
      MPI_Aint     displacements[] = {0, offsetof(MinMaxAvg, min_index)};
      MPI_Datatype types[]         = {MPI_DOUBLE, MPI_INT};

      ierr = MPI_Type_create_struct(2, lengths, displacements, types, &type);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_commit(&type);
      AssertThrowMPI(ierr);
      ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_free(&type);
      AssertThrowMPI(ierr);

      ierr = MPI_Op_free(&op);
      AssertThrowMPI(ierr);

      result.avg = result.sum / numproc;

      return result;
    }
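

    /* Usage sketch (illustration only; `local_time` and `mpi_communicator`
       are placeholders): reduce a locally computed quantity, for example a
       timing, over all ranks in a single call.

         const dealii::Utilities::MPI::MinMaxAvg stats =
           dealii::Utilities::MPI::min_max_avg(local_time, mpi_communicator);
         // stats.min, stats.max, stats.avg, and stats.sum are identical on
         // all ranks; stats.min_index and stats.max_index are the ranks on
         // which the extrema were attained.
    */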
#else

    // Definitions of the same interface for builds without MPI; the
    // remaining functions (n_mpi_processes(), this_mpi_process(),
    // free_communicator(), ...) reduce to trivial stubs and are omitted here.

    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm & /*comm*/,
                                  const IndexSet::size_type &local_size)
    {
      return std::vector<IndexSet>(1, complete_index_set(local_size));
    }



    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator)
    {
      return mpi_communicator;
    }



    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &)
    {
      MinMaxAvg result;

      result.sum       = my_value;
      result.avg       = my_value;
      result.min       = my_value;
      result.max       = my_value;
      result.min_index = 0;
      result.max_index = 0;

      return result;
    }

#endif
    MPI_InitFinalize::MPI_InitFinalize(int &              argc,
                                       char **&           argv,
                                       const unsigned int max_num_threads)
    {
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert(constructor_has_already_run == false,
             ExcMessage("You can only create a single object of this class "
                        "in a program since it initializes the MPI system."));


      int ierr = 0;
#ifdef DEAL_II_WITH_MPI
      // if we have PETSc, we will initialize it and let it handle MPI.
      // Otherwise, we will do it.
      int MPI_has_been_started = 0;
      ierr                     = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      AssertThrow(MPI_has_been_started == 0,
                  ExcMessage("MPI error. You can only start MPI once!"));

      int provided;
      // this works like MPI_Init(&argc, &argv), but tells MPI that we might
      // use several threads but never call two MPI functions at the same
      // time
      int wanted = MPI_THREAD_SERIALIZED;
      ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
      AssertThrowMPI(ierr);
#else
      // make sure the compiler doesn't warn about these variables
      (void)argc;
      (void)argv;
      (void)ierr;
#endif

      // we are allowed to call MPI_Init ourselves and PETScInitialize will
      // detect this; this allows us to use MPI_Init_thread instead
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      // Initialize SLEPc (with PETSc):
      ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(ierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
#  else
      // or just initialize PETSc alone:
      ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
#  endif

      // Disable PETSc exception handling; it just prints a large wall of
      // text that is not particularly helpful here:
      PetscPopSignalHandler();
#endif

      // Initialize zoltan
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
      float version;
      Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
      // Initialize p4est and libsc components
#  if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
      // (nothing to do here for p4est >= 2.0.0)
#  else
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#  endif
      p4est_init(nullptr, SC_LP_SILENT);
#endif

      constructor_has_already_run = true;
      // Now also see how many threads we'd like to run: either what the user
      // asked for, or an automatic choice based on how many MPI processes
      // share the current node.
      if (max_num_threads != numbers::invalid_unsigned_int)
        {
          MultithreadInfo::set_thread_limit(max_num_threads);
        }
      else
        {
#ifdef DEAL_II_WITH_MPI
          // Figure out how many MPI processes run on the current node by
          // gathering all hostnames (including the terminating '\0').
          const std::string  hostname = Utilities::System::get_hostname();
          const unsigned int max_hostname_size =
            Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
          std::vector<char> hostname_array(max_hostname_size);
          std::copy(hostname.c_str(),
                    hostname.c_str() + hostname.size() + 1,
                    hostname_array.begin());

          std::vector<char> all_hostnames(max_hostname_size *
                                          MPI::n_mpi_processes(MPI_COMM_WORLD));
          const int         ierr = MPI_Allgather(hostname_array.data(),
                                          max_hostname_size,
                                          MPI_CHAR,
                                          all_hostnames.data(),
                                          max_hostname_size,
                                          MPI_CHAR,
                                          MPI_COMM_WORLD);
          AssertThrowMPI(ierr);

          // Count how often our own hostname appears and the how-manyth
          // instance of it the current process is.
          unsigned int n_local_processes   = 0;
          unsigned int nth_process_on_host = 0;
          for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
               ++i)
            if (std::string(all_hostnames.data() + i * max_hostname_size) ==
                hostname)
              {
                ++n_local_processes;
                if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
                  ++nth_process_on_host;
              }
          Assert(nth_process_on_host > 0, ExcInternalError());

          // Split the cores of this node among its processes; the first few
          // processes get one more core if the division is not even, and
          // every process gets at least one thread.
          const unsigned int n_threads =
            std::max(MultithreadInfo::n_cores() / n_local_processes +
                       (nth_process_on_host <=
                            MultithreadInfo::n_cores() % n_local_processes ?
                          1 :
                          0),
                     1U);
#else
          const unsigned int n_threads = MultithreadInfo::n_cores();
#endif
          MultithreadInfo::set_thread_limit(n_threads);
        }
    }
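

    /* Usage sketch (illustration only): the canonical first line of a
       deal.II main() that uses MPI.

         #include <deal.II/base/mpi.h>

         int main(int argc, char **argv)
         {
           // Initializes MPI (and, if configured, PETSc/SLEPc, Zoltan, and
           // p4est) and limits each process to a single thread; the
           // destructor calls MPI_Finalize() at the end of main():
           dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc,
                                                                       argv,
                                                                       1);

           // ... all MPI-based work happens while this object is alive ...
         }
    */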
    void
    MPI_InitFinalize::register_request(MPI_Request &request)
    {
      // insert if it is not in the set already:
      requests.insert(&request);
    }



    void
    MPI_InitFinalize::unregister_request(MPI_Request &request)
    {
      Assert(
        requests.find(&request) != requests.end(),
        ExcMessage(
          "You tried to call unregister_request() with an invalid request."));

      requests.erase(&request);
    }
    MPI_InitFinalize::~MPI_InitFinalize()
    {
#ifdef DEAL_II_WITH_MPI
      // Before exiting, wait for nonblocking communication to complete:
      for (auto request : requests)
        {
          const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      // Release all vectors held in the memory pools so that no MPI-based
      // object outlives MPI_Finalize(); first the deal.II vectors:
      GrowingVectorMemory<
        LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
        release_unused_memory();
      // ... (same for the <float> instantiations)

#  if defined(DEAL_II_WITH_TRILINOS)
      // ... (same for the Trilinos vector pools)
#  endif
#endif

      // Now deal with PETSc (with or without MPI). Only delete the vectors
      // if finalize hasn't been called yet, otherwise this leads to errors.
#ifdef DEAL_II_WITH_PETSC
      if ((PetscInitializeCalled == PETSC_TRUE) &&
          (PetscFinalizeCalled == PETSC_FALSE))
        {
          // ... (release the PETSc vector pools, then finalize SLEPc or
          //      PETSc, depending on DEAL_II_WITH_SLEPC)
        }
#endif

#ifdef DEAL_II_WITH_CUDA
      // Also release all memory related to CUDA vectors, since their static
      // destructors may run after the CUDA driver has been unloaded:
      GrowingVectorMemory<LinearAlgebra::distributed::
                            Vector<float, MemorySpace::CUDA>>::release_unused_memory();
      GrowingVectorMemory<LinearAlgebra::distributed::
                            Vector<double, MemorySpace::CUDA>>::release_unused_memory();
#endif

#ifdef DEAL_II_WITH_P4EST
      // ... (end the libsc subsystem; p4est itself has no finalize function)
#endif

      // Only call MPI_Finalize() if we are actually running with MPI, and
      // only if no exception is in flight: otherwise skip it to avoid a
      // deadlock.
#ifdef DEAL_II_WITH_MPI
      if (job_supports_mpi() == true)
        {
#  if __cpp_lib_uncaught_exceptions >= 201411
          // std::uncaught_exception() is deprecated in c++17
          if (std::uncaught_exceptions() > 0)
#  else
          if (std::uncaught_exception() == true)
#  endif
            {
              std::cerr
                << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
                << this_mpi_process(MPI_COMM_WORLD)
                << ". Skipping MPI_Finalize() to avoid a deadlock."
                << std::endl;
            }
          else
            {
              const int ierr = MPI_Finalize();
              (void)ierr;
            }
        }
#endif
    }
    bool
    job_supports_mpi()
    {
#ifdef DEAL_II_WITH_MPI
      int       MPI_has_been_started = 0;
      const int ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);

      return (MPI_has_been_started > 0);
#else
      return false;
#endif
    }
    template <typename T1, typename T2>
    void
    ConsensusAlgorithmProcess<T1, T2>::process_request(const unsigned int,
                                                       const std::vector<T1> &,
                                                       std::vector<T2> &)
    {
      // nothing to do
    }



    template <typename T1, typename T2>
    void
    ConsensusAlgorithmProcess<T1, T2>::pack_recv_buffer(const int,
                                                        std::vector<T1> &)
    {
      // nothing to do
    }



    template <typename T1, typename T2>
    void
    ConsensusAlgorithmProcess<T1, T2>::prepare_recv_buffer(const int,
                                                           std::vector<T2> &)
    {
      // nothing to do
    }



    template <typename T1, typename T2>
    void
    ConsensusAlgorithmProcess<T1, T2>::unpack_recv_buffer(
      const int,
      const std::vector<T2> &)
    {
      // nothing to do
    }
    template <typename T1, typename T2>
    ConsensusAlgorithm<T1, T2>::ConsensusAlgorithm(
      ConsensusAlgorithmProcess<T1, T2> &process,
      const MPI_Comm &                   comm)
      : process(process)
      , comm(comm)
      , my_rank(this_mpi_process(comm))
      , n_procs(n_mpi_processes(comm))
    {}



    template <typename T1, typename T2>
    ConsensusAlgorithm_NBX<T1, T2>::ConsensusAlgorithm_NBX(
      ConsensusAlgorithmProcess<T1, T2> &process,
      const MPI_Comm &                   comm)
      : ConsensusAlgorithm<T1, T2>(process, comm)
    {}



    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_NBX<T1, T2>::run()
    {
      // 1) send requests and start receiving the answers
      start_communication();

      // 2) answer requests and check whether all requests of this process
      //    have been answered
      while (!check_own_state())
        process_requests();

      // 3) signal to all other processes that all requests of this process
      //    have been answered
      signal_finish();

      // 4) nevertheless, this process has to keep on answering (potential)
      //    incoming requests until all processes have received the answer to
      //    all requests
      while (!check_global_state())
        process_requests();

      // 5) process the answers to all requests
      clean_up_and_end_communication();
    }



    template <typename T1, typename T2>
    bool
    ConsensusAlgorithm_NBX<T1, T2>::check_own_state()
    {
#ifdef DEAL_II_WITH_MPI
      int        all_receive_requests_are_done;
      const auto ierr = MPI_Testall(recv_requests.size(),
                                    recv_requests.data(),
                                    &all_receive_requests_are_done,
                                    MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);

      return all_receive_requests_are_done;
#else
      return true;
#endif
    }
    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_NBX<T1, T2>::signal_finish()
    {
#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
      AssertThrowMPI(ierr);
#  else
      AssertThrow(
        false,
        ExcMessage(
          "ConsensusAlgorithm_NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
#  endif
#endif
    }



    template <typename T1, typename T2>
    bool
    ConsensusAlgorithm_NBX<T1, T2>::check_global_state()
    {
#ifdef DEAL_II_WITH_MPI
      int        all_ranks_reached_barrier;
      const auto ierr = MPI_Test(&barrier_request,
                                 &all_ranks_reached_barrier,
                                 MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
      return all_ranks_reached_barrier;
#else
      return true;
#endif
    }
    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_NBX<T1, T2>::process_requests()
    {
#ifdef DEAL_II_WITH_MPI
      // tag values for the request and the answer message; they come from
      // the internal::Tags enum (see mpi_tags.h)
      const int tag_request =
        /* ...an entry of internal::Tags... */;
      const int tag_deliver =
        /* ...an entry of internal::Tags... */;

      // check if there is a request pending
      MPI_Status status;
      int        request_is_pending;
      const auto ierr = MPI_Iprobe(
        MPI_ANY_SOURCE, tag_request, this->comm, &request_is_pending, &status);
      AssertThrowMPI(ierr);

      if (request_is_pending)
        {
          // get the rank of the requesting process
          const auto other_rank = status.MPI_SOURCE;

#  ifdef DEBUG
          Assert(requesting_processes.find(other_rank) ==
                   requesting_processes.end(),
                 ExcMessage("Process is requesting a second time!"));
          requesting_processes.insert(other_rank);
#  endif

          std::vector<T1> buffer_recv;

          // get size of the incoming message and allocate memory for it
          int  number_amount;
          auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
          AssertThrowMPI(ierr);

          Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
          buffer_recv.resize(number_amount / sizeof(T1));
          ierr = MPI_Recv(buffer_recv.data(),
                          number_amount,
                          MPI_BYTE,
                          other_rank,
                          tag_request,
                          this->comm,
                          &status);
          AssertThrowMPI(ierr);

          // allocate memory for the answer message
          request_buffers.emplace_back(
            std_cxx14::make_unique<std::vector<T2>>());
          request_requests.emplace_back(std_cxx14::make_unique<MPI_Request>());

          // process the request ...
          auto &request_buffer = *request_buffers.back();
          this->process.process_request(other_rank,
                                        buffer_recv,
                                        request_buffer);

          // ... and start to send the answer back
          ierr = MPI_Isend(request_buffer.data(),
                           request_buffer.size() * sizeof(T2),
                           MPI_BYTE,
                           other_rank,
                           tag_deliver,
                           this->comm,
                           request_requests.back().get());
          AssertThrowMPI(ierr);
        }
#endif
    }
    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_NBX<T1, T2>::start_communication()
    {
#ifdef DEAL_II_WITH_MPI
      // 1) set up the list of processes we want to send requests to
      targets              = this->process.compute_targets();
      const auto n_targets = targets.size();

      // tag values for the request and the answer message, taken from the
      // internal::Tags enum (see mpi_tags.h)
      const int tag_request =
        /* ...an entry of internal::Tags... */;
      const int tag_deliver =
        /* ...an entry of internal::Tags... */;

      // 2) allocate a buffer and a request object per target
      recv_buffers.resize(n_targets);
      recv_requests.resize(n_targets);
      send_requests.resize(n_targets);
      send_buffers.resize(n_targets);

      // 3) pack and send the requests, and post the receives for the answers
      for (unsigned int i = 0; i < n_targets; i++)
        {
          const unsigned int rank  = targets[i];
          const unsigned int index = i;

          auto &send_buffer = send_buffers[index];
          this->process.pack_recv_buffer(rank, send_buffer);
          auto ierr = MPI_Isend(send_buffer.data(),
                                send_buffer.size() * sizeof(T1),
                                MPI_BYTE,
                                rank,
                                tag_request,
                                this->comm,
                                &send_requests[index]);
          AssertThrowMPI(ierr);

          auto &recv_buffer = recv_buffers[index];
          this->process.prepare_recv_buffer(rank, recv_buffer);
          ierr = MPI_Irecv(recv_buffer.data(),
                           recv_buffer.size() * sizeof(T2),
                           MPI_BYTE,
                           rank,
                           tag_deliver,
                           this->comm,
                           &recv_requests[index]);
          AssertThrowMPI(ierr);
        }
#endif
    }



    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_NBX<T1, T2>::clean_up_and_end_communication()
    {
#ifdef DEAL_II_WITH_MPI
      // wait for all requests (sends, receives, and the barrier) to finish
      if (send_requests.size() > 0)
        {
          const int ierr = MPI_Waitall(send_requests.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      if (recv_requests.size() > 0)
        {
          const int ierr = MPI_Waitall(recv_requests.size(),
                                       recv_requests.data(),
                                       MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      const int ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
      AssertThrowMPI(ierr);

      // also wait for the sends of the answers we handed out
      for (auto &i : request_requests)
        {
          const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

#  ifdef DEBUG
      MPI_Barrier(this->comm);
#  endif

      // finally unpack the received answers
      for (unsigned int i = 0; i < targets.size(); i++)
        this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
#endif
    }
    template <typename T1, typename T2>
    ConsensusAlgorithm_PEX<T1, T2>::ConsensusAlgorithm_PEX(
      ConsensusAlgorithmProcess<T1, T2> &process,
      const MPI_Comm &                   comm)
      : ConsensusAlgorithm<T1, T2>(process, comm)
    {}



    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_PEX<T1, T2>::run()
    {
      // 1) send requests and start receiving the answers; in particular,
      //    determine how many requests this process is going to receive
      const unsigned int n_requests = start_communication();

      // 2) answer exactly that many requests
      for (unsigned int request = 0; request < n_requests; request++)
        process_requests(request);

      // 3) process the answers to our own requests
      clean_up_and_end_communication();
    }
    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_PEX<T1, T2>::process_requests(int index)
    {
#ifdef DEAL_II_WITH_MPI
      // tag values for the request and the answer message, taken from the
      // internal::Tags enum (see mpi_tags.h)
      const int tag_request =
        /* ...an entry of internal::Tags... */;
      const int tag_deliver =
        /* ...an entry of internal::Tags... */;

      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);

      // get the rank of the incoming message
      const auto other_rank = status.MPI_SOURCE;

      std::vector<T1> buffer_recv;

      // get size of the incoming message and allocate memory for it
      int  number_amount;
      auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
      AssertThrowMPI(ierr);

      Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
      buffer_recv.resize(number_amount / sizeof(T1));
      ierr = MPI_Recv(buffer_recv.data(),
                      number_amount,
                      MPI_BYTE,
                      other_rank,
                      tag_request,
                      this->comm,
                      &status);
      AssertThrowMPI(ierr);

      // process the request and send the answer back
      auto &request_buffer = requests_buffers[index];
      this->process.process_request(other_rank, buffer_recv, request_buffer);

      ierr = MPI_Isend(request_buffer.data(),
                       request_buffer.size() * sizeof(T2),
                       MPI_BYTE,
                       other_rank,
                       tag_deliver,
                       this->comm,
                       &requests_answers[index]);
      AssertThrowMPI(ierr);
#endif
    }
    template <typename T1, typename T2>
    unsigned int
    ConsensusAlgorithm_PEX<T1, T2>::start_communication()
    {
#ifdef DEAL_II_WITH_MPI
      // 1) determine with which processes this process wants to communicate,
      //    and which processes will send requests to it
      targets = this->process.compute_targets();
      sources =
        compute_point_to_point_communication_pattern(this->comm, targets);

      // tag values for the request and the answer message, taken from the
      // internal::Tags enum (see mpi_tags.h)
      const int tag_request =
        /* ...an entry of internal::Tags... */;
      const int tag_deliver =
        /* ...an entry of internal::Tags... */;

      const auto n_targets = targets.size();
      const auto n_sources = sources.size();

      // 2) allocate memory
      recv_buffers.resize(n_targets);
      send_buffers.resize(n_targets);
      send_and_recv_buffers.resize(2 * n_targets);

      requests_buffers.resize(n_sources);
      requests_answers.resize(n_sources);

      // 3) pack and send the requests, and post the receives for the answers
      for (unsigned int i = 0; i < n_targets; i++)
        {
          const unsigned int rank = targets[i];

          auto &send_buffer = send_buffers[i];
          this->process.pack_recv_buffer(rank, send_buffer);
          auto ierr = MPI_Isend(send_buffer.data(),
                                send_buffer.size() * sizeof(T1),
                                MPI_BYTE,
                                rank,
                                tag_request,
                                this->comm,
                                &send_and_recv_buffers[/* ... */]);
          AssertThrowMPI(ierr);

          auto &recv_buffer = recv_buffers[i];
          this->process.prepare_recv_buffer(rank, recv_buffer);
          ierr = MPI_Irecv(recv_buffer.data(),
                           recv_buffer.size() * sizeof(T2),
                           MPI_BYTE,
                           rank,
                           tag_deliver,
                           this->comm,
                           &send_and_recv_buffers[/* ... */]);
          AssertThrowMPI(ierr);
        }

      return n_sources;
#else
      return 0;
#endif
    }



    template <typename T1, typename T2>
    void
    ConsensusAlgorithm_PEX<T1, T2>::clean_up_and_end_communication()
    {
#ifdef DEAL_II_WITH_MPI
      // finalize all pending MPI_Requests
      if (send_and_recv_buffers.size() > 0)
        {
          const int ierr = MPI_Waitall(send_and_recv_buffers.size(),
                                       send_and_recv_buffers.data(),
                                       MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }
      if (requests_answers.size() > 0)
        {
          const int ierr = MPI_Waitall(requests_answers.size(),
                                       requests_answers.data(),
                                       MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      // finally unpack the received answers
      for (unsigned int i = 0; i < targets.size(); i++)
        this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
#endif
    }
    template <typename T1, typename T2>
    ConsensusAlgorithmSelector<T1, T2>::ConsensusAlgorithmSelector(
      ConsensusAlgorithmProcess<T1, T2> &process,
      const MPI_Comm &                   comm)
      : ConsensusAlgorithm<T1, T2>(process, comm)
    {
      // Pick an implementation depending on the MPI version and the number
      // of participating processes: the non-blocking NBX variant requires
      // MPI >= 3.0 and pays off on large communicators, PEX is the fallback.
#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      // ... (choose ConsensusAlgorithm_NBX<T1, T2> above a process-count
      //      threshold)
#  endif
#endif
      // ... (otherwise use ConsensusAlgorithm_PEX<T1, T2>)
    }



    template <typename T1, typename T2>
    void
    ConsensusAlgorithmSelector<T1, T2>::run()
    {
      consensus_algo->run();
    }
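

    /* Usage sketch (illustration only; `MyProcess`, the chosen template
       arguments, and `comm` are placeholders): user code derives from
       ConsensusAlgorithmProcess<T1, T2>, describes whom to ask and what to
       send, and lets ConsensusAlgorithmSelector pick the NBX or PEX
       implementation.

         class MyProcess : public dealii::Utilities::MPI::
                             ConsensusAlgorithmProcess<unsigned int, double>
         {
         public:
           virtual std::vector<unsigned int>
           compute_targets() override
           {
             return {...};              // ranks we need an answer from
           }

           virtual void
           pack_recv_buffer(const int                  other_rank,
                            std::vector<unsigned int> &send_buffer) override
           {
             send_buffer = {...};       // the request payload
           }

           virtual void
           prepare_recv_buffer(const int            other_rank,
                               std::vector<double> &recv_buffer) override
           {
             recv_buffer.resize(1);     // we expect one number back
           }

           virtual void
           process_request(const unsigned int               other_rank,
                           const std::vector<unsigned int> &buffer_recv,
                           std::vector<double> &request_buffer) override
           {
             request_buffer = {42.0};   // answer the incoming request
           }

           virtual void
           unpack_recv_buffer(const int                  other_rank,
                              const std::vector<double> &recv_buffer) override
           {
             // store the answer we received from other_rank
           }
         };

         MyProcess process;
         dealii::Utilities::MPI::ConsensusAlgorithmSelector<unsigned int,
                                                            double>
           selector(process, comm);
         selector.run();
    */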
    std::vector<unsigned int>
    compute_index_owner(const IndexSet &owned_indices,
                        const IndexSet &indices_to_look_up,
                        const MPI_Comm &comm)
    {
      Assert(owned_indices.size() == indices_to_look_up.size(),
             ExcMessage("IndexSets have to have the same sizes."));

      Assert(
        owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
        ExcMessage("IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

      // Step 1: set up a dictionary in which the owned indices are
      // repartitioned statically among the processes (see
      // mpi_compute_index_owner_internal.h).
      internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
        owned_indices, indices_to_look_up, comm, owning_ranks);

      // Step 2: ask the process owning an index in the static partition
      // (i.e., in the dictionary) for the actual owner of that index.
      ConsensusAlgorithmSelector<
        std::pair<types::global_dof_index, types::global_dof_index>,
        unsigned int>
        consensus_algorithm(process, comm);
      consensus_algorithm.run();

      return owning_ranks;
    }
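

    /* Usage sketch (illustration only; the index sets and `mpi_communicator`
       are placeholders): every process passes the indices it owns and the
       indices whose owners it needs to know; the result contains one rank
       per locally requested index.

         dealii::IndexSet owned_indices(global_size);       // what we own
         dealii::IndexSet indices_to_look_up(global_size);  // e.g. ghosts
         // ... fill both index sets ...
         const std::vector<unsigned int> owners =
           dealii::Utilities::MPI::compute_index_owner(owned_indices,
                                                       indices_to_look_up,
                                                       mpi_communicator);
         // owners[k] is the rank owning the k-th element of
         // indices_to_look_up
    */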
    template class ConsensusAlgorithmSelector<
      std::pair<types::global_dof_index, types::global_dof_index>,
      unsigned int>;



    CollectiveMutex::CollectiveMutex()
      : locked(false)
      , request(MPI_REQUEST_NULL)
    {
      Utilities::MPI::MPI_InitFinalize::register_request(request);
    }
1569 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1584 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1586 #ifdef DEAL_II_WITH_MPI 1592 const int ierr = MPI_Barrier(comm);
1595 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0) 1598 const int ierr = MPI_Wait(&
request, MPI_STATUS_IGNORE);
1618 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1620 #ifdef DEAL_II_WITH_MPI 1626 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0) 1627 const int ierr = MPI_Ibarrier(comm, &
request);
1630 const int ierr = MPI_Barrier(comm);
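

    /* Usage sketch (illustration only; `mpi_communicator` is a placeholder):
       a CollectiveMutex guards a communication phase on a communicator so
       that messages of consecutive critical sections cannot get mixed up;
       lock() and unlock() are collective over the communicator.

         static dealii::Utilities::MPI::CollectiveMutex mutex;

         mutex.lock(mpi_communicator);
         // ... point-to-point communication that must not interleave with
         //     another critical section on this communicator ...
         mutex.unlock(mpi_communicator);
    */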
  } // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE