#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI (or PETSc), we still refer to a few MPI data types in
// interfaces, so provide integer dummies for them.
using MPI_Datatype = int;

#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_REQUEST_NULL
#    define MPI_REQUEST_NULL 0
#  endif
#endif

#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)

// MPI 3.0 and later accept const send buffers, so no cast is needed.
#    define DEAL_II_MPI_CONST_CAST(expr) (expr)

#  else

#    include <type_traits>

// Older MPI implementations take non-const send buffers; strip the const
// qualifier from the pointed-to type.
#    define DEAL_II_MPI_CONST_CAST(expr)     \
      const_cast<typename std::remove_const< \
        typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)

#  endif
#endif

// Forward declarations of the deal.II types used below.
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;
class IndexSet;
unsigned int
n_mpi_processes(const MPI_Comm &mpi_communicator);

unsigned int
this_mpi_process(const MPI_Comm &mpi_communicator);

IndexSet
create_evenly_distributed_partitioning(const unsigned int        my_partition_id,
                                       const unsigned int        n_partitions,
                                       const IndexSet::size_type total_size);

const std::vector<unsigned int>
mpi_processes_within_communicator(const MPI_Comm &comm_large,
                                  const MPI_Comm &comm_small);

std::vector<unsigned int>
compute_point_to_point_communication_pattern(
  const MPI_Comm &                 mpi_comm,
  const std::vector<unsigned int> &destinations);

unsigned int
compute_n_point_to_point_communications(
  const MPI_Comm &                 mpi_comm,
  const std::vector<unsigned int> &destinations);
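/**
 * A minimal usage sketch for the two pattern functions above (illustrative,
 * assuming MPI has been initialized, e.g. via
 * Utilities::MPI::MPI_InitFinalize): every rank sends to its right neighbor,
 * and the pattern call answers who will send to it.
 * @code
 * const unsigned int my_rank =
 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
 * const unsigned int n_ranks =
 *   Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
 * const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};
 * const std::vector<unsigned int> sources =
 *   Utilities::MPI::compute_point_to_point_communication_pattern(
 *     MPI_COMM_WORLD, destinations);
 * // 'sources' contains the single entry (my_rank + n_ranks - 1) % n_ranks.
 * @endcode
 */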
#ifdef DEAL_II_WITH_MPI
int
create_group(const MPI_Comm & comm,
             const MPI_Group &group,
             const int        tag,
             MPI_Comm *       new_comm);
#endif

std::vector<IndexSet>
create_ascending_partitioning(const MPI_Comm &          comm,
                              const IndexSet::size_type locally_owned_size);

IndexSet
create_evenly_distributed_partitioning(const MPI_Comm &          comm,
                                       const IndexSet::size_type total_size);

#ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number = long double>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator  begin,
                            const Iterator  end,
                            const MPI_Comm &comm);
#endif
template <typename T>
T
sum(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void
sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);

template <typename T>
void
sum(const ArrayView<const T> &values,
    const MPI_Comm &          mpi_communicator,
    const ArrayView<T> &      sums);

template <int rank, int dim, typename Number>
SymmetricTensor<rank, dim, Number>
sum(const SymmetricTensor<rank, dim, Number> &local,
    const MPI_Comm &                           mpi_communicator);

template <int rank, int dim, typename Number>
Tensor<rank, dim, Number>
sum(const Tensor<rank, dim, Number> &local,
    const MPI_Comm &                 mpi_communicator);

template <typename Number>
void
sum(const SparseMatrix<Number> &local,
    const MPI_Comm &            mpi_communicator,
    SparseMatrix<Number> &      global);
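/**
 * A minimal usage sketch for the scalar sum() overload above (illustrative,
 * assuming MPI has been initialized): every rank contributes its rank
 * number, and all ranks receive the global sum 0 + 1 + ... + (n_ranks - 1).
 * @code
 * const double local_value =
 *   static_cast<double>(Utilities::MPI::this_mpi_process(MPI_COMM_WORLD));
 * const double global_sum =
 *   Utilities::MPI::sum(local_value, MPI_COMM_WORLD);
 * @endcode
 */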
template <typename T>
T
max(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void
max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);

template <typename T>
void
max(const ArrayView<const T> &values,
    const MPI_Comm &          mpi_communicator,
    const ArrayView<T> &      maxima);

template <typename T>
T
min(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void
min(const T &values, const MPI_Comm &mpi_communicator, U &minima);

template <typename T>
void
min(const ArrayView<const T> &values,
    const MPI_Comm &          mpi_communicator,
    const ArrayView<T> &      minima);
MinMaxAvg
min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);

std::vector<MinMaxAvg>
min_max_avg(const std::vector<double> &my_values,
            const MPI_Comm &           mpi_communicator);
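/**
 * A minimal usage sketch for min_max_avg() (illustrative, assuming MPI has
 * been initialized): the returned struct bundles the minimum, maximum, sum,
 * and average of one value per rank, plus the ranks attaining the extrema.
 * @code
 * const double local_time = 0.1; // e.g. a per-rank timing
 * const Utilities::MPI::MinMaxAvg stats =
 *   Utilities::MPI::min_max_avg(local_time, MPI_COMM_WORLD);
 * std::cout << "max " << stats.max << " on rank " << stats.max_index
 *           << ", avg " << stats.avg << std::endl;
 * @endcode
 */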
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm &                 comm,
             const std::map<unsigned int, T> &objects_to_send);
template <typename T>
std::vector<T>
all_gather(const MPI_Comm &comm, const T &object_to_send);

template <typename T>
std::vector<T>
gather(const MPI_Comm &   comm,
       const T &          object_to_send,
       const unsigned int root_process = 0);
std::vector<unsigned int>
compute_index_owner(const IndexSet &owned_indices,
                    const IndexSet &indices_to_look_up,
                    const MPI_Comm &comm);
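/**
 * A minimal usage sketch for compute_index_owner() (illustrative, assuming
 * exactly two MPI ranks and an index space [0,8) split evenly): each rank
 * asks who owns index 4.
 * @code
 * IndexSet owned(8);
 * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 *   owned.add_range(0, 4);
 * else
 *   owned.add_range(4, 8);
 *
 * IndexSet queries(8);
 * queries.add_index(4);
 *
 * const std::vector<unsigned int> owners =
 *   Utilities::MPI::compute_index_owner(owned, queries, MPI_COMM_WORLD);
 * // owners == {1} on every rank, since rank 1 owns index 4.
 * @endcode
 */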
template <typename T>
std::vector<T>
compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);

template <typename T>
std::set<T>
compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
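/**
 * A minimal usage sketch for compute_set_union() (illustrative, assuming
 * MPI has been initialized): every rank contributes its own rank number and
 * receives the union {0, 1, ..., n_ranks - 1}.
 * @code
 * const std::vector<unsigned int> local = {
 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)};
 * const std::vector<unsigned int> global =
 *   Utilities::MPI::compute_set_union(local, MPI_COMM_WORLD);
 * @endcode
 */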
namespace internal
{
  // Handles the MPI_Allreduce calls underlying the fixed-size-array
  // sum(), max(), and min() wrappers below.
  template <typename T>
  void
  all_reduce(const MPI_Op &            mpi_op,
             const ArrayView<const T> &values,
             const MPI_Comm &          mpi_communicator,
             const ArrayView<T> &      output);
} // namespace internal
template <typename T, unsigned int N>
void
sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
{
  internal::all_reduce(MPI_SUM,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(sums, N));
}

template <typename T, unsigned int N>
void
max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
{
  internal::all_reduce(MPI_MAX,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(maxima, N));
}

template <typename T, unsigned int N>
void
min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
{
  internal::all_reduce(MPI_MIN,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(minima, N));
}
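/**
 * A minimal usage sketch for the fixed-size array overloads above
 * (illustrative, assuming MPI has been initialized): reduce three counters
 * in a single collective call.
 * @code
 * double local[3]  = {1., 2., 3.};
 * double global[3] = {};
 * Utilities::MPI::sum(local, MPI_COMM_WORLD, global);
 * @endcode
 */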
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm &                 comm,
             const std::map<unsigned int, T> &objects_to_send)
{
#  ifndef DEAL_II_WITH_MPI
  (void)comm;
  Assert(objects_to_send.size() < 2,
         ExcMessage("Cannot send to more than one processor."));
  Assert(objects_to_send.find(0) != objects_to_send.end() ||
           objects_to_send.size() == 0,
         ExcMessage("Can only send to myself or to nobody."));
  return objects_to_send;
#  else
  const auto my_proc = this_mpi_process(comm);

  std::map<unsigned int, T> received_objects;

  // Everything addressed to ourselves can be copied directly; everything
  // else has to go through MPI.
  std::vector<unsigned int> send_to;
  send_to.reserve(objects_to_send.size());
  for (const auto &m : objects_to_send)
    if (m.first == my_proc)
      received_objects[my_proc] = m.second;
    else
      send_to.emplace_back(m.first);

  const unsigned int n_point_point_communications =
    Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);

  // If we neither send nor expect to receive anything, we are done.
  if (send_to.size() == 0 && n_point_point_communications == 0)
    return received_objects;

  // All messages in this function use the same fixed tag, reserved for
  // some_to_some() in internal::Tags.
  const int mpi_tag = internal::Tags::mpi_some_to_some;

  // Post non-blocking sends of the packed objects.
  std::vector<std::vector<char>> buffers_to_send(send_to.size());
  std::vector<MPI_Request>       buffer_send_requests(send_to.size());
  {
    unsigned int i = 0;
    for (const auto &rank_obj : objects_to_send)
      if (rank_obj.first != my_proc)
        {
          const auto &rank   = rank_obj.first;
          buffers_to_send[i] = Utilities::pack(rank_obj.second,
                                               /*allow_compression=*/false);
          const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                     buffers_to_send[i].size(),
                                     MPI_CHAR,
                                     rank,
                                     mpi_tag,
                                     comm,
                                     &buffer_send_requests[i]);
          AssertThrowMPI(ierr);
          ++i;
        }
  }

  // Fill the output map, receiving on a first come/first served basis.
  {
    std::vector<char> buffer;
    for (unsigned int i = 0; i < n_point_point_communications; ++i)
      {
        // Probe for the next incoming message and query its length.
        MPI_Status status;
        int        ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
        AssertThrowMPI(ierr);

        int len;
        ierr = MPI_Get_count(&status, MPI_CHAR, &len);
        AssertThrowMPI(ierr);
        buffer.resize(len);

        const unsigned int rank = status.MPI_SOURCE;

        // Actually receive and unpack the message.
        ierr = MPI_Recv(buffer.data(),
                        len,
                        MPI_CHAR,
                        status.MPI_SOURCE,
                        status.MPI_TAG,
                        comm,
                        MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);
        Assert(received_objects.find(rank) == received_objects.end(),
               ExcInternalError(
                 "I should not receive again from this rank"));
        received_objects[rank] =
          Utilities::unpack<T>(buffer, /*allow_compression=*/false);
      }
  }

  // Wait for all sends to finish before the buffers go out of scope.
  const int ierr = MPI_Waitall(send_to.size(),
                               buffer_send_requests.data(),
                               MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);

  return received_objects;
#  endif // deal.II with MPI
}
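/**
 * A minimal usage sketch for some_to_some() (illustrative, assuming at
 * least two MPI ranks): rank 0 sends a string to rank 1; all other ranks
 * send nothing.
 * @code
 * std::map<unsigned int, std::string> to_send;
 * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
 *   to_send[1] = "hello";
 * const std::map<unsigned int, std::string> received =
 *   Utilities::MPI::some_to_some(MPI_COMM_WORLD, to_send);
 * // On rank 1, received.at(0) == "hello"; elsewhere 'received' is empty.
 * @endcode
 */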
template <typename T>
std::vector<T>
all_gather(const MPI_Comm &comm, const T &object)
{
#  ifndef DEAL_II_WITH_MPI
  (void)comm;
  std::vector<T> v(1, object);
  return v;
#  else
  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);

  std::vector<char> buffer       = Utilities::pack(object);
  int               n_local_data = buffer.size();

  // Vector to store the size of the local buffer on every process
  std::vector<int> size_all_data(n_procs, 0);

  // Exchange the size of each buffer
  MPI_Allgather(
    &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);

  // Compute the displacement, relative to recvbuf, at which to store each
  // incoming buffer
  std::vector<int> rdispls(n_procs);
  rdispls[0] = 0;
  for (unsigned int i = 1; i < n_procs; ++i)
    rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

  // Exchange the actual buffers
  std::vector<char> received_unrolled_buffer(rdispls.back() +
                                             size_all_data.back());
  MPI_Allgatherv(buffer.data(),
                 n_local_data,
                 MPI_CHAR,
                 received_unrolled_buffer.data(),
                 size_all_data.data(),
                 rdispls.data(),
                 MPI_CHAR,
                 comm);

  // Unpack every process's slice of the gathered buffer
  std::vector<T> received_objects(n_procs);
  for (unsigned int i = 0; i < n_procs; ++i)
    {
      std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                       rdispls[i],
                                     received_unrolled_buffer.begin() +
                                       rdispls[i] + size_all_data[i]);
      received_objects[i] = Utilities::unpack<T>(local_buffer);
    }

  return received_objects;
#  endif
}
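/**
 * A minimal usage sketch for all_gather() (illustrative, assuming MPI has
 * been initialized): every rank contributes one string and receives
 * everyone's.
 * @code
 * const std::string my_name =
 *   "rank-" +
 *   std::to_string(Utilities::MPI::this_mpi_process(MPI_COMM_WORLD));
 * const std::vector<std::string> all_names =
 *   Utilities::MPI::all_gather(MPI_COMM_WORLD, my_name);
 * @endcode
 */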
template <typename T>
std::vector<T>
gather(const MPI_Comm &   comm,
       const T &          object_to_send,
       const unsigned int root_process)
{
#  ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  std::vector<T> v(1, object_to_send);
  return v;
#  else
  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
  const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);

  AssertIndexRange(root_process, n_procs);

  std::vector<char> buffer       = Utilities::pack(object_to_send);
  int               n_local_data = buffer.size();

  // Vector to store the size of the local buffer on every process; only
  // the root process needs to allocate memory for it
  std::vector<int> size_all_data;
  if (my_rank == root_process)
    size_all_data.resize(n_procs, 0);

  // Exchange the size of each buffer
  int ierr = MPI_Gather(&n_local_data,
                        1,
                        MPI_INT,
                        size_all_data.data(),
                        1,
                        MPI_INT,
                        root_process,
                        comm);
  AssertThrowMPI(ierr);

  // Compute the displacement, relative to recvbuf, at which to store each
  // incoming buffer; again, only the root needs this
  std::vector<int> rdispls;
  if (my_rank == root_process)
    {
      rdispls.resize(n_procs, 0);
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
    }

  // Exchange the actual buffers
  std::vector<char> received_unrolled_buffer;
  if (my_rank == root_process)
    received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

  ierr = MPI_Gatherv(buffer.data(),
                     n_local_data,
                     MPI_CHAR,
                     received_unrolled_buffer.data(),
                     size_all_data.data(),
                     rdispls.data(),
                     MPI_CHAR,
                     root_process,
                     comm);
  AssertThrowMPI(ierr);

  // Only the root unpacks anything; everyone else returns an empty vector
  std::vector<T> received_objects;
  if (my_rank == root_process)
    {
      received_objects.resize(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          const std::vector<char> local_buffer(
            received_unrolled_buffer.begin() + rdispls[i],
            received_unrolled_buffer.begin() + rdispls[i] +
              size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }
    }

  return received_objects;
#  endif
}
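/**
 * A minimal usage sketch for gather() (illustrative, assuming MPI has been
 * initialized): only the root process (rank 0 by default) receives the
 * gathered vector; all other ranks get an empty vector back.
 * @code
 * const unsigned int my_rank =
 *   Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
 * const std::vector<unsigned int> gathered =
 *   Utilities::MPI::gather(MPI_COMM_WORLD, my_rank);
 * // gathered == {0, 1, ..., n_ranks - 1} on rank 0, {} elsewhere.
 * @endcode
 */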
#  ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator  begin,
                            const Iterator  end,
                            const MPI_Comm &comm)
{
  // A simple two-pass implementation: first compute the global mean, then
  // the sum of squared deviations from it.
  using Std        = typename numbers::NumberTraits<Number>::real_type;
  const Number sum = std::accumulate(begin, end, Number(0.));

  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
  Assert(size > 0, ExcDivideByZero());
  const Number mean =
    Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);

  Std sq_sum = 0.;
  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
    sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
  });
  sq_sum = Utilities::MPI::sum(sq_sum, comm);
  return std::make_pair(mean,
                        std::sqrt(sq_sum / static_cast<Std>(size - 1)));
}
#  endif
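/**
 * A minimal usage sketch for mean_and_standard_deviation() (illustrative,
 * assuming MPI has been initialized): the statistics are taken over the
 * concatenation of all local ranges across all ranks.
 * @code
 * const std::vector<double> samples = {1., 2., 3.};
 * const auto stats = Utilities::MPI::mean_and_standard_deviation(
 *   samples.begin(), samples.end(), MPI_COMM_WORLD);
 * // stats.first is the global mean, stats.second the sample standard
 * // deviation (dividing by N - 1).
 * @endcode
 */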