Reference documentation for deal.II version 9.5.0
Utilities::MPI Namespace Reference

Namespaces

namespace  ConsensusAlgorithms
 
namespace  internal
 
namespace  LargeCount
 

Classes

class  CollectiveMutex
 
class  CommunicationPatternBase
 
class  DuplicatedCommunicator
 
class  Future
 
struct  MinMaxAvg
 
class  MPI_InitFinalize
 
class  NoncontiguousPartitioner
 
class  Partitioner
 
class  ProcessGrid
 
class  RemotePointEvaluation
 

Functions

unsigned int n_mpi_processes (const MPI_Comm mpi_communicator)
 
unsigned int this_mpi_process (const MPI_Comm mpi_communicator)
 
const std::vector< unsigned int > mpi_processes_within_communicator (const MPI_Comm comm_large, const MPI_Comm comm_small)
 
std::vector< unsigned int > compute_point_to_point_communication_pattern (const MPI_Comm mpi_comm, const std::vector< unsigned int > &destinations)
 
unsigned int compute_n_point_to_point_communications (const MPI_Comm mpi_comm, const std::vector< unsigned int > &destinations)
 
MPI_Comm duplicate_communicator (const MPI_Comm mpi_communicator)
 
void free_communicator (MPI_Comm mpi_communicator)
 
int create_group (const MPI_Comm comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
 
std::vector< IndexSet > create_ascending_partitioning (const MPI_Comm comm, const types::global_dof_index locally_owned_size)
 
IndexSet create_evenly_distributed_partitioning (const MPI_Comm comm, const types::global_dof_index total_size)
 
template<class Iterator , typename Number = long double>
std::pair< Number, typename numbers::NumberTraits< Number >::real_type > mean_and_standard_deviation (const Iterator begin, const Iterator end, const MPI_Comm comm)
 
std::unique_ptr< MPI_Datatype, void(*)(MPI_Datatype *)> create_mpi_data_type_n_bytes (const std::size_t n_bytes)
 
template<typename T >
T sum (const T &t, const MPI_Comm mpi_communicator)
 
template<typename T , typename U >
void sum (const T &values, const MPI_Comm mpi_communicator, U &sums)
 
template<typename T >
void sum (const ArrayView< const T > &values, const MPI_Comm mpi_communicator, const ArrayView< T > &sums)
 
template<int rank, int dim, typename Number >
SymmetricTensor< rank, dim, Number > sum (const SymmetricTensor< rank, dim, Number > &local, const MPI_Comm mpi_communicator)
 
template<int rank, int dim, typename Number >
Tensor< rank, dim, Number > sum (const Tensor< rank, dim, Number > &local, const MPI_Comm mpi_communicator)
 
template<typename Number >
void sum (const SparseMatrix< Number > &local, const MPI_Comm mpi_communicator, SparseMatrix< Number > &global)
 
template<typename T >
T max (const T &t, const MPI_Comm mpi_communicator)
 
template<typename T , typename U >
void max (const T &values, const MPI_Comm mpi_communicator, U &maxima)
 
template<typename T >
void max (const ArrayView< const T > &values, const MPI_Comm mpi_communicator, const ArrayView< T > &maxima)
 
template<typename T >
T min (const T &t, const MPI_Comm mpi_communicator)
 
template<typename T , typename U >
void min (const T &values, const MPI_Comm mpi_communicator, U &minima)
 
template<typename T >
void min (const ArrayView< const T > &values, const MPI_Comm mpi_communicator, const ArrayView< T > &minima)
 
template<typename T >
T logical_or (const T &t, const MPI_Comm mpi_communicator)
 
template<typename T , typename U >
void logical_or (const T &values, const MPI_Comm mpi_communicator, U &results)
 
template<typename T >
void logical_or (const ArrayView< const T > &values, const MPI_Comm mpi_communicator, const ArrayView< T > &results)
 
MinMaxAvg min_max_avg (const double my_value, const MPI_Comm mpi_communicator)
 
std::vector< MinMaxAvg > min_max_avg (const std::vector< double > &my_value, const MPI_Comm mpi_communicator)
 
void min_max_avg (const ArrayView< const double > &my_values, const ArrayView< MinMaxAvg > &result, const MPI_Comm mpi_communicator)
 
bool job_supports_mpi ()
 
template<typename T >
std::map< unsigned int, T > some_to_some (const MPI_Comm comm, const std::map< unsigned int, T > &objects_to_send)
 
template<typename T >
std::vector< T > all_gather (const MPI_Comm comm, const T &object_to_send)
 
template<typename T >
std::vector< T > gather (const MPI_Comm comm, const T &object_to_send, const unsigned int root_process=0)
 
template<typename T >
T scatter (const MPI_Comm comm, const std::vector< T > &objects_to_send, const unsigned int root_process=0)
 
template<typename T >
std::enable_if_t< is_mpi_type< T >==false, T > broadcast (const MPI_Comm comm, const T &object_to_send, const unsigned int root_process=0)
 
template<typename T >
std::enable_if_t< is_mpi_type< T >==true, T > broadcast (const MPI_Comm comm, const T &object_to_send, const unsigned int root_process=0)
 
template<typename T >
void broadcast (T *buffer, const size_t count, const unsigned int root, const MPI_Comm comm)
 
template<typename T >
T reduce (const T &local_value, const MPI_Comm comm, const std::function< T(const T &, const T &)> &combiner, const unsigned int root_process=0)
 
template<typename T >
T all_reduce (const T &local_value, const MPI_Comm comm, const std::function< T(const T &, const T &)> &combiner)
 
template<typename T >
Future< void > isend (const T &object, MPI_Comm communicator, const unsigned int target_rank, const unsigned int mpi_tag=0)
 
template<typename T >
Future< T > irecv (MPI_Comm communicator, const unsigned int source_rank, const unsigned int mpi_tag=0)
 
std::vector< unsigned int > compute_index_owner (const IndexSet &owned_indices, const IndexSet &indices_to_look_up, const MPI_Comm comm)
 
template<typename T >
std::vector< T > compute_set_union (const std::vector< T > &vec, const MPI_Comm comm)
 
template<typename T >
std::set< T > compute_set_union (const std::set< T > &set, const MPI_Comm comm)
 

Variables

template<typename T >
constexpr bool is_mpi_type
 
template<typename T >
const MPI_Datatype mpi_type_id_for_type
 
template const MPI_Datatype mpi_type_id_for_type< bool >
 
template const MPI_Datatype mpi_type_id_for_type< char >
 
template const MPI_Datatype mpi_type_id_for_type< signed char >
 
template const MPI_Datatype mpi_type_id_for_type< short >
 
template const MPI_Datatype mpi_type_id_for_type< int >
 
template const MPI_Datatype mpi_type_id_for_type< long int >
 
template const MPI_Datatype mpi_type_id_for_type< unsigned char >
 
template const MPI_Datatype mpi_type_id_for_type< unsigned short >
 
template const MPI_Datatype mpi_type_id_for_type< unsigned long int >
 
template const MPI_Datatype mpi_type_id_for_type< unsigned long long int >
 
template const MPI_Datatype mpi_type_id_for_type< float >
 
template const MPI_Datatype mpi_type_id_for_type< double >
 
template const MPI_Datatype mpi_type_id_for_type< long double >
 

Detailed Description

A namespace for utility functions that abstract certain operations using the Message Passing Interface (MPI) or provide fallback operations in case deal.II is configured not to use MPI at all.

Function Documentation

◆ n_mpi_processes()

unsigned int Utilities::MPI::n_mpi_processes ( const MPI_Comm  mpi_communicator)

Return the number of MPI processes that exist in the given communicator object. If this is a sequential job (i.e., the program is not using MPI at all, or is using MPI but has been started with only one MPI process), then the communicator necessarily involves only one process and the function returns 1.

Definition at line 150 of file mpi.cc.

◆ this_mpi_process()

unsigned int Utilities::MPI::this_mpi_process ( const MPI_Comm  mpi_communicator)

Return the rank of the present MPI process in the space of processes described by the given communicator. This will be a unique value for each process, between zero and (strictly less than) the number of all processes (given by n_mpi_processes()).

Definition at line 161 of file mpi.cc.
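
A minimal usage sketch for these two query functions (the MPI_InitFinalize setup and the use of MPI_COMM_WORLD are only illustrative choices):

  #include <deal.II/base/mpi.h>
  #include <iostream>

  int main(int argc, char **argv)
  {
    // Initialize MPI (and limit deal.II to one thread per process):
    dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

    const unsigned int n_ranks =
      dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
    const unsigned int my_rank =
      dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

    // Every process prints its own rank out of the total number of ranks.
    std::cout << "Process " << my_rank << " of " << n_ranks << std::endl;
  }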

◆ mpi_processes_within_communicator()

const std::vector< unsigned int > Utilities::MPI::mpi_processes_within_communicator ( const MPI_Comm  comm_large,
const MPI_Comm  comm_small 
)

Return a vector of the ranks (within comm_large) of a subset of processes specified by comm_small.

Definition at line 173 of file mpi.cc.

◆ compute_point_to_point_communication_pattern()

std::vector< unsigned int > Utilities::MPI::compute_point_to_point_communication_pattern ( const MPI_Comm  mpi_comm,
const std::vector< unsigned int > &  destinations 
)

Consider an unstructured communication pattern where every process in an MPI universe wants to send some data to a subset of the other processors. To do that, the other processors need to know who to expect messages from. This function computes this information.

Parameters
mpi_comm: A communicator that describes the processors that are going to communicate with each other.
destinations: The list of processors the current process wants to send information to. This list need not be sorted in any way. If it contains duplicate entries that means that multiple messages are intended for a given destination.
Returns
A list of processors that have indicated that they want to send something to the current processor. The resulting list is not sorted. It may contain duplicate entries if processors enter the same destination more than once in their destinations list.

Definition at line 314 of file mpi.cc.
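
An illustrative sketch, assuming the dealii namespace is in scope and mpi_comm is a valid communicator (the cyclic send pattern is only an example):

  // Each process intends to send one message to its right neighbor (cyclically).
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(mpi_comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(mpi_comm);
  const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};

  // Determine which processes will send a message to *this* process:
  const std::vector<unsigned int> sources =
    Utilities::MPI::compute_point_to_point_communication_pattern(mpi_comm,
                                                                 destinations);
  // With the pattern above, 'sources' contains exactly the left neighbor.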

◆ compute_n_point_to_point_communications()

unsigned int Utilities::MPI::compute_n_point_to_point_communications ( const MPI_Comm  mpi_comm,
const std::vector< unsigned int > &  destinations 
)

Simplified (for efficiency) version of the compute_point_to_point_communication_pattern() which only computes the number of processes in an MPI universe to expect communication from.

Parameters
mpi_comm: A communicator that describes the processors that are going to communicate with each other.
destinations: The list of processors the current process wants to send information to. This list need not be sorted in any way. If it contains duplicate entries that means that multiple messages are intended for a given destination.
Returns
The number of processors that want to send something to the current processor.

Definition at line 429 of file mpi.cc.

◆ duplicate_communicator()

MPI_Comm Utilities::MPI::duplicate_communicator ( const MPI_Comm  mpi_communicator)

Given a communicator, generate a new communicator that contains the same set of processors but that has a different, unique identifier.

This functionality can be used to ensure that different objects, such as distributed matrices, each have unique communicators over which they can interact without interfering with each other.

When no longer needed, the communicator created here needs to be destroyed using free_communicator().

This function is equivalent to calling MPI_Comm_dup(mpi_communicator, &return_value);.

Definition at line 193 of file mpi.cc.

◆ free_communicator()

void Utilities::MPI::free_communicator ( MPI_Comm  mpi_communicator)

Free the given communicator mpi_communicator that was duplicated using duplicate_communicator().

The argument is passed by reference and will be invalidated and set to the MPI null handle. This function is equivalent to calling MPI_Comm_free(&mpi_communicator);.

Definition at line 204 of file mpi.cc.
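
A sketch of the intended pairing of duplicate_communicator() and free_communicator(); mpi_communicator is assumed to be an existing communicator. (The DuplicatedCommunicator class listed above performs the same steps in RAII style.)

  // Create a private copy of the communicator for one object:
  MPI_Comm duplicate =
    Utilities::MPI::duplicate_communicator(mpi_communicator);

  // ... communicate on 'duplicate' without interfering with traffic
  //     on 'mpi_communicator' ...

  // Release the duplicate once it is no longer needed:
  Utilities::MPI::free_communicator(duplicate);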

◆ create_group()

int Utilities::MPI::create_group ( const MPI_Comm  comm,
const MPI_Group &  group,
const int  tag,
MPI_Comm new_comm 
)

If comm is an intracommunicator, this function returns a new communicator new_comm with communication group defined by the group argument. The function is only collective over the group of processes that actually want to create the communicator, i.e., that are named in the group argument. If multiple threads at a given process perform concurrent create_group() operations, the user must distinguish these operations by providing different tag or comm arguments.

This function was introduced in the MPI-3.0 standard. If available, the corresponding function in the provided MPI implementation is used. Otherwise, the implementation follows the one described in the following publication:

@inproceedings{dinan2011noncollective,
title = {Noncollective communicator creation in MPI},
author = {Dinan, James and Krishnamoorthy, Sriram and Balaji,
Pavan and Hammond, Jeff R and Krishnan, Manojkumar and
Tipparaju, Vinod and Vishnu, Abhinav},
booktitle = {European MPI Users' Group Meeting},
pages = {282--291},
year = {2011},
organization = {Springer}
}
Deprecated:
Use MPI_Comm_create_group directly

Definition at line 214 of file mpi.cc.

◆ create_ascending_partitioning()

std::vector< IndexSet > Utilities::MPI::create_ascending_partitioning ( const MPI_Comm  comm,
const types::global_dof_index  locally_owned_size 
)

Given the number of locally owned elements locally_owned_size, create a 1:1 partitioning of the elements across the MPI communicator comm. The total number of elements is the sum of locally_owned_size across the MPI communicator. Each process will store a contiguous subset of indices, and the index set on process p+1 starts at the index one larger than the last one stored on process p.

Definition at line 227 of file mpi.cc.
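
An illustrative sketch, assuming the dealii namespace is in scope, the appropriate deal.II headers are included, and comm is a valid communicator (the local sizes are arbitrary):

  // Each process contributes 10 locally owned elements:
  const types::global_dof_index n_local = 10;
  const std::vector<IndexSet> owned =
    Utilities::MPI::create_ascending_partitioning(comm, n_local);

  // owned[p] is the contiguous IndexSet owned by process p; e.g. with
  // four processes and n_local == 10 everywhere, owned[1] covers [10,20).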

◆ create_evenly_distributed_partitioning()

IndexSet Utilities::MPI::create_evenly_distributed_partitioning ( const MPI_Comm  comm,
const types::global_dof_index  total_size 
)

Given the total number of elements total_size, create an evenly distributed 1:1 partitioning of the elements across the MPI communicator comm. Uses comm to determine number of partitions and processor ID to call the create_evenly_distributed_partitioning() function above.

Definition at line 256 of file mpi.cc.

◆ mean_and_standard_deviation()

template<class Iterator , typename Number = long double>
std::pair< Number, typename numbers::NumberTraits< Number >::real_type > Utilities::MPI::mean_and_standard_deviation ( const Iterator  begin,
const Iterator  end,
const MPI_Comm  comm 
)

Calculate mean and standard deviation across the MPI communicator comm for values provided as a range [begin,end). The mean is computed as \(\bar x=\frac 1N \sum x_k\) where the \(x_k\) are the elements pointed to by the begin and end iterators on all processors (i.e., each processor's [begin,end) range points to a subset of the overall number of elements). The standard deviation is calculated as \(\sigma=\sqrt{\frac {1}{N-1} \sum |x_k -\bar x|^2}\), i.e., as the square root of the unbiased sample variance.

Template Parameters
Number: specifies the type used to store the mean value. The standard deviation is stored as the corresponding real type. This allows, for example, calculating statistics from integer input values.
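
A short sketch, assuming a valid communicator comm; the contents of local_samples are assumed to be filled elsewhere:

  std::vector<double> local_samples; // each process holds part of the data
  // ... fill local_samples ...
  const auto stats =
    Utilities::MPI::mean_and_standard_deviation(local_samples.begin(),
                                                local_samples.end(),
                                                comm);
  // stats.first is the global mean, stats.second the standard deviation,
  // both accumulated in 'long double' (the default Number type).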

◆ create_mpi_data_type_n_bytes()

std::unique_ptr< MPI_Datatype, void(*)(MPI_Datatype *)> Utilities::MPI::create_mpi_data_type_n_bytes ( const std::size_t  n_bytes)

Create an object that contains an MPI_Datatype that represents n_bytes bytes.

The resulting data type can be used in MPI send/receive or MPI IO to process messages of sizes larger than 2 GB with MPI_BYTE as the underlying data type. This helper is required for MPI versions before 4.0 because routines like MPI_Send use a signed integer for the count variable. Instead, you can use this data type with the appropriate size set to the size of your message and by passing 1 as the count.

Note
The function does not just return an object of type MPI_Datatype because such objects need to be destroyed by a call to MPI_Type_free and it is easy to forget to do so (thereby creating a resource leak). Rather, the function returns an object that points to such an MPI_Datatype object, but also has a "deleter" function that ensures that MPI_Type_free is called whenever the object returned by this function goes out of scope.

Usage example:

  std::vector<char> buffer;
  [...]
  if (buffer.size() < (1U << 31))
    {
      // less than 2GB of data
      MPI_Send(buffer.data(), buffer.size(), MPI_BYTE, dest, tag, comm);
    }
  else
    {
      // more than 2GB of data
      const auto bigtype =
        Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
      MPI_Send(buffer.data(), 1, *bigtype, dest, tag, comm);
    }

Alternatively, the code in the else branch can be simplified to the following:

  [...]
  else
    {
      // more than 2GB of data
      MPI_Send(buffer.data(),
               1,
               *Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size()),
               dest,
               tag,
               comm);
    }

Definition at line 271 of file mpi.cc.

◆ sum() [1/6]

template<typename T >
T Utilities::MPI::sum ( const T &  t,
const MPI_Comm  mpi_communicator 
)

Return the sum over all processors of the value t. This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e. all processors receive the result of this operation.

Note
Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
This function is only implemented for certain template arguments T, namely float, double, int, unsigned int.
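
A typical usage sketch; compute_local_error() is a hypothetical helper standing in for whatever produces the local contribution:

  const double local_error = compute_local_error();
  const double global_error =
    Utilities::MPI::sum(local_error, mpi_communicator);
  // 'global_error' now holds the same value on every process.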

◆ sum() [2/6]

template<typename T , typename U >
void Utilities::MPI::sum ( const T &  values,
const MPI_Comm  mpi_communicator,
U &  sums 
)

Like the previous function, but take the sums over the elements of an array of type T. In other words, the i-th element of the results array is the sum over the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g. they just differ by one of them having a const type qualifier and the other not.

Input and output arrays may be the same.
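
A sketch of this array variant (sizes and values chosen only for illustration):

  std::vector<double> local_contributions(3, 1.0);
  std::vector<double> global_sums(3);
  Utilities::MPI::sum(local_contributions, mpi_communicator, global_sums);

  // Because input and output may be the same, summing in place also works:
  Utilities::MPI::sum(local_contributions,
                      mpi_communicator,
                      local_contributions);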

◆ sum() [3/6]

template<typename T >
void Utilities::MPI::sum ( const ArrayView< const T > &  values,
const MPI_Comm  mpi_communicator,
const ArrayView< T > &  sums 
)

Like the previous function, but take the sums over the elements of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the sum over the i-th entries of the input arrays from each processor.

Input and output arrays may be the same.

◆ sum() [4/6]

template<int rank, int dim, typename Number >
SymmetricTensor< rank, dim, Number > Utilities::MPI::sum ( const SymmetricTensor< rank, dim, Number > &  local,
const MPI_Comm  mpi_communicator 
)

Perform an MPI sum of the entries of a symmetric tensor.

◆ sum() [5/6]

template<int rank, int dim, typename Number >
Tensor< rank, dim, Number > Utilities::MPI::sum ( const Tensor< rank, dim, Number > &  local,
const MPI_Comm  mpi_communicator 
)

Perform an MPI sum of the entries of a tensor.

◆ sum() [6/6]

template<typename Number >
void Utilities::MPI::sum ( const SparseMatrix< Number > &  local,
const MPI_Comm  mpi_communicator,
SparseMatrix< Number > &  global 
)

Perform an MPI sum of the entries of a SparseMatrix.

Note
local and global should have the same sparsity pattern and it should be the same for all MPI processes.

◆ max() [1/3]

template<typename T >
T Utilities::MPI::max ( const T &  t,
const MPI_Comm  mpi_communicator 
)

Return the maximum over all processors of the value t. This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e. all processors receive the result of this operation.

Note
Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
This function is only implemented for certain template arguments T, namely float, double, int, unsigned int.

◆ max() [2/3]

template<typename T , typename U >
void Utilities::MPI::max ( const T &  values,
const MPI_Comm  mpi_communicator,
U &  maxima 
)

Like the previous function, but take the maximum over the elements of an array of type T. In other words, the i-th element of the results array is the maximum over the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g. they just differ by one of them having a const type qualifier and the other not.

Input and output vectors may be the same.

◆ max() [3/3]

template<typename T >
void Utilities::MPI::max ( const ArrayView< const T > &  values,
const MPI_Comm  mpi_communicator,
const ArrayView< T > &  maxima 
)

Like the previous function, but take the maximum over the elements of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the maximum over the i-th entries of the input arrays from each processor.

Input and output arrays may be the same.

◆ min() [1/3]

template<typename T >
T Utilities::MPI::min ( const T &  t,
const MPI_Comm  mpi_communicator 
)

Return the minimum over all processors of the value t. This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e. all processors receive the result of this operation.

Note
Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.
This function is only implemented for certain template arguments T, namely float, double, int, unsigned int.

◆ min() [2/3]

template<typename T , typename U >
void Utilities::MPI::min ( const T &  values,
const MPI_Comm  mpi_communicator,
U &  minima 
)

Like the previous function, but take the minima over the elements of an array of type T. In other words, the i-th element of the results array is the minimum of the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g. they just differ by one of them having a const type qualifier and the other not.

Input and output arrays may be the same.

◆ min() [3/3]

template<typename T >
void Utilities::MPI::min ( const ArrayView< const T > &  values,
const MPI_Comm  mpi_communicator,
const ArrayView< T > &  minima 
)

Like the previous function, but take the minimum over the elements of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the minimum over the i-th entries of the input arrays from each processor.

Input and output arrays may be the same.

◆ logical_or() [1/3]

template<typename T >
T Utilities::MPI::logical_or ( const T &  t,
const MPI_Comm  mpi_communicator 
)

Performs a logical or operation over all processors of the value t. The logical or operator || returns the boolean value true if either or all operands are true and returns false otherwise. If the provided value t corresponds to 0 in its associated data type T, it will be interpreted as false, and true otherwise. The data type T must be an integral type, i.e., bool, char, short, int, long, or any of their variations.

This function is collective over all processors given in the communicator. If deal.II is not configured for use of MPI, this function simply returns the value of t. This function corresponds to the MPI_Allreduce function, i.e., all processors receive the result of this operation.

Note
Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.

◆ logical_or() [2/3]

template<typename T , typename U >
void Utilities::MPI::logical_or ( const T &  values,
const MPI_Comm  mpi_communicator,
U &  results 
)

Like the previous function, but performs the logical or operation on each element of an array. In other words, the i-th element of the results array is the result of the logical or operation applied on the i-th entries of the input arrays from each processor. T and U must decay to the same type, e.g., they just differ by one of them having a const type qualifier and the other not.

Input and output arrays may be the same.

Note
Depending on your standard library, this function may not work with specializations of std::vector for the data type bool. In that case, use a different container or data type.

◆ logical_or() [3/3]

template<typename T >
void Utilities::MPI::logical_or ( const ArrayView< const T > &  values,
const MPI_Comm  mpi_communicator,
const ArrayView< T > &  results 
)

Like the previous function, but performs the logical or operation on each element of an array as specified by the ArrayView arguments. In other words, the i-th element of the results array is the result of the logical or operation applied on the i-th entries of the input arrays from each processor.

Input and output arrays may be the same.

◆ min_max_avg() [1/3]

MinMaxAvg Utilities::MPI::min_max_avg ( const double  my_value,
const MPI_Comm  mpi_communicator 
)

Return sum, average, minimum, maximum, and the processor ids of minimum and maximum as a collective operation on the given MPI communicator mpi_communicator. Each processor's value is given in my_value and the result will be returned. The result is available on all machines.

Note
Sometimes, not all processors need a result and in that case one would call the MPI_Reduce function instead of the MPI_Allreduce function. The latter is at most twice as expensive, so if you are concerned about performance, it may be worthwhile investigating whether your algorithm indeed needs the result everywhere.

Definition at line 124 of file mpi.cc.
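
A short sketch; local_time is assumed to be some per-process measurement:

  const double local_time = /* per-process wall time measurement */ 0.0;
  const Utilities::MPI::MinMaxAvg stats =
    Utilities::MPI::min_max_avg(local_time, mpi_communicator);
  // stats.min, stats.max, stats.avg, and stats.sum hold the reduced values;
  // stats.min_index and stats.max_index are the ranks attaining the extrema.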

◆ min_max_avg() [2/3]

std::vector< MinMaxAvg > Utilities::MPI::min_max_avg ( const std::vector< double > &  my_value,
const MPI_Comm  mpi_communicator 
)

Same as above but returning the sum, average, minimum, maximum, process id of minimum and maximum as a collective operation on the given MPI communicator mpi_communicator for each entry of the vector.

Note
This function performs a single reduction sweep.
Precondition
Size of the input vector has to be the same on all processes.

Definition at line 137 of file mpi.cc.

◆ min_max_avg() [3/3]

void Utilities::MPI::min_max_avg ( const ArrayView< const double > &  my_values,
const ArrayView< MinMaxAvg > &  result,
const MPI_Comm  mpi_communicator 
)

Same as above but returning the sum, average, minimum, maximum, process id of minimum and maximum as a collective operation on the given MPI communicator mpi_communicator for each entry of the ArrayView.

Note
This function performs a single reduction sweep.
Precondition
Size of the input ArrayView has to be the same on all processes and the input and output ArrayView have to have the same size.

Definition at line 538 of file mpi.cc.

◆ job_supports_mpi()

bool Utilities::MPI::job_supports_mpi ( )

Return whether (i) deal.II has been compiled to support MPI (for example by compiling with CXX=mpiCC) and if so whether (ii) MPI_Init() has been called (for example using the Utilities::MPI::MPI_InitFinalize class). In other words, the result indicates whether the current job is running under MPI.

Note
The function does not take into account whether an MPI job actually runs on more than one processor or is, in fact, a single-node job that happens to run under MPI.

Definition at line 1057 of file mpi.cc.

◆ some_to_some()

template<typename T >
std::map< unsigned int, T > Utilities::MPI::some_to_some ( const MPI_Comm  comm,
const std::map< unsigned int, T > &  objects_to_send 
)

Initiate a some-to-some communication, and exchange arbitrary objects (the class T should be serializable using boost::serialize) between processors.

Parameters
[in] comm: MPI communicator.
[in] objects_to_send: A map from the rank (unsigned int) of the process meant to receive the data to the object to send (the type T must be serializable for this function to work properly). If this map contains an entry with a key equal to the rank of the current process (i.e., an instruction to a process to send data to itself), then this data item is simply copied to the returned object.
Returns
A map from the rank (unsigned int) of the process which sent the data to the object received.
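
An illustrative sketch, assuming a valid communicator comm; the cyclic neighbor exchange is only an example, and std::string is used because it is serializable and therefore a valid T:

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  std::map<unsigned int, std::string> to_send;
  to_send[(my_rank + 1) % n_ranks] =
    "greetings from rank " + std::to_string(my_rank);

  const std::map<unsigned int, std::string> received =
    Utilities::MPI::some_to_some(comm, to_send);
  // 'received' maps each sending rank to the object that rank sent here.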

◆ all_gather()

template<typename T >
std::vector< T > Utilities::MPI::all_gather ( const MPI_Comm  comm,
const T &  object_to_send 
)

A generalization of the classic MPI_Allgather function, that accepts arbitrary data types T, as long as boost::serialize accepts T as an argument.

Parameters
[in] comm: MPI communicator.
[in] object_to_send: An object to send to all other processes.
Returns
A vector of objects, with size equal to the number of processes in the MPI communicator. Each entry contains the object received from the processor with the corresponding rank within the communicator.
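
A short sketch with variable-length objects (strings), assuming a valid communicator comm:

  const std::string my_name =
    "rank-" + std::to_string(Utilities::MPI::this_mpi_process(comm));
  const std::vector<std::string> all_names =
    Utilities::MPI::all_gather(comm, my_name);
  // On every process, all_names[p] is the string contributed by process p.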

◆ gather()

template<typename T >
std::vector< T > Utilities::MPI::gather ( const MPI_Comm  comm,
const T &  object_to_send,
const unsigned int  root_process = 0 
)

A generalization of the classic MPI_Gather function, that accepts arbitrary data types T, as long as boost::serialize accepts T as an argument.

Parameters
[in] comm: MPI communicator.
[in] object_to_send: An object to send to the root process.
[in] root_process: The process which receives the objects from all processes. By default the process with rank 0 is the root process.
Returns
The root_process receives a vector of objects, with size equal to the number of processes in the MPI communicator. Each entry contains the object received from the processor with the corresponding rank within the communicator. All other processes receive an empty vector.

◆ scatter()

template<typename T >
T Utilities::MPI::scatter ( const MPI_Comm  comm,
const std::vector< T > &  objects_to_send,
const unsigned int  root_process = 0 
)

A generalization of the classic MPI_Scatter function, that accepts arbitrary data types T, as long as boost::serialize accepts T as an argument.

Parameters
[in] comm: MPI communicator.
[in] objects_to_send: A vector of objects to send from the root process, with size equal to the number of processes. On all other processes the vector is empty.
[in] root_process: The process which sends the objects to all processes. By default the process with rank 0 is the root process.
Returns
Every process receives an object from the root_process.

◆ broadcast() [1/3]

template<typename T >
std::enable_if_t< is_mpi_type< T >==false, T > Utilities::MPI::broadcast ( const MPI_Comm  comm,
const T &  object_to_send,
const unsigned int  root_process = 0 
)

This function sends an object object_to_send from the process root_process to all other processes.

This function is a generalization of the classic MPI_Bcast function that accepts arbitrary data types T, as long as Utilities::pack() (which in turn uses boost::serialize, see in Utilities::pack() for details) accepts T as an argument.

Note
Be aware that this function is typically a lot more expensive than an MPI_Bcast because the function will use boost::serialization to (de)serialize, and execute a second MPI_Bcast to transmit the size before sending the data itself. On the other hand, if you have a single element of a data type T that is natively supported by MPI, then the compiler will choose another broadcast() overload that is efficient. If you have an array of such elements, you should use the other broadcast() function in this namespace that takes a pointer and a count argument.
Parameters
[in] comm: MPI communicator.
[in] object_to_send: An object to send to all processes.
[in] root_process: The process that sends the object to all processes. By default the process with rank 0 is the root process.
Template Parameters
T: Any type for which the Utilities::pack() and Utilities::unpack() functions can be used to convert the object into an array of char. The compiler will not select this function if T is a type that is natively supported by MPI and will instead use a more efficient overload.
Returns
On the root process, return a copy of object_to_send. On every other process, return a copy of the object sent by the root_process.

◆ broadcast() [2/3]

template<typename T >
std::enable_if_t< is_mpi_type< T >==true, T > Utilities::MPI::broadcast ( const MPI_Comm  comm,
const T &  object_to_send,
const unsigned int  root_process = 0 
)

This function sends an object object_to_send from the process root_process to all other processes.

This function is a wrapper around the MPI_Bcast function selected by the compiler whenever T is a data type natively supported by MPI.

Parameters
[in] comm: MPI communicator.
[in] object_to_send: An object to send to all processes.
[in] root_process: The process that sends the object to all processes. By default the process with rank 0 is the root process.
Template Parameters
T: Any type. The compiler will only select this function if T is a type that is natively supported by MPI. It will choose the other overloaded version of this function if that is not the case.
Returns
On the root process, return a copy of object_to_send. On every other process, return a copy of the object sent by the root_process.
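
A sketch contrasting the two overloads above (parameter names and values are only illustrative), assuming a valid communicator comm:

  // Serialized broadcast of a non-trivial object (first overload):
  std::map<std::string, double> parameters;
  if (Utilities::MPI::this_mpi_process(comm) == 0)
    parameters = {{"viscosity", 1.0}, {"density", 2.5}};
  parameters = Utilities::MPI::broadcast(comm, parameters);

  // Plain MPI_Bcast of a natively supported type (second overload):
  int n_steps = 0;
  if (Utilities::MPI::this_mpi_process(comm) == 0)
    n_steps = 100;
  n_steps = Utilities::MPI::broadcast(comm, n_steps);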

◆ broadcast() [3/3]

template<typename T >
void Utilities::MPI::broadcast ( T *  buffer,
const size_t  count,
const unsigned int  root,
const MPI_Comm  comm 
)

Broadcast the information in buffer from root to all other ranks.

Like MPI_Bcast but with support to send data with a count bigger than 2^31. The datatype to send needs to be supported directly by MPI and is automatically deduced from T.

Throws an exception if any MPI command fails.

Parameters
buffer: Buffer of count objects.
count: The number of objects to send. All processes need to specify the correct size.
root: The rank of the process with the data.
comm: The MPI communicator to use.

◆ reduce()

template<typename T >
T Utilities::MPI::reduce ( const T &  local_value,
const MPI_Comm  comm,
const std::function< T(const T &, const T &)> &  combiner,
const unsigned int  root_process = 0 
)

A function that combines values local_value from all processes via a user-specified binary operation combiner on the root_process. As such, this function is similar to MPI_Reduce (and Utilities::MPI::min/max()): on the one hand, the user-specified binary operation makes it slower for built-in types, but on the other hand it can handle general object types, including ones that store variable amounts of data.

In contrast to all_reduce, the result will be only available on a single rank. On all other processes, the returned value is undefined.

◆ all_reduce()

template<typename T >
T Utilities::MPI::all_reduce ( const T &  local_value,
const MPI_Comm  comm,
const std::function< T(const T &, const T &)> &  combiner 
)

A function that combines values local_value from all processes via a user-specified binary operation combiner and distributes the result back to all processes. As such this function is similar to MPI_Allreduce (if it were implemented by a global reduction followed by a broadcast step) but due to the user-specified binary operation also general object types, including ones that store variable amounts of data, can be handled.
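
A sketch with a user-defined combiner, assuming a valid communicator comm; the explicit template argument is given so that the lambda can be converted to the std::function parameter:

  // Find the lexicographically largest of the per-process strings:
  const std::string local_name =
    "rank-" + std::to_string(Utilities::MPI::this_mpi_process(comm));
  const std::string winner = Utilities::MPI::all_reduce<std::string>(
    local_name,
    comm,
    [](const std::string &a, const std::string &b) { return std::max(a, b); });
  // Every process ends up with the same 'winner'.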

◆ isend()

template<typename T >
Future< void > Utilities::MPI::isend ( const T &  object,
MPI_Comm  communicator,
const unsigned int  target_rank,
const unsigned int  mpi_tag = 0 
)

A function that takes a given argument object and, using MPI, sends it to MPI process indicated by the given target_rank. This function is "immediate" (corresponding to the MPI_Isend function), i.e., it immediately returns rather than waiting for the send operation to succeed. Instead, it returns a Future object that can be used to wait for the send operation to complete.

Unlike MPI_Isend, the object to be sent does not need to have a lifetime that extends until the send operation is complete. As a consequence, the first argument to this function may be a temporary variable (such as the result of another function call). That is because the object is internally packaged into a buffer whose lifetime is automatically managed. Using the buffer enables sending arbitrary objects, not just those natively supported by MPI. The only restriction on the type is that it needs to be possible to call Utilities::pack() and Utilities::unpack() on the object.

◆ irecv()

template<typename T >
Future< T > Utilities::MPI::irecv ( MPI_Comm  communicator,
const unsigned int  source_rank,
const unsigned int  mpi_tag = 0 
)

A function that encodes an MPI "receive" operation for an object whose type is represented by the template argument. The object is expected to be sent by the MPI process indicated by the given source_rank. This function is "immediate" (corresponding to the MPI_Irecv function or a variant of it), i.e., it immediately returns rather than waiting for the receive operation to succeed. Instead, it returns a Future object that can be used to wait for the receive operation to complete, and then to obtain the object received via the Future::get() function.

Unlike MPI_Irecv, the object to be received may be of any type on which one can call Utilities::pack() and Utilities::unpack(), not just those natively supported by MPI.
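
A sketch of pairing isend() and irecv() via the returned Future objects, assuming at least two processes and a valid communicator comm (ranks and data are illustrative):

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  if (my_rank == 0)
    {
      // The temporary vector is packed internally, so it may go out of scope:
      Utilities::MPI::Future<void> send_future =
        Utilities::MPI::isend(std::vector<double>{1., 2., 3.},
                              comm,
                              /*target_rank=*/1);
      // ... do other work ...
      send_future.wait();
    }
  else if (my_rank == 1)
    {
      Utilities::MPI::Future<std::vector<double>> recv_future =
        Utilities::MPI::irecv<std::vector<double>>(comm, /*source_rank=*/0);
      const std::vector<double> data = recv_future.get();
    }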

◆ compute_index_owner()

std::vector< unsigned int > Utilities::MPI::compute_index_owner ( const IndexSet owned_indices,
const IndexSet indices_to_look_up,
const MPI_Comm  comm 
)

Given a partitioned index set space, compute the owning MPI process rank of each element of a second index set according to the partitioned index set. A natural usage of this function is to compute for each ghosted degree of freedom the MPI rank of the process owning that index.

One might think: "But we know which rank a ghost DoF belongs to based on the subdomain id of the cell it is on". But this heuristic fails for DoFs on interfaces between ghost cells with different subdomain_ids, or between a ghost cell and an artificial cell. Furthermore, this function enables a completely abstract exchange of information without the help of the mesh in terms of neighbors.

The first argument passed to this function, owned_indices, must uniquely partition an index space between all processes. Otherwise, there are no limitations on this argument: in particular, there is no need to partition the index space into contiguous subsets. Furthermore, there are no limitations on the second index set indices_to_look_up as long as the size matches the first one. It can be chosen arbitrarily and independently on each process. In the case that the second index set also contains locally owned indices, these indices will be treated correctly and the rank of this process is returned for those entries.

Note
This is a collective operation: all processes within the given communicator have to call this function. Since this function does not use MPI_Alltoall or MPI_Allgather, but instead uses non-blocking point-to-point communication and only a single non-blocking barrier, it reduces the memory consumption significantly. This makes the function suitable for large-scale simulations with >100k MPI ranks.
Parameters
[in] owned_indices: Index set with indices locally owned by this process.
[in] indices_to_look_up: Index set containing indices for which the user is interested in the rank of the owning process.
[in] comm: MPI communicator.
Returns
List containing the MPI process rank for each entry in the index set indices_to_look_up. The order coincides with the order within the ElementIterator.

Definition at line 1073 of file mpi.cc.
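
An illustrative sketch assuming exactly two MPI processes and a valid communicator comm (the index ranges are arbitrary):

  // Global index space [0,8): rank 0 owns [0,4), rank 1 owns [4,8).
  IndexSet owned(8);
  if (Utilities::MPI::this_mpi_process(comm) == 0)
    owned.add_range(0, 4);
  else
    owned.add_range(4, 8);

  // Ask for the owners of two (possibly remote) indices:
  IndexSet to_look_up(8);
  to_look_up.add_index(2);
  to_look_up.add_index(6);

  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(owned, to_look_up, comm);
  // On both processes, owners == {0, 1}.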

◆ compute_set_union() [1/2]

template<typename T >
std::vector< T > Utilities::MPI::compute_set_union ( const std::vector< T > &  vec,
const MPI_Comm  comm 
)

Compute the union of the input vectors vec of all processes in the MPI communicator comm.

Note
This is a collective operation. The result will be available on all processes.
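
A short sketch, assuming a valid communicator comm (the per-process entries are arbitrary):

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const std::vector<unsigned int> my_entries = {my_rank, my_rank + 1};
  const std::vector<unsigned int> all_entries =
    Utilities::MPI::compute_set_union(my_entries, comm);
  // On every process, 'all_entries' holds the union of the per-process vectors.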

◆ compute_set_union() [2/2]

template<typename T >
std::set< T > Utilities::MPI::compute_set_union ( const std::set< T > &  set,
const MPI_Comm  comm 
)

The same as above but for std::set.

Variable Documentation

◆ is_mpi_type

template<typename T >
constexpr bool Utilities::MPI::is_mpi_type
constexpr
Initial value:
= is_same_as_any_of<T,
                    char,
                    signed short,
                    signed int,
                    signed long,
                    signed long long,
                    signed char,
                    unsigned char,
                    unsigned short,
                    unsigned int,
                    unsigned long int,
                    unsigned long long,
                    float,
                    double,
                    long double,
                    std::complex<float>,
                    std::complex<double>,
                    std::complex<long double>,
                    wchar_t>::value

A template variable that is true if the template argument T is a data type that is natively supported by MPI, and false otherwise. This variable can be used together with std::enable_if to selectively allow template functions only for those data types for which the template type is supported by MPI. The variable is, in essence, a concept in the sense of C++20.

Definition at line 110 of file mpi.h.
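
For instance, the following compile-time checks (a sketch) reflect the type list above:

  static_assert(Utilities::MPI::is_mpi_type<double>,
                "double maps directly to an MPI data type");
  static_assert(Utilities::MPI::is_mpi_type<std::complex<float>>,
                "std::complex<float> is also natively supported");
  static_assert(!Utilities::MPI::is_mpi_type<std::string>,
                "std::string has to be serialized instead");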

◆ mpi_type_id_for_type

template<typename T >
const MPI_Datatype Utilities::MPI::mpi_type_id_for_type
Initial value:
= internal::MPIDataTypes::mpi_type_id(
static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr))

A template variable that translates from the data type given as template argument to the corresponding MPI_Datatype to be used for MPI communication.

As an example, the value of mpi_type_id_for_type<int> is MPI_INT. A common way to use this variable is when sending an object obj via MPI functions to another process, and using mpi_type_id_for_type<decltype(obj)> to infer the correct MPI type to use for the communication.

The type T given here must be one of the data types supported by MPI, such as int or double. It may not be an array of objects of such a type, or a pointer to an object of such a type. The compiler will produce an error if this requirement is not satisfied.

Definition at line 1732 of file mpi.h.
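
A sketch of the usage pattern described above, assuming a valid communicator comm (destination rank and tag are placeholders):

  // The MPI_Datatype is deduced from the C++ type of the object being sent:
  const double local_norm = 3.14;
  MPI_Send(&local_norm,
           1,
           Utilities::MPI::mpi_type_id_for_type<decltype(local_norm)>,
           /*dest=*/0,
           /*tag=*/0,
           comm);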

◆ mpi_type_id_for_type< bool >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< bool >

Definition at line 105 of file mpi.cc.

◆ mpi_type_id_for_type< char >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< char >

Definition at line 106 of file mpi.cc.

◆ mpi_type_id_for_type< signed char >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< signed char >

Definition at line 107 of file mpi.cc.

◆ mpi_type_id_for_type< short >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< short >

Definition at line 108 of file mpi.cc.

◆ mpi_type_id_for_type< int >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< int >

Definition at line 109 of file mpi.cc.

◆ mpi_type_id_for_type< long int >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< long int >

Definition at line 110 of file mpi.cc.

◆ mpi_type_id_for_type< unsigned char >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< unsigned char >

Definition at line 111 of file mpi.cc.

◆ mpi_type_id_for_type< unsigned short >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< unsigned short >

Definition at line 112 of file mpi.cc.

◆ mpi_type_id_for_type< unsigned long int >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< unsigned long int >

Definition at line 113 of file mpi.cc.

◆ mpi_type_id_for_type< unsigned long long int >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< unsigned long long int >

Definition at line 114 of file mpi.cc.

◆ mpi_type_id_for_type< float >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< float >

Definition at line 115 of file mpi.cc.

◆ mpi_type_id_for_type< double >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< double >

Definition at line 116 of file mpi.cc.

◆ mpi_type_id_for_type< long double >

template const MPI_Datatype Utilities::MPI::mpi_type_id_for_type< long double >

Definition at line 117 of file mpi.cc.