Reference documentation for deal.II version Git 2a81969 2017-11-21 09:22:35 +0100
mpi.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2017 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#ifndef dealii_mpi_h
#define dealii_mpi_h

#include <deal.II/base/config.h>
#include <deal.II/base/array_view.h>

#include <vector>
#include <map>

#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI, we would still like to use some constructs with MPI data
// types. Therefore, create some dummies.
typedef int MPI_Comm;
typedef int MPI_Datatype;
typedef int MPI_Op;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_MIN
#    define MPI_MIN 0
#  endif
#  ifndef MPI_MAX
#    define MPI_MAX 0
#  endif
#  ifndef MPI_SUM
#    define MPI_SUM 0
#  endif
#endif
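// With these dummies in place, code that stores and passes around
// communicators compiles unchanged in a serial build. A minimal sketch
// (not part of this header; the function used is declared further below):
//
//   MPI_Comm comm = MPI_COMM_WORLD;  // just an int without MPI
//   const unsigned int n = Utilities::MPI::n_mpi_processes (comm); // == 1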

DEAL_II_NAMESPACE_OPEN


// Forward type declarations to allow MPI sums over tensorial types
template <int rank, int dim, typename Number> class Tensor;
template <int rank, int dim, typename Number> class SymmetricTensor;

// Forward type declaration to allow MPI sums over Vector<number> type
template <typename Number> class Vector;
// Forward type declaration to allow MPI sums over FullMatrix<number> type
template <typename Number> class FullMatrix;
// Forward type declaration to allow MPI sums over LAPACKFullMatrix<number> type
template <typename Number> class LAPACKFullMatrix;


namespace Utilities
{
  namespace MPI
  {
    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);

    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);

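    // Usage sketch (not part of this header; assumes an initialized MPI
    // environment, e.g. via the MPI_InitFinalize class below):
    //
    //   const unsigned int n_ranks =
    //     Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
    //   const unsigned int my_rank =
    //     Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
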
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
                                                  const std::vector<unsigned int> &destinations);

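    // Usage sketch (not part of this header): given the list of ranks this
    // process wants to send to, compute the ranks that will send to us:
    //
    //   const std::vector<unsigned int> destinations = {1, 3}; // illustrative
    //   const std::vector<unsigned int> origins =
    //     Utilities::MPI::compute_point_to_point_communication_pattern
    //       (MPI_COMM_WORLD, destinations);
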
    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);

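    // Usage sketch (not part of this header): work on a private copy of a
    // communicator so one's own messages cannot collide with the caller's,
    // and release it with the standard MPI call when done:
    //
    //   MPI_Comm duplicate =
    //     Utilities::MPI::duplicate_communicator (MPI_COMM_WORLD);
    //   // ... communicate on 'duplicate' ...
    //   MPI_Comm_free (&duplicate);
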
    template <typename T>
    T sum (const T &t,
           const MPI_Comm &mpi_communicator);

    template <typename T, unsigned int N>
    void sum (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&sums)[N]);

    template <typename T>
    void sum (const ArrayView<const T> &values,
              const MPI_Comm &mpi_communicator,
              const ArrayView<T> &sums);

    template <typename T>
    void sum (const std::vector<T> &values,
              const MPI_Comm &mpi_communicator,
              std::vector<T> &sums);

    template <typename T>
    void sum (const Vector<T> &values,
              const MPI_Comm &mpi_communicator,
              Vector<T> &sums);

    template <typename T>
    void sum (const FullMatrix<T> &values,
              const MPI_Comm &mpi_communicator,
              FullMatrix<T> &sums);

    template <typename T>
    void sum (const LAPACKFullMatrix<T> &values,
              const MPI_Comm &mpi_communicator,
              LAPACKFullMatrix<T> &sums);

    template <int rank, int dim, typename Number>
    SymmetricTensor<rank,dim,Number>
    sum (const SymmetricTensor<rank,dim,Number> &local,
         const MPI_Comm &mpi_communicator);

    template <int rank, int dim, typename Number>
    Tensor<rank,dim,Number>
    sum (const Tensor<rank,dim,Number> &local,
         const MPI_Comm &mpi_communicator);

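    // Usage sketch (not part of this header): the scalar overload returns
    // the sum over all ranks; the container overloads fill an output
    // argument element-wise ('local_error' is illustrative):
    //
    //   const double local_error = ...;
    //   const double global_error =
    //     Utilities::MPI::sum (local_error, MPI_COMM_WORLD);
    //
    //   std::vector<double> local_counts (10, 0.), global_counts (10);
    //   Utilities::MPI::sum (local_counts, MPI_COMM_WORLD, global_counts);
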
    template <typename T>
    T max (const T &t,
           const MPI_Comm &mpi_communicator);

    template <typename T, unsigned int N>
    void max (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&maxima)[N]);

    template <typename T>
    void max (const std::vector<T> &values,
              const MPI_Comm &mpi_communicator,
              std::vector<T> &maxima);

    template <typename T>
    T min (const T &t,
           const MPI_Comm &mpi_communicator);

    template <typename T, unsigned int N>
    void min (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&minima)[N]);

    template <typename T>
    void min (const std::vector<T> &values,
              const MPI_Comm &mpi_communicator,
              std::vector<T> &minima);

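    // Usage sketch (not part of this header): global extrema of a locally
    // computed time step size (illustrative names):
    //
    //   const double local_dt = ...;
    //   const double smallest_dt =
    //     Utilities::MPI::min (local_dt, MPI_COMM_WORLD);
    //   const double largest_dt =
    //     Utilities::MPI::max (local_dt, MPI_COMM_WORLD);
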

    struct MinMaxAvg
    {
      double sum;
      double min;
      double max;

      unsigned int min_index;
      unsigned int max_index;

      double avg;
    };

    MinMaxAvg
    min_max_avg (const double my_value,
                 const MPI_Comm &mpi_communicator);

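    // Usage sketch (not part of this header): one call yields sum, min,
    // max, the ranks on which the extrema occur, and the average
    // ('local_time' is illustrative):
    //
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg (local_time, MPI_COMM_WORLD);
    //   std::cout << "avg=" << stats.avg
    //             << ", max on rank " << stats.max_index << std::endl;
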
    class MPI_InitFinalize
    {
    public:
      MPI_InitFinalize (int &argc,
                        char ** &argv,
                        const unsigned int max_num_threads = numbers::invalid_unsigned_int);

      ~MPI_InitFinalize ();
    };

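    // Usage sketch (not part of this header): construct one object of this
    // class at the top of main(); MPI is finalized automatically when the
    // object goes out of scope. The last argument limits the number of
    // threads each MPI process may spawn:
    //
    //   int main (int argc, char **argv)
    //   {
    //     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
    //     // ... parallel program ...
    //   }
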
    bool job_supports_mpi ();

    template <typename T>
    std::map<unsigned int, T>
    some_to_some (const MPI_Comm &comm,
                  const std::map<unsigned int, T> &objects_to_send);

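    // Usage sketch (not part of this header): the keys of the map name the
    // ranks to send to; the result maps sender ranks to received objects
    // (illustrative payload type):
    //
    //   std::map<unsigned int, std::vector<double> > to_send;
    //   to_send[0] = std::vector<double> (3, 1.);   // payload for rank 0
    //   const std::map<unsigned int, std::vector<double> > received =
    //     Utilities::MPI::some_to_some (MPI_COMM_WORLD, to_send);
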
    template <typename T>
    std::vector<T>
    all_gather (const MPI_Comm &comm,
                const T &object_to_send);

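    // Usage sketch (not part of this header): every rank contributes one
    // object and receives those of all ranks, ordered by rank:
    //
    //   const std::string name =
    //     "rank " +
    //     std::to_string (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD));
    //   const std::vector<std::string> all_names =
    //     Utilities::MPI::all_gather (MPI_COMM_WORLD, name);
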
#ifndef DOXYGEN
    // declaration for an internal function that lives in mpi.templates.h
    namespace internal
    {
      template <typename T>
      void all_reduce (const MPI_Op &mpi_op,
                       const ArrayView<const T> &values,
                       const MPI_Comm &mpi_communicator,
                       const ArrayView<T> &output);
    }

    // Since these depend on N they must live in the header file
    template <typename T, unsigned int N>
    void sum (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&sums)[N])
    {
      internal::all_reduce (MPI_SUM, ArrayView<const T> (values, N),
                            mpi_communicator, ArrayView<T> (sums, N));
    }

    template <typename T, unsigned int N>
    void max (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&maxima)[N])
    {
      internal::all_reduce (MPI_MAX, ArrayView<const T> (values, N),
                            mpi_communicator, ArrayView<T> (maxima, N));
    }

    template <typename T, unsigned int N>
    void min (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&minima)[N])
    {
      internal::all_reduce (MPI_MIN, ArrayView<const T> (values, N),
                            mpi_communicator, ArrayView<T> (minima, N));
    }
#endif
  } // end of namespace MPI
} // end of namespace Utilities


DEAL_II_NAMESPACE_CLOSE

#endif