Reference documentation for deal.II version Git 8a320d4 2018-02-19 08:48:02 +0100
mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 #include <deal.II/base/array_view.h>
21 
22 #include <vector>
23 #include <map>
24 
25 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
26 // without MPI, we would still like to use
27 // some constructs with MPI data
28 // types. Therefore, create some dummies
29 typedef int MPI_Comm;
30 typedef int MPI_Datatype;
31 typedef int MPI_Op;
32 # ifndef MPI_COMM_WORLD
33 # define MPI_COMM_WORLD 0
34 # endif
35 # ifndef MPI_COMM_SELF
36 # define MPI_COMM_SELF 0
37 # endif
38 # ifndef MPI_MIN
39 # define MPI_MIN 0
40 # endif
41 # ifndef MPI_MAX
42 # define MPI_MAX 0
43 # endif
44 # ifndef MPI_SUM
45 # define MPI_SUM 0
46 # endif
47 #endif
48 
49 DEAL_II_NAMESPACE_OPEN
50 
51 
52 //Forward type declarations to allow MPI sums over tensorial types
53 template <int rank, int dim, typename Number> class Tensor;
54 template <int rank, int dim, typename Number> class SymmetricTensor;
55 
56 
57 namespace Utilities
58 {
66  namespace MPI
67  {
76  unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);
77 
86  unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);
87 
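 // Usage sketch (illustrative, not part of this header); both functions are
 // thin wrappers around MPI_Comm_size and MPI_Comm_rank:
 //
 //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
 //   const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
 //   if (my_rank == 0)
 //     std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;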
109  std::vector<unsigned int>
110  compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
111  const std::vector<unsigned int> &destinations);
112 
126  MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
127 
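 // Usage sketch (illustrative): duplicate_communicator() wraps MPI_Comm_dup,
 // so the caller owns the returned communicator and typically releases it
 // with MPI_Comm_free once it is no longer needed:
 //
 //   MPI_Comm dup_comm = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
 //   // ... communicate on dup_comm without interfering with MPI_COMM_WORLD ...
 //   MPI_Comm_free(&dup_comm);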
147  template <typename T>
148  T sum (const T &t,
149  const MPI_Comm &mpi_communicator);
150 
160  template <typename T, typename U>
161  void sum (const T &values,
162  const MPI_Comm &mpi_communicator,
163  U &sums);
164 
174  template <typename T>
175  void sum (const ArrayView<const T> &values,
176  const MPI_Comm &mpi_communicator,
177  const ArrayView<T> &sums);
178 
184  template <int rank, int dim, typename Number>
185  SymmetricTensor<rank,dim,Number>
186  sum (const SymmetricTensor<rank,dim,Number> &local,
187  const MPI_Comm &mpi_communicator);
188 
194  template <int rank, int dim, typename Number>
195  Tensor<rank,dim,Number>
196  sum (const Tensor<rank,dim,Number> &local,
197  const MPI_Comm &mpi_communicator);
198 
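 // Usage sketch (illustrative): all sum() overloads reduce with MPI_SUM across
 // the given communicator and return the result on every rank. 'local_error'
 // and 'local_counts' are hypothetical per-rank quantities:
 //
 //   const double global_error =
 //     Utilities::MPI::sum(local_error, mpi_communicator);
 //
 //   std::vector<unsigned int> global_counts(local_counts.size());
 //   Utilities::MPI::sum(local_counts, mpi_communicator, global_counts);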
218  template <typename T>
219  T max (const T &t,
220  const MPI_Comm &mpi_communicator);
221 
231  template <typename T, typename U>
232  void max (const T &values,
233  const MPI_Comm &mpi_communicator,
234  U &maxima);
235 
245  template <typename T>
246  void max (const ArrayView<const T> &values,
247  const MPI_Comm &mpi_communicator,
248  const ArrayView<T> &maxima);
249 
269  template <typename T>
270  T min (const T &t,
271  const MPI_Comm &mpi_communicator);
272 
282  template <typename T, typename U>
283  void min (const T &values,
284  const MPI_Comm &mpi_communicator,
285  U &minima);
286 
296  template <typename T>
297  void min (const ArrayView<const T> &values,
298  const MPI_Comm &mpi_communicator,
299  const ArrayView<T> &minima);
300 
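 // Usage sketch (illustrative): max() and min() mirror sum() but reduce with
 // MPI_MAX and MPI_MIN, respectively. 'local_dt' and 'n_local_cells' are
 // hypothetical per-rank values:
 //
 //   const double       global_dt = Utilities::MPI::min(local_dt, mpi_communicator);
 //   const unsigned int max_cells = Utilities::MPI::max(n_local_cells, mpi_communicator);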
313  struct MinMaxAvg
314  {
320  double sum;
321  double min;
322  double max;
323 
329  unsigned int min_index;
330  unsigned int max_index;
331 
336  double avg;
337  };
338 
353  MinMaxAvg
354  min_max_avg (const double my_value,
355  const MPI_Comm &mpi_communicator);
356 
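 // Usage sketch (illustrative): min_max_avg() combines several reductions in a
 // single collective call and also records which ranks attained the extrema.
 // 'wall_time' stands for any per-rank double:
 //
 //   const Utilities::MPI::MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(wall_time, mpi_communicator);
 //   // stats.min, stats.max, stats.avg, stats.sum, stats.min_index and
 //   // stats.max_index now hold the same values on every rank.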
379  class MPI_InitFinalize
380  {
381  public:
427  MPI_InitFinalize (int &argc,
428  char ** &argv,
429  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
430 
435  ~MPI_InitFinalize ();
436  };
437 
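 // Typical usage (as in the deal.II tutorial programs): create exactly one
 // MPI_InitFinalize object at the beginning of main(); it initializes MPI on
 // construction and finalizes it when the object goes out of scope:
 //
 //   int main (int argc, char **argv)
 //   {
 //     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
 //     // ... run the parallel program ...
 //   }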
449  bool job_supports_mpi ();
450 
467  template <typename T>
468  std::map<unsigned int, T>
469  some_to_some(const MPI_Comm &comm,
470  const std::map <unsigned int, T> &objects_to_send);
471 
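 // Usage sketch (illustrative): each rank provides a map from destination rank
 // to a serializable object; the returned map associates every rank that sent
 // something to this process with the object it sent. 'payload' is a
 // hypothetical std::vector<double>:
 //
 //   std::map<unsigned int, std::vector<double> > to_send;
 //   to_send[(my_rank + 1) % n_ranks] = payload;
 //   const auto received =
 //     Utilities::MPI::some_to_some(mpi_communicator, to_send);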
487  template <typename T>
488  std::vector<T>
489  all_gather(const MPI_Comm &comm,
490  const T &object_to_send);
491 
509  template <typename T>
510  std::vector<T>
511  gather(const MPI_Comm &comm,
512  const T &object_to_send,
513  const unsigned int root_process=0);
514 
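 // Usage sketch (illustrative): all_gather() returns everyone's object on all
 // ranks, whereas gather() fills the returned vector only on root_process and
 // leaves it empty elsewhere. 'my_name' is a hypothetical std::string:
 //
 //   const std::vector<std::string> all_names =
 //     Utilities::MPI::all_gather(mpi_communicator, my_name);
 //   const std::vector<std::string> names_on_root =
 //     Utilities::MPI::gather(mpi_communicator, my_name, /*root_process=*/0);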
515 #ifndef DOXYGEN
516  // declaration for an internal function that lives in mpi.templates.h
517  namespace internal
518  {
519  template <typename T>
520  void all_reduce (const MPI_Op &mpi_op,
521  const ArrayView<const T> &values,
522  const MPI_Comm &mpi_communicator,
523  const ArrayView<T> &output);
524  }
525 
526  // Since these depend on N they must live in the header file
527  template <typename T, unsigned int N>
528  void sum (const T (&values)[N],
529  const MPI_Comm &mpi_communicator,
530  T (&sums)[N])
531  {
532  internal::all_reduce(MPI_SUM, ArrayView<const T>(values, N),
533  mpi_communicator, ArrayView<T>(sums, N));
534  }
535 
536  template <typename T, unsigned int N>
537  void max (const T (&values)[N],
538  const MPI_Comm &mpi_communicator,
539  T (&maxima)[N])
540  {
541  internal::all_reduce(MPI_MAX, ArrayView<const T>(values, N),
542  mpi_communicator, ArrayView<T>(maxima, N));
543  }
544 
545  template <typename T, unsigned int N>
546  void min (const T (&values)[N],
547  const MPI_Comm &mpi_communicator,
548  T (&minima)[N])
549  {
550  internal::all_reduce(MPI_MIN, ArrayView<const T>(values, N),
551  mpi_communicator, ArrayView<T>(minima, N));
552  }
553 
554  template<typename T>
555  std::map<unsigned int, T>
556  some_to_some(const MPI_Comm &comm,
557  const std::map<unsigned int, T> &objects_to_send)
558  {
559 #ifndef DEAL_II_WITH_MPI
560  (void)comm;
561  Assert(objects_to_send.size() < 2, ExcMessage("Cannot send to more than one processor."));
562  Assert(objects_to_send.find(0) != objects_to_send.end() || objects_to_send.size() == 0,
563  ExcMessage("Can only send to myself or to nobody."));
564  return objects_to_send;
565 #else
566 
567  std::vector<unsigned int> send_to(objects_to_send.size());
568  {
569  unsigned int i=0;
570  for (const auto &m: objects_to_send)
571  send_to[i++] = m.first;
572  }
573  AssertDimension(send_to.size(), objects_to_send.size());
574 
575  const auto receive_from =
576  Utilities::MPI::compute_point_to_point_communication_pattern(comm, send_to);
577 
578  // Sending buffers
579  std::vector<std::vector<char> > buffers_to_send(send_to.size());
580  std::vector<MPI_Request> buffer_send_requests(send_to.size());
581  {
582  unsigned int i = 0;
583  for (const auto &rank_obj : objects_to_send)
584  {
585  const auto &rank = rank_obj.first;
586  buffers_to_send[i] = Utilities::pack(rank_obj.second);
587  const int ierr = MPI_Isend(buffers_to_send[i].data(),
588  buffers_to_send[i].size(), MPI_CHAR,
589  rank, 21, comm, &buffer_send_requests[i]);
590  AssertThrowMPI(ierr);
591  ++i;
592  }
593  }
594 
595  // Receiving buffers
596  std::map<unsigned int, T> received_objects;
597  {
598  std::vector<char> buffer;
599  // We do this on a first come/first served basis
600  for (unsigned int i = 0; i<receive_from.size(); ++i)
601  {
602  // Probe what's going on. Take data from the first available sender
603  MPI_Status status;
604  int ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
605  AssertThrowMPI(ierr);
606 
607  // Length of the message
608  int len;
609  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
610  AssertThrowMPI(ierr);
611  buffer.resize(len);
612 
613  // Source rank
614  const unsigned int rank = status.MPI_SOURCE;
615 
616  // Actually receive the message
617  ierr = MPI_Recv(buffer.data(), len, MPI_CHAR,
618  rank, 21, comm, MPI_STATUS_IGNORE);
619  AssertThrowMPI(ierr);
620  Assert(received_objects.find(rank) == received_objects.end(),
621  ExcInternalError("I should not receive again from this rank"));
622  received_objects[rank] = Utilities::unpack<T>(buffer);
623  }
624  }
625 
626  // Wait to have sent all objects.
627  MPI_Waitall(send_to.size(), buffer_send_requests.data(),MPI_STATUSES_IGNORE);
628 
629  return received_objects;
630 #endif // deal.II with MPI
631  }
632 
633  template<typename T>
634  std::vector<T> all_gather(const MPI_Comm &comm,
635  const T &object)
636  {
637 #ifndef DEAL_II_WITH_MPI
638  (void)comm;
639  std::vector<T> v(1, object);
640  return v;
641 #else
642  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
643 
644  std::vector<char> buffer = Utilities::pack(object);
645 
646  int n_local_data = buffer.size();
647 
648  // Vector to store the size of loc_data_array for every process
649  std::vector<int> size_all_data(n_procs,0);
650 
651  // Exchanging the size of each buffer
652  MPI_Allgather(&n_local_data, 1, MPI_INT,
653  &(size_all_data[0]), 1, MPI_INT,
654  comm);
655 
656  // Now computing the displacement, relative to recvbuf,
657  // at which to store the incoming buffer
658  std::vector<int> rdispls(n_procs);
659  rdispls[0] = 0;
660  for (unsigned int i=1; i < n_procs; ++i)
661  rdispls[i] = rdispls[i-1] + size_all_data[i-1];
662 
663  // Step 3: exchange the buffer:
664  std::vector<char> received_unrolled_buffer(rdispls.back() + size_all_data.back());
665 
666  MPI_Allgatherv(buffer.data(), n_local_data, MPI_CHAR,
667  received_unrolled_buffer.data(), size_all_data.data(),
668  rdispls.data(), MPI_CHAR, comm);
669 
670  std::vector<T> received_objects(n_procs);
671  for (unsigned int i= 0; i < n_procs; ++i)
672  {
673  std::vector<char> local_buffer(received_unrolled_buffer.begin()+rdispls[i],
674  received_unrolled_buffer.begin()+rdispls[i]+size_all_data[i]);
675  received_objects[i] = Utilities::unpack<T>(local_buffer);
676  }
677 
678  return received_objects;
679 #endif
680  }
681 
682  template <typename T>
683  std::vector<T>
684  gather(const MPI_Comm &comm,
685  const T &object_to_send,
686  const unsigned int root_process)
687  {
688 #ifndef DEAL_II_WITH_MPI
689  (void)comm;
690  (void)root_process;
691  std::vector<T> v(1, object_to_send);
692  return v;
693 #else
694  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
695  const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
696 
697  Assert(root_process < n_procs, ExcIndexRange(root_process,0,n_procs));
698 
699  std::vector<char> buffer = Utilities::pack(object_to_send);
700  int n_local_data = buffer.size();
701 
702  // Vector to store the size of loc_data_array for every process
703  // only the root process needs to allocate memory for that purpose
704  std::vector<int> size_all_data;
705  if (my_rank==root_process)
706  size_all_data.resize(n_procs,0);
707 
708  // Exchanging the size of each buffer
709  int ierr = MPI_Gather(&n_local_data, 1, MPI_INT,
710  size_all_data.data(), 1, MPI_INT,
711  root_process, comm);
712  AssertThrowMPI(ierr);
713 
714  // Now computing the displacement, relative to recvbuf,
715  // at which to store the incoming buffer; only for root
716  std::vector<int> rdispls;
717  if (my_rank==root_process)
718  {
719  rdispls.resize(n_procs,0);
720  for (unsigned int i=1; i<n_procs; ++i)
721  rdispls[i] = rdispls[i-1] + size_all_data[i-1];
722  }
723  // exchange the buffer:
724  std::vector<char> received_unrolled_buffer;
725  if (my_rank==root_process)
726  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
727 
728  ierr = MPI_Gatherv(buffer.data(), n_local_data, MPI_CHAR,
729  received_unrolled_buffer.data(), size_all_data.data(),
730  rdispls.data(), MPI_CHAR,
731  root_process, comm);
732  AssertThrowMPI(ierr);
733 
734  std::vector<T> received_objects;
735 
736  if (my_rank==root_process)
737  {
738  received_objects.resize(n_procs);
739 
740  for (unsigned int i=0; i<n_procs; ++i)
741  {
742  const std::vector<char> local_buffer(received_unrolled_buffer.begin()+rdispls[i],
743  received_unrolled_buffer.begin()+rdispls[i]+size_all_data[i]);
744  received_objects[i] = Utilities::unpack<T>(local_buffer);
745  }
746  }
747  return received_objects;
748 #endif
749  }
750 
751 #endif
752  } // end of namespace MPI
753 } // end of namespace Utilities
754 
755 
756 DEAL_II_NAMESPACE_CLOSE
757 
758 #endif