partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 1999 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/partitioner.h>
#include <deal.II/base/partitioner.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    Partitioner::Partitioner()
      : global_size(0)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, 0))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {}


    Partitioner::Partitioner(const unsigned int size)
      : global_size(size)
      , locally_owned_range_data(size)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, size))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {
      locally_owned_range_data.add_range(0, size);
      locally_owned_range_data.compress();
      ghost_indices_data.set_size(size);
    }


    Partitioner::Partitioner(const types::global_dof_index local_size,
                             const types::global_dof_index ghost_size,
                             const MPI_Comm &              communicator)
      : global_size(Utilities::MPI::sum<types::global_dof_index>(local_size,
                                                                 communicator))
      , locally_owned_range_data(global_size)
      , local_range_data{0, local_size}
      , n_ghost_indices_data(ghost_size)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(Utilities::MPI::this_mpi_process(communicator))
      , n_procs(Utilities::MPI::n_mpi_processes(communicator))
      , communicator(communicator)
      , have_ghost_indices(true)
    {
      types::global_dof_index prefix_sum = 0;

#ifdef DEAL_II_WITH_MPI
      MPI_Exscan(&local_size,
                 &prefix_sum,
                 1,
                 Utilities::MPI::internal::mpi_type_id(&prefix_sum),
                 MPI_SUM,
                 communicator);
#endif

      local_range_data = {prefix_sum, prefix_sum + local_size};

      locally_owned_range_data.add_range(prefix_sum, prefix_sum + local_size);
      locally_owned_range_data.compress();
    }
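
    // Illustrative usage sketch (not part of the library source; the sizes
    // are made up): if each of two ranks constructs
    //
    //   Utilities::MPI::Partitioner part(100, 10, MPI_COMM_WORLD);
    //
    // then global_size becomes 200, the MPI_Exscan prefix sum above gives
    // rank 0 the owned range [0, 100) and rank 1 the range [100, 200), and
    // each rank reserves room for 10 ghost entries.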


    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const IndexSet &ghost_indices_in,
                             const MPI_Comm &communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
      set_ghost_indices(ghost_indices_in);
    }
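
    // Illustrative usage sketch (not part of the library source; names and
    // sizes are hypothetical): each rank describes its owned and ghost
    // entries as IndexSet objects of equal global size and passes them in:
    //
    //   IndexSet owned(n_global), ghosts(n_global);
    //   owned.add_range(my_begin, my_end);
    //   ghosts.add_index(some_index_owned_elsewhere);
    //   Utilities::MPI::Partitioner part(owned, ghosts, MPI_COMM_WORLD);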


    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const MPI_Comm &communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
    }


    void
    Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
                        const IndexSet &read_write_vector_index_set,
                        const MPI_Comm &communicator_in)
    {
      have_ghost_indices = false;
      communicator       = communicator_in;
      set_owned_indices(vector_space_vector_index_set);
      set_ghost_indices(read_write_vector_index_set);
    }


    void
    Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
    {
      if (Utilities::MPI::job_supports_mpi() == true)
        {
          my_pid  = Utilities::MPI::this_mpi_process(communicator);
          n_procs = Utilities::MPI::n_mpi_processes(communicator);
        }
      else
        {
          my_pid  = 0;
          n_procs = 1;
        }

      // set the local range
      Assert(locally_owned_indices.is_contiguous() == true,
             ExcMessage("The index set specified in locally_owned_indices "
                        "is not contiguous."));
      locally_owned_indices.compress();
      if (locally_owned_indices.n_elements() > 0)
        local_range_data =
          std::pair<types::global_dof_index, types::global_dof_index>(
            locally_owned_indices.nth_index_in_set(0),
            locally_owned_indices.nth_index_in_set(0) +
              locally_owned_indices.n_elements());
      AssertThrow(
        local_range_data.second - local_range_data.first <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
      locally_owned_range_data.set_size(locally_owned_indices.size());
      locally_owned_range_data.add_range(local_range_data.first,
                                         local_range_data.second);
      locally_owned_range_data.compress();

      ghost_indices_data.set_size(locally_owned_indices.size());
    }
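
    // Note on set_owned_indices() above (illustrative example, not from the
    // source): the locally owned index set must be contiguous. An IndexSet
    // holding {[0,10), [20,30)} on one rank would trigger the
    // is_contiguous() assertion, whereas a single range such as [10,20) is
    // accepted, because local_range_data stores exactly one half-open
    // interval per process.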


    void
    Partitioner::set_ghost_indices(const IndexSet &ghost_indices_in,
                                   const IndexSet &larger_ghost_index_set)
    {
      // Set ghost indices from input. To be sure that no entries from the
      // locally owned range are present, subtract the locally owned indices
      // in any case.
      Assert(ghost_indices_in.n_elements() == 0 ||
               ghost_indices_in.size() == locally_owned_range_data.size(),
             ExcDimensionMismatch(ghost_indices_in.size(),
                                  locally_owned_range_data.size()));

      ghost_indices_data = ghost_indices_in;
      if (ghost_indices_data.size() != locally_owned_range_data.size())
        ghost_indices_data.set_size(locally_owned_range_data.size());
      ghost_indices_data.subtract_set(locally_owned_range_data);
      ghost_indices_data.compress();
      AssertThrow(
        ghost_indices_data.n_elements() <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 ghost elements"));
      n_ghost_indices_data = ghost_indices_data.n_elements();

      have_ghost_indices =
        Utilities::MPI::max(n_ghost_indices_data, communicator) > 0;

      // In the rest of this function, we determine the point-to-point
      // communication pattern of the partitioner. We make up a list with both
      // the processors the ghost indices actually belong to, and the indices
      // that are locally held but are ghost indices of other processors. This
      // then allows us to import and export data very easily.

      // find out the end index for each processor and communicate it (this
      // implies the start index for the next processor)
#ifdef DEAL_II_WITH_MPI
      if (n_procs < 2)
        {
          Assert(ghost_indices_data.n_elements() == 0, ExcInternalError());
          Assert(n_import_indices_data == 0, ExcInternalError());
          Assert(n_ghost_indices_data == 0, ExcInternalError());
          return;
        }

      types::global_dof_index my_size = locally_owned_size();

      // Allow non-zero start index for the vector. Part 1:
      // Assume for now that the index set of rank 0 starts with 0
      // and therefore has an increased size.
      if (my_pid == 0)
        my_size += local_range_data.first;

      types::global_dof_index my_shift = 0;
      {
        const int ierr = MPI_Exscan(&my_size,
                                    &my_shift,
                                    1,
                                    DEAL_II_DOF_INDEX_MPI_TYPE,
                                    MPI_SUM,
                                    communicator);
        AssertThrowMPI(ierr);
      }

      // Allow non-zero start index for the vector. Part 2:
      // We correct the assumption made above and let the
      // index set of rank 0 actually start from the
      // correct value, i.e. we correct the shift to
      // its start.
      if (my_pid == 0)
        my_shift = local_range_data.first;

      // Fix the index start in case the index set could not give us that
      // information.
      if (local_range_data.first == 0 && my_shift != 0)
        {
          const types::global_dof_index old_locally_owned_size =
            locally_owned_size();
          local_range_data.first  = my_shift;
          local_range_data.second = my_shift + old_locally_owned_size;
        }

      std::vector<unsigned int> owning_ranks_of_ghosts(
        ghost_indices_data.n_elements());

      // set up dictionary
      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
        locally_owned_range_data,
        ghost_indices_data,
        communicator,
        owning_ranks_of_ghosts,
        /* track origins of ghosts*/ true);

      // read dictionary by communicating with the process that owns the index
      // in the static partition (i.e. in the dictionary). This process
      // returns the actual owner of the index.
      ConsensusAlgorithms::Selector<
        std::pair<types::global_dof_index, types::global_dof_index>,
        unsigned int>
        consensus_algorithm(process, communicator);
      consensus_algorithm.run();
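
      // At this point owning_ranks_of_ghosts[i] names the rank that owns the
      // i-th ghost index (in the sorted order of ghost_indices_data); the
      // block below compresses this list into (rank, count) pairs.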

      {
        ghost_targets_data = {};

        if (owning_ranks_of_ghosts.size() > 0)
          {
            ghost_targets_data.emplace_back(owning_ranks_of_ghosts[0], 0);
            for (auto i : owning_ranks_of_ghosts)
              {
                Assert(i >= ghost_targets_data.back().first,
                       ExcInternalError(
                         "Expect result of ConsensusAlgorithmsProcess to be "
                         "sorted"));
                if (i == ghost_targets_data.back().first)
                  ghost_targets_data.back().second++;
                else
                  ghost_targets_data.emplace_back(i, 1);
              }
          }
      }
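
      // Illustrative example of the compression above (made-up ranks):
      // owning_ranks_of_ghosts == {1, 1, 3} yields
      // ghost_targets_data == {{1, 2}, {3, 1}}, i.e. two ghost entries
      // received from rank 1 and one from rank 3.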

      // find out how much the individual processes want to import from me
      std::map<unsigned int, IndexSet> import_data = process.get_requesters();

      // count import requests and set up the compressed indices
      n_import_indices_data = 0;
      import_targets_data   = {};
      import_targets_data.reserve(import_data.size());
      import_indices_chunks_by_rank_data = {};
      import_indices_chunks_by_rank_data.reserve(import_data.size());
      import_indices_chunks_by_rank_data.push_back(0);
      for (const auto &i : import_data)
        if (i.second.n_elements() > 0)
          {
            import_targets_data.emplace_back(i.first, i.second.n_elements());
            n_import_indices_data += i.second.n_elements();
            import_indices_chunks_by_rank_data.push_back(
              import_indices_chunks_by_rank_data.back() +
              i.second.n_intervals());
          }
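
      // After this loop, import_indices_chunks_by_rank_data holds prefix
      // sums of interval counts per requesting rank, e.g. (made-up numbers)
      // {0, 2, 5} if the first requester's indices form 2 intervals and the
      // second requester's form 3.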

      // transform import indices to local index space
      import_indices_data = {};
      import_indices_data.reserve(import_indices_chunks_by_rank_data.back());
      for (const auto &i : import_data)
        {
          Assert((i.second & locally_owned_range_data) == i.second,
                 ExcInternalError("Requested indices must be in local range"));
          for (auto interval = i.second.begin_intervals();
               interval != i.second.end_intervals();
               ++interval)
            import_indices_data.emplace_back(*interval->begin() -
                                               local_range_data.first,
                                             interval->last() + 1 -
                                               local_range_data.first);
        }
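
      // The loop above stores half-open ranges in local numbering: a
      // requested global interval [first, last] becomes
      // [first - local_range_data.first, last + 1 - local_range_data.first).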

#  ifdef DEBUG

      // simple check: the number of processes to which we want to send
      // ghost data and the number of processes that reference our locally
      // owned range as ghosts should agree
      AssertDimension(
        Utilities::MPI::sum(import_targets_data.size(), communicator),
        Utilities::MPI::sum(ghost_targets_data.size(), communicator));

      // simple check: the number of indices to exchange should match from the
      // ghost indices side and the import indices side
      AssertDimension(Utilities::MPI::sum(n_import_indices_data, communicator),
                      Utilities::MPI::sum(n_ghost_indices_data, communicator));

      // expensive check that the communication channel is sane -> do a ghost
      // exchange step and see whether the ghost indices sent to us by other
      // processes (ghost_indices) are the same as we hold locally
      // (ghost_indices_ref).
      std::vector<types::global_dof_index> ghost_indices_ref;
      ghost_indices_data.fill_index_vector(ghost_indices_ref);
      AssertDimension(ghost_indices_ref.size(), n_ghost_indices());
      std::vector<types::global_dof_index> indices_to_send(n_import_indices());
      std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());
      std::vector<types::global_dof_index> my_indices;
      locally_owned_range_data.fill_index_vector(my_indices);
      std::vector<MPI_Request> requests;
      export_to_ghosted_array_start<types::global_dof_index>(
        127,
        ArrayView<const types::global_dof_index>(
          my_indices.data(), my_indices.size()),
        make_array_view(indices_to_send),
        make_array_view(ghost_indices),
        requests);
      export_to_ghosted_array_finish(make_array_view(ghost_indices), requests);
      int       flag = 0;
      const int ierr = MPI_Testall(requests.size(),
                                   requests.data(),
                                   &flag,
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
      Assert(flag == 1,
             ExcMessage(
               "MPI found unfinished requests. Check communication setup"));

      for (unsigned int i = 0; i < ghost_indices.size(); ++i)
        AssertDimension(ghost_indices[i], ghost_indices_ref[i]);

#  endif

#endif // #ifdef DEAL_II_WITH_MPI

      if (larger_ghost_index_set.size() == 0)
        {
          ghost_indices_subset_chunks_by_rank_data.clear();
          ghost_indices_subset_data.emplace_back(0, n_ghost_indices());
          n_ghost_indices_in_larger_set = n_ghost_indices_data;
        }
      else
        {
          AssertDimension(larger_ghost_index_set.size(),
                          ghost_indices_data.size());
          Assert(
            (larger_ghost_index_set & locally_owned_range_data).n_elements() ==
              0,
            ExcMessage("Ghost index set should not overlap with owned set."));
          Assert((larger_ghost_index_set & ghost_indices_data) ==
                   ghost_indices_data,
                 ExcMessage("Larger ghost index set must contain the tight "
                            "ghost index set."));

          n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();

          // first translate tight ghost indices into indices within the large
          // set:
          std::vector<unsigned int> expanded_numbering;
          for (dealii::IndexSet::size_type index : ghost_indices_data)
            {
              Assert(larger_ghost_index_set.is_element(index),
                     ExcMessage("The given larger ghost index set must contain "
                                "all indices in the actual index set."));
              Assert(
                larger_ghost_index_set.index_within_set(index) <
                  static_cast<types::global_dof_index>(
                    std::numeric_limits<unsigned int>::max()),
                ExcMessage(
                  "Index overflow: This class supports at most 2^32-1 ghost elements"));
              expanded_numbering.push_back(
                larger_ghost_index_set.index_within_set(index));
            }
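
          // Illustrative example of the translation above (made-up indices):
          // a tight ghost set {42, 97} inside a larger set {10, 42, 80, 97}
          // produces expanded_numbering == {1, 3}, i.e. the positions of the
          // tight indices within the larger set.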

          // now rework expanded_numbering into ranges and store in:
          std::vector<std::pair<unsigned int, unsigned int>>
            ghost_indices_subset;
          ghost_indices_subset_chunks_by_rank_data.resize(
            ghost_targets_data.size() + 1);
          // also populate ghost_indices_subset_chunks_by_rank_data
          ghost_indices_subset_chunks_by_rank_data[0] = 0;
          unsigned int shift                          = 0;
          for (unsigned int p = 0; p < ghost_targets_data.size(); ++p)
            {
              unsigned int last_index = numbers::invalid_unsigned_int - 1;
              for (unsigned int ii = 0; ii < ghost_targets_data[p].second; ii++)
                {
                  const unsigned int i = shift + ii;
                  if (expanded_numbering[i] == last_index + 1)
                    // if contiguous, increment the end of last range:
                    ghost_indices_subset.back().second++;
                  else
                    // otherwise start a new range
                    ghost_indices_subset.emplace_back(expanded_numbering[i],
                                                      expanded_numbering[i] +
                                                        1);
                  last_index = expanded_numbering[i];
                }
              shift += ghost_targets_data[p].second;
              ghost_indices_subset_chunks_by_rank_data[p + 1] =
                ghost_indices_subset.size();
            }
          ghost_indices_subset_data = ghost_indices_subset;
        }
    }
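
    // Summary of the data structures built by set_ghost_indices() (a reading
    // aid restating the code above): ghost_targets_data records from which
    // ranks we receive how many ghost values, import_targets_data to which
    // ranks we send how many locally owned values, import_indices_data which
    // local ranges get packed for sending, and ghost_indices_subset_data
    // where the tight ghost indices sit inside a possibly larger ghost
    // array.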



    bool
    Partitioner::is_compatible(const Partitioner &part) const
    {
      // if the given partitioner points to the same memory location as the
      // current object, the two are trivially compatible
      if (&part == this)
        return true;
#ifdef DEAL_II_WITH_MPI
      if (Utilities::MPI::job_supports_mpi())
        {
          int communicators_same = 0;
          const int ierr = MPI_Comm_compare(part.communicator,
                                            communicator,
                                            &communicators_same);
          AssertThrowMPI(ierr);
          if (!(communicators_same == MPI_IDENT ||
                communicators_same == MPI_CONGRUENT))
            return false;
        }
#endif
      return (global_size == part.global_size &&
              local_range_data == part.local_range_data &&
              ghost_indices_data == part.ghost_indices_data);
    }
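
    // Note: is_compatible() performs only local, communication-free checks,
    // so different ranks may disagree on the result. is_globally_compatible()
    // below reduces the local verdicts with Utilities::MPI::min() to obtain
    // an answer that is consistent across all ranks.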


    bool
    Partitioner::is_globally_compatible(const Partitioner &part) const
    {
      return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
                                 communicator) == 1;
    }



    std::size_t
    Partitioner::memory_consumption() const
    {
      std::size_t memory = (3 * sizeof(types::global_dof_index) +
                            4 * sizeof(unsigned int) + sizeof(MPI_Comm));
      memory += MemoryConsumption::memory_consumption(locally_owned_range_data);
      memory += MemoryConsumption::memory_consumption(ghost_targets_data);
      memory += MemoryConsumption::memory_consumption(import_targets_data);
      memory += MemoryConsumption::memory_consumption(import_indices_data);
      memory += MemoryConsumption::memory_consumption(
        import_indices_chunks_by_rank_data);
      memory += MemoryConsumption::memory_consumption(
        ghost_indices_subset_chunks_by_rank_data);
      memory +=
        MemoryConsumption::memory_consumption(ghost_indices_subset_data);
      memory += MemoryConsumption::memory_consumption(ghost_indices_data);
      return memory;
    }

  } // namespace MPI

} // end of namespace Utilities



DEAL_II_NAMESPACE_CLOSE

// explicit instantiations from .templates.h file
#include "partitioner.inst"