Reference documentation for deal.II version Git cb0bd54b52 2019-09-21 16:31:22 -0400
partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 1999 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/partitioner.h>
#include <deal.II/base/partitioner.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    Partitioner::Partitioner()
      : global_size(0)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, 0))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {}


    Partitioner::Partitioner(const unsigned int size)
      : global_size(size)
      , locally_owned_range_data(size)
      , local_range_data(
          std::pair<types::global_dof_index, types::global_dof_index>(0, size))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(MPI_COMM_SELF)
      , have_ghost_indices(false)
    {
      locally_owned_range_data.add_range(0, size);
      locally_owned_range_data.compress();
      ghost_indices_data.set_size(size);
    }


    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const IndexSet &ghost_indices_in,
                             const MPI_Comm  communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
      set_ghost_indices(ghost_indices_in);
    }


    Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                             const MPI_Comm  communicator_in)
      : global_size(
          static_cast<types::global_dof_index>(locally_owned_indices.size()))
      , n_ghost_indices_data(0)
      , n_import_indices_data(0)
      , n_ghost_indices_in_larger_set(0)
      , my_pid(0)
      , n_procs(1)
      , communicator(communicator_in)
      , have_ghost_indices(false)
    {
      set_owned_indices(locally_owned_indices);
    }


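    // Editorial note, not part of the library source: a minimal usage sketch
    // for the constructors above. It assumes a job with exactly two MPI
    // ranks and a global size of 100, where rank 0 owns [0,50), rank 1 owns
    // [50,100), and each rank ghosts one element owned by the other rank;
    // all local names are made up for illustration.
    //
    //   const unsigned int my_rank =
    //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    //
    //   IndexSet owned(100);
    //   owned.add_range(my_rank == 0 ? 0 : 50, my_rank == 0 ? 50 : 100);
    //
    //   IndexSet ghosts(100);
    //   ghosts.add_index(my_rank == 0 ? 50 : 49);
    //
    //   Utilities::MPI::Partitioner partitioner(owned, ghosts, MPI_COMM_WORLD);
    //   // now partitioner.local_size() == 50 and
    //   // partitioner.n_ghost_indices() == 1 on both ranks
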
    void
    Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
                        const IndexSet &read_write_vector_index_set,
                        const MPI_Comm &communicator_in)
    {
      have_ghost_indices = false;
      communicator       = communicator_in;
      set_owned_indices(vector_space_vector_index_set);
      set_ghost_indices(read_write_vector_index_set);
    }


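    // Editorial note, not part of the library source: reinit() keeps the
    // argument names of the communication-pattern base class it overrides,
    // i.e. the first index set is the locally owned range and the second one
    // is the ghost range. A sketch that rebuilds an existing object with the
    // (assumed) sets from the previous sketch:
    //
    //   Utilities::MPI::Partitioner partitioner;
    //   partitioner.reinit(owned, ghosts, MPI_COMM_WORLD);
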
    void
    Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
    {
      if (Utilities::MPI::job_supports_mpi() == true)
        {
          my_pid  = Utilities::MPI::this_mpi_process(communicator);
          n_procs = Utilities::MPI::n_mpi_processes(communicator);
        }
      else
        {
          my_pid  = 0;
          n_procs = 1;
        }

      // set the local range
      Assert(locally_owned_indices.is_contiguous() == true,
             ExcMessage("The index set specified in locally_owned_indices "
                        "is not contiguous."));
      locally_owned_indices.compress();
      if (locally_owned_indices.n_elements() > 0)
        local_range_data =
          std::pair<types::global_dof_index, types::global_dof_index>(
            locally_owned_indices.nth_index_in_set(0),
            locally_owned_indices.nth_index_in_set(0) +
              locally_owned_indices.n_elements());
      AssertThrow(
        local_range_data.second - local_range_data.first <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
      locally_owned_range_data.set_size(locally_owned_indices.size());
      locally_owned_range_data.add_range(local_range_data.first,
                                         local_range_data.second);
      locally_owned_range_data.compress();

      ghost_indices_data.set_size(locally_owned_indices.size());
    }


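    // Editorial note, not part of the library source: set_owned_indices()
    // only accepts index sets that consist of a single contiguous range per
    // process (see the Assert above). A sketch with made-up sizes:
    //
    //   IndexSet contiguous(100);
    //   contiguous.add_range(20, 40);    // one range: accepted
    //
    //   IndexSet scattered(100);
    //   scattered.add_range(0, 10);
    //   scattered.add_range(50, 60);     // two disjoint ranges: triggers the
    //                                    // "is not contiguous" assertion in
    //                                    // debug mode
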
    void
    Partitioner::set_ghost_indices(const IndexSet &ghost_indices_in,
                                   const IndexSet &larger_ghost_index_set)
    {
      // Set ghost indices from input. To be sure that no entries from the
      // locally owned range are present, subtract the locally owned indices
      // in any case.
      Assert(ghost_indices_in.n_elements() == 0 ||
               ghost_indices_in.size() == locally_owned_range_data.size(),
             ExcDimensionMismatch(ghost_indices_in.size(),
                                  locally_owned_range_data.size()));

      ghost_indices_data = ghost_indices_in;
      if (ghost_indices_data.size() != locally_owned_range_data.size())
        ghost_indices_data.set_size(locally_owned_range_data.size());
      ghost_indices_data.subtract_set(locally_owned_range_data);
      ghost_indices_data.compress();
      AssertThrow(
        ghost_indices_data.n_elements() <
          static_cast<types::global_dof_index>(
            std::numeric_limits<unsigned int>::max()),
        ExcMessage(
          "Index overflow: This class supports at most 2^32-1 ghost elements"));
      n_ghost_indices_data = ghost_indices_data.n_elements();

      have_ghost_indices =
        Utilities::MPI::sum(n_ghost_indices_data, communicator) > 0;

      // In the rest of this function, we determine the point-to-point
      // communication pattern of the partitioner. We build a list of both
      // the processors that the ghost indices actually belong to, and the
      // indices that are locally held but are ghost indices of other
      // processors. This then allows us to import and export data very
      // easily.

      // find out the end index for each processor and communicate it (this
      // implies the start index for the next processor)
#ifdef DEAL_II_WITH_MPI
      if (n_procs < 2)
        {
          Assert(ghost_indices_data.n_elements() == 0, ExcInternalError());
          Assert(n_import_indices_data == 0, ExcInternalError());
          Assert(n_ghost_indices_data == 0, ExcInternalError());
          return;
        }

      types::global_dof_index my_size = local_size();
      // Allow a non-zero start index for the vector. Send this data to all
      // processors.
      if (my_pid == 0)
        my_size += local_range_data.first;

      types::global_dof_index my_shift = 0;
      {
        const int ierr = MPI_Exscan(&my_size,
                                    &my_shift,
                                    1,
                                    DEAL_II_DOF_INDEX_MPI_TYPE,
                                    MPI_SUM,
                                    communicator);
        AssertThrowMPI(ierr);
      }
      if (my_shift != local_range_data.first)
        {
          const types::global_dof_index old_local_size = local_size();
          local_range_data.first  = my_shift;
          local_range_data.second = my_shift + old_local_size;
        }

      std::vector<unsigned int> owning_ranks_of_ghosts(
        ghost_indices_data.n_elements());

      // set up dictionary
      internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
        locally_owned_range_data,
        ghost_indices_data,
        communicator,
        owning_ranks_of_ghosts,
        /* track origins of ghosts*/ true);

      // read dictionary by communicating with the process that owns the index
      // in the static partition (i.e. in the dictionary). This process
      // returns the actual owner of the index.
      ConsensusAlgorithmSelector<
        std::pair<types::global_dof_index, types::global_dof_index>,
        unsigned int>
        consensus_algorithm(process, communicator);
      consensus_algorithm.run();

      {
        ghost_targets_data = {};

        if (owning_ranks_of_ghosts.size() > 0)
          {
            ghost_targets_data.emplace_back(owning_ranks_of_ghosts[0], 0);
            for (auto i : owning_ranks_of_ghosts)
              {
                Assert(i >= ghost_targets_data.back().first,
                       ExcInternalError(
                         "Expect result of ConsensusAlgorithmProcess to be "
                         "sorted"));
                if (i == ghost_targets_data.back().first)
                  ghost_targets_data.back().second++;
                else
                  ghost_targets_data.emplace_back(i, 1);
              }
          }
      }

      // find out how much the individual processes want to import from me
      std::map<unsigned int, IndexSet> import_data = process.get_requesters();

      // count import requests and set up the compressed indices
      n_import_indices_data = 0;
      import_targets_data   = {};
      import_targets_data.reserve(import_data.size());
      import_indices_chunks_by_rank_data = {};
      import_indices_chunks_by_rank_data.reserve(import_data.size());
      import_indices_chunks_by_rank_data.push_back(0);
      for (const auto &i : import_data)
        if (i.second.n_elements() > 0)
          {
            import_targets_data.emplace_back(i.first, i.second.n_elements());
            n_import_indices_data += i.second.n_elements();
            import_indices_chunks_by_rank_data.push_back(
              import_indices_chunks_by_rank_data.back() +
              i.second.n_intervals());
          }

      // transform import indices to local index space
      import_indices_data = {};
      import_indices_data.reserve(import_indices_chunks_by_rank_data.back());
      for (const auto &i : import_data)
        {
          Assert((i.second & locally_owned_range_data) == i.second,
                 ExcInternalError("Requested indices must be in local range"));
          for (auto interval = i.second.begin_intervals();
               interval != i.second.end_intervals();
               ++interval)
            import_indices_data.emplace_back(*interval->begin() -
                                               local_range_data.first,
                                             interval->last() + 1 -
                                               local_range_data.first);
        }

#  ifdef DEBUG

      // simple check: the number of processors to which we want to send
      // ghosts and the number of processors that reference our data as
      // ghosts should be the same
      AssertDimension(
        Utilities::MPI::sum(import_targets_data.size(), communicator),
        Utilities::MPI::sum(ghost_targets_data.size(), communicator));

      // simple check: the number of indices to exchange should match from the
      // ghost indices side and the import indices side
      AssertDimension(Utilities::MPI::sum(n_import_indices_data, communicator),
                      Utilities::MPI::sum(n_ghost_indices_data, communicator));

      // expensive check that the communication channel is sane -> do a ghost
      // exchange step and see whether the ghost indices sent to us by other
      // processes (ghost_indices) are the same as we hold locally
      // (ghost_indices_ref).
      std::vector<types::global_dof_index> ghost_indices_ref;
      ghost_indices_data.fill_index_vector(ghost_indices_ref);
      AssertDimension(ghost_indices_ref.size(), n_ghost_indices());
      std::vector<types::global_dof_index> indices_to_send(n_import_indices());
      std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());
      std::vector<types::global_dof_index> my_indices;
      locally_owned_range_data.fill_index_vector(my_indices);
      std::vector<MPI_Request> requests;
      // exchange the locally owned indices like ghost values of a vector
      export_to_ghosted_array_start(127,
                                    ArrayView<const types::global_dof_index>(
                                      my_indices.data(), my_indices.size()),
                                    make_array_view(indices_to_send),
                                    make_array_view(ghost_indices),
                                    requests);
      export_to_ghosted_array_finish(make_array_view(ghost_indices), requests);
      int       flag = 0;
      const int ierr = MPI_Testall(requests.size(),
                                   requests.data(),
                                   &flag,
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);
      Assert(flag == 1,
             ExcMessage(
               "MPI found unfinished requests. Check communication setup"));

      for (unsigned int i = 0; i < ghost_indices.size(); ++i)
        AssertDimension(ghost_indices[i], ghost_indices_ref[i]);

#  endif

#endif // #ifdef DEAL_II_WITH_MPI

      if (larger_ghost_index_set.size() == 0)
        {
          ghost_indices_subset_chunks_by_rank_data.clear();
          ghost_indices_subset_data.emplace_back(local_size(),
                                                 local_size() +
                                                   n_ghost_indices());
          n_ghost_indices_in_larger_set = n_ghost_indices_data;
        }
      else
        {
          AssertDimension(larger_ghost_index_set.size(),
                          ghost_indices_data.size());
          Assert(
            (larger_ghost_index_set & locally_owned_range_data).n_elements() ==
              0,
            ExcMessage("Ghost index set should not overlap with owned set."));
          Assert((larger_ghost_index_set & ghost_indices_data) ==
                   ghost_indices_data,
                 ExcMessage("Larger ghost index set must contain the tight "
                            "ghost index set."));

          n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();

          // first translate tight ghost indices into indices within the large
          // set:
          std::vector<unsigned int> expanded_numbering;
          for (IndexSet::size_type index : ghost_indices_data)
            {
              Assert(larger_ghost_index_set.is_element(index),
                     ExcMessage("The given larger ghost index set must contain "
                                "all indices in the actual index set."));
              Assert(
                larger_ghost_index_set.index_within_set(index) <
                  static_cast<types::global_dof_index>(
                    std::numeric_limits<unsigned int>::max()),
                ExcMessage(
                  "Index overflow: This class supports at most 2^32-1 ghost elements"));
              expanded_numbering.push_back(
                larger_ghost_index_set.index_within_set(index));
            }

          // now rework expanded_numbering into ranges and store them in
          // ghost_indices_subset:
          std::vector<std::pair<unsigned int, unsigned int>>
            ghost_indices_subset;
          ghost_indices_subset_chunks_by_rank_data.resize(
            ghost_targets_data.size() + 1);
          // also populate ghost_indices_subset_chunks_by_rank_data
          ghost_indices_subset_chunks_by_rank_data[0] = 0;
          unsigned int shift                          = 0;
          for (unsigned int p = 0; p < ghost_targets_data.size(); ++p)
            {
              unsigned int last_index = numbers::invalid_unsigned_int - 1;
              for (unsigned int ii = 0; ii < ghost_targets_data[p].second; ii++)
                {
                  const unsigned int i = shift + ii;
                  if (expanded_numbering[i] == last_index + 1)
                    // if contiguous, increment the end of the last range:
                    ghost_indices_subset.back().second++;
                  else
                    // otherwise start a new range:
                    ghost_indices_subset.emplace_back(expanded_numbering[i],
                                                      expanded_numbering[i] +
                                                        1);
                  last_index = expanded_numbering[i];
                }
              shift += ghost_targets_data[p].second;
              ghost_indices_subset_chunks_by_rank_data[p + 1] =
                ghost_indices_subset.size();
            }
          ghost_indices_subset_data = ghost_indices_subset;
        }
    }


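    // Editorial note, not part of the library source: a sketch of the
    // optional second argument of set_ghost_indices(). The tight ghost set
    // must be contained in the larger one, and the larger one must not
    // overlap the locally owned range; indices and sizes are made up for
    // illustration.
    //
    //   IndexSet owned(100);
    //   owned.add_range(0, 40);            // this rank owns [0,40)
    //
    //   IndexSet tight_ghosts(100);
    //   tight_ghosts.add_index(42);
    //
    //   IndexSet larger_ghosts(100);
    //   larger_ghosts.add_range(40, 45);   // superset, disjoint from owned
    //
    //   Utilities::MPI::Partitioner partitioner(owned, MPI_COMM_WORLD);
    //   partitioner.set_ghost_indices(tight_ghosts, larger_ghosts);
    //   // ghost entries are then addressed by their position within
    //   // larger_ghosts rather than within tight_ghosts
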
    bool
    Partitioner::is_compatible(const Partitioner &part) const
    {
      // if the partitioner points to the same memory location as the calling
      // object, the two are trivially compatible
      if (&part == this)
        return true;
#ifdef DEAL_II_WITH_MPI
      if (Utilities::MPI::job_supports_mpi())
        {
          int       communicators_same = 0;
          const int ierr               = MPI_Comm_compare(part.communicator,
                                                          communicator,
                                                          &communicators_same);
          AssertThrowMPI(ierr);
          if (!(communicators_same == MPI_IDENT ||
                communicators_same == MPI_CONGRUENT))
            return false;
        }
#endif
      return (global_size == part.global_size &&
              local_range_data == part.local_range_data &&
              ghost_indices_data == part.ghost_indices_data);
    }


    bool
    Partitioner::is_globally_compatible(const Partitioner &part) const
    {
      return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
                                 communicator) == 1;
    }


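    // Editorial note, not part of the library source: is_compatible() is a
    // purely local and therefore cheap check, whereas
    // is_globally_compatible() performs a reduction over the communicator
    // and must be called by all processes. A sketch with two assumed
    // partitioner objects part_a and part_b:
    //
    //   const bool compatible_here       = part_a.is_compatible(part_b);
    //   const bool compatible_everywhere =
    //     part_a.is_globally_compatible(part_b);   // collective call
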
    std::size_t
    Partitioner::memory_consumption() const
    {
      std::size_t memory = (3 * sizeof(types::global_dof_index) +
                            4 * sizeof(unsigned int) + sizeof(MPI_Comm));
      memory += MemoryConsumption::memory_consumption(locally_owned_range_data);
      memory += MemoryConsumption::memory_consumption(ghost_indices_data);
      memory += MemoryConsumption::memory_consumption(n_ghost_indices_data);
      memory += MemoryConsumption::memory_consumption(ghost_targets_data);
      memory += MemoryConsumption::memory_consumption(import_targets_data);
      memory += MemoryConsumption::memory_consumption(import_indices_data);
      memory += MemoryConsumption::memory_consumption(
        import_indices_chunks_by_rank_data);
      memory += MemoryConsumption::memory_consumption(
        ghost_indices_subset_chunks_by_rank_data);
      memory +=
        MemoryConsumption::memory_consumption(ghost_indices_subset_data);
      return memory;
    }

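    // Editorial note, not part of the library source: memory_consumption()
    // returns the local footprint in bytes; a sketch of reporting the
    // largest footprint over all ranks (assuming MPI_COMM_WORLD was used to
    // build 'partitioner'):
    //
    //   const std::size_t max_bytes =
    //     Utilities::MPI::max(partitioner.memory_consumption(),
    //                         MPI_COMM_WORLD);
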
  } // namespace MPI

} // end of namespace Utilities



// explicit instantiations from .templates.h file
#include "partitioner.inst"

DEAL_II_NAMESPACE_CLOSE