Reference documentation for deal.II version Git 989c3ebba0 2019-10-17 23:08:47 -0600
mpi_compute_index_owner_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_base_mpi_compute_index_owner_internal_h
17 #define dealii_base_mpi_compute_index_owner_internal_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/mpi.h>
22 
23 DEAL_II_NAMESPACE_OPEN
24 
25 namespace Utilities
26 {
27  namespace MPI
28  {
29  namespace internal
30  {
35  namespace ComputeIndexOwner
36  {
42  struct Dictionary
43  {
48  static const unsigned int tag_setup = 11;
49 
53  static const unsigned int range_minimum_grain_size = 4096;
54 
61  std::vector<unsigned int> actually_owning_ranks;
62 
67  std::vector<unsigned int> actually_owning_rank_list;
68 
76  types::global_dof_index dofs_per_process;
82  std::pair<types::global_dof_index, types::global_dof_index>
83  local_range;
84 
91  types::global_dof_index local_size;
96  types::global_dof_index size;
102  unsigned int n_dict_procs_in_owned_indices;
108  unsigned int stride_small_size;
109 
115  void
116  reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
117  {
118  // 1) set up the partition
119  this->partition(owned_indices, comm);
120 
121 #ifdef DEAL_II_WITH_MPI
122  unsigned int my_rank = this_mpi_process(comm);
123 
124  types::global_dof_index dic_local_received = 0;
125  std::map<unsigned int,
126  std::vector<std::pair<types::global_dof_index,
127  types::global_dof_index>>>
128  buffers;
129 
130  // 2) collect relevant processes and process local dict entries
131  for (auto interval = owned_indices.begin_intervals();
132  interval != owned_indices.end_intervals();
133  interval++)
134  {
135  // Due to the granularity of the dictionary, the interval
136  // might be split into several ranges of processor owner
137  // ranks. Here, we process the interval by breaking it into
138  // smaller pieces in terms of the dictionary number.
139  std::pair<types::global_dof_index, types::global_dof_index>
140  index_range(*interval->begin(), interval->last() + 1);
141  const unsigned int owner_last =
142  dof_to_dict_rank(interval->last());
143  unsigned int owner_first = numbers::invalid_unsigned_int;
144  while (owner_first != owner_last)
145  {
146  owner_first = dof_to_dict_rank(index_range.first);
147 
148  // this explicitly picks up the formula of
149  // dof_to_dict_rank, so the two places must be in sync
150  types::global_dof_index next_index =
151  std::min((index_range.first / dofs_per_process + 1) *
152  dofs_per_process,
153  index_range.second);
154 
155  Assert(next_index > index_range.first, ExcInternalError());
156 
157 # ifdef DEBUG
158  // make sure that the owner is the same on the current
159  // interval
160  for (types::global_dof_index i = index_range.first + 1;
161  i < next_index;
162  ++i)
163  AssertDimension(owner_first, dof_to_dict_rank(i));
164 # endif
165 
166  // add the interval, either to the local range or into a
167  // buffer to be sent to another processor
168  if (owner_first == my_rank)
169  {
170  std::fill(actually_owning_ranks.data() +
171  index_range.first - local_range.first,
172  actually_owning_ranks.data() + next_index -
173  local_range.first,
174  my_rank);
175  dic_local_received += next_index - index_range.first;
176  if (actually_owning_rank_list.empty())
177  actually_owning_rank_list.push_back(my_rank);
178  }
179  else
180  buffers[owner_first].emplace_back(index_range.first,
181  next_index);
182 
183  index_range.first = next_index;
184  }
185  }
186 
187  n_dict_procs_in_owned_indices = buffers.size();
188  std::vector<MPI_Request> request;
189  request.reserve(n_dict_procs_in_owned_indices);
190 
191  // 3) send messages with local dofs to the right dict process
192  for (const auto &rank_pair : buffers)
193  {
194  request.push_back(MPI_Request());
195  const auto ierr = MPI_Isend(rank_pair.second.data(),
196  rank_pair.second.size() * 2,
197  DEAL_II_DOF_INDEX_MPI_TYPE,
198  rank_pair.first,
199  tag_setup,
200  comm,
201  &request.back());
202  AssertThrowMPI(ierr);
203  }
204 
205  // 4) receive messages until all dofs in dict are processed
206  while (this->local_size != dic_local_received)
207  {
208  // wait for an incoming message
209  MPI_Status status;
210  auto ierr = MPI_Probe(MPI_ANY_SOURCE, tag_setup, comm, &status);
211  AssertThrowMPI(ierr);
212 
213  // retrieve size of incoming message
214  int number_amount;
215  ierr = MPI_Get_count(&status,
216  DEAL_II_DOF_INDEX_MPI_TYPE,
217  &number_amount);
218  AssertThrowMPI(ierr);
219 
220  const auto other_rank = status.MPI_SOURCE;
221  actually_owning_rank_list.push_back(other_rank);
222 
223  // receive message
224  Assert(number_amount % 2 == 0, ExcInternalError());
225  std::vector<
226  std::pair<types::global_dof_index, types::global_dof_index>>
227  buffer(number_amount / 2);
228  ierr = MPI_Recv(buffer.data(),
229  number_amount,
230  DEAL_II_DOF_INDEX_MPI_TYPE,
231  other_rank,
232  tag_setup,
233  comm,
234  &status);
235  AssertThrowMPI(ierr);
236 
237  // process message: loop over all intervals
238  for (auto interval : buffer)
239  {
240 # ifdef DEBUG
241  for (types::global_dof_index i = interval.first;
242  i < interval.second;
243  i++)
244  Assert(actually_owning_ranks[i - local_range.first] ==
245  numbers::invalid_unsigned_int,
246  ExcInternalError());
247  Assert(interval.first >= local_range.first &&
248  interval.first < local_range.second,
249  ExcInternalError());
250  Assert(interval.second > local_range.first &&
251  interval.second <= local_range.second,
252  ExcInternalError());
253 # endif
254 
255  std::fill(actually_owning_ranks.data() + interval.first -
256  local_range.first,
257  actually_owning_ranks.data() + interval.second -
258  local_range.first,
259  other_rank);
260  dic_local_received += interval.second - interval.first;
261  }
262  }
263 
264  std::sort(actually_owning_rank_list.begin(),
265  actually_owning_rank_list.end());
266 
267  for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
268  Assert(actually_owning_rank_list[i] >
269  actually_owning_rank_list[i - 1],
270  ExcInternalError());
271 
272  // 5) make sure that all messages have been sent
273  const auto ierr =
274  MPI_Waitall(request.size(), request.data(), MPI_STATUSES_IGNORE);
275  AssertThrowMPI(ierr);
276 #else
277  (void)owned_indices;
278  (void)comm;
279 #endif
280  }
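/* Editorial note, not part of the original listing: an illustrative example
 * of the buffers exchanged in steps 2)-4) above. Suppose a rank owns the
 * global index ranges [0,100) and [200,300) and the dictionary partition
 * assigns [0,4096) to dictionary rank 0. Both intervals then end up in
 * buffers[0], and step 3) sends the four numbers 0, 100, 200, 300 to rank 0;
 * the receiver reconstructs the two half-open intervals, marks the
 * corresponding entries of actually_owning_ranks with the sender's rank,
 * and adds 200 to dic_local_received. */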
281 
287  unsigned int
288  dof_to_dict_rank(const types::global_dof_index i)
289  {
290  // note: this formula is also explicitly used in reinit()
291  return (i / dofs_per_process) * stride_small_size;
292  }
293 
298  types::global_dof_index
299  get_index_offset(const unsigned int rank)
300  {
301  return std::min(dofs_per_process * ((rank + stride_small_size - 1) /
302  stride_small_size),
303  size);
304  }
305 
311  unsigned int
312  get_owning_rank_index(const unsigned int rank_in_owned_indices,
313  const unsigned int guess = 0)
314  {
315  AssertIndexRange(guess, actually_owning_rank_list.size());
316  if (actually_owning_rank_list[guess] == rank_in_owned_indices)
317  return guess;
318  else
319  {
320  auto it = std::lower_bound(actually_owning_rank_list.begin(),
321  actually_owning_rank_list.end(),
322  rank_in_owned_indices);
323  Assert(it != actually_owning_rank_list.end(),
324  ExcInternalError());
325  Assert(*it == rank_in_owned_indices, ExcInternalError());
326  return it - actually_owning_rank_list.begin();
327  }
328  }
329 
330  private:
335  void
336  partition(const IndexSet &owned_indices, const MPI_Comm &comm)
337  {
338 #ifdef DEAL_II_WITH_MPI
339  const unsigned int n_procs = n_mpi_processes(comm);
340  const unsigned int my_rank = this_mpi_process(comm);
341 
342  size = owned_indices.size();
343  dofs_per_process = (size + n_procs - 1) / n_procs;
344  if (dofs_per_process < range_minimum_grain_size)
345  {
346  dofs_per_process = range_minimum_grain_size;
347  stride_small_size = dofs_per_process * n_procs / size;
348  }
349  else
350  stride_small_size = 1;
351  local_range.first = get_index_offset(my_rank);
352  local_range.second = get_index_offset(my_rank + 1);
353 
354  local_size = local_range.second - local_range.first;
355 
356  actually_owning_ranks = {};
357  actually_owning_ranks.resize(local_size,
358  numbers::invalid_unsigned_int);
359 
360  (void)owned_indices;
361  (void)comm;
362 #endif
363  }
364  };
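/* Editorial note, not part of the original listing: a worked example of the
 * partition arithmetic above. For size = 10000 global indices on n_procs = 16
 * ranks, the naive chunk (10000 + 16 - 1) / 16 = 625 is below
 * range_minimum_grain_size = 4096, so partition() sets dofs_per_process = 4096
 * and stride_small_size = 4096 * 16 / 10000 = 6. Consequently only every 6th
 * rank holds a dictionary chunk:
 *   dof_to_dict_rank(i):    [0,4096) -> 0,  [4096,8192) -> 6,  [8192,10000) -> 12
 *   local_range on rank 0:  [0, 4096)
 *   local_range on rank 6:  [4096, 8192)
 *   local_range on rank 12: [8192, 10000)
 * and local_range is empty on all other ranks. */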
365 
366 
367 
374  class ConsensusAlgorithmPayload
375  : public ConsensusAlgorithmProcess<
376  std::pair<types::global_dof_index, types::global_dof_index>,
377  unsigned int>
378  {
379  public:
383  ConsensusAlgorithmPayload(const IndexSet &owned_indices,
384  const IndexSet &indices_to_look_up,
385  const MPI_Comm &comm,
386  std::vector<unsigned int> &owning_ranks,
387  const bool track_index_requests = false)
388  : owned_indices(owned_indices)
389  , indices_to_look_up(indices_to_look_up)
390  , comm(comm)
391  , my_rank(this_mpi_process(comm))
392  , n_procs(n_mpi_processes(comm))
393  , track_index_requests(track_index_requests)
394  , owning_ranks(owning_ranks)
395  {
396  dict.reinit(owned_indices, comm);
397  requesters.resize(dict.actually_owning_rank_list.size());
398  }
399 
404  const IndexSet &owned_indices;
410  const IndexSet &indices_to_look_up;
414  const MPI_Comm comm;
415 
419  const unsigned int my_rank;
420 
425  const unsigned int n_procs;
426 
433  const bool track_index_requests;
439  std::vector<unsigned int> &owning_ranks;
440 
450  std::vector<std::vector<
451  std::pair<unsigned int,
452  std::vector<std::pair<unsigned int, unsigned int>>>>>
453  requesters;
454 
459  Dictionary dict;
464  std::map<unsigned int, std::vector<types::global_dof_index>>
465  indices_to_look_up_by_dict_rank;
466 
471  std::map<unsigned int, std::vector<unsigned int>> recv_indices;
472 
480  virtual void
481  process_request(
482  const unsigned int other_rank,
483  const std::vector<std::pair<types::global_dof_index,
484  types::global_dof_index>> &buffer_recv,
485  std::vector<unsigned int> &request_buffer) override
486  {
487  unsigned int owner_index = 0;
488  for (const auto interval : buffer_recv)
489  for (auto i = interval.first; i < interval.second; ++i)
490  {
491  const unsigned int actual_owner =
492  dict.actually_owning_ranks[i - dict.local_range.first];
493  request_buffer.push_back(actual_owner);
494 
495  if (track_index_requests)
496  append_index_origin(i, owner_index, other_rank);
497  }
498  }
499 
504  virtual std::vector<unsigned int>
505  compute_targets() override
506  {
507  std::vector<unsigned int> targets;
508 
509  // 1) collect relevant processes and process local dict entries
510  {
511  unsigned int index = 0;
512  unsigned int owner_index = 0;
513  for (auto i : indices_to_look_up)
514  {
515  unsigned int other_rank = dict.dof_to_dict_rank(i);
516  if (other_rank == my_rank)
517  {
518  owning_ranks[index] =
519  dict.actually_owning_ranks[i - dict.local_range.first];
520  if (track_index_requests)
521  append_index_origin(i, owner_index, my_rank);
522  }
523  else if (targets.empty() || targets.back() != other_rank)
524  targets.push_back(other_rank);
525  index++;
526  }
527  }
528 
529 
530  for (auto i : targets)
531  {
532  recv_indices[i] = {};
533  indices_to_look_up_by_dict_rank[i] = {};
534  }
535 
536  // 3) collect indices for each process
537  {
538  unsigned int index = 0;
539  for (auto i : indices_to_look_up)
540  {
541  unsigned int other_rank = dict.dof_to_dict_rank(i);
542  if (other_rank != my_rank)
543  {
544  recv_indices[other_rank].push_back(index);
545  indices_to_look_up_by_dict_rank[other_rank].push_back(i);
546  }
547  index++;
548  }
549  }
550 
551  Assert(targets.size() == recv_indices.size() &&
552  targets.size() == indices_to_look_up_by_dict_rank.size(),
553  ExcMessage("Size does not match!"));
554 
555  return targets;
556  }
557 
562  virtual void
563  pack_recv_buffer(const int other_rank,
564  std::vector<std::pair<types::global_dof_index,
565  types::global_dof_index>>
566  &send_buffer) override
567  {
568  // create index set and compress data to be sent
569  auto & indices_i = indices_to_look_up_by_dict_rank[other_rank];
570  IndexSet is(dict.size);
571  is.add_indices(indices_i.begin(), indices_i.end());
572  is.compress();
573 
574  for (auto interval = is.begin_intervals();
575  interval != is.end_intervals();
576  interval++)
577  send_buffer.emplace_back(*interval->begin(),
578  interval->last() + 1);
579  }
580 
585  virtual void
586  prepare_recv_buffer(const int other_rank,
587  std::vector<unsigned int> &recv_buffer) override
588  {
589  recv_buffer.resize(recv_indices[other_rank].size());
590  }
591 
596  virtual void
597  unpack_recv_buffer(
598  const int other_rank,
599  const std::vector<unsigned int> &recv_buffer) override
600  {
601  Assert(recv_indices[other_rank].size() == recv_buffer.size(),
602  ExcMessage("Sizes do not match!"));
603 
604  for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
605  owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
606  }
607 
617  std::map<unsigned int, IndexSet>
618  get_requesters()
619  {
620  Assert(track_index_requests,
621  ExcMessage("Must enable index range tracking in "
622  "constructor of ConsensusAlgorithmProcess"));
623 
624  std::map<unsigned int, IndexSet> requested_indices;
625 
626 #ifdef DEAL_II_WITH_MPI
627 
628  // reserve enough slots for the requests ahead; depending on
629  // whether the owning rank is one of the requesters or not, we
630  // might have one fewer request to execute, so fill the requests
631  // on demand.
632  std::vector<MPI_Request> send_requests;
633  send_requests.reserve(requesters.size());
634 
635  // We use an integer vector for the data exchange. Since we send
636  // data associated to intervals with different requesters, we will
637  // need to send (a) the MPI rank of the requester, (b) the number
638  // of intervals directed to this requester, and (c) a list of
639  // intervals, i.e., two integers per interval. The number of items
640  // sent in total can be deduced both via the MPI status message at
641  // the receiver side as well as by counting the buckets from
642  // different requesters.
643  std::vector<std::vector<unsigned int>> send_data(requesters.size());
644  for (unsigned int i = 0; i < requesters.size(); ++i)
645  {
646  // special code for our own indices
647  if (dict.actually_owning_rank_list[i] == my_rank)
648  {
649  for (const auto &j : requesters[i])
650  {
651  const types::global_dof_index index_offset =
652  dict.get_index_offset(my_rank);
653  IndexSet &my_index_set = requested_indices[j.first];
654  my_index_set.set_size(owned_indices.size());
655  for (const auto &interval : j.second)
656  my_index_set.add_range(index_offset + interval.first,
657  index_offset +
658  interval.second);
659  }
660  }
661  else
662  {
663  for (const auto &j : requesters[i])
664  {
665  send_data[i].push_back(j.first);
666  send_data[i].push_back(j.second.size());
667  for (const auto &interval : j.second)
668  {
669  send_data[i].push_back(interval.first);
670  send_data[i].push_back(interval.second);
671  }
672  }
673  send_requests.push_back(MPI_Request());
674  const int ierr =
675  MPI_Isend(send_data[i].data(),
676  send_data[i].size(),
677  MPI_UNSIGNED,
678  dict.actually_owning_rank_list[i],
679  1021,
680  comm,
681  &send_requests.back());
682  AssertThrowMPI(ierr);
683  }
684  }
685 
686  // receive the data
687  for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices;
688  ++c)
689  {
690  // wait for an incoming message
691  MPI_Status status;
692  unsigned int ierr =
693  MPI_Probe(MPI_ANY_SOURCE, 1021, comm, &status);
694  AssertThrowMPI(ierr);
695 
696  // retrieve size of incoming message
697  int number_amount;
698  ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
699  AssertThrowMPI(ierr);
700 
701  // receive message
702  Assert(number_amount % 2 == 0, ExcInternalError());
703  std::vector<std::pair<unsigned int, unsigned int>> buffer(
704  number_amount / 2);
705  ierr = MPI_Recv(buffer.data(),
706  number_amount,
707  MPI_UNSIGNED,
708  status.MPI_SOURCE,
709  1021,
710  comm,
711  &status);
712  AssertThrowMPI(ierr);
713 
714  // unpack the message and translate the dictionary-local
715  // indices coming via MPI to the global index range
716  const types::global_dof_index index_offset =
717  dict.get_index_offset(status.MPI_SOURCE);
718  unsigned int offset = 0;
719  while (offset < buffer.size())
720  {
721  AssertIndexRange(offset + buffer[offset].second,
722  buffer.size());
723 
724  IndexSet my_index_set(owned_indices.size());
725  for (unsigned int i = offset + 1;
726  i < offset + buffer[offset].second + 1;
727  ++i)
728  my_index_set.add_range(index_offset + buffer[i].first,
729  index_offset + buffer[i].second);
730 
731  // the underlying index set is able to merge ranges coming
732  // from different ranks due to the partitioning in the
733  // dictionary
734  IndexSet &index_set =
735  requested_indices[buffer[offset].first];
736  if (index_set.size() == 0)
737  index_set.set_size(owned_indices.size());
738  index_set.add_indices(my_index_set);
739 
740  offset += buffer[offset].second + 1;
741  }
742  AssertDimension(offset, buffer.size());
743  }
744 
745  if (send_requests.size() > 0)
746  MPI_Waitall(send_requests.size(),
747  send_requests.data(),
748  MPI_STATUSES_IGNORE);
749 
750 # ifdef DEBUG
751  for (const auto &it : requested_indices)
752  {
753  IndexSet copy_set = it.second;
754  copy_set.subtract_set(owned_indices);
755  Assert(copy_set.n_elements() == 0,
756  ExcInternalError(
757  "The indices requested from the current "
758  "MPI rank should be locally owned here!"));
759  }
760 # endif
761 
762 #endif // DEAL_II_WITH_MPI
763 
764  return requested_indices;
765  }
766 
767  private:
781  void
782  append_index_origin(const types::global_dof_index index,
783  unsigned int & owner_index,
784  const unsigned int rank_of_request)
785  {
786  // remember who requested which index. We want to use an
787  // std::vector with simple addressing, via a good guess from the
788  // preceding index, rather than std::map, because this is an inner
789  // loop and it avoids the map lookup in every iteration
790  const unsigned int rank_of_owner =
791  dict.actually_owning_ranks[index - dict.local_range.first];
792  owner_index =
793  dict.get_owning_rank_index(rank_of_owner, owner_index);
794  if (requesters[owner_index].empty() ||
795  requesters[owner_index].back().first != rank_of_request)
796  requesters[owner_index].emplace_back(
797  rank_of_request,
798  std::vector<std::pair<unsigned int, unsigned int>>());
799  if (requesters[owner_index].back().second.empty() ||
800  requesters[owner_index].back().second.back().second !=
801  index - dict.local_range.first)
802  requesters[owner_index].back().second.emplace_back(
803  index - dict.local_range.first,
804  index - dict.local_range.first + 1);
805  else
806  ++requesters[owner_index].back().second.back().second;
807  }
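/* Editorial note, not part of the original listing: the layout of the
 * `requesters` array that append_index_origin() builds up. Suppose the
 * dictionary range on the current rank is [4096, 8192), rank 3 requests the
 * global indices 4100, 4101, 4102 and 5000, and rank 9 requests 4100, with
 * all of these indices owned by the same rank. The entry of `requesters`
 * belonging to that owner then reads
 *   { {3, { {4, 7}, {904, 905} }},
 *     {9, { {4, 5} }} }
 * i.e. each requesting rank is followed by half-open, dictionary-local
 * index ranges (4100 - 4096 = 4, 5000 - 4096 = 904). */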
808  };
809 
810  } // namespace ComputeIndexOwner
811  } // namespace internal
812  } // namespace MPI
813 } // namespace Utilities
814 
815 DEAL_II_NAMESPACE_CLOSE
816 
817 #endif
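A usage sketch (editorial addition, not part of the header above): these classes are meant to be driven by Utilities::MPI::compute_index_owner() in mpi.cc, which couples the payload to a consensus algorithm. The sketch below assumes the ConsensusAlgorithmSelector class template from <deal.II/base/mpi.h> at this version of the library; the free function find_owners() is a hypothetical wrapper, not part of deal.II.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>

using namespace dealii;

// For each element of indices_to_look_up, determine which MPI rank owns it,
// given that every rank contributes its locally owned IndexSet.
std::vector<unsigned int>
find_owners(const IndexSet &owned_indices,
            const IndexSet &indices_to_look_up,
            const MPI_Comm &comm)
{
  // One answer per locally requested index.
  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

  // The payload builds the dictionary (reinit()) and knows how to answer
  // requests for index ranges with the ranks that own them.
  Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmPayload
    process(owned_indices, indices_to_look_up, comm, owning_ranks);

  // The consensus algorithm performs the dynamic-sparse point-to-point
  // exchange between the requesting ranks and the dictionary ranks.
  Utilities::MPI::ConsensusAlgorithmSelector<
    std::pair<types::global_dof_index, types::global_dof_index>,
    unsigned int>
    consensus_algorithm(process, comm);
  consensus_algorithm.run();

  // owning_ranks[j] now holds the owner of the j-th element of
  // indices_to_look_up.
  return owning_ranks;
}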