Reference documentation for deal.II version Git 399dbefe9a 2020-01-19 15:10:10 -0500
mpi_compute_index_owner_internal.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_base_mpi_compute_index_owner_internal_h
#define dealii_base_mpi_compute_index_owner_internal_h

#include <deal.II/base/config.h>

#include <deal.II/base/mpi.h>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    namespace internal
    {
      /**
       * An internal namespace used for the algorithm that computes the
       * owners of indices, see Utilities::MPI::compute_index_owner().
       */
      namespace ComputeIndexOwner
      {
        /**
         * Specialization of ConsensusAlgorithmProcess for setting up the
         * Dictionary even if there are ranges in the index space that are
         * not owned by any process.
         *
         * @note Only for internal usage.
         */
        class DictionaryPayLoad
          : public ConsensusAlgorithmProcess<
              std::pair<types::global_dof_index, types::global_dof_index>,
              unsigned int>
        {
        public:
          /**
           * Constructor.
           */
          DictionaryPayLoad(
            const std::map<unsigned int,
                           std::vector<std::pair<types::global_dof_index,
                                                 types::global_dof_index>>>
              &buffers,
            std::vector<unsigned int> &actually_owning_ranks,
            const std::pair<types::global_dof_index, types::global_dof_index>
              &local_range,
            std::vector<unsigned int> &actually_owning_rank_list)
            : buffers(buffers)
            , actually_owning_ranks(actually_owning_ranks)
            , local_range(local_range)
            , actually_owning_rank_list(actually_owning_rank_list)
          {}

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::compute_targets().
           */
          virtual std::vector<unsigned int>
          compute_targets() override
          {
            std::vector<unsigned int> targets;
            for (const auto &rank_pair : buffers)
              targets.push_back(rank_pair.first);

            return targets;
          }

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::create_request().
           */
          virtual void
          create_request(const int other_rank,
                         std::vector<std::pair<types::global_dof_index,
                                               types::global_dof_index>>
                           &send_buffer) override
          {
            send_buffer = this->buffers.at(other_rank);
          }

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::answer_request().
           */
          virtual void
          answer_request(
            const unsigned int other_rank,
            const std::vector<std::pair<types::global_dof_index,
                                        types::global_dof_index>> &buffer_recv,
            std::vector<unsigned int> &request_buffer) override
          {
            (void)request_buffer; // not needed

            // process message: loop over all intervals
            for (auto interval : buffer_recv)
              {
#ifdef DEBUG
                for (types::global_dof_index i = interval.first;
                     i < interval.second;
                     i++)
                  Assert(actually_owning_ranks[i - local_range.first] ==
                           numbers::invalid_unsigned_int,
                         ExcInternalError());
                Assert(interval.first >= local_range.first &&
                         interval.first < local_range.second,
                       ExcInternalError());
                Assert(interval.second > local_range.first &&
                         interval.second <= local_range.second,
                       ExcInternalError());
#endif
                std::fill(actually_owning_ranks.data() + interval.first -
                            local_range.first,
                          actually_owning_ranks.data() + interval.second -
                            local_range.first,
                          other_rank);
              }
            actually_owning_rank_list.push_back(other_rank);
          }

        private:
          const std::map<unsigned int,
                         std::vector<std::pair<types::global_dof_index,
                                               types::global_dof_index>>>
            &buffers;

          std::vector<unsigned int> &actually_owning_ranks;

          const std::pair<types::global_dof_index, types::global_dof_index>
            &local_range;

          std::vector<unsigned int> &actually_owning_rank_list;
        };



        /**
         * Dictionary class with a simple partitioning of the index space
         * into intervals of fixed size known to all MPI ranks, used as the
         * first stage of a two-stage index lookup.
         */
        struct Dictionary
        {
          /**
           * The minimum grain size for the intervals.
           */
          static const unsigned int range_minimum_grain_size = 4096;
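          // If an even split of the index space would give each rank fewer
          // than this many indices, partition() instead assigns ranges of at
          // least this size to every stride_small_size-th rank and leaves
          // the ranks in between without a dictionary range.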

          /**
           * A vector with as many entries as there are dofs in the
           * dictionary range of the current process, each entry containing
           * the rank of the owner of that dof in the IndexSet
           * `owned_indices`.
           */
          std::vector<unsigned int> actually_owning_ranks;

          /**
           * A sorted vector containing the MPI ranks appearing in
           * `actually_owning_ranks`.
           */
          std::vector<unsigned int> actually_owning_rank_list;

          /**
           * The number of unknowns in the dictionary for each MPI rank. For
           * simplicity of index lookup without additional communication,
           * this number is the same on all ranks.
           */
          types::global_dof_index dofs_per_process;

          /**
           * The part of the global index space represented in the local
           * dictionary, as a half-open interval.
           */
          std::pair<types::global_dof_index, types::global_dof_index>
            local_range;

          /**
           * The size of the local dictionary range, i.e.,
           * `local_range.second - local_range.first`.
           */
          types::global_dof_index local_size;

          /**
           * The global size of the index space.
           */
          types::global_dof_index size;

          /**
           * The number of ranks the `owned_indices` IndexSet is distributed
           * among.
           */
          unsigned int n_dict_procs_in_owned_indices;

          /**
           * A stride to distribute the work more evenly over MPI ranks in
           * case the grain size forces us to have fewer dictionary ranges
           * than there are ranks.
           */
          unsigned int stride_small_size;

          /**
           * Set up the dictionary by computing the partitioning from the
           * global size and sending the rank information on locally owned
           * ranges to the owner of the dictionary part.
           */
          void
          reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
          {
            // 1) set up the partition
            this->partition(owned_indices, comm);

#ifdef DEAL_II_WITH_MPI
            unsigned int my_rank = this_mpi_process(comm);

            types::global_dof_index dic_local_received = 0;
            std::map<unsigned int,
                     std::vector<std::pair<types::global_dof_index,
                                           types::global_dof_index>>>
              buffers;

            std::fill(actually_owning_ranks.begin(),
                      actually_owning_ranks.end(),
                      numbers::invalid_subdomain_id);

            // 2) collect relevant processes and process local dict entries
            for (auto interval = owned_indices.begin_intervals();
                 interval != owned_indices.end_intervals();
                 interval++)
              {
                // Due to the granularity of the dictionary, the interval
                // might be split into several ranges of processor owner
                // ranks. Here, we process the interval by breaking it into
                // smaller pieces in terms of the dictionary number.
                std::pair<types::global_dof_index, types::global_dof_index>
                  index_range(*interval->begin(), interval->last() + 1);
                const unsigned int owner_last =
                  dof_to_dict_rank(interval->last());
                unsigned int owner_first = numbers::invalid_unsigned_int;
                while (owner_first != owner_last)
                  {
                    Assert(index_range.first < index_range.second,
                           ExcInternalError());

                    owner_first = dof_to_dict_rank(index_range.first);

                    // this explicitly picks up the formula of
                    // dof_to_dict_rank, so the two places must be in sync
                    types::global_dof_index next_index =
                      std::min(get_index_offset(owner_first + 1),
                               index_range.second);

                    Assert(next_index > index_range.first, ExcInternalError());

#  ifdef DEBUG
                    // make sure that the owner is the same on the current
                    // interval
                    for (types::global_dof_index i = index_range.first + 1;
                         i < next_index;
                         ++i)
                      AssertDimension(owner_first, dof_to_dict_rank(i));
#  endif

                    // add the interval, either to the local range or into a
                    // buffer to be sent to another processor
                    if (owner_first == my_rank)
                      {
                        std::fill(actually_owning_ranks.data() +
                                    index_range.first - local_range.first,
                                  actually_owning_ranks.data() + next_index -
                                    local_range.first,
                                  my_rank);
                        dic_local_received += next_index - index_range.first;
                        if (actually_owning_rank_list.empty())
                          actually_owning_rank_list.push_back(my_rank);
                      }
                    else
                      buffers[owner_first].emplace_back(index_range.first,
                                                        next_index);

                    index_range.first = next_index;
                  }
              }

            n_dict_procs_in_owned_indices = buffers.size();
            std::vector<MPI_Request> request;

            // Check if index set space is partitioned globally without gaps.
            if (Utilities::MPI::sum(owned_indices.n_elements(), comm) ==
                owned_indices.size())
              {
                // no gaps: setup is simple! Processes send their locally
                // owned indices to the dictionary. The dictionary stores the
                // sending rank for each index. The dictionary knows that it
                // is completely set up once all indices it is responsible
                // for have been processed.

                request.reserve(n_dict_procs_in_owned_indices);

                // protect the following communication steps using a mutex:
                static CollectiveMutex      mutex;
                CollectiveMutex::ScopedLock lock(mutex, comm);

                const int mpi_tag =
                  Utilities::MPI::internal::Tags::dictionary_reinit;

                // 3) send messages with local dofs to the right dict process
                for (const auto &rank_pair : buffers)
                  {
                    request.push_back(MPI_Request());
                    const int ierr = MPI_Isend(rank_pair.second.data(),
                                               rank_pair.second.size() * 2,
                                               DEAL_II_DOF_INDEX_MPI_TYPE,
                                               rank_pair.first,
                                               mpi_tag,
                                               comm,
                                               &request.back());
                    AssertThrowMPI(ierr);
                  }

                // 4) receive messages until all dofs in dict are processed
                while (this->local_size != dic_local_received)
                  {
                    // wait for an incoming message
                    MPI_Status status;
                    auto       ierr =
                      MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
                    AssertThrowMPI(ierr);

                    // retrieve size of incoming message
                    int number_amount;
                    ierr = MPI_Get_count(&status,
                                         DEAL_II_DOF_INDEX_MPI_TYPE,
                                         &number_amount);
                    AssertThrowMPI(ierr);

                    const auto other_rank = status.MPI_SOURCE;
                    actually_owning_rank_list.push_back(other_rank);

                    // receive message
                    Assert(number_amount % 2 == 0, ExcInternalError());
                    std::vector<std::pair<types::global_dof_index,
                                          types::global_dof_index>>
                      buffer(number_amount / 2);
                    ierr = MPI_Recv(buffer.data(),
                                    number_amount,
                                    DEAL_II_DOF_INDEX_MPI_TYPE,
                                    status.MPI_SOURCE,
                                    status.MPI_TAG,
                                    comm,
                                    MPI_STATUS_IGNORE);
                    AssertThrowMPI(ierr);
                    // process message: loop over all intervals
                    for (auto interval : buffer)
                      {
#  ifdef DEBUG
                        for (types::global_dof_index i = interval.first;
                             i < interval.second;
                             i++)
                          Assert(actually_owning_ranks[i -
                                                       local_range.first] ==
                                   numbers::invalid_unsigned_int,
                                 ExcInternalError());
                        Assert(interval.first >= local_range.first &&
                                 interval.first < local_range.second,
                               ExcInternalError());
                        Assert(interval.second > local_range.first &&
                                 interval.second <= local_range.second,
                               ExcInternalError());
#  endif

                        std::fill(actually_owning_ranks.data() +
                                    interval.first - local_range.first,
                                  actually_owning_ranks.data() +
                                    interval.second - local_range.first,
                                  other_rank);
                        dic_local_received += interval.second - interval.first;
                      }
                  }
              }
            else
              {
                // with gaps: use a ConsensusAlgorithm to determine when all
                // dictionaries have been set up.

                // 3/4) use a ConsensusAlgorithm to send messages with local
                // dofs to the right dict process
                DictionaryPayLoad temp(buffers,
                                       actually_owning_ranks,
                                       local_range,
                                       actually_owning_rank_list);

                ConsensusAlgorithmSelector<
                  std::pair<types::global_dof_index, types::global_dof_index>,
                  unsigned int>
                  consensus_algo(temp, comm);
                consensus_algo.run();
              }

            std::sort(actually_owning_rank_list.begin(),
                      actually_owning_rank_list.end());

            for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
              Assert(actually_owning_rank_list[i] >
                       actually_owning_rank_list[i - 1],
                     ExcInternalError());

            // 5) make sure that all messages have been sent
            if (request.size() > 0)
              {
                const int ierr = MPI_Waitall(request.size(),
                                             request.data(),
                                             MPI_STATUSES_IGNORE);
                AssertThrowMPI(ierr);
              }

#else
            (void)owned_indices;
            (void)comm;
#endif
          }

          /**
           * Translate a global dof index to the MPI rank in the dictionary
           * using `dofs_per_process` and the stride for small index spaces.
           */
          unsigned int
          dof_to_dict_rank(const types::global_dof_index i)
          {
            // note: this formula is also explicitly used in
            // get_index_offset(), so keep the two in sync
            return (i / dofs_per_process) * stride_small_size;
          }

          /**
           * Given an MPI rank, return the offset in the global index space
           * at which its dictionary range begins.
           */
          types::global_dof_index
          get_index_offset(const unsigned int rank)
          {
            return std::min(dofs_per_process *
                              static_cast<types::global_dof_index>(
                                (rank + stride_small_size - 1) /
                                stride_small_size),
                            size);
          }
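          // Worked example for the two functions above: with
          // dofs_per_process = 4096 and stride_small_size = 4, the index
          // i = 10000 yields dof_to_dict_rank(10000) = (10000 / 4096) * 4
          // = 8, and the range of dictionary rank 8 is
          // [get_index_offset(8), get_index_offset(9)) =
          // [4096 * 2, 4096 * 3) = [8192, 12288); ranks 9 to 11 receive
          // empty ranges.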

          /**
           * Given a rank that appears in `actually_owning_ranks`, return its
           * index within the sorted vector `actually_owning_rank_list`,
           * using `guess` as a starting point to avoid the binary search
           * where possible.
           */
          unsigned int
          get_owning_rank_index(const unsigned int rank_in_owned_indices,
                                const unsigned int guess = 0)
          {
            AssertIndexRange(guess, actually_owning_rank_list.size());
            if (actually_owning_rank_list[guess] == rank_in_owned_indices)
              return guess;
            else
              {
                auto it = std::lower_bound(actually_owning_rank_list.begin(),
                                           actually_owning_rank_list.end(),
                                           rank_in_owned_indices);
                Assert(it != actually_owning_rank_list.end(),
                       ExcInternalError());
                Assert(*it == rank_in_owned_indices, ExcInternalError());
                return it - actually_owning_rank_list.begin();
              }
          }

        private:
          /**
           * Compute the partition from the global size of the index space
           * and the number of MPI ranks.
           */
          void
          partition(const IndexSet &owned_indices, const MPI_Comm &comm)
          {
#ifdef DEAL_II_WITH_MPI
            const unsigned int n_procs = n_mpi_processes(comm);
            const unsigned int my_rank = this_mpi_process(comm);

            size = owned_indices.size();

            Assert(size > 0, ExcNotImplemented());

            dofs_per_process = (size + n_procs - 1) / n_procs;
            if (dofs_per_process < range_minimum_grain_size)
              {
                dofs_per_process  = range_minimum_grain_size;
                stride_small_size = dofs_per_process * n_procs / size;
              }
            else
              stride_small_size = 1;
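            // Worked example for the branch above: for size = 1000000 and
            // n_procs = 1024, the even split of (1000000 + 1023) / 1024 =
            // 977 indices per rank is below range_minimum_grain_size, so
            // dofs_per_process becomes 4096 and stride_small_size =
            // 4096 * 1024 / 1000000 = 4, i.e., only every fourth rank holds
            // a dictionary range.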
            local_range.first  = get_index_offset(my_rank);
            local_range.second = get_index_offset(my_rank + 1);

            local_size = local_range.second - local_range.first;

            actually_owning_ranks = {};
            actually_owning_ranks.resize(local_size,
                                         numbers::invalid_unsigned_int);
#else
            (void)owned_indices;
            (void)comm;
#endif
          }
        };



        /**
         * Specialization of ConsensusAlgorithmProcess for the context of
         * Utilities::MPI::compute_index_owner() and
         * Utilities::MPI::Partitioner::set_ghost_indices() with additional
         * payload.
         */
        class ConsensusAlgorithmPayload
          : public ConsensusAlgorithmProcess<
              std::pair<types::global_dof_index, types::global_dof_index>,
              unsigned int>
        {
        public:
          /**
           * Constructor.
           */
          ConsensusAlgorithmPayload(const IndexSet &owned_indices,
                                    const IndexSet &indices_to_look_up,
                                    const MPI_Comm &comm,
                                    std::vector<unsigned int> &owning_ranks,
                                    const bool track_index_requests = false)
            : owned_indices(owned_indices)
            , indices_to_look_up(indices_to_look_up)
            , comm(comm)
            , my_rank(this_mpi_process(comm))
            , n_procs(n_mpi_processes(comm))
            , track_index_requests(track_index_requests)
            , owning_ranks(owning_ranks)
          {
            dict.reinit(owned_indices, comm);
            requesters.resize(dict.actually_owning_rank_list.size());
          }

          /**
           * The index space which describes the locally owned space.
           */
          const IndexSet &owned_indices;

          /**
           * The indices that are "ghosts" on a given rank and whose owner
           * rank within `owned_indices` should be looked up.
           */
          const IndexSet &indices_to_look_up;

          /**
           * The underlying MPI communicator.
           */
          const MPI_Comm comm;

          /**
           * The present MPI rank.
           */
          const unsigned int my_rank;

          /**
           * The total number of ranks participating in the MPI communicator
           * `comm`.
           */
          const unsigned int n_procs;

          /**
           * Controls whether the origin of ghost owner requests should be
           * tracked in `requesters` for later retrieval via
           * get_requesters().
           */
          const bool track_index_requests;

          /**
           * The result of the index owner computation: for each index in
           * `indices_to_look_up`, this vector contains the MPI rank of the
           * owner in `owned_indices`.
           */
          std::vector<unsigned int> &owning_ranks;

          /**
           * Keeps track of the origin of the requests. The outer vector has
           * as many entries as Dictionary::actually_owning_rank_list; each
           * entry contains, per requesting rank (the first pair member), a
           * sorted list of index intervals (the second pair member), stored
           * relative to the start of the local dictionary range.
           */
          std::vector<std::vector<
            std::pair<unsigned int,
                      std::vector<std::pair<unsigned int, unsigned int>>>>>
            requesters;

          /**
           * The dictionary handling the requests.
           */
          Dictionary dict;

          /**
           * Array to collect the indices to look up, sorted by the rank in
           * the dictionary.
           */
          std::map<unsigned int, std::vector<types::global_dof_index>>
            indices_to_look_up_by_dict_rank;

          /**
           * The field where the indices for incoming data from the process
           * are stored.
           */
          std::map<unsigned int, std::vector<unsigned int>> recv_indices;

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::answer_request(),
           * adding the owner of a particular index to request_buffer (and
           * keeping track of who requested a particular index in case that
           * information is also desired).
           */
          virtual void
          answer_request(
            const unsigned int other_rank,
            const std::vector<std::pair<types::global_dof_index,
                                        types::global_dof_index>> &buffer_recv,
            std::vector<unsigned int> &request_buffer) override
          {
            unsigned int owner_index = 0;
            for (const auto interval : buffer_recv)
              for (auto i = interval.first; i < interval.second; ++i)
                {
                  const unsigned int actual_owner =
                    dict.actually_owning_ranks[i - dict.local_range.first];
                  request_buffer.push_back(actual_owner);

                  if (track_index_requests)
                    append_index_origin(i, owner_index, other_rank);
                }
          }

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::compute_targets().
           */
          virtual std::vector<unsigned int>
          compute_targets() override
          {
            std::vector<unsigned int> targets;

            // 1) collect relevant processes and process local dict entries
            {
              unsigned int index       = 0;
              unsigned int owner_index = 0;
              for (auto i : indices_to_look_up)
                {
                  unsigned int other_rank = dict.dof_to_dict_rank(i);
                  if (other_rank == my_rank)
                    {
                      owning_ranks[index] =
                        dict.actually_owning_ranks[i - dict.local_range.first];
                      if (track_index_requests)
                        append_index_origin(i, owner_index, my_rank);
                    }
                  else if (targets.empty() || targets.back() != other_rank)
                    targets.push_back(other_rank);
                  index++;
                }
            }


            for (auto i : targets)
              {
                recv_indices[i]                    = {};
                indices_to_look_up_by_dict_rank[i] = {};
              }

            // 3) collect indices for each process
            {
              unsigned int index = 0;
              for (auto i : indices_to_look_up)
                {
                  unsigned int other_rank = dict.dof_to_dict_rank(i);
                  if (other_rank != my_rank)
                    {
                      recv_indices[other_rank].push_back(index);
                      indices_to_look_up_by_dict_rank[other_rank].push_back(i);
                    }
                  index++;
                }
            }

            Assert(targets.size() == recv_indices.size() &&
                     targets.size() == indices_to_look_up_by_dict_rank.size(),
                   ExcMessage("Size does not match!"));

            return targets;
          }

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::create_request().
           */
          virtual void
          create_request(const int other_rank,
                         std::vector<std::pair<types::global_dof_index,
                                               types::global_dof_index>>
                           &send_buffer) override
          {
            // create index set and compress data to be sent
            auto &   indices_i = indices_to_look_up_by_dict_rank[other_rank];
            IndexSet is(dict.size);
            is.add_indices(indices_i.begin(), indices_i.end());
            is.compress();

            for (auto interval = is.begin_intervals();
                 interval != is.end_intervals();
                 interval++)
              send_buffer.emplace_back(*interval->begin(),
                                       interval->last() + 1);
          }

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::prepare_buffer_for_answer().
           */
          virtual void
          prepare_buffer_for_answer(
            const int                  other_rank,
            std::vector<unsigned int> &recv_buffer) override
          {
            recv_buffer.resize(recv_indices[other_rank].size());
          }

          /**
           * Implementation of
           * Utilities::MPI::ConsensusAlgorithmProcess::read_answer().
           */
          virtual void
          read_answer(const int other_rank,
                      const std::vector<unsigned int> &recv_buffer) override
          {
            Assert(recv_indices[other_rank].size() == recv_buffer.size(),
                   ExcMessage("Sizes do not match!"));

            for (unsigned int j = 0; j < recv_indices[other_rank].size(); j++)
              owning_ranks[recv_indices[other_rank][j]] = recv_buffer[j];
          }

          /**
           * Resolve the origin of the requests by sending the information
           * accumulated in terms of the dictionary owners during the run of
           * the consensus algorithm back to the owner in the original
           * IndexSet. This requires additional point-to-point communication.
           *
           * @return A map of processes and the sets of indices they request
           *         from the current rank.
           */
          std::map<unsigned int, IndexSet>
          get_requesters()
          {
            Assert(track_index_requests,
                   ExcMessage("Must enable index range tracking in "
                              "constructor of ConsensusAlgorithmProcess"));

            std::map<unsigned int, dealii::IndexSet> requested_indices;

#ifdef DEAL_II_WITH_MPI

            static CollectiveMutex      mutex;
            CollectiveMutex::ScopedLock lock(mutex, comm);

            const int mpi_tag = Utilities::MPI::internal::Tags::
              consensus_algorithm_payload_get_requesters;

            // reserve enough slots for the requests ahead; depending on
            // whether the owning rank is one of the requesters or not, we
            // might have one request fewer to execute, so fill the requests
            // on demand.
            std::vector<MPI_Request> send_requests;
            send_requests.reserve(requesters.size());

            // We use an integer vector for the data exchange. Since we send
            // data associated to intervals with different requesters, we will
            // need to send (a) the MPI rank of the requester, (b) the number
            // of intervals directed to this requester, and (c) a list of
            // intervals, i.e., two integers per interval. The number of items
            // sent in total can be deduced both via the MPI status message at
            // the receiver side as well as by counting the buckets from
            // different requesters.
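            // For illustration of this layout: if rank 3 requested the local
            // intervals [0,2) and [5,8) and rank 7 requested [2,3), the
            // buffer for this dictionary range reads
            // [3, 2, 0, 2, 5, 8, 7, 1, 2, 3].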
            std::vector<std::vector<unsigned int>> send_data(requesters.size());
            for (unsigned int i = 0; i < requesters.size(); ++i)
              {
                // special code for our own indices
                if (dict.actually_owning_rank_list[i] == my_rank)
                  {
                    for (const auto &j : requesters[i])
                      {
                        const types::global_dof_index index_offset =
                          dict.get_index_offset(my_rank);
                        IndexSet &my_index_set = requested_indices[j.first];
                        my_index_set.set_size(owned_indices.size());
                        for (const auto &interval : j.second)
                          my_index_set.add_range(index_offset + interval.first,
                                                 index_offset +
                                                   interval.second);
                      }
                  }
                else
                  {
                    for (const auto &j : requesters[i])
                      {
                        send_data[i].push_back(j.first);
                        send_data[i].push_back(j.second.size());
                        for (const auto &interval : j.second)
                          {
                            send_data[i].push_back(interval.first);
                            send_data[i].push_back(interval.second);
                          }
                      }
                    send_requests.push_back(MPI_Request());
                    const int ierr =
                      MPI_Isend(send_data[i].data(),
                                send_data[i].size(),
                                MPI_UNSIGNED,
                                dict.actually_owning_rank_list[i],
                                mpi_tag,
                                comm,
                                &send_requests.back());
                    AssertThrowMPI(ierr);
                  }
              }

            // receive the data
            for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices;
                 ++c)
              {
                // wait for an incoming message
                MPI_Status   status;
                unsigned int ierr =
                  MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
                AssertThrowMPI(ierr);

                // retrieve size of incoming message
                int number_amount;
                ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
                AssertThrowMPI(ierr);

                // receive message
                Assert(number_amount % 2 == 0, ExcInternalError());
                std::vector<std::pair<unsigned int, unsigned int>> buffer(
                  number_amount / 2);
                ierr = MPI_Recv(buffer.data(),
                                number_amount,
                                MPI_UNSIGNED,
                                status.MPI_SOURCE,
                                status.MPI_TAG,
                                comm,
                                &status);
                AssertThrowMPI(ierr);

                // unpack the message and translate the dictionary-local
                // indices coming via MPI to the global index range
                const types::global_dof_index index_offset =
                  dict.get_index_offset(status.MPI_SOURCE);
                unsigned int offset = 0;
                while (offset < buffer.size())
                  {
                    AssertIndexRange(offset + buffer[offset].second,
                                     buffer.size());

                    IndexSet my_index_set(owned_indices.size());
                    for (unsigned int i = offset + 1;
                         i < offset + buffer[offset].second + 1;
                         ++i)
                      my_index_set.add_range(index_offset + buffer[i].first,
                                             index_offset + buffer[i].second);

                    // the underlying index set is able to merge ranges coming
                    // from different ranks due to the partitioning in the
                    // dictionary
                    IndexSet &index_set =
                      requested_indices[buffer[offset].first];
                    if (index_set.size() == 0)
                      index_set.set_size(owned_indices.size());
                    index_set.add_indices(my_index_set);

                    offset += buffer[offset].second + 1;
                  }
                AssertDimension(offset, buffer.size());
              }

            if (send_requests.size() > 0)
              {
                const auto ierr = MPI_Waitall(send_requests.size(),
                                              send_requests.data(),
                                              MPI_STATUSES_IGNORE);
                AssertThrowMPI(ierr);
              }


#  ifdef DEBUG
            for (const auto &it : requested_indices)
              {
                IndexSet copy_set = it.second;
                copy_set.subtract_set(owned_indices);
                Assert(copy_set.n_elements() == 0,
                       ExcMessage("The indices requested from the current "
                                  "MPI rank should be locally owned here!"));
              }
#  endif

#endif // DEAL_II_WITH_MPI

            return requested_indices;
          }

        private:
          /**
           * Store the fact that `rank_of_request` asked for `index` in the
           * `requesters` field. We first find the owner of the requested
           * index, using `owner_index` as a guess to avoid the binary
           * search in Dictionary::get_owning_rank_index() when the same
           * rank is looked up repeatedly. We then append the request to the
           * interval list of the requesting rank, either by extending the
           * last interval or by starting a new one, exploiting the fact
           * that the incoming indices are sorted.
           */
          void
          append_index_origin(const types::global_dof_index index,
                              unsigned int &     owner_index,
                              const unsigned int rank_of_request)
          {
            // remember who requested which index. We want to use an
            // std::vector with simple addressing, via a good guess from the
            // preceding index, rather than std::map, because this is an inner
            // loop and it avoids the map lookup in every iteration
            const unsigned int rank_of_owner =
              dict.actually_owning_ranks[index - dict.local_range.first];
            owner_index =
              dict.get_owning_rank_index(rank_of_owner, owner_index);
            if (requesters[owner_index].empty() ||
                requesters[owner_index].back().first != rank_of_request)
              requesters[owner_index].emplace_back(
                rank_of_request,
                std::vector<std::pair<unsigned int, unsigned int>>());
            if (requesters[owner_index].back().second.empty() ||
                requesters[owner_index].back().second.back().second !=
                  index - dict.local_range.first)
              requesters[owner_index].back().second.emplace_back(
                index - dict.local_range.first,
                index - dict.local_range.first + 1);
            else
              ++requesters[owner_index].back().second.back().second;
          }
        };

      } // namespace ComputeIndexOwner
    }   // namespace internal
  }     // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE

#endif
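Usage sketch: the following minimal example shows how the pieces above are combined, modeled on Utilities::MPI::compute_index_owner() in mpi.cc. It is an illustration against this header's interfaces, not the verbatim library implementation; the function name compute_owners_sketch is hypothetical.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>

using namespace dealii;

// For each element of indices_to_look_up, determine the MPI rank that
// owns it within owned_indices.
std::vector<unsigned int>
compute_owners_sketch(const IndexSet &owned_indices,
                      const IndexSet &indices_to_look_up,
                      const MPI_Comm &comm)
{
  // one answer slot per index to look up
  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

  // the payload builds the dictionary in its constructor ...
  Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmPayload
    process(owned_indices, indices_to_look_up, comm, owning_ranks);

  // ... and the consensus algorithm drives the request/answer exchange
  // through compute_targets(), create_request(), answer_request(),
  // prepare_buffer_for_answer(), and read_answer() defined above
  Utilities::MPI::ConsensusAlgorithmSelector<
    std::pair<types::global_dof_index, types::global_dof_index>,
    unsigned int>
    consensus_algorithm(process, comm);
  consensus_algorithm.run();

  return owning_ranks;
}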