mpi_compute_index_owner_internal.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2022 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/config.h>

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_compute_index_owner_internal.h>

#include <boost/serialization/utility.hpp>

#include <algorithm>

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    namespace internal
    {
      namespace ComputeIndexOwner
      {
        const FlexibleIndexStorage::index_type
          FlexibleIndexStorage::invalid_index_value;



        FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
          : use_vector(use_vector)
          , size(0)
        {}



        void
        FlexibleIndexStorage::reinit(const bool        use_vector,
                                     const bool        index_range_contiguous,
                                     const std::size_t size)
        {
          this->use_vector = use_vector;
          this->size       = size;

          data = {};
          data_map.clear();

          // in case we have contiguous indices, only fill the vector upon
          // first request in `fill`
          if (use_vector && !index_range_contiguous)
            data.resize(size, invalid_index_value);
        }



        void
        FlexibleIndexStorage::fill(
          const std::size_t                       start,
          const std::size_t                       end,
          const FlexibleIndexStorage::index_type &value)
        {
          AssertIndexRange(start, size);
          AssertIndexRange(end, size + 1);

          if (use_vector)
            {
              if (data.empty() && end > start)
                {
                  // in debug mode, we want to track whether we set all
                  // indices, so we first fill an invalid index and only later
                  // the actual ones; in release mode, we simply assign the
                  // given rank to the complete vector the first time we pass
                  // through this function, to avoid touching data
                  // unnecessarily (and overwrite the smaller pieces later),
                  // as the locally owned part comes first
#ifdef DEBUG
                  data.resize(size, invalid_index_value);
                  std::fill(data.begin() + start, data.begin() + end, value);
#else
                  data.resize(size, value);
#endif
                }
              else
                {
                  AssertDimension(data.size(), size);
                  std::fill(data.begin() + start, data.begin() + end, value);
                }
            }
          else
            {
              for (auto i = start; i < end; ++i)
                data_map[i] = value;
            }
        }
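
        // The comment in fill() above describes a first-touch optimization:
        // in release mode the very first fill() call resizes the vector and
        // assigns its rank everywhere at once, and later calls only
        // overwrite smaller pieces. A minimal sketch of the resulting call
        // pattern (illustrative only, with made-up sizes and ranks):
        //
        //   FlexibleIndexStorage storage(/* use_vector = */ true);
        //   storage.reinit(true, /* index_range_contiguous = */ true, 100);
        //   storage.fill(0, 60, 7);   // first call: whole vector set to 7
        //   storage.fill(60, 80, 2);  // later calls overwrite smaller pieces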



        FlexibleIndexStorage::index_type &
        FlexibleIndexStorage::operator[](const std::size_t index)
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              AssertDimension(data.size(), size);
              return data[index];
            }
          else
            {
              return data_map.try_emplace(index, invalid_index_value)
                .first->second;
            }
        }



        FlexibleIndexStorage::index_type
        FlexibleIndexStorage::operator[](const std::size_t index) const
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              AssertDimension(data.size(), size);
              return data[index];
            }
          else
            {
              if (data_map.find(index) == data_map.end())
                return invalid_index_value;

              return data_map.at(index);
            }
        }



        bool
        FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
        {
          AssertIndexRange(index, size);

          if (use_vector)
            {
              if (data.empty())
                return false;

              AssertDimension(data.size(), size);
              return data[index] != invalid_index_value;
            }
          else
            return data_map.find(index) != data_map.end();
        }
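
        // FlexibleIndexStorage, as used above, stores the owner of each
        // dictionary-local index either in a std::vector (dense case, chosen
        // in Dictionary::reinit() below when the owned indices fill a
        // sufficiently large fraction of the index space) or in a std::map
        // (sparse case). Both back ends expose the same interface, so the
        // Dictionary code does not need to distinguish the two. A minimal
        // usage sketch with invented values:
        //
        //   FlexibleIndexStorage owners(/* use_vector = */ false);
        //   owners.reinit(false, false, /* size = */ 1000);
        //   owners.fill(10, 20, /* rank = */ 3);
        //   // owners[15] == 3, owners.entry_has_been_set(500) == false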



        void
        Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
        {
          // 1) set up the partition
          this->partition(owned_indices, comm);

          unsigned int my_rank = this_mpi_process(comm);

          types::global_dof_index dic_local_received = 0;
          std::map<unsigned int,
                   std::vector<std::pair<types::global_dof_index,
                                         types::global_dof_index>>>
            buffers;

          const auto owned_indices_size_actual =
            Utilities::MPI::sum(owned_indices.n_elements(), comm);

          actually_owning_ranks.reinit((owned_indices_size_actual *
                                        sparsity_factor) > owned_indices.size(),
                                       owned_indices_size_actual ==
                                         owned_indices.size(),
                                       locally_owned_size);

          // 2) collect relevant processes and process local dict entries
          for (auto interval = owned_indices.begin_intervals();
               interval != owned_indices.end_intervals();
               ++interval)
            {
              // Due to the granularity of the dictionary, the interval
              // might be split into several ranges of processor owner
              // ranks. Here, we process the interval by breaking it into
              // smaller pieces in terms of the dictionary number.
              std::pair<types::global_dof_index, types::global_dof_index>
                index_range(*interval->begin(), interval->last() + 1);

              AssertThrow(index_range.second <= size, ExcInternalError());

              while (index_range.first != index_range.second)
                {
                  Assert(index_range.first < index_range.second,
                         ExcInternalError());

                  const unsigned int owner =
                    dof_to_dict_rank(index_range.first);

                  // this explicitly picks up the formula of
                  // dof_to_dict_rank, so the two places must be in sync
                  const types::global_dof_index next_index =
                    std::min(get_index_offset(owner + 1), index_range.second);

                  Assert(next_index > index_range.first, ExcInternalError());

#ifdef DEBUG
                  // make sure that the owner is the same on the current
                  // interval
                  for (types::global_dof_index i = index_range.first + 1;
                       i < next_index;
                       ++i)
                    AssertDimension(owner, dof_to_dict_rank(i));
#endif

                  // add the interval, either to the local range or into a
                  // buffer to be sent to another processor
                  if (owner == my_rank)
                    {
                      actually_owning_ranks.fill(index_range.first -
                                                   local_range.first,
                                                 next_index - local_range.first,
                                                 my_rank);
                      dic_local_received += next_index - index_range.first;
                      if (actually_owning_rank_list.empty())
                        actually_owning_rank_list.push_back(my_rank);
                    }
                  else
                    buffers[owner].emplace_back(index_range.first, next_index);

                  index_range.first = next_index;
                }
            }

#ifdef DEAL_II_WITH_MPI
          n_dict_procs_in_owned_indices = buffers.size();
          std::vector<MPI_Request> request;

          // Check if index set space is partitioned globally without gaps.
          if (owned_indices_size_actual == owned_indices.size())
            {
              // no gaps: setup is simple! Processes send their locally owned
              // indices to the dictionary. The dictionary stores the sending
              // rank for each index. The dictionary knows exactly when it is
              // set up, namely once all indices it is responsible for have
              // been processed.

              request.reserve(n_dict_procs_in_owned_indices);

              // protect the following communication steps using a mutex:
              static CollectiveMutex      mutex;
              CollectiveMutex::ScopedLock lock(mutex, comm);

              const int mpi_tag =
                Utilities::MPI::internal::Tags::dictionary_reinit;


              // 3) send messages with local dofs to the right dict process
              for (const auto &rank_pair : buffers)
                {
                  request.push_back(MPI_Request());
                  const int ierr = MPI_Isend(rank_pair.second.data(),
                                             rank_pair.second.size() * 2,
                                             DEAL_II_DOF_INDEX_MPI_TYPE,
                                             rank_pair.first,
                                             mpi_tag,
                                             comm,
                                             &request.back());
                  AssertThrowMPI(ierr);
                }

              // 4) receive messages until all dofs in dict are processed
              while (this->locally_owned_size != dic_local_received)
                {
                  // wait for an incoming message
                  MPI_Status status;
                  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
                  AssertThrowMPI(ierr);

                  // retrieve size of incoming message
                  int number_amount;
                  ierr = MPI_Get_count(&status,
                                       DEAL_II_DOF_INDEX_MPI_TYPE,
                                       &number_amount);
                  AssertThrowMPI(ierr);

                  const auto other_rank = status.MPI_SOURCE;
                  actually_owning_rank_list.push_back(other_rank);

                  // receive message
                  Assert(number_amount % 2 == 0, ExcInternalError());
                  std::vector<
                    std::pair<types::global_dof_index, types::global_dof_index>>
                    buffer(number_amount / 2);
                  ierr = MPI_Recv(buffer.data(),
                                  number_amount,
                                  DEAL_II_DOF_INDEX_MPI_TYPE,
                                  status.MPI_SOURCE,
                                  status.MPI_TAG,
                                  comm,
                                  MPI_STATUS_IGNORE);
                  AssertThrowMPI(ierr);
                  // process message: loop over all intervals
                  for (auto interval : buffer)
                    {
#  ifdef DEBUG
                      for (types::global_dof_index i = interval.first;
                           i < interval.second;
                           i++)
                        Assert(actually_owning_ranks.entry_has_been_set(
                                 i - local_range.first) == false,
                               ExcInternalError());
                      Assert(interval.first >= local_range.first &&
                               interval.first < local_range.second,
                             ExcInternalError());
                      Assert(interval.second > local_range.first &&
                               interval.second <= local_range.second,
                             ExcInternalError());
#  endif

                      actually_owning_ranks.fill(interval.first -
                                                   local_range.first,
                                                 interval.second -
                                                   local_range.first,
                                                 other_rank);
                      dic_local_received += interval.second - interval.first;
                    }
                }
            }
          else
            {
              // with gap: use a ConsensusAlgorithm to determine when all
              // dictionaries have been set up.

              // 3/4) use a ConsensusAlgorithm to send messages with local
              // dofs to the right dict process

              using RequestType = std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>;

              ConsensusAlgorithms::selector<RequestType>(
                /* targets = */
                [&buffers]() {
                  std::vector<unsigned int> targets;
                  targets.reserve(buffers.size());
                  for (const auto &rank_pair : buffers)
                    targets.emplace_back(rank_pair.first);

                  return targets;
                }(),

                /* create_request = */
                [&buffers](const unsigned int target_rank) -> RequestType {
                  return buffers.at(target_rank);
                },

                /* process_request = */
                [&](const unsigned int source_rank,
                    const RequestType &request) -> void {
                  // process message: loop over all intervals
                  for (auto interval : request)
                    {
#  ifdef DEBUG
                      for (types::global_dof_index i = interval.first;
                           i < interval.second;
                           i++)
                        Assert(
                          actually_owning_ranks.entry_has_been_set(
                            i - local_range.first) == false,
                          ExcMessage(
                            "Multiple processes seem to own the same global index. "
                            "A possible reason is that the sets of locally owned "
                            "indices are not distinct."));
                      Assert(interval.first < interval.second,
                             ExcInternalError());
                      Assert(
                        local_range.first <= interval.first &&
                          interval.second <= local_range.second,
                        ExcMessage(
                          "The specified interval is not handled by the current process."));
#  endif
                      actually_owning_ranks.fill(interval.first -
                                                   local_range.first,
                                                 interval.second -
                                                   local_range.first,
                                                 source_rank);
                    }
                  actually_owning_rank_list.push_back(source_rank);
                },

                comm);
            }

          std::sort(actually_owning_rank_list.begin(),
                    actually_owning_rank_list.end());

          for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
            Assert(actually_owning_rank_list[i] >
                     actually_owning_rank_list[i - 1],
                   ExcInternalError());

          // 5) make sure that all messages have been sent
          if (request.size() > 0)
            {
              const int ierr = MPI_Waitall(request.size(),
                                           request.data(),
                                           MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }

#else
          Assert(buffers.empty(), ExcInternalError());
          (void)comm;
          (void)dic_local_received;
#endif
        }
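
        // Dictionary::reinit() above populates the dictionary in one of two
        // ways: if the union of all locally owned index sets covers the
        // whole [0, size) range without gaps, every dictionary rank knows
        // how many entries it must receive, and a plain MPI_Isend/MPI_Probe
        // loop suffices; otherwise the number of incoming messages is not
        // known in advance and a consensus algorithm is used instead. A
        // rough sketch of the message payload in the gap-free branch, with
        // values made up for illustration: a process owning the index
        // ranges [40, 45) and [47, 50), both belonging to dictionary rank 2,
        // sends the flattened half-open intervals
        //
        //   {40, 45, 47, 50}
        //
        // to rank 2, which then records the sender's rank for each of those
        // indices in actually_owning_ranks.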



        void
        Dictionary::partition(const IndexSet &owned_indices,
                              const MPI_Comm  comm)
        {
          const unsigned int n_procs = n_mpi_processes(comm);
          const unsigned int my_rank = this_mpi_process(comm);

          size = owned_indices.size();

          Assert(size > 0, ExcNotImplemented());

          dofs_per_process =
            std::max<types::global_dof_index>((size + n_procs - 1) / n_procs,
                                              range_minimum_grain_size);

          stride_small_size =
            std::max<unsigned int>(dofs_per_process * n_procs / size, 1);

          local_range.first  = get_index_offset(my_rank);
          local_range.second = get_index_offset(my_rank + 1);

          locally_owned_size = local_range.second - local_range.first;
        }
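
        // A small worked example of the partition above (numbers invented
        // for illustration): with size = 1000 global indices, n_procs = 4,
        // and range_minimum_grain_size = 64, one obtains
        //
        //   dofs_per_process  = max((1000 + 3) / 4, 64) = 250
        //   stride_small_size = max(250 * 4 / 1000, 1)  = 1
        //
        // so rank r is responsible for the dictionary range
        // [250 * r, 250 * (r + 1)) as returned by get_index_offset(). For a
        // very small index space the grain size dominates and, roughly,
        // only every stride_small_size-th rank receives a non-empty
        // dictionary range.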



        ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
          const IndexSet            &owned_indices,
          const IndexSet            &indices_to_look_up,
          const MPI_Comm             comm,
          std::vector<unsigned int> &owning_ranks,
          const bool                 track_index_requesters)
          : owned_indices(owned_indices)
          , indices_to_look_up(indices_to_look_up)
          , comm(comm)
          , my_rank(this_mpi_process(comm))
          , n_procs(n_mpi_processes(comm))
          , track_index_requesters(track_index_requesters)
          , owning_ranks(owning_ranks)
        {
          dict.reinit(owned_indices, comm);
          requesters.resize(dict.actually_owning_rank_list.size());
        }



        void
        ConsensusAlgorithmsPayload::answer_request(
          const unsigned int other_rank,
          const std::vector<std::pair<types::global_dof_index,
                                      types::global_dof_index>> &buffer_recv,
          std::vector<unsigned int> &request_buffer)
        {
          unsigned int owner_index_guess = 0;
          for (const auto &interval : buffer_recv)
            for (auto i = interval.first; i < interval.second; ++i)
              {
                const unsigned int actual_owner =
                  dict.actually_owning_ranks[i - dict.local_range.first];
                request_buffer.push_back(actual_owner);

                if (track_index_requesters)
                  append_index_origin(i - dict.local_range.first,
                                      other_rank,
                                      actual_owner,
                                      owner_index_guess);
              }
        }
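
        // answer_request() above is the dictionary side of the look-up: for
        // every global index i in the received intervals it returns
        // dict.actually_owning_ranks[i - local_range.first], i.e. one owner
        // rank per queried index, in query order. A sketch with invented
        // numbers: a request {(40, 43)} sent to the dictionary rank
        // responsible for [0, 250) is answered with the three owner ranks
        // stored for indices 40, 41, 42, e.g. {5, 5, 7}.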



        std::vector<unsigned int>
        ConsensusAlgorithmsPayload::compute_targets()
        {
          std::vector<unsigned int> targets;

          indices_to_look_up_by_dict_rank.clear();
          unsigned int index             = 0;
          unsigned int owner_index_guess = 0;
          for (auto i : indices_to_look_up)
            {
              unsigned int other_rank = dict.dof_to_dict_rank(i);
              if (other_rank == my_rank)
                {
                  owning_ranks[index] =
                    dict.actually_owning_ranks[i - dict.local_range.first];
                  if (track_index_requesters)
                    append_index_origin(i - dict.local_range.first,
                                        my_rank,
                                        owning_ranks[index],
                                        owner_index_guess);
                }
              else
                {
                  if (targets.empty() || targets.back() != other_rank)
                    targets.push_back(other_rank);
                  auto &indices = indices_to_look_up_by_dict_rank[other_rank];
                  indices.first.push_back(i);
                  indices.second.push_back(index);
                }
              ++index;
            }

          Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
                 ExcMessage("Size does not match!"));

          return targets;
        }
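
        // compute_targets() also fills indices_to_look_up_by_dict_rank: for
        // each dictionary rank it stores (a) the global indices that need to
        // be asked for and (b) the positions of those indices within
        // owning_ranks, so that read_answer() can later scatter the received
        // owners back into the right slots. Indices whose dictionary range
        // lives on the current rank are resolved immediately, without any
        // communication.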



        void
        ConsensusAlgorithmsPayload::create_request(
          const unsigned int other_rank,
          std::vector<std::pair<types::global_dof_index,
                                types::global_dof_index>> &send_buffer)
        {
          // create index set and compress data to be sent
          auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
          IndexSet is(dict.size);
          is.add_indices(indices_i.begin(), indices_i.end());
          is.compress();

          for (auto interval = is.begin_intervals();
               interval != is.end_intervals();
               ++interval)
            send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
        }
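
        // The IndexSet detour in create_request() compresses consecutive
        // indices into half-open intervals before they go over the wire. A
        // small example with made-up values: the indices {40, 41, 42, 47}
        // destined for one dictionary rank are sent as the two pairs
        // (40, 43) and (47, 48) instead of four individual indices.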



        void
        ConsensusAlgorithmsPayload::read_answer(
          const unsigned int               other_rank,
          const std::vector<unsigned int> &recv_buffer)
        {
          const auto &recv_indices =
            indices_to_look_up_by_dict_rank[other_rank].second;
          AssertDimension(recv_indices.size(), recv_buffer.size());
          for (unsigned int j = 0; j < recv_indices.size(); ++j)
            owning_ranks[recv_indices[j]] = recv_buffer[j];
        }



        std::map<unsigned int, IndexSet>
        ConsensusAlgorithmsPayload::get_requesters()
        {
          Assert(track_index_requesters,
                 ExcMessage("Must enable index range tracking in "
                            "constructor of ConsensusAlgorithmProcess"));

          std::map<unsigned int, dealii::IndexSet> requested_indices;

#ifdef DEAL_II_WITH_MPI

          static CollectiveMutex      mutex;
          CollectiveMutex::ScopedLock lock(mutex, comm);

          const int mpi_tag = Utilities::MPI::internal::Tags::
            consensus_algorithm_payload_get_requesters;

          // reserve enough slots for the requests ahead; depending on
          // whether the owning rank is one of the requesters or not, we
          // might have one request less to execute, so fill the requests
          // on demand.
          std::vector<MPI_Request> send_requests;
          send_requests.reserve(requesters.size());

          // We use an integer vector for the data exchange. Since we send
          // data associated to intervals with different requesters, we will
          // need to send (a) the MPI rank of the requester, (b) the number
          // of intervals directed to this requester, and (c) a list of
          // intervals, i.e., two integers per interval. The number of items
          // sent in total can be deduced both via the MPI status message at
          // the receiver side as well as by counting the buckets from
          // different requesters.
          std::vector<std::vector<types::global_dof_index>> send_data(
            requesters.size());
          for (unsigned int i = 0; i < requesters.size(); ++i)
            {
              // special code for our own indices
              if (dict.actually_owning_rank_list[i] == my_rank)
                {
                  for (const auto &j : requesters[i])
                    {
                      const types::global_dof_index index_offset =
                        dict.get_index_offset(my_rank);
                      IndexSet &my_index_set = requested_indices[j.first];
                      my_index_set.set_size(owned_indices.size());
                      for (const auto &interval : j.second)
                        my_index_set.add_range(index_offset + interval.first,
                                               index_offset + interval.second);
                    }
                }
              else
                {
                  for (const auto &j : requesters[i])
                    {
                      send_data[i].push_back(j.first);
                      send_data[i].push_back(j.second.size());
                      for (const auto &interval : j.second)
                        {
                          send_data[i].push_back(interval.first);
                          send_data[i].push_back(interval.second);
                        }
                    }
                  send_requests.push_back(MPI_Request());
                  const int ierr =
                    MPI_Isend(send_data[i].data(),
                              send_data[i].size(),
                              Utilities::MPI::mpi_type_id_for_type<
                                types::global_dof_index>,
                              dict.actually_owning_rank_list[i],
                              mpi_tag,
                              comm,
                              &send_requests.back());
                  AssertThrowMPI(ierr);
                }
            }

          // receive the data
          for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
            {
              // wait for an incoming message
              MPI_Status status;
              int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
              AssertThrowMPI(ierr);

              // retrieve size of incoming message
              int number_amount;
              ierr = MPI_Get_count(
                &status,
                Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
                &number_amount);
              AssertThrowMPI(ierr);

              // receive message
              Assert(number_amount % 2 == 0, ExcInternalError());
              std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>
                buffer(number_amount / 2);
              ierr = MPI_Recv(
                buffer.data(),
                number_amount,
                Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
                status.MPI_SOURCE,
                status.MPI_TAG,
                comm,
                &status);
              AssertThrowMPI(ierr);

              // unpack the message and translate the dictionary-local
              // indices coming via MPI to the global index range
              const types::global_dof_index index_offset =
                dict.get_index_offset(status.MPI_SOURCE);
              unsigned int offset = 0;
              while (offset < buffer.size())
                {
                  AssertIndexRange(offset + buffer[offset].second,
                                   buffer.size());

                  IndexSet my_index_set(owned_indices.size());
                  for (unsigned int i = offset + 1;
                       i < offset + buffer[offset].second + 1;
                       ++i)
                    my_index_set.add_range(index_offset + buffer[i].first,
                                           index_offset + buffer[i].second);

                  // the underlying index set is able to merge ranges coming
                  // from different ranks due to the partitioning in the
                  // dictionary
                  IndexSet &index_set = requested_indices[buffer[offset].first];
                  if (index_set.size() == 0)
                    index_set.set_size(owned_indices.size());
                  index_set.add_indices(my_index_set);

                  offset += buffer[offset].second + 1;
                }
              AssertDimension(offset, buffer.size());
            }

          if (send_requests.size() > 0)
            {
              const auto ierr = MPI_Waitall(send_requests.size(),
                                            send_requests.data(),
                                            MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }


#  ifdef DEBUG
          for (const auto &it : requested_indices)
            {
              IndexSet copy_set = it.second;
              copy_set.subtract_set(owned_indices);
              Assert(copy_set.n_elements() == 0,
                     ExcMessage(
                       "The indices requested from the current "
                       "MPI rank should be locally owned here!"));
            }
#  endif

#endif // DEAL_II_WITH_MPI

          return requested_indices;
        }
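
        // The send_data buffers assembled in get_requesters() use a simple
        // self-describing layout per target: requester rank, number of
        // intervals, then the intervals themselves. With invented numbers, a
        // dictionary rank that recorded requests from rank 3 for the
        // dictionary-local intervals [0, 2) and [5, 6) of some owner packs
        //
        //   {3, 2, 0, 2, 5, 6}
        //
        // and the receiving owner adds the sender's dictionary offset
        // (index_offset above) to translate the local intervals back into
        // global indices.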



        void
        ConsensusAlgorithmsPayload::append_index_origin(
          const types::global_dof_index index_within_dict,
          const unsigned int            rank_of_request,
          const unsigned int            rank_of_owner,
          unsigned int                 &owner_index_guess)
        {
          // remember who requested which index. We want to use an
          // std::vector with simple addressing, via a good guess from the
          // preceding index, rather than std::map, because this is an inner
          // loop and it avoids the map lookup in every iteration
          owner_index_guess =
            dict.get_owning_rank_index(rank_of_owner, owner_index_guess);

          auto &request = requesters[owner_index_guess];
          if (request.empty() || request.back().first != rank_of_request)
            request.emplace_back(
              rank_of_request,
              std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>());

          auto &intervals = request.back().second;
          if (intervals.empty() || intervals.back().second != index_within_dict)
            intervals.emplace_back(index_within_dict, index_within_dict + 1);
          else
            ++intervals.back().second;
        }
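
        // append_index_origin() grows intervals on the fly: if the index
        // being recorded continues the last stored interval for the same
        // requester, the interval's upper bound is simply incremented,
        // otherwise a new length-one interval is started. Example with
        // made-up indices 4, 5, 6, 9 recorded in order for one requester:
        // the stored intervals end up as (4, 7) and (9, 10).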
      } // namespace ComputeIndexOwner
    }   // namespace internal
  }     // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE