Reference documentation for deal.II version 9.5.0
index_set.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2005 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16#include <deal.II/base/index_set.h>
17#include <deal.II/base/memory_consumption.h>
18#include <deal.II/base/mpi.h>
19
20#include <deal.II/lac/exceptions.h>
21
22#include <vector>
23
24#ifdef DEAL_II_WITH_TRILINOS
25# ifdef DEAL_II_WITH_MPI
26# include <Epetra_MpiComm.h>
27# endif
28# include <Epetra_Map.h>
29# include <Epetra_SerialComm.h>
30#endif
31
32DEAL_II_NAMESPACE_OPEN
33
34
35
36#ifdef DEAL_II_WITH_TRILINOS
37
38// the 64-bit path uses a few different names, so put that into a separate
39// implementation
40
41# ifdef DEAL_II_WITH_64BIT_INDICES
42
43IndexSet::IndexSet(const Epetra_BlockMap &map)
44 : is_compressed(true)
45 , index_space_size(1 + map.MaxAllGID64())
46 , largest_range(numbers::invalid_unsigned_int)
47{
48 Assert(map.MinAllGID64() == 0,
50 "The Epetra_BlockMap does not contain the global index 0, "
51 "which means some entries are not present on any processor."));
52
53 // For a contiguous map, we do not need to go through the whole data...
54 if (map.LinearMap())
55 add_range(size_type(map.MinMyGID64()), size_type(map.MaxMyGID64() + 1));
56 else
57 {
58 const size_type n_indices = map.NumMyElements();
59 size_type * indices =
60 reinterpret_cast<size_type *>(map.MyGlobalElements64());
61 add_indices(indices, indices + n_indices);
62 }
63 compress();
64}
65
66# else
67
68// this is the standard 32-bit implementation
69
70IndexSet::IndexSet(const Epetra_BlockMap &map)
71 : is_compressed(true)
72 , index_space_size(1 + map.MaxAllGID())
73 , largest_range(numbers::invalid_unsigned_int)
74{
75 Assert(map.MinAllGID() == 0,
77 "The Epetra_BlockMap does not contain the global index 0, "
78 "which means some entries are not present on any processor."));
79
80 // For a contiguous map, we do not need to go through the whole data...
81 if (map.LinearMap())
82 add_range(size_type(map.MinMyGID()), size_type(map.MaxMyGID() + 1));
83 else
84 {
85 const size_type n_indices = map.NumMyElements();
86 unsigned int * indices =
87 reinterpret_cast<unsigned int *>(map.MyGlobalElements());
88 add_indices(indices, indices + n_indices);
89 }
90 compress();
91}
92
93# endif
94
95#endif // ifdef DEAL_II_WITH_TRILINOS
96
97
98
99void
100IndexSet::do_compress() const
101{
102 // we will, in the following, modify mutable variables. this can only
103 // work in multithreaded applications if we lock the data structures
104 // via a mutex, so that users can call 'const' functions from threads
105 // in parallel (and these 'const' functions can then call compress()
106 // which itself calls the current function)
107 std::lock_guard<std::mutex> lock(compress_mutex);
108
109 // see if any of the contiguous ranges can be merged. do not use
110 // std::vector::erase in-place as it is quadratic in the number of
111 // ranges. since the ranges are sorted by their first index, determining
112 // overlap isn't all that hard
113 std::vector<Range>::iterator store = ranges.begin();
114 for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end();)
115 {
116 std::vector<Range>::iterator next = i;
117 ++next;
118
119 size_type first_index = i->begin;
120 size_type last_index = i->end;
121
122 // see if we can merge any of the following ranges
123 while (next != ranges.end() && (next->begin <= last_index))
124 {
125 last_index = std::max(last_index, next->end);
126 ++next;
127 }
128 i = next;
129
130 // store the new range in the slot we last occupied
131 *store = Range(first_index, last_index);
132 ++store;
133 }
134 // use a compact array with exactly the right amount of storage
135 if (store != ranges.end())
136 {
137 std::vector<Range> new_ranges(ranges.begin(), store);
138 ranges.swap(new_ranges);
139 }
140
141 // now compute indices within set and the range with most elements
142 size_type next_index = 0, largest_range_size = 0;
143 for (std::vector<Range>::iterator i = ranges.begin(); i != ranges.end(); ++i)
144 {
145 Assert(i->begin < i->end, ExcInternalError());
146
147 i->nth_index_in_set = next_index;
148 next_index += (i->end - i->begin);
149 if (i->end - i->begin > largest_range_size)
150 {
151 largest_range_size = i->end - i->begin;
152 largest_range = i - ranges.begin();
153 }
154 }
155 is_compressed = true;
156
157 // check that next_index is correct. needs to be after the previous
158 // statement because we otherwise will get into an endless loop
159 Assert(next_index == n_elements(), ExcInternalError());
160}
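A minimal usage sketch (not part of index_set.cc) of what do_compress() achieves, assuming only the public interface from <deal.II/base/index_set.h>; compress() forwards to do_compress() when the set is not yet compressed:

  #include <deal.II/base/index_set.h>
  #include <iostream>

  int main()
  {
    using namespace dealii;

    IndexSet set(100);
    set.add_range(10, 20);
    set.add_range(15, 30); // overlaps the previous range
    set.add_index(60);
    set.compress();        // merges [10,20) and [15,30) into [10,30)

    std::cout << set.n_intervals() << ' ' << set.n_elements() << std::endl;
    // prints "2 21"
  }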
161
162
163
164#ifndef DOXYGEN
165IndexSet
166IndexSet::operator&(const IndexSet &is) const
167{
168 Assert(size() == is.size(), ExcDimensionMismatch(size(), is.size()));
169
170 compress();
171 is.compress();
172
173 std::vector<Range>::const_iterator r1 = ranges.begin(),
174 r2 = is.ranges.begin();
175 IndexSet result(size());
176
177 while ((r1 != ranges.end()) && (r2 != is.ranges.end()))
178 {
179 // if r1 and r2 do not overlap at all, then move the pointer that sits
180 // to the left of the other up by one
181 if (r1->end <= r2->begin)
182 ++r1;
183 else if (r2->end <= r1->begin)
184 ++r2;
185 else
186 {
187 // the ranges must overlap somehow
188 Assert(((r1->begin <= r2->begin) && (r1->end > r2->begin)) ||
189 ((r2->begin <= r1->begin) && (r2->end > r1->begin)),
190 ExcInternalError());
191
192 // add the overlapping range to the result
193 result.add_range(std::max(r1->begin, r2->begin),
194 std::min(r1->end, r2->end));
195
196 // now move that iterator that ends earlier one up. note that it has
197 // to be this one because a subsequent range may still have a chance
198 // of overlapping with the range that ends later
199 if (r1->end <= r2->end)
200 ++r1;
201 else
202 ++r2;
203 }
204 }
205
206 result.compress();
207 return result;
208}
209#endif
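A small sketch of operator& (set intersection), not part of index_set.cc; both operands must have the same size():

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet a(50), b(50); // operator& asserts equal sizes
    a.add_range(0, 20);
    b.add_range(10, 30);
    b.add_index(40);

    const IndexSet c = a & b; // contains exactly [10,20)
    // c.n_elements() == 10, c.is_element(40) == false
    (void)c;
  }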
210
211
212
213IndexSet
214IndexSet::get_view(const size_type begin, const size_type end) const
215{
216 Assert(begin <= end,
217 ExcMessage("End index needs to be larger or equal to begin index!"));
218 Assert(end <= size(), ExcMessage("Given range exceeds index set dimension"));
219
220 IndexSet result(end - begin);
221 std::vector<Range>::const_iterator r1 = ranges.begin();
222
223 while (r1 != ranges.end())
224 {
225 if ((r1->end > begin) && (r1->begin < end))
226 {
227 result.add_range(std::max(r1->begin, begin) - begin,
228 std::min(r1->end, end) - begin);
229 }
230 else if (r1->begin >= end)
231 break;
232
233 ++r1;
234 }
235
236 result.compress();
237 return result;
238}
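An illustrative sketch of get_view(), not part of index_set.cc; the returned set lives in the index space [0, end - begin), i.e. every contained index is shifted down by 'begin':

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet set(100);
    set.add_range(20, 40);
    set.add_index(70);

    const IndexSet view = set.get_view(30, 80);
    // view.size() == 50; it contains [0,10) (from [30,40)) and {40} (from 70)
    (void)view;
  }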
239
240std::vector<IndexSet>
241IndexSet::split_by_block(
242 const std::vector<types::global_dof_index> &n_indices_per_block) const
243{
244 std::vector<IndexSet> partitioned;
245 const unsigned int n_blocks = n_indices_per_block.size();
246
247 partitioned.reserve(n_blocks);
248 types::global_dof_index start = 0;
249 for (const auto n_block_indices : n_indices_per_block)
250 {
251 partitioned.push_back(this->get_view(start, start + n_block_indices));
252 start += n_block_indices;
253 }
254
255#ifdef DEBUG
256 types::global_dof_index sum = 0;
257 for (const auto &partition : partitioned)
258 {
259 sum += partition.size();
260 }
261 AssertDimension(sum, this->size());
262#endif
263
264 return partitioned;
265}
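A sketch of split_by_block(), not part of index_set.cc, for a hypothetical two-block system with block sizes 6 and 4:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/types.h>

  #include <vector>

  int main()
  {
    using namespace dealii;

    IndexSet owned(10);
    owned.add_range(2, 8); // owns global indices 2..7

    const std::vector<types::global_dof_index> block_sizes = {6, 4};
    const std::vector<IndexSet> blocks = owned.split_by_block(block_sizes);
    // blocks[0] is the view of [0,6)  and contains [2,6)
    // blocks[1] is the view of [6,10) and contains [0,2) (global 6,7 shifted by 6)
    (void)blocks;
  }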
266
267void
268IndexSet::subtract_set(const IndexSet &other)
269{
270 compress();
271 other.compress();
272 is_compressed = false;
273
274
275 // we save all new ranges to our IndexSet in a temporary vector and
276 // add all of them in one go at the end.
277 std::vector<Range> new_ranges;
278
279 std::vector<Range>::iterator own_it = ranges.begin();
280 std::vector<Range>::iterator other_it = other.ranges.begin();
281
282 while (own_it != ranges.end() && other_it != other.ranges.end())
283 {
284 // advance own iterator until we get an overlap
285 if (own_it->end <= other_it->begin)
286 {
287 new_ranges.push_back(*own_it);
288 ++own_it;
289 continue;
290 }
291 // we are done with other_it, so advance
292 if (own_it->begin >= other_it->end)
293 {
294 ++other_it;
295 continue;
296 }
297
298 // Now own_it and other_it overlap. First save the part of own_it that
299 // is before other_it (if not empty).
300 if (own_it->begin < other_it->begin)
301 {
302 Range r(own_it->begin, other_it->begin);
303 r.nth_index_in_set = 0; // fix warning of unused variable
304 new_ranges.push_back(r);
305 }
306 // change own_it to the sub range behind other_it. Do not delete own_it
307 // in any case. As removal would invalidate iterators, we just shrink
308 // the range to an empty one.
309 own_it->begin = other_it->end;
310 if (own_it->begin > own_it->end)
311 {
312 own_it->begin = own_it->end;
313 ++own_it;
314 }
315
316 // continue without advancing iterators, the right one will be advanced
317 // next.
318 }
319
320 // make sure to take over the remaining ranges
321 for (; own_it != ranges.end(); ++own_it)
322 new_ranges.push_back(*own_it);
323
324 ranges.clear();
325
326 // done, now add the temporary ranges
327 const std::vector<Range>::iterator end = new_ranges.end();
328 for (std::vector<Range>::iterator it = new_ranges.begin(); it != end; ++it)
329 add_range(it->begin, it->end);
330
331 compress();
332}
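A short sketch of subtract_set() (in-place set difference), not part of index_set.cc:

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet a(100);
    a.add_range(0, 50);

    IndexSet b(100);
    b.add_range(20, 30);
    b.add_index(40);

    a.subtract_set(b);
    // a now holds [0,20), [30,40), [41,50): 39 elements in 3 intervals
  }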
333
334
335
336IndexSet
337IndexSet::tensor_product(const IndexSet &other) const
338{
339 IndexSet set(this->size() * other.size());
340 for (const auto el : *this)
341 set.add_indices(other, el * other.size());
342 set.compress();
343 return set;
344}
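A sketch of tensor_product(), not part of index_set.cc; every element r of *this contributes a copy of 'other' shifted by r * other.size():

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet rows(3); // {0, 2}
    rows.add_index(0);
    rows.add_index(2);

    IndexSet cols(4); // {1, 3}
    cols.add_index(1);
    cols.add_index(3);

    const IndexSet product = rows.tensor_product(cols);
    // product.size() == 12 and it contains {1, 3, 9, 11}
    (void)product;
  }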
345
346
347
348IndexSet::size_type
349IndexSet::pop_back()
350{
351 Assert(is_empty() == false,
352 ExcMessage(
353 "pop_back() failed, because this IndexSet contains no entries."));
354
355 const size_type index = ranges.back().end - 1;
356 --ranges.back().end;
357
358 if (ranges.back().begin == ranges.back().end)
359 ranges.pop_back();
360
361 return index;
362}
363
364
365
366IndexSet::size_type
367IndexSet::pop_front()
368{
369 Assert(is_empty() == false,
370 ExcMessage(
371 "pop_front() failed, because this IndexSet contains no entries."));
372
373 const size_type index = ranges.front().begin;
374 ++ranges.front().begin;
375
376 if (ranges.front().begin == ranges.front().end)
377 ranges.erase(ranges.begin());
378
379 // We have to set this in any case, because nth_index_in_set is no longer
380 // up to date for all but the first range
381 is_compressed = false;
382
383 return index;
384}
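A sketch of pop_back() and pop_front(), not part of index_set.cc; each call removes and returns a single element:

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet set(20);
    set.add_range(5, 8); // {5, 6, 7}

    const IndexSet::size_type last  = set.pop_back();  // 7
    const IndexSet::size_type first = set.pop_front(); // 5
    // set now contains only {6}
    (void)last;
    (void)first;
  }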
385
386
387
388void
389IndexSet::add_range_lower_bound(const Range &new_range)
390{
391 ranges.insert(Utilities::lower_bound(ranges.begin(), ranges.end(), new_range),
392 new_range);
393}
394
395
396
397void
398IndexSet::add_ranges_internal(
399 boost::container::small_vector<std::pair<size_type, size_type>, 200>
400 & tmp_ranges,
401 const bool ranges_are_sorted)
402{
403 if (!ranges_are_sorted)
404 std::sort(tmp_ranges.begin(), tmp_ranges.end());
405
406 // if we have many ranges, we first construct a temporary index set (where
407 // we add ranges in a consecutive way, so fast), otherwise, we work with
408 // add_range(). the number 9 is chosen heuristically given the fact that
409 // there are typically up to 8 independent ranges when adding the degrees of
410 // freedom on a 3d cell or 9 when adding degrees of freedom of faces. if
411 // doing cell-by-cell additions, we want to avoid repeated calls to
412 // IndexSet::compress() which gets called upon merging two index sets, so we
413 // want to be in the other branch then.
414 if (tmp_ranges.size() > 9)
415 {
416 IndexSet tmp_set(size());
417 tmp_set.ranges.reserve(tmp_ranges.size());
418 for (const auto &i : tmp_ranges)
419 tmp_set.add_range(i.first, i.second);
420
421 // Case if we have zero or just one range: Add into the other set with
422 // its indices, as that is cheaper
423 if (this->ranges.size() <= 1)
424 {
425 if (this->ranges.size() == 1)
426 tmp_set.add_range(ranges[0].begin, ranges[0].end);
427 std::swap(*this, tmp_set);
428 }
429 else
430 this->add_indices(tmp_set);
431 }
432 else
433 for (const auto &i : tmp_ranges)
434 add_range(i.first, i.second);
435}
436
437
438
439void
440IndexSet::add_indices(const IndexSet &other, const size_type offset)
441{
442 if ((this == &other) && (offset == 0))
443 return;
444
445 if (other.ranges.size() != 0)
446 {
447 AssertIndexRange(other.ranges.back().end - 1, index_space_size);
448 }
449
450 compress();
451 other.compress();
452
453 std::vector<Range>::const_iterator r1 = ranges.begin(),
454 r2 = other.ranges.begin();
455
456 std::vector<Range> new_ranges;
457 // just get the start and end of the ranges right in this method, everything
458 // else will be done in compress()
459 while (r1 != ranges.end() || r2 != other.ranges.end())
460 {
461 // the two ranges do not overlap or we are at the end of one of the
462 // ranges
463 if (r2 == other.ranges.end() ||
464 (r1 != ranges.end() && r1->end < (r2->begin + offset)))
465 {
466 new_ranges.push_back(*r1);
467 ++r1;
468 }
469 else if (r1 == ranges.end() || (r2->end + offset) < r1->begin)
470 {
471 new_ranges.emplace_back(r2->begin + offset, r2->end + offset);
472 ++r2;
473 }
474 else
475 {
476 // ok, we do overlap, so just take the combination of the current
477 // range (do not bother to merge with subsequent ranges)
478 Range next(std::min(r1->begin, r2->begin + offset),
479 std::max(r1->end, r2->end + offset));
480 new_ranges.push_back(next);
481 ++r1;
482 ++r2;
483 }
484 }
485 ranges.swap(new_ranges);
486
487 is_compressed = false;
488 compress();
489}
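A sketch of add_indices() with an offset, not part of index_set.cc; the other set is merged in after shifting every index by 'offset':

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet local(10);
    local.add_range(0, 3); // {0, 1, 2}

    IndexSet global(100);
    global.add_range(50, 52);

    global.add_indices(local, 20); // adds {20, 21, 22}
    // global now holds [20,23) and [50,52)
  }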
490
491
492
493void
494IndexSet::write(std::ostream &out) const
495{
496 compress();
497 out << size() << " ";
498 out << ranges.size() << std::endl;
499 std::vector<Range>::const_iterator r = ranges.begin();
500 for (; r != ranges.end(); ++r)
501 {
502 out << r->begin << " " << r->end << std::endl;
503 }
504}
505
506
507
508void
509IndexSet::read(std::istream &in)
510{
511 AssertThrow(in.fail() == false, ExcIO());
512
513 size_type s;
514 unsigned int n_ranges;
515
516 in >> s >> n_ranges;
517 ranges.clear();
518 set_size(s);
519 for (unsigned int i = 0; i < n_ranges; ++i)
520 {
521 AssertThrow(in.fail() == false, ExcIO());
522
523 size_type b, e;
524 in >> b >> e;
525 add_range(b, e);
526 }
527}
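A round-trip sketch for the text format of write()/read(), not part of index_set.cc; the stream holds "<size> <n_ranges>" followed by one "<begin> <end>" pair per range:

  #include <deal.II/base/index_set.h>

  #include <sstream>

  int main()
  {
    using namespace dealii;

    IndexSet out_set(64);
    out_set.add_range(8, 16);
    out_set.add_index(40);

    std::stringstream stream;
    out_set.write(stream);

    IndexSet in_set;
    in_set.read(stream);
    // in_set == out_set
  }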
528
529
530void
531IndexSet::block_write(std::ostream &out) const
532{
533 AssertThrow(out.fail() == false, ExcIO());
534 out.write(reinterpret_cast<const char *>(&index_space_size),
535 sizeof(index_space_size));
536 std::size_t n_ranges = ranges.size();
537 out.write(reinterpret_cast<const char *>(&n_ranges), sizeof(n_ranges));
538 if (ranges.empty() == false)
539 out.write(reinterpret_cast<const char *>(&*ranges.begin()),
540 ranges.size() * sizeof(Range));
541 AssertThrow(out.fail() == false, ExcIO());
542}
543
544void
545IndexSet::block_read(std::istream &in)
546{
547 size_type size;
548 std::size_t n_ranges;
549 in.read(reinterpret_cast<char *>(&size), sizeof(size));
550 in.read(reinterpret_cast<char *>(&n_ranges), sizeof(n_ranges));
551 // we have to clear ranges first
552 ranges.clear();
553 set_size(size);
554 ranges.resize(n_ranges, Range(0, 0));
555 if (n_ranges != 0u)
556 in.read(reinterpret_cast<char *>(&*ranges.begin()),
557 ranges.size() * sizeof(Range));
558
559 do_compress(); // needed so that largest_range can be recomputed
560}
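The same round trip with the binary format, again only a sketch and not part of index_set.cc; block_write() dumps the size, the range count and the raw Range array, and block_read() restores them and recompresses:

  #include <deal.II/base/index_set.h>

  #include <sstream>

  int main()
  {
    using namespace dealii;

    IndexSet out_set(1000);
    out_set.add_range(100, 200);
    out_set.add_range(300, 301);

    std::stringstream stream(std::ios::in | std::ios::out | std::ios::binary);
    out_set.block_write(stream);

    IndexSet in_set;
    in_set.block_read(stream);
    // in_set == out_set
  }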
561
562
563
564bool
565IndexSet::is_element_binary_search(const size_type index) const
566{
567 // get the element after which we would have to insert a range that
568 // consists of all elements from this element to the end of the index
569 // range plus one. after this call we know that if p!=end() then
570 // p->begin<=index unless there is no such range at all
571 //
572 // if the searched for element is an element of this range, then we're
573 // done. otherwise, the element can't be in one of the following ranges
574 // because otherwise p would be a different iterator
575 //
576 // since we already know the position relative to the largest range (we
577 // called compress!), we can perform the binary search on ranges with
578 // lower/higher number compared to the largest range
579 std::vector<Range>::const_iterator p = std::upper_bound(
580 ranges.begin() +
581 (index < ranges[largest_range].begin ? 0 : largest_range + 1),
582 index < ranges[largest_range].begin ? ranges.begin() + largest_range :
583 ranges.end(),
584 Range(index, size() + 1));
585
586 if (p == ranges.begin())
587 return ((index >= p->begin) && (index < p->end));
588
589 Assert((p == ranges.end()) || (p->begin > index), ExcInternalError());
590
591 // now move to that previous range
592 --p;
593 Assert(p->begin <= index, ExcInternalError());
594
595 return (p->end > index);
596}
597
598
599
600IndexSet::size_type
601IndexSet::nth_index_in_set_binary_search(const size_type n) const
602{
603 // find out which chunk the local index n belongs to by using a binary
604 // search. the comparator is based on the end of the ranges.
605 Range r(n, n + 1);
606 r.nth_index_in_set = n;
607
608 const std::vector<Range>::const_iterator p = Utilities::lower_bound(
609 ranges.begin(), ranges.end(), r, Range::nth_index_compare);
610
611 Assert(p != ranges.end(), ExcInternalError());
612 return p->begin + (n - p->nth_index_in_set);
613}
614
615
616
617IndexSet::size_type
618IndexSet::index_within_set_binary_search(const size_type n) const
619{
620 // we could try to use the main range for splitting up the search range, but
621 // since we only come here when the largest range did not contain the index,
622 // there is little gain from doing a first step manually.
623 Range r(n, n);
624 std::vector<Range>::const_iterator p =
625 Utilities::lower_bound(ranges.begin(), ranges.end(), r, Range::end_compare);
626
627 // if n is not in this set
628 if (p == ranges.end() || p->end == n || p->begin > n)
629 return numbers::invalid_dof_index;
630
631 Assert(p != ranges.end(), ExcInternalError());
632 Assert(p->begin <= n, ExcInternalError());
633 Assert(n < p->end, ExcInternalError());
634 return (n - p->begin) + p->nth_index_in_set;
635}
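The binary searches above back the public queries is_element(), nth_index_in_set() and index_within_set(). A sketch of that local/global mapping, not part of index_set.cc:

  #include <deal.II/base/index_set.h>

  int main()
  {
    using namespace dealii;

    IndexSet set(100);
    set.add_range(10, 20); // local indices 0..9
    set.add_range(50, 55); // local indices 10..14
    set.compress();

    const bool in = set.is_element(52);                          // true
    const IndexSet::size_type global = set.nth_index_in_set(12); // 52
    const IndexSet::size_type local  = set.index_within_set(52); // 12
    (void)in;
    (void)global;
    (void)local;
  }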
636
637
638
639IndexSet::ElementIterator
640IndexSet::at(const size_type global_index) const
641{
642 compress();
643 AssertIndexRange(global_index, size());
644
645 if (ranges.empty())
646 return end();
647
648 std::vector<Range>::const_iterator main_range =
649 ranges.begin() + largest_range;
650
651 Range r(global_index, global_index + 1);
652 // This optimization makes the bounds for lower_bound smaller by checking
653 // the largest range first.
654 std::vector<Range>::const_iterator range_begin, range_end;
655 if (global_index < main_range->begin)
656 {
657 range_begin = ranges.begin();
658 range_end = main_range;
659 }
660 else
661 {
662 range_begin = main_range;
663 range_end = ranges.end();
664 }
665
666 // This will give us the first range p=[a,b[ with b>=global_index using
667 // a binary search
668 const std::vector<Range>::const_iterator p =
669 Utilities::lower_bound(range_begin, range_end, r, Range::end_compare);
670
671 // We couldn't find a range, which means we have no range that contains
672 // global_index and also no range behind it, meaning we need to return end().
673 if (p == ranges.end())
674 return end();
675
676 // Finally, we can have two cases: Either global_index is not in [a,b[,
677 // which means we need to return an iterator to a because global_index, ...,
678 // a-1 is not in the IndexSet (if branch). Alternatively, global_index is in
679 // [a,b[ and we will return an iterator pointing directly at global_index
680 // (else branch).
681 if (global_index < p->begin)
682 return {this, static_cast<size_type>(p - ranges.begin()), p->begin};
683 else
684 return {this, static_cast<size_type>(p - ranges.begin()), global_index};
685}
686
687
688
689std::vector<IndexSet::size_type>
690IndexSet::get_index_vector() const
691{
692 compress();
693
694 std::vector<size_type> indices;
695 indices.reserve(n_elements());
696
697 for (const auto &range : ranges)
698 for (size_type entry = range.begin; entry < range.end; ++entry)
699 indices.push_back(entry);
700
701 Assert(indices.size() == n_elements(), ExcInternalError());
702
703 return indices;
704}
705
706
707
708void
709IndexSet::fill_index_vector(std::vector<size_type> &indices) const
710{
711 indices = get_index_vector();
712}
713
714
715
716#ifdef DEAL_II_WITH_TRILINOS
717# ifdef DEAL_II_TRILINOS_WITH_TPETRA
718
719Tpetra::Map<int, types::signed_global_dof_index>
720IndexSet::make_tpetra_map(const MPI_Comm communicator,
721 const bool overlapping) const
722{
723 compress();
724 (void)communicator;
725
726# ifdef DEBUG
727 if (!overlapping)
728 {
729 const size_type n_global_elements =
730 Utilities::MPI::sum(n_elements(), communicator);
731 Assert(n_global_elements == size(),
732 ExcMessage("You are trying to create a Tpetra::Map object "
733 "that partitions elements of an index set "
734 "between processors. However, the union of the "
735 "index sets on different processors does not "
736 "contain all indices exactly once: the sum of "
737 "the number of entries the various processors "
738 "want to store locally is " +
739 std::to_string(n_global_elements) +
740 " whereas the total size of the object to be "
741 "allocated is " +
742 std::to_string(size()) +
743 ". In other words, there are "
744 "either indices that are not spoken for "
745 "by any processor, or there are indices that are "
746 "claimed by multiple processors."));
747 }
748# endif
749
750 // Find out if the IndexSet is ascending and 1:1. This corresponds to a
751 // linear Tpetra::Map. Overlapping IndexSets are never 1:1.
752 const bool linear =
753 overlapping ? false : is_ascending_and_one_to_one(communicator);
754 if (linear)
755 return Tpetra::Map<int, types::signed_global_dof_index>(
756 size(),
757 n_elements(),
758 0,
759# ifdef DEAL_II_WITH_MPI
760 Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
761# else
762 Teuchos::rcp(new Teuchos::Comm<int>())
763# endif
764 );
765 else
766 {
767 const std::vector<size_type> indices = get_index_vector();
768 std::vector<types::signed_global_dof_index> int_indices(indices.size());
769 std::copy(indices.begin(), indices.end(), int_indices.begin());
770 const Teuchos::ArrayView<types::signed_global_dof_index> arr_view(
771 int_indices);
772 return Tpetra::Map<int, types::signed_global_dof_index>(
773 size(),
774 arr_view,
775 0,
776# ifdef DEAL_II_WITH_MPI
777 Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
778# else
779 Teuchos::rcp(new Teuchos::Comm<int>())
780# endif
781 );
782 }
783}
784# endif
785
786
787
788Epetra_Map
789IndexSet::make_trilinos_map(const MPI_Comm communicator,
790 const bool overlapping) const
791{
792 compress();
793 (void)communicator;
794
795# ifdef DEBUG
796 if (!overlapping)
797 {
798 const size_type n_global_elements =
799 Utilities::MPI::sum(n_elements(), communicator);
800 Assert(n_global_elements == size(),
801 ExcMessage("You are trying to create an Epetra_Map object "
802 "that partitions elements of an index set "
803 "between processors. However, the union of the "
804 "index sets on different processors does not "
805 "contain all indices exactly once: the sum of "
806 "the number of entries the various processors "
807 "want to store locally is " +
808 std::to_string(n_global_elements) +
809 " whereas the total size of the object to be "
810 "allocated is " +
811 std::to_string(size()) +
812 ". In other words, there are "
813 "either indices that are not spoken for "
814 "by any processor, or there are indices that are "
815 "claimed by multiple processors."));
816 }
817# endif
818
819 // Find out if the IndexSet is ascending and 1:1. This corresponds to a
820 // linear EpetraMap. Overlapping IndexSets are never 1:1.
821 const bool linear =
822 overlapping ? false : is_ascending_and_one_to_one(communicator);
823
824 if (linear)
825 return Epetra_Map(TrilinosWrappers::types::int_type(size()),
826 TrilinosWrappers::types::int_type(n_elements()),
827 0,
828# ifdef DEAL_II_WITH_MPI
829 Epetra_MpiComm(communicator)
830# else
831 Epetra_SerialComm()
832# endif
833 );
834 else
835 {
836 const std::vector<size_type> indices = get_index_vector();
837 return Epetra_Map(
838 TrilinosWrappers::types::int_type(-1),
839 TrilinosWrappers::types::int_type(n_elements()),
840 (n_elements() > 0 ?
841 reinterpret_cast<const TrilinosWrappers::types::int_type *>(
842 indices.data()) :
843 nullptr),
844 0,
845# ifdef DEAL_II_WITH_MPI
846 Epetra_MpiComm(communicator)
847# else
848 Epetra_SerialComm()
849# endif
850 );
851 }
852}
853#endif
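A hedged sketch of make_trilinos_map(), not part of index_set.cc; it assumes deal.II was configured with Trilinos (and optionally MPI). Each rank owns 5 consecutive indices, which is ascending and 1:1, so the call takes the cheap linear-map branch:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>

  #ifdef DEAL_II_WITH_TRILINOS
  #  include <Epetra_Map.h>
  #endif

  int main(int argc, char *argv[])
  {
    using namespace dealii;
    Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  #ifdef DEAL_II_WITH_TRILINOS
    const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

    IndexSet owned(5 * n_procs);
    owned.add_range(5 * rank, 5 * (rank + 1));

    const Epetra_Map map = owned.make_trilinos_map(MPI_COMM_WORLD, false);
    (void)map;
  #endif
  }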
854
855
856#ifdef DEAL_II_WITH_PETSC
857IS
858IndexSet::make_petsc_is(const MPI_Comm communicator) const
859{
860 std::vector<size_type> indices;
861 fill_index_vector(indices);
862
863 PetscInt n = indices.size();
864 std::vector<PetscInt> pindices(indices.begin(), indices.end());
865
866 IS is;
867 PetscErrorCode ierr =
868 ISCreateGeneral(communicator, n, pindices.data(), PETSC_COPY_VALUES, &is);
869 AssertThrow(ierr == 0, ExcPETScError(ierr));
870
871 return is;
872}
873#endif
874
875
876
877bool
878IndexSet::is_ascending_and_one_to_one(const MPI_Comm communicator) const
879{
880 // If the sum of local elements does not add up to the total size,
881 // the IndexSet can't be complete.
882 const size_type n_global_elements =
883 Utilities::MPI::sum(n_elements(), communicator);
884 if (n_global_elements != size())
885 return false;
886
887 if (n_global_elements == 0)
888 return true;
889
890#ifdef DEAL_II_WITH_MPI
891 // Non-contiguous IndexSets can't be linear.
892 const bool all_contiguous =
893 (Utilities::MPI::min(is_contiguous() ? 1 : 0, communicator) == 1);
894 if (!all_contiguous)
895 return false;
896
897 bool is_globally_ascending = true;
898 // we know that there is only one interval
899 types::global_dof_index first_local_dof = (n_elements() > 0) ?
900 *(begin_intervals()->begin()) :
901 numbers::invalid_dof_index;
902
903 const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
904 const std::vector<types::global_dof_index> global_dofs =
905 Utilities::MPI::gather(communicator, first_local_dof, 0);
906
907 if (my_rank == 0)
908 {
909 // find out if the received std::vector is ascending
910 types::global_dof_index index = 0;
911 while (global_dofs[index] == numbers::invalid_dof_index)
912 ++index;
913 types::global_dof_index old_dof = global_dofs[index++];
914 for (; index < global_dofs.size(); ++index)
915 {
916 const types::global_dof_index new_dof = global_dofs[index];
917 if (new_dof != numbers::invalid_dof_index)
918 {
919 if (new_dof <= old_dof)
920 {
921 is_globally_ascending = false;
922 break;
923 }
924 else
925 old_dof = new_dof;
926 }
927 }
928 }
929
930 // now broadcast the result
931 int is_ascending = is_globally_ascending ? 1 : 0;
932 int ierr = MPI_Bcast(&is_ascending, 1, MPI_INT, 0, communicator);
933 AssertThrowMPI(ierr);
934
935 return (is_ascending == 1);
936#else
937 return true;
938#endif // DEAL_II_WITH_MPI
939}
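A sketch of is_ascending_and_one_to_one(), not part of index_set.cc; the call is collective, so every rank must participate. Each rank owns a contiguous block of 10 indices ordered by rank, which satisfies both conditions:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>

  #include <iostream>

  int main(int argc, char *argv[])
  {
    using namespace dealii;
    Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

    const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

    IndexSet owned(10 * n_procs);
    owned.add_range(10 * rank, 10 * (rank + 1));

    const bool linear = owned.is_ascending_and_one_to_one(MPI_COMM_WORLD);
    if (rank == 0)
      std::cout << (linear ? "ascending and 1:1" : "not 1:1") << std::endl;
  }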
940
941
942
943std::size_t
944IndexSet::memory_consumption() const
945{
946 return (MemoryConsumption::memory_consumption(ranges) +
947 MemoryConsumption::memory_consumption(is_compressed) +
948 MemoryConsumption::memory_consumption(index_space_size) +
949 sizeof(compress_mutex));
950}
951
952
953
954DEAL_II_NAMESPACE_CLOSE
Definition index_set.h:911