Reference documentation for deal.II version 9.5.0
shared_tria.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2015 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/sparsity_tools.h>

#include <type_traits>


DEAL_II_NAMESPACE_OPEN

#ifdef DEAL_II_WITH_MPI
namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    Triangulation<dim, spacedim>::Triangulation(
      const MPI_Comm mpi_communicator,
      const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                     smooth_grid,
      const bool     allow_artificial_cells,
      const Settings settings)
      : dealii::parallel::TriangulationBase<dim, spacedim>(mpi_communicator,
                                                           smooth_grid,
                                                           false)
      , settings(settings)
      , allow_artificial_cells(allow_artificial_cells)
    {
      const auto partition_settings =
        (partition_zoltan | partition_metis | partition_zorder |
         partition_custom_signal) &
        settings;
      (void)partition_settings;
      Assert(partition_settings == partition_auto ||
               partition_settings == partition_metis ||
               partition_settings == partition_zoltan ||
               partition_settings == partition_zorder ||
               partition_settings == partition_custom_signal,
             ExcMessage("Settings must contain exactly one type of the active "
                        "cell partitioning scheme."));

      if (settings & construct_multigrid_hierarchy)
        Assert(allow_artificial_cells,
               ExcMessage("construct_multigrid_hierarchy requires "
                          "allow_artificial_cells to be set to true."));
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return (settings & construct_multigrid_hierarchy);
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::partition()
    {
# ifdef DEBUG
      // Check that all meshes are the same (or at least have the same
      // total number of active cells):
      const unsigned int max_active_cells =
        Utilities::MPI::max(this->n_active_cells(), this->get_communicator());
      Assert(
        max_active_cells == this->n_active_cells(),
        ExcMessage(
          "A parallel::shared::Triangulation needs to be refined in the same "
          "way on all processors, but the participating processors don't "
          "agree on the number of active cells."));
# endif

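      // If the user requested automatic partitioning, fall back to the best
      // partitioner this build was configured with: Zoltan if available,
      // otherwise METIS, otherwise the built-in Z-order (Morton) ordering.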
      auto partition_settings = (partition_zoltan | partition_metis |
                                 partition_zorder | partition_custom_signal) &
                                settings;
      if (partition_settings == partition_auto)
# ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
        partition_settings = partition_zoltan;
# elif defined DEAL_II_WITH_METIS
        partition_settings = partition_metis;
# else
        partition_settings = partition_zorder;
# endif

      if (partition_settings == partition_zoltan)
        {
# ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_zoltan' requires the library "
                        "to be compiled with support for Zoltan! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
# else
          GridTools::partition_triangulation(
            this->n_subdomains, *this, SparsityTools::Partitioner::zoltan);
# endif
        }
      else if (partition_settings == partition_metis)
        {
# ifndef DEAL_II_WITH_METIS
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_metis' requires the library "
                        "to be compiled with support for METIS! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
# else
          GridTools::partition_triangulation(this->n_subdomains,
                                             *this,
                                             SparsityTools::Partitioner::metis);
# endif
        }
      else if (partition_settings == partition_zorder)
        {
          GridTools::partition_triangulation_zorder(this->n_subdomains, *this);
        }
      else if (partition_settings == partition_custom_signal)
        {
          // User partitions mesh manually
        }
      else
        {
          AssertThrow(false, ExcInternalError());
        }

      // do not partition multigrid levels if the user is
      // defining a custom partition
      if ((settings & construct_multigrid_hierarchy) &&
          !(settings & partition_custom_signal))
        GridTools::partition_multigrid_levels(*this);

      true_subdomain_ids_of_cells.resize(this->n_active_cells());

      // loop over all cells and mark artificial:
      typename parallel::shared::Triangulation<dim,
                                               spacedim>::active_cell_iterator
        cell = this->begin_active(),
        endc = this->end();

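      // With artificial cells enabled, only locally owned cells and a single
      // layer of ghost cells around them keep their subdomain ids; every
      // other cell is marked as artificial below. Without artificial cells,
      // all cells keep their subdomain ids and we only record them.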
      if (allow_artificial_cells)
        {
          // get active halo layer of (ghost) cells
          // parallel::shared::Triangulation<dim>::
          std::function<bool(
            const typename parallel::shared::Triangulation<dim, spacedim>::
              active_cell_iterator &)>
            predicate = IteratorFilters::SubdomainEqualTo(this->my_subdomain);

          const std::vector<typename parallel::shared::Triangulation<
            dim,
            spacedim>::active_cell_iterator>
            active_halo_layer_vector =
              GridTools::compute_active_cell_halo_layer(*this,
                                                        predicate);
          std::set<typename parallel::shared::Triangulation<dim, spacedim>::
                     active_cell_iterator>
            active_halo_layer(active_halo_layer_vector.begin(),
                              active_halo_layer_vector.end());

          for (unsigned int index = 0; cell != endc; cell++, index++)
            {
              // store original/true subdomain ids:
              true_subdomain_ids_of_cells[index] = cell->subdomain_id();

              if (cell->is_locally_owned() == false &&
                  active_halo_layer.find(cell) == active_halo_layer.end())
                cell->set_subdomain_id(numbers::artificial_subdomain_id);
            }

          // loop over all cells in multigrid hierarchy and mark artificial:
          if (settings & construct_multigrid_hierarchy)
            {
              true_level_subdomain_ids_of_cells.resize(this->n_levels());

              std::function<bool(
                const typename parallel::shared::Triangulation<dim, spacedim>::
                  cell_iterator &)>
                predicate = IteratorFilters::LocallyOwnedLevelCell();
              for (unsigned int lvl = 0; lvl < this->n_levels(); ++lvl)
                {
                  true_level_subdomain_ids_of_cells[lvl].resize(
                    this->n_cells(lvl));

                  const std::vector<typename parallel::shared::Triangulation<
                    dim,
                    spacedim>::cell_iterator>
                    level_halo_layer_vector =
                      GridTools::compute_cell_halo_layer_on_level(
                        *this, predicate, lvl);
                  std::set<typename parallel::shared::
                             Triangulation<dim, spacedim>::cell_iterator>
                    level_halo_layer(level_halo_layer_vector.begin(),
                                     level_halo_layer_vector.end());

                  typename parallel::shared::Triangulation<dim, spacedim>::
                    cell_iterator cell = this->begin(lvl),
                                  endc = this->end(lvl);
                  for (unsigned int index = 0; cell != endc; cell++, index++)
                    {
                      // Store true level subdomain IDs before setting
                      // artificial
                      true_level_subdomain_ids_of_cells[lvl][index] =
                        cell->level_subdomain_id();

                      // for active cells, we must have knowledge of level
                      // subdomain ids of all neighbors to our subdomain, not
                      // just neighbors on the same level. if the cell's
                      // subdomain id was not set to artificial above, we will
                      // also keep its level subdomain id since it is either
                      // owned by this processor or in the ghost layer of the
                      // active mesh.
                      if (cell->is_active() &&
                          cell->subdomain_id() !=
                            numbers::artificial_subdomain_id)
                        continue;

                      // we must have knowledge of our parent in the hierarchy
                      if (cell->has_children())
                        {
                          bool keep_cell = false;
                          for (unsigned int c = 0;
                               c < GeometryInfo<dim>::max_children_per_cell;
                               ++c)
                            if (cell->child(c)->level_subdomain_id() ==
                                this->my_subdomain)
                              {
                                keep_cell = true;
                                break;
                              }
                          if (keep_cell)
                            continue;
                        }

                      // we must have knowledge of our neighbors on the same
                      // level
                      if (!cell->is_locally_owned_on_level() &&
                          level_halo_layer.find(cell) != level_halo_layer.end())
                        continue;

                      // mark all other cells as artificial
                      cell->set_level_subdomain_id(
                        numbers::artificial_subdomain_id);
                    }
                }
            }
        }
      else
        {
          // just store true subdomain ids
          for (unsigned int index = 0; cell != endc; cell++, index++)
            true_subdomain_ids_of_cells[index] = cell->subdomain_id();
        }

# ifdef DEBUG
      {
        // Assert that each cell is owned by a processor
        const unsigned int n_my_cells = std::count_if(
          this->begin_active(),
          typename Triangulation<dim, spacedim>::active_cell_iterator(
            this->end()),
          [](const auto &i) { return (i.is_locally_owned()); });

        const unsigned int total_cells =
          Utilities::MPI::sum(n_my_cells, this->get_communicator());
        Assert(total_cells == this->n_active_cells(),
               ExcMessage("Not all cells are assigned to a processor."));
      }

      // If running with multigrid, assert that each level
      // cell is owned by a processor
      if (settings & construct_multigrid_hierarchy)
        {
          const unsigned int n_my_cells =
            std::count_if(this->begin(), this->end(), [](const auto &i) {
              return (i.is_locally_owned_on_level());
            });


          const unsigned int total_cells =
            Utilities::MPI::sum(n_my_cells, this->get_communicator());
          Assert(total_cells == this->n_cells(),
                 ExcMessage("Not all cells are assigned to a processor."));
        }
# endif
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      return allow_artificial_cells;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<types::subdomain_id>
      &Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      return true_subdomain_ids_of_cells;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<types::subdomain_id>
      &Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
        const unsigned int level) const
    {
      Assert(level < true_level_subdomain_ids_of_cells.size(),
             ExcInternalError());
      Assert(true_level_subdomain_ids_of_cells[level].size() ==
               this->n_cells(level),
             ExcInternalError());
      return true_level_subdomain_ids_of_cells[level];
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      // make sure that all refinement/coarsening flags are the same on all
      // processes
      {
        // Obtain the type used to store the different possibilities
        // a cell can be refined. This is a bit awkward because
        // what `cell->refine_flag_set()` returns is a struct
        // type, RefinementCase, which internally stores a
        // std::uint8_t, which actually holds integers of
        // enum type RefinementPossibilities<dim>::Possibilities.
        // In the following, use the actual name of the enum, but
        // make sure that it is in fact a `std::uint8_t` or
        // equally sized type.
        using int_type = std::underlying_type_t<
          typename RefinementPossibilities<dim>::Possibilities>;
        static_assert(sizeof(int_type) == sizeof(std::uint8_t),
                      "Internal type mismatch.");

        std::vector<int_type> refinement_configurations(this->n_active_cells() *
                                                          2,
                                                        int_type(0));
        for (const auto &cell : this->active_cell_iterators())
          if (cell->is_locally_owned())
            {
              refinement_configurations[cell->active_cell_index() * 2 + 0] =
                static_cast<int_type>(cell->refine_flag_set());
              refinement_configurations[cell->active_cell_index() * 2 + 1] =
                static_cast<int_type>(cell->coarsen_flag_set() ? 1 : 0);
            }

        Utilities::MPI::max(refinement_configurations,
                            this->get_communicator(),
                            refinement_configurations);

        for (const auto &cell : this->active_cell_iterators())
          {
            cell->clear_refine_flag();
            cell->clear_coarsen_flag();

            Assert(
              (refinement_configurations[cell->active_cell_index() * 2 + 0] >
                   0 ?
                 1 :
                 0) +
                  refinement_configurations[cell->active_cell_index() * 2 +
                                            1] <=
                1,
              ExcMessage(
                "Refinement/coarsening flags of cells are not consistent in parallel!"));

            if (refinement_configurations[cell->active_cell_index() * 2 + 0] !=
                0)
              cell->set_refine_flag(RefinementCase<dim>(
                refinement_configurations[cell->active_cell_index() * 2 + 0]));

            if (refinement_configurations[cell->active_cell_index() * 2 + 1] >
                0)
              cell->set_coarsen_flag();
          }
      }

      dealii::Triangulation<dim, spacedim>::execute_coarsening_and_refinement();
      partition();
      this->update_number_cache();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>> &vertices,
      const std::vector<CellData<dim>> &  cells,
      const SubCellData &                 subcelldata)
    {
      try
        {
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            vertices, cells, subcelldata);
        }
      catch (
        const typename dealii::Triangulation<dim, spacedim>::DistortedCellList
          &)
        {
          // the underlying triangulation should not be checking for distorted
          // cells
          Assert(false, ExcInternalError());
        }
      partition();
      this->update_number_cache();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      (void)construction_data;

      Assert(false, ExcInternalError());
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      Assert(
        (dynamic_cast<
           const dealii::parallel::DistributedTriangulationBase<dim, spacedim>
             *>(&other_tria) == nullptr),
        ExcMessage(
          "Cannot use this function on parallel::distributed::Triangulation."));

      dealii::parallel::TriangulationBase<dim, spacedim>::copy_triangulation(
        other_tria);
      partition();
      this->update_number_cache();
    }
  } // namespace shared
} // namespace parallel

#else

namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      Assert(false, ExcNotImplemented());
      return true;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return false;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<unsigned int>
      &Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      Assert(false, ExcNotImplemented());
      return true_subdomain_ids_of_cells;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<unsigned int>
      &Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
        const unsigned int) const
    {
      Assert(false, ExcNotImplemented());
      return true_level_subdomain_ids_of_cells;
    }
  } // namespace shared
} // namespace parallel


#endif



namespace internal
{
  namespace parallel
  {
    namespace shared
    {
      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        TemporarilyRestoreSubdomainIds(const Triangulation<dim, spacedim> &tria)
        : shared_tria(
            dynamic_cast<
              const dealii::parallel::shared::Triangulation<dim, spacedim> *>(
              &tria))
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Save the current set of subdomain IDs, and set subdomain IDs
            // to the "true" owner of each cell.
            const std::vector<types::subdomain_id> &true_subdomain_ids =
              shared_tria->get_true_subdomain_ids_of_cells();

            saved_subdomain_ids.resize(shared_tria->n_active_cells());
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index   = cell->active_cell_index();
                saved_subdomain_ids[index] = cell->subdomain_id();
                cell->set_subdomain_id(true_subdomain_ids[index]);
              }
          }
      }



      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim,
                                     spacedim>::~TemporarilyRestoreSubdomainIds()
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Undo the subdomain modification.
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index = cell->active_cell_index();
                cell->set_subdomain_id(saved_subdomain_ids[index]);
              }
          }
      }
    } // namespace shared
  }   // namespace parallel
} // namespace internal


/*-------------- Explicit Instantiations -------------------------------*/
#include "shared_tria.inst"

DEAL_II_NAMESPACE_CLOSE
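For orientation (this sketch is not part of shared_tria.cc), a minimal program using the class defined above could look as follows; the hypercube mesh, the refinement level, and the choice of partition_zorder are arbitrary example values:

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main(int argc, char *argv[])
{
  using namespace dealii;

  // Initialize MPI for this run.
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  // Every process stores the full mesh; because the third constructor
  // argument is 'true', cells outside the local subdomain and its ghost
  // layer are marked artificial. partition_zorder needs no external
  // partitioning library.
  parallel::shared::Triangulation<2> triangulation(
    MPI_COMM_WORLD,
    Triangulation<2>::none,
    /*allow_artificial_cells=*/true,
    parallel::shared::Triangulation<2>::partition_zorder);

  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(4);

  // n_locally_owned_active_cells() reflects the partition computed in
  // Triangulation::partition() above.
  std::cout << "Rank " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)
            << " owns " << triangulation.n_locally_owned_active_cells()
            << " of " << triangulation.n_global_active_cells() << " cells."
            << std::endl;
}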