Reference documentation for deal.II version Git 11de1224af 2020-11-24 16:17:24 -0500
tria_base.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2015 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

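// This file implements the member functions of parallel::TriangulationBase
// and parallel::DistributedTriangulationBase, i.e., the functionality shared
// by all parallel Triangulation classes and declared in
// <deal.II/distributed/tria_base.h>.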
#include <deal.II/base/logstream.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/vector_memory.h>

#include <algorithm>
#include <fstream>
#include <iostream>
#include <numeric>


DEAL_II_NAMESPACE_OPEN

namespace parallel
{
  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::TriangulationBase(
    const MPI_Comm &mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
               smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::Triangulation<dim, spacedim>(smooth_grid,
                                           check_for_distorted_cells)
    , mpi_communicator(mpi_communicator)
    , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
    , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#endif
  }



  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::copy_triangulation(
    const dealii::Triangulation<dim, spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false, ExcNeedsMPI());
#else
    dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);

    if (const dealii::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
          dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria))
      {
        // release unused vector memory because we will have very different
        // vectors now
        dealii::internal::GrowingVectorMemoryImplementation::
          release_all_unused_memory();
      }
#endif
  }



  template <int dim, int spacedim>
  std::size_t
  TriangulationBase<dim, spacedim>::memory_consumption() const
  {
    std::size_t mem =
      this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
      MemoryConsumption::memory_consumption(this->mpi_communicator) +
      MemoryConsumption::memory_consumption(my_subdomain) +
      MemoryConsumption::memory_consumption(
        number_cache.n_global_active_cells) +
      MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }

  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::~TriangulationBase()
  {
    // release unused vector memory because the vector layout is going to look
    // very different now
    dealii::internal::GrowingVectorMemoryImplementation::
      release_all_unused_memory();
  }

  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
    : n_locally_owned_active_cells(0)
    , n_global_levels(0)
  {}

  template <int dim, int spacedim>
  unsigned int
  TriangulationBase<dim, spacedim>::n_locally_owned_active_cells() const
  {
    return number_cache.n_locally_owned_active_cells;
  }

  template <int dim, int spacedim>
  unsigned int
  TriangulationBase<dim, spacedim>::n_global_levels() const
  {
    return number_cache.n_global_levels;
  }

  template <int dim, int spacedim>
  types::global_cell_index
  TriangulationBase<dim, spacedim>::n_global_active_cells() const
  {
    return number_cache.n_global_active_cells;
  }

  template <int dim, int spacedim>
  const MPI_Comm &
  TriangulationBase<dim, spacedim>::get_communicator() const
  {
    return mpi_communicator;
  }

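  // Update the cached information about the distributed mesh: the set of
  // ghost owners, the number of locally owned active cells, and the global
  // number of active cells and levels. The parallel triangulation classes
  // call this function after every change to the mesh.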
#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::update_number_cache()
  {
    number_cache.ghost_owners.clear();
    number_cache.level_ghost_owners.clear();
    number_cache.n_locally_owned_active_cells = 0;

    if (this->n_levels() == 0)
      {
        // Skip communication done below if we do not have any cells
        // (meaning the Triangulation is empty on all processors). This will
        // happen when called from the destructor of Triangulation, which
        // can get called during exception handling causing a hang in this
        // function.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels       = 0;
        return;
      }


    {
      // find ghost owners
      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() <
               Utilities::MPI::n_mpi_processes(this->mpi_communicator),
             ExcInternalError());
    }

    if (this->n_levels() > 0)
      for (const auto &cell : this->active_cell_iterators())
        if (cell->subdomain_id() == my_subdomain)
          ++number_cache.n_locally_owned_active_cells;

    // Potentially cast to a 64 bit type before accumulating to avoid overflow:
    number_cache.n_global_active_cells =
      Utilities::MPI::sum(static_cast<types::global_cell_index>(
                            number_cache.n_locally_owned_active_cells),
                          this->mpi_communicator);

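    // Each process only stores the levels of the mesh it needs, so the
    // global number of levels is the maximum of the local level counts over
    // all processes.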
    number_cache.n_global_levels =
      Utilities::MPI::max(this->n_levels(), this->mpi_communicator);

    // Store MPI ranks of level ghost owners of this processor on all levels.
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        number_cache.level_ghost_owners.clear();

        // if there is nothing to do, then do nothing
        if (this->n_levels() == 0)
          return;

        // find level ghost owners
        for (typename Triangulation<dim, spacedim>::cell_iterator cell =
               this->begin();
             cell != this->end();
             ++cell)
          if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
              cell->level_subdomain_id() != this->locally_owned_subdomain())
            this->number_cache.level_ghost_owners.insert(
              cell->level_subdomain_id());

#  ifdef DEBUG
        // Check that level_ghost_owners is symmetric by sending a message to
        // everyone
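        // (each process sends one dummy message to every rank it recorded as
        // a level ghost owner and expects exactly one message back from each
        // of them; if the relation were not symmetric, one of the receives
        // below would remain unmatched and this debug-only check would hang)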
        {
          int ierr = MPI_Barrier(this->mpi_communicator);
          AssertThrowMPI(ierr);

          const int mpi_tag = Utilities::MPI::internal::Tags::
            triangulation_base_fill_level_ghost_owners;

          // important: preallocate to avoid (re)allocation:
          std::vector<MPI_Request> requests(
            this->number_cache.level_ghost_owners.size());
          unsigned int dummy       = 0;
          unsigned int req_counter = 0;

          for (std::set<types::subdomain_id>::iterator it =
                 this->number_cache.level_ghost_owners.begin();
               it != this->number_cache.level_ghost_owners.end();
               ++it, ++req_counter)
            {
              ierr = MPI_Isend(&dummy,
                               1,
                               MPI_UNSIGNED,
                               *it,
                               mpi_tag,
                               this->mpi_communicator,
                               &requests[req_counter]);
              AssertThrowMPI(ierr);
            }

          for (std::set<types::subdomain_id>::iterator it =
                 this->number_cache.level_ghost_owners.begin();
               it != this->number_cache.level_ghost_owners.end();
               ++it)
            {
              unsigned int dummy;
              ierr = MPI_Recv(&dummy,
                              1,
                              MPI_UNSIGNED,
                              *it,
                              mpi_tag,
                              this->mpi_communicator,
                              MPI_STATUS_IGNORE);
              AssertThrowMPI(ierr);
            }

          if (requests.size() > 0)
            {
              ierr = MPI_Waitall(requests.size(),
                                 requests.data(),
                                 MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }

          ierr = MPI_Barrier(this->mpi_communicator);
          AssertThrowMPI(ierr);
        }
#  endif

        Assert(this->number_cache.level_ghost_owners.size() <
                 Utilities::MPI::n_mpi_processes(this->mpi_communicator),
               ExcInternalError());
      }

    // reset global cell ids
    this->reset_global_cell_indices();
  }

#else

  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::update_number_cache()
  {
    Assert(false, ExcNotImplemented());
  }

#endif

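  // The kinds of reference cells (e.g., quadrilaterals and triangles in 2d)
  // present in the locally stored part of the mesh can differ from process
  // to process, so the parallel variant of this function forms the union of
  // the locally found types over all ranks.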
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::update_reference_cell_types()
  {
    // run algorithm for locally-owned cells
    dealii::Triangulation<dim, spacedim>::update_reference_cell_types();

    // translate ReferenceCell::Type to unsigned int (needed by
    // Utilities::MPI::compute_set_union)
    std::vector<unsigned int> reference_cell_types_ui;

    for (const auto &i : this->reference_cell_types)
      reference_cell_types_ui.push_back(static_cast<unsigned int>(i));

    // create union
    reference_cell_types_ui =
      Utilities::MPI::compute_set_union(reference_cell_types_ui,
                                        this->mpi_communicator);

    // transform back and store result
    this->reference_cell_types.clear();
    for (const auto &i : reference_cell_types_ui)
      this->reference_cell_types.push_back(static_cast<ReferenceCell::Type>(i));
  }


  template <int dim, int spacedim>
  types::subdomain_id
  TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
  {
    return my_subdomain;
  }



  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  TriangulationBase<dim, spacedim>::ghost_owners() const
  {
    return number_cache.ghost_owners;
  }



  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  TriangulationBase<dim, spacedim>::level_ghost_owners() const
  {
    return number_cache.level_ghost_owners;
  }



  template <int dim, int spacedim>
  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
  TriangulationBase<dim, spacedim>::compute_vertices_with_ghost_neighbors()
    const
  {
    return GridTools::compute_vertices_with_ghost_neighbors(*this);
  }



  template <int dim, int spacedim>
  std::vector<types::boundary_id>
  TriangulationBase<dim, spacedim>::get_boundary_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_boundary_ids(),
      this->mpi_communicator);
  }



  template <int dim, int spacedim>
  std::vector<types::manifold_id>
  TriangulationBase<dim, spacedim>::get_manifold_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_manifold_ids(),
      this->mpi_communicator);
  }



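  // Assign a globally unique index to every active cell (and, if a
  // multilevel hierarchy is kept, to every level cell) and record the
  // resulting ownership layout in Utilities::MPI::Partitioner objects so
  // that cell-based data can be stored in distributed vectors.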
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::reset_global_cell_indices()
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#else

    // currently only implemented for distributed triangulations
    if (dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
                       *>(this) == nullptr)
      return;

    // 1) determine number of active locally-owned cells
    const types::global_cell_index n_locally_owned_cells =
      this->n_locally_owned_active_cells();

    // 2) determine the offset of each process
    types::global_cell_index cell_index = 0;

    MPI_Exscan(&n_locally_owned_cells,
               &cell_index,
               1,
               Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
               MPI_SUM,
               this->mpi_communicator);
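    // After the exclusive scan, cell_index is the number of locally-owned
    // cells on all lower-ranked processes, i.e., the first global index this
    // process assigns below (rank 0 keeps its initial value of zero).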

    // 3) give global indices to locally-owned cells and mark all other cells
    // as invalid
    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        cell->set_global_active_cell_index(cell_index++);
      else
        cell->set_global_active_cell_index(numbers::invalid_dof_index);

    // 4) determine the global indices of ghost cells
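    // (the first lambda runs on the owning process and packs the cell's new
    // index; the second one runs on processes that see the cell as a ghost
    // and stores the received index there)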
    GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
      *this,
      [](const auto &cell) { return cell->global_active_cell_index(); },
      [](const auto &cell, const auto &id) {
        cell->set_global_active_cell_index(id);
      });

    // 5) set up new partitioner
    IndexSet is_local(this->n_global_active_cells());
    IndexSet is_ghost(this->n_global_active_cells());

    for (const auto &cell : this->active_cell_iterators())
      if (!cell->is_artificial())
        {
          const auto index = cell->global_active_cell_index();

          if (index == numbers::invalid_dof_index)
            continue;

          if (cell->is_locally_owned())
            is_local.add_index(index);
          else
            is_ghost.add_index(index);
        }

    number_cache.active_cell_index_partitioner =
      Utilities::MPI::Partitioner(is_local, is_ghost, this->mpi_communicator);
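    // The partitioner records which global active cell indices are locally
    // owned and which are ghosted on this process, analogous to the
    // owned/ghost split used for degrees of freedom.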

    // 6) proceed with multigrid levels if requested
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        // 1) determine number of locally-owned cells on levels
        std::vector<types::global_cell_index> n_locally_owned_cells(
          this->n_global_levels(), 0);

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            n_locally_owned_cells[cell->level()]++;

        // 2) determine the offset of each process
        std::vector<types::global_cell_index> cell_index(
          this->n_global_levels(), 0);

        MPI_Exscan(n_locally_owned_cells.data(),
                   cell_index.data(),
                   this->n_global_levels(),
                   Utilities::MPI::internal::mpi_type_id(
                     n_locally_owned_cells.data()),
                   MPI_SUM,
                   this->mpi_communicator);

        // 3) determine global number of "active" cells on each level
        std::vector<types::global_cell_index> n_cells_level(
          this->n_global_levels(), 0);

        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];

        MPI_Bcast(n_cells_level.data(),
                  this->n_global_levels(),
                  Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
                  this->n_subdomains - 1,
                  this->mpi_communicator);
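        // (the last rank serves as broadcast root because, after the
        // exclusive scan above, its offset plus its local count equals the
        // global number of cells on each level)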

        // 4) give global indices to locally-owned cells on level and mark
        // all other cells as invalid
        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            cell->set_global_level_cell_index(cell_index[cell->level()]++);
          else
            cell->set_global_level_cell_index(numbers::invalid_dof_index);

        // 5) update the numbers of ghost level cells
        GridTools::exchange_cell_data_to_level_ghosts<
          types::global_cell_index,
          dealii::Triangulation<dim, spacedim>>(
          *this,
          [](const auto &cell) { return cell->global_level_cell_index(); },
          [](const auto &cell, const auto &id) {
            return cell->set_global_level_cell_index(id);
          });

        number_cache.level_cell_index_partitioners.resize(
          this->n_global_levels());

        // 6) set up cell partitioners for each level
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          {
            IndexSet is_local(n_cells_level[l]);
            IndexSet is_ghost(n_cells_level[l]);

            for (const auto &cell : this->cell_iterators_on_level(l))
              if (cell->level_subdomain_id() !=
                  numbers::artificial_subdomain_id)
                {
                  const auto index = cell->global_level_cell_index();

                  if (index == numbers::invalid_dof_index)
                    continue;

                  if (cell->level_subdomain_id() ==
                      this->locally_owned_subdomain())
                    is_local.add_index(index);
                  else
                    is_ghost.add_index(index);
                }

            number_cache.level_cell_index_partitioners[l] =
              Utilities::MPI::Partitioner(is_local,
                                          is_ghost,
                                          this->mpi_communicator);
          }
      }

#endif
  }



  template <int dim, int spacedim>
  const Utilities::MPI::Partitioner &
  TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner() const
  {
    return number_cache.active_cell_index_partitioner;
  }

  template <int dim, int spacedim>
  const Utilities::MPI::Partitioner &
  TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
    const unsigned int level) const
  {
    Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
    AssertIndexRange(level, this->n_global_levels());

    return number_cache.level_cell_index_partitioners[level];
  }

  template <int dim, int spacedim>
  DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
    const MPI_Comm &mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
               smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::parallel::TriangulationBase<dim, spacedim>(
        mpi_communicator,
        smooth_grid,
        check_for_distorted_cells)
  {}

} // end namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "tria_base.inst"

DEAL_II_NAMESPACE_CLOSE