Reference documentation for deal.II version GIT 00e6fe71c9 2023-06-04 19:35:01+00:00
tria_base.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2015 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/logstream.h>
 18 #include <deal.II/base/memory_consumption.h>
 19 #include <deal.II/base/mpi.templates.h>
 20 #include <deal.II/base/partitioner.h>
 21 #include <deal.II/base/utilities.h>
 22 
 23 #include <deal.II/distributed/shared_tria.h>
 24 #include <deal.II/distributed/tria_base.h>
 25 
 26 #include <deal.II/grid/grid_tools.h>
 28 #include <deal.II/grid/tria.h>
 29 #include <deal.II/grid/tria_accessor.h>
 30 #include <deal.II/grid/tria_iterator.h>
 31 
 32 #include <deal.II/lac/vector_memory.h>
 33 
34 #include <algorithm>
35 #include <cstdint>
36 #include <fstream>
37 #include <iostream>
38 #include <limits>
39 #include <numeric>
40 
41 
 42 DEAL_II_NAMESPACE_OPEN
 43 
44 namespace parallel
45 {
46  template <int dim, int spacedim>
47  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 48  TriangulationBase<dim, spacedim>::TriangulationBase(
 49  const MPI_Comm mpi_communicator,
50  const typename ::Triangulation<dim, spacedim>::MeshSmoothing
51  smooth_grid,
52  const bool check_for_distorted_cells)
53  : ::Triangulation<dim, spacedim>(smooth_grid,
54  check_for_distorted_cells)
55  , mpi_communicator(mpi_communicator)
56  , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
57  , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
58  {
59 #ifndef DEAL_II_WITH_MPI
60  Assert(false, ExcNeedsMPI());
61 #endif
62  }
63 
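 // Usage sketch (illustration only, not part of this file): user code does
 // not instantiate TriangulationBase directly but one of its derived
 // classes, which forwards an MPI communicator to this constructor.
 // Assuming deal.II was configured with p4est support:
 //
 //   #include <deal.II/distributed/tria.h>
 //   #include <deal.II/grid/grid_generator.h>
 //
 //   parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
 //   GridGenerator::hyper_cube(tria);
 //   tria.refine_global(3);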
64 
65 
66  template <int dim, int spacedim>
67  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
68  void TriangulationBase<dim, spacedim>::copy_triangulation(
69  const ::Triangulation<dim, spacedim> &other_tria)
70  {
71 #ifndef DEAL_II_WITH_MPI
72  (void)other_tria;
73  Assert(false, ExcNeedsMPI());
74 #else
 75  ::Triangulation<dim, spacedim>::copy_triangulation(other_tria);
 76 
77  if (const ::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
78  dynamic_cast<const ::parallel::TriangulationBase<dim, spacedim>
79  *>(&other_tria))
80  {
81  // release unused vector memory because we will have very different
82  // vectors now
 83  internal::GrowingVectorMemoryImplementation::
 84  release_all_unused_memory();
 85  }
86 #endif
87  }
88 
89 
90 
91  template <int dim, int spacedim>
92  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
93  std::size_t TriangulationBase<dim, spacedim>::memory_consumption() const
94  {
95  std::size_t mem =
 96  this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
 97  MemoryConsumption::memory_consumption(this->mpi_communicator) +
 98  MemoryConsumption::memory_consumption(my_subdomain) +
 99  MemoryConsumption::memory_consumption(
 100  number_cache.n_global_active_cells) +
101  MemoryConsumption::memory_consumption(number_cache.n_global_levels);
102  return mem;
103  }
104 
105 
106 
107  template <int dim, int spacedim>
108  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 109  TriangulationBase<dim, spacedim>::~TriangulationBase()
 110  {
111  // release unused vector memory because the vector layout is going to look
112  // very different now
 113  internal::GrowingVectorMemoryImplementation::
 114  release_all_unused_memory();
 115  }
116 
117 
118 
119  template <int dim, int spacedim>
120  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 121  TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
 122  : n_locally_owned_active_cells(0)
123  , number_of_global_coarse_cells(0)
124  , n_global_levels(0)
125  {}
126 
127 
128 
129  template <int dim, int spacedim>
130  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
131  unsigned int TriangulationBase<dim, spacedim>::n_locally_owned_active_cells()
132  const
133  {
134  return number_cache.n_locally_owned_active_cells;
135  }
136 
137 
138 
139  template <int dim, int spacedim>
140  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
141  unsigned int TriangulationBase<dim, spacedim>::n_global_levels() const
142  {
143  return number_cache.n_global_levels;
144  }
145 
146 
147 
148  template <int dim, int spacedim>
149  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 150  types::global_cell_index
 151  TriangulationBase<dim, spacedim>::n_global_active_cells() const
 152  {
153  return number_cache.n_global_active_cells;
154  }
155 
156 
157 
158  template <int dim, int spacedim>
159  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
160  MPI_Comm TriangulationBase<dim, spacedim>::get_communicator() const
161  {
162  return mpi_communicator;
163  }
164 
165 
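 // The four accessors above only return values cached in number_cache by
 // update_number_cache(). A consumption sketch (illustration only), assuming
 // `tria` is any triangulation derived from parallel::TriangulationBase:
 //
 //   const unsigned int my_rank =
 //     Utilities::MPI::this_mpi_process(tria.get_communicator());
 //   std::cout << "rank " << my_rank << " owns "
 //             << tria.n_locally_owned_active_cells() << " of "
 //             << tria.n_global_active_cells() << " active cells on "
 //             << tria.n_global_levels() << " global levels" << std::endl;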
166 
167 #ifdef DEAL_II_WITH_MPI
168  template <int dim, int spacedim>
169  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
170  void TriangulationBase<dim, spacedim>::update_number_cache()
171  {
172  number_cache.ghost_owners.clear();
173  number_cache.level_ghost_owners.clear();
174  number_cache.n_locally_owned_active_cells = 0;
175 
176  if (this->n_levels() == 0)
177  {
178  // Skip communication done below if we do not have any cells
179  // (meaning the Triangulation is empty on all processors). This will
180  // happen when called from the destructor of Triangulation, which
181  // can get called during exception handling causing a hang in this
182  // function.
183  number_cache.n_global_active_cells = 0;
184  number_cache.n_global_levels = 0;
185  return;
186  }
187 
188 
189  {
190  // find ghost owners
191  for (const auto &cell : this->active_cell_iterators())
192  if (cell->is_ghost())
193  number_cache.ghost_owners.insert(cell->subdomain_id());
194 
195  Assert(number_cache.ghost_owners.size() <
196  Utilities::MPI::n_mpi_processes(this->mpi_communicator),
197  ExcInternalError());
198  }
199 
200  if (this->n_levels() > 0)
201  number_cache.n_locally_owned_active_cells = std::count_if(
202  this->begin_active(),
 203  typename Triangulation<dim, spacedim>::active_cell_iterator(
 204  this->end()),
205  [](const auto &i) { return i.is_locally_owned(); });
206  else
207  number_cache.n_locally_owned_active_cells = 0;
208 
209  // Potentially cast to a 64 bit type before accumulating to avoid
210  // overflow:
211  number_cache.n_global_active_cells =
 212  Utilities::MPI::sum(static_cast<types::global_cell_index>(
 213  number_cache.n_locally_owned_active_cells),
214  this->mpi_communicator);
215 
216  number_cache.n_global_levels =
217  Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
218 
219  // Store MPI ranks of level ghost owners of this processor on all
220  // levels.
221  if (this->is_multilevel_hierarchy_constructed() == true)
222  {
223  number_cache.level_ghost_owners.clear();
224 
225  // if there is nothing to do, then do nothing
226  if (this->n_levels() == 0)
227  return;
228 
229  // find level ghost owners
230  for (const auto &cell : this->cell_iterators())
231  if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
232  cell->level_subdomain_id() != this->locally_owned_subdomain())
233  this->number_cache.level_ghost_owners.insert(
234  cell->level_subdomain_id());
235 
236 # ifdef DEBUG
237  // Check that level_ghost_owners is symmetric by sending a message
238  // to everyone
239  {
240  int ierr = MPI_Barrier(this->mpi_communicator);
241  AssertThrowMPI(ierr);
242 
243  const int mpi_tag = Utilities::MPI::internal::Tags::
 244  triangulation_base_fill_level_ghost_owners;
 245 
246  // important: preallocate to avoid (re)allocation:
247  std::vector<MPI_Request> requests(
248  this->number_cache.level_ghost_owners.size());
249  unsigned int dummy = 0;
250  unsigned int req_counter = 0;
251 
252  for (const auto &it : this->number_cache.level_ghost_owners)
253  {
254  ierr = MPI_Isend(&dummy,
255  1,
256  MPI_UNSIGNED,
257  it,
258  mpi_tag,
259  this->mpi_communicator,
260  &requests[req_counter]);
261  AssertThrowMPI(ierr);
262  ++req_counter;
263  }
264 
265  for (const auto &it : this->number_cache.level_ghost_owners)
266  {
267  unsigned int dummy;
268  ierr = MPI_Recv(&dummy,
269  1,
270  MPI_UNSIGNED,
271  it,
272  mpi_tag,
273  this->mpi_communicator,
274  MPI_STATUS_IGNORE);
275  AssertThrowMPI(ierr);
276  }
277 
278  if (requests.size() > 0)
279  {
280  ierr = MPI_Waitall(requests.size(),
281  requests.data(),
282  MPI_STATUSES_IGNORE);
 283  AssertThrowMPI(ierr);
 284  }
285 
286  ierr = MPI_Barrier(this->mpi_communicator);
 287  AssertThrowMPI(ierr);
 288  }
289 # endif
290 
291  Assert(this->number_cache.level_ghost_owners.size() <
292  Utilities::MPI::n_mpi_processes(this->mpi_communicator),
293  ExcInternalError());
294  }
295 
296  this->number_cache.number_of_global_coarse_cells = this->n_cells(0);
297 
298  // reset global cell ids
299  this->reset_global_cell_indices();
300  }
301 
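 // Isolated sketch of the reduction used above (illustration only,
 // hypothetical variables): the global count is the MPI sum of the local
 // counts, cast to a 64 bit cell index type first to avoid overflow.
 //
 //   const unsigned int n_local = 123; // locally owned active cells
 //   const types::global_cell_index n_global =
 //     Utilities::MPI::sum(static_cast<types::global_cell_index>(n_local),
 //                         mpi_communicator);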
302 #else
303 
304  template <int dim, int spacedim>
305  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 306  void TriangulationBase<dim, spacedim>::update_number_cache()
 307  {
308  Assert(false, ExcNeedsMPI());
309  }
310 
311 #endif
312 
313  template <int dim, int spacedim>
314  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
315  void TriangulationBase<dim, spacedim>::update_reference_cells()
316  {
317  // run algorithm for locally-owned cells
 318  ::Triangulation<dim, spacedim>::update_reference_cells();
 319 
320  // translate ReferenceCell to unsigned int (needed by
321  // Utilities::MPI::compute_set_union)
322  std::vector<unsigned int> reference_cells_ui;
323 
324  reference_cells_ui.reserve(this->reference_cells.size());
325  for (const auto &i : this->reference_cells)
326  reference_cells_ui.push_back(static_cast<unsigned int>(i));
327 
328  // create union
329  reference_cells_ui =
330  Utilities::MPI::compute_set_union(reference_cells_ui,
331  this->mpi_communicator);
332 
333  // transform back and store result
334  this->reference_cells.clear();
335  for (const auto &i : reference_cells_ui)
336  this->reference_cells.emplace_back(
 337  internal::make_reference_cell_from_int(i));
 338  }
339 
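 // Isolated sketch of the set-union reduction used above (illustration only,
 // assuming an MPI communicator `comm`): every rank contributes its local
 // entries and receives the union gathered over all ranks.
 //
 //   std::vector<unsigned int> local_kinds = {0, 3};
 //   const std::vector<unsigned int> all_kinds =
 //     Utilities::MPI::compute_set_union(local_kinds, comm);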
340 
341 
342  template <int dim, int spacedim>
343  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 344  types::subdomain_id
 345  TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
 346  {
347  return my_subdomain;
348  }
349 
350 
351 
352  template <int dim, int spacedim>
353  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
354  const std::set<types::subdomain_id>
 355  &TriangulationBase<dim, spacedim>::ghost_owners() const
 356  {
357  return number_cache.ghost_owners;
358  }
359 
360 
361 
362  template <int dim, int spacedim>
363  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
364  const std::set<types::subdomain_id>
 365  &TriangulationBase<dim, spacedim>::level_ghost_owners() const
 366  {
367  return number_cache.level_ghost_owners;
368  }
369 
370 
371 
372  template <int dim, int spacedim>
373  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
374  std::vector<types::boundary_id> TriangulationBase<dim, spacedim>::
375  get_boundary_ids() const
376  {
 377  return Utilities::MPI::compute_set_union(
 378  ::Triangulation<dim, spacedim>::get_boundary_ids(),
 379  this->mpi_communicator);
380  }
381 
382 
383 
384  template <int dim, int spacedim>
385  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
386  std::vector<types::manifold_id> TriangulationBase<dim, spacedim>::
387  get_manifold_ids() const
388  {
 389  return Utilities::MPI::compute_set_union(
 390  ::Triangulation<dim, spacedim>::get_manifold_ids(),
 391  this->mpi_communicator);
392  }
393 
394 
395 
396  template <int dim, int spacedim>
397  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
398  void TriangulationBase<dim, spacedim>::reset_global_cell_indices()
399  {
400 #ifndef DEAL_II_WITH_MPI
401  Assert(false, ExcNeedsMPI());
402 #else
403  if (const auto pst =
 404  dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
 405  this))
406  if (pst->with_artificial_cells() == false)
407  {
408  // Specialization for parallel::shared::Triangulation without
409  // artificial cells. The code below only works if a halo of a single
410  // ghost cells is needed.
411 
412  std::vector<unsigned int> cell_counter(n_subdomains + 1);
413 
414  // count number of cells of each process
415  for (const auto &cell : this->active_cell_iterators())
416  cell_counter[cell->subdomain_id() + 1]++;
417 
418  // take prefix sum to obtain offset of each process
419  for (unsigned int i = 0; i < n_subdomains; ++i)
420  cell_counter[i + 1] += cell_counter[i];
421 
422  AssertDimension(cell_counter.back(), this->n_active_cells());
423 
424  // create partitioners
425  IndexSet is_local(this->n_active_cells());
426  is_local.add_range(cell_counter[my_subdomain],
427  cell_counter[my_subdomain + 1]);
428  number_cache.active_cell_index_partitioner =
429  std::make_shared<const Utilities::MPI::Partitioner>(
430  is_local,
 431  complete_index_set(this->n_active_cells()),
 432  this->mpi_communicator);
433 
434  // set global active cell indices and increment process-local counters
435  for (const auto &cell : this->active_cell_iterators())
436  cell->set_global_active_cell_index(
437  cell_counter[cell->subdomain_id()]++);
438 
439  Assert(this->is_multilevel_hierarchy_constructed() == false,
 440  ExcNotImplemented());
 441 
442  return;
443  }
444 
445  // 1) determine number of active locally-owned cells
446  const types::global_cell_index n_locally_owned_cells =
447  this->n_locally_owned_active_cells();
448 
449  // 2) determine the offset of each process
 450  types::global_cell_index cell_index = 0;
 451 
452  const int ierr = MPI_Exscan(
453  &n_locally_owned_cells,
454  &cell_index,
455  1,
456  Utilities::MPI::mpi_type_id_for_type<decltype(n_locally_owned_cells)>,
457  MPI_SUM,
458  this->mpi_communicator);
459  AssertThrowMPI(ierr);
460 
461  // 3) give global indices to locally-owned cells and mark all other cells as
462  // invalid
463  std::pair<types::global_cell_index, types::global_cell_index> my_range;
464  my_range.first = cell_index;
465 
466  for (const auto &cell : this->active_cell_iterators())
467  if (cell->is_locally_owned())
468  cell->set_global_active_cell_index(cell_index++);
469  else
470  cell->set_global_active_cell_index(numbers::invalid_dof_index);
471 
472  my_range.second = cell_index;
473 
474  // 4) determine the global indices of ghost cells
475  std::vector<types::global_dof_index> is_ghost_vector;
476  GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
477  static_cast<::Triangulation<dim, spacedim> &>(*this),
478  [](const auto &cell) { return cell->global_active_cell_index(); },
479  [&is_ghost_vector](const auto &cell, const auto &id) {
480  cell->set_global_active_cell_index(id);
481  is_ghost_vector.push_back(id);
482  });
483 
484  // 5) set up new partitioner
485  IndexSet is_local(this->n_global_active_cells());
486  is_local.add_range(my_range.first, my_range.second);
487 
488  std::sort(is_ghost_vector.begin(), is_ghost_vector.end());
489  IndexSet is_ghost(this->n_global_active_cells());
490  is_ghost.add_indices(is_ghost_vector.begin(), is_ghost_vector.end());
491 
492  number_cache.active_cell_index_partitioner =
493  std::make_shared<const Utilities::MPI::Partitioner>(
494  is_local, is_ghost, this->mpi_communicator);
495 
496  // 6) proceed with multigrid levels if requested
497  if (this->is_multilevel_hierarchy_constructed() == true)
498  {
499  // 1) determine number of locally-owned cells on levels
500  std::vector<types::global_cell_index> n_cells_level(
501  this->n_global_levels(), 0);
502 
503  for (auto cell : this->cell_iterators())
504  if (cell->level_subdomain_id() == this->locally_owned_subdomain())
505  n_cells_level[cell->level()]++;
506 
507  // 2) determine the offset of each process
508  std::vector<types::global_cell_index> cell_index(
509  this->n_global_levels(), 0);
510 
511  int ierr = MPI_Exscan(
512  n_cells_level.data(),
513  cell_index.data(),
514  this->n_global_levels(),
515  Utilities::MPI::mpi_type_id_for_type<decltype(*n_cells_level.data())>,
516  MPI_SUM,
517  this->mpi_communicator);
518  AssertThrowMPI(ierr);
519 
520  // 3) determine global number of "active" cells on each level
521  Utilities::MPI::sum(n_cells_level,
522  this->mpi_communicator,
523  n_cells_level);
524 
525  // 4) give global indices to locally-owned cells on level and mark
526  // all other cells as invalid
527  std::vector<
528  std::pair<types::global_cell_index, types::global_cell_index>>
529  my_ranges(this->n_global_levels());
530  for (unsigned int l = 0; l < this->n_global_levels(); ++l)
531  my_ranges[l].first = cell_index[l];
532 
533  for (auto cell : this->cell_iterators())
534  if (cell->level_subdomain_id() == this->locally_owned_subdomain())
535  cell->set_global_level_cell_index(cell_index[cell->level()]++);
536  else
537  cell->set_global_level_cell_index(numbers::invalid_dof_index);
538 
539  for (unsigned int l = 0; l < this->n_global_levels(); ++l)
540  my_ranges[l].second = cell_index[l];
541 
542  // 5) update the numbers of ghost level cells
543  std::vector<std::vector<types::global_dof_index>> is_ghost_vectors(
544  this->n_global_levels());
 545  GridTools::exchange_cell_data_to_level_ghosts<
 546  types::global_cell_index,
 547  ::Triangulation<dim, spacedim>>(
 548  *this,
549  [](const auto &cell) { return cell->global_level_cell_index(); },
550  [&is_ghost_vectors](const auto &cell, const auto &id) {
551  cell->set_global_level_cell_index(id);
552  is_ghost_vectors[cell->level()].push_back(id);
553  });
554 
555  number_cache.level_cell_index_partitioners.resize(
556  this->n_global_levels());
557 
558  // 6) set up cell partitioners for each level
559  for (unsigned int l = 0; l < this->n_global_levels(); ++l)
560  {
561  IndexSet is_local(n_cells_level[l]);
562  is_local.add_range(my_ranges[l].first, my_ranges[l].second);
563 
564  IndexSet is_ghost(n_cells_level[l]);
565  std::sort(is_ghost_vectors[l].begin(), is_ghost_vectors[l].end());
566  is_ghost.add_indices(is_ghost_vectors[l].begin(),
567  is_ghost_vectors[l].end());
568 
569  number_cache.level_cell_index_partitioners[l] =
570  std::make_shared<const Utilities::MPI::Partitioner>(
571  is_local, is_ghost, this->mpi_communicator);
572  }
573  }
574 
575 #endif
576  }
577 
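 // The enumeration above hinges on an exclusive prefix sum: rank r starts
 // numbering its cells at the total number of cells owned by ranks 0..r-1.
 // Minimal sketch of that step (illustration only, hypothetical variables):
 //
 //   std::uint64_t n_owned = 42; // locally owned active cells on this rank
 //   std::uint64_t offset  = 0;  // stays 0 on rank 0
 //   MPI_Exscan(&n_owned, &offset, 1, MPI_UINT64_T, MPI_SUM, comm);
 //   // the first global index handed out on this rank is `offset`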
578 
579 
580  template <int dim, int spacedim>
581  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
582  void TriangulationBase<dim, spacedim>::communicate_locally_moved_vertices(
583  const std::vector<bool> &vertex_locally_moved)
584  {
585  AssertDimension(vertex_locally_moved.size(), this->n_vertices());
586 #ifdef DEBUG
587  {
588  const std::vector<bool> locally_owned_vertices =
 589  GridTools::get_locally_owned_vertices(*this);
 590  for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
591  Assert((vertex_locally_moved[i] == false) ||
592  (locally_owned_vertices[i] == true),
593  ExcMessage("The vertex_locally_moved argument must not "
594  "contain vertices that are not locally owned"));
595  }
596 #endif
597 
598  Point<spacedim> invalid_point;
599  for (unsigned int d = 0; d < spacedim; ++d)
600  invalid_point[d] = std::numeric_limits<double>::quiet_NaN();
601 
602  const auto pack = [&](const auto &cell) {
603  std::vector<Point<spacedim>> vertices(cell->n_vertices());
604 
605  for (const auto v : cell->vertex_indices())
606  if (vertex_locally_moved[cell->vertex_index(v)])
607  vertices[v] = cell->vertex(v);
608  else
609  vertices[v] = invalid_point;
610 
611  return vertices;
612  };
613 
614  const auto unpack = [&](const auto &cell, const auto &vertices) {
615  for (const auto v : cell->vertex_indices())
616  if (std::isnan(vertices[v][0]) == false)
617  cell->vertex(v) = vertices[v];
618  };
619 
620  if (this->is_multilevel_hierarchy_constructed())
 621  GridTools::exchange_cell_data_to_level_ghosts<
 622  std::vector<Point<spacedim>>>(
623  static_cast<::Triangulation<dim, spacedim> &>(*this),
624  pack,
625  unpack);
626  else
627  GridTools::exchange_cell_data_to_ghosts<std::vector<Point<spacedim>>>(
628  static_cast<::Triangulation<dim, spacedim> &>(*this),
629  pack,
630  unpack);
631  }
632 
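 // Typical call pattern (illustration only), assuming `tria` is a parallel
 // triangulation whose locally owned vertices were just displaced: flag
 // exactly the locally owned vertices that moved, then exchange positions
 // so ghost cells see the updated coordinates.
 //
 //   std::vector<bool> vertex_moved(tria.n_vertices(), false);
 //   // ... displace some locally owned vertices of tria and set the
 //   // corresponding entries of vertex_moved to true ...
 //   tria.communicate_locally_moved_vertices(vertex_moved);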
633 
634 
635  template <int dim, int spacedim>
636  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
637  const std::weak_ptr<const Utilities::MPI::Partitioner> TriangulationBase<
638  dim,
639  spacedim>::global_active_cell_index_partitioner() const
640  {
641  return number_cache.active_cell_index_partitioner;
642  }
643 
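 // Consumption sketch (illustration only, assuming `tria` as above): the
 // returned std::weak_ptr must be locked before use, because the cached
 // partitioner is replaced whenever the mesh changes.
 //
 //   if (const auto partitioner =
 //         tria.global_active_cell_index_partitioner().lock())
 //     {
 //       const auto n_global = partitioner->size();
 //       // set up vectors indexed by global active cell index, etc.
 //     }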
644 
645 
646  template <int dim, int spacedim>
647  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
648  const std::weak_ptr<const Utilities::MPI::Partitioner> TriangulationBase<
649  dim,
650  spacedim>::global_level_cell_index_partitioner(const unsigned int level)
651  const
652  {
653  Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
654  AssertIndexRange(level, this->n_global_levels());
655 
656  return number_cache.level_cell_index_partitioners[level];
657  }
658 
659 
660 
661  template <int dim, int spacedim>
662  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 663  types::coarse_cell_id
 664  TriangulationBase<dim, spacedim>::n_global_coarse_cells() const
 665  {
666  return number_cache.number_of_global_coarse_cells;
667  }
668 
669 
670 
671  template <int dim, int spacedim>
672  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 673  DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
 674  const MPI_Comm mpi_communicator,
675  const typename ::Triangulation<dim, spacedim>::MeshSmoothing
676  smooth_grid,
677  const bool check_for_distorted_cells)
678  : ::parallel::TriangulationBase<dim, spacedim>(
679  mpi_communicator,
680  smooth_grid,
681  check_for_distorted_cells)
682  , cell_attached_data({0, 0, {}, {}})
683  , data_transfer(mpi_communicator)
684  {}
685 
686 
687 
688  template <int dim, int spacedim>
689  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
690  void DistributedTriangulationBase<dim, spacedim>::clear()
691  {
692  cell_attached_data = {0, 0, {}, {}};
693  data_transfer.clear();
694 
 695  ::Triangulation<dim, spacedim>::clear();
 696  }
697 
698 
699 
700  template <int dim, int spacedim>
701  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
702  void DistributedTriangulationBase<dim, spacedim>::save_attached_data(
703  const unsigned int global_first_cell,
704  const unsigned int global_num_cells,
705  const std::string &filename) const
706  {
707  // cast away constness
708  auto tria = const_cast<
 709  ::parallel::DistributedTriangulationBase<dim, spacedim> *>(this);
 710 
711  if (this->cell_attached_data.n_attached_data_sets > 0)
712  {
713  // pack attached data first
714  tria->data_transfer.pack_data(
715  tria->local_cell_relations,
716  tria->cell_attached_data.pack_callbacks_fixed,
717  tria->cell_attached_data.pack_callbacks_variable);
718 
719  // then store buffers in file
720  tria->data_transfer.save(global_first_cell, global_num_cells, filename);
721 
722  // and release the memory afterwards
723  tria->data_transfer.clear();
724  }
725 
726  // clear all of the callback data, as explained in the documentation of
727  // register_data_attach()
728  {
729  tria->cell_attached_data.n_attached_data_sets = 0;
730  tria->cell_attached_data.pack_callbacks_fixed.clear();
731  tria->cell_attached_data.pack_callbacks_variable.clear();
732  }
733  }
734 
735 
736 
737  template <int dim, int spacedim>
738  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
739  bool DistributedTriangulationBase<dim, spacedim>::has_hanging_nodes() const
740  {
741  if (this->n_global_levels() <= 1)
742  return false; // can not have hanging nodes without refined cells
743 
744  // if there are any active cells with level less than n_global_levels()-1,
745  // then there is obviously also one with level n_global_levels()-1, and
746  // consequently there must be a hanging node somewhere.
747  //
748  // The problem is that we cannot just ask for the first active cell, but
749  // instead need to filter over locally owned cells.
750  const bool have_coarser_cell =
751  std::any_of(this->begin_active(this->n_global_levels() - 2),
752  this->end_active(this->n_global_levels() - 2),
753  [](const CellAccessor<dim, spacedim> &cell) {
754  return cell.is_locally_owned();
755  });
756 
757  // return true if at least one process has a coarser cell
758  return Utilities::MPI::max(have_coarser_cell ? 1 : 0,
759  this->mpi_communicator) != 0;
760  }
761 
762 
763 
764  template <int dim, int spacedim>
765  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
766  void DistributedTriangulationBase<dim, spacedim>::load_attached_data(
767  const unsigned int global_first_cell,
768  const unsigned int global_num_cells,
769  const unsigned int local_num_cells,
770  const std::string &filename,
771  const unsigned int n_attached_deserialize_fixed,
772  const unsigned int n_attached_deserialize_variable)
773  {
774  // load saved data, if any was stored
775  if (this->cell_attached_data.n_attached_deserialize > 0)
776  {
777  this->data_transfer.load(global_first_cell,
778  global_num_cells,
779  local_num_cells,
780  filename,
781  n_attached_deserialize_fixed,
782  n_attached_deserialize_variable);
783 
784  this->data_transfer.unpack_cell_status(this->local_cell_relations);
785 
786  // the CellStatus of all stored cells should always be CELL_PERSIST.
787  for (const auto &cell_rel : this->local_cell_relations)
788  {
789  (void)cell_rel;
790  Assert(
791  (cell_rel.second == // cell_status
 792  parallel::DistributedTriangulationBase<dim,
 793  spacedim>::CELL_PERSIST),
794  ExcInternalError());
795  }
796  }
797  }
798 
799 
800 
801  template <int dim, int spacedim>
802  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
803  unsigned int DistributedTriangulationBase<dim, spacedim>::
804  register_data_attach(
805  const std::function<std::vector<char>(const cell_iterator &,
806  const CellStatus)> &pack_callback,
807  const bool returns_variable_size_data)
808  {
809  unsigned int handle = numbers::invalid_unsigned_int;
810 
811  // Add new callback function to the corresponding register.
812  // Encode handles according to returns_variable_size_data.
813  if (returns_variable_size_data)
814  {
815  handle = 2 * cell_attached_data.pack_callbacks_variable.size();
816  cell_attached_data.pack_callbacks_variable.push_back(pack_callback);
817  }
818  else
819  {
820  handle = 2 * cell_attached_data.pack_callbacks_fixed.size() + 1;
821  cell_attached_data.pack_callbacks_fixed.push_back(pack_callback);
822  }
823 
824  // Increase overall counter.
825  ++cell_attached_data.n_attached_data_sets;
826 
827  return handle;
828  }
829 
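 // Registration sketch (illustration only), assuming `tria` is a
 // parallel::distributed::Triangulation<dim>: the callback returns the
 // bytes to attach to each cell; the returned handle encodes the buffer
 // type in its parity (even = variable size, odd = fixed size), as above.
 //
 //   const unsigned int handle = tria.register_data_attach(
 //     [](const auto & /*cell*/, const auto /*status*/) {
 //       return std::vector<char>(8, 0); // always 8 bytes per cell
 //     },
 //     /*returns_variable_size_data=*/false);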
830 
831 
832  template <int dim, int spacedim>
833  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
834  void DistributedTriangulationBase<dim, spacedim>::notify_ready_to_unpack(
835  const unsigned int handle,
836  const std::function<
837  void(const cell_iterator &,
838  const CellStatus,
839  const boost::iterator_range<std::vector<char>::const_iterator> &)>
840  &unpack_callback)
841  {
842  // perform unpacking
843  data_transfer.unpack_data(local_cell_relations, handle, unpack_callback);
844 
845  // decrease counters
846  --cell_attached_data.n_attached_data_sets;
847  if (cell_attached_data.n_attached_deserialize > 0)
848  --cell_attached_data.n_attached_deserialize;
849 
850  // important: only remove data if we are not in the deserialization
851  // process. There, each SolutionTransfer registers and unpacks before
852  // the next one does this, so n_attached_data_sets is only 1 here. This
853  // would destroy the saved data before the second SolutionTransfer can
854  // get it. This created a bug that is documented in
855  // tests/mpi/p4est_save_03 with more than one SolutionTransfer.
856 
857  if (cell_attached_data.n_attached_data_sets == 0 &&
858  cell_attached_data.n_attached_deserialize == 0)
859  {
860  // everybody got their data, time for cleanup!
861  cell_attached_data.pack_callbacks_fixed.clear();
862  cell_attached_data.pack_callbacks_variable.clear();
863  data_transfer.clear();
864 
865  // reset all cell_status entries after coarsening/refinement
866  for (auto &cell_rel : local_cell_relations)
867  cell_rel.second =
 868  parallel::DistributedTriangulationBase<dim, spacedim>::CELL_PERSIST;
 869  }
870  }
871 
872 
873 
874  /* ------------------ class DataTransfer<dim,spacedim> ----------------- */
875 
876 
877  template <int dim, int spacedim>
878  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 879  DistributedTriangulationBase<dim, spacedim>::DataTransfer::DataTransfer(
 880  const MPI_Comm mpi_communicator)
881  : variable_size_data_stored(false)
882  , mpi_communicator(mpi_communicator)
883  {}
884 
885 
886 
887  template <int dim, int spacedim>
888  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
889  void DistributedTriangulationBase<dim, spacedim>::DataTransfer::pack_data(
890  const std::vector<cell_relation_t> &cell_relations,
891  const std::vector<typename CellAttachedData::pack_callback_t>
892  &pack_callbacks_fixed,
893  const std::vector<typename CellAttachedData::pack_callback_t>
894  &pack_callbacks_variable)
895  {
896  Assert(src_data_fixed.size() == 0,
897  ExcMessage("Previously packed data has not been released yet!"));
898  Assert(src_sizes_variable.size() == 0, ExcInternalError());
899 
900  const unsigned int n_callbacks_fixed = pack_callbacks_fixed.size();
901  const unsigned int n_callbacks_variable = pack_callbacks_variable.size();
902 
903  // Store information that we packed variable size data in
904  // a member variable for later.
905  variable_size_data_stored = (n_callbacks_variable > 0);
906 
907  // If variable transfer is scheduled, we will store the data size that
908  // each variable size callback function writes in this auxiliary
909  // container. The information will be stored by each cell in this vector
910  // temporarily.
911  std::vector<unsigned int> cell_sizes_variable_cumulative(
912  n_callbacks_variable);
913 
914  // Prepare the buffer structure, in which each callback function will
915  // store its data for each active cell.
 916  // The outermost shell in this container construct corresponds to the
917  // data packed per cell. The next layer resembles the data that
918  // each callback function packs on the corresponding cell. These
919  // buffers are chains of chars stored in an std::vector<char>.
920  // A visualisation of the data structure:
921  /* clang-format off */
922  // | cell_1 | | cell_2 | ...
923  // || callback_1 || callback_2 |...| || callback_1 || callback_2 |...| ...
924  // |||char|char|...|||char|char|...|...| |||char|char|...|||char|char|...|...| ...
925  /* clang-format on */
926  std::vector<std::vector<std::vector<char>>> packed_fixed_size_data(
927  cell_relations.size());
928  std::vector<std::vector<std::vector<char>>> packed_variable_size_data(
929  variable_size_data_stored ? cell_relations.size() : 0);
930 
931  //
932  // --------- Pack data for fixed and variable size transfer ---------
933  //
934  // Iterate over all cells, call all callback functions on each cell,
935  // and store their data in the corresponding buffer scope.
936  {
937  auto cell_rel_it = cell_relations.cbegin();
938  auto data_cell_fixed_it = packed_fixed_size_data.begin();
939  auto data_cell_variable_it = packed_variable_size_data.begin();
940  for (; cell_rel_it != cell_relations.cend(); ++cell_rel_it)
941  {
942  const auto &dealii_cell = cell_rel_it->first;
943  const auto &cell_status = cell_rel_it->second;
944 
945  // Assertions about the tree structure.
946  switch (cell_status)
947  {
 948  case parallel::DistributedTriangulationBase<dim, spacedim>::
 949  CELL_PERSIST:
 950  case parallel::DistributedTriangulationBase<dim, spacedim>::
 951  CELL_REFINE:
952  // double check the condition that we will only ever attach
953  // data to active cells when we get here
954  Assert(dealii_cell->is_active(), ExcInternalError());
955  break;
956 
 957  case parallel::DistributedTriangulationBase<dim, spacedim>::
 958  CELL_COARSEN:
959  // double check the condition that we will only ever attach
960  // data to cells with children when we get here. however, we
961  // can only tolerate one level of coarsening at a time, so
962  // check that the children are all active
963  Assert(dealii_cell->is_active() == false, ExcInternalError());
964  for (unsigned int c = 0;
965  c < GeometryInfo<dim>::max_children_per_cell;
966  ++c)
967  Assert(dealii_cell->child(c)->is_active(),
968  ExcInternalError());
969  break;
970 
 971  case parallel::DistributedTriangulationBase<dim, spacedim>::
 972  CELL_INVALID:
973  // do nothing on invalid cells
974  break;
975 
976  default:
977  Assert(false, ExcInternalError());
978  break;
979  }
980 
981  // Reserve memory corresponding to the number of callback
982  // functions that will be called.
983  // If variable size transfer is scheduled, we need to leave
984  // room for an array that holds information about how many
985  // bytes each of the variable size callback functions will
986  // write.
987  // On cells flagged with CELL_INVALID, only its CellStatus
988  // will be stored.
989  const unsigned int n_fixed_size_data_sets_on_cell =
990  1 +
991  ((cell_status ==
 992  parallel::DistributedTriangulationBase<dim,
 993  spacedim>::CELL_INVALID) ?
994  0 :
995  ((variable_size_data_stored ? 1 : 0) + n_callbacks_fixed));
996  data_cell_fixed_it->resize(n_fixed_size_data_sets_on_cell);
997 
998  // We continue with packing all data on this specific cell.
999  auto data_fixed_it = data_cell_fixed_it->begin();
1000 
1001  // First, we pack the CellStatus information.
1002  // to get consistent data sizes on each cell for the fixed size
1003  // transfer, we won't allow compression
1004  *data_fixed_it =
1005  Utilities::pack(cell_status, /*allow_compression=*/false);
1006  ++data_fixed_it;
1007 
1008  // Proceed with all registered callback functions.
1009  // Skip cells with the CELL_INVALID flag.
1010  if (cell_status !=
 1011  parallel::DistributedTriangulationBase<dim,
 1012  spacedim>::CELL_INVALID)
1013  {
1014  // Pack fixed size data.
1015  for (auto callback_it = pack_callbacks_fixed.cbegin();
1016  callback_it != pack_callbacks_fixed.cend();
1017  ++callback_it, ++data_fixed_it)
1018  {
1019  *data_fixed_it = (*callback_it)(dealii_cell, cell_status);
1020  }
1021 
1022  // Pack variable size data.
1023  // If we store variable size data, we need to transfer
1024  // the sizes of each corresponding callback function
1025  // via fixed size transfer as well.
1026  if (variable_size_data_stored)
1027  {
1028  const unsigned int n_variable_size_data_sets_on_cell =
1029  ((cell_status ==
 1030  parallel::DistributedTriangulationBase<dim, spacedim>::
 1031  CELL_INVALID) ?
1032  0 :
1033  n_callbacks_variable);
1034  data_cell_variable_it->resize(
1035  n_variable_size_data_sets_on_cell);
1036 
1037  auto callback_it = pack_callbacks_variable.cbegin();
1038  auto data_variable_it = data_cell_variable_it->begin();
1039  auto sizes_variable_it =
1040  cell_sizes_variable_cumulative.begin();
1041  for (; callback_it != pack_callbacks_variable.cend();
1042  ++callback_it, ++data_variable_it, ++sizes_variable_it)
1043  {
1044  *data_variable_it =
1045  (*callback_it)(dealii_cell, cell_status);
1046 
1047  // Store data sizes for each callback function first.
1048  // Make it cumulative below.
1049  *sizes_variable_it = data_variable_it->size();
1050  }
1051 
1052  // Turn size vector into its cumulative representation.
1053  std::partial_sum(cell_sizes_variable_cumulative.begin(),
1054  cell_sizes_variable_cumulative.end(),
1055  cell_sizes_variable_cumulative.begin());
1056 
1057  // Serialize cumulative variable size vector
1058  // value-by-value. This way we can circumvent the overhead
1059  // of storing the container object as a whole, since we
1060  // know its size by the number of registered callback
1061  // functions.
1062  data_fixed_it->resize(n_callbacks_variable *
1063  sizeof(unsigned int));
1064  for (unsigned int i = 0; i < n_callbacks_variable; ++i)
1065  std::memcpy(&(data_fixed_it->at(i * sizeof(unsigned int))),
1066  &(cell_sizes_variable_cumulative.at(i)),
1067  sizeof(unsigned int));
1068 
1069  ++data_fixed_it;
1070  }
1071 
1072  // Double check that we packed everything we wanted
1073  // in the fixed size buffers.
1074  Assert(data_fixed_it == data_cell_fixed_it->end(),
1075  ExcInternalError());
1076  }
1077 
1078  ++data_cell_fixed_it;
1079 
1080  // Increment the variable size data iterator
1081  // only if we actually pack this kind of data
1082  // to avoid getting out of bounds.
1083  if (variable_size_data_stored)
1084  ++data_cell_variable_it;
1085  } // loop over cell_relations
1086  }
1087 
1088  //
1089  // ----------- Gather data sizes for fixed size transfer ------------
1090  //
1091  // Generate a vector which stores the sizes of each callback function,
1092  // including the packed CellStatus transfer.
1093  // Find the very first cell that we wrote to with all callback
1094  // functions (i.e. a cell that was not flagged with CELL_INVALID)
1095  // and store the sizes of each buffer.
1096  //
1097  // To deal with the case that at least one of the processors does not
1098  // own any cell at all, we will exchange the information about the data
1099  // sizes among them later. The code in between is still well-defined,
1100  // since the following loops will be skipped.
1101  std::vector<unsigned int> local_sizes_fixed(
1102  1 + n_callbacks_fixed + (variable_size_data_stored ? 1 : 0));
1103  for (const auto &data_cell : packed_fixed_size_data)
1104  {
1105  if (data_cell.size() == local_sizes_fixed.size())
1106  {
1107  auto sizes_fixed_it = local_sizes_fixed.begin();
1108  auto data_fixed_it = data_cell.cbegin();
1109  for (; data_fixed_it != data_cell.cend();
1110  ++data_fixed_it, ++sizes_fixed_it)
1111  {
1112  *sizes_fixed_it = data_fixed_it->size();
1113  }
1114 
1115  break;
1116  }
1117  }
1118 
1119  // Check if all cells have valid sizes.
1120  for (auto data_cell_fixed_it = packed_fixed_size_data.cbegin();
1121  data_cell_fixed_it != packed_fixed_size_data.cend();
1122  ++data_cell_fixed_it)
1123  {
1124  Assert((data_cell_fixed_it->size() == 1) ||
1125  (data_cell_fixed_it->size() == local_sizes_fixed.size()),
1126  ExcInternalError());
1127  }
1128 
1129  // Share information about the packed data sizes
1130  // of all callback functions across all processors, in case one
1131  // of them does not own any cells at all.
1132  std::vector<unsigned int> global_sizes_fixed(local_sizes_fixed.size());
1133  Utilities::MPI::max(local_sizes_fixed,
1134  this->mpi_communicator,
1135  global_sizes_fixed);
1136 
1137  // Construct cumulative sizes, since this is the only information
1138  // we need from now on.
1139  sizes_fixed_cumulative.resize(global_sizes_fixed.size());
1140  std::partial_sum(global_sizes_fixed.begin(),
1141  global_sizes_fixed.end(),
1142  sizes_fixed_cumulative.begin());
1143 
1144  //
1145  // ---------- Gather data sizes for variable size transfer ----------
1146  //
1147  if (variable_size_data_stored)
1148  {
1149  src_sizes_variable.reserve(packed_variable_size_data.size());
1150  for (const auto &data_cell : packed_variable_size_data)
1151  {
1152  int variable_data_size_on_cell = 0;
1153 
1154  for (const auto &data : data_cell)
1155  variable_data_size_on_cell += data.size();
1156 
1157  src_sizes_variable.push_back(variable_data_size_on_cell);
1158  }
1159  }
1160 
1161  //
1162  // ------------------------ Build buffers ---------------------------
1163  //
1164  const unsigned int expected_size_fixed =
1165  cell_relations.size() * sizes_fixed_cumulative.back();
1166  const unsigned int expected_size_variable =
1167  std::accumulate(src_sizes_variable.begin(),
1168  src_sizes_variable.end(),
 1169  std::vector<int>::size_type(0));
 1170 
1171  // Move every piece of packed fixed size data into the consecutive
1172  // buffer.
1173  src_data_fixed.reserve(expected_size_fixed);
1174  for (const auto &data_cell_fixed : packed_fixed_size_data)
1175  {
1176  // Move every fraction of packed data into the buffer
1177  // reserved for this particular cell.
1178  for (const auto &data_fixed : data_cell_fixed)
1179  std::move(data_fixed.begin(),
1180  data_fixed.end(),
1181  std::back_inserter(src_data_fixed));
1182 
1183  // If we only packed the CellStatus information
1184  // (i.e. encountered a cell flagged CELL_INVALID),
1185  // fill the remaining space with invalid entries.
1186  // We can skip this if there is nothing else to pack.
1187  if ((data_cell_fixed.size() == 1) &&
1188  (sizes_fixed_cumulative.size() > 1))
1189  {
1190  const std::size_t bytes_skipped =
1191  sizes_fixed_cumulative.back() - sizes_fixed_cumulative.front();
1192 
1193  src_data_fixed.insert(src_data_fixed.end(),
1194  bytes_skipped,
1195  static_cast<char>(-1)); // invalid_char
1196  }
1197  }
1198 
1199  // Move every piece of packed variable size data into the consecutive
1200  // buffer.
1201  if (variable_size_data_stored)
1202  {
1203  src_data_variable.reserve(expected_size_variable);
1204  for (const auto &data_cell : packed_variable_size_data)
1205  {
1206  // Move every fraction of packed data into the buffer
1207  // reserved for this particular cell.
1208  for (const auto &data : data_cell)
1209  std::move(data.begin(),
1210  data.end(),
1211  std::back_inserter(src_data_variable));
1212  }
1213  }
1214 
1215  // Double check that we packed everything correctly.
1216  Assert(src_data_fixed.size() == expected_size_fixed, ExcInternalError());
1217  Assert(src_data_variable.size() == expected_size_variable,
1218  ExcInternalError());
1219  }
1220 
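 // Isolated sketch of the cumulative-size bookkeeping used above
 // (illustration only, hypothetical values): per-callback byte counts are
 // turned into running offsets with std::partial_sum, so each callback's
 // slice can later be recovered by differencing neighboring entries.
 //
 //   std::vector<unsigned int> sizes = {4, 16, 8};
 //   std::partial_sum(sizes.begin(), sizes.end(), sizes.begin());
 //   // sizes == {4, 20, 28}; callback 1 occupies bytes [4, 20) of the
 //   // fixed size chunk written for each cell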
1221 
1222 
1223  template <int dim, int spacedim>
1224  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 1225  void DistributedTriangulationBase<dim, spacedim>::DataTransfer::
 1226  unpack_cell_status(std::vector<cell_relation_t> &cell_relations) const
1227  {
1228  Assert(sizes_fixed_cumulative.size() > 0,
1229  ExcMessage("No data has been packed!"));
1230  if (cell_relations.size() > 0)
1231  {
1232  Assert(dest_data_fixed.size() > 0,
1233  ExcMessage("No data has been received!"));
1234  }
1235 
1236  // Size of CellStatus object that will be unpacked on each cell.
1237  const unsigned int size = sizes_fixed_cumulative.front();
1238 
1239  // Iterate over all cells and overwrite the CellStatus
1240  // information from the transferred data.
1241  // Proceed buffer iterator position to next cell after
1242  // each iteration.
1243  auto cell_rel_it = cell_relations.begin();
1244  auto dest_fixed_it = dest_data_fixed.cbegin();
1245  for (; cell_rel_it != cell_relations.end();
1246  ++cell_rel_it, dest_fixed_it += sizes_fixed_cumulative.back())
1247  {
1248  cell_rel_it->second = // cell_status
 1249  Utilities::unpack<typename parallel::DistributedTriangulationBase<
 1250  dim,
1251  spacedim>::CellStatus>(dest_fixed_it,
1252  dest_fixed_it + size,
1253  /*allow_compression=*/false);
1254  }
1255  }
1256 
1257 
1258 
1259  template <int dim, int spacedim>
1260  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
1261  void DistributedTriangulationBase<dim, spacedim>::DataTransfer::unpack_data(
1262  const std::vector<cell_relation_t> &cell_relations,
1263  const unsigned int handle,
1264  const std::function<
1265  void(const typename ::Triangulation<dim, spacedim>::cell_iterator &,
1266  const typename ::Triangulation<dim, spacedim>::CellStatus &,
1267  const boost::iterator_range<std::vector<char>::const_iterator> &)>
1268  &unpack_callback) const
1269  {
1270  // We decode the handle returned by register_data_attach() back into
1271  // a format we can use. All even handles belong to those callback
1272  // functions which write/read variable size data, all odd handles
1273  // interact with fixed size buffers.
1274  const bool callback_variable_transfer = (handle % 2 == 0);
1275  const unsigned int callback_index = handle / 2;
1276 
1277  // Cells will always receive fixed size data (i.e., CellStatus
1278  // information), but not necessarily variable size data (e.g., with a
1279  // ParticleHandler a cell might not contain any particle at all).
1280  // Thus it is sufficient to check if fixed size data has been received.
1281  Assert(sizes_fixed_cumulative.size() > 0,
1282  ExcMessage("No data has been packed!"));
1283  if (cell_relations.size() > 0)
1284  {
1285  Assert(dest_data_fixed.size() > 0,
1286  ExcMessage("No data has been received!"));
1287  }
1288 
1289  std::vector<char>::const_iterator dest_data_it;
1290  std::vector<char>::const_iterator dest_sizes_cell_it;
1291 
1292  // Depending on whether our callback function unpacks fixed or
1293  // variable size data, we have to pursue different approaches
1294  // to localize the correct fraction of the buffer from which
1295  // we are allowed to read.
1296  unsigned int offset = numbers::invalid_unsigned_int;
1297  unsigned int size = numbers::invalid_unsigned_int;
1298  unsigned int data_increment = numbers::invalid_unsigned_int;
1299 
1300  if (callback_variable_transfer)
1301  {
1302  // For the variable size data, we need to extract the
1303  // data size from the fixed size buffer on each cell.
1304  //
1305  // We packed this information last, so the last packed
1306  // object in the fixed size buffer corresponds to the
1307  // variable data sizes.
1308  //
1309  // The last entry of sizes_fixed_cumulative corresponds
1310  // to the size of all fixed size data packed on the cell.
1311  // To get the offset for the last packed object, we need
1312  // to get the next-to-last entry.
1313  const unsigned int offset_variable_data_sizes =
1314  sizes_fixed_cumulative[sizes_fixed_cumulative.size() - 2];
1315 
1316  // This iterator points to the data size that the
1317  // callback_function packed for each specific cell.
1318  // Adjust buffer iterator to the offset of the callback
1319  // function so that we only have to advance its position
1320  // to the next cell after each iteration.
1321  dest_sizes_cell_it = dest_data_fixed.cbegin() +
1322  offset_variable_data_sizes +
1323  callback_index * sizeof(unsigned int);
1324 
1325  // Let the data iterator point to the correct buffer.
1326  dest_data_it = dest_data_variable.cbegin();
1327  }
1328  else
1329  {
1330  // For the fixed size data, we can get the information about
1331  // the buffer location on each cell directly from the
1332  // sizes_fixed_cumulative vector.
1333  offset = sizes_fixed_cumulative[callback_index];
1334  size = sizes_fixed_cumulative[callback_index + 1] - offset;
1335  data_increment = sizes_fixed_cumulative.back();
1336 
1337  // Let the data iterator point to the correct buffer.
1338  // Adjust buffer iterator to the offset of the callback
1339  // function so that we only have to advance its position
1340  // to the next cell after each iteration.
1341  if (cell_relations.begin() != cell_relations.end())
1342  dest_data_it = dest_data_fixed.cbegin() + offset;
1343  }
1344 
1345  // Iterate over all cells and unpack the transferred data.
1346  auto cell_rel_it = cell_relations.begin();
1347  auto dest_sizes_it = dest_sizes_variable.cbegin();
1348  for (; cell_rel_it != cell_relations.end(); ++cell_rel_it)
1349  {
1350  const auto &dealii_cell = cell_rel_it->first;
1351  const auto &cell_status = cell_rel_it->second;
1352 
1353  if (callback_variable_transfer)
1354  {
1355  // Update the increment according to the whole data size
1356  // of the current cell.
1357  data_increment = *dest_sizes_it;
1358 
1359  if (cell_status !=
 1360  parallel::DistributedTriangulationBase<dim,
 1361  spacedim>::CELL_INVALID)
1362  {
1363  // Extract the corresponding values for offset and size from
1364  // the cumulative sizes array stored in the fixed size
1365  // buffer.
1366  if (callback_index == 0)
1367  offset = 0;
1368  else
1369  std::memcpy(&offset,
1370  &(*(dest_sizes_cell_it - sizeof(unsigned int))),
1371  sizeof(unsigned int));
1372 
1373  std::memcpy(&size,
1374  &(*dest_sizes_cell_it),
1375  sizeof(unsigned int));
1376 
1377  size -= offset;
1378 
1379  // Move the data iterator to the corresponding position
1380  // of the callback function and adjust the increment
1381  // accordingly.
1382  dest_data_it += offset;
1383  data_increment -= offset;
1384  }
1385 
1386  // Advance data size iterators to the next cell, avoid iterating
1387  // past the end of dest_sizes_cell_it
1388  if (cell_rel_it != cell_relations.end() - 1)
1389  dest_sizes_cell_it += sizes_fixed_cumulative.back();
1390  ++dest_sizes_it;
1391  }
1392 
1393  switch (cell_status)
1394  {
 1395  case parallel::DistributedTriangulationBase<dim,
 1396  spacedim>::CELL_PERSIST:
 1397  case parallel::DistributedTriangulationBase<dim,
 1398  spacedim>::CELL_COARSEN:
1399  unpack_callback(dealii_cell,
1400  cell_status,
1401  boost::make_iterator_range(dest_data_it,
1402  dest_data_it + size));
1403  break;
1404 
 1405  case parallel::DistributedTriangulationBase<dim,
 1406  spacedim>::CELL_REFINE:
1407  unpack_callback(dealii_cell->parent(),
1408  cell_status,
1409  boost::make_iterator_range(dest_data_it,
1410  dest_data_it + size));
1411  break;
1412 
 1413  case parallel::DistributedTriangulationBase<dim,
 1414  spacedim>::CELL_INVALID:
1415  // Skip this cell.
1416  break;
1417 
1418  default:
1419  Assert(false, ExcInternalError());
1420  break;
1421  }
1422 
1423  if (cell_rel_it != cell_relations.end() - 1)
1424  dest_data_it += data_increment;
1425  }
1426  }
1427 
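 // Fixed-size lookup used above, in isolation (illustration only): with
 // cumulative sizes {s0, s0+s1, s0+s1+s2, ...}, callback i reads
 //
 //   const unsigned int offset = sizes_fixed_cumulative[i];
 //   const unsigned int size   = sizes_fixed_cumulative[i + 1] - offset;
 //
 // bytes starting at `offset` inside each cell's chunk of
 // sizes_fixed_cumulative.back() bytes.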
1428 
1429 
1430  template <int dim, int spacedim>
1431  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
1432  void DistributedTriangulationBase<dim, spacedim>::DataTransfer::save(
1433  const unsigned int global_first_cell,
1434  const unsigned int global_num_cells,
1435  const std::string &filename) const
1436  {
1437 #ifdef DEAL_II_WITH_MPI
1438  // Large fractions of this function have been copied from
1439  // DataOutInterface::write_vtu_in_parallel.
1440  // TODO: Write general MPIIO interface.
1441 
1442  Assert(sizes_fixed_cumulative.size() > 0,
1443  ExcMessage("No data has been packed!"));
1444 
1445  const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
1446 
1447  const unsigned int bytes_per_cell = sizes_fixed_cumulative.back();
1448 
1449  //
1450  // ---------- Fixed size data ----------
1451  //
1452  {
1453  const std::string fname_fixed = std::string(filename) + "_fixed.data";
1454 
1455  MPI_Info info;
1456  int ierr = MPI_Info_create(&info);
1457  AssertThrowMPI(ierr);
1458 
1459  MPI_File fh;
1460  ierr = MPI_File_open(mpi_communicator,
1461  fname_fixed.c_str(),
1462  MPI_MODE_CREATE | MPI_MODE_WRONLY,
1463  info,
1464  &fh);
1465  AssertThrowMPI(ierr);
1466 
1467  ierr = MPI_File_set_size(fh, 0); // delete the file contents
1468  AssertThrowMPI(ierr);
1469  // this barrier is necessary, because otherwise others might already
1470  // write while one core is still setting the size to zero.
1471  ierr = MPI_Barrier(mpi_communicator);
1472  AssertThrowMPI(ierr);
1473  ierr = MPI_Info_free(&info);
1474  AssertThrowMPI(ierr);
1475  // ------------------
1476 
1477  // Write cumulative sizes to file.
1478  // Since each processor owns the same information about the data
1479  // sizes, it is sufficient to let only the first processor perform
1480  // this task.
1481  if (myrank == 0)
1482  {
1484  fh,
1485  0,
1486  sizes_fixed_cumulative.data(),
1487  sizes_fixed_cumulative.size(),
1488  MPI_UNSIGNED,
1489  MPI_STATUS_IGNORE);
1490  AssertThrowMPI(ierr);
1491  }
1492 
1493  // Write packed data to file simultaneously.
1494  const MPI_Offset size_header =
1495  sizes_fixed_cumulative.size() * sizeof(unsigned int);
1496 
1497  // Make sure we do the following computation in 64bit integers to be
1498  // able to handle 4GB+ files:
1499  const MPI_Offset my_global_file_position =
1500  size_header +
1501  static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
1502 
1503  ierr =
1505  my_global_file_position,
1506  src_data_fixed.data(),
1507  src_data_fixed.size(),
1508  MPI_BYTE,
1509  MPI_STATUS_IGNORE);
1510  AssertThrowMPI(ierr);
1511 
1512  ierr = MPI_File_close(&fh);
1513  AssertThrowMPI(ierr);
1514  }
1515 
1516 
1517 
1518  //
1519  // ---------- Variable size data ----------
1520  //
1521  if (variable_size_data_stored)
1522  {
1523  const std::string fname_variable =
1524  std::string(filename) + "_variable.data";
1525 
1526  MPI_Info info;
1527  int ierr = MPI_Info_create(&info);
1528  AssertThrowMPI(ierr);
1529 
1530  MPI_File fh;
1531  ierr = MPI_File_open(mpi_communicator,
1532  fname_variable.c_str(),
1533  MPI_MODE_CREATE | MPI_MODE_WRONLY,
1534  info,
1535  &fh);
1536  AssertThrowMPI(ierr);
1537 
1538  ierr = MPI_File_set_size(fh, 0); // delete the file contents
1539  AssertThrowMPI(ierr);
1540  // this barrier is necessary, because otherwise others might already
1541  // write while one core is still setting the size to zero.
1542  ierr = MPI_Barrier(mpi_communicator);
1543  AssertThrowMPI(ierr);
1544  ierr = MPI_Info_free(&info);
1545  AssertThrowMPI(ierr);
1546 
1547  // Write sizes of each cell into file simultaneously.
1548  {
1549  const MPI_Offset my_global_file_position =
1550  static_cast<MPI_Offset>(global_first_cell) * sizeof(unsigned int);
1551 
1552  // It is very unlikely that a single process has more than
1553  // 2 billion cells, but we might as well check.
1554  AssertThrow(src_sizes_variable.size() <
1555  static_cast<std::size_t>(
 1556  std::numeric_limits<int>::max()),
 1557  ExcNotImplemented());
1558 
1560  fh,
1561  my_global_file_position,
1562  src_sizes_variable.data(),
1563  src_sizes_variable.size(),
1564  MPI_INT,
1565  MPI_STATUS_IGNORE);
1566  AssertThrowMPI(ierr);
1567  }
1568 
1569  // Gather size of data in bytes we want to store from this
1570  // processor and compute the prefix sum. We do this in 64 bit
1571  // to avoid overflow for files larger than 4GB:
1572  const std::uint64_t size_on_proc = src_data_variable.size();
1573  std::uint64_t prefix_sum = 0;
1574  ierr = MPI_Exscan(&size_on_proc,
1575  &prefix_sum,
1576  1,
1577  MPI_UINT64_T,
1578  MPI_SUM,
1579  mpi_communicator);
1580  AssertThrowMPI(ierr);
1581 
1582  const MPI_Offset my_global_file_position =
1583  static_cast<MPI_Offset>(global_num_cells) * sizeof(unsigned int) +
1584  prefix_sum;
1585 
1586  // Write data consecutively into file.
1587  ierr =
1589  my_global_file_position,
1590  src_data_variable.data(),
1591  src_data_variable.size(),
1592  MPI_BYTE,
1593  MPI_STATUS_IGNORE);
1594  AssertThrowMPI(ierr);
1595 
1596 
1597  ierr = MPI_File_close(&fh);
1598  AssertThrowMPI(ierr);
1599  }
1600 #else
1601  (void)global_first_cell;
1602  (void)global_num_cells;
1603  (void)filename;
1604 
1605  AssertThrow(false, ExcNeedsMPI());
1606 #endif
1607  }
1608 
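 // Core of the write pattern above, reduced to plain MPI-IO (illustration
 // only, hypothetical variables): each rank writes its packed bytes at a
 // file offset derived from the global index of its first cell, so the
 // ranks' regions never overlap.
 //
 //   const MPI_Offset my_offset =
 //     header_bytes +
 //     static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
 //   MPI_File_write_at(fh, my_offset, local_data.data(),
 //                     static_cast<int>(local_data.size()),
 //                     MPI_BYTE, MPI_STATUS_IGNORE);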
1609 
1610 
1611  template <int dim, int spacedim>
1612  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
1613  void DistributedTriangulationBase<dim, spacedim>::DataTransfer::load(
1614  const unsigned int global_first_cell,
1615  const unsigned int global_num_cells,
1616  const unsigned int local_num_cells,
1617  const std::string &filename,
1618  const unsigned int n_attached_deserialize_fixed,
1619  const unsigned int n_attached_deserialize_variable)
1620  {
1621 #ifdef DEAL_II_WITH_MPI
1622  // Large fractions of this function have been copied from
1623  // DataOutInterface::write_vtu_in_parallel.
1624  // TODO: Write general MPIIO interface.
1625 
1626  Assert(dest_data_fixed.size() == 0,
1627  ExcMessage("Previously loaded data has not been released yet!"));
1628 
1629  variable_size_data_stored = (n_attached_deserialize_variable > 0);
1630 
1631  //
1632  // ---------- Fixed size data ----------
1633  //
1634  {
1635  const std::string fname_fixed = std::string(filename) + "_fixed.data";
1636 
1637  MPI_Info info;
1638  int ierr = MPI_Info_create(&info);
1639  AssertThrowMPI(ierr);
1640 
1641  MPI_File fh;
1642  ierr = MPI_File_open(
1643  mpi_communicator, fname_fixed.c_str(), MPI_MODE_RDONLY, info, &fh);
1644  AssertThrowMPI(ierr);
1645 
1646  ierr = MPI_Info_free(&info);
1647  AssertThrowMPI(ierr);
1648 
1649  // Read cumulative sizes from file.
1650  // Since all processors need the same information about the data
1651  // sizes, let each of them retrieve it by reading from the same
1652  // location in the file.
1653  sizes_fixed_cumulative.resize(1 + n_attached_deserialize_fixed +
1654  (variable_size_data_stored ? 1 : 0));
1656  fh,
1657  0,
1658  sizes_fixed_cumulative.data(),
1659  sizes_fixed_cumulative.size(),
1660  MPI_UNSIGNED,
1661  MPI_STATUS_IGNORE);
1662  AssertThrowMPI(ierr);
1663 
1664  // Allocate sufficient memory.
1665  const unsigned int bytes_per_cell = sizes_fixed_cumulative.back();
1666  dest_data_fixed.resize(static_cast<size_t>(local_num_cells) *
1667  bytes_per_cell);
1668 
1669  // Read packed data from file simultaneously.
1670  const MPI_Offset size_header =
1671  sizes_fixed_cumulative.size() * sizeof(unsigned int);
1672 
1673  // Make sure we do the following computation in 64bit integers to be
1674  // able to handle 4GB+ files:
1675  const MPI_Offset my_global_file_position =
1676  size_header +
1677  static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
1678 
1680  my_global_file_position,
1681  dest_data_fixed.data(),
1682  dest_data_fixed.size(),
1683  MPI_BYTE,
1684  MPI_STATUS_IGNORE);
1685  AssertThrowMPI(ierr);
1686 
1687 
1688  ierr = MPI_File_close(&fh);
1689  AssertThrowMPI(ierr);
1690  }
1691 
1692  //
1693  // ---------- Variable size data ----------
1694  //
1695  if (variable_size_data_stored)
1696  {
1697  const std::string fname_variable =
1698  std::string(filename) + "_variable.data";
1699 
1700  MPI_Info info;
1701  int ierr = MPI_Info_create(&info);
1702  AssertThrowMPI(ierr);
1703 
1704  MPI_File fh;
1705  ierr = MPI_File_open(
1706  mpi_communicator, fname_variable.c_str(), MPI_MODE_RDONLY, info, &fh);
1707  AssertThrowMPI(ierr);
1708 
1709  ierr = MPI_Info_free(&info);
1710  AssertThrowMPI(ierr);
1711 
1712  // Read sizes of all locally owned cells.
1713  dest_sizes_variable.resize(local_num_cells);
1714 
1715  const MPI_Offset my_global_file_position_sizes =
1716  static_cast<MPI_Offset>(global_first_cell) * sizeof(unsigned int);
1717 
1719  fh,
1720  my_global_file_position_sizes,
1721  dest_sizes_variable.data(),
1722  dest_sizes_variable.size(),
1723  MPI_INT,
1724  MPI_STATUS_IGNORE);
1725  AssertThrowMPI(ierr);
1726 
1727 
1728  // Compute my data size in bytes and compute prefix sum. We do this
1729  // in 64 bit to avoid overflow for files larger than 4 GB:
1730  const std::uint64_t size_on_proc =
1731  std::accumulate(dest_sizes_variable.begin(),
1732  dest_sizes_variable.end(),
1733  0ULL);
1734 
1735  std::uint64_t prefix_sum = 0;
1736  ierr = MPI_Exscan(&size_on_proc,
1737  &prefix_sum,
1738  1,
1739  MPI_UINT64_T,
1740  MPI_SUM,
1741  mpi_communicator);
1742  AssertThrowMPI(ierr);
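      // MPI_Exscan computes an exclusive prefix sum: afterwards,
      // prefix_sum holds the combined byte count of all lower-numbered
      // ranks (and stays at its initial value of 0 on rank 0). This is
      // exactly this rank's offset within the concatenated variable-size
      // data.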
1743 
1744  const MPI_Offset my_global_file_position =
1745  static_cast<MPI_Offset>(global_num_cells) * sizeof(unsigned int) +
1746  prefix_sum;
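      // The *_variable.data file starts with one unsigned int per global
      // cell (the size table read above), followed by the concatenated
      // per-cell data; the prefix sum is therefore this rank's offset
      // into that data section.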
1747 
1748  dest_data_variable.resize(size_on_proc);
1749 
1750  ierr =
1751  Utilities::MPI::LargeCount::File_read_at_c(fh,
1752  my_global_file_position,
1753  dest_data_variable.data(),
1754  dest_data_variable.size(),
1755  MPI_BYTE,
1756  MPI_STATUS_IGNORE);
1757  AssertThrowMPI(ierr);
1758 
1759  ierr = MPI_File_close(&fh);
1760  AssertThrowMPI(ierr);
1761  }
1762 #else
1763  (void)global_first_cell;
1764  (void)global_num_cells;
1765  (void)local_num_cells;
1766  (void)filename;
1767  (void)n_attached_deserialize_fixed;
1768  (void)n_attached_deserialize_variable;
1769 
1770  AssertThrow(false, ExcNeedsMPI());
1771 #endif
1772  }
1773 
1774 
1775 
1776  template <int dim, int spacedim>
1777  DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
1778  void DistributedTriangulationBase<dim, spacedim>::DataTransfer::clear()
1779  {
1780  variable_size_data_stored = false;
1781 
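      // std::vector::clear() keeps the allocated capacity; the
      // shrink_to_fit() calls below are what actually ask the
      // implementation to release the memory (a non-binding request in
      // the C++ standard, but honored by common implementations).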
1782  // free information about data sizes
1783  sizes_fixed_cumulative.clear();
1784  sizes_fixed_cumulative.shrink_to_fit();
1785 
1786  // free fixed size transfer data
1787  src_data_fixed.clear();
1788  src_data_fixed.shrink_to_fit();
1789 
1790  dest_data_fixed.clear();
1791  dest_data_fixed.shrink_to_fit();
1792 
1793  // free variable size transfer data
1794  src_sizes_variable.clear();
1795  src_sizes_variable.shrink_to_fit();
1796 
1797  src_data_variable.clear();
1798  src_data_variable.shrink_to_fit();
1799 
1800  dest_sizes_variable.clear();
1801  dest_sizes_variable.shrink_to_fit();
1802 
1803  dest_data_variable.clear();
1804  dest_data_variable.shrink_to_fit();
1805  }
1806 
1807 } // end namespace parallel
1808 
1809 
1810 
1811 /*-------------- Explicit Instantiations -------------------------------*/
1812 #include "tria_base.inst"
1813 