Reference documentation for deal.II version 9.5.0
fully_distributed_tria.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2019 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.h>

#include <deal.II/distributed/fully_distributed_tria.h>

#include <deal.II/grid/grid_tools.h>

#include <fstream>
#include <memory>

DEAL_II_NAMESPACE_OPEN

// Forward declarations
namespace GridGenerator
{
  template <int dim, int spacedim>
  void
  hyper_cube(Triangulation<dim, spacedim> &tria,
             const double                  left,
             const double                  right,
             const bool                    colorize);
} // namespace GridGenerator

namespace parallel
{
  namespace fullydistributed
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    Triangulation<dim, spacedim>::Triangulation(const MPI_Comm mpi_communicator)
      : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
      , settings(TriangulationDescription::Settings::default_setting)
      , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
                       const unsigned int                    n_partitions) {
          GridTools::partition_triangulation_zorder(n_partitions, tria);
        })
      , currently_processing_create_triangulation_for_internal_usage(false)
      , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
          false)
    {}


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      // check that the communicator of this parallel triangulation is the one
      // that was used to construct the TriangulationDescription::Description
      Assert(construction_data.comm == this->mpi_communicator,
             ExcMessage("MPI communicators do not match!"));

      // store the settings internally
      settings = construction_data.settings;

      // set the smoothing properties
      if (settings &
          TriangulationDescription::Settings::construct_multigrid_hierarchy)
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none |
            Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
      else
        this->set_mesh_smoothing(
          static_cast<
            typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
            dealii::Triangulation<dim, spacedim>::none));

      this->set_mesh_smoothing(construction_data.smoothing);

      // clear internal data structures
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // check if no locally relevant coarse-grid cells have been provided
      if (construction_data.coarse_cell_vertices.empty())
        {
          // 1) create a dummy hypercube
          currently_processing_create_triangulation_for_internal_usage = true;
          GridGenerator::hyper_cube(*this, 0, 1, false);
          currently_processing_create_triangulation_for_internal_usage = false;

          // 2) mark the cell as artificial
          auto cell = this->begin();
          cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
          cell->set_level_subdomain_id(
            dealii::numbers::artificial_subdomain_id);

          // 3) set up a dummy mapping between locally relevant coarse-grid
          //    cells and global cells
          this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(
            numbers::invalid_coarse_cell_id, 0);
          this->coarse_cell_index_to_coarse_cell_id_vector.emplace_back(
            numbers::invalid_coarse_cell_id);
        }
      else
        {
          // 1) store the `coarse-cell index to coarse-cell id` mapping
          this->coarse_cell_index_to_coarse_cell_id_vector =
            construction_data.coarse_cell_index_to_coarse_cell_id;

          // 2) set up the inverse `coarse-cell id to coarse-cell index`
          //    mapping
          std::map<types::coarse_cell_id, unsigned int>
            coarse_cell_id_to_coarse_cell_index_vector;
          for (unsigned int i = 0;
               i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
               ++i)
            coarse_cell_id_to_coarse_cell_index_vector
              [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;

          for (auto i : coarse_cell_id_to_coarse_cell_index_vector)
            this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);

          // 3) create the locally relevant part of the triangulation
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            true;
          currently_processing_create_triangulation_for_internal_usage = true;
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            construction_data);
          currently_processing_prepare_coarsening_and_refinement_for_internal_usage =
            false;
          currently_processing_create_triangulation_for_internal_usage = false;

          // create a copy of cell_infos such that we can sort them
          auto cell_infos = construction_data.cell_infos;

          // sort cell_infos on each level separately (as done in
          // dealii::Triangulation::create_triangulation())
          for (auto &cell_info : cell_infos)
            std::sort(cell_info.begin(),
                      cell_info.end(),
                      [&](TriangulationDescription::CellData<dim> a,
                          TriangulationDescription::CellData<dim> b) {
                        const CellId a_id(a.id);
                        const CellId b_id(b.id);

                        const auto a_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            a_id.get_coarse_cell_id());
                        const auto b_coarse_cell_index =
                          this->coarse_cell_id_to_coarse_cell_index(
                            b_id.get_coarse_cell_id());

                        // sort according to the coarse-cell index and, if that
                        // is the same, according to the cell id (the result is
                        // that the cells on each level are sorted according to
                        // their index on that level - which is what we need in
                        // the following operations)
                        if (a_coarse_cell_index != b_coarse_cell_index)
                          return a_coarse_cell_index < b_coarse_cell_index;
                        else
                          return a_id < b_id;
                      });

          // 4a) set all cells artificial (the actual (level_)subdomain_ids
          //     are set in the next step)
          for (const auto &cell : this->cell_iterators())
            {
              if (cell->is_active())
                cell->set_subdomain_id(
                  dealii::numbers::artificial_subdomain_id);

              cell->set_level_subdomain_id(
                dealii::numbers::artificial_subdomain_id);
            }

          // 4b) set actual (level_)subdomain_ids
          for (unsigned int level = 0;
               level < cell_infos.size() && !cell_infos[level].empty();
               ++level)
            {
              auto cell      = this->begin(level);
              auto cell_info = cell_infos[level].begin();
              for (; cell_info != cell_infos[level].end(); ++cell_info)
                {
                  // find the cell that has the correct id
                  while (cell_info->id != cell->id().template to_binary<dim>())
                    ++cell;

                  // subdomain id
                  if (cell->is_active())
                    cell->set_subdomain_id(cell_info->subdomain_id);

                  // level subdomain id
                  if (settings & TriangulationDescription::Settings::
                                   construct_multigrid_hierarchy)
                    cell->set_level_subdomain_id(cell_info->level_subdomain_id);
                }
            }
        }

      this->update_number_cache();
      this->update_cell_relations();
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>> &      vertices,
      const std::vector<dealii::CellData<dim>> &cells,
      const SubCellData &                       subcelldata)
    {
      Assert(
        currently_processing_create_triangulation_for_internal_usage,
        ExcMessage(
          "You have called the method parallel::fullydistributed::Triangulation::create_triangulation() \n"
          "that takes 3 arguments. If you have not called this function directly, \n"
          "it might have been called via a function from the GridGenerator or GridIn \n"
          "namespace. To be able to set up a fully-distributed Triangulation with these \n"
          "utility functions nevertheless, please follow these three steps:\n"
          "  1) call the utility function for a (serial) Triangulation, \n"
          "     a parallel::shared::Triangulation, or a parallel::distributed::Triangulation object,\n"
          "  2) use the functions TriangulationDescription::Utilities::create_description_from_triangulation() \n"
          "     or ::create_description_from_triangulation_in_groups() to create the \n"
          "     description of the local partition, and\n"
          "  3) pass the created description to parallel::fullydistributed::Triangulation::create_triangulation()."));

      dealii::Triangulation<dim, spacedim>::create_triangulation(vertices,
                                                                 cells,
                                                                 subcelldata);
    }
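
    // Editor's sketch (not part of the library source): the three steps from
    // the message above might look as follows, where `dim` and `comm` are
    // placeholders for the caller's dimension and MPI communicator:
    //
    //   // 1) create and partition a serial triangulation
    //   dealii::Triangulation<dim> serial_tria;
    //   GridGenerator::hyper_cube(serial_tria);
    //   serial_tria.refine_global(3);
    //   GridTools::partition_triangulation(
    //     Utilities::MPI::n_mpi_processes(comm), serial_tria);
    //
    //   // 2) create the description of the local partition
    //   const auto description = TriangulationDescription::Utilities::
    //     create_description_from_triangulation(serial_tria, comm);
    //
    //   // 3) pass the description to the fully-distributed triangulation
    //   parallel::fullydistributed::Triangulation<dim> tria(comm);
    //   tria.create_triangulation(description);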



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      // pointer to the triangulation for which the construction data
      // should be created (normally this is the input triangulation, but
      // in the case of a serial triangulation we create a copy that is
      // used instead)
      const dealii::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;

      // temporary serial triangulation (needed since the input triangulation
      // is const and we might modify its subdomain_ids and
      // level_subdomain_ids during partitioning)
      dealii::Triangulation<dim, spacedim> serial_tria;

      // check whether the other triangulation is a serial one, in which case
      // it needs to be partitioned first
      if (dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim>
                         *>(&other_tria) == nullptr)
        {
          // actually copy the serial triangulation
          serial_tria.copy_triangulation(other_tria);

          // partition triangulation
          this->partitioner(serial_tria,
                            Utilities::MPI::n_mpi_processes(
                              this->mpi_communicator));

          // partition multigrid levels
          if (this->is_multilevel_hierarchy_constructed())
            GridTools::partition_multigrid_levels(serial_tria);

          // use the new serial triangulation to create the construction data
          other_tria_ptr = &serial_tria;
        }

      // create construction data
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(*other_tria_ptr,
                                              this->mpi_communicator,
                                              this->settings);

      // finally create triangulation
      this->create_triangulation(construction_data);
    }
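
    // Editor's sketch (illustrative only; `comm` is a placeholder): the
    // one-call alternative to the manual three-step workflow above.
    // copy_triangulation() partitions a serial mesh with the stored
    // partitioner and builds the construction data internally:
    //
    //   dealii::Triangulation<dim> serial_tria;
    //   GridGenerator::hyper_cube(serial_tria);
    //   serial_tria.refine_global(3);
    //
    //   parallel::fullydistributed::Triangulation<dim> tria(comm);
    //   tria.copy_triangulation(serial_tria);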



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const std::function<void(dealii::Triangulation<dim, spacedim> &,
                               const unsigned int)> &partitioner,
      const TriangulationDescription::Settings &     settings)
    {
      this->partitioner = partitioner;
      this->settings    = settings;
    }
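
    // Editor's sketch (illustrative; `tria` is an existing
    // parallel::fullydistributed::Triangulation<dim>): a user-provided
    // partitioning function that replaces the default z-order partitioner:
    //
    //   tria.set_partitioner(
    //     [](dealii::Triangulation<dim, dim> &tria_to_partition,
    //        const unsigned int               n_partitions) {
    //       GridTools::partition_triangulation(n_partitions,
    //                                          tria_to_partition);
    //     },
    //     TriangulationDescription::Settings::default_setting);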



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::set_partitioner(
      const RepartitioningPolicyTools::Base<dim, spacedim> &partitioner,
      const TriangulationDescription::Settings &            settings)
    {
      this->partitioner_distributed = &partitioner;
      this->settings                = settings;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::repartition()
    {
      // signal that repartitioning has started
      this->signals.pre_distributed_repartition();

      // create construction_data with the help of the partitioner
      const auto construction_data = TriangulationDescription::Utilities::
        create_description_from_triangulation(
          *this,
          this->partitioner_distributed->partition(*this),
          this->settings);

      // clear the old content
      this->clear();
      this->coarse_cell_id_to_coarse_cell_index_vector.clear();
      this->coarse_cell_index_to_coarse_cell_id_vector.clear();

      // use construction_data to set up the new triangulation
      this->create_triangulation(construction_data);

      // signal that repartitioning has completed
      this->signals.post_distributed_repartition();
    }
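
    // Editor's sketch (illustrative; the choice of DefaultPolicy is just an
    // example): pairing a repartitioning policy with repartition(). Note that
    // set_partitioner() stores only a pointer to the policy, so the policy
    // object has to outlive its use by the triangulation:
    //
    //   RepartitioningPolicyTools::DefaultPolicy<dim> policy;
    //   tria.set_partitioner(
    //     policy, TriangulationDescription::Settings::default_setting);
    //   tria.repartition();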



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      Assert(false, ExcNotImplemented());
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
    {
      Assert(
        currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
        ExcMessage("No coarsening and refinement is supported!"));

      return dealii::Triangulation<dim, spacedim>::
        prepare_coarsening_and_refinement();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    std::size_t Triangulation<dim, spacedim>::memory_consumption() const
    {
      const std::size_t mem =
        this->dealii::parallel::TriangulationBase<dim, spacedim>::
          memory_consumption() +
        MemoryConsumption::memory_consumption(
          coarse_cell_id_to_coarse_cell_index_vector) +
        MemoryConsumption::memory_consumption(
          coarse_cell_index_to_coarse_cell_id_vector);
      return mem;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return (
        settings &
        TriangulationDescription::Settings::construct_multigrid_hierarchy);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    unsigned int Triangulation<dim, spacedim>::
      coarse_cell_id_to_coarse_cell_index(
        const types::coarse_cell_id coarse_cell_id) const
    {
      const auto coarse_cell_index = std::lower_bound(
        coarse_cell_id_to_coarse_cell_index_vector.begin(),
        coarse_cell_id_to_coarse_cell_index_vector.end(),
        coarse_cell_id,
        [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
           const types::coarse_cell_id &val) { return pair.first < val; });
      Assert(coarse_cell_index !=
               coarse_cell_id_to_coarse_cell_index_vector.cend(),
             ExcMessage("Coarse cell index not found!"));
      return coarse_cell_index->second;
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    types::coarse_cell_id Triangulation<dim, spacedim>::
      coarse_cell_index_to_coarse_cell_id(
        const unsigned int coarse_cell_index) const
    {
      AssertIndexRange(coarse_cell_index,
                       coarse_cell_index_to_coarse_cell_id_vector.size());

      const auto coarse_cell_id =
        coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
      Assert(coarse_cell_id != numbers::invalid_coarse_cell_id,
             ExcMessage("You are trying to access a dummy cell!"));
      return coarse_cell_id;
    }

    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_cell_relations()
    {
      // Reorganize memory for local_cell_relations.
      this->local_cell_relations.clear();
      this->local_cell_relations.reserve(this->n_locally_owned_active_cells());

      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_locally_owned())
          this->local_cell_relations.emplace_back(
            cell, Triangulation<dim, spacedim>::CELL_PERSIST);
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::save(const std::string &filename) const
    {
#ifdef DEAL_II_WITH_MPI
      AssertThrow(this->cell_attached_data.pack_callbacks_variable.size() == 0,
                  ExcNotImplemented());

      Assert(
        this->cell_attached_data.n_attached_deserialize == 0,
        ExcMessage(
          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
      Assert(this->n_cells() > 0,
             ExcMessage("Can not save() an empty Triangulation."));

      const int myrank =
        Utilities::MPI::this_mpi_process(this->mpi_communicator);
      const int mpisize =
        Utilities::MPI::n_mpi_processes(this->mpi_communicator);

      // Compute the global offset for each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      if (myrank == 0)
        {
          std::string   fname = std::string(filename) + ".info";
          std::ofstream f(fname.c_str());
          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
            << std::endl
            << 4 << " "
            << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
            << this->cell_attached_data.pack_callbacks_fixed.size() << " "
            << this->cell_attached_data.pack_callbacks_variable.size() << " "
            << this->n_global_active_cells() << std::endl;
        }

      // Save cell attached data.
      this->save_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               filename);

      // Save the triangulation description.
      {
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        // Open the file.
        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_File_set_size(fh, 0); // delete the file contents
        AssertThrowMPI(ierr);
        // this barrier is necessary because otherwise other ranks might
        // already write while one rank is still setting the size to zero
        ierr = MPI_Barrier(this->mpi_communicator);
        AssertThrowMPI(ierr);
        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);
        // ------------------

        // Create construction data.
        const auto construction_data = TriangulationDescription::Utilities::
          create_description_from_triangulation(*this,
                                                this->mpi_communicator,
                                                this->settings);

        // Pack.
        std::vector<char> buffer;
        dealii::Utilities::pack(construction_data, buffer, false);

        // Compute the offset of this rank's buffer within the file.
        const std::uint64_t buffer_size = buffer.size();

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // Write the buffer sizes to the file.
        ierr = MPI_File_write_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        // global position in the file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Write the buffers to the file.
        ierr = Utilities::MPI::LargeCount::File_write_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);
      }
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename)
    {
#ifdef DEAL_II_WITH_MPI
      Assert(this->n_cells() == 0,
             ExcMessage("load() only works if the Triangulation is empty!"));

      // Compute the global offset for each rank.
      unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();

      unsigned int global_first_cell = 0;

      int ierr = MPI_Exscan(&n_locally_owned_cells,
                            &global_first_cell,
                            1,
                            MPI_UNSIGNED,
                            MPI_SUM,
                            this->mpi_communicator);
      AssertThrowMPI(ierr);

      global_first_cell *= sizeof(unsigned int);


      unsigned int version, numcpus, attached_count_fixed,
        attached_count_variable, n_global_active_cells;
      {
        std::string   fname = std::string(filename) + ".info";
        std::ifstream f(fname.c_str());
        AssertThrow(f.fail() == false, ExcIO());
        std::string firstline;
        getline(f, firstline); // skip the first line
        f >> version >> numcpus >> attached_count_fixed >>
          attached_count_variable >> n_global_active_cells;
      }

      AssertThrow(version == 4,
                  ExcMessage("Incompatible version found in .info file."));
      Assert(this->n_global_active_cells() == n_global_active_cells,
             ExcMessage("Number of global active cells differ!"));

      // Load the description and construct the triangulation.
      {
        const int myrank =
          Utilities::MPI::this_mpi_process(this->mpi_communicator);
        const int mpisize =
          Utilities::MPI::n_mpi_processes(this->mpi_communicator);

        AssertDimension(numcpus, mpisize);

        // Open the file.
        MPI_Info info;
        int      ierr = MPI_Info_create(&info);
        AssertThrowMPI(ierr);

        const std::string fname_tria = filename + "_triangulation.data";

        MPI_File fh;
        ierr = MPI_File_open(this->mpi_communicator,
                             fname_tria.c_str(),
                             MPI_MODE_RDONLY,
                             info,
                             &fh);
        AssertThrowMPI(ierr);

        ierr = MPI_Info_free(&info);
        AssertThrowMPI(ierr);

        // Read the buffer sizes from the file.
        std::uint64_t buffer_size;

        ierr = MPI_File_read_at(
          fh,
          myrank * sizeof(std::uint64_t),
          &buffer_size,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        std::uint64_t offset = 0;

        ierr = MPI_Exscan(
          &buffer_size,
          &offset,
          1,
          Utilities::MPI::mpi_type_id_for_type<decltype(buffer_size)>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // global position in the file
        const std::uint64_t global_position =
          mpisize * sizeof(std::uint64_t) + offset;

        // Read the buffers from the file.
        std::vector<char> buffer(buffer_size);
        ierr = Utilities::MPI::LargeCount::File_read_at_c(
          fh,
          global_position,
          buffer.data(),
          buffer.size(), // local buffer
          MPI_CHAR,
          MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        ierr = MPI_File_close(&fh);
        AssertThrowMPI(ierr);

        auto construction_data = dealii::Utilities::template unpack<
          TriangulationDescription::Description<dim, spacedim>>(buffer, false);

        // WARNING: serialization cannot handle the MPI communicator,
        // which is the reason why we have to set it here explicitly
        construction_data.comm = this->mpi_communicator;

        this->create_triangulation(construction_data);
      }

      // clear all of the callback data, as explained in the documentation of
      // register_data_attach()
      this->cell_attached_data.n_attached_data_sets = 0;
      this->cell_attached_data.n_attached_deserialize =
        attached_count_fixed + attached_count_variable;

      // Load attached cell data, if any was stored.
      this->load_attached_data(global_first_cell,
                               this->n_global_active_cells(),
                               this->n_locally_owned_active_cells(),
                               filename,
                               attached_count_fixed,
                               attached_count_variable);

      this->update_cell_relations();
      this->update_periodic_face_map();
      this->update_number_cache();
#else
      (void)filename;

      AssertThrow(false, ExcNeedsMPI());
#endif
    }
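
    // Editor's sketch of a save/load round trip (illustrative; `comm` is a
    // placeholder). With the base name "checkpoint", the files written are
    // `checkpoint.info` and `checkpoint_triangulation.data`; as the
    // AssertDimension(numcpus, mpisize) above enforces, load() has to be
    // called with the same number of MPI processes used for save():
    //
    //   tria.save("checkpoint");
    //
    //   parallel::fullydistributed::Triangulation<dim> tria_loaded(comm);
    //   tria_loaded.load("checkpoint");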



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::load(const std::string &filename,
                                            const bool         autopartition)
    {
      (void)autopartition;
      load(filename);
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::update_number_cache()
    {
      parallel::TriangulationBase<dim, spacedim>::update_number_cache();

      // additionally update the number of global coarse cells
      types::coarse_cell_id number_of_global_coarse_cells = 0;

      for (const auto &cell : this->active_cell_iterators())
        if (!cell->is_artificial())
          number_of_global_coarse_cells =
            std::max(number_of_global_coarse_cells,
                     cell->id().get_coarse_cell_id());

      number_of_global_coarse_cells =
        Utilities::MPI::max(number_of_global_coarse_cells,
                            this->mpi_communicator) +
        1;

      this->number_cache.number_of_global_coarse_cells =
        number_of_global_coarse_cells;
    }


  } // namespace fullydistributed
} // namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/
#include "fully_distributed_tria.inst"

DEAL_II_NAMESPACE_CLOSE