Reference documentation for deal.II version GIT relicensing-218-g1f873f3929 2024-03-28 15:00:02+00:00
mg_level_global_transfer.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2016 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/distributed/tria_base.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/fe/fe.h>

#include <deal.II/grid/tria.h>

#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector.h>

#include <deal.II/multigrid/mg_transfer.h>
#include <deal.II/multigrid/mg_transfer.templates.h>
#include <deal.II/multigrid/mg_transfer_internal.h>

#include <algorithm>

DEAL_II_NAMESPACE_OPEN


/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */

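// Compute the index pairs that connect the global degrees of freedom with the
// degrees of freedom on each multigrid level. Each pair stores the global DoF
// index first and the level DoF index second; copy_indices holds pairs where
// both indices are locally owned, copy_indices_global_mine pairs where only
// the global index is locally owned, and copy_indices_level_mine pairs where
// only the level index is locally owned. Finally, determine whether copying
// between the global vector and the finest level can be done as a plain
// entry-by-entry copy.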
template <typename VectorType>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices(
  const DoFHandler<dim, spacedim> &mg_dof)
{
  internal::MGTransfer::fill_copy_indices(mg_dof,
                                          mg_constrained_dofs,
                                          copy_indices,
                                          copy_indices_global_mine,
                                          copy_indices_level_mine);

  // check if we can run a plain copy operation between the global DoFs and
  // the finest level.
  bool my_perform_plain_copy =
    (copy_indices.back().size() == mg_dof.locally_owned_dofs().n_elements()) &&
    (mg_dof.locally_owned_dofs().n_elements() ==
     mg_dof
       .locally_owned_mg_dofs(mg_dof.get_triangulation().n_global_levels() - 1)
       .n_elements());
  if (my_perform_plain_copy)
    {
      AssertDimension(copy_indices_global_mine.back().size(), 0);
      AssertDimension(copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (const auto &copy_index : copy_indices.back())
        if (copy_index.first != copy_index.second)
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  if (const parallel::TriangulationBase<dim, spacedim> *ptria =
        dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
          &mg_dof.get_triangulation()))
    perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
                                              ptria->get_communicator()) == 1);
  else
    perform_plain_copy = my_perform_plain_copy;
}
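
// For illustration only (not part of this file): this function is typically
// reached through the public transfer classes rather than called directly.
// A minimal sketch, assuming a DoFHandler `dof_handler` on which level DoFs
// have been enumerated with distribute_mg_dofs():
//
//   MGTransferPrebuilt<Vector<double>> transfer;
//   transfer.build(dof_handler); // sets up the copy indices used by
//                                // copy_to_mg()/copy_from_mg() and the
//                                // level prolongation matrices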



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  perform_plain_copy = false;
}



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::print_indices(std::ostream &os) const
{
  for (unsigned int level = 0; level < copy_indices.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices[level].size(); ++i)
        os << "copy_indices[" << level << "]\t" << copy_indices[level][i].first
           << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom [" << level << "]\t"
           << copy_indices_level_mine[level][i].first << '\t'
           << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito [" << level << "]\t"
           << copy_indices_global_mine[level][i].first << '\t'
           << copy_indices_global_mine[level][i].second << std::endl;
    }
}



template <typename VectorType>
std::size_t
MGLevelGlobalTransfer<VectorType>::memory_consumption() const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);

  return result;
}



/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */

namespace
{
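  // Helper for the LinearAlgebra::distributed::Vector specialization below:
  // first compute the copy indices as pairs of global indices, then set up
  // ghosted vectors whose ghost entries cover exactly the remote indices
  // those pairs refer to, and finally translate the pairs into local indices
  // of these ghosted vectors for faster access in copy_to_mg()/copy_from_mg().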
  template <int dim, int spacedim, typename Number>
  void
  fill_internal(
    const DoFHandler<dim, spacedim> &mg_dof,
    SmartPointer<const MGConstrainedDoFs> mg_constrained_dofs,
    const MPI_Comm mpi_communicator,
    const bool transfer_solution_vectors,
    std::vector<Table<2, unsigned int>> &copy_indices,
    std::vector<Table<2, unsigned int>> &copy_indices_global_mine,
    std::vector<Table<2, unsigned int>> &copy_indices_level_mine,
    LinearAlgebra::distributed::Vector<Number> &ghosted_global_vector,
    MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
      &ghosted_level_vector)
  {
    // first go to the usual routine...
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices;
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices_global_mine;
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices_level_mine;

    internal::MGTransfer::fill_copy_indices(mg_dof,
                                            mg_constrained_dofs,
                                            my_copy_indices,
                                            my_copy_indices_global_mine,
                                            my_copy_indices_level_mine,
                                            !transfer_solution_vectors);

    // get all degrees of freedom that we need read access to in copy_to_mg
    // and copy_from_mg, respectively. We fill an IndexSet once on each level
    // (for the global_mine indices accessing remote level indices) and once
    // globally (for the level_mine indices accessing remote global indices).

    // the variables index_set and level_index_set are going to define the
    // ghost indices of the respective vectors (due to construction, these are
    // precisely the indices that we need)

    IndexSet index_set(mg_dof.locally_owned_dofs().size());
    std::vector<types::global_dof_index> accessed_indices;
    ghosted_level_vector.resize(0,
                                mg_dof.get_triangulation().n_global_levels() -
                                  1);
    std::vector<IndexSet> level_index_set(
      mg_dof.get_triangulation().n_global_levels());
    for (unsigned int l = 0; l < mg_dof.get_triangulation().n_global_levels();
         ++l)
      {
        for (const auto &indices : my_copy_indices_level_mine[l])
          accessed_indices.push_back(indices.first);
        std::vector<types::global_dof_index> accessed_level_indices;
        for (const auto &indices : my_copy_indices_global_mine[l])
          accessed_level_indices.push_back(indices.second);
        std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
        level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
        level_index_set[l].add_indices(accessed_level_indices.begin(),
                                       accessed_level_indices.end());
        level_index_set[l].compress();
        ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
                                       level_index_set[l],
                                       mpi_communicator);
      }
    std::sort(accessed_indices.begin(), accessed_indices.end());
    index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
    index_set.compress();
    ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
                                 index_set,
                                 mpi_communicator);

    // localize the copy indices for faster access. Since all access will be
    // through the ghosted vector in 'data', we can use this (much faster)
    // option
    copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
    copy_indices_level_mine.resize(
      mg_dof.get_triangulation().n_global_levels());
    copy_indices_global_mine.resize(
      mg_dof.get_triangulation().n_global_levels());
    for (unsigned int level = 0;
         level < mg_dof.get_triangulation().n_global_levels();
         ++level)
      {
        const Utilities::MPI::Partitioner &global_partitioner =
          *ghosted_global_vector.get_partitioner();
        const Utilities::MPI::Partitioner &level_partitioner =
          *ghosted_level_vector[level].get_partitioner();

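        // Each column of the Table<2, unsigned int> objects stores one index
        // pair in local numbering: row 0 is the local index within the
        // ghosted global vector, row 1 the local index within the ghosted
        // level vector, both obtained via Partitioner::global_to_local().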
        auto translate_indices =
          [&](const std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>
                &global_copy_indices,
              Table<2, unsigned int> &local_copy_indices) {
            local_copy_indices.reinit(2, global_copy_indices.size());
            for (unsigned int i = 0; i < global_copy_indices.size(); ++i)
              {
                local_copy_indices(0, i) = global_partitioner.global_to_local(
                  global_copy_indices[i].first);
                local_copy_indices(1, i) = level_partitioner.global_to_local(
                  global_copy_indices[i].second);
              }
          };

        // owned-owned case
        translate_indices(my_copy_indices[level], copy_indices[level]);

        // remote-owned case
        translate_indices(my_copy_indices_level_mine[level],
                          copy_indices_level_mine[level]);

        // owned-remote case
        translate_indices(my_copy_indices_global_mine[level],
                          copy_indices_global_mine[level]);
      }
  }
} // namespace
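
// For illustration only (not part of this file): the ghosted vectors created
// in fill_internal() follow the standard deal.II pattern for
// LinearAlgebra::distributed::Vector, sketched here with assumed objects
// `locally_owned` and `ghost_indices` (IndexSet) and a communicator `comm`:
//
//   LinearAlgebra::distributed::Vector<double> v;
//   v.reinit(locally_owned, ghost_indices, comm); // owned plus ghost layout
//   v.update_ghost_values();                      // make ghost entries valid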

template <typename Number>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
{
  const MPI_Comm mpi_communicator = mg_dof.get_communicator();

  fill_internal(mg_dof,
                mg_constrained_dofs,
                mpi_communicator,
                false,
                this->copy_indices,
                this->copy_indices_global_mine,
                this->copy_indices_level_mine,
                ghosted_global_vector,
                ghosted_level_vector);

  // in case we have hanging nodes which imply different ownership of
  // global and level dof indices, we must also fill the solution indices
  // which contain additional indices, otherwise they can re-use the
  // indices of the standard transfer.
  int have_refinement_edge_dofs = 0;
  if (mg_constrained_dofs != nullptr)
    for (unsigned int level = 0;
         level < mg_dof.get_triangulation().n_global_levels();
         ++level)
      if (mg_constrained_dofs->get_refinement_edge_indices(level).n_elements() >
          0)
        {
          have_refinement_edge_dofs = 1;
          break;
        }
  if (Utilities::MPI::max(have_refinement_edge_dofs, mpi_communicator) == 1)
    {
      // note: variables not needed
      std::vector<Table<2, unsigned int>> solution_copy_indices_global_mine;
      MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
        solution_ghosted_level_vector;

      fill_internal(mg_dof,
                    mg_constrained_dofs,
                    mpi_communicator,
                    true,
                    this->solution_copy_indices,
                    solution_copy_indices_global_mine,
                    this->solution_copy_indices_level_mine,
                    solution_ghosted_global_vector,
                    solution_ghosted_level_vector);
    }
  else
    {
      this->solution_copy_indices = this->copy_indices;
      this->solution_copy_indices_level_mine = this->copy_indices_level_mine;
      solution_ghosted_global_vector = ghosted_global_vector;
    }

  // Check if we can perform a cheaper "plain copy" (with or without
  // renumbering) instead of having to translate individual entries
  // using copy_indices*. This only works if a) we don't have to send
  // or receive any DoFs and we have all locally owned DoFs in our
  // copy_indices (so no adaptive refinement) and b) all processors
  // agree on the choice (see below).
  const bool my_perform_renumbered_plain_copy =
    (this->copy_indices.back().n_cols() ==
     mg_dof.locally_owned_dofs().n_elements()) &&
    (this->copy_indices_global_mine.back().n_rows() == 0) &&
    (this->copy_indices_level_mine.back().n_rows() == 0);

  bool my_perform_plain_copy = false;
  if (my_perform_renumbered_plain_copy)
    {
      my_perform_plain_copy = true;
      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i = 0; i < this->copy_indices.back().n_cols(); ++i)
        if (this->copy_indices.back()(0, i) != this->copy_indices.back()(1, i))
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  perform_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_plain_copy),
                        mpi_communicator);
  perform_renumbered_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_renumbered_plain_copy),
                        mpi_communicator);

  // if we do a plain copy, no need to hold additional ghosted vectors
  if (perform_renumbered_plain_copy)
    {
      for (unsigned int i = 0; i < this->copy_indices.back().n_cols(); ++i)
        AssertDimension(this->copy_indices.back()(0, i), i);

      ghosted_global_vector.reinit(0);
      ghosted_level_vector.resize(0, 0);
      solution_ghosted_global_vector.reinit(0);
    }
}
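
// After this setup, vector transfer between the global vector and the level
// vectors can take one of three routes: a plain copy (global and finest-level
// numbering coincide), a renumbered plain copy (the same DoFs are locally
// owned on both sides, but in a different order, so only a local permutation
// is needed), or the general path that exchanges remote entries through the
// ghosted vectors using the copy_indices tables set up above.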



template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  solution_copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  solution_copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  ghosted_global_vector.reinit(0);
  solution_ghosted_global_vector.reinit(0);
  ghosted_level_vector.resize(0, 0);
  perform_plain_copy = false;
  perform_renumbered_plain_copy = false;
  initialize_dof_vector = nullptr;
}



template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  print_indices(std::ostream &os) const
{
  for (unsigned int level = 0; level < copy_indices.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices[level].n_cols(); ++i)
        os << "copy_indices[" << level << "]\t" << copy_indices[level](0, i)
           << '\t' << copy_indices[level](1, i) << std::endl;
    }

  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_level_mine[level].n_cols(); ++i)
        os << "copy_ifrom [" << level << "]\t"
           << copy_indices_level_mine[level](0, i) << '\t'
           << copy_indices_level_mine[level](1, i) << std::endl;
    }
  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_global_mine[level].n_cols();
           ++i)
        os << "copy_ito [" << level << "]\t"
           << copy_indices_global_mine[level](0, i) << '\t'
           << copy_indices_global_mine[level](1, i) << std::endl;
    }
}



template <typename Number>
std::size_t
MGLevelGlobalTransfer<
  LinearAlgebra::distributed::Vector<Number>>::memory_consumption() const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
  result += ghosted_global_vector.memory_consumption();
  for (unsigned int i = ghosted_level_vector.min_level();
       i <= ghosted_level_vector.max_level();
       ++i)
    result += ghosted_level_vector[i].memory_consumption();

  return result;
}


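// For illustration only (not part of this file): a minimal usage sketch for
// the LinearAlgebra::distributed::Vector specialization, assuming an existing
// `dim` constant, a DoFHandler `dof_handler` with level DoFs distributed, and
// a global vector `solution`:
//
//   MGConstrainedDoFs mg_constrained_dofs;
//   mg_constrained_dofs.initialize(dof_handler);
//
//   MGTransferMatrixFree<dim, double> transfer(mg_constrained_dofs);
//   transfer.build(dof_handler); // sets up the copy indices described above
//
//   MGLevelObject<LinearAlgebra::distributed::Vector<double>> level_vectors(
//     0, dof_handler.get_triangulation().n_global_levels() - 1);
//   transfer.copy_to_mg(dof_handler, level_vectors, solution);
//   // ... work on the levels ...
//   transfer.copy_from_mg(dof_handler, solution, level_vectors);
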
// explicit instantiation
#include "mg_level_global_transfer.inst"

// create an additional instantiation currently not supported by the automatic
// template instantiation scheme
template class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<float>>;

DEAL_II_NAMESPACE_CLOSE