Reference documentation for deal.II version 9.3.3
petsc_block_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_petsc_block_vector_h
#define dealii_petsc_block_vector_h


#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_PETSC

#  include <deal.II/lac/block_indices.h>
#  include <deal.II/lac/block_vector_base.h>
#  include <deal.II/lac/exceptions.h>
#  include <deal.II/lac/petsc_vector.h>
#  include <deal.II/lac/vector_type_traits.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  // forward declaration
  class BlockVector;

  namespace MPI
  {
    /**
     * An implementation of block vectors based on the parallel vector class
     * PETScWrappers::MPI::Vector. Each block is itself a distributed PETSc
     * vector.
     */
    class BlockVector : public BlockVectorBase<Vector>
    {
    public:
      /**
       * Typedef the base class for simpler access to its own alias.
       */
      using BaseClass = BlockVectorBase<Vector>;

      /**
       * Typedef the type of the underlying vector.
       */
      using BlockType = BaseClass::BlockType;

      /**
       * Import the alias from the base class.
       */
      using value_type      = BaseClass::value_type;
      using pointer         = BaseClass::pointer;
      using const_pointer   = BaseClass::const_pointer;
      using reference       = BaseClass::reference;
      using const_reference = BaseClass::const_reference;
      using size_type       = BaseClass::size_type;
      using iterator        = BaseClass::iterator;
      using const_iterator  = BaseClass::const_iterator;

      /**
       * Default constructor. Generate an empty vector without any blocks.
       */
      BlockVector() = default;

      /**
       * Constructor. Generate a block vector with n_blocks blocks, each of
       * which is a parallel vector across communicator with block_size
       * elements, of which locally_owned_size elements are stored on the
       * present process.
       */
      explicit BlockVector(const unsigned int n_blocks,
                           const MPI_Comm &   communicator,
                           const size_type    block_size,
                           const size_type    locally_owned_size);

      /**
       * Copy constructor. Set all the properties of the new vector to those
       * of the given argument and copy the elements.
       */
      BlockVector(const BlockVector &V);

      /**
       * Constructor. Set the number of blocks to block_sizes.size() and
       * initialize each block with block_sizes[i] elements, of which
       * local_elements[i] are stored on the present process.
       */
      BlockVector(const std::vector<size_type> &block_sizes,
                  const MPI_Comm &              communicator,
                  const std::vector<size_type> &local_elements);

      /**
       * Create a BlockVector with parallel_partitioning.size() blocks, each
       * initialized with the given IndexSet of locally owned elements.
       */
      explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                           const MPI_Comm &communicator = MPI_COMM_WORLD);

      /**
       * Same as above, but additionally store the given ghost elements.
       */
      BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                  const std::vector<IndexSet> &ghost_indices,
                  const MPI_Comm &             communicator);

      /**
       * Destructor. Clears memory.
       */
      ~BlockVector() override = default;

      /**
       * Copy operator: fill all locally stored components of the vector with
       * the given scalar value.
       */
      BlockVector &
      operator=(const value_type s);

      /**
       * Copy operator for arguments of the same type.
       */
      BlockVector &
      operator=(const BlockVector &V);

      /**
       * Reinitialize the BlockVector to contain n_blocks blocks of size
       * block_size, of which locally_owned_size elements are stored on the
       * present process.
       */
      void
      reinit(const unsigned int n_blocks,
             const MPI_Comm &   communicator,
             const size_type    block_size,
             const size_type    locally_owned_size,
             const bool         omit_zeroing_entries = false);

      /**
       * Reinitialize the BlockVector to contain block_sizes.size() blocks,
       * where block i has block_sizes[i] elements, of which
       * locally_owned_sizes[i] are stored on the present process.
       */
      void
      reinit(const std::vector<size_type> &block_sizes,
             const MPI_Comm &              communicator,
             const std::vector<size_type> &locally_owned_sizes,
             const bool                    omit_zeroing_entries = false);

      /**
       * Change the dimension to that of the vector V, using the same block
       * structure and parallel layout.
       */
      void
      reinit(const BlockVector &V, const bool omit_zeroing_entries = false);

      /**
       * Reinitialize the BlockVector from a vector of IndexSets describing
       * the locally owned elements of each block.
       */
      void
      reinit(const std::vector<IndexSet> &parallel_partitioning,
             const MPI_Comm &             communicator);

      /**
       * Same as above, but additionally store the given ghost entries.
       */
      void
      reinit(const std::vector<IndexSet> &parallel_partitioning,
             const std::vector<IndexSet> &ghost_entries,
             const MPI_Comm &             communicator);

      /**
       * Change the number of blocks to num_blocks. The individual blocks
       * need to be reinitialized afterwards.
       */
      void
      reinit(const unsigned int num_blocks);

      /**
       * Return whether the vector stores ghost elements.
       */
      bool
      has_ghost_elements() const;

      /**
       * Return a reference to the MPI communicator object in use with this
       * vector.
       */
      const MPI_Comm &
      get_mpi_communicator() const;

      /**
       * Swap the contents of this vector and the other vector v.
       */
      void
      swap(BlockVector &v);

      /**
       * Print to a stream.
       */
      void
      print(std::ostream &     out,
            const unsigned int precision  = 3,
            const bool         scientific = true,
            const bool         across     = true) const;

      /**
       * Exception.
       */
      DeclException0(ExcIteratorRangeDoesNotMatchVectorSize);

      /**
       * Exception.
       */
      DeclException0(ExcNonMatchingBlockVectors);
    };


    /*--------------------- Inline functions --------------------------------*/

    inline BlockVector::BlockVector(const unsigned int n_blocks,
                                    const MPI_Comm &   communicator,
                                    const size_type    block_size,
                                    const size_type    locally_owned_size)
    {
      reinit(n_blocks, communicator, block_size, locally_owned_size);
    }



    inline BlockVector::BlockVector(
      const std::vector<size_type> &block_sizes,
      const MPI_Comm &              communicator,
      const std::vector<size_type> &local_elements)
    {
      reinit(block_sizes, communicator, local_elements, false);
    }


    inline BlockVector::BlockVector(const BlockVector &v)
      : BlockVectorBase<Vector>()
    {
      this->components.resize(v.n_blocks());
      this->block_indices = v.block_indices;

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.components[i];
    }

    inline BlockVector::BlockVector(
      const std::vector<IndexSet> &parallel_partitioning,
      const MPI_Comm &             communicator)
    {
      reinit(parallel_partitioning, communicator);
    }

    inline BlockVector::BlockVector(
      const std::vector<IndexSet> &parallel_partitioning,
      const std::vector<IndexSet> &ghost_indices,
      const MPI_Comm &             communicator)
    {
      reinit(parallel_partitioning, ghost_indices, communicator);
    }

    inline BlockVector &
    BlockVector::operator=(const value_type s)
    {
      BaseClass::operator=(s);
      return *this;
    }

    inline BlockVector &
    BlockVector::operator=(const BlockVector &v)
    {
      // we only allow assignment to vectors with the same number of blocks
      // or to an empty BlockVector
      Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
             ExcDimensionMismatch(n_blocks(), v.n_blocks()));

      if (this->n_blocks() != v.n_blocks())
        reinit(v.n_blocks());

      for (size_type i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.block(i);

      collect_sizes();

      return *this;
    }


    inline void
    BlockVector::reinit(const unsigned int n_blocks,
                        const MPI_Comm &   communicator,
                        const size_type    block_size,
                        const size_type    locally_owned_size,
                        const bool         omit_zeroing_entries)
    {
      reinit(std::vector<size_type>(n_blocks, block_size),
             communicator,
             std::vector<size_type>(n_blocks, locally_owned_size),
             omit_zeroing_entries);
    }



    inline void
    BlockVector::reinit(const std::vector<size_type> &block_sizes,
                        const MPI_Comm &              communicator,
                        const std::vector<size_type> &locally_owned_sizes,
                        const bool                    omit_zeroing_entries)
    {
      this->block_indices.reinit(block_sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].reinit(communicator,
                                   block_sizes[i],
                                   locally_owned_sizes[i],
                                   omit_zeroing_entries);
    }


    inline void
    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
    {
      this->block_indices = v.get_block_indices();
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        block(i).reinit(v.block(i), omit_zeroing_entries);
    }

    inline void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const MPI_Comm &             communicator)
    {
      std::vector<size_type> sizes(parallel_partitioning.size());
      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
        sizes[i] = parallel_partitioning[i].size();

      this->block_indices.reinit(sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        block(i).reinit(parallel_partitioning[i], communicator);
    }

    inline void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const std::vector<IndexSet> &ghost_entries,
                        const MPI_Comm &             communicator)
    {
      std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
        sizes[i] = parallel_partitioning[i].size();

      this->block_indices.reinit(sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        block(i).reinit(parallel_partitioning[i],
                        ghost_entries[i],
                        communicator);
    }



    inline const MPI_Comm &
    BlockVector::get_mpi_communicator() const
    {
      return block(0).get_mpi_communicator();
    }

    inline bool
    BlockVector::has_ghost_elements() const
    {
      bool ghosted = block(0).has_ghost_elements();
#  ifdef DEBUG
      // in debug mode, check that all blocks agree on whether they are ghosted
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
#  endif
      return ghosted;
    }


    inline void
    BlockVector::swap(BlockVector &v)
    {
      std::swap(this->components, v.components);

      ::dealii::swap(this->block_indices, v.block_indices);
    }



    inline void
    BlockVector::print(std::ostream &     out,
                       const unsigned int precision,
                       const bool         scientific,
                       const bool         across) const
    {
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        {
          if (across)
            out << 'C' << i << ':';
          else
            out << "Component " << i << std::endl;
          this->components[i].print(out, precision, scientific, across);
        }
    }



    /**
     * Global function which overloads the default implementation of the C++
     * standard library which uses a temporary object. The function simply
     * exchanges the data of the two vectors.
     */
    inline void
    swap(BlockVector &u, BlockVector &v)
    {
      u.swap(v);
    }

  } // namespace MPI

} // namespace PETScWrappers


namespace internal
{
  namespace LinearOperatorImplementation
  {
    template <typename>
    class ReinitHelper;

    /**
     * A helper class used internally in linear_operator.h. Specialization for
     * PETScWrappers::MPI::BlockVector.
     */
    template <>
    class ReinitHelper<PETScWrappers::MPI::BlockVector>
    {
    public:
      template <typename Matrix>
      static void
      reinit_range_vector(const Matrix &                   matrix,
                          PETScWrappers::MPI::BlockVector &v,
                          bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_range_indices(),
                 matrix.get_mpi_communicator());
      }

      template <typename Matrix>
      static void
      reinit_domain_vector(const Matrix &                   matrix,
                           PETScWrappers::MPI::BlockVector &v,
                           bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_domain_indices(),
                 matrix.get_mpi_communicator());
      }
    };

  } // namespace LinearOperatorImplementation
} /* namespace internal */


/**
 * Declare dealii::PETScWrappers::MPI::BlockVector as distributed vector.
 */
template <>
struct is_serial_vector<PETScWrappers::MPI::BlockVector> : std::false_type
{};


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC

#endif
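
As a usage sketch (not part of the header above), a PETScWrappers::MPI::BlockVector is typically set up from one IndexSet per block describing the locally owned elements, as declared by the IndexSet constructor and reinit() overloads listed in the class. The following minimal, hypothetical program uses made-up block sizes (100 and 50) and, for brevity, a single-process partitioning; it assumes deal.II was configured with PETSc and uses Utilities::MPI::MPI_InitFinalize to initialize MPI and PETSc.

// Hypothetical example, not part of petsc_block_vector.h.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_block_vector.h>

#include <iostream>
#include <vector>

int main(int argc, char **argv)
{
  // Initializes MPI and, because deal.II is configured with PETSc, PETSc too.
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // Two blocks with 100 and 50 global elements. For brevity this sketch lets
  // one process own everything; with several ranks each process would add
  // only its own locally owned range to the IndexSets.
  std::vector<dealii::IndexSet> locally_owned(2);
  locally_owned[0] = dealii::IndexSet(100);
  locally_owned[0].add_range(0, 100);
  locally_owned[1] = dealii::IndexSet(50);
  locally_owned[1].add_range(0, 50);

  // One distributed PETSc vector per block, all over MPI_COMM_WORLD.
  dealii::PETScWrappers::MPI::BlockVector v(locally_owned, MPI_COMM_WORLD);

  // operator=(value_type) sets every locally stored element.
  v = 1.0;

  // Print the blocks; by default values appear in scientific notation.
  v.print(std::cout);

  return 0;
}

A ghosted vector would instead be created with the constructor (or reinit() overload) that takes an additional vector of IndexSets holding the ghost indices of each block.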