petsc_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_petsc_vector_h
#define dealii_petsc_vector_h


#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_PETSC

# include <deal.II/base/index_set.h>

# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_vector_base.h>
# include <deal.II/lac/vector.h>
# include <deal.II/lac/vector_operation.h>
# include <deal.II/lac/vector_type_traits.h>

DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    /**
     * Implementation of a parallel vector class based on PETSc, with the
     * vector distributed across an MPI communicator.
     */
    class Vector : public VectorBase
    {
    public:
      /**
       * Declare type for container size.
       */
      using size_type = types::global_dof_index;

      /**
       * Default constructor. Initialize the vector as empty.
       */
      Vector();

      /**
       * Constructor. Create a vector of global size @p n across the given
       * communicator, of which @p locally_owned_size elements are stored on
       * the present process.
       */
      explicit Vector(const MPI_Comm &communicator,
                      const size_type n,
                      const size_type locally_owned_size);

      /**
       * Constructor that copies the values of the deal.II vector @p v into a
       * parallel vector of the same global size, of which
       * @p locally_owned_size elements are stored on the present process.
       */
      template <typename Number>
      explicit Vector(const MPI_Comm &              communicator,
                      const dealii::Vector<Number> &v,
                      const size_type               locally_owned_size);

      /**
       * Construct a new parallel ghosted PETSc vector from IndexSets of the
       * locally owned and ghost elements.
       */
      Vector(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm &communicator);

      /**
       * Construct a new parallel PETSc vector without ghost elements from an
       * IndexSet of the locally owned elements.
       */
      explicit Vector(const IndexSet &local, const MPI_Comm &communicator);

      /**
       * Copy constructor.
       */
      Vector(const Vector &v);

      /**
       * Release all memory and return to a state just like after having
       * called the default constructor.
       */
      virtual void
      clear() override;

      /**
       * Copy the given vector. Resize the present vector if necessary.
       */
      Vector &
      operator=(const Vector &v);

      /**
       * Set all components of the vector to the scalar value @p s.
       */
      Vector &
      operator=(const PetscScalar s);

      /**
       * Copy the values of a deal.II vector into this object. The sizes of
       * the two vectors must match.
       */
      template <typename number>
      Vector &
      operator=(const dealii::Vector<number> &v);

      using VectorBase::reinit;

      /**
       * Change the dimension of the vector to @p N, of which
       * @p locally_owned_size elements are stored on the present process.
       */
      void
      reinit(const MPI_Comm &communicator,
             const size_type N,
             const size_type locally_owned_size,
             const bool      omit_zeroing_entries = false);

      /**
       * Change the dimension and parallel layout to those of the vector
       * @p v.
       */
      void
      reinit(const Vector &v, const bool omit_zeroing_entries = false);

      /**
       * Reinitialize the vector as a ghosted vector from IndexSets of the
       * locally owned and ghost elements.
       */
      void
      reinit(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm &communicator);

      /**
       * Reinitialize the vector without ghost elements from an IndexSet of
       * the locally owned elements.
       */
      void
      reinit(const IndexSet &local, const MPI_Comm &communicator);

      /**
       * Reinitialize the vector from the parallel layout described by the
       * given partitioner.
       */
      void
      reinit(
        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);

      /**
       * Print the vector to the given output stream.
       */
      void
      print(std::ostream &     out,
            const unsigned int precision  = 3,
            const bool         scientific = true,
            const bool         across     = true) const;

      /**
       * Return whether the vector contains only elements with value zero.
       */
      bool
      all_zero() const;

    protected:
      /**
       * Create a PETSc vector of global size @p n, of which
       * @p locally_owned_size elements are stored on the present process.
       */
      virtual void
      create_vector(const MPI_Comm &comm,
                    const size_type n,
                    const size_type locally_owned_size);



      /**
       * Create a PETSc vector as above, but additionally provide ghost
       * access to the elements in @p ghostnodes.
       */
      virtual void
      create_vector(const MPI_Comm &comm,
                    const size_type n,
                    const size_type locally_owned_size,
                    const IndexSet &ghostnodes);
    };

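    // Example (illustrative sketch only, not part of this header): a common
    // way to create such a vector is from the IndexSet of locally owned
    // degrees of freedom; 'dof_handler' below is a hypothetical DoFHandler
    // set up elsewhere.
    //
    //   IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
    //   PETScWrappers::MPI::Vector solution(locally_owned_dofs,
    //                                       MPI_COMM_WORLD);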


    // ------------------ template and inline functions -------------


    /**
     * Global function that exchanges the contents of two vectors.
     */
    inline void
    swap(Vector &u, Vector &v)
    {
      u.swap(v);
    }
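
    // Usage note (explanatory comment, not part of the original file): this
    // non-member swap() follows the usual std::swap convention and is found
    // via argument-dependent lookup, e.g.
    //
    //   PETScWrappers::MPI::Vector a, b;
    //   swap(a, b); // exchanges the contents of a and b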


# ifndef DOXYGEN

    template <typename number>
    Vector::Vector(const MPI_Comm &              communicator,
                   const dealii::Vector<number> &v,
                   const size_type               locally_owned_size)
    {
      Vector::create_vector(communicator, v.size(), locally_owned_size);

      *this = v;
    }



    inline Vector &
    Vector::operator=(const PetscScalar s)
    {
      // forward to the base class, which sets all elements to s
      VectorBase::operator=(s);

      return *this;
    }



    template <typename number>
    inline Vector &
    Vector::operator=(const dealii::Vector<number> &v)
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      // FIXME: the following isn't necessarily fast, but this is due to
      // the fact that PETSc doesn't offer an inlined access operator.
      //
      // If someone wants to contribute some code: to make this code
      // faster, one could either first convert all values to PetscScalar,
      // and then set them all at once using VecSetValues. This has the
      // drawback that it could take quite some memory, if the vector is
      // large, and it would in addition allocate memory on the heap, which
      // is expensive. An alternative would be to split the vector into
      // chunks of, say, 128 elements, convert a chunk at a time and set it
      // in the output vector using VecSetValues. Since 128 elements is
      // small enough, this could easily be allocated on the stack (as a
      // local variable), which would make the whole thing much more
      // efficient.
      //
      // A second way to make things faster is for the special case that
      // number==PetscScalar. We could then declare a specialization of
      // this template and omit the conversion. The problem with this is
      // that the best we can do is to use VecSetValues, but this isn't
      // very efficient either: it wants to see an array of indices, which
      // in this case a) again takes up a whole lot of memory on the heap,
      // and b) is totally dumb since its content would simply be the
      // sequence 0,1,2,3,...,n. The best of all worlds would probably be a
      // function in PETSc that would take a pointer to an array of
      // PetscScalar values and simply copy n elements verbatim into the
      // vector...
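      //
      // A rough sketch of the chunked approach described above (untested,
      // illustration only; it assumes that the protected member 'vector' of
      // VectorBase names the underlying PETSc Vec):
      //
      //   constexpr std::size_t chunk_size = 128;
      //   PetscInt    chunk_indices[chunk_size];
      //   PetscScalar chunk_values[chunk_size];
      //   for (size_type begin = 0; begin < v.size(); begin += chunk_size)
      //     {
      //       const size_type end =
      //         std::min<size_type>(begin + chunk_size, v.size());
      //       for (size_type i = begin; i < end; ++i)
      //         {
      //           chunk_indices[i - begin] = static_cast<PetscInt>(i);
      //           chunk_values[i - begin]  = static_cast<PetscScalar>(v(i));
      //         }
      //       const PetscErrorCode ierr =
      //         VecSetValues(vector,
      //                      static_cast<PetscInt>(end - begin),
      //                      chunk_indices,
      //                      chunk_values,
      //                      INSERT_VALUES);
      //       AssertThrow(ierr == 0, ExcPETScError(ierr));
      //     }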
      for (size_type i = 0; i < v.size(); ++i)
        (*this)(i) = v(i);

      compress(VectorOperation::insert);

      return *this;
    }



# endif // DOXYGEN
  } // namespace MPI
} // namespace PETScWrappers

namespace internal
{
  namespace LinearOperatorImplementation
  {
    template <typename>
    class ReinitHelper;

    /**
     * A helper class used internally by LinearOperator to reinitialize range
     * and domain vectors of type PETScWrappers::MPI::Vector according to a
     * given matrix.
     */
    template <>
    class ReinitHelper<PETScWrappers::MPI::Vector>
    {
    public:
      template <typename Matrix>
      static void
      reinit_range_vector(const Matrix &               matrix,
                          PETScWrappers::MPI::Vector &v,
                          bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_range_indices(),
                 matrix.get_mpi_communicator());
      }

      template <typename Matrix>
      static void
      reinit_domain_vector(const Matrix &               matrix,
                           PETScWrappers::MPI::Vector &v,
                           bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_domain_indices(),
                 matrix.get_mpi_communicator());
      }
    };
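
    // Illustrative sketch (not part of the original file): this helper is
    // normally invoked by the LinearOperator machinery, but it can also be
    // called directly. It assumes a matrix type that offers
    // locally_owned_range_indices(), locally_owned_domain_indices(), and
    // get_mpi_communicator(), such as PETScWrappers::MPI::SparseMatrix:
    //
    //   PETScWrappers::MPI::Vector tmp;
    //   ReinitHelper<PETScWrappers::MPI::Vector>::reinit_range_vector(
    //     matrix, tmp, /*omit_zeroing_entries=*/false);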

  } // namespace LinearOperatorImplementation
} // namespace internal

/**
 * Declare dealii::PETScWrappers::MPI::Vector as a distributed (non-serial)
 * vector type.
 */
template <>
struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
{};
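
// Illustrative note (not part of the original file): generic code can query
// this trait at compile time, e.g.
//
//   static_assert(!is_serial_vector<PETScWrappers::MPI::Vector>::value,
//                 "PETSc MPI vectors are distributed vectors");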


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC

#endif