petsc_vector.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2004 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_petsc_vector_h
17 # define dealii_petsc_vector_h
18 
19 
20 # include <deal.II/base/config.h>
21 
22 # ifdef DEAL_II_WITH_PETSC
23 
24 # include <deal.II/base/index_set.h>
25 # include <deal.II/base/subscriptor.h>
26 
27 # include <deal.II/lac/exceptions.h>
28 # include <deal.II/lac/petsc_vector_base.h>
29 # include <deal.II/lac/vector.h>
30 # include <deal.II/lac/vector_operation.h>
31 # include <deal.II/lac/vector_type_traits.h>
32 
33 DEAL_II_NAMESPACE_OPEN
34 
35 
39 namespace PETScWrappers
40 {
47  namespace MPI
48  {
156  class Vector : public VectorBase
157  {
158  public:
162  using size_type = types::global_dof_index;
163 
167  Vector();
168 
185  explicit Vector(const MPI_Comm &communicator,
186  const size_type n,
187  const size_type local_size);
188 
189 
200  template <typename Number>
201  explicit Vector(const MPI_Comm & communicator,
202  const dealii::Vector<Number> &v,
203  const size_type local_size);
204 
205 
219  explicit Vector(const MPI_Comm & communicator,
220  const VectorBase &v,
221  const size_type local_size);
222 
246  Vector(const IndexSet &local,
247  const IndexSet &ghost,
248  const MPI_Comm &communicator);
249 
261  explicit Vector(const IndexSet &local, const MPI_Comm &communicator);
262 
266  Vector(const Vector &v);
267 
272  virtual void
273  clear() override;
274 
279  Vector &
280  operator=(const Vector &v);
281 
288  Vector &
289  operator=(const PetscScalar s);
290 
300  template <typename number>
301  Vector &
302  operator=(const dealii::Vector<number> &v);
303 
320  void
321  reinit(const MPI_Comm &communicator,
322  const size_type N,
323  const size_type local_size,
324  const bool omit_zeroing_entries = false);
325 
335  void
336  reinit(const Vector &v, const bool omit_zeroing_entries = false);
337 
345  void
346  reinit(const IndexSet &local,
347  const IndexSet &ghost,
348  const MPI_Comm &communicator);
349 
357  void
358  reinit(const IndexSet &local, const MPI_Comm &communicator);
359 
364  const MPI_Comm &
365  get_mpi_communicator() const override;
366 
378  void
379  print(std::ostream & out,
380  const unsigned int precision = 3,
381  const bool scientific = true,
382  const bool across = true) const;
383 
390  bool
391  all_zero() const;
392 
393  protected:
400  virtual void
401  create_vector(const size_type n, const size_type local_size);
402 
403 
404 
410  virtual void
411  create_vector(const size_type n,
412  const size_type local_size,
413  const IndexSet &ghostnodes);
414 
415 
416  private:
420  MPI_Comm communicator;
421  };
422 
423 
424  // ------------------ template and inline functions -------------
425 
426 
434  inline void
435  swap(Vector &u, Vector &v)
436  {
437  u.swap(v);
438  }
439 
440 
441 # ifndef DOXYGEN
442 
443  template <typename number>
444  Vector::Vector(const MPI_Comm & communicator,
445  const dealii::Vector<number> &v,
446  const size_type local_size)
447  : communicator(communicator)
448  {
449  Vector::create_vector(v.size(), local_size);
450 
451  *this = v;
452  }
453 
454 
455 
456  inline Vector &
457  Vector::operator=(const PetscScalar s)
458  {
459  VectorBase::operator=(s);
460 
461  return *this;
462  }
463 
464 
465 
466  template <typename number>
467  inline Vector &
468  Vector::operator=(const dealii::Vector<number> &v)
469  {
470  Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));
471 
472  // FIXME: the following isn't necessarily fast, but this is due to
473  // the fact that PETSc doesn't offer an inlined access operator.
474  //
475  // if someone wants to contribute some code: to make this code
476  // faster, one could either first convert all values to PetscScalar,
477  // and then set them all at once using VecSetValues. This has the
478  // drawback that it could take quite some memory, if the vector is
479  // large, and it would in addition allocate memory on the heap, which
480  // is expensive. an alternative would be to split the vector into
481  // chunks of, say, 128 elements, convert a chunk at a time and set it
482  // in the output vector using VecSetValues. since 128 elements is
483  // small enough, this could easily be allocated on the stack (as a
484  // local variable) which would make the whole thing much more
485  // efficient.
486  //
487  // a second way to make things faster is for the special case that
488  // number==PetscScalar. we could then declare a specialization of
489  // this template, and omit the conversion. the problem with this is
490  // that the best we can do is to use VecSetValues, but this isn't
491  // very efficient either: it wants to see an array of indices, which
492  // in this case a) again takes up a whole lot of memory on the heap,
493  // and b) is totally dumb since its content would simply be the
494  // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
495  // function in Petsc that would take a pointer to an array of
496  // PetscScalar values and simply copy n elements verbatim into the
497  // vector...
498  for (size_type i = 0; i < v.size(); ++i)
499  (*this)(i) = v(i);
500 
501  compress(VectorOperation::insert);
502 
503  return *this;
504  }
505 
506 
507 
508  inline const MPI_Comm &
509  Vector::get_mpi_communicator() const
510  {
511  return communicator;
512  }
513 
514 # endif // DOXYGEN
515  } // namespace MPI
516 } // namespace PETScWrappers
517 
518 namespace internal
519 {
520  namespace LinearOperatorImplementation
521  {
522  template <typename>
523  class ReinitHelper;
524 
529  template <>
530  class ReinitHelper<PETScWrappers::MPI::Vector>
531  {
532  public:
533  template <typename Matrix>
534  static void
535  reinit_range_vector(const Matrix & matrix,
536  PETScWrappers::MPI::Vector &v,
537  bool /*omit_zeroing_entries*/)
538  {
539  v.reinit(matrix.locally_owned_range_indices(),
540  matrix.get_mpi_communicator());
541  }
542 
543  template <typename Matrix>
544  static void
545  reinit_domain_vector(const Matrix & matrix,
546  PETScWrappers::MPI::Vector &v,
547  bool /*omit_zeroing_entries*/)
548  {
549  v.reinit(matrix.locally_owned_domain_indices(),
550  matrix.get_mpi_communicator());
551  }
552  };
553 
554  } // namespace LinearOperatorImplementation
555 } /* namespace internal */
556 
563 template <>
564 struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
565 {};
566 
567 
568 DEAL_II_NAMESPACE_CLOSE
569 
570 # endif // DEAL_II_WITH_PETSC
571 
572 #endif
573 /*------------------------- petsc_vector.h -------------------------*/