Reference documentation for deal.II version 9.5.0
petsc_vector.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_petsc_vector_h
#define dealii_petsc_vector_h


#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_PETSC

# include <deal.II/base/index_set.h>

# include <deal.II/lac/petsc_vector_base.h>
# include <deal.II/lac/vector.h>


DEAL_II_NAMESPACE_OPEN

namespace PETScWrappers
{
  namespace MPI
  {
    class Vector : public VectorBase
    {
    public:
      /**
       * Declare type for container size.
       */
      using size_type = types::global_dof_index;

      /**
       * Default constructor: create an empty vector.
       */
      Vector();

      /**
       * Create a vector of global size @p n, of which @p locally_owned_size
       * elements are stored on the current process.
       */
      explicit Vector(const MPI_Comm  communicator,
                      const size_type n,
                      const size_type locally_owned_size);

      /**
       * Copy a sequential deal.II vector into a parallel PETSc vector,
       * storing @p locally_owned_size elements on the current process.
       */
      template <typename Number>
      explicit Vector(const MPI_Comm                communicator,
                      const dealii::Vector<Number> &v,
                      const size_type               locally_owned_size);

      /**
       * Create a ghosted vector from the locally owned and ghost index sets.
       */
      Vector(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm  communicator);

      /**
       * Create a vector without ghost elements from the locally owned
       * index set.
       */
      explicit Vector(const IndexSet &local, const MPI_Comm communicator);

      /**
       * Copy constructor.
       */
      Vector(const Vector &v);

      /**
       * Release all memory and return to the default-constructed state.
       */
      virtual void
      clear() override;

      /**
       * Copy the given vector. Resize the present vector if necessary.
       */
      Vector &
      operator=(const Vector &v);

      /**
       * Set all components of the vector to the given number @p s.
       */
      Vector &
      operator=(const PetscScalar s);

      /**
       * Copy the values of a deal.II vector into this vector.
       */
      template <typename number>
      Vector &
      operator=(const dealii::Vector<number> &v);

      using VectorBase::reinit;

      /**
       * Change the dimension of the vector to @p N, with @p
       * locally_owned_size elements stored on the current process.
       */
      void
      reinit(const MPI_Comm  communicator,
             const size_type N,
             const size_type locally_owned_size,
             const bool      omit_zeroing_entries = false);

      /**
       * Change the dimension to that of the vector @p v.
       */
      void
      reinit(const Vector &v, const bool omit_zeroing_entries = false);

      /**
       * Reinitialize the vector from the locally owned and ghost index sets.
       */
      void
      reinit(const IndexSet &local,
             const IndexSet &ghost,
             const MPI_Comm  communicator);

      /**
       * Reinitialize the vector, without ghost elements, from the locally
       * owned index set.
       */
      void
      reinit(const IndexSet &local, const MPI_Comm communicator);

      /**
       * Reinitialize the vector from a Partitioner; create ghost entries
       * if @p make_ghosted is true.
       */
      void
      reinit(
        const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
        const bool make_ghosted = true);

      /**
       * Print the vector to the given output stream.
       */
      void
      print(std::ostream &     out,
            const unsigned int precision = 3,
            const bool         scientific = true,
            const bool         across = true) const;

      /**
       * Return whether all elements of the vector are equal to zero.
       */
      bool
      all_zero() const;

    protected:
      /**
       * Create a PETSc vector of global size @p n, storing @p
       * locally_owned_size elements on the current process.
       */
      virtual void
      create_vector(const MPI_Comm  comm,
                    const size_type n,
                    const size_type locally_owned_size);

      /**
       * Same as above, but additionally store the ghost elements listed
       * in @p ghostnodes.
       */
      virtual void
      create_vector(const MPI_Comm  comm,
                    const size_type n,
                    const size_type locally_owned_size,
                    const IndexSet &ghostnodes);
    };


    // ------------------ template and inline functions -------------


    /**
     * Exchange the contents of the two vectors @p u and @p v.
     */
    inline void
    swap(Vector &u, Vector &v)
    {
      u.swap(v);
    }


# ifndef DOXYGEN

    template <typename number>
    Vector::Vector(const MPI_Comm                communicator,
                   const dealii::Vector<number> &v,
                   const size_type               locally_owned_size)
    {
      Vector::create_vector(communicator, v.size(), locally_owned_size);

      *this = v;
    }



    inline Vector &
    Vector::operator=(const PetscScalar s)
    {
      // Forward to the base class, which sets every element to s.
      VectorBase::operator=(s);

      return *this;
    }



    template <typename number>
    inline Vector &
    Vector::operator=(const dealii::Vector<number> &v)
    {
      Assert(size() == v.size(), ExcDimensionMismatch(size(), v.size()));

      // FIXME: the following isn't necessarily fast, but this is due to
      // the fact that PETSc doesn't offer an inlined access operator.
      //
      // if someone wants to contribute some code: to make this code
      // faster, one could either first convert all values to PetscScalar,
      // and then set them all at once using VecSetValues. This has the
      // drawback that it could take quite some memory, if the vector is
      // large, and it would in addition allocate memory on the heap, which
      // is expensive. an alternative would be to split the vector into
      // chunks of, say, 128 elements, convert a chunk at a time and set it
      // in the output vector using VecSetValues. since 128 elements is
      // small enough, this could easily be allocated on the stack (as a
      // local variable) which would make the whole thing much more
      // efficient.
      //
      // a second way to make things faster is for the special case that
      // number==PetscScalar. we could then declare a specialization of
      // this template, and omit the conversion. the problem with this is
      // that the best we can do is to use VecSetValues, but this isn't
      // very efficient either: it wants to see an array of indices, which
      // in this case a) again takes up a whole lot of memory on the heap,
      // and b) is totally dumb since its content would simply be the
      // sequence 0,1,2,3,...,n. the best of all worlds would probably be a
      // function in PETSc that would take a pointer to an array of
      // PetscScalar values and simply copy n elements verbatim into the
      // vector...
      for (size_type i = 0; i < v.size(); ++i)
        (*this)(i) = v(i);

      compress(VectorOperation::insert);

      return *this;
    }
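

    // ------------------------------------------------------------------
    // Illustrative sketch only; this function is *not* part of deal.II.
    // It shows one way the chunked VecSetValues idea described in the
    // comment above could look. The name copy_chunked_to_petsc is
    // hypothetical; VecSetValues, VecAssemblyBegin and VecAssemblyEnd are
    // the standard PETSc entry points for setting vector entries.
    template <typename number>
    void
    copy_chunked_to_petsc(Vec petsc_vec, const dealii::Vector<number> &v)
    {
      // 128 entries fit comfortably on the stack, avoiding heap traffic.
      constexpr PetscInt chunk_size = 128;
      PetscInt           indices[chunk_size];
      PetscScalar        values[chunk_size];

      const PetscInt n = static_cast<PetscInt>(v.size());
      for (PetscInt begin = 0; begin < n; begin += chunk_size)
        {
          const PetscInt end =
            (begin + chunk_size < n) ? begin + chunk_size : n;

          // Convert one chunk to PetscScalar and remember its indices.
          for (PetscInt i = begin; i < end; ++i)
            {
              indices[i - begin] = i;
              values[i - begin]  = static_cast<PetscScalar>(v(i));
            }

          // Hand the whole chunk to PETSc in a single call.
          VecSetValues(petsc_vec, end - begin, indices, values, INSERT_VALUES);
        }

      // Finalize the insertions (collective over the vector's communicator).
      VecAssemblyBegin(petsc_vec);
      VecAssemblyEnd(petsc_vec);
    }
    // ------------------------------------------------------------------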



# endif // DOXYGEN
  } // namespace MPI
} // namespace PETScWrappers

namespace internal
{
  namespace LinearOperatorImplementation
  {
    template <typename>
    class ReinitHelper;

    /**
     * Specialization for PETScWrappers::MPI::Vector, used by the
     * LinearOperator machinery to create range and domain vectors.
     */
    template <>
    class ReinitHelper<PETScWrappers::MPI::Vector>
    {
    public:
      template <typename Matrix>
      static void
      reinit_range_vector(const Matrix &              matrix,
                          PETScWrappers::MPI::Vector &v,
                          bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_range_indices(),
                 matrix.get_mpi_communicator());
      }

      template <typename Matrix>
      static void
      reinit_domain_vector(const Matrix &              matrix,
                           PETScWrappers::MPI::Vector &v,
                           bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_domain_indices(),
                 matrix.get_mpi_communicator());
      }
    };

  } // namespace LinearOperatorImplementation
} /* namespace internal */

/**
 * Declare dealii::PETScWrappers::MPI::Vector as a distributed
 * (non-serial) vector type.
 */
template <>
struct is_serial_vector<PETScWrappers::MPI::Vector> : std::false_type
{};


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC

#endif
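
A minimal usage sketch of the class declared above, assuming deal.II was
configured with PETSc and MPI. The global size of 100 entries, the even
splitting of indices across processes, and all variable names are
illustrative choices, not part of deal.II.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <deal.II/lac/petsc_vector.h>

#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;

  // Initializes MPI (and, through it, PETSc) for the lifetime of this object.
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const unsigned int my_rank =
    Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_ranks =
    Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // Split 100 entries into contiguous, roughly equal ownership ranges.
  const types::global_dof_index n = 100;
  IndexSet locally_owned(n);
  locally_owned.add_range(my_rank * n / n_ranks, (my_rank + 1) * n / n_ranks);

  // A distributed vector without ghost entries.
  PETScWrappers::MPI::Vector v(locally_owned, MPI_COMM_WORLD);

  // Each process writes its own entries, then makes the vector consistent.
  for (const types::global_dof_index i : locally_owned)
    v(i) = static_cast<PetscScalar>(i);
  v.compress(VectorOperation::insert);

  // Print the locally stored part on every process.
  v.print(std::cout);
}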