trilinos_block_sparse_matrix.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2008 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/lac/trilinos_block_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/lac/block_sparse_matrix.h>
#  include <deal.II/lac/block_sparsity_pattern.h>

DEAL_II_NAMESPACE_OPEN

namespace TrilinosWrappers
{
  BlockSparseMatrix::~BlockSparseMatrix()
  {
    // delete previous content of
    // the subobjects array
    try
      {
        clear();
      }
    catch (...)
      {}
  }


# ifndef DOXYGEN
  void
  BlockSparseMatrix::reinit(const size_type n_block_rows,
                            const size_type n_block_columns)
  {
    // first delete previous content of
    // the subobjects array
    clear();

    // then resize. set sizes of blocks to
    // zero. user will later have to call
    // collect_sizes for this
    this->sub_objects.reinit(n_block_rows, n_block_columns);
    this->row_block_indices.reinit(n_block_rows, 0);
    this->column_block_indices.reinit(n_block_columns, 0);

    // and reinitialize the blocks
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          BlockType *p = new BlockType();

          Assert(this->sub_objects[r][c] == nullptr, ExcInternalError());
          this->sub_objects[r][c] = p;
        }
  }
# endif


  template <typename BlockSparsityPatternType>
  void
  BlockSparseMatrix::reinit(
    const std::vector<IndexSet>    &parallel_partitioning,
    const BlockSparsityPatternType &block_sparsity_pattern,
    const MPI_Comm                  communicator,
    const bool                      exchange_data)
  {
    std::vector<Epetra_Map> epetra_maps;
    for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
      epetra_maps.push_back(
        parallel_partitioning[i].make_trilinos_map(communicator, false));

    Assert(epetra_maps.size() == block_sparsity_pattern.n_block_rows(),
           ExcDimensionMismatch(epetra_maps.size(),
                                block_sparsity_pattern.n_block_rows()));
    Assert(epetra_maps.size() == block_sparsity_pattern.n_block_cols(),
           ExcDimensionMismatch(epetra_maps.size(),
                                block_sparsity_pattern.n_block_cols()));

    const size_type n_block_rows = epetra_maps.size();
    (void)n_block_rows;

    Assert(n_block_rows == block_sparsity_pattern.n_block_rows(),
           ExcDimensionMismatch(n_block_rows,
                                block_sparsity_pattern.n_block_rows()));
    Assert(n_block_rows == block_sparsity_pattern.n_block_cols(),
           ExcDimensionMismatch(n_block_rows,
                                block_sparsity_pattern.n_block_cols()));

    // Call the other basic reinit function, ...
    reinit(block_sparsity_pattern.n_block_rows(),
           block_sparsity_pattern.n_block_cols());

    // ... set the correct sizes, ...
    this->row_block_indices    = block_sparsity_pattern.get_row_indices();
    this->column_block_indices = block_sparsity_pattern.get_column_indices();

    // ... and then assign the correct
    // data to the blocks.
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          this->sub_objects[r][c]->reinit(parallel_partitioning[r],
                                          parallel_partitioning[c],
                                          block_sparsity_pattern.block(r, c),
                                          communicator,
                                          exchange_data);
        }
  }


  template <typename BlockSparsityPatternType>
  void
  BlockSparseMatrix::reinit(
    const BlockSparsityPatternType &block_sparsity_pattern)
  {
    std::vector<IndexSet> parallel_partitioning;
    for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
      parallel_partitioning.emplace_back(
        complete_index_set(block_sparsity_pattern.block(i, 0).n_rows()));

    reinit(parallel_partitioning, block_sparsity_pattern);
  }


  template <>
  void
  BlockSparseMatrix::reinit(const BlockSparsityPattern &block_sparsity_pattern)
  {
    // Call the other basic reinit function, ...
    reinit(block_sparsity_pattern.n_block_rows(),
           block_sparsity_pattern.n_block_cols());

    // ... set the correct sizes, ...
    this->row_block_indices    = block_sparsity_pattern.get_row_indices();
    this->column_block_indices = block_sparsity_pattern.get_column_indices();

    // ... and then assign the correct
    // data to the blocks.
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          this->sub_objects[r][c]->reinit(block_sparsity_pattern.block(r, c));
        }
  }


  void
  BlockSparseMatrix::reinit(
    const std::vector<IndexSet>               &parallel_partitioning,
    const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
    const MPI_Comm                             communicator,
    const double                               drop_tolerance)
  {
    const size_type n_block_rows = parallel_partitioning.size();

    Assert(n_block_rows == dealii_block_sparse_matrix.n_block_rows(),
           ExcDimensionMismatch(n_block_rows,
                                dealii_block_sparse_matrix.n_block_rows()));
    Assert(n_block_rows == dealii_block_sparse_matrix.n_block_cols(),
           ExcDimensionMismatch(n_block_rows,
                                dealii_block_sparse_matrix.n_block_cols()));

    // Call the other basic reinit function ...
    reinit(n_block_rows, n_block_rows);

    // ... and then assign the correct
    // data to the blocks.
    for (size_type r = 0; r < this->n_block_rows(); ++r)
      for (size_type c = 0; c < this->n_block_cols(); ++c)
        {
          this->sub_objects[r][c]->reinit(parallel_partitioning[r],
                                          parallel_partitioning[c],
                                          dealii_block_sparse_matrix.block(r,
                                                                           c),
                                          communicator,
                                          drop_tolerance);
        }

    collect_sizes();
  }



  void
  BlockSparseMatrix::reinit(
    const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
    const double                               drop_tolerance)
  {
    Assert(dealii_block_sparse_matrix.n_block_rows() ==
             dealii_block_sparse_matrix.n_block_cols(),
           ExcDimensionMismatch(dealii_block_sparse_matrix.n_block_rows(),
                                dealii_block_sparse_matrix.n_block_cols()));
    Assert(dealii_block_sparse_matrix.m() == dealii_block_sparse_matrix.n(),
           ExcDimensionMismatch(dealii_block_sparse_matrix.m(),
                                dealii_block_sparse_matrix.n()));

    std::vector<IndexSet> parallel_partitioning;
    for (size_type i = 0; i < dealii_block_sparse_matrix.n_block_rows(); ++i)
      parallel_partitioning.emplace_back(
        complete_index_set(dealii_block_sparse_matrix.block(i, 0).m()));

    reinit(parallel_partitioning,
           dealii_block_sparse_matrix,
           MPI_COMM_SELF,
           drop_tolerance);
  }


  void
  BlockSparseMatrix::collect_sizes()
  {
    // simply forward to the (non-public) function of the base class
    BaseClass::collect_sizes();
  }



  std::uint64_t
  BlockSparseMatrix::n_nonzero_elements() const
  {
    std::uint64_t n_nonzero = 0;
    for (size_type rows = 0; rows < this->n_block_rows(); ++rows)
      for (size_type cols = 0; cols < this->n_block_cols(); ++cols)
        n_nonzero += this->block(rows, cols).n_nonzero_elements();

    return n_nonzero;
  }



  TrilinosScalar
  BlockSparseMatrix::residual(MPI::BlockVector       &dst,
                              const MPI::BlockVector &x,
                              const MPI::BlockVector &b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }



  // TODO: In the following we
  // use the same code as just
  // above three more times. Use
  // templates. (A hedged sketch of such a template follows the last of
  // these overloads below.)
  TrilinosScalar
  BlockSparseMatrix::residual(MPI::BlockVector       &dst,
                              const MPI::Vector      &x,
                              const MPI::BlockVector &b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }



  TrilinosScalar
  BlockSparseMatrix::residual(MPI::Vector            &dst,
                              const MPI::BlockVector &x,
                              const MPI::Vector      &b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }



  TrilinosScalar
  BlockSparseMatrix::residual(MPI::Vector       &dst,
                              const MPI::Vector &x,
                              const MPI::Vector &b) const
  {
    vmult(dst, x);
    dst -= b;
    dst *= -1.;

    return dst.l2_norm();
  }
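

  // A hedged sketch, not part of deal.II: the four residual() overloads
  // above could share one implementation through a file-local template. The
  // helper name compute_residual_norm and its placement in an anonymous
  // namespace are assumptions of this illustration only.
  namespace
  {
    template <typename MatrixType,
              typename DstVectorType,
              typename SrcVectorType,
              typename RhsVectorType>
    TrilinosScalar
    compute_residual_norm(const MatrixType    &matrix,
                          DstVectorType       &dst,
                          const SrcVectorType &x,
                          const RhsVectorType &b)
    {
      // dst = A x, then dst = -(A x - b) = b - A x; its l2 norm is what
      // every residual() overload returns.
      matrix.vmult(dst, x);
      dst -= b;
      dst *= -1.;
      return dst.l2_norm();
    }
  } // namespace

  // Each overload above would then reduce to
  //   return compute_residual_norm(*this, dst, x, b);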



  MPI_Comm
  BlockSparseMatrix::get_mpi_communicator() const
  {
    Assert(this->n_block_cols() != 0, ExcNotInitialized());
    Assert(this->n_block_rows() != 0, ExcNotInitialized());
    return this->sub_objects[0][0]->get_mpi_communicator();
  }



# ifndef DOXYGEN
  // -------------------- explicit instantiations -----------------------
  //
  template void
  BlockSparseMatrix::reinit(const ::dealii::BlockSparsityPattern &);
  template void
  BlockSparseMatrix::reinit(const ::dealii::BlockDynamicSparsityPattern &);

  template void
  BlockSparseMatrix::reinit(const std::vector<IndexSet> &,
                            const ::dealii::BlockDynamicSparsityPattern &,
                            const MPI_Comm,
                            const bool);
# endif // DOXYGEN

} // namespace TrilinosWrappers


DEAL_II_NAMESPACE_CLOSE

#endif
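
A minimal usage sketch of the reinit() variants compiled above, assuming a square block layout described by a vector of locally owned IndexSet objects; the function name assemble_block_system, the partitioning argument, and the empty per-block patterns are illustrative only and not part of deal.II:

#include <deal.II/base/index_set.h>
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/trilinos_block_sparse_matrix.h>

#include <vector>

void
assemble_block_system(const std::vector<dealii::IndexSet> &owned_partitioning,
                      const MPI_Comm                       communicator)
{
  const unsigned int n_blocks = owned_partitioning.size();

  // Describe the coupling of the blocks with a dynamic sparsity pattern;
  // entries would normally be added via DoFTools::make_sparsity_pattern().
  dealii::BlockDynamicSparsityPattern dsp(n_blocks, n_blocks);
  for (unsigned int r = 0; r < n_blocks; ++r)
    for (unsigned int c = 0; c < n_blocks; ++c)
      dsp.block(r, c).reinit(owned_partitioning[r].size(),
                             owned_partitioning[c].size());
  dsp.collect_sizes();

  // Hand the pattern to the templated reinit() defined in this file, which
  // reinitializes every block and sets the row/column block indices.
  dealii::TrilinosWrappers::BlockSparseMatrix system_matrix;
  system_matrix.reinit(owned_partitioning,
                       dsp,
                       communicator,
                       /*exchange_data=*/false);
}

This drives exactly the explicit instantiation for BlockDynamicSparsityPattern listed above; the serial overloads that take only a sparsity pattern or a dealii::BlockSparseMatrix follow the same path, building the partitioning internally from complete_index_set().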