trilinos_sparse_matrix.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2008 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/lac/trilinos_sparse_matrix.h>

#ifdef DEAL_II_WITH_TRILINOS

#  include <deal.II/base/utilities.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/la_parallel_vector.h>
#  include <deal.II/lac/sparse_matrix.h>
#  include <deal.II/lac/sparsity_pattern.h>
#  include <deal.II/lac/trilinos_index_access.h>
#  include <deal.II/lac/trilinos_precondition.h>
#  include <deal.II/lac/trilinos_sparsity_pattern.h>

#  include <boost/container/small_vector.hpp>

#  ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
#    include <EpetraExt_MatrixMatrix.h>
#  endif
#  include <Epetra_Export.h>
#  include <Teuchos_RCP.hpp>
#  include <ml_epetra_utils.h>
#  include <ml_struct.h>

#  include <memory>

DEAL_II_NAMESPACE_OPEN
namespace TrilinosWrappers
{
  namespace internal
  {
    template <typename VectorType>
    typename VectorType::value_type *
    begin(VectorType &V)
    {
      return V.begin();
    }

    template <typename VectorType>
    const typename VectorType::value_type *
    begin(const VectorType &V)
    {
      return V.begin();
    }

    template <typename VectorType>
    typename VectorType::value_type *
    end(VectorType &V)
    {
      return V.end();
    }

    template <typename VectorType>
    const typename VectorType::value_type *
    end(const VectorType &V)
    {
      return V.end();
    }

#  ifdef DEAL_II_WITH_MPI
    template <>
    double *
    begin(LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0];
    }

    template <>
    const double *
    begin(const LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0];
    }

    template <>
    double *
    end(LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
    }

    template <>
    const double *
    end(const LinearAlgebra::EpetraWrappers::Vector &V)
    {
      return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
    }

#    ifdef DEAL_II_TRILINOS_WITH_TPETRA
    template <typename Number>
    Number *
    begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getDataNonConst().get();
    }

    template <typename Number>
    const Number *
    begin(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getData().get();
    }

    template <typename Number>
    Number *
    end(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getDataNonConst().get() +
             V.trilinos_vector().getLocalLength();
    }

    template <typename Number>
    const Number *
    end(const LinearAlgebra::TpetraWrappers::Vector<Number> &V)
    {
      return V.trilinos_vector().getData().get() +
             V.trilinos_vector().getLocalLength();
    }
#    endif
#  endif
  } // namespace internal
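
  // The begin()/end() helpers above give vmult() and Tvmult() a uniform way
  // to obtain raw pointers into the locally owned array of any supported
  // vector type. A minimal sketch of how such a pointer range is then used
  // (illustrative only; 'v' is an assumed, already initialized
  // LinearAlgebra::EpetraWrappers::Vector):
  //
  // @code
  //   double *  begin_ptr  = internal::begin(v);
  //   const int local_size = internal::end(v) - internal::begin(v);
  //   // wrap the array in an Epetra_MultiVector view without copying data
  //   Epetra_MultiVector view(
  //     View, v.trilinos_vector().Map(), begin_ptr, local_size, 1);
  // @endcode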


  namespace SparseMatrixIterators
  {
    void
    AccessorBase::visit_present_row()
    {
      // if we are asked to visit the past-the-end line, then simply
      // release all our caches and go on with life.
      //
      // do the same if the row we're supposed to visit is not locally
      // owned. this is simply going to make non-locally owned rows
      // look like they're empty
      if ((this->a_row == matrix->m()) ||
          (matrix->in_local_range(this->a_row) == false))
        {
          colnum_cache.reset();
          value_cache.reset();

          return;
        }

      // get a representation of the present row
      int                               ncols;
      TrilinosWrappers::types::int_type colnums = matrix->n();
      if (value_cache.get() == nullptr)
        {
          value_cache =
            std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
          colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
        }
      else
        {
          value_cache->resize(matrix->n());
          colnum_cache->resize(matrix->n());
        }

      int ierr = matrix->trilinos_matrix().ExtractGlobalRowCopy(
        this->a_row,
        colnums,
        ncols,
        value_cache->data(),
        reinterpret_cast<TrilinosWrappers::types::int_type *>(
          colnum_cache->data()));
      value_cache->resize(ncols);
      colnum_cache->resize(ncols);
      AssertThrow(ierr == 0, ExcTrilinosError(ierr));

      // copy it into our caches if the line isn't empty. if it is, then we've
      // done something wrong, since we shouldn't have initialized an iterator
      // for an empty line (what would it point to?)
    }
  } // namespace SparseMatrixIterators
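
  // The cached row representation built by visit_present_row() is what the
  // matrix iterators hand out. A usage sketch (illustrative; 'A' is an
  // assumed, compressed TrilinosWrappers::SparseMatrix):
  //
  // @code
  //   TrilinosScalar sum = 0;
  //   for (const auto &entry : A) // visits only locally stored entries
  //     sum += entry.value();
  // @endcode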


  // The constructor is actually the only point where we have to check whether
  // we build a serial or a parallel Trilinos matrix. Actually, it does not
  // even matter how many threads there are, but only whether we use an MPI
  // compiler or a standard compiler. So, even one thread on a configuration
  // with MPI will still get a parallel interface.
  SparseMatrix::SparseMatrix()
    : column_space_map(new Epetra_Map(0, 0, Utilities::Trilinos::comm_self()))
    , matrix(
        new Epetra_FECrsMatrix(View, *column_space_map, *column_space_map, 0))
    , last_action(Zero)
    , compressed(true)
  {
    matrix->FillComplete();
  }
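
  // A construction sketch for the constructors that follow (illustrative
  // only; 'locally_owned' is an assumed IndexSet describing this processor's
  // rows):
  //
  // @code
  //   // serial: 100x100 matrix with at most 5 entries per row
  //   TrilinosWrappers::SparseMatrix serial_matrix(100, 100, 5);
  //
  //   // parallel: rows distributed according to 'locally_owned'
  //   TrilinosWrappers::SparseMatrix distributed_matrix(locally_owned,
  //                                                     MPI_COMM_WORLD,
  //                                                     5);
  // @endcode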


  SparseMatrix::SparseMatrix(const size_type    m,
                             const size_type    n,
                             const unsigned int n_max_entries_per_row)
    : column_space_map(
        new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
                       0,
                       Utilities::Trilinos::comm_self()))
    ,
    // on one processor only, we know how the columns of the matrix will be
    // distributed (everything on one processor), so we can hand in this
    // information to the constructor. we can't do so in parallel, where the
    // information from columns is only available when entries have been added
    matrix(new Epetra_FECrsMatrix(
      Copy,
      Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
                 0,
                 Utilities::Trilinos::comm_self()),
      *column_space_map,
      n_max_entries_per_row,
      false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const size_type                  m,
                             const size_type                  n,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(
        new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n),
                       0,
                       Utilities::Trilinos::comm_self()))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(m),
                   0,
                   Utilities::Trilinos::comm_self()),
        *column_space_map,
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &   parallel_partitioning,
                             const MPI_Comm &   communicator,
                             const unsigned int n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    *column_space_map,
                                    n_max_entries_per_row,
                                    false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &                 parallel_partitioning,
                             const MPI_Comm &                 communicator,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(Copy,
                                    *column_space_map,
                                    reinterpret_cast<int *>(
                                      const_cast<unsigned int *>(
                                        n_entries_per_row.data())),
                                    false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
                             const IndexSet &col_parallel_partitioning,
                             const MPI_Comm &communicator,
                             const size_type n_max_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        n_max_entries_per_row,
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const IndexSet &                 row_parallel_partitioning,
                             const IndexSet &                 col_parallel_partitioning,
                             const MPI_Comm &                 communicator,
                             const std::vector<unsigned int> &n_entries_per_row)
    : column_space_map(new Epetra_Map(
        col_parallel_partitioning.make_trilinos_map(communicator, false)))
    , matrix(new Epetra_FECrsMatrix(
        Copy,
        row_parallel_partitioning.make_trilinos_map(communicator, false),
        reinterpret_cast<int *>(
          const_cast<unsigned int *>(n_entries_per_row.data())),
        false))
    , last_action(Zero)
    , compressed(false)
  {}



  SparseMatrix::SparseMatrix(const SparsityPattern &sparsity_pattern)
    : column_space_map(new Epetra_Map(sparsity_pattern.domain_partitioner()))
    , matrix(
        new Epetra_FECrsMatrix(Copy,
                               sparsity_pattern.trilinos_sparsity_pattern(),
                               false))
    , last_action(Zero)
    , compressed(true)
  {
    Assert(sparsity_pattern.trilinos_sparsity_pattern().Filled() == true,
           ExcMessage(
             "The Trilinos sparsity pattern has not been compressed."));
    compress(VectorOperation::insert);
  }



  SparseMatrix::SparseMatrix(SparseMatrix &&other) noexcept
    : column_space_map(std::move(other.column_space_map))
    , matrix(std::move(other.matrix))
    , nonlocal_matrix(std::move(other.nonlocal_matrix))
    , nonlocal_matrix_exporter(std::move(other.nonlocal_matrix_exporter))
    , last_action(other.last_action)
    , compressed(other.compressed)
  {
    other.last_action = Zero;
    other.compressed  = false;
  }

  void
  SparseMatrix::copy_from(const SparseMatrix &rhs)
  {
    if (this == &rhs)
      return;

    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();

    // check whether we need to update the whole matrix layout (we have
    // different maps or if we detect a row where the columns of the two
    // matrices do not match)
    bool needs_deep_copy =
      !matrix->RowMap().SameAs(rhs.matrix->RowMap()) ||
      !matrix->ColMap().SameAs(rhs.matrix->ColMap()) ||
      !matrix->DomainMap().SameAs(rhs.matrix->DomainMap()) ||
      n_nonzero_elements() != rhs.n_nonzero_elements();
    if (!needs_deep_copy)
      {
        // Try to copy all the rows of the matrix one by one. In case of error
        // (i.e., the column indices are different), we need to abort and blow
        // away the matrix.
        for (const auto row : locally_owned_range_indices())
          {
            const int row_local = matrix->RowMap().LID(
              static_cast<TrilinosWrappers::types::int_type>(row));
            Assert((row_local >= 0), ExcAccessToNonlocalRow(row));

            int             n_entries, rhs_n_entries;
            TrilinosScalar *value_ptr, *rhs_value_ptr;
            int *           index_ptr, *rhs_index_ptr;
            int             ierr = rhs.matrix->ExtractMyRowView(row_local,
                                                    rhs_n_entries,
                                                    rhs_value_ptr,
                                                    rhs_index_ptr);
            (void)ierr;
            Assert(ierr == 0, ExcTrilinosError(ierr));

            ierr = matrix->ExtractMyRowView(row_local,
                                            n_entries,
                                            value_ptr,
                                            index_ptr);
            Assert(ierr == 0, ExcTrilinosError(ierr));

            if (n_entries != rhs_n_entries ||
                std::memcmp(static_cast<void *>(index_ptr),
                            static_cast<void *>(rhs_index_ptr),
                            sizeof(int) * n_entries) != 0)
              {
                needs_deep_copy = true;
                break;
              }

            for (int i = 0; i < n_entries; ++i)
              value_ptr[i] = rhs_value_ptr[i];
          }
      }

    if (needs_deep_copy)
      {
        column_space_map =
          std::make_unique<Epetra_Map>(rhs.trilinos_matrix().DomainMap());

        // release memory before reallocation
        matrix = std::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);

        matrix->FillComplete(*column_space_map, matrix->RowMap());
      }

    if (rhs.nonlocal_matrix.get() != nullptr)
      nonlocal_matrix =
        std::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
  }
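
  // Usage sketch for copy_from() (illustrative; 'pattern' is an assumed
  // TrilinosWrappers::SparsityPattern shared by both matrices): when the
  // maps and the column indices of every row agree, only the values are
  // copied and the deep-copy branch is skipped:
  //
  // @code
  //   TrilinosWrappers::SparseMatrix A, B;
  //   A.reinit(pattern);
  //   B.reinit(pattern);
  //   ... // assemble and compress A
  //   B.copy_from(A); // cheap path: layouts match row by row
  // @endcode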


  namespace
  {
    using size_type = SparseMatrix::size_type;

    template <typename SparsityPatternType>
    void
    reinit_matrix(const IndexSet &                     row_parallel_partitioning,
                  const IndexSet &                     column_parallel_partitioning,
                  const SparsityPatternType &          sparsity_pattern,
                  const bool                           exchange_data,
                  const MPI_Comm &                     communicator,
                  std::unique_ptr<Epetra_Map> &        column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix> &  nonlocal_matrix,
                  std::unique_ptr<Epetra_Export> &     nonlocal_matrix_exporter)
    {
      // release memory before reallocation
      matrix.reset();
      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std::make_unique<Epetra_Map>(
        column_parallel_partitioning.make_trilinos_map(communicator, false));

      if (column_space_map->Comm().MyPID() == 0)
        {
          AssertDimension(sparsity_pattern.n_rows(),
                          row_parallel_partitioning.size());
          AssertDimension(sparsity_pattern.n_cols(),
                          column_parallel_partitioning.size());
        }

      Epetra_Map row_space_map =
        row_parallel_partitioning.make_trilinos_map(communicator, false);

      // if we want to exchange data, build a usual Trilinos sparsity pattern
      // and let that handle the exchange. otherwise, manually create a
      // CrsGraph, which consumes considerably less memory because it can set
      // the correct number of indices right from the start
      if (exchange_data)
        {
          SparsityPattern trilinos_sparsity;
          trilinos_sparsity.reinit(row_parallel_partitioning,
                                   column_parallel_partitioning,
                                   sparsity_pattern,
                                   communicator,
                                   exchange_data);
          matrix = std::make_unique<Epetra_FECrsMatrix>(
            Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);

          return;
        }

      const size_type first_row = TrilinosWrappers::min_my_gid(row_space_map),
                      last_row =
                        TrilinosWrappers::max_my_gid(row_space_map) + 1;
      std::vector<int> n_entries_per_row(last_row - first_row);

      for (size_type row = first_row; row < last_row; ++row)
        n_entries_per_row[row - first_row] = sparsity_pattern.row_length(row);

      // The deal.II notation of a sparsity pattern corresponds to the Epetra
      // concept of a Graph. Hence, we generate a graph by copying the
      // sparsity pattern into it, and then build up the matrix from the
      // graph. This is considerably faster than directly filling elements
      // into the matrix. Moreover, it consumes less memory, since the
      // internal reordering is done on ints only, and we can leave the
      // doubles aside.

      // for more than one processor, need to specify only row map first and
      // let the matrix entries decide about the column map (which says which
      // columns are present in the matrix, not to be confused with the
      // col_map that tells how the domain dofs of the matrix will be
      // distributed). for only one processor, we can directly assign the
      // columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
      std::unique_ptr<Epetra_CrsGraph> graph;
      if (row_space_map.Comm().NumProc() > 1)
        graph = std::make_unique<Epetra_CrsGraph>(Copy,
                                                  row_space_map,
                                                  n_entries_per_row.data(),
                                                  true);
      else
        graph = std::make_unique<Epetra_CrsGraph>(Copy,
                                                  row_space_map,
                                                  *column_space_map,
                                                  n_entries_per_row.data(),
                                                  true);

      // This function assumes that the sparsity pattern sits on all
      // processors (completely). The parallel version uses an Epetra graph
      // that is already distributed.

      // now insert the indices
      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (size_type row = first_row; row < last_row; ++row)
        {
          const int row_length = sparsity_pattern.row_length(row);
          if (row_length == 0)
            continue;

          row_indices.resize(row_length, -1);
          {
            typename SparsityPatternType::iterator p =
              sparsity_pattern.begin(row);
            for (size_type col = 0; p != sparsity_pattern.end(row); ++p, ++col)
              row_indices[col] = p->column();
          }
          graph->Epetra_CrsGraph::InsertGlobalIndices(row,
                                                      row_length,
                                                      row_indices.data());
        }

      // Eventually, optimize the graph structure (sort indices, make memory
      // contiguous, etc). note that the documentation of the function indeed
      // states that we first need to provide the column (domain) map and then
      // the row (range) map
      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      // check whether we got the number of columns right.
      const TrilinosWrappers::types::int_type n_global_cols =
        TrilinosWrappers::n_global_cols(*graph);
      AssertDimension(sparsity_pattern.n_cols(), n_global_cols);
      (void)n_global_cols;

      // And now finally generate the matrix.
      matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
    }
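
    // The graph-then-matrix idiom used above, reduced to its Epetra core
    // (illustrative sketch; 'map' is an assumed Epetra_Map and the graph
    // holds just the diagonal):
    //
    // @code
    //   Epetra_CrsGraph diagonal_graph(Copy, map, 1, true);
    //   for (int i = 0; i < map.NumMyElements(); ++i)
    //     {
    //       int global_row = map.GID(i);
    //       diagonal_graph.InsertGlobalIndices(global_row, 1, &global_row);
    //     }
    //   diagonal_graph.FillComplete();
    //   diagonal_graph.OptimizeStorage();
    //   Epetra_FECrsMatrix diagonal_matrix(Copy, diagonal_graph, false);
    // @endcode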


    // for the non-local graph, we need to circumvent the problem that some
    // processors will not add into the non-local graph at all: We do not want
    // to insert dummy elements on >5000 processors because that gets very
    // slow. Thus, we need to set the IndicesAreGlobal flag of the
    // Epetra_CrsGraph ourselves. Since the method doing so is protected, we
    // expose it by deriving a class from Epetra_CrsGraph for the purpose of
    // creating the data structure
    class Epetra_CrsGraphMod : public Epetra_CrsGraph
    {
    public:
      Epetra_CrsGraphMod(const Epetra_Map &row_map,
                         const int *       n_entries_per_row)
        : Epetra_CrsGraph(Copy, row_map, n_entries_per_row, true)
      {}

      void
      SetIndicesAreGlobal()
      {
        this->Epetra_CrsGraph::SetIndicesAreGlobal(true);
      }
    };


    // specialization for DynamicSparsityPattern which can provide us with
    // more information about the non-locally owned rows
    template <>
    void
    reinit_matrix(const IndexSet &                     row_parallel_partitioning,
                  const IndexSet &                     column_parallel_partitioning,
                  const DynamicSparsityPattern &       sparsity_pattern,
                  const bool                           exchange_data,
                  const MPI_Comm &                     communicator,
                  std::unique_ptr<Epetra_Map> &        column_space_map,
                  std::unique_ptr<Epetra_FECrsMatrix> &matrix,
                  std::unique_ptr<Epetra_CrsMatrix> &  nonlocal_matrix,
                  std::unique_ptr<Epetra_Export> &     nonlocal_matrix_exporter)
    {
      matrix.reset();
      nonlocal_matrix.reset();
      nonlocal_matrix_exporter.reset();

      column_space_map = std::make_unique<Epetra_Map>(
        column_parallel_partitioning.make_trilinos_map(communicator, false));

      AssertDimension(sparsity_pattern.n_rows(),
                      row_parallel_partitioning.size());
      AssertDimension(sparsity_pattern.n_cols(),
                      column_parallel_partitioning.size());

      Epetra_Map row_space_map =
        row_parallel_partitioning.make_trilinos_map(communicator, false);

      IndexSet relevant_rows(sparsity_pattern.row_index_set());
      // serial case
      if (relevant_rows.size() == 0)
        {
          relevant_rows.set_size(
            TrilinosWrappers::n_global_elements(row_space_map));
          relevant_rows.add_range(
            0, TrilinosWrappers::n_global_elements(row_space_map));
        }
      relevant_rows.compress();
      Assert(relevant_rows.n_elements() >=
               static_cast<unsigned int>(row_space_map.NumMyElements()),
             ExcMessage(
               "Locally relevant rows of sparsity pattern must contain "
               "all locally owned rows"));

      // check whether the relevant rows correspond to exactly the same map as
      // the owned rows. In that case, do not create the nonlocal graph and
      // fill the columns by demand
      const bool have_ghost_rows = [&]() {
        std::vector<dealii::types::global_dof_index> indices;
        relevant_rows.fill_index_vector(indices);
        Epetra_Map relevant_map(
          TrilinosWrappers::types::int_type(-1),
          TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
          (indices.empty() ?
             nullptr :
             reinterpret_cast<TrilinosWrappers::types::int_type *>(
               indices.data())),
          0,
          row_space_map.Comm());
        return !relevant_map.SameAs(row_space_map);
      }();

      const unsigned int n_rows = relevant_rows.n_elements();
      std::vector<TrilinosWrappers::types::int_type> ghost_rows;
      std::vector<int> n_entries_per_row(row_space_map.NumMyElements());
      std::vector<int> n_entries_per_ghost_row;
      for (unsigned int i = 0, own = 0; i < n_rows; ++i)
        {
          const TrilinosWrappers::types::int_type global_row =
            relevant_rows.nth_index_in_set(i);
          if (row_space_map.MyGID(global_row))
            n_entries_per_row[own++] = sparsity_pattern.row_length(global_row);
          else if (sparsity_pattern.row_length(global_row) > 0)
            {
              ghost_rows.push_back(global_row);
              n_entries_per_ghost_row.push_back(
                sparsity_pattern.row_length(global_row));
            }
        }

      Epetra_Map off_processor_map(-1,
                                   ghost_rows.size(),
                                   (ghost_rows.size() > 0) ?
                                     (ghost_rows.data()) :
                                     nullptr,
                                   0,
                                   row_space_map.Comm());

      std::unique_ptr<Epetra_CrsGraph>    graph;
      std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
      if (row_space_map.Comm().NumProc() > 1)
        {
          graph =
            std::make_unique<Epetra_CrsGraph>(Copy,
                                              row_space_map,
                                              (n_entries_per_row.size() > 0) ?
                                                (n_entries_per_row.data()) :
                                                nullptr,
                                              exchange_data ? false : true);
          if (have_ghost_rows == true)
            nonlocal_graph = std::make_unique<Epetra_CrsGraphMod>(
              off_processor_map, n_entries_per_ghost_row.data());
        }
      else
        graph =
          std::make_unique<Epetra_CrsGraph>(Copy,
                                            row_space_map,
                                            *column_space_map,
                                            (n_entries_per_row.size() > 0) ?
                                              (n_entries_per_row.data()) :
                                              nullptr,
                                            true);

      // now insert the indices, select between the right matrix
      std::vector<TrilinosWrappers::types::int_type> row_indices;

      for (unsigned int i = 0; i < n_rows; ++i)
        {
          const TrilinosWrappers::types::int_type global_row =
            relevant_rows.nth_index_in_set(i);
          const int row_length = sparsity_pattern.row_length(global_row);
          if (row_length == 0)
            continue;

          row_indices.resize(row_length, -1);
          for (int col = 0; col < row_length; ++col)
            row_indices[col] = sparsity_pattern.column_number(global_row, col);

          if (row_space_map.MyGID(global_row))
            graph->InsertGlobalIndices(global_row,
                                       row_length,
                                       row_indices.data());
          else
            {
              Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
              nonlocal_graph->InsertGlobalIndices(global_row,
                                                  row_length,
                                                  row_indices.data());
            }
        }

      // finalize nonlocal graph and create nonlocal matrix
      if (nonlocal_graph.get() != nullptr)
        {
          // must make sure the IndicesAreGlobal flag is set on all processors
          // because some processors might not call InsertGlobalIndices (and
          // we do not want to insert dummy indices on all processors for
          // large-scale simulations due to the bad impact on performance)
          nonlocal_graph->SetIndicesAreGlobal();
          Assert(nonlocal_graph->IndicesAreGlobal() == true,
                 ExcInternalError());
          nonlocal_graph->FillComplete(*column_space_map, row_space_map);
          nonlocal_graph->OptimizeStorage();

          // insert data from nonlocal graph into the final sparsity pattern
          if (exchange_data)
            {
              Epetra_Export exporter(nonlocal_graph->RowMap(), row_space_map);
              int ierr = graph->Export(*nonlocal_graph, exporter, Add);
              (void)ierr;
              Assert(ierr == 0, ExcTrilinosError(ierr));
            }

          nonlocal_matrix =
            std::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
        }

      graph->FillComplete(*column_space_map, row_space_map);
      graph->OptimizeStorage();

      AssertDimension(sparsity_pattern.n_cols(),
                      TrilinosWrappers::n_global_cols(*graph));

      matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
    }
  } // namespace
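
  // Usage sketch for the DynamicSparsityPattern path above (illustrative;
  // 'owned', 'relevant', and 'n_dofs' are assumed): giving the sparsity
  // pattern a row index set that also contains ghost rows makes those rows
  // writable through the separate nonlocal matrix part:
  //
  // @code
  //   DynamicSparsityPattern dsp(n_dofs, n_dofs, relevant);
  //   ... // add entries, possibly into rows owned by other processors
  //   TrilinosWrappers::SparseMatrix matrix;
  //   matrix.reinit(owned, owned, dsp, MPI_COMM_WORLD, true);
  // @endcode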


  template <typename SparsityPatternType>
  void
  SparseMatrix::reinit(const SparsityPatternType &sparsity_pattern)
  {
    reinit_matrix(complete_index_set(sparsity_pattern.n_rows()),
                  complete_index_set(sparsity_pattern.n_cols()),
                  sparsity_pattern,
                  false,
                  MPI_COMM_SELF,
                  column_space_map,
                  matrix,
                  nonlocal_matrix,
                  nonlocal_matrix_exporter);
  }



  template <typename SparsityPatternType>
  inline typename std::enable_if<
    !std::is_same<SparsityPatternType,
                  dealii::SparseMatrix<double>>::value>::type
  SparseMatrix::reinit(const IndexSet &           row_parallel_partitioning,
                       const IndexSet &           col_parallel_partitioning,
                       const SparsityPatternType &sparsity_pattern,
                       const MPI_Comm &           communicator,
                       const bool                 exchange_data)
  {
    reinit_matrix(row_parallel_partitioning,
                  col_parallel_partitioning,
                  sparsity_pattern,
                  exchange_data,
                  communicator,
                  column_space_map,
                  matrix,
                  nonlocal_matrix,
                  nonlocal_matrix_exporter);

    // In the end, the matrix needs to be compressed in order to be really
    // ready.
    last_action = Zero;
    compress(VectorOperation::insert);
  }


  void
  SparseMatrix::reinit(const SparsityPattern &sparsity_pattern)
  {
    matrix.reset();
    nonlocal_matrix_exporter.reset();

    // reinit with a (parallel) Trilinos sparsity pattern.
    column_space_map =
      std::make_unique<Epetra_Map>(sparsity_pattern.domain_partitioner());
    matrix = std::make_unique<Epetra_FECrsMatrix>(
      Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);

    if (sparsity_pattern.nonlocal_graph.get() != nullptr)
      nonlocal_matrix =
        std::make_unique<Epetra_CrsMatrix>(Copy,
                                           *sparsity_pattern.nonlocal_graph);
    else
      nonlocal_matrix.reset();

    last_action = Zero;
    compress(VectorOperation::insert);
  }



  void
  SparseMatrix::reinit(const SparseMatrix &sparse_matrix)
  {
    if (this == &sparse_matrix)
      return;

    column_space_map =
      std::make_unique<Epetra_Map>(sparse_matrix.trilinos_matrix().DomainMap());
    matrix.reset();
    nonlocal_matrix_exporter.reset();
    matrix = std::make_unique<Epetra_FECrsMatrix>(
      Copy, sparse_matrix.trilinos_sparsity_pattern(), false);

    if (sparse_matrix.nonlocal_matrix != nullptr)
      nonlocal_matrix = std::make_unique<Epetra_CrsMatrix>(
        Copy, sparse_matrix.nonlocal_matrix->Graph());
    else
      nonlocal_matrix.reset();

    last_action = Zero;
    compress(VectorOperation::insert);
  }

  template <typename number>
  inline void
  SparseMatrix::reinit(
    const IndexSet &                      row_parallel_partitioning,
    const IndexSet &                      col_parallel_partitioning,
    const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const MPI_Comm &                      communicator,
    const double                          drop_tolerance,
    const bool                            copy_values,
    const ::dealii::SparsityPattern *     use_this_sparsity)
  {
    if (copy_values == false)
      {
        // in case we do not copy values, just call the other function.
        if (use_this_sparsity == nullptr)
          reinit(row_parallel_partitioning,
                 col_parallel_partitioning,
                 dealii_sparse_matrix.get_sparsity_pattern(),
                 communicator,
                 false);
        else
          reinit(row_parallel_partitioning,
                 col_parallel_partitioning,
                 *use_this_sparsity,
                 communicator,
                 false);
        return;
      }

    const size_type n_rows = dealii_sparse_matrix.m();

    AssertDimension(row_parallel_partitioning.size(), n_rows);
    AssertDimension(col_parallel_partitioning.size(), dealii_sparse_matrix.n());

    const ::dealii::SparsityPattern &sparsity_pattern =
      (use_this_sparsity != nullptr) ?
        *use_this_sparsity :
        dealii_sparse_matrix.get_sparsity_pattern();

    if (matrix.get() == nullptr || m() != n_rows ||
        n_nonzero_elements() != sparsity_pattern.n_nonzero_elements())
      {
        reinit(row_parallel_partitioning,
               col_parallel_partitioning,
               sparsity_pattern,
               communicator,
               false);
      }

    // fill the values. the same as above: go through all rows of the
    // matrix, and then all columns. since the sparsity patterns of the
    // input matrix and the specified sparsity pattern might be different,
    // need to go through the row for both these sparsity structures
    // simultaneously in order to really set the correct values.
    const size_type             maximum_row_length = matrix->MaxNumEntries();
    std::vector<size_type>      row_indices(maximum_row_length);
    std::vector<TrilinosScalar> values(maximum_row_length);

    for (size_type row = 0; row < n_rows; ++row)
      // see if the row is locally stored on this processor
      if (row_parallel_partitioning.is_element(row) == true)
        {
          ::dealii::SparsityPattern::iterator select_index =
            sparsity_pattern.begin(row);
          typename ::dealii::SparseMatrix<number>::const_iterator it =
            dealii_sparse_matrix.begin(row);
          size_type col = 0;
          if (sparsity_pattern.n_rows() == sparsity_pattern.n_cols())
            {
              // optimized diagonal
              AssertDimension(it->column(), row);
              if (std::fabs(it->value()) > drop_tolerance)
                {
                  values[col]        = it->value();
                  row_indices[col++] = it->column();
                }
              ++select_index;
              ++it;
            }

          while (it != dealii_sparse_matrix.end(row) &&
                 select_index != sparsity_pattern.end(row))
            {
              // advance each iterator past smaller column indices, checking
              // for the end of the row before dereferencing
              while (select_index != sparsity_pattern.end(row) &&
                     select_index->column() < it->column())
                ++select_index;
              if (select_index == sparsity_pattern.end(row))
                break;
              while (it != dealii_sparse_matrix.end(row) &&
                     it->column() < select_index->column())
                ++it;

              if (it == dealii_sparse_matrix.end(row))
                break;
              if (std::fabs(it->value()) > drop_tolerance)
                {
                  values[col]        = it->value();
                  row_indices[col++] = it->column();
                }
              ++select_index;
              ++it;
            }
          set(row,
              col,
              row_indices.data(),
              values.data(),
              false);
        }
    compress(VectorOperation::insert);
  }
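
  // Usage sketch (illustrative; 'serial_matrix' is an assumed, assembled
  // dealii::SparseMatrix<double>): convert a deal.II matrix to Trilinos
  // format, dropping entries below the given magnitude:
  //
  // @code
  //   TrilinosWrappers::SparseMatrix trilinos_matrix;
  //   trilinos_matrix.reinit(serial_matrix, /*drop_tolerance=*/1e-13);
  // @endcode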


  template <typename number>
  void
  SparseMatrix::reinit(
    const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
    const double                          drop_tolerance,
    const bool                            copy_values,
    const ::dealii::SparsityPattern *     use_this_sparsity)
  {
    reinit(complete_index_set(dealii_sparse_matrix.m()),
           complete_index_set(dealii_sparse_matrix.n()),
           dealii_sparse_matrix,
           MPI_COMM_SELF,
           drop_tolerance,
           copy_values,
           use_this_sparsity);
  }



  void
  SparseMatrix::reinit(const Epetra_CrsMatrix &input_matrix,
                       const bool              copy_values)
  {
    Assert(input_matrix.Filled() == true,
           ExcMessage("Input CrsMatrix has not called FillComplete()!"));

    column_space_map = std::make_unique<Epetra_Map>(input_matrix.DomainMap());

    const Epetra_CrsGraph *graph = &input_matrix.Graph();

    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();
    matrix.reset();
    matrix = std::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);

    matrix->FillComplete(*column_space_map, input_matrix.RangeMap(), true);

    if (copy_values == true)
      {
        // point to the first data entry in the two matrices and copy the
        // content
        const TrilinosScalar *in_values   = input_matrix[0];
        TrilinosScalar *      values      = (*matrix)[0];
        const size_type       my_nonzeros = input_matrix.NumMyNonzeros();
        std::memcpy(values, in_values, my_nonzeros * sizeof(TrilinosScalar));
      }

    last_action = Zero;
    compress(VectorOperation::insert);
  }


  void
  SparseMatrix::compress(::dealii::VectorOperation::values operation)
  {
    Epetra_CombineMode mode = last_action;
    if (last_action == Zero)
      {
        if ((operation == ::dealii::VectorOperation::add) ||
            (operation == ::dealii::VectorOperation::unknown))
          mode = Add;
        else if (operation == ::dealii::VectorOperation::insert)
          mode = Insert;
        else
          Assert(
            false,
            ExcMessage(
              "compress() can only be called with VectorOperation add, insert, or unknown"));
      }
    else
      {
        Assert(((last_action == Add) &&
                (operation != ::dealii::VectorOperation::insert)) ||
                 ((last_action == Insert) &&
                  (operation != ::dealii::VectorOperation::add)),
               ExcMessage("Operation and argument to compress() do not match"));
      }

    // flush buffers
    int ierr;
    if (nonlocal_matrix.get() != nullptr && mode == Add)
      {
        // do only export in case of an add() operation, otherwise the owning
        // processor must have set the correct entry
        nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
        if (nonlocal_matrix_exporter.get() == nullptr)
          nonlocal_matrix_exporter =
            std::make_unique<Epetra_Export>(nonlocal_matrix->RowMap(),
                                            matrix->RowMap());
        ierr =
          matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
        ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
        nonlocal_matrix->PutScalar(0);
      }
    else
      ierr =
        matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true, mode);

    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    ierr = matrix->OptimizeStorage();
    AssertThrow(ierr == 0, ExcTrilinosError(ierr));

    last_action = Zero;

    compressed = true;
  }
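
  // Usage sketch (illustrative; 'i', 'j', and 'local_contribution' are
  // assumed): after element-wise assembly, every processor must call
  // compress() with the matching operation before the matrix can be used;
  // the call is collective over the communicator:
  //
  // @code
  //   matrix.add(i, j, local_contribution); // possibly an off-processor row
  //   ...
  //   matrix.compress(VectorOperation::add);
  // @endcode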


  void
  SparseMatrix::clear()
  {
    // When we clear the matrix, reset the pointer and generate an empty
    // matrix.
    column_space_map =
      std::make_unique<Epetra_Map>(0, 0, Utilities::Trilinos::comm_self());
    matrix = std::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
    nonlocal_matrix.reset();
    nonlocal_matrix_exporter.reset();

    matrix->FillComplete();

    compressed = true;
  }


  void
  SparseMatrix::clear_row(const size_type      row,
                          const TrilinosScalar new_diag_value)
  {
    Assert(matrix->Filled() == true, ExcMatrixNotCompressed());

    // Only do this on the rows owned locally on this processor.
    int local_row =
      matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
    if (local_row >= 0)
      {
        TrilinosScalar *values;
        int *           col_indices;
        int             num_entries;
        const int       ierr =
          matrix->ExtractMyRowView(local_row, num_entries, values, col_indices);
        (void)ierr;

        Assert(ierr == 0, ExcTrilinosError(ierr));

        const std::ptrdiff_t diag_index =
          std::find(col_indices, col_indices + num_entries, local_row) -
          col_indices;

        for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
          if (diag_index != j || new_diag_value == 0)
            values[j] = 0.;

        if (diag_index != num_entries)
          values[diag_index] = new_diag_value;
      }
  }



  void
  SparseMatrix::clear_rows(const std::vector<size_type> &rows,
                           const TrilinosScalar          new_diag_value)
  {
    for (const auto row : rows)
      clear_row(row, new_diag_value);
  }
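
  // Usage sketch (illustrative; 'boundary_dofs' is an assumed list of
  // constrained rows): clear_rows() is typically used when eliminating
  // Dirichlet-constrained rows, keeping a diagonal entry so the matrix stays
  // invertible:
  //
  // @code
  //   std::vector<TrilinosWrappers::SparseMatrix::size_type> boundary_dofs;
  //   ... // fill with the constrained row indices
  //   matrix.clear_rows(boundary_dofs, /*new_diag_value=*/1.0);
  // @endcode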


  TrilinosScalar
  SparseMatrix::operator()(const size_type i, const size_type j) const
  {
    // Extract local indices in the matrix.
    int trilinos_i =
          matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
        trilinos_j =
          matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
    TrilinosScalar value = 0.;

    // If the data is not on the present processor, we throw an exception.
    // This is one of the two tiny differences to the el(i,j) call, which does
    // not throw any assertions.
    if (trilinos_i == -1)
      {
        Assert(false,
               ExcAccessToNonLocalElement(
                 i, j, local_range().first, local_range().second));
      }
    else
      {
        // Check whether the matrix has already been transformed to local
        // indices.
        Assert(matrix->Filled(), ExcMatrixNotCompressed());

        // Prepare pointers for extraction of a view of the row.
        int             nnz_present = matrix->NumMyEntries(trilinos_i);
        int             nnz_extracted;
        int *           col_indices;
        TrilinosScalar *values;

        // Generate the view and make sure that we have not generated an
        // error.
        // TODO Check that col_indices are int and not long long
        int ierr = matrix->ExtractMyRowView(trilinos_i,
                                            nnz_extracted,
                                            values,
                                            col_indices);
        (void)ierr;
        Assert(ierr == 0, ExcTrilinosError(ierr));

        Assert(nnz_present == nnz_extracted,
               ExcDimensionMismatch(nnz_present, nnz_extracted));

        // Search the index where we look for the value, and then finally get
        // it.
        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -
          col_indices;

        // This is actually the only difference to the el(i,j) function, which
        // means that we throw an exception in this case instead of just
        // returning zero for an element that is not present in the sparsity
        // pattern.
        if (local_col_index == nnz_present)
          {
            Assert(false, ExcInvalidIndex(i, j));
          }
        else
          value = values[local_col_index];
      }

    return value;
  }



  TrilinosScalar
  SparseMatrix::el(const size_type i, const size_type j) const
  {
    // Extract local indices in the matrix.
    int trilinos_i =
          matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
        trilinos_j =
          matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
    TrilinosScalar value = 0.;

    // If the data is not on the present processor, we can't continue. Just
    // print out zero as discussed in the documentation of this function. if
    // you want error checking, use operator().
    if ((trilinos_i == -1) || (trilinos_j == -1))
      return 0.;
    else
      {
        // Check whether the matrix already is transformed to local indices.
        Assert(matrix->Filled(), ExcMatrixNotCompressed());

        // Prepare pointers for extraction of a view of the row.
        int             nnz_present = matrix->NumMyEntries(trilinos_i);
        int             nnz_extracted;
        int *           col_indices;
        TrilinosScalar *values;

        // Generate the view and make sure that we have not generated an
        // error.
        int ierr = matrix->ExtractMyRowView(trilinos_i,
                                            nnz_extracted,
                                            values,
                                            col_indices);
        (void)ierr;
        Assert(ierr == 0, ExcTrilinosError(ierr));

        Assert(nnz_present == nnz_extracted,
               ExcDimensionMismatch(nnz_present, nnz_extracted));

        // Search the index where we look for the value, and then finally get
        // it.
        const std::ptrdiff_t local_col_index =
          std::find(col_indices, col_indices + nnz_present, trilinos_j) -
          col_indices;

        // This is actually the only difference to the () function querying
        // (i,j), where we throw an exception instead of just returning zero
        // for an element that is not present in the sparsity pattern.
        if (local_col_index == nnz_present)
          value = 0;
        else
          value = values[local_col_index];
      }

    return value;
  }
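
  // The difference between the two access functions above, in a sketch
  // (illustrative; 'matrix' is assumed compressed and row i locally owned):
  // operator() asserts for entries outside the sparsity pattern, while el()
  // quietly returns zero:
  //
  // @code
  //   const TrilinosScalar a = matrix(i, j);    // asserts if (i,j) not stored
  //   const TrilinosScalar b = matrix.el(i, j); // returns 0. instead
  // @endcode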


  TrilinosScalar
  SparseMatrix::diag_element(const size_type i) const
  {
    Assert(m() == n(), ExcNotQuadratic());

#  ifdef DEBUG
    // use operator() in debug mode because it checks if this is a valid
    // element (in parallel)
    return operator()(i, i);
#  else
    // Trilinos doesn't seem to have a more efficient way to access the
    // diagonal than by just using the standard el(i,j) function.
    return el(i, i);
#  endif
  }



  unsigned int
  SparseMatrix::row_length(const size_type row) const
  {
    Assert(row < m(), ExcInternalError());

    // get a representation of the present row
    int ncols = -1;
    int local_row =
      matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(row));
    Assert((local_row >= 0), ExcAccessToNonlocalRow(row));

    // on the processor who owns this row, we'll have a non-negative value.
    if (local_row >= 0)
      {
        int ierr = matrix->NumMyRowEntries(local_row, ncols);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    return static_cast<unsigned int>(ncols);
  }

  void
  SparseMatrix::set(const std::vector<size_type> &    row_indices,
                    const std::vector<size_type> &    col_indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(row_indices.size() == values.m(),
           ExcDimensionMismatch(row_indices.size(), values.m()));
    Assert(col_indices.size() == values.n(),
           ExcDimensionMismatch(col_indices.size(), values.n()));

    for (size_type i = 0; i < row_indices.size(); ++i)
      set(row_indices[i],
          col_indices.size(),
          col_indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::set(const size_type                    row,
                    const std::vector<size_type> &     col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
  {
    Assert(col_indices.size() == values.size(),
           ExcDimensionMismatch(col_indices.size(), values.size()));

    set(row,
        col_indices.size(),
        col_indices.data(),
        values.data(),
        elide_zero_values);
  }



  template <>
  void
  SparseMatrix::set<TrilinosScalar>(const size_type       row,
                                    const size_type       n_cols,
                                    const size_type *     col_indices,
                                    const TrilinosScalar *values,
                                    const bool            elide_zero_values)
  {
    AssertIndexRange(row, this->m());

    int ierr;
    if (last_action == Add)
      {
        ierr =
          matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), true);

        Assert(ierr == 0, ExcTrilinosError(ierr));
      }

    last_action = Insert;

    const TrilinosWrappers::types::int_type *col_index_ptr;
    const TrilinosScalar *                   col_value_ptr;
    const TrilinosWrappers::types::int_type  trilinos_row = row;
    TrilinosWrappers::types::int_type        n_columns;

    boost::container::small_vector<TrilinosScalar, 200> local_value_array(
      n_cols);
    boost::container::small_vector<TrilinosWrappers::types::int_type, 200>
      local_index_array(n_cols);

    // If we don't elide zeros, the pointers are already available... need to
    // cast to non-const pointers as that is the format taken by Trilinos (but
    // we will not modify const data)
    if (elide_zero_values == false)
      {
        col_index_ptr =
          reinterpret_cast<const TrilinosWrappers::types::int_type *>(
            col_indices);
        col_value_ptr = values;
        n_columns     = n_cols;
      }
    else
      {
        // Otherwise, extract nonzero values in each row and get the
        // respective indices.
        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();

        n_columns = 0;
        for (size_type j = 0; j < n_cols; ++j)
          {
            const double value = values[j];
            AssertIsFinite(value);
            if (value != 0)
              {
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;
                n_columns++;
              }
          }

        AssertIndexRange(n_columns, n_cols + 1);
      }


    // If the calling matrix owns the row to which we want to insert values,
    // we can directly call the Epetra_CrsMatrix input function, which is much
    // faster than the Epetra_FECrsMatrix function. We distinguish between two
    // cases: the first one is when the matrix is not filled (i.e., it is
    // possible to add new elements to the sparsity pattern), and the second
    // one is when the pattern is already fixed. In the former case, we add
    // the possibility to insert new values, and in the second we just replace
    // data.
    if (matrix->RowMap().MyGID(
          static_cast<TrilinosWrappers::types::int_type>(row)) == true)
      {
        if (matrix->Filled() == false)
          {
            ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
              row, static_cast<int>(n_columns), col_value_ptr, col_index_ptr);

            // When inserting elements, we do not want to create exceptions in
            // the case when inserting non-local data (since that's what we
            // want to do right now).
            if (ierr > 0)
              ierr = 0;
          }
        else
          ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(row,
                                                               n_columns,
                                                               col_value_ptr,
                                                               col_index_ptr);
      }
    else
      {
        // When we're at off-processor data, we have to stick with the
        // standard Insert/ReplaceGlobalValues function. Nevertheless, the way
        // we call it is the fastest one (any other will lead to repeated
        // allocation and deallocation of memory in order to call the function
        // we already use, which is very inefficient if writing one element at
        // a time).
        compressed = false;

        if (matrix->Filled() == false)
          {
            ierr = matrix->InsertGlobalValues(1,
                                              &trilinos_row,
                                              n_columns,
                                              col_index_ptr,
                                              &col_value_ptr,
                                              Epetra_FECrsMatrix::ROW_MAJOR);
            if (ierr > 0)
              ierr = 0;
          }
        else
          ierr = matrix->ReplaceGlobalValues(1,
                                             &trilinos_row,
                                             n_columns,
                                             col_index_ptr,
                                             &col_value_ptr,
                                             Epetra_FECrsMatrix::ROW_MAJOR);
        // use the FECrsMatrix facilities for set even in the case when we
        // have explicitly set the off-processor rows because that only works
        // properly when adding elements, not when setting them (since we want
        // to only touch elements that have been set explicitly, and there is
        // no way on the receiving processor to identify them otherwise)
      }

    Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
    AssertThrow(ierr >= 0, ExcTrilinosError(ierr));
  }
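
  // Usage sketch (illustrative; 'dof_indices' and 'cell_matrix' are assumed
  // to be filled by the cell loop): setting a whole local block at once is
  // much faster than writing entries one by one, and with
  // elide_zero_values == false explicit zeros are stored as well:
  //
  // @code
  //   matrix.set(dof_indices, cell_matrix, /*elide_zero_values=*/false);
  //   matrix.compress(VectorOperation::insert);
  // @endcode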


  void
  SparseMatrix::add(const std::vector<size_type> &    indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(indices.size() == values.m(),
           ExcDimensionMismatch(indices.size(), values.m()));
    Assert(values.m() == values.n(), ExcNotQuadratic());

    for (size_type i = 0; i < indices.size(); ++i)
      add(indices[i],
          indices.size(),
          indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::add(const std::vector<size_type> &    row_indices,
                    const std::vector<size_type> &    col_indices,
                    const FullMatrix<TrilinosScalar> &values,
                    const bool                        elide_zero_values)
  {
    Assert(row_indices.size() == values.m(),
           ExcDimensionMismatch(row_indices.size(), values.m()));
    Assert(col_indices.size() == values.n(),
           ExcDimensionMismatch(col_indices.size(), values.n()));

    for (size_type i = 0; i < row_indices.size(); ++i)
      add(row_indices[i],
          col_indices.size(),
          col_indices.data(),
          &values(i, 0),
          elide_zero_values);
  }



  void
  SparseMatrix::add(const size_type                    row,
                    const std::vector<size_type> &     col_indices,
                    const std::vector<TrilinosScalar> &values,
                    const bool                         elide_zero_values)
  {
    Assert(col_indices.size() == values.size(),
           ExcDimensionMismatch(col_indices.size(), values.size()));

    add(row,
        col_indices.size(),
        col_indices.data(),
        values.data(),
        elide_zero_values);
  }



  void
  SparseMatrix::add(const size_type       row,
                    const size_type       n_cols,
                    const size_type *     col_indices,
                    const TrilinosScalar *values,
                    const bool            elide_zero_values,
                    const bool /*col_indices_are_sorted*/)
  {
    AssertIndexRange(row, this->m());
    int ierr;
    if (last_action == Insert)
      {
        // TODO: this could lead to a deadlock when only one processor
        // calls GlobalAssemble.
        ierr =
          matrix->GlobalAssemble(*column_space_map, matrix->RowMap(), false);

        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

    last_action = Add;

    const TrilinosWrappers::types::int_type *col_index_ptr;
    const TrilinosScalar *                   col_value_ptr;
    const TrilinosWrappers::types::int_type  trilinos_row = row;
    TrilinosWrappers::types::int_type        n_columns;

    boost::container::small_vector<TrilinosScalar, 100> local_value_array(
      n_cols);
    boost::container::small_vector<TrilinosWrappers::types::int_type, 100>
      local_index_array(n_cols);

    // If we don't elide zeros, the pointers are already available... need to
    // cast to non-const pointers as that is the format taken by Trilinos (but
    // we will not modify const data)
    if (elide_zero_values == false)
      {
        col_index_ptr =
          reinterpret_cast<const TrilinosWrappers::types::int_type *>(
            col_indices);
        col_value_ptr = values;
        n_columns     = n_cols;
#  ifdef DEBUG
        for (size_type j = 0; j < n_cols; ++j)
          AssertIsFinite(values[j]);
#  endif
      }
    else
      {
        // Otherwise, extract nonzero values in each row and the corresponding
        // index.
        col_index_ptr = local_index_array.data();
        col_value_ptr = local_value_array.data();

        n_columns = 0;
        for (size_type j = 0; j < n_cols; ++j)
          {
            const double value = values[j];

            AssertIsFinite(value);
            if (value != 0)
              {
                local_index_array[n_columns] = col_indices[j];
                local_value_array[n_columns] = value;
                ++n_columns;
              }
          }

        AssertIndexRange(n_columns, n_cols + 1);
      }
    // Exit early if there is nothing to do
    if (n_columns == 0)
      {
        return;
      }

    // If the calling processor owns the row to which we want to add values, we
    // can directly call the Epetra_CrsMatrix input function, which is much
    // faster than the Epetra_FECrsMatrix function.
    if (matrix->RowMap().MyGID(
          static_cast<TrilinosWrappers::types::int_type>(row)) == true)
      {
        ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(row,
                                                             n_columns,
                                                             col_value_ptr,
                                                             col_index_ptr);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else if (nonlocal_matrix.get() != nullptr)
      {
        compressed = false;
        // this is the case when we have explicitly set the off-processor rows
        // and want to create a separate matrix object for them (to retain
        // thread-safety)
        Assert(nonlocal_matrix->RowMap().LID(
                 static_cast<TrilinosWrappers::types::int_type>(row)) != -1,
               ExcMessage("Attempted to write into off-processor matrix row "
                          "that has not been specified as being writable upon "
                          "initialization"));
        ierr = nonlocal_matrix->SumIntoGlobalValues(row,
                                                    n_columns,
                                                    col_value_ptr,
                                                    col_index_ptr);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }
    else
      {
        // When we're at off-processor data, we have to stick with the
        // standard SumIntoGlobalValues function. Nevertheless, the way we
        // call it is the fastest one (any other will lead to repeated
        // allocation and deallocation of memory in order to call the function
        // we already use, which is very inefficient if writing one element at
        // a time).
        compressed = false;

        ierr = matrix->SumIntoGlobalValues(1,
                                           &trilinos_row,
                                           n_columns,
                                           col_index_ptr,
                                           &col_value_ptr,
                                           Epetra_FECrsMatrix::ROW_MAJOR);
        AssertThrow(ierr == 0, ExcTrilinosError(ierr));
      }

#  ifdef DEBUG
    if (ierr > 0)
      {
        std::cout << "------------------------------------------" << std::endl;
        std::cout << "Got error " << ierr << " in row " << row << " of proc "
                  << matrix->RowMap().Comm().MyPID()
                  << " when trying to add the columns:" << std::endl;
        for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
          std::cout << col_index_ptr[i] << " ";
        std::cout << std::endl << std::endl;
        std::cout << "Matrix row "
                  << (matrix->RowMap().MyGID(
                        static_cast<TrilinosWrappers::types::int_type>(row)) ==
                          false ?
                        "(nonlocal part)" :
                        "")
                  << " has the following indices:" << std::endl;
        std::vector<TrilinosWrappers::types::int_type> indices;
        const Epetra_CrsGraph *                        graph =
          (nonlocal_matrix.get() != nullptr &&
           matrix->RowMap().MyGID(
             static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
            &nonlocal_matrix->Graph() :
            &matrix->Graph();

        indices.resize(graph->NumGlobalIndices(row));
        int n_indices = 0;
        graph->ExtractGlobalRowCopy(row,
                                    indices.size(),
                                    n_indices,
                                    indices.data());
        AssertDimension(n_indices, indices.size());

        for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
          std::cout << indices[i] << " ";
        std::cout << std::endl << std::endl;
        Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
      }
#  endif
    Assert(ierr >= 0, ExcTrilinosError(ierr));
  }
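
  // Usage sketch (illustrative; 'constraints', 'cell_matrix', and
  // 'local_dof_indices' are assumed): during assembly, local contributions
  // are accumulated with add(), usually indirectly through
  // AffineConstraints, and shipped to their owners in compress():
  //
  // @code
  //   constraints.distribute_local_to_global(cell_matrix,
  //                                          local_dof_indices,
  //                                          matrix);
  //   ... // after the cell loop:
  //   matrix.compress(VectorOperation::add);
  // @endcode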


  SparseMatrix &
  SparseMatrix::operator=(const double d)
  {
    Assert(d == 0, ExcScalarAssignmentOnlyForZeroValue());
    compress(
      ::dealii::VectorOperation::unknown); // TODO: why do we do this? Should we
                                           // not check for is_compressed?

    const int ierr = matrix->PutScalar(d);
    AssertThrow(ierr == 0, ExcTrilinosError(ierr));
    if (nonlocal_matrix.get() != nullptr)
      nonlocal_matrix->PutScalar(d);

    return *this;
  }



  void
  SparseMatrix::add(const TrilinosScalar factor, const SparseMatrix &rhs)
  {
    AssertDimension(rhs.m(), m());
    AssertDimension(rhs.n(), n());
    AssertDimension(rhs.local_range().first, local_range().first);
    AssertDimension(rhs.local_range().second, local_range().second);
    Assert(matrix->RowMap().SameAs(rhs.matrix->RowMap()),
           ExcMessage("Can only add matrices with same distribution of rows"));
    Assert(matrix->Filled() && rhs.matrix->Filled(),
           ExcMessage("Addition of matrices only allowed if matrices are "
                      "filled, i.e., compress() has been called"));

    const bool same_col_map = matrix->ColMap().SameAs(rhs.matrix->ColMap());

    for (const auto row : locally_owned_range_indices())
      {
        const int row_local = matrix->RowMap().LID(
          static_cast<TrilinosWrappers::types::int_type>(row));
        Assert((row_local >= 0), ExcAccessToNonlocalRow(row));

        // First get a view to the matrix columns of both matrices. Note that
        // the data is in local index spaces, so we may only compare column
        // indices directly if they are derived from the same column map.
        int             n_entries, rhs_n_entries;
        TrilinosScalar *value_ptr, *rhs_value_ptr;
        int *           index_ptr, *rhs_index_ptr;
        int             ierr = rhs.matrix->ExtractMyRowView(row_local,
                                                rhs_n_entries,
                                                rhs_value_ptr,
                                                rhs_index_ptr);
        (void)ierr;
        Assert(ierr == 0, ExcTrilinosError(ierr));

        ierr =
          matrix->ExtractMyRowView(row_local, n_entries, value_ptr, index_ptr);
        Assert(ierr == 0, ExcTrilinosError(ierr));
        bool expensive_checks = (n_entries != rhs_n_entries || !same_col_map);
        if (!expensive_checks)
          {
            // check if the column indices are the same. If yes, can simply
            // copy over the data.
            expensive_checks = std::memcmp(static_cast<void *>(index_ptr),
                                           static_cast<void *>(rhs_index_ptr),
                                           sizeof(int) * n_entries) != 0;
            if (!expensive_checks)
              for (int i = 0; i < n_entries; ++i)
                value_ptr[i] += rhs_value_ptr[i] * factor;
          }
        // Now to the expensive case where we need to check all column indices
        // against each other (transformed into global index space) and where
        // we need to make sure that all entries we are about to add into the
        // lhs matrix actually exist
        if (expensive_checks)
          {
            for (int i = 0; i < rhs_n_entries; ++i)
              {
                if (rhs_value_ptr[i] == 0.)
                  continue;
                const TrilinosWrappers::types::int_type rhs_global_col =
                  global_column_index(*rhs.matrix, rhs_index_ptr[i]);
                int  local_col   = matrix->ColMap().LID(rhs_global_col);
                int *local_index = Utilities::lower_bound(index_ptr,
                                                          index_ptr + n_entries,
                                                          local_col);
                Assert(local_index != index_ptr + n_entries &&
                         *local_index == local_col,
                       ExcMessage(
                         "Adding the entries from the other matrix "
                         "failed, because the sparsity pattern "
                         "of that matrix includes more elements than the "
                         "calling matrix, which is not allowed."));
                value_ptr[local_index - index_ptr] += factor * rhs_value_ptr[i];
              }
          }
      }
  }
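
  // Usage sketch (illustrative; A and B assumed compressed, with identical
  // row distribution and B's sparsity pattern contained in A's): form
  // A += 2 B. The fast path applies when the column indices of each row
  // coincide:
  //
  // @code
  //   A.add(2.0, B);
  // @endcode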
1860 
1861 
1862 
1863  void
1865  {
1866  // This only flips a flag that tells
1867  // Trilinos that any vmult operation
1868  // should be done with the
1869  // transpose. However, the matrix
1870  // structure is not reset.
1871  int ierr;
1872 
1873  if (!matrix->UseTranspose())
1874  {
1875  ierr = matrix->SetUseTranspose(true);
1876  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1877  }
1878  else
1879  {
1880  ierr = matrix->SetUseTranspose(false);
1881  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
1882  }
1883  }
1884 
1885 
1886 
1887  SparseMatrix &
1889  {
1890  const int ierr = matrix->Scale(a);
1891  Assert(ierr == 0, ExcTrilinosError(ierr));
1892  (void)ierr; // removes -Wunused-variable in optimized mode
1893 
1894  return *this;
1895  }
1896 
1897 
1898 
1899  SparseMatrix &
1900  SparseMatrix::operator/=(const TrilinosScalar a)
1901  {
1902  Assert(a != 0, ExcDivideByZero());
1903 
1904  const TrilinosScalar factor = 1. / a;
1905 
1906  const int ierr = matrix->Scale(factor);
1907  Assert(ierr == 0, ExcTrilinosError(ierr));
1908  (void)ierr; // removes -Wunused-variable in optimized mode
1909 
1910  return *this;
1911  }
1912 
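The two scaling operators above simply forward to Epetra_CrsMatrix::Scale(); a short sketch (hypothetical matrix `A`):

// Sketch: in-place scaling of every stored entry.
A *= 2.0; // all entries doubled
A /= 2.0; // and halved again, restoring the original values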
1913 
1914 
1915  TrilinosScalar
1916  SparseMatrix::l1_norm() const
1917  {
1918  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1919  return matrix->NormOne();
1920  }
1921 
1922 
1923 
1924  TrilinosScalar
1925  SparseMatrix::linfty_norm() const
1926  {
1927  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1928  return matrix->NormInf();
1929  }
1930 
1931 
1932 
1933  TrilinosScalar
1934  SparseMatrix::frobenius_norm() const
1935  {
1936  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1937  return matrix->NormFrobenius();
1938  }
1939 
1940 
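For reference, the three norms above follow the usual conventions and are delegated to Epetra: \(\|A\|_1 = \max_j \sum_i |a_{ij}|\) (maximum column sum), \(\|A\|_\infty = \max_i \sum_j |a_{ij}|\) (maximum row sum), and \(\|A\|_F = \sqrt{\sum_{i,j} a_{ij}^2}\). All three require a compressed matrix, as the assertions check.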
1941 
1942  namespace internal
1943  {
1944  namespace SparseMatrixImplementation
1945  {
1946  template <typename VectorType>
1947  inline void
1948  check_vector_map_equality(const Epetra_CrsMatrix &,
1949  const VectorType &,
1950  const VectorType &)
1951  {}
1952 
1953  inline void
1954  check_vector_map_equality(const Epetra_CrsMatrix & m,
1955  const TrilinosWrappers::MPI::Vector &in,
1956  const TrilinosWrappers::MPI::Vector &out)
1957  {
1958  Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
1959  ExcMessage(
1960  "Column map of matrix does not fit with vector map!"));
1961  Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
1962  ExcMessage("Row map of matrix does not fit with vector map!"));
1963  (void)m;
1964  (void)in;
1965  (void)out;
1966  }
1967  } // namespace SparseMatrixImplementation
1968  } // namespace internal
1969 
1970 
1971  template <typename VectorType>
1972  typename std::enable_if<
1973  std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
1974  SparseMatrix::vmult(VectorType &dst, const VectorType &src) const
1975  {
1976  Assert(&src != &dst, ExcSourceEqualsDestination());
1977  Assert(matrix->Filled(), ExcMatrixNotCompressed());
1978  (void)src;
1979  (void)dst;
1980 
1981  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
1982  src,
1983  dst);
1984  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
1985  AssertDimension(dst_local_size, matrix->RangeMap().NumMyPoints());
1986  const size_type src_local_size = internal::end(src) - internal::begin(src);
1987  AssertDimension(src_local_size, matrix->DomainMap().NumMyPoints());
1988 
1989  Epetra_MultiVector tril_dst(
1990  View, matrix->RangeMap(), internal::begin(dst), dst_local_size, 1);
1991  Epetra_MultiVector tril_src(View,
1992  matrix->DomainMap(),
1993  const_cast<TrilinosScalar *>(
1994  internal::begin(src)),
1995  src_local_size,
1996  1);
1997 
1998  const int ierr = matrix->Multiply(false, tril_src, tril_dst);
1999  Assert(ierr == 0, ExcTrilinosError(ierr));
2000  (void)ierr; // removes -Wunused-variable in optimized mode
2001  }
2002 
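The assertions above demand that dst is laid out like the matrix range map and src like the domain map. A hedged usage sketch (the names `A` and `mpi_comm` are hypothetical):

// Sketch: build vectors that match the matrix partitioning, then
// compute dst = A * src across all MPI ranks.
TrilinosWrappers::MPI::Vector dst(A.locally_owned_range_indices(), mpi_comm);
TrilinosWrappers::MPI::Vector src(A.locally_owned_domain_indices(), mpi_comm);
A.vmult(dst, src);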
2003 
2004 
2005  template <typename VectorType>
2006  typename std::enable_if<
2007  !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2008  SparseMatrix::vmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2009  {
2010  AssertThrow(false, ExcNotImplemented());
2011  }
2012 
2013 
2014 
2015  template <typename VectorType>
2016  typename std::enable_if<
2017  std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2018  SparseMatrix::Tvmult(VectorType &dst, const VectorType &src) const
2019  {
2020  Assert(&src != &dst, ExcSourceEqualsDestination());
2021  Assert(matrix->Filled(), ExcMatrixNotCompressed());
2022 
2023  internal::SparseMatrixImplementation::check_vector_map_equality(*matrix,
2024  dst,
2025  src);
2026  const size_type dst_local_size = internal::end(dst) - internal::begin(dst);
2027  AssertDimension(dst_local_size, matrix->DomainMap().NumMyPoints());
2028  const size_type src_local_size = internal::end(src) - internal::begin(src);
2029  AssertDimension(src_local_size, matrix->RangeMap().NumMyPoints());
2030 
2031  Epetra_MultiVector tril_dst(
2032  View, matrix->DomainMap(), internal::begin(dst), dst_local_size, 1);
2033  Epetra_MultiVector tril_src(View,
2034  matrix->RangeMap(),
2035  const_cast<double *>(internal::begin(src)),
2036  src_local_size,
2037  1);
2038 
2039  const int ierr = matrix->Multiply(true, tril_src, tril_dst);
2040  Assert(ierr == 0, ExcTrilinosError(ierr));
2041  (void)ierr; // removes -Wunused-variable in optimized mode
2042  }
2043 
2044 
2045 
2046  template <typename VectorType>
2047  typename std::enable_if<
2048  !std::is_same<typename VectorType::value_type, TrilinosScalar>::value>::type
2049  SparseMatrix::Tvmult(VectorType & /*dst*/, const VectorType & /*src*/) const
2050  {
2051  AssertThrow(false, ExcNotImplemented());
2052  }
2053 
2054 
2055 
2056  template <typename VectorType>
2057  void
2058  SparseMatrix::vmult_add(VectorType &dst, const VectorType &src) const
2059  {
2060  Assert(&src != &dst, ExcSourceEqualsDestination());
2061 
2062  // Reinit a temporary vector with 'omit_zeroing_entries' set to true,
2063  // i.e., without initializing the entries, to save time.
2064  VectorType tmp_vector;
2065  tmp_vector.reinit(dst, true);
2066  vmult(tmp_vector, src);
2067  dst += tmp_vector;
2068  }
2069 
2070 
2071 
2072  template <typename VectorType>
2073  void
2074  SparseMatrix::Tvmult_add(VectorType &dst, const VectorType &src) const
2075  {
2076  Assert(&src != &dst, ExcSourceEqualsDestination());
2077 
2078  // Reinit a temporary vector with 'omit_zeroing_entries' set to true,
2079  // i.e., without initializing the entries, to save time.
2080  VectorType tmp_vector;
2081  tmp_vector.reinit(dst, true);
2082  Tvmult(tmp_vector, src);
2083  dst += tmp_vector;
2084  }
2085 
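Both *_add variants accumulate into the destination instead of overwriting it, at the cost of one temporary vector. Sketch (hypothetical names `A`, `x`, `y`, `z`):

// Sketch: y += A * x and z += A^T * x, keeping the prior contents
// of y and z.
A.vmult_add(y, x);
A.Tvmult_add(z, x);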
2086 
2087 
2088  TrilinosScalar
2089  SparseMatrix::matrix_norm_square(const MPI::Vector &v) const
2090  {
2091  Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2092 
2093  MPI::Vector temp_vector;
2094  temp_vector.reinit(v, true);
2095 
2096  vmult(temp_vector, v);
2097  return temp_vector * v;
2098  }
2099 
2100 
2101 
2102  TrilinosScalar
2103  SparseMatrix::matrix_scalar_product(const MPI::Vector &u,
2104  const MPI::Vector &v) const
2105  {
2106  Assert(matrix->RowMap().SameAs(matrix->DomainMap()), ExcNotQuadratic());
2107 
2108  MPI::Vector temp_vector;
2109  temp_vector.reinit(v, true);
2110 
2111  vmult(temp_vector, v);
2112  return u * temp_vector;
2113  }
2114 
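In formulas, the two functions above return \(v^T A v\) and \(u^T A v\), respectively; matrix_norm_square() yields a genuine squared norm only if \(A\) is symmetric positive definite. Both require a square matrix, as the ExcNotQuadratic assertions check.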
2115 
2116 
2117  TrilinosScalar
2118  SparseMatrix::residual(MPI::Vector &dst,
2119  const MPI::Vector &x,
2120  const MPI::Vector &b) const
2121  {
2122  vmult(dst, x);
2123  dst -= b;
2124  dst *= -1.;
2125 
2126  return dst.l2_norm();
2127  }
2128 
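residual() therefore computes \(\mathrm{dst} = b - A x\) (via \(A x - b\) followed by a sign flip) and returns \(\|b - A x\|_2\).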
2129 
2130 
2131  namespace internals
2132  {
2133  using size_type = SparseMatrix::size_type;
2134 
2135  void
2136  perform_mmult(const SparseMatrix &inputleft,
2137  const SparseMatrix &inputright,
2138  SparseMatrix & result,
2139  const MPI::Vector & V,
2140  const bool transpose_left)
2141  {
2142  const bool use_vector = (V.size() == inputright.m());
2143  if (transpose_left == false)
2144  {
2145  Assert(inputleft.n() == inputright.m(),
2146  ExcDimensionMismatch(inputleft.n(), inputright.m()));
2147  Assert(inputleft.trilinos_matrix().DomainMap().SameAs(
2148  inputright.trilinos_matrix().RangeMap()),
2149  ExcMessage("Parallel partitioning of A and B does not fit."));
2150  }
2151  else
2152  {
2153  Assert(inputleft.m() == inputright.m(),
2154  ExcDimensionMismatch(inputleft.m(), inputright.m()));
2155  Assert(inputleft.trilinos_matrix().RangeMap().SameAs(
2156  inputright.trilinos_matrix().RangeMap()),
2157  ExcMessage("Parallel partitioning of A and B does not fit."));
2158  }
2159 
2160  result.clear();
2161 
2162  // create a suitable operator B: in case
2163  // we do not use a vector, all we need to
2164  // do is to set the pointer. Otherwise,
2165  // we insert the data from B, but
2166  // multiply each row with the respective
2167  // vector element.
2168  Teuchos::RCP<Epetra_CrsMatrix> mod_B;
2169  if (use_vector == false)
2170  {
2171  mod_B = Teuchos::rcp(const_cast<Epetra_CrsMatrix *>(
2172  &inputright.trilinos_matrix()),
2173  false);
2174  }
2175  else
2176  {
2177  mod_B = Teuchos::rcp(
2178  new Epetra_CrsMatrix(Copy, inputright.trilinos_sparsity_pattern()),
2179  true);
2180  mod_B->FillComplete(inputright.trilinos_matrix().DomainMap(),
2181  inputright.trilinos_matrix().RangeMap());
2182  Assert(inputright.local_range() == V.local_range(),
2183  ExcMessage("Parallel distribution of matrix B and vector V "
2184  "does not match."));
2185 
2186  const int local_N = inputright.local_size();
2187  for (int i = 0; i < local_N; ++i)
2188  {
2189  int N_entries = -1;
2190  double *new_data, *B_data;
2191  mod_B->ExtractMyRowView(i, N_entries, new_data);
2192  inputright.trilinos_matrix().ExtractMyRowView(i,
2193  N_entries,
2194  B_data);
2195  double value = V.trilinos_vector()[0][i];
2196  for (TrilinosWrappers::types::int_type j = 0; j < N_entries; ++j)
2197  new_data[j] = value * B_data[j];
2198  }
2199  }
2200 
2201 
2202  SparseMatrix tmp_result(transpose_left ?
2203  inputleft.locally_owned_domain_indices() :
2204  inputleft.locally_owned_range_indices(),
2205  inputright.locally_owned_domain_indices(),
2206  inputleft.get_mpi_communicator());
2207 
2208 # ifdef DEAL_II_TRILINOS_WITH_EPETRAEXT
2209  EpetraExt::MatrixMatrix::Multiply(inputleft.trilinos_matrix(),
2210  transpose_left,
2211  *mod_B,
2212  false,
2213  const_cast<Epetra_CrsMatrix &>(
2214  tmp_result.trilinos_matrix()));
2215 # else
2216  Assert("false", ExcMessage("This function requires EpetraExt."));
2217 # endif
2218  result.reinit(tmp_result.trilinos_matrix());
2219  }
2220  } // namespace internals
2221 
2222 
2223  void
2224  SparseMatrix::mmult(SparseMatrix &C,
2225  const SparseMatrix &B,
2226  const MPI::Vector & V) const
2227  {
2228  internals::perform_mmult(*this, B, C, V, false);
2229  }
2230 
2231 
2232 
2233  void
2234  SparseMatrix::Tmmult(SparseMatrix &C,
2235  const SparseMatrix &B,
2236  const MPI::Vector & V) const
2237  {
2238  internals::perform_mmult(*this, B, C, V, true);
2239  }
2240 
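mmult() computes \(C = A B\) (or \(C = A \,\mathrm{diag}(V)\, B\) when a vector is given) and Tmmult() the transposed variant \(C = A^T B\); the result matrix is cleared and rebuilt inside perform_mmult(). A hedged sketch (the names `A`, `B`, `C` are hypothetical):

// Sketch: C is reinitialized internally, so an empty matrix suffices.
TrilinosWrappers::SparseMatrix C;
A.mmult(C, B);  // C = A * B
A.Tmmult(C, B); // C = A^T * B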
2241 
2242 
2243  void
2244  SparseMatrix::write_ascii()
2245  {
2246  Assert(false, ExcNotImplemented());
2247  }
2248 
2249 
2250 
2251  // As of now, no particularly neat
2252  // output is generated in case of
2253  // multiple processors.
2254  void
2255  SparseMatrix::print(std::ostream &out,
2256  const bool print_detailed_trilinos_information) const
2257  {
2258  if (print_detailed_trilinos_information == true)
2259  out << *matrix;
2260  else
2261  {
2262  double *values;
2263  int * indices;
2264  int num_entries;
2265 
2266  for (int i = 0; i < matrix->NumMyRows(); ++i)
2267  {
2268  const int ierr =
2269  matrix->ExtractMyRowView(i, num_entries, values, indices);
2270  (void)ierr;
2271  Assert(ierr == 0, ExcTrilinosError(ierr));
2272 
2273  for (TrilinosWrappers::types::int_type j = 0; j < num_entries; ++j)
2274  out << "(" << TrilinosWrappers::global_row_index(*matrix, i)
2275  << ","
2276  << TrilinosWrappers::global_column_index(*matrix, indices[j])
2277  << ") " << values[j] << std::endl;
2278  }
2279  }
2280 
2281  AssertThrow(out, ExcIO());
2282  }
2283 
2284 
2285 
2286  SparseMatrix::size_type
2287  SparseMatrix::memory_consumption() const
2288  {
2289  size_type static_memory =
2290  sizeof(*this) + sizeof(*matrix) + sizeof(*matrix->Graph().DataPtr());
2291  return (
2293  matrix->NumMyNonzeros() +
2294  sizeof(int) * local_size() + static_memory);
2295  }
2296 
2297 
2298 
2299  MPI_Comm
2300  SparseMatrix::get_mpi_communicator() const
2301  {
2302 # ifdef DEAL_II_WITH_MPI
2303 
2304  const Epetra_MpiComm *mpi_comm =
2305  dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
2306  Assert(mpi_comm != nullptr, ExcInternalError());
2307  return mpi_comm->Comm();
2308 # else
2309 
2310  return MPI_COMM_SELF;
2311 
2312 # endif
2313  }
2314 } // namespace TrilinosWrappers
2315 
2316 
2317 namespace TrilinosWrappers
2318 {
2319  namespace internal
2320  {
2321  namespace
2322  {
2323 # ifndef DEAL_II_WITH_MPI
2324  Epetra_Map
2325  make_serial_Epetra_map(const IndexSet &serial_partitioning)
2326  {
2327  // See IndexSet::make_trilinos_map
2328  return Epetra_Map(
2329  TrilinosWrappers::types::int_type(serial_partitioning.size()),
2330  TrilinosWrappers::types::int_type(serial_partitioning.n_elements()),
2331  0,
2332  Epetra_SerialComm());
2333  }
2334 # endif
2335  } // namespace
2336 
2337  namespace LinearOperatorImplementation
2338  {
2339  TrilinosPayload::TrilinosPayload()
2340  : use_transpose(false)
2341  ,
2342 # ifdef DEAL_II_WITH_MPI
2343  communicator(MPI_COMM_SELF)
2344  , domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
2345  , range_map(IndexSet().make_trilinos_map(communicator.Comm()))
2346 # else
2347  domain_map(internal::make_serial_Epetra_map(IndexSet()))
2348  , range_map(internal::make_serial_Epetra_map(IndexSet()))
2349 # endif
2350  {
2351  vmult = [](Range &, const Domain &) {
2352  Assert(false,
2353  ExcMessage("Uninitialized TrilinosPayload::vmult called "
2354  "(Default constructor)"));
2355  };
2356 
2357  Tvmult = [](Domain &, const Range &) {
2358  Assert(false,
2359  ExcMessage("Uninitialized TrilinosPayload::Tvmult called "
2360  "(Default constructor)"));
2361  };
2362 
2363  inv_vmult = [](Domain &, const Range &) {
2364  Assert(false,
2365  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2366  "(Default constructor)"));
2367  };
2368 
2369  inv_Tvmult = [](Range &, const Domain &) {
2370  Assert(false,
2371  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2372  "(Default constructor)"));
2373  };
2374  }
2375 
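The payload is not normally constructed by hand; it is the glue that lets a TrilinosWrappers matrix participate in deal.II's LinearOperator framework. A hedged sketch (the matrix `A` is hypothetical, and it assumes <deal.II/lac/trilinos_linear_operator.h> is included):

// Sketch: linear_operator() creates and stores a TrilinosPayload
// internally; the default constructor above backs uninitialized operators.
auto op = TrilinosWrappers::linear_operator<TrilinosWrappers::MPI::Vector>(A);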
2376 
2377 
2378  TrilinosPayload::TrilinosPayload(
2379  const TrilinosWrappers::SparseMatrix &matrix_exemplar,
2380  const TrilinosWrappers::SparseMatrix &matrix)
2381  : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
2382  ,
2383 # ifdef DEAL_II_WITH_MPI
2384  communicator(matrix_exemplar.get_mpi_communicator())
2385  , domain_map(
2386  matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
2387  communicator.Comm()))
2388  , range_map(
2389  matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
2390  communicator.Comm()))
2391 # else
2392  domain_map(internal::make_serial_Epetra_map(
2393  matrix_exemplar.locally_owned_domain_indices()))
2394  , range_map(internal::make_serial_Epetra_map(
2395  matrix_exemplar.locally_owned_range_indices()))
2396 # endif
2397  {
2398  vmult = [&matrix_exemplar, &matrix](Range & tril_dst,
2399  const Domain &tril_src) {
2400  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2401  Assert(&tril_src != &tril_dst,
2402  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2403  Assert(matrix.trilinos_matrix().Filled(),
2404  TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2405  internal::check_vector_map_equality(
2406  matrix_exemplar.trilinos_matrix(),
2407  tril_src,
2408  tril_dst,
2409  matrix_exemplar.trilinos_matrix().UseTranspose());
2410  internal::check_vector_map_equality(
2411  matrix.trilinos_matrix(),
2412  tril_src,
2413  tril_dst,
2414  matrix.trilinos_matrix().UseTranspose());
2415 
2416  const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
2417  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2418  };
2419 
2420  Tvmult = [&matrix_exemplar, &matrix](Domain & tril_dst,
2421  const Range &tril_src) {
2422  // Duplicated from TrilinosWrappers::SparseMatrix::Tvmult
2423  Assert(&tril_src != &tril_dst,
2424  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2425  Assert(matrix.trilinos_matrix().Filled(),
2426  TrilinosWrappers::SparseMatrix::ExcMatrixNotCompressed());
2427  internal::check_vector_map_equality(
2428  matrix_exemplar.trilinos_matrix(),
2429  tril_src,
2430  tril_dst,
2431  !matrix_exemplar.trilinos_matrix().UseTranspose());
2432  internal::check_vector_map_equality(
2433  matrix.trilinos_matrix(),
2434  tril_src,
2435  tril_dst,
2436  !matrix.trilinos_matrix().UseTranspose());
2437 
2438  Epetra_CrsMatrix &tril_mtrx_non_const =
2439  const_cast<Epetra_CrsMatrix &>(matrix.trilinos_matrix());
2440  tril_mtrx_non_const.SetUseTranspose(
2441  !matrix.trilinos_matrix().UseTranspose());
2442  const int ierr = matrix.trilinos_matrix().Apply(tril_src, tril_dst);
2443  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2444  tril_mtrx_non_const.SetUseTranspose(
2445  !matrix.trilinos_matrix().UseTranspose());
2446  };
2447 
2448  inv_vmult = [](Domain &, const Range &) {
2449  Assert(false,
2450  ExcMessage("Uninitialized TrilinosPayload::inv_vmult called "
2451  "(Matrix constructor with matrix exemplar)"));
2452  };
2453 
2454  inv_Tvmult = [](Range &, const Domain &) {
2455  Assert(false,
2456  ExcMessage("Uninitialized TrilinosPayload::inv_Tvmult called "
2457  "(Matrix constructor with matrix exemplar)"));
2458  };
2459  }
2460 
2461 
2462 
2463  TrilinosPayload::TrilinosPayload(
2464  const TrilinosWrappers::SparseMatrix & matrix_exemplar,
2465  const TrilinosWrappers::PreconditionBase &preconditioner)
2466  : use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
2467  ,
2468 # ifdef DEAL_II_WITH_MPI
2469  communicator(matrix_exemplar.get_mpi_communicator())
2470  , domain_map(
2471  matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
2472  communicator.Comm()))
2473  , range_map(
2474  matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
2475  communicator.Comm()))
2476 # else
2477  domain_map(internal::make_serial_Epetra_map(
2478  matrix_exemplar.locally_owned_domain_indices()))
2479  , range_map(internal::make_serial_Epetra_map(
2480  matrix_exemplar.locally_owned_range_indices()))
2481 # endif
2482  {
2483  vmult = [&matrix_exemplar, &preconditioner](Range & tril_dst,
2484  const Domain &tril_src) {
2485  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2486  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2487  Assert(&tril_src != &tril_dst,
2488  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2489  internal::check_vector_map_equality(
2490  matrix_exemplar.trilinos_matrix(),
2491  tril_src,
2492  tril_dst,
2493  matrix_exemplar.trilinos_matrix().UseTranspose());
2494  internal::check_vector_map_equality(
2495  preconditioner.trilinos_operator(),
2496  tril_src,
2497  tril_dst,
2498  preconditioner.trilinos_operator().UseTranspose());
2499 
2500  const int ierr =
2501  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2502  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2503  };
2504 
2505  Tvmult = [&matrix_exemplar, &preconditioner](Domain & tril_dst,
2506  const Range &tril_src) {
2507  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2508  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2509  Assert(&tril_src != &tril_dst,
2510  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2511  internal::check_vector_map_equality(
2512  matrix_exemplar.trilinos_matrix(),
2513  tril_src,
2514  tril_dst,
2515  !matrix_exemplar.trilinos_matrix().UseTranspose());
2516  internal::check_vector_map_equality(
2517  preconditioner.trilinos_operator(),
2518  tril_src,
2519  tril_dst,
2520  !preconditioner.trilinos_operator().UseTranspose());
2521 
2522  preconditioner.trilinos_operator().SetUseTranspose(
2523  !preconditioner.trilinos_operator().UseTranspose());
2524  const int ierr =
2525  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2526  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2527  preconditioner.trilinos_operator().SetUseTranspose(
2528  !preconditioner.trilinos_operator().UseTranspose());
2529  };
2530 
2531  inv_vmult = [&matrix_exemplar, &preconditioner](Domain & tril_dst,
2532  const Range &tril_src) {
2533  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2534  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2535  Assert(&tril_src != &tril_dst,
2536  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2537  internal::check_vector_map_equality(
2538  matrix_exemplar.trilinos_matrix(),
2539  tril_src,
2540  tril_dst,
2541  !matrix_exemplar.trilinos_matrix().UseTranspose());
2542  internal::check_vector_map_equality(
2543  preconditioner.trilinos_operator(),
2544  tril_src,
2545  tril_dst,
2546  !preconditioner.trilinos_operator().UseTranspose());
2547 
2548  const int ierr =
2549  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2550  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2551  };
2552 
2553  inv_Tvmult = [&matrix_exemplar,
2554  &preconditioner](Range & tril_dst,
2555  const Domain &tril_src) {
2556  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2557  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2558  Assert(&tril_src != &tril_dst,
2559  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2560  internal::check_vector_map_equality(
2561  matrix_exemplar.trilinos_matrix(),
2562  tril_src,
2563  tril_dst,
2564  matrix_exemplar.trilinos_matrix().UseTranspose());
2565  internal::check_vector_map_equality(
2566  preconditioner.trilinos_operator(),
2567  tril_src,
2568  tril_dst,
2569  preconditioner.trilinos_operator().UseTranspose());
2570 
2571  preconditioner.trilinos_operator().SetUseTranspose(
2572  !preconditioner.trilinos_operator().UseTranspose());
2573  const int ierr =
2574  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2575  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2576  preconditioner.trilinos_operator().SetUseTranspose(
2577  !preconditioner.trilinos_operator().UseTranspose());
2578  };
2579  }
2580 
2581 
2582 
2583  TrilinosPayload::TrilinosPayload(
2584  const TrilinosWrappers::PreconditionBase &preconditioner_exemplar,
2585  const TrilinosWrappers::PreconditionBase &preconditioner)
2586  : use_transpose(
2587  preconditioner_exemplar.trilinos_operator().UseTranspose())
2588  ,
2589 # ifdef DEAL_II_WITH_MPI
2590  communicator(preconditioner_exemplar.get_mpi_communicator())
2591  , domain_map(preconditioner_exemplar.locally_owned_domain_indices()
2592  .make_trilinos_map(communicator.Comm()))
2593  , range_map(preconditioner_exemplar.locally_owned_range_indices()
2594  .make_trilinos_map(communicator.Comm()))
2595 # else
2596  domain_map(internal::make_serial_Epetra_map(
2597  preconditioner_exemplar.locally_owned_domain_indices()))
2598  , range_map(internal::make_serial_Epetra_map(
2599  preconditioner_exemplar.locally_owned_range_indices()))
2600 # endif
2601  {
2602  vmult = [&preconditioner_exemplar,
2603  &preconditioner](Range &tril_dst, const Domain &tril_src) {
2604  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2605  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2606  Assert(&tril_src != &tril_dst,
2607  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2608  internal::check_vector_map_equality(
2609  preconditioner_exemplar.trilinos_operator(),
2610  tril_src,
2611  tril_dst,
2612  preconditioner_exemplar.trilinos_operator().UseTranspose());
2613  internal::check_vector_map_equality(
2614  preconditioner.trilinos_operator(),
2615  tril_src,
2616  tril_dst,
2617  preconditioner.trilinos_operator().UseTranspose());
2618 
2619  const int ierr =
2620  preconditioner.trilinos_operator().Apply(tril_src, tril_dst);
2621  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2622  };
2623 
2624  Tvmult = [&preconditioner_exemplar,
2625  &preconditioner](Domain &tril_dst, const Range &tril_src) {
2626  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2627  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2628  Assert(&tril_src != &tril_dst,
2629  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2630  internal::check_vector_map_equality(
2631  preconditioner_exemplar.trilinos_operator(),
2632  tril_src,
2633  tril_dst,
2634  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2635  internal::check_vector_map_equality(
2636  preconditioner.trilinos_operator(),
2637  tril_src,
2638  tril_dst,
2639  !preconditioner.trilinos_operator().UseTranspose());
2640 
2641  preconditioner.trilinos_operator().SetUseTranspose(
2642  !preconditioner.trilinos_operator().UseTranspose());
2643  const int ierr =
2644  preconditioner.trilinos_operator().Apply(tril_src, tril_dst);
2645  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2646  preconditioner.trilinos_operator().SetUseTranspose(
2647  !preconditioner.trilinos_operator().UseTranspose());
2648  };
2649 
2650  inv_vmult = [&preconditioner_exemplar,
2651  &preconditioner](Domain &tril_dst, const Range &tril_src) {
2652  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2653  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2654  Assert(&tril_src != &tril_dst,
2655  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2656  internal::check_vector_map_equality(
2657  preconditioner_exemplar.trilinos_operator(),
2658  tril_src,
2659  tril_dst,
2660  !preconditioner_exemplar.trilinos_operator().UseTranspose());
2661  internal::check_vector_map_equality(
2662  preconditioner.trilinos_operator(),
2663  tril_src,
2664  tril_dst,
2665  !preconditioner.trilinos_operator().UseTranspose());
2666 
2667  const int ierr =
2668  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2669  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2670  };
2671 
2672  inv_Tvmult = [&preconditioner_exemplar,
2673  &preconditioner](Range & tril_dst,
2674  const Domain &tril_src) {
2675  // Duplicated from TrilinosWrappers::PreconditionBase::vmult
2676  // as well as from TrilinosWrappers::SparseMatrix::Tvmult
2677  Assert(&tril_src != &tril_dst,
2678  TrilinosWrappers::SparseMatrix::ExcSourceEqualsDestination());
2679  internal::check_vector_map_equality(
2680  preconditioner_exemplar.trilinos_operator(),
2681  tril_src,
2682  tril_dst,
2683  preconditioner_exemplar.trilinos_operator().UseTranspose());
2684  internal::check_vector_map_equality(
2685  preconditioner.trilinos_operator(),
2686  tril_src,
2687  tril_dst,
2688  preconditioner.trilinos_operator().UseTranspose());
2689 
2690  preconditioner.trilinos_operator().SetUseTranspose(
2691  !preconditioner.trilinos_operator().UseTranspose());
2692  const int ierr =
2693  preconditioner.trilinos_operator().ApplyInverse(tril_src, tril_dst);
2694  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2695  preconditioner.trilinos_operator().SetUseTranspose(
2696  !preconditioner.trilinos_operator().UseTranspose());
2697  };
2698  }
2699 
2700 
2701 
2702  TrilinosPayload::TrilinosPayload(const TrilinosPayload &payload)
2703  : vmult(payload.vmult)
2704  , Tvmult(payload.Tvmult)
2705  , inv_vmult(payload.inv_vmult)
2706  , inv_Tvmult(payload.inv_Tvmult)
2707  , use_transpose(payload.use_transpose)
2708  , communicator(payload.communicator)
2709  , domain_map(payload.domain_map)
2710  , range_map(payload.range_map)
2711  {}
2712 
2713 
2714 
2715  // Composite copy constructor
2716  // This is required for PackagedOperations
2717  TrilinosPayload::TrilinosPayload(const TrilinosPayload &first_op,
2718  const TrilinosPayload &second_op)
2719  : use_transpose(false)
2720  , // The combination of operators provides the exact
2721  // definition of the operation
2722  communicator(first_op.communicator)
2723  , domain_map(second_op.domain_map)
2724  , range_map(first_op.range_map)
2725  {}
2726 
2727 
2728 
2729  TrilinosPayload
2730  TrilinosPayload::identity_payload() const
2731  {
2732  TrilinosPayload return_op(*this);
2733 
2734  return_op.vmult = [](Range &tril_dst, const Range &tril_src) {
2735  tril_dst = tril_src;
2736  };
2737 
2738  return_op.Tvmult = [](Range &tril_dst, const Range &tril_src) {
2739  tril_dst = tril_src;
2740  };
2741 
2742  return_op.inv_vmult = [](Range &tril_dst, const Range &tril_src) {
2743  tril_dst = tril_src;
2744  };
2745 
2746  return_op.inv_Tvmult = [](Range &tril_dst, const Range &tril_src) {
2747  tril_dst = tril_src;
2748  };
2749 
2750  return return_op;
2751  }
2752 
2753 
2754 
2755  TrilinosPayload
2756  TrilinosPayload::null_payload() const
2757  {
2758  TrilinosPayload return_op(*this);
2759 
2760  return_op.vmult = [](Range &tril_dst, const Domain &) {
2761  const int ierr = tril_dst.PutScalar(0.0);
2762 
2763  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2764  };
2765 
2766  return_op.Tvmult = [](Domain &tril_dst, const Range &) {
2767  const int ierr = tril_dst.PutScalar(0.0);
2768 
2769  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2770  };
2771 
2772  return_op.inv_vmult = [](Domain &tril_dst, const Range &) {
2773  AssertThrow(false,
2774  ExcMessage("Cannot compute inverse of null operator"));
2775 
2776  const int ierr = tril_dst.PutScalar(0.0);
2777 
2778  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2779  };
2780 
2781  return_op.inv_Tvmult = [](Range &tril_dst, const Domain &) {
2782  AssertThrow(false,
2783  ExcMessage("Cannot compute inverse of null operator"));
2784 
2785  const int ierr = tril_dst.PutScalar(0.0);
2786 
2787  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2788  };
2789 
2790  return return_op;
2791  }
2792 
2793 
2794 
2795  TrilinosPayload
2796  TrilinosPayload::transpose_payload() const
2797  {
2798  TrilinosPayload return_op(*this);
2799  return_op.transpose();
2800  return return_op;
2801  }
2802 
2803 
2804 
2805  IndexSet
2806  TrilinosPayload::locally_owned_domain_indices() const
2807  {
2808  return IndexSet(domain_map);
2809  }
2810 
2811 
2812 
2813  IndexSet
2814  TrilinosPayload::locally_owned_range_indices() const
2815  {
2816  return IndexSet(range_map);
2817  }
2818 
2819 
2820 
2821  MPI_Comm
2822  TrilinosPayload::get_mpi_communicator() const
2823  {
2824 # ifdef DEAL_II_WITH_MPI
2825  return communicator.Comm();
2826 # else
2827  return MPI_COMM_SELF;
2828 # endif
2829  }
2830 
2831 
2832 
2833  void
2834  TrilinosPayload::transpose()
2835  {
2836  SetUseTranspose(!use_transpose);
2837  }
2838 
2839 
2840 
2841  bool
2842  TrilinosPayload::UseTranspose() const
2843  {
2844  return use_transpose;
2845  }
2846 
2847 
2848 
2849  int
2850  TrilinosPayload::SetUseTranspose(bool UseTranspose)
2851  {
2852  if (use_transpose != UseTranspose)
2853  {
2854  use_transpose = UseTranspose;
2855  std::swap(domain_map, range_map);
2856  std::swap(vmult, Tvmult);
2857  std::swap(inv_vmult, inv_Tvmult);
2858  }
2859  return 0;
2860  }
2861 
2862 
2863 
2864  int
2865  TrilinosPayload::Apply(const VectorType &X, VectorType &Y) const
2866  {
2867  // The transposedness of the operations is taken care of
2868  // when we hit the transpose flag.
2869  vmult(Y, X);
2870  return 0;
2871  }
2872 
2873 
2874 
2875  int
2876  TrilinosPayload::ApplyInverse(const VectorType &Y, VectorType &X) const
2877  {
2878  // The transposedness of the operations is taken care of
2879  // when we hit the transpose flag.
2880  inv_vmult(X, Y);
2881  return 0;
2882  }
2883 
2884 
2885 
2886  const char *
2887  TrilinosPayload::Label() const
2888  {
2889  return "TrilinosPayload";
2890  }
2891 
2892 
2893 
2894  const Epetra_Comm &
2895  TrilinosPayload::Comm() const
2896  {
2897  return communicator;
2898  }
2899 
2900 
2901 
2902  const Epetra_Map &
2903  TrilinosPayload::OperatorDomainMap() const
2904  {
2905  return domain_map;
2906  }
2907 
2908 
2909 
2910  const Epetra_Map &
2911  TrilinosPayload::OperatorRangeMap() const
2912  {
2913  return range_map;
2914  }
2915 
2916 
2917 
2918  bool
2919  TrilinosPayload::HasNormInf() const
2920  {
2921  return false;
2922  }
2923 
2924 
2925 
2926  double
2927  TrilinosPayload::NormInf() const
2928  {
2929  AssertThrow(false, ExcNotImplemented());
2930  return 0.0;
2931  }
2932 
2933 
2934 
2935  TrilinosPayload
2936  operator+(const TrilinosPayload &first_op,
2937  const TrilinosPayload &second_op)
2938  {
2939  using Domain = typename TrilinosPayload::Domain;
2940  using Range = typename TrilinosPayload::Range;
2941  using Intermediate = typename TrilinosPayload::VectorType;
2942  using GVMVectorType = TrilinosWrappers::MPI::Vector;
2943 
2944  Assert(first_op.locally_owned_domain_indices() ==
2945  second_op.locally_owned_domain_indices(),
2946  ExcMessage(
2947  "Operators are set to work on incompatible IndexSets."));
2948  Assert(first_op.locally_owned_range_indices() ==
2949  second_op.locally_owned_range_indices(),
2950  ExcMessage(
2951  "Operators are set to work on incompatible IndexSets."));
2952 
2953  TrilinosPayload return_op(first_op, second_op);
2954 
2955  // Capture by copy so the payloads are always valid
2956  return_op.vmult = [first_op, second_op](Range & tril_dst,
2957  const Domain &tril_src) {
2958  // Duplicated from LinearOperator::operator*
2959  // TODO: Template the constructor on GrowingVectorMemory vector type?
2960  GrowingVectorMemory<GVMVectorType> vector_memory;
2961  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2962 
2963  // Initialize intermediate vector
2964  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
2965  i->reinit(IndexSet(first_op_init_map),
2966  first_op.get_mpi_communicator(),
2967  /*bool omit_zeroing_entries =*/true);
2968 
2969  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
2970  const size_type i_local_size = i->end() - i->begin();
2971  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
2972  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
2973  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
2974  (void)second_op_init_map;
2975  Intermediate tril_int(View,
2976  first_op_init_map,
2977  const_cast<TrilinosScalar *>(i->begin()),
2978  i_local_size,
2979  1);
2980 
2981  // These operators may themselves be transposed or not, so we let them
2982  // decide what the intended outcome is
2983  second_op.Apply(tril_src, tril_int);
2984  first_op.Apply(tril_src, tril_dst);
2985  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
2986  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
2987  };
2988 
2989  return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
2990  const Range &tril_src) {
2991  // Duplicated from LinearOperator::operator*
2992  // TODO: Template the constructor on GrowingVectorMemory vector type?
2993  GrowingVectorMemory<GVMVectorType> vector_memory;
2994  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
2995 
2996  // These operators may themselves be transposed or not, so we let them
2997  // decide what the intended outcome is
2998  // We must first transpose the operators to get the right IndexSets
2999  // for the input, intermediate and result vectors
3000  const_cast<TrilinosPayload &>(first_op).transpose();
3001  const_cast<TrilinosPayload &>(second_op).transpose();
3002 
3003  // Initialize intermediate vector
3004  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3005  i->reinit(IndexSet(first_op_init_map),
3006  first_op.get_mpi_communicator(),
3007  /*bool omit_zeroing_entries =*/true);
3008 
3009  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3010  const size_type i_local_size = i->end() - i->begin();
3011  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3012  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3013  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3014  (void)second_op_init_map;
3015  Intermediate tril_int(View,
3016  first_op_init_map,
3017  const_cast<TrilinosScalar *>(i->begin()),
3018  i_local_size,
3019  1);
3020 
3021  // These operators may themselves be transposed or not, so we let them
3022  // decide what the intended outcome is
3023  second_op.Apply(tril_src, tril_int);
3024  first_op.Apply(tril_src, tril_dst);
3025  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3026  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3027 
3028  // Reset transpose flag
3029  const_cast<TrilinosPayload &>(first_op).transpose();
3030  const_cast<TrilinosPayload &>(second_op).transpose();
3031  };
3032 
3033  return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
3034  const Range &tril_src) {
3035  // Duplicated from LinearOperator::operator*
3036  // TODO: Template the constructor on GrowingVectorMemory vector type?
3037  GrowingVectorMemory<GVMVectorType> vector_memory;
3038  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3039 
3040  // Initialize intermediate vector
3041  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3042  i->reinit(IndexSet(first_op_init_map),
3043  first_op.get_mpi_communicator(),
3044  /*bool omit_zeroing_entries =*/true);
3045 
3046  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3047  const size_type i_local_size = i->end() - i->begin();
3048  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3049  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3050  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3051  (void)second_op_init_map;
3052  Intermediate tril_int(View,
3053  first_op_init_map,
3054  const_cast<TrilinosScalar *>(i->begin()),
3055  i_local_size,
3056  1);
3057 
3058  // These operators may themselves be transposed or not, so we let them
3059  // decide what the intended outcome is
3060  second_op.ApplyInverse(tril_src, tril_int);
3061  first_op.ApplyInverse(tril_src, tril_dst);
3062  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3063  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3064  };
3065 
3066  return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
3067  const Domain &tril_src) {
3068  // Duplicated from LinearOperator::operator*
3069  // TODO: Template the constructor on GrowingVectorMemory vector type?
3070  GrowingVectorMemory<GVMVectorType> vector_memory;
3071  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3072 
3073  // These operators may themselves be transposed or not, so we let them
3074  // decide what the intended outcome is
3075  // We must first transpose the operators to get the right IndexSets
3076  // for the input, intermediate and result vectors
3077  const_cast<TrilinosPayload &>(first_op).transpose();
3078  const_cast<TrilinosPayload &>(second_op).transpose();
3079 
3080  // Initialize intermediate vector
3081  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3082  i->reinit(IndexSet(first_op_init_map),
3083  first_op.get_mpi_communicator(),
3084  /*bool omit_zeroing_entries =*/true);
3085 
3086  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3087  const size_type i_local_size = i->end() - i->begin();
3088  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3089  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3090  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3091  (void)second_op_init_map;
3092  Intermediate tril_int(View,
3093  first_op_init_map,
3094  const_cast<TrilinosScalar *>(i->begin()),
3095  i_local_size,
3096  1);
3097 
3098  // These operators may themselves be transposed or not, so we let them
3099  // decide what the intended outcome is
3100  second_op.ApplyInverse(tril_src, tril_int);
3101  first_op.ApplyInverse(tril_src, tril_dst);
3102  const int ierr = tril_dst.Update(1.0, tril_int, 1.0);
3103  AssertThrow(ierr == 0, ExcTrilinosError(ierr));
3104 
3105  // Reset transpose flag
3106  const_cast<TrilinosPayload &>(first_op).transpose();
3107  const_cast<TrilinosPayload &>(second_op).transpose();
3108  };
3109 
3110  return return_op;
3111  }
3112 
3113 
3114  TrilinosPayload
3115  operator*(const TrilinosPayload &first_op,
3116  const TrilinosPayload &second_op)
3117  {
3118  using Domain = typename TrilinosPayload::Domain;
3119  using Range = typename TrilinosPayload::Range;
3120  using Intermediate = typename TrilinosPayload::VectorType;
3121  using GVMVectorType = TrilinosWrappers::MPI::Vector;
3122 
3123  Assert(first_op.locally_owned_domain_indices() ==
3124  second_op.locally_owned_range_indices(),
3125  ExcMessage(
3126  "Operators are set to work on incompatible IndexSets."));
3127 
3128  TrilinosPayload return_op(first_op, second_op);
3129 
3130  // Capture by copy so the payloads are always valid
3131  return_op.vmult = [first_op, second_op](Range & tril_dst,
3132  const Domain &tril_src) {
3133  // Duplicated from LinearOperator::operator*
3134  // TODO: Template the constructor on GrowingVectorMemory vector type?
3135  GrowingVectorMemory<GVMVectorType> vector_memory;
3136  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3137 
3138  // Initialize intermediate vector
3139  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3140  i->reinit(IndexSet(first_op_init_map),
3141  first_op.get_mpi_communicator(),
3142  /*bool omit_zeroing_entries =*/true);
3143 
3144  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3145  const size_type i_local_size = i->end() - i->begin();
3146  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3147  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3148  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3149  (void)second_op_init_map;
3150  Intermediate tril_int(View,
3151  first_op_init_map,
3152  const_cast<TrilinosScalar *>(i->begin()),
3153  i_local_size,
3154  1);
3155 
3156  // These operators may themselves be transposed or not, so we let them
3157  // decide what the intended outcome is
3158  second_op.Apply(tril_src, tril_int);
3159  first_op.Apply(tril_int, tril_dst);
3160  };
3161 
3162  return_op.Tvmult = [first_op, second_op](Domain & tril_dst,
3163  const Range &tril_src) {
3164  // Duplicated from LinearOperator::operator*
3165  // TODO: Template the constructor on GrowingVectorMemory vector type?
3166  GrowingVectorMemory<GVMVectorType> vector_memory;
3167  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3168 
3169  // These operators may themselves be transposed or not, so we let them
3170  // decide what the intended outcome is
3171  // We must first transpose the operators to get the right IndexSets
3172  // for the input, intermediate and result vectors
3173  const_cast<TrilinosPayload &>(first_op).transpose();
3174  const_cast<TrilinosPayload &>(second_op).transpose();
3175 
3176  // Initialize intermediate vector
3177  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3178  i->reinit(IndexSet(first_op_init_map),
3179  first_op.get_mpi_communicator(),
3180  /*bool omit_zeroing_entries =*/true);
3181 
3182  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3183  const size_type i_local_size = i->end() - i->begin();
3184  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3185  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3186  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3187  (void)second_op_init_map;
3188  Intermediate tril_int(View,
3189  first_op_init_map,
3190  const_cast<TrilinosScalar *>(i->begin()),
3191  i_local_size,
3192  1);
3193 
3194  // Apply the operators in the reverse order to vmult
3195  first_op.Apply(tril_src, tril_int);
3196  second_op.Apply(tril_int, tril_dst);
3197 
3198  // Reset transpose flag
3199  const_cast<TrilinosPayload &>(first_op).transpose();
3200  const_cast<TrilinosPayload &>(second_op).transpose();
3201  };
3202 
3203  return_op.inv_vmult = [first_op, second_op](Domain & tril_dst,
3204  const Range &tril_src) {
3205  // Duplicated from LinearOperator::operator*
3206  // TODO: Template the constructor on GrowingVectorMemory vector type?
3207  GrowingVectorMemory<GVMVectorType> vector_memory;
3208  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3209 
3210  // Initialize intermediate vector
3211  const Epetra_Map &first_op_init_map = first_op.OperatorRangeMap();
3212  i->reinit(IndexSet(first_op_init_map),
3213  first_op.get_mpi_communicator(),
3214  /*bool omit_zeroing_entries =*/true);
3215 
3216  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3217  const size_type i_local_size = i->end() - i->begin();
3218  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3219  const Epetra_Map &second_op_init_map = second_op.OperatorDomainMap();
3220  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3221  (void)second_op_init_map;
3222  Intermediate tril_int(View,
3223  first_op_init_map,
3224  const_cast<TrilinosScalar *>(i->begin()),
3225  i_local_size,
3226  1);
3227 
3228  // Apply the operators in the reverse order to vmult
3229  // and the same order as Tvmult
3230  first_op.ApplyInverse(tril_src, tril_int);
3231  second_op.ApplyInverse(tril_int, tril_dst);
3232  };
3233 
3234  return_op.inv_Tvmult = [first_op, second_op](Range & tril_dst,
3235  const Domain &tril_src) {
3236  // Duplicated from LinearOperator::operator*
3237  // TODO: Template the constructor on GrowingVectorMemory vector type?
3238  GrowingVectorMemory<GVMVectorType> vector_memory;
3239  VectorMemory<GVMVectorType>::Pointer i(vector_memory);
3240 
3241  // These operators may themselves be transposed or not, so we let them
3242  // decide what the intended outcome is
3243  // We must first transpose the operators to get the right IndexSets
3244  // for the input, intermediate and result vectors
3245  const_cast<TrilinosPayload &>(first_op).transpose();
3246  const_cast<TrilinosPayload &>(second_op).transpose();
3247 
3248  // Initialize intermediate vector
3249  const Epetra_Map &first_op_init_map = first_op.OperatorDomainMap();
3250  i->reinit(IndexSet(first_op_init_map),
3251  first_op.get_mpi_communicator(),
3252  /*bool omit_zeroing_entries =*/true);
3253 
3254  // Duplicated from TrilinosWrappers::SparseMatrix::vmult
3255  const size_type i_local_size = i->end() - i->begin();
3256  AssertDimension(i_local_size, first_op_init_map.NumMyPoints());
3257  const Epetra_Map &second_op_init_map = second_op.OperatorRangeMap();
3258  AssertDimension(i_local_size, second_op_init_map.NumMyPoints());
3259  (void)second_op_init_map;
3260  Intermediate tril_int(View,
3261  first_op_init_map,
3262  const_cast<TrilinosScalar *>(i->begin()),
3263  i_local_size,
3264  1);
3265 
3266  // These operators may themselves be transposed or not, so we let them
3267  // decide what the intended outcome is
3268  // Apply the operators in the reverse order to Tvmult
3269  // and the same order as vmult
3270  second_op.ApplyInverse(tril_src, tril_int);
3271  first_op.ApplyInverse(tril_int, tril_dst);
3272 
3273  // Reset transpose flag
3274  const_cast<TrilinosPayload &>(first_op).transpose();
3275  const_cast<TrilinosPayload &>(second_op).transpose();
3276  };
3277 
3278  return return_op;
3279  }
3280 
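operator+ and operator* above give payload-level meaning to sums and compositions of LinearOperators: each returned payload captures the two operands by value and chains their Apply()/ApplyInverse() calls through an intermediate vector. Sketch (hypothetical operators `op_a`, `op_b` wrapping Trilinos matrices):

// Sketch: LinearOperator arithmetic forwards to the payload operators.
auto sum  = op_a + op_b; // vmult applies both operands and adds the results
auto prod = op_a * op_b; // vmult applies op_b first, then op_a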
3281  } // namespace LinearOperatorImplementation
3282  } /* namespace internal */
3283 } /* namespace TrilinosWrappers */
3284 
3285 
3286 
3287 // explicit instantiations
3288 # include "trilinos_sparse_matrix.inst"
3289 
3290 # ifndef DOXYGEN
3291 // TODO: put these instantiations into generic file
3292 namespace TrilinosWrappers
3293 {
3294  template void
3295  SparseMatrix::reinit(const dealii::SparsityPattern &);
3296 
3297  template void
3298  SparseMatrix::reinit(const DynamicSparsityPattern &);
3299 
3300  template void
3301  SparseMatrix::reinit(const IndexSet &,
3302  const IndexSet &,
3303  const dealii::SparsityPattern &,
3304  const MPI_Comm &,
3305  const bool);
3306 
3307  template void
3308  SparseMatrix::reinit(const IndexSet &,
3309  const IndexSet &,
3310  const DynamicSparsityPattern &,
3311  const MPI_Comm &,
3312  const bool);
3313 
3314  template void
3315  SparseMatrix::vmult(MPI::Vector &, const MPI::Vector &) const;
3316 
3317  template void
3318  SparseMatrix::vmult(::Vector<double> &,
3319  const ::Vector<double> &) const;
3320 
3321  template void
3324  const ::LinearAlgebra::distributed::Vector<double> &) const;
3325 
3326 # ifdef DEAL_II_WITH_MPI
3327 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3328  template void
3331  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3332 
3333  template void
3336  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3337 # endif
3338 
3339  template void
3343 # endif
3344 
3345  template void
3346  SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
3347 
3348  template void
3349  SparseMatrix::Tvmult(::Vector<double> &,
3350  const ::Vector<double> &) const;
3351 
3352  template void
3355  const ::LinearAlgebra::distributed::Vector<double> &) const;
3356 
3357 # ifdef DEAL_II_WITH_MPI
3358 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3359  template void
3362  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3363 
3364  template void
3367  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3368 # endif
3369 
3370  template void
3374 # endif
3375 
3376  template void
3377  SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
3378 
3379  template void
3380  SparseMatrix::vmult_add(dealii::Vector<double> &,
3381  const dealii::Vector<double> &) const;
3382 
3383  template void
3384  SparseMatrix::vmult_add(
3385  dealii::LinearAlgebra::distributed::Vector<double> &,
3386  const dealii::LinearAlgebra::distributed::Vector<double> &) const;
3387 
3388 # ifdef DEAL_II_WITH_MPI
3389 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3390  template void
3391  SparseMatrix::vmult_add(
3392  dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
3393  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3394 
3395  template void
3396  SparseMatrix::vmult_add(
3397  dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
3398  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3399 # endif
3400 
3401  template void
3402  SparseMatrix::vmult_add(
3403  dealii::LinearAlgebra::EpetraWrappers::Vector &,
3404  const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
3405 # endif
3406 
3407  template void
3408  SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
3409 
3410  template void
3411  SparseMatrix::Tvmult_add(dealii::Vector<double> &,
3412  const dealii::Vector<double> &) const;
3413 
3414  template void
3415  SparseMatrix::Tvmult_add(
3416  dealii::LinearAlgebra::distributed::Vector<double> &,
3417  const dealii::LinearAlgebra::distributed::Vector<double> &) const;
3418 
3419 # ifdef DEAL_II_WITH_MPI
3420 # ifdef DEAL_II_TRILINOS_WITH_TPETRA
3421  template void
3422  SparseMatrix::Tvmult_add(
3423  dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
3424  const ::LinearAlgebra::TpetraWrappers::Vector<double> &) const;
3425 
3426  template void
3427  SparseMatrix::Tvmult_add(
3428  dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
3429  const ::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
3430 # endif
3431 
3432  template void
3433  SparseMatrix::Tvmult_add(
3434  dealii::LinearAlgebra::EpetraWrappers::Vector &,
3435  const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
3436 # endif
3437 } // namespace TrilinosWrappers
3438 # endif // DOXYGEN
3439 
3440 DEAL_II_NAMESPACE_CLOSE
3441 
3442 #endif // DEAL_II_WITH_TRILINOS