trilinos_tpetra_sparsity_pattern.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/config.h>

#ifdef DEAL_II_TRILINOS_WITH_TPETRA

#  include <deal.II/base/mpi.h>

#  include <deal.II/lac/dynamic_sparsity_pattern.h>
#  include <deal.II/lac/sparsity_pattern.h>
#  include <deal.II/lac/trilinos_tpetra_sparsity_pattern.h>

#  include <limits>

DEAL_II_NAMESPACE_OPEN

namespace LinearAlgebra
{

  namespace TpetraWrappers
  {
    namespace SparsityPatternIterators
    {
      template <typename MemorySpace>
      void
      Accessor<MemorySpace>::visit_present_row()
      {
        // if we are asked to visit the past-the-end line, then simply
        // release all our caches and go on with life
        if (static_cast<size_t>(this->a_row) == sparsity_pattern->n_rows())
          {
            colnum_cache.reset();
            return;
          }

        // otherwise first flush Trilinos caches if necessary
        if (!sparsity_pattern->is_compressed())
          sparsity_pattern->compress();

        colnum_cache = std::make_shared<
          std::vector<dealii::types::signed_global_dof_index>>(
          sparsity_pattern->row_length(this->a_row));

        if (colnum_cache->size() > 0)
          {
            // get a representation of the present row
            std::size_t ncols;
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
            typename TpetraTypes::GraphType<
              MemorySpace>::nonconst_global_inds_host_view_type
              column_indices_view(colnum_cache->data(), colnum_cache->size());
#  else
            Teuchos::ArrayView<dealii::types::signed_global_dof_index>
              column_indices_view(colnum_cache->data(), colnum_cache->size());
#  endif
            sparsity_pattern->graph->getGlobalRowCopy(this->a_row,
                                                      column_indices_view,
                                                      ncols);
            AssertThrow(ncols == colnum_cache->size(), ExcInternalError());
          }
      }
    } // namespace SparsityPatternIterators


    // The constructor is the only point where we have to check whether we
    // build a serial or a parallel Trilinos matrix. It does not matter how
    // many threads there are, but only whether we use an MPI compiler or a
    // standard compiler: even one thread on a configuration with MPI will
    // still get the parallel interface.
    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern()
    {
      column_space_map = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::MapType<MemorySpace>>(
        TrilinosWrappers::types::int_type(0),
        TrilinosWrappers::types::int_type(0),
        Utilities::Trilinos::tpetra_comm_self());
      graph = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::GraphType<MemorySpace>>(column_space_map,
                                             column_space_map,
                                             0);
      graph->fillComplete();
    }
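
    // Illustration only, not part of this translation unit: a minimal usage
    // sketch, assuming an IndexSet `locally_owned_dofs` and an
    // `mpi_communicator` exist in the calling code.
    //
    //   LinearAlgebra::TpetraWrappers::SparsityPattern<MemorySpace::Host> sp;
    //   sp.reinit(locally_owned_dofs, mpi_communicator, 5);
    //   // ... add entries, then:
    //   sp.compress();
    //
    // As the comment above explains, this takes the parallel code path even
    // when the program runs on a single MPI rank.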


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const size_type m,
      const size_type n,
      const size_type n_entries_per_row)
    {
      reinit(m, n, n_entries_per_row);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const size_type               m,
      const size_type               n,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(m, n, n_entries_per_row);
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      SparsityPattern<MemorySpace> &&other) noexcept
      : SparsityPatternBase(std::move(other))
      , column_space_map(std::move(other.column_space_map))
      , graph(std::move(other.graph))
      , nonlocal_graph(std::move(other.nonlocal_graph))
    {}



    // Copy function only works if the sparsity pattern is empty.
    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const SparsityPattern<MemorySpace> &input_sparsity)
      : SparsityPatternBase(input_sparsity)
      , column_space_map(Utilities::Trilinos::internal::make_rcp<
                         TpetraTypes::MapType<MemorySpace>>(
          0,
          0,
          Utilities::Trilinos::tpetra_comm_self()))
      , graph(Utilities::Trilinos::internal::make_rcp<
              TpetraTypes::GraphType<MemorySpace>>(column_space_map,
                                                   column_space_map,
                                                   0))
    {
      (void)input_sparsity;
      Assert(input_sparsity.n_rows() == 0,
             ExcMessage(
               "Copy constructor only works for empty sparsity patterns."));
    }
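
    // Illustration only: since the copy constructor asserts that its input
    // is empty, duplicating a non-empty pattern goes through copy_from()
    // instead (hypothetical objects):
    //
    //   TpetraWrappers::SparsityPattern<MemorySpace::Host> sp_copy;
    //   sp_copy.copy_from(existing_pattern);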



    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet &parallel_partitioning,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      reinit(parallel_partitioning,
             parallel_partitioning,
             communicator,
             n_entries_per_row);
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet               &parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(parallel_partitioning,
             parallel_partitioning,
             communicator,
             n_entries_per_row);
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             communicator,
             n_entries_per_row);
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet               &row_parallel_partitioning,
      const IndexSet               &col_parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             communicator,
             n_entries_per_row);
    }


    template <typename MemorySpace>
    SparsityPattern<MemorySpace>::SparsityPattern(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const IndexSet &writable_rows,
      const MPI_Comm  communicator,
      const size_type n_max_entries_per_row)
    {
      reinit(row_parallel_partitioning,
             col_parallel_partitioning,
             writable_rows,
             communicator,
             n_max_entries_per_row);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(const size_type m,
                                         const size_type n,
                                         const size_type n_entries_per_row)
    {
      reinit(complete_index_set(m),
             complete_index_set(n),
             MPI_COMM_SELF,
             n_entries_per_row);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const size_type               m,
      const size_type               n,
      const std::vector<size_type> &n_entries_per_row)
    {
      reinit(complete_index_set(m),
             complete_index_set(n),
             MPI_COMM_SELF,
             n_entries_per_row);
    }



    namespace SparsityPatternImpl
    {
      template <typename MemorySpace>
      using size_type = typename SparsityPattern<MemorySpace>::size_type;

      template <typename MemorySpace>
      void
      reinit_sp(
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
        const size_type<MemorySpace>                       n_entries_per_row,
        Teuchos::RCP<TpetraTypes::MapType<MemorySpace>>   &column_space_map,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &graph,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &nonlocal_graph)
      {
        Assert(row_map->isOneToOne(),
               ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));
        Assert(col_map->isOneToOne(),
               ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));

        nonlocal_graph.reset();
        graph.reset();
        column_space_map = col_map;

        // For more than one processor we need to specify only the row map
        // first and let the matrix entries decide about the column map (which
        // says which columns are present in the matrix, not to be confused
        // with the col_map that tells how the domain dofs of the matrix will
        // be distributed). For only one processor, we can directly assign the
        // columns as well. If we use a recent Trilinos version, we can also
        // require building a non-local graph which gives us thread-safe
        // initialization.
        graph = Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::GraphType<MemorySpace>>(row_map,
                                               row_map,
                                               n_entries_per_row);
      }
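
      // Illustration of the 1-to-1 requirement asserted above: with four
      // global rows on two ranks, rank 0 owning {0,1} and rank 1 owning
      // {2,3} is a valid (1-to-1) row map, whereas both ranks claiming
      // {1,2} would overlap and trip the isOneToOne() assertions.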



      template <typename MemorySpace>
      void
      reinit_sp(
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
        const std::vector<size_type<MemorySpace>>         &n_entries_per_row,
        Teuchos::RCP<TpetraTypes::MapType<MemorySpace>>   &column_space_map,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &graph,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &nonlocal_graph)
      {
        Assert(row_map->isOneToOne(),
               ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));
        Assert(col_map->isOneToOne(),
               ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
                          "the maps of different processors."));

        // release memory before reallocation
        nonlocal_graph.reset();
        graph.reset();
        AssertDimension(n_entries_per_row.size(),
                        row_map->getGlobalNumElements());

        column_space_map = col_map;

        // Translate the vector of row lengths into one that only stores
        // those entries that relate to the locally stored rows of the matrix:
        Kokkos::DualView<size_t *, typename MemorySpace::kokkos_space>
          local_entries_per_row("local_entries_per_row",
                                row_map->getMaxGlobalIndex() -
                                  row_map->getMinGlobalIndex());

        auto local_entries_per_row_host = local_entries_per_row.view_host();

        std::uint64_t total_size = 0;
        for (unsigned int i = 0; i < local_entries_per_row.extent(0); ++i)
          {
            local_entries_per_row_host(i) =
              n_entries_per_row[row_map->getMinGlobalIndex() + i];
            total_size += local_entries_per_row_host[i];
          }
        local_entries_per_row.modify_host();
        local_entries_per_row.sync_device();

        AssertThrow(
          total_size < static_cast<std::uint64_t>(
                         std::numeric_limits<
                           dealii::types::signed_global_dof_index>::max()),
          ExcMessage(
            "You are requesting to store more elements than global ordinal type allows."));

        graph = Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::GraphType<MemorySpace>>(row_map,
                                               col_map,
                                               local_entries_per_row);
      }



      template <typename SparsityPatternType, typename MemorySpace>
      void
      reinit_sp(
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &row_map,
        const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> &col_map,
        const SparsityPatternType                             &sp,
        [[maybe_unused]] const bool                            exchange_data,
        Teuchos::RCP<TpetraTypes::MapType<MemorySpace>>   &column_space_map,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &graph,
        Teuchos::RCP<TpetraTypes::GraphType<MemorySpace>> &nonlocal_graph)
      {
        nonlocal_graph.reset();
        graph.reset();

        AssertDimension(sp.n_rows(), row_map->getGlobalNumElements());
        AssertDimension(sp.n_cols(), col_map->getGlobalNumElements());

        column_space_map = col_map;

        Assert(row_map->isContiguous() == true,
               ExcMessage(
                 "This function only works if the row map is contiguous."));

        const size_type<MemorySpace> first_row = row_map->getMinGlobalIndex(),
                                     last_row =
                                       row_map->getMaxGlobalIndex() + 1;
        std::vector<size_type<MemorySpace>> n_entries_per_row(last_row -
                                                              first_row);

        for (size_type<MemorySpace> row = first_row; row < last_row; ++row)
          n_entries_per_row[row - first_row] = sp.row_length(row);

        AssertThrow(
          std::accumulate(n_entries_per_row.begin(),
                          n_entries_per_row.end(),
                          std::uint64_t(0)) <
            static_cast<std::uint64_t>(std::numeric_limits<int>::max()),
          ExcMessage(
            "The TrilinosWrappers use Tpetra internally, and "
            "Trilinos/Tpetra was compiled with 'local ordinal = int'. "
            "Therefore, 'signed int' is used to represent local indices, "
            "and only 2,147,483,647 nonzero matrix entries can be stored "
            "on a single process, but you are requesting more than "
            "that. Either use more MPI processes or recompile Trilinos "
            "with 'local ordinal = long long'."));

#  if DEAL_II_TRILINOS_VERSION_GTE(12, 16, 0)
        if (row_map->getComm()->getSize() > 1)
          graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(row_map, n_entries_per_row);
        else
          graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(row_map,
                                                 col_map,
                                                 n_entries_per_row);
#  else
        if (row_map->getComm()->getSize() > 1)
          graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(
            row_map, Teuchos::arcpFromArray(n_entries_per_row));
        else
          graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(
            row_map, col_map, Teuchos::arcpFromArray(n_entries_per_row));
#  endif

        AssertDimension(sp.n_rows(), graph->getGlobalNumRows());
        AssertDimension(sp.n_cols(), graph->getGlobalNumEntries());

        std::vector<TrilinosWrappers::types::int_type> row_indices;

        for (size_type<MemorySpace> row = first_row; row < last_row; ++row)
          {
            const TrilinosWrappers::types::int_type row_length =
              sp.row_length(row);
            if (row_length == 0)
              continue;

            row_indices.resize(row_length, -1);
            {
              typename SparsityPatternType::iterator p = sp.begin(row);
              // avoid incrementing p over the end of the current row because
              // it is slow for DynamicSparsityPattern in parallel
              for (int col = 0; col < row_length;)
                {
                  row_indices[col++] = p->column();
                  if (col < row_length)
                    ++p;
                }
            }
            graph->insertGlobalIndices(row, row_length, row_indices.data());
          }

        graph->globalAssemble();
      }
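
      // Illustration only (assumed caller-side objects): the usual route
      // into this overload is SparsityPattern::reinit() with a locally
      // assembled DynamicSparsityPattern, e.g.:
      //
      //   DynamicSparsityPattern dsp(n_dofs);
      //   // ... fill dsp, e.g. via DoFTools::make_sparsity_pattern ...
      //   TpetraWrappers::SparsityPattern<MemorySpace::Host> sp;
      //   sp.reinit(locally_owned_dofs, dsp, mpi_communicator, true);
      //   sp.compress();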
    } // namespace SparsityPatternImpl


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(const IndexSet &parallel_partitioning,
                                         const MPI_Comm  communicator,
                                         const size_type n_entries_per_row)
    {
      SparsityPatternBase::resize(parallel_partitioning.size(),
                                  parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
        parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(
        map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet               &parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      SparsityPatternBase::resize(parallel_partitioning.size(),
                                  parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
        parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(
        map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
                                                  col_map,
                                                  n_entries_per_row,
                                                  column_space_map,
                                                  graph,
                                                  nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet               &row_parallel_partitioning,
      const IndexSet               &col_parallel_partitioning,
      const MPI_Comm                communicator,
      const std::vector<size_type> &n_entries_per_row)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
                                                  col_map,
                                                  n_entries_per_row,
                                                  column_space_map,
                                                  graph,
                                                  nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet &row_parallel_partitioning,
      const IndexSet &col_parallel_partitioning,
      const IndexSet &writable_rows,
      const MPI_Comm  communicator,
      const size_type n_entries_per_row)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
                                                  col_map,
                                                  n_entries_per_row,
                                                  column_space_map,
                                                  graph,
                                                  nonlocal_graph);

      IndexSet nonlocal_partitioner = writable_rows;
      AssertDimension(nonlocal_partitioner.size(),
                      row_parallel_partitioning.size());
#  ifdef DEBUG
      {
        IndexSet tmp = writable_rows & row_parallel_partitioning;
        Assert(tmp == row_parallel_partitioning,
               ExcMessage(
                 "The set of writable rows passed to this method does not "
                 "contain the locally owned rows, which is not allowed."));
      }
#  endif
      nonlocal_partitioner.subtract_set(row_parallel_partitioning);
      if (Utilities::MPI::n_mpi_processes(communicator) > 1)
        {
          Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> nonlocal_map =
            nonlocal_partitioner
              .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
                communicator, true);
          nonlocal_graph = Utilities::Trilinos::internal::make_rcp<
            TpetraTypes::GraphType<MemorySpace>>(nonlocal_map, col_map, 0);
        }
      else
        Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
    }
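
    // Illustration only (assumed objects): hanging-node or boundary
    // constraints may require writing into rows owned by another rank.
    // Passing a superset of the locally owned rows as `writable_rows`
    // allocates the nonlocal graph that ships such entries at compress():
    //
    //   IndexSet writable = locally_owned_dofs;
    //   writable.add_indices(constrained_ghost_rows);
    //   sp.reinit(locally_owned_dofs, locally_owned_dofs, writable,
    //             mpi_communicator, n_entries_per_row);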



    template <typename MemorySpace>
    template <typename SparsityPatternType>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet            &row_parallel_partitioning,
      const IndexSet            &col_parallel_partitioning,
      const SparsityPatternType &nontrilinos_sparsity_pattern,
      const MPI_Comm             communicator,
      const bool                 exchange_data)
    {
      SparsityPatternBase::resize(row_parallel_partitioning.size(),
                                  col_parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
        row_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
        col_parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
        row_map,
        col_map,
        nontrilinos_sparsity_pattern,
        exchange_data,
        column_space_map,
        graph,
        nonlocal_graph);
    }



    template <typename MemorySpace>
    template <typename SparsityPatternType>
    void
    SparsityPattern<MemorySpace>::reinit(
      const IndexSet            &parallel_partitioning,
      const SparsityPatternType &nontrilinos_sparsity_pattern,
      const MPI_Comm             communicator,
      const bool                 exchange_data)
    {
      AssertDimension(nontrilinos_sparsity_pattern.n_rows(),
                      parallel_partitioning.size());
      AssertDimension(nontrilinos_sparsity_pattern.n_cols(),
                      parallel_partitioning.size());
      SparsityPatternBase::resize(parallel_partitioning.size(),
                                  parallel_partitioning.size());
      Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
        parallel_partitioning
          .make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
            communicator, false);
      SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
        map,
        map,
        nontrilinos_sparsity_pattern,
        exchange_data,
        column_space_map,
        graph,
        nonlocal_graph);
    }



    template <typename MemorySpace>
    SparsityPattern<MemorySpace> &
    SparsityPattern<MemorySpace>::operator=(
      const SparsityPattern<MemorySpace> &input_sparsity_pattern)
    {
      (void)input_sparsity_pattern;
      DEAL_II_NOT_IMPLEMENTED();
      return *this;
    }



    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::copy_from(
      const SparsityPattern<MemorySpace> &sp)
    {
      SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
      column_space_map = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::MapType<MemorySpace>>(*sp.column_space_map);
      graph = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::GraphType<MemorySpace>>(*sp.graph);

      if (sp.nonlocal_graph.get() != nullptr)
        nonlocal_graph = Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::GraphType<MemorySpace>>(*sp.nonlocal_graph);
      else
        nonlocal_graph.reset();
    }


    template <typename MemorySpace>
    template <typename SparsityPatternType>
    void
    SparsityPattern<MemorySpace>::copy_from(const SparsityPatternType &sp)
    {
      SparsityPatternBase::resize(sp.n_rows(), sp.n_cols());
      const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> rows =
        Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::MapType<MemorySpace>>(
          TrilinosWrappers::types::int_type(sp.n_rows()),
          0,
          Utilities::Trilinos::tpetra_comm_self());
      const Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> columns =
        Utilities::Trilinos::internal::make_rcp<
          TpetraTypes::MapType<MemorySpace>>(
          TrilinosWrappers::types::int_type(sp.n_cols()),
          0,
          Utilities::Trilinos::tpetra_comm_self());

      SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
        rows, columns, sp, false, column_space_map, graph, nonlocal_graph);
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::clear()
    {
      SparsityPatternBase::resize(0, 0);
      // When we clear the matrix, reset
      // the pointer and generate an
      // empty sparsity pattern.
      column_space_map = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::MapType<MemorySpace>>(
        TrilinosWrappers::types::int_type(0),
        TrilinosWrappers::types::int_type(0),
        Utilities::Trilinos::tpetra_comm_self());
      graph = Utilities::Trilinos::internal::make_rcp<
        TpetraTypes::GraphType<MemorySpace>>(column_space_map,
                                             column_space_map,
                                             0);
      graph->fillComplete();

      nonlocal_graph.reset();
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::compress()
    {
      Assert(column_space_map.get(), ExcInternalError());
      if (nonlocal_graph.get() != nullptr)
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
          if (nonlocal_graph->getRowMap()->getLocalNumElements() > 0 &&
              column_space_map->getGlobalNumElements() > 0)
#  else
          if (nonlocal_graph->getRowMap()->getNodeNumElements() > 0 &&
              column_space_map->getGlobalNumElements() > 0)
#  endif
            {
              // Insert dummy element at (row, column) that corresponds to row
              // 0 in local index counting.
              TrilinosWrappers::types::int_type row =
                nonlocal_graph->getRowMap()->getGlobalElement(0);
              TrilinosWrappers::types::int_type column = 0;

              // in case we have a square sparsity pattern, add the entry on
              // the diagonal
              if (column_space_map->getGlobalNumElements() ==
                  graph->getRangeMap()->getGlobalNumElements())
                column = row;
              // if not, take a column index that we have ourselves since we
              // know for sure it is there (and it will not create spurious
              // messages to many ranks like putting index 0 on many
              // processors)
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
              else if (column_space_map->getLocalNumElements() > 0)
#  else
              else if (column_space_map->getNodeNumElements() > 0)
#  endif
                column = column_space_map->getGlobalElement(0);
              nonlocal_graph->insertGlobalIndices(row, 1, &column);
            }
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
          Assert(nonlocal_graph->getRowMap()->getLocalNumElements() == 0 ||
                   column_space_map->getGlobalNumElements() == 0,
                 ExcInternalError());
#  else
          Assert(nonlocal_graph->getRowMap()->getNodeNumElements() == 0 ||
                   column_space_map->getGlobalNumElements() == 0,
                 ExcInternalError());
#  endif

          nonlocal_graph->fillComplete(column_space_map, graph->getRowMap());
        }
      graph->fillComplete(column_space_map, graph->getRowMap());

      // Check consistency between the sizes set at the beginning and what
      // Trilinos stores:
      using namespace deal_II_exceptions::internals;
      AssertDimension(n_rows(), graph->getGlobalNumRows());
      AssertDimension(n_cols(), graph->getGlobalNumCols());
    }
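
    // Illustration only: a typical lifecycle around compress(), assuming
    // `columns_of(row)` yields the column indices of a locally owned row:
    //
    //   for (const auto row : locally_owned_dofs)
    //     sp.add_row_entries(row, columns_of(row));
    //   sp.compress(); // calls fillComplete() on the underlying CrsGraph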



    template <typename MemorySpace>
    bool
    SparsityPattern<MemorySpace>::row_is_stored_locally(
      const size_type i) const
    {
      return graph->getRowMap()->getLocalElement(i) !=
             Teuchos::OrdinalTraits<int>::invalid();
    }



    template <typename MemorySpace>
    bool
    SparsityPattern<MemorySpace>::exists(const size_type i,
                                         const size_type j) const
    {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
      if (!row_is_stored_locally(i))
        return false;

      // Extract local indices in the matrix.
      const auto trilinos_i = graph->getRowMap()->getLocalElement(i);
      const auto trilinos_j = graph->getColMap()->getLocalElement(j);

      typename TpetraTypes::GraphType<MemorySpace>::local_inds_host_view_type
        col_indices;

      // Generate the view.
      graph->getLocalRowView(trilinos_i, col_indices);

      // Search the index
      const size_type local_col_index =
        std::find(col_indices.data(),
                  col_indices.data() + col_indices.size(),
                  trilinos_j) -
        col_indices.data();

      return static_cast<size_t>(local_col_index) != col_indices.size();
#  else
      (void)i;
      (void)j;
      DEAL_II_NOT_IMPLEMENTED();
      return false;
#  endif
    }


    template <typename MemorySpace>
    typename SparsityPattern<MemorySpace>::size_type
    SparsityPattern<MemorySpace>::bandwidth() const
    {
      size_type local_b = 0;
      for (int i = 0; i < static_cast<int>(local_size()); ++i)
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
          typename TpetraTypes::GraphType<
            MemorySpace>::local_inds_host_view_type indices;
#  else
          Teuchos::ArrayView<const int> indices;
#  endif

          graph->getLocalRowView(i, indices);
          const auto num_entries = indices.size();
          for (unsigned int j = 0; j < static_cast<unsigned int>(num_entries);
               ++j)
            {
              if (static_cast<size_type>(std::abs(i - indices[j])) > local_b)
                local_b = std::abs(i - indices[j]);
            }
        }

      TrilinosWrappers::types::int_type global_b =
        Utilities::MPI::max(local_b,
                            Utilities::Trilinos::teuchos_comm_to_mpi_comm(
                              graph->getComm()));
      return static_cast<size_type>(global_b);
    }


    template <typename MemorySpace>
    unsigned int
    SparsityPattern<MemorySpace>::local_size() const
    {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
      return graph->getLocalNumRows();
#  else
      return graph->getNodeNumRows();
#  endif
    }


    template <typename MemorySpace>
    std::pair<typename SparsityPattern<MemorySpace>::size_type,
              typename SparsityPattern<MemorySpace>::size_type>
    SparsityPattern<MemorySpace>::local_range() const
    {
      const size_type begin = graph->getRowMap()->getMinGlobalIndex();
      const size_type end   = graph->getRowMap()->getMaxGlobalIndex() + 1;

      return {begin, end};
    }


    template <typename MemorySpace>
    std::uint64_t
    SparsityPattern<MemorySpace>::n_nonzero_elements() const
    {
      return graph->getGlobalNumEntries();
    }


    template <typename MemorySpace>
    unsigned int
    SparsityPattern<MemorySpace>::max_entries_per_row() const
    {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
      return graph->getLocalMaxNumRowEntries();
#  else
      return graph->getNodeMaxNumRowEntries();
#  endif
    }


    template <typename MemorySpace>
    typename SparsityPattern<MemorySpace>::size_type
    SparsityPattern<MemorySpace>::row_length(const size_type row) const
    {
      Assert(row < (size_type)n_rows(), ExcInternalError());

      // Get a representation of where the present row is located on
      // the current processor
      TrilinosWrappers::types::int_type local_row =
        graph->getRowMap()->getLocalElement(row);

      // On the processor that owns this row, we'll have a non-negative
      // value for `local_row` and can ask for the length of the row.
      if (local_row >= 0)
        return graph->getNumEntriesInLocalRow(local_row);
      else
        return static_cast<size_type>(-1);
    }
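
    // Illustration only: on ranks that do not own `row`, row_length()
    // returns size_type(-1) instead of raising an error, so a caller can
    // probe ownership:
    //
    //   if (sp.row_length(row) != static_cast<size_type>(-1))
    //     { /* the row is stored locally and may be queried further */ }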


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::add_row_entries(
      const dealii::types::global_dof_index                  &row,
      const ArrayView<const dealii::types::global_dof_index> &columns,
      const bool indices_are_sorted)
    {
      add_entries(row, columns.begin(), columns.end(), indices_are_sorted);
    }


    template <typename MemorySpace>
    Teuchos::RCP<const TpetraTypes::MapType<MemorySpace>>
    SparsityPattern<MemorySpace>::domain_partitioner() const
    {
      return graph->getDomainMap();
    }


    template <typename MemorySpace>
    Teuchos::RCP<const TpetraTypes::MapType<MemorySpace>>
    SparsityPattern<MemorySpace>::range_partitioner() const
    {
      return graph->getRangeMap();
    }


    template <typename MemorySpace>
    MPI_Comm
    SparsityPattern<MemorySpace>::get_mpi_communicator() const
    {
      return Utilities::Trilinos::teuchos_comm_to_mpi_comm(
        graph->getRangeMap()->getComm());
    }


    template <typename MemorySpace>
    Teuchos::RCP<const Teuchos::Comm<int>>
    SparsityPattern<MemorySpace>::get_teuchos_mpi_communicator() const
    {
      return graph->getRangeMap()->getComm();
    }


    // As of now, no particularly neat
    // output is generated in case of
    // multiple processors.
    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::print(
      std::ostream &out,
      const bool    write_extended_trilinos_info) const
    {
      if (write_extended_trilinos_info)
        out << *graph;
      else
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
          for (unsigned int i = 0; i < graph->getLocalNumRows(); ++i)
#  else
          for (unsigned int i = 0; i < graph->getNodeNumRows(); ++i)
#  endif
            {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
              typename TpetraTypes::GraphType<
                MemorySpace>::local_inds_host_view_type indices;
#  else
              Teuchos::ArrayView<const int> indices;
#  endif
              graph->getLocalRowView(i, indices);
              int num_entries = indices.size();
              for (int j = 0; j < num_entries; ++j)
                out << "(" << graph->getRowMap()->getGlobalElement(i) << ","
                    << graph->getColMap()->getGlobalElement(indices[j]) << ") "
                    << std::endl;
            }
        }

      AssertThrow(out.fail() == false, ExcIO());
    }


    template <typename MemorySpace>
    void
    SparsityPattern<MemorySpace>::print_gnuplot(std::ostream &out) const
    {
      Assert(graph->isFillComplete() == true, ExcInternalError());

      for (unsigned int row = 0; row < local_size(); ++row)
        {
#  if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
          typename TpetraTypes::GraphType<
            MemorySpace>::local_inds_host_view_type indices;
#  else
          Teuchos::ArrayView<const int> indices;
#  endif
          graph->getLocalRowView(row, indices);
          int num_entries = indices.size();

          // avoid sign comparison warning
          const dealii::types::signed_global_dof_index num_entries_ =
            num_entries;
          for (dealii::types::signed_global_dof_index j = 0; j < num_entries_;
               ++j)
            // while matrix entries are usually
            // written (i,j), with i vertical and
            // j horizontal, gnuplot output is
            // x-y, that is we have to exchange
            // the order of output
            out << static_cast<int>(
                     graph->getColMap()->getGlobalElement(indices[j]))
                << " "
                << -static_cast<int>(graph->getRowMap()->getGlobalElement(row))
                << std::endl;
        }

      AssertThrow(out.fail() == false, ExcIO());
    }
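
    // Illustration only: after streaming this output to a file, say
    // "sparsity.gpl", it can be rendered with gnuplot as
    //
    //   plot "sparsity.gpl" using 1:2 with points
    //
    // The negated row index on the y-axis puts row 0 at the top, giving the
    // usual matrix orientation.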

    // TODO: Implement!
    template <typename MemorySpace>
    std::size_t
    SparsityPattern<MemorySpace>::memory_consumption() const
    {
      DEAL_II_NOT_IMPLEMENTED();
      return 0;
    }


#  ifndef DOXYGEN
    // explicit instantiations
    template class SparsityPattern<dealii::MemorySpace::Host>;

    template void
    SparsityPattern<dealii::MemorySpace::Host>::copy_from(
      const dealii::SparsityPattern &);
    template void
    SparsityPattern<dealii::MemorySpace::Host>::copy_from(
      const dealii::DynamicSparsityPattern &);

    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);


    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Host>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);


    template class SparsityPattern<dealii::MemorySpace::Default>;

    template void
    SparsityPattern<dealii::MemorySpace::Default>::copy_from(
      const dealii::SparsityPattern &);
    template void
    SparsityPattern<dealii::MemorySpace::Default>::copy_from(
      const dealii::DynamicSparsityPattern &);

    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);


    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::SparsityPattern &,
      const MPI_Comm,
      bool);
    template void
    SparsityPattern<dealii::MemorySpace::Default>::reinit(
      const IndexSet &,
      const IndexSet &,
      const dealii::DynamicSparsityPattern &,
      const MPI_Comm,
      bool);

#  endif

  } // namespace TpetraWrappers

} // namespace LinearAlgebra

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_TRILINOS_WITH_TPETRA