sparse_direct.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2001 - 2025 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15 #include <deal.II/base/mpi.h>
16 #include <deal.II/base/parallel.h>
17
18 #include <deal.II/lac/block_sparse_matrix.h>
19 #include <deal.II/lac/block_vector.h>
20 #include <deal.II/lac/sparse_direct.h>
21#include <deal.II/lac/vector.h>
22
23#include <complex>
24#include <iostream>
25#include <list>
26#include <typeinfo>
27#include <vector>
28
29#ifdef DEAL_II_WITH_UMFPACK
30# include <umfpack.h>
31#endif
32
33#ifdef DEAL_II_WITH_MUMPS
34# include <dmumps_c.h>
35#endif
36
37 DEAL_II_NAMESPACE_OPEN
38
39namespace
40{
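41 /**
42  * Compute a grain size for the parallel loops below: split the rows of
43  * the given matrix into chunks such that each chunk contains roughly
44  * 1000 nonzero entries (based on the average number of entries per
45  * row), but always process at least one row per chunk. This keeps the
46  * scheduling overhead small for matrices with short rows.
47  */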
48 template <typename SparseMatrixType>
49 unsigned int
50 parallel_grainsize(const SparseMatrixType &matrix)
51 {
52 const unsigned int avg_entries_per_row =
53 matrix.n_nonzero_elements() / matrix.m();
54 return std::max(1000 / avg_entries_per_row, 1u);
55 }
56} // namespace
57
58
59 SparseDirectUMFPACK::~SparseDirectUMFPACK()
60 {
61 clear();
62 }
63
64
65void
66 SparseDirectUMFPACK::initialize(const SparsityPattern &)
67 {}
68
69
70#ifdef DEAL_II_WITH_UMFPACK
71
72 SparseDirectUMFPACK::SparseDirectUMFPACK()
73 : n_rows(0)
74 , n_cols(0)
75 , symbolic_decomposition(nullptr)
76 , numeric_decomposition(nullptr)
77 , control(UMFPACK_CONTROL)
78{
79 umfpack_dl_defaults(control.data());
80}
81
82
83
84void
85 SparseDirectUMFPACK::clear()
86 {
87 // delete objects that haven't been deleted yet
88 if (symbolic_decomposition != nullptr)
89 {
90 umfpack_dl_free_symbolic(&symbolic_decomposition);
91 symbolic_decomposition = nullptr;
92 }
93
94 if (numeric_decomposition != nullptr)
95 {
96 umfpack_dl_free_numeric(&numeric_decomposition);
97 numeric_decomposition = nullptr;
98 }
99
100 {
101 std::vector<types::suitesparse_index> tmp;
102 tmp.swap(Ap);
103 }
104
105 {
106 std::vector<types::suitesparse_index> tmp;
107 tmp.swap(Ai);
108 }
109
110 {
111 std::vector<double> tmp;
112 tmp.swap(Ax);
113 }
114
115 {
116 std::vector<double> tmp;
117 tmp.swap(Az);
118 }
119
120 umfpack_dl_defaults(control.data());
121}
122
123
124
125template <typename number>
126void
127 SparseDirectUMFPACK::sort_arrays(const SparseMatrix<number> &matrix)
128 {
129 // do the copying around of entries so that the diagonal entry is in the
130 // right place. note that this is easy to detect: since all entries apart
131 // from the diagonal entry are sorted, we know that the diagonal entry is
132 // in the wrong place if and only if its column index is larger than the
133 // column index of the second entry in a row
134 //
135 // ignore rows with only one or no entry
136 parallel::apply_to_subranges(
137 0,
138 matrix.m(),
139 [this](const size_type row_begin, const size_type row_end) {
140 for (size_type row = row_begin; row < row_end; ++row)
141 {
142 // we may have to move some elements that are left of the diagonal
143 // but presently after the diagonal entry to the left, whereas the
144 // diagonal entry has to move to the right. we could first figure out
145 // where to move everything to, but for simplicity we just make a
146 // series of swaps instead (this is kind of a single run of
147 // bubble-sort, which gives us the desired result since the array is
148 // already "almost" sorted)
149 //
150 // in the first loop, the condition in the while-header also checks
151 // that the row has at least two entries and that the diagonal entry
152 // is really in the wrong place
153 long int cursor = Ap[row];
154 while ((cursor < Ap[row + 1] - 1) && (Ai[cursor] > Ai[cursor + 1]))
155 {
156 std::swap(Ai[cursor], Ai[cursor + 1]);
157
158 std::swap(Ax[cursor], Ax[cursor + 1]);
159 if (numbers::NumberTraits<number>::is_complex == true)
160 std::swap(Az[cursor], Az[cursor + 1]);
161
162 ++cursor;
163 }
164 }
165 },
166 parallel_grainsize(matrix));
167}
168
169
170
171template <typename number>
172void
173 SparseDirectUMFPACK::sort_arrays(const SparseMatrixEZ<number> &matrix)
174 {
175 // same thing for SparseMatrixEZ
176 parallel::apply_to_subranges(
177 0,
178 matrix.m(),
179 [this](const size_type row_begin, const size_type row_end) {
180 for (size_type row = row_begin; row < row_end; ++row)
181 {
182 long int cursor = Ap[row];
183 while ((cursor < Ap[row + 1] - 1) && (Ai[cursor] > Ai[cursor + 1]))
184 {
185 std::swap(Ai[cursor], Ai[cursor + 1]);
186
187 std::swap(Ax[cursor], Ax[cursor + 1]);
188 if (numbers::NumberTraits<number>::is_complex == true)
189 std::swap(Az[cursor], Az[cursor + 1]);
190
191 ++cursor;
192 }
193 }
194 },
195 parallel_grainsize(matrix));
196}
197
198
199
200template <typename number>
201void
202 SparseDirectUMFPACK::sort_arrays(const BlockSparseMatrix<number> &matrix)
203 {
204 // the case for block matrices is a bit more difficult, since all we know
205 // is that *within each block*, the diagonal of that block may come
206 // first. however, that means that there may be as many entries per row
207 // in the wrong place as there are block columns. we can do the same
208 // thing as above, but we have to do it multiple times
209 parallel::apply_to_subranges(
210 0,
211 matrix.m(),
212 [this, &matrix](const size_type row_begin, const size_type row_end) {
213 for (size_type row = row_begin; row < row_end; ++row)
214 {
215 long int cursor = Ap[row];
216 for (size_type block = 0; block < matrix.n_block_cols(); ++block)
217 {
218 // find the next out-of-order element
219 while ((cursor < Ap[row + 1] - 1) &&
220 (Ai[cursor] < Ai[cursor + 1]))
221 ++cursor;
222
223 // if there is none, then just go on
224 if (cursor == Ap[row + 1] - 1)
225 break;
226
227 // otherwise swap this entry with successive ones as long as
228 // necessary
229 long int element = cursor;
230 while ((element < Ap[row + 1] - 1) &&
231 (Ai[element] > Ai[element + 1]))
232 {
233 std::swap(Ai[element], Ai[element + 1]);
234
235 std::swap(Ax[element], Ax[element + 1]);
236 if (numbers::NumberTraits<number>::is_complex == true)
237 std::swap(Az[element], Az[element + 1]);
238
239 ++element;
240 }
241 }
242 }
243 },
244 parallel_grainsize(matrix));
245}
246
247
248
249template <class Matrix>
250void
251 SparseDirectUMFPACK::factorize(const Matrix &matrix)
252 {
253 Assert(matrix.m() == matrix.n(), ExcNotQuadratic());
254
255 clear();
256
257 using number = typename Matrix::value_type;
258
259 n_rows = matrix.m();
260 n_cols = matrix.n();
261
262 const size_type N = matrix.m();
263
264 // copy over the data from the matrix to the data structures UMFPACK
265 // wants. note two things: first, UMFPACK wants compressed column storage
266 // whereas we always do compressed row storage; we work around this by,
267 // rather than shuffling things around, copy over the data we have, but
268 // then call the umfpack_dl_solve function with the UMFPACK_At argument,
269 // meaning that we want to solve for the transpose system
270 //
271 // second: the data we have in the sparse matrices is "almost" right
272 // already; UMFPACK wants the entries in each row (i.e. really: column)
273 // to be sorted in ascending order. we almost have that, except that we
274 // usually store the diagonal first in each row to allow for some
275 // optimizations. thus, we have to resort things a little bit, but only
276 // within each row
277 //
278 // final note: if the matrix has entries in the sparsity pattern that are
279 // actually occupied by entries that have a zero numerical value, then we
280 // keep them anyway. people are supposed to provide accurate sparsity
281 // patterns.
282 Ap.resize(N + 1);
283 Ai.resize(matrix.n_nonzero_elements());
284 Ax.resize(matrix.n_nonzero_elements());
285 if (numbers::NumberTraits<number>::is_complex == true)
286 Az.resize(matrix.n_nonzero_elements());
287
288 // first fill row lengths array
289 Ap[0] = 0;
290 for (size_type row = 1; row <= N; ++row)
291 Ap[row] = Ap[row - 1] + matrix.get_row_length(row - 1);
292 Assert(static_cast<size_type>(Ap.back()) == Ai.size(), ExcInternalError());
293
294 // then copy over matrix elements. note that for sparse matrices,
295 // iterators are sorted so that they traverse each row from start to end
296 // before moving on to the next row.
297 parallel::apply_to_subranges(
298 0,
299 matrix.m(),
300 [this, &matrix](const size_type row_begin, const size_type row_end) {
301 for (size_type row = row_begin; row < row_end; ++row)
302 {
303 long int index = Ap[row];
304 for (typename Matrix::const_iterator p = matrix.begin(row);
305 p != matrix.end(row);
306 ++p)
307 {
308 // write entry into the first free one for this row
309 Ai[index] = p->column();
310 Ax[index] = std::real(p->value());
311 if (numbers::NumberTraits<number>::is_complex == true)
312 Az[index] = std::imag(p->value());
313
314 // then move pointer ahead
315 ++index;
316 }
317 Assert(index == Ap[row + 1], ExcInternalError());
318 }
319 },
320 parallel_grainsize(matrix));
321
322 // make sure that the elements in each row are sorted. we have to be more
323 // careful for block sparse matrices, so ship this task out to a
324 // different function
325 sort_arrays(matrix);
326
327 int status;
328 if (numbers::NumberTraits<number>::is_complex == false)
329 status = umfpack_dl_symbolic(N,
330 N,
331 Ap.data(),
332 Ai.data(),
333 Ax.data(),
334 &symbolic_decomposition,
335 control.data(),
336 nullptr);
337 else
338 status = umfpack_zl_symbolic(N,
339 N,
340 Ap.data(),
341 Ai.data(),
342 Ax.data(),
343 Az.data(),
344 &symbolic_decomposition,
345 control.data(),
346 nullptr);
347 AssertThrow(status == UMFPACK_OK,
348 ExcUMFPACKError("umfpack_dl_symbolic", status));
349
350 if (numbers::NumberTraits<number>::is_complex == false)
351 status = umfpack_dl_numeric(Ap.data(),
352 Ai.data(),
353 Ax.data(),
354 symbolic_decomposition,
355 &numeric_decomposition,
356 control.data(),
357 nullptr);
358 else
359 status = umfpack_zl_numeric(Ap.data(),
360 Ai.data(),
361 Ax.data(),
362 Az.data(),
363 symbolic_decomposition,
364 &numeric_decomposition,
365 control.data(),
366 nullptr);
367
368 // Clean up before we deal with the error code from the calls above:
369 umfpack_dl_free_symbolic(&symbolic_decomposition);
370 if (status == UMFPACK_WARNING_singular_matrix)
371 {
372 // UMFPACK sometimes warns that the matrix is singular, but that a
373 // factorization was successful nonetheless. Report this by
374 // throwing an exception that can be caught at a higher level:
375 AssertThrow(false,
376 ExcMessage(
377 "UMFPACK reports that the matrix is singular, "
378 "but that the factorization was successful anyway. "
379 "You can try and see whether you can still "
380 "solve a linear system with such a factorization "
381 "by catching and ignoring this exception, "
382 "though in practice this will typically not "
383 "work."));
384 }
385 else
386 AssertThrow(status == UMFPACK_OK,
387 ExcUMFPACKError("umfpack_dl_numeric", status));
388}
389
390
391
392void
393 SparseDirectUMFPACK::solve(Vector<double> &rhs_and_solution,
394 const bool transpose /*=false*/) const
395{
396 // make sure that some kind of factorize() call has happened before
397 Assert(Ap.size() != 0, ExcNotInitialized());
398 Assert(Ai.size() != 0, ExcNotInitialized());
399 Assert(Ai.size() == Ax.size(), ExcNotInitialized());
400
401 Assert(Az.empty(),
402 ExcMessage("You have previously factored a matrix using this class "
403 "that had complex-valued entries. This then requires "
404 "applying the factored matrix to a complex-valued "
405 "vector, but you are only providing a real-valued vector "
406 "here."));
407
408 Vector<double> rhs(rhs_and_solution.size());
409 rhs = rhs_and_solution;
410
411 // solve the system. note that since UMFPACK wants compressed column
412 // storage instead of the compressed row storage format we use in
413 // deal.II's SparsityPattern classes, we solve for UMFPACK's A^T instead
414
415 // Conversely, if we solve for the transpose, we have to use UMFPACK_A
416 // instead.
417 const int status = umfpack_dl_solve(transpose ? UMFPACK_A : UMFPACK_At,
418 Ap.data(),
419 Ai.data(),
420 Ax.data(),
421 rhs_and_solution.begin(),
422 rhs.begin(),
423 numeric_decomposition,
424 control.data(),
425 nullptr);
426 AssertThrow(status == UMFPACK_OK,
427 ExcUMFPACKError("umfpack_dl_solve", status));
428}
429
430
431
432void
433SparseDirectUMFPACK::solve(Vector<std::complex<double>> &rhs_and_solution,
434 const bool transpose /*=false*/) const
435{
436# ifdef DEAL_II_WITH_COMPLEX_VALUES
437 // make sure that some kind of factorize() call has happened before
438 Assert(Ap.size() != 0, ExcNotInitialized());
439 Assert(Ai.size() != 0, ExcNotInitialized());
440 Assert(Ai.size() == Ax.size(), ExcNotInitialized());
441
442 // First see whether the matrix that was factorized was complex-valued.
443 // If so, just apply the complex factorization to the vector.
444 if (Az.size() != 0)
445 {
446 Assert(Ax.size() == Az.size(), ExcInternalError());
447
448 // It would be nice if we could just present a pointer to the
449 // first element of the complex-valued solution vector and let
450 // UMFPACK fill both the real and imaginary parts of the solution
451 // vector at that address. UMFPACK calls this the 'packed' format,
452 // and in those cases it only takes one pointer to the entire
453 // vector, rather than a pointer to the real and one pointer to
454 // the imaginary parts of the vector. The problem is that if we
455 // want to pack, then we also need to pack the matrix, and the
456 // functions above have already decided that we don't want to pack
457 // the matrix but instead deal with split format for the matrix,
458 // and then UMFPACK decides that it can't deal with a split matrix
459 // and a packed vector. We have to choose one or the other, not
460 // mix.
461 //
462 // So create four vectors, one each for the real and imaginary parts
463 // of the right hand side and solution.
464 Vector<double> rhs_re(rhs_and_solution.size());
465 Vector<double> rhs_im(rhs_and_solution.size());
466 for (unsigned int i = 0; i < rhs_and_solution.size(); ++i)
467 {
468 rhs_re(i) = std::real(rhs_and_solution(i));
469 rhs_im(i) = std::imag(rhs_and_solution(i));
470 }
471
472 Vector<double> solution_re(rhs_and_solution.size());
473 Vector<double> solution_im(rhs_and_solution.size());
474
475 // Solve the system. note that since UMFPACK wants compressed column
476 // storage instead of the compressed row storage format we use in
477 // deal.II's SparsityPattern classes, we solve for UMFPACK's A^T instead
478 //
479 // Conversely, if we solve for the transpose, we have to use UMFPACK_A
480 // instead.
481 //
482 // Note that for the complex case here, the transpose is selected using
483 // UMFPACK_Aat, not UMFPACK_At.
484 const int status = umfpack_zl_solve(transpose ? UMFPACK_A : UMFPACK_Aat,
485 Ap.data(),
486 Ai.data(),
487 Ax.data(),
488 Az.data(),
489 solution_re.data(),
490 solution_im.data(),
491 rhs_re.data(),
492 rhs_im.data(),
493 numeric_decomposition,
494 control.data(),
495 nullptr);
496 AssertThrow(status == UMFPACK_OK,
497 ExcUMFPACKError("umfpack_zl_solve", status));
498
499 // Now put things back together into the output vector
500 for (unsigned int i = 0; i < rhs_and_solution.size(); ++i)
501 rhs_and_solution(i) = {solution_re(i), solution_im(i)};
502 }
503 else
504 {
505 // We have factorized a real-valued matrix, but the rhs and solution
506 // vectors are complex-valued. UMFPACK does not natively support this
507 // case, but we can just apply the factorization to real and imaginary
508 // parts of the right hand side separately
509 const Vector<std::complex<double>> rhs = rhs_and_solution;
510
511 // Get the real part of the right hand side, solve with it, and copy the
512 // results into the result vector by just copying the real output
513 // into the complex-valued result vector (which implies setting the
514 // imaginary parts to zero):
515 Vector<double> rhs_real_or_imag(rhs_and_solution.size());
516 for (unsigned int i = 0; i < rhs.size(); ++i)
517 rhs_real_or_imag(i) = std::real(rhs(i));
518
519 solve(rhs_real_or_imag, transpose);
520
521 rhs_and_solution = rhs_real_or_imag;
522
523 // Then repeat the whole thing with the imaginary part. The copying step
524 // is more complicated now because we can only touch the imaginary
525 // component of the output vector:
526 for (unsigned int i = 0; i < rhs.size(); ++i)
527 rhs_real_or_imag(i) = std::imag(rhs(i));
528
529 solve(rhs_real_or_imag, transpose);
530
531 for (unsigned int i = 0; i < rhs.size(); ++i)
532 rhs_and_solution(i).imag(rhs_real_or_imag(i));
533 }
534
535# else
536
537 (void)rhs_and_solution;
538 (void)transpose;
539 Assert(false,
540 ExcMessage(
541 "This function can't be called if deal.II has been configured "
542 "with DEAL_II_WITH_COMPLEX_VALUES=FALSE."));
543# endif
544}
545
546
547void
548 SparseDirectUMFPACK::solve(BlockVector<double> &rhs_and_solution,
549 const bool transpose /*=false*/) const
550{
551 // the UMFPACK functions want a contiguous array of elements, so
552 // there is no way around copying data around. thus, just copy the
553 // data into a regular vector and back
554 Vector<double> tmp(rhs_and_solution.size());
555 tmp = rhs_and_solution;
556 solve(tmp, transpose);
557 rhs_and_solution = tmp;
558}
559
560
561
562void
563SparseDirectUMFPACK::solve(BlockVector<std::complex<double>> &rhs_and_solution,
564 const bool transpose /*=false*/) const
565{
566# ifdef DEAL_II_WITH_COMPLEX_VALUES
567 // the UMFPACK functions want a contiguous array of elements, so
568 // there is no way around copying data around. thus, just copy the
569 // data into a regular vector and back
570 Vector<std::complex<double>> tmp(rhs_and_solution.size());
571 tmp = rhs_and_solution;
572 solve(tmp, transpose);
573 rhs_and_solution = tmp;
574
575# else
576 (void)rhs_and_solution;
577 (void)transpose;
578 Assert(false,
579 ExcMessage(
580 "This function can't be called if deal.II has been configured "
581 "with DEAL_II_WITH_COMPLEX_VALUES=FALSE."));
582# endif
583}
584
585
586
587template <class Matrix>
588void
589SparseDirectUMFPACK::solve(const Matrix &matrix,
590 Vector<double> &rhs_and_solution,
591 const bool transpose /*=false*/)
592{
593 factorize(matrix);
594 solve(rhs_and_solution, transpose);
595}
596
597
598
599template <class Matrix>
600void
601SparseDirectUMFPACK::solve(const Matrix &matrix,
602 Vector<std::complex<double>> &rhs_and_solution,
603 const bool transpose /*=false*/)
604{
605# ifdef DEAL_II_WITH_COMPLEX_VALUES
606 factorize(matrix);
607 solve(rhs_and_solution, transpose);
608
609# else
610
611 (void)matrix;
612 (void)rhs_and_solution;
613 (void)transpose;
614 Assert(false,
615 ExcMessage(
616 "This function can't be called if deal.II has been configured "
617 "with DEAL_II_WITH_COMPLEX_VALUES=FALSE."));
618# endif
619}
620
621
622
623template <class Matrix>
624void
625SparseDirectUMFPACK::solve(const Matrix &matrix,
626 BlockVector<double> &rhs_and_solution,
627 const bool transpose /*=false*/)
628{
629 factorize(matrix);
630 solve(rhs_and_solution, transpose);
631}
632
633
634
635template <class Matrix>
636void
637SparseDirectUMFPACK::solve(const Matrix &matrix,
638 BlockVector<std::complex<double>> &rhs_and_solution,
639 const bool transpose /*=false*/)
640{
641# ifdef DEAL_II_WITH_COMPLEX_VALUES
642 factorize(matrix);
643 solve(rhs_and_solution, transpose);
644
645# else
646
647 (void)matrix;
648 (void)rhs_and_solution;
649 (void)transpose;
650 Assert(false,
651 ExcMessage(
652 "This function can't be called if deal.II has been configured "
653 "with DEAL_II_WITH_COMPLEX_VALUES=FALSE."));
654# endif
655}
656
657
658#else
659
660
661 SparseDirectUMFPACK::SparseDirectUMFPACK()
662 : n_rows(0)
663 , n_cols(0)
664 , symbolic_decomposition(nullptr)
665 , numeric_decomposition(nullptr)
666 , control(0)
667{}
668
669
670void
671 SparseDirectUMFPACK::clear()
672 {}
673
674
675template <class Matrix>
676void
677SparseDirectUMFPACK::factorize(const Matrix &)
678{
679 AssertThrow(
680 false,
681 ExcMessage(
682 "To call this function you need UMFPACK, but you configured deal.II "
683 "without passing the necessary switch to 'cmake'. Please consult the "
684 "installation instructions at https://dealii.org/current/readme.html"));
685}
686
687
688void
689 SparseDirectUMFPACK::solve(Vector<double> &, const bool) const
690 {
691 AssertThrow(
692 false,
693 ExcMessage(
694 "To call this function you need UMFPACK, but you configured deal.II "
695 "without passing the necessary switch to 'cmake'. Please consult the "
696 "installation instructions at https://dealii.org/current/readme.html"));
697}
698
699
700
701void
702SparseDirectUMFPACK::solve(Vector<std::complex<double>> &, const bool) const
703{
704 AssertThrow(
705 false,
706 ExcMessage(
707 "To call this function you need UMFPACK, but you configured deal.II "
708 "without passing the necessary switch to 'cmake'. Please consult the "
709 "installation instructions at https://dealii.org/current/readme.html"));
710}
711
712
713
714void
715 SparseDirectUMFPACK::solve(BlockVector<double> &, const bool) const
716 {
717 AssertThrow(
718 false,
719 ExcMessage(
720 "To call this function you need UMFPACK, but you configured deal.II "
721 "without passing the necessary switch to 'cmake'. Please consult the "
722 "installation instructions at https://dealii.org/current/readme.html"));
723}
724
725
726
727void
728SparseDirectUMFPACK::solve(BlockVector<std::complex<double>> &,
729 const bool) const
730{
731 AssertThrow(
732 false,
733 ExcMessage(
734 "To call this function you need UMFPACK, but you configured deal.II "
735 "without passing the necessary switch to 'cmake'. Please consult the "
736 "installation instructions at https://dealii.org/current/readme.html"));
737}
738
739
740
741template <class Matrix>
742void
743SparseDirectUMFPACK::solve(const Matrix &, Vector<double> &, const bool)
744{
745 AssertThrow(
746 false,
747 ExcMessage(
748 "To call this function you need UMFPACK, but you configured deal.II "
749 "without passing the necessary switch to 'cmake'. Please consult the "
750 "installation instructions at https://dealii.org/current/readme.html"));
751}
752
753
754
755template <class Matrix>
756void
757SparseDirectUMFPACK::solve(const Matrix &,
758 Vector<std::complex<double>> &,
759 const bool)
760{
761 AssertThrow(
762 false,
763 ExcMessage(
764 "To call this function you need UMFPACK, but you configured deal.II "
765 "without passing the necessary switch to 'cmake'. Please consult the "
766 "installation instructions at https://dealii.org/current/readme.html"));
767}
768
769
770
771template <class Matrix>
772void
773SparseDirectUMFPACK::solve(const Matrix &, BlockVector<double> &, const bool)
774{
775 AssertThrow(
776 false,
777 ExcMessage(
778 "To call this function you need UMFPACK, but you configured deal.II "
779 "without passing the necessary switch to 'cmake'. Please consult the "
780 "installation instructions at https://dealii.org/current/readme.html"));
781}
782
783
784
785template <class Matrix>
786void
787SparseDirectUMFPACK::solve(const Matrix &,
788 BlockVector<std::complex<double>> &,
789 const bool)
790{
791 AssertThrow(
792 false,
793 ExcMessage(
794 "To call this function you need UMFPACK, but you configured deal.II "
795 "without passing the necessary switch to 'cmake'. Please consult the "
796 "installation instructions at https://dealii.org/current/readme.html"));
797}
798
799#endif
800
801
802template <class Matrix>
803void
804 SparseDirectUMFPACK::initialize(const Matrix &M, const AdditionalData)
805 {
806 this->factorize(M);
807}
808
809
810void
811 SparseDirectUMFPACK::vmult(Vector<double> &dst, const Vector<double> &src) const
812 {
813 dst = src;
814 this->solve(dst);
815}
816
817
818
819void
820 SparseDirectUMFPACK::vmult(BlockVector<double> &dst,
821 const BlockVector<double> &src) const
822{
823 dst = src;
824 this->solve(dst);
825}
826
827
828void
829 SparseDirectUMFPACK::Tvmult(Vector<double> &dst,
830 const Vector<double> &src) const
831{
832 dst = src;
833 this->solve(dst, /*transpose=*/true);
834}
835
836
837
838void
839 SparseDirectUMFPACK::Tvmult(BlockVector<double> &dst,
840 const BlockVector<double> &src) const
841{
842 dst = src;
843 this->solve(dst, /*transpose=*/true);
844}
845
846 SparseDirectUMFPACK::size_type
847 SparseDirectUMFPACK::m() const
848 {
849 Assert(n_rows != 0, ExcNotInitialized());
850 return n_rows;
851}
852
853 SparseDirectUMFPACK::size_type
854 SparseDirectUMFPACK::n() const
855 {
856 Assert(n_cols != 0, ExcNotInitialized());
857 return n_cols;
858}
859
860
861
862#ifdef DEAL_II_WITH_MUMPS
863
864 SparseDirectMUMPS::SparseDirectMUMPS()
865 : initialize_called(false)
866{}
867
868 SparseDirectMUMPS::~SparseDirectMUMPS()
869 {
870 id.job = -2;
871 dmumps_c(&id);
872
873 // Do some cleaning
874 if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
875 {
876 delete[] a;
877 delete[] irn;
878 delete[] jcn;
879 }
880}
881
882template <class Matrix>
883void
884SparseDirectMUMPS::initialize_matrix(const Matrix &matrix)
885{
886 Assert(matrix.n() == matrix.m(), ExcMessage("Matrix needs to be square."));
887
888
889 // Check we haven't been here before:
890 Assert(initialize_called == false, ExcInitializeAlreadyCalled());
891
892 // Initialize MUMPS instance:
893 id.job = -1;
894 id.par = 1;
895 id.sym = 0;
896
897 // Use MPI_COMM_WORLD as communicator
898 id.comm_fortran = -987654;
899 dmumps_c(&id);
900
901 // Hand over matrix and right-hand side
902 if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
903 {
904 // Objects denoting a MUMPS data structure:
905 //
906 // Set number of unknowns
907 n = matrix.n();
908
909 // number of nonzero elements in matrix
910 nz = matrix.n_actually_nonzero_elements();
911
912 // representation of the matrix
913 a = new double[nz];
914
915 // matrix indices pointing to the row and column dimensions
916 // respectively of the matrix representation above (a): ie. a[k] is
917 // the matrix element (irn[k], jcn[k])
918 irn = new int[nz];
919 jcn = new int[nz];
920
921 size_type index = 0;
922
923 // loop over the elements of the matrix row by row, as suggested in
924 // the documentation of the sparse matrix iterator class
925 for (size_type row = 0; row < matrix.m(); ++row)
926 {
927 for (typename Matrix::const_iterator ptr = matrix.begin(row);
928 ptr != matrix.end(row);
929 ++ptr)
930 if (std::abs(ptr->value()) > 0.0)
931 {
932 a[index] = ptr->value();
933 irn[index] = row + 1;
934 jcn[index] = ptr->column() + 1;
935 ++index;
936 }
937 }
938
939 id.n = n;
940 id.nz = nz;
941 id.irn = irn;
942 id.jcn = jcn;
943 id.a = a;
944 }
945
946 // No outputs
947 id.icntl[0] = -1;
948 id.icntl[1] = -1;
949 id.icntl[2] = -1;
950 id.icntl[3] = 0;
951
952 // Exit by setting this flag:
953 initialize_called = true;
954}
955
956template <class Matrix>
957void
958SparseDirectMUMPS::initialize(const Matrix &matrix,
959 const Vector<double> &vector)
960{
961 // Hand over matrix and right-hand side
962 initialize_matrix(matrix);
963
964 copy_rhs_to_mumps(vector);
965}
966
967void
968 SparseDirectMUMPS::copy_rhs_to_mumps(const Vector<double> &new_rhs)
969 {
970 Assert(n == new_rhs.size(),
971 ExcMessage("Matrix size and rhs length must be equal."));
972
973 if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
974 {
975 rhs.resize(n);
976 for (size_type i = 0; i < n; ++i)
977 rhs[i] = new_rhs(i);
978
979 id.rhs = &rhs[0];
980 }
981}
982
983void
984 SparseDirectMUMPS::copy_solution(Vector<double> &vector)
985 {
986 Assert(n == vector.size(),
987 ExcMessage("Matrix size and solution vector length must be equal."));
988 Assert(n == rhs.size(),
989 ExcMessage("Class not initialized with a rhs vector."));
990
991 // Copy solution into the given vector
992 if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
993 {
994 for (size_type i = 0; i < n; ++i)
995 vector(i) = rhs[i];
996
997 rhs.resize(0); // remove rhs again
998 }
999}
1000
1001template <class Matrix>
1002void
1003SparseDirectMUMPS::initialize(const Matrix &matrix)
1004{
1005 // Initialize MUMPS instance:
1006 initialize_matrix(matrix);
1007 // Start analysis and factorization (MUMPS job 4)
1008 id.job = 4;
1009 dmumps_c(&id);
1010}
1011
1012void
1013 SparseDirectMUMPS::solve(Vector<double> &vector)
1014 {
1015 // TODO: this could be implemented similar to SparseDirectUMFPACK where
1016 // the given vector will be used as the RHS. Sadly, there is no easy
1017 // way to do this without breaking the interface.
1018
1019 // Check that the solver has been initialized by the routine above:
1020 Assert(initialize_called == true, ExcNotInitialized());
1021
1022 // and that the matrix has at least one nonzero element:
1023 Assert(nz != 0, ExcNotInitialized());
1024
1025 // Start solver
1026 id.job = 6; // 6 = analysis, factorization, and solve
1027 dmumps_c(&id);
1028 copy_solution(vector);
1029}
1030
1031void
1032 SparseDirectMUMPS::vmult(Vector<double> &dst, const Vector<double> &src)
1033 {
1034 // Check that the solver has been initialized by the routine above:
1035 Assert(initialize_called == true, ExcNotInitialized());
1036
1037 // and that the matrix has at least one nonzero element:
1038 Assert(nz != 0, ExcNotInitialized());
1039
1040 Assert(n == dst.size(), ExcMessage("Destination vector has the wrong size."));
1041 Assert(n == src.size(), ExcMessage("Source vector has the wrong size."));
1042
1043 // Hand over right-hand side
1044 copy_rhs_to_mumps(src);
1045
1046 // Start solver (MUMPS job 3: solve using the existing factorization)
1047 id.job = 3;
1048 dmumps_c(&id);
1049 copy_solution(dst);
1050}
1051
1052#endif // DEAL_II_WITH_MUMPS
1053
1054
1055// explicit instantiations for SparseMatrixUMFPACK
1056#define InstantiateUMFPACK(MatrixType) \
1057 template void SparseDirectUMFPACK::factorize(const MatrixType &); \
1058 template void SparseDirectUMFPACK::solve(const MatrixType &, \
1059 Vector<double> &, \
1060 const bool); \
1061 template void SparseDirectUMFPACK::solve(const MatrixType &, \
1062 Vector<std::complex<double>> &, \
1063 const bool); \
1064 template void SparseDirectUMFPACK::solve(const MatrixType &, \
1065 BlockVector<double> &, \
1066 const bool); \
1067 template void SparseDirectUMFPACK::solve( \
1068 const MatrixType &, BlockVector<std::complex<double>> &, const bool); \
1069 template void SparseDirectUMFPACK::initialize(const MatrixType &, \
1070 const AdditionalData)
1071
1072// Instantiate everything for real-valued matrices
1073 InstantiateUMFPACK(SparseMatrix<double>);
1074 InstantiateUMFPACK(SparseMatrix<float>);
1075 InstantiateUMFPACK(SparseMatrixEZ<double>);
1076 InstantiateUMFPACK(SparseMatrixEZ<float>);
1077 InstantiateUMFPACK(BlockSparseMatrix<double>);
1078 InstantiateUMFPACK(BlockSparseMatrix<float>);
1079
1080// Now also for complex-valued matrices
1081#ifdef DEAL_II_WITH_COMPLEX_VALUES
1082InstantiateUMFPACK(SparseMatrix<std::complex<double>>);
1083InstantiateUMFPACK(SparseMatrix<std::complex<float>>);
1084 InstantiateUMFPACK(BlockSparseMatrix<std::complex<double>>);
1085 InstantiateUMFPACK(BlockSparseMatrix<std::complex<float>>);
1086 #endif
1087
1088// explicit instantiations for SparseDirectMUMPS
1089#ifdef DEAL_II_WITH_MUMPS
1090# define InstantiateMUMPS(MATRIX) \
1091 template void SparseDirectMUMPS::initialize(const MATRIX &, \
1092 const Vector<double> &); \
1093 template void SparseDirectMUMPS::initialize(const MATRIX &);
1094
1095InstantiateMUMPS(SparseMatrix<double>) InstantiateMUMPS(SparseMatrix<float>)
1096 // InstantiateMUMPS(SparseMatrixEZ<double>)
1097 // InstantiateMUMPS(SparseMatrixEZ<float>)
1098 InstantiateMUMPS(BlockSparseMatrix<double>)
1099 InstantiateMUMPS(BlockSparseMatrix<float>)
1100#endif
1101
1102 DEAL_II_NAMESPACE_CLOSE