Reference documentation for deal.II version 9.5.0
scalapack.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2017 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/lac/scalapack.h>
18
19#ifdef DEAL_II_WITH_SCALAPACK
20
22# include <deal.II/base/mpi.h>
23# include <deal.II/base/mpi.templates.h>
24
25# include <deal.II/lac/scalapack.templates.h>
26
27# ifdef DEAL_II_WITH_HDF5
28# include <hdf5.h>
29# endif
30
31# include <limits>
32# include <memory>
33
34DEAL_II_NAMESPACE_OPEN
35
36# ifdef DEAL_II_WITH_HDF5
37
38template <typename number>
39inline hid_t
40hdf5_type_id(const number *)
41{
42 Assert(false, ExcNotImplemented());
43 // don't know what to put here; it does not matter
44 return -1;
45}
46
47inline hid_t
48hdf5_type_id(const double *)
49{
50 return H5T_NATIVE_DOUBLE;
51}
52
53inline hid_t
54hdf5_type_id(const float *)
55{
56 return H5T_NATIVE_FLOAT;
57}
58
59inline hid_t
60hdf5_type_id(const int *)
61{
62 return H5T_NATIVE_INT;
63}
64
65inline hid_t
66hdf5_type_id(const unsigned int *)
67{
68 return H5T_NATIVE_UINT;
69}
70
71inline hid_t
72hdf5_type_id(const char *)
73{
74 return H5T_NATIVE_CHAR;
75}
76# endif // DEAL_II_WITH_HDF5
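// Usage sketch (editorial, not part of the library source): these overloads
// let the HDF5 save/load code further down pick the matching native HDF5 type
// purely from the pointer type of the local buffer, via ordinary overload
// resolution. A minimal illustration, assuming a file and dataspace have
// already been created (names are placeholders):
//
//   double *data   = local_buffer;          // local part of the matrix
//   hid_t   type_id = hdf5_type_id(data);   // resolves to H5T_NATIVE_DOUBLE
//   hid_t   dataset = H5Dcreate2(file, "/matrix", type_id, dataspace,
//                                H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);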
77
78
79
80template <typename NumberType>
81ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
82 const size_type n_rows_,
83 const size_type n_columns_,
84 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
85 const size_type row_block_size_,
86 const size_type column_block_size_,
87 const LAPACKSupport::Property property_)
88 : uplo('L')
89 , // for non-symmetric matrices this is not needed
90 first_process_row(0)
91 , first_process_column(0)
92 , submatrix_row(1)
93 , submatrix_column(1)
94{
95 reinit(n_rows_,
96 n_columns_,
97 process_grid,
98 row_block_size_,
99 column_block_size_,
100 property_);
101}
102
103
104
105template <typename NumberType>
106ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
107 const size_type size,
108 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
109 const size_type block_size,
110 const LAPACKSupport::Property property)
111 : ScaLAPACKMatrix<NumberType>(size,
112 size,
113 process_grid,
114 block_size,
115 block_size,
116 property)
117{}
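// Usage sketch (editorial, not part of the library source): constructing a
// square block-cyclically distributed matrix. The communicator `mpi_comm` and
// the matrix/block sizes below are illustrative assumptions only.
//
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     mpi_comm, 1000, 1000, 32, 32);
//   ScaLAPACKMatrix<double> A(1000, grid, 32,
//                             LAPACKSupport::Property::symmetric);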
118
119
120
121template <typename NumberType>
122ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(
123 const std::string & filename,
124 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
125 const size_type row_block_size,
126 const size_type column_block_size)
127 : uplo('L')
128 , // for non-symmetric matrices this is not needed
129 first_process_row(0)
130 , first_process_column(0)
131 , submatrix_row(1)
132 , submatrix_column(1)
133{
134# ifndef DEAL_II_WITH_HDF5
135 (void)filename;
136 (void)process_grid;
137 (void)row_block_size;
138 (void)column_block_size;
139 Assert(
140 false,
142 "This function is only available when deal.II is configured with HDF5"));
143# else
144
145 const unsigned int this_mpi_process(
146 Utilities::MPI::this_mpi_process(process_grid->mpi_communicator));
147
148 // Before reading the content from disk the root process determines the
149 // dimensions of the matrix. Subsequently, memory is allocated by a call to
150 // reinit() and the matrix is loaded by a call to load().
151 if (this_mpi_process == 0)
152 {
153 herr_t status = 0;
154
155 // open file in read-only mode
156 hid_t file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
157 AssertThrow(file >= 0, ExcIO());
158
159 // get data set in file
160 hid_t dataset = H5Dopen2(file, "/matrix", H5P_DEFAULT);
161 AssertThrow(dataset >= 0, ExcIO());
162
163 // determine file space
164 hid_t filespace = H5Dget_space(dataset);
165
166 // get number of dimensions in data set
167 int rank = H5Sget_simple_extent_ndims(filespace);
168 AssertThrow(rank == 2, ExcIO());
169 hsize_t dims[2];
170 status = H5Sget_simple_extent_dims(filespace, dims, nullptr);
171 AssertThrow(status >= 0, ExcIO());
172
173 // due to ScaLAPACK's column-major memory layout the dimensions are
174 // swapped
175 n_rows = dims[1];
176 n_columns = dims[0];
177
178 // close/release resources
179 status = H5Sclose(filespace);
180 AssertThrow(status >= 0, ExcIO());
181 status = H5Dclose(dataset);
182 AssertThrow(status >= 0, ExcIO());
183 status = H5Fclose(file);
184 AssertThrow(status >= 0, ExcIO());
185 }
186 int ierr = MPI_Bcast(&n_rows,
187 1,
189 0 /*from root*/,
190 process_grid->mpi_communicator);
191 AssertThrowMPI(ierr);
192
193 ierr = MPI_Bcast(&n_columns,
194 1,
196 0 /*from root*/,
197 process_grid->mpi_communicator);
198 AssertThrowMPI(ierr);
199
200 // the property will be overwritten by the subsequent call to load()
201 reinit(n_rows,
202 n_columns,
203 process_grid,
204 row_block_size,
205 column_block_size,
206 LAPACKSupport::Property::general);
207
208 load(filename.c_str());
209
210# endif // DEAL_II_WITH_HDF5
211}
212
213
214
215template <typename NumberType>
216void
217ScaLAPACKMatrix<NumberType>::reinit(
218 const size_type n_rows_,
219 const size_type n_columns_,
220 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
221 const size_type row_block_size_,
222 const size_type column_block_size_,
223 const LAPACKSupport::Property property_)
224{
225 Assert(row_block_size_ > 0, ExcMessage("Row block size has to be positive."));
226 Assert(column_block_size_ > 0,
227 ExcMessage("Column block size has to be positive."));
228 Assert(
229 row_block_size_ <= n_rows_,
231 "Row block size can not be greater than the number of rows of the matrix"));
232 Assert(
233 column_block_size_ <= n_columns_,
235 "Column block size can not be greater than the number of columns of the matrix"));
236
237 state = LAPACKSupport::State::matrix;
238 property = property_;
239 grid = process_grid;
240 n_rows = n_rows_;
241 n_columns = n_columns_;
242 row_block_size = row_block_size_;
243 column_block_size = column_block_size_;
244
245 if (grid->mpi_process_is_active)
246 {
247 // Get local sizes:
248 n_local_rows = numroc_(&n_rows,
249 &row_block_size,
250 &(grid->this_process_row),
251 &first_process_row,
252 &(grid->n_process_rows));
253 n_local_columns = numroc_(&n_columns,
254 &column_block_size,
255 &(grid->this_process_column),
256 &first_process_column,
257 &(grid->n_process_columns));
258
259 // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different
260 // between processes
261 int lda = std::max(1, n_local_rows);
262
263 int info = 0;
264 descinit_(descriptor,
265 &n_rows,
266 &n_columns,
267 &row_block_size,
268 &column_block_size,
269 &first_process_row,
270 &first_process_column,
271 &(grid->blacs_context),
272 &lda,
273 &info);
274 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
275
276 this->TransposeTable<NumberType>::reinit(n_local_rows, n_local_columns);
277 }
278 else
279 {
280 // set process-local variables to something telling:
281 n_local_rows = -1;
282 n_local_columns = -1;
283 std::fill(std::begin(descriptor), std::end(descriptor), -1);
284 }
285}
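// Editorial note (not part of the library source): numroc_ computes how many
// of the block-cyclically distributed rows/columns land on this process. As a
// worked example, for n_rows = 10, row_block_size = 2 and a 2x2 process grid,
// process row 0 owns the blocks {0,1}, {4,5}, {8,9} (6 local rows) while
// process row 1 owns {2,3}, {6,7} (4 local rows), i.e.
// numroc_(10, 2, 0, 0, 2) == 6 and numroc_(10, 2, 1, 0, 2) == 4.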
286
287
288
289template <typename NumberType>
290void
291ScaLAPACKMatrix<NumberType>::reinit(
292 const size_type size,
293 const std::shared_ptr<const Utilities::MPI::ProcessGrid> &process_grid,
294 const size_type block_size,
295 const LAPACKSupport::Property property)
296{
297 reinit(size, size, process_grid, block_size, block_size, property);
298}
299
300
301
302template <typename NumberType>
303void
304ScaLAPACKMatrix<NumberType>::set_property(
305 const LAPACKSupport::Property property_)
306{
307 property = property_;
308}
309
310
311
312template <typename NumberType>
313LAPACKSupport::Property
314ScaLAPACKMatrix<NumberType>::get_property() const
315{
316 return property;
317}
318
319
320
321template <typename NumberType>
322LAPACKSupport::State
323ScaLAPACKMatrix<NumberType>::get_state() const
324{
325 return state;
326}
327
328
329
330template <typename NumberType>
331ScaLAPACKMatrix<NumberType> &
332ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
333{
334 // FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
335 // This routine computes the sum of two matrices B := a*A + b*B.
336 // Matrices can have different distributions, in particular matrix A can
337 // be owned by only one process, so we can set a=1 and b=0 to copy
338 // non-distributed matrix A into distributed matrix B.
339 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
340 Assert(n_columns == int(matrix.n()),
341 ExcDimensionMismatch(n_columns, matrix.n()));
342
343 if (grid->mpi_process_is_active)
344 {
345 for (int i = 0; i < n_local_rows; ++i)
346 {
347 const int glob_i = global_row(i);
348 for (int j = 0; j < n_local_columns; ++j)
349 {
350 const int glob_j = global_column(j);
351 local_el(i, j) = matrix(glob_i, glob_j);
352 }
353 }
354 }
355 state = LAPACKSupport::matrix;
356 return *this;
357}
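// Usage sketch (editorial, not part of the library source): distributing a
// serial FullMatrix into a ScaLAPACKMatrix and gathering it back. Assumes the
// matrix `A` was set up as above and `serial` holds identical content on all
// processes of the grid's communicator.
//
//   FullMatrix<double> serial(1000, 1000);
//   // ... fill `serial` identically on all processes ...
//   A = serial;        // scatter into the block-cyclic layout
//   A.copy_to(serial); // gather the distributed content back (see copy_to below)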
358
359
360
361template <typename NumberType>
362void
363ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
364 const unsigned int rank)
365{
366 if (n_rows * n_columns == 0)
367 return;
368
369 const unsigned int this_mpi_process(
370 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
371
372# ifdef DEBUG
373 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
374 ExcMessage("All processes have to call routine with identical rank"));
375 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
376 ExcMessage("All processes have to call routine with identical rank"));
377# endif
378
379 // root process has to be active in the grid of A
380 if (this_mpi_process == rank)
381 {
382 Assert(grid->mpi_process_is_active, ExcInternalError());
383 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
384 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
385 }
386 // Create 1x1 grid for matrix B.
387 // The underlying grid for matrix B only contains the process #rank.
388 // This grid will be used to copy the serial matrix B to the distributed
389 // matrix using the ScaLAPACK routine pgemr2d.
390 MPI_Group group_A;
391 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
392 const int n = 1;
393 const std::vector<int> ranks(n, rank);
394 MPI_Group group_B;
395 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
396 MPI_Comm communicator_B;
397
398 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_from;
399 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
400 group_B,
401 mpi_tag,
402 &communicator_B);
403 AssertThrowMPI(ierr);
404 int n_proc_rows_B = 1, n_proc_cols_B = 1;
405 int this_process_row_B = -1, this_process_column_B = -1;
406 int blacs_context_B = -1;
407 if (MPI_COMM_NULL != communicator_B)
408 {
409 // Initialize BLACS context from the provided communicator
410 blacs_context_B = Csys2blacs_handle(communicator_B);
411 const char *order = "Col";
412 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
413 Cblacs_gridinfo(blacs_context_B,
414 &n_proc_rows_B,
415 &n_proc_cols_B,
416 &this_process_row_B,
417 &this_process_column_B);
418 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
419 // the active process of grid B has to be process #rank of the
420 // communicator attached to A
421 Assert(this_mpi_process == rank, ExcInternalError());
422 }
423 const bool mpi_process_is_active_B =
424 (this_process_row_B >= 0 && this_process_column_B >= 0);
425
426 // create descriptor for matrix B
427 std::vector<int> descriptor_B(9, -1);
428 const int first_process_row_B = 0, first_process_col_B = 0;
429
430 if (mpi_process_is_active_B)
431 {
432 // Get local sizes:
433 int n_local_rows_B = numroc_(&n_rows,
434 &n_rows,
435 &this_process_row_B,
436 &first_process_row_B,
437 &n_proc_rows_B);
438 int n_local_cols_B = numroc_(&n_columns,
439 &n_columns,
440 &this_process_column_B,
441 &first_process_col_B,
442 &n_proc_cols_B);
443 Assert(n_local_rows_B == n_rows, ExcInternalError());
444 Assert(n_local_cols_B == n_columns, ExcInternalError());
445 (void)n_local_cols_B;
446
447 int lda = std::max(1, n_local_rows_B);
448 int info = 0;
449 descinit_(descriptor_B.data(),
450 &n_rows,
451 &n_columns,
452 &n_rows,
453 &n_columns,
454 &first_process_row_B,
455 &first_process_col_B,
456 &blacs_context_B,
457 &lda,
458 &info);
459 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
460 }
461 if (this->grid->mpi_process_is_active)
462 {
463 const int ii = 1;
464 NumberType *loc_vals_A =
465 this->values.size() > 0 ? this->values.data() : nullptr;
466 const NumberType *loc_vals_B =
467 mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
468
469 // pgemr2d has to be called only for processes active on grid attached to
470 // matrix A
471 pgemr2d(&n_rows,
472 &n_columns,
473 loc_vals_B,
474 &ii,
475 &ii,
476 descriptor_B.data(),
477 loc_vals_A,
478 &ii,
479 &ii,
480 this->descriptor,
481 &(this->grid->blacs_context));
482 }
483 if (mpi_process_is_active_B)
484 Cblacs_gridexit(blacs_context_B);
485
486 MPI_Group_free(&group_A);
487 MPI_Group_free(&group_B);
488 if (MPI_COMM_NULL != communicator_B)
489 Utilities::MPI::free_communicator(communicator_B);
490
491 state = LAPACKSupport::matrix;
492}
493
494
495
496template <typename NumberType>
497unsigned int
498ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
499{
500 Assert(n_local_rows >= 0 && loc_row < static_cast<unsigned int>(n_local_rows),
501 ExcIndexRange(loc_row, 0, n_local_rows));
502 const int i = loc_row + 1;
503 return indxl2g_(&i,
504 &row_block_size,
505 &(grid->this_process_row),
506 &first_process_row,
507 &(grid->n_process_rows)) -
508 1;
509}
510
511
512
513template <typename NumberType>
514unsigned int
515ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
516{
517 Assert(n_local_columns >= 0 &&
518 loc_column < static_cast<unsigned int>(n_local_columns),
519 ExcIndexRange(loc_column, 0, n_local_columns));
520 const int j = loc_column + 1;
521 return indxl2g_(&j,
522 &column_block_size,
523 &(grid->this_process_column),
524 &first_process_column,
525 &(grid->n_process_columns)) -
526 1;
527}
528
529
530
531template <typename NumberType>
532void
533ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
534 const unsigned int rank) const
535{
536 if (n_rows * n_columns == 0)
537 return;
538
539 const unsigned int this_mpi_process(
540 Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
541
542# ifdef DEBUG
543 Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
544 ExcMessage("All processes have to call routine with identical rank"));
545 Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
546 ExcMessage("All processes have to call routine with identical rank"));
547# endif
548
549 if (this_mpi_process == rank)
550 {
551 // the process which gets the serial copy has to be in the process grid
552 Assert(this->grid->is_process_active(), ExcInternalError());
553 Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
554 Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
555 }
556
557 // Create 1x1 grid for matrix B.
558 // The underlying grid for matrix B only contains the process #rank.
559 // This grid will be used to copy the distributed matrix to the serial
560 // matrix B using the ScaLAPACK routine pgemr2d.
561 MPI_Group group_A;
562 MPI_Comm_group(this->grid->mpi_communicator, &group_A);
563 const int n = 1;
564 const std::vector<int> ranks(n, rank);
565 MPI_Group group_B;
566 MPI_Group_incl(group_A, n, ranks.data(), &group_B);
567 MPI_Comm communicator_B;
568
569 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to;
570 const int ierr = MPI_Comm_create_group(this->grid->mpi_communicator,
571 group_B,
572 mpi_tag,
573 &communicator_B);
574 AssertThrowMPI(ierr);
575 int n_proc_rows_B = 1, n_proc_cols_B = 1;
576 int this_process_row_B = -1, this_process_column_B = -1;
577 int blacs_context_B = -1;
578 if (MPI_COMM_NULL != communicator_B)
579 {
580 // Initialize BLACS context from the provided communicator
581 blacs_context_B = Csys2blacs_handle(communicator_B);
582 const char *order = "Col";
583 Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
584 Cblacs_gridinfo(blacs_context_B,
585 &n_proc_rows_B,
586 &n_proc_cols_B,
587 &this_process_row_B,
588 &this_process_column_B);
589 Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
590 // the active process of grid B has to be process #rank of the
591 // communicator attached to A
592 Assert(this_mpi_process == rank, ExcInternalError());
593 }
594 const bool mpi_process_is_active_B =
595 (this_process_row_B >= 0 && this_process_column_B >= 0);
596
597 // create descriptor for matrix B
598 std::vector<int> descriptor_B(9, -1);
599 const int first_process_row_B = 0, first_process_col_B = 0;
600
601 if (mpi_process_is_active_B)
602 {
603 // Get local sizes:
604 int n_local_rows_B = numroc_(&n_rows,
605 &n_rows,
606 &this_process_row_B,
607 &first_process_row_B,
608 &n_proc_rows_B);
609 int n_local_cols_B = numroc_(&n_columns,
610 &n_columns,
611 &this_process_column_B,
612 &first_process_col_B,
613 &n_proc_cols_B);
614 Assert(n_local_rows_B == n_rows, ExcInternalError());
615 Assert(n_local_cols_B == n_columns, ExcInternalError());
616 (void)n_local_cols_B;
617
618 int lda = std::max(1, n_local_rows_B);
619 int info = 0;
620 // fill descriptor for matrix B
621 descinit_(descriptor_B.data(),
622 &n_rows,
623 &n_columns,
624 &n_rows,
625 &n_columns,
626 &first_process_row_B,
627 &first_process_col_B,
628 &blacs_context_B,
629 &lda,
630 &info);
631 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
632 }
633 // pgemr2d has to be called only for processes active on grid attached to
634 // matrix A
635 if (this->grid->mpi_process_is_active)
636 {
637 const int ii = 1;
638 const NumberType *loc_vals_A =
639 this->values.size() > 0 ? this->values.data() : nullptr;
640 NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
641
642 pgemr2d(&n_rows,
643 &n_columns,
644 loc_vals_A,
645 &ii,
646 &ii,
647 this->descriptor,
648 loc_vals_B,
649 &ii,
650 &ii,
651 descriptor_B.data(),
652 &(this->grid->blacs_context));
653 }
654 if (mpi_process_is_active_B)
655 Cblacs_gridexit(blacs_context_B);
656
657 MPI_Group_free(&group_A);
658 MPI_Group_free(&group_B);
659 if (MPI_COMM_NULL != communicator_B)
660 Utilities::MPI::free_communicator(communicator_B);
661}
662
663
664
665template <typename NumberType>
666void
667ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
668{
669 // FIXME: use PDGEMR2d for copying?
670 // PDGEMR2d copies a submatrix of A on a submatrix of B.
671 // A and B can have different distributions
672 // see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
673 Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
674 Assert(n_columns == int(matrix.n()),
675 ExcDimensionMismatch(n_columns, matrix.n()));
676
677 matrix = 0.;
678 if (grid->mpi_process_is_active)
679 {
680 for (int i = 0; i < n_local_rows; ++i)
681 {
682 const int glob_i = global_row(i);
683 for (int j = 0; j < n_local_columns; ++j)
684 {
685 const int glob_j = global_column(j);
686 matrix(glob_i, glob_j) = local_el(i, j);
687 }
688 }
689 }
690 Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
691
692 // we could move the following lines under the main loop above,
693 // but they would be dependent on glob_i and glob_j, which
694 // won't make it much prettier
695 if (state == LAPACKSupport::cholesky)
696 {
697 if (property == LAPACKSupport::lower_triangular)
698 for (unsigned int i = 0; i < matrix.n(); ++i)
699 for (unsigned int j = i + 1; j < matrix.m(); ++j)
700 matrix(i, j) = 0.;
701 else if (property == LAPACKSupport::upper_triangular)
702 for (unsigned int i = 0; i < matrix.n(); ++i)
703 for (unsigned int j = 0; j < i; ++j)
704 matrix(i, j) = 0.;
705 }
706 else if (property == LAPACKSupport::symmetric &&
707 state == LAPACKSupport::inverse_matrix)
708 {
709 if (uplo == 'L')
710 for (unsigned int i = 0; i < matrix.n(); ++i)
711 for (unsigned int j = i + 1; j < matrix.m(); ++j)
712 matrix(i, j) = matrix(j, i);
713 else if (uplo == 'U')
714 for (unsigned int i = 0; i < matrix.n(); ++i)
715 for (unsigned int j = 0; j < i; ++j)
716 matrix(i, j) = matrix(j, i);
717 }
718}
719
720
721
722template <typename NumberType>
723void
724ScaLAPACKMatrix<NumberType>::copy_to(
725 ScaLAPACKMatrix<NumberType> & B,
726 const std::pair<unsigned int, unsigned int> &offset_A,
727 const std::pair<unsigned int, unsigned int> &offset_B,
728 const std::pair<unsigned int, unsigned int> &submatrix_size) const
729{
730 // submatrix is empty
731 if (submatrix_size.first == 0 || submatrix_size.second == 0)
732 return;
733
734 // range checking for matrix A
735 AssertIndexRange(offset_A.first, n_rows - submatrix_size.first + 1);
736 AssertIndexRange(offset_A.second, n_columns - submatrix_size.second + 1);
737
738 // range checking for matrix B
739 AssertIndexRange(offset_B.first, B.n_rows - submatrix_size.first + 1);
740 AssertIndexRange(offset_B.second, B.n_columns - submatrix_size.second + 1);
741
742 // Currently, copying of matrices will only be supported if A and B share the
743 // same MPI communicator
744 int ierr, comparison;
745 ierr = MPI_Comm_compare(grid->mpi_communicator,
746 B.grid->mpi_communicator,
747 &comparison);
748 AssertThrowMPI(ierr);
749 Assert(comparison == MPI_IDENT,
750 ExcMessage("Matrix A and B must have a common MPI Communicator"));
751
752 /*
753 * The routine pgemr2d requires a BLACS context resembling at least the union
754 * of process grids described by the BLACS contexts held by the ProcessGrids
755 * of matrix A and B. As A and B share the same MPI communicator, there is no
756 * need to create a union MPI communicator to initialize the BLACS context
757 */
758 int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
759 const char *order = "Col";
760 int union_n_process_rows =
761 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator);
762 int union_n_process_columns = 1;
763 Cblacs_gridinit(&union_blacs_context,
764 order,
765 union_n_process_rows,
766 union_n_process_columns);
767
768 int n_grid_rows_A, n_grid_columns_A, my_row_A, my_column_A;
769 Cblacs_gridinfo(this->grid->blacs_context,
770 &n_grid_rows_A,
771 &n_grid_columns_A,
772 &my_row_A,
773 &my_column_A);
774
775 // check whether process is in the BLACS context of matrix A
776 const bool in_context_A =
777 (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
778 (my_column_A >= 0 && my_column_A < n_grid_columns_A);
779
780 int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
781 Cblacs_gridinfo(B.grid->blacs_context,
782 &n_grid_rows_B,
783 &n_grid_columns_B,
784 &my_row_B,
785 &my_column_B);
786
787 // check whether process is in the BLACS context of matrix B
788 const bool in_context_B =
789 (my_row_B >= 0 && my_row_B < n_grid_rows_B) &&
790 (my_column_B >= 0 && my_column_B < n_grid_columns_B);
791
792 const int n_rows_submatrix = submatrix_size.first;
793 const int n_columns_submatrix = submatrix_size.second;
794
795 // due to Fortran indexing one has to be added
796 int ia = offset_A.first + 1, ja = offset_A.second + 1;
797 int ib = offset_B.first + 1, jb = offset_B.second + 1;
798
799 std::array<int, 9> desc_A, desc_B;
800
801 const NumberType *loc_vals_A = nullptr;
802 NumberType * loc_vals_B = nullptr;
803
804 // Note: the function pgemr2d has to be called for all processes in the union
805 // BLACS context. If the calling process is not part of the BLACS context of A,
806 // desc_A[1] has to be -1 and all other parameters do not have to be set. If
807 // the calling process is not part of the BLACS context of B, desc_B[1] has to
808 // be -1 and all other parameters do not have to be set.
809 if (in_context_A)
810 {
811 if (this->values.size() != 0)
812 loc_vals_A = this->values.data();
813
814 for (unsigned int i = 0; i < desc_A.size(); ++i)
815 desc_A[i] = this->descriptor[i];
816 }
817 else
818 desc_A[1] = -1;
819
820 if (in_context_B)
821 {
822 if (B.values.size() != 0)
823 loc_vals_B = B.values.data();
824
825 for (unsigned int i = 0; i < desc_B.size(); ++i)
826 desc_B[i] = B.descriptor[i];
827 }
828 else
829 desc_B[1] = -1;
830
831 pgemr2d(&n_rows_submatrix,
832 &n_columns_submatrix,
833 loc_vals_A,
834 &ia,
835 &ja,
836 desc_A.data(),
837 loc_vals_B,
838 &ib,
839 &jb,
840 desc_B.data(),
841 &union_blacs_context);
842
843 B.state = LAPACKSupport::matrix;
844
845 // releasing the union BLACS context
846 Cblacs_gridexit(union_blacs_context);
847}
848
849
850
851template <typename NumberType>
852void
853ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &dest) const
854{
855 Assert(n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
856 Assert(n_columns == dest.n_columns,
857 ExcDimensionMismatch(n_columns, dest.n_columns));
858
859 if (this->grid->mpi_process_is_active)
860 AssertThrow(
861 this->descriptor[0] == 1,
862 ExcMessage(
863 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
864 if (dest.grid->mpi_process_is_active)
865 AssertThrow(
866 dest.descriptor[0] == 1,
867 ExcMessage(
868 "Copying of ScaLAPACK matrices only implemented for dense matrices"));
869
870 /*
871 * In case of different process grids or block-cyclic distributions,
872 * inter-process communication is necessary.
873 * If the distributed matrices have the same process grid and block sizes,
874 * local copying is enough.
875 */
876 if ((this->grid != dest.grid) || (row_block_size != dest.row_block_size) ||
877 (column_block_size != dest.column_block_size))
878 {
879 /*
880 * get the MPI communicator, which is the union of the source and
881 * destination MPI communicator
882 */
883 int ierr = 0;
884 MPI_Group group_source, group_dest, group_union;
885 ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
886 AssertThrowMPI(ierr);
887 ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
888 AssertThrowMPI(ierr);
889 ierr = MPI_Group_union(group_source, group_dest, &group_union);
890 AssertThrowMPI(ierr);
891 MPI_Comm mpi_communicator_union;
892
893 // to create a communicator representing the union of the source
894 // and destination MPI communicator we need a communicator that
895 // is guaranteed to contain all desired processes -- i.e.,
896 // MPI_COMM_WORLD. on the other hand, as documented in the MPI
897 // standard, MPI_Comm_create_group is not collective on all
898 // processes in the first argument, but instead is collective on
899 // only those processes listed in the group. in other words,
900 // there is really no harm in passing MPI_COMM_WORLD as the
901 // first argument, even if the program we are currently running
902 // and that is calling this function only works on a subset of
903 // processes. the same holds for the wrapper/fallback we are using here.
904
905 const int mpi_tag = Utilities::MPI::internal::Tags::scalapack_copy_to2;
906 ierr = MPI_Comm_create_group(MPI_COMM_WORLD,
907 group_union,
908 mpi_tag,
909 &mpi_communicator_union);
910 AssertThrowMPI(ierr);
911
912 /*
913 * The routine pgemr2d requires a BLACS context resembling at least the
914 * union of process grids described by the BLACS contexts of matrix A and
915 * B
916 */
917 int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
918 const char *order = "Col";
919 int union_n_process_rows =
920 Utilities::MPI::n_mpi_processes(mpi_communicator_union);
921 int union_n_process_columns = 1;
922 Cblacs_gridinit(&union_blacs_context,
923 order,
924 union_n_process_rows,
925 union_n_process_columns);
926
927 const NumberType *loc_vals_source = nullptr;
928 NumberType * loc_vals_dest = nullptr;
929
930 if (this->grid->mpi_process_is_active && (this->values.size() > 0))
931 {
932 AssertThrow(this->values.size() > 0,
934 "source: process is active but local matrix empty"));
935 loc_vals_source = this->values.data();
936 }
937 if (dest.grid->mpi_process_is_active && (dest.values.size() > 0))
938 {
939 AssertThrow(
940 dest.values.size() > 0,
941 ExcMessage(
942 "destination: process is active but local matrix empty"));
943 loc_vals_dest = dest.values.data();
944 }
945 pgemr2d(&n_rows,
946 &n_columns,
947 loc_vals_source,
948 &submatrix_row,
949 &submatrix_column,
950 descriptor,
951 loc_vals_dest,
952 &dest.submatrix_row,
953 &dest.submatrix_column,
954 dest.descriptor,
955 &union_blacs_context);
956
957 Cblacs_gridexit(union_blacs_context);
958
959 if (mpi_communicator_union != MPI_COMM_NULL)
960 Utilities::MPI::free_communicator(mpi_communicator_union);
961 ierr = MPI_Group_free(&group_source);
962 AssertThrowMPI(ierr);
963 ierr = MPI_Group_free(&group_dest);
964 AssertThrowMPI(ierr);
965 ierr = MPI_Group_free(&group_union);
966 AssertThrowMPI(ierr);
967 }
968 else
969 // process is active in the process grid
970 if (this->grid->mpi_process_is_active)
971 dest.values = this->values;
972
973 dest.state = state;
974 dest.property = property;
975}
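// Usage sketch (editorial, not part of the library source): redistributing a
// matrix to a different block-cyclic layout, for example before an operation
// that prefers another block size. Assumes `A` uses 32x32 blocks on `grid`:
//
//   ScaLAPACKMatrix<double> A64(A.m(), A.n(), grid, 64, 64);
//   A.copy_to(A64); // pgemr2d-based redistribution between the two layouts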
976
977
978
979template <typename NumberType>
980void
981ScaLAPACKMatrix<NumberType>::copy_transposed(
982 const ScaLAPACKMatrix<NumberType> &B)
983{
984 add(B, 0, 1, true);
985}
986
987
988
989template <typename NumberType>
990void
991ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
992 const NumberType alpha,
993 const NumberType beta,
994 const bool transpose_B)
995{
996 if (transpose_B)
997 {
998 Assert(n_rows == B.n_columns, ExcDimensionMismatch(n_rows, B.n_columns));
999 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
1000 Assert(column_block_size == B.row_block_size,
1001 ExcDimensionMismatch(column_block_size, B.row_block_size));
1002 Assert(row_block_size == B.column_block_size,
1003 ExcDimensionMismatch(row_block_size, B.column_block_size));
1004 }
1005 else
1006 {
1007 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
1008 Assert(n_columns == B.n_columns,
1009 ExcDimensionMismatch(n_columns, B.n_columns));
1010 Assert(column_block_size == B.column_block_size,
1011 ExcDimensionMismatch(column_block_size, B.column_block_size));
1012 Assert(row_block_size == B.row_block_size,
1013 ExcDimensionMismatch(row_block_size, B.row_block_size));
1014 }
1015 Assert(this->grid == B.grid,
1016 ExcMessage("The matrices A and B need to have the same process grid"));
1017
1018 if (this->grid->mpi_process_is_active)
1019 {
1020 char trans_b = transpose_B ? 'T' : 'N';
1021 NumberType *A_loc =
1022 (this->values.size() > 0) ? this->values.data() : nullptr;
1023 const NumberType *B_loc =
1024 (B.values.size() > 0) ? B.values.data() : nullptr;
1025
1026 pgeadd(&trans_b,
1027 &n_rows,
1028 &n_columns,
1029 &beta,
1030 B_loc,
1031 &B.submatrix_row,
1032 &B.submatrix_column,
1033 B.descriptor,
1034 &alpha,
1035 A_loc,
1036 &submatrix_row,
1037 &submatrix_column,
1038 descriptor);
1039 }
1040 state = LAPACKSupport::matrix;
1041}
1042
1043
1044
1045template <typename NumberType>
1046void
1047ScaLAPACKMatrix<NumberType>::add(const NumberType a,
1048 const ScaLAPACKMatrix<NumberType> &B)
1049{
1050 add(B, 1, a, false);
1051}
1052
1053
1054
1055template <typename NumberType>
1056void
1057ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
1058 const ScaLAPACKMatrix<NumberType> &B)
1059{
1060 add(B, 1, a, true);
1061}
1062
1063
1064
1065template <typename NumberType>
1066void
1067ScaLAPACKMatrix<NumberType>::mult(const NumberType b,
1068 const ScaLAPACKMatrix<NumberType> &B,
1069 const NumberType c,
1070 ScaLAPACKMatrix<NumberType> & C,
1071 const bool transpose_A,
1072 const bool transpose_B) const
1073{
1074 Assert(this->grid == B.grid,
1075 ExcMessage("The matrices A and B need to have the same process grid"));
1076 Assert(C.grid == B.grid,
1077 ExcMessage("The matrices B and C need to have the same process grid"));
1078
1079 // see for further info:
1080 // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgemm.htm
1081 if (!transpose_A && !transpose_B)
1082 {
1083 Assert(this->n_columns == B.n_rows,
1084 ExcDimensionMismatch(this->n_columns, B.n_rows));
1085 Assert(this->n_rows == C.n_rows,
1086 ExcDimensionMismatch(this->n_rows, C.n_rows));
1087 Assert(B.n_columns == C.n_columns,
1088 ExcDimensionMismatch(B.n_columns, C.n_columns));
1089 Assert(this->row_block_size == C.row_block_size,
1090 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1091 Assert(this->column_block_size == B.row_block_size,
1092 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1093 Assert(B.column_block_size == C.column_block_size,
1094 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1095 }
1096 else if (transpose_A && !transpose_B)
1097 {
1098 Assert(this->n_rows == B.n_rows,
1099 ExcDimensionMismatch(this->n_rows, B.n_rows));
1100 Assert(this->n_columns == C.n_rows,
1101 ExcDimensionMismatch(this->n_columns, C.n_rows));
1102 Assert(B.n_columns == C.n_columns,
1103 ExcDimensionMismatch(B.n_columns, C.n_columns));
1104 Assert(this->column_block_size == C.row_block_size,
1105 ExcDimensionMismatch(this->column_block_size, C.row_block_size));
1106 Assert(this->row_block_size == B.row_block_size,
1107 ExcDimensionMismatch(this->row_block_size, B.row_block_size));
1108 Assert(B.column_block_size == C.column_block_size,
1109 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1110 }
1111 else if (!transpose_A && transpose_B)
1112 {
1113 Assert(this->n_columns == B.n_columns,
1114 ExcDimensionMismatch(this->n_columns, B.n_columns));
1115 Assert(this->n_rows == C.n_rows,
1116 ExcDimensionMismatch(this->n_rows, C.n_rows));
1117 Assert(B.n_rows == C.n_columns,
1118 ExcDimensionMismatch(B.n_rows, C.n_columns));
1119 Assert(this->row_block_size == C.row_block_size,
1120 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1121 Assert(this->column_block_size == B.column_block_size,
1122 ExcDimensionMismatch(this->column_block_size,
1123 B.column_block_size));
1124 Assert(B.row_block_size == C.column_block_size,
1125 ExcDimensionMismatch(B.row_block_size, C.column_block_size));
1126 }
1127 else // if (transpose_A && transpose_B)
1128 {
1129 Assert(this->n_rows == B.n_columns,
1130 ExcDimensionMismatch(this->n_rows, B.n_columns));
1131 Assert(this->n_columns == C.n_rows,
1132 ExcDimensionMismatch(this->n_columns, C.n_rows));
1133 Assert(B.n_rows == C.n_columns,
1134 ExcDimensionMismatch(B.n_rows, C.n_columns));
1135 Assert(this->column_block_size == C.row_block_size,
1136 ExcDimensionMismatch(this->row_block_size, C.row_block_size));
1137 Assert(this->row_block_size == B.column_block_size,
1138 ExcDimensionMismatch(this->column_block_size, B.row_block_size));
1139 Assert(B.row_block_size == C.column_block_size,
1140 ExcDimensionMismatch(B.column_block_size, C.column_block_size));
1141 }
1142
1143 if (this->grid->mpi_process_is_active)
1144 {
1145 char trans_a = transpose_A ? 'T' : 'N';
1146 char trans_b = transpose_B ? 'T' : 'N';
1147
1148 const NumberType *A_loc =
1149 (this->values.size() > 0) ? this->values.data() : nullptr;
1150 const NumberType *B_loc =
1151 (B.values.size() > 0) ? B.values.data() : nullptr;
1152 NumberType *C_loc = (C.values.size() > 0) ? C.values.data() : nullptr;
1153 int m = C.n_rows;
1154 int n = C.n_columns;
1155 int k = transpose_A ? this->n_rows : this->n_columns;
1156
1157 pgemm(&trans_a,
1158 &trans_b,
1159 &m,
1160 &n,
1161 &k,
1162 &b,
1163 A_loc,
1164 &(this->submatrix_row),
1165 &(this->submatrix_column),
1166 this->descriptor,
1167 B_loc,
1168 &B.submatrix_row,
1169 &B.submatrix_column,
1170 B.descriptor,
1171 &c,
1172 C_loc,
1173 &C.submatrix_row,
1174 &C.submatrix_column,
1175 C.descriptor);
1176 }
1177 C.state = LAPACKSupport::matrix;
1178}
1179
1180
1181
1182template <typename NumberType>
1183void
1184ScaLAPACKMatrix<NumberType>::mmult(ScaLAPACKMatrix<NumberType> & C,
1185 const ScaLAPACKMatrix<NumberType> &B,
1186 const bool adding) const
1187{
1188 if (adding)
1189 mult(1., B, 1., C, false, false);
1190 else
1191 mult(1., B, 0, C, false, false);
1192}
1193
1194
1195
1196template <typename NumberType>
1197void
1198ScaLAPACKMatrix<NumberType>::Tmmult(ScaLAPACKMatrix<NumberType> & C,
1199 const ScaLAPACKMatrix<NumberType> &B,
1200 const bool adding) const
1201{
1202 if (adding)
1203 mult(1., B, 1., C, true, false);
1204 else
1205 mult(1., B, 0, C, true, false);
1206}
1207
1208
1209
1210template <typename NumberType>
1211void
1212ScaLAPACKMatrix<NumberType>::mTmult(ScaLAPACKMatrix<NumberType> & C,
1213 const ScaLAPACKMatrix<NumberType> &B,
1214 const bool adding) const
1215{
1216 if (adding)
1217 mult(1., B, 1., C, false, true);
1218 else
1219 mult(1., B, 0, C, false, true);
1220}
1221
1222
1223
1224template <typename NumberType>
1225void
1226ScaLAPACKMatrix<NumberType>::TmTmult(ScaLAPACKMatrix<NumberType> & C,
1227 const ScaLAPACKMatrix<NumberType> &B,
1228 const bool adding) const
1229{
1230 if (adding)
1231 mult(1., B, 1., C, true, true);
1232 else
1233 mult(1., B, 0, C, true, true);
1234}
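// Usage sketch (editorial, not part of the library source): the four wrappers
// above all forward to mult(). Assuming A, B and C are ScaLAPACKMatrix<double>
// objects on the same process grid with compatible sizes and block sizes:
//
//   A.mmult(C, B);                      // C = A * B
//   A.Tmmult(C, B);                     // C = A^T * B
//   A.mult(2., B, 1., C, false, true);  // C = 2 * A * B^T + C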
1235
1236
1237
1238template <typename NumberType>
1239void
1240ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
1241{
1242 Assert(
1243 n_columns == n_rows && property == LAPACKSupport::Property::symmetric,
1244 ExcMessage(
1245 "Cholesky factorization can be applied to symmetric matrices only."));
1246 Assert(state == LAPACKSupport::matrix,
1247 ExcMessage(
1248 "Matrix has to be in Matrix state before calling this function."));
1249
1250 if (grid->mpi_process_is_active)
1251 {
1252 int info = 0;
1253 NumberType *A_loc = this->values.data();
1254 // pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
1255 ppotrf(&uplo,
1256 &n_columns,
1257 A_loc,
1258 &submatrix_row,
1259 &submatrix_column,
1260 descriptor,
1261 &info);
1262 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ppotrf", info));
1263 }
1264 state = LAPACKSupport::cholesky;
1265 property = (uplo == 'L' ? LAPACKSupport::lower_triangular :
1266 LAPACKSupport::upper_triangular);
1267}
1268
1269
1270
1271template <typename NumberType>
1272void
1273ScaLAPACKMatrix<NumberType>::compute_lu_factorization()
1274{
1275 Assert(state == LAPACKSupport::matrix,
1276 ExcMessage(
1277 "Matrix has to be in Matrix state before calling this function."));
1278
1279 if (grid->mpi_process_is_active)
1280 {
1281 int info = 0;
1282 NumberType *A_loc = this->values.data();
1283
1284 const int iarow = indxg2p_(&submatrix_row,
1285 &row_block_size,
1286 &(grid->this_process_row),
1287 &first_process_row,
1288 &(grid->n_process_rows));
1289 const int mp = numroc_(&n_rows,
1290 &row_block_size,
1291 &(grid->this_process_row),
1292 &iarow,
1293 &(grid->n_process_rows));
1294 ipiv.resize(mp + row_block_size);
1295
1296 pgetrf(&n_rows,
1297 &n_columns,
1298 A_loc,
1299 &submatrix_row,
1300 &submatrix_column,
1301 descriptor,
1302 ipiv.data(),
1303 &info);
1304 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgetrf", info));
1305 }
1306 state = LAPACKSupport::State::lu;
1307 property = LAPACKSupport::Property::general;
1308}
1309
1310
1311
1312template <typename NumberType>
1313void
1314ScaLAPACKMatrix<NumberType>::invert()
1315{
1316 // Check whether matrix is symmetric and save flag.
1317 // If a Cholesky factorization has been applied previously,
1318 // the original matrix was symmetric.
1319 const bool is_symmetric = (property == LAPACKSupport::symmetric ||
1320 state == LAPACKSupport::State::cholesky);
1321
1322 // Check whether matrix is triangular and is in an unfactorized state.
1323 const bool is_triangular = (property == LAPACKSupport::upper_triangular ||
1324 property == LAPACKSupport::lower_triangular) &&
1325 (state == LAPACKSupport::State::matrix ||
1326 state == LAPACKSupport::State::inverse_matrix);
1327
1328 if (is_triangular)
1329 {
1330 if (grid->mpi_process_is_active)
1331 {
1332 const char uploTriangular =
1333 property == LAPACKSupport::upper_triangular ? 'U' : 'L';
1334 const char diag = 'N';
1335 int info = 0;
1336 NumberType *A_loc = this->values.data();
1337 ptrtri(&uploTriangular,
1338 &diag,
1339 &n_columns,
1340 A_loc,
1341 &submatrix_row,
1342 &submatrix_column,
1343 descriptor,
1344 &info);
1345 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("ptrtri", info));
1346 // The inversion is stored in the same part as the triangular matrix,
1347 // so we don't need to re-set the property here.
1348 }
1349 }
1350 else
1351 {
1352 // Matrix is neither in Cholesky nor LU state.
1353 // Compute the required factorizations based on the property of the
1354 // matrix.
1355 if (!(state == LAPACKSupport::State::lu ||
1356 state == LAPACKSupport::State::cholesky))
1357 {
1358 if (is_symmetric)
1359 compute_cholesky_factorization();
1360 else
1361 compute_lu_factorization();
1362 }
1363 if (grid->mpi_process_is_active)
1364 {
1365 int info = 0;
1366 NumberType *A_loc = this->values.data();
1367
1368 if (is_symmetric)
1369 {
1370 ppotri(&uplo,
1371 &n_columns,
1372 A_loc,
1373 &submatrix_row,
1374 &submatrix_column,
1375 descriptor,
1376 &info);
1377 AssertThrow(info == 0,
1378 LAPACKSupport::ExcErrorCode("ppotri", info));
1379 property = LAPACKSupport::Property::symmetric;
1380 }
1381 else
1382 {
1383 int lwork = -1, liwork = -1;
1384 work.resize(1);
1385 iwork.resize(1);
1386
1387 pgetri(&n_columns,
1388 A_loc,
1389 &submatrix_row,
1390 &submatrix_column,
1391 descriptor,
1392 ipiv.data(),
1393 work.data(),
1394 &lwork,
1395 iwork.data(),
1396 &liwork,
1397 &info);
1398
1399 AssertThrow(info == 0,
1400 LAPACKSupport::ExcErrorCode("pgetri", info));
1401 lwork = static_cast<int>(work[0]);
1402 liwork = iwork[0];
1403 work.resize(lwork);
1404 iwork.resize(liwork);
1405
1406 pgetri(&n_columns,
1407 A_loc,
1408 &submatrix_row,
1409 &submatrix_column,
1410 descriptor,
1411 ipiv.data(),
1412 work.data(),
1413 &lwork,
1414 iwork.data(),
1415 &liwork,
1416 &info);
1417
1418 AssertThrow(info == 0,
1419 LAPACKSupport::ExcErrorCode("pgetri", info));
1420 }
1421 }
1422 }
1423 state = LAPACKSupport::State::inverse_matrix;
1424}
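// Usage sketch (editorial, not part of the library source): invert() chooses
// the factorization itself if none has been computed yet. Assuming `A` is a
// symmetric ScaLAPACKMatrix<double> in the matrix state:
//
//   A.compute_cholesky_factorization(); // optional; invert() would do it too
//   A.invert();                         // A now holds its own inverse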
1425
1426
1427
1428template <typename NumberType>
1429std::vector<NumberType>
1430ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index(
1431 const std::pair<unsigned int, unsigned int> &index_limits,
1432 const bool compute_eigenvectors)
1433{
1434 // check validity of index limits
1435 AssertIndexRange(index_limits.first, n_rows);
1436 AssertIndexRange(index_limits.second, n_rows);
1437
1438 std::pair<unsigned int, unsigned int> idx =
1439 std::make_pair(std::min(index_limits.first, index_limits.second),
1440 std::max(index_limits.first, index_limits.second));
1441
1442 // compute all eigenvalues/eigenvectors
1443 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1444 return eigenpairs_symmetric(compute_eigenvectors);
1445 else
1446 return eigenpairs_symmetric(compute_eigenvectors, idx);
1447}
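// Usage sketch (editorial, not part of the library source): computing the five
// smallest eigenvalues and the associated eigenvectors of a symmetric matrix
// `A`. The eigenvalues are returned in ascending order; on exit the
// eigenvectors overwrite the columns of A:
//
//   const std::vector<double> ev =
//     A.eigenpairs_symmetric_by_index({0, 4}, /*compute_eigenvectors=*/true);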
1448
1449
1450
1451template <typename NumberType>
1452std::vector<NumberType>
1453ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value(
1454 const std::pair<NumberType, NumberType> &value_limits,
1455 const bool compute_eigenvectors)
1456{
1457 Assert(!std::isnan(value_limits.first),
1458 ExcMessage("value_limits.first is NaN"));
1459 Assert(!std::isnan(value_limits.second),
1460 ExcMessage("value_limits.second is NaN"));
1461
1462 std::pair<unsigned int, unsigned int> indices =
1463 std::make_pair(numbers::invalid_unsigned_int,
1464 numbers::invalid_unsigned_int);
1465
1466 return eigenpairs_symmetric(compute_eigenvectors, indices, value_limits);
1467}
1468
1469
1470
1471template <typename NumberType>
1472std::vector<NumberType>
1473ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(
1474 const bool compute_eigenvectors,
1475 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1476 const std::pair<NumberType, NumberType> & eigenvalue_limits)
1477{
1478 Assert(state == LAPACKSupport::matrix,
1479 ExcMessage(
1480 "Matrix has to be in Matrix state before calling this function."));
1481 Assert(property == LAPACKSupport::symmetric,
1482 ExcMessage("Matrix has to be symmetric for this operation."));
1483
1484 std::lock_guard<std::mutex> lock(mutex);
1485
1486 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1487 std::isnan(eigenvalue_limits.second)) ?
1488 false :
1489 true;
1490 const bool use_indices =
1491 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1492 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1493 false :
1494 true;
1495
1496 Assert(
1497 !(use_values && use_indices),
1498 ExcMessage(
1499 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1500
1501 // if computation of eigenvectors is not required use a sufficiently small
1502 // distributed matrix
1503 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1504 compute_eigenvectors ?
1505 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1506 grid,
1507 row_block_size) :
1508 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1509 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1510
1511 eigenvectors->property = property;
1512 // number of eigenvalues to be returned from psyevx; upon successful exit ev
1513 // contains the m selected eigenvalues in ascending order; set to all
1514 // eigenvalues in case we will be using psyev.
1515 int m = n_rows;
1516 std::vector<NumberType> ev(n_rows);
1517
1518 if (grid->mpi_process_is_active)
1519 {
1520 int info = 0;
1521 /*
1522 * for jobz==N only eigenvalues are computed, for jobz='V' also the
1523 * eigenvectors of the matrix are computed
1524 */
1525 char jobz = compute_eigenvectors ? 'V' : 'N';
1526 char range = 'A';
1527 // default value is to compute all eigenvalues and optionally eigenvectors
1528 bool all_eigenpairs = true;
1529 NumberType vl = NumberType(), vu = NumberType();
1530 int il = 1, iu = 1;
1531 // number of eigenvectors to be returned;
1532 // upon successful exit the first m=nz columns contain the selected
1533 // eigenvectors (only if jobz=='V')
1534 int nz = 0;
1535 NumberType abstol = NumberType();
1536
1537 // orfac decides which eigenvectors should be reorthogonalized
1538 // see
1539 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1540 // for explanation; to keep it simple, no reorthogonalization will be done
1541 // by setting orfac to 0
1542 NumberType orfac = 0;
1543 // contains the indices of eigenvectors that failed to converge
1544 std::vector<int> ifail;
1545 // This array contains indices of eigenvectors corresponding to
1546 // a cluster of eigenvalues that could not be reorthogonalized
1547 // due to insufficient workspace
1548 // see
1549 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1550 // for explanation
1551 std::vector<int> iclustr;
1552 // This array contains the gap between eigenvalues whose
1553 // eigenvectors could not be reorthogonalized.
1554 // see
1555 // http://www.netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1556 // for explanation
1557 std::vector<NumberType> gap(n_local_rows * n_local_columns);
1558
1559 // index range for eigenvalues is not specified
1560 if (!use_indices)
1561 {
1562 // interval for eigenvalues is not specified and consequently all
1563 // eigenvalues/eigenpairs will be computed
1564 if (!use_values)
1565 {
1566 range = 'A';
1567 all_eigenpairs = true;
1568 }
1569 else
1570 {
1571 range = 'V';
1572 all_eigenpairs = false;
1573 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1574 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1575 }
1576 }
1577 else
1578 {
1579 range = 'I';
1580 all_eigenpairs = false;
1581 // as Fortran starts counting/indexing from 1 unlike C/C++, where it
1582 // starts from 0
1583 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1584 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1585 }
1586 NumberType *A_loc = this->values.data();
1587 /*
1588 * by setting lwork to -1 a workspace query for optimal length of work is
1589 * performed
1590 */
1591 int lwork = -1;
1592 int liwork = -1;
1593 NumberType *eigenvectors_loc =
1594 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1595 /*
1596 * According to the official "documentation" found on the internet
1597 * (aka source file ppsyevx.f [1]) the work array has to have a
1598 * minimal size of max(3, lwork). Because we query for optimal size
1599 * (lwork == -1) we have to guarantee at least three doubles. The
1600 * necessary size of iwork is not specified, so let's use three as
1601 * well.
1602 * [1]
1603 * https://netlib.org/scalapack/explore-html/df/d1a/pdsyevx_8f_source.html
1604 */
1605 work.resize(3);
1606 iwork.resize(3);
1607
1608 if (all_eigenpairs)
1609 {
1610 psyev(&jobz,
1611 &uplo,
1612 &n_rows,
1613 A_loc,
1614 &submatrix_row,
1615 &submatrix_column,
1616 descriptor,
1617 ev.data(),
1618 eigenvectors_loc,
1619 &eigenvectors->submatrix_row,
1620 &eigenvectors->submatrix_column,
1621 eigenvectors->descriptor,
1622 work.data(),
1623 &lwork,
1624 &info);
1625 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1626 }
1627 else
1628 {
1629 char cmach = compute_eigenvectors ? 'U' : 'S';
1630 plamch(&(this->grid->blacs_context), &cmach, abstol);
1631 abstol *= 2;
1632 ifail.resize(n_rows);
1633 iclustr.resize(2 * grid->n_process_rows * grid->n_process_columns);
1634 gap.resize(grid->n_process_rows * grid->n_process_columns);
1635
1636 psyevx(&jobz,
1637 &range,
1638 &uplo,
1639 &n_rows,
1640 A_loc,
1641 &submatrix_row,
1642 &submatrix_column,
1643 descriptor,
1644 &vl,
1645 &vu,
1646 &il,
1647 &iu,
1648 &abstol,
1649 &m,
1650 &nz,
1651 ev.data(),
1652 &orfac,
1653 eigenvectors_loc,
1654 &eigenvectors->submatrix_row,
1655 &eigenvectors->submatrix_column,
1656 eigenvectors->descriptor,
1657 work.data(),
1658 &lwork,
1659 iwork.data(),
1660 &liwork,
1661 ifail.data(),
1662 iclustr.data(),
1663 gap.data(),
1664 &info);
1665 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1666 }
1667 lwork = static_cast<int>(work[0]);
1668 work.resize(lwork);
1669
1670 if (all_eigenpairs)
1671 {
1672 psyev(&jobz,
1673 &uplo,
1674 &n_rows,
1675 A_loc,
1676 &submatrix_row,
1677 &submatrix_column,
1678 descriptor,
1679 ev.data(),
1680 eigenvectors_loc,
1681 &eigenvectors->submatrix_row,
1682 &eigenvectors->submatrix_column,
1683 eigenvectors->descriptor,
1684 work.data(),
1685 &lwork,
1686 &info);
1687
1688 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyev", info));
1689 }
1690 else
1691 {
1692 liwork = iwork[0];
1693 AssertThrow(liwork > 0, ExcInternalError());
1694 iwork.resize(liwork);
1695
1696 psyevx(&jobz,
1697 &range,
1698 &uplo,
1699 &n_rows,
1700 A_loc,
1701 &submatrix_row,
1702 &submatrix_column,
1703 descriptor,
1704 &vl,
1705 &vu,
1706 &il,
1707 &iu,
1708 &abstol,
1709 &m,
1710 &nz,
1711 ev.data(),
1712 &orfac,
1713 eigenvectors_loc,
1714 &eigenvectors->submatrix_row,
1715 &eigenvectors->submatrix_column,
1716 eigenvectors->descriptor,
1717 work.data(),
1718 &lwork,
1719 iwork.data(),
1720 &liwork,
1721 ifail.data(),
1722 iclustr.data(),
1723 gap.data(),
1724 &info);
1725
1726 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevx", info));
1727 }
1728 // if eigenvectors are queried copy eigenvectors to original matrix
1729 // as the temporary matrix eigenvectors has identical dimensions and
1730 // block-cyclic distribution we simply swap the local array
1731 if (compute_eigenvectors)
1732 this->values.swap(eigenvectors->values);
1733
1734 // adapt the size of ev to fit m upon return
1735 while (ev.size() > static_cast<size_type>(m))
1736 ev.pop_back();
1737 }
1738 /*
1739 * send number of computed eigenvalues to inactive processes
1740 */
1741 grid->send_to_inactive(&m, 1);
1742
1743 /*
1744 * inactive processes have to resize array of eigenvalues
1745 */
1746 if (!grid->mpi_process_is_active)
1747 ev.resize(m);
1748 /*
1749 * send the eigenvalues to processors not being part of the process grid
1750 */
1751 grid->send_to_inactive(ev.data(), ev.size());
1752
1753 /*
1754 * if only eigenvalues are queried the content of the matrix will be destroyed
1755 * if the eigenpairs are queried matrix A on exit stores the eigenvectors in
1756 * the columns
1757 */
1758 if (compute_eigenvectors)
1759 {
1760 property = LAPACKSupport::Property::general;
1761 state = LAPACKSupport::eigenvalues;
1762 }
1763 else
1764 state = LAPACKSupport::unusable;
1765
1766 return ev;
1767}
1768
1769
1770
1771template <typename NumberType>
1772std::vector<NumberType>
1773ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_index_MRRR(
1774 const std::pair<unsigned int, unsigned int> &index_limits,
1775 const bool compute_eigenvectors)
1776{
1777 // Check validity of index limits.
1778 AssertIndexRange(index_limits.first, static_cast<unsigned int>(n_rows));
1779 AssertIndexRange(index_limits.second, static_cast<unsigned int>(n_rows));
1780
1781 const std::pair<unsigned int, unsigned int> idx =
1782 std::make_pair(std::min(index_limits.first, index_limits.second),
1783 std::max(index_limits.first, index_limits.second));
1784
1785 // Compute all eigenvalues/eigenvectors.
1786 if (idx.first == 0 && idx.second == static_cast<unsigned int>(n_rows - 1))
1787 return eigenpairs_symmetric_MRRR(compute_eigenvectors);
1788 else
1789 return eigenpairs_symmetric_MRRR(compute_eigenvectors, idx);
1790}
1791
1792
1793
1794template <typename NumberType>
1795std::vector<NumberType>
1796ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_by_value_MRRR(
1797 const std::pair<NumberType, NumberType> &value_limits,
1798 const bool compute_eigenvectors)
1799{
1800 AssertIsFinite(value_limits.first);
1801 AssertIsFinite(value_limits.second);
1802
1803 const std::pair<unsigned int, unsigned int> indices =
1804 std::make_pair(numbers::invalid_unsigned_int,
1805 numbers::invalid_unsigned_int);
1806
1807 return eigenpairs_symmetric_MRRR(compute_eigenvectors, indices, value_limits);
1808}
1809
1810
1811
1812template <typename NumberType>
1813std::vector<NumberType>
1814ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric_MRRR(
1815 const bool compute_eigenvectors,
1816 const std::pair<unsigned int, unsigned int> &eigenvalue_idx,
1817 const std::pair<NumberType, NumberType> & eigenvalue_limits)
1818{
1819 Assert(state == LAPACKSupport::matrix,
1820 ExcMessage(
1821 "Matrix has to be in Matrix state before calling this function."));
1822 Assert(property == LAPACKSupport::symmetric,
1823 ExcMessage("Matrix has to be symmetric for this operation."));
1824
1825 std::lock_guard<std::mutex> lock(mutex);
1826
1827 const bool use_values = (std::isnan(eigenvalue_limits.first) ||
1828 std::isnan(eigenvalue_limits.second)) ?
1829 false :
1830 true;
1831 const bool use_indices =
1832 ((eigenvalue_idx.first == numbers::invalid_unsigned_int) ||
1833 (eigenvalue_idx.second == numbers::invalid_unsigned_int)) ?
1834 false :
1835 true;
1836
1837 Assert(
1838 !(use_values && use_indices),
1839 ExcMessage(
1840 "Prescribing both the index and value range for the eigenvalues is ambiguous"));
1841
1842 // If computation of eigenvectors is not required, use a sufficiently small
1843 // distributed matrix.
1844 std::unique_ptr<ScaLAPACKMatrix<NumberType>> eigenvectors =
1845 compute_eigenvectors ?
1846 std::make_unique<ScaLAPACKMatrix<NumberType>>(n_rows,
1847 grid,
1848 row_block_size) :
1849 std::make_unique<ScaLAPACKMatrix<NumberType>>(
1850 grid->n_process_rows, grid->n_process_columns, grid, 1, 1);
1851
1852 eigenvectors->property = property;
1853 // Number of eigenvalues to be returned from psyevr; upon successful exit ev
1854 // contains the m selected eigenvalues in ascending order.
1855 int m = n_rows;
1856 std::vector<NumberType> ev(n_rows);
1857
1858 // Number of eigenvectors to be returned;
1859 // Upon successful exit the first m=nz columns contain the selected
1860 // eigenvectors (only if jobz=='V').
1861 int nz = 0;
1862
1863 if (grid->mpi_process_is_active)
1864 {
1865 int info = 0;
1866 /*
1867 * For jobz==N only eigenvalues are computed, for jobz='V' also the
1868 * eigenvectors of the matrix are computed.
1869 */
1870 char jobz = compute_eigenvectors ? 'V' : 'N';
1871 // Default value is to compute all eigenvalues and optionally
1872 // eigenvectors.
1873 char range = 'A';
1874 NumberType vl = NumberType(), vu = NumberType();
1875 int il = 1, iu = 1;
1876
1877 // Index range for eigenvalues is not specified.
1878 if (!use_indices)
1879 {
1880 // Interval for eigenvalues is not specified and consequently all
1881 // eigenvalues/eigenpairs will be computed.
1882 if (!use_values)
1883 {
1884 range = 'A';
1885 }
1886 else
1887 {
1888 range = 'V';
1889 vl = std::min(eigenvalue_limits.first, eigenvalue_limits.second);
1890 vu = std::max(eigenvalue_limits.first, eigenvalue_limits.second);
1891 }
1892 }
1893 else
1894 {
1895 range = 'I';
1896 // As Fortran starts counting/indexing from 1 unlike C/C++, where it
1897 // starts from 0.
1898 il = std::min(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1899 iu = std::max(eigenvalue_idx.first, eigenvalue_idx.second) + 1;
1900 }
1901 NumberType *A_loc = this->values.data();
1902
1903 /*
1904 * By setting lwork to -1 a workspace query for optimal length of work is
1905 * performed.
1906 */
1907 int lwork = -1;
1908 int liwork = -1;
1909 NumberType *eigenvectors_loc =
1910 (compute_eigenvectors ? eigenvectors->values.data() : nullptr);
1911 work.resize(1);
1912 iwork.resize(1);
1913
1914 psyevr(&jobz,
1915 &range,
1916 &uplo,
1917 &n_rows,
1918 A_loc,
1919 &submatrix_row,
1920 &submatrix_column,
1921 descriptor,
1922 &vl,
1923 &vu,
1924 &il,
1925 &iu,
1926 &m,
1927 &nz,
1928 ev.data(),
1929 eigenvectors_loc,
1930 &eigenvectors->submatrix_row,
1931 &eigenvectors->submatrix_column,
1932 eigenvectors->descriptor,
1933 work.data(),
1934 &lwork,
1935 iwork.data(),
1936 &liwork,
1937 &info);
1938
1939 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1940
1941 lwork = static_cast<int>(work[0]);
1942 work.resize(lwork);
1943 liwork = iwork[0];
1944 iwork.resize(liwork);
1945
1946 psyevr(&jobz,
1947 &range,
1948 &uplo,
1949 &n_rows,
1950 A_loc,
1951 &submatrix_row,
1952 &submatrix_column,
1953 descriptor,
1954 &vl,
1955 &vu,
1956 &il,
1957 &iu,
1958 &m,
1959 &nz,
1960 ev.data(),
1961 eigenvectors_loc,
1962 &eigenvectors->submatrix_row,
1963 &eigenvectors->submatrix_column,
1964 eigenvectors->descriptor,
1965 work.data(),
1966 &lwork,
1967 iwork.data(),
1968 &liwork,
1969 &info);
1970
1971 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("psyevr", info));
1972
1973 if (compute_eigenvectors)
1974 AssertThrow(
1975 m == nz,
1976 ExcMessage(
1977 "psyevr failed to compute all eigenvectors for the selected eigenvalues"));
1978
1979 // If eigenvectors are queried, copy eigenvectors to original matrix.
1980 // As the temporary matrix eigenvectors has identical dimensions and
1981 // block-cyclic distribution we simply swap the local array.
1982 if (compute_eigenvectors)
1983 this->values.swap(eigenvectors->values);
1984
1985 // Adapt the size of ev to fit m upon return.
1986 while (ev.size() > static_cast<size_type>(m))
1987 ev.pop_back();
1988 }
1989 /*
1990 * Send number of computed eigenvalues to inactive processes.
1991 */
1992 grid->send_to_inactive(&m, 1);
1993
1994 /*
1995 * Inactive processes have to resize array of eigenvalues.
1996 */
1997 if (!grid->mpi_process_is_active)
1998 ev.resize(m);
1999 /*
2000 * Send the eigenvalues to processors not being part of the process grid.
2001 */
2002 grid->send_to_inactive(ev.data(), ev.size());
2003
2004 /*
2005 * If only eigenvalues are queried, the content of the matrix will be
2006 * destroyed. If the eigenpairs are queried, matrix A on exit stores the
2007 * eigenvectors in the columns.
2008 */
2009 if (compute_eigenvectors)
2010 {
2011 property = LAPACKSupport::Property::general;
2012 state = LAPACKSupport::eigenvalues;
2013 }
2014 else
2015 state = LAPACKSupport::unusable;
2016
2017 return ev;
2018}
2019
2020
2021
2022template <typename NumberType>
2023std::vector<NumberType>
2024ScaLAPACKMatrix<NumberType>::compute_SVD(ScaLAPACKMatrix<NumberType> *U,
2025 ScaLAPACKMatrix<NumberType> *VT)
2026{
2027 Assert(state == LAPACKSupport::matrix,
2028 ExcMessage(
2029 "Matrix has to be in Matrix state before calling this function."));
2030 Assert(row_block_size == column_block_size,
2031 ExcDimensionMismatch(row_block_size, column_block_size));
2032
2033 const bool left_singular_vectors = (U != nullptr) ? true : false;
2034 const bool right_singular_vectors = (VT != nullptr) ? true : false;
2035
2036 if (left_singular_vectors)
2037 {
2038 Assert(n_rows == U->n_rows, ExcDimensionMismatch(n_rows, U->n_rows));
2039 Assert(U->n_rows == U->n_columns,
2040 ExcDimensionMismatch(U->n_rows, U->n_columns));
2041 Assert(row_block_size == U->row_block_size,
2042 ExcDimensionMismatch(row_block_size, U->row_block_size));
2043 Assert(column_block_size == U->column_block_size,
2044 ExcDimensionMismatch(column_block_size, U->column_block_size));
2045 Assert(grid->blacs_context == U->grid->blacs_context,
2046 ExcDimensionMismatch(grid->blacs_context, U->grid->blacs_context));
2047 }
2048 if (right_singular_vectors)
2049 {
2050 Assert(n_columns == VT->n_rows,
2051 ExcDimensionMismatch(n_columns, VT->n_rows));
2052 Assert(VT->n_rows == VT->n_columns,
2053 ExcDimensionMismatch(VT->n_rows, VT->n_columns));
2054 Assert(row_block_size == VT->row_block_size,
2055 ExcDimensionMismatch(row_block_size, VT->row_block_size));
2056 Assert(column_block_size == VT->column_block_size,
2057 ExcDimensionMismatch(column_block_size, VT->column_block_size));
2058 Assert(grid->blacs_context == VT->grid->blacs_context,
2059 ExcDimensionMismatch(grid->blacs_context,
2060 VT->grid->blacs_context));
2061 }
2062 std::lock_guard<std::mutex> lock(mutex);
2063
2064 std::vector<NumberType> sv(std::min(n_rows, n_columns));
2065
2066 if (grid->mpi_process_is_active)
2067 {
2068 char jobu = left_singular_vectors ? 'V' : 'N';
2069 char jobvt = right_singular_vectors ? 'V' : 'N';
2070 NumberType *A_loc = this->values.data();
2071 NumberType *U_loc = left_singular_vectors ? U->values.data() : nullptr;
2072 NumberType *VT_loc = right_singular_vectors ? VT->values.data() : nullptr;
2073 int info = 0;
2074 /*
2075 * by setting lwork to -1 a workspace query for optimal length of work is
2076 * performed
2077 */
2078 int lwork = -1;
2079 work.resize(1);
2080
2081 pgesvd(&jobu,
2082 &jobvt,
2083 &n_rows,
2084 &n_columns,
2085 A_loc,
2086 &submatrix_row,
2087 &submatrix_column,
2088 descriptor,
2089 &*sv.begin(),
2090 U_loc,
2091 &U->submatrix_row,
2092 &U->submatrix_column,
2093 U->descriptor,
2094 VT_loc,
2095 &VT->submatrix_row,
2096 &VT->submatrix_column,
2097 VT->descriptor,
2098 work.data(),
2099 &lwork,
2100 &info);
2101 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2102
2103 lwork = static_cast<int>(work[0]);
2104 work.resize(lwork);
2105
2106 pgesvd(&jobu,
2107 &jobvt,
2108 &n_rows,
2109 &n_columns,
2110 A_loc,
2111 &submatrix_row,
2112 &submatrix_column,
2113 descriptor,
2114 &*sv.begin(),
2115 U_loc,
2116 &U->submatrix_row,
2117 &U->submatrix_column,
2118 U->descriptor,
2119 VT_loc,
2120 &VT->submatrix_row,
2121 &VT->submatrix_column,
2122 VT->descriptor,
2123 work.data(),
2124 &lwork,
2125 &info);
2126 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgesvd", info));
2127 }
2128
2129 /*
2130 * send the singular values to processors not being part of the process grid
2131 */
2132 grid->send_to_inactive(sv.data(), sv.size());
2133
2134 property = LAPACKSupport::Property::general;
2135 state = LAPACKSupport::State::unusable;
2136
2137 return sv;
2138}
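/*
 * Illustrative usage sketch (not part of scalapack.cc): a full singular
 * value decomposition A = U * Sigma * VT with the routine above. Names and
 * sizes are hypothetical; U must be n_rows x n_rows and VT
 * n_columns x n_columns with the same block-cyclic distribution as A, and
 * the content of A is destroyed by pgesvd.
 *
 *   ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);
 *   ScaLAPACKMatrix<double> U(m, m, grid, 32, 32);
 *   ScaLAPACKMatrix<double> VT(n, n, grid, 32, 32);
 *   // ... fill A ...
 *   const std::vector<double> sigma = A.compute_SVD(&U, &VT);
 *   // sigma holds the min(m,n) singular values in descending order
 */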
2139
2140
2141
2142template <typename NumberType>
2143void
2144 ScaLAPACKMatrix<NumberType>::least_squares(ScaLAPACKMatrix<NumberType> &B,
2145 const bool transpose)
2146{
2147 Assert(grid == B.grid,
2148 ExcMessage("The matrices A and B need to have the same process grid"));
2149 Assert(state == LAPACKSupport::matrix,
2150 ExcMessage(
2151 "Matrix has to be in Matrix state before calling this function."));
2152 Assert(B.state == LAPACKSupport::matrix,
2153 ExcMessage(
2154 "Matrix B has to be in Matrix state before calling this function."));
2155
2156 if (transpose)
2157 {
2158 Assert(n_columns == B.n_rows, ExcDimensionMismatch(n_columns, B.n_rows));
2159 }
2160 else
2161 {
2162 Assert(n_rows == B.n_rows, ExcDimensionMismatch(n_rows, B.n_rows));
2163 }
2164
2165 // see
2166 // https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lgels.htm
2167 Assert(row_block_size == column_block_size,
2168 ExcMessage(
2169 "Use identical block sizes for rows and columns of matrix A"));
2170 Assert(B.row_block_size == B.column_block_size,
2171 ExcMessage(
2172 "Use identical block sizes for rows and columns of matrix B"));
2173 Assert(row_block_size == B.row_block_size,
2174 ExcMessage(
2175 "Use identical block-cyclic distribution for matrices A and B"));
2176
2177 std::lock_guard<std::mutex> lock(mutex);
2178
2179 if (grid->mpi_process_is_active)
2180 {
2181 char trans = transpose ? 'T' : 'N';
2182 NumberType *A_loc = this->values.data();
2183 NumberType *B_loc = B.values.data();
2184 int info = 0;
2185 /*
2186 * by setting lwork to -1 a workspace query for optimal length of work is
2187 * performed
2188 */
2189 int lwork = -1;
2190 work.resize(1);
2191
2192 pgels(&trans,
2193 &n_rows,
2194 &n_columns,
2195 &B.n_columns,
2196 A_loc,
2197 &submatrix_row,
2198 &submatrix_column,
2199 descriptor,
2200 B_loc,
2201 &B.submatrix_row,
2202 &B.submatrix_column,
2203 B.descriptor,
2204 work.data(),
2205 &lwork,
2206 &info);
2207 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2208
2209 lwork = static_cast<int>(work[0]);
2210 work.resize(lwork);
2211
2212 pgels(&trans,
2213 &n_rows,
2214 &n_columns,
2215 &B.n_columns,
2216 A_loc,
2217 &submatrix_row,
2218 &submatrix_column,
2219 descriptor,
2220 B_loc,
2221 &B.submatrix_row,
2222 &B.submatrix_column,
2223 B.descriptor,
2224 work.data(),
2225 &lwork,
2226 &info);
2227 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pgels", info));
2228 }
2229 state = LAPACKSupport::State::unusable;
2230}
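/*
 * Illustrative usage sketch (not part of scalapack.cc): solving an
 * overdetermined system in the least-squares sense with the routine above.
 * Names are hypothetical; B must live on the same process grid and use the
 * same block-cyclic distribution as A, and both matrices are overwritten.
 * For transpose == false and m >= n, the first n rows of B contain the
 * solution on exit (standard p?gels behavior).
 *
 *   ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);      // m >= n
 *   ScaLAPACKMatrix<double> B(m, nrhs, grid, 32, 32);   // right-hand sides
 *   // ... fill A and B ...
 *   A.least_squares(B);   // minimize ||A x - b|| for every column b of B
 */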
2231
2232
2233
2234template <typename NumberType>
2235unsigned int
2236 ScaLAPACKMatrix<NumberType>::pseudoinverse(const NumberType ratio)
2237 {
2238 Assert(state == LAPACKSupport::matrix,
2239 ExcMessage(
2240 "Matrix has to be in Matrix state before calling this function."));
2241 Assert(row_block_size == column_block_size,
2242 ExcMessage(
2243 "Use identical block sizes for rows and columns of matrix A"));
2244 Assert(
2245 ratio > 0. && ratio < 1.,
2246 ExcMessage(
2247 "input parameter ratio has to be larger than zero and smaller than 1"));
2248
2249 ScaLAPACKMatrix<NumberType> U(n_rows,
2250 n_rows,
2251 grid,
2252 row_block_size,
2253 row_block_size,
2254 LAPACKSupport::Property::general);
2255 ScaLAPACKMatrix<NumberType> VT(n_columns,
2256 n_columns,
2257 grid,
2258 row_block_size,
2259 row_block_size,
2260 LAPACKSupport::Property::general);
2261 std::vector<NumberType> sv = this->compute_SVD(&U, &VT);
2262 AssertThrow(sv[0] > std::numeric_limits<NumberType>::min(),
2263 ExcMessage("Matrix has rank 0"));
2264
2265 // Get the number of singular values fulfilling sv[i] > sv[0] * ratio.
2266 // Obviously, the 0-th element already satisfies sv[0] > sv[0] * ratio. The
2267 // singular values in sv are ordered by descending value, so we break out of
2268 // the loop once a singular value is smaller than sv[0] * ratio.
2269 unsigned int n_sv = 1;
2270 std::vector<NumberType> inv_sigma;
2271 inv_sigma.push_back(1 / sv[0]);
2272
2273 for (unsigned int i = 1; i < sv.size(); ++i)
2274 if (sv[i] > sv[0] * ratio)
2275 {
2276 ++n_sv;
2277 inv_sigma.push_back(1 / sv[i]);
2278 }
2279 else
2280 break;
2281
2282 // For the matrix multiplication we use only the columns of U and rows of VT
2283 // which are associated with singular values larger than the limit. That saves
2284 // computational time for matrices with rank significantly smaller than
2285 // min(n_rows,n_columns)
2286 ScaLAPACKMatrix<NumberType> U_R(n_rows,
2287 n_sv,
2288 grid,
2289 row_block_size,
2290 row_block_size,
2291 LAPACKSupport::Property::general);
2292 ScaLAPACKMatrix<NumberType> VT_R(n_sv,
2293 n_columns,
2294 grid,
2295 row_block_size,
2296 row_block_size,
2297 LAPACKSupport::Property::general);
2298 U.copy_to(U_R,
2299 std::make_pair(0, 0),
2300 std::make_pair(0, 0),
2301 std::make_pair(n_rows, n_sv));
2302 VT.copy_to(VT_R,
2303 std::make_pair(0, 0),
2304 std::make_pair(0, 0),
2305 std::make_pair(n_sv, n_columns));
2306
2307 VT_R.scale_rows(inv_sigma);
2308 this->reinit(n_columns,
2309 n_rows,
2310 this->grid,
2311 row_block_size,
2312 column_block_size,
2314 VT_R.mult(1, U_R, 0, *this, true, true);
2315 state = LAPACKSupport::State::inverse_matrix;
2316 return n_sv;
2317}
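/*
 * Illustrative note (not part of scalapack.cc): the function above forms
 * the truncated pseudoinverse A^+ = V_r * Sigma_r^{-1} * U_r^T, keeping
 * only the n_sv singular values with sv[i] > sv[0] * ratio. A hypothetical
 * usage:
 *
 *   ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);
 *   // ... fill A ...
 *   const unsigned int rank = A.pseudoinverse(1.e-8);
 *   // A now holds the n x m pseudoinverse; rank is the number of
 *   // singular values that were kept
 */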
2318
2319
2320
2321template <typename NumberType>
2322NumberType
2324 const NumberType a_norm) const
2325{
2326 Assert(state == LAPACKSupport::cholesky,
2327 ExcMessage(
2328 "Matrix has to be in Cholesky state before calling this function."));
2329 std::lock_guard<std::mutex> lock(mutex);
2330 NumberType rcond = 0.;
2331
2332 if (grid->mpi_process_is_active)
2333 {
2334 int liwork = n_local_rows;
2335 iwork.resize(liwork);
2336
2337 int info = 0;
2338 const NumberType *A_loc = this->values.data();
2339
2340 // by setting lwork to -1 a workspace query for optimal length of work is
2341 // performed
2342 int lwork = -1;
2343 work.resize(1);
2344 ppocon(&uplo,
2345 &n_columns,
2346 A_loc,
2347 &submatrix_row,
2348 &submatrix_column,
2349 descriptor,
2350 &a_norm,
2351 &rcond,
2352 work.data(),
2353 &lwork,
2354 iwork.data(),
2355 &liwork,
2356 &info);
2357 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2358 lwork = static_cast<int>(std::ceil(work[0]));
2359 work.resize(lwork);
2360
2361 // now the actual run:
2362 ppocon(&uplo,
2363 &n_columns,
2364 A_loc,
2365 &submatrix_row,
2366 &submatrix_column,
2367 descriptor,
2368 &a_norm,
2369 &rcond,
2370 work.data(),
2371 &lwork,
2372 iwork.data(),
2373 &liwork,
2374 &info);
2375 AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("pdpocon", info));
2376 }
2377 grid->send_to_inactive(&rcond);
2378 return rcond;
2379}
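/*
 * Illustrative usage sketch (not part of scalapack.cc): ppocon needs the
 * Cholesky factor and the l1 norm of the original matrix, so the norm has
 * to be computed before the factorization. A hypothetical call sequence:
 *
 *   const double a_norm = A.l1_norm();        // while A is still a matrix
 *   A.compute_cholesky_factorization();       // state becomes cholesky
 *   const double rcond = A.reciprocal_condition_number(a_norm);
 *   // estimated condition number: 1. / rcond
 */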
2380
2381
2382
2383template <typename NumberType>
2384NumberType
2386{
2387 const char type('O');
2388
2389 if (property == LAPACKSupport::symmetric)
2390 return norm_symmetric(type);
2391 else
2392 return norm_general(type);
2393}
2394
2395
2396
2397template <typename NumberType>
2398NumberType
2400{
2401 const char type('I');
2402
2403 if (property == LAPACKSupport::symmetric)
2404 return norm_symmetric(type);
2405 else
2406 return norm_general(type);
2407}
2408
2409
2410
2411template <typename NumberType>
2412NumberType
2414{
2415 const char type('F');
2416
2417 if (property == LAPACKSupport::symmetric)
2418 return norm_symmetric(type);
2419 else
2420 return norm_general(type);
2421}
2422
2423
2424
2425template <typename NumberType>
2426NumberType
2428{
2429 Assert(state == LAPACKSupport::matrix ||
2430 state == LAPACKSupport::inverse_matrix,
2431 ExcMessage("norms can be called in matrix state only."));
2432 std::lock_guard<std::mutex> lock(mutex);
2433 NumberType res = 0.;
2434
2435 if (grid->mpi_process_is_active)
2436 {
2437 const int iarow = indxg2p_(&submatrix_row,
2438 &row_block_size,
2439 &(grid->this_process_row),
2440 &first_process_row,
2441 &(grid->n_process_rows));
2442 const int iacol = indxg2p_(&submatrix_column,
2443 &column_block_size,
2444 &(grid->this_process_column),
2445 &first_process_column,
2446 &(grid->n_process_columns));
2447 const int mp0 = numroc_(&n_rows,
2448 &row_block_size,
2449 &(grid->this_process_row),
2450 &iarow,
2451 &(grid->n_process_rows));
2452 const int nq0 = numroc_(&n_columns,
2453 &column_block_size,
2454 &(grid->this_process_column),
2455 &iacol,
2456 &(grid->n_process_columns));
2457
2458 // type='M': compute largest absolute value
2459 // type='F' || type='E': compute Frobenius norm
2460 // type='O' || type='1': compute one norm; type='I': compute infinity norm
2461 int lwork = 0; // for type == 'M' || type == 'F' || type == 'E'
2462 if (type == 'O' || type == '1')
2463 lwork = nq0;
2464 else if (type == 'I')
2465 lwork = mp0;
2466
2467 work.resize(lwork);
2468 const NumberType *A_loc = this->values.begin();
2469 res = plange(&type,
2470 &n_rows,
2471 &n_columns,
2472 A_loc,
2473 &submatrix_row,
2474 &submatrix_column,
2475 descriptor,
2476 work.data());
2477 }
2478 grid->send_to_inactive(&res);
2479 return res;
2480}
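/*
 * Note (editorial, not part of scalapack.cc): the character passed to
 * plange/plansy selects the norm -- 'M' largest absolute value, 'O'/'1'
 * one norm, 'I' infinity norm, 'F'/'E' Frobenius norm -- which is why
 * l1_norm(), linfty_norm() and frobenius_norm() above simply dispatch with
 * 'O', 'I' and 'F', respectively.
 */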
2481
2482
2483
2484template <typename NumberType>
2485NumberType
2487{
2488 Assert(state == LAPACKSupport::matrix ||
2489 state == LAPACKSupport::inverse_matrix,
2490 ExcMessage("norms can be called in matrix state only."));
2491 Assert(property == LAPACKSupport::symmetric,
2492 ExcMessage("Matrix has to be symmetric for this operation."));
2493 std::lock_guard<std::mutex> lock(mutex);
2494 NumberType res = 0.;
2495
2496 if (grid->mpi_process_is_active)
2497 {
2498 // int IROFFA = MOD( IA-1, MB_A )
2499 // int ICOFFA = MOD( JA-1, NB_A )
2500 const int lcm =
2501 ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
2502 const int v2 = lcm / (grid->n_process_rows);
2503
2504 const int IAROW = indxg2p_(&submatrix_row,
2505 &row_block_size,
2506 &(grid->this_process_row),
2507 &first_process_row,
2508 &(grid->n_process_rows));
2509 const int IACOL = indxg2p_(&submatrix_column,
2510 &column_block_size,
2511 &(grid->this_process_column),
2512 &first_process_column,
2513 &(grid->n_process_columns));
2514 const int Np0 = numroc_(&n_columns /*+IROFFA*/,
2515 &row_block_size,
2516 &(grid->this_process_row),
2517 &IAROW,
2518 &(grid->n_process_rows));
2519 const int Nq0 = numroc_(&n_columns /*+ICOFFA*/,
2520 &column_block_size,
2521 &(grid->this_process_column),
2522 &IACOL,
2523 &(grid->n_process_columns));
2524
2525 const int v1 = iceil_(&Np0, &row_block_size);
2526 const int ldw = (n_local_rows == n_local_columns) ?
2527 0 :
2528 row_block_size * iceil_(&v1, &v2);
2529
2530 const int lwork =
2531 (type == 'M' || type == 'F' || type == 'E') ? 0 : 2 * Nq0 + Np0 + ldw;
2532 work.resize(lwork);
2533 const NumberType *A_loc = this->values.begin();
2534 res = plansy(&type,
2535 &uplo,
2536 &n_columns,
2537 A_loc,
2538 &submatrix_row,
2539 &submatrix_column,
2540 descriptor,
2541 work.data());
2542 }
2543 grid->send_to_inactive(&res);
2544 return res;
2545}
2546
2547
2548
2549# ifdef DEAL_II_WITH_HDF5
2550namespace internal
2551{
2552 namespace
2553 {
2554 void
2555 create_HDF5_state_enum_id(hid_t &state_enum_id)
2556 {
2557 // create HDF5 enum type for LAPACKSupport::State
2558 LAPACKSupport::State val;
2559 state_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::State));
2560 val = LAPACKSupport::State::cholesky;
2561 herr_t status = H5Tenum_insert(state_enum_id, "cholesky", &val);
2562 AssertThrow(status >= 0, ExcInternalError());
2563 val = LAPACKSupport::State::eigenvalues;
2564 status = H5Tenum_insert(state_enum_id, "eigenvalues", &val);
2565 AssertThrow(status >= 0, ExcInternalError());
2566 val = LAPACKSupport::State::inverse_matrix;
2567 status = H5Tenum_insert(state_enum_id, "inverse_matrix", &val);
2568 AssertThrow(status >= 0, ExcInternalError());
2569 val = LAPACKSupport::State::inverse_svd;
2570 status = H5Tenum_insert(state_enum_id, "inverse_svd", &val);
2571 AssertThrow(status >= 0, ExcInternalError());
2572 val = LAPACKSupport::State::lu;
2573 status = H5Tenum_insert(state_enum_id, "lu", &val);
2574 AssertThrow(status >= 0, ExcInternalError());
2575 val = LAPACKSupport::State::matrix;
2576 status = H5Tenum_insert(state_enum_id, "matrix", &val);
2577 AssertThrow(status >= 0, ExcInternalError());
2578 val = LAPACKSupport::State::svd;
2579 status = H5Tenum_insert(state_enum_id, "svd", &val);
2580 AssertThrow(status >= 0, ExcInternalError());
2581 val = LAPACKSupport::State::unusable;
2582 status = H5Tenum_insert(state_enum_id, "unusable", &val);
2583 AssertThrow(status >= 0, ExcInternalError());
2584 }
2585
2586 void
2587 create_HDF5_property_enum_id(hid_t &property_enum_id)
2588 {
2589 // create HDF5 enum type for LAPACKSupport::Property
2590 property_enum_id = H5Tcreate(H5T_ENUM, sizeof(LAPACKSupport::Property));
2591 LAPACKSupport::Property prop = LAPACKSupport::Property::diagonal;
2592 herr_t status = H5Tenum_insert(property_enum_id, "diagonal", &prop);
2593 AssertThrow(status >= 0, ExcInternalError());
2594 prop = LAPACKSupport::Property::general;
2595 status = H5Tenum_insert(property_enum_id, "general", &prop);
2596 AssertThrow(status >= 0, ExcInternalError());
2597 prop = LAPACKSupport::Property::hessenberg;
2598 status = H5Tenum_insert(property_enum_id, "hessenberg", &prop);
2599 AssertThrow(status >= 0, ExcInternalError());
2600 prop = LAPACKSupport::Property::lower_triangular;
2601 status = H5Tenum_insert(property_enum_id, "lower_triangular", &prop);
2602 AssertThrow(status >= 0, ExcInternalError());
2603 prop = LAPACKSupport::Property::symmetric;
2604 status = H5Tenum_insert(property_enum_id, "symmetric", &prop);
2605 AssertThrow(status >= 0, ExcInternalError());
2606 prop = LAPACKSupport::Property::upper_triangular;
2607 status = H5Tenum_insert(property_enum_id, "upper_triangular", &prop);
2608 AssertThrow(status >= 0, ExcInternalError());
2609 }
2610 } // namespace
2611} // namespace internal
2612# endif
2613
2614
2615
2616template <typename NumberType>
2617void
2618 ScaLAPACKMatrix<NumberType>::save(
2619 const std::string & filename,
2620 const std::pair<unsigned int, unsigned int> &chunk_size) const
2621{
2622# ifndef DEAL_II_WITH_HDF5
2623 (void)filename;
2624 (void)chunk_size;
2625 AssertThrow(false, ExcNeedsHDF5());
2626# else
2627
2628 std::pair<unsigned int, unsigned int> chunks_size_ = chunk_size;
2629
2630 if (chunks_size_.first == numbers::invalid_unsigned_int ||
2631 chunks_size_.second == numbers::invalid_unsigned_int)
2632 {
2633 // default: store the matrix in chunks of columns
2634 chunks_size_.first = n_rows;
2635 chunks_size_.second = 1;
2636 }
2637 Assert(chunks_size_.first > 0,
2638 ExcMessage("The row chunk size must be larger than 0."));
2639 AssertIndexRange(chunks_size_.first, n_rows + 1);
2640 Assert(chunks_size_.second > 0,
2641 ExcMessage("The column chunk size must be larger than 0."));
2642 AssertIndexRange(chunks_size_.second, n_columns + 1);
2643
2644# ifdef H5_HAVE_PARALLEL
2645 // implementation for configurations equipped with a parallel file system
2646 save_parallel(filename, chunks_size_);
2647
2648# else
2649 // implementation for configurations with no parallel file system
2650 save_serial(filename, chunks_size_);
2651
2652# endif
2653# endif
2654}
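/*
 * Illustrative usage sketch (not part of scalapack.cc): storing a matrix
 * to HDF5 and reading it back. File name and chunk sizes are hypothetical;
 * without a chunk_size argument whole columns are used as chunks.
 *
 *   A.save("matrix.h5", std::make_pair(64U, 64U));
 *   ScaLAPACKMatrix<double> B(A.m(), A.n(), grid, 32, 32);
 *   B.load("matrix.h5");   // also restores state and property
 */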
2655
2656
2657
2658template <typename NumberType>
2659void
2660 ScaLAPACKMatrix<NumberType>::save_serial(
2661 const std::string & filename,
2662 const std::pair<unsigned int, unsigned int> &chunk_size) const
2663{
2664# ifndef DEAL_II_WITH_HDF5
2665 (void)filename;
2666 (void)chunk_size;
2667 Assert(false, ExcInternalError());
2668# else
2669
2670 /*
2671 * The content of the distributed matrix is copied to a matrix using a 1x1
2672 * process grid. Therefore, one process has all the data and can write it to a
2673 * file.
2674 *
2675 * Create a 1x1 column grid which will be used to initialize
2676 * an effectively serial ScaLAPACK matrix to gather the contents from the
2677 * current object
2678 */
2679 const auto column_grid =
2680 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2681 1,
2682 1);
2683
2684 const int MB = n_rows, NB = n_columns;
2685 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2686 copy_to(tmp);
2687
2688 // the 1x1 grid has only one process and this one writes
2689 // the content of the matrix to the HDF5 file
2690 if (tmp.grid->mpi_process_is_active)
2691 {
2692 herr_t status;
2693
2694 // create a new file using default properties
2695 hid_t file_id =
2696 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
2697
2698 // modify dataset creation properties, i.e. enable chunking
2699 hsize_t chunk_dims[2];
2700 // reverse order of rows and columns as ScaLAPACK uses column-major
2701 // ordering
2702 chunk_dims[0] = chunk_size.second;
2703 chunk_dims[1] = chunk_size.first;
2704 hid_t data_property = H5Pcreate(H5P_DATASET_CREATE);
2705 status = H5Pset_chunk(data_property, 2, chunk_dims);
2706 AssertThrow(status >= 0, ExcIO());
2707
2708 // create the data space for the dataset
2709 hsize_t dims[2];
2710 // change order of rows and columns as ScaLAPACKMatrix uses column major
2711 // ordering
2712 dims[0] = n_columns;
2713 dims[1] = n_rows;
2714 hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
2715
2716 // create the dataset within the file using chunk creation properties
2717 hid_t type_id = hdf5_type_id(tmp.values.data());
2718 hid_t dataset_id = H5Dcreate2(file_id,
2719 "/matrix",
2720 type_id,
2721 dataspace_id,
2722 H5P_DEFAULT,
2723 data_property,
2724 H5P_DEFAULT);
2725
2726 // write the dataset
2727 status = H5Dwrite(
2728 dataset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, tmp.values.data());
2729 AssertThrow(status >= 0, ExcIO());
2730
2731 // create HDF5 enum type for LAPACKSupport::State and
2732 // LAPACKSupport::Property
2733 hid_t state_enum_id, property_enum_id;
2734 internal::create_HDF5_state_enum_id(state_enum_id);
2735 internal::create_HDF5_property_enum_id(property_enum_id);
2736
2737 // create the data space for the state enum
2738 hsize_t dims_state[1];
2739 dims_state[0] = 1;
2740 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2741 // create the dataset for the state enum
2742 hid_t state_enum_dataset = H5Dcreate2(file_id,
2743 "/state",
2744 state_enum_id,
2745 state_enum_dataspace,
2746 H5P_DEFAULT,
2747 H5P_DEFAULT,
2748 H5P_DEFAULT);
2749 // write the dataset for the state enum
2750 status = H5Dwrite(state_enum_dataset,
2751 state_enum_id,
2752 H5S_ALL,
2753 H5S_ALL,
2754 H5P_DEFAULT,
2755 &state);
2756 AssertThrow(status >= 0, ExcIO());
2757
2758 // create the data space for the property enum
2759 hsize_t dims_property[1];
2760 dims_property[0] = 1;
2761 hid_t property_enum_dataspace =
2762 H5Screate_simple(1, dims_property, nullptr);
2763 // create the dataset for the property enum
2764 hid_t property_enum_dataset = H5Dcreate2(file_id,
2765 "/property",
2766 property_enum_id,
2767 property_enum_dataspace,
2768 H5P_DEFAULT,
2769 H5P_DEFAULT,
2770 H5P_DEFAULT);
2771 // write the dataset for the property enum
2772 status = H5Dwrite(property_enum_dataset,
2773 property_enum_id,
2774 H5S_ALL,
2775 H5S_ALL,
2776 H5P_DEFAULT,
2777 &property);
2778 AssertThrow(status >= 0, ExcIO());
2779
2780 // end access to the datasets and release resources used by them
2781 status = H5Dclose(dataset_id);
2782 AssertThrow(status >= 0, ExcIO());
2783 status = H5Dclose(state_enum_dataset);
2784 AssertThrow(status >= 0, ExcIO());
2785 status = H5Dclose(property_enum_dataset);
2786 AssertThrow(status >= 0, ExcIO());
2787
2788 // terminate access to the data spaces
2789 status = H5Sclose(dataspace_id);
2790 AssertThrow(status >= 0, ExcIO());
2791 status = H5Sclose(state_enum_dataspace);
2792 AssertThrow(status >= 0, ExcIO());
2793 status = H5Sclose(property_enum_dataspace);
2794 AssertThrow(status >= 0, ExcIO());
2795
2796 // release enum data types
2797 status = H5Tclose(state_enum_id);
2798 AssertThrow(status >= 0, ExcIO());
2799 status = H5Tclose(property_enum_id);
2800 AssertThrow(status >= 0, ExcIO());
2801
2802 // release the creation property
2803 status = H5Pclose(data_property);
2804 AssertThrow(status >= 0, ExcIO());
2805
2806 // close the file.
2807 status = H5Fclose(file_id);
2808 AssertThrow(status >= 0, ExcIO());
2809 }
2810# endif
2811}
2812
2813
2814
2815template <typename NumberType>
2816void
2817 ScaLAPACKMatrix<NumberType>::save_parallel(
2818 const std::string & filename,
2819 const std::pair<unsigned int, unsigned int> &chunk_size) const
2820{
2821# ifndef DEAL_II_WITH_HDF5
2822 (void)filename;
2823 (void)chunk_size;
2824 Assert(false, ExcInternalError());
2825# else
2826
2827 const unsigned int n_mpi_processes(
2828 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
2829 MPI_Info info = MPI_INFO_NULL;
2830 /*
2831 * The content of the distributed matrix is copied to a matrix using a
2832 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
2833 * of the matrix, which they can write to the file
2834 *
2835 * Create a 1xn_processes column grid
2836 */
2837 const auto column_grid =
2838 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
2839 1,
2840 n_mpi_processes);
2841
2842 const int MB = n_rows;
2843 /*
2844 * If the ratio n_columns/n_mpi_processes is smaller than the column block
2845 * size of the original matrix, the redistribution and saving of the matrix
2846 * requires a significant amount of MPI communication. Therefore, it is better
2847 * to set a minimum value for the block size NB, so that only
2848 * ceil(n_columns/NB) processes are actively involved in saving the matrix.
2849 * Example: A 2*10^9 x 400 matrix is distributed on an 80 x 5 process grid
2850 * using block size 32. Instead of distributing the matrix on a 1 x 400
2851 * process grid with a row block size of 2*10^9 and a column block size of 1,
2852 * the minimum value for NB ensures that only ceil(400/32)=13 processes
2853 * write the matrix to disk.
2854 */
2855 const int NB = std::max(static_cast<int>(std::ceil(
2856 static_cast<double>(n_columns) / n_mpi_processes)),
2857 column_block_size);
2858
2859 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
2860 copy_to(tmp);
2861
2862 // get pointer to data held by the process
2863 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
2864
2865 herr_t status;
2866 // dataset dimensions
2867 hsize_t dims[2];
2868
2869 // set up file access property list with parallel I/O access
2870 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
2871 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
2872 AssertThrow(status >= 0, ExcIO());
2873
2874 // create a new file collectively and release property list identifier
2875 hid_t file_id =
2876 H5Fcreate(filename.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
2877 status = H5Pclose(plist_id);
2878 AssertThrow(status >= 0, ExcIO());
2879
2880 // As ScaLAPACK, and therefore the class ScaLAPACKMatrix, uses column-major
2881 // ordering but HDF5 uses row-major ordering, we have to reverse entries related
2882 // to columns and rows in the following. Create the dataspace for the dataset.
2883 dims[0] = tmp.n_columns;
2884 dims[1] = tmp.n_rows;
2885
2886 hid_t filespace = H5Screate_simple(2, dims, nullptr);
2887
2888 // create the chunked dataset with default properties and close filespace
2889 hsize_t chunk_dims[2];
2890 // reverse order of rows and columns as ScaLAPACK uses column-major ordering
2891 chunk_dims[0] = chunk_size.second;
2892 chunk_dims[1] = chunk_size.first;
2893 plist_id = H5Pcreate(H5P_DATASET_CREATE);
2894 H5Pset_chunk(plist_id, 2, chunk_dims);
2895 hid_t type_id = hdf5_type_id(data);
2896 hid_t dset_id = H5Dcreate2(
2897 file_id, "/matrix", type_id, filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
2898
2899 status = H5Sclose(filespace);
2900 AssertThrow(status >= 0, ExcIO());
2901
2902 status = H5Pclose(plist_id);
2903 AssertThrow(status >= 0, ExcIO());
2904
2905 // gather the number of local rows and columns from all processes
2906 std::vector<int> proc_n_local_rows(n_mpi_processes),
2907 proc_n_local_columns(n_mpi_processes);
2908 int ierr = MPI_Allgather(&tmp.n_local_rows,
2909 1,
2910 MPI_INT,
2911 proc_n_local_rows.data(),
2912 1,
2913 MPI_INT,
2914 tmp.grid->mpi_communicator);
2915 AssertThrowMPI(ierr);
2916 ierr = MPI_Allgather(&tmp.n_local_columns,
2917 1,
2918 MPI_INT,
2919 proc_n_local_columns.data(),
2920 1,
2921 MPI_INT,
2922 tmp.grid->mpi_communicator);
2923 AssertThrowMPI(ierr);
2924
2925 const unsigned int my_rank(
2926 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
2927
2928 // hyperslab selection parameters
2929 // each process defines dataset in memory and writes it to the hyperslab in
2930 // the file
2931 hsize_t count[2];
2932 count[0] = tmp.n_local_columns;
2933 count[1] = tmp.n_rows;
2934 hid_t memspace = H5Screate_simple(2, count, nullptr);
2935
2936 hsize_t offset[2] = {0};
2937 for (unsigned int i = 0; i < my_rank; ++i)
2938 offset[0] += proc_n_local_columns[i];
2939
2940 // select hyperslab in the file.
2941 filespace = H5Dget_space(dset_id);
2942 status = H5Sselect_hyperslab(
2943 filespace, H5S_SELECT_SET, offset, nullptr, count, nullptr);
2944 AssertThrow(status >= 0, ExcIO());
2945
2946 // create property list for independent dataset write
2947 plist_id = H5Pcreate(H5P_DATASET_XFER);
2948 status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_INDEPENDENT);
2949 AssertThrow(status >= 0, ExcIO());
2950
2951 // process with no data will not participate in writing to the file
2952 if (tmp.values.size() > 0)
2953 {
2954 status = H5Dwrite(dset_id, type_id, memspace, filespace, plist_id, data);
2955 AssertThrow(status >= 0, ExcIO());
2956 }
2957 // close/release sources
2958 status = H5Dclose(dset_id);
2959 AssertThrow(status >= 0, ExcIO());
2960 status = H5Sclose(filespace);
2961 AssertThrow(status >= 0, ExcIO());
2962 status = H5Sclose(memspace);
2963 AssertThrow(status >= 0, ExcIO());
2964 status = H5Pclose(plist_id);
2965 AssertThrow(status >= 0, ExcIO());
2966 status = H5Fclose(file_id);
2967 AssertThrow(status >= 0, ExcIO());
2968
2969 // before writing the state and property to file wait for
2970 // all processes to finish writing the matrix content to the file
2971 ierr = MPI_Barrier(tmp.grid->mpi_communicator);
2972 AssertThrowMPI(ierr);
2973
2974 // only root process will write state and property to the file
2975 if (tmp.grid->this_mpi_process == 0)
2976 {
2977 // open file using default properties
2978 hid_t file_id_reopen =
2979 H5Fopen(filename.c_str(), H5F_ACC_RDWR, H5P_DEFAULT);
2980
2981 // create HDF5 enum type for LAPACKSupport::State and
2982 // LAPACKSupport::Property
2983 hid_t state_enum_id, property_enum_id;
2984 internal::create_HDF5_state_enum_id(state_enum_id);
2985 internal::create_HDF5_property_enum_id(property_enum_id);
2986
2987 // create the data space for the state enum
2988 hsize_t dims_state[1];
2989 dims_state[0] = 1;
2990 hid_t state_enum_dataspace = H5Screate_simple(1, dims_state, nullptr);
2991 // create the dataset for the state enum
2992 hid_t state_enum_dataset = H5Dcreate2(file_id_reopen,
2993 "/state",
2994 state_enum_id,
2995 state_enum_dataspace,
2996 H5P_DEFAULT,
2997 H5P_DEFAULT,
2998 H5P_DEFAULT);
2999 // write the dataset for the state enum
3000 status = H5Dwrite(state_enum_dataset,
3001 state_enum_id,
3002 H5S_ALL,
3003 H5S_ALL,
3004 H5P_DEFAULT,
3005 &state);
3006 AssertThrow(status >= 0, ExcIO());
3007
3008 // create the data space for the property enum
3009 hsize_t dims_property[1];
3010 dims_property[0] = 1;
3011 hid_t property_enum_dataspace =
3012 H5Screate_simple(1, dims_property, nullptr);
3013 // create the dataset for the property enum
3014 hid_t property_enum_dataset = H5Dcreate2(file_id_reopen,
3015 "/property",
3016 property_enum_id,
3017 property_enum_dataspace,
3018 H5P_DEFAULT,
3019 H5P_DEFAULT,
3020 H5P_DEFAULT);
3021 // write the dataset for the property enum
3022 status = H5Dwrite(property_enum_dataset,
3023 property_enum_id,
3024 H5S_ALL,
3025 H5S_ALL,
3026 H5P_DEFAULT,
3027 &property);
3028 AssertThrow(status >= 0, ExcIO());
3029
3030 status = H5Dclose(state_enum_dataset);
3031 AssertThrow(status >= 0, ExcIO());
3032 status = H5Dclose(property_enum_dataset);
3033 AssertThrow(status >= 0, ExcIO());
3034 status = H5Sclose(state_enum_dataspace);
3035 AssertThrow(status >= 0, ExcIO());
3036 status = H5Sclose(property_enum_dataspace);
3037 AssertThrow(status >= 0, ExcIO());
3038 status = H5Tclose(state_enum_id);
3039 AssertThrow(status >= 0, ExcIO());
3040 status = H5Tclose(property_enum_id);
3041 AssertThrow(status >= 0, ExcIO());
3042 status = H5Fclose(file_id_reopen);
3043 AssertThrow(status >= 0, ExcIO());
3044 }
3045
3046# endif
3047}
3048
3049
3050
3051template <typename NumberType>
3052void
3053ScaLAPACKMatrix<NumberType>::load(const std::string &filename)
3054{
3055# ifndef DEAL_II_WITH_HDF5
3056 (void)filename;
3057 AssertThrow(false, ExcNeedsHDF5());
3058# else
3059# ifdef H5_HAVE_PARALLEL
3060 // implementation for configurations equipped with a parallel file system
3061 load_parallel(filename);
3062
3063# else
3064 // implementation for configurations with no parallel file system
3065 load_serial(filename);
3066# endif
3067# endif
3068}
3069
3070
3071
3072template <typename NumberType>
3073void
3074 ScaLAPACKMatrix<NumberType>::load_serial(const std::string &filename)
3075 {
3076# ifndef DEAL_II_WITH_HDF5
3077 (void)filename;
3078 Assert(false, ExcInternalError());
3079# else
3080
3081 /*
3082 * The content of the distributed matrix is copied to a matrix using a 1x1
3083 * process grid. Therefore, one process has all the data and can read it from
3084 * the file.
3085 */
3086 // create a 1x1 process grid
3087 const auto one_grid =
3088 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3089 1,
3090 1);
3091
3092 const int MB = n_rows, NB = n_columns;
3093 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, one_grid, MB, NB);
3094
3095 int state_int = -1;
3096 int property_int = -1;
3097
3098 // the 1x1 grid has only one process and this one reads
3099 // the content of the matrix from the HDF5 file
3100 if (tmp.grid->mpi_process_is_active)
3101 {
3102 herr_t status;
3103
3104 // open the file in read-only mode
3105 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
3106
3107 // open the dataset in the file
3108 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3109
3110 // check the datatype of the data in the file
3111 // datatype of source and destination must have the same class
3112 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3113 // Selection
3114 hid_t datatype = H5Dget_type(dataset_id);
3115 H5T_class_t t_class_in = H5Tget_class(datatype);
3116 H5T_class_t t_class = H5Tget_class(hdf5_type_id(tmp.values.data()));
3117 AssertThrow(
3118 t_class_in == t_class,
3119 ExcMessage(
3120 "The data type of the matrix to be read does not match the archive"));
3121
3122 // get dataspace handle
3123 hid_t dataspace_id = H5Dget_space(dataset_id);
3124 // get number of dimensions
3125 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3126 AssertThrow(ndims == 2, ExcIO());
3127 // get every dimension
3128 hsize_t dims[2];
3129 H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3130 AssertThrow(
3131 static_cast<int>(dims[0]) == n_columns,
3132 ExcMessage(
3133 "The number of columns of the matrix does not match the content of the archive"));
3134 AssertThrow(
3135 static_cast<int>(dims[1]) == n_rows,
3136 ExcMessage(
3137 "The number of rows of the matrix does not match the content of the archive"));
3138
3139 // read data
3140 status = H5Dread(dataset_id,
3141 hdf5_type_id(tmp.values.data()),
3142 H5S_ALL,
3143 H5S_ALL,
3144 H5P_DEFAULT,
3145 tmp.values.data());
3146 AssertThrow(status >= 0, ExcIO());
3147
3148 // create HDF5 enum type for LAPACKSupport::State and
3149 // LAPACKSupport::Property
3150 hid_t state_enum_id, property_enum_id;
3151 internal::create_HDF5_state_enum_id(state_enum_id);
3152 internal::create_HDF5_property_enum_id(property_enum_id);
3153
3154 // open the datasets for the state and property enum in the file
3155 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3156 hid_t datatype_state = H5Dget_type(dataset_state_id);
3157 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3158 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3159
3160 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3161 hid_t datatype_property = H5Dget_type(dataset_property_id);
3162 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3163 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3164
3165 // get dataspace handles
3166 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3167 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3168 // get number of dimensions
3169 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3170 AssertThrow(ndims_state == 1, ExcIO());
3171 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3172 AssertThrow(ndims_property == 1, ExcIO());
3173 // get every dimension
3174 hsize_t dims_state[1];
3175 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3176 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3177 hsize_t dims_property[1];
3178 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3179 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3180
3181 // read data
3182 status = H5Dread(dataset_state_id,
3183 state_enum_id,
3184 H5S_ALL,
3185 H5S_ALL,
3186 H5P_DEFAULT,
3187 &tmp.state);
3188 AssertThrow(status >= 0, ExcIO());
3189 // To send the state from the root process to the other processes,
3190 // the state enum is cast to an integer, which is broadcast and
3191 // subsequently cast back to the enum type
3192 state_int = static_cast<int>(tmp.state);
3193
3194 status = H5Dread(dataset_property_id,
3195 property_enum_id,
3196 H5S_ALL,
3197 H5S_ALL,
3198 H5P_DEFAULT,
3199 &tmp.property);
3200 AssertThrow(status >= 0, ExcIO());
3201 // To send the property from the root process to the other processes,
3202 // the property enum is cast to an integer, which is broadcast and
3203 // subsequently cast back to the enum type
3204 property_int = static_cast<int>(tmp.property);
3205
3206 // terminate access to the data spaces
3207 status = H5Sclose(dataspace_id);
3208 AssertThrow(status >= 0, ExcIO());
3209 status = H5Sclose(dataspace_state);
3210 AssertThrow(status >= 0, ExcIO());
3211 status = H5Sclose(dataspace_property);
3212 AssertThrow(status >= 0, ExcIO());
3213
3214 // release data type handles
3215 status = H5Tclose(datatype);
3216 AssertThrow(status >= 0, ExcIO());
3217 status = H5Tclose(state_enum_id);
3218 AssertThrow(status >= 0, ExcIO());
3219 status = H5Tclose(property_enum_id);
3220 AssertThrow(status >= 0, ExcIO());
3221
3222 // end access to the data sets and release resources used by them
3223 status = H5Dclose(dataset_state_id);
3224 AssertThrow(status >= 0, ExcIO());
3225 status = H5Dclose(dataset_id);
3226 AssertThrow(status >= 0, ExcIO());
3227 status = H5Dclose(dataset_property_id);
3228 AssertThrow(status >= 0, ExcIO());
3229
3230 // close the file.
3231 status = H5Fclose(file_id);
3232 AssertThrow(status >= 0, ExcIO());
3233 }
3234 // so far only the root process has the correct state integer --> broadcasting
3235 tmp.grid->send_to_inactive(&state_int, 1);
3236 // so far only the root process has the correct property integer -->
3237 // broadcasting
3238 tmp.grid->send_to_inactive(&property_int, 1);
3239
3240 tmp.state = static_cast<LAPACKSupport::State>(state_int);
3241 tmp.property = static_cast<LAPACKSupport::Property>(property_int);
3242
3243 tmp.copy_to(*this);
3244
3245# endif // DEAL_II_WITH_HDF5
3246}
3247
3248
3249
3250template <typename NumberType>
3251void
3252 ScaLAPACKMatrix<NumberType>::load_parallel(const std::string &filename)
3253 {
3254# ifndef DEAL_II_WITH_HDF5
3255 (void)filename;
3256 Assert(false, ExcInternalError());
3257# else
3258# ifndef H5_HAVE_PARALLEL
3259 Assert(false, ExcInternalError());
3260# else
3261
3262 const unsigned int n_mpi_processes(
3263 Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
3264 MPI_Info info = MPI_INFO_NULL;
3265 /*
3266 * The content of the distributed matrix is copied to a matrix using a
3267 * 1xn_processes process grid. Therefore, the processes hold contiguous chunks
3268 * of the matrix, which they can read from the file
3269 */
3270 // create a 1xP column grid with P being the number of MPI processes
3271 const auto column_grid =
3272 std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,
3273 1,
3274 n_mpi_processes);
3275
3276 const int MB = n_rows;
3277 // for the choice of NB see explanation in save_parallel()
3278 const int NB = std::max(static_cast<int>(std::ceil(
3279 static_cast<double>(n_columns) / n_mpi_processes)),
3280 column_block_size);
3281
3282 ScaLAPACKMatrix<NumberType> tmp(n_rows, n_columns, column_grid, MB, NB);
3283
3284 // get pointer to data held by the process
3285 NumberType *data = (tmp.values.size() > 0) ? tmp.values.data() : nullptr;
3286
3287 herr_t status;
3288
3289 // set up file access property list with parallel I/O access
3290 hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
3291 status = H5Pset_fapl_mpio(plist_id, tmp.grid->mpi_communicator, info);
3292 AssertThrow(status >= 0, ExcIO());
3293
3294 // open file collectively in read-only mode and release property list
3295 // identifier
3296 hid_t file_id = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, plist_id);
3297 status = H5Pclose(plist_id);
3298 AssertThrow(status >= 0, ExcIO());
3299
3300 // open the dataset in the file collectively
3301 hid_t dataset_id = H5Dopen2(file_id, "/matrix", H5P_DEFAULT);
3302
3303 // check the datatype of the dataset in the file
3304 // if the classes of type of the dataset and the matrix do not match abort
3305 // see HDF User's Guide: 6.10. Data Transfer: Datatype Conversion and
3306 // Selection
3307 hid_t datatype = hdf5_type_id(data);
3308 hid_t datatype_inp = H5Dget_type(dataset_id);
3309 H5T_class_t t_class_inp = H5Tget_class(datatype_inp);
3310 H5T_class_t t_class = H5Tget_class(datatype);
3311 AssertThrow(
3312 t_class_inp == t_class,
3313 ExcMessage(
3314 "The data type of the matrix to be read does not match the archive"));
3315
3316 // get the dimensions of the matrix stored in the file
3317 // get dataspace handle
3318 hid_t dataspace_id = H5Dget_space(dataset_id);
3319 // get number of dimensions
3320 const int ndims = H5Sget_simple_extent_ndims(dataspace_id);
3321 AssertThrow(ndims == 2, ExcIO());
3322 // get every dimension
3323 hsize_t dims[2];
3324 status = H5Sget_simple_extent_dims(dataspace_id, dims, nullptr);
3325 AssertThrow(status >= 0, ExcIO());
3326 AssertThrow(
3327 static_cast<int>(dims[0]) == n_columns,
3328 ExcMessage(
3329 "The number of columns of the matrix does not match the content of the archive"));
3330 AssertThrow(
3331 static_cast<int>(dims[1]) == n_rows,
3332 ExcMessage(
3333 "The number of rows of the matrix does not match the content of the archive"));
3334
3335 // gather the number of local rows and columns from all processes
3336 std::vector<int> proc_n_local_rows(n_mpi_processes),
3337 proc_n_local_columns(n_mpi_processes);
3338 int ierr = MPI_Allgather(&tmp.n_local_rows,
3339 1,
3340 MPI_INT,
3341 proc_n_local_rows.data(),
3342 1,
3343 MPI_INT,
3344 tmp.grid->mpi_communicator);
3345 AssertThrowMPI(ierr);
3346 ierr = MPI_Allgather(&tmp.n_local_columns,
3347 1,
3348 MPI_INT,
3349 proc_n_local_columns.data(),
3350 1,
3351 MPI_INT,
3352 tmp.grid->mpi_communicator);
3353 AssertThrowMPI(ierr);
3354
3355 const unsigned int my_rank(
3356 Utilities::MPI::this_mpi_process(tmp.grid->mpi_communicator));
3357
3358 // hyperslab selection parameters
3359 // each process defines a dataset in memory and reads its part from the
3360 // hyperslab in the file
3361 hsize_t count[2];
3362 count[0] = tmp.n_local_columns;
3363 count[1] = tmp.n_local_rows;
3364
3365 hsize_t offset[2] = {0};
3366 for (unsigned int i = 0; i < my_rank; ++i)
3367 offset[0] += proc_n_local_columns[i];
3368
3369 // select hyperslab in the file
3370 status = H5Sselect_hyperslab(
3371 dataspace_id, H5S_SELECT_SET, offset, nullptr, count, nullptr);
3372 AssertThrow(status >= 0, ExcIO());
3373
3374 // create a memory dataspace independently
3375 hid_t memspace = H5Screate_simple(2, count, nullptr);
3376
3377 // read data independently
3378 status =
3379 H5Dread(dataset_id, datatype, memspace, dataspace_id, H5P_DEFAULT, data);
3380 AssertThrow(status >= 0, ExcIO());
3381
3382 // create HDF5 enum type for LAPACKSupport::State and LAPACKSupport::Property
3383 hid_t state_enum_id, property_enum_id;
3384 internal::create_HDF5_state_enum_id(state_enum_id);
3385 internal::create_HDF5_property_enum_id(property_enum_id);
3386
3387 // open the datasets for the state and property enum in the file
3388 hid_t dataset_state_id = H5Dopen2(file_id, "/state", H5P_DEFAULT);
3389 hid_t datatype_state = H5Dget_type(dataset_state_id);
3390 H5T_class_t t_class_state = H5Tget_class(datatype_state);
3391 AssertThrow(t_class_state == H5T_ENUM, ExcIO());
3392
3393 hid_t dataset_property_id = H5Dopen2(file_id, "/property", H5P_DEFAULT);
3394 hid_t datatype_property = H5Dget_type(dataset_property_id);
3395 H5T_class_t t_class_property = H5Tget_class(datatype_property);
3396 AssertThrow(t_class_property == H5T_ENUM, ExcIO());
3397
3398 // get dataspace handles
3399 hid_t dataspace_state = H5Dget_space(dataset_state_id);
3400 hid_t dataspace_property = H5Dget_space(dataset_property_id);
3401 // get number of dimensions
3402 const int ndims_state = H5Sget_simple_extent_ndims(dataspace_state);
3403 AssertThrow(ndims_state == 1, ExcIO());
3404 const int ndims_property = H5Sget_simple_extent_ndims(dataspace_property);
3405 AssertThrow(ndims_property == 1, ExcIO());
3406 // get every dimension
3407 hsize_t dims_state[1];
3408 H5Sget_simple_extent_dims(dataspace_state, dims_state, nullptr);
3409 AssertThrow(static_cast<int>(dims_state[0]) == 1, ExcIO());
3410 hsize_t dims_property[1];
3411 H5Sget_simple_extent_dims(dataspace_property, dims_property, nullptr);
3412 AssertThrow(static_cast<int>(dims_property[0]) == 1, ExcIO());
3413
3414 // read data
3415 status = H5Dread(
3416 dataset_state_id, state_enum_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &tmp.state);
3417 AssertThrow(status >= 0, ExcIO());
3418
3419 status = H5Dread(dataset_property_id,
3420 property_enum_id,
3421 H5S_ALL,
3422 H5S_ALL,
3423 H5P_DEFAULT,
3424 &tmp.property);
3425 AssertThrow(status >= 0, ExcIO());
3426
3427 // close/release sources
3428 status = H5Sclose(memspace);
3429 AssertThrow(status >= 0, ExcIO());
3430 status = H5Dclose(dataset_id);
3431 AssertThrow(status >= 0, ExcIO());
3432 status = H5Dclose(dataset_state_id);
3433 AssertThrow(status >= 0, ExcIO());
3434 status = H5Dclose(dataset_property_id);
3435 AssertThrow(status >= 0, ExcIO());
3436 status = H5Sclose(dataspace_id);
3437 AssertThrow(status >= 0, ExcIO());
3438 status = H5Sclose(dataspace_state);
3439 AssertThrow(status >= 0, ExcIO());
3440 status = H5Sclose(dataspace_property);
3441 AssertThrow(status >= 0, ExcIO());
3442 // status = H5Tclose(datatype);
3443 // AssertThrow(status >= 0, ExcIO());
3444 status = H5Tclose(state_enum_id);
3445 AssertThrow(status >= 0, ExcIO());
3446 status = H5Tclose(property_enum_id);
3447 AssertThrow(status >= 0, ExcIO());
3448 status = H5Fclose(file_id);
3449 AssertThrow(status >= 0, ExcIO());
3450
3451 // copying the distributed matrices
3452 tmp.copy_to(*this);
3453
3454# endif // H5_HAVE_PARALLEL
3455# endif // DEAL_II_WITH_HDF5
3456}
3457
3458
3459
3460namespace internal
3461{
3462 namespace
3463 {
3464 template <typename NumberType>
3465 void
3466 scale_columns(ScaLAPACKMatrix<NumberType> & matrix,
3467 const ArrayView<const NumberType> &factors)
3468 {
3469 Assert(matrix.n() == factors.size(),
3470 ExcDimensionMismatch(matrix.n(), factors.size()));
3471
3472 for (unsigned int i = 0; i < matrix.local_n(); ++i)
3473 {
3474 const NumberType s = factors[matrix.global_column(i)];
3475
3476 for (unsigned int j = 0; j < matrix.local_m(); ++j)
3477 matrix.local_el(j, i) *= s;
3478 }
3479 }
3480
3481 template <typename NumberType>
3482 void
3483 scale_rows(ScaLAPACKMatrix<NumberType> & matrix,
3484 const ArrayView<const NumberType> &factors)
3485 {
3486 Assert(matrix.m() == factors.size(),
3487 ExcDimensionMismatch(matrix.m(), factors.size()));
3488
3489 for (unsigned int i = 0; i < matrix.local_m(); ++i)
3490 {
3491 const NumberType s = factors[matrix.global_row(i)];
3492
3493 for (unsigned int j = 0; j < matrix.local_n(); ++j)
3494 matrix.local_el(i, j) *= s;
3495 }
3496 }
3497
3498 } // namespace
3499} // namespace internal
3500
3501
3502
3503template <typename NumberType>
3504template <class InputVector>
3505void
3506 ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
3507 {
3508 if (this->grid->mpi_process_is_active)
3509 internal::scale_columns(*this, make_array_view(factors));
3510}
3511
3512
3513
3514template <typename NumberType>
3515template <class InputVector>
3516void
3517 ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
3518 {
3519 if (this->grid->mpi_process_is_active)
3520 internal::scale_rows(*this, make_array_view(factors));
3521}
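/*
 * Illustrative usage sketch (not part of scalapack.cc): scale_columns()
 * and scale_rows() multiply every column (row) of the matrix by the
 * corresponding entry of a vector of length n() (m()), i.e. they apply a
 * diagonal matrix from the right (left). Hypothetical usage:
 *
 *   std::vector<double> d(A.n(), 2.);   // diagonal entries
 *   A.scale_columns(d);                 // A <- A * diag(d)
 */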
3522
3523
3524
3525// instantiations
3526# include "scalapack.inst"
3527
3528
3529 DEAL_II_NAMESPACE_CLOSE
3530
3531#endif // DEAL_II_WITH_SCALAPACK