process_grid.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2017 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
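  // Compute a 2d process grid (rows x columns) for an m x n matrix that is
  // distributed in blocks of size block_size_m x block_size_n over the
  // processes of mpi_comm.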
  inline std::pair<int, int>
  compute_processor_grid_sizes(const MPI_Comm     mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    // A few notes from the ScaLAPACK user guide:
    // It is possible to predict the best grid shape given the number of
    // processes available: Pr x Pc <= P. This, however, depends on the task
    // to be done. LU, QR and QL factorizations perform better for "flat"
    // process grids (Pr < Pc). For large N, Pc = 2*Pr is a good choice,
    // whereas for small N one should choose a small Pr. Square or nearly
    // square grids are better suited for Cholesky factorization. LQ and RQ
    // factorizations take advantage of "tall" grids (Pr > Pc).

    // Below we always try to create 2d processor grids:

    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);

    // Get the total number of cores we can occupy in a rectangular dense
    // matrix with rectangular blocks when every core owns only a single block:
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);

    // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
    //   Pc =: ratio * Pr
    // and therefore
    //   Np = Pr * Pc = Pc * Pc / ratio,
    // which gives Pc = sqrt(ratio * Np). For square matrices the ratio
    // equals 1.
    const double ratio = double(n) / m;
    int          Pc    = static_cast<int>(std::sqrt(ratio * Np));

    // One could round Pc up to the next divisor of Np,
    //   while (Np % Pc != 0)
    //     ++Pc;
    // but this affects the grid shape dramatically: with 10 cores the 3x3
    // grid would become 2x5.

    // Limit our estimate to be in [2, Np]:
    int n_process_columns = std::min(Np, std::max(2, Pc));
    // finally, get the rows:
    int n_process_rows = Np / n_process_columns;

    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage(
             "error in process grid: " + std::to_string(n_process_rows) + "x" +
             std::to_string(n_process_columns) + "=" +
             std::to_string(n_process_rows * n_process_columns) + " out of " +
             std::to_string(n_processes)));

    return std::make_pair(n_process_rows, n_process_columns);

    // For example,
    // 320x320 with 32x32 blocks and 16 cores:
    // Pc = 1.0 * Pr => 4x4 grid
    // Pc = 0.5 * Pr => 8x2 grid
    // Pc = 2.0 * Pr => 3x5 grid
  }
} // namespace
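
// A minimal standalone sketch that walks through the heuristic above for the
// worked example in the comment: 16 MPI processes and a 320x320 matrix split
// into 32x32 blocks. It is plain arithmetic and not part of this translation
// unit; it can be pasted into any C++ program to verify the resulting 4x4
// grid:
//
//   #include <algorithm>
//   #include <cmath>
//
//   int main()
//   {
//     const int    m = 320, n = 320, block_m = 32, block_n = 32;
//     const int    n_processes = 16;
//     const int    n_processes_heuristic =
//       int(std::ceil((1. * m) / block_m)) *
//       int(std::ceil((1. * n) / block_n));                   // 10 * 10 = 100
//     const int    Np    = std::min(n_processes_heuristic, n_processes); // 16
//     const double ratio = double(n) / m;                                // 1.
//     const int    Pc    = static_cast<int>(std::sqrt(ratio * Np));      // 4
//     const int    n_process_columns = std::min(Np, std::max(2, Pc));    // 4
//     const int    n_process_rows    = Np / n_process_columns;           // 4
//     return (n_process_rows == 4 && n_process_columns == 4) ? 0 : 1;
//   }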

namespace Utilities
{
  namespace MPI
  {
    ProcessGrid::ProcessGrid(
      const MPI_Comm                               mpi_comm,
      const std::pair<unsigned int, unsigned int> &grid_dimensions)
      : mpi_communicator(mpi_comm)
      , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
      , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
      , n_process_rows(grid_dimensions.first)
      , n_process_columns(grid_dimensions.second)
    {
      Assert(grid_dimensions.first > 0,
             ExcMessage("Number of process grid rows has to be positive."));
      Assert(grid_dimensions.second > 0,
             ExcMessage("Number of process grid columns has to be positive."));

      Assert(
        grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
        ExcMessage(
          "Size of process grid is larger than number of available MPI processes."));

      // Processor grid order.
      const bool column_major = false;

      // Initialize the BLACS context from the provided communicator.
      blacs_context     = Csys2blacs_handle(mpi_communicator);
      const char *order = (column_major ? "Col" : "Row");
      // Note that blacs_context can be modified below. Thus Cblacs2sys_handle
      // may not return the same MPI communicator.
      Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

      // BLACS may modify the grid size on processes which are not used
      // in the grid. So provide copies below:
      int procrows_ = n_process_rows;
      int proccols_ = n_process_columns;
      Cblacs_gridinfo(blacs_context,
                      &procrows_,
                      &proccols_,
                      &this_process_row,
                      &this_process_column);

      // If this MPI core is not on the grid, flag it as inactive and
      // skip all jobs.
      // Note that a different condition is used in the FORTRAN code here:
      // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
      if (this_process_row < 0 || this_process_column < 0)
        mpi_process_is_active = false;
      else
        mpi_process_is_active = true;

      // Create an auxiliary communicator which has root and all inactive
      // cores. Assume that inactive cores start with
      // id = n_process_rows * n_process_columns.
      const unsigned int n_active_mpi_processes =
        n_process_rows * n_process_columns;
      Assert(mpi_process_is_active ||
               this_mpi_process >= n_active_mpi_processes,
             ExcInternalError());

      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);

      // Get the group of processes in mpi_communicator
      int       ierr = 0;
      MPI_Group all_group;
      ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // Construct the group containing all ranks we need:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr        = MPI_Group_incl(all_group,
                                   n,
                                   inactive_with_root_ranks.data(),
                                   &inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Create the communicator based on inactive_with_root_group.
      // Note that on all the active MPI processes (except for the one with
      // rank 0) the resulting MPI_Comm mpi_communicator_inactive_with_root
      // will be MPI_COMM_NULL.
      const int mpi_tag =
        Utilities::MPI::internal::Tags::process_grid_constructor;

      ierr = MPI_Comm_create_group(mpi_communicator,
                                   inactive_with_root_group,
                                   mpi_tag,
                                   &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Double check that the process with rank 0 in the subgroup is active:
      if constexpr (running_in_debug_mode())
        {
          if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
              Utilities::MPI::this_mpi_process(
                mpi_communicator_inactive_with_root) == 0)
            Assert(mpi_process_is_active, ExcInternalError());
        }
    }



    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      : ProcessGrid(mpi_comm,
                    compute_processor_grid_sizes(mpi_comm,
                                                 n_rows_matrix,
                                                 n_columns_matrix,
                                                 row_block_size,
                                                 column_block_size))
    {}
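
    // Usage sketch: this constructor is the one typically used together with
    // ScaLAPACKMatrix. Assuming a 1000x1000 matrix distributed in 32x32
    // blocks over all ranks of MPI_COMM_WORLD (sizes chosen purely for
    // illustration), a grid could be set up as
    //
    //   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    //     MPI_COMM_WORLD, 1000, 1000, 32, 32);
    //
    // and then handed to a ScaLAPACKMatrix (see scalapack.h).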


    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
    {}


    ProcessGrid::~ProcessGrid()
    {
      if (mpi_process_is_active)
        Cblacs_gridexit(blacs_context);

      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        Utilities::MPI::free_communicator(mpi_communicator_inactive_with_root);
    }

    template <typename NumberType>
    void
    ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert(count > 0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          const int ierr =
            MPI_Bcast(value,
                      count,
                      Utilities::MPI::mpi_type_id_for_type<decltype(*value)>,
                      0 /*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }
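
    // Usage sketch for send_to_inactive(): after the process grid's root has
    // computed some value, the call below makes it available on the ranks
    // that did not make it onto the grid; on active non-root ranks the
    // broadcast communicator is MPI_COMM_NULL, so the call is a no-op there.
    // The variable name is illustrative only:
    //
    //   double value = 0.; // set to the actual result on the grid's root
    //   grid.send_to_inactive(&value, 1);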

  } // namespace MPI
} // namespace Utilities

// instantiations

template void
Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *,
                                                      const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *, const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK