mpi_noncontiguous_partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2020 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.templates.h>

#include <boost/serialization/utility.hpp>


DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const IndexSet &indexset_has,
      const IndexSet &indexset_want,
      const MPI_Comm  communicator)
    {
      this->reinit(indexset_has, indexset_want, communicator);
    }



    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      this->reinit(indices_has, indices_want, communicator);
    }



    std::pair<unsigned int, unsigned int>
    NoncontiguousPartitioner::n_targets() const
    {
      return {send_ranks.size(), recv_ranks.size()};
    }



    unsigned int
    NoncontiguousPartitioner::temporary_storage_size() const
    {
      return send_ptr.back();
    }



    types::global_dof_index
    NoncontiguousPartitioner::memory_consumption()
    {
      return MemoryConsumption::memory_consumption(send_ranks) +
             MemoryConsumption::memory_consumption(send_ptr) +
             MemoryConsumption::memory_consumption(send_indices) +
             MemoryConsumption::memory_consumption(recv_ranks) +
             MemoryConsumption::memory_consumption(recv_ptr) +
             MemoryConsumption::memory_consumption(recv_indices) +
             MemoryConsumption::memory_consumption(buffers) +
             MemoryConsumption::memory_consumption(requests);
    }



    MPI_Comm
    NoncontiguousPartitioner::get_mpi_communicator() const
    {
      return communicator;
    }



    void
    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
                                     const IndexSet &indexset_want,
                                     const MPI_Comm  communicator)
    {
      this->communicator = communicator;

      // clean up
      send_ranks.clear();
      send_ptr.clear();
      send_indices.clear();
      recv_ranks.clear();
      recv_ptr.clear();
      recv_indices.clear();
      buffers.clear();
      requests.clear();

      // set up communication pattern
      std::vector<unsigned int> owning_ranks_of_ghosts(
        indexset_want.n_elements());

      // set up dictionary
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(indexset_has,
                indexset_want,
                communicator,
                owning_ranks_of_ghosts,
                true);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);
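
      // At this point owning_ranks_of_ghosts is filled: entry j names the
      // rank that owns the j-th element of indexset_want. Since the payload
      // was created with track_index_requests = true, process.get_requesters()
      // conversely records which of the locally owned indices each rank has
      // asked for; the send-side setup below consumes exactly that map.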

      // set up map of processes from where this rank will receive values
      {
        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;

        for (const auto &owner : owning_ranks_of_ghosts)
          recv_map[owner] = std::vector<types::global_dof_index>();

        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
             i++)
          recv_map[owning_ranks_of_ghosts[i]].push_back(i);

        recv_ptr.push_back(recv_indices.size() /*=0*/);
        for (const auto &target_with_indexset : recv_map)
          {
            recv_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              recv_indices.push_back(cell_index);

            recv_ptr.push_back(recv_indices.size());
          }
      }

      {
        const auto targets_with_indexset = process.get_requesters();

        send_ptr.push_back(recv_ptr.back());
        for (const auto &target_with_indexset : targets_with_indexset)
          {
            send_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              send_indices.push_back(indexset_has.index_within_set(cell_index));

            send_ptr.push_back(send_indices.size() + recv_ptr.back());
          }
      }
    }
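
    // Annotation (not part of the library source): recv_ranks, recv_ptr and
    // recv_indices now form a CSR-like structure. The entries
    // recv_indices[recv_ptr[r]], ..., recv_indices[recv_ptr[r+1]-1] are the
    // positions within indexset_want that are filled by data arriving from
    // rank recv_ranks[r]; send_ranks/send_ptr/send_indices play the same
    // role for outgoing data, with send_indices holding positions within
    // indexset_has. send_ptr starts at recv_ptr.back() because both halves
    // address a single staging buffer, whose total size is what
    // temporary_storage_size() above reports as send_ptr.back().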



    void
    NoncontiguousPartitioner::reinit(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      // step 0) clean vectors from numbers::invalid_dof_index (indicating
      //         padding)
      std::vector<types::global_dof_index> indices_has_clean;
      indices_has_clean.reserve(indices_has.size());

      for (const auto i : indices_has)
        if (i != numbers::invalid_dof_index)
          indices_has_clean.push_back(i);

      std::vector<types::global_dof_index> indices_want_clean;
      indices_want_clean.reserve(indices_want.size());

      for (const auto i : indices_want)
        if (i != numbers::invalid_dof_index)
          indices_want_clean.push_back(i);

      // step 0) determine "number of degrees of freedom" needed for IndexSet
      const types::global_dof_index local_n_dofs_has =
        indices_has_clean.empty() ?
          0 :
          (*std::max_element(indices_has_clean.begin(),
                             indices_has_clean.end()) +
           1);

      const types::global_dof_index local_n_dofs_want =
        indices_want_clean.empty() ?
          0 :
          (*std::max_element(indices_want_clean.begin(),
                             indices_want_clean.end()) +
           1);

      const types::global_dof_index n_dofs =
        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
                            communicator);

      // step 1) convert vectors to indexsets (sorted!)
      IndexSet index_set_has(n_dofs);
      index_set_has.add_indices(indices_has_clean.begin(),
                                indices_has_clean.end());

      IndexSet index_set_want(n_dofs);
      index_set_want.add_indices(indices_want_clean.begin(),
                                 indices_want_clean.end());

      // step 2) set up internal data structures with indexset
      this->reinit(index_set_has, index_set_want, communicator);

      // step 3) fix inner data structures so that they are sorted by
      //         position in the original (possibly padded) vectors
      {
        std::vector<types::global_dof_index> temp_map_send(
          index_set_has.n_elements());

        for (types::global_dof_index i = 0; i < indices_has.size(); ++i)
          if (indices_has[i] != numbers::invalid_dof_index)
            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;

        for (auto &i : send_indices)
          i = temp_map_send[i];
      }

      {
        std::vector<types::global_dof_index> temp_map_recv(
          index_set_want.n_elements());

        for (types::global_dof_index i = 0; i < indices_want.size(); ++i)
          if (indices_want[i] != numbers::invalid_dof_index)
            temp_map_recv[index_set_want.index_within_set(indices_want[i])] = i;

        for (auto &i : recv_indices)
          i = temp_map_recv[i];
      }
    }
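
    // Annotation (not part of the library source): a small illustration of
    // the vector-based semantics. With
    //   indices_has = {2, 0, numbers::invalid_dof_index}
    // the calling process owns the global indices 2 and 0, and the third
    // slot is mere padding. Step 3 above therefore remaps send_indices and
    // recv_indices so that they address positions in the original, unsorted
    // and possibly padded, vectors rather than positions within the sorted
    // IndexSets built in step 1.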
  } // namespace MPI
} // namespace Utilities

#include "mpi_noncontiguous_partitioner.inst"

DEAL_II_NAMESPACE_CLOSE
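
A minimal usage sketch (an illustration added here, not part of the file
above; it relies on the class's documented vector-based constructor and its
export_to_ghosted_array() member, and assumes exactly two MPI ranks):

#include <deal.II/base/array_view.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>

#include <iostream>
#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
  const MPI_Comm     comm    = MPI_COMM_WORLD;
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

  // Rank 0 owns global indices {0, 1}, rank 1 owns {2, 3}; each rank wants
  // one value owned by the other rank. The orderings are arbitrary and may
  // contain numbers::invalid_dof_index as padding.
  const std::vector<types::global_dof_index> indices_has =
    (my_rank == 0) ? std::vector<types::global_dof_index>{0, 1} :
                     std::vector<types::global_dof_index>{2, 3};
  const std::vector<types::global_dof_index> indices_want =
    (my_rank == 0) ? std::vector<types::global_dof_index>{2} :
                     std::vector<types::global_dof_index>{1};

  Utilities::MPI::NoncontiguousPartitioner partitioner(indices_has,
                                                       indices_want,
                                                       comm);

  // Source values laid out exactly like indices_has.
  const std::vector<double> src = (my_rank == 0) ?
                                    std::vector<double>{10., 11.} :
                                    std::vector<double>{12., 13.};
  std::vector<double> dst(indices_want.size());

  partitioner.export_to_ghosted_array<double>(make_array_view(src),
                                              make_array_view(dst));

  // Rank 0 now holds {12.}, rank 1 holds {11.}.
  std::cout << "rank " << my_rank << ": " << dst[0] << std::endl;
}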