Reference documentation for deal.II version 9.5.0
particle_handler.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2017 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
18
20
22
23#include <limits>
24#include <memory>
25#include <utility>
26
27DEAL_II_NAMESPACE_OPEN
28
29namespace Particles
30{
31 namespace
32 {
33 template <int dim, int spacedim>
34 std::vector<char>
35 pack_particles(std::vector<ParticleIterator<dim, spacedim>> &particles)
36 {
37 std::vector<char> buffer;
38
39 if (particles.size() == 0)
40 return buffer;
41
42 buffer.resize(particles.size() *
43 particles.front()->serialized_size_in_bytes());
44 void *current_data = buffer.data();
45
46 for (const auto &particle : particles)
47 {
48 current_data = particle->write_particle_data_to_memory(current_data);
49 }
50
51 return buffer;
52 }
53 } // namespace
54
55
56
57 template <int dim, int spacedim>
58 ParticleHandler<dim, spacedim>::ParticleHandler()
59 : triangulation()
60 , mapping()
61 , property_pool(std::make_unique<PropertyPool<dim, spacedim>>(0))
62 , global_number_of_particles(0)
63 , number_of_locally_owned_particles(0)
64 , global_max_particles_per_cell(0)
65 , next_free_particle_index(0)
66 , size_callback()
67 , store_callback()
68 , load_callback()
69 , handle(numbers::invalid_unsigned_int)
70 , tria_listeners()
71 {
72 reset_particle_container(particles);
73 }
74
75
76
77 template <int dim, int spacedim>
78 ParticleHandler<dim, spacedim>::ParticleHandler(
79 const Triangulation<dim, spacedim> &triangulation,
80 const Mapping<dim, spacedim> & mapping,
81 const unsigned int n_properties)
82 : triangulation(&triangulation, typeid(*this).name())
83 , mapping(&mapping, typeid(*this).name())
84 , property_pool(std::make_unique<PropertyPool<dim, spacedim>>(n_properties))
85 , cells_to_particle_cache(triangulation.n_active_cells(), particles.end())
86 , global_number_of_particles(0)
87 , number_of_locally_owned_particles(0)
88 , global_max_particles_per_cell(0)
89 , next_free_particle_index(0)
90 , size_callback()
91 , store_callback()
92 , load_callback()
93 , handle(numbers::invalid_unsigned_int)
94 , triangulation_cache(
95 std::make_unique<GridTools::Cache<dim, spacedim>>(triangulation,
96 mapping))
97 , tria_listeners()
98 {
99 reset_particle_container(particles);
100 connect_to_triangulation_signals();
101 }
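
A minimal usage sketch (not part of the library source; the function name and the hyper-cube setup are illustrative) of how this constructor is typically called from user code:

#include <deal.II/fe/mapping_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/particles/particle_handler.h>

using namespace dealii;

void construct_particle_handler_example()
{
  // A refined mesh of the unit square and a (bi-)linear mapping.
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);
  const MappingQ<2> mapping(1);

  // Store two properties per particle, e.g. a 2d velocity. The handler only
  // keeps pointers to the triangulation and mapping, so both must outlive it.
  Particles::ParticleHandler<2> particle_handler(tria, mapping,
                                                 /*n_properties=*/2);
}
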
102
103
104
105 template <int dim, int spacedim>
106 ParticleHandler<dim, spacedim>::~ParticleHandler()
107 {
108 clear_particles();
109
110 for (const auto &connection : tria_listeners)
111 connection.disconnect();
112 }
113
114
115
116 template <int dim, int spacedim>
117 void
118 ParticleHandler<dim, spacedim>::initialize(
119 const Triangulation<dim, spacedim> &new_triangulation,
120 const Mapping<dim, spacedim> & new_mapping,
121 const unsigned int n_properties)
122 {
123 clear();
124
125 triangulation = &new_triangulation;
126 mapping = &new_mapping;
127
128 reset_particle_container(particles);
129
130 // Create the memory pool that will store all particle properties
131 property_pool = std::make_unique<PropertyPool<dim, spacedim>>(n_properties);
132
133 // Create the grid cache to cache the information about the triangulation
134 // that is used to locate the particles into subdomains and cells
135 triangulation_cache =
136 std::make_unique<GridTools::Cache<dim, spacedim>>(new_triangulation,
137 new_mapping);
138
139 cells_to_particle_cache.resize(triangulation->n_active_cells(),
140 particles.end());
141
142 connect_to_triangulation_signals();
143 }
144
145
146
147 template <int dim, int spacedim>
148 void
149 ParticleHandler<dim, spacedim>::copy_from(
150 const ParticleHandler<dim, spacedim> &particle_handler)
151 {
152 const unsigned int n_properties =
153 particle_handler.property_pool->n_properties_per_slot();
154 initialize(*particle_handler.triangulation,
155 *particle_handler.mapping,
156 n_properties);
157
158 property_pool = std::make_unique<PropertyPool<dim, spacedim>>(
159 *(particle_handler.property_pool));
160
161 // copy static members
162 global_number_of_particles = particle_handler.global_number_of_particles;
163 number_of_locally_owned_particles =
164 particle_handler.number_of_locally_owned_particles;
165
166 global_max_particles_per_cell =
167 particle_handler.global_max_particles_per_cell;
168 next_free_particle_index = particle_handler.next_free_particle_index;
169
170 // Manually copy over the particles because we do not want to touch the
171 // anchor iterators set by initialize()
172 particles.insert(particle_container_owned_end(),
173 particle_handler.particle_container_owned_begin(),
174 particle_handler.particle_container_owned_end());
175 particles.insert(particle_container_ghost_end(),
176 particle_handler.particle_container_ghost_begin(),
177 particle_handler.particle_container_ghost_end());
178
179 for (auto it = particles.begin(); it != particles.end(); ++it)
180 if (!it->particles.empty())
181 cells_to_particle_cache[it->cell->active_cell_index()] = it;
182
183 ghost_particles_cache.ghost_particles_by_domain =
184 particle_handler.ghost_particles_cache.ghost_particles_by_domain;
185 handle = particle_handler.handle;
186 }
187
188
189
190 template <int dim, int spacedim>
191 void
192 ParticleHandler<dim, spacedim>::clear()
193 {
194 clear_particles();
195 global_number_of_particles = 0;
196 number_of_locally_owned_particles = 0;
197 next_free_particle_index = 0;
198 global_max_particles_per_cell = 0;
199 }
200
201
202
203 template <int dim, int spacedim>
204 void
205 ParticleHandler<dim, spacedim>::clear_particles()
206 {
207 for (auto &particles_in_cell : particles)
208 for (auto &particle : particles_in_cell.particles)
209 if (particle != PropertyPool<dim, spacedim>::invalid_handle)
210 property_pool->deregister_particle(particle);
211
212 cells_to_particle_cache.clear();
213 reset_particle_container(particles);
214 if (triangulation != nullptr)
215 cells_to_particle_cache.resize(triangulation->n_active_cells(),
216 particles.end());
217
218 // the particle properties have already been deleted by their destructor,
219 // but the memory is still allocated. Return the memory as well.
220 property_pool->clear();
221 }
222
223
224
225 template <int dim, int spacedim>
226 void
227 ParticleHandler<dim, spacedim>::reserve(const std::size_t n_particles)
228 {
229 property_pool->reserve(n_particles);
230 }
231
232
233
234 template <int dim, int spacedim>
235 void
236 ParticleHandler<dim, spacedim>::reset_particle_container(
237 particle_container &given_particles)
238 {
239 // Make sure to set a valid past-the-end iterator also in case we have no
240 // triangulation
241 const typename Triangulation<dim, spacedim>::cell_iterator
242 past_the_end_iterator =
243 triangulation != nullptr ?
244 triangulation->end() :
245 typename Triangulation<dim, spacedim>::cell_iterator(nullptr, -1, -1);
246
247 given_particles.clear();
248 for (unsigned int i = 0; i < 3; ++i)
249 given_particles.emplace_back(
250 std::vector<typename PropertyPool<dim, spacedim>::Handle>(),
251 past_the_end_iterator);
252
253 // Set the end of owned particles to the middle of the three elements
254 const_cast<typename particle_container::iterator &>(owned_particles_end) =
255 ++given_particles.begin();
256 }
257
258
259
260 template <int dim, int spacedim>
261 void
262 ParticleHandler<dim, spacedim>::update_cached_numbers()
263 {
264 // first sort the owned particles by the active cell index
265 bool sort_is_necessary = false;
266 {
267 auto previous = particle_container_owned_begin();
268 for (auto next = previous; next != particle_container_owned_end(); ++next)
269 {
270 if (previous->cell.state() == IteratorState::valid &&
271 next->cell.state() == IteratorState::valid &&
272 previous->cell > next->cell)
273 {
274 sort_is_necessary = true;
275 break;
276 }
277 previous = next;
278 }
279 }
280 if (sort_is_necessary)
281 {
282 // we could have tried to call std::list::sort with a custom
283 // comparator to get things sorted, but things are complicated by the
284 // three anchor entries that we do not want to move, and we would
285 // hence pay an O(N log(N)) algorithm with a large constant in front
286 // of it (on the order of 20+ instructions with many
287 difficult-to-predict branches). Therefore, we simply copy the list
288 into a new container (keeping alive the possibly large vectors with
289 particles on cells).
290 particle_container sorted_particles;
291
292 // note that this call updates owned_particles_end, so that
293 // particle_container_owned_end() below already points to the
294 // new container
295 reset_particle_container(sorted_particles);
296
297 // iterate over cells and insert the entries in the new order
298 for (const auto &cell : triangulation->active_cell_iterators())
299 if (!cell->is_artificial())
300 if (cells_to_particle_cache[cell->active_cell_index()] !=
301 particles.end())
302 {
303 // before we move the sorted_particles into particles
304 // particle_container_ghost_end() still points to the
305 // old particles container. Therefore this condition looks
306 // quirky.
307 typename particle_container::iterator insert_position =
308 cell->is_locally_owned() ? particle_container_owned_end() :
309 --sorted_particles.end();
310 typename particle_container::iterator new_entry =
311 sorted_particles.insert(
312 insert_position, typename particle_container::value_type());
313 new_entry->cell = cell;
314 new_entry->particles =
315 std::move(cells_to_particle_cache[cell->active_cell_index()]
316 ->particles);
317 }
318 particles = std::move(sorted_particles);
319
320 // refresh cells_to_particle_cache
321 cells_to_particle_cache.clear();
322 cells_to_particle_cache.resize(triangulation->n_active_cells(),
323 particles.end());
324 for (auto it = particles.begin(); it != particles.end(); ++it)
325 if (!it->particles.empty())
326 cells_to_particle_cache[it->cell->active_cell_index()] = it;
327 }
328
329 // Ensure that we did not accidentally modify the anchor entries with
330 // special purpose.
331 Assert(particles.front().cell.state() == IteratorState::past_the_end &&
332 particles.front().particles.empty() &&
333 particles.back().cell.state() == IteratorState::past_the_end &&
334 particles.back().particles.empty() &&
335 owned_particles_end->cell.state() == IteratorState::past_the_end &&
336 owned_particles_end->particles.empty(),
337 ExcInternalError());
338
339#ifdef DEBUG
340 // check that no cache element hits the three anchor states in the list of
341 // particles
342 for (const auto &it : cells_to_particle_cache)
343 Assert(it != particles.begin() && it != owned_particles_end &&
344 it != --(particles.end()),
345 ExcInternalError());
346
347 // check that we only have locally owned particles in the first region of
348 // cells; note that we skip the very first anchor element
349 for (auto it = particle_container_owned_begin();
350 it != particle_container_owned_end();
351 ++it)
352 Assert(it->cell->is_locally_owned(), ExcInternalError());
353
354 // check that the cache is consistent with the iterators
355 std::vector<typename particle_container::iterator> verify_cache(
356 triangulation->n_active_cells(), particles.end());
357 for (auto it = particles.begin(); it != particles.end(); ++it)
358 if (!it->particles.empty())
359 verify_cache[it->cell->active_cell_index()] = it;
360
361 for (unsigned int i = 0; i < verify_cache.size(); ++i)
362 Assert(verify_cache[i] == cells_to_particle_cache[i], ExcInternalError());
363#endif
364
365 // now compute local result with the function above and then compute the
366 // collective results
367 number_of_locally_owned_particles = 0;
368
369 types::particle_index result[2] = {};
370 for (const auto &particles_in_cell : particles)
371 {
372 const types::particle_index n_particles_in_cell =
373 particles_in_cell.particles.size();
374
375 // local_max_particles_per_cell
376 result[0] = std::max(result[0], n_particles_in_cell);
377
378 // number of locally owned particles
379 if (n_particles_in_cell > 0 &&
380 particles_in_cell.cell->is_locally_owned())
381 number_of_locally_owned_particles += n_particles_in_cell;
382
383 // local_max_particle_index
384 for (const auto &particle : particles_in_cell.particles)
385 result[1] = std::max(result[1], property_pool->get_id(particle));
386 }
387
388 global_number_of_particles =
389 ::Utilities::MPI::sum(number_of_locally_owned_particles,
390 triangulation->get_communicator());
391
392 if (global_number_of_particles == 0)
393 {
394 next_free_particle_index = 0;
395 global_max_particles_per_cell = 0;
396 }
397 else
398 {
399 Utilities::MPI::max(result, triangulation->get_communicator(), result);
400
401 next_free_particle_index = result[1] + 1;
402 global_max_particles_per_cell = result[0];
403 }
404 }
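
The design choice described in the comment inside the function above can be illustrated with a small standalone analogy (plain C++, not deal.II code, all names invented): instead of sorting the list in place around fixed anchor entries, a new list is built in the target order and only the per-cell payload vectors are moved over.

#include <list>
#include <utility>
#include <vector>

struct Entry
{
  int              key;     // stands in for the cell's active_cell_index()
  std::vector<int> payload; // stands in for the particle handles of a cell
};

void rebuild_sorted(std::list<Entry> &data, const std::vector<int> &key_order)
{
  std::list<Entry> sorted;
  sorted.push_back({-1, {}}); // front anchor, must stay first

  // A linear scan is enough for the illustration; the library uses its
  // cells_to_particle_cache instead of searching the list.
  for (const int key : key_order)
    for (auto &entry : data)
      if (entry.key == key)
        sorted.push_back({key, std::move(entry.payload)});

  sorted.push_back({-1, {}}); // back anchor, must stay last
  data = std::move(sorted);
}
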
405
406
407
408 template <int dim, int spacedim>
409 types::particle_index
410 ParticleHandler<dim, spacedim>::n_particles_in_cell(
411 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
412 const
413 {
414 if (cells_to_particle_cache.size() == 0)
415 return 0;
416
417 if (cell->is_artificial() == false)
418 {
419 return cells_to_particle_cache[cell->active_cell_index()] !=
420 particles.end() ?
421 cells_to_particle_cache[cell->active_cell_index()]
422 ->particles.size() :
423 0;
424 }
425 else
426 AssertThrow(false,
427 ExcMessage("You can't ask for the particles on an artificial "
428 "cell since we don't know what exists on these "
429 "kinds of cells."));
430
431 return 0;
432 }
433
434
435
436 template <int dim, int spacedim>
437 typename ParticleHandler<dim, spacedim>::particle_iterator_range
438 ParticleHandler<dim, spacedim>::particles_in_cell(
439 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
440 const
441 {
442 return (const_cast<ParticleHandler<dim, spacedim> *>(this))
443 ->particles_in_cell(cell);
444 }
445
446
447
448 template <int dim, int spacedim>
449 typename ParticleHandler<dim, spacedim>::particle_iterator_range
450 ParticleHandler<dim, spacedim>::particles_in_cell(
451 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
452 {
453 const unsigned int active_cell_index = cell->active_cell_index();
454
455 if (cell->is_artificial() == false)
456 {
457 if (cells_to_particle_cache[active_cell_index] == particles.end())
458 {
459 return boost::make_iterator_range(
460 particle_iterator(particles.begin(), *property_pool, 0),
461 particle_iterator(particles.begin(), *property_pool, 0));
462 }
463 else
464 {
465 const typename particle_container::iterator
466 particles_in_current_cell =
467 cells_to_particle_cache[active_cell_index];
468 typename particle_container::iterator particles_in_next_cell =
469 particles_in_current_cell;
470 ++particles_in_next_cell;
471 return boost::make_iterator_range(
472 particle_iterator(particles_in_current_cell, *property_pool, 0),
473 particle_iterator(particles_in_next_cell, *property_pool, 0));
474 }
475 }
476 else
477 AssertThrow(false,
478 ExcMessage("You can't ask for the particles on an artificial "
479 "cell since we don't know what exists on these "
480 "kinds of cells."));
481
482 return {};
483 }
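
As a usage sketch (assuming a particle handler and a matching triangulation exist in user code; the function name is illustrative), the returned range is typically consumed cell by cell:

#include <deal.II/grid/tria.h>
#include <deal.II/particles/particle_handler.h>

#include <iostream>

using namespace dealii;

// Print the id and location of every particle on locally owned cells.
template <int dim>
void print_particles(const Triangulation<dim>        &tria,
                     Particles::ParticleHandler<dim> &particle_handler)
{
  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      for (const auto &particle : particle_handler.particles_in_cell(cell))
        std::cout << "particle " << particle.get_id() << " at "
                  << particle.get_location() << std::endl;
}
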
484
485
486
487 template <int dim, int spacedim>
488 void
489 ParticleHandler<dim, spacedim>::remove_particle(
490 const ParticleHandler<dim, spacedim>::particle_iterator &particle)
491 {
492 auto &particles_on_cell = particle->particles_in_cell->particles;
493
494 // if the particle has an invalid handle (e.g. because it has
495 // been duplicated before calling this function) do not try
496 // to deallocate its memory again
497 auto handle = particle->get_handle();
498 if (handle != PropertyPool<dim, spacedim>::invalid_handle)
499 property_pool->deregister_particle(handle);
500
501 // need to reduce the cached number before deleting, because the iterator
502 // may be invalid after removing the particle even if only
503 // accessing the cell
504 const auto cell = particle->get_surrounding_cell();
505 const bool owned_cell = cell->is_locally_owned();
506 if (owned_cell)
507 --number_of_locally_owned_particles;
508
509 if (particles_on_cell.size() > 1)
510 {
511 particles_on_cell[particle->particle_index_within_cell] =
512 std::move(particles_on_cell.back());
513 particles_on_cell.resize(particles_on_cell.size() - 1);
514 }
515 else
516 {
517 particles.erase(particle->particles_in_cell);
518 cells_to_particle_cache[cell->active_cell_index()] = particles.end();
519 }
520 }
521
522
523
524 template <int dim, int spacedim>
525 void
526 ParticleHandler<dim, spacedim>::remove_particles(
527 const std::vector<ParticleHandler<dim, spacedim>::particle_iterator>
528 &particles_to_remove)
529 {
530 // We need to remove particles backwards on each cell to keep the particle
531 // iterators alive as we keep removing particles on the same cell. To
532 // ensure that this is safe, we either check if we already have sorted
533 // iterators or if we need to manually sort
534 const auto check_greater = [](const particle_iterator &a,
535 const particle_iterator &b) {
536 return a->particles_in_cell->cell > b->particles_in_cell->cell ||
537 (a->particles_in_cell->cell == b->particles_in_cell->cell &&
538 a->particle_index_within_cell > b->particle_index_within_cell);
539 };
540
541 bool particles_are_sorted = true;
542 auto previous = particles_to_remove.begin();
543 for (auto next = previous; next != particles_to_remove.end(); ++next)
544 {
545 if (check_greater(*previous, *next))
546 {
547 particles_are_sorted = false;
548 break;
549 }
550 previous = next;
551 }
552 if (particles_are_sorted)
553 {
554 // pass along backwards in array
555 for (auto it = particles_to_remove.rbegin();
556 it != particles_to_remove.rend();
557 ++it)
558 remove_particle(*it);
559 }
560 else
561 {
562 std::vector<ParticleHandler<dim, spacedim>::particle_iterator>
563 sorted_particles(particles_to_remove);
564 std::sort(sorted_particles.begin(),
565 sorted_particles.end(),
566 check_greater);
567
568 for (const auto &particle : sorted_particles)
569 remove_particle(particle);
570 }
571
572 update_cached_numbers();
573 }
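
A sketch of the intended call pattern (user-side, illustrative names): gather iterators to all unwanted particles and remove them in a single call, so the cached numbers are recomputed only once.

#include <deal.II/particles/particle_handler.h>

#include <vector>

using namespace dealii;

// Remove every particle whose x-coordinate is negative.
template <int dim>
void remove_left_half(Particles::ParticleHandler<dim> &particle_handler)
{
  std::vector<typename Particles::ParticleHandler<dim>::particle_iterator>
    to_remove;

  for (auto particle = particle_handler.begin();
       particle != particle_handler.end();
       ++particle)
    if (particle->get_location()[0] < 0.)
      to_remove.push_back(particle);

  // A single call; remove_particles() updates the cached numbers itself.
  particle_handler.remove_particles(to_remove);
}
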
574
575
576
577 template <int dim, int spacedim>
578 typename ParticleHandler<dim, spacedim>::particle_iterator
579 ParticleHandler<dim, spacedim>::insert_particle(
580 const Particle<dim, spacedim> & particle,
581 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
582 {
583 return insert_particle(particle.get_location(),
584 particle.get_reference_location(),
585 particle.get_id(),
586 cell,
587 particle.get_properties());
588 }
589
590
591
592 template <int dim, int spacedim>
593 typename ParticleHandler<dim, spacedim>::particle_iterator
594 ParticleHandler<dim, spacedim>::insert_particle(
595 const typename PropertyPool<dim, spacedim>::Handle handle,
596 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
597 {
598 const unsigned int active_cell_index = cell->active_cell_index();
599 typename particle_container::iterator &cache =
600 cells_to_particle_cache[active_cell_index];
601 if (cache == particles.end())
602 {
603 const typename particle_container::iterator insert_position =
604 cell->is_locally_owned() ? particle_container_owned_end() :
605 particle_container_ghost_end();
606 cache = particles.emplace(
607 insert_position,
608 std::vector<typename PropertyPool<dim, spacedim>::Handle>{handle},
609 cell);
610 }
611 else
612 {
613 cache->particles.push_back(handle);
614 Assert(cache->cell == cell, ExcInternalError());
615 }
616 return particle_iterator(cache,
617 *property_pool,
618 cache->particles.size() - 1);
619 }
620
621
622
623 template <int dim, int spacedim>
624 typename ParticleHandler<dim, spacedim>::particle_iterator
625 ParticleHandler<dim, spacedim>::insert_particle(
626 const void *& data,
627 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell)
628 {
629 Assert(triangulation != nullptr, ExcInternalError());
630 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
631 ExcInternalError());
632 Assert(cell->is_locally_owned(),
633 ExcMessage("You tried to insert particles into a cell that is not "
634 "locally owned. This is not supported."));
635
636 particle_iterator particle_it =
637 insert_particle(property_pool->register_particle(), cell);
638
639 data = particle_it->read_particle_data_from_memory(data);
640
641 ++number_of_locally_owned_particles;
642
643 return particle_it;
644 }
645
646
647
648 template <int dim, int spacedim>
649 typename ParticleHandler<dim, spacedim>::particle_iterator
650 ParticleHandler<dim, spacedim>::insert_particle(
651 const Point<spacedim> & position,
652 const Point<dim> & reference_position,
653 const types::particle_index particle_index,
654 const typename Triangulation<dim, spacedim>::active_cell_iterator &cell,
655 const ArrayView<const double> &properties)
656 {
657 Assert(triangulation != nullptr, ExcInternalError());
658 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
659 ExcInternalError());
660
661 Assert(cell->is_locally_owned(),
662 ExcMessage("You tried to insert particles into a cell that is not "
663 "locally owned. This is not supported."));
664
665 particle_iterator particle_it =
666 insert_particle(property_pool->register_particle(), cell);
667
668 particle_it->set_location(position);
669 particle_it->set_reference_location(reference_position);
670 particle_it->set_id(particle_index);
671
672 if (properties.size() != 0)
673 particle_it->set_properties(properties);
674
675 ++number_of_locally_owned_particles;
676
677 return particle_it;
678 }
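
A user-side sketch of calling this overload (the helper name is illustrative; the point must lie in the local mesh, and in parallel computations the particle index has to be chosen globally unique):

#include <deal.II/base/array_view.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/particles/particle_handler.h>

#include <vector>

using namespace dealii;

// Insert one particle, with two properties, at a given physical location.
// Assumes the handler was created with n_properties == 2.
template <int dim>
void insert_one_particle(Particles::ParticleHandler<dim> &particle_handler,
                         const Mapping<dim>              &mapping,
                         const Triangulation<dim>        &tria,
                         const Point<dim>                &location)
{
  // Find the cell containing the point and the corresponding reference
  // (unit cell) coordinates.
  const auto cell_and_reference_position =
    GridTools::find_active_cell_around_point(mapping, tria, location);

  const std::vector<double> properties = {1., 0.};

  particle_handler.insert_particle(
    location,
    cell_and_reference_position.second,
    particle_handler.get_next_free_particle_index(),
    cell_and_reference_position.first,
    make_array_view(properties));
}
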
679
680
681
682 template <int dim, int spacedim>
683 void
684 ParticleHandler<dim, spacedim>::insert_particles(
685 const std::multimap<
686 typename Triangulation<dim, spacedim>::active_cell_iterator,
687 Particle<dim, spacedim>> &new_particles)
688 {
689 reserve(n_locally_owned_particles() + new_particles.size());
690 for (const auto &cell_and_particle : new_particles)
691 insert_particle(cell_and_particle.second, cell_and_particle.first);
692
693 update_cached_numbers();
694 }
695
696
697
698 template <int dim, int spacedim>
699 void
700 ParticleHandler<dim, spacedim>::insert_particles(
701 const std::vector<Point<spacedim>> &positions)
702 {
703 Assert(triangulation != nullptr, ExcInternalError());
704
705 update_cached_numbers();
706 reserve(n_locally_owned_particles() + positions.size());
707
708 // Determine the starting particle index of this process, which
709 // is the highest currently existing particle index plus the sum
710 // of the number of newly generated particles of all
711 // processes with a lower rank if in a parallel computation.
712 const types::particle_index local_next_particle_index =
713 get_next_free_particle_index();
714 types::particle_index local_start_index = 0;
715
716#ifdef DEAL_II_WITH_MPI
717 if (const auto parallel_triangulation =
718 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
719 &*triangulation))
720 {
721 types::particle_index particles_to_add_locally = positions.size();
722 const int ierr = MPI_Scan(&particles_to_add_locally,
723 &local_start_index,
724 1,
725 DEAL_II_PARTICLE_INDEX_MPI_TYPE,
726 MPI_SUM,
727 parallel_triangulation->get_communicator());
728 AssertThrowMPI(ierr);
729 local_start_index -= particles_to_add_locally;
730 }
731#endif
732
733 local_start_index += local_next_particle_index;
734
735 auto point_locations =
736 GridTools::compute_point_locations_try_all(*triangulation_cache,
737 positions);
738
739 auto &cells = std::get<0>(point_locations);
740 auto &local_positions = std::get<1>(point_locations);
741 auto &index_map = std::get<2>(point_locations);
742 auto &missing_points = std::get<3>(point_locations);
743 // If a point was not found, throw an error, as the old
744 // implementation of compute_point_locations would have done
745 AssertThrow(missing_points.size() == 0,
746 VectorTools::ExcPointNotAvailableHere());
747
748 (void)missing_points;
749
750 for (unsigned int i = 0; i < cells.size(); ++i)
751 for (unsigned int p = 0; p < local_positions[i].size(); ++p)
752 insert_particle(positions[index_map[i][p]],
753 local_positions[i][p],
754 local_start_index + index_map[i][p],
755 cells[i]);
756
757 update_cached_numbers();
758 }
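
A short sketch of using this function (illustrative helper name): every process passes only points that lie inside its own locally owned cells, here one particle per cell center.

#include <deal.II/base/point.h>
#include <deal.II/grid/tria.h>
#include <deal.II/particles/particle_handler.h>

#include <vector>

using namespace dealii;

// Seed one particle at the center of every locally owned cell.
template <int dim>
void seed_cell_centers(const Triangulation<dim>        &tria,
                       Particles::ParticleHandler<dim> &particle_handler)
{
  std::vector<Point<dim>> positions;
  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      positions.push_back(cell->center());

  particle_handler.insert_particles(positions);
}
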
759
760
761
762 template <int dim, int spacedim>
763 std::map<unsigned int, IndexSet>
764 ParticleHandler<dim, spacedim>::insert_global_particles(
765 const std::vector<Point<spacedim>> &positions,
766 const std::vector<std::vector<BoundingBox<spacedim>>>
767 & global_bounding_boxes,
768 const std::vector<std::vector<double>> & properties,
769 const std::vector<types::particle_index> &ids)
770 {
771 if (!properties.empty())
772 {
773 AssertDimension(properties.size(), positions.size());
774#ifdef DEBUG
775 for (const auto &p : properties)
776 AssertDimension(p.size(), n_properties_per_particle());
777#endif
778 }
779
780 if (!ids.empty())
781 AssertDimension(ids.size(), positions.size());
782
783 const auto comm = triangulation->get_communicator();
784
785 const auto n_mpi_processes = Utilities::MPI::n_mpi_processes(comm);
786
787 // Compute the global number of properties
788 const auto n_global_properties =
789 Utilities::MPI::sum(properties.size(), comm);
790
791 // Gather the number of points per processor
792 const auto n_particles_per_proc =
793 Utilities::MPI::all_gather(comm, positions.size());
794
795 // Calculate all starting points locally
796 std::vector<unsigned int> particle_start_indices(n_mpi_processes);
797
798 unsigned int particle_start_index = get_next_free_particle_index();
799 for (unsigned int process = 0; process < particle_start_indices.size();
800 ++process)
801 {
802 particle_start_indices[process] = particle_start_index;
803 particle_start_index += n_particles_per_proc[process];
804 }
805
806 // Get all local information
807 const auto cells_positions_and_index_maps =
808 GridTools::distribute_compute_point_locations(*triangulation_cache,
809 positions,
810 global_bounding_boxes);
811
812 // Unpack the information into several vectors:
813 // All cells that contain at least one particle
814 const auto &local_cells_containing_particles =
815 std::get<0>(cells_positions_and_index_maps);
816
817 // The reference position of every particle in the local part of the
818 // triangulation.
819 const auto &local_reference_positions =
820 std::get<1>(cells_positions_and_index_maps);
821 // The original index in the positions vector for each particle in the
822 // local part of the triangulation
823 const auto &original_indices_of_local_particles =
824 std::get<2>(cells_positions_and_index_maps);
825 // The real spatial position of every particle in the local part of the
826 // triangulation.
827 const auto &local_positions = std::get<3>(cells_positions_and_index_maps);
828 // The MPI process that inserted each particle
829 const auto &calling_process_indices =
830 std::get<4>(cells_positions_and_index_maps);
831
832 // Create the map of cpu to indices, indicating who sent us what particle
833 std::map<unsigned int, std::vector<unsigned int>>
834 original_process_to_local_particle_indices_tmp;
835 for (unsigned int i_cell = 0;
836 i_cell < local_cells_containing_particles.size();
837 ++i_cell)
838 {
839 for (unsigned int i_particle = 0;
840 i_particle < local_positions[i_cell].size();
841 ++i_particle)
842 {
843 const unsigned int local_id_on_calling_process =
844 original_indices_of_local_particles[i_cell][i_particle];
845 const unsigned int calling_process =
846 calling_process_indices[i_cell][i_particle];
847
848 original_process_to_local_particle_indices_tmp[calling_process]
849 .push_back(local_id_on_calling_process);
850 }
851 }
852 std::map<unsigned int, IndexSet> original_process_to_local_particle_indices;
853 for (auto &process_and_particle_indices :
854 original_process_to_local_particle_indices_tmp)
855 {
856 const unsigned int calling_process = process_and_particle_indices.first;
857 original_process_to_local_particle_indices.insert(
858 {calling_process, IndexSet(n_particles_per_proc[calling_process])});
859 std::sort(process_and_particle_indices.second.begin(),
860 process_and_particle_indices.second.end());
861 original_process_to_local_particle_indices[calling_process].add_indices(
862 process_and_particle_indices.second.begin(),
863 process_and_particle_indices.second.end());
864 original_process_to_local_particle_indices[calling_process].compress();
865 }
866
867 // A map from mpi process to properties, ordered as in the IndexSet.
868 // Notice that this ordering may be different from the ordering in the
869 // vectors above, since no local ordering is guaranteed by the
870 // distribute_compute_point_locations() call.
871 // This is only filled if n_global_properties is > 0
872 std::map<unsigned int, std::vector<std::vector<double>>>
873 locally_owned_properties_from_other_processes;
874
875 // A map from mpi process to ids, ordered as in the IndexSet.
876 // Notice that this ordering may be different from the ordering in the
877 // vectors above, since no local ordering is guaranteed by the
878 // distribute_compute_point_locations() call.
879 // This is only filled if ids.size() is > 0
880 std::map<unsigned int, std::vector<types::particle_index>>
881 locally_owned_ids_from_other_processes;
882
883 if (n_global_properties > 0 || !ids.empty())
884 {
885 // Gather whom I sent my own particles to, to decide whom to send
886 // the particle properties or the ids
887 auto send_to_cpu = Utilities::MPI::some_to_some(
888 comm, original_process_to_local_particle_indices);
889
890 // Prepare the vector of properties to send
891 if (n_global_properties > 0)
892 {
893 std::map<unsigned int, std::vector<std::vector<double>>>
894 non_locally_owned_properties;
895
896 for (const auto &it : send_to_cpu)
897 {
898 std::vector<std::vector<double>> properties_to_send(
899 it.second.n_elements(),
900 std::vector<double>(n_properties_per_particle()));
901 unsigned int index = 0;
902 for (const auto el : it.second)
903 properties_to_send[index++] = properties[el];
904 non_locally_owned_properties.insert(
905 {it.first, properties_to_send});
906 }
907
908 // Send the non locally owned properties to each mpi process
909 // that needs them
910 locally_owned_properties_from_other_processes =
911 Utilities::MPI::some_to_some(comm, non_locally_owned_properties);
912
913 AssertDimension(
914 locally_owned_properties_from_other_processes.size(),
915 original_process_to_local_particle_indices.size());
916 }
917
918 if (!ids.empty())
919 {
920 std::map<unsigned int, std::vector<types::particle_index>>
921 non_locally_owned_ids;
922 for (const auto &it : send_to_cpu)
923 {
924 std::vector<types::particle_index> ids_to_send(
925 it.second.n_elements());
926 unsigned int index = 0;
927 for (const auto el : it.second)
928 ids_to_send[index++] = ids[el];
929 non_locally_owned_ids.insert({it.first, ids_to_send});
930 }
931
932 // Send the non locally owned ids to each mpi process
933 // that needs them
934 locally_owned_ids_from_other_processes =
935 Utilities::MPI::some_to_some(comm, non_locally_owned_ids);
936
937 AssertDimension(locally_owned_ids_from_other_processes.size(),
938 original_process_to_local_particle_indices.size());
939 }
940 }
941
942 // Now fill up the actual particles
943 for (unsigned int i_cell = 0;
944 i_cell < local_cells_containing_particles.size();
945 ++i_cell)
946 {
947 for (unsigned int i_particle = 0;
948 i_particle < local_positions[i_cell].size();
949 ++i_particle)
950 {
951 const unsigned int local_id_on_calling_process =
952 original_indices_of_local_particles[i_cell][i_particle];
953
954 const unsigned int calling_process =
955 calling_process_indices[i_cell][i_particle];
956
957 const unsigned int index_within_set =
958 original_process_to_local_particle_indices[calling_process]
959 .index_within_set(local_id_on_calling_process);
960
961 const unsigned int particle_id =
962 ids.empty() ?
963 local_id_on_calling_process +
964 particle_start_indices[calling_process] :
965 locally_owned_ids_from_other_processes[calling_process]
966 [index_within_set];
967
968 auto particle_it =
969 insert_particle(local_positions[i_cell][i_particle],
970 local_reference_positions[i_cell][i_particle],
971 particle_id,
972 local_cells_containing_particles[i_cell]);
973
974 if (n_global_properties > 0)
975 {
976 particle_it->set_properties(
977 locally_owned_properties_from_other_processes
978 [calling_process][index_within_set]);
979 }
980 }
981 }
982
983 update_cached_numbers();
984
985 return original_process_to_local_particle_indices;
986 }
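
A sketch of how the global_bounding_boxes argument is usually built in user code (names are illustrative; this mirrors the pattern used in the deal.II tutorials): each process describes its locally owned domain by bounding boxes, and the gathered vector lets insert_global_particles() route every point to the process that owns it.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria_base.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/particles/particle_handler.h>

#include <map>
#include <vector>

using namespace dealii;

template <int dim>
std::map<unsigned int, IndexSet>
insert_points_from_anywhere(Particles::ParticleHandler<dim>        &particle_handler,
                            const parallel::TriangulationBase<dim> &tria,
                            const std::vector<Point<dim>>          &positions)
{
  // Bounding boxes of the locally owned part of the mesh ...
  const auto local_boxes = GridTools::compute_mesh_predicate_bounding_box(
    tria, IteratorFilters::LocallyOwnedCell());

  // ... gathered from all processes.
  const auto global_boxes =
    Utilities::MPI::all_gather(tria.get_communicator(), local_boxes);

  return particle_handler.insert_global_particles(positions, global_boxes);
}
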
987
988
989
990 template <int dim, int spacedim>
991 std::map<unsigned int, IndexSet>
992 ParticleHandler<dim, spacedim>::insert_global_particles(
993 const std::vector<Particle<dim, spacedim>> &particles,
994 const std::vector<std::vector<BoundingBox<spacedim>>>
995 &global_bounding_boxes)
996 {
997 // Store the positions in a vector of points, the ids in a vector of ids,
998 // and the properties, if any, in a vector of vector of properties.
999 std::vector<Point<spacedim>> positions;
1000 std::vector<std::vector<double>> properties;
1001 std::vector<types::particle_index> ids;
1002 positions.resize(particles.size());
1003 ids.resize(particles.size());
1004 if (n_properties_per_particle() > 0)
1005 properties.resize(particles.size(),
1006 std::vector<double>(n_properties_per_particle()));
1007
1008 unsigned int i = 0;
1009 for (const auto &p : particles)
1010 {
1011 positions[i] = p.get_location();
1012 ids[i] = p.get_id();
1013 if (p.has_properties())
1014 properties[i] = {p.get_properties().begin(),
1015 p.get_properties().end()};
1016 ++i;
1017 }
1018
1019 return insert_global_particles(positions,
1020 global_bounding_boxes,
1021 properties,
1022 ids);
1023 }
1024
1025
1026
1027 template <int dim, int spacedim>
1028 types::particle_index
1029 ParticleHandler<dim, spacedim>::n_global_particles() const
1030 {
1031 return global_number_of_particles;
1032 }
1033
1034
1035
1036 template <int dim, int spacedim>
1037 types::particle_index
1038 ParticleHandler<dim, spacedim>::n_global_max_particles_per_cell() const
1039 {
1040 return global_max_particles_per_cell;
1041 }
1042
1043
1044
1045 template <int dim, int spacedim>
1046 types::particle_index
1047 ParticleHandler<dim, spacedim>::n_locally_owned_particles() const
1048 {
1049 return number_of_locally_owned_particles;
1050 }
1051
1052
1053
1054 template <int dim, int spacedim>
1055 unsigned int
1056 ParticleHandler<dim, spacedim>::n_properties_per_particle() const
1057 {
1058 return property_pool->n_properties_per_slot();
1059 }
1060
1061
1062
1063 template <int dim, int spacedim>
1064 types::particle_index
1065 ParticleHandler<dim, spacedim>::get_next_free_particle_index() const
1066 {
1067 return next_free_particle_index;
1068 }
1069
1070
1071
1072 template <int dim, int spacedim>
1073 IndexSet
1074 ParticleHandler<dim, spacedim>::locally_owned_particle_ids() const
1075 {
1076 IndexSet set(get_next_free_particle_index());
1077 std::vector<types::particle_index> indices;
1078 indices.reserve(n_locally_owned_particles());
1079 for (const auto &p : *this)
1080 indices.push_back(p.get_id());
1081 set.add_indices(indices.begin(), indices.end());
1082 set.compress();
1083 return set;
1084 }
1085
1086
1087
1088 template <int dim, int spacedim>
1089 types::particle_index
1090 ParticleHandler<dim, spacedim>::get_max_local_particle_index() const
1091 {
1092 return property_pool->n_slots();
1093 }
1094
1095
1096
1097 template <int dim, int spacedim>
1098 void
1099 ParticleHandler<dim, spacedim>::get_particle_positions(
1100 std::vector<Point<spacedim>> &positions,
1101 const bool add_to_output_vector)
1102 {
1103 // There should be one point per particle to gather
1104 AssertDimension(positions.size(), n_locally_owned_particles());
1105
1106 unsigned int i = 0;
1107 for (auto it = begin(); it != end(); ++it, ++i)
1108 {
1109 if (add_to_output_vector)
1110 positions[i] = positions[i] + it->get_location();
1111 else
1112 positions[i] = it->get_location();
1113 }
1114 }
1115
1116
1117
1118 template <int dim, int spacedim>
1119 void
1120 ParticleHandler<dim, spacedim>::set_particle_positions(
1121 const std::vector<Point<spacedim>> &new_positions,
1122 const bool displace_particles)
1123 {
1124 // There should be one point per particle to fix the new position
1125 AssertDimension(new_positions.size(), n_locally_owned_particles());
1126
1127 unsigned int i = 0;
1128 for (auto it = begin(); it != end(); ++it, ++i)
1129 {
1130 if (displace_particles)
1131 it->set_location(it->get_location() + new_positions[i]);
1132 else
1133 it->set_location(new_positions[i]);
1134 }
1135 sort_particles_into_subdomains_and_cells();
1136 }
1137
1138
1139
1140 template <int dim, int spacedim>
1141 void
1142 ParticleHandler<dim, spacedim>::set_particle_positions(
1143 const Function<spacedim> &function,
1144 const bool displace_particles)
1145 {
1146 // The function should have sufficient components to displace the
1147 // particles
1148 AssertDimension(function.n_components, spacedim);
1149
1150 Vector<double> new_position(spacedim);
1151 for (auto &particle : *this)
1152 {
1153 Point<spacedim> particle_location = particle.get_location();
1154 function.vector_value(particle_location, new_position);
1155 if (displace_particles)
1156 for (unsigned int d = 0; d < spacedim; ++d)
1157 particle_location[d] += new_position[d];
1158 else
1159 for (unsigned int d = 0; d < spacedim; ++d)
1160 particle_location[d] = new_position[d];
1161 particle.set_location(particle_location);
1162 }
1163 sort_particles_into_subdomains_and_cells();
1164 }
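
A sketch (assuming a 2d handler; names are illustrative) of displacing all particles by a constant vector through this interface; any vector-valued Function with spacedim components can be used instead:

#include <deal.II/base/function.h>
#include <deal.II/particles/particle_handler.h>

#include <vector>

using namespace dealii;

// Shift every particle by (0.1, 0); the call also re-sorts the particles
// into their new cells.
void shift_all_particles(Particles::ParticleHandler<2> &particle_handler)
{
  const Functions::ConstantFunction<2> shift(std::vector<double>{0.1, 0.});

  particle_handler.set_particle_positions(shift,
                                          /*displace_particles=*/true);
}
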
1165
1166
1167
1168 template <int dim, int spacedim>
1169 PropertyPool<dim, spacedim> &
1170 ParticleHandler<dim, spacedim>::get_property_pool() const
1171 {
1172 return *property_pool;
1173 }
1174
1175
1176
1177 namespace
1178 {
1179 /**
1180 * Compare the particle associations @p a and @p b: this function is used
1181 * as comparison argument to std::sort in order to visit the entries of
1182 * @p center_directions in the order of decreasing alignment (scalar
1183 * product) with @p particle_direction. It assumes that
1184 * @p particle_direction and the entries of @p center_directions have
1185 * been normalized to unit length before this function is called.
1186 */
1187
1188 template <int dim>
1189 bool
1190 compare_particle_association(
1191 const unsigned int a,
1192 const unsigned int b,
1193 const Tensor<1, dim> & particle_direction,
1194 const std::vector<Tensor<1, dim>> &center_directions)
1195 {
1196 const double scalar_product_a = center_directions[a] * particle_direction;
1197 const double scalar_product_b = center_directions[b] * particle_direction;
1198
1199 // The function is supposed to return whether a comes before b. We are
1200 // looking for the alignment of particle direction and center direction,
1201 // therefore return true if the scalar product of a is larger.
1202 return (scalar_product_a > scalar_product_b);
1203 }
1204 } // namespace
1205
1206
1207
1208 template <int dim, int spacedim>
1209 void
1210 ParticleHandler<dim, spacedim>::sort_particles_into_subdomains_and_cells()
1211 {
1212 Assert(triangulation != nullptr, ExcInternalError());
1213 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
1214 ExcInternalError());
1215
1216 // TODO: The current algorithm only works for particles that are in
1217 // the local domain or in ghost cells, because it only knows the
1218 // subdomain_id of ghost cells, but not of artificial cells. This
1219 // can be extended using the distributed version of compute point
1220 // locations.
1221 // TODO: Extend this function to allow keeping particles on other
1222 // processes around (with an invalid cell).
1223
1224 std::vector<particle_iterator> particles_out_of_cell;
1225
1226 // Reserve some space for particles that need sorting to avoid frequent
1227 // re-allocation. Guess 25% of particles need sorting. Balance memory
1228 // overhead and performance.
1229 particles_out_of_cell.reserve(n_locally_owned_particles() / 4);
1230
1231 // Now update the reference locations of the moved particles
1232 std::vector<Point<spacedim>> real_locations;
1233 std::vector<Point<dim>> reference_locations;
1234 real_locations.reserve(global_max_particles_per_cell);
1235 reference_locations.reserve(global_max_particles_per_cell);
1236
1237 for (const auto &cell : triangulation->active_cell_iterators())
1238 {
1239 // Particles can be inserted into arbitrary cells, e.g. if their cell is
1240 // not known. However, for artificial cells we can not evaluate
1241 // the reference position of particles. Do not sort particles that are
1242 // not locally owned, because they will be sorted by the process that
1243 // owns them.
1244 if (cell->is_locally_owned() == false)
1245 {
1246 continue;
1247 }
1248
1249 const unsigned int n_pic = n_particles_in_cell(cell);
1250 auto pic = particles_in_cell(cell);
1251
1252 real_locations.clear();
1253 for (const auto &particle : pic)
1254 real_locations.push_back(particle.get_location());
1255
1256 reference_locations.resize(n_pic);
1257 mapping->transform_points_real_to_unit_cell(cell,
1258 real_locations,
1259 reference_locations);
1260
1261 auto particle = pic.begin();
1262 for (const auto &p_unit : reference_locations)
1263 {
1264 if (numbers::is_finite(p_unit[0]) &&
1265 GeometryInfo<dim>::is_inside_unit_cell(p_unit))
1266 particle->set_reference_location(p_unit);
1267 else
1268 particles_out_of_cell.push_back(particle);
1269
1270 ++particle;
1271 }
1272 }
1273
1274 // There are three reasons why a particle is not in its old cell:
1275 // It moved to another cell, to another subdomain or it left the mesh.
1276 // Particles that moved to another cell are updated and moved inside the
1277 // particles vector, particles that moved to another domain are
1278 // collected in the moved_particles_domain vector. Particles that left
1279 // the mesh completely are ignored and removed.
1280 std::map<types::subdomain_id, std::vector<particle_iterator>>
1281 moved_particles;
1282 std::map<
1283 types::subdomain_id,
1284 std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>>
1285 moved_cells;
1286
1287 // We do not know exactly how many particles are lost, exchanged between
1288 // domains, or remain on this process. Therefore we pre-allocate
1289 // approximate sizes for these vectors. If more space is needed an
1290 // automatic and relatively fast (compared to other parts of this
1291 // algorithm) re-allocation will happen.
1292 using vector_size = typename std::vector<particle_iterator>::size_type;
1293
1294 std::set<types::subdomain_id> ghost_owners;
1295 if (const auto parallel_triangulation =
1296 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1297 &*triangulation))
1298 ghost_owners = parallel_triangulation->ghost_owners();
1299
1300 // Reserve some space for particles that need communication to avoid
1301 // frequent re-allocation. Guess 25% of particles out of their old cell need
1302 // communication. Balance memory overhead and performance.
1303 for (const auto &ghost_owner : ghost_owners)
1304 moved_particles[ghost_owner].reserve(particles_out_of_cell.size() / 4);
1305 for (const auto &ghost_owner : ghost_owners)
1306 moved_cells[ghost_owner].reserve(particles_out_of_cell.size() / 4);
1307
1308 {
1309 // Create a map from vertices to adjacent cells using grid cache
1310 const std::vector<
1311 std::set<typename Triangulation<dim, spacedim>::active_cell_iterator>>
1312 &vertex_to_cells = triangulation_cache->get_vertex_to_cell_map();
1313
1314 // Create a corresponding map of vectors from vertex to cell center
1315 // using grid cache
1316 const std::vector<std::vector<Tensor<1, spacedim>>>
1317 &vertex_to_cell_centers =
1318 triangulation_cache->get_vertex_to_cell_centers_directions();
1319
1320 std::vector<unsigned int> neighbor_permutation;
1321
1322 // Reuse these vectors below, but only with a single element.
1323 // Avoid resizing for every particle.
1324 Point<dim> invalid_reference_point;
1325 Point<spacedim> invalid_point;
1326 invalid_reference_point[0] = std::numeric_limits<double>::infinity();
1327 invalid_point[0] = std::numeric_limits<double>::infinity();
1328 reference_locations.resize(1, invalid_reference_point);
1329 real_locations.resize(1, invalid_point);
1330
1331 // Find the cells that the particles moved to.
1332 for (auto &out_particle : particles_out_of_cell)
1333 {
1334 // make a copy of the current cell, since we will modify the
1335 // variable current_cell below, but we need the original in
1336 // the case the particle is not found
1337 auto current_cell = out_particle->get_surrounding_cell();
1338
1339 real_locations[0] = out_particle->get_location();
1340
1341 // Record if the new cell was found
1342 bool found_cell = false;
1343
1344 // Check if the particle is in one of the old cell's neighbors
1345 // that are adjacent to the closest vertex
1346 const unsigned int closest_vertex =
1347 GridTools::find_closest_vertex_of_cell<dim, spacedim>(
1348 current_cell, out_particle->get_location(), *mapping);
1349 Tensor<1, spacedim> vertex_to_particle =
1350 out_particle->get_location() - current_cell->vertex(closest_vertex);
1351 vertex_to_particle /= vertex_to_particle.norm();
1352
1353 const unsigned int closest_vertex_index =
1354 current_cell->vertex_index(closest_vertex);
1355 const unsigned int n_neighbor_cells =
1356 vertex_to_cells[closest_vertex_index].size();
1357
1358 neighbor_permutation.resize(n_neighbor_cells);
1359 for (unsigned int i = 0; i < n_neighbor_cells; ++i)
1360 neighbor_permutation[i] = i;
1361
1362 const auto &cell_centers =
1363 vertex_to_cell_centers[closest_vertex_index];
1364 std::sort(neighbor_permutation.begin(),
1365 neighbor_permutation.end(),
1366 [&vertex_to_particle, &cell_centers](const unsigned int a,
1367 const unsigned int b) {
1368 return compare_particle_association(a,
1369 b,
1370 vertex_to_particle,
1371 cell_centers);
1372 });
1373
1374 // Search all of the cells adjacent to the closest vertex of the
1375 // previous cell. Most likely we will find the particle in them.
1376 for (unsigned int i = 0; i < n_neighbor_cells; ++i)
1377 {
1378 typename std::set<typename Triangulation<dim, spacedim>::
1379 active_cell_iterator>::const_iterator cell =
1380 vertex_to_cells[closest_vertex_index].begin();
1381
1382 std::advance(cell, neighbor_permutation[i]);
1383 mapping->transform_points_real_to_unit_cell(*cell,
1384 real_locations,
1385 reference_locations);
1386
1387 if (GeometryInfo<dim>::is_inside_unit_cell(
1388 reference_locations[0]))
1389 {
1390 current_cell = *cell;
1391 found_cell = true;
1392 break;
1393 }
1394 }
1395
1396 if (!found_cell)
1397 {
1398 // For some clang-based compilers and boost versions the call to
1399 // RTree::query doesn't compile. We use a slower implementation as
1400 // workaround.
1401 // This is fixed in boost in
1402 // https://github.com/boostorg/numeric_conversion/commit/50a1eae942effb0a9b90724323ef8f2a67e7984a
1403#if defined(DEAL_II_WITH_BOOST_BUNDLED) || \
1404 !(defined(__clang_major__) && __clang_major__ >= 16) || \
1405 BOOST_VERSION >= 108100
1406 // The particle is not in a neighbor of the old cell.
1407 // Look for the new cell in the whole local domain.
1408 // This case is rare.
1409 std::vector<std::pair<Point<spacedim>, unsigned int>>
1410 closest_vertex_in_domain;
1411 triangulation_cache->get_used_vertices_rtree().query(
1412 boost::geometry::index::nearest(out_particle->get_location(),
1413 1),
1414 std::back_inserter(closest_vertex_in_domain));
1415
1416 // We should have one and only one result
1417 AssertDimension(closest_vertex_in_domain.size(), 1);
1418 const unsigned int closest_vertex_index_in_domain =
1419 closest_vertex_in_domain[0].second;
1420#else
1421 const unsigned int closest_vertex_index_in_domain =
1422 GridTools::find_closest_vertex(*mapping,
1423 *triangulation,
1424 out_particle->get_location());
1425#endif
1426
1427 // Search all of the cells adjacent to the closest vertex of the
1428 // domain. Most likely we will find the particle in them.
1429 for (const auto &cell :
1430 vertex_to_cells[closest_vertex_index_in_domain])
1431 {
1432 mapping->transform_points_real_to_unit_cell(
1433 cell, real_locations, reference_locations);
1434
1435 if (GeometryInfo<dim>::is_inside_unit_cell(
1436 reference_locations[0]))
1437 {
1438 current_cell = cell;
1439 found_cell = true;
1440 break;
1441 }
1442 }
1443 }
1444
1445 if (!found_cell)
1446 {
1447 // We can find no cell for this particle. It has left the
1448 // domain due to an integration error or an open boundary.
1449 // Signal the loss and move on.
1450 signals.particle_lost(out_particle,
1451 out_particle->get_surrounding_cell());
1452 continue;
1453 }
1454
1455 // If we are here, we found a cell and reference position for this
1456 // particle
1457 out_particle->set_reference_location(reference_locations[0]);
1458
1459 // Reinsert the particle into our domain if we own its cell.
1460 // Mark it for MPI transfer otherwise
1461 if (current_cell->is_locally_owned())
1462 {
1463 typename PropertyPool<dim, spacedim>::Handle &old =
1464 out_particle->particles_in_cell
1465 ->particles[out_particle->particle_index_within_cell];
1466
1467 // Avoid deallocating the memory of this particle
1468 const auto old_value = old;
1469 old = PropertyPool<dim, spacedim>::invalid_handle;
1470
1471 // Allocate particle with the old handle
1472 insert_particle(old_value, current_cell);
1473 }
1474 else
1475 {
1476 moved_particles[current_cell->subdomain_id()].push_back(
1477 out_particle);
1478 moved_cells[current_cell->subdomain_id()].push_back(current_cell);
1479 }
1480 }
1481 }
1482
1483 // Exchange particles between processors if we have more than one process
1484#ifdef DEAL_II_WITH_MPI
1485 if (const auto parallel_triangulation =
1486 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1487 &*triangulation))
1488 {
1489 if (dealii::Utilities::MPI::n_mpi_processes(
1490 parallel_triangulation->get_communicator()) > 1)
1491 send_recv_particles(moved_particles, moved_cells);
1492 }
1493#endif
1494
1495 // remove_particles also calls update_cached_numbers()
1496 remove_particles(particles_out_of_cell);
1497
1498 // now make sure particle data is sorted in order of iteration
1499 std::vector<typename PropertyPool<dim, spacedim>::Handle> unsorted_handles;
1500 unsorted_handles.reserve(property_pool->n_registered_slots());
1501
1502 typename PropertyPool<dim, spacedim>::Handle sorted_handle = 0;
1503 for (auto &particles_in_cell : particles)
1504 for (auto &particle : particles_in_cell.particles)
1505 {
1506 unsorted_handles.push_back(particle);
1507 particle = sorted_handle++;
1508 }
1509
1510 property_pool->sort_memory_slots(unsorted_handles);
1511
1512 }
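
The typical caller of this function is an explicit advection loop like the following sketch (user code, illustrative names): move the particles first, then hand the bookkeeping back to the ParticleHandler.

#include <deal.II/base/function.h>
#include <deal.II/base/point.h>
#include <deal.II/lac/vector.h>
#include <deal.II/particles/particle_handler.h>

using namespace dealii;

// One forward Euler transport step with a user-provided velocity field
// (a Function with dim components).
template <int dim>
void advect_particles(Particles::ParticleHandler<dim> &particle_handler,
                      const Function<dim>             &velocity,
                      const double                     dt)
{
  Vector<double> velocity_value(dim);
  for (auto &particle : particle_handler)
    {
      Point<dim> location = particle.get_location();
      velocity.vector_value(location, velocity_value);
      for (unsigned int d = 0; d < dim; ++d)
        location[d] += dt * velocity_value[d];
      particle.set_location(location);
    }

  // Re-associate every particle with its (possibly new) cell and owner.
  particle_handler.sort_particles_into_subdomains_and_cells();
}
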
1513
1514
1515
1516 template <int dim, int spacedim>
1517 void
1518 ParticleHandler<dim, spacedim>::exchange_ghost_particles(
1519 const bool enable_cache)
1520 {
1521 // Nothing to do in serial computations
1522 const auto parallel_triangulation =
1523 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1524 &*triangulation);
1525 if (parallel_triangulation != nullptr)
1526 {
1527 if (dealii::Utilities::MPI::n_mpi_processes(
1528 parallel_triangulation->get_communicator()) == 1)
1529 return;
1530 }
1531 else
1532 return;
1533
1534#ifndef DEAL_II_WITH_MPI
1535 (void)enable_cache;
1536#else
1537 // Clear ghost particles and their properties
1538 for (const auto &cell : triangulation->active_cell_iterators())
1539 if (cell->is_ghost() &&
1540 cells_to_particle_cache[cell->active_cell_index()] != particles.end())
1541 {
1542 Assert(cells_to_particle_cache[cell->active_cell_index()]->cell ==
1543 cell,
1544 ExcInternalError());
1545 // Clear particle properties
1546 for (auto &ghost_particle :
1547 cells_to_particle_cache[cell->active_cell_index()]->particles)
1548 property_pool->deregister_particle(ghost_particle);
1549
1550 // Clear particles themselves
1551 particles.erase(cells_to_particle_cache[cell->active_cell_index()]);
1552 cells_to_particle_cache[cell->active_cell_index()] = particles.end();
1553 }
1554
1555 // Clear ghost particles cache and invalidate it
1556 ghost_particles_cache.ghost_particles_by_domain.clear();
1557 ghost_particles_cache.valid = false;
1558
1559 // In the case of a parallel simulation with periodic boundary conditions
1560 // the vertices associated with periodic boundaries are not directly
1561 // connected to the ghost cells but they are connected to the ghost cells
1562 // through their coinciding vertices. We gather this information using the
1563 // vertices_with_ghost_neighbors map
1564 const std::map<unsigned int, std::set<types::subdomain_id>>
1565 &vertices_with_ghost_neighbors =
1566 triangulation_cache->get_vertices_with_ghost_neighbors();
1567
1568 const std::set<types::subdomain_id> ghost_owners =
1569 parallel_triangulation->ghost_owners();
1570 for (const auto ghost_owner : ghost_owners)
1571 ghost_particles_cache.ghost_particles_by_domain[ghost_owner].reserve(
1572 n_locally_owned_particles() / 4);
1573
1574 const std::vector<std::set<unsigned int>> vertex_to_neighbor_subdomain =
1575 triangulation_cache->get_vertex_to_neighbor_subdomain();
1576
1577 for (const auto &cell : triangulation->active_cell_iterators())
1578 {
1579 if (cell->is_locally_owned())
1580 {
1581 std::set<unsigned int> cell_to_neighbor_subdomain;
1582 for (const unsigned int v : cell->vertex_indices())
1583 {
1584 const auto vertex_ghost_neighbors =
1585 vertices_with_ghost_neighbors.find(cell->vertex_index(v));
1586 if (vertex_ghost_neighbors !=
1587 vertices_with_ghost_neighbors.end())
1588 {
1589 cell_to_neighbor_subdomain.insert(
1590 vertex_ghost_neighbors->second.begin(),
1591 vertex_ghost_neighbors->second.end());
1592 }
1593 }
1594
1595 if (cell_to_neighbor_subdomain.size() > 0)
1596 {
1597 const particle_iterator_range particle_range =
1598 particles_in_cell(cell);
1599
1600 for (const auto domain : cell_to_neighbor_subdomain)
1601 {
1602 for (typename particle_iterator_range::iterator particle =
1603 particle_range.begin();
1604 particle != particle_range.end();
1605 ++particle)
1606 ghost_particles_cache.ghost_particles_by_domain[domain]
1607 .push_back(particle);
1608 }
1609 }
1610 }
1611 }
1612
1613 send_recv_particles(
1614 ghost_particles_cache.ghost_particles_by_domain,
1615 std::map<
1616 types::subdomain_id,
1617 std::vector<
1618 typename Triangulation<dim, spacedim>::active_cell_iterator>>(),
1619 enable_cache);
1620#endif
1621 }
1622
1623
1624
1625 template <int dim, int spacedim>
1626 void
1627 ParticleHandler<dim, spacedim>::update_ghost_particles()
1628 {
1629 // Nothing to do in serial computations
1630 const auto parallel_triangulation =
1631 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1632 &*triangulation);
1633 if (parallel_triangulation == nullptr ||
1634 dealii::Utilities::MPI::n_mpi_processes(
1635 parallel_triangulation->get_communicator()) == 1)
1636 {
1637 return;
1638 }
1639
1640
1641#ifdef DEAL_II_WITH_MPI
1642 // First clear the current ghost_particle information
1643 // ghost_particles.clear();
1644 Assert(ghost_particles_cache.valid,
1645 ExcMessage(
1646 "Ghost particles cannot be updated if they first have not been "
1647 "exchanged at least once with the cache enabled"));
1648
1649
1650 send_recv_particles_properties_and_location(
1651 ghost_particles_cache.ghost_particles_by_domain);
1652#endif
1653 }
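
How the two ghost-related calls fit together in user code (a sketch, illustrative names): build the ghost layer once per sorting with the cache enabled, then refresh only locations and properties with update_ghost_particles() as long as no particle changes its cell.

#include <deal.II/particles/particle_handler.h>

using namespace dealii;

template <int dim>
void refresh_ghost_layer(Particles::ParticleHandler<dim> &particle_handler,
                         const bool particles_were_resorted)
{
  if (particles_were_resorted)
    // Full exchange; enable the cache so that later updates are cheap.
    particle_handler.exchange_ghost_particles(/*enable_cache=*/true);
  else
    // Only ship updated locations and properties of the cached ghosts.
    particle_handler.update_ghost_particles();
}
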
1654
1655
1656
1657#ifdef DEAL_II_WITH_MPI
1658 template <int dim, int spacedim>
1659 void
1660 ParticleHandler<dim, spacedim>::send_recv_particles(
1661 const std::map<types::subdomain_id, std::vector<particle_iterator>>
1662 &particles_to_send,
1663 const std::map<
1664 types::subdomain_id,
1665 std::vector<typename Triangulation<dim, spacedim>::active_cell_iterator>>
1666 & send_cells,
1667 const bool build_cache)
1668 {
1669 Assert(triangulation != nullptr, ExcInternalError());
1670 Assert(cells_to_particle_cache.size() == triangulation->n_active_cells(),
1671 ExcInternalError());
1672
1673 ghost_particles_cache.valid = build_cache;
1674
1675 const auto parallel_triangulation =
1676 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1677 &*triangulation);
1678 Assert(parallel_triangulation,
1679 ExcMessage("This function is only implemented for "
1680 "parallel::TriangulationBase objects."));
1681
1682 // Determine the communication pattern
1683 const std::set<types::subdomain_id> ghost_owners =
1684 parallel_triangulation->ghost_owners();
1685 const std::vector<types::subdomain_id> neighbors(ghost_owners.begin(),
1686 ghost_owners.end());
1687 const unsigned int n_neighbors = neighbors.size();
1688
1689 if (send_cells.size() != 0)
1690 Assert(particles_to_send.size() == send_cells.size(), ExcInternalError());
1691
1692 // If we do not know the subdomain this particle needs to be sent to,
1693 // throw an error
1694 Assert(particles_to_send.find(numbers::artificial_subdomain_id) ==
1695 particles_to_send.end(),
1696 ExcInternalError());
1697
1698 // TODO: Implement the shipping of particles to processes that are not
1699 // ghost owners of the local domain
1700 for (auto send_particles = particles_to_send.begin();
1701 send_particles != particles_to_send.end();
1702 ++send_particles)
1703 Assert(ghost_owners.find(send_particles->first) != ghost_owners.end(),
1704 ExcNotImplemented());
1705
1706 std::size_t n_send_particles = 0;
1707 for (auto send_particles = particles_to_send.begin();
1708 send_particles != particles_to_send.end();
1709 ++send_particles)
1710 n_send_particles += send_particles->second.size();
1711
1712 const unsigned int cellid_size = sizeof(CellId::binary_type);
1713
1714 // Containers for the amount and offsets of data we will send
1715 // to other processors and the data itself.
1716 std::vector<unsigned int> n_send_data(n_neighbors, 0);
1717 std::vector<unsigned int> send_offsets(n_neighbors, 0);
1718 std::vector<char> send_data;
1719
1720 Particle<dim, spacedim> test_particle;
1721 test_particle.set_property_pool(*property_pool);
1722
1723 const unsigned int individual_particle_data_size =
1724 test_particle.serialized_size_in_bytes() +
1725 (size_callback ? size_callback() : 0);
1726
1727 const unsigned int individual_total_particle_data_size =
1728 individual_particle_data_size + cellid_size;
1729
1730 // Only serialize things if there are particles to be sent.
1731 // We can not return early even if no particles
1732 // are sent, because we might receive particles from other processes
1733 if (n_send_particles > 0)
1734 {
1735 // Allocate space for sending particle data
1736 send_data.resize(n_send_particles *
1737 individual_total_particle_data_size);
1738
1739 void *data = static_cast<void *>(&send_data.front());
1740
1741 // Serialize the data sorted by receiving process
1742 for (unsigned int i = 0; i < n_neighbors; ++i)
1743 {
1744 send_offsets[i] = reinterpret_cast<std::size_t>(data) -
1745 reinterpret_cast<std::size_t>(&send_data.front());
1746
1747 const unsigned int n_particles_to_send =
1748 particles_to_send.at(neighbors[i]).size();
1749
1750 Assert(static_cast<std::size_t>(n_particles_to_send) *
1751 individual_total_particle_data_size ==
1752 static_cast<std::size_t>(
1753 n_particles_to_send *
1754 individual_total_particle_data_size),
1755 ExcMessage("Overflow when trying to send particle "
1756 "data"));
1757
1758 for (unsigned int j = 0; j < n_particles_to_send; ++j)
1759 {
1760 // If no target cells are given, use the iterator
1761 // information
1762 typename Triangulation<dim, spacedim>::active_cell_iterator
1763 cell;
1764 if (send_cells.size() == 0)
1765 cell = particles_to_send.at(neighbors[i])[j]
1766 ->get_surrounding_cell();
1767 else
1768 cell = send_cells.at(neighbors[i])[j];
1769
1770 const CellId::binary_type cellid =
1771 cell->id().template to_binary<dim>();
1772 memcpy(data, &cellid, cellid_size);
1773 data = static_cast<char *>(data) + cellid_size;
1774
1775 data = particles_to_send.at(neighbors[i])[j]
1776 ->write_particle_data_to_memory(data);
1777 if (store_callback)
1778 data =
1779 store_callback(particles_to_send.at(neighbors[i])[j], data);
1780 }
1781 n_send_data[i] = n_particles_to_send;
1782 }
1783 }
1784
1785 // Containers for the data we will receive from other processors
1786 std::vector<unsigned int> n_recv_data(n_neighbors);
1787 std::vector<unsigned int> recv_offsets(n_neighbors);
1788
1789 {
1790 const int mpi_tag = Utilities::MPI::internal::Tags::
1791 particle_handler_send_recv_particles_setup;
1792
1793 std::vector<MPI_Request> n_requests(2 * n_neighbors);
1794 for (unsigned int i = 0; i < n_neighbors; ++i)
1795 {
1796 const int ierr = MPI_Irecv(&(n_recv_data[i]),
1797 1,
1798 MPI_UNSIGNED,
1799 neighbors[i],
1800 mpi_tag,
1801 parallel_triangulation->get_communicator(),
1802 &(n_requests[2 * i]));
1803 AssertThrowMPI(ierr);
1804 }
1805 for (unsigned int i = 0; i < n_neighbors; ++i)
1806 {
1807 const int ierr = MPI_Isend(&(n_send_data[i]),
1808 1,
1809 MPI_UNSIGNED,
1810 neighbors[i],
1811 mpi_tag,
1812 parallel_triangulation->get_communicator(),
1813 &(n_requests[2 * i + 1]));
1814 AssertThrowMPI(ierr);
1815 }
1816 const int ierr =
1817 MPI_Waitall(2 * n_neighbors, n_requests.data(), MPI_STATUSES_IGNORE);
1818 AssertThrowMPI(ierr);
1819 }
1820
1821 // Determine how many particles and data we will receive
1822 unsigned int total_recv_data = 0;
1823 for (unsigned int neighbor_id = 0; neighbor_id < n_neighbors; ++neighbor_id)
1824 {
1825 recv_offsets[neighbor_id] = total_recv_data;
1826 total_recv_data +=
1827 n_recv_data[neighbor_id] * individual_total_particle_data_size;
1828 }
1829
1830 // Set up the space for the received particle data
1831 std::vector<char> recv_data(total_recv_data);
1832
1833 // Exchange the particle data between domains
1834 {
1835 std::vector<MPI_Request> requests(2 * n_neighbors);
1836 unsigned int send_ops = 0;
1837 unsigned int recv_ops = 0;
1838
1839 const int mpi_tag = Utilities::MPI::internal::Tags::
1840 particle_handler_send_recv_particles_send;
1841
1842 for (unsigned int i = 0; i < n_neighbors; ++i)
1843 if (n_recv_data[i] > 0)
1844 {
1845 const int ierr =
1846 MPI_Irecv(&(recv_data[recv_offsets[i]]),
1847 n_recv_data[i] * individual_total_particle_data_size,
1848 MPI_CHAR,
1849 neighbors[i],
1850 mpi_tag,
1851 parallel_triangulation->get_communicator(),
1852 &(requests[send_ops]));
1853 AssertThrowMPI(ierr);
1854 send_ops++;
1855 }
1856
1857 for (unsigned int i = 0; i < n_neighbors; ++i)
1858 if (n_send_data[i] > 0)
1859 {
1860 const int ierr =
1861 MPI_Isend(&(send_data[send_offsets[i]]),
1862 n_send_data[i] * individual_total_particle_data_size,
1863 MPI_CHAR,
1864 neighbors[i],
1865 mpi_tag,
1866 parallel_triangulation->get_communicator(),
1867 &(requests[send_ops + recv_ops]));
1868 AssertThrowMPI(ierr);
1869 recv_ops++;
1870 }
1871 const int ierr =
1872 MPI_Waitall(send_ops + recv_ops, requests.data(), MPI_STATUSES_IGNORE);
1873 AssertThrowMPI(ierr);
1874 }
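// Note (editorial): receive and send requests share a single vector; the
// counters send_ops and recv_ops only record how many requests have been
// posted so far (their names are swapped relative to the operation they
// count), and MPI_Waitall above waits on the combined total.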
1875
1876 // Put the received particles into the domain if they are in the
1877 // triangulation
1878 const void *recv_data_it = static_cast<const void *>(recv_data.data());
1879
1880 // Store the particle iterators in the cache
1881 auto &ghost_particles_iterators =
1882 ghost_particles_cache.ghost_particles_iterators;
1883
1884 if (build_cache)
1885 {
1886 ghost_particles_iterators.clear();
1887
1888 auto &send_pointers_particles = ghost_particles_cache.send_pointers;
1889 send_pointers_particles.assign(n_neighbors + 1, 0);
1890
1891 for (unsigned int i = 0; i < n_neighbors; ++i)
1892 send_pointers_particles[i + 1] =
1893 send_pointers_particles[i] +
1894 n_send_data[i] * individual_particle_data_size;
1895
1896 auto &recv_pointers_particles = ghost_particles_cache.recv_pointers;
1897 recv_pointers_particles.assign(n_neighbors + 1, 0);
1898
1899 for (unsigned int i = 0; i < n_neighbors; ++i)
1900 recv_pointers_particles[i + 1] =
1901 recv_pointers_particles[i] +
1902 n_recv_data[i] * individual_particle_data_size;
1903
1904 ghost_particles_cache.neighbors = neighbors;
1905
1906 ghost_particles_cache.send_data.resize(
1907 ghost_particles_cache.send_pointers.back());
1908 ghost_particles_cache.recv_data.resize(
1909 ghost_particles_cache.recv_pointers.back());
1910 }
1911
1912 while (reinterpret_cast<std::size_t>(recv_data_it) -
1913 reinterpret_cast<std::size_t>(recv_data.data()) <
1914 total_recv_data)
1915 {
1916 CellId::binary_type binary_cellid;
1917 memcpy(&binary_cellid, recv_data_it, cellid_size);
1918 const CellId id(binary_cellid);
1919 recv_data_it = static_cast<const char *>(recv_data_it) + cellid_size;
1920
1921 const typename Triangulation<dim, spacedim>::cell_iterator cell =
1922 triangulation->create_cell_iterator(id);
1923
1924 insert_particle(property_pool->register_particle(), cell);
1925 const typename particle_container::iterator &cache =
1926 cells_to_particle_cache[cell->active_cell_index()];
1927 Assert(cache->cell == cell, ExcInternalError());
1928
1929 particle_iterator particle_it(cache,
1930 *property_pool,
1931 cache->particles.size() - 1);
1932
1933 recv_data_it =
1934 particle_it->read_particle_data_from_memory(recv_data_it);
1935
1936 if (load_callback)
1937 recv_data_it = load_callback(particle_it, recv_data_it);
1938
1939 if (build_cache) // TODO: is this safe?
1940 ghost_particles_iterators.push_back(particle_it);
1941 }
1942
1943 AssertThrow(recv_data_it == recv_data.data() + recv_data.size(),
1944 ExcMessage(
1945 "The amount of data that was read into new particles "
1946 "does not match the amount of data sent around."));
1947 }
1948#endif
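// Usage sketch (editorial, not part of particle_handler.cc): user code
// normally does not call send_recv_particles() directly; ghost exchange is
// triggered through the public interface, e.g.
//
//   // after sort_particles_into_subdomains_and_cells():
//   particle_handler.exchange_ghost_particles(/*enable_ghost_cache=*/true);
//
// Passing true builds the ghost-particle cache that the faster update path
// implemented below relies on.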
1949
1950
1951
1952#ifdef DEAL_II_WITH_MPI
1953 template <int dim, int spacedim>
1954 void
1955 ParticleHandler<dim, spacedim>::send_recv_particles_properties_and_location(
1956 const std::map<types::subdomain_id, std::vector<particle_iterator>>
1957 &particles_to_send)
1958 {
1959 const auto parallel_triangulation =
1960 dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
1961 &*triangulation);
1962 Assert(
1963 parallel_triangulation,
1964 ExcMessage(
1965 "This function is only implemented for parallel::TriangulationBase "
1966 "objects."));
1967
1968 const auto &neighbors = ghost_particles_cache.neighbors;
1969 const auto &send_pointers = ghost_particles_cache.send_pointers;
1970 const auto &recv_pointers = ghost_particles_cache.recv_pointers;
1971
1972 std::vector<char> &send_data = ghost_particles_cache.send_data;
1973
1974 // Fill data to send
1975 if (send_pointers.back() > 0)
1976 {
1977 void *data = static_cast<void *>(&send_data.front());
1978
1979 // Serialize the data sorted by receiving process
1980 for (const auto i : neighbors)
1981 for (const auto &p : particles_to_send.at(i))
1982 {
1983 data = p->write_particle_data_to_memory(data);
1984 if (store_callback)
1985 data = store_callback(p, data);
1986 }
1987 }
1988
1989 std::vector<char> &recv_data = ghost_particles_cache.recv_data;
1990
1991 // Exchange the particle data between domains
1992 {
1993 std::vector<MPI_Request> requests(2 * neighbors.size());
1994 unsigned int send_ops = 0;
1995 unsigned int recv_ops = 0;
1996
1997 const int mpi_tag = Utilities::MPI::internal::Tags::
1998 particle_handler_send_recv_particles_send;
1999
2000 for (unsigned int i = 0; i < neighbors.size(); ++i)
2001 if ((recv_pointers[i + 1] - recv_pointers[i]) > 0)
2002 {
2003 const int ierr =
2004 MPI_Irecv(recv_data.data() + recv_pointers[i],
2005 recv_pointers[i + 1] - recv_pointers[i],
2006 MPI_CHAR,
2007 neighbors[i],
2008 mpi_tag,
2009 parallel_triangulation->get_communicator(),
2010 &(requests[send_ops]));
2011 AssertThrowMPI(ierr);
2012 send_ops++;
2013 }
2014
2015 for (unsigned int i = 0; i < neighbors.size(); ++i)
2016 if ((send_pointers[i + 1] - send_pointers[i]) > 0)
2017 {
2018 const int ierr =
2019 MPI_Isend(send_data.data() + send_pointers[i],
2020 send_pointers[i + 1] - send_pointers[i],
2021 MPI_CHAR,
2022 neighbors[i],
2023 mpi_tag,
2024 parallel_triangulation->get_communicator(),
2025 &(requests[send_ops + recv_ops]));
2026 AssertThrowMPI(ierr);
2027 recv_ops++;
2028 }
2029 const int ierr =
2030 MPI_Waitall(send_ops + recv_ops, requests.data(), MPI_STATUSES_IGNORE);
2031 AssertThrowMPI(ierr);
2032 }
2033
2034 // Put the received particles into the domain if they are in the
2035 // triangulation
2036 const void *recv_data_it = static_cast<const void *>(recv_data.data());
2037
2038 // Gather ghost particle iterators from the cache
2039 auto &ghost_particles_iterators =
2040 ghost_particles_cache.ghost_particles_iterators;
2041
2042 for (auto &recv_particle : ghost_particles_iterators)
2043 {
2044 // Update particle data using previously allocated memory space
2045 // for efficiency reasons
2046 recv_data_it =
2047 recv_particle->read_particle_data_from_memory(recv_data_it);
2048
2049 Assert(recv_particle->particles_in_cell->cell->is_ghost(),
2050 ExcInternalError());
2051
2052 if (load_callback)
2053 recv_data_it = load_callback(
2054 particle_iterator(recv_particle->particles_in_cell,
2055 *property_pool,
2056 recv_particle->particle_index_within_cell),
2057 recv_data_it);
2058 }
2059
2060 AssertThrow(recv_data_it == recv_data.data() + recv_data.size(),
2061 ExcMessage(
2062 "The amount of data that was read into new particles "
2063 "does not match the amount of data sent around."));
2064 }
2065#endif
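// Usage sketch (editorial, not part of particle_handler.cc): once the ghost
// cache has been built, ghost data can be refreshed after the particles
// moved (without changing which cells own them) without re-sorting and
// re-packing cell ids:
//
//   particle_handler.exchange_ghost_particles(/*enable_ghost_cache=*/true);
//   // ... advect particles, update properties ...
//   particle_handler.update_ghost_particles();
//
// update_ghost_particles() forwards to the cached-path function above.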
2066
2067 template <int dim, int spacedim>
2068 void
2069 ParticleHandler<dim, spacedim>::register_additional_store_load_functions(
2070 const std::function<std::size_t()> & size_callb,
2071 const std::function<void *(const particle_iterator &, void *)> &store_callb,
2072 const std::function<const void *(const particle_iterator &, const void *)>
2073 &load_callb)
2074 {
2075 size_callback = size_callb;
2076 store_callback = store_callb;
2077 load_callback = load_callb;
2078 }
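// Usage sketch (editorial, not part of particle_handler.cc): the three
// callbacks let user code append extra data to every particle shipped by
// the transfer routines above. The payload below (one double per particle)
// is purely hypothetical, and the sketch assumes <cstring> for std::memcpy:
//
//   particle_handler.register_additional_store_load_functions(
//     /*size*/  []() { return sizeof(double); },
//     /*store*/ [](const auto &particle, void *data) {
//       const double extra = 0.; // hypothetical per-particle payload
//       std::memcpy(data, &extra, sizeof(double));
//       return static_cast<void *>(static_cast<char *>(data) + sizeof(double));
//     },
//     /*load*/  [](const auto &particle, const void *data) {
//       double extra;
//       std::memcpy(&extra, data, sizeof(double));
//       return static_cast<const void *>(static_cast<const char *>(data) +
//                                        sizeof(double));
//     });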
2079
2080
2081 template <int dim, int spacedim>
2082 void
2083 ParticleHandler<dim, spacedim>::connect_to_triangulation_signals()
2084 {
2085 // First disconnect existing connections
2086 for (const auto &connection : tria_listeners)
2087 connection.disconnect();
2088
2089 tria_listeners.clear();
2090
2091 tria_listeners.push_back(triangulation->signals.create.connect([&]() {
2092 this->initialize(*(this->triangulation),
2093 *(this->mapping),
2094 this->property_pool->n_properties_per_slot());
2095 }));
2096
2097 this->tria_listeners.push_back(
2098 this->triangulation->signals.clear.connect([&]() { this->clear(); }));
2099
2100 // for distributed triangulations, connect to distributed signals
2101 if (dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
2102 *>(&(*triangulation)) != nullptr)
2103 {
2104 tria_listeners.push_back(
2105 triangulation->signals.post_distributed_refinement.connect(
2106 [&]() { this->post_mesh_change_action(); }));
2107 tria_listeners.push_back(
2108 triangulation->signals.post_distributed_repartition.connect(
2109 [&]() { this->post_mesh_change_action(); }));
2110 tria_listeners.push_back(
2111 triangulation->signals.post_distributed_load.connect(
2112 [&]() { this->post_mesh_change_action(); }));
2113 }
2114 else
2115 {
2116 tria_listeners.push_back(triangulation->signals.post_refinement.connect(
2117 [&]() { this->post_mesh_change_action(); }));
2118 }
2119 }
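// Note (editorial): the connections above mean that (i) re-creating the
// triangulation re-initializes the handler with the same number of
// properties, (ii) clearing the triangulation clears the particles, and
// (iii) for distributed triangulations every refinement, repartition, or
// load triggers post_mesh_change_action(), while plain triangulations only
// react to post_refinement.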
2120
2121
2122
2123 template <int dim, int spacedim>
2124 void
2125 ParticleHandler<dim, spacedim>::post_mesh_change_action()
2126 {
2127 Assert(triangulation != nullptr, ExcInternalError());
2128
2129 const bool distributed_triangulation =
2130 dynamic_cast<
2131 const parallel::DistributedTriangulationBase<dim, spacedim> *>(
2132 &(*triangulation)) != nullptr;
2133 (void)distributed_triangulation;
2134
2135 Assert(
2136 distributed_triangulation || number_of_locally_owned_particles == 0,
2137 ExcMessage(
2138 "Mesh refinement in a non-distributed triangulation is not supported "
2139 "by the ParticleHandler class. Either insert particles after mesh "
2140 "creation, or use a distributed triangulation."));
2141
2142 // Resize the container if it is possible without
2143 // transferring particles
2144 if (number_of_locally_owned_particles == 0)
2145 cells_to_particle_cache.resize(triangulation->n_active_cells(),
2146 particles.end());
2147 }
2148
2149
2150
2151 template <int dim, int spacedim>
2152 void
2154 {
2155 register_data_attach();
2156 }
2157
2158
2159
2160 template <int dim, int spacedim>
2161 void
2163 {
2164 register_data_attach();
2165 }
2166
2167
2168
2169 template <int dim, int spacedim>
2170 void
2172 {
2173 register_data_attach();
2174 }
2175
2176
2177
2178 template <int dim, int spacedim>
2179 void
2180 ParticleHandler<dim, spacedim>::register_data_attach()
2181 {
2182 parallel::DistributedTriangulationBase<dim, spacedim>
2183 *distributed_triangulation =
2184 const_cast<parallel::DistributedTriangulationBase<dim, spacedim> *>(
2185 dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
2186 *>(&(*triangulation)));
2187 (void)distributed_triangulation;
2188
2189 Assert(
2190 distributed_triangulation != nullptr,
2191 ExcMessage(
2192 "Mesh refinement in a non-distributed triangulation is not supported "
2193 "by the ParticleHandler class. Either insert particles after mesh "
2194 "creation and do not refine afterwards, or use a distributed triangulation."));
2195
2196#ifdef DEAL_II_WITH_P4EST
2197 const auto callback_function =
2198 [this](
2199 const typename Triangulation<dim, spacedim>::cell_iterator
2200 & cell_iterator,
2201 const typename Triangulation<dim, spacedim>::CellStatus cell_status) {
2202 return this->pack_callback(cell_iterator, cell_status);
2203 };
2204
2205 handle = distributed_triangulation->register_data_attach(
2206 callback_function, /*returns_variable_size_data=*/true);
2207#endif
2208 }
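// Usage sketch (editorial, not part of particle_handler.cc): with p4est
// enabled, carrying particles across adaptive refinement follows the usual
// attach/unpack pattern:
//
//   particle_handler.prepare_for_coarsening_and_refinement();
//   triangulation.execute_coarsening_and_refinement();
//   particle_handler.unpack_after_coarsening_and_refinement();
//
// prepare_for_coarsening_and_refinement() ends up in register_data_attach()
// above; the unpack call is handled by notify_ready_to_unpack() below.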
2209
2210
2211
2212 template <int dim, int spacedim>
2213 void
2214 ParticleHandler<dim, spacedim>::unpack_after_coarsening_and_refinement()
2215 {
2216 const bool serialization = false;
2217 notify_ready_to_unpack(serialization);
2218 }
2219
2220
2221
2222 template <int dim, int spacedim>
2223 void
2224 ParticleHandler<dim, spacedim>::deserialize()
2225 {
2226 const bool serialization = true;
2227 notify_ready_to_unpack(serialization);
2228 }
2229
2230
2231 template <int dim, int spacedim>
2232 void
2233 ParticleHandler<dim, spacedim>::register_load_callback_function(
2234 const bool serialization)
2235 {
2236 notify_ready_to_unpack(serialization);
2237 }
2238
2239
2240
2241 template <int dim, int spacedim>
2242 void
2243 ParticleHandler<dim, spacedim>::notify_ready_to_unpack(
2244 const bool serialization)
2245 {
2246 parallel::DistributedTriangulationBase<dim, spacedim>
2247 *distributed_triangulation =
2248 const_cast<parallel::DistributedTriangulationBase<dim, spacedim> *>(
2249 dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
2250 *>(&(*triangulation)));
2251 (void)distributed_triangulation;
2252
2253 Assert(
2254 distributed_triangulation != nullptr,
2255 ExcMessage(
2256 "Mesh refinement in a non-distributed triangulation is not supported "
2257 "by the ParticleHandler class. Either insert particles after mesh "
2258 "creation and do not refine afterwards, or use a distributed triangulation."));
2259
2260 // First prepare container for insertion
2261 clear();
2262
2263#ifdef DEAL_II_WITH_P4EST
2264 // If we are resuming from a checkpoint, we first have to register the
2265 // store function again, to set the triangulation to the same state as
2266 // before the serialization. Only afterwards we know how to deserialize the
2267 // data correctly.
2268 if (serialization)
2269 register_data_attach();
2270
2271 // Check if something was stored and load it
2272 if (handle != numbers::invalid_unsigned_int)
2273 {
2274 const auto callback_function =
2275 [this](
2276 const typename Triangulation<dim, spacedim>::cell_iterator
2277 &cell_iterator,
2278 const typename Triangulation<dim, spacedim>::CellStatus cell_status,
2279 const boost::iterator_range<std::vector<char>::const_iterator>
2280 &range_iterator) {
2281 this->unpack_callback(cell_iterator, cell_status, range_iterator);
2282 };
2283
2284 distributed_triangulation->notify_ready_to_unpack(handle,
2285 callback_function);
2286
2287 // Reset handle and update global numbers.
2288 handle = numbers::invalid_unsigned_int;
2289 update_cached_numbers();
2290 }
2291#else
2292 (void)serialization;
2293#endif
2294 }
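// Usage sketch (editorial, not part of particle_handler.cc): checkpoint /
// restart with a parallel::distributed::Triangulation typically looks like
//
//   // writing:
//   particle_handler.prepare_for_serialization();
//   triangulation.save("checkpoint");
//
//   // reading, after re-creating triangulation and particle_handler:
//   triangulation.load("checkpoint");
//   particle_handler.deserialize();
//
// deserialize() re-registers the store callback first (the
// if (serialization) branch above) so the attached data can be unpacked.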
2295
2296
2297
2298 template <int dim, int spacedim>
2299 std::vector<char>
2300 ParticleHandler<dim, spacedim>::pack_callback(
2301 const typename Triangulation<dim, spacedim>::cell_iterator &cell,
2302 const typename Triangulation<dim, spacedim>::CellStatus status) const
2303 {
2304 std::vector<particle_iterator> stored_particles_on_cell;
2305
2306 switch (status)
2307 {
2308 case Triangulation<dim, spacedim>::CELL_PERSIST:
2309 case Triangulation<dim, spacedim>::CELL_REFINE:
2310 // If the cell persists or is refined, store all particles of the
2311 // current cell.
2312 {
2313 const unsigned int n_particles = n_particles_in_cell(cell);
2314 stored_particles_on_cell.reserve(n_particles);
2315
2316 for (unsigned int i = 0; i < n_particles; ++i)
2317 stored_particles_on_cell.push_back(particle_iterator(
2318 cells_to_particle_cache[cell->active_cell_index()],
2319 *property_pool,
2320 i));
2321 }
2322 break;
2323
2324 case Triangulation<dim, spacedim>::CELL_COARSEN:
2325 // If this cell is the parent of children that will be coarsened,
2326 // collect the particles of all children.
2327 {
2328 for (const auto &child : cell->child_iterators())
2329 {
2330 const unsigned int n_particles = n_particles_in_cell(child);
2331
2332 stored_particles_on_cell.reserve(
2333 stored_particles_on_cell.size() + n_particles);
2334
2335 const typename particle_container::iterator &cache =
2336 cells_to_particle_cache[child->active_cell_index()];
2337 for (unsigned int i = 0; i < n_particles; ++i)
2338 stored_particles_on_cell.push_back(
2339 particle_iterator(cache, *property_pool, i));
2340 }
2341 }
2342 break;
2343
2344 default:
2345 Assert(false, ExcInternalError());
2346 break;
2347 }
2348
2349 return pack_particles(stored_particles_on_cell);
2350 }
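// Note (editorial): pack_callback() returns the particles of the cell (or,
// for coarsening, of all its children) serialized back-to-back by the
// anonymous pack_particles() helper at the top of this file; the buffer is
// handed to the triangulation as variable-size attached data and comes
// back through unpack_callback() below.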
2351
2352
2353
2354 template <int dim, int spacedim>
2355 void
2357 const typename Triangulation<dim, spacedim>::cell_iterator & cell,
2358 const typename Triangulation<dim, spacedim>::CellStatus status,
2359 const boost::iterator_range<std::vector<char>::const_iterator> &data_range)
2360 {
2361 if (data_range.begin() == data_range.end())
2362 return;
2363
2364 const auto cell_to_store_particles =
2365 (status != Triangulation<dim, spacedim>::CELL_REFINE) ?
2366 cell :
2367 cell->child(0);
2368
2369 // deserialize particles and insert into local storage
2370 if (data_range.begin() != data_range.end())
2371 {
2372 const void *data = static_cast<const void *>(&(*data_range.begin()));
2373 const void *end = static_cast<const void *>(
2374 &(*data_range.begin()) + (data_range.end() - data_range.begin()));
2375
2376 while (data < end)
2377 insert_particle(data, cell_to_store_particles);
2378
2379 Assert(data == end,
2380 ExcMessage(
2381 "The particle data could not be deserialized successfully. "
2382 "Check that when deserializing the particles you expect "
2383 "the same number of properties that were serialized."));
2384 }
2385
2386 auto loaded_particles_on_cell = particles_in_cell(cell_to_store_particles);
2387
2388 // now update particle storage location and properties if necessary
2389 switch (status)
2390 {
2391 case Triangulation<dim, spacedim>::CELL_PERSIST:
2392 {
2393 // all particles are correctly inserted
2394 }
2395 break;
2396
2397 case Triangulation<dim, spacedim>::CELL_COARSEN:
2398 {
2399 // all particles are in the correct cell, but their reference location
2400 // has changed
2401 for (auto &particle : loaded_particles_on_cell)
2402 {
2403 const Point<dim> p_unit =
2404 mapping->transform_real_to_unit_cell(cell_to_store_particles,
2405 particle.get_location());
2406 particle.set_reference_location(p_unit);
2407 }
2408 }
2409 break;
2410
2411 case Triangulation<dim, spacedim>::CELL_REFINE:
2412 {
2413 // we need to find the correct child to store the particles and
2414 // their reference location has changed
2415 typename particle_container::iterator &cache =
2416 cells_to_particle_cache[cell_to_store_particles
2417 ->active_cell_index()];
2418
2419 // make sure that the call above has inserted an entry
2420 Assert(cache != particles.end(), ExcInternalError());
2421
2422 // Cannot use range-based loop, because the number of particles in the cell
2423 // is going to change
2424 auto particle = loaded_particles_on_cell.begin();
2425 for (unsigned int i = 0; i < cache->particles.size();)
2426 {
2427 bool found_new_cell = false;
2428
2429 for (unsigned int child_index = 0;
2430 child_index < GeometryInfo<dim>::max_children_per_cell;
2431 ++child_index)
2432 {
2433 const typename Triangulation<dim, spacedim>::cell_iterator
2434 child = cell->child(child_index);
2435 Assert(child->is_locally_owned(), ExcInternalError());
2436
2437 try
2438 {
2439 const Point<dim> p_unit =
2440 mapping->transform_real_to_unit_cell(
2441 child, particle->get_location());
2442 if (GeometryInfo<dim>::is_inside_unit_cell(p_unit,
2443 1e-12))
2444 {
2445 found_new_cell = true;
2446 particle->set_reference_location(p_unit);
2447
2448 // if the particle is not in child 0, we stored the
2449 // handle in the wrong place; move the handle and
2450 // redo the loop; otherwise move on to next particle
2451 if (child_index != 0)
2452 {
2453 insert_particle(cache->particles[i], child);
2454
2455 cache->particles[i] = cache->particles.back();
2456 cache->particles.resize(
2457 cache->particles.size() - 1);
2458 }
2459 else
2460 {
2461 ++i;
2462 ++particle;
2463 }
2464 break;
2465 }
2466 }
2467 catch (typename Mapping<dim>::ExcTransformationFailed &)
2468 {}
2469 }
2470
2471 if (found_new_cell == false)
2472 {
2473 // If we get here, we did not find the particle in any
2474 // child. This case may happen for particles that are at the
2475 // boundary for strongly curved cells. We apply a tolerance
2476 // in the call to GeometryInfo<dim>::is_inside_unit_cell to
2477 // account for this, but if that is not enough, we still
2478 // need to prevent an endless loop here. Delete the particle
2479 // and move on.
2480 signals.particle_lost(particle,
2481 particle->get_surrounding_cell());
2482 cache->particles[i] = cache->particles.back();
2483 cache->particles.resize(cache->particles.size() - 1);
2484 }
2485 }
2486 // clean up in case child 0 has no particle left
2487 if (cache->particles.empty())
2488 {
2489 particles.erase(cache);
2490 cache = particles.end();
2491 }
2492 }
2493 break;
2494
2495 default:
2496 Assert(false, ExcInternalError());
2497 break;
2498 }
2499 }
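// Usage sketch (editorial, not part of particle_handler.cc): particles
// that cannot be located in any child during the refinement branch above
// are reported through the particle_lost signal before being dropped, so
// user code can connect a slot matching the call above (iterator, cell),
// e.g. with a generic lambda:
//
//   particle_handler.signals.particle_lost.connect(
//     [](const auto &particle, const auto &cell) {
//       std::cerr << "lost particle " << particle->get_id() << std::endl;
//     });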
2500} // namespace Particles
2501
2502#include "particle_handler.inst"
2503