work_stream.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2009 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_work_stream_h
16# define dealii_work_stream_h
17
18
19# include <deal.II/base/config.h>
20
21# include <deal.II/base/iterator_range.h>
22# include <deal.II/base/multithread_info.h>
23# include <deal.II/base/parallel.h>
24# include <deal.II/base/std_cxx20/type_traits.h>
25# include <deal.II/base/template_constraints.h>
26# include <deal.II/base/thread_local_storage.h>
27# include <deal.II/base/thread_management.h>
28
29# ifdef DEAL_II_WITH_TBB
30# ifdef DEAL_II_TBB_WITH_ONEAPI
31# include <tbb/parallel_pipeline.h>
32# else
33# include <tbb/pipeline.h>
34# endif
35# endif
36
37# include <functional>
38# include <iterator>
39# include <memory>
40# include <utility>
41# include <vector>
42
43DEAL_II_NAMESPACE_OPEN
44
45
46
159namespace WorkStream
160{
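  // An illustrative usage sketch (editorial addition, not part of the
  // interface declared below): it shows the basic WorkStream::run pattern,
  // in which a worker runs, possibly in parallel, on one item at a time
  // using its own ScratchData object, while a copier serially transfers
  // each resulting CopyData object into a global result. The data types,
  // the container, and the lambdas are hypothetical caller code.
  //
  // @code
  //   struct ScratchData { /* temporaries reused within one thread */ };
  //   struct CopyData    { double cell_contribution; };
  //
  //   std::vector<unsigned int> items = {0, 1, 2, 3};
  //   double global_sum = 0;
  //
  //   WorkStream::run(
  //     items.begin(), items.end(),
  //     // worker: may be called concurrently on different items
  //     [](const std::vector<unsigned int>::iterator &item,
  //        ScratchData &, CopyData &copy) {
  //       copy.cell_contribution = 2.0 * (*item);
  //     },
  //     // copier: called serially, so no lock is needed around global_sum
  //     [&global_sum](const CopyData &copy) {
  //       global_sum += copy.cell_contribution;
  //     },
  //     ScratchData(), CopyData());
  // @endcode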
165 namespace internal
166 {
172 template <typename Iterator, typename ScratchData, typename CopyData>
173 struct ScratchAndCopyDataObjects
174 {
175 std::unique_ptr<ScratchData> scratch_data;
176 std::unique_ptr<CopyData> copy_data;
177 bool currently_in_use;
178
185
186 ScratchAndCopyDataObjects(std::unique_ptr<ScratchData> &&p,
187 std::unique_ptr<CopyData> &&q,
188 const bool in_use)
189 : scratch_data(std::move(p))
190 , copy_data(std::move(q))
191 , currently_in_use(in_use)
192 {}
193
194 // Provide a copy constructor that actually doesn't copy the
195 // internal state. This makes ScratchAndCopyDataObjects objects
196 // easier to handle with STL containers.
197 ScratchAndCopyDataObjects(const ScratchAndCopyDataObjects &)
198 : currently_in_use(false)
199 {}
200 };
201
207 template <typename ScratchData>
208 struct ScratchDataObject
209 {
210 std::unique_ptr<ScratchData> scratch_data;
211 bool currently_in_use;
212
216 ScratchDataObject()
217 : currently_in_use(false)
218 {}
219
220 ScratchDataObject(std::unique_ptr<ScratchData> &&p, const bool in_use)
221 : scratch_data(std::move(p))
222 , currently_in_use(in_use)
223 {}
224
225 ScratchDataObject(ScratchData *p, const bool in_use)
226 : scratch_data(p)
227 , currently_in_use(in_use)
228 {}
229
230 // Provide a copy constructor that actually doesn't copy the
231 // internal state. This makes ScratchDataObject objects
232 // easier to handle with STL containers.
233 ScratchDataObject(const ScratchDataObject &)
234 : currently_in_use(false)
235 {}
236
237 ScratchDataObject(ScratchDataObject &&o) noexcept = default;
238 };
239
240# ifdef DEAL_II_WITH_TBB
255 namespace tbb_no_coloring
256 {
260 template <typename Iterator, typename ScratchData, typename CopyData>
261 class IteratorRangeToItemStream
262 {
263 public:
270 struct ItemType
271 {
276 using ScratchDataList = std::list<ScratchDataObject<ScratchData>>;
277
282 std::vector<Iterator> iterators;
283
289 std::vector<CopyData> copy_datas;
290
296 unsigned int n_iterators;
297
329 Threads::ThreadLocalStorage<ScratchDataList> *scratch_data;
330
335 const ScratchData *sample_scratch_data;
336
341 bool currently_in_use;
342
343
348 ItemType()
349 : n_iterators(0)
350 , scratch_data(nullptr)
351 , sample_scratch_data(nullptr)
352 , currently_in_use(false)
353 {}
354 };
355
356
362 IteratorRangeToItemStream(const Iterator &begin,
363 const Iterator &end,
364 const unsigned int buffer_size,
365 const unsigned int chunk_size,
366 const ScratchData &sample_scratch_data,
367 const CopyData &sample_copy_data)
368 : remaining_iterator_range(begin, end)
369 , item_buffer(buffer_size)
370 , sample_scratch_data(sample_scratch_data)
371 , chunk_size(chunk_size)
372 {
373 // initialize the elements of the ring buffer
374 for (auto &item : item_buffer)
375 {
376 Assert(item.n_iterators == 0, ExcInternalError());
377
378 item.iterators.resize(chunk_size,
379 remaining_iterator_range.second);
380 item.scratch_data = &thread_local_scratch;
381 item.sample_scratch_data = &sample_scratch_data;
382 item.copy_datas.resize(chunk_size, sample_copy_data);
383 item.currently_in_use = false;
384 }
385 }
386
387
391 ItemType *
392 get_item()
393 {
394 // find first unused item. we know that there must be one
395 // because we have set the maximal number of tokens in flight
396 // and have set the ring buffer to have exactly this size. so
397 // if this function is called, we know that less than the
398 // maximal number of items is currently in flight
399 //
400 // note that we need not lock access to this array since
401 // the current stage is run sequentially and we can therefore
402 // enter the following block only once at any given time.
403 // thus, there can be no race condition between checking that
404 // a flag is false and setting it to true. (there may be
405 // another thread where we release items and set 'true'
406 // flags back to 'false', but that too does not produce any
407 // problems)
408 ItemType *current_item = nullptr;
409 for (unsigned int i = 0; i < item_buffer.size(); ++i)
410 if (item_buffer[i].currently_in_use == false)
411 {
412 item_buffer[i].currently_in_use = true;
413 current_item = &item_buffer[i];
414 break;
415 }
416 Assert(current_item != nullptr,
417 ExcMessage("This can't be. There must be a free item!"));
418
419 // initialize the next item. it may
420 // consist of at most chunk_size
421 // elements
422 current_item->n_iterators = 0;
423 while ((remaining_iterator_range.first !=
424 remaining_iterator_range.second) &&
425 (current_item->n_iterators < chunk_size))
426 {
427 current_item->iterators[current_item->n_iterators] =
428 remaining_iterator_range.first;
429
430 ++remaining_iterator_range.first;
431 ++current_item->n_iterators;
432 }
433
434 if (current_item->n_iterators == 0)
435 // there were no items
436 // left. terminate the pipeline
437 return nullptr;
438 else
439 return current_item;
440 }
441
442 private:
447 std::pair<Iterator, Iterator> remaining_iterator_range;
448
452 std::vector<ItemType> item_buffer;
453
484 Threads::ThreadLocalStorage<typename ItemType::ScratchDataList>
485 thread_local_scratch;
486
492 const ScratchData &sample_scratch_data;
493
500 const unsigned int chunk_size;
501 };
502
503
504
505 template <typename Worker,
506 typename Copier,
507 typename Iterator,
508 typename ScratchData,
509 typename CopyData>
510 void
511 run(const Iterator &begin,
512 const std_cxx20::type_identity_t<Iterator> &end,
513 Worker worker,
514 Copier copier,
515 const ScratchData &sample_scratch_data,
516 const CopyData &sample_copy_data,
517 const unsigned int queue_length,
518 const unsigned int chunk_size)
519 {
520 using ItemType = typename IteratorRangeToItemStream<Iterator,
521 ScratchData,
522 CopyData>::ItemType;
523
524 // Define the three stages of the pipeline:
525
526 //
527 // ----- Stage 1 -----
528 //
529 // The first stage is the one that provides us with chunks of data
530 // to work on (the stream of "items"). This stage will run sequentially.
531 IteratorRangeToItemStream<Iterator, ScratchData, CopyData>
532 iterator_range_to_item_stream(begin,
533 end,
534 queue_length,
535 chunk_size,
536 sample_scratch_data,
537 sample_copy_data);
538 auto item_generator = [&](tbb::flow_control &fc) -> ItemType * {
539 if (const auto item = iterator_range_to_item_stream.get_item())
540 return item;
541 else
542 {
543 fc.stop();
544 return nullptr;
545 }
546 };
547
548 //
549 // ----- Stage 2 -----
550 //
551 // The second stage is the one that does the actual work. This is the
552 // stage that runs in parallel
553 auto item_worker =
554 [worker =
555 std::function<void(const Iterator &, ScratchData &, CopyData &)>(
556 worker),
557 copier_exists =
558 static_cast<bool>(std::function<void(const CopyData &)>(copier))](
559 ItemType *current_item) {
560 // we need to find an unused scratch data object in the list that
561 // corresponds to the current thread and then mark it as used. if
562 // we can't find one, create one
563 //
564 // as discussed in the discussion of the documentation of the
565 // IteratorRangeToItemStream::scratch_data variable, there is no
566 // need to synchronize access to this variable using a mutex
567 // as long as we have no yield-point in between. this means that
568 // we can't take an iterator into the list now and expect it to
569 // still be valid after calling the worker, but we at least do
570 // not have to lock the following section
571 ScratchData *scratch_data = nullptr;
572 {
573 // see if there is an unused object. if so, grab it and mark
574 // it as used
575 for (auto &p : current_item->scratch_data->get())
576 if (p.currently_in_use == false)
577 {
578 scratch_data = p.scratch_data.get();
579 p.currently_in_use = true;
580
581 break;
582 }
583
584 // if no object was found, create one and mark it as used
585 if (scratch_data == nullptr)
586 {
587 scratch_data =
588 new ScratchData(*current_item->sample_scratch_data);
589 current_item->scratch_data->get().emplace_back(scratch_data,
590 true);
591 }
592 };
593
594 // then call the worker function on each element of the chunk we
595 // were given. since these worker functions are called on separate
596 // threads, nothing good can happen if they throw an exception and
597 // we are best off catching it and showing an error message
598 for (unsigned int i = 0; i < current_item->n_iterators; ++i)
599 {
600 try
601 {
602 if (worker)
603 worker(current_item->iterators[i],
604 *scratch_data,
605 current_item->copy_datas[i]);
606 }
607 catch (const std::exception &exc)
608 {
609 Threads::internal::handle_std_exception(exc);
610 }
611 catch (...)
612 {
613 Threads::internal::handle_unknown_exception();
614 }
615 }
616
617 // finally mark the scratch object as unused again. as above, there
618 // is no need to lock anything here since the object we work on
619 // is thread-local
620 for (auto &p : current_item->scratch_data->get())
621 if (p.scratch_data.get() == scratch_data)
622 {
623 Assert(p.currently_in_use == true, ExcInternalError());
624 p.currently_in_use = false;
625
626 break;
627 }
628
629 // if there is no copier, mark current item as usable again
630 if (copier_exists == false)
631 current_item->currently_in_use = false;
632
633
634 // Then return the original pointer
635 // to the now modified object. The copier will work on it next.
636 return current_item;
637 };
638
639 //
640 // ----- Stage 3 -----
641 //
642 // The last stage is the one that copies data from the CopyData objects
643 // to the final destination. This stage runs sequentially again.
644 auto item_copier = [copier = std::function<void(const CopyData &)>(
645 copier)](ItemType *current_item) {
646 if (copier)
647 {
648 // Initiate copying data. For the same reasons as in the worker
649 // class above, catch exceptions rather than letting them
650 // propagate into unknown territories:
651 for (unsigned int i = 0; i < current_item->n_iterators; ++i)
652 {
653 try
654 {
655 copier(current_item->copy_datas[i]);
656 }
657 catch (const std::exception &exc)
658 {
659 Threads::internal::handle_std_exception(exc);
660 }
661 catch (...)
662 {
663 Threads::internal::handle_unknown_exception();
664 }
665 }
666 }
667 // mark current item as usable again
668 current_item->currently_in_use = false;
669 };
670
671
672 // Now we just have to set up the pipeline and run it:
673 auto tbb_item_stream_filter = tbb::make_filter<void, ItemType *>(
674# ifdef DEAL_II_TBB_WITH_ONEAPI
675 tbb::filter_mode::serial_in_order,
676# else
677 tbb::filter::serial,
678# endif
679 item_generator);
680
681 auto tbb_worker_filter = tbb::make_filter<ItemType *, ItemType *>(
682# ifdef DEAL_II_TBB_WITH_ONEAPI
683 tbb::filter_mode::parallel,
684# else
685 tbb::filter::parallel,
686# endif
687 item_worker);
688
689 auto tbb_copier_filter = tbb::make_filter<ItemType *, void>(
690# ifdef DEAL_II_TBB_WITH_ONEAPI
691 tbb::filter_mode::serial_in_order,
692# else
693 tbb::filter::serial,
694# endif
695 item_copier);
696
697 tbb::parallel_pipeline(queue_length,
698 tbb_item_stream_filter & tbb_worker_filter &
699 tbb_copier_filter);
700 }
701
702 } // namespace tbb_no_coloring
703# endif // DEAL_II_WITH_TBB
704
705
706
707# ifdef DEAL_II_WITH_TASKFLOW
715 namespace taskflow_no_coloring
716 {
723 template <typename Worker,
724 typename Copier,
725 typename Iterator,
726 typename ScratchData,
727 typename CopyData>
728 void
729 run(const Iterator &begin,
730 const std_cxx20::type_identity_t<Iterator> &end,
731 Worker worker,
732 Copier copier,
733 const ScratchData &sample_scratch_data,
734 const CopyData &sample_copy_data,
735 const unsigned int /*queue_length*/ = 2 *
736 MultithreadInfo::n_threads(),
737 const unsigned int /*chunk_size*/ = 8)
738
739 {
740 tf::Executor &executor = MultithreadInfo::get_taskflow_executor();
741 tf::Taskflow taskflow;
742
743 using ScratchDataList = std::list<ScratchDataObject<ScratchData>>;
744
745 Threads::ThreadLocalStorage<ScratchDataList>
746 thread_safe_scratch_data_list;
747
748 tf::Task last_copier;
749
750 // idx is used to connect each worker to its copier, since direct
751 // communication between tasks is not supported. It does this by
752 // providing a unique index into the vector of pointers copy_datas at
753 // which the copy data object filled by worker task #idx is stored.
754 unsigned int idx = 0;
755
756 std::vector<std::unique_ptr<CopyData>> copy_datas;
757
758 // Generate a static task graph. Here we generate a task for each cell
759 // that will be worked on. The tasks are not executed until all of them
760 are created; this code runs sequentially.
761 for (Iterator it = begin; it != end; ++it, ++idx)
762 {
763 copy_datas.emplace_back();
764 // Create a worker task.
765 auto worker_task =
766 taskflow
767 .emplace([it,
768 idx,
769 &thread_safe_scratch_data_list,
770 &sample_scratch_data,
771 &sample_copy_data,
772 &copy_datas,
773 &worker]() {
774 ScratchData *scratch_data = nullptr;
775
776 ScratchDataList &scratch_data_list =
777 thread_safe_scratch_data_list.get();
778 // See if there is an unused object. if so,
779 // grab it and mark it as used.
780 for (auto &p : scratch_data_list)
781 {
782 if (p.currently_in_use == false)
783 {
784 scratch_data = p.scratch_data.get();
785 p.currently_in_use = true;
786 break;
787 }
788 }
789 // If no element in the list was found, create
790 // one and mark it as used.
791 if (scratch_data == nullptr)
792 {
793 scratch_data_list.emplace_back(
794 std::make_unique<ScratchData>(sample_scratch_data),
795 true);
796 scratch_data =
797 scratch_data_list.back().scratch_data.get();
798 }
799
800 // Create a unique copy data object where this
801 // worker's work will be stored.
802 auto &copy = copy_datas[idx];
803 copy = std::make_unique<CopyData>(sample_copy_data);
804 worker(it, *scratch_data, *copy.get());
805
806 // Find our currently used scratch data and
807 // mark it as unused.
808 for (auto &p : scratch_data_list)
809 {
810 if (p.scratch_data.get() == scratch_data)
811 {
812 Assert(p.currently_in_use == true,
813 ExcInternalError());
814 p.currently_in_use = false;
815 }
816 }
817 })
818 .name("worker");
819
820 // Create a copier task. This task is a separate object from the
821 // worker task.
822 tf::Task copier_task = taskflow
823 .emplace([idx, &copy_datas, &copier]() {
824 copier(*copy_datas[idx].get());
825 copy_datas[idx].reset();
826 })
827 .name("copy");
828
829 // Ensure the copy task runs after the worker task.
830 worker_task.precede(copier_task);
831
832 // Ensure that only one copy task can run at a time. The code below
833 // makes each copy task wait until the previous one has finished
834 // before it can start
835 if (!last_copier.empty())
836 last_copier.precede(copier_task);
837
838 // Keep a handle to the last copier. Tasks in taskflow are
839 // basically handles to internally stored data, so this does not
840 // perform a copy:
841 last_copier = copier_task;
842 }
843
844 // Now we run all the tasks in the task graph. They will be run in
845 // parallel and are eligible to run when their dependencies established
846 // above are met.
847 executor.run(taskflow).wait();
848 }
849 } // namespace taskflow_no_coloring
850# endif
851
858 namespace sequential
859 {
863 template <typename Worker,
864 typename Copier,
865 typename Iterator,
866 typename ScratchData,
867 typename CopyData>
868 void
869 run(const Iterator &begin,
870 const std_cxx20::type_identity_t<Iterator> &end,
871 Worker worker,
872 Copier copier,
873 const ScratchData &sample_scratch_data,
874 const CopyData &sample_copy_data)
875 {
876 // need to copy the sample since it is marked const
877 ScratchData scratch_data = sample_scratch_data;
878 CopyData copy_data = sample_copy_data; // NOLINT
879
880 // Optimization: Check if the functions are not the zero function. To
881 // check zero-ness, create a C++ function out of it:
882 const bool have_worker =
883 (static_cast<const std::function<
884 void(const Iterator &, ScratchData &, CopyData &)> &>(worker)) !=
885 nullptr;
886 const bool have_copier =
887 (static_cast<const std::function<void(const CopyData &)> &>(
888 copier)) != nullptr;
889
890 // Finally loop over all items and perform the necessary work:
891 for (Iterator i = begin; i != end; ++i)
892 {
893 if (have_worker)
894 worker(i, scratch_data, copy_data);
895 if (have_copier)
896 copier(copy_data);
897 }
898 }
899
900
901
905 template <typename Worker,
906 typename Copier,
907 typename Iterator,
908 typename ScratchData,
909 typename CopyData>
910 void
911 run(const std::vector<std::vector<Iterator>> &colored_iterators,
912 Worker worker,
913 Copier copier,
914 const ScratchData &sample_scratch_data,
915 const CopyData &sample_copy_data)
916 {
917 // need to copy the sample since it is marked const
918 ScratchData scratch_data = sample_scratch_data;
919 CopyData copy_data = sample_copy_data; // NOLINT
920
921 // Optimization: Check if the functions are not the zero function. To
922 // check zero-ness, create a C++ function out of it:
923 const bool have_worker =
924 (static_cast<const std::function<
925 void(const Iterator &, ScratchData &, CopyData &)> &>(worker)) !=
926 nullptr;
927 const bool have_copier =
928 (static_cast<const std::function<void(const CopyData &)> &>(
929 copier)) != nullptr;
930
931 // Finally loop over all items and perform the necessary work:
932 for (unsigned int color = 0; color < colored_iterators.size(); ++color)
933 if (colored_iterators[color].size() > 0)
934 for (auto &it : colored_iterators[color])
935 {
936 if (have_worker)
937 worker(it, scratch_data, copy_data);
938 if (have_copier)
939 copier(copy_data);
940 }
941 }
942
943 } // namespace sequential
944
945
946
947# ifdef DEAL_II_WITH_TBB
955 namespace tbb_colored
956 {
962 template <typename Iterator, typename ScratchData, typename CopyData>
963 class WorkerAndCopier
964 {
965 public:
969 WorkerAndCopier(
970 const std::function<void(const Iterator &, ScratchData &, CopyData &)>
971 &worker,
972 const std::function<void(const CopyData &)> &copier,
973 const ScratchData &sample_scratch_data,
974 const CopyData &sample_copy_data)
975 : worker(worker)
976 , copier(copier)
977 , sample_scratch_data(sample_scratch_data)
978 , sample_copy_data(sample_copy_data)
979 {}
980
981
986 void
987 operator()(const tbb::blocked_range<
988 typename std::vector<Iterator>::const_iterator> &range)
989 {
990 // we need to find an unused scratch and corresponding copy
991 // data object in the list that corresponds to the current
992 // thread and then mark it as used. If we can't find one,
993 // create one as discussed in the discussion of the documentation
994 // of the IteratorRangeToItemStream::scratch_data variable,
995 // there is no need to synchronize access to this variable
996 // using a mutex as long as we have no yield-point in between.
997 // This means that we can't take an iterator into the list
998 // now and expect it to still be valid after calling the worker,
999 // but we at least do not have to lock the following section.
1000 ScratchData *scratch_data = nullptr;
1001 CopyData *copy_data = nullptr;
1002 {
1003 ScratchAndCopyDataList &scratch_and_copy_data_list = data.get();
1004
1005 // see if there is an unused object. if so, grab it and mark
1006 // it as used
1007 for (typename ScratchAndCopyDataList::iterator p =
1008 scratch_and_copy_data_list.begin();
1009 p != scratch_and_copy_data_list.end();
1010 ++p)
1011 if (p->currently_in_use == false)
1012 {
1013 scratch_data = p->scratch_data.get();
1014 copy_data = p->copy_data.get();
1015 p->currently_in_use = true;
1016 break;
1017 }
1018
1019 // if no element in the list was found, create one and mark it as
1020 // used
1021 if (scratch_data == nullptr)
1022 {
1023 Assert(copy_data == nullptr, ExcInternalError());
1024
1025 scratch_and_copy_data_list.emplace_back(
1026 std::make_unique<ScratchData>(sample_scratch_data),
1027 std::make_unique<CopyData>(sample_copy_data),
1028 true);
1029 scratch_data =
1030 scratch_and_copy_data_list.back().scratch_data.get();
1031 copy_data = scratch_and_copy_data_list.back().copy_data.get();
1032 }
1033 }
1034
1035 // then call the worker and copier functions on each
1036 // element of the chunk we were given.
1037 for (typename std::vector<Iterator>::const_iterator p = range.begin();
1038 p != range.end();
1039 ++p)
1040 {
1041 try
1042 {
1043 if (worker)
1044 worker(*p, *scratch_data, *copy_data);
1045 if (copier)
1046 copier(*copy_data);
1047 }
1048 catch (const std::exception &exc)
1049 {
1050 Threads::internal::handle_std_exception(exc);
1051 }
1052 catch (...)
1053 {
1054 Threads::internal::handle_unknown_exception();
1055 }
1056 }
1057
1058 // finally mark the scratch object as unused again. as above, there
1059 // is no need to lock anything here since the object we work on
1060 // is thread-local
1061 {
1062 ScratchAndCopyDataList &scratch_and_copy_data_list = data.get();
1063
1064 for (typename ScratchAndCopyDataList::iterator p =
1065 scratch_and_copy_data_list.begin();
1066 p != scratch_and_copy_data_list.end();
1067 ++p)
1068 if (p->scratch_data.get() == scratch_data)
1069 {
1070 Assert(p->currently_in_use == true, ExcInternalError());
1071 p->currently_in_use = false;
1072 }
1073 }
1074 }
1075
1076 private:
1077 using ScratchAndCopyDataObjects = typename internal::
1078 ScratchAndCopyDataObjects<Iterator, ScratchData, CopyData>;
1079
1084 using ScratchAndCopyDataList = std::list<ScratchAndCopyDataObjects>;
1085
1086 Threads::ThreadLocalStorage<ScratchAndCopyDataList> data;
1087
1092 const std::function<void(const Iterator &, ScratchData &, CopyData &)>
1093 worker;
1094
1099 const std::function<void(const CopyData &)> copier;
1100
1104 const ScratchData &sample_scratch_data;
1105 const CopyData &sample_copy_data;
1106 };
1107
1111 template <typename Worker,
1112 typename Copier,
1113 typename Iterator,
1114 typename ScratchData,
1115 typename CopyData>
1116 void
1117 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1118 Worker worker,
1119 Copier copier,
1120 const ScratchData &sample_scratch_data,
1121 const CopyData &sample_copy_data,
1122 const unsigned int chunk_size)
1123 {
1124 // loop over the various colors of what we're given
1125 for (unsigned int color = 0; color < colored_iterators.size(); ++color)
1126 if (colored_iterators[color].size() > 0)
1127 {
1128 using WorkerAndCopier = internal::tbb_colored::
1129 WorkerAndCopier<Iterator, ScratchData, CopyData>;
1130
1131 WorkerAndCopier worker_and_copier(worker,
1132 copier,
1133 sample_scratch_data,
1134 sample_copy_data);
1135
1136 parallel::internal::parallel_for(
1137 colored_iterators[color].begin(),
1138 colored_iterators[color].end(),
1139 [&worker_and_copier](
1140 const tbb::blocked_range<
1141 typename std::vector<Iterator>::const_iterator> &range) {
1142 worker_and_copier(range);
1143 },
1144 chunk_size);
1145 }
1146 }
1147
1148 } // namespace tbb_colored
1149# endif // DEAL_II_WITH_TBB
1150
1151
1152
1153# ifdef DEAL_II_WITH_TASKFLOW
1159 namespace taskflow_colored
1160 {
1167 template <typename Worker,
1168 typename Copier,
1169 typename Iterator,
1170 typename ScratchData,
1171 typename CopyData>
1172 void
1173 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1174 Worker worker,
1175 Copier copier,
1176 const ScratchData &sample_scratch_data,
1177 const CopyData &sample_copy_data,
1178 const unsigned int /*queue_length*/ = 2 *
1179 MultithreadInfo::n_threads(),
1180 const unsigned int /*chunk_size*/ = 8)
1181
1182 {
1183 tf::Executor &executor = MultithreadInfo::get_taskflow_executor();
1184 tf::Taskflow taskflow;
1185 using ScratchAndCopyDataObjects = typename internal::
1186 ScratchAndCopyDataObjects<Iterator, ScratchData, CopyData>;
1187
1188 using ScratchAndCopyDataList = std::list<ScratchAndCopyDataObjects>;
1189
1190 Threads::ThreadLocalStorage<ScratchAndCopyDataList>
1191 thread_safe_scratch_and_copy_data_list;
1192
1193 // These are used to establish task dependencies. These are barrier
1194 // tasks which are dependent on all tasks in the same "color" to be
1195 // completed before allowing any tasks in the next "color" to be worked
1196 // on.
1197 tf::Task last_color;
1198 tf::Task next_color;
1199
1200 const bool have_worker =
1201 (static_cast<const std::function<
1202 void(const Iterator &, ScratchData &, CopyData &)> &>(worker)) !=
1203 nullptr;
1204 const bool have_copier =
1205 (static_cast<const std::function<void(const CopyData &)> &>(
1206 copier)) != nullptr;
1207
1208 // Generate a static task graph. Here we generate a task for each cell
1209 // that will be worked on. The tasks are not executed until all of them
1210 are created; this code runs sequentially. Cells have been grouped
1211 into "colors": data from cells in the same color are safe to copy in
1212 parallel, so copying need not be sequential.
1213 for (unsigned int color = 0; color < colored_iterators.size(); ++color)
1214 // Ignore color blocks which are empty.
1215 if (colored_iterators[color].size() > 0)
1216 {
1217 // Keep a handle to the last color. Tasks in taskflow are
1218 // basically handles to internally stored data, so this does not
1219 // perform a copy:
1220 if (!next_color.empty())
1221 last_color = next_color;
1222 // A placeholder task is a task object which respects dependencies
1223 // but is empty (contains no work). The name is useful when
1224 // examining a visualization of the graph through Taskflow's
1225 // supported visualization tools.
1226 if (color < colored_iterators.size() - 1)
1227 next_color = taskflow.placeholder().name("color border");
1228
1229 // For each cell queue up a combined worker and copier task. These
1230 // are not yet run.
1231 for (const Iterator &it : colored_iterators[color])
1232 {
1233 auto worker_task =
1234 taskflow
1235 .emplace(
1236 [it =
1237 it, // make a copy of the reference to the iterator
1238 have_worker,
1239 have_copier,
1240 &thread_safe_scratch_and_copy_data_list,
1241 &sample_scratch_data,
1242 &sample_copy_data,
1243 &worker,
1244 &copier]() {
1245 ScratchData *scratch_data = nullptr;
1246 CopyData *copy_data = nullptr;
1247
1248 ScratchAndCopyDataList &scratch_and_copy_data_list =
1249 thread_safe_scratch_and_copy_data_list.get();
1250 // See if there is an unused object. if so, grab it
1251 // and mark it as used.
1252 for (typename ScratchAndCopyDataList::iterator p =
1253 scratch_and_copy_data_list.begin();
1254 p != scratch_and_copy_data_list.end();
1255 ++p)
1256 {
1257 if (p->currently_in_use == false)
1258 {
1259 scratch_data = p->scratch_data.get();
1260 copy_data = p->copy_data.get();
1261 p->currently_in_use = true;
1262 break;
1263 }
1264 }
1265 // If no element in the list was found, create one and
1266 // mark it as used.
1267 if (scratch_data == nullptr)
1268 {
1269 Assert(copy_data == nullptr, ExcInternalError());
1270 scratch_and_copy_data_list.emplace_back(
1271 std::make_unique<ScratchData>(
1272 sample_scratch_data),
1273 std::make_unique<CopyData>(sample_copy_data),
1274 true);
1275 scratch_data = scratch_and_copy_data_list.back()
1276 .scratch_data.get();
1277 copy_data = scratch_and_copy_data_list.back()
1278 .copy_data.get();
1279 }
1280 if (have_worker)
1281 worker(it, *scratch_data, *copy_data);
1282 if (have_copier)
1283 copier(*copy_data);
1284
1285 // Mark objects as free to be used again.
1286 for (typename ScratchAndCopyDataList::iterator p =
1287 scratch_and_copy_data_list.begin();
1288 p != scratch_and_copy_data_list.end();
1289 ++p)
1290 {
1291 if (p->scratch_data.get() == scratch_data)
1292 {
1293 Assert(p->currently_in_use == true,
1294 ExcInternalError());
1295 p->currently_in_use = false;
1296 }
1297 }
1298 })
1299 .name("worker_and_copier");
1300 // If we are on the last color we do not need to add a barrier
1301 // at the end of the graph. Otherwise we force the next color
1302 // barrier to wait until the current task (and thus all tasks
1303 // in this color group) are finished.
1304 if (color < colored_iterators.size() - 1)
1305 worker_task.precede(next_color);
1306
1307 // The first group of tasks does not depend on a previous
1308 // barrier being reached. Otherwise, we ensure the previous
1309 // barrier must resolve before any new tasks in the next color
1310 // can start.
1311 if (!last_color.empty())
1312 last_color.precede(worker_task);
1313 }
1314 }
1315 // Now we run all the tasks in the task graph. They will be run in
1316 // parallel and are eligible to run when their dependencies established
1317 // above are met.
1318 executor.run(taskflow).wait();
1319 }
1320 } // namespace taskflow_colored
1321# endif // DEAL_II_WITH_TASKFLOW
1322
1323
1324 } // namespace internal
1325
1326
1327
1375 template <typename Worker,
1376 typename Copier,
1377 typename Iterator,
1378 typename ScratchData,
1379 typename CopyData>
1380 void
1381 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1382 Worker worker,
1383 Copier copier,
1384 const ScratchData &sample_scratch_data,
1385 const CopyData &sample_copy_data,
1386 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1387 const unsigned int chunk_size = 8);
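  // A sketch of how the colored overload above is commonly driven
  // (editorial addition): the iterators are first partitioned into
  // "colors" such that copier calls within one color never conflict and
  // may therefore run concurrently. GraphColoring::make_graph_coloring
  // (from <deal.II/base/graph_coloring.h>) is one way to build such a
  // partition; `dof_handler`, `get_conflict_indices`, and the
  // worker/copier callables are hypothetical caller code.
  //
  // @code
  //   using CellIterator = typename DoFHandler<dim>::active_cell_iterator;
  //
  //   const std::vector<std::vector<CellIterator>> colored_iterators =
  //     GraphColoring::make_graph_coloring(dof_handler.begin_active(),
  //                                        dof_handler.end(),
  //                                        get_conflict_indices);
  //
  //   WorkStream::run(colored_iterators,
  //                   cell_worker,           // worker
  //                   copy_local_to_global,  // copier
  //                   sample_scratch_data,
  //                   sample_copy_data);
  // @endcode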
1388
1389
1439 template <typename Worker,
1440 typename Copier,
1441 typename Iterator,
1442 typename ScratchData,
1443 typename CopyData>
1444 void
1445 run(const Iterator &begin,
1446 const std_cxx20::type_identity_t<Iterator> &end,
1447 Worker worker,
1448 Copier copier,
1449 const ScratchData &sample_scratch_data,
1450 const CopyData &sample_copy_data,
1451 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1452 const unsigned int chunk_size = 8)
1453 {
1454 Assert(queue_length > 0,
1455 ExcMessage("The queue length must be at least one, and preferably "
1456 "larger than the number of processors on this system."));
1457 (void)queue_length; // removes -Wunused-parameter warning in optimized mode
1458 Assert(chunk_size > 0, ExcMessage("The chunk_size must be at least one."));
1459 (void)chunk_size; // removes -Wunused-parameter warning in optimized mode
1460
1461 // If there is no work to do, return early. (We only use operator!= for
1462 // iterators since we may not have an equality comparison operator.)
1463 if (!(begin != end))
1464 return;
1465
1466 if (MultithreadInfo::n_threads() > 1)
1467 {
1468# if defined(DEAL_II_WITH_TBB) || defined(DEAL_II_WITH_TASKFLOW)
1469 if (static_cast<const std::function<void(const CopyData &)> &>(copier))
1470 {
1471 // If we have a copier, run the algorithm:
1472# if defined(DEAL_II_WITH_TASKFLOW)
1473 internal::taskflow_no_coloring::run(begin,
1474 end,
1475 worker,
1476 copier,
1477 sample_scratch_data,
1478 sample_copy_data,
1479 queue_length,
1480 chunk_size);
1481# elif defined(DEAL_II_WITH_TBB)
1482 internal::tbb_no_coloring::run(begin,
1483 end,
1484 worker,
1485 copier,
1486 sample_scratch_data,
1487 sample_copy_data,
1488 queue_length,
1489 chunk_size);
1490# endif
1491 }
1492 else
1493 {
1494 // There is no copier function. in this case, we have an
1495 // embarrassingly parallel problem where we can
1496 // essentially apply parallel_for. because parallel_for
1497 // requires subdividing the range, which needs operator- between
1498 // iterators, it is often inefficient to
1499 // apply it directly to cell ranges and similar iterator
1500 // types for which operator- is expensive or, in fact,
1501 // nonexistent. rather, in that case, we simply copy the
1502 // iterators into a large array and use operator- on
1503 // iterators to this array of iterators.
1504 //
1505 // instead of duplicating code, this is essentially the
1506 // same situation we have in the colored implementation below, so we
1507 // just defer to that place
1508 std::vector<std::vector<Iterator>> all_iterators(1);
1509 for (Iterator p = begin; p != end; ++p)
1510 all_iterators[0].push_back(p);
1511
1512 run(all_iterators,
1513 worker,
1514 copier,
1515 sample_scratch_data,
1516 sample_copy_data,
1517 queue_length,
1518 chunk_size);
1519 }
1520
1521 // exit this function to not run the sequential version below:
1522 return;
1523# endif
1524 }
1525
1526 // no TBB or Taskflow installed or we are requested to run sequentially:
1527 internal::sequential::run(
1528 begin, end, worker, copier, sample_scratch_data, sample_copy_data);
1529 }
1530
1531
1532
1540 template <typename Worker,
1541 typename Copier,
1542 typename IteratorRangeType,
1543 typename ScratchData,
1544 typename CopyData,
1545 typename = std::enable_if_t<has_begin_and_end<IteratorRangeType>>>
1546 void
1547 run(IteratorRangeType iterator_range,
1548 Worker worker,
1549 Copier copier,
1550 const ScratchData &sample_scratch_data,
1551 const CopyData &sample_copy_data,
1552 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1553 const unsigned int chunk_size = 8)
1554 {
1555 // Call the function above
1556 run(iterator_range.begin(),
1557 iterator_range.end(),
1558 worker,
1559 copier,
1560 sample_scratch_data,
1561 sample_copy_data,
1562 queue_length,
1563 chunk_size);
1564 }
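  // For containers and ranges that provide begin()/end() -- for example the
  // IteratorRange returned by DoFHandler::active_cell_iterators() -- the
  // overload above lets one pass the range directly instead of an iterator
  // pair (sketch; `dof_handler` and the remaining arguments are
  // hypothetical caller code):
  //
  // @code
  //   WorkStream::run(dof_handler.active_cell_iterators(),
  //                   cell_worker,
  //                   copy_local_to_global,
  //                   sample_scratch_data,
  //                   sample_copy_data);
  // @endcode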
1565
1566
1567
1571 template <typename Worker,
1572 typename Copier,
1573 typename Iterator,
1574 typename ScratchData,
1575 typename CopyData>
1576 void
1577 run(const IteratorRange<Iterator> &iterator_range,
1578 Worker worker,
1579 Copier copier,
1580 const ScratchData &sample_scratch_data,
1581 const CopyData &sample_copy_data,
1582 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1583 const unsigned int chunk_size = 8)
1584 {
1585 // Call the function above
1586 run(iterator_range.begin(),
1587 iterator_range.end(),
1588 worker,
1589 copier,
1590 sample_scratch_data,
1591 sample_copy_data,
1592 queue_length,
1593 chunk_size);
1594 }
1595
1596
1597
1598 template <typename Worker,
1599 typename Copier,
1600 typename Iterator,
1601 typename ScratchData,
1602 typename CopyData>
1603 void
1604 run(const std::vector<std::vector<Iterator>> &colored_iterators,
1605 Worker worker,
1606 Copier copier,
1607 const ScratchData &sample_scratch_data,
1608 const CopyData &sample_copy_data,
1609 const unsigned int queue_length,
1610 const unsigned int chunk_size)
1611 {
1612 Assert(queue_length > 0,
1613 ExcMessage("The queue length must be at least one, and preferably "
1614 "larger than the number of processors on this system."));
1615 (void)queue_length; // removes -Wunused-parameter warning in optimized mode
1616 Assert(chunk_size > 0, ExcMessage("The chunk_size must be at least one."));
1617 (void)chunk_size; // removes -Wunused-parameter warning in optimized mode
1618
1619
1620 if (MultithreadInfo::n_threads() > 1)
1621 {
1622# ifdef DEAL_II_WITH_TASKFLOW
1623 internal::taskflow_colored::run(colored_iterators,
1624 worker,
1625 copier,
1626 sample_scratch_data,
1627 sample_copy_data,
1628 chunk_size);
1629
1630 // exit this function to not run the sequential version below:
1631 return;
1632# elif defined(DEAL_II_WITH_TBB)
1633 internal::tbb_colored::run(colored_iterators,
1634 worker,
1635 copier,
1636 sample_scratch_data,
1637 sample_copy_data,
1638 chunk_size);
1639
1640 // exit this function to not run the sequential version below:
1641 return;
1642# endif
1643 }
1644
1645 // run all colors sequentially:
1646 {
1647 internal::sequential::run(colored_iterators,
1648 worker,
1649 copier,
1650 sample_scratch_data,
1651 sample_copy_data);
1652 }
1653 }
1654
1655
1656
1698 template <typename MainClass,
1699 typename Iterator,
1700 typename ScratchData,
1701 typename CopyData>
1702 void
1703 run(const Iterator &begin,
1704 const std_cxx20::type_identity_t<Iterator> &end,
1705 MainClass &main_object,
1706 void (MainClass::*worker)(const Iterator &, ScratchData &, CopyData &),
1707 void (MainClass::*copier)(const CopyData &),
1708 const ScratchData &sample_scratch_data,
1709 const CopyData &sample_copy_data,
1710 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1711 const unsigned int chunk_size = 8)
1712 {
1713 // forward to the other function
1714 run(
1715 begin,
1716 end,
1717 [&main_object, worker](const Iterator &iterator,
1718 ScratchData &scratch_data,
1719 CopyData &copy_data) {
1720 (main_object.*worker)(iterator, scratch_data, copy_data);
1721 },
1722 [&main_object, copier](const CopyData &copy_data) {
1723 (main_object.*copier)(copy_data);
1724 },
1725 sample_scratch_data,
1726 sample_copy_data,
1727 queue_length,
1728 chunk_size);
1729 }
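  // The member-function flavor above is typically called from inside an
  // assembly routine of the class that owns the worker and copier
  // (sketch; `MyProblem`, its member functions, and the data types are
  // hypothetical caller code):
  //
  // @code
  //   WorkStream::run(dof_handler.begin_active(),
  //                   dof_handler.end(),
  //                   *this,
  //                   &MyProblem::local_assemble_system,
  //                   &MyProblem::copy_local_to_global,
  //                   AssemblyScratchData(fe),
  //                   AssemblyCopyData());
  // @endcode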
1730
1731
1732 template <typename MainClass,
1733 typename Iterator,
1734 typename ScratchData,
1735 typename CopyData>
1736 void
1737 run(const IteratorOverIterators<Iterator> &begin,
1738 const IteratorOverIterators<std_cxx20::type_identity_t<Iterator>> &end,
1739 MainClass &main_object,
1740 void (MainClass::*worker)(const Iterator &, ScratchData &, CopyData &),
1741 void (MainClass::*copier)(const CopyData &),
1742 const ScratchData &sample_scratch_data,
1743 const CopyData &sample_copy_data,
1744 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1745 const unsigned int chunk_size = 8)
1746 {
1747 // forward to the other function
1748 run(
1749 begin,
1750 end,
1751 [&main_object, worker](const Iterator &iterator,
1752 ScratchData &scratch_data,
1753 CopyData &copy_data) {
1754 (main_object.*worker)(iterator, scratch_data, copy_data);
1755 },
1756 [&main_object, copier](const CopyData &copy_data) {
1757 (main_object.*copier)(copy_data);
1758 },
1759 sample_scratch_data,
1760 sample_copy_data,
1761 queue_length,
1762 chunk_size);
1763 }
1764
1765
1766
1774 template <typename MainClass,
1775 typename IteratorRangeType,
1776 typename ScratchData,
1777 typename CopyData,
1778 typename = std::enable_if_t<has_begin_and_end<IteratorRangeType>>>
1779 void
1780 run(
1781 IteratorRangeType iterator_range,
1782 MainClass &main_object,
1783 void (MainClass::*worker)(
1784 const typename std_cxx20::type_identity_t<IteratorRangeType>::iterator &,
1785 ScratchData &,
1786 CopyData &),
1787 void (MainClass::*copier)(const CopyData &),
1788 const ScratchData &sample_scratch_data,
1789 const CopyData &sample_copy_data,
1790 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1791 const unsigned int chunk_size = 8)
1792 {
1793 // Call the function above
1794 run(std::begin(iterator_range),
1795 std::end(iterator_range),
1796 main_object,
1797 worker,
1798 copier,
1799 sample_scratch_data,
1800 sample_copy_data,
1801 queue_length,
1802 chunk_size);
1803 }
1804
1805
1806
1810 template <typename MainClass,
1811 typename Iterator,
1812 typename ScratchData,
1813 typename CopyData>
1814 void
1815 run(const IteratorRange<Iterator> &iterator_range,
1816 MainClass &main_object,
1817 void (MainClass::*worker)(const Iterator &, ScratchData &, CopyData &),
1818 void (MainClass::*copier)(const CopyData &),
1819 const ScratchData &sample_scratch_data,
1820 const CopyData &sample_copy_data,
1821 const unsigned int queue_length = 2 * MultithreadInfo::n_threads(),
1822 const unsigned int chunk_size = 8)
1823 {
1824 // Call the function above
1825 run(std::begin(iterator_range),
1826 std::end(iterator_range),
1827 main_object,
1828 worker,
1829 copier,
1830 sample_scratch_data,
1831 sample_copy_data,
1832 queue_length,
1833 chunk_size);
1834 }
1835
1836} // namespace WorkStream
1837
1838
1839
1840DEAL_II_NAMESPACE_CLOSE
1841
1842
1843
1844//---------------------------- work_stream.h ---------------------------
1845// end of #ifndef dealii_work_stream_h
1846#endif
1847//---------------------------- work_stream.h ---------------------------