vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vector_operations_internal_h
18 #define dealii_vector_operations_internal_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/memory_space.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/numbers.h>
25 #include <deal.II/base/parallel.h>
26 #include <deal.II/base/types.h>
27 #include <deal.II/base/vectorization.h>
28 
29 #include <deal.II/lac/cuda_kernels.h>
30 #include <deal.II/lac/cuda_kernels.templates.h>
31 #include <deal.II/lac/vector_operation.h>
32 
33 #include <cstdio>
34 #include <cstring>
35 
36 DEAL_II_NAMESPACE_OPEN
37 
38 namespace internal
39 {
40  namespace VectorOperations
41  {
42  using size_type = types::global_dof_index;
43 
44  template <typename T>
45  bool
46  is_non_negative(const T &t)
47  {
48  return t >= 0;
49  }
50 
51 
52  template <typename T>
53  bool
54  is_non_negative(const std::complex<T> &)
55  {
56  Assert(false, ExcMessage("Complex numbers do not have an ordering."));
57 
58  return false;
59  }
60 
61 
62  // call std::copy, except in
63  // the case where we want to copy
64  // from std::complex to a
65  // non-complex type
66  template <typename T, typename U>
67  void
68  copy(const T *begin, const T *end, U *dest)
69  {
70  std::copy(begin, end, dest);
71  }
72 
73  template <typename T, typename U>
74  void
75  copy(const std::complex<T> *begin,
76  const std::complex<T> *end,
77  std::complex<U> * dest)
78  {
79  std::copy(begin, end, dest);
80  }
81 
82  template <typename T, typename U>
83  void
84  copy(const std::complex<T> *, const std::complex<T> *, U *)
85  {
86  Assert(false,
87  ExcMessage("Can't convert a vector of complex numbers "
88  "into a vector of reals/doubles"));
89  }
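 // As a usage note for the three overloads above (illustration only;
 // not part of the library): copies between equal real types and
 // between complex types lower to std::copy, which performs an
 // element-wise conversion where necessary,
 //
 // @code
 //   std::vector<std::complex<float>>  src(8);
 //   std::vector<std::complex<double>> dst(8);
 //   copy(src.data(), src.data() + 8, dst.data()); // converts per element
 // @endcode
 //
 // whereas instantiating the complex-to-real overload fails the Assert
 // above, since there is no canonical way to map a complex number to a
 // real one.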
90 
91 
92 
93 #ifdef DEAL_II_WITH_TBB
94 
102  template <typename Functor>
103  struct TBBForFunctor
104  {
105  TBBForFunctor(Functor &functor,
106  const size_type start,
107  const size_type end)
108  : functor(functor)
109  , start(start)
110  , end(end)
111  {
112  const size_type vec_size = end - start;
113  // set chunk size for sub-tasks
114  const unsigned int gs =
115  internal::VectorImplementation::minimum_parallel_grain_size;
116  n_chunks =
117  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
118  vec_size / gs);
119  chunk_size = vec_size / n_chunks;
120 
121  // round to next multiple of 512 (or minimum grain size if that happens
122  // to be smaller). this is advantageous because our accumulation
123  // algorithms favor lengths of a power of 2 due to pairwise summation ->
124  // at most one 'oddly' sized chunk
125  if (chunk_size > 512)
126  chunk_size = ((chunk_size + 511) / 512) * 512;
127  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
128  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
129  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
130  }
131 
132  void
133  operator()(const tbb::blocked_range<size_type> &range) const
134  {
135  const size_type r_begin = start + range.begin() * chunk_size;
136  const size_type r_end = std::min(start + range.end() * chunk_size, end);
137  functor(r_begin, r_end);
138  }
139 
140  Functor & functor;
141  const size_type start;
142  const size_type end;
143  unsigned int n_chunks;
144  size_type chunk_size;
145  };
146 #endif
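 // A worked example of the chunking logic in the constructor above
 // (illustrative numbers only, assuming a minimum grain size of 4096
 // and MultithreadInfo::n_threads() == 8): for vec_size == 100000 we
 // first propose min(4 * 8, 100000 / 4096) == 24 chunks of
 // 100000 / 24 == 4166 entries each, round the chunk size up to the
 // next multiple of 512 (4608), and finally recompute
 // n_chunks == (100000 + 4607) / 4608 == 22, so at most the last
 // chunk has an 'odd' size.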
147 
148  template <typename Functor>
149  void
150  parallel_for(
151  Functor & functor,
152  const size_type start,
153  const size_type end,
154  const std::shared_ptr<::parallel::internal::TBBPartitioner>
155  &partitioner)
156  {
157 #ifdef DEAL_II_WITH_TBB
158  const size_type vec_size = end - start;
159  // only go to the parallel function in case there are at least 4 parallel
160  // items, otherwise the overhead is too large
161  if (vec_size >=
162  4 * internal::VectorImplementation::
163  minimum_parallel_grain_size)
164  {
165  Assert(partitioner.get() != nullptr,
166  ExcInternalError(
167  "Unexpected initialization of Vector that does "
168  "not set the TBB partitioner to a usable state."));
169  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
170  partitioner->acquire_one_partitioner();
171 
172  TBBForFunctor<Functor> generic_functor(functor, start, end);
173  // We use a minimum grain size of 1 here since the grains at this
174  // stage of dividing the work refer to the number of vector chunks
175  // that are processed by (possibly different) threads in the
176  // parallelized for loop (i.e., they do not refer to individual
177  // vector entries). The number of chunks here is calculated inside
178  // TBBForFunctor. See also GitHub issue #2496 for further discussion
179  // of this strategy.
180  ::parallel::internal::parallel_for(
181  static_cast<size_type>(0),
182  static_cast<size_type>(generic_functor.n_chunks),
183  generic_functor,
184  1,
185  tbb_partitioner);
186  partitioner->release_one_partitioner(tbb_partitioner);
187  }
188  else if (vec_size > 0)
189  functor(start, end);
190 #else
191  functor(start, end);
192  (void)partitioner;
193 #endif
194  }
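 // A minimal usage sketch for parallel_for() (illustration only; the
 // 'Doubler' functor is hypothetical and the partitioner is normally
 // owned by the vector class that calls into this function):
 //
 // @code
 //   struct Doubler
 //   {
 //     double *v;
 //     void
 //     operator()(const size_type begin, const size_type end) const
 //     {
 //       for (size_type i = begin; i < end; ++i)
 //         v[i] *= 2.;
 //     }
 //   };
 //
 //   std::vector<double> x(100000, 1.);
 //   auto partitioner =
 //     std::make_shared<::parallel::internal::TBBPartitioner>();
 //   Doubler functor{x.data()};
 //   parallel_for(functor, 0, x.size(), partitioner);
 // @endcode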
195 
196 
197  // Define the functors necessary to use SIMD with TBB. We also include the
198  // simple copy and set operations.
199 
200  template <typename Number>
201  struct Vector_set
202  {
203  Vector_set(const Number value, Number *const dst)
204  : value(value)
205  , dst(dst)
206  {
207  Assert(dst != nullptr, ExcInternalError());
208  }
209 
210  void
211  operator()(const size_type begin, const size_type end) const
212  {
213  Assert(end >= begin, ExcInternalError());
214 
215  if (value == Number())
216  {
217 #ifdef DEAL_II_HAVE_CXX17
218  if constexpr (std::is_trivial<Number>::value)
219 #else
220  if (std::is_trivial<Number>::value)
221 #endif
222  {
223  std::memset(dst + begin, 0, sizeof(Number) * (end - begin));
224  return;
225  }
226  }
227  std::fill(dst + begin, dst + end, value);
228  }
229 
230  const Number value;
231  Number *const dst;
232  };
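 // Note on the memset short-cut in Vector_set::operator() above: it is
 // only taken when value == Number() and Number is a trivial type, in
 // which case the all-zero byte pattern written by std::memset is a
 // valid object representation of Number(); in every other case
 // std::fill assigns the elements one by one.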
233 
234  template <typename Number, typename OtherNumber>
235  struct Vector_copy
236  {
237  Vector_copy(const OtherNumber *const src, Number *const dst)
238  : src(src)
239  , dst(dst)
240  {
241  Assert(src != nullptr, ExcInternalError());
242  Assert(dst != nullptr, ExcInternalError());
243  }
244 
245  void
246  operator()(const size_type begin, const size_type end) const
247  {
248  Assert(end >= begin, ExcInternalError());
249 
250 #if __GNUG__ && __GNUC__ < 5
251  if (__has_trivial_copy(Number) &&
252  std::is_same<Number, OtherNumber>::value)
253 #else
254 # ifdef DEAL_II_HAVE_CXX17
255  if constexpr (std::is_trivially_copyable<Number>() &&
256  std::is_same<Number, OtherNumber>::value)
257 # else
258  if (std::is_trivially_copyable<Number>() &&
259  std::is_same<Number, OtherNumber>::value)
260 # endif
261 #endif
262  std::memcpy(dst + begin, src + begin, (end - begin) * sizeof(Number));
263  else
264  {
265  DEAL_II_OPENMP_SIMD_PRAGMA
266  for (size_type i = begin; i < end; ++i)
267  dst[i] = src[i];
268  }
269  }
270 
271  const OtherNumber *const src;
272  Number *const dst;
273  };
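 // Similarly, Vector_copy::operator() collapses to a single
 // std::memcpy when source and destination types agree and are
 // trivially copyable; a mixed-type copy such as float -> double
 // instead takes the element-wise loop, which performs the conversion
 // and can still be vectorized through the OpenMP SIMD pragma.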
274 
275  template <typename Number>
276  struct Vectorization_multiply_factor
277  {
278  Vectorization_multiply_factor(Number *const val, const Number factor)
279  : val(val)
280  , factor(factor)
281  {}
282 
283  void
284  operator()(const size_type begin, const size_type end) const
285  {
286  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
287  {
288  DEAL_II_OPENMP_SIMD_PRAGMA
289  for (size_type i = begin; i < end; ++i)
290  val[i] *= factor;
291  }
292  else
293  {
294  for (size_type i = begin; i < end; ++i)
295  val[i] *= factor;
296  }
297  }
298 
299  Number *const val;
300  const Number factor;
301  };
302 
303  template <typename Number>
304  struct Vectorization_add_av
305  {
306  Vectorization_add_av(Number *const val,
307  const Number *const v_val,
308  const Number factor)
309  : val(val)
310  , v_val(v_val)
311  , factor(factor)
312  {}
313 
314  void
315  operator()(const size_type begin, const size_type end) const
316  {
317  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
318  {
319  DEAL_II_OPENMP_SIMD_PRAGMA
320  for (size_type i = begin; i < end; ++i)
321  val[i] += factor * v_val[i];
322  }
323  else
324  {
325  for (size_type i = begin; i < end; ++i)
326  val[i] += factor * v_val[i];
327  }
328  }
329 
330  Number *const val;
331  const Number *const v_val;
332  const Number factor;
333  };
334 
335  template <typename Number>
336  struct Vectorization_sadd_xav
337  {
338  Vectorization_sadd_xav(Number *const val,
339  const Number *const v_val,
340  const Number a,
341  const Number x)
342  : val(val)
343  , v_val(v_val)
344  , a(a)
345  , x(x)
346  {}
347 
348  void
349  operator()(const size_type begin, const size_type end) const
350  {
351  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
352  {
353  DEAL_II_OPENMP_SIMD_PRAGMA
354  for (size_type i = begin; i < end; ++i)
355  val[i] = x * val[i] + a * v_val[i];
356  }
357  else
358  {
359  for (size_type i = begin; i < end; ++i)
360  val[i] = x * val[i] + a * v_val[i];
361  }
362  }
363 
364  Number *const val;
365  const Number *const v_val;
366  const Number a;
367  const Number x;
368  };
369 
370  template <typename Number>
371  struct Vectorization_subtract_v
372  {
373  Vectorization_subtract_v(Number *val, const Number *const v_val)
374  : val(val)
375  , v_val(v_val)
376  {}
377 
378  void
379  operator()(const size_type begin, const size_type end) const
380  {
381  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
382  {
383  DEAL_II_OPENMP_SIMD_PRAGMA
384  for (size_type i = begin; i < end; ++i)
385  val[i] -= v_val[i];
386  }
387  else
388  {
389  for (size_type i = begin; i < end; ++i)
390  val[i] -= v_val[i];
391  }
392  }
393 
394  Number *const val;
395  const Number *const v_val;
396  };
397 
398  template <typename Number>
399  struct Vectorization_add_factor
400  {
401  Vectorization_add_factor(Number *const val, const Number factor)
402  : val(val)
403  , factor(factor)
404  {}
405 
406  void
407  operator()(const size_type begin, const size_type end) const
408  {
409  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
410  {
411  DEAL_II_OPENMP_SIMD_PRAGMA
412  for (size_type i = begin; i < end; ++i)
413  val[i] += factor;
414  }
415  else
416  {
417  for (size_type i = begin; i < end; ++i)
418  val[i] += factor;
419  }
420  }
421 
422  Number *const val;
423  const Number factor;
424  };
425 
426  template <typename Number>
427  struct Vectorization_add_v
428  {
429  Vectorization_add_v(Number *const val, const Number *const v_val)
430  : val(val)
431  , v_val(v_val)
432  {}
433 
434  void
435  operator()(const size_type begin, const size_type end) const
436  {
437  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
438  {
439  DEAL_II_OPENMP_SIMD_PRAGMA
440  for (size_type i = begin; i < end; ++i)
441  val[i] += v_val[i];
442  }
443  else
444  {
445  for (size_type i = begin; i < end; ++i)
446  val[i] += v_val[i];
447  }
448  }
449 
450  Number *const val;
451  const Number *const v_val;
452  };
453 
454  template <typename Number>
455  struct Vectorization_add_avpbw
456  {
457  Vectorization_add_avpbw(Number *const val,
458  const Number *const v_val,
459  const Number *const w_val,
460  const Number a,
461  const Number b)
462  : val(val)
463  , v_val(v_val)
464  , w_val(w_val)
465  , a(a)
466  , b(b)
467  {}
468 
469  void
470  operator()(const size_type begin, const size_type end) const
471  {
472  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
473  {
474  DEAL_II_OPENMP_SIMD_PRAGMA
475  for (size_type i = begin; i < end; ++i)
476  val[i] = val[i] + a * v_val[i] + b * w_val[i];
477  }
478  else
479  {
480  for (size_type i = begin; i < end; ++i)
481  val[i] = val[i] + a * v_val[i] + b * w_val[i];
482  }
483  }
484 
485  Number *const val;
486  const Number *const v_val;
487  const Number *const w_val;
488  const Number a;
489  const Number b;
490  };
491 
492  template <typename Number>
493  struct Vectorization_sadd_xv
494  {
495  Vectorization_sadd_xv(Number *const val,
496  const Number *const v_val,
497  const Number x)
498  : val(val)
499  , v_val(v_val)
500  , x(x)
501  {}
502 
503  void
504  operator()(const size_type begin, const size_type end) const
505  {
506  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
507  {
508  DEAL_II_OPENMP_SIMD_PRAGMA
509  for (size_type i = begin; i < end; ++i)
510  val[i] = x * val[i] + v_val[i];
511  }
512  else
513  {
514  for (size_type i = begin; i < end; ++i)
515  val[i] = x * val[i] + v_val[i];
516  }
517  }
518 
519  Number *const val;
520  const Number *const v_val;
521  const Number x;
522  };
523 
524  template <typename Number>
525  struct Vectorization_sadd_xavbw
526  {
527  Vectorization_sadd_xavbw(Number *val,
528  const Number *v_val,
529  const Number *w_val,
530  Number x,
531  Number a,
532  Number b)
533  : val(val)
534  , v_val(v_val)
535  , w_val(w_val)
536  , x(x)
537  , a(a)
538  , b(b)
539  {}
540 
541  void
542  operator()(const size_type begin, const size_type end) const
543  {
544  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
545  {
546  DEAL_II_OPENMP_SIMD_PRAGMA
547  for (size_type i = begin; i < end; ++i)
548  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
549  }
550  else
551  {
552  for (size_type i = begin; i < end; ++i)
553  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
554  }
555  }
556 
557  Number *const val;
558  const Number *const v_val;
559  const Number *const w_val;
560  const Number x;
561  const Number a;
562  const Number b;
563  };
564 
565  template <typename Number>
566  struct Vectorization_scale
567  {
568  Vectorization_scale(Number *const val, const Number *const v_val)
569  : val(val)
570  , v_val(v_val)
571  {}
572 
573  void
574  operator()(const size_type begin, const size_type end) const
575  {
576  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
577  {
578  DEAL_II_OPENMP_SIMD_PRAGMA
579  for (size_type i = begin; i < end; ++i)
580  val[i] *= v_val[i];
581  }
582  else
583  {
584  for (size_type i = begin; i < end; ++i)
585  val[i] *= v_val[i];
586  }
587  }
588 
589  Number *const val;
590  const Number *const v_val;
591  };
592 
593  template <typename Number>
594  struct Vectorization_equ_au
595  {
596  Vectorization_equ_au(Number *const val,
597  const Number *const u_val,
598  const Number a)
599  : val(val)
600  , u_val(u_val)
601  , a(a)
602  {}
603 
604  void
605  operator()(const size_type begin, const size_type end) const
606  {
607  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
608  {
609  DEAL_II_OPENMP_SIMD_PRAGMA
610  for (size_type i = begin; i < end; ++i)
611  val[i] = a * u_val[i];
612  }
613  else
614  {
615  for (size_type i = begin; i < end; ++i)
616  val[i] = a * u_val[i];
617  }
618  }
619 
620  Number *const val;
621  const Number *const u_val;
622  const Number a;
623  };
624 
625  template <typename Number>
626  struct Vectorization_equ_aubv
627  {
628  Vectorization_equ_aubv(Number *const val,
629  const Number *const u_val,
630  const Number *const v_val,
631  const Number a,
632  const Number b)
633  : val(val)
634  , u_val(u_val)
635  , v_val(v_val)
636  , a(a)
637  , b(b)
638  {}
639 
640  void
641  operator()(const size_type begin, const size_type end) const
642  {
643  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
644  {
645  DEAL_II_OPENMP_SIMD_PRAGMA
646  for (size_type i = begin; i < end; ++i)
647  val[i] = a * u_val[i] + b * v_val[i];
648  }
649  else
650  {
651  for (size_type i = begin; i < end; ++i)
652  val[i] = a * u_val[i] + b * v_val[i];
653  }
654  }
655 
656  Number *const val;
657  const Number *const u_val;
658  const Number *const v_val;
659  const Number a;
660  const Number b;
661  };
662 
663  template <typename Number>
664  struct Vectorization_equ_aubvcw
665  {
666  Vectorization_equ_aubvcw(Number *val,
667  const Number *u_val,
668  const Number *v_val,
669  const Number *w_val,
670  const Number a,
671  const Number b,
672  const Number c)
673  : val(val)
674  , u_val(u_val)
675  , v_val(v_val)
676  , w_val(w_val)
677  , a(a)
678  , b(b)
679  , c(c)
680  {}
681 
682  void
683  operator()(const size_type begin, const size_type end) const
684  {
685  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
686  {
687  DEAL_II_OPENMP_SIMD_PRAGMA
688  for (size_type i = begin; i < end; ++i)
689  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
690  }
691  else
692  {
693  for (size_type i = begin; i < end; ++i)
694  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
695  }
696  }
697 
698  Number *const val;
699  const Number *const u_val;
700  const Number *const v_val;
701  const Number *const w_val;
702  const Number a;
703  const Number b;
704  const Number c;
705  };
706 
707  template <typename Number>
708  struct Vectorization_ratio
709  {
710  Vectorization_ratio(Number *val, const Number *a_val, const Number *b_val)
711  : val(val)
712  , a_val(a_val)
713  , b_val(b_val)
714  {}
715 
716  void
717  operator()(const size_type begin, const size_type end) const
718  {
719  if (::dealii::parallel::internal::EnableOpenMPSimdFor<Number>::value)
720  {
721  DEAL_II_OPENMP_SIMD_PRAGMA
722  for (size_type i = begin; i < end; ++i)
723  val[i] = a_val[i] / b_val[i];
724  }
725  else
726  {
727  for (size_type i = begin; i < end; ++i)
728  val[i] = a_val[i] / b_val[i];
729  }
730  }
731 
732  Number *const val;
733  const Number *const a_val;
734  const Number *const b_val;
735  };
736 
737 
738 
739  // All sums over all the vector entries (l2-norm, inner product, etc.) are
740  // performed with the same code, using a templated operation defined
741  // here. There are always two versions defined, a standard one that covers
742  // most cases and a vectorized one which is only for equal types and float
743  // and double.
744  template <typename Number, typename Number2>
745  struct Dot
746  {
747  static constexpr bool vectorizes = std::is_same<Number, Number2>::value &&
748  (VectorizedArray<Number>::size() > 1);
749 
750  Dot(const Number *const X, const Number2 *const Y)
751  : X(X)
752  , Y(Y)
753  {}
754 
755  Number
756  operator()(const size_type i) const
757  {
758  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
759  }
760 
761  VectorizedArray<Number>
762  do_vectorized(const size_type i) const
763  {
764  VectorizedArray<Number> x, y;
765  x.load(X + i);
766  y.load(Y + i);
767 
768  // the following operation in VectorizedArray does an element-wise
769  // scalar product without taking into account complex values and
770  // the need to take the complex-conjugate of one argument. this
771  // may be a bug, but because all VectorizedArray classes only
772  // work on real scalars, it doesn't really matter very much.
773  // in any case, assert that we really don't get here for
774  // complex-valued objects
775  static_assert(numbers::NumberTraits<Number>::is_complex == false,
776  "This operation is not correctly implemented for "
777  "complex-valued objects.");
778  return x * y;
779  }
780 
781  const Number *const X;
782  const Number2 *const Y;
783  };
784 
785  template <typename Number, typename RealType>
786  struct Norm2
787  {
788  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
789 
790  Norm2(const Number *const X)
791  : X(X)
792  {}
793 
794  RealType
795  operator()(const size_type i) const
796  {
797  return numbers::NumberTraits<Number>::abs_square(X[i]);
798  }
799 
800  VectorizedArray<Number>
801  do_vectorized(const size_type i) const
802  {
803  VectorizedArray<Number> x;
804  x.load(X + i);
805  return x * x;
806  }
807 
808  const Number *const X;
809  };
810 
811  template <typename Number, typename RealType>
812  struct Norm1
813  {
814  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
815 
816  Norm1(const Number *X)
817  : X(X)
818  {}
819 
820  RealType
821  operator()(const size_type i) const
822  {
823  return numbers::NumberTraits<Number>::abs(X[i]);
824  }
825 
826  VectorizedArray<Number>
827  do_vectorized(const size_type i) const
828  {
829  VectorizedArray<Number> x;
830  x.load(X + i);
831  return std::abs(x);
832  }
833 
834  const Number *X;
835  };
836 
837  template <typename Number, typename RealType>
838  struct NormP
839  {
840  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
841 
842  NormP(const Number *X, RealType p)
843  : X(X)
844  , p(p)
845  {}
846 
847  RealType
848  operator()(const size_type i) const
849  {
850  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
851  }
852 
853  VectorizedArray<Number>
854  do_vectorized(const size_type i) const
855  {
856  VectorizedArray<Number> x;
857  x.load(X + i);
858  return std::pow(std::abs(x), p);
859  }
860 
861  const Number * X;
862  const RealType p;
863  };
864 
865  template <typename Number>
866  struct MeanValue
867  {
868  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
869 
870  MeanValue(const Number *X)
871  : X(X)
872  {}
873 
874  Number
875  operator()(const size_type i) const
876  {
877  return X[i];
878  }
879 
880  VectorizedArray<Number>
881  do_vectorized(const size_type i) const
882  {
883  VectorizedArray<Number> x;
884  x.load(X + i);
885  return x;
886  }
887 
888  const Number *X;
889  };
890 
891  template <typename Number>
892  struct AddAndDot
893  {
894  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
895 
896  AddAndDot(Number *const X,
897  const Number *const V,
898  const Number *const W,
899  const Number a)
900  : X(X)
901  , V(V)
902  , W(W)
903  , a(a)
904  {}
905 
906  Number
907  operator()(const size_type i) const
908  {
909  X[i] += a * V[i];
910  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
911  }
912 
913  VectorizedArray<Number>
914  do_vectorized(const size_type i) const
915  {
916  VectorizedArray<Number> x, w, v;
917  x.load(X + i);
918  v.load(V + i);
919  x += a * v;
920  x.store(X + i);
921  // may only load from W after storing in X because the pointers might
922  // point to the same memory
923  w.load(W + i);
924 
925  // the following operation in VectorizedArray does an element-wise
926  // scalar product without taking into account complex values and
927  // the need to take the complex-conjugate of one argument. this
928  // may be a bug, but because all VectorizedArray classes only
929  // work on real scalars, it doesn't really matter very much.
930  // in any case, assert that we really don't get here for
931  // complex-valued objects
932  static_assert(numbers::NumberTraits<Number>::is_complex == false,
933  "This operation is not correctly implemented for "
934  "complex-valued objects.");
935  return x * w;
936  }
937 
938  Number *const X;
939  const Number *const V;
940  const Number *const W;
941  const Number a;
942  };
943 
944 
945 
946  // this is the main working loop for all vector sums using the templated
947  // operation above. it accumulates the sums using a block-wise summation
948  // algorithm with post-update. this blocked algorithm has been proposed in
949  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
950  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
951  // block size, 2. Sometimes it is referred to as pairwise summation. The
952  // worst case error made by this algorithm is on the order O(eps *
953  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
954  // though the Kahan summation is even more accurate with an error O(eps)
955  // by carrying along remainders not captured by the main sum, that involves
956  // additional costs which are not worthwhile. See the Wikipedia article on
957  // the Kahan summation algorithm.
958 
959  // The algorithm implemented here has the additional benefit that it is
960  // easily parallelized without changing the order of how the elements are
961  // added (floating point addition is not associative). For the same vector
962  // size and minimum_parallel_grainsize, the blocks are always the
963  // same and added pairwise.
964 
965  // The depth of recursion is controlled by the 'magic' parameter
966  // vector_accumulation_recursion_threshold: If the length is below
967  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
968  // unroll), a straight loop instead of recursion will be used. At the
969  // innermost level, eight values are added consecutively in order to better
970  // balance multiplications and additions.
971 
972  // Loops are unrolled as follows: the range [first,last) is broken into
973  // @p n_chunks each of size 32 plus the @p remainder.
974  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
975  // if possible and stores the result of the operation for each chunk in @p outer_results.
976 
977  // The code returns the result as the last argument in order to make
978  // spawning tasks simpler and use automatic template deduction.
979 
980 
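 // For illustration, a scalar sketch of the pairwise idea on a
 // power-of-two range (this is not the code used below, which blocks
 // the recursion and unrolls the innermost 32 values, but it shows why
 // each value passes through only O(log2(n)) roundings):
 //
 // @code
 //   double
 //   pairwise_sum(const double *first, const double *last)
 //   {
 //     const std::ptrdiff_t n = last - first;
 //     if (n == 1)
 //       return *first;
 //     return pairwise_sum(first, first + n / 2) +
 //            pairwise_sum(first + n / 2, last);
 //   }
 // @endcode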
986  const unsigned int vector_accumulation_recursion_threshold = 128;
987 
988  template <typename Operation, typename ResultType>
989  void
990  accumulate_recursive(const Operation &op,
991  const size_type first,
992  const size_type last,
993  ResultType & result)
994  {
995  const size_type vec_size = last - first;
996  if (vec_size <= vector_accumulation_recursion_threshold * 32)
997  {
998  // the vector is short enough so we perform the summation. first
999  // work on the regular part. The innermost 32 values are expanded in
1000  // order to obtain known loop bounds for most of the work.
1001  size_type index = first;
1002  ResultType outer_results[vector_accumulation_recursion_threshold];
1003 
1004  // set the zeroth element to zero to correctly handle the case where
1005  // vec_size == 0
1006  outer_results[0] = ResultType();
1007 
1008  // the variable serves two purposes: (i) number of chunks (each 32
1009  // indices) for the given size; all results are stored in
1010  // outer_results[0,n_chunks); (ii) in the SIMD case n_chunks is also the
1011  // next free index in outer_results[] to which we can write after
1012  // accumulate_regular() is executed.
1013  size_type n_chunks = vec_size / 32;
1014  const size_type remainder = vec_size % 32;
1015  Assert(remainder == 0 ||
1016  n_chunks < vector_accumulation_recursion_threshold,
1017  ExcInternalError());
1018 
1019  // Select between the regular version and vectorized version based
1020  // on the number types we are given. To choose the vectorized
1021  // version often enough, we need all chunks but the last one
1022  // to be divisible by the vectorization length.
1023  accumulate_regular(
1024  op,
1025  n_chunks,
1026  index,
1027  outer_results,
1028  std::integral_constant<bool, Operation::vectorizes>());
1029 
1030  // now work on the remainder, i.e., the last up to 32 values. Use
1031  // switch statement with fall-through to work on these values.
1032  if (remainder > 0)
1033  {
1034  // if we got here, it means that (vec_size <=
1035  // vector_accumulation_recursion_threshold * 32), which is to say
1036  // that the domain can be split into n_chunks <=
1037  // vector_accumulation_recursion_threshold:
1038  AssertIndexRange(n_chunks,
1039  vector_accumulation_recursion_threshold + 1);
1040  // split the remainder into chunks of 8, there could be up to 3
1041  // such chunks since remainder < 32.
1042  // Work on those chunks without any SIMD, that is we call
1043  // op(index).
1044  const size_type inner_chunks = remainder / 8;
1045  Assert(inner_chunks <= 3, ExcInternalError());
1046  const size_type remainder_inner = remainder % 8;
1047  ResultType r0 = ResultType(), r1 = ResultType(),
1048  r2 = ResultType();
1049  switch (inner_chunks)
1050  {
1051  case 3:
1052  r2 = op(index++);
1053  for (size_type j = 1; j < 8; ++j)
1054  r2 += op(index++);
1055  DEAL_II_FALLTHROUGH;
1056  case 2:
1057  r1 = op(index++);
1058  for (size_type j = 1; j < 8; ++j)
1059  r1 += op(index++);
1060  r1 += r2;
1061  DEAL_II_FALLTHROUGH;
1062  case 1:
1063  r2 = op(index++);
1064  for (size_type j = 1; j < 8; ++j)
1065  r2 += op(index++);
1066  DEAL_II_FALLTHROUGH;
1067  default:
1068  for (size_type j = 0; j < remainder_inner; ++j)
1069  r0 += op(index++);
1070  r0 += r2;
1071  r0 += r1;
1072  if (n_chunks == vector_accumulation_recursion_threshold)
1073  outer_results[vector_accumulation_recursion_threshold -
1074  1] += r0;
1075  else
1076  {
1077  outer_results[n_chunks] = r0;
1078  n_chunks++;
1079  }
1080  break;
1081  }
1082  }
1083  // make sure we worked through all indices
1084  AssertDimension(index, last);
1085 
1086  // now sum the results from the chunks stored in
1087  // outer_results[0,n_chunks) recursively
1088  while (n_chunks > 1)
1089  {
1090  if (n_chunks % 2 == 1)
1091  outer_results[n_chunks++] = ResultType();
1092  for (size_type i = 0; i < n_chunks; i += 2)
1093  outer_results[i / 2] = outer_results[i] + outer_results[i + 1];
1094  n_chunks /= 2;
1095  }
1096  result = outer_results[0];
1097  }
1098  else
1099  {
1100  // split vector into four pieces and work on the pieces
1101  // recursively. Make pieces (except last) divisible by one fourth the
1102  // recursion threshold.
1103  const size_type new_size =
1104  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1105  vector_accumulation_recursion_threshold * 8;
1106  Assert(first + 3 * new_size < last, ExcInternalError());
1107  ResultType r0, r1, r2, r3;
1108  accumulate_recursive(op, first, first + new_size, r0);
1109  accumulate_recursive(op, first + new_size, first + 2 * new_size, r1);
1110  accumulate_recursive(op,
1111  first + 2 * new_size,
1112  first + 3 * new_size,
1113  r2);
1114  accumulate_recursive(op, first + 3 * new_size, last, r3);
1115  r0 += r1;
1116  r2 += r3;
1117  result = r0 + r2;
1118  }
1119  }
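 // A worked example for the splitting branch above: with the recursion
 // threshold of 128 (i.e. up to 128 * 32 == 4096 entries are summed
 // directly) and vec_size == 20000, we get
 // new_size == (20000 / 4096) * 128 * 8 == 4096, so the four recursive
 // calls work on 4096, 4096, 4096 and 7712 entries, respectively; all
 // pieces except the last are divisible by one fourth of the recursion
 // threshold, as promised in the comment above.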
1120 
1121 
1122  // this is the inner working routine for the accumulation loops
1123  // below. This is the standard case where the loop bounds are known. We
1124  // pulled this function out of the regular accumulate routine because we
1125  // might do this thing vectorized (see specialized function below)
1126  template <typename Operation, typename ResultType>
1127  void
1128  accumulate_regular(
1129  const Operation &op,
1130  const size_type &n_chunks,
1131  size_type & index,
1132  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1133  std::integral_constant<bool, false>)
1134  {
1135  // note that each chunk is chosen to have a width of 32, thereby the index
1136  // is incremented by 4*8 for each @p i.
1137  for (size_type i = 0; i < n_chunks; ++i)
1138  {
1139  ResultType r0 = op(index);
1140  ResultType r1 = op(index + 1);
1141  ResultType r2 = op(index + 2);
1142  ResultType r3 = op(index + 3);
1143  index += 4;
1144  for (size_type j = 1; j < 8; ++j, index += 4)
1145  {
1146  r0 += op(index);
1147  r1 += op(index + 1);
1148  r2 += op(index + 2);
1149  r3 += op(index + 3);
1150  }
1151  r0 += r1;
1152  r2 += r3;
1153  outer_results[i] = r0 + r2;
1154  }
1155  }
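 // In other words, each 32-value chunk is accumulated into four
 // independent partial sums r0..r3, each unrolled eight times; this
 // shortens the floating-point dependency chain so the accumulators
 // can stay in registers, before the partial sums are combined
 // pairwise into outer_results[i].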
1156 
1157 
1158 
1159  // this is the inner working routine for the accumulation loops
1160  // below. This is the specialized case where the loop bounds are known and
1161  // where we can vectorize. In that case, we request the 'do_vectorized'
1162  // routine of the operation instead of the regular one which does several
1163  // operations at once.
1164  template <typename Operation, typename Number>
1165  void
1166  accumulate_regular(
1167  const Operation &op,
1168  size_type & n_chunks,
1169  size_type & index,
1170  Number (&outer_results)[vector_accumulation_recursion_threshold],
1171  std::integral_constant<bool, true>)
1172  {
1173  // we start from @p index and work through @p n_chunks, each of size 32.
1174  // in order to employ SIMD and work on @p nvecs values at a time, we split
1175  // loop yet again:
1176  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1177  // nvecs*(4*8) elements.
1178 
1179  constexpr unsigned int nvecs = VectorizedArray<Number>::size();
1180  const size_type regular_chunks = n_chunks / nvecs;
1181  for (size_type i = 0; i < regular_chunks; ++i)
1182  {
1183  VectorizedArray<Number> r0 = op.do_vectorized(index);
1184  VectorizedArray<Number> r1 = op.do_vectorized(index + nvecs);
1185  VectorizedArray<Number> r2 = op.do_vectorized(index + 2 * nvecs);
1186  VectorizedArray<Number> r3 = op.do_vectorized(index + 3 * nvecs);
1187  index += nvecs * 4;
1188  for (size_type j = 1; j < 8; ++j, index += nvecs * 4)
1189  {
1190  r0 += op.do_vectorized(index);
1191  r1 += op.do_vectorized(index + nvecs);
1192  r2 += op.do_vectorized(index + 2 * nvecs);
1193  r3 += op.do_vectorized(index + 3 * nvecs);
1194  }
1195  r0 += r1;
1196  r2 += r3;
1197  r0 += r2;
1198  r0.store(&outer_results[i * nvecs]);
1199  }
1200 
1201  // If we are treating a case where the vector length is not divisible by
1202  // the vectorization length, we need a cleanup loop.
1203  // The remaining chunks are processed one by one starting from
1204  // regular_chunks * nvecs; We do as much as possible with 2 SIMD
1205  // operations within each chunk. Here we assume that nvecs < 32/2 = 16 as
1206  // well as 16%nvecs==0.
1207  static_assert(
1208  VectorizedArray<Number>::size() <= 16 &&
1209  16 % VectorizedArray<Number>::size() == 0,
1210  "VectorizedArray::size() must be a power of 2 and not more than 16");
1211  Assert(16 % nvecs == 0, ExcInternalError());
1212  if (n_chunks % nvecs != 0)
1213  {
1214  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
1215  r1 = VectorizedArray<Number>();
1216  const size_type start_irreg = regular_chunks * nvecs;
1217  for (size_type c = start_irreg; c < n_chunks; ++c)
1218  for (size_type j = 0; j < 32; j += 2 * nvecs, index += 2 * nvecs)
1219  {
1220  r0 += op.do_vectorized(index);
1221  r1 += op.do_vectorized(index + nvecs);
1222  }
1223  r0 += r1;
1224  r0.store(&outer_results[start_irreg]);
1225  // update n_chunks to denote the first unused element in outer_results[]
1226  // from which we can keep writing.
1227  n_chunks = start_irreg + VectorizedArray<Number>::size();
1228  }
1229  }
1230 
1231 
1232 
1233 #ifdef DEAL_II_WITH_TBB
1234 
1262  template <typename Operation, typename ResultType>
1263  struct TBBReduceFunctor
1264  {
1265  static const unsigned int threshold_array_allocate = 512;
1266 
1267  TBBReduceFunctor(const Operation &op,
1268  const size_type start,
1269  const size_type end)
1270  : op(op)
1271  , start(start)
1272  , end(end)
1273  {
1274  const size_type vec_size = end - start;
1275  // set chunk size for sub-tasks
1276  const unsigned int gs =
1277  internal::VectorImplementation::minimum_parallel_grain_size;
1278  n_chunks =
1279  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
1280  vec_size / gs);
1281  chunk_size = vec_size / n_chunks;
1282 
1283  // round to next multiple of 512 (or leave it at the minimum grain size
1284  // if that happens to be smaller). this is advantageous because our
1285  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1286  // at most one 'oddly' sized chunk
1287  if (chunk_size > 512)
1288  chunk_size = ((chunk_size + 511) / 512) * 512;
1289  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1290  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
1291  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
1292 
1293  if (n_chunks > threshold_array_allocate)
1294  {
1295  // make sure we allocate an even number of elements;
1296  // access to the new last element is needed in do_sum()
1297  large_array.resize(2 * ((n_chunks + 1) / 2));
1298  array_ptr = large_array.data();
1299  }
1300  else
1301  array_ptr = &small_array[0];
1302  }
1303 
1308  void
1309  operator()(const tbb::blocked_range<size_type> &range) const
1310  {
1311  for (size_type i = range.begin(); i < range.end(); ++i)
1312  accumulate_recursive(op,
1313  start + i * chunk_size,
1314  std::min(start + (i + 1) * chunk_size, end),
1315  array_ptr[i]);
1316  }
1317 
1318  ResultType
1319  do_sum() const
1320  {
1321  while (n_chunks > 1)
1322  {
1323  if (n_chunks % 2 == 1)
1324  array_ptr[n_chunks++] = ResultType();
1325  for (size_type i = 0; i < n_chunks; i += 2)
1326  array_ptr[i / 2] = array_ptr[i] + array_ptr[i + 1];
1327  n_chunks /= 2;
1328  }
1329  return array_ptr[0];
1330  }
1331 
1332  const Operation &op;
1333  const size_type start;
1334  const size_type end;
1335 
1336  mutable unsigned int n_chunks;
1337  unsigned int chunk_size;
1338  ResultType small_array[threshold_array_allocate];
1339  std::vector<ResultType> large_array;
1340  // this variable either points to small_array or large_array depending on
1341  // the number of threads we want to feed
1342  mutable ResultType *array_ptr;
1343  };
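 // do_sum() above performs the same pairwise reduction over the chunk
 // results as the serial code in accumulate_recursive(): an odd
 // n_chunks is first padded with a zero element, so that, e.g., five
 // partial results a0..a4 are combined (up to added zeros) as
 // ((a0 + a1) + (a2 + a3)) + (a4 + 0), independently of how many
 // threads produced them.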
1344 #endif
1345 
1346 
1347 
1352  template <typename Operation, typename ResultType>
1353  void
1354  parallel_reduce(
1355  const Operation &op,
1356  const size_type start,
1357  const size_type end,
1358  ResultType & result,
1359  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1360  &partitioner)
1361  {
1362 #ifdef DEAL_II_WITH_TBB
1363  const size_type vec_size = end - start;
1364  // only go to the parallel function in case there are at least 4 parallel
1365  // items, otherwise the overhead is too large
1366  if (vec_size >=
1367  4 * internal::VectorImplementation::
1368  minimum_parallel_grain_size)
1369  {
1370  Assert(partitioner.get() != nullptr,
1371  ExcInternalError(
1372  "Unexpected initialization of Vector that does "
1373  "not set the TBB partitioner to a usable state."));
1374  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1375  partitioner->acquire_one_partitioner();
1376 
1377  TBBReduceFunctor<Operation, ResultType> generic_functor(op,
1378  start,
1379  end);
1380  // We use a minimum grain size of 1 here since the grains at this
1381  // stage of dividing the work refer to the number of vector chunks
1382  // that are processed by (possibly different) threads in the
1383  // parallelized for loop (i.e., they do not refer to individual
1384  // vector entries). The number of chunks here is calculated inside
1385  // TBBForFunctor. See also GitHub issue #2496 for further discussion
1386  // of this strategy.
1387  ::parallel::internal::parallel_for(
1388  static_cast<size_type>(0),
1389  static_cast<size_type>(generic_functor.n_chunks),
1390  generic_functor,
1391  1,
1392  tbb_partitioner);
1393  partitioner->release_one_partitioner(tbb_partitioner);
1394  result = generic_functor.do_sum();
1395  }
1396  else
1397  accumulate_recursive(op, start, end, result);
1398 #else
1399  accumulate_recursive(op, start, end, result);
1400  (void)partitioner;
1401 #endif
1402  }
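 // A minimal usage sketch for parallel_reduce() (illustration only;
 // the locally created partitioner is hypothetical, it normally lives
 // in the vector class):
 //
 // @code
 //   std::vector<double> x(100000, 2.);
 //   auto partitioner =
 //     std::make_shared<::parallel::internal::TBBPartitioner>();
 //   Norm2<double, double> norm2(x.data());
 //   double                sum_of_squares;
 //   parallel_reduce(norm2, 0, x.size(), sum_of_squares, partitioner);
 //   // sum_of_squares == 100000 * 2. * 2. == 400000
 // @endcode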
1403 
1404 
1405  template <typename Number, typename Number2, typename MemorySpace>
1406  struct functions
1407  {
1408  static void
1409  copy(
1410  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1411  /*thread_loop_partitioner*/,
1412  const size_type /*size*/,
1413  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1414  & /*v_data*/,
1415  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1416  {
1417  static_assert(
1418  std::is_same<MemorySpace, ::MemorySpace::CUDA>::value &&
1419  std::is_same<Number, Number2>::value,
1420  "For the CUDA MemorySpace Number and Number2 should be the same type");
1421  }
1422 
1423  static void
1424  set(
1425  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1426  /*thread_loop_partitioner*/,
1427  const size_type /*size*/,
1428  const Number /*s*/,
1429  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1430  {}
1431 
1432  static void
1433  add_vector(
1434  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1435  /*thread_loop_partitioner*/,
1436  const size_type /*size*/,
1437  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1438  & /*v_data*/,
1439  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1440  {}
1441 
1442  static void
1443  subtract_vector(
1444  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1445  /*thread_loop_partitioner*/,
1446  const size_type /*size*/,
1447  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1448  & /*v_data*/,
1449  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1450  {}
1451 
1452  static void
1453  add_factor(
1454  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1455  /*thread_loop_partitioner*/,
1456  const size_type /*size*/,
1457  Number /*a*/,
1458  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1459  {}
1460 
1461  static void
1462  add_av(
1463  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1464  /*thread_loop_partitioner*/,
1465  const size_type /*size*/,
1466  const Number /*a*/,
1467  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1468  & /*v_data*/,
1469  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1470  {}
1471 
1472  static void
1473  add_avpbw(
1474  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1475  /*thread_loop_partitioner*/,
1476  const size_type /*size*/,
1477  const Number /*a*/,
1478  const Number /*b*/,
1479  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1480  & /*v_data*/,
1481  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1482  & /*w_data*/,
1483  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1484  {}
1485 
1486  static void
1487  sadd_xv(
1488  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1489  /*thread_loop_partitioner*/,
1490  const size_type /*size*/,
1491  const Number /*x*/,
1492  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1493  & /*v_data*/,
1494  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1495  {}
1496 
1497  static void
1498  sadd_xav(
1499  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1500  /*thread_loop_partitioner*/,
1501  const size_type /*size*/,
1502  const Number /*x*/,
1503  const Number /*a*/,
1504  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1505  & /*v_data*/,
1506  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1507  {}
1508 
1509  static void
1510  sadd_xavbw(
1511  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1512  /*thread_loop_partitioner*/,
1513  const size_type /*size*/,
1514  const Number /*x*/,
1515  const Number /*a*/,
1516  const Number /*b*/,
1517  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1518  & /*v_data*/,
1519  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1520  & /*w_data*/,
1521  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1522  {}
1523 
1524  static void
1525  multiply_factor(
1526  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1527  /*thread_loop_partitioner*/,
1528  const size_type /*size*/,
1529  const Number /*factor*/,
1530  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1531  {}
1532 
1533  static void
1534  scale(
1535  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1536  /*thread_loop_partitioner*/,
1537  const size_type /*size*/,
1538  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1539  & /*v_data*/,
1540  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1541  {}
1542 
1543  static void
1544  equ_au(
1545  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1546  /*thread_loop_partitioner*/,
1547  const size_type /*size*/,
1548  const Number /*a*/,
1549  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1550  & /*v_data*/,
1551  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1552  {}
1553 
1554  static void
1555  equ_aubv(
1556  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1557  /*thread_loop_partitioner*/,
1558  const size_type /*size*/,
1559  const Number /*a*/,
1560  const Number /*b*/,
1561  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1562  & /*v_data*/,
1563  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1564  & /*w_data*/,
1565  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1566  {}
1567 
1568  static Number
1569  dot(
1570  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1571  /*thread_loop_partitioner*/,
1572  const size_type /*size*/,
1573  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1574  & /*v_data*/,
1575  const ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1576  {
1577  return Number();
1578  }
1579 
1580  template <typename real_type>
1581  static void
1582  norm_2(
1583  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1584  /*thread_loop_partitioner*/,
1585  const size_type /*size*/,
1586  real_type & /*sum*/,
1587  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1588  & /*v_data*/,
1590  {}
1591 
1592  static Number
1593  mean_value(
1594  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1595  /*thread_loop_partitioner*/,
1596  const size_type /*size*/,
1597  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1598  & /*data*/)
1599  {
1600  return Number();
1601  }
1602 
1603  template <typename real_type>
1604  static void
1605  norm_1(
1606  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1607  /*thread_loop_partitioner*/,
1608  const size_type /*size*/,
1609  real_type & /*sum*/,
1610  Number * /*values*/,
1611  Number * /*values_dev*/)
1612  {}
1613 
1614  template <typename real_type>
1615  static void
1616  norm_p(
1617  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1618  /*thread_loop_partitioner*/,
1619  const size_type /*size*/,
1620  real_type & /*sum*/,
1621  real_type /*p*/,
1622  const ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*v_data*/)
1623  {}
1624 
1625  static Number
1626  add_and_dot(
1627  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1628  /*thread_loop_partitioner*/,
1629  const size_type /*size*/,
1630  const Number /*a*/,
1631  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1632  & /*v_data*/,
1633  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1634  & /*w_data*/,
1635  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1636  {
1637  return Number();
1638  }
1639 
1640  template <typename MemorySpace2>
1641  static void
1642  import(
1643  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1644  /*thread_loop_partitioner*/,
1645  const size_type /*size*/,
1646  VectorOperation::values /*operation*/,
1647  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
1648  & /*v_data*/,
1649  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1650  {}
1651  };
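 // The primary template above consists of empty stubs (plus the
 // static_assert in copy() that rejects unsupported Number/Number2
 // combinations for the CUDA memory space); it exists only so that
 // the code compiles for every MemorySpace. The actual work happens
 // in the specializations below for ::MemorySpace::Host and, if
 // deal.II is configured with CUDA support, ::MemorySpace::CUDA.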
1652 
1653 
1654 
1655  template <typename Number, typename Number2>
1656  struct functions<Number, Number2, ::MemorySpace::Host>
1657  {
1658  static void
1659  copy(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1660  & thread_loop_partitioner,
1661  const size_type size,
1662  const ::MemorySpace::
1663  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1664  ::MemorySpace::
1665  MemorySpaceData<Number, ::MemorySpace::Host>
1666  &data)
1667  {
1668  Vector_copy<Number, Number2> copier(v_data.values.get(),
1669  data.values.get());
1670  parallel_for(copier, 0, size, thread_loop_partitioner);
1671  }
1672 
1673  static void
1674  set(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1675  & thread_loop_partitioner,
1676  const size_type size,
1677  const Number s,
1678  ::MemorySpace::
1679  MemorySpaceData<Number, ::MemorySpace::Host>
1680  &data)
1681  {
1682  Vector_set<Number> setter(s, data.values.get());
1683  parallel_for(setter, 0, size, thread_loop_partitioner);
1684  }
1685 
1686  static void
1687  add_vector(
1688  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1689  & thread_loop_partitioner,
1690  const size_type size,
1691  const ::MemorySpace::
1692  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1693  ::MemorySpace::
1694  MemorySpaceData<Number, ::MemorySpace::Host>
1695  &data)
1696  {
1697  Vectorization_add_v<Number> vector_add(data.values.get(),
1698  v_data.values.get());
1699  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1700  }
1701 
1702  static void
1703  subtract_vector(
1704  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1705  & thread_loop_partitioner,
1706  const size_type size,
1707  const ::MemorySpace::
1708  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1709  ::MemorySpace::
1710  MemorySpaceData<Number, ::MemorySpace::Host>
1711  &data)
1712  {
1713  Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
1714  v_data.values.get());
1715  parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
1716  }
1717 
1718  static void
1719  add_factor(
1720  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1721  & thread_loop_partitioner,
1722  const size_type size,
1723  Number a,
1724  ::MemorySpace::
1725  MemorySpaceData<Number, ::MemorySpace::Host>
1726  &data)
1727  {
1728  Vectorization_add_factor<Number> vector_add(data.values.get(), a);
1729  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1730  }
1731 
1732  static void
1733  add_av(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1734  & thread_loop_partitioner,
1735  const size_type size,
1736  const Number a,
1737  const ::MemorySpace::
1738  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1739  ::MemorySpace::
1740  MemorySpaceData<Number, ::MemorySpace::Host>
1741  &data)
1742  {
1743  Vectorization_add_av<Number> vector_add(data.values.get(),
1744  v_data.values.get(),
1745  a);
1746  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1747  }
1748 
1749  static void
1750  add_avpbw(
1751  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1752  & thread_loop_partitioner,
1753  const size_type size,
1754  const Number a,
1755  const Number b,
1756  const ::MemorySpace::
1757  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1758  const ::MemorySpace::
1759  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1760  ::MemorySpace::
1761  MemorySpaceData<Number, ::MemorySpace::Host>
1762  &data)
1763  {
1764  Vectorization_add_avpbw<Number> vector_add(
1765  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1766  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1767  }
1768 
1769  static void
1770  sadd_xv(
1771  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1772  & thread_loop_partitioner,
1773  const size_type size,
1774  const Number x,
1775  const ::MemorySpace::
1776  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1777  ::MemorySpace::
1778  MemorySpaceData<Number, ::MemorySpace::Host>
1779  &data)
1780  {
1781  Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
1782  v_data.values.get(),
1783  x);
1784  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1785  }
1786 
1787  static void
1788  sadd_xav(
1789  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1790  & thread_loop_partitioner,
1791  const size_type size,
1792  const Number x,
1793  const Number a,
1794  const ::MemorySpace::
1795  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1796  ::MemorySpace::
1797  MemorySpaceData<Number, ::MemorySpace::Host>
1798  &data)
1799  {
1800  Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
1801  v_data.values.get(),
1802  a,
1803  x);
1804  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1805  }
1806 
1807  static void
1808  sadd_xavbw(
1809  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1810  & thread_loop_partitioner,
1811  const size_type size,
1812  const Number x,
1813  const Number a,
1814  const Number b,
1815  const ::MemorySpace::
1816  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1817  const ::MemorySpace::
1818  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1819  ::MemorySpace::
1820  MemorySpaceData<Number, ::MemorySpace::Host>
1821  &data)
1822  {
1823  Vectorization_sadd_xavbw<Number> vector_sadd(
1824  data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
1825  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1826  }
1827 
1828  static void
1829  multiply_factor(
1830  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1831  & thread_loop_partitioner,
1832  const size_type size,
1833  const Number factor,
1834  ::MemorySpace::
1835  MemorySpaceData<Number, ::MemorySpace::Host>
1836  &data)
1837  {
1838  Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
1839  factor);
1840  parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
1841  }
1842 
1843  static void
1844  scale(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1845  & thread_loop_partitioner,
1846  const size_type size,
1847  const ::MemorySpace::
1848  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1849  ::MemorySpace::
1850  MemorySpaceData<Number, ::MemorySpace::Host>
1851  &data)
1852  {
1853  Vectorization_scale<Number> vector_scale(data.values.get(),
1854  v_data.values.get());
1855  parallel_for(vector_scale, 0, size, thread_loop_partitioner);
1856  }
1857 
1858  static void
1859  equ_au(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1860  & thread_loop_partitioner,
1861  const size_type size,
1862  const Number a,
1863  const ::MemorySpace::
1864  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1865  ::MemorySpace::
1866  MemorySpaceData<Number, ::MemorySpace::Host>
1867  &data)
1868  {
1869  Vectorization_equ_au<Number> vector_equ(data.values.get(),
1870  v_data.values.get(),
1871  a);
1872  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1873  }
1874 
1875  static void
1876  equ_aubv(
1877  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1878  & thread_loop_partitioner,
1879  const size_type size,
1880  const Number a,
1881  const Number b,
1882  const ::MemorySpace::
1883  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1884  const ::MemorySpace::
1885  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1886  ::MemorySpace::
1887  MemorySpaceData<Number, ::MemorySpace::Host>
1888  &data)
1889  {
1890  Vectorization_equ_aubv<Number> vector_equ(
1891  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1892  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1893  }
1894 
1895  static Number
1896  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1897  & thread_loop_partitioner,
1898  const size_type size,
1899  const ::MemorySpace::
1900  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1901  const ::MemorySpace::
1902  MemorySpaceData<Number, ::MemorySpace::Host>
1903  &data)
1904  {
1905  Number sum;
1906  Dot<Number, Number2> dot(
1907  data.values.get(), v_data.values.get());
1908  parallel_reduce(
1909  dot, 0, size, sum, thread_loop_partitioner);
1910  AssertIsFinite(sum);
1911 
1912  return sum;
1913  }
1914 
1915  template <typename real_type>
1916  static void
1917  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1918  & thread_loop_partitioner,
1919  const size_type size,
1920  real_type & sum,
1921  const ::MemorySpace::
1922  MemorySpaceData<Number, ::MemorySpace::Host>
1923  &data)
1924  {
1925  Norm2<Number, real_type> norm2(data.values.get());
1926  parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
1927  }
1928 
1929  static Number
1930  mean_value(
1931  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1932  & thread_loop_partitioner,
1933  const size_type size,
1934  const ::MemorySpace::
1935  MemorySpaceData<Number, ::MemorySpace::Host> &data)
1936  {
1937  Number sum;
1938  MeanValue<Number> mean(data.values.get());
1939  parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
1940 
1941  return sum;
1942  }
1943 
1944  template <typename real_type>
1945  static void
1946  norm_1(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1947  & thread_loop_partitioner,
1948  const size_type size,
1949  real_type & sum,
1950  const ::MemorySpace::
1951  MemorySpaceData<Number, ::MemorySpace::Host>
1952  &data)
1953  {
1954  Norm1<Number, real_type> norm1(data.values.get());
1955  parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
1956  }
1957 
1958  template <typename real_type>
1959  static void
1960  norm_p(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1961  & thread_loop_partitioner,
1962  const size_type size,
1963  real_type & sum,
1964  const real_type p,
1965  const ::MemorySpace::
1966  MemorySpaceData<Number, ::MemorySpace::Host>
1967  &data)
1968  {
1969  NormP<Number, real_type> normp(data.values.get(), p);
1970  parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
1971  }
1972 
1973  static Number
1974  add_and_dot(
1975  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1976  & thread_loop_partitioner,
1977  const size_type size,
1978  const Number a,
1979  const ::MemorySpace::
1980  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1981  const ::MemorySpace::
1982  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1983  ::MemorySpace::
1984  MemorySpaceData<Number, ::MemorySpace::Host>
1985  &data)
1986  {
1987  Number sum;
1988  AddAndDot<Number> adder(data.values.get(),
1989  v_data.values.get(),
1990  w_data.values.get(),
1991  a);
1992  parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
1993 
1994  return sum;
1995  }
1996 
1997  template <typename MemorySpace2>
1998  static void
1999  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2000  & thread_loop_partitioner,
2001  const size_type size,
2002  VectorOperation::values operation,
2003  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2004  &v_data,
2005  ::MemorySpace::
2006  MemorySpaceData<Number, ::MemorySpace::Host>
2007  &data,
2008  typename std::enable_if<
2009  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2010  int>::type = 0)
2011  {
2012  if (operation == VectorOperation::insert)
2013  {
2014  copy(thread_loop_partitioner, size, v_data, data);
2015  }
2016  else if (operation == VectorOperation::add)
2017  {
2018  add_vector(thread_loop_partitioner, size, v_data, data);
2019  }
2020  else
2021  {
2022  AssertThrow(false, ExcNotImplemented());
2023  }
2024  }
2025 
2026 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2027  template <typename MemorySpace2>
2028  static void
2029  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2030  & /*thread_loop_partitioner*/,
2031  const size_type size,
2032  VectorOperation::values operation,
2033  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2034  &v_data,
2035  ::MemorySpace::
2036  MemorySpaceData<Number, ::MemorySpace::Host>
2037  &data,
2038  typename std::enable_if<
2039  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2040  int>::type = 0)
2041  {
2042  if (operation == VectorOperation::insert)
2043  {
2044  cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
2045  v_data.values_dev.get(),
2046  size * sizeof(Number),
2047  cudaMemcpyDeviceToHost);
2048  AssertCuda(cuda_error_code);
2049  }
2050  else
2051  {
2052  AssertThrow(false, ExcNotImplemented());
2053  }
2054  }
2055 #endif
2056  };
2057 
2058 
2059 
2060 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2061  template <typename Number>
2062  struct functions<Number, Number, ::MemorySpace::CUDA>
2063  {
2064  static const int block_size =
2065  ::LinearAlgebra::CUDAWrappers::kernel::block_size;
2066  static const int chunk_size =
2067  ::LinearAlgebra::CUDAWrappers::kernel::chunk_size;
2068 
2069  static void
2070  copy(
2071  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2072  const size_type size,
2073  const ::MemorySpace::
2074  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2075  ::MemorySpace::
2076  MemorySpaceData<Number, ::MemorySpace::CUDA>
2077  &data)
2078  {
2079  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2080  v_data.values_dev.get(),
2081  size * sizeof(Number),
2082  cudaMemcpyDeviceToDevice);
2083  AssertCuda(cuda_error_code);
2084  }
2085 
2086  static void
2087  set(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2088  const size_type size,
2089  const Number s,
2090  ::MemorySpace::
2091  MemorySpaceData<Number, ::MemorySpace::CUDA>
2092  &data)
2093  {
2094  const int n_blocks = 1 + size / (chunk_size * block_size);
2095  ::LinearAlgebra::CUDAWrappers::kernel::set<Number>
2096  <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
2097  AssertCudaKernel();
2098  }
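 // All kernels in this specialization use the same launch
 // configuration as set() above (illustrative numbers, assuming the
 // kernel constants block_size == 512 and chunk_size == 8): each
 // block processes chunk_size * block_size == 4096 entries, so a
 // vector of size == 1000000 is launched with
 // n_blocks == 1 + 1000000 / 4096 == 245 blocks of 512 threads.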
2099 
2100  static void
2101  add_vector(
2102  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2103  const size_type size,
2104  const ::MemorySpace::
2105  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2106  ::MemorySpace::
2107  MemorySpaceData<Number, ::MemorySpace::CUDA>
2108  &data)
2109  {
2110  const int n_blocks = 1 + size / (chunk_size * block_size);
2111  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2112  <<<n_blocks, block_size>>>(data.values_dev.get(),
2113  1.,
2114  v_data.values_dev.get(),
2115  size);
2116  AssertCudaKernel();
2117  }
2118 
2119  static void
2120  subtract_vector(
2121  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2122  const size_type size,
2123  const ::MemorySpace::
2124  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2125  ::MemorySpace::
2126  MemorySpaceData<Number, ::MemorySpace::CUDA>
2127  &data)
2128  {
2129  const int n_blocks = 1 + size / (chunk_size * block_size);
2130  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2131  <<<n_blocks, block_size>>>(data.values_dev.get(),
2132  -1.,
2133  v_data.values_dev.get(),
2134  size);
2135  AssertCudaKernel();
2136  }
2137 
2138  static void
2139  add_factor(
2140  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2141  const size_type size,
2142  Number a,
2143  ::MemorySpace::
2144  MemorySpaceData<Number, ::MemorySpace::CUDA>
2145  &data)
2146  {
2147  const int n_blocks = 1 + size / (chunk_size * block_size);
2148  ::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
2149  <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
2150  AssertCudaKernel();
2151  }
2152 
2153  static void
2154  add_av(
2155  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2156  const size_type size,
2157  const Number a,
2158  const ::MemorySpace::
2159  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2160  ::MemorySpace::
2161  MemorySpaceData<Number, ::MemorySpace::CUDA>
2162  &data)
2163  {
2164  const int n_blocks = 1 + size / (chunk_size * block_size);
2165  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2166  <<<n_blocks, block_size>>>(data.values_dev.get(),
2167  a,
2168  v_data.values_dev.get(),
2169  size);
2170  AssertCudaKernel();
2171  }
2172 
2173  static void
2174  add_avpbw(
2175  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2176  const size_type size,
2177  const Number a,
2178  const Number b,
2179  const ::MemorySpace::
2180  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2181  const ::MemorySpace::
2182  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2183  ::MemorySpace::
2184  MemorySpaceData<Number, ::MemorySpace::CUDA>
2185  &data)
2186  {
2187  const int n_blocks = 1 + size / (chunk_size * block_size);
2188  ::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
2189  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2190  a,
2191  v_data.values_dev.get(),
2192  b,
2193  w_data.values_dev.get(),
2194  size);
2195  AssertCudaKernel();
2196  }
2197 
2198  static void
2199  sadd_xv(
2200  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2201  const size_type size,
2202  const Number x,
2203  const ::MemorySpace::
2204  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2207  &data)
2208  {
2209  const int n_blocks = 1 + size / (chunk_size * block_size);
2210  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2211  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2212  x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
2213  AssertCudaKernel();
2214  }
2215 
2216  static void
2217  sadd_xav(
2218  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2219  const size_type size,
2220  const Number x,
2221  const Number a,
2222  const ::MemorySpace::
2223  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2224  ::MemorySpace::MemorySpaceData<Number,
2225  ::MemorySpace::CUDA>
2226  &data)
2227  {
2228  const int n_blocks = 1 + size / (chunk_size * block_size);
2229  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2230  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2231  x, data.values_dev.get(), a, v_data.values_dev.get(), size);
2232  AssertCudaKernel();
2233  }
2234 
2235  static void
2236  sadd_xavbw(
2237  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2238  const size_type size,
2239  const Number x,
2240  const Number a,
2241  const Number b,
2242  const ::MemorySpace::
2243  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2244  const ::MemorySpace::
2245  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2246  ::MemorySpace::MemorySpaceData<Number,
2247  ::MemorySpace::CUDA>
2248  &data)
2249  {
2250  const int n_blocks = 1 + size / (chunk_size * block_size);
2251  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2252  <<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
2253  data.values_dev.get(),
2254  a,
2255  v_data.values_dev.get(),
2256  b,
2257  w_data.values_dev.get(),
2258  size);
2259  AssertCudaKernel();
2260  }
2261 
2262  static void
2263  multiply_factor(
2264  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2265  const size_type size,
2266  const Number factor,
2267  ::MemorySpace::MemorySpaceData<Number,
2268  ::MemorySpace::CUDA>
2269  &data)
2270  {
2271  const int n_blocks = 1 + size / (chunk_size * block_size);
2272  ::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
2273  <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
2274  AssertCudaKernel();
2275  }
2276 
2277  static void
2278  scale(
2279  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2280  const size_type size,
2281  const ::MemorySpace::
2282  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2283  ::MemorySpace::MemorySpaceData<Number,
2284  ::MemorySpace::CUDA>
2285  &data)
2286  {
2287  const int n_blocks = 1 + size / (chunk_size * block_size);
2288  ::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
2289  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2290  v_data.values_dev.get(),
2291  size);
2292  AssertCudaKernel();
2293  }
2294 
2295  static void
2296  equ_au(
2297  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2298  const size_type size,
2299  const Number a,
2300  const ::MemorySpace::
2301  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2302  ::MemorySpace::MemorySpaceData<Number,
2303  ::MemorySpace::CUDA>
2304  &data)
2305  {
2306  const int n_blocks = 1 + size / (chunk_size * block_size);
2307  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2308  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2309  a,
2310  v_data.values_dev.get(),
2311  size);
2312  AssertCudaKernel();
2313  }
2314 
2315  static void
2316  equ_aubv(
2317  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2318  const size_type size,
2319  const Number a,
2320  const Number b,
2321  const ::MemorySpace::
2322  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2323  const ::MemorySpace::
2324  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2325  ::MemorySpace::MemorySpaceData<Number,
2326  ::MemorySpace::CUDA>
2327  &data)
2328  {
2329  const int n_blocks = 1 + size / (chunk_size * block_size);
2330  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2331  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2332  a,
2333  v_data.values_dev.get(),
2334  b,
2335  w_data.values_dev.get(),
2336  size);
2337  AssertCudaKernel();
2338  }
2339 
2340  static Number
2341  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2342  const size_type size,
2343  const ::MemorySpace::
2344  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2345  ::MemorySpace::MemorySpaceData<Number,
2346  ::MemorySpace::CUDA>
2347  &data)
2348  {
2349  Number * result_device;
2350  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2351  AssertCuda(error_code);
2352  error_code = cudaMemset(result_device, 0, sizeof(Number));
2353  AssertCuda(error_code);
2354 
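  // reduce the pointwise product of the two vectors into the single device
  // scalar initialized to zero above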
2355  const int n_blocks = 1 + size / (chunk_size * block_size);
2356  ::LinearAlgebra::CUDAWrappers::kernel::double_vector_reduction<
2357  Number,
2358  ::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
2359  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2360  data.values_dev.get(),
2361  v_data.values_dev.get(),
2362  static_cast<unsigned int>(
2363  size));
2364  AssertCudaKernel();
2365 
2366  // Copy the result back to the host
2367  Number result;
2368  error_code = cudaMemcpy(&result,
2369  result_device,
2370  sizeof(Number),
2371  cudaMemcpyDeviceToHost);
2372  AssertCuda(error_code);
2373  // Free the memory on the device
2374  error_code = cudaFree(result_device);
2375  AssertCuda(error_code);
2376 
2377  AssertIsFinite(result);
2378 
2379  return result;
2380  }
2381 
2382  template <typename real_type>
2383  static void
2384  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2385  & thread_loop_partitioner,
2386  const size_type size,
2387  real_type & sum,
2388  ::MemorySpace::MemorySpaceData<Number,
2389  ::MemorySpace::CUDA>
2390  &data)
2391  {
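  // note that only the sum of squares is computed here; taking the square
  // root is left to the caller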
2392  sum = dot(thread_loop_partitioner, size, data, data);
2393  }
2394 
2395  static Number
2396  mean_value(
2397  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2398  const size_type size,
2399  const ::MemorySpace::
2400  MemorySpaceData<Number, ::MemorySpace::CUDA> &data)
2401  {
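  // despite the name, this function only accumulates the sum of all
  // elements; dividing by the number of elements is presumably left to
  // the caller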
2402  Number * result_device;
2403  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2404  AssertCuda(error_code);
2405  error_code = cudaMemset(result_device, 0, sizeof(Number));
2406 
2407  const int n_blocks = 1 + size / (chunk_size * block_size);
2408  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2409  Number,
2410  ::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
2411  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2412  data.values_dev.get(),
2413  size);
2414 
2415  // Copy the result back to the host
2416  Number result;
2417  error_code = cudaMemcpy(&result,
2418  result_device,
2419  sizeof(Number),
2420  cudaMemcpyDeviceToHost);
2421  AssertCuda(error_code);
2422  // Free the memory on the device
2423  error_code = cudaFree(result_device);
2424  AssertCuda(error_code);
2425 
2426  return result;
2427  }
2428 
2429  template <typename real_type>
2430  static void
2431  norm_1(
2432  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2433  const size_type size,
2434  real_type & sum,
2435  ::MemorySpace::MemorySpaceData<Number,
2436  ::MemorySpace::CUDA>
2437  &data)
2438  {
2439  Number * result_device;
2440  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2441  AssertCuda(error_code);
2442  error_code = cudaMemset(result_device, 0, sizeof(Number));
2443 
2444  const int n_blocks = 1 + size / (chunk_size * block_size);
2445  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2446  Number,
2447  ::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
2448  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2449  data.values_dev.get(),
2450  size);
2451 
2452  // Copy the result back to the host
2453  error_code = cudaMemcpy(&sum,
2454  result_device,
2455  sizeof(Number),
2456  cudaMemcpyDeviceToHost);
2457  AssertCuda(error_code);
2458  // Free the memory on the device
2459  error_code = cudaFree(result_device);
2460  AssertCuda(error_code);
2461  }
2462 
2463  template <typename real_type>
2464  static void
2465  norm_p(
2466  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2467  const size_type,
2468  real_type &,
2469  real_type,
2470  ::MemorySpace::MemorySpaceData<Number,
2471  ::MemorySpace::CUDA> &)
2472  {
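  // general lp norms are not implemented for the CUDA memory space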
2473  Assert(false, ExcNotImplemented());
2474  }
2475 
2476  static Number
2477  add_and_dot(
2478  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2479  const size_type size,
2480  const Number a,
2481  const ::MemorySpace::
2482  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2483  const ::MemorySpace::
2484  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2485  ::MemorySpace::MemorySpaceData<Number,
2486  ::MemorySpace::CUDA>
2487  &data)
2488  {
2489  Number * res_d;
2490  cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
2491  AssertCuda(error_code);
2492  error_code = cudaMemset(res_d, 0, sizeof(Number));
2493  AssertCuda(error_code);
2494 
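  // fused operation: the kernel first performs data += a * v_data and then
  // accumulates the inner product of the updated data with w_data in the
  // same pass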
2495  const int n_blocks = 1 + size / (chunk_size * block_size);
2496  ::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
2497  <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
2498  data.values_dev.get(),
2499  v_data.values_dev.get(),
2500  w_data.values_dev.get(),
2501  a,
2502  size);
2503 
2504  Number res;
2505  error_code =
2506  cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
2507  AssertCuda(error_code);
2508  error_code = cudaFree(res_d);
2509 
2510  return res;
2511  }
2512 
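  // the two import() overloads below are selected via enable_if on the
  // memory space of the source vector: device-to-device imports reuse
  // copy() and add_vector(), while host-to-device imports use
  // cudaMemcpyHostToDevice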
2513  template <typename MemorySpace2>
2514  static void
2515  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2516  & thread_loop_partitioner,
2517  const size_type size,
2518  VectorOperation::values operation,
2519  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2520  &v_data,
2521  ::MemorySpace::MemorySpaceData<Number,
2522  ::MemorySpace::CUDA>
2523  &data,
2524  typename std::enable_if<
2525  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2526  int>::type = 0)
2527  {
2528  if (operation == VectorOperation::insert)
2529  {
2530  copy(thread_loop_partitioner, size, v_data, data);
2531  }
2532  else if (operation == VectorOperation::add)
2533  {
2534  add_vector(thread_loop_partitioner, size, v_data, data);
2535  }
2536  else
2537  {
2538  AssertThrow(false, ExcNotImplemented());
2539  }
2540  }
2541 
2542  template <typename MemorySpace2>
2543  static void
2544  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2545  & /*thread_loop_partitioner*/,
2546  const size_type size,
2547  VectorOperation::values operation,
2548  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2549  &v_data,
2550  ::MemorySpace::MemorySpaceData<Number,
2551  ::MemorySpace::CUDA>
2552  &data,
2553  typename std::enable_if<
2554  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2555  int>::type = 0)
2556  {
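  // only VectorOperation::insert is handled when importing from host
  // memory; any other operation falls through to the AssertThrow below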
2557  if (operation == VectorOperation::insert)
2558  {
2559  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2560  v_data.values.get(),
2561  size * sizeof(Number),
2562  cudaMemcpyHostToDevice);
2563  AssertCuda(cuda_error_code);
2564  }
2565  else
2566  {
2567  AssertThrow(false, ExcNotImplemented());
2568  }
2569  }
2570  };
2571 #endif
2572  } // namespace VectorOperations
2573 } // namespace internal
2574 
2575  DEAL_II_NAMESPACE_CLOSE
2576 
2577 #endif