Reference documentation for deal.II version Git 840703ecf3 2019-10-14 17:15:20 -0400
vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vector_operations_internal_h
18 #define dealii_vector_operations_internal_h
19 
20 #include <deal.II/base/memory_space.h>
21 #include <deal.II/base/multithread_info.h>
22 #include <deal.II/base/parallel.h>
23 #include <deal.II/base/types.h>
24 #include <deal.II/base/vectorization.h>
25 
26 #include <deal.II/lac/cuda_kernels.h>
27 #include <deal.II/lac/cuda_kernels.templates.h>
28 #include <deal.II/lac/vector_operation.h>
29 
30 #include <cstdio>
31 #include <cstring>
32 
33 DEAL_II_NAMESPACE_OPEN
34 
35 namespace internal
36 {
37  namespace VectorOperations
38  {
39  using size_type = types::global_dof_index;
40 
41  template <typename T>
42  bool
43  is_non_negative(const T &t)
44  {
45  return t >= 0;
46  }
47 
48 
49  template <typename T>
50  bool
51  is_non_negative(const std::complex<T> &)
52  {
53  Assert(false, ExcMessage("Complex numbers do not have an ordering."));
54 
55  return false;
56  }
57 
58 
59  template <typename T>
60  void
61  print(const T &t, const char *format)
62  {
63  if (format != nullptr)
64  std::printf(format, t);
65  else
66  std::printf(" %5.2f", double(t));
67  }
68 
69 
70 
71  template <typename T>
72  void
73  print(const std::complex<T> &t, const char *format)
74  {
75  if (format != nullptr)
76  std::printf(format, t.real(), t.imag());
77  else
78  std::printf(" %5.2f+%5.2fi", double(t.real()), double(t.imag()));
79  }
80 
81  // call std::copy, except for in
82  // the case where we want to copy
83  // from std::complex to a
84  // non-complex type
85  template <typename T, typename U>
86  void
87  copy(const T *begin, const T *end, U *dest)
88  {
89  std::copy(begin, end, dest);
90  }
91 
92  template <typename T, typename U>
93  void
94  copy(const std::complex<T> *begin,
95  const std::complex<T> *end,
96  std::complex<U> * dest)
97  {
98  std::copy(begin, end, dest);
99  }
100 
101  template <typename T, typename U>
102  void
103  copy(const std::complex<T> *, const std::complex<T> *, U *)
104  {
105  Assert(false,
106  ExcMessage("Can't convert a vector of complex numbers "
107  "into a vector of reals/doubles"));
108  }
109 
110 
111 
112 #ifdef DEAL_II_WITH_THREADS
113 
121  template <typename Functor>
122  struct TBBForFunctor
123  {
124  TBBForFunctor(Functor & functor,
125  const size_type start,
126  const size_type end)
127  : functor(functor)
128  , start(start)
129  , end(end)
130  {
131  const size_type vec_size = end - start;
132  // set chunk size for sub-tasks
133  const unsigned int gs =
134  internal::VectorImplementation::minimum_parallel_grain_size;
135  n_chunks =
136  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
137  vec_size / gs);
138  chunk_size = vec_size / n_chunks;
139 
140  // round to next multiple of 512 (or minimum grain size if that happens
141  // to be smaller). this is advantageous because our accumulation
142  // algorithms favor lengths of a power of 2 due to pairwise summation ->
143  // at most one 'oddly' sized chunk
144  if (chunk_size > 512)
145  chunk_size = ((chunk_size + 511) / 512) * 512;
146  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
147  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
148  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
149  }
150 
151  void
152  operator()(const tbb::blocked_range<size_type> &range) const
153  {
154  const size_type r_begin = start + range.begin() * chunk_size;
155  const size_type r_end = std::min(start + range.end() * chunk_size, end);
156  functor(r_begin, r_end);
157  }
158 
159  Functor & functor;
160  const size_type start;
161  const size_type end;
162  unsigned int n_chunks;
163  size_type chunk_size;
164  };
165 #endif
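
The chunk-size arithmetic in the constructor above can be illustrated with concrete numbers. The following standalone sketch reproduces the computation for a vector of 200,000 entries; the values of n_threads and grain_size are assumptions standing in for MultithreadInfo::n_threads() and internal::VectorImplementation::minimum_parallel_grain_size:

#include <algorithm>
#include <cstdio>

int main()
{
  // stand-ins for MultithreadInfo::n_threads() and
  // internal::VectorImplementation::minimum_parallel_grain_size
  const unsigned long n_threads  = 4;
  const unsigned long grain_size = 4096;
  const unsigned long vec_size   = 200000;

  // same arithmetic as the TBBForFunctor constructor
  unsigned long n_chunks   = std::min(4 * n_threads, vec_size / grain_size); // min(16, 48) = 16
  unsigned long chunk_size = vec_size / n_chunks;                            // 12500

  // round up to the next multiple of 512 so that at most one chunk has an
  // 'odd' length, which benefits the pairwise summation used later
  if (chunk_size > 512)
    chunk_size = ((chunk_size + 511) / 512) * 512;     // 12800
  n_chunks = (vec_size + chunk_size - 1) / chunk_size; // 16, the last chunk holding 8000 entries

  std::printf("n_chunks=%lu chunk_size=%lu\n", n_chunks, chunk_size);
}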
166 
167  template <typename Functor>
168  void
169  parallel_for(
170  Functor & functor,
171  const size_type start,
172  const size_type end,
173  const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
174  {
175 #ifdef DEAL_II_WITH_THREADS
176  const size_type vec_size = end - start;
177  // only go to the parallel function in case there are at least 4 parallel
178  // items, otherwise the overhead is too large
179  if (vec_size >=
180  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
181  MultithreadInfo::n_threads() > 1)
182  {
183  Assert(partitioner.get() != nullptr,
184  ExcInternalError(
185  "Unexpected initialization of Vector that does "
186  "not set the TBB partitioner to a usable state."));
187  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
188  partitioner->acquire_one_partitioner();
189 
190  TBBForFunctor<Functor> generic_functor(functor, start, end);
191  // We use a minimum grain size of 1 here since the grains at this
192  // stage of dividing the work refer to the number of vector chunks
193  // that are processed by (possibly different) threads in the
194  // parallelized for loop (i.e., they do not refer to individual
195  // vector entries). The number of chunks here is calculated inside
196  // TBBForFunctor. See also GitHub issue #2496 for further discussion
197  // of this strategy.
198  parallel::internal::parallel_for(static_cast<size_type>(0),
199  static_cast<size_type>(
200  generic_functor.n_chunks),
201  generic_functor,
202  1,
203  tbb_partitioner);
204  partitioner->release_one_partitioner(tbb_partitioner);
205  }
206  else if (vec_size > 0)
207  functor(start, end);
208 #else
209  functor(start, end);
210  (void)partitioner;
211 #endif
212  }
213 
214 
215  // Define the functors necessary to use SIMD with TBB. We also include the
216  // simple copy and set operations
217 
218  template <typename Number>
219  struct Vector_set
220  {
221  Vector_set(const Number value, Number *const dst)
222  : value(value)
223  , dst(dst)
224  {
225  Assert(dst != nullptr, ExcInternalError());
226  }
227 
228  void
229  operator()(const size_type begin, const size_type end) const
230  {
231  Assert(end >= begin, ExcInternalError());
232 
233  if (value == Number())
234  {
235 #ifdef DEAL_II_WITH_CXX17
236  if constexpr (std::is_trivial<Number>::value)
237 #else
238  if (std::is_trivial<Number>::value)
239 #endif
240  {
241  std::memset(dst + begin, 0, sizeof(Number) * (end - begin));
242  return;
243  }
244  }
245  std::fill(dst + begin, dst + end, value);
246  }
247 
248  const Number value;
249  Number *const dst;
250  };
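
As a usage sketch, such a functor is handed to parallel_for() together with the index range and a TBB partitioner, which is exactly what functions<Number, Number2, ::MemorySpace::Host>::set() further below does. The helper name fill_with_constant and the freshly constructed partitioner are illustrative assumptions; inside the library, Vector<Number> supplies its own partitioner object.

#include <deal.II/lac/vector_operations_internal.h>

#include <memory>

// minimal sketch: fill values[0,size) with a constant through Vector_set and
// parallel_for, assuming 'values' points to at least 'size' valid entries
void fill_with_constant(double *values, const dealii::types::global_dof_index size)
{
  using namespace dealii::internal::VectorOperations;

  const auto partitioner =
    std::make_shared<dealii::parallel::internal::TBBPartitioner>();

  Vector_set<double> setter(42.0, values);
  parallel_for(setter, 0, size, partitioner);
}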
251 
252  template <typename Number, typename OtherNumber>
253  struct Vector_copy
254  {
255  Vector_copy(const OtherNumber *const src, Number *const dst)
256  : src(src)
257  , dst(dst)
258  {
259  Assert(src != nullptr, ExcInternalError());
260  Assert(dst != nullptr, ExcInternalError());
261  }
262 
263  void
264  operator()(const size_type begin, const size_type end) const
265  {
266  Assert(end >= begin, ExcInternalError());
267 
268 #if __GNUG__ && __GNUC__ < 5
269  if (__has_trivial_copy(Number) &&
270  std::is_same<Number, OtherNumber>::value)
271 #else
272 # ifdef DEAL_II_WITH_CXX17
273  if constexpr (std::is_trivially_copyable<Number>() &&
274  std::is_same<Number, OtherNumber>::value)
275 # else
276  if (std::is_trivially_copyable<Number>() &&
277  std::is_same<Number, OtherNumber>::value)
278 # endif
279 #endif
280  std::memcpy(dst + begin, src + begin, (end - begin) * sizeof(Number));
281  else
282  {
283  DEAL_II_OPENMP_SIMD_PRAGMA
284  for (size_type i = begin; i < end; ++i)
285  dst[i] = src[i];
286  }
287  }
288 
289  const OtherNumber *const src;
290  Number *const dst;
291  };
292 
293  template <typename Number>
294  struct Vectorization_multiply_factor
295  {
296  Vectorization_multiply_factor(Number *const val, const Number factor)
297  : val(val)
298  , factor(factor)
299  {}
300 
301  void
302  operator()(const size_type begin, const size_type end) const
303  {
303  {
304  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
305  {
306  DEAL_II_OPENMP_SIMD_PRAGMA
307  for (size_type i = begin; i < end; ++i)
308  val[i] *= factor;
309  }
310  else
311  {
312  for (size_type i = begin; i < end; ++i)
313  val[i] *= factor;
314  }
315  }
316 
317  Number *const val;
318  const Number factor;
319  };
320 
321  template <typename Number>
322  struct Vectorization_add_av
323  {
324  Vectorization_add_av(Number *const val,
325  const Number *const v_val,
326  const Number factor)
327  : val(val)
328  , v_val(v_val)
329  , factor(factor)
330  {}
331 
332  void
333  operator()(const size_type begin, const size_type end) const
334  {
335  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
336  {
337  DEAL_II_OPENMP_SIMD_PRAGMA
338  for (size_type i = begin; i < end; ++i)
339  val[i] += factor * v_val[i];
340  }
341  else
342  {
343  for (size_type i = begin; i < end; ++i)
344  val[i] += factor * v_val[i];
345  }
346  }
347 
348  Number *const val;
349  const Number *const v_val;
350  const Number factor;
351  };
352 
353  template <typename Number>
354  struct Vectorization_sadd_xav
355  {
356  Vectorization_sadd_xav(Number * val,
357  const Number *const v_val,
358  const Number a,
359  const Number x)
360  : val(val)
361  , v_val(v_val)
362  , a(a)
363  , x(x)
364  {}
365 
366  void
367  operator()(const size_type begin, const size_type end) const
368  {
369  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
370  {
371  DEAL_II_OPENMP_SIMD_PRAGMA
372  for (size_type i = begin; i < end; ++i)
373  val[i] = x * val[i] + a * v_val[i];
374  }
375  else
376  {
377  for (size_type i = begin; i < end; ++i)
378  val[i] = x * val[i] + a * v_val[i];
379  }
380  }
381 
382  Number *const val;
383  const Number *const v_val;
384  const Number a;
385  const Number x;
386  };
387 
388  template <typename Number>
389  struct Vectorization_subtract_v
390  {
391  Vectorization_subtract_v(Number *val, const Number *const v_val)
392  : val(val)
393  , v_val(v_val)
394  {}
395 
396  void
397  operator()(const size_type begin, const size_type end) const
398  {
399  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
400  {
401  DEAL_II_OPENMP_SIMD_PRAGMA
402  for (size_type i = begin; i < end; ++i)
403  val[i] -= v_val[i];
404  }
405  else
406  {
407  for (size_type i = begin; i < end; ++i)
408  val[i] -= v_val[i];
409  }
410  }
411 
412  Number *const val;
413  const Number *const v_val;
414  };
415 
416  template <typename Number>
417  struct Vectorization_add_factor
418  {
419  Vectorization_add_factor(Number *const val, const Number factor)
420  : val(val)
421  , factor(factor)
422  {}
423 
424  void
425  operator()(const size_type begin, const size_type end) const
426  {
427  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
428  {
429  DEAL_II_OPENMP_SIMD_PRAGMA
430  for (size_type i = begin; i < end; ++i)
431  val[i] += factor;
432  }
433  else
434  {
435  for (size_type i = begin; i < end; ++i)
436  val[i] += factor;
437  }
438  }
439 
440  Number *const val;
441  const Number factor;
442  };
443 
444  template <typename Number>
445  struct Vectorization_add_v
446  {
447  Vectorization_add_v(Number *const val, const Number *const v_val)
448  : val(val)
449  , v_val(v_val)
450  {}
451 
452  void
453  operator()(const size_type begin, const size_type end) const
454  {
455  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
456  {
457  DEAL_II_OPENMP_SIMD_PRAGMA
458  for (size_type i = begin; i < end; ++i)
459  val[i] += v_val[i];
460  }
461  else
462  {
463  for (size_type i = begin; i < end; ++i)
464  val[i] += v_val[i];
465  }
466  }
467 
468  Number *const val;
469  const Number *const v_val;
470  };
471 
472  template <typename Number>
473  struct Vectorization_add_avpbw
474  {
475  Vectorization_add_avpbw(Number *const val,
476  const Number *const v_val,
477  const Number *const w_val,
478  const Number a,
479  const Number b)
480  : val(val)
481  , v_val(v_val)
482  , w_val(w_val)
483  , a(a)
484  , b(b)
485  {}
486 
487  void
488  operator()(const size_type begin, const size_type end) const
489  {
490  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
491  {
492  DEAL_II_OPENMP_SIMD_PRAGMA
493  for (size_type i = begin; i < end; ++i)
494  val[i] = val[i] + a * v_val[i] + b * w_val[i];
495  }
496  else
497  {
498  for (size_type i = begin; i < end; ++i)
499  val[i] = val[i] + a * v_val[i] + b * w_val[i];
500  }
501  }
502 
503  Number *const val;
504  const Number *const v_val;
505  const Number *const w_val;
506  const Number a;
507  const Number b;
508  };
509 
510  template <typename Number>
511  struct Vectorization_sadd_xv
512  {
513  Vectorization_sadd_xv(Number *const val,
514  const Number *const v_val,
515  const Number x)
516  : val(val)
517  , v_val(v_val)
518  , x(x)
519  {}
520 
521  void
522  operator()(const size_type begin, const size_type end) const
523  {
524  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
525  {
526  DEAL_II_OPENMP_SIMD_PRAGMA
527  for (size_type i = begin; i < end; ++i)
528  val[i] = x * val[i] + v_val[i];
529  }
530  else
531  {
532  for (size_type i = begin; i < end; ++i)
533  val[i] = x * val[i] + v_val[i];
534  }
535  }
536 
537  Number *const val;
538  const Number *const v_val;
539  const Number x;
540  };
541 
542  template <typename Number>
543  struct Vectorization_sadd_xavbw
544  {
545  Vectorization_sadd_xavbw(Number * val,
546  const Number *v_val,
547  const Number *w_val,
548  Number x,
549  Number a,
550  Number b)
551  : val(val)
552  , v_val(v_val)
553  , w_val(w_val)
554  , x(x)
555  , a(a)
556  , b(b)
557  {}
558 
559  void
560  operator()(const size_type begin, const size_type end) const
561  {
562  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
563  {
564  DEAL_II_OPENMP_SIMD_PRAGMA
565  for (size_type i = begin; i < end; ++i)
566  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
567  }
568  else
569  {
570  for (size_type i = begin; i < end; ++i)
571  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
572  }
573  }
574 
575  Number *const val;
576  const Number *const v_val;
577  const Number *const w_val;
578  const Number x;
579  const Number a;
580  const Number b;
581  };
582 
583  template <typename Number>
584  struct Vectorization_scale
585  {
586  Vectorization_scale(Number *const val, const Number *const v_val)
587  : val(val)
588  , v_val(v_val)
589  {}
590 
591  void
592  operator()(const size_type begin, const size_type end) const
593  {
594  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
595  {
596  DEAL_II_OPENMP_SIMD_PRAGMA
597  for (size_type i = begin; i < end; ++i)
598  val[i] *= v_val[i];
599  }
600  else
601  {
602  for (size_type i = begin; i < end; ++i)
603  val[i] *= v_val[i];
604  }
605  }
606 
607  Number *const val;
608  const Number *const v_val;
609  };
610 
611  template <typename Number>
612  struct Vectorization_equ_au
613  {
614  Vectorization_equ_au(Number *const val,
615  const Number *const u_val,
616  const Number a)
617  : val(val)
618  , u_val(u_val)
619  , a(a)
620  {}
621 
622  void
623  operator()(const size_type begin, const size_type end) const
624  {
625  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
626  {
627  DEAL_II_OPENMP_SIMD_PRAGMA
628  for (size_type i = begin; i < end; ++i)
629  val[i] = a * u_val[i];
630  }
631  else
632  {
633  for (size_type i = begin; i < end; ++i)
634  val[i] = a * u_val[i];
635  }
636  }
637 
638  Number *const val;
639  const Number *const u_val;
640  const Number a;
641  };
642 
643  template <typename Number>
644  struct Vectorization_equ_aubv
645  {
646  Vectorization_equ_aubv(Number *const val,
647  const Number *const u_val,
648  const Number *const v_val,
649  const Number a,
650  const Number b)
651  : val(val)
652  , u_val(u_val)
653  , v_val(v_val)
654  , a(a)
655  , b(b)
656  {}
657 
658  void
659  operator()(const size_type begin, const size_type end) const
660  {
661  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
662  {
663  DEAL_II_OPENMP_SIMD_PRAGMA
664  for (size_type i = begin; i < end; ++i)
665  val[i] = a * u_val[i] + b * v_val[i];
666  }
667  else
668  {
669  for (size_type i = begin; i < end; ++i)
670  val[i] = a * u_val[i] + b * v_val[i];
671  }
672  }
673 
674  Number *const val;
675  const Number *const u_val;
676  const Number *const v_val;
677  const Number a;
678  const Number b;
679  };
680 
681  template <typename Number>
682  struct Vectorization_equ_aubvcw
683  {
684  Vectorization_equ_aubvcw(Number * val,
685  const Number *u_val,
686  const Number *v_val,
687  const Number *w_val,
688  const Number a,
689  const Number b,
690  const Number c)
691  : val(val)
692  , u_val(u_val)
693  , v_val(v_val)
694  , w_val(w_val)
695  , a(a)
696  , b(b)
697  , c(c)
698  {}
699 
700  void
701  operator()(const size_type begin, const size_type end) const
702  {
703  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
704  {
705  DEAL_II_OPENMP_SIMD_PRAGMA
706  for (size_type i = begin; i < end; ++i)
707  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
708  }
709  else
710  {
711  for (size_type i = begin; i < end; ++i)
712  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
713  }
714  }
715 
716  Number *const val;
717  const Number *const u_val;
718  const Number *const v_val;
719  const Number *const w_val;
720  const Number a;
721  const Number b;
722  const Number c;
723  };
724 
725  template <typename Number>
726  struct Vectorization_ratio
727  {
728  Vectorization_ratio(Number *val, const Number *a_val, const Number *b_val)
729  : val(val)
730  , a_val(a_val)
731  , b_val(b_val)
732  {}
733 
734  void
735  operator()(const size_type begin, const size_type end) const
736  {
737  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
738  {
739  DEAL_II_OPENMP_SIMD_PRAGMA
740  for (size_type i = begin; i < end; ++i)
741  val[i] = a_val[i] / b_val[i];
742  }
743  else
744  {
745  for (size_type i = begin; i < end; ++i)
746  val[i] = a_val[i] / b_val[i];
747  }
748  }
749 
750  Number *const val;
751  const Number *const a_val;
752  const Number *const b_val;
753  };
754 
755 
756 
757  // All sums over all the vector entries (l2-norm, inner product, etc.) are
758  // performed with the same code, using a templated operation defined
759  // here. There are always two versions defined, a standard one that covers
760  // most cases and a vectorized one which is only for equal types and float
761  // and double.
762  template <typename Number, typename Number2>
763  struct Dot
764  {
765  static const bool vectorizes =
766  std::is_same<Number, Number2>::value &&
767  (VectorizedArray<Number>::n_array_elements > 1);
768 
769  Dot(const Number *const X, const Number2 *const Y)
770  : X(X)
771  , Y(Y)
772  {}
773 
774  Number
775  operator()(const size_type i) const
776  {
777  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
778  }
779 
780  VectorizedArray<Number>
781  do_vectorized(const size_type i) const
782  {
783  VectorizedArray<Number> x, y;
784  x.load(X + i);
785  y.load(Y + i);
786 
787  // the following operation in VectorizedArray does an element-wise
788  // scalar product without taking into account complex values and
789  // the need to take the complex-conjugate of one argument. this
790  // may be a bug, but because all VectorizedArray classes only
791  // work on real scalars, it doesn't really matter very much.
792  // in any case, assert that we really don't get here for
793  // complex-valued objects
794  static_assert(numbers::NumberTraits<Number>::is_complex == false,
795  "This operation is not correctly implemented for "
796  "complex-valued objects.");
797  return x * y;
798  }
799 
800  const Number *const X;
801  const Number2 *const Y;
802  };
803 
804  template <typename Number, typename RealType>
805  struct Norm2
806  {
807  static const bool vectorizes =
808  VectorizedArray<Number>::n_array_elements > 1;
809 
810  Norm2(const Number *const X)
811  : X(X)
812  {}
813 
814  RealType
815  operator()(const size_type i) const
816  {
817  return numbers::NumberTraits<Number>::abs_square(X[i]);
818  }
819 
820  VectorizedArray<Number>
821  do_vectorized(const size_type i) const
822  {
823  VectorizedArray<Number> x;
824  x.load(X + i);
825  return x * x;
826  }
827 
828  const Number *const X;
829  };
830 
831  template <typename Number, typename RealType>
832  struct Norm1
833  {
834  static const bool vectorizes =
835  VectorizedArray<Number>::n_array_elements > 1;
836 
837  Norm1(const Number *X)
838  : X(X)
839  {}
840 
841  RealType
842  operator()(const size_type i) const
843  {
844  return numbers::NumberTraits<Number>::abs(X[i]);
845  }
846 
847  VectorizedArray<Number>
848  do_vectorized(const size_type i) const
849  {
850  VectorizedArray<Number> x;
851  x.load(X + i);
852  return std::abs(x);
853  }
854 
855  const Number *X;
856  };
857 
858  template <typename Number, typename RealType>
859  struct NormP
860  {
861  static const bool vectorizes =
862  VectorizedArray<Number>::n_array_elements > 1;
863 
864  NormP(const Number *X, RealType p)
865  : X(X)
866  , p(p)
867  {}
868 
869  RealType
870  operator()(const size_type i) const
871  {
872  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
873  }
874 
875  VectorizedArray<Number>
876  do_vectorized(const size_type i) const
877  {
878  VectorizedArray<Number> x;
879  x.load(X + i);
880  return std::pow(std::abs(x), p);
881  }
882 
883  const Number * X;
884  const RealType p;
885  };
886 
887  template <typename Number>
888  struct MeanValue
889  {
890  static const bool vectorizes =
891  VectorizedArray<Number>::n_array_elements > 1;
892 
893  MeanValue(const Number *X)
894  : X(X)
895  {}
896 
897  Number
898  operator()(const size_type i) const
899  {
900  return X[i];
901  }
902 
903  VectorizedArray<Number>
904  do_vectorized(const size_type i) const
905  {
906  VectorizedArray<Number> x;
907  x.load(X + i);
908  return x;
909  }
910 
911  const Number *X;
912  };
913 
914  template <typename Number>
915  struct AddAndDot
916  {
917  static const bool vectorizes =
918  VectorizedArray<Number>::n_array_elements > 1;
919 
920  AddAndDot(Number *const X,
921  const Number *const V,
922  const Number *const W,
923  const Number a)
924  : X(X)
925  , V(V)
926  , W(W)
927  , a(a)
928  {}
929 
930  Number
931  operator()(const size_type i) const
932  {
933  X[i] += a * V[i];
934  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
935  }
936 
937  VectorizedArray<Number>
938  do_vectorized(const size_type i) const
939  {
940  VectorizedArray<Number> x, w, v;
941  x.load(X + i);
942  v.load(V + i);
943  x += a * v;
944  x.store(X + i);
945  // may only load from W after storing in X because the pointers might
946  // point to the same memory
947  w.load(W + i);
948 
949  // the following operation in VectorizedArray does an element-wise
950  // scalar product without taking into account complex values and
951  // the need to take the complex-conjugate of one argument. this
952  // may be a bug, but because all VectorizedArray classes only
953  // work on real scalars, it doesn't really matter very much.
954  // in any case, assert that we really don't get here for
955  // complex-valued objects
956  static_assert(numbers::NumberTraits<Number>::is_complex == false,
957  "This operation is not correctly implemented for "
958  "complex-valued objects.");
959  return x * w;
960  }
961 
962  Number *const X;
963  const Number *const V;
964  const Number *const W;
965  const Number a;
966  };
967 
968 
969 
970  // this is the main working loop for all vector sums using the templated
971  // operation above. it accumulates the sums using a block-wise summation
972  // algorithm with post-update. this blocked algorithm has been proposed in
973  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
974  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
975  // block size, 2. Sometimes it is referred to as pairwise summation. The
976  // worst case error made by this algorithm is on the order O(eps *
977  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
978  // though the Kahan summation is even more accurate with an error O(eps)
979  // by carrying along remainders not captured by the main sum, that involves
980  // additional costs which are not worthwhile. See the Wikipedia article on
981  // the Kahan summation algorithm.
982 
983  // The algorithm implemented here has the additional benefit that it is
984  // easily parallelized without changing the order of how the elements are
985  // added (floating point addition is not associative). For the same vector
986  // size and minimum_parallel_grain_size, the blocks are always the
987  // same and added pairwise.
988 
989  // The depth of recursion is controlled by the 'magic' parameter
990  // vector_accumulation_recursion_threshold: If the length is below
991  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
992  // unroll), a straight loop instead of recursion will be used. At the
993  // innermost level, eight values are added consecutively in order to better
994  // balance multiplications and additions.
995 
996  // Loops are unrolled as follows: the range [first,last) is broken into
997  // @p n_chunks each of size 32 plus the @p remainder.
998  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
999  // if possible and stores the result of the operation for each chunk in @p outer_results.
1000 
1001  // The code returns the result as the last argument in order to make
1002  // spawning tasks simpler and use automatic template deduction.
1003 
1004 
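
As a standalone illustration of the pairwise idea described above (independent of the chunking and SIMD machinery that follows), the following minimal sketch sums a range by recursively halving it; the rounding error then grows like O(eps * log2(n)) rather than O(eps * n) as for a single running sum:

#include <cstddef>

// minimal sketch of pairwise summation: split the range in half, sum the
// halves recursively, and add the two partial results
double pairwise_sum(const double *x, const std::size_t n)
{
  if (n <= 8) // small base case handled by a plain loop
    {
      double s = 0.;
      for (std::size_t i = 0; i < n; ++i)
        s += x[i];
      return s;
    }
  const std::size_t half = n / 2;
  return pairwise_sum(x, half) + pairwise_sum(x + half, n - half);
}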
1010  const unsigned int vector_accumulation_recursion_threshold = 128;
1011 
1012  template <typename Operation, typename ResultType>
1013  void
1014  accumulate_recursive(const Operation &op,
1015  const size_type first,
1016  const size_type last,
1017  ResultType & result)
1018  {
1019  const size_type vec_size = last - first;
1020  if (vec_size <= vector_accumulation_recursion_threshold * 32)
1021  {
1022  // the vector is short enough so we perform the summation. first
1023  // work on the regular part. The innermost 32 values are expanded in
1024  // order to obtain known loop bounds for most of the work.
1025  size_type index = first;
1026  ResultType outer_results[vector_accumulation_recursion_threshold];
1027 
1028  // set the zeroth element to zero to correctly handle the case where
1029  // vec_size == 0
1030  outer_results[0] = ResultType();
1031 
1032  // the variable serves two purposes: (i) number of chunks (each 32
1033  // indices) for the given size; all results are stored in
1034  // outer_results[0,n_chunks) (ii) in the SIMD case n_chunks is also a
1035  // next free index in outer_results[] to which we can write after
1036  // accumulate_regular() is executed.
1037  size_type n_chunks = vec_size / 32;
1038  const size_type remainder = vec_size % 32;
1039  Assert(remainder == 0 ||
1040  n_chunks < vector_accumulation_recursion_threshold,
1041  ExcInternalError());
1042 
1043  // Select between the regular version and vectorized version based
1044  // on the number types we are given. To choose the vectorized
1045  // version often enough, we need to have all tasks but the last one
1046  // to be divisible by the vectorization length
1047  accumulate_regular(
1048  op,
1049  n_chunks,
1050  index,
1051  outer_results,
1052  std::integral_constant<bool, Operation::vectorizes>());
1053 
1054  // now work on the remainder, i.e., the last up to 32 values. Use
1055  // switch statement with fall-through to work on these values.
1056  if (remainder > 0)
1057  {
1058  // if we got here, it means that (vec_size <=
1059  // vector_accumulation_recursion_threshold * 32), which is to say
1060  // that the domain can be split into n_chunks <=
1061  // vector_accumulation_recursion_threshold:
1062  AssertIndexRange(n_chunks,
1063  vector_accumulation_recursion_threshold + 1);
1064  // split the remainder into chunks of 8, there could be up to 3
1065  // such chunks since remainder < 32.
1066  // Work on those chunks without any SIMD, that is we call
1067  // op(index).
1068  const size_type inner_chunks = remainder / 8;
1069  Assert(inner_chunks <= 3, ExcInternalError());
1070  const size_type remainder_inner = remainder % 8;
1071  ResultType r0 = ResultType(), r1 = ResultType(),
1072  r2 = ResultType();
1073  switch (inner_chunks)
1074  {
1075  case 3:
1076  r2 = op(index++);
1077  for (size_type j = 1; j < 8; ++j)
1078  r2 += op(index++);
1079  DEAL_II_FALLTHROUGH;
1080  case 2:
1081  r1 = op(index++);
1082  for (size_type j = 1; j < 8; ++j)
1083  r1 += op(index++);
1084  r1 += r2;
1085  DEAL_II_FALLTHROUGH;
1086  case 1:
1087  r2 = op(index++);
1088  for (size_type j = 1; j < 8; ++j)
1089  r2 += op(index++);
1090  DEAL_II_FALLTHROUGH;
1091  default:
1092  for (size_type j = 0; j < remainder_inner; ++j)
1093  r0 += op(index++);
1094  r0 += r2;
1095  r0 += r1;
1096  if (n_chunks == vector_accumulation_recursion_threshold)
1097  outer_results[vector_accumulation_recursion_threshold -
1098  1] += r0;
1099  else
1100  {
1101  outer_results[n_chunks] = r0;
1102  n_chunks++;
1103  }
1104  break;
1105  }
1106  }
1107  // make sure we worked through all indices
1108  AssertDimension(index, last);
1109 
1110  // now sum the results from the chunks stored in
1111  // outer_results[0,n_chunks) recursively
1112  while (n_chunks > 1)
1113  {
1114  if (n_chunks % 2 == 1)
1115  outer_results[n_chunks++] = ResultType();
1116  for (size_type i = 0; i < n_chunks; i += 2)
1117  outer_results[i / 2] = outer_results[i] + outer_results[i + 1];
1118  n_chunks /= 2;
1119  }
1120  result = outer_results[0];
1121  }
1122  else
1123  {
1124  // split vector into four pieces and work on the pieces
1125  // recursively. Make pieces (except last) divisible by one fourth the
1126  // recursion threshold.
1127  const size_type new_size =
1128  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1129  vector_accumulation_recursion_threshold * 8;
1130  Assert(first + 3 * new_size < last, ExcInternalError());
1131  ResultType r0, r1, r2, r3;
1132  accumulate_recursive(op, first, first + new_size, r0);
1133  accumulate_recursive(op, first + new_size, first + 2 * new_size, r1);
1134  accumulate_recursive(op,
1135  first + 2 * new_size,
1136  first + 3 * new_size,
1137  r2);
1138  accumulate_recursive(op, first + 3 * new_size, last, r3);
1139  r0 += r1;
1140  r2 += r3;
1141  result = r0 + r2;
1142  }
1143  }
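
A worked example of the recursive branch: with the default threshold of 128, a range of 16384 entries exceeds 128 * 32 = 4096 and is therefore split with new_size = (16384 / 4096) * 128 * 8 = 4096, so the four recursive calls each cover exactly 4096 entries and are then handled by the direct summation branch above.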
1144 
1145 
1146  // this is the inner working routine for the accumulation loops
1147  // below. This is the standard case where the loop bounds are known. We
1148  // pulled this function out of the regular accumulate routine because we
1149  // might do this thing vectorized (see specialized function below)
1150  template <typename Operation, typename ResultType>
1151  void
1152  accumulate_regular(
1153  const Operation &op,
1154  const size_type &n_chunks,
1155  size_type & index,
1156  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1157  std::integral_constant<bool, false>)
1158  {
1159  // note that each chunk is chosen to have a width of 32, thereby the index
1160  // is incremented by 4*8 for each @p i.
1161  for (size_type i = 0; i < n_chunks; ++i)
1162  {
1163  ResultType r0 = op(index);
1164  ResultType r1 = op(index + 1);
1165  ResultType r2 = op(index + 2);
1166  ResultType r3 = op(index + 3);
1167  index += 4;
1168  for (size_type j = 1; j < 8; ++j, index += 4)
1169  {
1170  r0 += op(index);
1171  r1 += op(index + 1);
1172  r2 += op(index + 2);
1173  r3 += op(index + 3);
1174  }
1175  r0 += r1;
1176  r2 += r3;
1177  outer_results[i] = r0 + r2;
1178  }
1179  }
1180 
1181 
1182 
1183  // this is the inner working routine for the accumulation loops
1184  // below. This is the specialized case where the loop bounds are known and
1185  // where we can vectorize. In that case, we request the 'do_vectorized'
1186  // routine of the operation instead of the regular one which does several
1187  // operations at once.
1188  template <typename Operation, typename Number>
1189  void
1190  accumulate_regular(
1191  const Operation &op,
1192  size_type & n_chunks,
1193  size_type & index,
1194  Number (&outer_results)[vector_accumulation_recursion_threshold],
1195  std::integral_constant<bool, true>)
1196  {
1197  // we start from @p index and work out @p n_chunks each of size 32.
1198  // in order to employ SIMD and work on @p nvecs at a time, we split this
1199  // loop yet again:
1200  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1201  // nvecs*(4*8) elements.
1202 
1203  const unsigned int nvecs = VectorizedArray<Number>::n_array_elements;
1204  const size_type regular_chunks = n_chunks / nvecs;
1205  for (size_type i = 0; i < regular_chunks; ++i)
1206  {
1207  VectorizedArray<Number> r0 = op.do_vectorized(index);
1208  VectorizedArray<Number> r1 = op.do_vectorized(index + nvecs);
1209  VectorizedArray<Number> r2 = op.do_vectorized(index + 2 * nvecs);
1210  VectorizedArray<Number> r3 = op.do_vectorized(index + 3 * nvecs);
1211  index += nvecs * 4;
1212  for (size_type j = 1; j < 8; ++j, index += nvecs * 4)
1213  {
1214  r0 += op.do_vectorized(index);
1215  r1 += op.do_vectorized(index + nvecs);
1216  r2 += op.do_vectorized(index + 2 * nvecs);
1217  r3 += op.do_vectorized(index + 3 * nvecs);
1218  }
1219  r0 += r1;
1220  r2 += r3;
1221  r0 += r2;
1222  r0.store(
1223  &outer_results[i * VectorizedArray<Number>::n_array_elements]);
1224  }
1225 
1226  // If we are treating a case where the vector length is not divisible by
1227  // the vectorization length, we need a cleanup loop.
1228  // The remaining chunks are processed one by one starting from
1229  // regular_chunks * nvecs; We do as much as possible with 2 SIMD
1230  // operations within each chunk. Here we assume that nvecs < 32/2 = 16 as
1231  // well as 16%nvecs==0.
1233  Assert(16 % nvecs == 0, ExcInternalError());
1234  if (n_chunks % VectorizedArray<Number>::n_array_elements != 0)
1235  {
1236  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
1237  r1 = VectorizedArray<Number>();
1238  const size_type start_irreg = regular_chunks * nvecs;
1239  for (size_type c = start_irreg; c < n_chunks; ++c)
1240  for (size_type j = 0; j < 32; j += 2 * nvecs, index += 2 * nvecs)
1241  {
1242  r0 += op.do_vectorized(index);
1243  r1 += op.do_vectorized(index + nvecs);
1244  }
1245  r0 += r1;
1246  r0.store(&outer_results[start_irreg]);
1247  // update n_chunks to denote unused element in outer_results[] from
1248  // which we can keep writing.
1249  n_chunks = start_irreg + VectorizedArray<Number>::n_array_elements;
1250  }
1251  }
1252 
1253 
1254 
1255 #ifdef DEAL_II_WITH_THREADS
1256 
1284  template <typename Operation, typename ResultType>
1285  struct TBBReduceFunctor
1286  {
1287  static const unsigned int threshold_array_allocate = 512;
1288 
1289  TBBReduceFunctor(const Operation &op,
1290  const size_type start,
1291  const size_type end)
1292  : op(op)
1293  , start(start)
1294  , end(end)
1295  {
1296  const size_type vec_size = end - start;
1297  // set chunk size for sub-tasks
1298  const unsigned int gs =
1299  internal::VectorImplementation::minimum_parallel_grain_size;
1300  n_chunks =
1301  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
1302  vec_size / gs);
1303  chunk_size = vec_size / n_chunks;
1304 
1305  // round to next multiple of 512 (or leave it at the minimum grain size
1306  // if that happens to be smaller). this is advantageous because our
1307  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1308  // at most one 'oddly' sized chunk
1309  if (chunk_size > 512)
1310  chunk_size = ((chunk_size + 511) / 512) * 512;
1311  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1312  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
1313  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
1314 
1315  if (n_chunks > threshold_array_allocate)
1316  {
1317  // make sure we allocate an even number of elements,
1318  // access to the new last element is needed in do_sum()
1319  large_array.resize(2 * ((n_chunks + 1) / 2));
1320  array_ptr = large_array.data();
1321  }
1322  else
1323  array_ptr = &small_array[0];
1324  }
1325 
1330  void
1331  operator()(const tbb::blocked_range<size_type> &range) const
1332  {
1333  for (size_type i = range.begin(); i < range.end(); ++i)
1334  accumulate_recursive(op,
1335  start + i * chunk_size,
1336  std::min(start + (i + 1) * chunk_size, end),
1337  array_ptr[i]);
1338  }
1339 
1340  ResultType
1341  do_sum() const
1342  {
1343  while (n_chunks > 1)
1344  {
1345  if (n_chunks % 2 == 1)
1346  array_ptr[n_chunks++] = ResultType();
1347  for (size_type i = 0; i < n_chunks; i += 2)
1348  array_ptr[i / 2] = array_ptr[i] + array_ptr[i + 1];
1349  n_chunks /= 2;
1350  }
1351  return array_ptr[0];
1352  }
1353 
1354  const Operation &op;
1355  const size_type start;
1356  const size_type end;
1357 
1358  mutable unsigned int n_chunks;
1359  unsigned int chunk_size;
1360  ResultType small_array[threshold_array_allocate];
1361  std::vector<ResultType> large_array;
1362  // this variable either points to small_array or large_array depending on
1363  // the number of threads we want to feed
1364  mutable ResultType *array_ptr;
1365  };
1366 #endif
1367 
1368 
1369 
1374  template <typename Operation, typename ResultType>
1375  void
1376  parallel_reduce(
1377  const Operation & op,
1378  const size_type start,
1379  const size_type end,
1380  ResultType & result,
1381  const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
1382  {
1383 #ifdef DEAL_II_WITH_THREADS
1384  const size_type vec_size = end - start;
1385  // only go to the parallel function in case there are at least 4 parallel
1386  // items, otherwise the overhead is too large
1387  if (vec_size >=
1388  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
1389  MultithreadInfo::n_threads() > 1)
1390  {
1391  Assert(partitioner.get() != nullptr,
1392  ExcInternalError(
1393  "Unexpected initialization of Vector that does "
1394  "not set the TBB partitioner to a usable state."));
1395  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1396  partitioner->acquire_one_partitioner();
1397 
1398  TBBReduceFunctor<Operation, ResultType> generic_functor(op,
1399  start,
1400  end);
1401  // We use a minimum grain size of 1 here since the grains at this
1402  // stage of dividing the work refer to the number of vector chunks
1403  // that are processed by (possibly different) threads in the
1404  // parallelized for loop (i.e., they do not refer to individual
1405  // vector entries). The number of chunks here is calculated inside
1406  // TBBForFunctor. See also GitHub issue #2496 for further discussion
1407  // of this strategy.
1408  parallel::internal::parallel_for(static_cast<size_type>(0),
1409  static_cast<size_type>(
1410  generic_functor.n_chunks),
1411  generic_functor,
1412  1,
1413  tbb_partitioner);
1414  partitioner->release_one_partitioner(tbb_partitioner);
1415  result = generic_functor.do_sum();
1416  }
1417  else
1418  accumulate_recursive(op, start, end, result);
1419 #else
1420  accumulate_recursive(op, start, end, result);
1421  (void)partitioner;
1422 #endif
1423  }
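
A minimal usage sketch of this reduction driver; the helper name inner_product and the freshly built partitioner are illustrative assumptions, and the pattern mirrors functions<Number, Number2, ::MemorySpace::Host>::dot() below:

#include <deal.II/lac/vector_operations_internal.h>

#include <memory>

// minimal sketch: compute the inner product of two arrays of 'size' entries
// via the Dot operation and parallel_reduce()
double inner_product(const double *x, const double *y,
                     const dealii::types::global_dof_index size)
{
  using namespace dealii::internal::VectorOperations;

  const auto partitioner =
    std::make_shared<dealii::parallel::internal::TBBPartitioner>();

  Dot<double, double> dot(x, y);
  double              result = 0.;
  parallel_reduce(dot, 0, size, result, partitioner);
  return result;
}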
1424 
1425 
1426  template <typename Number, typename Number2, typename MemorySpace>
1427  struct functions
1428  {
1429  static void
1430  copy(
1431  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1432  /*thread_loop_partitioner*/,
1433  const size_type /*size*/,
1434  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1435  & /*v_data*/,
1437  {
1438  static_assert(
1439  std::is_same<MemorySpace, ::MemorySpace::CUDA>::value &&
1440  std::is_same<Number, Number2>::value,
1441  "For the CUDA MemorySpace Number and Number2 should be the same type");
1442  }
1443 
1444  static void
1445  set(
1446  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1447  /*thread_loop_partitioner*/,
1448  const size_type /*size*/,
1449  const Number /*s*/,
1451  {}
1452 
1453  static void
1454  add_vector(
1455  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1456  /*thread_loop_partitioner*/,
1457  const size_type /*size*/,
1458  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1459  & /*v_data*/,
1461  {}
1462 
1463  static void
1464  subtract_vector(
1465  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1466  /*thread_loop_partitioner*/,
1467  const size_type /*size*/,
1468  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1469  & /*v_data*/,
1471  {}
1472 
1473  static void
1474  add_factor(
1475  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1476  /*thread_loop_partitioner*/,
1477  const size_type /*size*/,
1478  Number /*a*/,
1480  {}
1481 
1482  static void
1483  add_av(
1484  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1485  /*thread_loop_partitioner*/,
1486  const size_type /*size*/,
1487  const Number /*a*/,
1488  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1489  & /*v_data*/,
1491  {}
1492 
1493  static void
1494  add_avpbw(
1495  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1496  /*thread_loop_partitioner*/,
1497  const size_type /*size*/,
1498  const Number /*a*/,
1499  const Number /*b*/,
1500  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1501  & /*v_data*/,
1502  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1503  & /*w_data*/,
1505  {}
1506 
1507  static void
1508  sadd_xv(
1509  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1510  /*thread_loop_partitioner*/,
1511  const size_type /*size*/,
1512  const Number /*x*/,
1513  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1514  & /*v_data*/,
1516  {}
1517 
1518  static void
1519  sadd_xav(
1520  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1521  /*thread_loop_partitioner*/,
1522  const size_type /*size*/,
1523  const Number /*x*/,
1524  const Number /*a*/,
1525  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1526  & /*v_data*/,
1528  {}
1529 
1530  static void
1531  sadd_xavbw(
1532  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1533  /*thread_loop_partitioner*/,
1534  const size_type /*size*/,
1535  const Number /*x*/,
1536  const Number /*a*/,
1537  const Number /*b*/,
1538  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1539  & /*v_data*/,
1540  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1541  & /*w_data*/,
1543  {}
1544 
1545  static void
1546  multiply_factor(
1547  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1548  /*thread_loop_partitioner*/,
1549  const size_type /*size*/,
1550  const Number /*factor*/,
1552  {}
1553 
1554  static void
1555  scale(
1556  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1557  /*thread_loop_partitioner*/,
1558  const size_type /*size*/,
1559  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1560  & /*v_data*/,
1562  {}
1563 
1564  static void
1565  equ_au(
1566  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1567  /*thread_loop_partitioner*/,
1568  const size_type /*size*/,
1569  const Number /*a*/,
1570  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1571  & /*v_data*/,
1573  {}
1574 
1575  static void
1576  equ_aubv(
1577  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1578  /*thread_loop_partitioner*/,
1579  const size_type /*size*/,
1580  const Number /*a*/,
1581  const Number /*b*/,
1582  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1583  & /*v_data*/,
1584  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1585  & /*w_data*/,
1587  {}
1588 
1589  static Number
1590  dot(
1591  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1592  /*thread_loop_partitioner*/,
1593  const size_type /*size*/,
1594  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1595  & /*v_data*/,
1597  {
1598  return Number();
1599  }
1600 
1601  template <typename real_type>
1602  static void
1603  norm_2(
1604  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1605  /*thread_loop_partitioner*/,
1606  const size_type /*size*/,
1607  real_type & /*sum*/,
1608  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1609  & /*v_data*/,
1611  {}
1612 
1613  static Number
1614  mean_value(
1615  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1616  /*thread_loop_partitioner*/,
1617  const size_type /*size*/,
1618  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1619  & /*data*/)
1620  {
1621  return Number();
1622  }
1623 
1624  template <typename real_type>
1625  static void
1626  norm_1(
1627  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1628  /*thread_loop_partitioner*/,
1629  const size_type /*size*/,
1630  real_type & /*sum*/,
1631  Number * /*values*/,
1632  Number * /*values_dev*/)
1633  {}
1634 
1635  template <typename real_type>
1636  static void
1637  norm_p(
1638  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1639  /*thread_loop_partitioner*/,
1640  const size_type /*size*/,
1641  real_type & /*sum*/,
1642  real_type /*p*/,
1644  {}
1645 
1646  static Number
1647  add_and_dot(
1648  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1649  /*thread_loop_partitioner*/,
1650  const size_type /*size*/,
1651  const Number /*a*/,
1652  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1653  & /*v_data*/,
1654  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1655  & /*w_data*/,
1657  {
1658  return Number();
1659  }
1660 
1661  template <typename MemorySpace2>
1662  static void
1663  import(
1664  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1665  /*thread_loop_partitioner*/,
1666  const size_type /*size*/,
1667  VectorOperation::values /*operation*/,
1668  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
1669  & /*v_data*/,
1671  {}
1672  };
1673 
1674 
1675 
1676  template <typename Number, typename Number2>
1677  struct functions<Number, Number2, ::MemorySpace::Host>
1678  {
1679  static void
1680  copy(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1681  & thread_loop_partitioner,
1682  const size_type size,
1683  const ::MemorySpace::
1684  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1687  &data)
1688  {
1689  Vector_copy<Number, Number2> copier(v_data.values.get(),
1690  data.values.get());
1691  parallel_for(copier, 0, size, thread_loop_partitioner);
1692  }
1693 
1694  static void
1695  set(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1696  & thread_loop_partitioner,
1697  const size_type size,
1698  const Number s,
1701  &data)
1702  {
1703  Vector_set<Number> setter(s, data.values.get());
1704  parallel_for(setter, 0, size, thread_loop_partitioner);
1705  }
1706 
1707  static void
1708  add_vector(
1709  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1710  & thread_loop_partitioner,
1711  const size_type size,
1712  const ::MemorySpace::
1713  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1716  &data)
1717  {
1718  Vectorization_add_v<Number> vector_add(data.values.get(),
1719  v_data.values.get());
1720  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1721  }
1722 
1723  static void
1724  subtract_vector(
1725  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1726  & thread_loop_partitioner,
1727  const size_type size,
1728  const ::MemorySpace::
1729  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1732  &data)
1733  {
1734  Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
1735  v_data.values.get());
1736  parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
1737  }
1738 
1739  static void
1740  add_factor(
1741  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1742  & thread_loop_partitioner,
1743  const size_type size,
1744  Number a,
1747  &data)
1748  {
1749  Vectorization_add_factor<Number> vector_add(data.values.get(), a);
1750  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1751  }
1752 
1753  static void
1754  add_av(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1755  & thread_loop_partitioner,
1756  const size_type size,
1757  const Number a,
1758  const ::MemorySpace::
1759  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1762  &data)
1763  {
1764  Vectorization_add_av<Number> vector_add(data.values.get(),
1765  v_data.values.get(),
1766  a);
1767  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1768  }
1769 
1770  static void
1771  add_avpbw(
1772  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1773  & thread_loop_partitioner,
1774  const size_type size,
1775  const Number a,
1776  const Number b,
1777  const ::MemorySpace::
1778  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1779  const ::MemorySpace::
1780  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1783  &data)
1784  {
1785  Vectorization_add_avpbw<Number> vector_add(
1786  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1787  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1788  }
1789 
1790  static void
1791  sadd_xv(
1792  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1793  & thread_loop_partitioner,
1794  const size_type size,
1795  const Number x,
1796  const ::MemorySpace::
1797  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1800  &data)
1801  {
1802  Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
1803  v_data.values.get(),
1804  x);
1805  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1806  }
1807 
1808  static void
1809  sadd_xav(
1810  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1811  & thread_loop_partitioner,
1812  const size_type size,
1813  const Number x,
1814  const Number a,
1815  const ::MemorySpace::
1816  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1819  &data)
1820  {
1821  Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
1822  v_data.values.get(),
1823  a,
1824  x);
1825  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1826  }
1827 
1828  static void
1829  sadd_xavbw(
1830  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1831  & thread_loop_partitioner,
1832  const size_type size,
1833  const Number x,
1834  const Number a,
1835  const Number b,
1836  const ::MemorySpace::
1837  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1838  const ::MemorySpace::
1839  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1842  &data)
1843  {
1844  Vectorization_sadd_xavbw<Number> vector_sadd(
1845  data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
1846  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1847  }
1848 
1849  static void
1850  multiply_factor(
1851  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1852  & thread_loop_partitioner,
1853  const size_type size,
1854  const Number factor,
1857  &data)
1858  {
1859  Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
1860  factor);
1861  parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
1862  }
1863 
1864  static void
1865  scale(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1866  & thread_loop_partitioner,
1867  const size_type size,
1868  const ::MemorySpace::
1869  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1872  &data)
1873  {
1874  Vectorization_scale<Number> vector_scale(data.values.get(),
1875  v_data.values.get());
1876  parallel_for(vector_scale, 0, size, thread_loop_partitioner);
1877  }
1878 
1879  static void
1880  equ_au(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1881  & thread_loop_partitioner,
1882  const size_type size,
1883  const Number a,
1884  const ::MemorySpace::
1885  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1888  &data)
1889  {
1890  Vectorization_equ_au<Number> vector_equ(data.values.get(),
1891  v_data.values.get(),
1892  a);
1893  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1894  }
1895 
1896  static void
1897  equ_aubv(
1898  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1899  & thread_loop_partitioner,
1900  const size_type size,
1901  const Number a,
1902  const Number b,
1903  const ::MemorySpace::
1904  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1905  const ::MemorySpace::
1906  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1909  &data)
1910  {
1911  Vectorization_equ_aubv<Number> vector_equ(
1912  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1913  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1914  }
1915 
1916  static Number
1917  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1918  & thread_loop_partitioner,
1919  const size_type size,
1920  const ::MemorySpace::
1921  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1924  &data)
1925  {
1926  Number sum;
1927  ::internal::VectorOperations::Dot<Number, Number2> dot(
1928  data.values.get(), v_data.values.get());
1929  ::internal::VectorOperations::parallel_reduce(
1930  dot, 0, size, sum, thread_loop_partitioner);
1931  AssertIsFinite(sum);
1932 
1933  return sum;
1934  }
1935 
1936  template <typename real_type>
1937  static void
1938  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1939  & thread_loop_partitioner,
1940  const size_type size,
1941  real_type & sum,
1944  &data)
1945  {
1946  Norm2<Number, real_type> norm2(data.values.get());
1947  parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
1948  }
1949 
1950  static Number
1951  mean_value(
1952  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1953  & thread_loop_partitioner,
1954  const size_type size,
1955  const ::MemorySpace::
1956  MemorySpaceData<Number, ::MemorySpace::Host> &data)
1957  {
1958  Number sum;
1959  MeanValue<Number> mean(data.values.get());
1960  parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
1961 
1962  return sum;
1963  }
1964 
1965  template <typename real_type>
1966  static void
1967  norm_1(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1968  & thread_loop_partitioner,
1969  const size_type size,
1970  real_type & sum,
1973  &data)
1974  {
1975  Norm1<Number, real_type> norm1(data.values.get());
1976  parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
1977  }
1978 
1979  template <typename real_type>
1980  static void
1981  norm_p(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1982  & thread_loop_partitioner,
1983  const size_type size,
1984  real_type & sum,
1985  const real_type p,
1988  &data)
1989  {
1990  NormP<Number, real_type> normp(data.values.get(), p);
1991  parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
1992  }
1993 
1994  static Number
1995  add_and_dot(
1996  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1997  & thread_loop_partitioner,
1998  const size_type size,
1999  const Number a,
2000  const ::MemorySpace::
2001  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
2002  const ::MemorySpace::
2003  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
2006  &data)
2007  {
2008  Number sum;
2009  AddAndDot<Number> adder(data.values.get(),
2010  v_data.values.get(),
2011  w_data.values.get(),
2012  a);
2013  parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
2014 
2015  return sum;
2016  }
2017 
2018  template <typename MemorySpace2>
2019  static void
2020  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2021  & thread_loop_partitioner,
2022  const size_type size,
2023  VectorOperation::values operation,
2024  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2025  &v_data,
2028  &data,
2029  typename std::enable_if<
2030  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2031  int>::type = 0)
2032  {
2033  if (operation == VectorOperation::insert)
2034  {
2035  copy(thread_loop_partitioner, size, v_data, data);
2036  }
2037  else if (operation == VectorOperation::add)
2038  {
2039  add_vector(thread_loop_partitioner, size, v_data, data);
2040  }
2041  else
2042  {
2043  AssertThrow(false, ExcNotImplemented());
2044  }
2045  }
2046 
2047 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2048  template <typename MemorySpace2>
2049  static void
2050  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2051  & /*thread_loop_partitioner*/,
2052  const size_type size,
2053  VectorOperation::values operation,
2054  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2055  &v_data,
2058  &data,
2059  typename std::enable_if<
2060  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2061  int>::type = 0)
2062  {
2063  if (operation == VectorOperation::insert)
2064  {
2065  cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
2066  v_data.values_dev.get(),
2067  size * sizeof(Number),
2068  cudaMemcpyDeviceToHost);
2069  AssertCuda(cuda_error_code);
2070  }
2071  else
2072  {
2073  AssertThrow(false, ExcNotImplemented());
2074  }
2075  }
2076 #endif
2077  };
2078 
2079 
2080 
2081 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2082  template <typename Number>
2083  struct functions<Number, Number, ::MemorySpace::CUDA>
2084  {
2085  static const int block_size =
2086  ::LinearAlgebra::CUDAWrappers::kernel::block_size;
2087  static const int chunk_size =
2088  ::LinearAlgebra::CUDAWrappers::kernel::chunk_size;
2089 
2090  static void
2091  copy(
2092  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2093  const size_type size,
2094  const ::MemorySpace::
2095  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2098  &data)
2099  {
2100  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2101  v_data.values_dev.get(),
2102  size * sizeof(Number),
2103  cudaMemcpyDeviceToDevice);
2104  AssertCuda(cuda_error_code);
2105  }
2106 
2107  static void
2108  set(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2109  const size_type size,
2110  const Number s,
2111  ::MemorySpace::MemorySpaceData<Number,
2112  ::MemorySpace::CUDA>
2113  &data)
2114  {
2115  const int n_blocks = 1 + size / (chunk_size * block_size);
2116  ::LinearAlgebra::CUDAWrappers::kernel::set<Number>
2117  <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
2118 
2119 # ifdef DEBUG
2120  // Check that the kernel was launched correctly
2121  AssertCuda(cudaGetLastError());
2122  // Check that there was no problem during the execution of the kernel
2123  AssertCuda(cudaDeviceSynchronize());
2124 # endif
2125  }
2126 
2127  static void
2128  add_vector(
2129  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2130  const size_type size,
2131  const ::MemorySpace::
2132  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2133  ::MemorySpace::MemorySpaceData<Number,
2134  ::MemorySpace::CUDA>
2135  &data)
2136  {
2137  const int n_blocks = 1 + size / (chunk_size * block_size);
2138  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2139  <<<n_blocks, block_size>>>(data.values_dev.get(),
2140  1.,
2141  v_data.values_dev.get(),
2142  size);
2143 
2144 # ifdef DEBUG
2145  // Check that the kernel was launched correctly
2146  AssertCuda(cudaGetLastError());
2147  // Check that there was no problem during the execution of the kernel
2148  AssertCuda(cudaDeviceSynchronize());
2149 # endif
2150  }
2151 
2152  static void
2153  subtract_vector(
2154  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2155  const size_type size,
2156  const ::MemorySpace::
2157  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2158  ::MemorySpace::MemorySpaceData<Number,
2159  ::MemorySpace::CUDA>
2160  &data)
2161  {
2162  const int n_blocks = 1 + size / (chunk_size * block_size);
2163  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2164  <<<n_blocks, block_size>>>(data.values_dev.get(),
2165  -1.,
2166  v_data.values_dev.get(),
2167  size);
2168 
2169 # ifdef DEBUG
2170  // Check that the kernel was launched correctly
2171  AssertCuda(cudaGetLastError());
2172  // Check that there was no problem during the execution of the kernel
2173  AssertCuda(cudaDeviceSynchronize());
2174 # endif
2175  }
2176 
2177  static void
2178  add_factor(
2179  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2180  const size_type size,
2181  Number a,
2182  ::MemorySpace::MemorySpaceData<Number,
2183  ::MemorySpace::CUDA>
2184  &data)
2185  {
2186  const int n_blocks = 1 + size / (chunk_size * block_size);
2187  ::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
2188  <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
2189 
2190 # ifdef DEBUG
2191  // Check that the kernel was launched correctly
2192  AssertCuda(cudaGetLastError());
2193  // Check that there was no problem during the execution of the kernel
2194  AssertCuda(cudaDeviceSynchronize());
2195 # endif
2196  }
2197 
2198  static void
2199  add_av(
2200  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2201  const size_type size,
2202  const Number a,
2203  const ::MemorySpace::
2204  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2205  ::MemorySpace::MemorySpaceData<Number,
2206  ::MemorySpace::CUDA>
2207  &data)
2208  {
2209  const int n_blocks = 1 + size / (chunk_size * block_size);
2210  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2211  <<<n_blocks, block_size>>>(data.values_dev.get(),
2212  a,
2213  v_data.values_dev.get(),
2214  size);
2215 
2216 # ifdef DEBUG
2217  // Check that the kernel was launched correctly
2218  AssertCuda(cudaGetLastError());
2219  // Check that there was no problem during the execution of the kernel
2220  AssertCuda(cudaDeviceSynchronize());
2221 # endif
2222  }
2223 
2224  static void
2225  add_avpbw(
2226  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2227  const size_type size,
2228  const Number a,
2229  const Number b,
2230  const ::MemorySpace::
2231  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2232  const ::MemorySpace::
2233  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2234  ::MemorySpace::MemorySpaceData<Number,
2235  ::MemorySpace::CUDA>
2236  &data)
2237  {
2238  const int n_blocks = 1 + size / (chunk_size * block_size);
2239  ::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
2240  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2241  a,
2242  v_data.values_dev.get(),
2243  b,
2244  w_data.values_dev.get(),
2245  size);
2246 
2247 # ifdef DEBUG
2248  // Check that the kernel was launched correctly
2249  AssertCuda(cudaGetLastError());
2250  // Check that there was no problem during the execution of the kernel
2251  AssertCuda(cudaDeviceSynchronize());
2252 # endif
2253  }
2254 
2255  static void
2256  sadd_xv(
2257  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2258  const size_type size,
2259  const Number x,
2260  const ::MemorySpace::
2261  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2262  ::MemorySpace::MemorySpaceData<Number,
2263  ::MemorySpace::CUDA>
2264  &data)
2265  {
2266  const int n_blocks = 1 + size / (chunk_size * block_size);
2267  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2268  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2269  x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
2270 
2271 # ifdef DEBUG
2272  // Check that the kernel was launched correctly
2273  AssertCuda(cudaGetLastError());
2274  // Check that there was no problem during the execution of the kernel
2275  AssertCuda(cudaDeviceSynchronize());
2276 # endif
2277  }
2278 
2279  static void
2280  sadd_xav(
2281  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2282  const size_type size,
2283  const Number x,
2284  const Number a,
2285  const ::MemorySpace::
2286  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2287  ::MemorySpace::MemorySpaceData<Number,
2288  ::MemorySpace::CUDA>
2289  &data)
2290  {
2291  const int n_blocks = 1 + size / (chunk_size * block_size);
2292  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2293  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2294  x, data.values_dev.get(), a, v_data.values_dev.get(), size);
2295 
2296 # ifdef DEBUG
2297  // Check that the kernel was launched correctly
2298  AssertCuda(cudaGetLastError());
2299  // Check that there was no problem during the execution of the kernel
2300  AssertCuda(cudaDeviceSynchronize());
2301 # endif
2302  }
2303 
2304  static void
2305  sadd_xavbw(
2306  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2307  const size_type size,
2308  const Number x,
2309  const Number a,
2310  const Number b,
2311  const ::MemorySpace::
2312  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2313  const ::MemorySpace::
2314  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2315  ::MemorySpace::MemorySpaceData<Number,
2316  ::MemorySpace::CUDA>
2317  &data)
2318  {
2319  const int n_blocks = 1 + size / (chunk_size * block_size);
2320  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2321  <<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
2322  data.values_dev.get(),
2323  a,
2324  v_data.values_dev.get(),
2325  b,
2326  w_data.values_dev.get(),
2327  size);
2328 
2329 # ifdef DEBUG
2330  // Check that the kernel was launched correctly
2331  AssertCuda(cudaGetLastError());
2332  // Check that there was no problem during the execution of the kernel
2333  AssertCuda(cudaDeviceSynchronize());
2334 # endif
2335  }
2336 
2337  static void
2338  multiply_factor(
2339  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2340  const size_type size,
2341  const Number factor,
2342  ::MemorySpace::MemorySpaceData<Number,
2343  ::MemorySpace::CUDA>
2344  &data)
2345  {
2346  const int n_blocks = 1 + size / (chunk_size * block_size);
2347  ::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
2348  <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
2349 
2350 # ifdef DEBUG
2351  // Check that the kernel was launched correctly
2352  AssertCuda(cudaGetLastError());
2353  // Check that there was no problem during the execution of the kernel
2354  AssertCuda(cudaDeviceSynchronize());
2355 # endif
2356  }
2357 
2358  static void
2359  scale(
2360  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2361  const size_type size,
2362  const ::MemorySpace::
2363  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2364  ::MemorySpace::MemorySpaceData<Number,
2365  ::MemorySpace::CUDA>
2366  &data)
2367  {
2368  const int n_blocks = 1 + size / (chunk_size * block_size);
2369  ::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
2370  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2371  v_data.values_dev.get(),
2372  size);
2373 
2374 # ifdef DEBUG
2375  // Check that the kernel was launched correctly
2376  AssertCuda(cudaGetLastError());
2377  // Check that there was no problem during the execution of the kernel
2378  AssertCuda(cudaDeviceSynchronize());
2379 # endif
2380  }
2381 
2382  static void
2383  equ_au(
2384  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2385  const size_type size,
2386  const Number a,
2387  const ::MemorySpace::
2388  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2389  ::MemorySpace::MemorySpaceData<Number,
2390  ::MemorySpace::CUDA>
2391  &data)
2392  {
2393  const int n_blocks = 1 + size / (chunk_size * block_size);
2394  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2395  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2396  a,
2397  v_data.values_dev.get(),
2398  size);
2399 
2400 # ifdef DEBUG
2401  // Check that the kernel was launched correctly
2402  AssertCuda(cudaGetLastError());
2403  // Check that there was no problem during the execution of the kernel
2404  AssertCuda(cudaDeviceSynchronize());
2405 # endif
2406  }
2407 
2408  static void
2409  equ_aubv(
2410  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2411  const size_type size,
2412  const Number a,
2413  const Number b,
2414  const ::MemorySpace::
2415  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2416  const ::MemorySpace::
2417  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2418  ::MemorySpace::MemorySpaceData<Number,
2419  ::MemorySpace::CUDA>
2420  &data)
2421  {
2422  const int n_blocks = 1 + size / (chunk_size * block_size);
2423  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2424  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2425  a,
2426  v_data.values_dev.get(),
2427  b,
2428  w_data.values_dev.get(),
2429  size);
2430 
2431 # ifdef DEBUG
2432  // Check that the kernel was launched correctly
2433  AssertCuda(cudaGetLastError());
2434  // Check that there was no problem during the execution of the kernel
2435  AssertCuda(cudaDeviceSynchronize());
2436 # endif
2437  }
2438 
2439  static Number
2440  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2441  const size_type size,
2442  const ::MemorySpace::
2443  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2444  ::MemorySpace::MemorySpaceData<Number,
2445  ::MemorySpace::CUDA>
2446  &data)
2447  {
2448  Number * result_device;
2449  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2450  AssertCuda(error_code);
2451  error_code = cudaMemset(result_device, 0, sizeof(Number));
2452  AssertCuda(error_code);
2453 
2454  const int n_blocks = 1 + size / (chunk_size * block_size);
2455  ::LinearAlgebra::CUDAWrappers::kernel::double_vector_reduction<
2456  Number,
2457  ::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
2458  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2459  data.values_dev.get(),
2460  v_data.values_dev.get(),
2461  static_cast<unsigned int>(
2462  size));
2463 
2464 # ifdef DEBUG
2465  // Check that the kernel was launched correctly
2466  AssertCuda(cudaGetLastError());
2467  // Check that there was no problem during the execution of the kernel
2468  AssertCuda(cudaDeviceSynchronize());
2469 # endif
2470 
2471  // Copy the result back to the host
2472  Number result;
2473  error_code = cudaMemcpy(&result,
2474  result_device,
2475  sizeof(Number),
2476  cudaMemcpyDeviceToHost);
2477  AssertCuda(error_code);
2478  // Free the memory on the device
2479  error_code = cudaFree(result_device);
2480  AssertCuda(error_code);
2481 
2482  AssertIsFinite(result);
2483 
2484  return result;
2485  }
2486 
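The device-side reduction members of this specialization (dot() above, and mean_value()/norm_1() below) all follow the same pattern: allocate a single Number on the device, zero it, launch a reduction kernel, copy the scalar back to the host, and free the buffer. The sketch below is a simplified standalone illustration of that pattern, assuming a CUDA-capable compiler and a GPU with native double-precision atomicAdd; the naive kernel is a hypothetical stand-in for the shared-memory reduction kernels in cuda_kernels.h, and error checking is omitted for brevity.

    #include <cuda_runtime.h>

    // Naive stand-in for kernel::double_vector_reduction: every thread adds
    // one product into the global result (requires sm_60+ for double atomicAdd).
    __global__ void
    naive_dot_kernel(double *           result,
                     const double *     v,
                     const double *     w,
                     const unsigned int n)
    {
      const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n)
        atomicAdd(result, v[i] * w[i]);
    }

    double
    device_dot(const double *v_dev, const double *w_dev, const unsigned int n)
    {
      double *result_dev;
      cudaMalloc(&result_dev, sizeof(double));   // allocate the scalar
      cudaMemset(result_dev, 0, sizeof(double)); // zero it

      const unsigned int block_size = 512;
      const unsigned int n_blocks   = 1 + n / block_size;
      naive_dot_kernel<<<n_blocks, block_size>>>(result_dev, v_dev, w_dev, n);

      double result;
      // cudaMemcpy on the default stream synchronizes with the kernel above.
      cudaMemcpy(&result, result_dev, sizeof(double), cudaMemcpyDeviceToHost);
      cudaFree(result_dev); // release the scalar
      return result;
    }
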
2487  template <typename real_type>
2488  static void
2489  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2490  & thread_loop_partitioner,
2491  const size_type size,
2492  real_type & sum,
2493  ::MemorySpace::MemorySpaceData<Number,
2494  ::MemorySpace::CUDA>
2495  &data)
2496  {
2497  sum = dot(thread_loop_partitioner, size, data, data);
2498  }
2499 
2500  static Number
2501  mean_value(
2502  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2503  const size_type size,
2504  const ::MemorySpace::
2505  MemorySpaceData<Number, ::MemorySpace::CUDA> &data)
2506  {
2507  Number * result_device;
2508  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2509  AssertCuda(error_code);
2510  error_code = cudaMemset(result_device, 0, sizeof(Number));
2511 
2512  const int n_blocks = 1 + size / (chunk_size * block_size);
2513  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2514  Number,
2515  ::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
2516  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2517  data.values_dev.get(),
2518  size);
2519 
2520  // Copy the result back to the host
2521  Number result;
2522  error_code = cudaMemcpy(&result,
2523  result_device,
2524  sizeof(Number),
2525  cudaMemcpyDeviceToHost);
2526  AssertCuda(error_code);
2527  // Free the memory on the device
2528  error_code = cudaFree(result_device);
2529  AssertCuda(error_code);
2530 
2531  return result;
2532  }
2533 
2534  template <typename real_type>
2535  static void
2536  norm_1(
2537  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2538  const size_type size,
2539  real_type & sum,
2540  ::MemorySpace::MemorySpaceData<Number,
2541  ::MemorySpace::CUDA>
2542  &data)
2543  {
2544  Number * result_device;
2545  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2546  AssertCuda(error_code);
2547  error_code = cudaMemset(result_device, 0, sizeof(Number));
2548 
2549  const int n_blocks = 1 + size / (chunk_size * block_size);
2550  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2551  Number,
2552  ::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
2553  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2554  data.values_dev.get(),
2555  size);
2556 
2557  // Copy the result back to the host
2558  error_code = cudaMemcpy(&sum,
2559  result_device,
2560  sizeof(Number),
2561  cudaMemcpyDeviceToHost);
2562  AssertCuda(error_code);
2563  // Free the memory on the device
2564  error_code = cudaFree(result_device);
2565  AssertCuda(error_code);
2566  }
2567 
2568  template <typename real_type>
2569  static void
2570  norm_p(
2571  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2572  const size_type,
2573  real_type &,
2574  real_type,
2575  ::MemorySpace::MemorySpaceData<Number,
2576  ::MemorySpace::CUDA> &)
2577  {
2578  Assert(false, ExcNotImplemented());
2579  }
2580 
2581  static Number
2582  add_and_dot(
2583  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2584  const size_type size,
2585  const Number a,
2586  const ::MemorySpace::
2587  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2588  const ::MemorySpace::
2589  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2590  ::MemorySpace::MemorySpaceData<Number,
2591  ::MemorySpace::CUDA>
2592  &data)
2593  {
2594  Number * res_d;
2595  cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
2596  AssertCuda(error_code);
2597  error_code = cudaMemset(res_d, 0, sizeof(Number));
2598  AssertCuda(error_code);
2599 
2600  const int n_blocks = 1 + size / (chunk_size * block_size);
2601  ::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
2602  <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
2603  data.values_dev.get(),
2604  v_data.values_dev.get(),
2605  w_data.values_dev.get(),
2606  a,
2607  size);
2608 
2609  Number res;
2610  error_code =
2611  cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
2612  AssertCuda(error_code);
2613  error_code = cudaFree(res_d);
2614 
2615  return res;
2616  }
2617 
2618  template <typename MemorySpace2>
2619  static void
2620  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2621  & thread_loop_partitioner,
2622  const size_type size,
2623  VectorOperation::values operation,
2624  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2625  &v_data,
2626  ::MemorySpace::MemorySpaceData<Number,
2627  ::MemorySpace::CUDA>
2628  &data,
2629  typename std::enable_if<
2630  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2631  int>::type = 0)
2632  {
2633  if (operation == VectorOperation::insert)
2634  {
2635  copy(thread_loop_partitioner, size, v_data, data);
2636  }
2637  else if (operation == VectorOperation::add)
2638  {
2639  add_vector(thread_loop_partitioner, size, v_data, data);
2640  }
2641  else
2642  {
2643  AssertThrow(false, ExcNotImplemented());
2644  }
2645  }
2646 
2647  template <typename MemorySpace2>
2648  static void
2649  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2650  & /*thread_loop_partitioner*/,
2651  const size_type size,
2652  VectorOperation::values operation,
2653  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2654  &v_data,
2655  ::MemorySpace::MemorySpaceData<Number,
2656  ::MemorySpace::CUDA>
2657  &data,
2658  typename std::enable_if<
2659  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2660  int>::type = 0)
2661  {
2662  if (operation == VectorOperation::insert)
2663  {
2664  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2665  v_data.values.get(),
2666  size * sizeof(Number),
2667  cudaMemcpyHostToDevice);
2668  AssertCuda(cuda_error_code);
2669  }
2670  else
2671  {
2672  AssertThrow(false, ExcNotImplemented());
2673  }
2674  }
2675  };
2676 #endif
2677  } // namespace VectorOperations
2678 } // namespace internal
2679 
2680 DEAL_II_NAMESPACE_CLOSE
2681 
2682 #endif
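A note on the launch configuration used throughout the CUDA branch: n_blocks = 1 + size / (chunk_size * block_size) sizes the grid so that each block covers chunk_size * block_size elements, and the "+ 1" supplies a final partial block when size is not an exact multiple. The values block_size = 512 and chunk_size = 8 in the small worked example below are assumed purely for illustration; see cuda_kernels.h for the actual constants.

    #include <iostream>

    int
    main()
    {
      const unsigned int block_size = 512;     // threads per block (assumed)
      const unsigned int chunk_size = 8;       // elements per thread (assumed)
      const unsigned int size       = 1000000; // vector length

      // Each block covers chunk_size * block_size = 4096 elements, so
      // 1 + 1000000 / 4096 = 1 + 244 = 245 blocks are launched.
      const unsigned int n_blocks = 1 + size / (chunk_size * block_size);
      std::cout << n_blocks << std::endl; // prints 245
    }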