Reference documentation for deal.II version Git 1f3f4df3cd 2020-02-17 16:54:31 -0500
vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vector_operations_internal_h
18 #define dealii_vector_operations_internal_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/memory_space.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/parallel.h>
25 #include <deal.II/base/types.h>
26 #include <deal.II/base/vectorization.h>
27 
28 #include <deal.II/lac/cuda_kernels.h>
29 #include <deal.II/lac/cuda_kernels.templates.h>
30 #include <deal.II/lac/vector_operation.h>
31 
32 #include <cstdio>
33 #include <cstring>
34 
35 DEAL_II_NAMESPACE_OPEN
36 
37 namespace internal
38 {
39  namespace VectorOperations
40  {
41  using size_type = types::global_dof_index;
42 
43  template <typename T>
44  bool
45  is_non_negative(const T &t)
46  {
47  return t >= 0;
48  }
49 
50 
51  template <typename T>
52  bool
53  is_non_negative(const std::complex<T> &)
54  {
55  Assert(false, ExcMessage("Complex numbers do not have an ordering."));
56 
57  return false;
58  }
59 
60 
61  template <typename T>
62  void
63  print(const T &t, const char *format)
64  {
65  if (format != nullptr)
66  std::printf(format, t);
67  else
68  std::printf(" %5.2f", double(t));
69  }
70 
71 
72 
73  template <typename T>
74  void
75  print(const std::complex<T> &t, const char *format)
76  {
77  if (format != nullptr)
78  std::printf(format, t.real(), t.imag());
79  else
80  std::printf(" %5.2f+%5.2fi", double(t.real()), double(t.imag()));
81  }
82 
83  // call std::copy, except for in
84  // the case where we want to copy
85  // from std::complex to a
86  // non-complex type
87  template <typename T, typename U>
88  void
89  copy(const T *begin, const T *end, U *dest)
90  {
91  std::copy(begin, end, dest);
92  }
93 
94  template <typename T, typename U>
95  void
96  copy(const std::complex<T> *begin,
97  const std::complex<T> *end,
98  std::complex<U> * dest)
99  {
100  std::copy(begin, end, dest);
101  }
102 
103  template <typename T, typename U>
104  void
105  copy(const std::complex<T> *, const std::complex<T> *, U *)
106  {
107  Assert(false,
108  ExcMessage("Can't convert a vector of complex numbers "
109  "into a vector of reals/doubles"));
110  }
111 
112 
113 
114 #ifdef DEAL_II_WITH_THREADS
115 
123  template <typename Functor>
124  struct TBBForFunctor
125  {
126  TBBForFunctor(Functor & functor,
127  const size_type start,
128  const size_type end)
129  : functor(functor)
130  , start(start)
131  , end(end)
132  {
133  const size_type vec_size = end - start;
134  // set chunk size for sub-tasks
135  const unsigned int gs =
136  internal::VectorImplementation::minimum_parallel_grain_size;
137  n_chunks =
138  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
139  vec_size / gs);
140  chunk_size = vec_size / n_chunks;
141 
142  // round to next multiple of 512 (or minimum grain size if that happens
143  // to be smaller). this is advantageous because our accumulation
144  // algorithms favor lengths of a power of 2 due to pairwise summation ->
145  // at most one 'oddly' sized chunk
146  if (chunk_size > 512)
147  chunk_size = ((chunk_size + 511) / 512) * 512;
148  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
149  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
150  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
151  }
152 
153  void
154  operator()(const tbb::blocked_range<size_type> &range) const
155  {
156  const size_type r_begin = start + range.begin() * chunk_size;
157  const size_type r_end = std::min(start + range.end() * chunk_size, end);
158  functor(r_begin, r_end);
159  }
160 
161  Functor & functor;
162  const size_type start;
163  const size_type end;
164  unsigned int n_chunks;
165  size_type chunk_size;
166  };
167 #endif
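To see what the chunking in the TBBForFunctor constructor does in practice, here is a small worked example; the concrete numbers (a grain size of 4096 and 8 threads) are purely illustrative assumptions, not statements about the library's defaults:

  // Hypothetical input: vec_size = 100000, gs = 4096, MultithreadInfo::n_threads() == 8
  // n_chunks   = min(4 * 8, 100000 / 4096) = min(32, 24) = 24
  // chunk_size = 100000 / 24 = 4166  ->  rounded up to 4608, the next multiple of 512
  // n_chunks   = (100000 + 4608 - 1) / 4608 = 22
  // operator() later maps TBB range index i to the vector index range
  // [start + i * 4608, min(start + (i + 1) * 4608, end)).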
168 
169  template <typename Functor>
170  void
171  parallel_for(
172  Functor & functor,
173  const size_type start,
174  const size_type end,
175  const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
176  {
177 #ifdef DEAL_II_WITH_THREADS
178  const size_type vec_size = end - start;
179  // only go to the parallel function in case there are at least 4 parallel
180  // items, otherwise the overhead is too large
181  if (vec_size >=
182  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
183  MultithreadInfo::n_threads() > 1)
184  {
185  Assert(partitioner.get() != nullptr,
186  ExcInternalError(
187  "Unexpected initialization of Vector that does "
188  "not set the TBB partitioner to a usable state."));
189  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
190  partitioner->acquire_one_partitioner();
191 
192  TBBForFunctor<Functor> generic_functor(functor, start, end);
193  // We use a minimum grain size of 1 here since the grains at this
194  // stage of dividing the work refer to the number of vector chunks
195  // that are processed by (possibly different) threads in the
196  // parallelized for loop (i.e., they do not refer to individual
197  // vector entries). The number of chunks here is calculated inside
198  // TBBForFunctor. See also GitHub issue #2496 for further discussion
199  // of this strategy.
200  parallel::internal::parallel_for(static_cast<size_type>(0),
201  static_cast<size_type>(
202  generic_functor.n_chunks),
203  generic_functor,
204  1,
205  tbb_partitioner);
206  partitioner->release_one_partitioner(tbb_partitioner);
207  }
208  else if (vec_size > 0)
209  functor(start, end);
210 #else
211  functor(start, end);
212  (void)partitioner;
213 #endif
214  }
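A minimal usage sketch of parallel_for() (hypothetical caller code, shown only for orientation; it mirrors what functions<Number, Number2, ::MemorySpace::Host>::set() does further below). Assuming values points to the n entries of a vector and partitioner is that vector's TBB partitioner,

  Vector_set<double> setter(1.0, values);
  parallel_for(setter, 0, n, partitioner);

sets every entry to 1.0. With threads enabled and a long enough vector, the functor is invoked once per chunk computed by TBBForFunctor; otherwise the call degenerates to the single invocation setter(0, n).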
215 
216 
217  // Define the functors necessary to use SIMD with TBB. we also include the
218  // simple copy and set operations
219 
220  template <typename Number>
221  struct Vector_set
222  {
223  Vector_set(const Number value, Number *const dst)
224  : value(value)
225  , dst(dst)
226  {
227  Assert(dst != nullptr, ExcInternalError());
228  }
229 
230  void
231  operator()(const size_type begin, const size_type end) const
232  {
233  Assert(end >= begin, ExcInternalError());
234 
235  if (value == Number())
236  {
237 #ifdef DEAL_II_WITH_CXX17
238  if constexpr (std::is_trivial<Number>::value)
239 #else
240  if (std::is_trivial<Number>::value)
241 #endif
242  {
243  std::memset(dst + begin, 0, sizeof(Number) * (end - begin));
244  return;
245  }
246  }
247  std::fill(dst + begin, dst + end, value);
248  }
249 
250  const Number value;
251  Number *const dst;
252  };
253 
254  template <typename Number, typename OtherNumber>
255  struct Vector_copy
256  {
257  Vector_copy(const OtherNumber *const src, Number *const dst)
258  : src(src)
259  , dst(dst)
260  {
261  Assert(src != nullptr, ExcInternalError());
262  Assert(dst != nullptr, ExcInternalError());
263  }
264 
265  void
266  operator()(const size_type begin, const size_type end) const
267  {
268  Assert(end >= begin, ExcInternalError());
269 
270 #if __GNUG__ && __GNUC__ < 5
271  if (__has_trivial_copy(Number) &&
272  std::is_same<Number, OtherNumber>::value)
273 #else
274 # ifdef DEAL_II_WITH_CXX17
275  if constexpr (std::is_trivially_copyable<Number>() &&
276  std::is_same<Number, OtherNumber>::value)
277 # else
278  if (std::is_trivially_copyable<Number>() &&
279  std::is_same<Number, OtherNumber>::value)
280 # endif
281 #endif
282  std::memcpy(dst + begin, src + begin, (end - begin) * sizeof(Number));
283  else
284  {
285  DEAL_II_OPENMP_SIMD_PRAGMA
286  for (size_type i = begin; i < end; ++i)
287  dst[i] = src[i];
288  }
289  }
290 
291  const OtherNumber *const src;
292  Number *const dst;
293  };
294 
295  template <typename Number>
296  struct Vectorization_multiply_factor
297  {
298  Vectorization_multiply_factor(Number *const val, const Number factor)
299  : val(val)
300  , factor(factor)
301  {}
302 
303  void
304  operator()(const size_type begin, const size_type end) const
305  {
306  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
307  {
308  DEAL_II_OPENMP_SIMD_PRAGMA
309  for (size_type i = begin; i < end; ++i)
310  val[i] *= factor;
311  }
312  else
313  {
314  for (size_type i = begin; i < end; ++i)
315  val[i] *= factor;
316  }
317  }
318 
319  Number *const val;
320  const Number factor;
321  };
322 
323  template <typename Number>
324  struct Vectorization_add_av
325  {
326  Vectorization_add_av(Number *const val,
327  const Number *const v_val,
328  const Number factor)
329  : val(val)
330  , v_val(v_val)
331  , factor(factor)
332  {}
333 
334  void
335  operator()(const size_type begin, const size_type end) const
336  {
337  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
338  {
339  DEAL_II_OPENMP_SIMD_PRAGMA
340  for (size_type i = begin; i < end; ++i)
341  val[i] += factor * v_val[i];
342  }
343  else
344  {
345  for (size_type i = begin; i < end; ++i)
346  val[i] += factor * v_val[i];
347  }
348  }
349 
350  Number *const val;
351  const Number *const v_val;
352  const Number factor;
353  };
354 
355  template <typename Number>
356  struct Vectorization_sadd_xav
357  {
358  Vectorization_sadd_xav(Number * val,
359  const Number *const v_val,
360  const Number a,
361  const Number x)
362  : val(val)
363  , v_val(v_val)
364  , a(a)
365  , x(x)
366  {}
367 
368  void
369  operator()(const size_type begin, const size_type end) const
370  {
371  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
372  {
373  DEAL_II_OPENMP_SIMD_PRAGMA
374  for (size_type i = begin; i < end; ++i)
375  val[i] = x * val[i] + a * v_val[i];
376  }
377  else
378  {
379  for (size_type i = begin; i < end; ++i)
380  val[i] = x * val[i] + a * v_val[i];
381  }
382  }
383 
384  Number *const val;
385  const Number *const v_val;
386  const Number a;
387  const Number x;
388  };
389 
390  template <typename Number>
391  struct Vectorization_subtract_v
392  {
393  Vectorization_subtract_v(Number *val, const Number *const v_val)
394  : val(val)
395  , v_val(v_val)
396  {}
397 
398  void
399  operator()(const size_type begin, const size_type end) const
400  {
401  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
402  {
403  DEAL_II_OPENMP_SIMD_PRAGMA
404  for (size_type i = begin; i < end; ++i)
405  val[i] -= v_val[i];
406  }
407  else
408  {
409  for (size_type i = begin; i < end; ++i)
410  val[i] -= v_val[i];
411  }
412  }
413 
414  Number *const val;
415  const Number *const v_val;
416  };
417 
418  template <typename Number>
419  struct Vectorization_add_factor
420  {
421  Vectorization_add_factor(Number *const val, const Number factor)
422  : val(val)
423  , factor(factor)
424  {}
425 
426  void
427  operator()(const size_type begin, const size_type end) const
428  {
429  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
430  {
431  DEAL_II_OPENMP_SIMD_PRAGMA
432  for (size_type i = begin; i < end; ++i)
433  val[i] += factor;
434  }
435  else
436  {
437  for (size_type i = begin; i < end; ++i)
438  val[i] += factor;
439  }
440  }
441 
442  Number *const val;
443  const Number factor;
444  };
445 
446  template <typename Number>
447  struct Vectorization_add_v
448  {
449  Vectorization_add_v(Number *const val, const Number *const v_val)
450  : val(val)
451  , v_val(v_val)
452  {}
453 
454  void
455  operator()(const size_type begin, const size_type end) const
456  {
457  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
458  {
459  DEAL_II_OPENMP_SIMD_PRAGMA
460  for (size_type i = begin; i < end; ++i)
461  val[i] += v_val[i];
462  }
463  else
464  {
465  for (size_type i = begin; i < end; ++i)
466  val[i] += v_val[i];
467  }
468  }
469 
470  Number *const val;
471  const Number *const v_val;
472  };
473 
474  template <typename Number>
475  struct Vectorization_add_avpbw
476  {
477  Vectorization_add_avpbw(Number *const val,
478  const Number *const v_val,
479  const Number *const w_val,
480  const Number a,
481  const Number b)
482  : val(val)
483  , v_val(v_val)
484  , w_val(w_val)
485  , a(a)
486  , b(b)
487  {}
488 
489  void
490  operator()(const size_type begin, const size_type end) const
491  {
492  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
493  {
494  DEAL_II_OPENMP_SIMD_PRAGMA
495  for (size_type i = begin; i < end; ++i)
496  val[i] = val[i] + a * v_val[i] + b * w_val[i];
497  }
498  else
499  {
500  for (size_type i = begin; i < end; ++i)
501  val[i] = val[i] + a * v_val[i] + b * w_val[i];
502  }
503  }
504 
505  Number *const val;
506  const Number *const v_val;
507  const Number *const w_val;
508  const Number a;
509  const Number b;
510  };
511 
512  template <typename Number>
513  struct Vectorization_sadd_xv
514  {
515  Vectorization_sadd_xv(Number *const val,
516  const Number *const v_val,
517  const Number x)
518  : val(val)
519  , v_val(v_val)
520  , x(x)
521  {}
522 
523  void
524  operator()(const size_type begin, const size_type end) const
525  {
526  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
527  {
528  DEAL_II_OPENMP_SIMD_PRAGMA
529  for (size_type i = begin; i < end; ++i)
530  val[i] = x * val[i] + v_val[i];
531  }
532  else
533  {
534  for (size_type i = begin; i < end; ++i)
535  val[i] = x * val[i] + v_val[i];
536  }
537  }
538 
539  Number *const val;
540  const Number *const v_val;
541  const Number x;
542  };
543 
544  template <typename Number>
545  struct Vectorization_sadd_xavbw
546  {
547  Vectorization_sadd_xavbw(Number * val,
548  const Number *v_val,
549  const Number *w_val,
550  Number x,
551  Number a,
552  Number b)
553  : val(val)
554  , v_val(v_val)
555  , w_val(w_val)
556  , x(x)
557  , a(a)
558  , b(b)
559  {}
560 
561  void
562  operator()(const size_type begin, const size_type end) const
563  {
564  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
565  {
566  DEAL_II_OPENMP_SIMD_PRAGMA
567  for (size_type i = begin; i < end; ++i)
568  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
569  }
570  else
571  {
572  for (size_type i = begin; i < end; ++i)
573  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
574  }
575  }
576 
577  Number *const val;
578  const Number *const v_val;
579  const Number *const w_val;
580  const Number x;
581  const Number a;
582  const Number b;
583  };
584 
585  template <typename Number>
586  struct Vectorization_scale
587  {
588  Vectorization_scale(Number *const val, const Number *const v_val)
589  : val(val)
590  , v_val(v_val)
591  {}
592 
593  void
594  operator()(const size_type begin, const size_type end) const
595  {
596  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
597  {
598  DEAL_II_OPENMP_SIMD_PRAGMA
599  for (size_type i = begin; i < end; ++i)
600  val[i] *= v_val[i];
601  }
602  else
603  {
604  for (size_type i = begin; i < end; ++i)
605  val[i] *= v_val[i];
606  }
607  }
608 
609  Number *const val;
610  const Number *const v_val;
611  };
612 
613  template <typename Number>
614  struct Vectorization_equ_au
615  {
616  Vectorization_equ_au(Number *const val,
617  const Number *const u_val,
618  const Number a)
619  : val(val)
620  , u_val(u_val)
621  , a(a)
622  {}
623 
624  void
625  operator()(const size_type begin, const size_type end) const
626  {
627  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
628  {
629  DEAL_II_OPENMP_SIMD_PRAGMA
630  for (size_type i = begin; i < end; ++i)
631  val[i] = a * u_val[i];
632  }
633  else
634  {
635  for (size_type i = begin; i < end; ++i)
636  val[i] = a * u_val[i];
637  }
638  }
639 
640  Number *const val;
641  const Number *const u_val;
642  const Number a;
643  };
644 
645  template <typename Number>
646  struct Vectorization_equ_aubv
647  {
648  Vectorization_equ_aubv(Number *const val,
649  const Number *const u_val,
650  const Number *const v_val,
651  const Number a,
652  const Number b)
653  : val(val)
654  , u_val(u_val)
655  , v_val(v_val)
656  , a(a)
657  , b(b)
658  {}
659 
660  void
661  operator()(const size_type begin, const size_type end) const
662  {
663  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
664  {
665  DEAL_II_OPENMP_SIMD_PRAGMA
666  for (size_type i = begin; i < end; ++i)
667  val[i] = a * u_val[i] + b * v_val[i];
668  }
669  else
670  {
671  for (size_type i = begin; i < end; ++i)
672  val[i] = a * u_val[i] + b * v_val[i];
673  }
674  }
675 
676  Number *const val;
677  const Number *const u_val;
678  const Number *const v_val;
679  const Number a;
680  const Number b;
681  };
682 
683  template <typename Number>
684  struct Vectorization_equ_aubvcw
685  {
686  Vectorization_equ_aubvcw(Number * val,
687  const Number *u_val,
688  const Number *v_val,
689  const Number *w_val,
690  const Number a,
691  const Number b,
692  const Number c)
693  : val(val)
694  , u_val(u_val)
695  , v_val(v_val)
696  , w_val(w_val)
697  , a(a)
698  , b(b)
699  , c(c)
700  {}
701 
702  void
703  operator()(const size_type begin, const size_type end) const
704  {
705  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
706  {
707  DEAL_II_OPENMP_SIMD_PRAGMA
708  for (size_type i = begin; i < end; ++i)
709  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
710  }
711  else
712  {
713  for (size_type i = begin; i < end; ++i)
714  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
715  }
716  }
717 
718  Number *const val;
719  const Number *const u_val;
720  const Number *const v_val;
721  const Number *const w_val;
722  const Number a;
723  const Number b;
724  const Number c;
725  };
726 
727  template <typename Number>
728  struct Vectorization_ratio
729  {
730  Vectorization_ratio(Number *val, const Number *a_val, const Number *b_val)
731  : val(val)
732  , a_val(a_val)
733  , b_val(b_val)
734  {}
735 
736  void
737  operator()(const size_type begin, const size_type end) const
738  {
739  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
740  {
741  DEAL_II_OPENMP_SIMD_PRAGMA
742  for (size_type i = begin; i < end; ++i)
743  val[i] = a_val[i] / b_val[i];
744  }
745  else
746  {
747  for (size_type i = begin; i < end; ++i)
748  val[i] = a_val[i] / b_val[i];
749  }
750  }
751 
752  Number *const val;
753  const Number *const a_val;
754  const Number *const b_val;
755  };
756 
757 
758 
759  // All sums over all the vector entries (l2-norm, inner product, etc.) are
760  // performed with the same code, using a templated operation defined
761  // here. There are always two versions defined, a standard one that covers
762  // most cases and a vectorized one which is only for equal types and float
763  // and double.
764  template <typename Number, typename Number2>
765  struct Dot
766  {
767  static const bool vectorizes =
768  std::is_same<Number, Number2>::value &&
769  (VectorizedArray<Number>::n_array_elements > 1);
770 
771  Dot(const Number *const X, const Number2 *const Y)
772  : X(X)
773  , Y(Y)
774  {}
775 
776  Number
777  operator()(const size_type i) const
778  {
779  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
780  }
781 
782  VectorizedArray<Number>
783  do_vectorized(const size_type i) const
784  {
785  VectorizedArray<Number> x, y;
786  x.load(X + i);
787  y.load(Y + i);
788 
789  // the following operation in VectorizedArray does an element-wise
790  // scalar product without taking into account complex values and
791  // the need to take the complex-conjugate of one argument. this
792  // may be a bug, but because all VectorizedArray classes only
793  // work on real scalars, it doesn't really matter very much.
794  // in any case, assert that we really don't get here for
795  // complex-valued objects
796  static_assert(numbers::NumberTraits<Number>::is_complex == false,
797  "This operation is not correctly implemented for "
798  "complex-valued objects.");
799  return x * y;
800  }
801 
802  const Number *const X;
803  const Number2 *const Y;
804  };
805 
806  template <typename Number, typename RealType>
807  struct Norm2
808  {
809  static const bool vectorizes =
810  VectorizedArray<Number>::n_array_elements > 1;
811 
812  Norm2(const Number *const X)
813  : X(X)
814  {}
815 
816  RealType
817  operator()(const size_type i) const
818  {
819  return numbers::NumberTraits<Number>::abs_square(X[i]);
820  }
821 
822  VectorizedArray<Number>
823  do_vectorized(const size_type i) const
824  {
825  VectorizedArray<Number> x;
826  x.load(X + i);
827  return x * x;
828  }
829 
830  const Number *const X;
831  };
832 
833  template <typename Number, typename RealType>
834  struct Norm1
835  {
836  static const bool vectorizes =
837  VectorizedArray<Number>::n_array_elements > 1;
838 
839  Norm1(const Number *X)
840  : X(X)
841  {}
842 
843  RealType
844  operator()(const size_type i) const
845  {
846  return numbers::NumberTraits<Number>::abs(X[i]);
847  }
848 
849  VectorizedArray<Number>
850  do_vectorized(const size_type i) const
851  {
852  VectorizedArray<Number> x;
853  x.load(X + i);
854  return std::abs(x);
855  }
856 
857  const Number *X;
858  };
859 
860  template <typename Number, typename RealType>
861  struct NormP
862  {
863  static const bool vectorizes =
864  VectorizedArray<Number>::n_array_elements > 1;
865 
866  NormP(const Number *X, RealType p)
867  : X(X)
868  , p(p)
869  {}
870 
871  RealType
872  operator()(const size_type i) const
873  {
874  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
875  }
876 
877  VectorizedArray<Number>
878  do_vectorized(const size_type i) const
879  {
880  VectorizedArray<Number> x;
881  x.load(X + i);
882  return std::pow(std::abs(x), p);
883  }
884 
885  const Number * X;
886  const RealType p;
887  };
888 
889  template <typename Number>
890  struct MeanValue
891  {
892  static const bool vectorizes =
893  VectorizedArray<Number>::n_array_elements > 1;
894 
895  MeanValue(const Number *X)
896  : X(X)
897  {}
898 
899  Number
900  operator()(const size_type i) const
901  {
902  return X[i];
903  }
904 
905  VectorizedArray<Number>
906  do_vectorized(const size_type i) const
907  {
908  VectorizedArray<Number> x;
909  x.load(X + i);
910  return x;
911  }
912 
913  const Number *X;
914  };
915 
916  template <typename Number>
917  struct AddAndDot
918  {
919  static const bool vectorizes =
920  VectorizedArray<Number>::n_array_elements > 1;
921 
922  AddAndDot(Number *const X,
923  const Number *const V,
924  const Number *const W,
925  const Number a)
926  : X(X)
927  , V(V)
928  , W(W)
929  , a(a)
930  {}
931 
932  Number
933  operator()(const size_type i) const
934  {
935  X[i] += a * V[i];
936  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
937  }
938 
939  VectorizedArray<Number>
940  do_vectorized(const size_type i) const
941  {
942  VectorizedArray<Number> x, w, v;
943  x.load(X + i);
944  v.load(V + i);
945  x += a * v;
946  x.store(X + i);
947  // may only load from W after storing in X because the pointers might
948  // point to the same memory
949  w.load(W + i);
950 
951  // the following operation in VectorizedArray does an element-wise
952  // scalar product without taking into account complex values and
953  // the need to take the complex-conjugate of one argument. this
954  // may be a bug, but because all VectorizedArray classes only
955  // work on real scalars, it doesn't really matter very much.
956  // in any case, assert that we really don't get here for
957  // complex-valued objects
958  static_assert(numbers::NumberTraits<Number>::is_complex == false,
959  "This operation is not correctly implemented for "
960  "complex-valued objects.");
961  return x * w;
962  }
963 
964  Number *const X;
965  const Number *const V;
966  const Number *const W;
967  const Number a;
968  };
969 
970 
971 
972  // this is the main working loop for all vector sums using the templated
973  // operation above. it accumulates the sums using a block-wise summation
974  // algorithm with post-update. this blocked algorithm has been proposed in
975  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
976  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
977  // block size, 2. Sometimes it is referred to as pairwise summation. The
978  // worst case error made by this algorithm is on the order O(eps *
979  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
980  // though the Kahan summation is even more accurate with an error O(eps)
981  // by carrying along remainders not captured by the main sum, that involves
982  // additional costs which are not worthwhile. See the Wikipedia article on
983  // the Kahan summation algorithm.
984 
985  // The algorithm implemented here has the additional benefit that it is
986  // easily parallelized without changing the order of how the elements are
987  // added (floating point addition is not associative). For the same vector
988  // size and minimum_parallel_grain_size, the blocks are always the
989  // same and added pairwise.
990 
991  // The depth of recursion is controlled by the 'magic' parameter
992  // vector_accumulation_recursion_threshold: If the length is below
993  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
994  // unroll), a straight loop instead of recursion will be used. At the
995  // innermost level, eight values are added consecutively in order to better
996  // balance multiplications and additions.
997 
998  // Loops are unrolled as follows: the range [first,last) is broken into
999  // @p n_chunks each of size 32 plus the @p remainder.
1000  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
1001  // if possible and stores the result of the operation for each chunk in @p outer_results.
1002 
1003  // The code returns the result as the last argument in order to make
1004  // spawning tasks simpler and use automatic template deduction.
1005 
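As a concrete illustration of the pairwise strategy described above (editorial example): a naive left-to-right sum of eight values evaluates

  ((((((a0 + a1) + a2) + a3) + a4) + a5) + a6) + a7,

so a0 passes through seven roundings, whereas the blocked scheme with block size 2 evaluates

  ((a0 + a1) + (a2 + a3)) + ((a4 + a5) + (a6 + a7)),

in which every value passes through only log2(8) = 3 roundings. The same tree-shaped combination is applied below to the per-chunk results stored in outer_results[].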
1006 
1012  const unsigned int vector_accumulation_recursion_threshold = 128;
1013 
1014  template <typename Operation, typename ResultType>
1015  void
1016  accumulate_recursive(const Operation &op,
1017  const size_type first,
1018  const size_type last,
1019  ResultType & result)
1020  {
1021  const size_type vec_size = last - first;
1022  if (vec_size <= vector_accumulation_recursion_threshold * 32)
1023  {
1024  // the vector is short enough so we perform the summation. first
1025  // work on the regular part. The innermost 32 values are expanded in
1026  // order to obtain known loop bounds for most of the work.
1027  size_type index = first;
1028  ResultType outer_results[vector_accumulation_recursion_threshold];
1029 
1030  // set the zeroth element to zero to correctly handle the case where
1031  // vec_size == 0
1032  outer_results[0] = ResultType();
1033 
1034  // the variable serves two purposes: (i) number of chunks (each 32
1035  // indices) for the given size; all results are stored in
1036  // outer_results[0,n_chunks) (ii) in the SIMD case n_chunks is also a
1037  // next free index in outer_results[] to which we can write after
1038  // accumulate_regular() is executed.
1039  size_type n_chunks = vec_size / 32;
1040  const size_type remainder = vec_size % 32;
1041  Assert(remainder == 0 ||
1042  n_chunks < vector_accumulation_recursion_threshold,
1043  ExcInternalError());
1044 
1045  // Select between the regular version and vectorized version based
1046  // on the number types we are given. To choose the vectorized
1047  // version often enough, we need to have all tasks but the last one
1048  // to be divisible by the vectorization length
1049  accumulate_regular(
1050  op,
1051  n_chunks,
1052  index,
1053  outer_results,
1054  std::integral_constant<bool, Operation::vectorizes>());
1055 
1056  // now work on the remainder, i.e., the last up to 32 values. Use
1057  // switch statement with fall-through to work on these values.
1058  if (remainder > 0)
1059  {
1060  // if we got here, it means that (vec_size <=
1061  // vector_accumulation_recursion_threshold * 32), which is to say
1062  // that the domain can be split into n_chunks <=
1063  // vector_accumulation_recursion_threshold:
1064  AssertIndexRange(n_chunks,
1065  vector_accumulation_recursion_threshold + 1);
1066  // split the remainder into chunks of 8, there could be up to 3
1067  // such chunks since remainder < 32.
1068  // Work on those chunks without any SIMD, that is we call
1069  // op(index).
1070  const size_type inner_chunks = remainder / 8;
1071  Assert(inner_chunks <= 3, ExcInternalError());
1072  const size_type remainder_inner = remainder % 8;
1073  ResultType r0 = ResultType(), r1 = ResultType(),
1074  r2 = ResultType();
1075  switch (inner_chunks)
1076  {
1077  case 3:
1078  r2 = op(index++);
1079  for (size_type j = 1; j < 8; ++j)
1080  r2 += op(index++);
1081  DEAL_II_FALLTHROUGH;
1082  case 2:
1083  r1 = op(index++);
1084  for (size_type j = 1; j < 8; ++j)
1085  r1 += op(index++);
1086  r1 += r2;
1087  DEAL_II_FALLTHROUGH;
1088  case 1:
1089  r2 = op(index++);
1090  for (size_type j = 1; j < 8; ++j)
1091  r2 += op(index++);
1092  DEAL_II_FALLTHROUGH;
1093  default:
1094  for (size_type j = 0; j < remainder_inner; ++j)
1095  r0 += op(index++);
1096  r0 += r2;
1097  r0 += r1;
1098  if (n_chunks == vector_accumulation_recursion_threshold)
1099  outer_results[vector_accumulation_recursion_threshold -
1100  1] += r0;
1101  else
1102  {
1103  outer_results[n_chunks] = r0;
1104  n_chunks++;
1105  }
1106  break;
1107  }
1108  }
1109  // make sure we worked through all indices
1110  AssertDimension(index, last);
1111 
1112  // now sum the results from the chunks stored in
1113  // outer_results[0,n_chunks) recursively
1114  while (n_chunks > 1)
1115  {
1116  if (n_chunks % 2 == 1)
1117  outer_results[n_chunks++] = ResultType();
1118  for (size_type i = 0; i < n_chunks; i += 2)
1119  outer_results[i / 2] = outer_results[i] + outer_results[i + 1];
1120  n_chunks /= 2;
1121  }
1122  result = outer_results[0];
1123  }
1124  else
1125  {
1126  // split vector into four pieces and work on the pieces
1127  // recursively. Make pieces (except last) divisible by one fourth the
1128  // recursion threshold.
1129  const size_type new_size =
1130  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1131  vector_accumulation_recursion_threshold * 8;
1132  Assert(first + 3 * new_size < last, ExcInternalError());
1133  ResultType r0, r1, r2, r3;
1134  accumulate_recursive(op, first, first + new_size, r0);
1135  accumulate_recursive(op, first + new_size, first + 2 * new_size, r1);
1136  accumulate_recursive(op,
1137  first + 2 * new_size,
1138  first + 3 * new_size,
1139  r2);
1140  accumulate_recursive(op, first + 3 * new_size, last, r3);
1141  r0 += r1;
1142  r2 += r3;
1143  result = r0 + r2;
1144  }
1145  }
1146 
1147 
1148  // this is the inner working routine for the accumulation loops
1149  // below. This is the standard case where the loop bounds are known. We
1150  // pulled this function out of the regular accumulate routine because we
1151  // might do this thing vectorized (see specialized function below)
1152  template <typename Operation, typename ResultType>
1153  void
1154  accumulate_regular(
1155  const Operation &op,
1156  const size_type &n_chunks,
1157  size_type & index,
1158  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1159  std::integral_constant<bool, false>)
1160  {
1161  // note that each chunk is chosen to have a width of 32, thereby the index
1162  // is incremented by 4*8 for each @p i.
1163  for (size_type i = 0; i < n_chunks; ++i)
1164  {
1165  ResultType r0 = op(index);
1166  ResultType r1 = op(index + 1);
1167  ResultType r2 = op(index + 2);
1168  ResultType r3 = op(index + 3);
1169  index += 4;
1170  for (size_type j = 1; j < 8; ++j, index += 4)
1171  {
1172  r0 += op(index);
1173  r1 += op(index + 1);
1174  r2 += op(index + 2);
1175  r3 += op(index + 3);
1176  }
1177  r0 += r1;
1178  r2 += r3;
1179  outer_results[i] = r0 + r2;
1180  }
1181  }
1182 
1183 
1184 
1185  // this is the inner working routine for the accumulation loops
1186  // below. This is the specialized case where the loop bounds are known and
1187  // where we can vectorize. In that case, we request the 'do_vectorized'
1188  // routine of the operation instead of the regular one which does several
1189  // operations at once.
1190  template <typename Operation, typename Number>
1191  void
1192  accumulate_regular(
1193  const Operation &op,
1194  size_type & n_chunks,
1195  size_type & index,
1196  Number (&outer_results)[vector_accumulation_recursion_threshold],
1197  std::integral_constant<bool, true>)
1198  {
1199  // we start from @p index and work out @p n_chunks each of size 32.
1200  // in order to employ SIMD and work on @p nvecs at a time, we split this
1201  // loop yet again:
1202  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1203  // nvecs*(4*8) elements.
1204 
1205  const unsigned int nvecs = VectorizedArray<Number>::n_array_elements;
1206  const size_type regular_chunks = n_chunks / nvecs;
1207  for (size_type i = 0; i < regular_chunks; ++i)
1208  {
1209  VectorizedArray<Number> r0 = op.do_vectorized(index);
1210  VectorizedArray<Number> r1 = op.do_vectorized(index + nvecs);
1211  VectorizedArray<Number> r2 = op.do_vectorized(index + 2 * nvecs);
1212  VectorizedArray<Number> r3 = op.do_vectorized(index + 3 * nvecs);
1213  index += nvecs * 4;
1214  for (size_type j = 1; j < 8; ++j, index += nvecs * 4)
1215  {
1216  r0 += op.do_vectorized(index);
1217  r1 += op.do_vectorized(index + nvecs);
1218  r2 += op.do_vectorized(index + 2 * nvecs);
1219  r3 += op.do_vectorized(index + 3 * nvecs);
1220  }
1221  r0 += r1;
1222  r2 += r3;
1223  r0 += r2;
1224  r0.store(
1225  &outer_results[i * VectorizedArray<Number>::n_array_elements]);
1226  }
1227 
1228  // If we are treating a case where the vector length is not divisible by
1229  // the vectorization length, we need a cleanup loop.
1230  // The remaining chunks are processed one by one starting from
1231  // regular_chunks * nvecs; We do as much as possible with 2 SIMD
1232  // operations within each chunk. Here we assume that nvecs < 32/2 = 16 as
1233  // well as 16%nvecs==0.
1234  AssertIndexRange(VectorizedArray<Number>::n_array_elements, 17);
1235  Assert(16 % nvecs == 0, ExcInternalError());
1236  if (n_chunks % VectorizedArray<Number>::n_array_elements != 0)
1237  {
1238  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
1239  r1 = VectorizedArray<Number>();
1240  const size_type start_irreg = regular_chunks * nvecs;
1241  for (size_type c = start_irreg; c < n_chunks; ++c)
1242  for (size_type j = 0; j < 32; j += 2 * nvecs, index += 2 * nvecs)
1243  {
1244  r0 += op.do_vectorized(index);
1245  r1 += op.do_vectorized(index + nvecs);
1246  }
1247  r0 += r1;
1248  r0.store(&outer_results[start_irreg]);
1249  // update n_chunks to denote unused element in outer_results[] from
1250  // which we can keep writing.
1251  n_chunks = start_irreg + VectorizedArray<Number>::n_array_elements;
1252  }
1253  }
1254 
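To make the index bookkeeping of the vectorized path concrete, consider a hypothetical case with nvecs == 4 lanes and n_chunks == 10 on entry (illustrative numbers only): regular_chunks = 10 / 4 = 2, so the main loop runs twice, consumes 2 * 4 * 32 = 256 elements and fills outer_results[0..7] with eight partial sums. Because 10 % 4 != 0, the cleanup loop then sweeps chunks 8 and 9 (another 64 elements) into a single VectorizedArray, stores its four lanes at outer_results[8..11] and sets n_chunks = 8 + 4 = 12, which is the number of partial results the caller subsequently reduces.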
1255 
1256 
1257 #ifdef DEAL_II_WITH_THREADS
1258 
1286  template <typename Operation, typename ResultType>
1287  struct TBBReduceFunctor
1288  {
1289  static const unsigned int threshold_array_allocate = 512;
1290 
1291  TBBReduceFunctor(const Operation &op,
1292  const size_type start,
1293  const size_type end)
1294  : op(op)
1295  , start(start)
1296  , end(end)
1297  {
1298  const size_type vec_size = end - start;
1299  // set chunk size for sub-tasks
1300  const unsigned int gs =
1301  internal::VectorImplementation::minimum_parallel_grain_size;
1302  n_chunks =
1303  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
1304  vec_size / gs);
1305  chunk_size = vec_size / n_chunks;
1306 
1307  // round to next multiple of 512 (or leave it at the minimum grain size
1308  // if that happens to be smaller). this is advantageous because our
1309  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1310  // at most one 'oddly' sized chunk
1311  if (chunk_size > 512)
1312  chunk_size = ((chunk_size + 511) / 512) * 512;
1313  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1314  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
1315  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
1316 
1317  if (n_chunks > threshold_array_allocate)
1318  {
1319  // make sure we allocate an even number of elements,
1320  // access to the new last element is needed in do_sum()
1321  large_array.resize(2 * ((n_chunks + 1) / 2));
1322  array_ptr = large_array.data();
1323  }
1324  else
1325  array_ptr = &small_array[0];
1326  }
1327 
1332  void
1333  operator()(const tbb::blocked_range<size_type> &range) const
1334  {
1335  for (size_type i = range.begin(); i < range.end(); ++i)
1336  accumulate_recursive(op,
1337  start + i * chunk_size,
1338  std::min(start + (i + 1) * chunk_size, end),
1339  array_ptr[i]);
1340  }
1341 
1342  ResultType
1343  do_sum() const
1344  {
1345  while (n_chunks > 1)
1346  {
1347  if (n_chunks % 2 == 1)
1348  array_ptr[n_chunks++] = ResultType();
1349  for (size_type i = 0; i < n_chunks; i += 2)
1350  array_ptr[i / 2] = array_ptr[i] + array_ptr[i + 1];
1351  n_chunks /= 2;
1352  }
1353  return array_ptr[0];
1354  }
1355 
1356  const Operation &op;
1357  const size_type start;
1358  const size_type end;
1359 
1360  mutable unsigned int n_chunks;
1361  unsigned int chunk_size;
1362  ResultType small_array[threshold_array_allocate];
1363  std::vector<ResultType> large_array;
1364  // this variable either points to small_array or large_array depending on
1365  // the number of threads we want to feed
1366  mutable ResultType *array_ptr;
1367  };
1368 #endif
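For orientation (editorial note): if a reduction ends up with n_chunks == 5 partial results, do_sum() pads with a zero entry to get 6, combines pairwise to 3, pads again to 4, then reduces to 2 and finally to 1, so the combination tree stays balanced exactly as in accumulate_recursive() above. This padding write to array_ptr[n_chunks] is also why the constructor allocates an even number of elements for large_array.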
1369 
1370 
1371 
1376  template <typename Operation, typename ResultType>
1377  void
1378  parallel_reduce(
1379  const Operation & op,
1380  const size_type start,
1381  const size_type end,
1382  ResultType & result,
1383  const std::shared_ptr<parallel::internal::TBBPartitioner> &partitioner)
1384  {
1385 #ifdef DEAL_II_WITH_THREADS
1386  const size_type vec_size = end - start;
1387  // only go to the parallel function in case there are at least 4 parallel
1388  // items, otherwise the overhead is too large
1389  if (vec_size >=
1390  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
1391  MultithreadInfo::n_threads() > 1)
1392  {
1393  Assert(partitioner.get() != nullptr,
1394  ExcInternalError(
1395  "Unexpected initialization of Vector that does "
1396  "not set the TBB partitioner to a usable state."));
1397  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1398  partitioner->acquire_one_partitioner();
1399 
1400  TBBReduceFunctor<Operation, ResultType> generic_functor(op,
1401  start,
1402  end);
1403  // We use a minimum grain size of 1 here since the grains at this
1404  // stage of dividing the work refer to the number of vector chunks
1405  // that are processed by (possibly different) threads in the
1406  // parallelized for loop (i.e., they do not refer to individual
1407  // vector entries). The number of chunks here is calculated inside
1408  // TBBForFunctor. See also GitHub issue #2496 for further discussion
1409  // of this strategy.
1410  parallel::internal::parallel_for(static_cast<size_type>(0),
1411  static_cast<size_type>(
1412  generic_functor.n_chunks),
1413  generic_functor,
1414  1,
1415  tbb_partitioner);
1416  partitioner->release_one_partitioner(tbb_partitioner);
1417  result = generic_functor.do_sum();
1418  }
1419  else
1420  accumulate_recursive(op, start, end, result);
1421 #else
1422  accumulate_recursive(op, start, end, result);
1423  (void)partitioner;
1424 #endif
1425  }
1426 
1427 
1428  template <typename Number, typename Number2, typename MemorySpace>
1429  struct functions
1430  {
1431  static void
1432  copy(
1433  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1434  /*thread_loop_partitioner*/,
1435  const size_type /*size*/,
1436  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1437  & /*v_data*/,
1439  {
1440  static_assert(
1441  std::is_same<MemorySpace, ::MemorySpace::CUDA>::value &&
1442  std::is_same<Number, Number2>::value,
1443  "For the CUDA MemorySpace Number and Number2 should be the same type");
1444  }
1445 
1446  static void
1447  set(
1448  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1449  /*thread_loop_partitioner*/,
1450  const size_type /*size*/,
1451  const Number /*s*/,
1453  {}
1454 
1455  static void
1456  add_vector(
1457  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1458  /*thread_loop_partitioner*/,
1459  const size_type /*size*/,
1460  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1461  & /*v_data*/,
1463  {}
1464 
1465  static void
1466  subtract_vector(
1467  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1468  /*thread_loop_partitioner*/,
1469  const size_type /*size*/,
1470  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1471  & /*v_data*/,
1473  {}
1474 
1475  static void
1476  add_factor(
1477  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1478  /*thread_loop_partitioner*/,
1479  const size_type /*size*/,
1480  Number /*a*/,
1482  {}
1483 
1484  static void
1485  add_av(
1486  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1487  /*thread_loop_partitioner*/,
1488  const size_type /*size*/,
1489  const Number /*a*/,
1490  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1491  & /*v_data*/,
1493  {}
1494 
1495  static void
1496  add_avpbw(
1497  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1498  /*thread_loop_partitioner*/,
1499  const size_type /*size*/,
1500  const Number /*a*/,
1501  const Number /*b*/,
1502  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1503  & /*v_data*/,
1504  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1505  & /*w_data*/,
1507  {}
1508 
1509  static void
1510  sadd_xv(
1511  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1512  /*thread_loop_partitioner*/,
1513  const size_type /*size*/,
1514  const Number /*x*/,
1515  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1516  & /*v_data*/,
1518  {}
1519 
1520  static void
1521  sadd_xav(
1522  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1523  /*thread_loop_partitioner*/,
1524  const size_type /*size*/,
1525  const Number /*x*/,
1526  const Number /*a*/,
1527  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1528  & /*v_data*/,
1530  {}
1531 
1532  static void
1533  sadd_xavbw(
1534  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1535  /*thread_loop_partitioner*/,
1536  const size_type /*size*/,
1537  const Number /*x*/,
1538  const Number /*a*/,
1539  const Number /*b*/,
1540  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1541  & /*v_data*/,
1542  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1543  & /*w_data*/,
1545  {}
1546 
1547  static void
1548  multiply_factor(
1549  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1550  /*thread_loop_partitioner*/,
1551  const size_type /*size*/,
1552  const Number /*factor*/,
1554  {}
1555 
1556  static void
1557  scale(
1558  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1559  /*thread_loop_partitioner*/,
1560  const size_type /*size*/,
1561  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1562  & /*v_data*/,
1564  {}
1565 
1566  static void
1567  equ_au(
1568  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1569  /*thread_loop_partitioner*/,
1570  const size_type /*size*/,
1571  const Number /*a*/,
1572  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1573  & /*v_data*/,
1575  {}
1576 
1577  static void
1578  equ_aubv(
1579  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1580  /*thread_loop_partitioner*/,
1581  const size_type /*size*/,
1582  const Number /*a*/,
1583  const Number /*b*/,
1584  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1585  & /*v_data*/,
1586  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1587  & /*w_data*/,
1589  {}
1590 
1591  static Number
1592  dot(
1593  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1594  /*thread_loop_partitioner*/,
1595  const size_type /*size*/,
1596  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1597  & /*v_data*/,
1599  {
1600  return Number();
1601  }
1602 
1603  template <typename real_type>
1604  static void
1605  norm_2(
1606  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1607  /*thread_loop_partitioner*/,
1608  const size_type /*size*/,
1609  real_type & /*sum*/,
1610  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1611  & /*v_data*/,
1613  {}
1614 
1615  static Number
1616  mean_value(
1617  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1618  /*thread_loop_partitioner*/,
1619  const size_type /*size*/,
1620  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1621  & /*data*/)
1622  {
1623  return Number();
1624  }
1625 
1626  template <typename real_type>
1627  static void
1628  norm_1(
1629  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1630  /*thread_loop_partitioner*/,
1631  const size_type /*size*/,
1632  real_type & /*sum*/,
1633  Number * /*values*/,
1634  Number * /*values_dev*/)
1635  {}
1636 
1637  template <typename real_type>
1638  static void
1639  norm_p(
1640  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1641  /*thread_loop_partitioner*/,
1642  const size_type /*size*/,
1643  real_type & /*sum*/,
1644  real_type /*p*/,
1646  {}
1647 
1648  static Number
1649  add_and_dot(
1650  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1651  /*thread_loop_partitioner*/,
1652  const size_type /*size*/,
1653  const Number /*a*/,
1654  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1655  & /*v_data*/,
1656  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1657  & /*w_data*/,
1659  {
1660  return Number();
1661  }
1662 
1663  template <typename MemorySpace2>
1664  static void
1665  import(
1666  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1667  /*thread_loop_partitioner*/,
1668  const size_type /*size*/,
1669  VectorOperation::values /*operation*/,
1670  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
1671  & /*v_data*/,
1673  {}
1674  };
1675 
1676 
1677 
1678  template <typename Number, typename Number2>
1679  struct functions<Number, Number2, ::MemorySpace::Host>
1680  {
1681  static void
1682  copy(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1683  & thread_loop_partitioner,
1684  const size_type size,
1685  const ::MemorySpace::
1686  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1689  &data)
1690  {
1691  Vector_copy<Number, Number2> copier(v_data.values.get(),
1692  data.values.get());
1693  parallel_for(copier, 0, size, thread_loop_partitioner);
1694  }
1695 
1696  static void
1697  set(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1698  & thread_loop_partitioner,
1699  const size_type size,
1700  const Number s,
1703  &data)
1704  {
1705  Vector_set<Number> setter(s, data.values.get());
1706  parallel_for(setter, 0, size, thread_loop_partitioner);
1707  }
1708 
1709  static void
1710  add_vector(
1711  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1712  & thread_loop_partitioner,
1713  const size_type size,
1714  const ::MemorySpace::
1715  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1718  &data)
1719  {
1720  Vectorization_add_v<Number> vector_add(data.values.get(),
1721  v_data.values.get());
1722  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1723  }
1724 
1725  static void
1726  subtract_vector(
1727  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1728  & thread_loop_partitioner,
1729  const size_type size,
1730  const ::MemorySpace::
1731  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1734  &data)
1735  {
1736  Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
1737  v_data.values.get());
1738  parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
1739  }
1740 
1741  static void
1742  add_factor(
1743  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1744  & thread_loop_partitioner,
1745  const size_type size,
1746  Number a,
1749  &data)
1750  {
1751  Vectorization_add_factor<Number> vector_add(data.values.get(), a);
1752  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1753  }
1754 
1755  static void
1756  add_av(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1757  & thread_loop_partitioner,
1758  const size_type size,
1759  const Number a,
1760  const ::MemorySpace::
1761  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1764  &data)
1765  {
1766  Vectorization_add_av<Number> vector_add(data.values.get(),
1767  v_data.values.get(),
1768  a);
1769  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1770  }
1771 
1772  static void
1773  add_avpbw(
1774  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1775  & thread_loop_partitioner,
1776  const size_type size,
1777  const Number a,
1778  const Number b,
1779  const ::MemorySpace::
1780  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1781  const ::MemorySpace::
1782  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1785  &data)
1786  {
1787  Vectorization_add_avpbw<Number> vector_add(
1788  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1789  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1790  }
1791 
1792  static void
1793  sadd_xv(
1794  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1795  & thread_loop_partitioner,
1796  const size_type size,
1797  const Number x,
1798  const ::MemorySpace::
1799  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1802  &data)
1803  {
1804  Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
1805  v_data.values.get(),
1806  x);
1807  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1808  }
1809 
1810  static void
1811  sadd_xav(
1812  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1813  & thread_loop_partitioner,
1814  const size_type size,
1815  const Number x,
1816  const Number a,
1817  const ::MemorySpace::
1818  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1821  &data)
1822  {
1823  Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
1824  v_data.values.get(),
1825  a,
1826  x);
1827  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1828  }
1829 
1830  static void
1831  sadd_xavbw(
1832  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1833  & thread_loop_partitioner,
1834  const size_type size,
1835  const Number x,
1836  const Number a,
1837  const Number b,
1838  const ::MemorySpace::
1839  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1840  const ::MemorySpace::
1841  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1844  &data)
1845  {
1846  Vectorization_sadd_xavbw<Number> vector_sadd(
1847  data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
1848  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1849  }
1850 
1851  static void
1852  multiply_factor(
1853  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1854  & thread_loop_partitioner,
1855  const size_type size,
1856  const Number factor,
1859  &data)
1860  {
1861  Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
1862  factor);
1863  parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
1864  }
1865 
1866  static void
1867  scale(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1868  & thread_loop_partitioner,
1869  const size_type size,
1870  const ::MemorySpace::
1871  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1874  &data)
1875  {
1876  Vectorization_scale<Number> vector_scale(data.values.get(),
1877  v_data.values.get());
1878  parallel_for(vector_scale, 0, size, thread_loop_partitioner);
1879  }
1880 
1881  static void
1882  equ_au(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1883  & thread_loop_partitioner,
1884  const size_type size,
1885  const Number a,
1886  const ::MemorySpace::
1887  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1890  &data)
1891  {
1892  Vectorization_equ_au<Number> vector_equ(data.values.get(),
1893  v_data.values.get(),
1894  a);
1895  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1896  }
1897 
1898  static void
1899  equ_aubv(
1900  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1901  & thread_loop_partitioner,
1902  const size_type size,
1903  const Number a,
1904  const Number b,
1905  const ::MemorySpace::
1906  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1907  const ::MemorySpace::
1908  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1911  &data)
1912  {
1913  Vectorization_equ_aubv<Number> vector_equ(
1914  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1915  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1916  }
1917 
1918  static Number
1919  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1920  & thread_loop_partitioner,
1921  const size_type size,
1922  const ::MemorySpace::
1923  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1926  &data)
1927  {
1928  Number sum;
1929  ::internal::VectorOperations::Dot<Number, Number2> dot(
1930  data.values.get(), v_data.values.get());
1931  ::internal::VectorOperations::parallel_reduce(
1932  dot, 0, size, sum, thread_loop_partitioner);
1933  AssertIsFinite(sum);
1934 
1935  return sum;
1936  }
1937 
1938  template <typename real_type>
1939  static void
1940  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1941  & thread_loop_partitioner,
1942  const size_type size,
1943  real_type & sum,
1946  &data)
1947  {
1948  Norm2<Number, real_type> norm2(data.values.get());
1949  parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
1950  }
1951 
1952  static Number
1953  mean_value(
1954  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1955  & thread_loop_partitioner,
1956  const size_type size,
1957  const ::MemorySpace::
1958  MemorySpaceData<Number, ::MemorySpace::Host> &data)
1959  {
1960  Number sum;
1961  MeanValue<Number> mean(data.values.get());
1962  parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
1963 
1964  return sum;
1965  }
1966 
1967  template <typename real_type>
1968  static void
1969  norm_1(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1970  & thread_loop_partitioner,
1971  const size_type size,
1972  real_type & sum,
1975  &data)
1976  {
1977  Norm1<Number, real_type> norm1(data.values.get());
1978  parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
1979  }
1980 
1981  template <typename real_type>
1982  static void
1983  norm_p(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1984  & thread_loop_partitioner,
1985  const size_type size,
1986  real_type & sum,
1987  const real_type p,
1990  &data)
1991  {
1992  NormP<Number, real_type> normp(data.values.get(), p);
1993  parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
1994  }
1995 
1996  static Number
1997  add_and_dot(
1998  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1999  & thread_loop_partitioner,
2000  const size_type size,
2001  const Number a,
2002  const ::MemorySpace::
2003  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
2004  const ::MemorySpace::
2005  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
2008  &data)
2009  {
2010  Number sum;
2011  AddAndDot<Number> adder(data.values.get(),
2012  v_data.values.get(),
2013  w_data.values.get(),
2014  a);
2015  parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
2016 
2017  return sum;
2018  }
2019 
2020  template <typename MemorySpace2>
2021  static void
2022  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2023  & thread_loop_partitioner,
2024  const size_type size,
2025  VectorOperation::values operation,
2026  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2027  &v_data,
2030  &data,
2031  typename std::enable_if<
2032  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2033  int>::type = 0)
2034  {
2035  if (operation == VectorOperation::insert)
2036  {
2037  copy(thread_loop_partitioner, size, v_data, data);
2038  }
2039  else if (operation == VectorOperation::add)
2040  {
2041  add_vector(thread_loop_partitioner, size, v_data, data);
2042  }
2043  else
2044  {
2045  AssertThrow(false, ExcNotImplemented());
2046  }
2047  }
2048 
2049 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2050  template <typename MemorySpace2>
2051  static void
2052  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2053  & /*thread_loop_partitioner*/,
2054  const size_type size,
2055  VectorOperation::values operation,
2056  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2057  &v_data,
2060  &data,
2061  typename std::enable_if<
2062  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2063  int>::type = 0)
2064  {
2065  if (operation == VectorOperation::insert)
2066  {
2067  cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
2068  v_data.values_dev.get(),
2069  size * sizeof(Number),
2070  cudaMemcpyDeviceToHost);
2071  AssertCuda(cuda_error_code);
2072  }
2073  else
2074  {
2075  AssertThrow(false, ExcNotImplemented());
2076  }
2077  }
2078 #endif
2079  };
2080 
2081 
2082 
2083 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2084  template <typename Number>
2085  struct functions<Number, Number, ::MemorySpace::CUDA>
2086  {
2087  static const int block_size =
2088  ::LinearAlgebra::CUDAWrappers::kernel::block_size;
2089  static const int chunk_size =
2090  ::LinearAlgebra::CUDAWrappers::kernel::chunk_size;
2091 
2092  static void
2093  copy(
2094  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2095  const size_type size,
2096  const ::MemorySpace::
2097  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2100  &data)
2101  {
2102  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2103  v_data.values_dev.get(),
2104  size * sizeof(Number),
2105  cudaMemcpyDeviceToDevice);
2106  AssertCuda(cuda_error_code);
2107  }
2108 
2109  static void
2110  set(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2111  const size_type size,
2112  const Number s,
2113  ::MemorySpace::MemorySpaceData<Number,
2114  ::MemorySpace::CUDA>
2115  &data)
2116  {
2117  const int n_blocks = 1 + size / (chunk_size * block_size);
2118  ::LinearAlgebra::CUDAWrappers::kernel::set<Number>
2119  <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
2120 
2121 # ifdef DEBUG
2122  // Check that the kernel was launched correctly
2123  AssertCuda(cudaGetLastError());
2124  // Check that there was no problem during the execution of the kernel
2125  AssertCuda(cudaDeviceSynchronize());
2126 # endif
2127  }
2128 
2129  static void
2130  add_vector(
2131  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2132  const size_type size,
2133  const ::MemorySpace::
2134  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2135  ::MemorySpace::MemorySpaceData<Number,
2136  ::MemorySpace::CUDA>
2137  &data)
2138  {
2139  const int n_blocks = 1 + size / (chunk_size * block_size);
2140  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2141  <<<n_blocks, block_size>>>(data.values_dev.get(),
2142  1.,
2143  v_data.values_dev.get(),
2144  size);
2145 
2146 # ifdef DEBUG
2147  // Check that the kernel was launched correctly
2148  AssertCuda(cudaGetLastError());
2149  // Check that there was no problem during the execution of the kernel
2150  AssertCuda(cudaDeviceSynchronize());
2151 # endif
2152  }
2153 
2154  static void
2155  subtract_vector(
2156  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2157  const size_type size,
2158  const ::MemorySpace::
2159  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2160  ::MemorySpace::MemorySpaceData<Number,
2161  ::MemorySpace::CUDA>
2162  &data)
2163  {
2164  const int n_blocks = 1 + size / (chunk_size * block_size);
2165  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2166  <<<n_blocks, block_size>>>(data.values_dev.get(),
2167  -1.,
2168  v_data.values_dev.get(),
2169  size);
2170 
2171 # ifdef DEBUG
2172  // Check that the kernel was launched correctly
2173  AssertCuda(cudaGetLastError());
2174  // Check that there was no problem during the execution of the kernel
2175  AssertCuda(cudaDeviceSynchronize());
2176 # endif
2177  }
2178 
2179  static void
2180  add_factor(
2181  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2182  const size_type size,
2183  Number a,
2184  ::MemorySpace::MemorySpaceData<Number,
2185  ::MemorySpace::CUDA>
2186  &data)
2187  {
2188  const int n_blocks = 1 + size / (chunk_size * block_size);
2189  ::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
2190  <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
2191 
2192 # ifdef DEBUG
2193  // Check that the kernel was launched correctly
2194  AssertCuda(cudaGetLastError());
2195  // Check that there was no problem during the execution of the kernel
2196  AssertCuda(cudaDeviceSynchronize());
2197 # endif
2198  }
2199 
2200  static void
2201  add_av(
2202  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2203  const size_type size,
2204  const Number a,
2205  const ::MemorySpace::
2206  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2207  ::MemorySpace::MemorySpaceData<Number,
2208  ::MemorySpace::CUDA>
2209  &data)
2210  {
2211  const int n_blocks = 1 + size / (chunk_size * block_size);
2212  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2213  <<<n_blocks, block_size>>>(data.values_dev.get(),
2214  a,
2215  v_data.values_dev.get(),
2216  size);
2217 
2218 # ifdef DEBUG
2219  // Check that the kernel was launched correctly
2220  AssertCuda(cudaGetLastError());
2221  // Check that there was no problem during the execution of the kernel
2222  AssertCuda(cudaDeviceSynchronize());
2223 # endif
2224  }
2225 
2226  static void
2227  add_avpbw(
2228  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2229  const size_type size,
2230  const Number a,
2231  const Number b,
2232  const ::MemorySpace::
2233  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2234  const ::MemorySpace::
2235  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2236  ::MemorySpace::MemorySpaceData<Number,
2237  ::MemorySpace::CUDA>
2238  &data)
2239  {
2240  const int n_blocks = 1 + size / (chunk_size * block_size);
2241  ::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
2242  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2243  a,
2244  v_data.values_dev.get(),
2245  b,
2246  w_data.values_dev.get(),
2247  size);
2248 
2249 # ifdef DEBUG
2250  // Check that the kernel was launched correctly
2251  AssertCuda(cudaGetLastError());
2252  // Check that there was no problem during the execution of the kernel
2253  AssertCuda(cudaDeviceSynchronize());
2254 # endif
2255  }
2256 
2257  static void
2258  sadd_xv(
2259  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2260  const size_type size,
2261  const Number x,
2262  const ::MemorySpace::
2263  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2264  ::MemorySpace::MemorySpaceData<Number,
2265  ::MemorySpace::CUDA>
2266  &data)
2267  {
2268  const int n_blocks = 1 + size / (chunk_size * block_size);
2269  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2270  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2271  x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
2272 
2273 # ifdef DEBUG
2274  // Check that the kernel was launched correctly
2275  AssertCuda(cudaGetLastError());
2276  // Check that there was no problem during the execution of the kernel
2277  AssertCuda(cudaDeviceSynchronize());
2278 # endif
2279  }
2280 
2281  static void
2282  sadd_xav(
2283  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2284  const size_type size,
2285  const Number x,
2286  const Number a,
2287  const ::MemorySpace::
2288  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2289  ::MemorySpace::MemorySpaceData<Number,
2290  ::MemorySpace::CUDA>
2291  &data)
2292  {
2293  const int n_blocks = 1 + size / (chunk_size * block_size);
2294  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2295  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2296  x, data.values_dev.get(), a, v_data.values_dev.get(), size);
2297 
2298 # ifdef DEBUG
2299  // Check that the kernel was launched correctly
2300  AssertCuda(cudaGetLastError());
2301  // Check that there was no problem during the execution of the kernel
2302  AssertCuda(cudaDeviceSynchronize());
2303 # endif
2304  }
2305 
2306  static void
2307  sadd_xavbw(
2308  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2309  const size_type size,
2310  const Number x,
2311  const Number a,
2312  const Number b,
2313  const ::MemorySpace::
2314  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2315  const ::MemorySpace::
2316  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2317  ::MemorySpace::MemorySpaceData<Number,
2318  ::MemorySpace::CUDA>
2319  &data)
2320  {
2321  const int n_blocks = 1 + size / (chunk_size * block_size);
2322  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2323  <<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
2324  data.values_dev.get(),
2325  a,
2326  v_data.values_dev.get(),
2327  b,
2328  w_data.values_dev.get(),
2329  size);
2330 
2331 # ifdef DEBUG
2332  // Check that the kernel was launched correctly
2333  AssertCuda(cudaGetLastError());
2334  // Check that there was no problem during the execution of the kernel
2335  AssertCuda(cudaDeviceSynchronize());
2336 # endif
2337  }
2338 
2339  static void
2340  multiply_factor(
2341  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2342  const size_type size,
2343  const Number factor,
2344  ::MemorySpace::MemorySpaceData<Number,
2345  ::MemorySpace::CUDA>
2346  &data)
2347  {
2348  const int n_blocks = 1 + size / (chunk_size * block_size);
2349  ::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
2350  <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
2351 
2352 # ifdef DEBUG
2353  // Check that the kernel was launched correctly
2354  AssertCuda(cudaGetLastError());
2355  // Check that there was no problem during the execution of the kernel
2356  AssertCuda(cudaDeviceSynchronize());
2357 # endif
2358  }
2359 
2360  static void
2361  scale(
2362  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2363  const size_type size,
2364  const ::MemorySpace::
2365  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2366  ::MemorySpace::MemorySpaceData<Number,
2367  ::MemorySpace::CUDA>
2368  &data)
2369  {
2370  const int n_blocks = 1 + size / (chunk_size * block_size);
2371  ::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
2372  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2373  v_data.values_dev.get(),
2374  size);
2375 
2376 # ifdef DEBUG
2377  // Check that the kernel was launched correctly
2378  AssertCuda(cudaGetLastError());
2379  // Check that there was no problem during the execution of the kernel
2380  AssertCuda(cudaDeviceSynchronize());
2381 # endif
2382  }
2383 
2384  static void
2385  equ_au(
2386  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2387  const size_type size,
2388  const Number a,
2389  const ::MemorySpace::
2390  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2391  ::MemorySpace::MemorySpaceData<Number,
2392  ::MemorySpace::CUDA>
2393  &data)
2394  {
2395  const int n_blocks = 1 + size / (chunk_size * block_size);
2396  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2397  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2398  a,
2399  v_data.values_dev.get(),
2400  size);
2401 
2402 # ifdef DEBUG
2403  // Check that the kernel was launched correctly
2404  AssertCuda(cudaGetLastError());
2405  // Check that there was no problem during the execution of the kernel
2406  AssertCuda(cudaDeviceSynchronize());
2407 # endif
2408  }
2409 
2410  static void
2411  equ_aubv(
2412  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2413  const size_type size,
2414  const Number a,
2415  const Number b,
2416  const ::MemorySpace::
2417  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2418  const ::MemorySpace::
2419  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2420  ::MemorySpace::MemorySpaceData<Number,
2421  ::MemorySpace::CUDA>
2422  &data)
2423  {
2424  const int n_blocks = 1 + size / (chunk_size * block_size);
2425  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2426  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2427  a,
2428  v_data.values_dev.get(),
2429  b,
2430  w_data.values_dev.get(),
2431  size);
2432 
2433 # ifdef DEBUG
2434  // Check that the kernel was launched correctly
2435  AssertCuda(cudaGetLastError());
2436  // Check that there was no problem during the execution of the kernel
2437  AssertCuda(cudaDeviceSynchronize());
2438 # endif
2439  }
2440 
2441  static Number
2442  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2443  const size_type size,
2444  const ::MemorySpace::
2445  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2446  ::MemorySpace::MemorySpaceData<Number,
2447  ::MemorySpace::CUDA>
2448  &data)
2449  {
2450  Number * result_device;
2451  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2452  AssertCuda(error_code);
2453  error_code = cudaMemset(result_device, 0, sizeof(Number));
2454  AssertCuda(error_code);
2455 
2456  const int n_blocks = 1 + size / (chunk_size * block_size);
2457  ::LinearAlgebra::CUDAWrappers::kernel::double_vector_reduction<
2458  Number,
2459  ::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
2460  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2461  data.values_dev.get(),
2462  v_data.values_dev.get(),
2463  static_cast<unsigned int>(
2464  size));
2465 
2466 # ifdef DEBUG
2467  // Check that the kernel was launched correctly
2468  AssertCuda(cudaGetLastError());
2469  // Check that there was no problem during the execution of the kernel
2470  AssertCuda(cudaDeviceSynchronize());
2471 # endif
2472 
2473  // Copy the result back to the host
2474  Number result;
2475  error_code = cudaMemcpy(&result,
2476  result_device,
2477  sizeof(Number),
2478  cudaMemcpyDeviceToHost);
2479  AssertCuda(error_code);
2480  // Free the memory on the device
2481  error_code = cudaFree(result_device);
2482  AssertCuda(error_code);
2483 
2484  AssertIsFinite(result);
2485 
2486  return result;
2487  }
2488 
2489  template <typename real_type>
2490  static void
2491  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2492  & thread_loop_partitioner,
2493  const size_type size,
2494  real_type & sum,
2495  ::MemorySpace::MemorySpaceData<Number,
2496  ::MemorySpace::CUDA>
2497  &data)
2498  {
2499  sum = dot(thread_loop_partitioner, size, data, data);
2500  }
2501 
2502  static Number
2503  mean_value(
2504  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2505  const size_type size,
2506  const ::MemorySpace::
2507  MemorySpaceData<Number, ::MemorySpace::CUDA> &data)
2508  {
2509  Number * result_device;
2510  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2511  AssertCuda(error_code);
2512  error_code = cudaMemset(result_device, 0, sizeof(Number));
2513 
2514  const int n_blocks = 1 + size / (chunk_size * block_size);
2515  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2516  Number,
2517  ::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
2518  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2519  data.values_dev.get(),
2520  size);
2521 
2522  // Copy the result back to the host
2523  Number result;
2524  error_code = cudaMemcpy(&result,
2525  result_device,
2526  sizeof(Number),
2527  cudaMemcpyDeviceToHost);
2528  AssertCuda(error_code);
2529  // Free the memory on the device
2530  error_code = cudaFree(result_device);
2531  AssertCuda(error_code);
2532 
2533  return result;
2534  }
2535 
2536  template <typename real_type>
2537  static void
2538  norm_1(
2539  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2540  const size_type size,
2541  real_type & sum,
2542  ::MemorySpace::MemorySpaceData<Number,
2543  ::MemorySpace::CUDA>
2544  &data)
2545  {
2546  Number * result_device;
2547  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2548  AssertCuda(error_code);
2549  error_code = cudaMemset(result_device, 0, sizeof(Number));
2550 
2551  const int n_blocks = 1 + size / (chunk_size * block_size);
2552  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2553  Number,
2554  ::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
2555  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2556  data.values_dev.get(),
2557  size);
2558 
2559  // Copy the result back to the host
2560  error_code = cudaMemcpy(&sum,
2561  result_device,
2562  sizeof(Number),
2563  cudaMemcpyDeviceToHost);
2564  AssertCuda(error_code);
2565  // Free the memory on the device
2566  error_code = cudaFree(result_device);
2567  AssertCuda(error_code);
2568  }
2569 
2570  template <typename real_type>
2571  static void
2572  norm_p(
2573  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2574  const size_type,
2575  real_type &,
2576  real_type,
2577  ::MemorySpace::MemorySpaceData<Number,
2578  ::MemorySpace::CUDA> &)
2579  {
2580  Assert(false, ExcNotImplemented());
2581  }
2582 
2583  static Number
2584  add_and_dot(
2585  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2586  const size_type size,
2587  const Number a,
2588  const ::MemorySpace::
2589  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2590  const ::MemorySpace::
2591  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2592  ::MemorySpace::MemorySpaceData<Number,
2593  ::MemorySpace::CUDA>
2594  &data)
2595  {
2596  Number * res_d;
2597  cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
2598  AssertCuda(error_code);
2599  error_code = cudaMemset(res_d, 0, sizeof(Number));
2600  AssertCuda(error_code);
2601 
2602  const int n_blocks = 1 + size / (chunk_size * block_size);
2603  ::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
2604  <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
2605  data.values_dev.get(),
2606  v_data.values_dev.get(),
2607  w_data.values_dev.get(),
2608  a,
2609  size);
2610 
2611  Number res;
2612  error_code =
2613  cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
2614  AssertCuda(error_code);
2615  error_code = cudaFree(res_d);
2616 
2617  return res;
2618  }
2619 
2620  template <typename MemorySpace2>
2621  static void
2622  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2623  & thread_loop_partitioner,
2624  const size_type size,
2625  VectorOperation::values operation,
2626  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2627  &v_data,
2628  ::MemorySpace::MemorySpaceData<Number,
2629  ::MemorySpace::CUDA>
2630  &data,
2631  typename std::enable_if<
2632  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2633  int>::type = 0)
2634  {
2635  if (operation == VectorOperation::insert)
2636  {
2637  copy(thread_loop_partitioner, size, v_data, data);
2638  }
2639  else if (operation == VectorOperation::add)
2640  {
2641  add_vector(thread_loop_partitioner, size, v_data, data);
2642  }
2643  else
2644  {
2645  AssertThrow(false, ExcNotImplemented());
2646  }
2647  }
2648 
2649  template <typename MemorySpace2>
2650  static void
2651  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2652  & /*thread_loop_partitioner*/,
2653  const size_type size,
2654  VectorOperation::values operation,
2655  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2656  &v_data,
2657  ::MemorySpace::MemorySpaceData<Number,
2658  ::MemorySpace::CUDA>
2659  &data,
2660  typename std::enable_if<
2661  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2662  int>::type = 0)
2663  {
2664  if (operation == VectorOperation::insert)
2665  {
2666  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2667  v_data.values.get(),
2668  size * sizeof(Number),
2669  cudaMemcpyHostToDevice);
2670  AssertCuda(cuda_error_code);
2671  }
2672  else
2673  {
2674  AssertThrow(false, ExcNotImplemented());
2675  }
2676  }
2677  };
2678 #endif
2679  } // namespace VectorOperations
2680 } // namespace internal
2681 
2682 DEAL_II_NAMESPACE_CLOSE
2683 
2684 #endif
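Every CUDA member function above sizes its grid as 1 + size / (chunk_size * block_size) blocks and, in DEBUG builds, follows the launch with AssertCuda(cudaGetLastError()) and AssertCuda(cudaDeviceSynchronize()). The standalone sketch below reproduces that pattern outside of deal.II: the kernel vec_set_sketch, the check() helper, and the constants block_size_sketch/chunk_size_sketch are hypothetical stand-ins for LinearAlgebra::CUDAWrappers::kernel::set, AssertCuda, and the block_size/chunk_size constants, and the chunked indexing inside the kernel is an assumption about the scheme used in cuda_kernels.h rather than a quote of it.

// Standalone illustration (not deal.II code) of the launch-size arithmetic
// and DEBUG-mode error checks used by the CUDA specialization above.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-ins for kernel::block_size and kernel::chunk_size.
constexpr int block_size_sketch = 512;
constexpr int chunk_size_sketch = 8;

// Hypothetical stand-in for kernel::set: each thread writes chunk_size
// entries, so one block covers chunk_size * block_size contiguous indices.
__global__ void
vec_set_sketch(double *val, const double s, const unsigned int N)
{
  const unsigned int idx_base =
    threadIdx.x + blockIdx.x * (blockDim.x * chunk_size_sketch);
  for (unsigned int i = 0; i < chunk_size_sketch; ++i)
    {
      const unsigned int idx = idx_base + i * blockDim.x;
      if (idx < N)
        val[idx] = s;
    }
}

// Hypothetical stand-in for AssertCuda(): abort on any CUDA error code.
void
check(const cudaError_t code)
{
  if (code != cudaSuccess)
    {
      std::fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(code));
      std::abort();
    }
}

int
main()
{
  const unsigned int size = 1000000;
  double *           values_dev;
  check(cudaMalloc(&values_dev, size * sizeof(double)));

  // Same formula as in the listing: the extra block covers the remainder.
  const int n_blocks = 1 + size / (chunk_size_sketch * block_size_sketch);
  vec_set_sketch<<<n_blocks, block_size_sketch>>>(values_dev, 1., size);

  // The two checks performed under '#ifdef DEBUG' in the listing:
  check(cudaGetLastError());      // was the kernel launched correctly?
  check(cudaDeviceSynchronize()); // did it run without errors?

  check(cudaFree(values_dev));
}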
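The reduction members (dot, mean_value, norm_1) and the cross-memory-space import overloads share a small protocol: a single Number is allocated and zeroed on the device, a reduction kernel accumulates into it, and the value is copied back with a device-to-host cudaMemcpy, while import with VectorOperation::insert is a plain cudaMemcpy between memory spaces. The sketch below walks through that protocol with hypothetical names (elem_sum_sketch, check); it uses a plain atomicAdd accumulation instead of the tree reduction in LinearAlgebra::CUDAWrappers::kernel::reduction, and it assumes a device with native double-precision atomicAdd (compute capability 6.0 or newer).

// Standalone illustration (not deal.II code) of the host-to-device import
// and scalar-result copy-back pattern used above.
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cuda_runtime.h>

// Hypothetical stand-in for kernel::reduction<Number, ElemSum<Number>>,
// accumulating with atomicAdd rather than a shared-memory tree reduction.
__global__ void
elem_sum_sketch(double *result, const double *v, const unsigned int N)
{
  const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
  if (idx < N)
    atomicAdd(result, v[idx]);
}

// Hypothetical stand-in for AssertCuda().
void
check(const cudaError_t code)
{
  if (code != cudaSuccess)
    {
      std::fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(code));
      std::abort();
    }
}

int
main()
{
  const unsigned int        size = 1024;
  const std::vector<double> v_host(size, 1.);

  // Host-to-device transfer, as in the VectorOperation::insert branch of
  // the last import() overload in the listing.
  double *v_dev;
  check(cudaMalloc(&v_dev, size * sizeof(double)));
  check(cudaMemcpy(v_dev,
                   v_host.data(),
                   size * sizeof(double),
                   cudaMemcpyHostToDevice));

  // Device-resident scalar result, zeroed before the reduction.
  double *result_device;
  check(cudaMalloc(&result_device, sizeof(double)));
  check(cudaMemset(result_device, 0, sizeof(double)));

  const int block = 512;
  elem_sum_sketch<<<(size + block - 1) / block, block>>>(result_device,
                                                         v_dev,
                                                         size);
  check(cudaGetLastError());
  check(cudaDeviceSynchronize());

  // Copy the result back to the host and free the device scalar, exactly
  // as mean_value() does in the listing.
  double result;
  check(cudaMemcpy(&result,
                   result_device,
                   sizeof(double),
                   cudaMemcpyDeviceToHost));
  check(cudaFree(result_device));
  check(cudaFree(v_dev));
  std::printf("sum = %f\n", result); // expected: 1024.000000
}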