Reference documentation for deal.II version Git 1d6f7faded 2020-04-08 17:58:29 +0200
vector_operations_internal.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2016 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vector_operations_internal_h
18 #define dealii_vector_operations_internal_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/memory_space.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/parallel.h>
25 #include <deal.II/base/types.h>
26 #include <deal.II/base/vectorization.h>
27 
28 #include <deal.II/lac/cuda_kernels.h>
29 #include <deal.II/lac/cuda_kernels.templates.h>
30 #include <deal.II/lac/vector_operation.h>
31 
32 #include <cstdio>
33 #include <cstring>
34 
35 DEAL_II_NAMESPACE_OPEN
36 
37 namespace internal
38 {
39  namespace VectorOperations
40  {
41  using size_type = types::global_dof_index;
42 
43  template <typename T>
44  bool
45  is_non_negative(const T &t)
46  {
47  return t >= 0;
48  }
49 
50 
51  template <typename T>
52  bool
53  is_non_negative(const std::complex<T> &)
54  {
55  Assert(false, ExcMessage("Complex numbers do not have an ordering."));
56 
57  return false;
58  }
59 
60 
61  template <typename T>
62  void
63  print(const T &t, const char *format)
64  {
65  if (format != nullptr)
66  std::printf(format, t);
67  else
68  std::printf(" %5.2f", double(t));
69  }
70 
71 
72 
73  template <typename T>
74  void
75  print(const std::complex<T> &t, const char *format)
76  {
77  if (format != nullptr)
78  std::printf(format, t.real(), t.imag());
79  else
80  std::printf(" %5.2f+%5.2fi", double(t.real()), double(t.imag()));
81  }
82 
83  // call std::copy, except in
84  // the case where we want to copy
85  // from std::complex to a
86  // non-complex type
87  template <typename T, typename U>
88  void
89  copy(const T *begin, const T *end, U *dest)
90  {
91  std::copy(begin, end, dest);
92  }
93 
94  template <typename T, typename U>
95  void
96  copy(const std::complex<T> *begin,
97  const std::complex<T> *end,
98  std::complex<U> * dest)
99  {
100  std::copy(begin, end, dest);
101  }
102 
103  template <typename T, typename U>
104  void
105  copy(const std::complex<T> *, const std::complex<T> *, U *)
106  {
107  Assert(false,
108  ExcMessage("Can't convert a vector of complex numbers "
109  "into a vector of reals/doubles"));
110  }
111 
112 
113 
114 #ifdef DEAL_II_WITH_THREADS
115 
123  template <typename Functor>
124  struct TBBForFunctor
125  {
126  TBBForFunctor(Functor & functor,
127  const size_type start,
128  const size_type end)
129  : functor(functor)
130  , start(start)
131  , end(end)
132  {
133  const size_type vec_size = end - start;
134  // set chunk size for sub-tasks
135  const unsigned int gs =
136  internal::VectorImplementation::minimum_parallel_grain_size;
137  n_chunks =
138  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
139  vec_size / gs);
140  chunk_size = vec_size / n_chunks;
141 
142  // round to next multiple of 512 (or minimum grain size if that happens
143  // to be smaller). this is advantageous because our accumulation
144  // algorithms favor lengths of a power of 2 due to pairwise summation ->
145  // at most one 'oddly' sized chunk
146  if (chunk_size > 512)
147  chunk_size = ((chunk_size + 511) / 512) * 512;
148  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
149  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
150  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
151  }
152 
153  void
154  operator()(const tbb::blocked_range<size_type> &range) const
155  {
156  const size_type r_begin = start + range.begin() * chunk_size;
157  const size_type r_end = std::min(start + range.end() * chunk_size, end);
158  functor(r_begin, r_end);
159  }
160 
161  Functor & functor;
162  const size_type start;
163  const size_type end;
164  unsigned int n_chunks;
165  size_type chunk_size;
166  };
167 #endif
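The chunk bookkeeping in the constructor above can be checked with a few lines of standalone code. A minimal sketch, assuming a minimum_parallel_grain_size of 4096 and four threads (both values are assumptions for illustration; the real ones come from internal::VectorImplementation::minimum_parallel_grain_size and MultithreadInfo::n_threads()):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    int main()
    {
      using size_type = std::size_t;
      const size_type    vec_size = 1000000; // length of the index range
      const unsigned int gs       = 4096;    // assumed grain size
      const unsigned int threads  = 4;       // assumed number of threads

      // same arithmetic as the TBBForFunctor constructor
      size_type n_chunks =
        std::min(static_cast<size_type>(4 * threads), vec_size / gs);
      size_type chunk_size = vec_size / n_chunks;

      // round the chunk size up to the next multiple of 512
      if (chunk_size > 512)
        chunk_size = ((chunk_size + 511) / 512) * 512;
      n_chunks = (vec_size + chunk_size - 1) / chunk_size;

      // prints "16 chunks of size 62976"; the last chunk is clipped at vec_size
      std::cout << n_chunks << " chunks of size " << chunk_size << std::endl;
    }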
168 
169  template <typename Functor>
170  void
171  parallel_for(
172  Functor & functor,
173  const size_type start,
174  const size_type end,
175  const std::shared_ptr<::parallel::internal::TBBPartitioner>
176  &partitioner)
177  {
178 #ifdef DEAL_II_WITH_THREADS
179  const size_type vec_size = end - start;
180  // only go to the parallel function in case there are at least 4 parallel
181  // items, otherwise the overhead is too large
182  if (vec_size >=
183  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
184  MultithreadInfo::n_threads() > 1)
185  {
186  Assert(partitioner.get() != nullptr,
187  ExcInternalError(
188  "Unexpected initialization of Vector that does "
189  "not set the TBB partitioner to a usable state."));
190  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
191  partitioner->acquire_one_partitioner();
192 
193  TBBForFunctor<Functor> generic_functor(functor, start, end);
194  // We use a minimum grain size of 1 here since the grains at this
195  // stage of dividing the work refer to the number of vector chunks
196  // that are processed by (possibly different) threads in the
197  // parallelized for loop (i.e., they do not refer to individual
198  // vector entries). The number of chunks here is calculated inside
199  // TBBForFunctor. See also GitHub issue #2496 for further discussion
200  // of this strategy.
201  ::parallel::internal::parallel_for(
202  static_cast<size_type>(0),
203  static_cast<size_type>(generic_functor.n_chunks),
204  generic_functor,
205  1,
206  tbb_partitioner);
207  partitioner->release_one_partitioner(tbb_partitioner);
208  }
209  else if (vec_size > 0)
210  functor(start, end);
211 #else
212  functor(start, end);
213  (void)partitioner;
214 #endif
215  }
216 
217 
218  // Define the functors necessary to use SIMD with TBB. we also include the
219  // simple copy and set operations
220 
221  template <typename Number>
222  struct Vector_set
223  {
224  Vector_set(const Number value, Number *const dst)
225  : value(value)
226  , dst(dst)
227  {
228  Assert(dst != nullptr, ExcInternalError());
229  }
230 
231  void
232  operator()(const size_type begin, const size_type end) const
233  {
234  Assert(end >= begin, ExcInternalError());
235 
236  if (value == Number())
237  {
238 #ifdef DEAL_II_WITH_CXX17
239  if constexpr (std::is_trivial<Number>::value)
240 #else
241  if (std::is_trivial<Number>::value)
242 #endif
243  {
244  std::memset(dst + begin, 0, sizeof(Number) * (end - begin));
245  return;
246  }
247  }
248  std::fill(dst + begin, dst + end, value);
249  }
250 
251  const Number value;
252  Number *const dst;
253  };
254 
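A usage sketch (not code from this header; SetFunctor is a simplified stand-in for Vector_set): a functor of this shape is what parallel_for() above receives, and in the serial fallback it is simply invoked once on the whole index range.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // simplified stand-in for Vector_set: fill dst[begin,end) with 'value'
    struct SetFunctor
    {
      double        value;
      double *const dst;

      void
      operator()(const std::size_t begin, const std::size_t end) const
      {
        for (std::size_t i = begin; i < end; ++i)
          dst[i] = value;
      }
    };

    int main()
    {
      std::vector<double> v(10);
      SetFunctor          setter{3.14, v.data()};

      // what parallel_for() does when the range is too short for threading:
      // call the functor once on [0, size)
      setter(0, v.size());

      std::cout << v.front() << ' ' << v.back() << std::endl; // "3.14 3.14"
    }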
255  template <typename Number, typename OtherNumber>
256  struct Vector_copy
257  {
258  Vector_copy(const OtherNumber *const src, Number *const dst)
259  : src(src)
260  , dst(dst)
261  {
262  Assert(src != nullptr, ExcInternalError());
263  Assert(dst != nullptr, ExcInternalError());
264  }
265 
266  void
267  operator()(const size_type begin, const size_type end) const
268  {
269  Assert(end >= begin, ExcInternalError());
270 
271 #if __GNUG__ && __GNUC__ < 5
272  if (__has_trivial_copy(Number) &&
273  std::is_same<Number, OtherNumber>::value)
274 #else
275 # ifdef DEAL_II_WITH_CXX17
276  if constexpr (std::is_trivially_copyable<Number>() &&
277  std::is_same<Number, OtherNumber>::value)
278 # else
279  if (std::is_trivially_copyable<Number>() &&
280  std::is_same<Number, OtherNumber>::value)
281 # endif
282 #endif
283  std::memcpy(dst + begin, src + begin, (end - begin) * sizeof(Number));
284  else
285  {
286  DEAL_II_OPENMP_SIMD_PRAGMA
287  for (size_type i = begin; i < end; ++i)
288  dst[i] = src[i];
289  }
290  }
291 
292  const OtherNumber *const src;
293  Number *const dst;
294  };
295 
296  template <typename Number>
297  struct Vectorization_multiply_factor
298  {
299  Vectorization_multiply_factor(Number *const val, const Number factor)
300  : val(val)
301  , factor(factor)
302  {}
303 
304  void
305  operator()(const size_type begin, const size_type end) const
306  {
307  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
308  {
309  DEAL_II_OPENMP_SIMD_PRAGMA
310  for (size_type i = begin; i < end; ++i)
311  val[i] *= factor;
312  }
313  else
314  {
315  for (size_type i = begin; i < end; ++i)
316  val[i] *= factor;
317  }
318  }
319 
320  Number *const val;
321  const Number factor;
322  };
323 
324  template <typename Number>
325  struct Vectorization_add_av
326  {
327  Vectorization_add_av(Number *const val,
328  const Number *const v_val,
329  const Number factor)
330  : val(val)
331  , v_val(v_val)
332  , factor(factor)
333  {}
334 
335  void
336  operator()(const size_type begin, const size_type end) const
337  {
338  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
339  {
340  DEAL_II_OPENMP_SIMD_PRAGMA
341  for (size_type i = begin; i < end; ++i)
342  val[i] += factor * v_val[i];
343  }
344  else
345  {
346  for (size_type i = begin; i < end; ++i)
347  val[i] += factor * v_val[i];
348  }
349  }
350 
351  Number *const val;
352  const Number *const v_val;
353  const Number factor;
354  };
355 
356  template <typename Number>
357  struct Vectorization_sadd_xav
358  {
359  Vectorization_sadd_xav(Number * val,
360  const Number *const v_val,
361  const Number a,
362  const Number x)
363  : val(val)
364  , v_val(v_val)
365  , a(a)
366  , x(x)
367  {}
368 
369  void
370  operator()(const size_type begin, const size_type end) const
371  {
372  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
373  {
374  DEAL_II_OPENMP_SIMD_PRAGMA
375  for (size_type i = begin; i < end; ++i)
376  val[i] = x * val[i] + a * v_val[i];
377  }
378  else
379  {
380  for (size_type i = begin; i < end; ++i)
381  val[i] = x * val[i] + a * v_val[i];
382  }
383  }
384 
385  Number *const val;
386  const Number *const v_val;
387  const Number a;
388  const Number x;
389  };
390 
391  template <typename Number>
392  struct Vectorization_subtract_v
393  {
394  Vectorization_subtract_v(Number *val, const Number *const v_val)
395  : val(val)
396  , v_val(v_val)
397  {}
398 
399  void
400  operator()(const size_type begin, const size_type end) const
401  {
402  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
403  {
404  DEAL_II_OPENMP_SIMD_PRAGMA
405  for (size_type i = begin; i < end; ++i)
406  val[i] -= v_val[i];
407  }
408  else
409  {
410  for (size_type i = begin; i < end; ++i)
411  val[i] -= v_val[i];
412  }
413  }
414 
415  Number *const val;
416  const Number *const v_val;
417  };
418 
419  template <typename Number>
420  struct Vectorization_add_factor
421  {
422  Vectorization_add_factor(Number *const val, const Number factor)
423  : val(val)
424  , factor(factor)
425  {}
426 
427  void
428  operator()(const size_type begin, const size_type end) const
429  {
430  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
431  {
432  DEAL_II_OPENMP_SIMD_PRAGMA
433  for (size_type i = begin; i < end; ++i)
434  val[i] += factor;
435  }
436  else
437  {
438  for (size_type i = begin; i < end; ++i)
439  val[i] += factor;
440  }
441  }
442 
443  Number *const val;
444  const Number factor;
445  };
446 
447  template <typename Number>
448  struct Vectorization_add_v
449  {
450  Vectorization_add_v(Number *const val, const Number *const v_val)
451  : val(val)
452  , v_val(v_val)
453  {}
454 
455  void
456  operator()(const size_type begin, const size_type end) const
457  {
458  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
459  {
460  DEAL_II_OPENMP_SIMD_PRAGMA
461  for (size_type i = begin; i < end; ++i)
462  val[i] += v_val[i];
463  }
464  else
465  {
466  for (size_type i = begin; i < end; ++i)
467  val[i] += v_val[i];
468  }
469  }
470 
471  Number *const val;
472  const Number *const v_val;
473  };
474 
475  template <typename Number>
476  struct Vectorization_add_avpbw
477  {
478  Vectorization_add_avpbw(Number *const val,
479  const Number *const v_val,
480  const Number *const w_val,
481  const Number a,
482  const Number b)
483  : val(val)
484  , v_val(v_val)
485  , w_val(w_val)
486  , a(a)
487  , b(b)
488  {}
489 
490  void
491  operator()(const size_type begin, const size_type end) const
492  {
493  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
494  {
495  DEAL_II_OPENMP_SIMD_PRAGMA
496  for (size_type i = begin; i < end; ++i)
497  val[i] = val[i] + a * v_val[i] + b * w_val[i];
498  }
499  else
500  {
501  for (size_type i = begin; i < end; ++i)
502  val[i] = val[i] + a * v_val[i] + b * w_val[i];
503  }
504  }
505 
506  Number *const val;
507  const Number *const v_val;
508  const Number *const w_val;
509  const Number a;
510  const Number b;
511  };
512 
513  template <typename Number>
514  struct Vectorization_sadd_xv
515  {
516  Vectorization_sadd_xv(Number *const val,
517  const Number *const v_val,
518  const Number x)
519  : val(val)
520  , v_val(v_val)
521  , x(x)
522  {}
523 
524  void
525  operator()(const size_type begin, const size_type end) const
526  {
527  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
528  {
529  DEAL_II_OPENMP_SIMD_PRAGMA
530  for (size_type i = begin; i < end; ++i)
531  val[i] = x * val[i] + v_val[i];
532  }
533  else
534  {
535  for (size_type i = begin; i < end; ++i)
536  val[i] = x * val[i] + v_val[i];
537  }
538  }
539 
540  Number *const val;
541  const Number *const v_val;
542  const Number x;
543  };
544 
545  template <typename Number>
546  struct Vectorization_sadd_xavbw
547  {
548  Vectorization_sadd_xavbw(Number * val,
549  const Number *v_val,
550  const Number *w_val,
551  Number x,
552  Number a,
553  Number b)
554  : val(val)
555  , v_val(v_val)
556  , w_val(w_val)
557  , x(x)
558  , a(a)
559  , b(b)
560  {}
561 
562  void
563  operator()(const size_type begin, const size_type end) const
564  {
565  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
566  {
567  DEAL_II_OPENMP_SIMD_PRAGMA
568  for (size_type i = begin; i < end; ++i)
569  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
570  }
571  else
572  {
573  for (size_type i = begin; i < end; ++i)
574  val[i] = x * val[i] + a * v_val[i] + b * w_val[i];
575  }
576  }
577 
578  Number *const val;
579  const Number *const v_val;
580  const Number *const w_val;
581  const Number x;
582  const Number a;
583  const Number b;
584  };
585 
586  template <typename Number>
587  struct Vectorization_scale
588  {
589  Vectorization_scale(Number *const val, const Number *const v_val)
590  : val(val)
591  , v_val(v_val)
592  {}
593 
594  void
595  operator()(const size_type begin, const size_type end) const
596  {
597  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
598  {
599  DEAL_II_OPENMP_SIMD_PRAGMA
600  for (size_type i = begin; i < end; ++i)
601  val[i] *= v_val[i];
602  }
603  else
604  {
605  for (size_type i = begin; i < end; ++i)
606  val[i] *= v_val[i];
607  }
608  }
609 
610  Number *const val;
611  const Number *const v_val;
612  };
613 
614  template <typename Number>
615  struct Vectorization_equ_au
616  {
617  Vectorization_equ_au(Number *const val,
618  const Number *const u_val,
619  const Number a)
620  : val(val)
621  , u_val(u_val)
622  , a(a)
623  {}
624 
625  void
626  operator()(const size_type begin, const size_type end) const
627  {
628  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
629  {
630  DEAL_II_OPENMP_SIMD_PRAGMA
631  for (size_type i = begin; i < end; ++i)
632  val[i] = a * u_val[i];
633  }
634  else
635  {
636  for (size_type i = begin; i < end; ++i)
637  val[i] = a * u_val[i];
638  }
639  }
640 
641  Number *const val;
642  const Number *const u_val;
643  const Number a;
644  };
645 
646  template <typename Number>
647  struct Vectorization_equ_aubv
648  {
649  Vectorization_equ_aubv(Number *const val,
650  const Number *const u_val,
651  const Number *const v_val,
652  const Number a,
653  const Number b)
654  : val(val)
655  , u_val(u_val)
656  , v_val(v_val)
657  , a(a)
658  , b(b)
659  {}
660 
661  void
662  operator()(const size_type begin, const size_type end) const
663  {
664  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
665  {
666  DEAL_II_OPENMP_SIMD_PRAGMA
667  for (size_type i = begin; i < end; ++i)
668  val[i] = a * u_val[i] + b * v_val[i];
669  }
670  else
671  {
672  for (size_type i = begin; i < end; ++i)
673  val[i] = a * u_val[i] + b * v_val[i];
674  }
675  }
676 
677  Number *const val;
678  const Number *const u_val;
679  const Number *const v_val;
680  const Number a;
681  const Number b;
682  };
683 
684  template <typename Number>
685  struct Vectorization_equ_aubvcw
686  {
687  Vectorization_equ_aubvcw(Number * val,
688  const Number *u_val,
689  const Number *v_val,
690  const Number *w_val,
691  const Number a,
692  const Number b,
693  const Number c)
694  : val(val)
695  , u_val(u_val)
696  , v_val(v_val)
697  , w_val(w_val)
698  , a(a)
699  , b(b)
700  , c(c)
701  {}
702 
703  void
704  operator()(const size_type begin, const size_type end) const
705  {
706  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
707  {
708  DEAL_II_OPENMP_SIMD_PRAGMA
709  for (size_type i = begin; i < end; ++i)
710  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
711  }
712  else
713  {
714  for (size_type i = begin; i < end; ++i)
715  val[i] = a * u_val[i] + b * v_val[i] + c * w_val[i];
716  }
717  }
718 
719  Number *const val;
720  const Number *const u_val;
721  const Number *const v_val;
722  const Number *const w_val;
723  const Number a;
724  const Number b;
725  const Number c;
726  };
727 
728  template <typename Number>
729  struct Vectorization_ratio
730  {
731  Vectorization_ratio(Number *val, const Number *a_val, const Number *b_val)
732  : val(val)
733  , a_val(a_val)
734  , b_val(b_val)
735  {}
736 
737  void
738  operator()(const size_type begin, const size_type end) const
739  {
740  if (::parallel::internal::EnableOpenMPSimdFor<Number>::value)
741  {
742  DEAL_II_OPENMP_SIMD_PRAGMA
743  for (size_type i = begin; i < end; ++i)
744  val[i] = a_val[i] / b_val[i];
745  }
746  else
747  {
748  for (size_type i = begin; i < end; ++i)
749  val[i] = a_val[i] / b_val[i];
750  }
751  }
752 
753  Number *const val;
754  const Number *const a_val;
755  const Number *const b_val;
756  };
757 
758 
759 
760  // All sums over all the vector entries (l2-norm, inner product, etc.) are
761  // performed with the same code, using a templated operation defined
762  // here. There are always two versions defined, a standard one that covers
763  // most cases and a vectorized one which is only for equal types and float
764  // and double.
765  template <typename Number, typename Number2>
766  struct Dot
767  {
768  static constexpr bool vectorizes = std::is_same<Number, Number2>::value &&
769  (VectorizedArray<Number>::size() > 1);
770 
771  Dot(const Number *const X, const Number2 *const Y)
772  : X(X)
773  , Y(Y)
774  {}
775 
776  Number
777  operator()(const size_type i) const
778  {
779  return X[i] * Number(numbers::NumberTraits<Number2>::conjugate(Y[i]));
780  }
781 
782  VectorizedArray<Number>
783  do_vectorized(const size_type i) const
784  {
785  VectorizedArray<Number> x, y;
786  x.load(X + i);
787  y.load(Y + i);
788 
789  // the following operation in VectorizedArray does an element-wise
790  // scalar product without taking into account complex values and
791  // the need to take the complex-conjugate of one argument. this
792  // may be a bug, but because all VectorizedArray classes only
793  // work on real scalars, it doesn't really matter very much.
794  // in any case, assert that we really don't get here for
795  // complex-valued objects
796  static_assert(numbers::NumberTraits<Number>::is_complex == false,
797  "This operation is not correctly implemented for "
798  "complex-valued objects.");
799  return x * y;
800  }
801 
802  const Number *const X;
803  const Number2 *const Y;
804  };
805 
806  template <typename Number, typename RealType>
807  struct Norm2
808  {
809  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
810 
811  Norm2(const Number *const X)
812  : X(X)
813  {}
814 
815  RealType
816  operator()(const size_type i) const
817  {
818  return numbers::NumberTraits<Number>::abs_square(X[i]);
819  }
820 
821  VectorizedArray<Number>
822  do_vectorized(const size_type i) const
823  {
824  VectorizedArray<Number> x;
825  x.load(X + i);
826  return x * x;
827  }
828 
829  const Number *const X;
830  };
831 
832  template <typename Number, typename RealType>
833  struct Norm1
834  {
835  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
836 
837  Norm1(const Number *X)
838  : X(X)
839  {}
840 
841  RealType
842  operator()(const size_type i) const
843  {
844  return numbers::NumberTraits<Number>::abs(X[i]);
845  }
846 
847  VectorizedArray<Number>
848  do_vectorized(const size_type i) const
849  {
850  VectorizedArray<Number> x;
851  x.load(X + i);
852  return std::abs(x);
853  }
854 
855  const Number *X;
856  };
857 
858  template <typename Number, typename RealType>
859  struct NormP
860  {
861  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
862 
863  NormP(const Number *X, RealType p)
864  : X(X)
865  , p(p)
866  {}
867 
868  RealType
869  operator()(const size_type i) const
870  {
871  return std::pow(numbers::NumberTraits<Number>::abs(X[i]), p);
872  }
873 
874  VectorizedArray<Number>
875  do_vectorized(const size_type i) const
876  {
877  VectorizedArray<Number> x;
878  x.load(X + i);
879  return std::pow(std::abs(x), p);
880  }
881 
882  const Number * X;
883  const RealType p;
884  };
885 
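Note that these functors only produce the raw sums over all entries; the final norm is formed by the caller. A sketch of that post-processing (an assumption about the calling code, e.g. a vector's norm member functions, not something defined in this header):

    #include <cmath>

    // sum2 = accumulated Norm2 values, i.e. sum of |x_i|^2
    // sum1 = accumulated Norm1 values, i.e. sum of |x_i|
    // sump = accumulated NormP values, i.e. sum of |x_i|^p
    double l2_norm(const double sum2)                 { return std::sqrt(sum2); }
    double l1_norm(const double sum1)                 { return sum1; }
    double lp_norm(const double sump, const double p) { return std::pow(sump, 1. / p); }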
886  template <typename Number>
887  struct MeanValue
888  {
889  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
890 
891  MeanValue(const Number *X)
892  : X(X)
893  {}
894 
895  Number
896  operator()(const size_type i) const
897  {
898  return X[i];
899  }
900 
901  VectorizedArray<Number>
902  do_vectorized(const size_type i) const
903  {
904  VectorizedArray<Number> x;
905  x.load(X + i);
906  return x;
907  }
908 
909  const Number *X;
910  };
911 
912  template <typename Number>
913  struct AddAndDot
914  {
915  static const bool vectorizes = VectorizedArray<Number>::size() > 1;
916 
917  AddAndDot(Number *const X,
918  const Number *const V,
919  const Number *const W,
920  const Number a)
921  : X(X)
922  , V(V)
923  , W(W)
924  , a(a)
925  {}
926 
927  Number
928  operator()(const size_type i) const
929  {
930  X[i] += a * V[i];
931  return X[i] * Number(numbers::NumberTraits<Number>::conjugate(W[i]));
932  }
933 
934  VectorizedArray<Number>
935  do_vectorized(const size_type i) const
936  {
937  VectorizedArray<Number> x, w, v;
938  x.load(X + i);
939  v.load(V + i);
940  x += a * v;
941  x.store(X + i);
942  // may only load from W after storing in X because the pointers might
943  // point to the same memory
944  w.load(W + i);
945 
946  // the following operation in VectorizedArray does an element-wise
947  // scalar product without taking into account complex values and
948  // the need to take the complex-conjugate of one argument. this
949  // may be a bug, but because all VectorizedArray classes only
950  // work on real scalars, it doesn't really matter very much.
951  // in any case, assert that we really don't get here for
952  // complex-valued objects
953  static_assert(numbers::NumberTraits<Number>::is_complex == false,
954  "This operation is not correctly implemented for "
955  "complex-valued objects.");
956  return x * w;
957  }
958 
959  Number *const X;
960  const Number *const V;
961  const Number *const W;
962  const Number a;
963  };
964 
965 
966 
967  // this is the main working loop for all vector sums using the templated
968  // operation above. it accumulates the sums using a block-wise summation
969  // algorithm with post-update. this blocked algorithm has been proposed in
970  // a similar form by Castaldo, Whaley and Chronopoulos (SIAM
971  // J. Sci. Comput. 31, 1156-1174, 2008) and we use the smallest possible
972  // block size, 2. Sometimes it is referred to as pairwise summation. The
973  // worst case error made by this algorithm is on the order O(eps *
974  // log2(vec_size)), whereas a naive summation is O(eps * vec_size). Even
975  // though the Kahan summation is even more accurate with an error O(eps)
976  // by carrying along remainders not captured by the main sum, that involves
977  // additional costs which are not worthwhile. See the Wikipedia article on
978  // the Kahan summation algorithm.
979 
980  // The algorithm implemented here has the additional benefit that it is
981  // easily parallelized without changing the order of how the elements are
982  // added (floating point addition is not associative). For the same vector
983  // size and minimum_parallel_grain_size, the blocks are always the
984  // same and added pairwise.
985 
986  // The depth of recursion is controlled by the 'magic' parameter
987  // vector_accumulation_recursion_threshold: If the length is below
988  // vector_accumulation_recursion_threshold * 32 (32 is the part of code we
989  // unroll), a straight loop instead of recursion will be used. At the
990  // innermost level, eight values are added consecutively in order to better
991  // balance multiplications and additions.
992 
993  // Loops are unrolled as follows: the range [first,last) is broken into
994  // @p n_chunks each of size 32 plus the @p remainder.
995  // accumulate_regular() does the work on 32*n_chunks elements employing SIMD
996  // if possible and stores the result of the operation for each chunk in @p outer_results.
997 
998  // The code returns the result as the last argument in order to make
999  // spawning tasks simpler and use automatic template deduction.
1000 
1001 
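A minimal sketch of the pairwise summation idea described in the comment above, stripped of the unrolling, SIMD, and threading used below; because every entry participates in only about log2(n) additions, the worst-case rounding error grows like O(eps * log2(n)) rather than O(eps * n):

    #include <cstddef>

    // recursively sum values[first, last) by splitting the range in half
    double
    pairwise_sum(const double *values,
                 const std::size_t first,
                 const std::size_t last)
    {
      const std::size_t n = last - first;
      if (n == 0)
        return 0.;
      if (n == 1)
        return values[first];
      const std::size_t middle = first + n / 2;
      return pairwise_sum(values, first, middle) +
             pairwise_sum(values, middle, last);
    }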
1007  const unsigned int vector_accumulation_recursion_threshold = 128;
1008 
1009  template <typename Operation, typename ResultType>
1010  void
1011  accumulate_recursive(const Operation &op,
1012  const size_type first,
1013  const size_type last,
1014  ResultType & result)
1015  {
1016  const size_type vec_size = last - first;
1017  if (vec_size <= vector_accumulation_recursion_threshold * 32)
1018  {
1019  // the vector is short enough so we perform the summation. first
1020  // work on the regular part. The innermost 32 values are expanded in
1021  // order to obtain known loop bounds for most of the work.
1022  size_type index = first;
1023  ResultType outer_results[vector_accumulation_recursion_threshold];
1024 
1025  // set the zeroth element to zero to correctly handle the case where
1026  // vec_size == 0
1027  outer_results[0] = ResultType();
1028 
1029  // the variable serves two purposes: (i) number of chunks (each 32
1030  // indices) for the given size; all results are stored in
1031  // outer_results[0,n_chunks) (ii) in the SIMD case n_chunks is also a
1032  // next free index in outer_results[] to which we can write after
1033  // accumulate_regular() is executed.
1034  size_type n_chunks = vec_size / 32;
1035  const size_type remainder = vec_size % 32;
1036  Assert(remainder == 0 ||
1037  n_chunks < vector_accumulation_recursion_threshold,
1038  ExcInternalError());
1039 
1040  // Select between the regular version and vectorized version based
1041  // on the number types we are given. To choose the vectorized
1042  // version often enough, we need to have all tasks but the last one
1043  // to be divisible by the vectorization length
1044  accumulate_regular(
1045  op,
1046  n_chunks,
1047  index,
1048  outer_results,
1049  std::integral_constant<bool, Operation::vectorizes>());
1050 
1051  // now work on the remainder, i.e., the last up to 32 values. Use
1052  // switch statement with fall-through to work on these values.
1053  if (remainder > 0)
1054  {
1055  // if we got here, it means that (vec_size <=
1056  // vector_accumulation_recursion_threshold * 32), which is to say
1057  // that the domain can be split into n_chunks <=
1058  // vector_accumulation_recursion_threshold:
1059  AssertIndexRange(n_chunks,
1060  vector_accumulation_recursion_threshold + 1);
1061  // split the remainder into chunks of 8, there could be up to 3
1062  // such chunks since remainder < 32.
1063  // Work on those chunks without any SIMD, that is we call
1064  // op(index).
1065  const size_type inner_chunks = remainder / 8;
1066  Assert(inner_chunks <= 3, ExcInternalError());
1067  const size_type remainder_inner = remainder % 8;
1068  ResultType r0 = ResultType(), r1 = ResultType(),
1069  r2 = ResultType();
1070  switch (inner_chunks)
1071  {
1072  case 3:
1073  r2 = op(index++);
1074  for (size_type j = 1; j < 8; ++j)
1075  r2 += op(index++);
1076  DEAL_II_FALLTHROUGH;
1077  case 2:
1078  r1 = op(index++);
1079  for (size_type j = 1; j < 8; ++j)
1080  r1 += op(index++);
1081  r1 += r2;
1082  DEAL_II_FALLTHROUGH;
1083  case 1:
1084  r2 = op(index++);
1085  for (size_type j = 1; j < 8; ++j)
1086  r2 += op(index++);
1087  DEAL_II_FALLTHROUGH;
1088  default:
1089  for (size_type j = 0; j < remainder_inner; ++j)
1090  r0 += op(index++);
1091  r0 += r2;
1092  r0 += r1;
1093  if (n_chunks == vector_accumulation_recursion_threshold)
1094  outer_results[vector_accumulation_recursion_threshold -
1095  1] += r0;
1096  else
1097  {
1098  outer_results[n_chunks] = r0;
1099  n_chunks++;
1100  }
1101  break;
1102  }
1103  }
1104  // make sure we worked through all indices
1105  AssertDimension(index, last);
1106 
1107  // now sum the results from the chunks stored in
1108  // outer_results[0,n_chunks) recursively
1109  while (n_chunks > 1)
1110  {
1111  if (n_chunks % 2 == 1)
1112  outer_results[n_chunks++] = ResultType();
1113  for (size_type i = 0; i < n_chunks; i += 2)
1114  outer_results[i / 2] = outer_results[i] + outer_results[i + 1];
1115  n_chunks /= 2;
1116  }
1117  result = outer_results[0];
1118  }
1119  else
1120  {
1121  // split vector into four pieces and work on the pieces
1122  // recursively. Make pieces (except last) divisible by one fourth the
1123  // recursion threshold.
1124  const size_type new_size =
1125  (vec_size / (vector_accumulation_recursion_threshold * 32)) *
1126  vector_accumulation_recursion_threshold * 8;
1127  Assert(first + 3 * new_size < last, ExcInternalError());
1128  ResultType r0, r1, r2, r3;
1129  accumulate_recursive(op, first, first + new_size, r0);
1130  accumulate_recursive(op, first + new_size, first + 2 * new_size, r1);
1131  accumulate_recursive(op,
1132  first + 2 * new_size,
1133  first + 3 * new_size,
1134  r2);
1135  accumulate_recursive(op, first + 3 * new_size, last, r3);
1136  r0 += r1;
1137  r2 += r3;
1138  result = r0 + r2;
1139  }
1140  }
1141 
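As a worked example of the recursive split in the else-branch above, assume first = 0 and vec_size = 10000, which exceeds the 128 * 32 = 4096 threshold: new_size = (10000 / (128 * 32)) * 128 * 8 = 2 * 1024 = 2048, so the four recursive calls cover [0, 2048), [2048, 4096), [4096, 6144) and [6144, 10000), and each piece is short enough to be handled by the non-recursive branch.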
1142 
1143  // this is the inner working routine for the accumulation loops
1144  // below. This is the standard case where the loop bounds are known. We
1145  // pulled this function out of the regular accumulate routine because we
1146  // might do this thing vectorized (see specialized function below)
1147  template <typename Operation, typename ResultType>
1148  void
1149  accumulate_regular(
1150  const Operation &op,
1151  const size_type &n_chunks,
1152  size_type & index,
1153  ResultType (&outer_results)[vector_accumulation_recursion_threshold],
1154  std::integral_constant<bool, false>)
1155  {
1156  // note that each chunk is chosen to have a width of 32, thereby the index
1157  // is incremented by 4*8 for each @p i.
1158  for (size_type i = 0; i < n_chunks; ++i)
1159  {
1160  ResultType r0 = op(index);
1161  ResultType r1 = op(index + 1);
1162  ResultType r2 = op(index + 2);
1163  ResultType r3 = op(index + 3);
1164  index += 4;
1165  for (size_type j = 1; j < 8; ++j, index += 4)
1166  {
1167  r0 += op(index);
1168  r1 += op(index + 1);
1169  r2 += op(index + 2);
1170  r3 += op(index + 3);
1171  }
1172  r0 += r1;
1173  r2 += r3;
1174  outer_results[i] = r0 + r2;
1175  }
1176  }
1177 
1178 
1179 
1180  // this is the inner working routine for the accumulation loops
1181  // below. This is the specialized case where the loop bounds are known and
1182  // where we can vectorize. In that case, we request the 'do_vectorized'
1183  // routine of the operation instead of the regular one which does several
1184  // operations at once.
1185  template <typename Operation, typename Number>
1186  void
1187  accumulate_regular(
1188  const Operation &op,
1189  size_type & n_chunks,
1190  size_type & index,
1191  Number (&outer_results)[vector_accumulation_recursion_threshold],
1192  std::integral_constant<bool, true>)
1193  {
1194  // we start from @p index and work out @p n_chunks each of size 32.
1195  // in order to employ SIMD and work on @p nvecs at a time, we split this
1196  // loop yet again:
1197  // First we work on (n_chunks/nvecs) chunks, where each chunk processes
1198  // nvecs*(4*8) elements.
1199 
1200  constexpr unsigned int nvecs = VectorizedArray<Number>::size();
1201  const size_type regular_chunks = n_chunks / nvecs;
1202  for (size_type i = 0; i < regular_chunks; ++i)
1203  {
1204  VectorizedArray<Number> r0 = op.do_vectorized(index);
1205  VectorizedArray<Number> r1 = op.do_vectorized(index + nvecs);
1206  VectorizedArray<Number> r2 = op.do_vectorized(index + 2 * nvecs);
1207  VectorizedArray<Number> r3 = op.do_vectorized(index + 3 * nvecs);
1208  index += nvecs * 4;
1209  for (size_type j = 1; j < 8; ++j, index += nvecs * 4)
1210  {
1211  r0 += op.do_vectorized(index);
1212  r1 += op.do_vectorized(index + nvecs);
1213  r2 += op.do_vectorized(index + 2 * nvecs);
1214  r3 += op.do_vectorized(index + 3 * nvecs);
1215  }
1216  r0 += r1;
1217  r2 += r3;
1218  r0 += r2;
1219  r0.store(&outer_results[i * nvecs]);
1220  }
1221 
1222  // If we are treating a case where the vector length is not divisible by
1223  // the vectorization length, we need a cleanup loop.
1224  // The remaining chunks are processed one by one starting from
1225  // regular_chunks * nvecs; We do as much as possible with 2 SIMD
1226  // operations within each chunk. Here we assume that nvecs < 32/2 = 16 as
1227  // well as 16%nvecs==0.
1228  static_assert(
1229  VectorizedArray<Number>::size() <= 16 &&
1230  16 % VectorizedArray<Number>::size() == 0,
1231  "VectorizedArray::size() must be a power of 2 and not more than 16");
1232  Assert(16 % nvecs == 0, ExcInternalError());
1233  if (n_chunks % nvecs != 0)
1234  {
1235  VectorizedArray<Number> r0 = VectorizedArray<Number>(),
1236  r1 = VectorizedArray<Number>();
1237  const size_type start_irreg = regular_chunks * nvecs;
1238  for (size_type c = start_irreg; c < n_chunks; ++c)
1239  for (size_type j = 0; j < 32; j += 2 * nvecs, index += 2 * nvecs)
1240  {
1241  r0 += op.do_vectorized(index);
1242  r1 += op.do_vectorized(index + nvecs);
1243  }
1244  r0 += r1;
1245  r0.store(&outer_results[start_irreg]);
1246  // update n_chunks to denote unused element in outer_results[] from
1247  // which we can keep writing.
1248  n_chunks = start_irreg + VectorizedArray<Number>::size();
1249  }
1250  }
1251 
1252 
1253 
1254 #ifdef DEAL_II_WITH_THREADS
1255 
1283  template <typename Operation, typename ResultType>
1284  struct TBBReduceFunctor
1285  {
1286  static const unsigned int threshold_array_allocate = 512;
1287 
1288  TBBReduceFunctor(const Operation &op,
1289  const size_type start,
1290  const size_type end)
1291  : op(op)
1292  , start(start)
1293  , end(end)
1294  {
1295  const size_type vec_size = end - start;
1296  // set chunk size for sub-tasks
1297  const unsigned int gs =
1298  internal::VectorImplementation::minimum_parallel_grain_size;
1299  n_chunks =
1300  std::min(static_cast<size_type>(4 * MultithreadInfo::n_threads()),
1301  vec_size / gs);
1302  chunk_size = vec_size / n_chunks;
1303 
1304  // round to next multiple of 512 (or leave it at the minimum grain size
1305  // if that happens to be smaller). this is advantageous because our
1306  // algorithm favors lengths of a power of 2 due to pairwise summation ->
1307  // at most one 'oddly' sized chunk
1308  if (chunk_size > 512)
1309  chunk_size = ((chunk_size + 511) / 512) * 512;
1310  n_chunks = (vec_size + chunk_size - 1) / chunk_size;
1311  AssertIndexRange((n_chunks - 1) * chunk_size, vec_size);
1312  AssertIndexRange(vec_size, n_chunks * chunk_size + 1);
1313 
1314  if (n_chunks > threshold_array_allocate)
1315  {
1316  // make sure we allocate an even number of elements,
1317  // access to the new last element is needed in do_sum()
1318  large_array.resize(2 * ((n_chunks + 1) / 2));
1319  array_ptr = large_array.data();
1320  }
1321  else
1322  array_ptr = &small_array[0];
1323  }
1324 
1329  void
1330  operator()(const tbb::blocked_range<size_type> &range) const
1331  {
1332  for (size_type i = range.begin(); i < range.end(); ++i)
1333  accumulate_recursive(op,
1334  start + i * chunk_size,
1335  std::min(start + (i + 1) * chunk_size, end),
1336  array_ptr[i]);
1337  }
1338 
1339  ResultType
1340  do_sum() const
1341  {
1342  while (n_chunks > 1)
1343  {
1344  if (n_chunks % 2 == 1)
1345  array_ptr[n_chunks++] = ResultType();
1346  for (size_type i = 0; i < n_chunks; i += 2)
1347  array_ptr[i / 2] = array_ptr[i] + array_ptr[i + 1];
1348  n_chunks /= 2;
1349  }
1350  return array_ptr[0];
1351  }
1352 
1353  const Operation &op;
1354  const size_type start;
1355  const size_type end;
1356 
1357  mutable unsigned int n_chunks;
1358  unsigned int chunk_size;
1359  ResultType small_array[threshold_array_allocate];
1360  std::vector<ResultType> large_array;
1361  // this variable either points to small_array or large_array depending on
1362  // the number of threads we want to feed
1363  mutable ResultType *array_ptr;
1364  };
1365 #endif
1366 
1367 
1368 
1373  template <typename Operation, typename ResultType>
1374  void
1375  parallel_reduce(
1376  const Operation &op,
1377  const size_type start,
1378  const size_type end,
1379  ResultType & result,
1380  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1381  &partitioner)
1382  {
1383 #ifdef DEAL_II_WITH_THREADS
1384  const size_type vec_size = end - start;
1385  // only go to the parallel function in case there are at least 4 parallel
1386  // items, otherwise the overhead is too large
1387  if (vec_size >=
1388  4 * internal::VectorImplementation::minimum_parallel_grain_size &&
1389  MultithreadInfo::n_threads() > 1)
1390  {
1391  Assert(partitioner.get() != nullptr,
1392  ExcInternalError(
1393  "Unexpected initialization of Vector that does "
1394  "not set the TBB partitioner to a usable state."));
1395  std::shared_ptr<tbb::affinity_partitioner> tbb_partitioner =
1396  partitioner->acquire_one_partitioner();
1397 
1398  TBBReduceFunctor<Operation, ResultType> generic_functor(op,
1399  start,
1400  end);
1401  // We use a minimum grain size of 1 here since the grains at this
1402  // stage of dividing the work refer to the number of vector chunks
1403  // that are processed by (possibly different) threads in the
1404  // parallelized for loop (i.e., they do not refer to individual
1405  // vector entries). The number of chunks here is calculated inside
1406  // TBBForFunctor. See also GitHub issue #2496 for further discussion
1407  // of this strategy.
1408  ::parallel::internal::parallel_for(
1409  static_cast<size_type>(0),
1410  static_cast<size_type>(generic_functor.n_chunks),
1411  generic_functor,
1412  1,
1413  tbb_partitioner);
1414  partitioner->release_one_partitioner(tbb_partitioner);
1415  result = generic_functor.do_sum();
1416  }
1417  else
1418  accumulate_recursive(op, start, end, result);
1419 #else
1420  accumulate_recursive(op, start, end, result);
1421  (void)partitioner;
1422 #endif
1423  }
1424 
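A sketch of how the reduction pieces fit together, reduced to the serial path (DotOp is a simplified stand-in for the Dot operation above; the threaded path instead routes the chunks through TBBReduceFunctor and do_sum()):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // simplified stand-in for Dot: op(i) returns X[i] * Y[i]
    struct DotOp
    {
      const double *X;
      const double *Y;

      double
      operator()(const std::size_t i) const
      {
        return X[i] * Y[i];
      }
    };

    int main()
    {
      std::vector<double> x(100, 2.0), y(100, 0.5);
      const DotOp         op{x.data(), y.data()};

      // what parallel_reduce()/accumulate_recursive() boil down to for a
      // short vector: accumulate op(i) over the whole index range
      double result = 0.;
      for (std::size_t i = 0; i < x.size(); ++i)
        result += op(i);

      std::cout << result << std::endl; // prints 100
    }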
1425 
1426  template <typename Number, typename Number2, typename MemorySpace>
1427  struct functions
1428  {
1429  static void
1430  copy(
1431  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1432  /*thread_loop_partitioner*/,
1433  const size_type /*size*/,
1434  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1435  & /*v_data*/,
1436  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1437  {
1438  static_assert(
1439  std::is_same<MemorySpace, ::MemorySpace::CUDA>::value &&
1440  std::is_same<Number, Number2>::value,
1441  "For the CUDA MemorySpace Number and Number2 should be the same type");
1442  }
1443 
1444  static void
1445  set(
1446  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1447  /*thread_loop_partitioner*/,
1448  const size_type /*size*/,
1449  const Number /*s*/,
1450  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1451  {}
1452 
1453  static void
1454  add_vector(
1455  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1456  /*thread_loop_partitioner*/,
1457  const size_type /*size*/,
1458  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1459  & /*v_data*/,
1460  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1461  {}
1462 
1463  static void
1464  subtract_vector(
1465  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1466  /*thread_loop_partitioner*/,
1467  const size_type /*size*/,
1468  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1469  & /*v_data*/,
1470  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1471  {}
1472 
1473  static void
1474  add_factor(
1475  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1476  /*thread_loop_partitioner*/,
1477  const size_type /*size*/,
1478  Number /*a*/,
1479  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1480  {}
1481 
1482  static void
1483  add_av(
1484  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1485  /*thread_loop_partitioner*/,
1486  const size_type /*size*/,
1487  const Number /*a*/,
1488  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1489  & /*v_data*/,
1490  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1491  {}
1492 
1493  static void
1494  add_avpbw(
1495  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1496  /*thread_loop_partitioner*/,
1497  const size_type /*size*/,
1498  const Number /*a*/,
1499  const Number /*b*/,
1500  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1501  & /*v_data*/,
1502  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1503  & /*w_data*/,
1504  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1505  {}
1506 
1507  static void
1508  sadd_xv(
1509  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1510  /*thread_loop_partitioner*/,
1511  const size_type /*size*/,
1512  const Number /*x*/,
1513  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1514  & /*v_data*/,
1515  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1516  {}
1517 
1518  static void
1519  sadd_xav(
1520  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1521  /*thread_loop_partitioner*/,
1522  const size_type /*size*/,
1523  const Number /*x*/,
1524  const Number /*a*/,
1525  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1526  & /*v_data*/,
1527  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1528  {}
1529 
1530  static void
1531  sadd_xavbw(
1532  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1533  /*thread_loop_partitioner*/,
1534  const size_type /*size*/,
1535  const Number /*x*/,
1536  const Number /*a*/,
1537  const Number /*b*/,
1538  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1539  & /*v_data*/,
1540  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1541  & /*w_data*/,
1542  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1543  {}
1544 
1545  static void
1546  multiply_factor(
1547  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1548  /*thread_loop_partitioner*/,
1549  const size_type /*size*/,
1550  const Number /*factor*/,
1551  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1552  {}
1553 
1554  static void
1555  scale(
1556  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1557  /*thread_loop_partitioner*/,
1558  const size_type /*size*/,
1559  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1560  & /*v_data*/,
1561  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1562  {}
1563 
1564  static void
1565  equ_au(
1566  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1567  /*thread_loop_partitioner*/,
1568  const size_type /*size*/,
1569  const Number /*a*/,
1570  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1571  & /*v_data*/,
1572  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1573  {}
1574 
1575  static void
1576  equ_aubv(
1577  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1578  /*thread_loop_partitioner*/,
1579  const size_type /*size*/,
1580  const Number /*a*/,
1581  const Number /*b*/,
1582  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1583  & /*v_data*/,
1584  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1585  & /*w_data*/,
1586  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1587  {}
1588 
1589  static Number
1590  dot(
1591  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1592  /*thread_loop_partitioner*/,
1593  const size_type /*size*/,
1594  const ::MemorySpace::MemorySpaceData<Number2, MemorySpace>
1595  & /*v_data*/,
1596  const ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1597  {
1598  return Number();
1599  }
1600 
1601  template <typename real_type>
1602  static void
1603  norm_2(
1604  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1605  /*thread_loop_partitioner*/,
1606  const size_type /*size*/,
1607  real_type & /*sum*/,
1608  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1609  & /*v_data*/,
1611  {}
1612 
1613  static Number
1614  mean_value(
1615  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1616  /*thread_loop_partitioner*/,
1617  const size_type /*size*/,
1618  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1619  & /*data*/)
1620  {
1621  return Number();
1622  }
1623 
1624  template <typename real_type>
1625  static void
1626  norm_1(
1627  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1628  /*thread_loop_partitioner*/,
1629  const size_type /*size*/,
1630  real_type & /*sum*/,
1631  Number * /*values*/,
1632  Number * /*values_dev*/)
1633  {}
1634 
1635  template <typename real_type>
1636  static void
1637  norm_p(
1638  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1639  /*thread_loop_partitioner*/,
1640  const size_type /*size*/,
1641  real_type & /*sum*/,
1642  real_type /*p*/,
1644  {}
1645 
1646  static Number
1647  add_and_dot(
1648  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1649  /*thread_loop_partitioner*/,
1650  const size_type /*size*/,
1651  const Number /*a*/,
1652  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1653  & /*v_data*/,
1654  const ::MemorySpace::MemorySpaceData<Number, MemorySpace>
1655  & /*w_data*/,
1656  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1657  {
1658  return Number();
1659  }
1660 
1661  template <typename MemorySpace2>
1662  static void
1663  import(
1664  const std::shared_ptr<::parallel::internal::TBBPartitioner> &
1665  /*thread_loop_partitioner*/,
1666  const size_type /*size*/,
1667  VectorOperation::values /*operation*/,
1668  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
1669  & /*v_data*/,
1670  ::MemorySpace::MemorySpaceData<Number, MemorySpace> & /*data*/)
1671  {}
1672  };
1673 
1674 
1675 
1676  template <typename Number, typename Number2>
1677  struct functions<Number, Number2, ::MemorySpace::Host>
1678  {
1679  static void
1680  copy(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1681  & thread_loop_partitioner,
1682  const size_type size,
1683  const ::MemorySpace::
1684  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1685  ::MemorySpace::
1686  MemorySpaceData<Number, ::MemorySpace::Host>
1687  &data)
1688  {
1689  Vector_copy<Number, Number2> copier(v_data.values.get(),
1690  data.values.get());
1691  parallel_for(copier, 0, size, thread_loop_partitioner);
1692  }
1693 
1694  static void
1695  set(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1696  & thread_loop_partitioner,
1697  const size_type size,
1698  const Number s,
1699  ::MemorySpace::
1700  MemorySpaceData<Number, ::MemorySpace::Host>
1701  &data)
1702  {
1703  Vector_set<Number> setter(s, data.values.get());
1704  parallel_for(setter, 0, size, thread_loop_partitioner);
1705  }
1706 
1707  static void
1708  add_vector(
1709  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1710  & thread_loop_partitioner,
1711  const size_type size,
1712  const ::MemorySpace::
1713  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1714  ::MemorySpace::
1715  MemorySpaceData<Number, ::MemorySpace::Host>
1716  &data)
1717  {
1718  Vectorization_add_v<Number> vector_add(data.values.get(),
1719  v_data.values.get());
1720  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1721  }
1722 
1723  static void
1724  subtract_vector(
1725  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1726  & thread_loop_partitioner,
1727  const size_type size,
1728  const ::MemorySpace::
1729  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1730  ::MemorySpace::
1731  MemorySpaceData<Number, ::MemorySpace::Host>
1732  &data)
1733  {
1734  Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
1735  v_data.values.get());
1736  parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
1737  }
1738 
1739  static void
1740  add_factor(
1741  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1742  & thread_loop_partitioner,
1743  const size_type size,
1744  Number a,
1745  ::MemorySpace::
1746  MemorySpaceData<Number, ::MemorySpace::Host>
1747  &data)
1748  {
1749  Vectorization_add_factor<Number> vector_add(data.values.get(), a);
1750  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1751  }
1752 
1753  static void
1754  add_av(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1755  & thread_loop_partitioner,
1756  const size_type size,
1757  const Number a,
1758  const ::MemorySpace::
1759  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1760  ::MemorySpace::
1761  MemorySpaceData<Number, ::MemorySpace::Host>
1762  &data)
1763  {
1764  Vectorization_add_av<Number> vector_add(data.values.get(),
1765  v_data.values.get(),
1766  a);
1767  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1768  }
1769 
1770  static void
1771  add_avpbw(
1772  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1773  & thread_loop_partitioner,
1774  const size_type size,
1775  const Number a,
1776  const Number b,
1777  const ::MemorySpace::
1778  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1779  const ::MemorySpace::
1780  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1781  ::MemorySpace::
1782  MemorySpaceData<Number, ::MemorySpace::Host>
1783  &data)
1784  {
1785  Vectorization_add_avpbw<Number> vector_add(
1786  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1787  parallel_for(vector_add, 0, size, thread_loop_partitioner);
1788  }
1789 
1790  static void
1791  sadd_xv(
1792  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1793  & thread_loop_partitioner,
1794  const size_type size,
1795  const Number x,
1796  const ::MemorySpace::
1797  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1798  ::MemorySpace::
1799  MemorySpaceData<Number, ::MemorySpace::Host>
1800  &data)
1801  {
1802  Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
1803  v_data.values.get(),
1804  x);
1805  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1806  }
1807 
1808  static void
1809  sadd_xav(
1810  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1811  & thread_loop_partitioner,
1812  const size_type size,
1813  const Number x,
1814  const Number a,
1815  const ::MemorySpace::
1816  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1817  ::MemorySpace::
1818  MemorySpaceData<Number, ::MemorySpace::Host>
1819  &data)
1820  {
1821  Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
1822  v_data.values.get(),
1823  a,
1824  x);
1825  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1826  }
1827 
1828  static void
1829  sadd_xavbw(
1830  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1831  & thread_loop_partitioner,
1832  const size_type size,
1833  const Number x,
1834  const Number a,
1835  const Number b,
1836  const ::MemorySpace::
1837  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1838  const ::MemorySpace::
1839  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1840  ::MemorySpace::
1841  MemorySpaceData<Number, ::MemorySpace::Host>
1842  &data)
1843  {
1844  Vectorization_sadd_xavbw<Number> vector_sadd(
1845  data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
1846  parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
1847  }
1848 
1849  static void
1850  multiply_factor(
1851  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1852  & thread_loop_partitioner,
1853  const size_type size,
1854  const Number factor,
1855  ::MemorySpace::
1856  MemorySpaceData<Number, ::MemorySpace::Host>
1857  &data)
1858  {
1859  Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
1860  factor);
1861  parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
1862  }
1863 
1864  static void
1865  scale(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1866  & thread_loop_partitioner,
1867  const size_type size,
1868  const ::MemorySpace::
1869  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1870  ::MemorySpace::
1871  MemorySpaceData<Number, ::MemorySpace::Host>
1872  &data)
1873  {
1874  Vectorization_scale<Number> vector_scale(data.values.get(),
1875  v_data.values.get());
1876  parallel_for(vector_scale, 0, size, thread_loop_partitioner);
1877  }
1878 
1879  static void
1880  equ_au(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1881  & thread_loop_partitioner,
1882  const size_type size,
1883  const Number a,
1884  const ::MemorySpace::
1885  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1886  ::MemorySpace::
1887  MemorySpaceData<Number, ::MemorySpace::Host>
1888  &data)
1889  {
1890  Vectorization_equ_au<Number> vector_equ(data.values.get(),
1891  v_data.values.get(),
1892  a);
1893  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1894  }
1895 
1896  static void
1897  equ_aubv(
1898  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1899  & thread_loop_partitioner,
1900  const size_type size,
1901  const Number a,
1902  const Number b,
1903  const ::MemorySpace::
1904  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
1905  const ::MemorySpace::
1906  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
1907  ::MemorySpace::
1908  MemorySpaceData<Number, ::MemorySpace::Host>
1909  &data)
1910  {
1911  Vectorization_equ_aubv<Number> vector_equ(
1912  data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
1913  parallel_for(vector_equ, 0, size, thread_loop_partitioner);
1914  }
1915 
1916  static Number
1917  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1918  & thread_loop_partitioner,
1919  const size_type size,
1920  const ::MemorySpace::
1921  MemorySpaceData<Number2, ::MemorySpace::Host> &v_data,
1922  const ::MemorySpace::
1923  MemorySpaceData<Number, ::MemorySpace::Host>
1924  &data)
1925  {
1926  Number sum;
1927  ::internal::VectorOperations::Dot<Number, Number2> dot(
1928  data.values.get(), v_data.values.get());
1929  ::internal::VectorOperations::parallel_reduce(
1930  dot, 0, size, sum, thread_loop_partitioner);
1931  AssertIsFinite(sum);
1932 
1933  return sum;
1934  }
1935 
1936  template <typename real_type>
1937  static void
1938  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1939  & thread_loop_partitioner,
1940  const size_type size,
1941  real_type & sum,
1942  const ::MemorySpace::
1943  MemorySpaceData<Number, ::MemorySpace::Host>
1944  &data)
1945  {
1946  Norm2<Number, real_type> norm2(data.values.get());
1947  parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
1948  }
1949 
1950  static Number
1951  mean_value(
1952  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1953  & thread_loop_partitioner,
1954  const size_type size,
1955  const ::MemorySpace::
1956  MemorySpaceData<Number, ::MemorySpace::Host> &data)
1957  {
1958  Number sum;
1959  MeanValue<Number> mean(data.values.get());
1960  parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
1961 
1962  return sum;
1963  }
1964 
1965  template <typename real_type>
1966  static void
1967  norm_1(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1968  & thread_loop_partitioner,
1969  const size_type size,
1970  real_type & sum,
1971  const ::MemorySpace::
1972  MemorySpaceData<Number, ::MemorySpace::Host>
1973  &data)
1974  {
1975  Norm1<Number, real_type> norm1(data.values.get());
1976  parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
1977  }
1978 
1979  template <typename real_type>
1980  static void
1981  norm_p(const std::shared_ptr<::parallel::internal::TBBPartitioner>
1982  & thread_loop_partitioner,
1983  const size_type size,
1984  real_type & sum,
1985  const real_type p,
1986  const ::MemorySpace::
1987  MemorySpaceData<Number, ::MemorySpace::Host>
1988  &data)
1989  {
1990  NormP<Number, real_type> normp(data.values.get(), p);
1991  parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
1992  }
1993 
1994  static Number
1995  add_and_dot(
1996  const std::shared_ptr<::parallel::internal::TBBPartitioner>
1997  & thread_loop_partitioner,
1998  const size_type size,
1999  const Number a,
2000  const ::MemorySpace::
2001  MemorySpaceData<Number, ::MemorySpace::Host> &v_data,
2002  const ::MemorySpace::
2003  MemorySpaceData<Number, ::MemorySpace::Host> &w_data,
2004  ::MemorySpace::
2005  MemorySpaceData<Number, ::MemorySpace::Host>
2006  &data)
2007  {
2008  Number sum;
2009  AddAndDot<Number> adder(data.values.get(),
2010  v_data.values.get(),
2011  w_data.values.get(),
2012  a);
2013  parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
2014 
2015  return sum;
2016  }
2017 
2018  template <typename MemorySpace2>
2019  static void
2020  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2021  & thread_loop_partitioner,
2022  const size_type size,
2023  VectorOperation::values operation,
2024  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2025  &v_data,
2026  ::MemorySpace::
2027  MemorySpaceData<Number, ::MemorySpace::Host>
2028  &data,
2029  typename std::enable_if<
2030  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2031  int>::type = 0)
2032  {
2033  if (operation == VectorOperation::insert)
2034  {
2035  copy(thread_loop_partitioner, size, v_data, data);
2036  }
2037  else if (operation == VectorOperation::add)
2038  {
2039  add_vector(thread_loop_partitioner, size, v_data, data);
2040  }
2041  else
2042  {
2043  AssertThrow(false, ExcNotImplemented());
2044  }
2045  }
2046 
2047 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2048  template <typename MemorySpace2>
2049  static void
2050  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2051  & /*thread_loop_partitioner*/,
2052  const size_type size,
2053  VectorOperation::values operation,
2054  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2055  &v_data,
2056  ::MemorySpace::
2057  MemorySpaceData<Number, ::MemorySpace::Host>
2058  &data,
2059  typename std::enable_if<
2060  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2061  int>::type = 0)
2062  {
2063  if (operation == VectorOperation::insert)
2064  {
2065  cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
2066  v_data.values_dev.get(),
2067  size * sizeof(Number),
2068  cudaMemcpyDeviceToHost);
2069  AssertCuda(cuda_error_code);
2070  }
2071  else
2072  {
2073  AssertThrow(false, ExcNotImplemented());
2074  }
2075  }
2076 #endif
2077  };
2078 
2079 
2080 
2081 #ifdef DEAL_II_COMPILER_CUDA_AWARE
2082  template <typename Number>
2083  struct functions<Number, Number, ::MemorySpace::CUDA>
2084  {
2085  static const int block_size =
2086  ::LinearAlgebra::CUDAWrappers::kernel::block_size;
2087  static const int chunk_size =
2088  ::LinearAlgebra::CUDAWrappers::kernel::chunk_size;
2089 
2090  static void
2091  copy(
2092  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2093  const size_type size,
2094  const ::MemorySpace::
2095  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2096  ::MemorySpace::MemorySpaceData<Number,
2097  ::MemorySpace::CUDA>
2098  &data)
2099  {
2100  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2101  v_data.values_dev.get(),
2102  size * sizeof(Number),
2103  cudaMemcpyDeviceToDevice);
2104  AssertCuda(cuda_error_code);
2105  }
2106 
2107  static void
2108  set(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2109  const size_type size,
2110  const Number s,
2111  ::MemorySpace::MemorySpaceData<Number,
2112  ::MemorySpace::CUDA>
2113  &data)
2114  {
2115  const int n_blocks = 1 + size / (chunk_size * block_size);
2116  ::LinearAlgebra::CUDAWrappers::kernel::set<Number>
2117  <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
2118 
2119 # ifdef DEBUG
2120  // Check that the kernel was launched correctly
2121  AssertCuda(cudaGetLastError());
2122  // Check that there was no problem during the execution of the kernel
2123  AssertCuda(cudaDeviceSynchronize());
2124 # endif
2125  }
2126 
2127  static void
2128  add_vector(
2129  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2130  const size_type size,
2131  const ::MemorySpace::
2132  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2133  ::MemorySpace::MemorySpaceData<Number,
2134  ::MemorySpace::CUDA>
2135  &data)
2136  {
2137  const int n_blocks = 1 + size / (chunk_size * block_size);
2138  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2139  <<<n_blocks, block_size>>>(data.values_dev.get(),
2140  1.,
2141  v_data.values_dev.get(),
2142  size);
2143 
2144 # ifdef DEBUG
2145  // Check that the kernel was launched correctly
2146  AssertCuda(cudaGetLastError());
2147  // Check that there was no problem during the execution of the kernel
2148  AssertCuda(cudaDeviceSynchronize());
2149 # endif
2150  }
2151 
2152  static void
2153  subtract_vector(
2154  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2155  const size_type size,
2156  const ::MemorySpace::
2157  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2158  ::MemorySpace::MemorySpaceData<Number,
2159  ::MemorySpace::CUDA>
2160  &data)
2161  {
2162  const int n_blocks = 1 + size / (chunk_size * block_size);
2163  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2164  <<<n_blocks, block_size>>>(data.values_dev.get(),
2165  -1.,
2166  v_data.values_dev.get(),
2167  size);
2168 
2169 # ifdef DEBUG
2170  // Check that the kernel was launched correctly
2171  AssertCuda(cudaGetLastError());
2172  // Check that there was no problem during the execution of the kernel
2173  AssertCuda(cudaDeviceSynchronize());
2174 # endif
2175  }
2176 
2177  static void
2178  add_factor(
2179  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2180  const size_type size,
2181  Number a,
2182  ::MemorySpace::MemorySpaceData<Number,
2183  ::MemorySpace::CUDA>
2184  &data)
2185  {
2186  const int n_blocks = 1 + size / (chunk_size * block_size);
2187  ::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
2188  <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
2189 
2190 # ifdef DEBUG
2191  // Check that the kernel was launched correctly
2192  AssertCuda(cudaGetLastError());
2193  // Check that there was no problem during the execution of the kernel
2194  AssertCuda(cudaDeviceSynchronize());
2195 # endif
2196  }
2197 
2198  static void
2199  add_av(
2200  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2201  const size_type size,
2202  const Number a,
2203  const ::MemorySpace::
2204  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2205  ::MemorySpace::MemorySpaceData<Number,
2206  ::MemorySpace::CUDA>
2207  &data)
2208  {
2209  const int n_blocks = 1 + size / (chunk_size * block_size);
2210  ::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
2211  <<<n_blocks, block_size>>>(data.values_dev.get(),
2212  a,
2213  v_data.values_dev.get(),
2214  size);
2215 
2216 # ifdef DEBUG
2217  // Check that the kernel was launched correctly
2218  AssertCuda(cudaGetLastError());
2219  // Check that there was no problem during the execution of the kernel
2220  AssertCuda(cudaDeviceSynchronize());
2221 # endif
2222  }
2223 
2224  static void
2225  add_avpbw(
2226  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2227  const size_type size,
2228  const Number a,
2229  const Number b,
2230  const ::MemorySpace::
2231  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2232  const ::MemorySpace::
2233  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2234  ::MemorySpace::MemorySpaceData<Number,
2235  ::MemorySpace::CUDA>
2236  &data)
2237  {
2238  const int n_blocks = 1 + size / (chunk_size * block_size);
2239  ::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
2240  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2241  a,
2242  v_data.values_dev.get(),
2243  b,
2244  w_data.values_dev.get(),
2245  size);
2246 
2247 # ifdef DEBUG
2248  // Check that the kernel was launched correctly
2249  AssertCuda(cudaGetLastError());
2250  // Check that there was no problem during the execution of the kernel
2251  AssertCuda(cudaDeviceSynchronize());
2252 # endif
2253  }
2254 
2255  static void
2256  sadd_xv(
2257  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2258  const size_type size,
2259  const Number x,
2260  const ::MemorySpace::
2261  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2262  ::MemorySpace::MemorySpaceData<Number,
2263  ::MemorySpace::CUDA>
2264  &data)
2265  {
2266  const int n_blocks = 1 + size / (chunk_size * block_size);
2267  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2268  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2269  x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
2270 
2271 # ifdef DEBUG
2272  // Check that the kernel was launched correctly
2273  AssertCuda(cudaGetLastError());
2274  // Check that there was no problem during the execution of the kernel
2275  AssertCuda(cudaDeviceSynchronize());
2276 # endif
2277  }
2278 
2279  static void
2280  sadd_xav(
2281  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2282  const size_type size,
2283  const Number x,
2284  const Number a,
2285  const ::MemorySpace::
2286  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2287  ::MemorySpace::MemorySpaceData<Number,
2288  ::MemorySpace::CUDA>
2289  &data)
2290  {
2291  const int n_blocks = 1 + size / (chunk_size * block_size);
2292  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2293  <<<dim3(n_blocks, 1), dim3(block_size)>>>(
2294  x, data.values_dev.get(), a, v_data.values_dev.get(), size);
2295 
2296 # ifdef DEBUG
2297  // Check that the kernel was launched correctly
2298  AssertCuda(cudaGetLastError());
2299  // Check that there was no problem during the execution of the kernel
2300  AssertCuda(cudaDeviceSynchronize());
2301 # endif
2302  }
2303 
2304  static void
2305  sadd_xavbw(
2306  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2307  const size_type size,
2308  const Number x,
2309  const Number a,
2310  const Number b,
2311  const ::MemorySpace::
2312  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2313  const ::MemorySpace::
2314  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2315  ::MemorySpace::MemorySpaceData<Number,
2316  ::MemorySpace::CUDA>
2317  &data)
2318  {
2319  const int n_blocks = 1 + size / (chunk_size * block_size);
2320  ::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
2321  <<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
2322  data.values_dev.get(),
2323  a,
2324  v_data.values_dev.get(),
2325  b,
2326  w_data.values_dev.get(),
2327  size);
2328 
2329 # ifdef DEBUG
2330  // Check that the kernel was launched correctly
2331  AssertCuda(cudaGetLastError());
2332  // Check that there was no problem during the execution of the kernel
2333  AssertCuda(cudaDeviceSynchronize());
2334 # endif
2335  }
2336 
2337  static void
2338  multiply_factor(
2339  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2340  const size_type size,
2341  const Number factor,
2342  ::MemorySpace::MemorySpaceData<Number,
2343  ::MemorySpace::CUDA>
2344  &data)
2345  {
2346  const int n_blocks = 1 + size / (chunk_size * block_size);
2347  ::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
2348  <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
2349 
2350 # ifdef DEBUG
2351  // Check that the kernel was launched correctly
2352  AssertCuda(cudaGetLastError());
2353  // Check that there was no problem during the execution of the kernel
2354  AssertCuda(cudaDeviceSynchronize());
2355 # endif
2356  }
2357 
2358  static void
2359  scale(
2360  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2361  const size_type size,
2362  const ::MemorySpace::
2363  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2364  ::MemorySpace::MemorySpaceData<Number,
2365  ::MemorySpace::CUDA>
2366  &data)
2367  {
2368  const int n_blocks = 1 + size / (chunk_size * block_size);
2369  ::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
2370  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2371  v_data.values_dev.get(),
2372  size);
2373 
2374 # ifdef DEBUG
2375  // Check that the kernel was launched correctly
2376  AssertCuda(cudaGetLastError());
2377  // Check that there was no problem during the execution of the kernel
2378  AssertCuda(cudaDeviceSynchronize());
2379 # endif
2380  }
2381 
2382  static void
2383  equ_au(
2384  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2385  const size_type size,
2386  const Number a,
2387  const ::MemorySpace::
2388  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2389  ::MemorySpace::MemorySpaceData<Number,
2390  ::MemorySpace::CUDA>
2391  &data)
2392  {
2393  const int n_blocks = 1 + size / (chunk_size * block_size);
2394  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2395  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2396  a,
2397  v_data.values_dev.get(),
2398  size);
2399 
2400 # ifdef DEBUG
2401  // Check that the kernel was launched correctly
2402  AssertCuda(cudaGetLastError());
2403  // Check that there was no problem during the execution of the kernel
2404  AssertCuda(cudaDeviceSynchronize());
2405 # endif
2406  }
2407 
2408  static void
2409  equ_aubv(
2410  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2411  const size_type size,
2412  const Number a,
2413  const Number b,
2414  const ::MemorySpace::
2415  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2416  const ::MemorySpace::
2417  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2418  ::MemorySpace::MemorySpaceData<Number,
2419  ::MemorySpace::CUDA>
2420  &data)
2421  {
2422  const int n_blocks = 1 + size / (chunk_size * block_size);
2423  ::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
2424  <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
2425  a,
2426  v_data.values_dev.get(),
2427  b,
2428  w_data.values_dev.get(),
2429  size);
2430 
2431 # ifdef DEBUG
2432  // Check that the kernel was launched correctly
2433  AssertCuda(cudaGetLastError());
2434  // Check that there was no problem during the execution of the kernel
2435  AssertCuda(cudaDeviceSynchronize());
2436 # endif
2437  }
2438 
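// Note: the reduction-type operations below (dot, mean_value, norm_1,
// add_and_dot) all follow the same pattern: allocate a single Number on the
// device, zero it with cudaMemset, launch a reduction kernel that accumulates
// into it, copy the scalar back to the host with cudaMemcpy, and free the
// device buffer again.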
2439  static Number
2440  dot(const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2441  const size_type size,
2442  const ::MemorySpace::
2443  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2444  const ::MemorySpace::
2445  MemorySpaceData<Number, ::MemorySpace::CUDA>
2446  &data)
2447  {
2448  Number * result_device;
2449  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2450  AssertCuda(error_code);
2451  error_code = cudaMemset(result_device, 0, sizeof(Number));
2452  AssertCuda(error_code);
2453 
2454  const int n_blocks = 1 + size / (chunk_size * block_size);
2455  ::LinearAlgebra::CUDAWrappers::kernel::double_vector_reduction<
2456  Number,
2457  ::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
2458  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2459  data.values_dev.get(),
2460  v_data.values_dev.get(),
2461  static_cast<unsigned int>(
2462  size));
2463 
2464 # ifdef DEBUG
2465  // Check that the kernel was launched correctly
2466  AssertCuda(cudaGetLastError());
2467  // Check that there was no problem during the execution of the kernel
2468  AssertCuda(cudaDeviceSynchronize());
2469 # endif
2470 
2471  // Copy the result back to the host
2472  Number result;
2473  error_code = cudaMemcpy(&result,
2474  result_device,
2475  sizeof(Number),
2476  cudaMemcpyDeviceToHost);
2477  AssertCuda(error_code);
2478  // Free the memory on the device
2479  error_code = cudaFree(result_device);
2480  AssertCuda(error_code);
2481 
2482  AssertIsFinite(result);
2483 
2484  return result;
2485  }
2486 
2487  template <typename real_type>
2488  static void
2489  norm_2(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2490  & thread_loop_partitioner,
2491  const size_type size,
2492  real_type & sum,
2493  const ::MemorySpace::
2494  MemorySpaceData<Number, ::MemorySpace::CUDA>
2495  &data)
2496  {
2497  sum = dot(thread_loop_partitioner, size, data, data);
2498  }
2499 
2500  static Number
2501  mean_value(
2502  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2503  const size_type size,
2504  const ::MemorySpace::
2505  MemorySpaceData<Number, ::MemorySpace::CUDA> &data)
2506  {
2507  Number * result_device;
2508  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2509  AssertCuda(error_code);
2510  error_code = cudaMemset(result_device, 0, sizeof(Number));
2511 
2512  const int n_blocks = 1 + size / (chunk_size * block_size);
2513  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2514  Number,
2515  ::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
2516  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2517  data.values_dev.get(),
2518  size);
2519 
2520  // Copy the result back to the host
2521  Number result;
2522  error_code = cudaMemcpy(&result,
2523  result_device,
2524  sizeof(Number),
2525  cudaMemcpyDeviceToHost);
2526  AssertCuda(error_code);
2527  // Free the memory on the device
2528  error_code = cudaFree(result_device);
2529  AssertCuda(error_code);
2530 
2531  return result;
2532  }
2533 
2534  template <typename real_type>
2535  static void
2536  norm_1(
2537  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2538  const size_type size,
2539  real_type & sum,
2540  const ::MemorySpace::
2541  MemorySpaceData<Number, ::MemorySpace::CUDA>
2542  &data)
2543  {
2544  Number * result_device;
2545  cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
2546  AssertCuda(error_code);
2547  error_code = cudaMemset(result_device, 0, sizeof(Number));
2548 
2549  const int n_blocks = 1 + size / (chunk_size * block_size);
2550  ::LinearAlgebra::CUDAWrappers::kernel::reduction<
2551  Number,
2552  ::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
2553  <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
2554  data.values_dev.get(),
2555  size);
2556 
2557  // Copy the result back to the host
2558  error_code = cudaMemcpy(&sum,
2559  result_device,
2560  sizeof(Number),
2561  cudaMemcpyDeviceToHost);
2562  AssertCuda(error_code);
2563  // Free the memory on the device
2564  error_code = cudaFree(result_device);
2565  AssertCuda(error_code);
2566  }
2567 
2568  template <typename real_type>
2569  static void
2570  norm_p(
2571  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2572  const size_type,
2573  real_type &,
2574  real_type,
2575  const ::MemorySpace::MemorySpaceData<Number,
2576  ::MemorySpace::CUDA> &)
2577  {
2578  Assert(false, ExcNotImplemented());
2579  }
2580 
2581  static Number
2582  add_and_dot(
2583  const std::shared_ptr<::parallel::internal::TBBPartitioner> &,
2584  const size_type size,
2585  const Number a,
2586  const ::MemorySpace::
2587  MemorySpaceData<Number, ::MemorySpace::CUDA> &v_data,
2588  const ::MemorySpace::
2589  MemorySpaceData<Number, ::MemorySpace::CUDA> &w_data,
2590  ::MemorySpace::MemorySpaceData<Number,
2591  ::MemorySpace::CUDA>
2592  &data)
2593  {
2594  Number * res_d;
2595  cudaError_t error_code = cudaMalloc(&res_d, sizeof(Number));
2596  AssertCuda(error_code);
2597  error_code = cudaMemset(res_d, 0, sizeof(Number));
2598  AssertCuda(error_code);
2599 
2600  const int n_blocks = 1 + size / (chunk_size * block_size);
2601  ::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
2602  <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
2603  data.values_dev.get(),
2604  v_data.values_dev.get(),
2605  w_data.values_dev.get(),
2606  a,
2607  size);
2608 
2609  Number res;
2610  error_code =
2611  cudaMemcpy(&res, res_d, sizeof(Number), cudaMemcpyDeviceToHost);
2612  AssertCuda(error_code);
2613  error_code = cudaFree(res_d);
2614 
2615  return res;
2616  }
2617 
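// Note: as in the host specialization, import() dispatches on the memory
// space of the source data. Device-to-device transfers reuse copy() and
// add_vector() above; host-to-device transfers support only
// VectorOperation::insert and are performed with a single cudaMemcpy using
// cudaMemcpyHostToDevice.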
2618  template <typename MemorySpace2>
2619  static void
2620  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2621  & thread_loop_partitioner,
2622  const size_type size,
2623  VectorOperation::values operation,
2624  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2625  &v_data,
2626  ::MemorySpace::MemorySpaceData<Number,
2627  ::MemorySpace::CUDA>
2628  &data,
2629  typename std::enable_if<
2630  std::is_same<MemorySpace2, ::MemorySpace::CUDA>::value,
2631  int>::type = 0)
2632  {
2633  if (operation == VectorOperation::insert)
2634  {
2635  copy(thread_loop_partitioner, size, v_data, data);
2636  }
2637  else if (operation == VectorOperation::add)
2638  {
2639  add_vector(thread_loop_partitioner, size, v_data, data);
2640  }
2641  else
2642  {
2643  AssertThrow(false, ExcNotImplemented());
2644  }
2645  }
2646 
2647  template <typename MemorySpace2>
2648  static void
2649  import(const std::shared_ptr<::parallel::internal::TBBPartitioner>
2650  & /*thread_loop_partitioner*/,
2651  const size_type size,
2652  VectorOperation::values operation,
2653  const ::MemorySpace::MemorySpaceData<Number, MemorySpace2>
2654  &v_data,
2655  ::MemorySpace::MemorySpaceData<Number,
2656  ::MemorySpace::CUDA>
2657  &data,
2658  typename std::enable_if<
2659  std::is_same<MemorySpace2, ::MemorySpace::Host>::value,
2660  int>::type = 0)
2661  {
2662  if (operation == VectorOperation::insert)
2663  {
2664  cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
2665  v_data.values.get(),
2666  size * sizeof(Number),
2667  cudaMemcpyHostToDevice);
2668  AssertCuda(cuda_error_code);
2669  }
2670  else
2671  {
2672  AssertThrow(false, ExcNotImplemented());
2673  }
2674  }
2675  };
2676 #endif
2677  } // namespace VectorOperations
2678 } // namespace internal
2679 
2680 DEAL_II_NAMESPACE_CLOSE
2681 
2682 #endif