Reference documentation for deal.II version Git 1733d17138 2019-04-25 23:30:57 -0500
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/exceptions.h>
23 #include <deal.II/base/template_constraints.h>
24 
25 #include <cmath>
26 
27 // Note:
28 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
29 // according to the following scheme
30 // #ifdef __AVX512F__
31 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
32 // #elif defined (__AVX__)
33 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
34 // #elif defined (__SSE2__)
35 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
36 // #else
37 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
38 // #endif
39 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
40 // 'check_01_cpu_features.cmake', ensures that these features are not only
41 // present in the compilation unit but also working properly.
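// A minimal sketch of how client code can branch on this macro, with the lane
// counts taken from the specializations defined further down in this file
// (illustration only, not part of this header):
//
//   #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3        // AVX-512
//     static_assert(VectorizedArray<double>::n_array_elements == 8, "");
//   #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2      // AVX
//     static_assert(VectorizedArray<double>::n_array_elements == 4, "");
//   #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1      // SSE2
//     static_assert(VectorizedArray<double>::n_array_elements == 2, "");
//   #else                                                // no vectorization
//     static_assert(VectorizedArray<double>::n_array_elements == 1, "");
//   #endif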
42 
43 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__SSE2__) && \
44  !defined(__AVX__)
45 # error \
46  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
47 #endif
48 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__SSE2__) && \
49  !defined(__AVX512F__)
50 # error \
51  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
52 #endif
53 
54 #if defined(_MSC_VER)
55 # include <intrin.h>
56 #elif defined(__ALTIVEC__)
57 # include <altivec.h>
58 
59 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
60 // them before they make trouble
61 # undef vector
62 # undef pixel
63 # undef bool
64 #else
65 # include <x86intrin.h>
66 #endif
67 
68 DEAL_II_NAMESPACE_OPEN
69 
70 
71 namespace internal
72 {
86  template <typename T>
87  struct NumberType<VectorizedArray<T>>
88  {
89  static const VectorizedArray<T> &
90  value(const VectorizedArray<T> &t)
91  {
92  return t;
93  }
94 
95  static VectorizedArray<T>
96  value(const T &t)
97  {
98  VectorizedArray<T> tmp;
99  tmp = t;
100  return tmp;
101  }
102  };
103 } // namespace internal
104 
105 
106 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
107 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
108 
109 template <typename Number>
110 struct EnableIfScalar<VectorizedArray<Number>>
111 {
112  using type = VectorizedArray<Number>;
113 };
114 
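// A short sketch of what this specialization enables (illustration only; it
// assumes deal.II's Tensor class from <deal.II/base/tensor.h> is available):
//
//   Tensor<1, 3, VectorizedArray<double>> grad;   // one gradient per SIMD lane
//   grad[0] = make_vectorized_array(1.0);         // components are VectorizedArray<double>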
115 
116 
167 template <typename Number>
168 class VectorizedArray
169 {
170 public:
176  static const unsigned int n_array_elements = 1;
177 
178  // POD means that there should be no user-defined constructors, destructors
179  // and copy functions (the standard is somewhat relaxed in C++2011, though).
180 
184  DEAL_II_ALWAYS_INLINE
185  VectorizedArray &
186  operator=(const Number scalar)
187  {
188  data = scalar;
189  return *this;
190  }
191 
195  DEAL_II_ALWAYS_INLINE
196  Number &operator[](const unsigned int comp)
197  {
198  (void)comp;
199  AssertIndexRange(comp, 1);
200  return data;
201  }
202 
206  DEAL_II_ALWAYS_INLINE
207  const Number &operator[](const unsigned int comp) const
208  {
209  (void)comp;
210  AssertIndexRange(comp, 1);
211  return data;
212  }
213 
217  DEAL_II_ALWAYS_INLINE
218  VectorizedArray &
219  operator+=(const VectorizedArray &vec)
220  {
221  data += vec.data;
222  return *this;
223  }
224 
228  DEAL_II_ALWAYS_INLINE
229  VectorizedArray &
230  operator-=(const VectorizedArray &vec)
231  {
232  data -= vec.data;
233  return *this;
234  }
235 
239  DEAL_II_ALWAYS_INLINE
240  VectorizedArray &
241  operator*=(const VectorizedArray &vec)
242  {
243  data *= vec.data;
244  return *this;
245  }
246 
250  DEAL_II_ALWAYS_INLINE
251  VectorizedArray &
252  operator/=(const VectorizedArray &vec)
253  {
254  data /= vec.data;
255  return *this;
256  }
257 
264  DEAL_II_ALWAYS_INLINE
265  void
266  load(const Number *ptr)
267  {
268  data = *ptr;
269  }
270 
277  DEAL_II_ALWAYS_INLINE
278  void
279  store(Number *ptr) const
280  {
281  *ptr = data;
282  }
283 
328  DEAL_II_ALWAYS_INLINE
329  void
330  streaming_store(Number *ptr) const
331  {
332  *ptr = data;
333  }
334 
347  DEAL_II_ALWAYS_INLINE
348  void
349  gather(const Number *base_ptr, const unsigned int *offsets)
350  {
351  data = base_ptr[offsets[0]];
352  }
353 
366  DEAL_II_ALWAYS_INLINE
367  void
368  scatter(const unsigned int *offsets, Number *base_ptr) const
369  {
370  base_ptr[offsets[0]] = data;
371  }
372 
377  Number data;
378 
379 private:
384  DEAL_II_ALWAYS_INLINE
385  VectorizedArray
386  get_sqrt() const
387  {
388  VectorizedArray res;
389  res.data = std::sqrt(data);
390  return res;
391  }
392 
397  DEAL_II_ALWAYS_INLINE
398  VectorizedArray
399  get_abs() const
400  {
401  VectorizedArray res;
402  res.data = std::fabs(data);
403  return res;
404  }
405 
410  DEAL_II_ALWAYS_INLINE
411  VectorizedArray
412  get_max(const VectorizedArray &other) const
413  {
414  VectorizedArray res;
415  res.data = std::max(data, other.data);
416  return res;
417  }
418 
423  DEAL_II_ALWAYS_INLINE
424  VectorizedArray
425  get_min(const VectorizedArray &other) const
426  {
427  VectorizedArray res;
428  res.data = std::min(data, other.data);
429  return res;
430  }
431 
435  template <typename Number2>
436  friend VectorizedArray<Number2>
437  std::sqrt(const VectorizedArray<Number2> &);
438  template <typename Number2>
439  friend VectorizedArray<Number2>
440  std::abs(const VectorizedArray<Number2> &);
441  template <typename Number2>
442  friend VectorizedArray<Number2>
443  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
444  template <typename Number2>
445  friend VectorizedArray<Number2>
446  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
447 };
448 
449 // We need to have a separate declaration for static const members
450 template <typename Number>
451 const unsigned int VectorizedArray<Number>::n_array_elements;
452 
453 
454 
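// A sketch of the gather()/scatter() semantics defined by the class above
// (illustration only; 'base' and 'offsets' are hypothetical user arrays):
//
//   unsigned int offsets[VectorizedArray<double>::n_array_elements];
//   // ... fill offsets with pairwise distinct indices into 'base' ...
//   VectorizedArray<double> v;
//   v.gather(base, offsets);   // lane k reads  base[offsets[k]]
//   v.scatter(offsets, base);  // lane k writes base[offsets[k]]; offsets must be distinct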
461 template <typename Number>
462 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
463  make_vectorized_array(const Number &u)
464 {
465  VectorizedArray<Number> result;
466  result = u;
467  return result;
468 }
469 
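// Usage sketch for make_vectorized_array() and the element-wise operators
// (illustration only):
//
//   VectorizedArray<double> a = make_vectorized_array(2.0); // every lane holds 2.0
//   VectorizedArray<double> b;
//   b = 3.0;                                                // operator= broadcasts as well
//   a += b;                                                 // lane-wise addition: all lanes hold 5.0
//   const double first_lane = a[0];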
470 
471 
497 template <typename Number>
498 inline void
499 vectorized_load_and_transpose(const unsigned int n_entries,
500  const Number * in,
501  const unsigned int * offsets,
502  VectorizedArray<Number> * out)
503 {
504  for (unsigned int i = 0; i < n_entries; ++i)
505  for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements; ++v)
506  out[i][v] = in[offsets[v] + i];
507 }
508 
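// A concrete sketch of the access pattern implemented above (illustration
// only; 'in' is a hypothetical array of doubles and the lane count of 4
// assumes an AVX build):
//
//   const unsigned int offsets[4] = {0, 40, 80, 120};  // start of each input stream
//   VectorizedArray<double> packed[10];
//   vectorized_load_and_transpose(10, in, offsets, packed);
//   // afterwards packed[i][v] == in[offsets[v] + i] for i < 10 and v < 4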
509 
510 
549 template <typename Number>
550 inline void
551 vectorized_transpose_and_store(const bool add_into,
552  const unsigned int n_entries,
553  const VectorizedArray<Number> *in,
554  const unsigned int * offsets,
555  Number * out)
556 {
557  if (add_into)
558  for (unsigned int i = 0; i < n_entries; ++i)
559  for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
560  ++v)
561  out[offsets[v] + i] += in[i][v];
562  else
563  for (unsigned int i = 0; i < n_entries; ++i)
564  for (unsigned int v = 0; v < VectorizedArray<Number>::n_array_elements;
565  ++v)
566  out[offsets[v] + i] = in[i][v];
567 }
568 
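// The inverse operation, sketched under the same hypothetical setup as the
// example above (illustration only):
//
//   double dst[160] = {};
//   vectorized_transpose_and_store(true, 10, packed, offsets, dst);
//   // with add_into == true this performs dst[offsets[v] + i] += packed[i][v];
//   // with add_into == false the values are assigned instead of accumulated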
569 
570 
571 // for safety, also check that __AVX512F__ is defined in case the user manually
572 // set some conflicting compile flags which prevent compilation
573 
574 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
575 
579 template <>
580 class VectorizedArray<double>
581 {
582 public:
586  static const unsigned int n_array_elements = 8;
587 
591  DEAL_II_ALWAYS_INLINE
592  VectorizedArray &
593  operator=(const double x)
594  {
595  data = _mm512_set1_pd(x);
596  return *this;
597  }
598 
602  DEAL_II_ALWAYS_INLINE
603  double &operator[](const unsigned int comp)
604  {
605  AssertIndexRange(comp, 8);
606  return *(reinterpret_cast<double *>(&data) + comp);
607  }
608 
612  DEAL_II_ALWAYS_INLINE
613  const double &operator[](const unsigned int comp) const
614  {
615  AssertIndexRange(comp, 8);
616  return *(reinterpret_cast<const double *>(&data) + comp);
617  }
618 
622  DEAL_II_ALWAYS_INLINE
623  VectorizedArray &
624  operator+=(const VectorizedArray &vec)
625  {
626  // if the compiler supports vector arithmetics, we can simply use +=
627  // operator on the given data type. this allows the compiler to combine
628  // additions with multiplication (fused multiply-add) if those
629  // instructions are available. Otherwise, we need to use the built-in
630  // intrinsic command for __m512d
631 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
632  data += vec.data;
633 # else
634  data = _mm512_add_pd(data, vec.data);
635 # endif
636  return *this;
637  }
638 
642  DEAL_II_ALWAYS_INLINE
643  VectorizedArray &
644  operator-=(const VectorizedArray &vec)
645  {
646 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
647  data -= vec.data;
648 # else
649  data = _mm512_sub_pd(data, vec.data);
650 # endif
651  return *this;
652  }
656  DEAL_II_ALWAYS_INLINE
657  VectorizedArray &
658  operator*=(const VectorizedArray &vec)
659  {
660 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
661  data *= vec.data;
662 # else
663  data = _mm512_mul_pd(data, vec.data);
664 # endif
665  return *this;
666  }
667 
671  DEAL_II_ALWAYS_INLINE
672  VectorizedArray &
673  operator/=(const VectorizedArray &vec)
674  {
675 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
676  data /= vec.data;
677 # else
678  data = _mm512_div_pd(data, vec.data);
679 # endif
680  return *this;
681  }
682 
688  DEAL_II_ALWAYS_INLINE
689  void
690  load(const double *ptr)
691  {
692  data = _mm512_loadu_pd(ptr);
693  }
694 
701  DEAL_II_ALWAYS_INLINE
702  void
703  store(double *ptr) const
704  {
705  _mm512_storeu_pd(ptr, data);
706  }
707 
711  DEAL_II_ALWAYS_INLINE
712  void
713  streaming_store(double *ptr) const
714  {
715  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
716  ExcMessage("Memory not aligned"));
717  _mm512_stream_pd(ptr, data);
718  }
719 
732  DEAL_II_ALWAYS_INLINE
733  void
734  gather(const double *base_ptr, const unsigned int *offsets)
735  {
736  // unfortunately, there does not appear to be a 256 bit integer load, so
737  // do it by some reinterpret casts here. this is allowed because the Intel
738  // API allows aliasing between different vector types.
739  const __m256 index_val =
740  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
741  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
742  data = _mm512_i32gather_pd(index, base_ptr, 8);
743  }
744 
757  DEAL_II_ALWAYS_INLINE
758  void
759  scatter(const unsigned int *offsets, double *base_ptr) const
760  {
761  for (unsigned int i = 0; i < 8; ++i)
762  for (unsigned int j = i + 1; j < 8; ++j)
763  Assert(offsets[i] != offsets[j],
764  ExcMessage("Result of scatter undefined if two offset elements"
765  " point to the same position"));
766 
767  // unfortunately, there does not appear to be a 256 bit integer load, so
768  // do it by some reinterpret casts here. this is allowed because the Intel
769  // API allows aliasing between different vector types.
770  const __m256 index_val =
771  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
772  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
773  _mm512_i32scatter_pd(base_ptr, index, data, 8);
774  }
775 
780  __m512d data;
781 
782 private:
787  DEAL_II_ALWAYS_INLINE
788  VectorizedArray
789  get_sqrt() const
790  {
791  VectorizedArray res;
792  res.data = _mm512_sqrt_pd(data);
793  return res;
794  }
795 
800  DEAL_II_ALWAYS_INLINE
801  VectorizedArray
802  get_abs() const
803  {
804  // to compute the absolute value, perform bitwise andnot with -0. This
805  // will leave all value and exponent bits unchanged but force the sign
806  // value to +. Since there is no andnot for AVX512, we interpret the data
807  // as 64 bit integers and do the andnot on those types (note that andnot
808  // is a bitwise operation so the data type does not matter)
809  __m512d mask = _mm512_set1_pd(-0.);
810  VectorizedArray res;
811  res.data = reinterpret_cast<__m512d>(
812  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
813  reinterpret_cast<__m512i>(data)));
814  return res;
815  }
816 
821  DEAL_II_ALWAYS_INLINE
822  VectorizedArray
823  get_max(const VectorizedArray &other) const
824  {
825  VectorizedArray res;
826  res.data = _mm512_max_pd(data, other.data);
827  return res;
828  }
829 
834  DEAL_II_ALWAYS_INLINE
835  VectorizedArray
836  get_min(const VectorizedArray &other) const
837  {
838  VectorizedArray res;
839  res.data = _mm512_min_pd(data, other.data);
840  return res;
841  }
842 
846  template <typename Number2>
847  friend VectorizedArray<Number2>
848  std::sqrt(const VectorizedArray<Number2> &);
849  template <typename Number2>
850  friend VectorizedArray<Number2>
851  std::abs(const VectorizedArray<Number2> &);
852  template <typename Number2>
853  friend VectorizedArray<Number2>
854  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
855  template <typename Number2>
856  friend VectorizedArray<Number2>
857  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
858 };
859 
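// A scalar sketch of the sign-bit trick used by get_abs() above (illustration
// only; it assumes <cstdint> and <cstring>):
//
//   // -0.0 has only the sign bit set, and andnot(mask, x) computes (~mask) & x,
//   // so exactly that bit is cleared while mantissa and exponent stay intact:
//   double x = -3.5;
//   std::uint64_t bits;
//   std::memcpy(&bits, &x, sizeof(double));
//   bits &= ~(std::uint64_t(1) << 63);       // clear the sign bit
//   std::memcpy(&x, &bits, sizeof(double));  // x now equals std::fabs(-3.5)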
860 
861 
865 template <>
866 inline void
867 vectorized_load_and_transpose(const unsigned int n_entries,
868  const double * in,
869  const unsigned int * offsets,
870  VectorizedArray<double> * out)
871 {
872  const unsigned int n_chunks = n_entries / 4;
873  for (unsigned int outer = 0; outer < 8; outer += 4)
874  {
875  const double *in0 = in + offsets[0 + outer];
876  const double *in1 = in + offsets[1 + outer];
877  const double *in2 = in + offsets[2 + outer];
878  const double *in3 = in + offsets[3 + outer];
879 
880  for (unsigned int i = 0; i < n_chunks; ++i)
881  {
882  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
883  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
884  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
885  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
886  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
887  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
888  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
889  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
890  *reinterpret_cast<__m256d *>(
891  reinterpret_cast<double *>(&out[4 * i + 0].data) + outer) =
892  _mm256_unpacklo_pd(t0, t1);
893  *reinterpret_cast<__m256d *>(
894  reinterpret_cast<double *>(&out[4 * i + 1].data) + outer) =
895  _mm256_unpackhi_pd(t0, t1);
896  *reinterpret_cast<__m256d *>(
897  reinterpret_cast<double *>(&out[4 * i + 2].data) + outer) =
898  _mm256_unpacklo_pd(t2, t3);
899  *reinterpret_cast<__m256d *>(
900  reinterpret_cast<double *>(&out[4 * i + 3].data) + outer) =
901  _mm256_unpackhi_pd(t2, t3);
902  }
903  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
904  for (unsigned int v = 0; v < 4; ++v)
905  out[i][outer + v] = in[offsets[v + outer] + i];
906  }
907 }
908 
909 
910 
914 template <>
915 inline void
916 vectorized_transpose_and_store(const bool add_into,
917  const unsigned int n_entries,
918  const VectorizedArray<double> *in,
919  const unsigned int * offsets,
920  double * out)
921 {
922  const unsigned int n_chunks = n_entries / 4;
923  // do not do full transpose because the code is too long and will most
924  // likely not pay off. rather do the transposition on the vectorized array
925  // of smaller size, __m256d
926  for (unsigned int outer = 0; outer < 8; outer += 4)
927  {
928  double *out0 = out + offsets[0 + outer];
929  double *out1 = out + offsets[1 + outer];
930  double *out2 = out + offsets[2 + outer];
931  double *out3 = out + offsets[3 + outer];
932  for (unsigned int i = 0; i < n_chunks; ++i)
933  {
934  __m256d u0 = *reinterpret_cast<const __m256d *>(
935  reinterpret_cast<const double *>(&in[4 * i + 0].data) + outer);
936  __m256d u1 = *reinterpret_cast<const __m256d *>(
937  reinterpret_cast<const double *>(&in[4 * i + 1].data) + outer);
938  __m256d u2 = *reinterpret_cast<const __m256d *>(
939  reinterpret_cast<const double *>(&in[4 * i + 2].data) + outer);
940  __m256d u3 = *reinterpret_cast<const __m256d *>(
941  reinterpret_cast<const double *>(&in[4 * i + 3].data) + outer);
942  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
943  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
944  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
945  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
946  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
947  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
948  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
949  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
950 
951  // Cannot use the same store instructions in both paths of the 'if'
952  // because the compiler cannot know that there is no aliasing between
953  // pointers
954  if (add_into)
955  {
956  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
957  _mm256_storeu_pd(out0 + 4 * i, res0);
958  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
959  _mm256_storeu_pd(out1 + 4 * i, res1);
960  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
961  _mm256_storeu_pd(out2 + 4 * i, res2);
962  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
963  _mm256_storeu_pd(out3 + 4 * i, res3);
964  }
965  else
966  {
967  _mm256_storeu_pd(out0 + 4 * i, res0);
968  _mm256_storeu_pd(out1 + 4 * i, res1);
969  _mm256_storeu_pd(out2 + 4 * i, res2);
970  _mm256_storeu_pd(out3 + 4 * i, res3);
971  }
972  }
973  if (add_into)
974  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
975  for (unsigned int v = 0; v < 4; ++v)
976  out[offsets[v + outer] + i] += in[i][v + outer];
977  else
978  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
979  for (unsigned int v = 0; v < 4; ++v)
980  out[offsets[v + outer] + i] = in[i][v + outer];
981  }
982 }
983 
984 
985 
989 template <>
990 class VectorizedArray<float>
991 {
992 public:
996  static const unsigned int n_array_elements = 16;
997 
1001  DEAL_II_ALWAYS_INLINE
1002  VectorizedArray &
1003  operator=(const float x)
1004  {
1005  data = _mm512_set1_ps(x);
1006  return *this;
1007  }
1008 
1012  DEAL_II_ALWAYS_INLINE
1013  float &operator[](const unsigned int comp)
1014  {
1015  AssertIndexRange(comp, 16);
1016  return *(reinterpret_cast<float *>(&data) + comp);
1017  }
1018 
1022  DEAL_II_ALWAYS_INLINE
1023  const float &operator[](const unsigned int comp) const
1024  {
1025  AssertIndexRange(comp, 16);
1026  return *(reinterpret_cast<const float *>(&data) + comp);
1027  }
1028 
1032  DEAL_II_ALWAYS_INLINE
1033  VectorizedArray &
1034  operator+=(const VectorizedArray &vec)
1035  {
1036  // if the compiler supports vector arithmetics, we can simply use +=
1037  // operator on the given data type. this allows the compiler to combine
1038  // additions with multiplication (fused multiply-add) if those
1039  // instructions are available. Otherwise, we need to use the built-in
1040  // intrinsic command for __m512
1041 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1042  data += vec.data;
1043 # else
1044  data = _mm512_add_ps(data, vec.data);
1045 # endif
1046  return *this;
1047  }
1048 
1052  DEAL_II_ALWAYS_INLINE
1053  VectorizedArray &
1054  operator-=(const VectorizedArray &vec)
1055  {
1056 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1057  data -= vec.data;
1058 # else
1059  data = _mm512_sub_ps(data, vec.data);
1060 # endif
1061  return *this;
1062  }
1066  DEAL_II_ALWAYS_INLINE
1067  VectorizedArray &
1068  operator*=(const VectorizedArray &vec)
1069  {
1070 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1071  data *= vec.data;
1072 # else
1073  data = _mm512_mul_ps(data, vec.data);
1074 # endif
1075  return *this;
1076  }
1077 
1081  DEAL_II_ALWAYS_INLINE
1082  VectorizedArray &
1083  operator/=(const VectorizedArray &vec)
1084  {
1085 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1086  data /= vec.data;
1087 # else
1088  data = _mm512_div_ps(data, vec.data);
1089 # endif
1090  return *this;
1091  }
1092 
1098  DEAL_II_ALWAYS_INLINE
1099  void
1100  load(const float *ptr)
1101  {
1102  data = _mm512_loadu_ps(ptr);
1103  }
1104 
1111  DEAL_II_ALWAYS_INLINE
1112  void
1113  store(float *ptr) const
1114  {
1115  _mm512_storeu_ps(ptr, data);
1116  }
1117 
1121  DEAL_II_ALWAYS_INLINE
1122  void
1123  streaming_store(float *ptr) const
1124  {
1125  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1126  ExcMessage("Memory not aligned"));
1127  _mm512_stream_ps(ptr, data);
1128  }
1129 
1142  DEAL_II_ALWAYS_INLINE
1143  void
1144  gather(const float *base_ptr, const unsigned int *offsets)
1145  {
1146  // unfortunately, there does not appear to be a 512 bit integer load, so
1147  // do it by some reinterpret casts here. this is allowed because the Intel
1148  // API allows aliasing between different vector types.
1149  const __m512 index_val =
1150  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1151  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1152  data = _mm512_i32gather_ps(index, base_ptr, 4);
1153  }
1154 
1167  DEAL_II_ALWAYS_INLINE
1168  void
1169  scatter(const unsigned int *offsets, float *base_ptr) const
1170  {
1171  for (unsigned int i = 0; i < 16; ++i)
1172  for (unsigned int j = i + 1; j < 16; ++j)
1173  Assert(offsets[i] != offsets[j],
1174  ExcMessage("Result of scatter undefined if two offset elements"
1175  " point to the same position"));
1176 
1177  // unfortunately, there does not appear to be a 512 bit integer load, so
1178  // do it by some reinterpret casts here. this is allowed because the Intel
1179  // API allows aliasing between different vector types.
1180  const __m512 index_val =
1181  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1182  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1183  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1184  }
1185 
1190  __m512 data;
1191 
1192 private:
1197  DEAL_II_ALWAYS_INLINE
1198  VectorizedArray
1199  get_sqrt() const
1200  {
1201  VectorizedArray res;
1202  res.data = _mm512_sqrt_ps(data);
1203  return res;
1204  }
1205 
1210  DEAL_II_ALWAYS_INLINE
1211  VectorizedArray
1212  get_abs() const
1213  {
1214  // to compute the absolute value, perform bitwise andnot with -0. This
1215  // will leave all value and exponent bits unchanged but force the sign
1216  // value to +. Since there is no andnot for AVX512, we interpret the data
1217  // as 32 bit integers and do the andnot on those types (note that andnot
1218  // is a bitwise operation so the data type does not matter)
1219  __m512 mask = _mm512_set1_ps(-0.f);
1220  VectorizedArray res;
1221  res.data = reinterpret_cast<__m512>(
1222  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1223  reinterpret_cast<__m512i>(data)));
1224  return res;
1225  }
1226 
1231  DEAL_II_ALWAYS_INLINE
1232  VectorizedArray
1233  get_max(const VectorizedArray &other) const
1234  {
1235  VectorizedArray res;
1236  res.data = _mm512_max_ps(data, other.data);
1237  return res;
1238  }
1239 
1244  DEAL_II_ALWAYS_INLINE
1245  VectorizedArray
1246  get_min(const VectorizedArray &other) const
1247  {
1248  VectorizedArray res;
1249  res.data = _mm512_min_ps(data, other.data);
1250  return res;
1251  }
1252 
1256  template <typename Number2>
1257  friend VectorizedArray<Number2>
1258  std::sqrt(const VectorizedArray<Number2> &);
1259  template <typename Number2>
1260  friend VectorizedArray<Number2>
1261  std::abs(const VectorizedArray<Number2> &);
1262  template <typename Number2>
1263  friend VectorizedArray<Number2>
1264  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1265  template <typename Number2>
1266  friend VectorizedArray<Number2>
1267  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1268 };
1269 
1270 
1271 
1275 template <>
1276 inline void
1277 vectorized_load_and_transpose(const unsigned int n_entries,
1278  const float * in,
1279  const unsigned int * offsets,
1280  VectorizedArray<float> * out)
1281 {
1282  const unsigned int n_chunks = n_entries / 4;
1283  for (unsigned int outer = 0; outer < 16; outer += 8)
1284  {
1285  for (unsigned int i = 0; i < n_chunks; ++i)
1286  {
1287  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0 + outer]);
1288  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1 + outer]);
1289  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2 + outer]);
1290  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3 + outer]);
1291  __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4 + outer]);
1292  __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5 + outer]);
1293  __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6 + outer]);
1294  __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7 + outer]);
1295  // To avoid warnings about uninitialized variables, need to initialize
1296  // one variable with zero before using it.
1297  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1298  t0 = _mm256_insertf128_ps(t3, u0, 0);
1299  t0 = _mm256_insertf128_ps(t0, u4, 1);
1300  t1 = _mm256_insertf128_ps(t3, u1, 0);
1301  t1 = _mm256_insertf128_ps(t1, u5, 1);
1302  t2 = _mm256_insertf128_ps(t3, u2, 0);
1303  t2 = _mm256_insertf128_ps(t2, u6, 1);
1304  t3 = _mm256_insertf128_ps(t3, u3, 0);
1305  t3 = _mm256_insertf128_ps(t3, u7, 1);
1306  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
1307  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
1308  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
1309  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
1310  *reinterpret_cast<__m256 *>(
1311  reinterpret_cast<float *>(&out[4 * i + 0].data) + outer) =
1312  _mm256_shuffle_ps(v0, v2, 0x88);
1313  *reinterpret_cast<__m256 *>(
1314  reinterpret_cast<float *>(&out[4 * i + 1].data) + outer) =
1315  _mm256_shuffle_ps(v0, v2, 0xdd);
1316  *reinterpret_cast<__m256 *>(
1317  reinterpret_cast<float *>(&out[4 * i + 2].data) + outer) =
1318  _mm256_shuffle_ps(v1, v3, 0x88);
1319  *reinterpret_cast<__m256 *>(
1320  reinterpret_cast<float *>(&out[4 * i + 3].data) + outer) =
1321  _mm256_shuffle_ps(v1, v3, 0xdd);
1322  }
1323  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1324  for (unsigned int v = 0; v < 8; ++v)
1325  out[i][v + outer] = in[offsets[v + outer] + i];
1326  }
1327 }
1328 
1329 
1330 
1334 template <>
1335 inline void
1336 vectorized_transpose_and_store(const bool add_into,
1337  const unsigned int n_entries,
1338  const VectorizedArray<float> *in,
1339  const unsigned int * offsets,
1340  float * out)
1341 {
1342  const unsigned int n_chunks = n_entries / 4;
1343  for (unsigned int outer = 0; outer < 16; outer += 8)
1344  {
1345  for (unsigned int i = 0; i < n_chunks; ++i)
1346  {
1347  __m256 u0 = *reinterpret_cast<const __m256 *>(
1348  reinterpret_cast<const float *>(&in[4 * i + 0].data) + outer);
1349  __m256 u1 = *reinterpret_cast<const __m256 *>(
1350  reinterpret_cast<const float *>(&in[4 * i + 1].data) + outer);
1351  __m256 u2 = *reinterpret_cast<const __m256 *>(
1352  reinterpret_cast<const float *>(&in[4 * i + 2].data) + outer);
1353  __m256 u3 = *reinterpret_cast<const __m256 *>(
1354  reinterpret_cast<const float *>(&in[4 * i + 3].data) + outer);
1355  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
1356  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
1357  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
1358  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
1359  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
1360  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
1361  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
1362  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
1363  __m128 res0 = _mm256_extractf128_ps(u0, 0);
1364  __m128 res4 = _mm256_extractf128_ps(u0, 1);
1365  __m128 res1 = _mm256_extractf128_ps(u1, 0);
1366  __m128 res5 = _mm256_extractf128_ps(u1, 1);
1367  __m128 res2 = _mm256_extractf128_ps(u2, 0);
1368  __m128 res6 = _mm256_extractf128_ps(u2, 1);
1369  __m128 res3 = _mm256_extractf128_ps(u3, 0);
1370  __m128 res7 = _mm256_extractf128_ps(u3, 1);
1371 
1372  // Cannot use the same store instructions in both paths of the 'if'
1373  // because the compiler cannot know that there is no aliasing between
1374  // pointers
1375  if (add_into)
1376  {
1377  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0 + outer]),
1378  res0);
1379  _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
1380  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1 + outer]),
1381  res1);
1382  _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
1383  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2 + outer]),
1384  res2);
1385  _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
1386  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3 + outer]),
1387  res3);
1388  _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
1389  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4 + outer]),
1390  res4);
1391  _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
1392  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5 + outer]),
1393  res5);
1394  _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
1395  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6 + outer]),
1396  res6);
1397  _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
1398  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7 + outer]),
1399  res7);
1400  _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
1401  }
1402  else
1403  {
1404  _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
1405  _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
1406  _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
1407  _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
1408  _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
1409  _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
1410  _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
1411  _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
1412  }
1413  }
1414  if (add_into)
1415  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1416  for (unsigned int v = 0; v < 8; ++v)
1417  out[offsets[v + outer] + i] += in[i][v + outer];
1418  else
1419  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1420  for (unsigned int v = 0; v < 8; ++v)
1421  out[offsets[v + outer] + i] = in[i][v + outer];
1422  }
1423 }
1424 
1425 
1426 
1427 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1428 
1432 template <>
1433 class VectorizedArray<double>
1434 {
1435 public:
1439  static const unsigned int n_array_elements = 4;
1440 
1444  DEAL_II_ALWAYS_INLINE
1445  VectorizedArray &
1446  operator=(const double x)
1447  {
1448  data = _mm256_set1_pd(x);
1449  return *this;
1450  }
1451 
1455  DEAL_II_ALWAYS_INLINE
1456  double &operator[](const unsigned int comp)
1457  {
1458  AssertIndexRange(comp, 4);
1459  return *(reinterpret_cast<double *>(&data) + comp);
1460  }
1461 
1465  DEAL_II_ALWAYS_INLINE
1466  const double &operator[](const unsigned int comp) const
1467  {
1468  AssertIndexRange(comp, 4);
1469  return *(reinterpret_cast<const double *>(&data) + comp);
1470  }
1471 
1475  DEAL_II_ALWAYS_INLINE
1476  VectorizedArray &
1477  operator+=(const VectorizedArray &vec)
1478  {
1479  // if the compiler supports vector arithmetics, we can simply use +=
1480  // operator on the given data type. this allows the compiler to combine
1481  // additions with multiplication (fused multiply-add) if those
1482  // instructions are available. Otherwise, we need to use the built-in
1483  // intrinsic command for __m256d
1484 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1485  data += vec.data;
1486 # else
1487  data = _mm256_add_pd(data, vec.data);
1488 # endif
1489  return *this;
1490  }
1491 
1495  DEAL_II_ALWAYS_INLINE
1496  VectorizedArray &
1497  operator-=(const VectorizedArray &vec)
1498  {
1499 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1500  data -= vec.data;
1501 # else
1502  data = _mm256_sub_pd(data, vec.data);
1503 # endif
1504  return *this;
1505  }
1509  DEAL_II_ALWAYS_INLINE
1510  VectorizedArray &
1511  operator*=(const VectorizedArray &vec)
1512  {
1513 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1514  data *= vec.data;
1515 # else
1516  data = _mm256_mul_pd(data, vec.data);
1517 # endif
1518  return *this;
1519  }
1520 
1524  DEAL_II_ALWAYS_INLINE
1525  VectorizedArray &
1526  operator/=(const VectorizedArray &vec)
1527  {
1528 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1529  data /= vec.data;
1530 # else
1531  data = _mm256_div_pd(data, vec.data);
1532 # endif
1533  return *this;
1534  }
1535 
1541  DEAL_II_ALWAYS_INLINE
1542  void
1543  load(const double *ptr)
1544  {
1545  data = _mm256_loadu_pd(ptr);
1546  }
1547 
1554  DEAL_II_ALWAYS_INLINE
1555  void
1556  store(double *ptr) const
1557  {
1558  _mm256_storeu_pd(ptr, data);
1559  }
1560 
1564  DEAL_II_ALWAYS_INLINE
1565  void
1566  streaming_store(double *ptr) const
1567  {
1568  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1569  ExcMessage("Memory not aligned"));
1570  _mm256_stream_pd(ptr, data);
1571  }
1572 
1585  DEAL_II_ALWAYS_INLINE
1586  void
1587  gather(const double *base_ptr, const unsigned int *offsets)
1588  {
1589 # ifdef __AVX2__
1590  // unfortunately, there does not appear to be a 128 bit integer load, so
1591  // do it by some reinterpret casts here. this is allowed because the Intel
1592  // API allows aliasing between different vector types.
1593  const __m128 index_val =
1594  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
1595  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
1596  data = _mm256_i32gather_pd(base_ptr, index, 8);
1597 # else
1598  for (unsigned int i = 0; i < 4; ++i)
1599  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1600 # endif
1601  }
1602 
1615  DEAL_II_ALWAYS_INLINE
1616  void
1617  scatter(const unsigned int *offsets, double *base_ptr) const
1618  {
1619  // no scatter operation in AVX/AVX2
1620  for (unsigned int i = 0; i < 4; ++i)
1621  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1622  }
1623 
1628  __m256d data;
1629 
1630 private:
1635  DEAL_II_ALWAYS_INLINE
1636  VectorizedArray
1637  get_sqrt() const
1638  {
1639  VectorizedArray res;
1640  res.data = _mm256_sqrt_pd(data);
1641  return res;
1642  }
1643 
1648  DEAL_II_ALWAYS_INLINE
1649  VectorizedArray
1650  get_abs() const
1651  {
1652  // to compute the absolute value, perform bitwise andnot with -0. This
1653  // will leave all value and exponent bits unchanged but force the sign
1654  // value to +.
1655  __m256d mask = _mm256_set1_pd(-0.);
1656  VectorizedArray res;
1657  res.data = _mm256_andnot_pd(mask, data);
1658  return res;
1659  }
1660 
1665  DEAL_II_ALWAYS_INLINE
1666  VectorizedArray
1667  get_max(const VectorizedArray &other) const
1668  {
1669  VectorizedArray res;
1670  res.data = _mm256_max_pd(data, other.data);
1671  return res;
1672  }
1673 
1678  DEAL_II_ALWAYS_INLINE
1679  VectorizedArray
1680  get_min(const VectorizedArray &other) const
1681  {
1682  VectorizedArray res;
1683  res.data = _mm256_min_pd(data, other.data);
1684  return res;
1685  }
1686 
1690  template <typename Number2>
1691  friend VectorizedArray<Number2>
1692  std::sqrt(const VectorizedArray<Number2> &);
1693  template <typename Number2>
1694  friend VectorizedArray<Number2>
1695  std::abs(const VectorizedArray<Number2> &);
1696  template <typename Number2>
1697  friend VectorizedArray<Number2>
1698  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1699  template <typename Number2>
1700  friend VectorizedArray<Number2>
1701  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1702 };
1703 
1704 
1705 
1709 template <>
1710 inline void
1711 vectorized_load_and_transpose(const unsigned int n_entries,
1712  const double * in,
1713  const unsigned int * offsets,
1714  VectorizedArray<double> * out)
1715 {
1716  const unsigned int n_chunks = n_entries / 4;
1717  const double * in0 = in + offsets[0];
1718  const double * in1 = in + offsets[1];
1719  const double * in2 = in + offsets[2];
1720  const double * in3 = in + offsets[3];
1721 
1722  for (unsigned int i = 0; i < n_chunks; ++i)
1723  {
1724  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
1725  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
1726  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
1727  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
1728  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
1729  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
1730  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
1731  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
1732  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
1733  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
1734  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
1735  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
1736  }
1737  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1738  for (unsigned int v = 0; v < 4; ++v)
1739  out[i][v] = in[offsets[v] + i];
1740 }
1741 
1742 
1743 
1747 template <>
1748 inline void
1749 vectorized_transpose_and_store(const bool add_into,
1750  const unsigned int n_entries,
1751  const VectorizedArray<double> *in,
1752  const unsigned int * offsets,
1753  double * out)
1754 {
1755  const unsigned int n_chunks = n_entries / 4;
1756  double * out0 = out + offsets[0];
1757  double * out1 = out + offsets[1];
1758  double * out2 = out + offsets[2];
1759  double * out3 = out + offsets[3];
1760  for (unsigned int i = 0; i < n_chunks; ++i)
1761  {
1762  __m256d u0 = in[4 * i + 0].data;
1763  __m256d u1 = in[4 * i + 1].data;
1764  __m256d u2 = in[4 * i + 2].data;
1765  __m256d u3 = in[4 * i + 3].data;
1766  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
1767  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
1768  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
1769  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
1770  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
1771  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
1772  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
1773  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
1774 
1775  // Cannot use the same store instructions in both paths of the 'if'
1776  // because the compiler cannot know that there is no aliasing between
1777  // pointers
1778  if (add_into)
1779  {
1780  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
1781  _mm256_storeu_pd(out0 + 4 * i, res0);
1782  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
1783  _mm256_storeu_pd(out1 + 4 * i, res1);
1784  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
1785  _mm256_storeu_pd(out2 + 4 * i, res2);
1786  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
1787  _mm256_storeu_pd(out3 + 4 * i, res3);
1788  }
1789  else
1790  {
1791  _mm256_storeu_pd(out0 + 4 * i, res0);
1792  _mm256_storeu_pd(out1 + 4 * i, res1);
1793  _mm256_storeu_pd(out2 + 4 * i, res2);
1794  _mm256_storeu_pd(out3 + 4 * i, res3);
1795  }
1796  }
1797  if (add_into)
1798  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1799  for (unsigned int v = 0; v < 4; ++v)
1800  out[offsets[v] + i] += in[i][v];
1801  else
1802  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1803  for (unsigned int v = 0; v < 4; ++v)
1804  out[offsets[v] + i] = in[i][v];
1805 }
1806 
1807 
1808 
1812 template <>
1813 class VectorizedArray<float>
1814 {
1815 public:
1819  static const unsigned int n_array_elements = 8;
1820 
1824  DEAL_II_ALWAYS_INLINE
1825  VectorizedArray &
1826  operator=(const float x)
1827  {
1828  data = _mm256_set1_ps(x);
1829  return *this;
1830  }
1831 
1835  DEAL_II_ALWAYS_INLINE
1836  float &operator[](const unsigned int comp)
1837  {
1838  AssertIndexRange(comp, 8);
1839  return *(reinterpret_cast<float *>(&data) + comp);
1840  }
1841 
1845  DEAL_II_ALWAYS_INLINE
1846  const float &operator[](const unsigned int comp) const
1847  {
1848  AssertIndexRange(comp, 8);
1849  return *(reinterpret_cast<const float *>(&data) + comp);
1850  }
1851 
1855  DEAL_II_ALWAYS_INLINE
1856  VectorizedArray &
1857  operator+=(const VectorizedArray &vec)
1858  {
1859  // if the compiler supports vector arithmetics, we can simply use +=
1860  // operator on the given data type. this allows the compiler to combine
1861  // additions with multiplication (fused multiply-add) if those
1862  // instructions are available. Otherwise, we need to use the built-in
1863  // intrinsic command for __m256
1864 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1865  data += vec.data;
1866 # else
1867  data = _mm256_add_ps(data, vec.data);
1868 # endif
1869  return *this;
1870  }
1871 
1875  DEAL_II_ALWAYS_INLINE
1876  VectorizedArray &
1877  operator-=(const VectorizedArray &vec)
1878  {
1879 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1880  data -= vec.data;
1881 # else
1882  data = _mm256_sub_ps(data, vec.data);
1883 # endif
1884  return *this;
1885  }
1889  DEAL_II_ALWAYS_INLINE
1890  VectorizedArray &
1891  operator*=(const VectorizedArray &vec)
1892  {
1893 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1894  data *= vec.data;
1895 # else
1896  data = _mm256_mul_ps(data, vec.data);
1897 # endif
1898  return *this;
1899  }
1900 
1904  DEAL_II_ALWAYS_INLINE
1905  VectorizedArray &
1906  operator/=(const VectorizedArray &vec)
1907  {
1908 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1909  data /= vec.data;
1910 # else
1911  data = _mm256_div_ps(data, vec.data);
1912 # endif
1913  return *this;
1914  }
1915 
1921  DEAL_II_ALWAYS_INLINE
1922  void
1923  load(const float *ptr)
1924  {
1925  data = _mm256_loadu_ps(ptr);
1926  }
1927 
1934  DEAL_II_ALWAYS_INLINE
1935  void
1936  store(float *ptr) const
1937  {
1938  _mm256_storeu_ps(ptr, data);
1939  }
1940 
1944  DEAL_II_ALWAYS_INLINE
1945  void
1946  streaming_store(float *ptr) const
1947  {
1948  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1949  ExcMessage("Memory not aligned"));
1950  _mm256_stream_ps(ptr, data);
1951  }
1952 
1965  DEAL_II_ALWAYS_INLINE
1966  void
1967  gather(const float *base_ptr, const unsigned int *offsets)
1968  {
1969 # ifdef __AVX2__
1970  // unfortunately, there does not appear to be a 256 bit integer load, so
1971  // do it by some reinterpret casts here. this is allowed because the Intel
1972  // API allows aliasing between different vector types.
1973  const __m256 index_val =
1974  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1975  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1976  data = _mm256_i32gather_ps(base_ptr, index, 4);
1977 # else
1978  for (unsigned int i = 0; i < 8; ++i)
1979  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1980 # endif
1981  }
1982 
1995  DEAL_II_ALWAYS_INLINE
1996  void
1997  scatter(const unsigned int *offsets, float *base_ptr) const
1998  {
1999  // no scatter operation in AVX/AVX2
2000  for (unsigned int i = 0; i < 8; ++i)
2001  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2002  }
2003 
2008  __m256 data;
2009 
2010 private:
2015  DEAL_II_ALWAYS_INLINE
2016  VectorizedArray
2017  get_sqrt() const
2018  {
2019  VectorizedArray res;
2020  res.data = _mm256_sqrt_ps(data);
2021  return res;
2022  }
2023 
2028  DEAL_II_ALWAYS_INLINE
2029  VectorizedArray
2030  get_abs() const
2031  {
2032  // to compute the absolute value, perform bitwise andnot with -0. This
2033  // will leave all value and exponent bits unchanged but force the sign
2034  // value to +.
2035  __m256 mask = _mm256_set1_ps(-0.f);
2036  VectorizedArray res;
2037  res.data = _mm256_andnot_ps(mask, data);
2038  return res;
2039  }
2040 
2045  DEAL_II_ALWAYS_INLINE
2046  VectorizedArray
2047  get_max(const VectorizedArray &other) const
2048  {
2049  VectorizedArray res;
2050  res.data = _mm256_max_ps(data, other.data);
2051  return res;
2052  }
2053 
2058  DEAL_II_ALWAYS_INLINE
2059  VectorizedArray
2060  get_min(const VectorizedArray &other) const
2061  {
2062  VectorizedArray res;
2063  res.data = _mm256_min_ps(data, other.data);
2064  return res;
2065  }
2066 
2070  template <typename Number2>
2071  friend VectorizedArray<Number2>
2072  std::sqrt(const VectorizedArray<Number2> &);
2073  template <typename Number2>
2074  friend VectorizedArray<Number2>
2075  std::abs(const VectorizedArray<Number2> &);
2076  template <typename Number2>
2077  friend VectorizedArray<Number2>
2078  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2079  template <typename Number2>
2080  friend VectorizedArray<Number2>
2081  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2082 };
2083 
2084 
2085 
2089 template <>
2090 inline void
2091 vectorized_load_and_transpose(const unsigned int n_entries,
2092  const float * in,
2093  const unsigned int * offsets,
2094  VectorizedArray<float> * out)
2095 {
2096  const unsigned int n_chunks = n_entries / 4;
2097  for (unsigned int i = 0; i < n_chunks; ++i)
2098  {
2099  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2100  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2101  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2102  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2103  __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4]);
2104  __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5]);
2105  __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6]);
2106  __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7]);
2107  // To avoid warnings about uninitialized variables, need to initialize
2108  // one variable with zero before using it.
2109  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
2110  t0 = _mm256_insertf128_ps(t3, u0, 0);
2111  t0 = _mm256_insertf128_ps(t0, u4, 1);
2112  t1 = _mm256_insertf128_ps(t3, u1, 0);
2113  t1 = _mm256_insertf128_ps(t1, u5, 1);
2114  t2 = _mm256_insertf128_ps(t3, u2, 0);
2115  t2 = _mm256_insertf128_ps(t2, u6, 1);
2116  t3 = _mm256_insertf128_ps(t3, u3, 0);
2117  t3 = _mm256_insertf128_ps(t3, u7, 1);
2118  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2119  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2120  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2121  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2122  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2123  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2124  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2125  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2126  }
2127  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2128  for (unsigned int v = 0; v < 8; ++v)
2129  out[i][v] = in[offsets[v] + i];
2130 }
2131 
2132 
2133 
2137 template <>
2138 inline void
2139 vectorized_transpose_and_store(const bool add_into,
2140  const unsigned int n_entries,
2141  const VectorizedArray<float> *in,
2142  const unsigned int * offsets,
2143  float * out)
2144 {
2145  const unsigned int n_chunks = n_entries / 4;
2146  for (unsigned int i = 0; i < n_chunks; ++i)
2147  {
2148  __m256 u0 = in[4 * i + 0].data;
2149  __m256 u1 = in[4 * i + 1].data;
2150  __m256 u2 = in[4 * i + 2].data;
2151  __m256 u3 = in[4 * i + 3].data;
2152  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
2153  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
2154  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
2155  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
2156  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
2157  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
2158  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
2159  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
2160  __m128 res0 = _mm256_extractf128_ps(u0, 0);
2161  __m128 res4 = _mm256_extractf128_ps(u0, 1);
2162  __m128 res1 = _mm256_extractf128_ps(u1, 0);
2163  __m128 res5 = _mm256_extractf128_ps(u1, 1);
2164  __m128 res2 = _mm256_extractf128_ps(u2, 0);
2165  __m128 res6 = _mm256_extractf128_ps(u2, 1);
2166  __m128 res3 = _mm256_extractf128_ps(u3, 0);
2167  __m128 res7 = _mm256_extractf128_ps(u3, 1);
2168 
2169  // Cannot use the same store instructions in both paths of the 'if'
2170  // because the compiler cannot know that there is no aliasing between
2171  // pointers
2172  if (add_into)
2173  {
2174  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2175  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2176  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2177  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2178  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2179  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2180  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2181  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2182  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2183  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2184  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2185  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2186  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2187  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2188  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2189  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2190  }
2191  else
2192  {
2193  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2194  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2195  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2196  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2197  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2198  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2199  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2200  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2201  }
2202  }
2203  if (add_into)
2204  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2205  for (unsigned int v = 0; v < 8; ++v)
2206  out[offsets[v] + i] += in[i][v];
2207  else
2208  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2209  for (unsigned int v = 0; v < 8; ++v)
2210  out[offsets[v] + i] = in[i][v];
2211 }
2212 
2213 
2214 
2215 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2216 
2220 template <>
2221 class VectorizedArray<double>
2222 {
2223 public:
2227  static const unsigned int n_array_elements = 2;
2228 
2232  DEAL_II_ALWAYS_INLINE
2233  VectorizedArray &
2234  operator=(const double x)
2235  {
2236  data = _mm_set1_pd(x);
2237  return *this;
2238  }
2239 
2243  DEAL_II_ALWAYS_INLINE
2244  double &operator[](const unsigned int comp)
2245  {
2246  AssertIndexRange(comp, 2);
2247  return *(reinterpret_cast<double *>(&data) + comp);
2248  }
2249 
2253  DEAL_II_ALWAYS_INLINE
2254  const double &operator[](const unsigned int comp) const
2255  {
2256  AssertIndexRange(comp, 2);
2257  return *(reinterpret_cast<const double *>(&data) + comp);
2258  }
2259 
2263  DEAL_II_ALWAYS_INLINE
2264  VectorizedArray &
2265  operator+=(const VectorizedArray &vec)
2266  {
2267 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2268  data += vec.data;
2269 # else
2270  data = _mm_add_pd(data, vec.data);
2271 # endif
2272  return *this;
2273  }
2274 
2278  DEAL_II_ALWAYS_INLINE
2279  VectorizedArray &
2280  operator-=(const VectorizedArray &vec)
2281  {
2282 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2283  data -= vec.data;
2284 # else
2285  data = _mm_sub_pd(data, vec.data);
2286 # endif
2287  return *this;
2288  }
2289 
2293  DEAL_II_ALWAYS_INLINE
2294  VectorizedArray &
2295  operator*=(const VectorizedArray &vec)
2296  {
2297 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2298  data *= vec.data;
2299 # else
2300  data = _mm_mul_pd(data, vec.data);
2301 # endif
2302  return *this;
2303  }
2304 
2308  DEAL_II_ALWAYS_INLINE
2309  VectorizedArray &
2310  operator/=(const VectorizedArray &vec)
2311  {
2312 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2313  data /= vec.data;
2314 # else
2315  data = _mm_div_pd(data, vec.data);
2316 # endif
2317  return *this;
2318  }
2319 
2325  DEAL_II_ALWAYS_INLINE
2326  void
2327  load(const double *ptr)
2328  {
2329  data = _mm_loadu_pd(ptr);
2330  }
2331 
2338  DEAL_II_ALWAYS_INLINE
2339  void
2340  store(double *ptr) const
2341  {
2342  _mm_storeu_pd(ptr, data);
2343  }
2344 
2348  DEAL_II_ALWAYS_INLINE
2349  void
2350  streaming_store(double *ptr) const
2351  {
2352  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2353  ExcMessage("Memory not aligned"));
2354  _mm_stream_pd(ptr, data);
2355  }
2356 
2369  DEAL_II_ALWAYS_INLINE
2370  void
2371  gather(const double *base_ptr, const unsigned int *offsets)
2372  {
2373  for (unsigned int i = 0; i < 2; ++i)
2374  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2375  }
2376 
2389  DEAL_II_ALWAYS_INLINE
2390  void
2391  scatter(const unsigned int *offsets, double *base_ptr) const
2392  {
2393  for (unsigned int i = 0; i < 2; ++i)
2394  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2395  }
2396 
2401  __m128d data;
2402 
2403 private:
2408  DEAL_II_ALWAYS_INLINE
2409  VectorizedArray
2410  get_sqrt() const
2411  {
2412  VectorizedArray res;
2413  res.data = _mm_sqrt_pd(data);
2414  return res;
2415  }
2416 
2421  DEAL_II_ALWAYS_INLINE
2422  VectorizedArray
2423  get_abs() const
2424  {
2425  // to compute the absolute value, perform
2426  // bitwise andnot with -0. This will leave all
2427  // value and exponent bits unchanged but force
2428  // the sign value to +.
2429  __m128d mask = _mm_set1_pd(-0.);
2430  VectorizedArray res;
2431  res.data = _mm_andnot_pd(mask, data);
2432  return res;
2433  }
2434 
2439  DEAL_II_ALWAYS_INLINE
2440  VectorizedArray
2441  get_max(const VectorizedArray &other) const
2442  {
2443  VectorizedArray res;
2444  res.data = _mm_max_pd(data, other.data);
2445  return res;
2446  }
2447 
2452  DEAL_II_ALWAYS_INLINE
2453  VectorizedArray
2454  get_min(const VectorizedArray &other) const
2455  {
2456  VectorizedArray res;
2457  res.data = _mm_min_pd(data, other.data);
2458  return res;
2459  }
2460 
2464  template <typename Number2>
2465  friend VectorizedArray<Number2>
2466  std::sqrt(const VectorizedArray<Number2> &);
2467  template <typename Number2>
2468  friend VectorizedArray<Number2>
2469  std::abs(const VectorizedArray<Number2> &);
2470  template <typename Number2>
2471  friend VectorizedArray<Number2>
2472  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2473  template <typename Number2>
2474  friend VectorizedArray<Number2>
2475  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2476 };
2477 
2478 
2479 
2483 template <>
2484 inline void
2485 vectorized_load_and_transpose(const unsigned int n_entries,
2486  const double * in,
2487  const unsigned int * offsets,
2488  VectorizedArray<double> * out)
2489 {
2490  const unsigned int n_chunks = n_entries / 2;
2491  for (unsigned int i = 0; i < n_chunks; ++i)
2492  {
2493  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
2494  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
2495  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
2496  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
2497  }
2498  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2499  for (unsigned int v = 0; v < 2; ++v)
2500  out[i][v] = in[offsets[v] + i];
2501 }
2502 
2503 
2504 
2508 template <>
2509 inline void
2510 vectorized_transpose_and_store(const bool add_into,
2511  const unsigned int n_entries,
2512  const VectorizedArray<double> *in,
2513  const unsigned int * offsets,
2514  double * out)
2515 {
2516  const unsigned int n_chunks = n_entries / 2;
2517  if (add_into)
2518  {
2519  for (unsigned int i = 0; i < n_chunks; ++i)
2520  {
2521  __m128d u0 = in[2 * i + 0].data;
2522  __m128d u1 = in[2 * i + 1].data;
2523  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2524  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2525  _mm_storeu_pd(out + 2 * i + offsets[0],
2526  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
2527  res0));
2528  _mm_storeu_pd(out + 2 * i + offsets[1],
2529  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
2530  res1));
2531  }
2532  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2533  for (unsigned int v = 0; v < 2; ++v)
2534  out[offsets[v] + i] += in[i][v];
2535  }
2536  else
2537  {
2538  for (unsigned int i = 0; i < n_chunks; ++i)
2539  {
2540  __m128d u0 = in[2 * i + 0].data;
2541  __m128d u1 = in[2 * i + 1].data;
2542  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2543  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2544  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2545  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2546  }
2547  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2548  for (unsigned int v = 0; v < 2; ++v)
2549  out[offsets[v] + i] = in[i][v];
2550  }
2551 }
2552 
2553 
2554 
2558 template <>
2559 class VectorizedArray<float>
2560 {
2561 public:
2565  static const unsigned int n_array_elements = 4;
2566 
2571  DEAL_II_ALWAYS_INLINE
2572  VectorizedArray &
2573  operator=(const float x)
2574  {
2575  data = _mm_set1_ps(x);
2576  return *this;
2577  }
2578 
2582  DEAL_II_ALWAYS_INLINE
2583  float &operator[](const unsigned int comp)
2584  {
2585  AssertIndexRange(comp, 4);
2586  return *(reinterpret_cast<float *>(&data) + comp);
2587  }
2588 
2592  DEAL_II_ALWAYS_INLINE
2593  const float &operator[](const unsigned int comp) const
2594  {
2595  AssertIndexRange(comp, 4);
2596  return *(reinterpret_cast<const float *>(&data) + comp);
2597  }
2598 
2602  DEAL_II_ALWAYS_INLINE
2603  VectorizedArray &
2604  operator+=(const VectorizedArray &vec)
2605  {
2606 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2607  data += vec.data;
2608 # else
2609  data = _mm_add_ps(data, vec.data);
2610 # endif
2611  return *this;
2612  }
2613 
2617  DEAL_II_ALWAYS_INLINE
2618  VectorizedArray &
2619  operator-=(const VectorizedArray &vec)
2620  {
2621 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2622  data -= vec.data;
2623 # else
2624  data = _mm_sub_ps(data, vec.data);
2625 # endif
2626  return *this;
2627  }
2628 
2632  DEAL_II_ALWAYS_INLINE
2633  VectorizedArray &
2634  operator*=(const VectorizedArray &vec)
2635  {
2636 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2637  data *= vec.data;
2638 # else
2639  data = _mm_mul_ps(data, vec.data);
2640 # endif
2641  return *this;
2642  }
2643 
2647  DEAL_II_ALWAYS_INLINE
2648  VectorizedArray &
2649  operator/=(const VectorizedArray &vec)
2650  {
2651 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2652  data /= vec.data;
2653 # else
2654  data = _mm_div_ps(data, vec.data);
2655 # endif
2656  return *this;
2657  }
2658 
2664  DEAL_II_ALWAYS_INLINE
2665  void
2666  load(const float *ptr)
2667  {
2668  data = _mm_loadu_ps(ptr);
2669  }
2670 
2677  DEAL_II_ALWAYS_INLINE
2678  void
2679  store(float *ptr) const
2680  {
2681  _mm_storeu_ps(ptr, data);
2682  }
2683 
2687  DEAL_II_ALWAYS_INLINE
2688  void
2689  streaming_store(float *ptr) const
2690  {
2691  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2692  ExcMessage("Memory not aligned"));
2693  _mm_stream_ps(ptr, data);
2694  }
2695 
2708  DEAL_II_ALWAYS_INLINE
2709  void
2710  gather(const float *base_ptr, const unsigned int *offsets)
2711  {
2712  for (unsigned int i = 0; i < 4; ++i)
2713  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2714  }
2715 
2728  DEAL_II_ALWAYS_INLINE
2729  void
2730  scatter(const unsigned int *offsets, float *base_ptr) const
2731  {
2732  for (unsigned int i = 0; i < 4; ++i)
2733  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2734  }
2735 
2740  __m128 data;
2741 
2742 private:
2747  DEAL_II_ALWAYS_INLINE
2748  VectorizedArray
2749  get_sqrt() const
2750  {
2751  VectorizedArray res;
2752  res.data = _mm_sqrt_ps(data);
2753  return res;
2754  }
2755 
2760  DEAL_II_ALWAYS_INLINE
2761  VectorizedArray
2762  get_abs() const
2763  {
2764  // to compute the absolute value, perform bitwise andnot with -0. This
2765  // will leave all value and exponent bits unchanged but force the sign
2766  // value to +.
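  // For instance, -0.f has the bit pattern 0x80000000 (only the sign bit
  // set), and _mm_andnot_ps(mask, data) evaluates (~mask) & data in each
  // lane, so exactly that sign bit is cleared: -1.5f (0xBFC00000) becomes
  // 1.5f (0x3FC00000).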
2767  __m128 mask = _mm_set1_ps(-0.f);
2768  VectorizedArray res;
2769  res.data = _mm_andnot_ps(mask, data);
2770  return res;
2771  }
2772 
2777  DEAL_II_ALWAYS_INLINE
2778  VectorizedArray
2779  get_max(const VectorizedArray &other) const
2780  {
2781  VectorizedArray res;
2782  res.data = _mm_max_ps(data, other.data);
2783  return res;
2784  }
2785 
2790  DEAL_II_ALWAYS_INLINE
2791  VectorizedArray
2792  get_min(const VectorizedArray &other) const
2793  {
2794  VectorizedArray res;
2795  res.data = _mm_min_ps(data, other.data);
2796  return res;
2797  }
2798 
2802  template <typename Number2>
2803  friend VectorizedArray<Number2>
2804  std::sqrt(const VectorizedArray<Number2> &);
2805  template <typename Number2>
2806  friend VectorizedArray<Number2>
2807  std::abs(const VectorizedArray<Number2> &);
2808  template <typename Number2>
2809  friend VectorizedArray<Number2>
2810  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2811  template <typename Number2>
2812  friend VectorizedArray<Number2>
2813  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2814 };
2815 
2816 
2817 
2821 template <>
2822 inline void
2823 vectorized_load_and_transpose(const unsigned int n_entries,
2824  const float * in,
2825  const unsigned int * offsets,
2826  VectorizedArray<float> * out)
2827 {
2828  const unsigned int n_chunks = n_entries / 4;
2829  for (unsigned int i = 0; i < n_chunks; ++i)
2830  {
2831  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2832  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2833  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2834  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2835  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2836  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2837  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2838  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2839  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2840  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2841  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2842  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2843  }
2844  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2845  for (unsigned int v = 0; v < 4; ++v)
2846  out[i][v] = in[offsets[v] + i];
2847 }
2848 
2849 
2850 
2854 template <>
2855 inline void
2856 vectorized_transpose_and_store(const bool add_into,
2857  const unsigned int n_entries,
2858  const VectorizedArray<float> *in,
2859  const unsigned int * offsets,
2860  float * out)
2861 {
2862  const unsigned int n_chunks = n_entries / 4;
2863  for (unsigned int i = 0; i < n_chunks; ++i)
2864  {
2865  __m128 u0 = in[4 * i + 0].data;
2866  __m128 u1 = in[4 * i + 1].data;
2867  __m128 u2 = in[4 * i + 2].data;
2868  __m128 u3 = in[4 * i + 3].data;
2869  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2870  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2871  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2872  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2873  u0 = _mm_shuffle_ps(t0, t2, 0x88);
2874  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2875  u2 = _mm_shuffle_ps(t1, t3, 0x88);
2876  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2877 
2878  // Cannot use the same store instructions in both paths of the 'if'
2879  // because the compiler cannot know that there is no aliasing between
2880  // pointers
2881  if (add_into)
2882  {
2883  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
2884  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2885  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
2886  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2887  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
2888  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2889  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
2890  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2891  }
2892  else
2893  {
2894  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2895  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2896  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2897  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2898  }
2899  }
2900  if (add_into)
2901  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2902  for (unsigned int v = 0; v < 4; ++v)
2903  out[offsets[v] + i] += in[i][v];
2904  else
2905  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2906  for (unsigned int v = 0; v < 4; ++v)
2907  out[offsets[v] + i] = in[i][v];
2908 }
2909 
2910 
2911 
2912 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__)
2913 
2914 
2915 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
2916  defined(__VSX__)
2917 
2918 template <>
2919 class VectorizedArray<double>
2920 {
2921 public:
2925  static const unsigned int n_array_elements = 2;
2926 
2930  DEAL_II_ALWAYS_INLINE
2931  VectorizedArray &
2932  operator=(const double x)
2933  {
2934  data = vec_splats(x);
2935  return *this;
2936  }
2937 
2941  DEAL_II_ALWAYS_INLINE
2942  double &operator[](const unsigned int comp)
2943  {
2944  AssertIndexRange(comp, 2);
2945  return *(reinterpret_cast<double *>(&data) + comp);
2946  }
2947 
2951  DEAL_II_ALWAYS_INLINE
2952  const double &operator[](const unsigned int comp) const
2953  {
2954  AssertIndexRange(comp, 2);
2955  return *(reinterpret_cast<const double *>(&data) + comp);
2956  }
2957 
2961  DEAL_II_ALWAYS_INLINE
2962  VectorizedArray &
2963  operator+=(const VectorizedArray &vec)
2964  {
2965  data = vec_add(data, vec.data);
2966  return *this;
2967  }
2968 
2972  DEAL_II_ALWAYS_INLINE
2973  VectorizedArray &
2974  operator-=(const VectorizedArray &vec)
2975  {
2976  data = vec_sub(data, vec.data);
2977  return *this;
2978  }
2979 
2983  DEAL_II_ALWAYS_INLINE
2984  VectorizedArray &
2985  operator*=(const VectorizedArray &vec)
2986  {
2987  data = vec_mul(data, vec.data);
2988  return *this;
2989  }
2990 
2994  DEAL_II_ALWAYS_INLINE
2995  VectorizedArray &
2996  operator/=(const VectorizedArray &vec)
2997  {
2998  data = vec_div(data, vec.data);
2999  return *this;
3000  }
3001 
3006  DEAL_II_ALWAYS_INLINE
3007  void
3008  load(const double *ptr)
3009  {
3010  data = vec_vsx_ld(0, ptr);
3011  }
3012 
3017  DEAL_II_ALWAYS_INLINE
3018  void
3019  store(double *ptr) const
3020  {
3021  vec_vsx_st(data, 0, ptr);
3022  }
3023 
3026  DEAL_II_ALWAYS_INLINE
3027  void
3028  streaming_store(double *ptr) const
3029  {
3030  store(ptr);
3031  }
3032 
3035  DEAL_II_ALWAYS_INLINE
3036  void
3037  gather(const double *base_ptr, const unsigned int *offsets)
3038  {
3039  for (unsigned int i = 0; i < 2; ++i)
3040  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3041  }
3042 
3045  DEAL_II_ALWAYS_INLINE
3046  void
3047  scatter(const unsigned int *offsets, double *base_ptr) const
3048  {
3049  for (unsigned int i = 0; i < 2; ++i)
3050  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3051  }
3052 
3057  __vector double data;
3058 
3059 private:
3064  DEAL_II_ALWAYS_INLINE
3065  VectorizedArray
3066  get_sqrt() const
3067  {
3068  VectorizedArray res;
3069  res.data = vec_sqrt(data);
3070  return res;
3071  }
3072 
3077  DEAL_II_ALWAYS_INLINE
3078  VectorizedArray
3079  get_abs() const
3080  {
3081  VectorizedArray res;
3082  res.data = vec_abs(data);
3083  return res;
3084  }
3085 
3090  DEAL_II_ALWAYS_INLINE
3091  VectorizedArray
3092  get_max(const VectorizedArray &other) const
3093  {
3094  VectorizedArray res;
3095  res.data = vec_max(data, other.data);
3096  return res;
3097  }
3098 
3103  DEAL_II_ALWAYS_INLINE
3104  VectorizedArray
3105  get_min(const VectorizedArray &other) const
3106  {
3107  VectorizedArray res;
3108  res.data = vec_min(data, other.data);
3109  return res;
3110  }
3111 
3115  template <typename Number2>
3116  friend VectorizedArray<Number2>
3117  std::sqrt(const VectorizedArray<Number2> &);
3118  template <typename Number2>
3119  friend VectorizedArray<Number2>
3120  std::abs(const VectorizedArray<Number2> &);
3121  template <typename Number2>
3122  friend VectorizedArray<Number2>
3123  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3124  template <typename Number2>
3125  friend VectorizedArray<Number2>
3126  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3127 };
3128 
3129 
3130 
3131 template <>
3132 class VectorizedArray<float>
3133 {
3134 public:
3138  static const unsigned int n_array_elements = 4;
3139 
3143  DEAL_II_ALWAYS_INLINE
3144  VectorizedArray &
3145  operator=(const float x)
3146  {
3147  data = vec_splats(x);
3148  return *this;
3149  }
3150 
3154  DEAL_II_ALWAYS_INLINE
3155  float &operator[](const unsigned int comp)
3156  {
3157  AssertIndexRange(comp, 4);
3158  return *(reinterpret_cast<float *>(&data) + comp);
3159  }
3160 
3164  DEAL_II_ALWAYS_INLINE
3165  const float &operator[](const unsigned int comp) const
3166  {
3167  AssertIndexRange(comp, 4);
3168  return *(reinterpret_cast<const float *>(&data) + comp);
3169  }
3170 
3174  DEAL_II_ALWAYS_INLINE
3175  VectorizedArray &
3176  operator+=(const VectorizedArray &vec)
3177  {
3178  data = vec_add(data, vec.data);
3179  return *this;
3180  }
3181 
3185  DEAL_II_ALWAYS_INLINE
3186  VectorizedArray &
3187  operator-=(const VectorizedArray &vec)
3188  {
3189  data = vec_sub(data, vec.data);
3190  return *this;
3191  }
3192 
3196  DEAL_II_ALWAYS_INLINE
3197  VectorizedArray &
3198  operator*=(const VectorizedArray &vec)
3199  {
3200  data = vec_mul(data, vec.data);
3201  return *this;
3202  }
3203 
3207  DEAL_II_ALWAYS_INLINE
3208  VectorizedArray &
3209  operator/=(const VectorizedArray &vec)
3210  {
3211  data = vec_div(data, vec.data);
3212  return *this;
3213  }
3214 
3219  DEAL_II_ALWAYS_INLINE
3220  void
3221  load(const float *ptr)
3222  {
3223  data = vec_vsx_ld(0, ptr);
3224  }
3225 
3230  DEAL_II_ALWAYS_INLINE
3231  void
3232  store(float *ptr) const
3233  {
3234  vec_vsx_st(data, 0, ptr);
3235  }
3236 
3239  DEAL_II_ALWAYS_INLINE
3240  void
3241  streaming_store(float *ptr) const
3242  {
3243  store(ptr);
3244  }
3245 
3248  DEAL_II_ALWAYS_INLINE
3249  void
3250  gather(const float *base_ptr, const unsigned int *offsets)
3251  {
3252  for (unsigned int i = 0; i < 4; ++i)
3253  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3254  }
3255 
3258  DEAL_II_ALWAYS_INLINE
3259  void
3260  scatter(const unsigned int *offsets, float *base_ptr) const
3261  {
3262  for (unsigned int i = 0; i < 4; ++i)
3263  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3264  }
3265 
3270  __vector float data;
3271 
3272 private:
3277  DEAL_II_ALWAYS_INLINE
3278  VectorizedArray
3279  get_sqrt() const
3280  {
3281  VectorizedArray res;
3282  res.data = vec_sqrt(data);
3283  return res;
3284  }
3285 
3290  DEAL_II_ALWAYS_INLINE
3291  VectorizedArray
3292  get_abs() const
3293  {
3294  VectorizedArray res;
3295  res.data = vec_abs(data);
3296  return res;
3297  }
3298 
3303  DEAL_II_ALWAYS_INLINE
3304  VectorizedArray
3305  get_max(const VectorizedArray &other) const
3306  {
3307  VectorizedArray res;
3308  res.data = vec_max(data, other.data);
3309  return res;
3310  }
3311 
3316  DEAL_II_ALWAYS_INLINE
3317  VectorizedArray
3318  get_min(const VectorizedArray &other) const
3319  {
3320  VectorizedArray res;
3321  res.data = vec_min(data, other.data);
3322  return res;
3323  }
3324 
3328  template <typename Number2>
3329  friend VectorizedArray<Number2>
3330  std::sqrt(const VectorizedArray<Number2> &);
3331  template <typename Number2>
3332  friend VectorizedArray<Number2>
3333  std::abs(const VectorizedArray<Number2> &);
3334  template <typename Number2>
3335  friend VectorizedArray<Number2>
3336  std::max(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3337  template <typename Number2>
3338  friend VectorizedArray<Number2>
3339  std::min(const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
3340 };
3341 
3342 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) &&
3343  // defined(__VSX__)
3344 
3345 
3346 
3352 template <typename Number>
3353 inline DEAL_II_ALWAYS_INLINE bool
3354 operator==(const VectorizedArray<Number> &lhs,
3355  const VectorizedArray<Number> &rhs)
3356 {
3357  for (unsigned int i = 0; i < VectorizedArray<Number>::n_array_elements; ++i)
3358  if (lhs[i] != rhs[i])
3359  return false;
3360 
3361  return true;
3362 }
3363 
3364 
3370 template <typename Number>
3371 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3372  operator+(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3373 {
3374  VectorizedArray<Number> tmp = u;
3375  return tmp += v;
3376 }
3377 
3383 template <typename Number>
3384 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3385  operator-(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3386 {
3387  VectorizedArray<Number> tmp = u;
3388  return tmp -= v;
3389 }
3390 
3396 template <typename Number>
3397 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3398  operator*(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3399 {
3400  VectorizedArray<Number> tmp = u;
3401  return tmp *= v;
3402 }
3403 
3409 template <typename Number>
3410 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3411  operator/(const VectorizedArray<Number> &u, const VectorizedArray<Number> &v)
3412 {
3413  VectorizedArray<Number> tmp = u;
3414  return tmp /= v;
3415 }
3416 
3423 template <typename Number>
3424 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3425  operator+(const Number &u, const VectorizedArray<Number> &v)
3426 {
3427  VectorizedArray<Number> tmp;
3428  tmp = u;
3429  return tmp += v;
3430 }
3431 
3440 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3441  operator+(const double u, const VectorizedArray<float> &v)
3442 {
3443  VectorizedArray<float> tmp;
3444  tmp = u;
3445  return tmp += v;
3446 }
3447 
3454 template <typename Number>
3455 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3456  operator+(const VectorizedArray<Number> &v, const Number &u)
3457 {
3458  return u + v;
3459 }
3460 
3469 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3470  operator+(const VectorizedArray<float> &v, const double u)
3471 {
3472  return u + v;
3473 }
3474 
3481 template <typename Number>
3482 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3483  operator-(const Number &u, const VectorizedArray<Number> &v)
3484 {
3485  VectorizedArray<Number> tmp;
3486  tmp = u;
3487  return tmp -= v;
3488 }
3489 
3498 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3499  operator-(const double u, const VectorizedArray<float> &v)
3500 {
3501  VectorizedArray<float> tmp;
3502  tmp = float(u);
3503  return tmp -= v;
3504 }
3505 
3512 template <typename Number>
3513 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3514  operator-(const VectorizedArray<Number> &v, const Number &u)
3515 {
3516  VectorizedArray<Number> tmp;
3517  tmp = u;
3518  return v - tmp;
3519 }
3520 
3529 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3530  operator-(const VectorizedArray<float> &v, const double u)
3531 {
3532  VectorizedArray<float> tmp;
3533  tmp = float(u);
3534  return v - tmp;
3535 }
3536 
3543 template <typename Number>
3544 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3545  operator*(const Number &u, const VectorizedArray<Number> &v)
3546 {
3547  VectorizedArray<Number> tmp;
3548  tmp = u;
3549  return tmp *= v;
3550 }
3551 
3560 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3561  operator*(const double u, const VectorizedArray<float> &v)
3562 {
3563  VectorizedArray<float> tmp;
3564  tmp = float(u);
3565  return tmp *= v;
3566 }
3567 
3574 template <typename Number>
3575 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3576  operator*(const VectorizedArray<Number> &v, const Number &u)
3577 {
3578  return u * v;
3579 }
3580 
3589 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3590  operator*(const VectorizedArray<float> &v, const double u)
3591 {
3592  return u * v;
3593 }
3594 
3601 template <typename Number>
3602 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3603  operator/(const Number &u, const VectorizedArray<Number> &v)
3604 {
3605  VectorizedArray<Number> tmp;
3606  tmp = u;
3607  return tmp /= v;
3608 }
3609 
3618 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3619  operator/(const double u, const VectorizedArray<float> &v)
3620 {
3621  VectorizedArray<float> tmp;
3622  tmp = float(u);
3623  return tmp /= v;
3624 }
3625 
3632 template <typename Number>
3633 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3634  operator/(const VectorizedArray<Number> &v, const Number &u)
3635 {
3636  VectorizedArray<Number> tmp;
3637  tmp = u;
3638  return v / tmp;
3639 }
3640 
3649 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float>
3650  operator/(const VectorizedArray<float> &v, const double u)
3651 {
3652  VectorizedArray<float> tmp;
3653  tmp = float(u);
3654  return v / tmp;
3655 }
3656 
3662 template <typename Number>
3663 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3664  operator+(const VectorizedArray<Number> &u)
3665 {
3666  return u;
3667 }
3668 
3674 template <typename Number>
3675 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number>
3676  operator-(const VectorizedArray<Number> &u)
3677 {
3678  // to get a negative sign, subtract the input from zero (could also
3679  // multiply by -1, but this one is slightly simpler)
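  // (A value-initialized VectorizedArray<Number>() has all lanes equal to
  // zero, so the result holds -u[i] in every lane i.)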
3680  return VectorizedArray<Number>() - u;
3681 }
3682 
3683 
3684 DEAL_II_NAMESPACE_CLOSE
3685 
3686 
3693 namespace std
3694 {
3702  template <typename Number>
3703  inline ::VectorizedArray<Number>
3704  sin(const ::VectorizedArray<Number> &x)
3705  {
3706  // put values in an array and later read in that array with an unaligned
3707  // read. This should save some instructions as compared to directly
3708  // setting the individual elements and also circumvents a compiler
3709  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3710  // from April 2014, topic "matrix_free/step-48 Test").
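  // For instance, for VectorizedArray<double> with two lanes this amounts
  // to two scalar std::sin calls whose results are then packed back into
  // one vectorized value.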
3711  Number values[::VectorizedArray<Number>::n_array_elements];
3712  for (unsigned int i = 0;
3713  i < ::VectorizedArray<Number>::n_array_elements;
3714  ++i)
3715  values[i] = std::sin(x[i]);
3716  ::VectorizedArray<Number> out;
3717  out.load(&values[0]);
3718  return out;
3719  }
3720 
3721 
3722 
3730  template <typename Number>
3731  inline ::VectorizedArray<Number>
3732  cos(const ::VectorizedArray<Number> &x)
3733  {
3734  Number values[::VectorizedArray<Number>::n_array_elements];
3735  for (unsigned int i = 0;
3736  i < ::VectorizedArray<Number>::n_array_elements;
3737  ++i)
3738  values[i] = std::cos(x[i]);
3739  ::VectorizedArray<Number> out;
3740  out.load(&values[0]);
3741  return out;
3742  }
3743 
3744 
3745 
3753  template <typename Number>
3754  inline ::VectorizedArray<Number>
3755  tan(const ::VectorizedArray<Number> &x)
3756  {
3757  Number values[::VectorizedArray<Number>::n_array_elements];
3758  for (unsigned int i = 0;
3759  i < ::VectorizedArray<Number>::n_array_elements;
3760  ++i)
3761  values[i] = std::tan(x[i]);
3762  ::VectorizedArray<Number> out;
3763  out.load(&values[0]);
3764  return out;
3765  }
3766 
3767 
3768 
3776  template <typename Number>
3777  inline ::VectorizedArray<Number>
3778  exp(const ::VectorizedArray<Number> &x)
3779  {
3780  Number values[::VectorizedArray<Number>::n_array_elements];
3781  for (unsigned int i = 0;
3782  i < ::VectorizedArray<Number>::n_array_elements;
3783  ++i)
3784  values[i] = std::exp(x[i]);
3785  ::VectorizedArray<Number> out;
3786  out.load(&values[0]);
3787  return out;
3788  }
3789 
3790 
3791 
3799  template <typename Number>
3800  inline ::VectorizedArray<Number>
3801  log(const ::VectorizedArray<Number> &x)
3802  {
3803  Number values[::VectorizedArray<Number>::n_array_elements];
3804  for (unsigned int i = 0;
3805  i < ::VectorizedArray<Number>::n_array_elements;
3806  ++i)
3807  values[i] = std::log(x[i]);
3808  ::VectorizedArray<Number> out;
3809  out.load(&values[0]);
3810  return out;
3811  }
3812 
3813 
3814 
3822  template <typename Number>
3823  inline ::VectorizedArray<Number>
3824  sqrt(const ::VectorizedArray<Number> &x)
3825  {
3826  return x.get_sqrt();
3827  }
3828 
3829 
3830 
3838  template <typename Number>
3839  inline ::VectorizedArray<Number>
3840  pow(const ::VectorizedArray<Number> &x, const Number p)
3841  {
3842  Number values[::VectorizedArray<Number>::n_array_elements];
3843  for (unsigned int i = 0;
3844  i < ::VectorizedArray<Number>::n_array_elements;
3845  ++i)
3846  values[i] = std::pow(x[i], p);
3847  ::VectorizedArray<Number> out;
3848  out.load(&values[0]);
3849  return out;
3850  }
3851 
3852 
3853 
3861  template <typename Number>
3862  inline ::VectorizedArray<Number>
3863  abs(const ::VectorizedArray<Number> &x)
3864  {
3865  return x.get_abs();
3866  }
3867 
3868 
3869 
3877  template <typename Number>
3878  inline ::VectorizedArray<Number>
3879  max(const ::VectorizedArray<Number> &x,
3880  const ::VectorizedArray<Number> &y)
3881  {
3882  return x.get_max(y);
3883  }
3884 
3885 
3886 
3894  template <typename Number>
3895  inline ::VectorizedArray<Number>
3896  min(const ::VectorizedArray<Number> &x,
3897  const ::VectorizedArray<Number> &y)
3898  {
3899  return x.get_min(y);
3900  }
3901 
3902 } // namespace std
3903 
3904 #endif