Reference documentation for deal.II version Git 082d75bebd 2019-10-16 19:44:02 +0200
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/exceptions.h>
23 #include <deal.II/base/template_constraints.h>
24 
25 #include <cmath>
26 
27 // Note:
28 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
29 // according to the following scheme (on x86-based architectures)
30 // #ifdef __AVX512F__
31 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
32 // #elif defined (__AVX__)
33 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
34 // #elif defined (__SSE2__)
35 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
36 // #else
37 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
38 // #endif
39 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
40 // 'check_01_cpu_features.cmake', ensures that these features are not only
41 // present in the compilation unit but also working properly.
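//
// As an illustration (not itself part of this header), downstream code can
// branch on the same macro to report which instruction set extension the
// current translation unit was compiled for:
//
// @code
// #include <deal.II/base/config.h>
// #include <iostream>
//
// void print_vectorization_level()   // name chosen for this example only
// {
// #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3
//   std::cout << "AVX-512 enabled\n";
// #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 2
//   std::cout << "AVX enabled\n";
// #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1
//   std::cout << "SSE2 enabled\n";
// #else
//   std::cout << "no explicit vectorization\n";
// #endif
// }
// @endcode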
42 
43 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
44 
45 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__SSE2__) && \
46  !defined(__AVX__)
47 # error \
48  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
49 # endif
50 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__SSE2__) && \
51  !defined(__AVX512F__)
52 # error \
53  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
54 # endif
55 
56 # if defined(_MSC_VER)
57 # include <intrin.h>
58 # elif defined(__ALTIVEC__)
59 # include <altivec.h>
60 
61 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
62 // them before they make trouble
63 # undef vector
64 # undef pixel
65 # undef bool
66 # else
67 # include <x86intrin.h>
68 # endif
69 
70 #endif
71 
72 
73 DEAL_II_NAMESPACE_OPEN
74 
75 
76 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
77 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
78 
79 template <typename Number, int width>
80 struct EnableIfScalar<VectorizedArray<Number, width>>
81 {
82  using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
83 };
84 
85 
86 
92 template <typename T>
93 class VectorizedArrayIterator
94 {
95 public:
102  VectorizedArrayIterator(T &data, unsigned int lane)
103  : data(data)
104  , lane(lane)
105  {}
106 
110  bool
111  operator!=(const VectorizedArrayIterator<T> &other) const
112  {
113  return this->lane != other.lane;
114  }
115 
120  const typename T::value_type &operator*() const
121  {
122  return data[lane];
123  }
124 
125 
130  template <typename U = T>
131  typename std::enable_if<!std::is_same<U, const U>::value,
132  typename T::value_type>::type &
133  operator*()
134  {
135  return data[lane];
136  }
137 
143  VectorizedArrayIterator<T> &
144  operator++()
145  {
146  lane++;
147  return *this;
148  }
149 
150 private:
154  T &data;
155 
159  unsigned int lane;
160 };
161 
162 
163 
173 template <typename T>
174 class VectorizedArrayBase
175 {
176 public:
181  static constexpr unsigned int
182  size()
183  {
184  return T::n_array_elements;
185  }
186 
190  VectorizedArrayIterator<T>
191  begin()
192  {
193  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
194  }
195 
199  VectorizedArrayIterator<T>
200  end()
201  {
202  return VectorizedArrayIterator<T>(static_cast<T &>(*this),
203  T::n_array_elements);
204  }
205 
210  VectorizedArrayIterator<const T>
211  begin() const
212  {
213  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
214  }
215 
220  VectorizedArrayIterator<const T>
221  end() const
222  {
223  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
224  T::n_array_elements);
225  }
226 };
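
// The begin()/end() pair above is what makes range-based for loops over the
// lanes of a derived VectorizedArray work. A minimal sketch of the intended
// use, assuming the default-width VectorizedArray<double> of the configured
// instruction set:
//
// @code
// VectorizedArray<double> v;
// v = 1.5;                      // broadcast the value to all lanes
//
// double sum = 0.;
// for (const double lane : v)   // iterates via VectorizedArrayBase::begin()/end()
//   sum += lane;
// // sum == 1.5 * VectorizedArray<double>::n_array_elements
// @endcode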
227 
228 
229 
315 template <typename Number, int width>
316 class VectorizedArray
317  : public VectorizedArrayBase<VectorizedArray<Number, width>>
318 {
319 public:
323  using value_type = Number;
324 
330  static const unsigned int n_array_elements = 1;
331 
332  static_assert(width == n_array_elements,
333  "You specified an illegal width that is not supported.");
334 
339  VectorizedArray() = default;
340 
344  VectorizedArray(const Number scalar)
345  {
346  this->operator=(scalar);
347  }
348 
352  DEAL_II_ALWAYS_INLINE
353  VectorizedArray &
354  operator=(const Number scalar)
355  {
356  data = scalar;
357  return *this;
358  }
359 
364  DEAL_II_ALWAYS_INLINE
365  Number &operator[](const unsigned int comp)
366  {
367  (void)comp;
368  AssertIndexRange(comp, 1);
369  return data;
370  }
371 
376  DEAL_II_ALWAYS_INLINE
377  const Number &operator[](const unsigned int comp) const
378  {
379  (void)comp;
380  AssertIndexRange(comp, 1);
381  return data;
382  }
383 
387  DEAL_II_ALWAYS_INLINE
388  VectorizedArray &
389  operator+=(const VectorizedArray &vec)
390  {
391  data += vec.data;
392  return *this;
393  }
394 
398  DEAL_II_ALWAYS_INLINE
399  VectorizedArray &
400  operator-=(const VectorizedArray &vec)
401  {
402  data -= vec.data;
403  return *this;
404  }
405 
409  DEAL_II_ALWAYS_INLINE
410  VectorizedArray &
411  operator*=(const VectorizedArray &vec)
412  {
413  data *= vec.data;
414  return *this;
415  }
416 
420  DEAL_II_ALWAYS_INLINE
421  VectorizedArray &
422  operator/=(const VectorizedArray &vec)
423  {
424  data /= vec.data;
425  return *this;
426  }
427 
434  DEAL_II_ALWAYS_INLINE
435  void
436  load(const Number *ptr)
437  {
438  data = *ptr;
439  }
440 
447  DEAL_II_ALWAYS_INLINE
448  void
449  store(Number *ptr) const
450  {
451  *ptr = data;
452  }
453 
500  DEAL_II_ALWAYS_INLINE
501  void
502  streaming_store(Number *ptr) const
503  {
504  *ptr = data;
505  }
506 
519  DEAL_II_ALWAYS_INLINE
520  void
521  gather(const Number *base_ptr, const unsigned int *offsets)
522  {
523  data = base_ptr[offsets[0]];
524  }
525 
538  DEAL_II_ALWAYS_INLINE
539  void
540  scatter(const unsigned int *offsets, Number *base_ptr) const
541  {
542  base_ptr[offsets[0]] = data;
543  }
544 
550  Number data;
551 
552 private:
557  DEAL_II_ALWAYS_INLINE
558  VectorizedArray
559  get_sqrt() const
560  {
561  VectorizedArray res;
562  res.data = std::sqrt(data);
563  return res;
564  }
565 
570  DEAL_II_ALWAYS_INLINE
571  VectorizedArray
572  get_abs() const
573  {
574  VectorizedArray res;
575  res.data = std::fabs(data);
576  return res;
577  }
578 
583  DEAL_II_ALWAYS_INLINE
584  VectorizedArray
585  get_max(const VectorizedArray &other) const
586  {
587  VectorizedArray res;
588  res.data = std::max(data, other.data);
589  return res;
590  }
591 
596  DEAL_II_ALWAYS_INLINE
597  VectorizedArray
598  get_min(const VectorizedArray &other) const
599  {
600  VectorizedArray res;
601  res.data = std::min(data, other.data);
602  return res;
603  }
604 
605  // Make a few functions friends.
606  template <typename Number2, int width2>
607  friend VectorizedArray<Number2, width2>
608  std::sqrt(const VectorizedArray<Number2, width2> &);
609  template <typename Number2, int width2>
610  friend VectorizedArray<Number2, width2>
611  std::abs(const VectorizedArray<Number2, width2> &);
612  template <typename Number2, int width2>
613  friend VectorizedArray<Number2, width2>
614  std::max(const VectorizedArray<Number2, width2> &,
615  const VectorizedArray<Number2, width2> &);
616  template <typename Number2, int width2>
617  friend VectorizedArray<Number2, width2>
618  std::min(const VectorizedArray<Number2, width2> &,
619  const VectorizedArray<Number2, width2> &);
620 };
621 
622 
623 
624 // We need to have a separate declaration for static const members
625 template <typename Number, int width>
626 const unsigned int VectorizedArray<Number, width>::n_array_elements;
627 
628 
629 
634 
635 
642 template <
643  typename Number,
645 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
646  make_vectorized_array(const Number &u)
647 {
648  VectorizedArray<Number, width> result = u;
649  return result;
650 }
651 
652 
653 
660 template <typename VectorizedArrayType>
661 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
662  make_vectorized_array(const typename VectorizedArrayType::value_type &u)
663 {
664  static_assert(
665  std::is_same<VectorizedArrayType,
666  VectorizedArray<typename VectorizedArrayType::value_type,
667  VectorizedArrayType::n_array_elements>>::value,
668  "VectorizedArrayType is not a VectorizedArray.");
669 
670  VectorizedArrayType result = u;
671  return result;
672 }
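
// A short usage sketch for the two overloads above. The first overload picks
// the widest array type available for the given scalar; the second names the
// array type explicitly (the 8-wide float array in this sketch assumes that
// AVX is available):
//
// @code
// // broadcast 2.0 into the widest VectorizedArray<double> for this platform
// const auto a = make_vectorized_array(2.0);
//
// // broadcast 1.0f into an explicitly chosen array type
// const auto b = make_vectorized_array<VectorizedArray<float, 8>>(1.0f);
// @endcode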
673 
674 
675 
701 template <typename Number, int width>
702 inline DEAL_II_ALWAYS_INLINE void
703 vectorized_load_and_transpose(const unsigned int n_entries,
704  const Number * in,
705  const unsigned int * offsets,
706  VectorizedArray<Number, width> * out)
707 {
708  for (unsigned int i = 0; i < n_entries; ++i)
709  for (unsigned int v = 0;
710  v < VectorizedArray<Number, width>::n_array_elements;
711  ++v)
712  out[i][v] = in[offsets[v] + i];
713 }
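
// A usage sketch for the generic function above: de-interleave n_entries
// values from several independent scalar streams, one stream per SIMD lane,
// with the start of each stream given through `offsets`. The container and
// offset choices are examples only; AlignedVector is used so that the
// VectorizedArray objects are allocated with the proper alignment:
//
// @code
// #include <deal.II/base/aligned_vector.h>
//
// #include <vector>
//
// constexpr unsigned int n_lanes   = VectorizedArray<double>::n_array_elements;
// const unsigned int     n_entries = 4;
//
// std::vector<double>       scalar_data(n_lanes * n_entries, 1.0);
// std::vector<unsigned int> offsets(n_lanes);
// for (unsigned int v = 0; v < n_lanes; ++v)
//   offsets[v] = v * n_entries;   // lane v reads scalar_data[v * n_entries + i]
//
// AlignedVector<VectorizedArray<double>> vec_data(n_entries);
// vectorized_load_and_transpose(n_entries,
//                               scalar_data.data(),
//                               offsets.data(),
//                               vec_data.data());
// // now vec_data[i][v] == scalar_data[offsets[v] + i]
// @endcode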
714 
715 
716 
755 template <typename Number, int width>
756 inline DEAL_II_ALWAYS_INLINE void
757 vectorized_transpose_and_store(const bool add_into,
758  const unsigned int n_entries,
759  const VectorizedArray<Number, width> *in,
760  const unsigned int * offsets,
761  Number * out)
762 {
763  if (add_into)
764  for (unsigned int i = 0; i < n_entries; ++i)
765  for (unsigned int v = 0;
766  v < VectorizedArray<Number, width>::n_array_elements;
767  ++v)
768  out[offsets[v] + i] += in[i][v];
769  else
770  for (unsigned int i = 0; i < n_entries; ++i)
771  for (unsigned int v = 0;
772  v < VectorizedArray<Number, width>::n_array_elements;
773  ++v)
774  out[offsets[v] + i] = in[i][v];
775 }
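
// Continuing the sketch given after vectorized_load_and_transpose() above,
// the function above performs the inverse operation: it scatters the
// vectorized data back into the scalar streams, either overwriting or
// accumulating depending on the first argument:
//
// @code
// // overwrite: scalar_data[offsets[v] + i] = vec_data[i][v]
// vectorized_transpose_and_store(
//   false, n_entries, vec_data.data(), offsets.data(), scalar_data.data());
//
// // accumulate: scalar_data[offsets[v] + i] += vec_data[i][v]
// vectorized_transpose_and_store(
//   true, n_entries, vec_data.data(), offsets.data(), scalar_data.data());
// @endcode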
776 
777 
779 
780 #ifndef DOXYGEN
781 
782 // for safety, also check that __AVX512F__ is defined in case the user manually
783 // set some conflicting compile flags which prevent compilation
784 
785 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
786 
790 template <>
791 class VectorizedArray<double, 8>
792  : public VectorizedArrayBase<VectorizedArray<double, 8>>
793 {
794 public:
798  using value_type = double;
799 
803  static const unsigned int n_array_elements = 8;
804 
809  VectorizedArray() = default;
810 
814  VectorizedArray(const double scalar)
815  {
816  this->operator=(scalar);
817  }
818 
822  DEAL_II_ALWAYS_INLINE
823  VectorizedArray &
824  operator=(const double x)
825  {
826  data = _mm512_set1_pd(x);
827  return *this;
828  }
829 
833  DEAL_II_ALWAYS_INLINE
834  double &operator[](const unsigned int comp)
835  {
836  AssertIndexRange(comp, 8);
837  return *(reinterpret_cast<double *>(&data) + comp);
838  }
839 
843  DEAL_II_ALWAYS_INLINE
844  const double &operator[](const unsigned int comp) const
845  {
846  AssertIndexRange(comp, 8);
847  return *(reinterpret_cast<const double *>(&data) + comp);
848  }
849 
853  DEAL_II_ALWAYS_INLINE
854  VectorizedArray &
855  operator+=(const VectorizedArray &vec)
856  {
857  // if the compiler supports vector arithmetics, we can simply use +=
858  // operator on the given data type. this allows the compiler to combine
859  // additions with multiplication (fused multiply-add) if those
860  // instructions are available. Otherwise, we need to use the built-in
861  // intrinsic command for __m512d
862 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
863  data += vec.data;
864 # else
865  data = _mm512_add_pd(data, vec.data);
866 # endif
867  return *this;
868  }
869 
873  DEAL_II_ALWAYS_INLINE
874  VectorizedArray &
875  operator-=(const VectorizedArray &vec)
876  {
877 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
878  data -= vec.data;
879 # else
880  data = _mm512_sub_pd(data, vec.data);
881 # endif
882  return *this;
883  }
887  DEAL_II_ALWAYS_INLINE
888  VectorizedArray &
889  operator*=(const VectorizedArray &vec)
890  {
891 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
892  data *= vec.data;
893 # else
894  data = _mm512_mul_pd(data, vec.data);
895 # endif
896  return *this;
897  }
898 
902  DEAL_II_ALWAYS_INLINE
903  VectorizedArray &
904  operator/=(const VectorizedArray &vec)
905  {
906 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
907  data /= vec.data;
908 # else
909  data = _mm512_div_pd(data, vec.data);
910 # endif
911  return *this;
912  }
913 
919  DEAL_II_ALWAYS_INLINE
920  void
921  load(const double *ptr)
922  {
923  data = _mm512_loadu_pd(ptr);
924  }
925 
932  DEAL_II_ALWAYS_INLINE
933  void
934  store(double *ptr) const
935  {
936  _mm512_storeu_pd(ptr, data);
937  }
938 
942  DEAL_II_ALWAYS_INLINE
943  void
944  streaming_store(double *ptr) const
945  {
946  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
947  ExcMessage("Memory not aligned"));
948  _mm512_stream_pd(ptr, data);
949  }
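
  // streaming_store() writes around the caches and therefore insists on a
  // destination aligned to the full vector width (64 bytes for __m512d). A
  // sketch of a caller that obtains suitably aligned memory with C++17
  // std::aligned_alloc (any other 64-byte aligned allocation works as well):
  //
  // @code
  // #include <cstdlib>
  //
  // double *dst =
  //   static_cast<double *>(std::aligned_alloc(64, 8 * sizeof(double)));
  //
  // VectorizedArray<double, 8> v;
  // v = 1.0;
  // v.streaming_store(dst);   // dst satisfies the 64-byte alignment assertion
  // std::free(dst);
  // @endcode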
950 
963  DEAL_II_ALWAYS_INLINE
964  void
965  gather(const double *base_ptr, const unsigned int *offsets)
966  {
967  // unfortunately, there does not appear to be a 256 bit integer load, so
968  // do it by some reinterpret casts here. this is allowed because the Intel
969  // API allows aliasing between different vector types.
970  const __m256 index_val =
971  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
972  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
973  data = _mm512_i32gather_pd(index, base_ptr, 8);
974  }
975 
988  DEAL_II_ALWAYS_INLINE
989  void
990  scatter(const unsigned int *offsets, double *base_ptr) const
991  {
992  for (unsigned int i = 0; i < 8; ++i)
993  for (unsigned int j = i + 1; j < 8; ++j)
994  Assert(offsets[i] != offsets[j],
995  ExcMessage("Result of scatter undefined if two offset elements"
996  " point to the same position"));
997 
998  // unfortunately, there does not appear to be a 256 bit integer load, so
999  // do it by some reinterpret casts here. this is allowed because the Intel
1000  // API allows aliasing between different vector types.
1001  const __m256 index_val =
1002  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1003  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1004  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1005  }
1006 
1012  __m512d data;
1013 
1014 private:
1019  DEAL_II_ALWAYS_INLINE
1020  VectorizedArray
1021  get_sqrt() const
1022  {
1023  VectorizedArray res;
1024  res.data = _mm512_sqrt_pd(data);
1025  return res;
1026  }
1027 
1032  DEAL_II_ALWAYS_INLINE
1033  VectorizedArray
1034  get_abs() const
1035  {
1036  // to compute the absolute value, perform bitwise andnot with -0. This
1037  // will leave all value and exponent bits unchanged but force the sign
1038  // value to +. Since there is no andnot for AVX512, we interpret the data
1039  // as 64 bit integers and do the andnot on those types (note that andnot
1040  // is a bitwise operation so the data type does not matter)
1041  __m512d mask = _mm512_set1_pd(-0.);
1042  VectorizedArray res;
1043  res.data = reinterpret_cast<__m512d>(
1044  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1045  reinterpret_cast<__m512i>(data)));
1046  return res;
1047  }
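
  // A scalar sketch of the same sign-bit trick (using <cstdint> and
  // <cstring>): the mask -0.0 has only the sign bit set, so andnot clears
  // exactly that bit and leaves exponent and mantissa untouched:
  //
  // @code
  // double abs_via_andnot(const double x)   // example name, not part of deal.II
  // {
  //   std::uint64_t bits;
  //   std::memcpy(&bits, &x, sizeof(bits));   // bit pattern of x
  //   bits &= ~(std::uint64_t(1) << 63);      // ~(-0.0) & x: clear the sign bit
  //   double result;
  //   std::memcpy(&result, &bits, sizeof(result));
  //   return result;                          // equals std::fabs(x)
  // }
  // @endcode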
1048 
1053  DEAL_II_ALWAYS_INLINE
1054  VectorizedArray
1055  get_max(const VectorizedArray &other) const
1056  {
1057  VectorizedArray res;
1058  res.data = _mm512_max_pd(data, other.data);
1059  return res;
1060  }
1061 
1066  DEAL_II_ALWAYS_INLINE
1067  VectorizedArray
1068  get_min(const VectorizedArray &other) const
1069  {
1070  VectorizedArray res;
1071  res.data = _mm512_min_pd(data, other.data);
1072  return res;
1073  }
1074 
1075  // Make a few functions friends.
1076  template <typename Number2, int width2>
1077  friend VectorizedArray<Number2, width2>
1078  std::sqrt(const VectorizedArray<Number2, width2> &);
1079  template <typename Number2, int width2>
1080  friend VectorizedArray<Number2, width2>
1081  std::abs(const VectorizedArray<Number2, width2> &);
1082  template <typename Number2, int width2>
1083  friend VectorizedArray<Number2, width2>
1084  std::max(const VectorizedArray<Number2, width2> &,
1085  const VectorizedArray<Number2, width2> &);
1086  template <typename Number2, int width2>
1087  friend VectorizedArray<Number2, width2>
1088  std::min(const VectorizedArray<Number2, width2> &,
1089  const VectorizedArray<Number2, width2> &);
1090 };
1091 
1092 
1093 
1097 template <>
1098 inline DEAL_II_ALWAYS_INLINE void
1099 vectorized_load_and_transpose(const unsigned int n_entries,
1100  const double * in,
1101  const unsigned int * offsets,
1102  VectorizedArray<double, 8> * out)
1103 {
1104  // do not do full transpose because the code is long and will most
1105  // likely not pay off because many processors have two load units
1106  // (for the top 8 instructions) but only 1 permute unit (for the 8
1107  // shuffle/unpack instructions). rather start the transposition on the
1108  // vectorized array of half the size with 256 bits
1109  const unsigned int n_chunks = n_entries / 4;
1110  for (unsigned int i = 0; i < n_chunks; ++i)
1111  {
1112  __m512d t0, t1, t2, t3 = {};
1113 
1114  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1115  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1116  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1117  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1118  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1119  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1120  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1121  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1122 
1123  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1124  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1125  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1126  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1127  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1128  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1129  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1130  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1131  }
1132  // remainder loop of work that does not divide by 4
1133  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1134  out[i].gather(in + i, offsets);
1135 }
1136 
1137 
1138 
1142 template <>
1143 inline DEAL_II_ALWAYS_INLINE void
1144 vectorized_transpose_and_store(const bool add_into,
1145  const unsigned int n_entries,
1146  const VectorizedArray<double, 8> *in,
1147  const unsigned int * offsets,
1148  double * out)
1149 {
1150  // as for the load, we split the store operations into 256 bit units to
1151  // better balance between code size, shuffle instructions, and stores
1152  const unsigned int n_chunks = n_entries / 4;
1153  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1154  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1155  for (unsigned int i = 0; i < n_chunks; ++i)
1156  {
1157  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1158  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1159  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1160  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1161  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1162  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1163  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1164  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1165  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1166  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1167  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1168  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1169  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1170  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1171  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1172  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1173 
1174  // Cannot use the same store instructions in both paths of the 'if'
1175  // because the compiler cannot know that there is no aliasing
1176  // between pointers
1177  if (add_into)
1178  {
1179  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1180  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1181  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1182  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1183  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1184  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1185  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1186  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1187  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1188  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1189  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1190  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1191  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1192  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1193  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1194  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1195  }
1196  else
1197  {
1198  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1199  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1200  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1201  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1202  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1203  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1204  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1205  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1206  }
1207  }
1208 
1209  // remainder loop of work that does not divide by 4
1210  if (add_into)
1211  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1212  for (unsigned int v = 0; v < 8; ++v)
1213  out[offsets[v] + i] += in[i][v];
1214  else
1215  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1216  for (unsigned int v = 0; v < 8; ++v)
1217  out[offsets[v] + i] = in[i][v];
1218 }
1219 
1220 
1221 
1225 template <>
1226 class VectorizedArray<float, 16>
1227  : public VectorizedArrayBase<VectorizedArray<float, 16>>
1228 {
1229 public:
1233  using value_type = float;
1234 
1238  static const unsigned int n_array_elements = 16;
1239 
1244  VectorizedArray() = default;
1245 
1249  VectorizedArray(const float scalar)
1250  {
1251  this->operator=(scalar);
1252  }
1253 
1257  DEAL_II_ALWAYS_INLINE
1258  VectorizedArray &
1259  operator=(const float x)
1260  {
1261  data = _mm512_set1_ps(x);
1262  return *this;
1263  }
1264 
1268  DEAL_II_ALWAYS_INLINE
1269  float &operator[](const unsigned int comp)
1270  {
1271  AssertIndexRange(comp, 16);
1272  return *(reinterpret_cast<float *>(&data) + comp);
1273  }
1274 
1278  DEAL_II_ALWAYS_INLINE
1279  const float &operator[](const unsigned int comp) const
1280  {
1281  AssertIndexRange(comp, 16);
1282  return *(reinterpret_cast<const float *>(&data) + comp);
1283  }
1284 
1288  DEAL_II_ALWAYS_INLINE
1289  VectorizedArray &
1290  operator+=(const VectorizedArray &vec)
1291  {
1292  // if the compiler supports vector arithmetics, we can simply use +=
1293  // operator on the given data type. this allows the compiler to combine
1294  // additions with multiplication (fused multiply-add) if those
1295  // instructions are available. Otherwise, we need to use the built-in
1296  // intrinsic command for __m512
1297 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1298  data += vec.data;
1299 # else
1300  data = _mm512_add_ps(data, vec.data);
1301 # endif
1302  return *this;
1303  }
1304 
1308  DEAL_II_ALWAYS_INLINE
1309  VectorizedArray &
1310  operator-=(const VectorizedArray &vec)
1311  {
1312 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1313  data -= vec.data;
1314 # else
1315  data = _mm512_sub_ps(data, vec.data);
1316 # endif
1317  return *this;
1318  }
1322  DEAL_II_ALWAYS_INLINE
1323  VectorizedArray &
1324  operator*=(const VectorizedArray &vec)
1325  {
1326 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1327  data *= vec.data;
1328 # else
1329  data = _mm512_mul_ps(data, vec.data);
1330 # endif
1331  return *this;
1332  }
1333 
1337  DEAL_II_ALWAYS_INLINE
1338  VectorizedArray &
1339  operator/=(const VectorizedArray &vec)
1340  {
1341 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1342  data /= vec.data;
1343 # else
1344  data = _mm512_div_ps(data, vec.data);
1345 # endif
1346  return *this;
1347  }
1348 
1354  DEAL_II_ALWAYS_INLINE
1355  void
1356  load(const float *ptr)
1357  {
1358  data = _mm512_loadu_ps(ptr);
1359  }
1360 
1367  DEAL_II_ALWAYS_INLINE
1368  void
1369  store(float *ptr) const
1370  {
1371  _mm512_storeu_ps(ptr, data);
1372  }
1373 
1377  DEAL_II_ALWAYS_INLINE
1378  void
1379  streaming_store(float *ptr) const
1380  {
1381  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1382  ExcMessage("Memory not aligned"));
1383  _mm512_stream_ps(ptr, data);
1384  }
1385 
1398  DEAL_II_ALWAYS_INLINE
1399  void
1400  gather(const float *base_ptr, const unsigned int *offsets)
1401  {
1402  // unfortunately, there does not appear to be a 512 bit integer load, so
1403  // do it by some reinterpret casts here. this is allowed because the Intel
1404  // API allows aliasing between different vector types.
1405  const __m512 index_val =
1406  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1407  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1408  data = _mm512_i32gather_ps(index, base_ptr, 4);
1409  }
1410 
1423  DEAL_II_ALWAYS_INLINE
1424  void
1425  scatter(const unsigned int *offsets, float *base_ptr) const
1426  {
1427  for (unsigned int i = 0; i < 16; ++i)
1428  for (unsigned int j = i + 1; j < 16; ++j)
1429  Assert(offsets[i] != offsets[j],
1430  ExcMessage("Result of scatter undefined if two offset elements"
1431  " point to the same position"));
1432 
1433  // unfortunately, there does not appear to be a 512 bit integer load, so
1434  // do it by some reinterpret casts here. this is allowed because the Intel
1435  // API allows aliasing between different vector types.
1436  const __m512 index_val =
1437  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1438  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1439  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1440  }
1441 
1447  __m512 data;
1448 
1449 private:
1454  DEAL_II_ALWAYS_INLINE
1455  VectorizedArray
1456  get_sqrt() const
1457  {
1458  VectorizedArray res;
1459  res.data = _mm512_sqrt_ps(data);
1460  return res;
1461  }
1462 
1467  DEAL_II_ALWAYS_INLINE
1468  VectorizedArray
1469  get_abs() const
1470  {
1471  // to compute the absolute value, perform bitwise andnot with -0. This
1472  // will leave all value and exponent bits unchanged but force the sign
1473  // value to +. Since there is no andnot for AVX512, we interpret the data
1474  // as 32 bit integers and do the andnot on those types (note that andnot
1475  // is a bitwise operation so the data type does not matter)
1476  __m512 mask = _mm512_set1_ps(-0.f);
1477  VectorizedArray res;
1478  res.data = reinterpret_cast<__m512>(
1479  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1480  reinterpret_cast<__m512i>(data)));
1481  return res;
1482  }
1483 
1488  DEAL_II_ALWAYS_INLINE
1489  VectorizedArray
1490  get_max(const VectorizedArray &other) const
1491  {
1492  VectorizedArray res;
1493  res.data = _mm512_max_ps(data, other.data);
1494  return res;
1495  }
1496 
1501  DEAL_II_ALWAYS_INLINE
1502  VectorizedArray
1503  get_min(const VectorizedArray &other) const
1504  {
1505  VectorizedArray res;
1506  res.data = _mm512_min_ps(data, other.data);
1507  return res;
1508  }
1509 
1510  // Make a few functions friends.
1511  template <typename Number2, int width2>
1512  friend VectorizedArray<Number2, width2>
1513  std::sqrt(const VectorizedArray<Number2, width2> &);
1514  template <typename Number2, int width2>
1515  friend VectorizedArray<Number2, width2>
1516  std::abs(const VectorizedArray<Number2, width2> &);
1517  template <typename Number2, int width2>
1518  friend VectorizedArray<Number2, width2>
1519  std::max(const VectorizedArray<Number2, width2> &,
1520  const VectorizedArray<Number2, width2> &);
1521  template <typename Number2, int width2>
1522  friend VectorizedArray<Number2, width2>
1523  std::min(const VectorizedArray<Number2, width2> &,
1524  const VectorizedArray<Number2, width2> &);
1525 };
1526 
1527 
1528 
1532 template <>
1533 inline DEAL_II_ALWAYS_INLINE void
1534 vectorized_load_and_transpose(const unsigned int n_entries,
1535  const float * in,
1536  const unsigned int * offsets,
1537  VectorizedArray<float, 16> * out)
1538 {
1539  // Similar to the double case, we perform the work on smaller entities. In
1540  // this case, we start from 128 bit arrays and insert them into a full 512
1541  // bit index. This reduces the code size and register pressure because we do
1542  // shuffles on 4 numbers rather than 16.
1543  const unsigned int n_chunks = n_entries / 4;
1544 
1545  // To avoid warnings about uninitialized variables, need to initialize one
1546  // variable to a pre-existing value in out, which will never get used in
1547  // the end. Keep the initialization outside the loop because of a bug in
1548  // gcc-9 which generates a "vmovapd" instruction instead of "vmovupd" in
1549  // case t3 is initialized to zero (inside/outside of loop), see
1550  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1551  __m512 t0, t1, t2, t3;
1552  if (n_chunks > 0)
1553  t3 = out[0].data;
1554  for (unsigned int i = 0; i < n_chunks; ++i)
1555  {
1556  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1557  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1558  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1559  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1560  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1561  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1562  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1563  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1564  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1565  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1566  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1567  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1568  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1569  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1570  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1571  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1572 
1573  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1574  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1575  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1576  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1577 
1578  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1579  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1580  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1581  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1582  }
1583 
1584  // remainder loop of work that does not divide by 4
1585  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1586  out[i].gather(in + i, offsets);
1588 }
1589 
1590 
1591 
1595 template <>
1596 inline DEAL_II_ALWAYS_INLINE void
1597 vectorized_transpose_and_store(const bool add_into,
1598  const unsigned int n_entries,
1599  const VectorizedArray<float, 16> *in,
1600  const unsigned int * offsets,
1601  float * out)
1602 {
1603  const unsigned int n_chunks = n_entries / 4;
1604  for (unsigned int i = 0; i < n_chunks; ++i)
1605  {
1606  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1607  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1608  __m512 t2 =
1609  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1610  __m512 t3 =
1611  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1612  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1613  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1614  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1615  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1616 
1617  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1618  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1619  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1620  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1621  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1622  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1623  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1624  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1625  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1626  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1627  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1628  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1629  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1630  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1631  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1632  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1633 
1634  // Cannot use the same store instructions in both paths of the 'if'
1635  // because the compiler cannot know that there is no aliasing between
1636  // pointers
1637  if (add_into)
1638  {
1639  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1640  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1641  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1642  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1643  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1644  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1645  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1646  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1647  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1648  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1649  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1650  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1651  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1652  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1653  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1654  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1655  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1656  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1657  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1658  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1659  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1660  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1661  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1662  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1663  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1664  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1665  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1666  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1667  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1668  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1669  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1670  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1671  }
1672  else
1673  {
1674  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1675  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1676  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1677  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1678  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1679  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1680  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1681  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1682  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1683  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1684  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1685  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1686  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1687  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1688  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1689  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1690  }
1691  }
1692 
1693  // remainder loop of work that does not divide by 4
1694  if (add_into)
1695  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1696  for (unsigned int v = 0; v < 16; ++v)
1697  out[offsets[v] + i] += in[i][v];
1698  else
1699  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1700  for (unsigned int v = 0; v < 16; ++v)
1701  out[offsets[v] + i] = in[i][v];
1702 }
1703 
1704 # endif
1705 
1706 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1707 
1711 template <>
1712 class VectorizedArray<double, 4>
1713  : public VectorizedArrayBase<VectorizedArray<double, 4>>
1714 {
1715 public:
1719  using value_type = double;
1720 
1724  static const unsigned int n_array_elements = 4;
1725 
1730  VectorizedArray() = default;
1731 
1735  VectorizedArray(const double scalar)
1736  {
1737  this->operator=(scalar);
1738  }
1739 
1743  DEAL_II_ALWAYS_INLINE
1744  VectorizedArray &
1745  operator=(const double x)
1746  {
1747  data = _mm256_set1_pd(x);
1748  return *this;
1749  }
1750 
1754  DEAL_II_ALWAYS_INLINE
1755  double &operator[](const unsigned int comp)
1756  {
1757  AssertIndexRange(comp, 4);
1758  return *(reinterpret_cast<double *>(&data) + comp);
1759  }
1760 
1764  DEAL_II_ALWAYS_INLINE
1765  const double &operator[](const unsigned int comp) const
1766  {
1767  AssertIndexRange(comp, 4);
1768  return *(reinterpret_cast<const double *>(&data) + comp);
1769  }
1770 
1774  DEAL_II_ALWAYS_INLINE
1775  VectorizedArray &
1776  operator+=(const VectorizedArray &vec)
1777  {
1778  // if the compiler supports vector arithmetics, we can simply use +=
1779  // operator on the given data type. this allows the compiler to combine
1780  // additions with multiplication (fused multiply-add) if those
1781  // instructions are available. Otherwise, we need to use the built-in
1782  // intrinsic command for __m256d
1783 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1784  data += vec.data;
1785 # else
1786  data = _mm256_add_pd(data, vec.data);
1787 # endif
1788  return *this;
1789  }
1790 
1794  DEAL_II_ALWAYS_INLINE
1795  VectorizedArray &
1796  operator-=(const VectorizedArray &vec)
1797  {
1798 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1799  data -= vec.data;
1800 # else
1801  data = _mm256_sub_pd(data, vec.data);
1802 # endif
1803  return *this;
1804  }
1808  DEAL_II_ALWAYS_INLINE
1809  VectorizedArray &
1810  operator*=(const VectorizedArray &vec)
1811  {
1812 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1813  data *= vec.data;
1814 # else
1815  data = _mm256_mul_pd(data, vec.data);
1816 # endif
1817  return *this;
1818  }
1819 
1823  DEAL_II_ALWAYS_INLINE
1824  VectorizedArray &
1825  operator/=(const VectorizedArray &vec)
1826  {
1827 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1828  data /= vec.data;
1829 # else
1830  data = _mm256_div_pd(data, vec.data);
1831 # endif
1832  return *this;
1833  }
1834 
1840  DEAL_II_ALWAYS_INLINE
1841  void
1842  load(const double *ptr)
1843  {
1844  data = _mm256_loadu_pd(ptr);
1845  }
1846 
1853  DEAL_II_ALWAYS_INLINE
1854  void
1855  store(double *ptr) const
1856  {
1857  _mm256_storeu_pd(ptr, data);
1858  }
1859 
1863  DEAL_II_ALWAYS_INLINE
1864  void
1865  streaming_store(double *ptr) const
1866  {
1867  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1868  ExcMessage("Memory not aligned"));
1869  _mm256_stream_pd(ptr, data);
1870  }
1871 
1884  DEAL_II_ALWAYS_INLINE
1885  void
1886  gather(const double *base_ptr, const unsigned int *offsets)
1887  {
1888 # ifdef __AVX2__
1889  // unfortunately, there does not appear to be a 128 bit integer load, so
1890  // do it by some reinterpret casts here. this is allowed because the Intel
1891  // API allows aliasing between different vector types.
1892  const __m128 index_val =
1893  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
1894  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
1895  data = _mm256_i32gather_pd(base_ptr, index, 8);
1896 # else
1897  for (unsigned int i = 0; i < 4; ++i)
1898  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1899 # endif
1900  }
1901 
1914  DEAL_II_ALWAYS_INLINE
1915  void
1916  scatter(const unsigned int *offsets, double *base_ptr) const
1917  {
1918  // no scatter operation in AVX/AVX2
1919  for (unsigned int i = 0; i < 4; ++i)
1920  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1921  }
1922 
1928  __m256d data;
1929 
1930 private:
1935  DEAL_II_ALWAYS_INLINE
1936  VectorizedArray
1937  get_sqrt() const
1938  {
1939  VectorizedArray res;
1940  res.data = _mm256_sqrt_pd(data);
1941  return res;
1942  }
1943 
1948  DEAL_II_ALWAYS_INLINE
1949  VectorizedArray
1950  get_abs() const
1951  {
1952  // to compute the absolute value, perform bitwise andnot with -0. This
1953  // will leave all value and exponent bits unchanged but force the sign
1954  // value to +.
1955  __m256d mask = _mm256_set1_pd(-0.);
1956  VectorizedArray res;
1957  res.data = _mm256_andnot_pd(mask, data);
1958  return res;
1959  }
1960 
1965  DEAL_II_ALWAYS_INLINE
1966  VectorizedArray
1967  get_max(const VectorizedArray &other) const
1968  {
1969  VectorizedArray res;
1970  res.data = _mm256_max_pd(data, other.data);
1971  return res;
1972  }
1973 
1978  DEAL_II_ALWAYS_INLINE
1979  VectorizedArray
1980  get_min(const VectorizedArray &other) const
1981  {
1982  VectorizedArray res;
1983  res.data = _mm256_min_pd(data, other.data);
1984  return res;
1985  }
1986 
1987  // Make a few functions friends.
1988  template <typename Number2, int width2>
1989  friend VectorizedArray<Number2, width2>
1990  std::sqrt(const VectorizedArray<Number2, width2> &);
1991  template <typename Number2, int width2>
1992  friend VectorizedArray<Number2, width2>
1993  std::abs(const VectorizedArray<Number2, width2> &);
1994  template <typename Number2, int width2>
1995  friend VectorizedArray<Number2, width2>
1996  std::max(const VectorizedArray<Number2, width2> &,
1997  const VectorizedArray<Number2, width2> &);
1998  template <typename Number2, int width2>
1999  friend VectorizedArray<Number2, width2>
2000  std::min(const VectorizedArray<Number2, width2> &,
2001  const VectorizedArray<Number2, width2> &);
2002 };
2003 
2004 
2005 
2009 template <>
2010 inline DEAL_II_ALWAYS_INLINE void
2011 vectorized_load_and_transpose(const unsigned int n_entries,
2012  const double * in,
2013  const unsigned int * offsets,
2014  VectorizedArray<double, 4> * out)
2015 {
2016  const unsigned int n_chunks = n_entries / 4;
2017  const double * in0 = in + offsets[0];
2018  const double * in1 = in + offsets[1];
2019  const double * in2 = in + offsets[2];
2020  const double * in3 = in + offsets[3];
2021 
2022  for (unsigned int i = 0; i < n_chunks; ++i)
2023  {
2024  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2025  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2026  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2027  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2028  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2029  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2030  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2031  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2032  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2033  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2034  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2035  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2036  }
2037 
2038  // remainder loop of work that does not divide by 4
2039  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2040  out[i].gather(in + i, offsets);
2041 }
2042 
2043 
2044 
2048 template <>
2049 inline DEAL_II_ALWAYS_INLINE void
2050 vectorized_transpose_and_store(const bool add_into,
2051  const unsigned int n_entries,
2052  const VectorizedArray<double, 4> *in,
2053  const unsigned int * offsets,
2054  double * out)
2055 {
2056  const unsigned int n_chunks = n_entries / 4;
2057  double * out0 = out + offsets[0];
2058  double * out1 = out + offsets[1];
2059  double * out2 = out + offsets[2];
2060  double * out3 = out + offsets[3];
2061  for (unsigned int i = 0; i < n_chunks; ++i)
2062  {
2063  __m256d u0 = in[4 * i + 0].data;
2064  __m256d u1 = in[4 * i + 1].data;
2065  __m256d u2 = in[4 * i + 2].data;
2066  __m256d u3 = in[4 * i + 3].data;
2067  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2068  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2069  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2070  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2071  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2072  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2073  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2074  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2075 
2076  // Cannot use the same store instructions in both paths of the 'if'
2077  // because the compiler cannot know that there is no aliasing between
2078  // pointers
2079  if (add_into)
2080  {
2081  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2082  _mm256_storeu_pd(out0 + 4 * i, res0);
2083  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2084  _mm256_storeu_pd(out1 + 4 * i, res1);
2085  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2086  _mm256_storeu_pd(out2 + 4 * i, res2);
2087  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2088  _mm256_storeu_pd(out3 + 4 * i, res3);
2089  }
2090  else
2091  {
2092  _mm256_storeu_pd(out0 + 4 * i, res0);
2093  _mm256_storeu_pd(out1 + 4 * i, res1);
2094  _mm256_storeu_pd(out2 + 4 * i, res2);
2095  _mm256_storeu_pd(out3 + 4 * i, res3);
2096  }
2097  }
2098 
2099  // remainder loop of work that does not divide by 4
2100  if (add_into)
2101  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2102  for (unsigned int v = 0; v < 4; ++v)
2103  out[offsets[v] + i] += in[i][v];
2104  else
2105  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2106  for (unsigned int v = 0; v < 4; ++v)
2107  out[offsets[v] + i] = in[i][v];
2108 }
2109 
2110 
2111 
2115 template <>
2116 class VectorizedArray<float, 8>
2117  : public VectorizedArrayBase<VectorizedArray<float, 8>>
2118 {
2119 public:
2123  using value_type = float;
2124 
2128  static const unsigned int n_array_elements = 8;
2129 
2134  VectorizedArray() = default;
2135 
2139  VectorizedArray(const float scalar)
2140  {
2141  this->operator=(scalar);
2142  }
2143 
2147  DEAL_II_ALWAYS_INLINE
2148  VectorizedArray &
2149  operator=(const float x)
2150  {
2151  data = _mm256_set1_ps(x);
2152  return *this;
2153  }
2154 
2158  DEAL_II_ALWAYS_INLINE
2159  float &operator[](const unsigned int comp)
2160  {
2161  AssertIndexRange(comp, 8);
2162  return *(reinterpret_cast<float *>(&data) + comp);
2163  }
2164 
2168  DEAL_II_ALWAYS_INLINE
2169  const float &operator[](const unsigned int comp) const
2170  {
2171  AssertIndexRange(comp, 8);
2172  return *(reinterpret_cast<const float *>(&data) + comp);
2173  }
2174 
2178  DEAL_II_ALWAYS_INLINE
2179  VectorizedArray &
2180  operator+=(const VectorizedArray &vec)
2181  {
2182  // if the compiler supports vector arithmetics, we can simply use +=
2183  // operator on the given data type. this allows the compiler to combine
2184  // additions with multiplication (fused multiply-add) if those
2185  // instructions are available. Otherwise, we need to use the built-in
2186  // intrinsic command for __m256
2187 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2188  data += vec.data;
2189 # else
2190  data = _mm256_add_ps(data, vec.data);
2191 # endif
2192  return *this;
2193  }
2194 
2198  DEAL_II_ALWAYS_INLINE
2199  VectorizedArray &
2200  operator-=(const VectorizedArray &vec)
2201  {
2202 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2203  data -= vec.data;
2204 # else
2205  data = _mm256_sub_ps(data, vec.data);
2206 # endif
2207  return *this;
2208  }
2212  DEAL_II_ALWAYS_INLINE
2213  VectorizedArray &
2214  operator*=(const VectorizedArray &vec)
2215  {
2216 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2217  data *= vec.data;
2218 # else
2219  data = _mm256_mul_ps(data, vec.data);
2220 # endif
2221  return *this;
2222  }
2223 
2227  DEAL_II_ALWAYS_INLINE
2228  VectorizedArray &
2229  operator/=(const VectorizedArray &vec)
2230  {
2231 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2232  data /= vec.data;
2233 # else
2234  data = _mm256_div_ps(data, vec.data);
2235 # endif
2236  return *this;
2237  }
2238 
2244  DEAL_II_ALWAYS_INLINE
2245  void
2246  load(const float *ptr)
2247  {
2248  data = _mm256_loadu_ps(ptr);
2249  }
2250 
2257  DEAL_II_ALWAYS_INLINE
2258  void
2259  store(float *ptr) const
2260  {
2261  _mm256_storeu_ps(ptr, data);
2262  }
2263 
2267  DEAL_II_ALWAYS_INLINE
2268  void
2269  streaming_store(float *ptr) const
2270  {
2271  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2272  ExcMessage("Memory not aligned"));
2273  _mm256_stream_ps(ptr, data);
2274  }
2275 
2288  DEAL_II_ALWAYS_INLINE
2289  void
2290  gather(const float *base_ptr, const unsigned int *offsets)
2291  {
2292 # ifdef __AVX2__
2293  // unfortunately, there does not appear to be a 256 bit integer load, so
2294  // do it by some reinterpret casts here. this is allowed because the Intel
2295  // API allows aliasing between different vector types.
2296  const __m256 index_val =
2297  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2298  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2299  data = _mm256_i32gather_ps(base_ptr, index, 4);
2300 # else
2301  for (unsigned int i = 0; i < 8; ++i)
2302  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2303 # endif
2304  }
2305 
2318  DEAL_II_ALWAYS_INLINE
2319  void
2320  scatter(const unsigned int *offsets, float *base_ptr) const
2321  {
2322  // no scatter operation in AVX/AVX2
2323  for (unsigned int i = 0; i < 8; ++i)
2324  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2325  }
2326 
2332  __m256 data;
2333 
2334 private:
2339  DEAL_II_ALWAYS_INLINE
2340  VectorizedArray
2341  get_sqrt() const
2342  {
2343  VectorizedArray res;
2344  res.data = _mm256_sqrt_ps(data);
2345  return res;
2346  }
2347 
2352  DEAL_II_ALWAYS_INLINE
2353  VectorizedArray
2354  get_abs() const
2355  {
2356  // to compute the absolute value, perform bitwise andnot with -0. This
2357  // will leave all value and exponent bits unchanged but force the sign
2358  // value to +.
2359  __m256 mask = _mm256_set1_ps(-0.f);
2360  VectorizedArray res;
2361  res.data = _mm256_andnot_ps(mask, data);
2362  return res;
2363  }
2364 
2369  DEAL_II_ALWAYS_INLINE
2370  VectorizedArray
2371  get_max(const VectorizedArray &other) const
2372  {
2373  VectorizedArray res;
2374  res.data = _mm256_max_ps(data, other.data);
2375  return res;
2376  }
2377 
2382  DEAL_II_ALWAYS_INLINE
2383  VectorizedArray
2384  get_min(const VectorizedArray &other) const
2385  {
2386  VectorizedArray res;
2387  res.data = _mm256_min_ps(data, other.data);
2388  return res;
2389  }
2390 
2391  // Make a few functions friends.
2392  template <typename Number2, int width2>
2393  friend VectorizedArray<Number2, width2>
2394  std::sqrt(const VectorizedArray<Number2, width2> &);
2395  template <typename Number2, int width2>
2396  friend VectorizedArray<Number2, width2>
2397  std::abs(const VectorizedArray<Number2, width2> &);
2398  template <typename Number2, int width2>
2399  friend VectorizedArray<Number2, width2>
2400  std::max(const VectorizedArray<Number2, width2> &,
2401  const VectorizedArray<Number2, width2> &);
2402  template <typename Number2, int width2>
2403  friend VectorizedArray<Number2, width2>
2404  std::min(const VectorizedArray<Number2, width2> &,
2405  const VectorizedArray<Number2, width2> &);
2406 };
2407 
2408 
2409 
2413 template <>
2414 inline DEAL_II_ALWAYS_INLINE void
2415 vectorized_load_and_transpose(const unsigned int n_entries,
2416  const float * in,
2417  const unsigned int * offsets,
2418  VectorizedArray<float, 8> * out)
2419 {
2420  const unsigned int n_chunks = n_entries / 4;
2421  for (unsigned int i = 0; i < n_chunks; ++i)
2422  {
2423  // To avoid warnings about uninitialized variables, need to initialize
2424  // one variable with zero before using it.
2425  __m256 t0, t1, t2, t3 = {};
2426  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2427  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2428  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2429  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2430  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2431  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2432  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2433  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2434 
2435  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2436  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2437  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2438  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2439  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2440  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2441  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2442  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2443  }
2444 
2445  // remainder loop of work that does not divide by 4
2446  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2447  out[i].gather(in + i, offsets);
2448 }
2449 
2450 
2451 
2455 template <>
2456 inline DEAL_II_ALWAYS_INLINE void
2457 vectorized_transpose_and_store(const bool add_into,
2458  const unsigned int n_entries,
2459  const VectorizedArray<float, 8> *in,
2460  const unsigned int * offsets,
2461  float * out)
2462 {
2463  const unsigned int n_chunks = n_entries / 4;
2464  for (unsigned int i = 0; i < n_chunks; ++i)
2465  {
2466  __m256 u0 = in[4 * i + 0].data;
2467  __m256 u1 = in[4 * i + 1].data;
2468  __m256 u2 = in[4 * i + 2].data;
2469  __m256 u3 = in[4 * i + 3].data;
2470  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
2471  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
2472  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
2473  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
2474  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
2475  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
2476  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
2477  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
2478  __m128 res0 = _mm256_extractf128_ps(u0, 0);
2479  __m128 res4 = _mm256_extractf128_ps(u0, 1);
2480  __m128 res1 = _mm256_extractf128_ps(u1, 0);
2481  __m128 res5 = _mm256_extractf128_ps(u1, 1);
2482  __m128 res2 = _mm256_extractf128_ps(u2, 0);
2483  __m128 res6 = _mm256_extractf128_ps(u2, 1);
2484  __m128 res3 = _mm256_extractf128_ps(u3, 0);
2485  __m128 res7 = _mm256_extractf128_ps(u3, 1);
2486 
2487  // Cannot use the same store instructions in both paths of the 'if'
2488  // because the compiler cannot know that there is no aliasing between
2489  // pointers
2490  if (add_into)
2491  {
2492  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2493  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2494  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2495  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2496  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2497  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2498  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2499  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2500  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2501  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2502  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2503  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2504  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2505  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2506  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2507  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2508  }
2509  else
2510  {
2511  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2512  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2513  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2514  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2515  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2516  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2517  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2518  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2519  }
2520  }
2521 
2522  // remainder loop of work that does not divide by 4
2523  if (add_into)
2524  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2525  for (unsigned int v = 0; v < 8; ++v)
2526  out[offsets[v] + i] += in[i][v];
2527  else
2528  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2529  for (unsigned int v = 0; v < 8; ++v)
2530  out[offsets[v] + i] = in[i][v];
2531 }
2532 
2533 # endif
2534 
2535 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2536 
2540 template <>
2541 class VectorizedArray<double, 2>
2542  : public VectorizedArrayBase<VectorizedArray<double, 2>>
2543 {
2544 public:
2548  using value_type = double;
2549 
2553  static const unsigned int n_array_elements = 2;
2554 
2559  VectorizedArray() = default;
2560 
2564  VectorizedArray(const double scalar)
2565  {
2566  this->operator=(scalar);
2567  }
2568 
2572  DEAL_II_ALWAYS_INLINE
2573  VectorizedArray &
2574  operator=(const double x)
2575  {
2576  data = _mm_set1_pd(x);
2577  return *this;
2578  }
2579 
2583  DEAL_II_ALWAYS_INLINE
2584  double &operator[](const unsigned int comp)
2585  {
2586  AssertIndexRange(comp, 2);
2587  return *(reinterpret_cast<double *>(&data) + comp);
2588  }
2589 
2593  DEAL_II_ALWAYS_INLINE
2594  const double &operator[](const unsigned int comp) const
2595  {
2596  AssertIndexRange(comp, 2);
2597  return *(reinterpret_cast<const double *>(&data) + comp);
2598  }
2599 
2603  DEAL_II_ALWAYS_INLINE
2604  VectorizedArray &
2605  operator+=(const VectorizedArray &vec)
2606  {
2607 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2608  data += vec.data;
2609 # else
2610  data = _mm_add_pd(data, vec.data);
2611 # endif
2612  return *this;
2613  }
2614 
2618  DEAL_II_ALWAYS_INLINE
2619  VectorizedArray &
2620  operator-=(const VectorizedArray &vec)
2621  {
2622 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2623  data -= vec.data;
2624 # else
2625  data = _mm_sub_pd(data, vec.data);
2626 # endif
2627  return *this;
2628  }
2629 
2633  DEAL_II_ALWAYS_INLINE
2634  VectorizedArray &
2635  operator*=(const VectorizedArray &vec)
2636  {
2637 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2638  data *= vec.data;
2639 # else
2640  data = _mm_mul_pd(data, vec.data);
2641 # endif
2642  return *this;
2643  }
2644 
2648  DEAL_II_ALWAYS_INLINE
2649  VectorizedArray &
2650  operator/=(const VectorizedArray &vec)
2651  {
2652 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2653  data /= vec.data;
2654 # else
2655  data = _mm_div_pd(data, vec.data);
2656 # endif
2657  return *this;
2658  }
2659 
2665  DEAL_II_ALWAYS_INLINE
2666  void
2667  load(const double *ptr)
2668  {
2669  data = _mm_loadu_pd(ptr);
2670  }
2671 
2678  DEAL_II_ALWAYS_INLINE
2679  void
2680  store(double *ptr) const
2681  {
2682  _mm_storeu_pd(ptr, data);
2683  }
2684 
2688  DEAL_II_ALWAYS_INLINE
2689  void
2690  streaming_store(double *ptr) const
2691  {
2692  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2693  ExcMessage("Memory not aligned"));
2694  _mm_stream_pd(ptr, data);
2695  }
2696 
2709  DEAL_II_ALWAYS_INLINE
2710  void
2711  gather(const double *base_ptr, const unsigned int *offsets)
2712  {
2713  for (unsigned int i = 0; i < 2; ++i)
2714  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2715  }
2716 
2729  DEAL_II_ALWAYS_INLINE
2730  void
2731  scatter(const unsigned int *offsets, double *base_ptr) const
2732  {
2733  for (unsigned int i = 0; i < 2; ++i)
2734  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2735  }
2736 
2742  __m128d data;
2743 
2744 private:
2749  DEAL_II_ALWAYS_INLINE
2750  VectorizedArray
2751  get_sqrt() const
2752  {
2753  VectorizedArray res;
2754  res.data = _mm_sqrt_pd(data);
2755  return res;
2756  }
2757 
2762  DEAL_II_ALWAYS_INLINE
2763  VectorizedArray
2764  get_abs() const
2765  {
2766  // to compute the absolute value, perform
2767  // bitwise andnot with -0. This will leave all
2768  // value and exponent bits unchanged but force
2769  // the sign value to +.
2770  __m128d mask = _mm_set1_pd(-0.);
2771  VectorizedArray res;
2772  res.data = _mm_andnot_pd(mask, data);
2773  return res;
2774  }
2775 
2780  DEAL_II_ALWAYS_INLINE
2781  VectorizedArray
2782  get_max(const VectorizedArray &other) const
2783  {
2784  VectorizedArray res;
2785  res.data = _mm_max_pd(data, other.data);
2786  return res;
2787  }
2788 
2793  DEAL_II_ALWAYS_INLINE
2794  VectorizedArray
2795  get_min(const VectorizedArray &other) const
2796  {
2797  VectorizedArray res;
2798  res.data = _mm_min_pd(data, other.data);
2799  return res;
2800  }
2801 
2802  // Make a few functions friends.
2803  template <typename Number2, int width2>
2804  friend VectorizedArray<Number2, width2>
2805  std::sqrt(const VectorizedArray<Number2, width2> &);
2806  template <typename Number2, int width2>
2807  friend VectorizedArray<Number2, width2>
2808  std::abs(const VectorizedArray<Number2, width2> &);
2809  template <typename Number2, int width2>
2810  friend VectorizedArray<Number2, width2>
2811  std::max(const VectorizedArray<Number2, width2> &,
2812  const VectorizedArray<Number2, width2> &);
2813  template <typename Number2, int width2>
2814  friend VectorizedArray<Number2, width2>
2815  std::min(const VectorizedArray<Number2, width2> &,
2816  const VectorizedArray<Number2, width2> &);
2817 };
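Editorial usage sketch (not part of the header): the specialization above behaves like two doubles processed in lockstep, each operator mapping to essentially one SSE2 instruction, and std::abs relying on the sign-bit trick implemented in get_abs(). The snippet assumes a deal.II build configured with at least SSE2 support.

#include <deal.II/base/vectorization.h>
#include <iostream>

int main()
{
  using dealii::VectorizedArray;

  // broadcast-construct one operand, fill the other lane by lane
  VectorizedArray<double, 2> a = 3.0;
  VectorizedArray<double, 2> b;
  b[0] = 1.0;
  b[1] = -2.0;

  // lane-wise arithmetic: c = {3*1+3, 3*(-2)+3} = {6, -3}
  VectorizedArray<double, 2> c = a * b + a;

  // absolute value via the bitwise andnot with -0.0 shown in get_abs(): {6, 3}
  c = std::abs(c);

  // unaligned store through the pointer interface (_mm_storeu_pd)
  double buffer[2];
  c.store(buffer);
  std::cout << buffer[0] << ' ' << buffer[1] << '\n';
}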
2818 
2819 
2820 
2824 template <>
2825 inline DEAL_II_ALWAYS_INLINE void
2826 vectorized_load_and_transpose(const unsigned int n_entries,
2827  const double * in,
2828  const unsigned int * offsets,
2829  VectorizedArray<double, 2> * out)
2830 {
2831  const unsigned int n_chunks = n_entries / 2;
2832  for (unsigned int i = 0; i < n_chunks; ++i)
2833  {
2834  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
2835  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
2836  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
2837  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
2838  }
2839 
2840  // remainder loop of work that does not divide by 2
2841  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2842  for (unsigned int v = 0; v < 2; ++v)
2843  out[i][v] = in[offsets[v] + i];
2844 }
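Editorial reference sketch: the intrinsics above only accelerate the 2x2 transposes with _mm_unpacklo_pd/_mm_unpackhi_pd; semantically the specialization is equivalent to the scalar loop below (the helper name is illustrative, not part of deal.II).

#include <deal.II/base/vectorization.h>

// Lane v of out[i] receives in[offsets[v] + i], i.e. the data is read from
// two strided arrays and transposed into the lanes of the vectorized type.
void load_and_transpose_reference(const unsigned int                  n_entries,
                                  const double *                      in,
                                  const unsigned int *                offsets,
                                  dealii::VectorizedArray<double, 2> *out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      out[i][v] = in[offsets[v] + i];
}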
2845 
2846 
2847 
2851 template <>
2852 inline DEAL_II_ALWAYS_INLINE void
2853 vectorized_transpose_and_store(const bool add_into,
2854  const unsigned int n_entries,
2855  const VectorizedArray<double, 2> *in,
2856  const unsigned int * offsets,
2857  double * out)
2858 {
2859  const unsigned int n_chunks = n_entries / 2;
2860  if (add_into)
2861  {
2862  for (unsigned int i = 0; i < n_chunks; ++i)
2863  {
2864  __m128d u0 = in[2 * i + 0].data;
2865  __m128d u1 = in[2 * i + 1].data;
2866  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2867  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2868  _mm_storeu_pd(out + 2 * i + offsets[0],
2869  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
2870  res0));
2871  _mm_storeu_pd(out + 2 * i + offsets[1],
2872  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
2873  res1));
2874  }
2875  // remainder loop of work that does not divide by 2
2876  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2877  for (unsigned int v = 0; v < 2; ++v)
2878  out[offsets[v] + i] += in[i][v];
2879  }
2880  else
2881  {
2882  for (unsigned int i = 0; i < n_chunks; ++i)
2883  {
2884  __m128d u0 = in[2 * i + 0].data;
2885  __m128d u1 = in[2 * i + 1].data;
2886  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2887  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2888  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2889  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2890  }
2891  // remainder loop of work that does not divide by 2
2892  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2893  for (unsigned int v = 0; v < 2; ++v)
2894  out[offsets[v] + i] = in[i][v];
2895  }
2896 }
2897 
2898 
2899 
2903 template <>
2904 class VectorizedArray<float, 4>
2905  : public VectorizedArrayBase<VectorizedArray<float, 4>>
2906 {
2907 public:
2911  using value_type = float;
2912 
2916  static const unsigned int n_array_elements = 4;
2917 
2926  VectorizedArray() = default;
2927 
2931  VectorizedArray(const float scalar)
2932  {
2933  this->operator=(scalar);
2934  }
2935 
2936  DEAL_II_ALWAYS_INLINE
2937  VectorizedArray &
2938  operator=(const float x)
2939  {
2940  data = _mm_set1_ps(x);
2941  return *this;
2942  }
2943 
2947  DEAL_II_ALWAYS_INLINE
2948  float &operator[](const unsigned int comp)
2949  {
2950  AssertIndexRange(comp, 4);
2951  return *(reinterpret_cast<float *>(&data) + comp);
2952  }
2953 
2957  DEAL_II_ALWAYS_INLINE
2958  const float &operator[](const unsigned int comp) const
2959  {
2960  AssertIndexRange(comp, 4);
2961  return *(reinterpret_cast<const float *>(&data) + comp);
2962  }
2963 
2967  DEAL_II_ALWAYS_INLINE
2968  VectorizedArray &
2969  operator+=(const VectorizedArray &vec)
2970  {
2971 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2972  data += vec.data;
2973 # else
2974  data = _mm_add_ps(data, vec.data);
2975 # endif
2976  return *this;
2977  }
2978 
2982  DEAL_II_ALWAYS_INLINE
2983  VectorizedArray &
2984  operator-=(const VectorizedArray &vec)
2985  {
2986 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2987  data -= vec.data;
2988 # else
2989  data = _mm_sub_ps(data, vec.data);
2990 # endif
2991  return *this;
2992  }
2993 
2997  DEAL_II_ALWAYS_INLINE
2998  VectorizedArray &
2999  operator*=(const VectorizedArray &vec)
3000  {
3001 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3002  data *= vec.data;
3003 # else
3004  data = _mm_mul_ps(data, vec.data);
3005 # endif
3006  return *this;
3007  }
3008 
3012  DEAL_II_ALWAYS_INLINE
3013  VectorizedArray &
3014  operator/=(const VectorizedArray &vec)
3015  {
3016 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3017  data /= vec.data;
3018 # else
3019  data = _mm_div_ps(data, vec.data);
3020 # endif
3021  return *this;
3022  }
3023 
3029  DEAL_II_ALWAYS_INLINE
3030  void
3031  load(const float *ptr)
3032  {
3033  data = _mm_loadu_ps(ptr);
3034  }
3035 
3042  DEAL_II_ALWAYS_INLINE
3043  void
3044  store(float *ptr) const
3045  {
3046  _mm_storeu_ps(ptr, data);
3047  }
3048 
3052  DEAL_II_ALWAYS_INLINE
3053  void
3054  streaming_store(float *ptr) const
3055  {
3056  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3057  ExcMessage("Memory not aligned"));
3058  _mm_stream_ps(ptr, data);
3059  }
3060 
3073  DEAL_II_ALWAYS_INLINE
3074  void
3075  gather(const float *base_ptr, const unsigned int *offsets)
3076  {
3077  for (unsigned int i = 0; i < 4; ++i)
3078  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3079  }
3080 
3093  DEAL_II_ALWAYS_INLINE
3094  void
3095  scatter(const unsigned int *offsets, float *base_ptr) const
3096  {
3097  for (unsigned int i = 0; i < 4; ++i)
3098  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3099  }
3100 
3106  __m128 data;
3107 
3108 private:
3113  DEAL_II_ALWAYS_INLINE
3114  VectorizedArray
3115  get_sqrt() const
3116  {
3117  VectorizedArray res;
3118  res.data = _mm_sqrt_ps(data);
3119  return res;
3120  }
3121 
3126  DEAL_II_ALWAYS_INLINE
3127  VectorizedArray
3128  get_abs() const
3129  {
3130  // to compute the absolute value, perform bitwise andnot with -0. This
3131  // will leave all value and exponent bits unchanged but force the sign
3132  // value to +.
3133  __m128 mask = _mm_set1_ps(-0.f);
3134  VectorizedArray res;
3135  res.data = _mm_andnot_ps(mask, data);
3136  return res;
3137  }
3138 
3143  DEAL_II_ALWAYS_INLINE
3144  VectorizedArray
3145  get_max(const VectorizedArray &other) const
3146  {
3147  VectorizedArray res;
3148  res.data = _mm_max_ps(data, other.data);
3149  return res;
3150  }
3151 
3156  DEAL_II_ALWAYS_INLINE
3157  VectorizedArray
3158  get_min(const VectorizedArray &other) const
3159  {
3160  VectorizedArray res;
3161  res.data = _mm_min_ps(data, other.data);
3162  return res;
3163  }
3164 
3165  // Make a few functions friends.
3166  template <typename Number2, int width2>
3167  friend VectorizedArray<Number2, width2>
3168  std::sqrt(const VectorizedArray<Number2, width2> &);
3169  template <typename Number2, int width2>
3170  friend VectorizedArray<Number2, width2>
3171  std::abs(const VectorizedArray<Number2, width2> &);
3172  template <typename Number2, int width2>
3173  friend VectorizedArray<Number2, width2>
3174  std::max(const VectorizedArray<Number2, width2> &,
3175  const VectorizedArray<Number2, width2> &);
3176  template <typename Number2, int width2>
3177  friend VectorizedArray<Number2, width2>
3178  std::min(const VectorizedArray<Number2, width2> &,
3179  const VectorizedArray<Number2, width2> &);
3180 };
3181 
3182 
3183 
3187 template <>
3188 inline DEAL_II_ALWAYS_INLINE void
3189 vectorized_load_and_transpose(const unsigned int n_entries,
3190  const float * in,
3191  const unsigned int * offsets,
3192  VectorizedArray<float, 4> * out)
3193 {
3194  const unsigned int n_chunks = n_entries / 4;
3195  for (unsigned int i = 0; i < n_chunks; ++i)
3196  {
3197  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3198  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3199  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3200  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3201  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3202  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3203  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3204  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3205  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3206  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3207  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3208  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3209  }
3210 
3211  // remainder loop of work that does not divide by 4
3212  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3213  for (unsigned int v = 0; v < 4; ++v)
3214  out[i][v] = in[offsets[v] + i];
3215 }
3216 
3217 
3218 
3222 template <>
3223 inline DEAL_II_ALWAYS_INLINE void
3224 vectorized_transpose_and_store(const bool add_into,
3225  const unsigned int n_entries,
3226  const VectorizedArray<float, 4> *in,
3227  const unsigned int * offsets,
3228  float * out)
3229 {
3230  const unsigned int n_chunks = n_entries / 4;
3231  for (unsigned int i = 0; i < n_chunks; ++i)
3232  {
3233  __m128 u0 = in[4 * i + 0].data;
3234  __m128 u1 = in[4 * i + 1].data;
3235  __m128 u2 = in[4 * i + 2].data;
3236  __m128 u3 = in[4 * i + 3].data;
3237  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
3238  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
3239  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
3240  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
3241  u0 = _mm_shuffle_ps(t0, t2, 0x88);
3242  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
3243  u2 = _mm_shuffle_ps(t1, t3, 0x88);
3244  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
3245 
3246  // Cannot use the same store instructions in both paths of the 'if'
3247  // because the compiler cannot know that there is no aliasing between
3248  // pointers
3249  if (add_into)
3250  {
3251  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
3252  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3253  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
3254  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3255  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
3256  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3257  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
3258  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3259  }
3260  else
3261  {
3262  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3263  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3264  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3265  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3266  }
3267  }
3268 
3269  // remainder loop of work that does not divide by 4
3270  if (add_into)
3271  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3272  for (unsigned int v = 0; v < 4; ++v)
3273  out[offsets[v] + i] += in[i][v];
3274  else
3275  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3276  for (unsigned int v = 0; v < 4; ++v)
3277  out[offsets[v] + i] = in[i][v];
3278 }
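Editorial reference sketch: as with the load variant, the shuffles above only speed up the 4x4 in-register transpose; the observable effect of the specialization (including the add_into switch) matches the scalar loop below, with the helper name again being illustrative only.

#include <deal.II/base/vectorization.h>

// Lane v of in[i] is written (or accumulated) to out[offsets[v] + i].
void transpose_and_store_reference(const bool          add_into,
                                   const unsigned int  n_entries,
                                   const dealii::VectorizedArray<float, 4> *in,
                                   const unsigned int *offsets,
                                   float *             out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < 4; ++v)
      if (add_into)
        out[offsets[v] + i] += in[i][v];
      else
        out[offsets[v] + i] = in[i][v];
}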
3279 
3280 
3281 
3282 # endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__)
3283 
3284 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
3285  defined(__VSX__)
3286 
3287 template <>
3288 class VectorizedArray<double, 2>
3289  : public VectorizedArrayBase<VectorizedArray<double, 2>>
3290 {
3291 public:
3295  using value_type = double;
3296 
3300  static const unsigned int n_array_elements = 2;
3301 
3306  VectorizedArray() = default;
3307 
3311  VectorizedArray(const double scalar)
3312  {
3313  this->operator=(scalar);
3314  }
3315 
3319  DEAL_II_ALWAYS_INLINE
3320  VectorizedArray &
3321  operator=(const double x)
3322  {
3323  data = vec_splats(x);
3324  return *this;
3325  }
3326 
3330  DEAL_II_ALWAYS_INLINE
3331  double &operator[](const unsigned int comp)
3332  {
3333  AssertIndexRange(comp, 2);
3334  return *(reinterpret_cast<double *>(&data) + comp);
3335  }
3336 
3340  DEAL_II_ALWAYS_INLINE
3341  const double &operator[](const unsigned int comp) const
3342  {
3343  AssertIndexRange(comp, 2);
3344  return *(reinterpret_cast<const double *>(&data) + comp);
3345  }
3346 
3350  DEAL_II_ALWAYS_INLINE
3351  VectorizedArray &
3352  operator+=(const VectorizedArray &vec)
3353  {
3354  data = vec_add(data, vec.data);
3355  return *this;
3356  }
3357 
3361  DEAL_II_ALWAYS_INLINE
3362  VectorizedArray &
3363  operator-=(const VectorizedArray &vec)
3364  {
3365  data = vec_sub(data, vec.data);
3366  return *this;
3367  }
3368 
3372  DEAL_II_ALWAYS_INLINE
3373  VectorizedArray &
3374  operator*=(const VectorizedArray &vec)
3375  {
3376  data = vec_mul(data, vec.data);
3377  return *this;
3378  }
3379 
3383  DEAL_II_ALWAYS_INLINE
3384  VectorizedArray &
3385  operator/=(const VectorizedArray &vec)
3386  {
3387  data = vec_div(data, vec.data);
3388  return *this;
3389  }
3390 
3395  DEAL_II_ALWAYS_INLINE
3396  void
3397  load(const double *ptr)
3398  {
3399  data = vec_vsx_ld(0, ptr);
3400  }
3401 
3406  DEAL_II_ALWAYS_INLINE
3407  void
3408  store(double *ptr) const
3409  {
3410  vec_vsx_st(data, 0, ptr);
3411  }
3412 
3415  DEAL_II_ALWAYS_INLINE
3416  void
3417  streaming_store(double *ptr) const
3418  {
3419  store(ptr);
3420  }
3421 
3424  DEAL_II_ALWAYS_INLINE
3425  void
3426  gather(const double *base_ptr, const unsigned int *offsets)
3427  {
3428  for (unsigned int i = 0; i < 2; ++i)
3429  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3430  }
3431 
3434  DEAL_II_ALWAYS_INLINE
3435  void
3436  scatter(const unsigned int *offsets, double *base_ptr) const
3437  {
3438  for (unsigned int i = 0; i < 2; ++i)
3439  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3440  }
3441 
3447  __vector double data;
3448 
3449 private:
3454  DEAL_II_ALWAYS_INLINE
3455  VectorizedArray
3456  get_sqrt() const
3457  {
3458  VectorizedArray res;
3459  res.data = vec_sqrt(data);
3460  return res;
3461  }
3462 
3467  DEAL_II_ALWAYS_INLINE
3468  VectorizedArray
3469  get_abs() const
3470  {
3471  VectorizedArray res;
3472  res.data = vec_abs(data);
3473  return res;
3474  }
3475 
3480  DEAL_II_ALWAYS_INLINE
3481  VectorizedArray
3482  get_max(const VectorizedArray &other) const
3483  {
3484  VectorizedArray res;
3485  res.data = vec_max(data, other.data);
3486  return res;
3487  }
3488 
3493  DEAL_II_ALWAYS_INLINE
3494  VectorizedArray
3495  get_min(const VectorizedArray &other) const
3496  {
3497  VectorizedArray res;
3498  res.data = vec_min(data, other.data);
3499  return res;
3500  }
3501 
3502  // Make a few functions friends.
3503  template <typename Number2, int width2>
3504  friend VectorizedArray<Number2, width2>
3505  std::sqrt(const VectorizedArray<Number2, width2> &);
3506  template <typename Number2, int width2>
3507  friend VectorizedArray<Number2, width2>
3508  std::abs(const VectorizedArray<Number2, width2> &);
3509  template <typename Number2, int width2>
3510  friend VectorizedArray<Number2, width2>
3511  std::max(const VectorizedArray<Number2, width2> &,
3512  const VectorizedArray<Number2, width2> &);
3513  template <typename Number2, int width2>
3514  friend VectorizedArray<Number2, width2>
3515  std::min(const VectorizedArray<Number2, width2> &,
3516  const VectorizedArray<Number2, width2> &);
3517 };
3518 
3519 
3520 
3521 template <>
3522 class VectorizedArray<float, 4>
3523  : public VectorizedArrayBase<VectorizedArray<float, 4>>
3524 {
3525 public:
3529  using value_type = float;
3530 
3534  static const unsigned int n_array_elements = 4;
3535 
3540  VectorizedArray() = default;
3541 
3545  VectorizedArray(const float scalar)
3546  {
3547  this->operator=(scalar);
3548  }
3549 
3553  DEAL_II_ALWAYS_INLINE
3554  VectorizedArray &
3555  operator=(const float x)
3556  {
3557  data = vec_splats(x);
3558  return *this;
3559  }
3560 
3564  DEAL_II_ALWAYS_INLINE
3565  float &operator[](const unsigned int comp)
3566  {
3567  AssertIndexRange(comp, 4);
3568  return *(reinterpret_cast<float *>(&data) + comp);
3569  }
3570 
3574  DEAL_II_ALWAYS_INLINE
3575  const float &operator[](const unsigned int comp) const
3576  {
3577  AssertIndexRange(comp, 4);
3578  return *(reinterpret_cast<const float *>(&data) + comp);
3579  }
3580 
3584  DEAL_II_ALWAYS_INLINE
3585  VectorizedArray &
3586  operator+=(const VectorizedArray &vec)
3587  {
3588  data = vec_add(data, vec.data);
3589  return *this;
3590  }
3591 
3595  DEAL_II_ALWAYS_INLINE
3596  VectorizedArray &
3597  operator-=(const VectorizedArray &vec)
3598  {
3599  data = vec_sub(data, vec.data);
3600  return *this;
3601  }
3602 
3606  DEAL_II_ALWAYS_INLINE
3607  VectorizedArray &
3608  operator*=(const VectorizedArray &vec)
3609  {
3610  data = vec_mul(data, vec.data);
3611  return *this;
3612  }
3613 
3617  DEAL_II_ALWAYS_INLINE
3618  VectorizedArray &
3619  operator/=(const VectorizedArray &vec)
3620  {
3621  data = vec_div(data, vec.data);
3622  return *this;
3623  }
3624 
3629  DEAL_II_ALWAYS_INLINE
3630  void
3631  load(const float *ptr)
3632  {
3633  data = vec_vsx_ld(0, ptr);
3634  }
3635 
3640  DEAL_II_ALWAYS_INLINE
3641  void
3642  store(float *ptr) const
3643  {
3644  vec_vsx_st(data, 0, ptr);
3645  }
3646 
3649  DEAL_II_ALWAYS_INLINE
3650  void
3651  streaming_store(float *ptr) const
3652  {
3653  store(ptr);
3654  }
3655 
3658  DEAL_II_ALWAYS_INLINE
3659  void
3660  gather(const float *base_ptr, const unsigned int *offsets)
3661  {
3662  for (unsigned int i = 0; i < 4; ++i)
3663  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3664  }
3665 
3668  DEAL_II_ALWAYS_INLINE
3669  void
3670  scatter(const unsigned int *offsets, float *base_ptr) const
3671  {
3672  for (unsigned int i = 0; i < 4; ++i)
3673  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3674  }
3675 
3681  __vector float data;
3682 
3683 private:
3688  DEAL_II_ALWAYS_INLINE
3689  VectorizedArray
3690  get_sqrt() const
3691  {
3692  VectorizedArray res;
3693  res.data = vec_sqrt(data);
3694  return res;
3695  }
3696 
3701  DEAL_II_ALWAYS_INLINE
3702  VectorizedArray
3703  get_abs() const
3704  {
3705  VectorizedArray res;
3706  res.data = vec_abs(data);
3707  return res;
3708  }
3709 
3714  DEAL_II_ALWAYS_INLINE
3715  VectorizedArray
3716  get_max(const VectorizedArray &other) const
3717  {
3718  VectorizedArray res;
3719  res.data = vec_max(data, other.data);
3720  return res;
3721  }
3722 
3727  DEAL_II_ALWAYS_INLINE
3728  VectorizedArray
3729  get_min(const VectorizedArray &other) const
3730  {
3731  VectorizedArray res;
3732  res.data = vec_min(data, other.data);
3733  return res;
3734  }
3735 
3736  // Make a few functions friends.
3737  template <typename Number2, int width2>
3738  friend VectorizedArray<Number2, width2>
3739  std::sqrt(const VectorizedArray<Number2, width2> &);
3740  template <typename Number2, int width2>
3741  friend VectorizedArray<Number2, width2>
3742  std::abs(const VectorizedArray<Number2, width2> &);
3743  template <typename Number2, int width2>
3744  friend VectorizedArray<Number2, width2>
3745  std::max(const VectorizedArray<Number2, width2> &,
3746  const VectorizedArray<Number2, width2> &);
3747  template <typename Number2, int width2>
3748  friend VectorizedArray<Number2, width2>
3749  std::min(const VectorizedArray<Number2, width2> &,
3750  const VectorizedArray<Number2, width2> &);
3751 };
3752 
3753 # endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) &&
3754  // defined(__VSX__)
3755 
3756 
3757 #endif // DOXYGEN
3758 
3763 
3769 template <typename Number, int width>
3770 inline DEAL_II_ALWAYS_INLINE bool
3771 operator==(const VectorizedArray<Number, width> &lhs,
3772  const VectorizedArray<Number, width> &rhs)
3773 {
3774  for (unsigned int i = 0; i < VectorizedArray<Number, width>::n_array_elements;
3775  ++i)
3776  if (lhs[i] != rhs[i])
3777  return false;
3778 
3779  return true;
3780 }
3781 
3782 
3788 template <typename Number, int width>
3789 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3790  operator+(const VectorizedArray<Number, width> &u,
3791  const VectorizedArray<Number, width> &v)
3792 {
3793  VectorizedArray<Number, width> tmp = u;
3794  return tmp += v;
3795 }
3796 
3802 template <typename Number, int width>
3803 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3804  operator-(const VectorizedArray<Number, width> &u,
3805  const VectorizedArray<Number, width> &v)
3806 {
3807  VectorizedArray<Number, width> tmp = u;
3808  return tmp -= v;
3809 }
3810 
3816 template <typename Number, int width>
3817 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3818  operator*(const VectorizedArray<Number, width> &u,
3819  const VectorizedArray<Number, width> &v)
3820 {
3821  VectorizedArray<Number, width> tmp = u;
3822  return tmp *= v;
3823 }
3824 
3830 template <typename Number, int width>
3831 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3832  operator/(const VectorizedArray<Number, width> &u,
3833  const VectorizedArray<Number, width> &v)
3834 {
3835  VectorizedArray<Number, width> tmp = u;
3836  return tmp /= v;
3837 }
3838 
3845 template <typename Number, int width>
3846 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3847  operator+(const Number &u, const VectorizedArray<Number, width> &v)
3848 {
3849  VectorizedArray<Number, width> tmp = u;
3850  return tmp += v;
3851 }
3852 
3861 template <int width>
3862 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3863  operator+(const double u, const VectorizedArray<float, width> &v)
3864 {
3865  VectorizedArray<float, width> tmp = static_cast<float>(u);
3866  return tmp += v;
3867 }
3868 
3875 template <typename Number, int width>
3876 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3877  operator+(const VectorizedArray<Number, width> &v, const Number &u)
3878 {
3879  return u + v;
3880 }
3881 
3890 template <int width>
3891 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3892  operator+(const VectorizedArray<float, width> &v, const double u)
3893 {
3894  return u + v;
3895 }
3896 
3903 template <typename Number, int width>
3904 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3905  operator-(const Number &u, const VectorizedArray<Number, width> &v)
3906 {
3907  VectorizedArray<Number, width> tmp = u;
3908  return tmp -= v;
3909 }
3910 
3919 template <int width>
3920 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3921  operator-(const double u, const VectorizedArray<float, width> &v)
3922 {
3923  VectorizedArray<float, width> tmp = static_cast<float>(u);
3924  return tmp -= v;
3925 }
3926 
3933 template <typename Number, int width>
3934 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3935  operator-(const VectorizedArray<Number, width> &v, const Number &u)
3936 {
3937  VectorizedArray<Number, width> tmp = u;
3938  return v - tmp;
3939 }
3940 
3949 template <int width>
3950 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3951  operator-(const VectorizedArray<float, width> &v, const double u)
3952 {
3953  VectorizedArray<float, width> tmp = static_cast<float>(u);
3954  return v - tmp;
3955 }
3956 
3963 template <typename Number, int width>
3964 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3965  operator*(const Number &u, const VectorizedArray<Number, width> &v)
3966 {
3967  VectorizedArray<Number, width> tmp = u;
3968  return tmp *= v;
3969 }
3970 
3979 template <int width>
3980 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3981  operator*(const double u, const VectorizedArray<float, width> &v)
3982 {
3983  VectorizedArray<float, width> tmp = static_cast<float>(u);
3984  return tmp *= v;
3985 }
3986 
3993 template <typename Number, int width>
3994 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3995  operator*(const VectorizedArray<Number, width> &v, const Number &u)
3996 {
3997  return u * v;
3998 }
3999 
4008 template <int width>
4009 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4010  operator*(const VectorizedArray<float, width> &v, const double u)
4011 {
4012  return u * v;
4013 }
4014 
4021 template <typename Number, int width>
4022 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4023  operator/(const Number &u, const VectorizedArray<Number, width> &v)
4024 {
4025  VectorizedArray<Number, width> tmp = u;
4026  return tmp /= v;
4027 }
4028 
4037 template <int width>
4038 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4039  operator/(const double u, const VectorizedArray<float, width> &v)
4040 {
4041  VectorizedArray<float, width> tmp = static_cast<float>(u);
4042  return tmp /= v;
4043 }
4044 
4051 template <typename Number, int width>
4052 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4053  operator/(const VectorizedArray<Number, width> &v, const Number &u)
4054 {
4055  VectorizedArray<Number, width> tmp = u;
4056  return v / tmp;
4057 }
4058 
4067 template <int width>
4068 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4069  operator/(const VectorizedArray<float, width> &v, const double u)
4070 {
4071  VectorizedArray<float, width> tmp = static_cast<float>(u);
4072  return v / tmp;
4073 }
4074 
4080 template <typename Number, int width>
4081 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4082  operator+(const VectorizedArray<Number, width> &u)
4083 {
4084  return u;
4085 }
4086 
4092 template <typename Number, int width>
4093 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4094  operator-(const VectorizedArray<Number, width> &u)
4095 {
4096  // to get a negative sign, subtract the input from zero (could also
4097  // multiply by -1, but this one is slightly simpler)
4098  return VectorizedArray<Number, width>() - u;
4099 }
4100 
4106 template <typename Number, int width>
4107 inline std::ostream &
4108 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
4109 {
4110  constexpr unsigned int n = VectorizedArray<Number, width>::n_array_elements;
4111  for (unsigned int i = 0; i < n - 1; ++i)
4112  out << p[i] << ' ';
4113  out << p[n - 1];
4114 
4115  return out;
4116 }
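Editorial note: the streaming operator prints the lanes separated by single spaces, for example:

#include <deal.II/base/vectorization.h>
#include <iostream>

int main()
{
  dealii::VectorizedArray<double, 2> v = 1.5;
  v[1] = -2.0;
  std::cout << v << std::endl; // prints "1.5 -2"
}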
4117 
4119 
4124 
4125 
4133 enum class SIMDComparison : int
4134 {
4135 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
4136  equal = _CMP_EQ_OQ,
4137  not_equal = _CMP_NEQ_OQ,
4138  less_than = _CMP_LT_OQ,
4139  less_than_or_equal = _CMP_LE_OQ,
4140  greater_than = _CMP_GT_OQ,
4141  greater_than_or_equal = _CMP_GE_OQ
4142 #else
4143  equal,
4144  not_equal,
4145  less_than,
4146  less_than_or_equal,
4147  greater_than,
4148  greater_than_or_equal
4149 #endif
4150 };
4151 
4152 
4216 template <SIMDComparison predicate, typename Number>
4217 DEAL_II_ALWAYS_INLINE inline Number
4218 compare_and_apply_mask(const Number &left,
4219  const Number &right,
4220  const Number &true_value,
4221  const Number &false_value)
4222 {
4223  bool mask;
4224  switch (predicate)
4225  {
4226  case SIMDComparison::equal:
4227  mask = (left == right);
4228  break;
4229  case SIMDComparison::not_equal:
4230  mask = (left != right);
4231  break;
4232  case SIMDComparison::less_than:
4233  mask = (left < right);
4234  break;
4235  case SIMDComparison::less_than_or_equal:
4236  mask = (left <= right);
4237  break;
4238  case SIMDComparison::greater_than:
4239  mask = (left > right);
4240  break;
4241  case SIMDComparison::greater_than_or_equal:
4242  mask = (left >= right);
4243  break;
4244  }
4245 
4246  return mask ? true_value : false_value;
4247 }
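Editorial usage sketch: compare_and_apply_mask evaluates the predicate lane by lane and selects between true_value and false_value, so the same generic code serves plain scalars and the vectorized overloads defined below. The helper name clamp_to_zero is illustrative only.

#include <deal.II/base/vectorization.h>

// Replace negative lanes by zero and keep all other lanes unchanged;
// Number may be double, float, or any VectorizedArray specialization.
template <typename Number>
Number
clamp_to_zero(const Number &x)
{
  return dealii::compare_and_apply_mask<dealii::SIMDComparison::less_than>(
    x, Number(0.), Number(0.), x);
}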
4248 
4249 
4254 template <SIMDComparison predicate, typename Number>
4255 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
4256 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
4257  const VectorizedArray<Number, 1> &right,
4258  const VectorizedArray<Number, 1> &true_value,
4259  const VectorizedArray<Number, 1> &false_value)
4260 {
4261  VectorizedArray<Number, 1> result;
4262  result.data = compare_and_apply_mask<predicate, Number>(left.data,
4263  right.data,
4264  true_value.data,
4265  false_value.data);
4266  return result;
4267 }
4268 
4270 
4271 #ifndef DOXYGEN
4272 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
4273 
4274 template <SIMDComparison predicate>
4275 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
4276 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
4277  const VectorizedArray<float, 16> &right,
4278  const VectorizedArray<float, 16> &true_values,
4279  const VectorizedArray<float, 16> &false_values)
4280 {
4281  const __mmask16 mask =
4282  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
4283  VectorizedArray<float, 16> result;
4284  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
4285  return result;
4286 }
4287 
4288 
4289 
4290 template <SIMDComparison predicate>
4291 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
4292 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
4293  const VectorizedArray<double, 8> &right,
4294  const VectorizedArray<double, 8> &true_values,
4295  const VectorizedArray<double, 8> &false_values)
4296 {
4297  const __mmask16 mask =
4298  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
4299  VectorizedArray<double, 8> result;
4300  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
4301  return result;
4302 }
4303 
4304 # endif
4305 
4306 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
4307 
4308 template <SIMDComparison predicate>
4309 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
4310 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
4311  const VectorizedArray<float, 8> &right,
4312  const VectorizedArray<float, 8> &true_values,
4313  const VectorizedArray<float, 8> &false_values)
4314 {
4315  const auto mask =
4316  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
4317 
4318  VectorizedArray<float, 8> result;
4319  result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
4320  _mm256_andnot_ps(mask, false_values.data));
4321  return result;
4322 }
4323 
4324 
4325 template <SIMDComparison predicate>
4326 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
4327 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
4328  const VectorizedArray<double, 4> &right,
4329  const VectorizedArray<double, 4> &true_values,
4330  const VectorizedArray<double, 4> &false_values)
4331 {
4332  const auto mask =
4333  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
4334 
4335  VectorizedArray<double, 4> result;
4336  result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
4337  _mm256_andnot_pd(mask, false_values.data));
4338  return result;
4339 }
4340 
4341 # endif
4342 
4343 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
4344 
4345 template <SIMDComparison predicate>
4346 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
4347 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
4348  const VectorizedArray<float, 4> &right,
4349  const VectorizedArray<float, 4> &true_values,
4350  const VectorizedArray<float, 4> &false_values)
4351 {
4352  __m128 mask;
4353  switch (predicate)
4354  {
4355  case SIMDComparison::equal:
4356  mask = _mm_cmpeq_ps(left.data, right.data);
4357  break;
4358  case SIMDComparison::not_equal:
4359  mask = _mm_cmpneq_ps(left.data, right.data);
4360  break;
4361  case SIMDComparison::less_than:
4362  mask = _mm_cmplt_ps(left.data, right.data);
4363  break;
4364  case SIMDComparison::less_than_or_equal:
4365  mask = _mm_cmple_ps(left.data, right.data);
4366  break;
4367  case SIMDComparison::greater_than:
4368  mask = _mm_cmpgt_ps(left.data, right.data);
4369  break;
4370  case SIMDComparison::greater_than_or_equal:
4371  mask = _mm_cmpge_ps(left.data, right.data);
4372  break;
4373  }
4374 
4375  VectorizedArray<float, 4> result;
4376  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
4377  _mm_andnot_ps(mask, false_values.data));
4378 
4379  return result;
4380 }
4381 
4382 
4383 template <SIMDComparison predicate>
4384 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
4385 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
4386  const VectorizedArray<double, 2> &right,
4387  const VectorizedArray<double, 2> &true_values,
4388  const VectorizedArray<double, 2> &false_values)
4389 {
4390  __m128d mask;
4391  switch (predicate)
4392  {
4393  case SIMDComparison::equal:
4394  mask = _mm_cmpeq_pd(left.data, right.data);
4395  break;
4396  case SIMDComparison::not_equal:
4397  mask = _mm_cmpneq_pd(left.data, right.data);
4398  break;
4399  case SIMDComparison::less_than:
4400  mask = _mm_cmplt_pd(left.data, right.data);
4401  break;
4402  case SIMDComparison::less_than_or_equal:
4403  mask = _mm_cmple_pd(left.data, right.data);
4404  break;
4405  case SIMDComparison::greater_than:
4406  mask = _mm_cmpgt_pd(left.data, right.data);
4407  break;
4408  case SIMDComparison::greater_than_or_equal:
4409  mask = _mm_cmpge_pd(left.data, right.data);
4410  break;
4411  }
4412 
4413  VectorizedArray<double, 2> result;
4414  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
4415  _mm_andnot_pd(mask, false_values.data));
4416 
4417  return result;
4418 }
4419 
4420 # endif
4421 #endif // DOXYGEN
4422 
4423 
4424 DEAL_II_NAMESPACE_CLOSE
4425 
4432 namespace std
4433 {
4441  template <typename Number, int width>
4442  inline ::VectorizedArray<Number, width>
4443  sin(const ::VectorizedArray<Number, width> &x)
4444  {
4445  // put values in an array and later read in that array with an unaligned
4446  // read. This should save some instructions as compared to directly
4447  // setting the individual elements and also circumvents a compiler
4448  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
4449  // from April 2014, topic "matrix_free/step-48 Test").
4450  Number values[::VectorizedArray<Number, width>::n_array_elements];
4451  for (unsigned int i = 0;
4452  i < ::VectorizedArray<Number, width>::n_array_elements;
4453  ++i)
4454  values[i] = std::sin(x[i]);
4455  ::VectorizedArray<Number, width> out;
4456  out.load(&values[0]);
4457  return out;
4458  }
4459 
4460 
4461 
4469  template <typename Number, int width>
4470  inline ::VectorizedArray<Number, width>
4471  cos(const ::VectorizedArray<Number, width> &x)
4472  {
4473  Number values[::VectorizedArray<Number, width>::n_array_elements];
4474  for (unsigned int i = 0;
4475  i < ::VectorizedArray<Number, width>::n_array_elements;
4476  ++i)
4477  values[i] = std::cos(x[i]);
4478  ::VectorizedArray<Number, width> out;
4479  out.load(&values[0]);
4480  return out;
4481  }
4482 
4483 
4484 
4492  template <typename Number, int width>
4493  inline ::VectorizedArray<Number, width>
4494  tan(const ::VectorizedArray<Number, width> &x)
4495  {
4496  Number values[::VectorizedArray<Number, width>::n_array_elements];
4497  for (unsigned int i = 0;
4498  i < ::VectorizedArray<Number, width>::n_array_elements;
4499  ++i)
4500  values[i] = std::tan(x[i]);
4501  ::VectorizedArray<Number, width> out;
4502  out.load(&values[0]);
4503  return out;
4504  }
4505 
4506 
4507 
4515  template <typename Number, int width>
4516  inline ::VectorizedArray<Number, width>
4517  exp(const ::VectorizedArray<Number, width> &x)
4518  {
4519  Number values[::VectorizedArray<Number, width>::n_array_elements];
4520  for (unsigned int i = 0;
4521  i < ::VectorizedArray<Number, width>::n_array_elements;
4522  ++i)
4523  values[i] = std::exp(x[i]);
4524  ::VectorizedArray<Number, width> out;
4525  out.load(&values[0]);
4526  return out;
4527  }
4528 
4529 
4530 
4538  template <typename Number, int width>
4539  inline ::VectorizedArray<Number, width>
4540  log(const ::VectorizedArray<Number, width> &x)
4541  {
4542  Number values[::VectorizedArray<Number, width>::n_array_elements];
4543  for (unsigned int i = 0;
4544  i < ::VectorizedArray<Number, width>::n_array_elements;
4545  ++i)
4546  values[i] = std::log(x[i]);
4547  ::VectorizedArray<Number, width> out;
4548  out.load(&values[0]);
4549  return out;
4550  }
4551 
4552 
4553 
4561  template <typename Number, int width>
4562  inline ::VectorizedArray<Number, width>
4563  sqrt(const ::VectorizedArray<Number, width> &x)
4564  {
4565  return x.get_sqrt();
4566  }
4567 
4568 
4569 
4577  template <typename Number, int width>
4578  inline ::VectorizedArray<Number, width>
4579  pow(const ::VectorizedArray<Number, width> &x, const Number p)
4580  {
4581  Number values[::VectorizedArray<Number, width>::n_array_elements];
4582  for (unsigned int i = 0;
4583  i < ::VectorizedArray<Number, width>::n_array_elements;
4584  ++i)
4585  values[i] = std::pow(x[i], p);
4586  ::VectorizedArray<Number, width> out;
4587  out.load(&values[0]);
4588  return out;
4589  }
4590 
4591 
4592 
4600  template <typename Number, int width>
4601  inline ::VectorizedArray<Number, width>
4602  abs(const ::VectorizedArray<Number, width> &x)
4603  {
4604  return x.get_abs();
4605  }
4606 
4607 
4608 
4616  template <typename Number, int width>
4617  inline ::VectorizedArray<Number, width>
4618  max(const ::VectorizedArray<Number, width> &x,
4619  const ::VectorizedArray<Number, width> &y)
4620  {
4621  return x.get_max(y);
4622  }
4623 
4624 
4625 
4633  template <typename Number, int width>
4634  inline ::VectorizedArray<Number, width>
4635  min(const ::VectorizedArray<Number, width> &x,
4636  const ::VectorizedArray<Number, width> &y)
4637  {
4638  return x.get_min(y);
4639  }
4640 
4641 } // namespace std
4642 
4643 #endif
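Editorial closing sketch: taken together, the overloads in namespace std above let VectorizedArray drop into generic numerical code; the transcendental functions fall back to per-lane evaluation, while sqrt, abs, min and max map to single vector instructions. A minimal example, assuming a build with at least SSE2:

#include <deal.II/base/vectorization.h>
#include <iostream>

int main()
{
  using dealii::VectorizedArray;

  VectorizedArray<double, 2> x;
  x[0] = 0.0;
  x[1] = 1.0;

  // per-lane std::sin, then a single _mm_sqrt_pd for the square root
  const VectorizedArray<double, 2> y = std::sqrt(std::sin(x) + 1.0);

  for (unsigned int lane = 0;
       lane < VectorizedArray<double, 2>::n_array_elements;
       ++lane)
    std::cout << y[lane] << '\n';
}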