Reference documentation for deal.II version GIT 982a52a150 2023-04-01 20:45:01+00:00
\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
vectorization.h
Go to the documentation of this file.
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these feature are not only
42 // present in the compilation unit but also working properly.
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension as the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
49 // add -march=native or similar flags, making it fall to SSE2. This leads to
50 // very strange errors as the size of data structures differs between the
51 // compiled deal.II code sitting in libdeal_II.so and the user code if not
52 // detected.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
89 };
90 
91 
92 
96 template <typename T>
98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
116  {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
128  {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
139  operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &
146  operator*() const
147  {
148  AssertIndexRange(lane, T::size());
149  return (*data)[lane];
150  }
151 
152 
157  template <typename U = T>
158  std::enable_if_t<!std::is_same<U, const U>::value, typename T::value_type> &
160  {
161  AssertIndexRange(lane, T::size());
162  return (*data)[lane];
163  }
164 
172  {
173  AssertIndexRange(lane + 1, T::size() + 1);
174  lane++;
175  return *this;
176  }
177 
183  operator+=(const std::size_t offset)
184  {
185  AssertIndexRange(lane + offset, T::size() + 1);
186  lane += offset;
187  return *this;
188  }
189 
197  {
198  Assert(
199  lane > 0,
200  ExcMessage(
201  "You can't decrement an iterator that is already at the beginning of the range."));
202  --lane;
203  return *this;
204  }
205 
210  operator+(const std::size_t &offset) const
211  {
212  AssertIndexRange(lane + offset, T::size() + 1);
213  return VectorizedArrayIterator<T>(*data, lane + offset);
214  }
215 
219  std::ptrdiff_t
221  {
222  return static_cast<std::ptrdiff_t>(lane) -
223  static_cast<ptrdiff_t>(other.lane);
224  }
225 
226 private:
230  T *data;
231 
235  std::size_t lane;
236 };
237 
238 
239 
249 template <typename T, std::size_t width>
251 {
252 public:
256  VectorizedArrayBase() = default;
257 
261  template <typename U>
262  VectorizedArrayBase(const std::initializer_list<U> &list)
263  {
264  auto i0 = this->begin();
265  auto i1 = list.begin();
266 
267  for (; i1 != list.end(); ++i0, ++i1)
268  {
269  Assert(
270  i0 != this->end(),
271  ExcMessage(
272  "Initializer list exceeds size of this VectorizedArray object."));
273 
274  *i0 = *i1;
275  }
276 
277  for (; i0 != this->end(); ++i0)
278  {
279  *i0 = 0.0;
280  }
281  }
282 
286  static constexpr std::size_t
288  {
289  return width;
290  }
291 
297  {
298  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
299  }
300 
306  begin() const
307  {
308  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
309  }
310 
315  end()
316  {
317  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
318  }
319 
325  end() const
326  {
327  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
328  width);
329  }
330 };
331 
332 
333 
418 template <typename Number, std::size_t width>
420  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
421 {
422 public:
426  using value_type = Number;
427 
428  static_assert(width == 1,
429  "You specified an illegal width that is not supported.");
430 
435  VectorizedArray() = default;
436 
440  VectorizedArray(const Number scalar)
441  {
442  this->operator=(scalar);
443  }
444 
448  template <typename U>
449  VectorizedArray(const std::initializer_list<U> &list)
450  : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
451  {}
452 
458  operator=(const Number scalar) &
459  {
460  data = scalar;
461  return *this;
462  }
463 
470  operator=(const Number scalar) && = delete;
471 
477  Number &
478  operator[](const unsigned int comp)
479  {
480  (void)comp;
481  AssertIndexRange(comp, 1);
482  return data;
483  }
484 
490  const Number &
491  operator[](const unsigned int comp) const
492  {
493  (void)comp;
494  AssertIndexRange(comp, 1);
495  return data;
496  }
497 
504  {
505  data += vec.data;
506  return *this;
507  }
508 
515  {
516  data -= vec.data;
517  return *this;
518  }
519 
526  {
527  data *= vec.data;
528  return *this;
529  }
530 
537  {
538  data /= vec.data;
539  return *this;
540  }
541 
548  template <typename OtherNumber>
550  load(const OtherNumber *ptr)
551  {
552  data = *ptr;
553  }
554 
561  template <typename OtherNumber>
563  store(OtherNumber *ptr) const
564  {
565  *ptr = data;
566  }
567 
615  void
616  streaming_store(Number *ptr) const
617  {
618  *ptr = data;
619  }
620 
634  void
635  gather(const Number *base_ptr, const unsigned int *offsets)
636  {
637  data = base_ptr[offsets[0]];
638  }
639 
653  void
654  scatter(const unsigned int *offsets, Number *base_ptr) const
655  {
656  base_ptr[offsets[0]] = data;
657  }
658 
664  Number data;
665 
666 private:
673  get_sqrt() const
674  {
675  VectorizedArray res;
676  res.data = std::sqrt(data);
677  return res;
678  }
679 
686  get_abs() const
687  {
688  VectorizedArray res;
689  res.data = std::fabs(data);
690  return res;
691  }
692 
699  get_max(const VectorizedArray &other) const
700  {
701  VectorizedArray res;
702  res.data = std::max(data, other.data);
703  return res;
704  }
705 
712  get_min(const VectorizedArray &other) const
713  {
714  VectorizedArray res;
715  res.data = std::min(data, other.data);
716  return res;
717  }
718 
719  // Make a few functions friends.
720  template <typename Number2, std::size_t width2>
723  template <typename Number2, std::size_t width2>
726  template <typename Number2, std::size_t width2>
730  template <typename Number2, std::size_t width2>
734 };
735 
736 
737 
749 template <typename Number,
750  std::size_t width =
753  make_vectorized_array(const Number &u)
754 {
756  return result;
757 }
758 
759 
760 
767 template <typename VectorizedArrayType>
768 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
769 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
770 {
771  static_assert(
772  std::is_same<VectorizedArrayType,
773  VectorizedArray<typename VectorizedArrayType::value_type,
774  VectorizedArrayType::size()>>::value,
775  "VectorizedArrayType is not a VectorizedArray.");
776 
777  VectorizedArrayType result = u;
778  return result;
779 }
780 
781 
782 
794 template <typename Number, std::size_t width>
795 inline DEAL_II_ALWAYS_INLINE void
797  const std::array<Number *, width> &ptrs,
798  const unsigned int offset)
799 {
800  for (unsigned int v = 0; v < width; ++v)
801  out.data[v] = ptrs[v][offset];
802 }
803 
804 
805 
831 template <typename Number, std::size_t width>
832 inline DEAL_II_ALWAYS_INLINE void
833 vectorized_load_and_transpose(const unsigned int n_entries,
834  const Number * in,
835  const unsigned int * offsets,
837 {
838  for (unsigned int i = 0; i < n_entries; ++i)
839  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
840  out[i][v] = in[offsets[v] + i];
841 }
842 
843 
855 template <typename Number, std::size_t width>
856 inline DEAL_II_ALWAYS_INLINE void
857 vectorized_load_and_transpose(const unsigned int n_entries,
858  const std::array<Number *, width> &in,
860 {
861  for (unsigned int i = 0; i < n_entries; ++i)
862  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
863  out[i][v] = in[v][i];
864 }
865 
866 
867 
906 template <typename Number, std::size_t width>
907 inline DEAL_II_ALWAYS_INLINE void
908 vectorized_transpose_and_store(const bool add_into,
909  const unsigned int n_entries,
911  const unsigned int * offsets,
912  Number * out)
913 {
914  if (add_into)
915  for (unsigned int i = 0; i < n_entries; ++i)
916  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
917  out[offsets[v] + i] += in[i][v];
918  else
919  for (unsigned int i = 0; i < n_entries; ++i)
920  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
921  out[offsets[v] + i] = in[i][v];
922 }
923 
924 
936 template <typename Number, std::size_t width>
937 inline DEAL_II_ALWAYS_INLINE void
938 vectorized_transpose_and_store(const bool add_into,
939  const unsigned int n_entries,
941  std::array<Number *, width> & out)
942 {
943  if (add_into)
944  for (unsigned int i = 0; i < n_entries; ++i)
945  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
946  out[v][i] += in[i][v];
947  else
948  for (unsigned int i = 0; i < n_entries; ++i)
949  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
950  out[v][i] = in[i][v];
951 }
952 
953 
956 #ifndef DOXYGEN
957 
958 // for safety, also check that __AVX512F__ is defined in case the user manually
959 // set some conflicting compile flags which prevent compilation
960 
961 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
962 
966 template <>
967 class VectorizedArray<double, 8>
968  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
969 {
970 public:
974  using value_type = double;
975 
980  VectorizedArray() = default;
981 
985  VectorizedArray(const double scalar)
986  {
987  this->operator=(scalar);
988  }
989 
993  template <typename U>
994  VectorizedArray(const std::initializer_list<U> &list)
996  {}
997 
1002  VectorizedArray &
1003  operator=(const double x) &
1004  {
1005  data = _mm512_set1_pd(x);
1006  return *this;
1007  }
1008 
1009 
1015  VectorizedArray &
1016  operator=(const double scalar) && = delete;
1017 
1022  double &
1023  operator[](const unsigned int comp)
1024  {
1025  AssertIndexRange(comp, 8);
1026  return *(reinterpret_cast<double *>(&data) + comp);
1027  }
1028 
1033  const double &
1034  operator[](const unsigned int comp) const
1035  {
1036  AssertIndexRange(comp, 8);
1037  return *(reinterpret_cast<const double *>(&data) + comp);
1038  }
1039 
1044  VectorizedArray &
1045  operator+=(const VectorizedArray &vec)
1046  {
1047  // if the compiler supports vector arithmetic, we can simply use +=
1048  // operator on the given data type. this allows the compiler to combine
1049  // additions with multiplication (fused multiply-add) if those
1050  // instructions are available. Otherwise, we need to use the built-in
1051  // intrinsic command for __m512d
1052 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1053  data += vec.data;
1054 # else
1055  data = _mm512_add_pd(data, vec.data);
1056 # endif
1057  return *this;
1058  }
1059 
1064  VectorizedArray &
1065  operator-=(const VectorizedArray &vec)
1066  {
1067 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1068  data -= vec.data;
1069 # else
1070  data = _mm512_sub_pd(data, vec.data);
1071 # endif
1072  return *this;
1073  }
1078  VectorizedArray &
1079  operator*=(const VectorizedArray &vec)
1080  {
1081 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1082  data *= vec.data;
1083 # else
1084  data = _mm512_mul_pd(data, vec.data);
1085 # endif
1086  return *this;
1087  }
1088 
1093  VectorizedArray &
1094  operator/=(const VectorizedArray &vec)
1095  {
1096 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1097  data /= vec.data;
1098 # else
1099  data = _mm512_div_pd(data, vec.data);
1100 # endif
1101  return *this;
1102  }
1103 
1110  void
1111  load(const double *ptr)
1112  {
1113  data = _mm512_loadu_pd(ptr);
1114  }
1115 
1117  void
1118  load(const float *ptr)
1119  {
1120  data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
1121  }
1122 
1130  void
1131  store(double *ptr) const
1132  {
1133  _mm512_storeu_pd(ptr, data);
1134  }
1135 
1137  void
1138  store(float *ptr) const
1139  {
1140  _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
1141  }
1142 
1148  void
1149  streaming_store(double *ptr) const
1150  {
1151  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1152  ExcMessage("Memory not aligned"));
1153  _mm512_stream_pd(ptr, data);
1154  }
1155 
1169  void
1170  gather(const double *base_ptr, const unsigned int *offsets)
1171  {
1172  // unfortunately, there does not appear to be a 256 bit integer load, so
1173  // do it by some reinterpret casts here. this is allowed because the Intel
1174  // API allows aliasing between different vector types.
1175  const __m256 index_val =
1176  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1177  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1178 
1179  // work around a warning with gcc-12 about an uninitialized initial state
1180  // for gather by starting with a zero guess, even though all lanes will be
1181  // overwritten
1182  __m512d zero = {};
1183  __mmask8 mask = 0xFF;
1184 
1185  data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
1186  }
1187 
1201  void
1202  scatter(const unsigned int *offsets, double *base_ptr) const
1203  {
1204  for (unsigned int i = 0; i < 8; ++i)
1205  for (unsigned int j = i + 1; j < 8; ++j)
1206  Assert(offsets[i] != offsets[j],
1207  ExcMessage("Result of scatter undefined if two offset elements"
1208  " point to the same position"));
1209 
1210  // unfortunately, there does not appear to be a 256 bit integer load, so
1211  // do it by some reinterpret casts here. this is allowed because the Intel
1212  // API allows aliasing between different vector types.
1213  const __m256 index_val =
1214  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1215  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1216  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1217  }
1218 
1224  __m512d data;
1225 
1226 private:
1233  get_sqrt() const
1234  {
1235  VectorizedArray res;
1236  res.data = _mm512_sqrt_pd(data);
1237  return res;
1238  }
1239 
1246  get_abs() const
1247  {
1248  // to compute the absolute value, perform bitwise andnot with -0. This
1249  // will leave all value and exponent bits unchanged but force the sign
1250  // value to +. Since there is no andnot for AVX512, we interpret the data
1251  // as 64 bit integers and do the andnot on those types (note that andnot
1252  // is a bitwise operation so the data type does not matter)
1253  __m512d mask = _mm512_set1_pd(-0.);
1254  VectorizedArray res;
1255  res.data = reinterpret_cast<__m512d>(
1256  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1257  reinterpret_cast<__m512i>(data)));
1258  return res;
1259  }
1260 
1267  get_max(const VectorizedArray &other) const
1268  {
1269  VectorizedArray res;
1270  res.data = _mm512_max_pd(data, other.data);
1271  return res;
1272  }
1273 
1280  get_min(const VectorizedArray &other) const
1281  {
1282  VectorizedArray res;
1283  res.data = _mm512_min_pd(data, other.data);
1284  return res;
1285  }
1286 
1287  // Make a few functions friends.
1288  template <typename Number2, std::size_t width2>
1291  template <typename Number2, std::size_t width2>
1294  template <typename Number2, std::size_t width2>
1298  template <typename Number2, std::size_t width2>
1302 };
1303 
1304 
1305 
1309 template <>
1310 inline DEAL_II_ALWAYS_INLINE void
1311 vectorized_load_and_transpose(const unsigned int n_entries,
1312  const double * in,
1313  const unsigned int * offsets,
1315 {
1316  // do not do full transpose because the code is long and will most
1317  // likely not pay off because many processors have two load units
1318  // (for the top 8 instructions) but only 1 permute unit (for the 8
1319  // shuffle/unpack instructions). rather start the transposition on the
1320  // vectorized array of half the size with 256 bits
1321  const unsigned int n_chunks = n_entries / 4;
1322  for (unsigned int i = 0; i < n_chunks; ++i)
1323  {
1324  __m512d t0, t1, t2, t3 = {};
1325 
1326  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1327  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1328  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1329  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1330  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1331  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1332  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1333  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1334 
1335  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1336  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1337  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1338  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1339  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1340  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1341  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1342  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1343  }
1344  // remainder loop of work that does not divide by 4
1345  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1346  out[i].gather(in + i, offsets);
1347 }
1348 
1349 
1350 
1354 template <>
1355 inline DEAL_II_ALWAYS_INLINE void
1356 vectorized_load_and_transpose(const unsigned int n_entries,
1357  const std::array<double *, 8> &in,
1359 {
1360  const unsigned int n_chunks = n_entries / 4;
1361  for (unsigned int i = 0; i < n_chunks; ++i)
1362  {
1363  __m512d t0, t1, t2, t3 = {};
1364 
1365  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1366  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1367  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1368  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1369  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1370  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1371  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1372  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1373 
1374  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1375  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1376  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1377  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1378  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1379  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1380  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1381  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1382  }
1383 
1384  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1385  gather(out[i], in, i);
1386 }
1387 
1388 
1389 
1393 template <>
1394 inline DEAL_II_ALWAYS_INLINE void
1395 vectorized_transpose_and_store(const bool add_into,
1396  const unsigned int n_entries,
1397  const VectorizedArray<double, 8> *in,
1398  const unsigned int * offsets,
1399  double * out)
1400 {
1401  // as for the load, we split the store operations into 256 bit units to
1402  // better balance between code size, shuffle instructions, and stores
1403  const unsigned int n_chunks = n_entries / 4;
1404  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1405  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1406  for (unsigned int i = 0; i < n_chunks; ++i)
1407  {
1408  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1409  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1410  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1411  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1412  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1413  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1414  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1415  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1416  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1417  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1418  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1419  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1420  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1421  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1422  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1423  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1424 
1425  // Cannot use the same store instructions in both paths of the 'if'
1426  // because the compiler cannot know that there is no aliasing
1427  // between pointers
1428  if (add_into)
1429  {
1430  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1431  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1432  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1433  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1434  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1435  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1436  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1437  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1438  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1439  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1440  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1441  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1442  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1443  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1444  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1445  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1446  }
1447  else
1448  {
1449  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1450  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1451  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1452  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1453  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1454  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1455  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1456  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1457  }
1458  }
1459 
1460  // remainder loop of work that does not divide by 4
1461  if (add_into)
1462  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1463  for (unsigned int v = 0; v < 8; ++v)
1464  out[offsets[v] + i] += in[i][v];
1465  else
1466  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1467  for (unsigned int v = 0; v < 8; ++v)
1468  out[offsets[v] + i] = in[i][v];
1469 }
1470 
1471 
1472 
1476 template <>
1477 inline DEAL_II_ALWAYS_INLINE void
1478 vectorized_transpose_and_store(const bool add_into,
1479  const unsigned int n_entries,
1480  const VectorizedArray<double, 8> *in,
1481  std::array<double *, 8> & out)
1482 {
1483  // see the comments in the vectorized_transpose_and_store above
1484 
1485  const unsigned int n_chunks = n_entries / 4;
1486  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1487  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1488  for (unsigned int i = 0; i < n_chunks; ++i)
1489  {
1490  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1491  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1492  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1493  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1494  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1495  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1496  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1497  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1498  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1499  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1500  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1501  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1502  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1503  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1504  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1505  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1506 
1507  if (add_into)
1508  {
1509  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1510  _mm256_storeu_pd(out[0] + 4 * i, res0);
1511  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1512  _mm256_storeu_pd(out[1] + 4 * i, res1);
1513  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1514  _mm256_storeu_pd(out[2] + 4 * i, res2);
1515  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1516  _mm256_storeu_pd(out[3] + 4 * i, res3);
1517  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1518  _mm256_storeu_pd(out[4] + 4 * i, res4);
1519  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1520  _mm256_storeu_pd(out[5] + 4 * i, res5);
1521  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1522  _mm256_storeu_pd(out[6] + 4 * i, res6);
1523  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1524  _mm256_storeu_pd(out[7] + 4 * i, res7);
1525  }
1526  else
1527  {
1528  _mm256_storeu_pd(out[0] + 4 * i, res0);
1529  _mm256_storeu_pd(out[1] + 4 * i, res1);
1530  _mm256_storeu_pd(out[2] + 4 * i, res2);
1531  _mm256_storeu_pd(out[3] + 4 * i, res3);
1532  _mm256_storeu_pd(out[4] + 4 * i, res4);
1533  _mm256_storeu_pd(out[5] + 4 * i, res5);
1534  _mm256_storeu_pd(out[6] + 4 * i, res6);
1535  _mm256_storeu_pd(out[7] + 4 * i, res7);
1536  }
1537  }
1538 
1539  if (add_into)
1540  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1541  for (unsigned int v = 0; v < 8; ++v)
1542  out[v][i] += in[i][v];
1543  else
1544  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1545  for (unsigned int v = 0; v < 8; ++v)
1546  out[v][i] = in[i][v];
1547 }
1548 
1549 
1550 
1554 template <>
1555 class VectorizedArray<float, 16>
1556  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1557 {
1558 public:
1562  using value_type = float;
1563 
1568  VectorizedArray() = default;
1569 
1573  VectorizedArray(const float scalar)
1574  {
1575  this->operator=(scalar);
1576  }
1577 
1581  template <typename U>
1582  VectorizedArray(const std::initializer_list<U> &list)
1583  : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
1584  {}
1585 
1590  VectorizedArray &
1591  operator=(const float x) &
1592  {
1593  data = _mm512_set1_ps(x);
1594  return *this;
1595  }
1596 
1602  VectorizedArray &
1603  operator=(const float scalar) && = delete;
1604 
1609  float &
1610  operator[](const unsigned int comp)
1611  {
1612  AssertIndexRange(comp, 16);
1613  return *(reinterpret_cast<float *>(&data) + comp);
1614  }
1615 
1620  const float &
1621  operator[](const unsigned int comp) const
1622  {
1623  AssertIndexRange(comp, 16);
1624  return *(reinterpret_cast<const float *>(&data) + comp);
1625  }
1626 
1631  VectorizedArray &
1632  operator+=(const VectorizedArray &vec)
1633  {
1634  // if the compiler supports vector arithmetic, we can simply use +=
1635  // operator on the given data type. this allows the compiler to combine
1636  // additions with multiplication (fused multiply-add) if those
1637  // instructions are available. Otherwise, we need to use the built-in
1638  // intrinsic command for __m512d
1639 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1640  data += vec.data;
1641 # else
1642  data = _mm512_add_ps(data, vec.data);
1643 # endif
1644  return *this;
1645  }
1646 
1651  VectorizedArray &
1652  operator-=(const VectorizedArray &vec)
1653  {
1654 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1655  data -= vec.data;
1656 # else
1657  data = _mm512_sub_ps(data, vec.data);
1658 # endif
1659  return *this;
1660  }
1665  VectorizedArray &
1666  operator*=(const VectorizedArray &vec)
1667  {
1668 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1669  data *= vec.data;
1670 # else
1671  data = _mm512_mul_ps(data, vec.data);
1672 # endif
1673  return *this;
1674  }
1675 
1680  VectorizedArray &
1681  operator/=(const VectorizedArray &vec)
1682  {
1683 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1684  data /= vec.data;
1685 # else
1686  data = _mm512_div_ps(data, vec.data);
1687 # endif
1688  return *this;
1689  }
1690 
1697  void
1698  load(const float *ptr)
1699  {
1700  data = _mm512_loadu_ps(ptr);
1701  }
1702 
1710  void
1711  store(float *ptr) const
1712  {
1713  _mm512_storeu_ps(ptr, data);
1714  }
1715 
1721  void
1722  streaming_store(float *ptr) const
1723  {
1724  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1725  ExcMessage("Memory not aligned"));
1726  _mm512_stream_ps(ptr, data);
1727  }
1728 
1742  void
1743  gather(const float *base_ptr, const unsigned int *offsets)
1744  {
1745  // unfortunately, there does not appear to be a 512 bit integer load, so
1746  // do it by some reinterpret casts here. this is allowed because the Intel
1747  // API allows aliasing between different vector types.
1748  const __m512 index_val =
1749  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1750  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1751 
1752  // work around a warning with gcc-12 about an uninitialized initial state
1753  // for gather by starting with a zero guess, even though all lanes will be
1754  // overwritten
1755  __m512 zero = {};
1756  __mmask16 mask = 0xFFFF;
1757 
1758  data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
1759  }
1760 
1774  void
1775  scatter(const unsigned int *offsets, float *base_ptr) const
1776  {
1777  for (unsigned int i = 0; i < 16; ++i)
1778  for (unsigned int j = i + 1; j < 16; ++j)
1779  Assert(offsets[i] != offsets[j],
1780  ExcMessage("Result of scatter undefined if two offset elements"
1781  " point to the same position"));
1782 
1783  // unfortunately, there does not appear to be a 512 bit integer load, so
1784  // do it by some reinterpret casts here. this is allowed because the Intel
1785  // API allows aliasing between different vector types.
1786  const __m512 index_val =
1787  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1788  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1789  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1790  }
1791 
1797  __m512 data;
1798 
1799 private:
1806  get_sqrt() const
1807  {
1808  VectorizedArray res;
1809  res.data = _mm512_sqrt_ps(data);
1810  return res;
1811  }
1812 
1819  get_abs() const
1820  {
1821  // to compute the absolute value, perform bitwise andnot with -0. This
1822  // will leave all value and exponent bits unchanged but force the sign
1823  // value to +. Since there is no andnot for AVX512, we interpret the data
1824  // as 32 bit integers and do the andnot on those types (note that andnot
1825  // is a bitwise operation so the data type does not matter)
1826  __m512 mask = _mm512_set1_ps(-0.f);
1827  VectorizedArray res;
1828  res.data = reinterpret_cast<__m512>(
1829  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1830  reinterpret_cast<__m512i>(data)));
1831  return res;
1832  }
1833 
1840  get_max(const VectorizedArray &other) const
1841  {
1842  VectorizedArray res;
1843  res.data = _mm512_max_ps(data, other.data);
1844  return res;
1845  }
1846 
1853  get_min(const VectorizedArray &other) const
1854  {
1855  VectorizedArray res;
1856  res.data = _mm512_min_ps(data, other.data);
1857  return res;
1858  }
1859 
1860  // Make a few functions friends.
1861  template <typename Number2, std::size_t width2>
1864  template <typename Number2, std::size_t width2>
1867  template <typename Number2, std::size_t width2>
1871  template <typename Number2, std::size_t width2>
1875 };
1876 
1877 
1878 
1882 template <>
1883 inline DEAL_II_ALWAYS_INLINE void
1884 vectorized_load_and_transpose(const unsigned int n_entries,
1885  const float * in,
1886  const unsigned int * offsets,
1888 {
1889  // Similar to the double case, we perform the work on smaller entities. In
1890  // this case, we start from 128 bit arrays and insert them into a full 512
1891  // bit index. This reduces the code size and register pressure because we do
1892  // shuffles on 4 numbers rather than 16.
1893  const unsigned int n_chunks = n_entries / 4;
1894 
1895  // To avoid warnings about uninitialized variables, need to initialize one
1896  // variable to a pre-exisiting value in out, which will never get used in
1897  // the end. Keep the initialization outside the loop because of a bug in
1898  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1899  // case t3 is initialized to zero (inside/outside of loop), see
1900  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1901  __m512 t0, t1, t2, t3;
1902  if (n_chunks > 0)
1903  t3 = out[0].data;
1904  for (unsigned int i = 0; i < n_chunks; ++i)
1905  {
1906  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1907  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1908  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1909  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1910  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1911  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1912  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1913  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1914  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1915  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1916  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1917  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1918  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1919  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1920  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1921  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1922 
1923  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1924  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1925  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1926  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1927 
1928  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1929  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1930  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1931  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1932  }
1933 
1934  // remainder loop of work that does not divide by 4
1935  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1936  out[i].gather(in + i, offsets);
1937 }
1938 
1939 
1940 
1944 template <>
1945 inline DEAL_II_ALWAYS_INLINE void
1946 vectorized_load_and_transpose(const unsigned int n_entries,
1947  const std::array<float *, 16> &in,
1949 {
1950  // see the comments in the vectorized_load_and_transpose above
1951 
1952  const unsigned int n_chunks = n_entries / 4;
1953 
1954  __m512 t0, t1, t2, t3;
1955  if (n_chunks > 0)
1956  t3 = out[0].data;
1957  for (unsigned int i = 0; i < n_chunks; ++i)
1958  {
1959  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1960  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1961  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1962  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1963  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1964  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1965  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1966  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1967  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1968  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1969  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1970  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1971  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1972  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1973  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1974  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1975 
1976  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1977  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1978  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1979  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1980 
1981  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1982  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1983  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1984  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1985  }
1986 
1987  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1988  gather(out[i], in, i);
1989 }
1990 
1991 
1992 
1996 template <>
1997 inline DEAL_II_ALWAYS_INLINE void
1998 vectorized_transpose_and_store(const bool add_into,
1999  const unsigned int n_entries,
2000  const VectorizedArray<float, 16> *in,
2001  const unsigned int * offsets,
2002  float * out)
2003 {
2004  const unsigned int n_chunks = n_entries / 4;
2005  for (unsigned int i = 0; i < n_chunks; ++i)
2006  {
2007  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2008  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2009  __m512 t2 =
2010  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2011  __m512 t3 =
2012  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2013  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2014  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2015  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2016  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2017 
2018  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2019  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2020  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2021  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2022  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2023  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2024  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2025  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2026  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2027  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2028  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2029  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2030  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2031  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2032  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2033  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2034 
2035  // Cannot use the same store instructions in both paths of the 'if'
2036  // because the compiler cannot know that there is no aliasing between
2037  // pointers
2038  if (add_into)
2039  {
2040  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2041  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2042  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2043  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2044  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2045  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2046  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2047  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2048  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2049  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2050  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2051  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2052  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2053  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2054  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2055  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2056  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
2057  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2058  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
2059  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2060  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
2061  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2062  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
2063  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2064  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
2065  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2066  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
2067  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2068  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
2069  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2070  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
2071  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2072  }
2073  else
2074  {
2075  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2076  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2077  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2078  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2079  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2080  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2081  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2082  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2083  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2084  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2085  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2086  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2087  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2088  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2089  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2090  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2091  }
2092  }
2093 
2094  // remainder loop of work that does not divide by 4
2095  if (add_into)
2096  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2097  for (unsigned int v = 0; v < 16; ++v)
2098  out[offsets[v] + i] += in[i][v];
2099  else
2100  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2101  for (unsigned int v = 0; v < 16; ++v)
2102  out[offsets[v] + i] = in[i][v];
2103 }
2104 
2105 
2106 
2110 template <>
2111 inline DEAL_II_ALWAYS_INLINE void
2112 vectorized_transpose_and_store(const bool add_into,
2113  const unsigned int n_entries,
2114  const VectorizedArray<float, 16> *in,
2115  std::array<float *, 16> & out)
2116 {
2117  // see the comments in the vectorized_transpose_and_store above
2118 
2119  const unsigned int n_chunks = n_entries / 4;
2120  for (unsigned int i = 0; i < n_chunks; ++i)
2121  {
2122  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2123  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2124  __m512 t2 =
2125  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2126  __m512 t3 =
2127  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2128  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2129  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2130  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2131  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2132 
2133  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2134  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2135  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2136  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2137  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2138  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2139  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2140  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2141  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2142  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2143  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2144  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2145  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2146  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2147  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2148  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2149 
2150  if (add_into)
2151  {
2152  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2153  _mm_storeu_ps(out[0] + 4 * i, res0);
2154  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2155  _mm_storeu_ps(out[1] + 4 * i, res1);
2156  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2157  _mm_storeu_ps(out[2] + 4 * i, res2);
2158  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2159  _mm_storeu_ps(out[3] + 4 * i, res3);
2160  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2161  _mm_storeu_ps(out[4] + 4 * i, res4);
2162  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2163  _mm_storeu_ps(out[5] + 4 * i, res5);
2164  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2165  _mm_storeu_ps(out[6] + 4 * i, res6);
2166  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2167  _mm_storeu_ps(out[7] + 4 * i, res7);
2168  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2169  _mm_storeu_ps(out[8] + 4 * i, res8);
2170  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2171  _mm_storeu_ps(out[9] + 4 * i, res9);
2172  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2173  _mm_storeu_ps(out[10] + 4 * i, res10);
2174  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2175  _mm_storeu_ps(out[11] + 4 * i, res11);
2176  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2177  _mm_storeu_ps(out[12] + 4 * i, res12);
2178  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2179  _mm_storeu_ps(out[13] + 4 * i, res13);
2180  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2181  _mm_storeu_ps(out[14] + 4 * i, res14);
2182  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2183  _mm_storeu_ps(out[15] + 4 * i, res15);
2184  }
2185  else
2186  {
2187  _mm_storeu_ps(out[0] + 4 * i, res0);
2188  _mm_storeu_ps(out[1] + 4 * i, res1);
2189  _mm_storeu_ps(out[2] + 4 * i, res2);
2190  _mm_storeu_ps(out[3] + 4 * i, res3);
2191  _mm_storeu_ps(out[4] + 4 * i, res4);
2192  _mm_storeu_ps(out[5] + 4 * i, res5);
2193  _mm_storeu_ps(out[6] + 4 * i, res6);
2194  _mm_storeu_ps(out[7] + 4 * i, res7);
2195  _mm_storeu_ps(out[8] + 4 * i, res8);
2196  _mm_storeu_ps(out[9] + 4 * i, res9);
2197  _mm_storeu_ps(out[10] + 4 * i, res10);
2198  _mm_storeu_ps(out[11] + 4 * i, res11);
2199  _mm_storeu_ps(out[12] + 4 * i, res12);
2200  _mm_storeu_ps(out[13] + 4 * i, res13);
2201  _mm_storeu_ps(out[14] + 4 * i, res14);
2202  _mm_storeu_ps(out[15] + 4 * i, res15);
2203  }
2204  }
2205 
2206  if (add_into)
2207  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2208  for (unsigned int v = 0; v < 16; ++v)
2209  out[v][i] += in[i][v];
2210  else
2211  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2212  for (unsigned int v = 0; v < 16; ++v)
2213  out[v][i] = in[i][v];
2214 }
2215 
2216 # endif
2217 
2218 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2219 
2223 template <>
2224 class VectorizedArray<double, 4>
2225  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2226 {
2227 public:
2231  using value_type = double;
2232 
2237  VectorizedArray() = default;
2238 
2242  VectorizedArray(const double scalar)
2243  {
2244  this->operator=(scalar);
2245  }
2246 
2250  template <typename U>
2251  VectorizedArray(const std::initializer_list<U> &list)
2253  {}
2254 
2259  VectorizedArray &
2260  operator=(const double x) &
2261  {
2262  data = _mm256_set1_pd(x);
2263  return *this;
2264  }
2265 
2271  VectorizedArray &
2272  operator=(const double scalar) && = delete;
2273 
2278  double &
2279  operator[](const unsigned int comp)
2280  {
2281  AssertIndexRange(comp, 4);
2282  return *(reinterpret_cast<double *>(&data) + comp);
2283  }
2284 
2289  const double &
2290  operator[](const unsigned int comp) const
2291  {
2292  AssertIndexRange(comp, 4);
2293  return *(reinterpret_cast<const double *>(&data) + comp);
2294  }
2295 
2300  VectorizedArray &
2301  operator+=(const VectorizedArray &vec)
2302  {
2303  // if the compiler supports vector arithmetic, we can simply use +=
2304  // operator on the given data type. this allows the compiler to combine
2305  // additions with multiplication (fused multiply-add) if those
2306  // instructions are available. Otherwise, we need to use the built-in
2307  // intrinsic command for __m256d
2308 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2309  data += vec.data;
2310 # else
2311  data = _mm256_add_pd(data, vec.data);
2312 # endif
2313  return *this;
2314  }
2315 
2320  VectorizedArray &
2321  operator-=(const VectorizedArray &vec)
2322  {
2323 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2324  data -= vec.data;
2325 # else
2326  data = _mm256_sub_pd(data, vec.data);
2327 # endif
2328  return *this;
2329  }
2334  VectorizedArray &
2335  operator*=(const VectorizedArray &vec)
2336  {
2337 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2338  data *= vec.data;
2339 # else
2340  data = _mm256_mul_pd(data, vec.data);
2341 # endif
2342  return *this;
2343  }
2344 
2349  VectorizedArray &
2350  operator/=(const VectorizedArray &vec)
2351  {
2352 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2353  data /= vec.data;
2354 # else
2355  data = _mm256_div_pd(data, vec.data);
2356 # endif
2357  return *this;
2358  }
2359 
2366  void
2367  load(const double *ptr)
2368  {
2369  data = _mm256_loadu_pd(ptr);
2370  }
2371 
2373  void
2374  load(const float *ptr)
2375  {
2376  data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2377  }
2378 
2386  void
2387  store(double *ptr) const
2388  {
2389  _mm256_storeu_pd(ptr, data);
2390  }
2391 
2393  void
2394  store(float *ptr) const
2395  {
2396  _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2397  }
2398 
2404  void
2405  streaming_store(double *ptr) const
2406  {
2407  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2408  ExcMessage("Memory not aligned"));
2409  _mm256_stream_pd(ptr, data);
2410  }
2411 
2425  void
2426  gather(const double *base_ptr, const unsigned int *offsets)
2427  {
2428 # ifdef __AVX2__
2429  // unfortunately, there does not appear to be a 128 bit integer load, so
2430  // do it by some reinterpret casts here. this is allowed because the Intel
2431  // API allows aliasing between different vector types.
2432  const __m128 index_val =
2433  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2434  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2435 
2436  // work around a warning with gcc-12 about an uninitialized initial state
2437  // for gather by starting with a zero guess, even though all lanes will be
2438  // overwritten
2439  __m256d zero = _mm256_setzero_pd();
2440  __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2441 
2442  data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2443 # else
2444  for (unsigned int i = 0; i < 4; ++i)
2445  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2446 # endif
2447  }
2448 
2462  void
2463  scatter(const unsigned int *offsets, double *base_ptr) const
2464  {
2465  // no scatter operation in AVX/AVX2
2466  for (unsigned int i = 0; i < 4; ++i)
2467  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2468  }
2469 
2475  __m256d data;
2476 
2477 private:
2484  get_sqrt() const
2485  {
2486  VectorizedArray res;
2487  res.data = _mm256_sqrt_pd(data);
2488  return res;
2489  }
2490 
2497  get_abs() const
2498  {
2499  // to compute the absolute value, perform bitwise andnot with -0. This
2500  // will leave all value and exponent bits unchanged but force the sign
2501  // value to +.
2502  __m256d mask = _mm256_set1_pd(-0.);
2503  VectorizedArray res;
2504  res.data = _mm256_andnot_pd(mask, data);
2505  return res;
2506  }
2507 
2514  get_max(const VectorizedArray &other) const
2515  {
2516  VectorizedArray res;
2517  res.data = _mm256_max_pd(data, other.data);
2518  return res;
2519  }
2520 
2527  get_min(const VectorizedArray &other) const
2528  {
2529  VectorizedArray res;
2530  res.data = _mm256_min_pd(data, other.data);
2531  return res;
2532  }
2533 
2534  // Make a few functions friends.
2535  template <typename Number2, std::size_t width2>
2538  template <typename Number2, std::size_t width2>
2541  template <typename Number2, std::size_t width2>
2545  template <typename Number2, std::size_t width2>
2549 };
2550 
2551 
2552 
2556 template <>
2557 inline DEAL_II_ALWAYS_INLINE void
2558 vectorized_load_and_transpose(const unsigned int n_entries,
2559  const double * in,
2560  const unsigned int * offsets,
2562 {
2563  const unsigned int n_chunks = n_entries / 4;
2564  const double * in0 = in + offsets[0];
2565  const double * in1 = in + offsets[1];
2566  const double * in2 = in + offsets[2];
2567  const double * in3 = in + offsets[3];
2568 
2569  for (unsigned int i = 0; i < n_chunks; ++i)
2570  {
2571  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2572  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2573  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2574  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2575  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2576  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2577  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2578  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2579  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2580  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2581  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2582  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2583  }
2584 
2585  // remainder loop of work that does not divide by 4
2586  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2587  out[i].gather(in + i, offsets);
2588 }
2589 
2590 
2591 
2595 template <>
2596 inline DEAL_II_ALWAYS_INLINE void
2597 vectorized_load_and_transpose(const unsigned int n_entries,
2598  const std::array<double *, 4> &in,
2600 {
2601  // see the comments in the vectorized_load_and_transpose above
2602 
2603  const unsigned int n_chunks = n_entries / 4;
2604  const double * in0 = in[0];
2605  const double * in1 = in[1];
2606  const double * in2 = in[2];
2607  const double * in3 = in[3];
2608 
2609  for (unsigned int i = 0; i < n_chunks; ++i)
2610  {
2611  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2612  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2613  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2614  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2615  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2616  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2617  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2618  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2619  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2620  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2621  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2622  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2623  }
2624 
2625  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2626  gather(out[i], in, i);
2627 }
2628 
2629 
2630 
2634 template <>
2635 inline DEAL_II_ALWAYS_INLINE void
2636 vectorized_transpose_and_store(const bool add_into,
2637  const unsigned int n_entries,
2638  const VectorizedArray<double, 4> *in,
2639  const unsigned int * offsets,
2640  double * out)
2641 {
2642  const unsigned int n_chunks = n_entries / 4;
2643  double * out0 = out + offsets[0];
2644  double * out1 = out + offsets[1];
2645  double * out2 = out + offsets[2];
2646  double * out3 = out + offsets[3];
2647  for (unsigned int i = 0; i < n_chunks; ++i)
2648  {
2649  __m256d u0 = in[4 * i + 0].data;
2650  __m256d u1 = in[4 * i + 1].data;
2651  __m256d u2 = in[4 * i + 2].data;
2652  __m256d u3 = in[4 * i + 3].data;
2653  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2654  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2655  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2656  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2657  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2658  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2659  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2660  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2661 
2662  // Cannot use the same store instructions in both paths of the 'if'
2663  // because the compiler cannot know that there is no aliasing between
2664  // pointers
2665  if (add_into)
2666  {
2667  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2668  _mm256_storeu_pd(out0 + 4 * i, res0);
2669  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2670  _mm256_storeu_pd(out1 + 4 * i, res1);
2671  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2672  _mm256_storeu_pd(out2 + 4 * i, res2);
2673  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2674  _mm256_storeu_pd(out3 + 4 * i, res3);
2675  }
2676  else
2677  {
2678  _mm256_storeu_pd(out0 + 4 * i, res0);
2679  _mm256_storeu_pd(out1 + 4 * i, res1);
2680  _mm256_storeu_pd(out2 + 4 * i, res2);
2681  _mm256_storeu_pd(out3 + 4 * i, res3);
2682  }
2683  }
2684 
2685  // remainder loop of work that does not divide by 4
2686  if (add_into)
2687  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2688  for (unsigned int v = 0; v < 4; ++v)
2689  out[offsets[v] + i] += in[i][v];
2690  else
2691  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2692  for (unsigned int v = 0; v < 4; ++v)
2693  out[offsets[v] + i] = in[i][v];
2694 }
2695 
2696 
2697 
2701 template <>
2702 inline DEAL_II_ALWAYS_INLINE void
2703 vectorized_transpose_and_store(const bool add_into,
2704  const unsigned int n_entries,
2705  const VectorizedArray<double, 4> *in,
2706  std::array<double *, 4> & out)
2707 {
2708  // see the comments in the vectorized_transpose_and_store above
2709 
2710  const unsigned int n_chunks = n_entries / 4;
2711  double * out0 = out[0];
2712  double * out1 = out[1];
2713  double * out2 = out[2];
2714  double * out3 = out[3];
2715  for (unsigned int i = 0; i < n_chunks; ++i)
2716  {
2717  __m256d u0 = in[4 * i + 0].data;
2718  __m256d u1 = in[4 * i + 1].data;
2719  __m256d u2 = in[4 * i + 2].data;
2720  __m256d u3 = in[4 * i + 3].data;
2721  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2722  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2723  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2724  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2725  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2726  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2727  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2728  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2729 
2730  // Cannot use the same store instructions in both paths of the 'if'
2731  // because the compiler cannot know that there is no aliasing between
2732  // pointers
2733  if (add_into)
2734  {
2735  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2736  _mm256_storeu_pd(out0 + 4 * i, res0);
2737  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2738  _mm256_storeu_pd(out1 + 4 * i, res1);
2739  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2740  _mm256_storeu_pd(out2 + 4 * i, res2);
2741  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2742  _mm256_storeu_pd(out3 + 4 * i, res3);
2743  }
2744  else
2745  {
2746  _mm256_storeu_pd(out0 + 4 * i, res0);
2747  _mm256_storeu_pd(out1 + 4 * i, res1);
2748  _mm256_storeu_pd(out2 + 4 * i, res2);
2749  _mm256_storeu_pd(out3 + 4 * i, res3);
2750  }
2751  }
2752 
2753  // remainder loop of work that does not divide by 4
2754  if (add_into)
2755  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2756  for (unsigned int v = 0; v < 4; ++v)
2757  out[v][i] += in[i][v];
2758  else
2759  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2760  for (unsigned int v = 0; v < 4; ++v)
2761  out[v][i] = in[i][v];
2762 }
2763 
2764 
2765 
2769 template <>
2770 class VectorizedArray<float, 8>
2771  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2772 {
2773 public:
2777  using value_type = float;
2778 
2783  VectorizedArray() = default;
2784 
2788  VectorizedArray(const float scalar)
2789  {
2790  this->operator=(scalar);
2791  }
2792 
2796  template <typename U>
2797  VectorizedArray(const std::initializer_list<U> &list)
2798  : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
2799  {}
2800 
2805  VectorizedArray &
2806  operator=(const float x) &
2807  {
2808  data = _mm256_set1_ps(x);
2809  return *this;
2810  }
2811 
2817  VectorizedArray &
2818  operator=(const float scalar) && = delete;
2819 
2824  float &
2825  operator[](const unsigned int comp)
2826  {
2827  AssertIndexRange(comp, 8);
2828  return *(reinterpret_cast<float *>(&data) + comp);
2829  }
2830 
2835  const float &
2836  operator[](const unsigned int comp) const
2837  {
2838  AssertIndexRange(comp, 8);
2839  return *(reinterpret_cast<const float *>(&data) + comp);
2840  }
2841 
2846  VectorizedArray &
2847  operator+=(const VectorizedArray &vec)
2848  {
2849  // if the compiler supports vector arithmetic, we can simply use +=
2850  // operator on the given data type. this allows the compiler to combine
2851  // additions with multiplication (fused multiply-add) if those
2852  // instructions are available. Otherwise, we need to use the built-in
2853  // intrinsic command for __m256d
2854 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2855  data += vec.data;
2856 # else
2857  data = _mm256_add_ps(data, vec.data);
2858 # endif
2859  return *this;
2860  }
2861 
2866  VectorizedArray &
2867  operator-=(const VectorizedArray &vec)
2868  {
2869 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2870  data -= vec.data;
2871 # else
2872  data = _mm256_sub_ps(data, vec.data);
2873 # endif
2874  return *this;
2875  }
2880  VectorizedArray &
2881  operator*=(const VectorizedArray &vec)
2882  {
2883 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2884  data *= vec.data;
2885 # else
2886  data = _mm256_mul_ps(data, vec.data);
2887 # endif
2888  return *this;
2889  }
2890 
2895  VectorizedArray &
2896  operator/=(const VectorizedArray &vec)
2897  {
2898 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2899  data /= vec.data;
2900 # else
2901  data = _mm256_div_ps(data, vec.data);
2902 # endif
2903  return *this;
2904  }
2905 
2912  void
2913  load(const float *ptr)
2914  {
2915  data = _mm256_loadu_ps(ptr);
2916  }
2917 
2925  void
2926  store(float *ptr) const
2927  {
2928  _mm256_storeu_ps(ptr, data);
2929  }
2930 
2936  void
2937  streaming_store(float *ptr) const
2938  {
2939  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2940  ExcMessage("Memory not aligned"));
2941  _mm256_stream_ps(ptr, data);
2942  }
2943 
2957  void
2958  gather(const float *base_ptr, const unsigned int *offsets)
2959  {
2960 # ifdef __AVX2__
2961  // unfortunately, there does not appear to be a 256 bit integer load, so
2962  // do it by some reinterpret casts here. this is allowed because the Intel
2963  // API allows aliasing between different vector types.
2964  const __m256 index_val =
2965  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2966  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2967 
2968  // work around a warning with gcc-12 about an uninitialized initial state
2969  // for gather by starting with a zero guess, even though all lanes will be
2970  // overwritten
2971  __m256 zero = _mm256_setzero_ps();
2972  __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
2973 
2974  data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
2975 # else
2976  for (unsigned int i = 0; i < 8; ++i)
2977  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2978 # endif
2979  }
2980 
2994  void
2995  scatter(const unsigned int *offsets, float *base_ptr) const
2996  {
2997  // no scatter operation in AVX/AVX2
2998  for (unsigned int i = 0; i < 8; ++i)
2999  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3000  }
3001 
3007  __m256 data;
3008 
3009 private:
3016  get_sqrt() const
3017  {
3018  VectorizedArray res;
3019  res.data = _mm256_sqrt_ps(data);
3020  return res;
3021  }
3022 
3029  get_abs() const
3030  {
3031  // to compute the absolute value, perform bitwise andnot with -0. This
3032  // will leave all value and exponent bits unchanged but force the sign
3033  // value to +.
3034  __m256 mask = _mm256_set1_ps(-0.f);
3035  VectorizedArray res;
3036  res.data = _mm256_andnot_ps(mask, data);
3037  return res;
3038  }
3039 
3046  get_max(const VectorizedArray &other) const
3047  {
3048  VectorizedArray res;
3049  res.data = _mm256_max_ps(data, other.data);
3050  return res;
3051  }
3052 
3059  get_min(const VectorizedArray &other) const
3060  {
3061  VectorizedArray res;
3062  res.data = _mm256_min_ps(data, other.data);
3063  return res;
3064  }
3065 
3066  // Make a few functions friends.
3067  template <typename Number2, std::size_t width2>
3070  template <typename Number2, std::size_t width2>
3073  template <typename Number2, std::size_t width2>
3077  template <typename Number2, std::size_t width2>
3081 };
3082 
3083 
3084 
3088 template <>
3089 inline DEAL_II_ALWAYS_INLINE void
3090 vectorized_load_and_transpose(const unsigned int n_entries,
3091  const float * in,
3092  const unsigned int * offsets,
3094 {
3095  const unsigned int n_chunks = n_entries / 4;
3096  for (unsigned int i = 0; i < n_chunks; ++i)
3097  {
3098  // To avoid warnings about uninitialized variables, need to initialize
3099  // one variable with zero before using it.
3100  __m256 t0, t1, t2, t3 = {};
3101  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3102  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3103  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3104  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3105  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3106  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3107  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3108  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3109 
3110  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3111  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3112  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3113  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3114  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3115  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3116  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3117  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3118  }
3119 
3120  // remainder loop of work that does not divide by 4
3121  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3122  out[i].gather(in + i, offsets);
3123 }
3124 
3125 
3126 
3130 template <>
3131 inline DEAL_II_ALWAYS_INLINE void
3132 vectorized_load_and_transpose(const unsigned int n_entries,
3133  const std::array<float *, 8> &in,
3135 {
3136  // see the comments in the vectorized_load_and_transpose above
3137 
3138  const unsigned int n_chunks = n_entries / 4;
3139  for (unsigned int i = 0; i < n_chunks; ++i)
3140  {
3141  __m256 t0, t1, t2, t3 = {};
3142  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3143  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3144  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3145  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3146  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3147  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3148  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3149  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3150 
3151  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3152  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3153  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3154  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3155  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3156  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3157  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3158  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3159  }
3160 
3161  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3162  gather(out[i], in, i);
3163 }
3164 
3165 
3166 
3170 template <>
3171 inline DEAL_II_ALWAYS_INLINE void
3172 vectorized_transpose_and_store(const bool add_into,
3173  const unsigned int n_entries,
3174  const VectorizedArray<float, 8> *in,
3175  const unsigned int * offsets,
3176  float * out)
3177 {
3178  const unsigned int n_chunks = n_entries / 4;
3179  for (unsigned int i = 0; i < n_chunks; ++i)
3180  {
3181  __m256 u0 = in[4 * i + 0].data;
3182  __m256 u1 = in[4 * i + 1].data;
3183  __m256 u2 = in[4 * i + 2].data;
3184  __m256 u3 = in[4 * i + 3].data;
3185  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3186  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3187  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3188  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3189  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3190  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3191  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3192  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3193  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3194  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3195  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3196  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3197  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3198  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3199  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3200  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3201 
3202  // Cannot use the same store instructions in both paths of the 'if'
3203  // because the compiler cannot know that there is no aliasing between
3204  // pointers
3205  if (add_into)
3206  {
3207  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3208  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3209  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3210  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3211  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3212  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3213  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3214  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3215  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3216  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3217  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3218  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3219  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3220  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3221  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3222  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3223  }
3224  else
3225  {
3226  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3227  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3228  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3229  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3230  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3231  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3232  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3233  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3234  }
3235  }
3236 
3237  // remainder loop of work that does not divide by 4
3238  if (add_into)
3239  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3240  for (unsigned int v = 0; v < 8; ++v)
3241  out[offsets[v] + i] += in[i][v];
3242  else
3243  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3244  for (unsigned int v = 0; v < 8; ++v)
3245  out[offsets[v] + i] = in[i][v];
3246 }
3247 
3248 
3249 
3253 template <>
3254 inline DEAL_II_ALWAYS_INLINE void
3255 vectorized_transpose_and_store(const bool add_into,
3256  const unsigned int n_entries,
3257  const VectorizedArray<float, 8> *in,
3258  std::array<float *, 8> & out)
3259 {
3260  // see the comments in the vectorized_transpose_and_store above
3261 
3262  const unsigned int n_chunks = n_entries / 4;
3263  for (unsigned int i = 0; i < n_chunks; ++i)
3264  {
3265  __m256 u0 = in[4 * i + 0].data;
3266  __m256 u1 = in[4 * i + 1].data;
3267  __m256 u2 = in[4 * i + 2].data;
3268  __m256 u3 = in[4 * i + 3].data;
3269  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3270  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3271  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3272  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3273  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3274  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3275  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3276  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3277  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3278  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3279  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3280  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3281  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3282  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3283  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3284  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3285 
3286  if (add_into)
3287  {
3288  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3289  _mm_storeu_ps(out[0] + 4 * i, res0);
3290  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3291  _mm_storeu_ps(out[1] + 4 * i, res1);
3292  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3293  _mm_storeu_ps(out[2] + 4 * i, res2);
3294  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3295  _mm_storeu_ps(out[3] + 4 * i, res3);
3296  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3297  _mm_storeu_ps(out[4] + 4 * i, res4);
3298  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3299  _mm_storeu_ps(out[5] + 4 * i, res5);
3300  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3301  _mm_storeu_ps(out[6] + 4 * i, res6);
3302  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3303  _mm_storeu_ps(out[7] + 4 * i, res7);
3304  }
3305  else
3306  {
3307  _mm_storeu_ps(out[0] + 4 * i, res0);
3308  _mm_storeu_ps(out[1] + 4 * i, res1);
3309  _mm_storeu_ps(out[2] + 4 * i, res2);
3310  _mm_storeu_ps(out[3] + 4 * i, res3);
3311  _mm_storeu_ps(out[4] + 4 * i, res4);
3312  _mm_storeu_ps(out[5] + 4 * i, res5);
3313  _mm_storeu_ps(out[6] + 4 * i, res6);
3314  _mm_storeu_ps(out[7] + 4 * i, res7);
3315  }
3316  }
3317 
3318  if (add_into)
3319  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3320  for (unsigned int v = 0; v < 8; ++v)
3321  out[v][i] += in[i][v];
3322  else
3323  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3324  for (unsigned int v = 0; v < 8; ++v)
3325  out[v][i] = in[i][v];
3326 }
3327 
3328 # endif
3329 
3330 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3331 
3335 template <>
3336 class VectorizedArray<double, 2>
3337  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3338 {
3339 public:
3343  using value_type = double;
3344 
3349  VectorizedArray() = default;
3350 
3354  VectorizedArray(const double scalar)
3355  {
3356  this->operator=(scalar);
3357  }
3358 
3362  template <typename U>
3363  VectorizedArray(const std::initializer_list<U> &list)
3365  {}
3366 
3371  VectorizedArray &
3372  operator=(const double x) &
3373  {
3374  data = _mm_set1_pd(x);
3375  return *this;
3376  }
3377 
3383  VectorizedArray &
3384  operator=(const double scalar) && = delete;
3385 
3390  double &
3391  operator[](const unsigned int comp)
3392  {
3393  AssertIndexRange(comp, 2);
3394  return *(reinterpret_cast<double *>(&data) + comp);
3395  }
3396 
3401  const double &
3402  operator[](const unsigned int comp) const
3403  {
3404  AssertIndexRange(comp, 2);
3405  return *(reinterpret_cast<const double *>(&data) + comp);
3406  }
3407 
3412  VectorizedArray &
3413  operator+=(const VectorizedArray &vec)
3414  {
3415 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3416  data += vec.data;
3417 # else
3418  data = _mm_add_pd(data, vec.data);
3419 # endif
3420  return *this;
3421  }
3422 
3427  VectorizedArray &
3428  operator-=(const VectorizedArray &vec)
3429  {
3430 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3431  data -= vec.data;
3432 # else
3433  data = _mm_sub_pd(data, vec.data);
3434 # endif
3435  return *this;
3436  }
3437 
3442  VectorizedArray &
3443  operator*=(const VectorizedArray &vec)
3444  {
3445 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3446  data *= vec.data;
3447 # else
3448  data = _mm_mul_pd(data, vec.data);
3449 # endif
3450  return *this;
3451  }
3452 
3457  VectorizedArray &
3458  operator/=(const VectorizedArray &vec)
3459  {
3460 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3461  data /= vec.data;
3462 # else
3463  data = _mm_div_pd(data, vec.data);
3464 # endif
3465  return *this;
3466  }
3467 
3474  void
3475  load(const double *ptr)
3476  {
3477  data = _mm_loadu_pd(ptr);
3478  }
3479 
3481  void
3482  load(const float *ptr)
3483  {
3485  for (unsigned int i = 0; i < 2; ++i)
3486  data[i] = ptr[i];
3487  }
3488 
3496  void
3497  store(double *ptr) const
3498  {
3499  _mm_storeu_pd(ptr, data);
3500  }
3501 
3503  void
3504  store(float *ptr) const
3505  {
3507  for (unsigned int i = 0; i < 2; ++i)
3508  ptr[i] = data[i];
3509  }
3510 
3516  void
3517  streaming_store(double *ptr) const
3518  {
3519  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3520  ExcMessage("Memory not aligned"));
3521  _mm_stream_pd(ptr, data);
3522  }
3523 
3537  void
3538  gather(const double *base_ptr, const unsigned int *offsets)
3539  {
3540  for (unsigned int i = 0; i < 2; ++i)
3541  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3542  }
3543 
3557  void
3558  scatter(const unsigned int *offsets, double *base_ptr) const
3559  {
3560  for (unsigned int i = 0; i < 2; ++i)
3561  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3562  }
3563 
3569  __m128d data;
3570 
3571 private:
3578  get_sqrt() const
3579  {
3580  VectorizedArray res;
3581  res.data = _mm_sqrt_pd(data);
3582  return res;
3583  }
3584 
3591  get_abs() const
3592  {
3593  // to compute the absolute value, perform
3594  // bitwise andnot with -0. This will leave all
3595  // value and exponent bits unchanged but force
3596  // the sign value to +.
3597  __m128d mask = _mm_set1_pd(-0.);
3598  VectorizedArray res;
3599  res.data = _mm_andnot_pd(mask, data);
3600  return res;
3601  }
3602 
3609  get_max(const VectorizedArray &other) const
3610  {
3611  VectorizedArray res;
3612  res.data = _mm_max_pd(data, other.data);
3613  return res;
3614  }
3615 
3622  get_min(const VectorizedArray &other) const
3623  {
3624  VectorizedArray res;
3625  res.data = _mm_min_pd(data, other.data);
3626  return res;
3627  }
3628 
3629  // Make a few functions friends.
3630  template <typename Number2, std::size_t width2>
3633  template <typename Number2, std::size_t width2>
3636  template <typename Number2, std::size_t width2>
3640  template <typename Number2, std::size_t width2>
3644 };
3645 
3646 
3647 
3651 template <>
3652 inline DEAL_II_ALWAYS_INLINE void
3653 vectorized_load_and_transpose(const unsigned int n_entries,
3654  const double * in,
3655  const unsigned int * offsets,
3657 {
3658  const unsigned int n_chunks = n_entries / 2;
3659  for (unsigned int i = 0; i < n_chunks; ++i)
3660  {
3661  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3662  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3663  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3664  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3665  }
3666 
3667  // remainder loop of work that does not divide by 2
3668  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3669  for (unsigned int v = 0; v < 2; ++v)
3670  out[i][v] = in[offsets[v] + i];
3671 }
3672 
3673 
3674 
3678 template <>
3679 inline DEAL_II_ALWAYS_INLINE void
3680 vectorized_load_and_transpose(const unsigned int n_entries,
3681  const std::array<double *, 2> &in,
3683 {
3684  // see the comments in the vectorized_load_and_transpose above
3685 
3686  const unsigned int n_chunks = n_entries / 2;
3687  for (unsigned int i = 0; i < n_chunks; ++i)
3688  {
3689  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3690  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3691  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3692  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3693  }
3694 
3695  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3696  for (unsigned int v = 0; v < 2; ++v)
3697  out[i][v] = in[v][i];
3698 }
3699 
3700 
3701 
3705 template <>
3706 inline DEAL_II_ALWAYS_INLINE void
3707 vectorized_transpose_and_store(const bool add_into,
3708  const unsigned int n_entries,
3709  const VectorizedArray<double, 2> *in,
3710  const unsigned int * offsets,
3711  double * out)
3712 {
3713  const unsigned int n_chunks = n_entries / 2;
3714  if (add_into)
3715  {
3716  for (unsigned int i = 0; i < n_chunks; ++i)
3717  {
3718  __m128d u0 = in[2 * i + 0].data;
3719  __m128d u1 = in[2 * i + 1].data;
3720  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3721  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3722  _mm_storeu_pd(out + 2 * i + offsets[0],
3723  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3724  res0));
3725  _mm_storeu_pd(out + 2 * i + offsets[1],
3726  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3727  res1));
3728  }
3729  // remainder loop of work that does not divide by 2
3730  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3731  for (unsigned int v = 0; v < 2; ++v)
3732  out[offsets[v] + i] += in[i][v];
3733  }
3734  else
3735  {
3736  for (unsigned int i = 0; i < n_chunks; ++i)
3737  {
3738  __m128d u0 = in[2 * i + 0].data;
3739  __m128d u1 = in[2 * i + 1].data;
3740  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3741  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3742  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3743  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3744  }
3745  // remainder loop of work that does not divide by 2
3746  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3747  for (unsigned int v = 0; v < 2; ++v)
3748  out[offsets[v] + i] = in[i][v];
3749  }
3750 }
3751 
3752 
3753 
3757 template <>
3758 inline DEAL_II_ALWAYS_INLINE void
3759 vectorized_transpose_and_store(const bool add_into,
3760  const unsigned int n_entries,
3761  const VectorizedArray<double, 2> *in,
3762  std::array<double *, 2> & out)
3763 {
3764  // see the comments in the vectorized_transpose_and_store above
3765 
3766  const unsigned int n_chunks = n_entries / 2;
3767  if (add_into)
3768  {
3769  for (unsigned int i = 0; i < n_chunks; ++i)
3770  {
3771  __m128d u0 = in[2 * i + 0].data;
3772  __m128d u1 = in[2 * i + 1].data;
3773  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3774  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3775  _mm_storeu_pd(out[0] + 2 * i,
3776  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3777  _mm_storeu_pd(out[1] + 2 * i,
3778  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3779  }
3780 
3781  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3782  for (unsigned int v = 0; v < 2; ++v)
3783  out[v][i] += in[i][v];
3784  }
3785  else
3786  {
3787  for (unsigned int i = 0; i < n_chunks; ++i)
3788  {
3789  __m128d u0 = in[2 * i + 0].data;
3790  __m128d u1 = in[2 * i + 1].data;
3791  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3792  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3793  _mm_storeu_pd(out[0] + 2 * i, res0);
3794  _mm_storeu_pd(out[1] + 2 * i, res1);
3795  }
3796 
3797  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3798  for (unsigned int v = 0; v < 2; ++v)
3799  out[v][i] = in[i][v];
3800  }
3801 }
3802 
3803 
3804 
3808 template <>
3809 class VectorizedArray<float, 4>
3810  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3811 {
3812 public:
3816  using value_type = float;
3817 
3826  VectorizedArray() = default;
3827 
3831  VectorizedArray(const float scalar)
3832  {
3833  this->operator=(scalar);
3834  }
3835 
3839  template <typename U>
3840  VectorizedArray(const std::initializer_list<U> &list)
3841  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
3842  {}
3843 
3845  VectorizedArray &
3846  operator=(const float x) &
3847  {
3848  data = _mm_set1_ps(x);
3849  return *this;
3850  }
3851 
3857  VectorizedArray &
3858  operator=(const float scalar) && = delete;
3859 
3864  float &
3865  operator[](const unsigned int comp)
3866  {
3867  AssertIndexRange(comp, 4);
3868  return *(reinterpret_cast<float *>(&data) + comp);
3869  }
3870 
3875  const float &
3876  operator[](const unsigned int comp) const
3877  {
3878  AssertIndexRange(comp, 4);
3879  return *(reinterpret_cast<const float *>(&data) + comp);
3880  }
3881 
3886  VectorizedArray &
3887  operator+=(const VectorizedArray &vec)
3888  {
3889 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3890  data += vec.data;
3891 # else
3892  data = _mm_add_ps(data, vec.data);
3893 # endif
3894  return *this;
3895  }
3896 
3901  VectorizedArray &
3902  operator-=(const VectorizedArray &vec)
3903  {
3904 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3905  data -= vec.data;
3906 # else
3907  data = _mm_sub_ps(data, vec.data);
3908 # endif
3909  return *this;
3910  }
3911 
3916  VectorizedArray &
3917  operator*=(const VectorizedArray &vec)
3918  {
3919 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3920  data *= vec.data;
3921 # else
3922  data = _mm_mul_ps(data, vec.data);
3923 # endif
3924  return *this;
3925  }
3926 
3931  VectorizedArray &
3932  operator/=(const VectorizedArray &vec)
3933  {
3934 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3935  data /= vec.data;
3936 # else
3937  data = _mm_div_ps(data, vec.data);
3938 # endif
3939  return *this;
3940  }
3941 
3948  void
3949  load(const float *ptr)
3950  {
3951  data = _mm_loadu_ps(ptr);
3952  }
3953 
3961  void
3962  store(float *ptr) const
3963  {
3964  _mm_storeu_ps(ptr, data);
3965  }
3966 
3972  void
3973  streaming_store(float *ptr) const
3974  {
3975  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3976  ExcMessage("Memory not aligned"));
3977  _mm_stream_ps(ptr, data);
3978  }
3979 
3993  void
3994  gather(const float *base_ptr, const unsigned int *offsets)
3995  {
3996  for (unsigned int i = 0; i < 4; ++i)
3997  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3998  }
3999 
4013  void
4014  scatter(const unsigned int *offsets, float *base_ptr) const
4015  {
4016  for (unsigned int i = 0; i < 4; ++i)
4017  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4018  }
4019 
4025  __m128 data;
4026 
4027 private:
4034  get_sqrt() const
4035  {
4036  VectorizedArray res;
4037  res.data = _mm_sqrt_ps(data);
4038  return res;
4039  }
4040 
4047  get_abs() const
4048  {
4049  // to compute the absolute value, perform bitwise andnot with -0. This
4050  // will leave all value and exponent bits unchanged but force the sign
4051  // value to +.
4052  __m128 mask = _mm_set1_ps(-0.f);
4053  VectorizedArray res;
4054  res.data = _mm_andnot_ps(mask, data);
4055  return res;
4056  }
4057 
4064  get_max(const VectorizedArray &other) const
4065  {
4066  VectorizedArray res;
4067  res.data = _mm_max_ps(data, other.data);
4068  return res;
4069  }
4070 
4077  get_min(const VectorizedArray &other) const
4078  {
4079  VectorizedArray res;
4080  res.data = _mm_min_ps(data, other.data);
4081  return res;
4082  }
4083 
4084  // Make a few functions friends.
4085  template <typename Number2, std::size_t width2>
4088  template <typename Number2, std::size_t width2>
4091  template <typename Number2, std::size_t width2>
4095  template <typename Number2, std::size_t width2>
4099 };
4100 
4101 
4102 
4106 template <>
4107 inline DEAL_II_ALWAYS_INLINE void
4108 vectorized_load_and_transpose(const unsigned int n_entries,
4109  const float * in,
4110  const unsigned int * offsets,
4112 {
4113  const unsigned int n_chunks = n_entries / 4;
4114  for (unsigned int i = 0; i < n_chunks; ++i)
4115  {
4116  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
4117  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
4118  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
4119  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
4120  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4121  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4122  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4123  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4124  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4125  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4126  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4127  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4128  }
4129 
4130  // remainder loop of work that does not divide by 4
4131  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4132  for (unsigned int v = 0; v < 4; ++v)
4133  out[i][v] = in[offsets[v] + i];
4134 }
4135 
4136 
4137 
4141 template <>
4142 inline DEAL_II_ALWAYS_INLINE void
4143 vectorized_load_and_transpose(const unsigned int n_entries,
4144  const std::array<float *, 4> &in,
4146 {
4147  // see the comments in the vectorized_load_and_transpose above
4148 
4149  const unsigned int n_chunks = n_entries / 4;
4150  for (unsigned int i = 0; i < n_chunks; ++i)
4151  {
4152  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
4153  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
4154  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
4155  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
4156  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4157  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4158  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4159  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4160  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4161  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4162  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4163  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4164  }
4165 
4166  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4167  for (unsigned int v = 0; v < 4; ++v)
4168  out[i][v] = in[v][i];
4169 }
4170 
4171 
4172 
4176 template <>
4177 inline DEAL_II_ALWAYS_INLINE void
4178 vectorized_transpose_and_store(const bool add_into,
4179  const unsigned int n_entries,
4180  const VectorizedArray<float, 4> *in,
4181  const unsigned int * offsets,
4182  float * out)
4183 {
4184  const unsigned int n_chunks = n_entries / 4;
4185  for (unsigned int i = 0; i < n_chunks; ++i)
4186  {
4187  __m128 u0 = in[4 * i + 0].data;
4188  __m128 u1 = in[4 * i + 1].data;
4189  __m128 u2 = in[4 * i + 2].data;
4190  __m128 u3 = in[4 * i + 3].data;
4191  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4192  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4193  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4194  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4195  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4196  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4197  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4198  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4199 
4200  // Cannot use the same store instructions in both paths of the 'if'
4201  // because the compiler cannot know that there is no aliasing between
4202  // pointers
4203  if (add_into)
4204  {
4205  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4206  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4207  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4208  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4209  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4210  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4211  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4212  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4213  }
4214  else
4215  {
4216  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4217  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4218  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4219  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4220  }
4221  }
4222 
4223  // remainder loop of work that does not divide by 4
4224  if (add_into)
4225  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4226  for (unsigned int v = 0; v < 4; ++v)
4227  out[offsets[v] + i] += in[i][v];
4228  else
4229  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4230  for (unsigned int v = 0; v < 4; ++v)
4231  out[offsets[v] + i] = in[i][v];
4232 }
4233 
4234 
4235 
4239 template <>
4240 inline DEAL_II_ALWAYS_INLINE void
4241 vectorized_transpose_and_store(const bool add_into,
4242  const unsigned int n_entries,
4243  const VectorizedArray<float, 4> *in,
4244  std::array<float *, 4> & out)
4245 {
4246  // see the comments in the vectorized_transpose_and_store above
4247 
4248  const unsigned int n_chunks = n_entries / 4;
4249  for (unsigned int i = 0; i < n_chunks; ++i)
4250  {
4251  __m128 u0 = in[4 * i + 0].data;
4252  __m128 u1 = in[4 * i + 1].data;
4253  __m128 u2 = in[4 * i + 2].data;
4254  __m128 u3 = in[4 * i + 3].data;
4255  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4256  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4257  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4258  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4259  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4260  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4261  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4262  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4263 
4264  if (add_into)
4265  {
4266  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4267  _mm_storeu_ps(out[0] + 4 * i, u0);
4268  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4269  _mm_storeu_ps(out[1] + 4 * i, u1);
4270  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4271  _mm_storeu_ps(out[2] + 4 * i, u2);
4272  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4273  _mm_storeu_ps(out[3] + 4 * i, u3);
4274  }
4275  else
4276  {
4277  _mm_storeu_ps(out[0] + 4 * i, u0);
4278  _mm_storeu_ps(out[1] + 4 * i, u1);
4279  _mm_storeu_ps(out[2] + 4 * i, u2);
4280  _mm_storeu_ps(out[3] + 4 * i, u3);
4281  }
4282  }
4283 
4284  if (add_into)
4285  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4286  for (unsigned int v = 0; v < 4; ++v)
4287  out[v][i] += in[i][v];
4288  else
4289  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4290  for (unsigned int v = 0; v < 4; ++v)
4291  out[v][i] = in[i][v];
4292 }
4293 
4294 
4295 
4296 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__)
4297 
4298 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4299  defined(__VSX__)
4300 
4301 template <>
4302 class VectorizedArray<double, 2>
4303  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4304 {
4305 public:
4309  using value_type = double;
4310 
4315  VectorizedArray() = default;
4316 
4320  VectorizedArray(const double scalar)
4321  {
4322  this->operator=(scalar);
4323  }
4324 
4328  template <typename U>
4329  VectorizedArray(const std::initializer_list<U> &list)
4331  {}
4332 
4337  VectorizedArray &
4338  operator=(const double x) &
4339  {
4340  data = vec_splats(x);
4341 
4342  // Some compilers believe that vec_splats sets 'x', but that's not true.
4343  // They then warn about setting a variable and not using it. Suppress the
4344  // warning by "using" the variable:
4345  (void)x;
4346  return *this;
4347  }
4348 
4354  VectorizedArray &
4355  operator=(const double scalar) && = delete;
4356 
4361  double &
4362  operator[](const unsigned int comp)
4363  {
4364  AssertIndexRange(comp, 2);
4365  return *(reinterpret_cast<double *>(&data) + comp);
4366  }
4367 
4372  const double &
4373  operator[](const unsigned int comp) const
4374  {
4375  AssertIndexRange(comp, 2);
4376  return *(reinterpret_cast<const double *>(&data) + comp);
4377  }
4378 
4383  VectorizedArray &
4384  operator+=(const VectorizedArray &vec)
4385  {
4386  data = vec_add(data, vec.data);
4387  return *this;
4388  }
4389 
4394  VectorizedArray &
4395  operator-=(const VectorizedArray &vec)
4396  {
4397  data = vec_sub(data, vec.data);
4398  return *this;
4399  }
4400 
4405  VectorizedArray &
4406  operator*=(const VectorizedArray &vec)
4407  {
4408  data = vec_mul(data, vec.data);
4409  return *this;
4410  }
4411 
4416  VectorizedArray &
4417  operator/=(const VectorizedArray &vec)
4418  {
4419  data = vec_div(data, vec.data);
4420  return *this;
4421  }
4422 
4428  void
4429  load(const double *ptr)
4430  {
4431  data = vec_vsx_ld(0, ptr);
4432  }
4433 
4439  void
4440  store(double *ptr) const
4441  {
4442  vec_vsx_st(data, 0, ptr);
4443  }
4444 
4449  void
4450  streaming_store(double *ptr) const
4451  {
4452  store(ptr);
4453  }
4454 
4459  void
4460  gather(const double *base_ptr, const unsigned int *offsets)
4461  {
4462  for (unsigned int i = 0; i < 2; ++i)
4463  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4464  }
4465 
4470  void
4471  scatter(const unsigned int *offsets, double *base_ptr) const
4472  {
4473  for (unsigned int i = 0; i < 2; ++i)
4474  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4475  }
4476 
4482  __vector double data;
4483 
4484 private:
4491  get_sqrt() const
4492  {
4493  VectorizedArray res;
4494  res.data = vec_sqrt(data);
4495  return res;
4496  }
4497 
4504  get_abs() const
4505  {
4506  VectorizedArray res;
4507  res.data = vec_abs(data);
4508  return res;
4509  }
4510 
4517  get_max(const VectorizedArray &other) const
4518  {
4519  VectorizedArray res;
4520  res.data = vec_max(data, other.data);
4521  return res;
4522  }
4523 
4530  get_min(const VectorizedArray &other) const
4531  {
4532  VectorizedArray res;
4533  res.data = vec_min(data, other.data);
4534  return res;
4535  }
4536 
4537  // Make a few functions friends.
4538  template <typename Number2, std::size_t width2>
4541  template <typename Number2, std::size_t width2>
4544  template <typename Number2, std::size_t width2>
4548  template <typename Number2, std::size_t width2>
4552 };
4553 
4554 
4555 
4556 template <>
4557 class VectorizedArray<float, 4>
4558  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4559 {
4560 public:
4564  using value_type = float;
4565 
4570  VectorizedArray() = default;
4571 
4575  VectorizedArray(const float scalar)
4576  {
4577  this->operator=(scalar);
4578  }
4579 
4583  template <typename U>
4584  VectorizedArray(const std::initializer_list<U> &list)
4585  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
4586  {}
4587 
4592  VectorizedArray &
4593  operator=(const float x) &
4594  {
4595  data = vec_splats(x);
4596 
4597  // Some compilers believe that vec_splats sets 'x', but that's not true.
4598  // They then warn about setting a variable and not using it. Suppress the
4599  // warning by "using" the variable:
4600  (void)x;
4601  return *this;
4602  }
4603 
4609  VectorizedArray &
4610  operator=(const float scalar) && = delete;
4611 
4616  float &
4617  operator[](const unsigned int comp)
4618  {
4619  AssertIndexRange(comp, 4);
4620  return *(reinterpret_cast<float *>(&data) + comp);
4621  }
4622 
4627  const float &
4628  operator[](const unsigned int comp) const
4629  {
4630  AssertIndexRange(comp, 4);
4631  return *(reinterpret_cast<const float *>(&data) + comp);
4632  }
4633 
4638  VectorizedArray &
4639  operator+=(const VectorizedArray &vec)
4640  {
4641  data = vec_add(data, vec.data);
4642  return *this;
4643  }
4644 
4649  VectorizedArray &
4650  operator-=(const VectorizedArray &vec)
4651  {
4652  data = vec_sub(data, vec.data);
4653  return *this;
4654  }
4655 
4660  VectorizedArray &
4661  operator*=(const VectorizedArray &vec)
4662  {
4663  data = vec_mul(data, vec.data);
4664  return *this;
4665  }
4666 
4671  VectorizedArray &
4672  operator/=(const VectorizedArray &vec)
4673  {
4674  data = vec_div(data, vec.data);
4675  return *this;
4676  }
4677 
4683  void
4684  load(const float *ptr)
4685  {
4686  data = vec_vsx_ld(0, ptr);
4687  }
4688 
4694  void
4695  store(float *ptr) const
4696  {
4697  vec_vsx_st(data, 0, ptr);
4698  }
4699 
4704  void
4705  streaming_store(float *ptr) const
4706  {
4707  store(ptr);
4708  }
4709 
4714  void
4715  gather(const float *base_ptr, const unsigned int *offsets)
4716  {
4717  for (unsigned int i = 0; i < 4; ++i)
4718  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4719  }
4720 
4725  void
4726  scatter(const unsigned int *offsets, float *base_ptr) const
4727  {
4728  for (unsigned int i = 0; i < 4; ++i)
4729  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4730  }
4731 
4737  __vector float data;
4738 
4739 private:
4746  get_sqrt() const
4747  {
4748  VectorizedArray res;
4749  res.data = vec_sqrt(data);
4750  return res;
4751  }
4752 
4759  get_abs() const
4760  {
4761  VectorizedArray res;
4762  res.data = vec_abs(data);
4763  return res;
4764  }
4765 
4772  get_max(const VectorizedArray &other) const
4773  {
4774  VectorizedArray res;
4775  res.data = vec_max(data, other.data);
4776  return res;
4777  }
4778 
4785  get_min(const VectorizedArray &other) const
4786  {
4787  VectorizedArray res;
4788  res.data = vec_min(data, other.data);
4789  return res;
4790  }
4791 
4792  // Make a few functions friends.
4793  template <typename Number2, std::size_t width2>
4796  template <typename Number2, std::size_t width2>
4799  template <typename Number2, std::size_t width2>
4803  template <typename Number2, std::size_t width2>
4807 };
4808 
4809 # endif // if DEAL_II_VECTORIZATION_LEVEL >=1 && defined(__ALTIVEC__) &&
4810  // defined(__VSX__)
4811 
4812 
4813 #endif // DOXYGEN
4814 
4825 template <typename Number, std::size_t width>
4826 inline DEAL_II_ALWAYS_INLINE bool
4828  const VectorizedArray<Number, width> &rhs)
4829 {
4830  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4831  if (lhs[i] != rhs[i])
4832  return false;
4833 
4834  return true;
4835 }
4836 
4837 
4843 template <typename Number, std::size_t width>
4847 {
4849  return tmp += v;
4850 }
4851 
4857 template <typename Number, std::size_t width>
4861 {
4863  return tmp -= v;
4864 }
4865 
4871 template <typename Number, std::size_t width>
4875 {
4877  return tmp *= v;
4878 }
4879 
4885 template <typename Number, std::size_t width>
4889 {
4891  return tmp /= v;
4892 }
4893 
4900 template <typename Number, std::size_t width>
4902 operator+(const Number &u, const VectorizedArray<Number, width> &v)
4903 {
4905  return tmp += v;
4906 }
4907 
4916 template <std::size_t width>
4918 operator+(const double u, const VectorizedArray<float, width> &v)
4919 {
4921  return tmp += v;
4922 }
4923 
4930 template <typename Number, std::size_t width>
4932 operator+(const VectorizedArray<Number, width> &v, const Number &u)
4933 {
4934  return u + v;
4935 }
4936 
4945 template <std::size_t width>
4947 operator+(const VectorizedArray<float, width> &v, const double u)
4948 {
4949  return u + v;
4950 }
4951 
4958 template <typename Number, std::size_t width>
4960 operator-(const Number &u, const VectorizedArray<Number, width> &v)
4961 {
4963  return tmp -= v;
4964 }
4965 
4974 template <std::size_t width>
4976 operator-(const double u, const VectorizedArray<float, width> &v)
4977 {
4978  VectorizedArray<float, width> tmp = static_cast<float>(u);
4979  return tmp -= v;
4980 }
4981 
4988 template <typename Number, std::size_t width>
4990 operator-(const VectorizedArray<Number, width> &v, const Number &u)
4991 {
4993  return v - tmp;
4994 }
4995 
5004 template <std::size_t width>
5006 operator-(const VectorizedArray<float, width> &v, const double u)
5007 {
5008  VectorizedArray<float, width> tmp = static_cast<float>(u);
5009  return v - tmp;
5010 }
5011 
5018 template <typename Number, std::size_t width>
5020 operator*(const Number &u, const VectorizedArray<Number, width> &v)
5021 {
5023  return tmp *= v;
5024 }
5025 
5034 template <std::size_t width>
5036 operator*(const double u, const VectorizedArray<float, width> &v)
5037 {
5038  VectorizedArray<float, width> tmp = static_cast<float>(u);
5039  return tmp *= v;
5040 }
5041 
5048 template <typename Number, std::size_t width>
5050 operator*(const VectorizedArray<Number, width> &v, const Number &u)
5051 {
5052  return u * v;
5053 }
5054 
5063 template <std::size_t width>
5065 operator*(const VectorizedArray<float, width> &v, const double u)
5066 {
5067  return u * v;
5068 }
5069 
5076 template <typename Number, std::size_t width>
5078 operator/(const Number &u, const VectorizedArray<Number, width> &v)
5079 {
5081  return tmp /= v;
5082 }
5083 
5092 template <std::size_t width>
5094 operator/(const double u, const VectorizedArray<float, width> &v)
5095 {
5096  VectorizedArray<float, width> tmp = static_cast<float>(u);
5097  return tmp /= v;
5098 }
5099 
5106 template <typename Number, std::size_t width>
5108 operator/(const VectorizedArray<Number, width> &v, const Number &u)
5109 {
5111  return v / tmp;
5112 }
5113 
5122 template <std::size_t width>
5124 operator/(const VectorizedArray<float, width> &v, const double u)
5125 {
5126  VectorizedArray<float, width> tmp = static_cast<float>(u);
5127  return v / tmp;
5128 }
5129 
5135 template <typename Number, std::size_t width>
5138 {
5139  return u;
5140 }
5141 
5147 template <typename Number, std::size_t width>
5150 {
5151  // to get a negative sign, subtract the input from zero (could also
5152  // multiply by -1, but this one is slightly simpler)
5153  return VectorizedArray<Number, width>() - u;
5154 }
5155 
5161 template <typename Number, std::size_t width>
5162 inline std::ostream &
5163 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
5164 {
5165  constexpr unsigned int n = VectorizedArray<Number, width>::size();
5166  for (unsigned int i = 0; i < n - 1; ++i)
5167  out << p[i] << ' ';
5168  out << p[n - 1];
5169 
5170  return out;
5171 }
5172 
5187 enum class SIMDComparison : int
5188 {
5189 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5190  equal = _CMP_EQ_OQ,
5191  not_equal = _CMP_NEQ_OQ,
5192  less_than = _CMP_LT_OQ,
5193  less_than_or_equal = _CMP_LE_OQ,
5194  greater_than = _CMP_GT_OQ,
5195  greater_than_or_equal = _CMP_GE_OQ
5196 #else
5197  equal,
5198  not_equal,
5199  less_than,
5201  greater_than,
5203 #endif
5204 };
5205 
5206 
5270 template <SIMDComparison predicate, typename Number>
5271 DEAL_II_ALWAYS_INLINE inline Number
5272 compare_and_apply_mask(const Number &left,
5273  const Number &right,
5274  const Number &true_value,
5275  const Number &false_value)
5276 {
5277  bool mask;
5278  switch (predicate)
5279  {
5280  case SIMDComparison::equal:
5281  mask = (left == right);
5282  break;
5284  mask = (left != right);
5285  break;
5287  mask = (left < right);
5288  break;
5290  mask = (left <= right);
5291  break;
5293  mask = (left > right);
5294  break;
5296  mask = (left >= right);
5297  break;
5298  }
5299 
5300  return mask ? true_value : false_value;
5301 }
5302 
5303 
5308 template <SIMDComparison predicate, typename Number>
5311  const VectorizedArray<Number, 1> &right,
5312  const VectorizedArray<Number, 1> &true_value,
5313  const VectorizedArray<Number, 1> &false_value)
5314 {
5316  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5317  right.data,
5318  true_value.data,
5319  false_value.data);
5320  return result;
5321 }
5322 
5325 #ifndef DOXYGEN
5326 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5327 
5328 template <SIMDComparison predicate>
5331  const VectorizedArray<float, 16> &right,
5332  const VectorizedArray<float, 16> &true_values,
5333  const VectorizedArray<float, 16> &false_values)
5334 {
5335  const __mmask16 mask =
5336  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5338  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5339  return result;
5340 }
5341 
5342 
5343 
5344 template <SIMDComparison predicate>
5347  const VectorizedArray<double, 8> &right,
5348  const VectorizedArray<double, 8> &true_values,
5349  const VectorizedArray<double, 8> &false_values)
5350 {
5351  const __mmask16 mask =
5352  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5354  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5355  return result;
5356 }
5357 
5358 # endif
5359 
5360 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5361 
5362 template <SIMDComparison predicate>
5365  const VectorizedArray<float, 8> &right,
5366  const VectorizedArray<float, 8> &true_values,
5367  const VectorizedArray<float, 8> &false_values)
5368 {
5369  const auto mask =
5370  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5371 
5373  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
5374  return result;
5375 }
5376 
5377 
5378 template <SIMDComparison predicate>
5381  const VectorizedArray<double, 4> &right,
5382  const VectorizedArray<double, 4> &true_values,
5383  const VectorizedArray<double, 4> &false_values)
5384 {
5385  const auto mask =
5386  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5387 
5389  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
5390  return result;
5391 }
5392 
5393 # endif
5394 
5395 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5396 
5397 template <SIMDComparison predicate>
5400  const VectorizedArray<float, 4> &right,
5401  const VectorizedArray<float, 4> &true_values,
5402  const VectorizedArray<float, 4> &false_values)
5403 {
5404  __m128 mask;
5405  switch (predicate)
5406  {
5407  case SIMDComparison::equal:
5408  mask = _mm_cmpeq_ps(left.data, right.data);
5409  break;
5411  mask = _mm_cmpneq_ps(left.data, right.data);
5412  break;
5414  mask = _mm_cmplt_ps(left.data, right.data);
5415  break;
5417  mask = _mm_cmple_ps(left.data, right.data);
5418  break;
5420  mask = _mm_cmpgt_ps(left.data, right.data);
5421  break;
5423  mask = _mm_cmpge_ps(left.data, right.data);
5424  break;
5425  }
5426 
5428  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5429  _mm_andnot_ps(mask, false_values.data));
5430 
5431  return result;
5432 }
5433 
5434 
5435 template <SIMDComparison predicate>
5438  const VectorizedArray<double, 2> &right,
5439  const VectorizedArray<double, 2> &true_values,
5440  const VectorizedArray<double, 2> &false_values)
5441 {
5442  __m128d mask;
5443  switch (predicate)
5444  {
5445  case SIMDComparison::equal:
5446  mask = _mm_cmpeq_pd(left.data, right.data);
5447  break;
5449  mask = _mm_cmpneq_pd(left.data, right.data);
5450  break;
5452  mask = _mm_cmplt_pd(left.data, right.data);
5453  break;
5455  mask = _mm_cmple_pd(left.data, right.data);
5456  break;
5458  mask = _mm_cmpgt_pd(left.data, right.data);
5459  break;
5461  mask = _mm_cmpge_pd(left.data, right.data);
5462  break;
5463  }
5464 
5466  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5467  _mm_andnot_pd(mask, false_values.data));
5468 
5469  return result;
5470 }
5471 
5472 # endif
5473 #endif // DOXYGEN
5474 
5475 
5476 namespace internal
5477 {
5478  template <typename T>
5480  {
5481  using value_type = T;
5482  static constexpr std::size_t width = 1;
5483 
5484  static T &
5485  get(T &value, unsigned int c)
5486  {
5487  AssertDimension(c, 0);
5488  (void)c;
5489 
5490  return value;
5491  }
5492 
5493  static const T &
5494  get(const T &value, unsigned int c)
5495  {
5496  AssertDimension(c, 0);
5497  (void)c;
5498 
5499  return value;
5500  }
5501  };
5502 
5503  template <typename T, std::size_t width_>
5505  {
5506  using value_type = T;
5507  static constexpr std::size_t width = width_;
5508 
5509  static T &
5511  {
5512  AssertIndexRange(c, width_);
5513 
5514  return values[c];
5515  }
5516 
5517  static const T &
5518  get(const VectorizedArray<T, width_> &values, unsigned int c)
5519  {
5520  AssertIndexRange(c, width_);
5521 
5522  return values[c];
5523  }
5524  };
5525 } // namespace internal
5526 
5527 
5529 
5536 namespace std
5537 {
5545  template <typename Number, std::size_t width>
5546  inline ::VectorizedArray<Number, width>
5547  sin(const ::VectorizedArray<Number, width> &x)
5548  {
5549  // put values in an array and later read in that array with an unaligned
5550  // read. This should save some instructions as compared to directly
5551  // setting the individual elements and also circumvents a compiler
5552  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5553  // from April 2014, topic "matrix_free/step-48 Test").
5555  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5556  ++i)
5557  values[i] = std::sin(x[i]);
5559  out.load(&values[0]);
5560  return out;
5561  }
5562 
5563 
5564 
5572  template <typename Number, std::size_t width>
5573  inline ::VectorizedArray<Number, width>
5574  cos(const ::VectorizedArray<Number, width> &x)
5575  {
5577  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5578  ++i)
5579  values[i] = std::cos(x[i]);
5581  out.load(&values[0]);
5582  return out;
5583  }
5584 
5585 
5586 
5594  template <typename Number, std::size_t width>
5595  inline ::VectorizedArray<Number, width>
5596  tan(const ::VectorizedArray<Number, width> &x)
5597  {
5599  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5600  ++i)
5601  values[i] = std::tan(x[i]);
5603  out.load(&values[0]);
5604  return out;
5605  }
5606 
5607 
5608 
5616  template <typename Number, std::size_t width>
5617  inline ::VectorizedArray<Number, width>
5618  exp(const ::VectorizedArray<Number, width> &x)
5619  {
5621  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5622  ++i)
5623  values[i] = std::exp(x[i]);
5625  out.load(&values[0]);
5626  return out;
5627  }
5628 
5629 
5630 
5638  template <typename Number, std::size_t width>
5639  inline ::VectorizedArray<Number, width>
5640  log(const ::VectorizedArray<Number, width> &x)
5641  {
5643  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5644  ++i)
5645  values[i] = std::log(x[i]);
5647  out.load(&values[0]);
5648  return out;
5649  }
5650 
5651 
5652 
5660  template <typename Number, std::size_t width>
5661  inline ::VectorizedArray<Number, width>
5662  sqrt(const ::VectorizedArray<Number, width> &x)
5663  {
5664  return x.get_sqrt();
5665  }
5666 
5667 
5668 
5676  template <typename Number, std::size_t width>
5677  inline ::VectorizedArray<Number, width>
5678  pow(const ::VectorizedArray<Number, width> &x, const Number p)
5679  {
5681  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5682  ++i)
5683  values[i] = std::pow(x[i], p);
5685  out.load(&values[0]);
5686  return out;
5687  }
5688 
5689 
5690 
5699  template <typename Number, std::size_t width>
5700  inline ::VectorizedArray<Number, width>
5701  pow(const ::VectorizedArray<Number, width> &x,
5702  const ::VectorizedArray<Number, width> &p)
5703  {
5705  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5706  ++i)
5707  values[i] = std::pow(x[i], p[i]);
5709  out.load(&values[0]);
5710  return out;
5711  }
5712 
5713 
5714 
5722  template <typename Number, std::size_t width>
5723  inline ::VectorizedArray<Number, width>
5724  abs(const ::VectorizedArray<Number, width> &x)
5725  {
5726  return x.get_abs();
5727  }
5728 
5729 
5730 
5738  template <typename Number, std::size_t width>
5739  inline ::VectorizedArray<Number, width>
5740  max(const ::VectorizedArray<Number, width> &x,
5741  const ::VectorizedArray<Number, width> &y)
5742  {
5743  return x.get_max(y);
5744  }
5745 
5746 
5747 
5755  template <typename Number, std::size_t width>
5756  inline ::VectorizedArray<Number, width>
5757  min(const ::VectorizedArray<Number, width> &x,
5758  const ::VectorizedArray<Number, width> &y)
5759  {
5760  return x.get_min(y);
5761  }
5762 
5763 
5764 
5768  template <class T>
5769  struct iterator_traits<::VectorizedArrayIterator<T>>
5770  {
5771  using iterator_category = random_access_iterator_tag;
5772  using value_type = typename T::value_type;
5773  using difference_type = std::ptrdiff_t;
5774  };
5775 
5776 } // namespace std
5777 
5778 #endif
OutputOperator< VectorType > & operator<<(OutputOperator< VectorType > &out, unsigned int step)
Definition: operator.h:165
VectorizedArrayBase()=default
VectorizedArrayIterator< const T > begin() const
VectorizedArrayIterator< const T > end() const
VectorizedArrayIterator< T > end()
VectorizedArrayIterator< T > begin()
static constexpr std::size_t size()
VectorizedArrayBase(const std::initializer_list< U > &list)
VectorizedArrayIterator< T > operator+(const std::size_t &offset) const
VectorizedArrayIterator< T > & operator--()
std::enable_if_t<!std::is_same< U, const U >::value, typename T::value_type > & operator*()
VectorizedArrayIterator< T > & operator=(const VectorizedArrayIterator< T > &other)=default
std::ptrdiff_t operator-(const VectorizedArrayIterator< T > &other) const
bool operator==(const VectorizedArrayIterator< T > &other) const
VectorizedArrayIterator(T &data, const std::size_t lane)
VectorizedArrayIterator< T > & operator+=(const std::size_t offset)
VectorizedArrayIterator< T > & operator++()
bool operator!=(const VectorizedArrayIterator< T > &other) const
const T::value_type & operator*() const
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u)
VectorizedArray< float, width > operator+(const VectorizedArray< float, width > &v, const double u)
void gather(const Number *base_ptr, const unsigned int *offsets)
void vectorized_load_and_transpose(const unsigned int n_entries, const Number *in, const unsigned int *offsets, VectorizedArray< Number, width > *out)
VectorizedArray & operator=(const Number scalar) &&=delete
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArrayType make_vectorized_array(const typename VectorizedArrayType::value_type &u)
VectorizedArray & operator+=(const VectorizedArray &vec)
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray get_abs() const
VectorizedArray< float, width > operator/(const VectorizedArray< float, width > &v, const double u)
VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &y)
VectorizedArray< Number, width > log(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > operator*(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray< Number, width > exp(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray< float, width > operator-(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< Number, width > operator+(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u)
VectorizedArray()=default
bool operator==(const VectorizedArray< Number, width > &lhs, const VectorizedArray< Number, width > &rhs)
VectorizedArray< Number, width > tan(const ::VectorizedArray< Number, width > &x)
VectorizedArray(const Number scalar)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< float, width > operator*(const VectorizedArray< float, width > &v, const double u)
VectorizedArray get_max(const VectorizedArray &other) const
VectorizedArray & operator/=(const VectorizedArray &vec)
VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &y)
VectorizedArray get_min(const VectorizedArray &other) const
VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &x, const Number p)
VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &p)
VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &x)
void store(OtherNumber *ptr) const
VectorizedArray< float, width > operator-(const VectorizedArray< float, width > &v, const double u)
Number & operator[](const unsigned int comp)
void load(const OtherNumber *ptr)
void scatter(const unsigned int *offsets, Number *base_ptr) const
VectorizedArray & operator*=(const VectorizedArray &vec)
VectorizedArray< Number, width > operator-(const Number &u, const VectorizedArray< Number, width > &v)
const Number & operator[](const unsigned int comp) const
VectorizedArray< Number, width > operator*(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< float, width > operator+(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< Number, width > operator*(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray get_sqrt() const
VectorizedArray< Number, width > operator/(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > make_vectorized_array(const Number &u)
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &x)
VectorizedArray & operator=(const Number scalar) &
VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &x)
void streaming_store(Number *ptr) const
VectorizedArray(const std::initializer_list< U > &list)
void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, const VectorizedArray< Number, width > *in, const unsigned int *offsets, Number *out)
VectorizedArray< float, width > operator/(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< float, width > operator*(const double u, const VectorizedArray< float, width > &v)
VectorizedArray & operator-=(const VectorizedArray &vec)
#define DEAL_II_ALWAYS_INLINE
Definition: config.h:108
#define DEAL_II_OPENMP_SIMD_PRAGMA
Definition: config.h:140
#define DEAL_II_NAMESPACE_OPEN
Definition: config.h:474
#define DEAL_II_NAMESPACE_CLOSE
Definition: config.h:475
const unsigned int v0
Definition: grid_tools.cc:1062
const unsigned int v1
Definition: grid_tools.cc:1062
__global__ void vec_add(Number *val, const Number a, const size_type N)
#define Assert(cond, exc)
Definition: exceptions.h:1586
#define AssertDimension(dim1, dim2)
Definition: exceptions.h:1759
#define AssertIndexRange(index, range)
Definition: exceptions.h:1827
static ::ExceptionBase & ExcMessage(std::string arg1)
Expression fabs(const Expression &x)
static const types::blas_int zero
static const char T
::VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &, const ::VectorizedArray< Number, width > &)
::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
static T & get(VectorizedArray< T, width_ > &values, unsigned int c)
static const T & get(const VectorizedArray< T, width_ > &values, unsigned int c)
static const T & get(const T &value, unsigned int c)
static constexpr std::size_t width
static T & get(T &value, unsigned int c)
void gather(VectorizedArray< Number, width > &out, const std::array< Number *, width > &ptrs, const unsigned int offset)
SIMDComparison
Number compare_and_apply_mask(const Number &left, const Number &right, const Number &true_value, const Number &false_value)