vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these features are not
42 // only present in the compilation unit but also working properly.
43 
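// A minimal sketch of what this means for user code (illustrative only; the
// helper below is hypothetical and not part of deal.II): with 512 bits one
// SIMD register holds 512/64 = 8 doubles, with 256 bits 4, with 128 bits 2,
// and without vectorization a "register" holds a single double.
//
//   unsigned int doubles_per_simd_register()
//   {
//     return DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 ?
//              DEAL_II_VECTORIZATION_WIDTH_IN_BITS / 64 :
//              1;
//   }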
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX but a user project does not
49 // add -march=native or similar flags, making it fall back to SSE2. If this
50 // goes undetected, it leads to very strange errors because the size of data
51 // structures differs between the compiled deal.II code sitting in
52 // libdeal_II.so and the user code.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
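// A quick compile-time check one can place in user code to catch such a
// mismatch early; this is only a sketch and assumes the default-width
// VectorizedArray<double> defined further down in this header:
//
//   static_assert(VectorizedArray<double>::size() ==
//                   (DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 ?
//                      DEAL_II_VECTORIZATION_WIDTH_IN_BITS / 64 :
//                      1),
//                 "User code and deal.II disagree on the SIMD width.");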
62 # if defined(_MSC_VER)
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # else
73 # include <x86intrin.h>
74 # endif
75 
76 #endif
77 
78 
79 DEAL_II_NAMESPACE_OPEN
80 
81 
82 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
83 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
84 
85 template <typename Number, std::size_t width>
86 struct EnableIfScalar<VectorizedArray<Number, width>>
87 {
 88   using type = VectorizedArray<Number, width>;
 89 };
90 
91 
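// Thanks to this trait, a VectorizedArray can be used wherever deal.II
// expects a scalar Number type. A minimal sketch, assuming
// <deal.II/base/tensor.h> is also included:
//
//   Tensor<1, 3, VectorizedArray<double>> grad;
//   grad[0] = make_vectorized_array(1.5); // same value in every SIMD lane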
92 
96 template <typename T>
 97 class VectorizedArrayIterator
 98 {
99 public:
106  VectorizedArrayIterator(T &data, const std::size_t lane)
107  : data(&data)
108  , lane(lane)
109  {}
110 
114  bool
 115  operator==(const VectorizedArrayIterator<T> &other) const
 116  {
117  Assert(this->data == other.data,
118  ExcMessage(
119  "You are trying to compare iterators into different arrays."));
120  return this->lane == other.lane;
121  }
122 
126  bool
 127  operator!=(const VectorizedArrayIterator<T> &other) const
 128  {
129  Assert(this->data == other.data,
130  ExcMessage(
131  "You are trying to compare iterators into different arrays."));
132  return this->lane != other.lane;
133  }
134 
 138  VectorizedArrayIterator<T> &
 139  operator=(const VectorizedArrayIterator<T> &other) = default;
140 
145  const typename T::value_type &operator*() const
146  {
147  AssertIndexRange(lane, T::size());
148  return (*data)[lane];
149  }
150 
151 
156  template <typename U = T>
157  typename std::enable_if<!std::is_same<U, const U>::value,
158  typename T::value_type>::type &
 159  operator*()
 160  {
161  AssertIndexRange(lane, T::size());
162  return (*data)[lane];
163  }
164 
 170  VectorizedArrayIterator<T> &
 171  operator++()
 172  {
173  AssertIndexRange(lane + 1, T::size() + 1);
174  lane++;
175  return *this;
176  }
177 
 182  VectorizedArrayIterator<T> &
 183  operator+=(const std::size_t offset)
184  {
185  AssertIndexRange(lane + offset, T::size() + 1);
186  lane += offset;
187  return *this;
188  }
189 
 195  VectorizedArrayIterator<T> &
 196  operator--()
 197  {
198  Assert(
199  lane > 0,
200  ExcMessage(
201  "You can't decrement an iterator that is already at the beginning of the range."));
202  --lane;
203  return *this;
204  }
205 
 209  VectorizedArrayIterator<T>
 210  operator+(const std::size_t &offset) const
211  {
212  AssertIndexRange(lane + offset, T::size() + 1);
213  return VectorizedArrayIterator<T>(*data, lane + offset);
214  }
215 
219  std::ptrdiff_t
 220  operator-(const VectorizedArrayIterator<T> &other) const
 221  {
222  return static_cast<std::ptrdiff_t>(lane) -
223  static_cast<ptrdiff_t>(other.lane);
224  }
225 
226 private:
230  T *data;
231 
235  std::size_t lane;
236 };
237 
238 
239 
249 template <typename T, std::size_t width>
 250 class VectorizedArrayBase
 251 {
252 public:
256  static constexpr std::size_t
 257  size()
 258  {
259  return width;
260  }
261 
 265  VectorizedArrayIterator<T>
 266  begin()
 267  {
268  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
269  }
270 
 275  VectorizedArrayIterator<const T>
 276  begin() const
277  {
278  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
279  }
280 
 284  VectorizedArrayIterator<T>
 285  end()
286  {
287  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
288  }
289 
 294  VectorizedArrayIterator<const T>
 295  end() const
296  {
297  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
298  width);
299  }
300 };
301 
302 
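// The begin()/end() pair above makes every class derived from
// VectorizedArrayBase usable in range-based for loops over its lanes. A
// minimal sketch:
//
//   VectorizedArray<double> v = 3.0;
//   double sum = 0.;
//   for (const double lane : v) // visits VectorizedArray<double>::size() lanes
//     sum += lane;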
303 
388 template <typename Number, std::size_t width>
 389 class VectorizedArray
 390   : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
391 {
392 public:
396  using value_type = Number;
397 
405  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 1;
406 
407  static_assert(width == 1,
408  "You specified an illegal width that is not supported.");
409 
414  VectorizedArray() = default;
415 
419  VectorizedArray(const Number scalar)
420  {
421  this->operator=(scalar);
422  }
423 
 428  VectorizedArray &
 429  operator=(const Number scalar)
430  {
431  data = scalar;
432  return *this;
433  }
434 
440  Number &operator[](const unsigned int comp)
441  {
442  (void)comp;
443  AssertIndexRange(comp, 1);
444  return data;
445  }
446 
452  const Number &operator[](const unsigned int comp) const
453  {
454  (void)comp;
455  AssertIndexRange(comp, 1);
456  return data;
457  }
458 
 463  VectorizedArray &
 464  operator+=(const VectorizedArray &vec)
 465  {
466  data += vec.data;
467  return *this;
468  }
469 
 474  VectorizedArray &
 475  operator-=(const VectorizedArray &vec)
 476  {
477  data -= vec.data;
478  return *this;
479  }
480 
 485  VectorizedArray &
 486  operator*=(const VectorizedArray &vec)
 487  {
488  data *= vec.data;
489  return *this;
490  }
491 
 496  VectorizedArray &
 497  operator/=(const VectorizedArray &vec)
 498  {
499  data /= vec.data;
500  return *this;
501  }
502 
510  void
511  load(const Number *ptr)
512  {
513  data = *ptr;
514  }
515 
523  void
524  store(Number *ptr) const
525  {
526  *ptr = data;
527  }
528 
576  void
577  streaming_store(Number *ptr) const
578  {
579  *ptr = data;
580  }
581 
595  void
596  gather(const Number *base_ptr, const unsigned int *offsets)
597  {
598  data = base_ptr[offsets[0]];
599  }
600 
614  void
615  scatter(const unsigned int *offsets, Number *base_ptr) const
616  {
617  base_ptr[offsets[0]] = data;
618  }
619 
625  Number data;
626 
627 private:
 633  VectorizedArray
 634  get_sqrt() const
635  {
636  VectorizedArray res;
637  res.data = std::sqrt(data);
638  return res;
639  }
640 
 646  VectorizedArray
 647  get_abs() const
648  {
649  VectorizedArray res;
650  res.data = std::fabs(data);
651  return res;
652  }
653 
 659  VectorizedArray
 660  get_max(const VectorizedArray &other) const
661  {
662  VectorizedArray res;
663  res.data = std::max(data, other.data);
664  return res;
665  }
666 
 672  VectorizedArray
 673  get_min(const VectorizedArray &other) const
674  {
675  VectorizedArray res;
676  res.data = std::min(data, other.data);
677  return res;
678  }
679 
680  // Make a few functions friends.
 681  template <typename Number2, std::size_t width2>
 682  friend VectorizedArray<Number2, width2>
 683  std::sqrt(const VectorizedArray<Number2, width2> &);
 684  template <typename Number2, std::size_t width2>
 685  friend VectorizedArray<Number2, width2>
 686  std::abs(const VectorizedArray<Number2, width2> &);
 687  template <typename Number2, std::size_t width2>
 688  friend VectorizedArray<Number2, width2>
 689  std::max(const VectorizedArray<Number2, width2> &,
 690           const VectorizedArray<Number2, width2> &);
 691  template <typename Number2, std::size_t width2>
 692  friend VectorizedArray<Number2, width2>
 693  std::min(const VectorizedArray<Number2, width2> &,
 694           const VectorizedArray<Number2, width2> &);
 695 };
696 
697 
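// A minimal usage sketch of the interface defined above; the wider
// specializations further down in this file provide exactly the same
// interface, so such code does not change when more lanes are available:
//
//   double                     a[] = {1., 2., 3., 4.};
//   VectorizedArray<double, 1> x, y;
//   x.load(a);  // fill the (single) lane from a[0]
//   y = 2.0;    // broadcast a scalar to all lanes
//   x += y;     // lane-wise addition
//   x.store(a); // write the lane(s) back to a[0]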
698 
699 // We need to have a separate declaration for static const members
700 template <typename Number, std::size_t width>
 701 const unsigned int VectorizedArray<Number, width>::n_array_elements;
 702 
703 
704 
709 
710 
717 template <typename Number,
718  std::size_t width =
 719    internal::VectorizedArrayWidthSpecifier<Number>::max_width>
 720 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
 721  make_vectorized_array(const Number &u)
722 {
 723  VectorizedArray<Number, width> result = u;
 724  return result;
725 }
726 
727 
728 
735 template <typename VectorizedArrayType>
736 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
737  make_vectorized_array(const typename VectorizedArrayType::value_type &u)
738 {
739  static_assert(
740  std::is_same<VectorizedArrayType,
741  VectorizedArray<typename VectorizedArrayType::value_type,
742  VectorizedArrayType::size()>>::value,
743  "VectorizedArrayType is not a VectorizedArray.");
744 
745  VectorizedArrayType result = u;
746  return result;
747 }
748 
749 
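// A minimal sketch of the two overloads above:
//
//   // let deal.II pick the natural SIMD width for double on this platform
//   const auto v = make_vectorized_array(1.0);
//
//   // or broadcast into an explicitly specified vectorized type
//   const auto w = make_vectorized_array<VectorizedArray<float>>(1.0f);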
750 
762 template <typename Number, std::size_t width>
763 inline DEAL_II_ALWAYS_INLINE void
 764 gather(VectorizedArray<Number, width> &   out,
 765        const std::array<Number *, width> &ptrs,
766  const unsigned int offset)
767 {
768  for (unsigned int v = 0; v < width; v++)
769  out.data[v] = ptrs[v][offset];
770 }
771 
772 
773 
799 template <typename Number, std::size_t width>
800 inline DEAL_II_ALWAYS_INLINE void
801 vectorized_load_and_transpose(const unsigned int n_entries,
802  const Number * in,
803  const unsigned int * offsets,
 804  VectorizedArray<Number, width> *out)
 805 {
806  for (unsigned int i = 0; i < n_entries; ++i)
807  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
808  out[i][v] = in[offsets[v] + i];
809 }
810 
811 
823 template <typename Number, std::size_t width>
824 inline DEAL_II_ALWAYS_INLINE void
825 vectorized_load_and_transpose(const unsigned int n_entries,
826  const std::array<Number *, width> &in,
 827  VectorizedArray<Number, width> *out)
 828 {
829  for (unsigned int i = 0; i < n_entries; ++i)
830  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
831  out[i][v] = in[v][i];
832 }
833 
834 
835 
874 template <typename Number, std::size_t width>
875 inline DEAL_II_ALWAYS_INLINE void
876 vectorized_transpose_and_store(const bool add_into,
877  const unsigned int n_entries,
 878  const VectorizedArray<Number, width> *in,
 879  const unsigned int * offsets,
880  Number * out)
881 {
882  if (add_into)
883  for (unsigned int i = 0; i < n_entries; ++i)
884  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
885  out[offsets[v] + i] += in[i][v];
886  else
887  for (unsigned int i = 0; i < n_entries; ++i)
888  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
889  out[offsets[v] + i] = in[i][v];
890 }
891 
892 
904 template <typename Number, std::size_t width>
905 inline DEAL_II_ALWAYS_INLINE void
906 vectorized_transpose_and_store(const bool add_into,
907  const unsigned int n_entries,
 908  const VectorizedArray<Number, width> *in,
 909  std::array<Number *, width> & out)
910 {
911  if (add_into)
912  for (unsigned int i = 0; i < n_entries; ++i)
913  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
914  out[v][i] += in[i][v];
915  else
916  for (unsigned int i = 0; i < n_entries; ++i)
917  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
918  out[v][i] = in[i][v];
919 }
920 
921 
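// A minimal round-trip sketch for the two helpers above (all names are
// illustrative): values stored contiguously per lane, starting at
// offsets[v], are transposed into vectorized form, modified, and written
// back.
//
//   constexpr std::size_t   n_lanes   = VectorizedArray<double>::size();
//   const unsigned int      n_entries = 3;
//   std::vector<double>     storage(n_lanes * n_entries, 1.);
//   unsigned int            offsets[n_lanes];
//   for (unsigned int v = 0; v < n_lanes; ++v)
//     offsets[v] = v * n_entries;
//
//   VectorizedArray<double> tmp[3];
//   vectorized_load_and_transpose(n_entries, storage.data(), offsets, tmp);
//   for (unsigned int i = 0; i < n_entries; ++i)
//     tmp[i] *= 2.;
//   vectorized_transpose_and_store(false, n_entries, tmp, offsets, storage.data());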
923 
924 #ifndef DOXYGEN
925 
926 // for safety, also check that __AVX512F__ is defined in case the user manually
927 // set some conflicting compile flags which prevent compilation
928 
929 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
930 
934 template <>
935 class VectorizedArray<double, 8>
936  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
937 {
938 public:
942  using value_type = double;
943 
949  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 8;
950 
955  VectorizedArray() = default;
956 
960  VectorizedArray(const double scalar)
961  {
962  this->operator=(scalar);
963  }
964 
 969  VectorizedArray &
 970  operator=(const double x)
971  {
972  data = _mm512_set1_pd(x);
973  return *this;
974  }
975 
980  double &operator[](const unsigned int comp)
981  {
982  AssertIndexRange(comp, 8);
983  return *(reinterpret_cast<double *>(&data) + comp);
984  }
985 
990  const double &operator[](const unsigned int comp) const
991  {
992  AssertIndexRange(comp, 8);
993  return *(reinterpret_cast<const double *>(&data) + comp);
994  }
995 
1000  VectorizedArray &
1001  operator+=(const VectorizedArray &vec)
1002  {
1003  // if the compiler supports vector arithmetic, we can simply use +=
1004  // operator on the given data type. this allows the compiler to combine
1005  // additions with multiplication (fused multiply-add) if those
1006  // instructions are available. Otherwise, we need to use the built-in
1007  // intrinsic command for __m512d
1008 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1009  data += vec.data;
1010 # else
1011  data = _mm512_add_pd(data, vec.data);
1012 # endif
1013  return *this;
1014  }
1015 
1020  VectorizedArray &
1021  operator-=(const VectorizedArray &vec)
1022  {
1023 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1024  data -= vec.data;
1025 # else
1026  data = _mm512_sub_pd(data, vec.data);
1027 # endif
1028  return *this;
1029  }
1034  VectorizedArray &
1035  operator*=(const VectorizedArray &vec)
1036  {
1037 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1038  data *= vec.data;
1039 # else
1040  data = _mm512_mul_pd(data, vec.data);
1041 # endif
1042  return *this;
1043  }
1044 
1049  VectorizedArray &
1050  operator/=(const VectorizedArray &vec)
1051  {
1052 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1053  data /= vec.data;
1054 # else
1055  data = _mm512_div_pd(data, vec.data);
1056 # endif
1057  return *this;
1058  }
1059 
1066  void
1067  load(const double *ptr)
1068  {
1069  data = _mm512_loadu_pd(ptr);
1070  }
1071 
1079  void
1080  store(double *ptr) const
1081  {
1082  _mm512_storeu_pd(ptr, data);
1083  }
1084 
1089  void
1090  streaming_store(double *ptr) const
1091  {
1092  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1093  ExcMessage("Memory not aligned"));
1094  _mm512_stream_pd(ptr, data);
1095  }
1096 
1110  void
1111  gather(const double *base_ptr, const unsigned int *offsets)
1112  {
1113  // unfortunately, there does not appear to be a 256 bit integer load, so
1114  // do it by some reinterpret casts here. this is allowed because the Intel
1115  // API allows aliasing between different vector types.
1116  const __m256 index_val =
1117  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1118  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1119  data = _mm512_i32gather_pd(index, base_ptr, 8);
1120  }
1121 
1135  void
1136  scatter(const unsigned int *offsets, double *base_ptr) const
1137  {
1138  for (unsigned int i = 0; i < 8; ++i)
1139  for (unsigned int j = i + 1; j < 8; ++j)
1140  Assert(offsets[i] != offsets[j],
1141  ExcMessage("Result of scatter undefined if two offset elements"
1142  " point to the same position"));
1143 
1144  // unfortunately, there does not appear to be a 256 bit integer load, so
1145  // do it by some reinterpret casts here. this is allowed because the Intel
1146  // API allows aliasing between different vector types.
1147  const __m256 index_val =
1148  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1149  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1150  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1151  }
1152 
1158  __m512d data;
1159 
1160 private:
 1166  VectorizedArray
 1167  get_sqrt() const
1168  {
1169  VectorizedArray res;
1170  res.data = _mm512_sqrt_pd(data);
1171  return res;
1172  }
1173 
 1179  VectorizedArray
 1180  get_abs() const
1181  {
1182  // to compute the absolute value, perform bitwise andnot with -0. This
1183  // will leave all value and exponent bits unchanged but force the sign
1184  // value to +. Since there is no andnot for AVX512, we interpret the data
1185  // as 64 bit integers and do the andnot on those types (note that andnot
1186  // is a bitwise operation so the data type does not matter)
1187  __m512d mask = _mm512_set1_pd(-0.);
1188  VectorizedArray res;
1189  res.data = reinterpret_cast<__m512d>(
1190  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1191  reinterpret_cast<__m512i>(data)));
1192  return res;
1193  }
1194 
 1200  VectorizedArray
 1201  get_max(const VectorizedArray &other) const
1202  {
1203  VectorizedArray res;
1204  res.data = _mm512_max_pd(data, other.data);
1205  return res;
1206  }
1207 
 1213  VectorizedArray
 1214  get_min(const VectorizedArray &other) const
1215  {
1216  VectorizedArray res;
1217  res.data = _mm512_min_pd(data, other.data);
1218  return res;
1219  }
1220 
1221  // Make a few functions friends.
 1222  template <typename Number2, std::size_t width2>
 1223  friend VectorizedArray<Number2, width2>
 1224  std::sqrt(const VectorizedArray<Number2, width2> &);
 1225  template <typename Number2, std::size_t width2>
 1226  friend VectorizedArray<Number2, width2>
 1227  std::abs(const VectorizedArray<Number2, width2> &);
 1228  template <typename Number2, std::size_t width2>
 1229  friend VectorizedArray<Number2, width2>
 1230  std::max(const VectorizedArray<Number2, width2> &,
 1231           const VectorizedArray<Number2, width2> &);
 1232  template <typename Number2, std::size_t width2>
 1233  friend VectorizedArray<Number2, width2>
 1234  std::min(const VectorizedArray<Number2, width2> &,
 1235           const VectorizedArray<Number2, width2> &);
 1236 };
1237 
1238 
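// A minimal sketch of the gather()/scatter() members above, collecting eight
// strided doubles into one SIMD register and writing them back (the data
// layout is illustrative):
//
//   double       data[64] = {}; // e.g. an 8x8 row-major matrix
//   unsigned int offsets[8];
//   for (unsigned int v = 0; v < 8; ++v)
//     offsets[v] = 8 * v; // first entry of row v
//
//   VectorizedArray<double, 8> column;
//   column.gather(data, offsets);  // load column 0 of the matrix
//   column += 1.;
//   column.scatter(offsets, data); // write it back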
1239 
1243 template <>
1244 inline DEAL_II_ALWAYS_INLINE void
1245 vectorized_load_and_transpose(const unsigned int n_entries,
1246  const double * in,
1247  const unsigned int * offsets,
 1248  VectorizedArray<double, 8> *out)
 1249 {
1250  // do not do full transpose because the code is long and will most
1251  // likely not pay off because many processors have two load units
1252  // (for the top 8 instructions) but only 1 permute unit (for the 8
1253  // shuffle/unpack instructions). rather start the transposition on the
1254  // vectorized array of half the size with 256 bits
1255  const unsigned int n_chunks = n_entries / 4;
1256  for (unsigned int i = 0; i < n_chunks; ++i)
1257  {
1258  __m512d t0, t1, t2, t3 = {};
1259 
1260  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1261  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1262  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1263  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1264  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1265  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1266  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1267  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1268 
1269  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1270  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1271  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1272  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1273  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1274  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1275  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1276  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1277  }
1278  // remainder loop of work that does not divide by 4
1279  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1280  out[i].gather(in + i, offsets);
1281 }
1282 
1283 
1284 
1288 template <>
1289 inline DEAL_II_ALWAYS_INLINE void
1290 vectorized_load_and_transpose(const unsigned int n_entries,
1291  const std::array<double *, 8> &in,
 1292  VectorizedArray<double, 8> *out)
 1293 {
1294  const unsigned int n_chunks = n_entries / 4;
1295  for (unsigned int i = 0; i < n_chunks; ++i)
1296  {
1297  __m512d t0, t1, t2, t3 = {};
1298 
1299  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1300  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1301  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1302  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1303  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1304  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1305  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1306  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1307 
1308  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1309  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1310  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1311  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1312  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1313  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1314  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1315  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1316  }
1317 
1318  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1319  gather(out[i], in, i);
1320 }
1321 
1322 
1323 
1327 template <>
1328 inline DEAL_II_ALWAYS_INLINE void
1329 vectorized_transpose_and_store(const bool add_into,
1330  const unsigned int n_entries,
1331  const VectorizedArray<double, 8> *in,
1332  const unsigned int * offsets,
1333  double * out)
1334 {
1335  // as for the load, we split the store operations into 256 bit units to
1336  // better balance between code size, shuffle instructions, and stores
1337  const unsigned int n_chunks = n_entries / 4;
1338  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1339  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1340  for (unsigned int i = 0; i < n_chunks; ++i)
1341  {
1342  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1343  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1344  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1345  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1346  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1347  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1348  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1349  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1350  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1351  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1352  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1353  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1354  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1355  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1356  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1357  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1358 
1359  // Cannot use the same store instructions in both paths of the 'if'
1360  // because the compiler cannot know that there is no aliasing
1361  // between pointers
1362  if (add_into)
1363  {
1364  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1365  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1366  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1367  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1368  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1369  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1370  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1371  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1372  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1373  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1374  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1375  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1376  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1377  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1378  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1379  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1380  }
1381  else
1382  {
1383  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1384  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1385  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1386  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1387  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1388  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1389  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1390  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1391  }
1392  }
1393 
1394  // remainder loop of work that does not divide by 4
1395  if (add_into)
1396  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1397  for (unsigned int v = 0; v < 8; ++v)
1398  out[offsets[v] + i] += in[i][v];
1399  else
1400  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1401  for (unsigned int v = 0; v < 8; ++v)
1402  out[offsets[v] + i] = in[i][v];
1403 }
1404 
1405 
1406 
1410 template <>
1411 inline DEAL_II_ALWAYS_INLINE void
1412 vectorized_transpose_and_store(const bool add_into,
1413  const unsigned int n_entries,
1414  const VectorizedArray<double, 8> *in,
1415  std::array<double *, 8> & out)
1416 {
1417  // see the comments in the vectorized_transpose_and_store above
1418 
1419  const unsigned int n_chunks = n_entries / 4;
1420  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1421  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1422  for (unsigned int i = 0; i < n_chunks; ++i)
1423  {
1424  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1425  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1426  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1427  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1428  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1429  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1430  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1431  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1432  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1433  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1434  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1435  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1436  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1437  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1438  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1439  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1440 
1441  if (add_into)
1442  {
1443  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1444  _mm256_storeu_pd(out[0] + 4 * i, res0);
1445  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1446  _mm256_storeu_pd(out[1] + 4 * i, res1);
1447  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1448  _mm256_storeu_pd(out[2] + 4 * i, res2);
1449  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1450  _mm256_storeu_pd(out[3] + 4 * i, res3);
1451  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1452  _mm256_storeu_pd(out[4] + 4 * i, res4);
1453  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1454  _mm256_storeu_pd(out[5] + 4 * i, res5);
1455  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1456  _mm256_storeu_pd(out[6] + 4 * i, res6);
1457  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1458  _mm256_storeu_pd(out[7] + 4 * i, res7);
1459  }
1460  else
1461  {
1462  _mm256_storeu_pd(out[0] + 4 * i, res0);
1463  _mm256_storeu_pd(out[1] + 4 * i, res1);
1464  _mm256_storeu_pd(out[2] + 4 * i, res2);
1465  _mm256_storeu_pd(out[3] + 4 * i, res3);
1466  _mm256_storeu_pd(out[4] + 4 * i, res4);
1467  _mm256_storeu_pd(out[5] + 4 * i, res5);
1468  _mm256_storeu_pd(out[6] + 4 * i, res6);
1469  _mm256_storeu_pd(out[7] + 4 * i, res7);
1470  }
1471  }
1472 
1473  if (add_into)
1474  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1475  for (unsigned int v = 0; v < 8; ++v)
1476  out[v][i] += in[i][v];
1477  else
1478  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1479  for (unsigned int v = 0; v < 8; ++v)
1480  out[v][i] = in[i][v];
1481 }
1482 
1483 
1484 
1488 template <>
1489 class VectorizedArray<float, 16>
1490  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
1491 {
1492 public:
1496  using value_type = float;
1497 
1503  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 16;
1504 
1509  VectorizedArray() = default;
1510 
1514  VectorizedArray(const float scalar)
1515  {
1516  this->operator=(scalar);
1517  }
1518 
1523  VectorizedArray &
1524  operator=(const float x)
1525  {
1526  data = _mm512_set1_ps(x);
1527  return *this;
1528  }
1529 
1534  float &operator[](const unsigned int comp)
1535  {
1536  AssertIndexRange(comp, 16);
1537  return *(reinterpret_cast<float *>(&data) + comp);
1538  }
1539 
1544  const float &operator[](const unsigned int comp) const
1545  {
1546  AssertIndexRange(comp, 16);
1547  return *(reinterpret_cast<const float *>(&data) + comp);
1548  }
1549 
1554  VectorizedArray &
1555  operator+=(const VectorizedArray &vec)
1556  {
1557  // if the compiler supports vector arithmetic, we can simply use +=
1558  // operator on the given data type. this allows the compiler to combine
1559  // additions with multiplication (fused multiply-add) if those
1560  // instructions are available. Otherwise, we need to use the built-in
 1561  // intrinsic command for __m512
1562 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1563  data += vec.data;
1564 # else
1565  data = _mm512_add_ps(data, vec.data);
1566 # endif
1567  return *this;
1568  }
1569 
1574  VectorizedArray &
1575  operator-=(const VectorizedArray &vec)
1576  {
1577 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1578  data -= vec.data;
1579 # else
1580  data = _mm512_sub_ps(data, vec.data);
1581 # endif
1582  return *this;
1583  }
1588  VectorizedArray &
1589  operator*=(const VectorizedArray &vec)
1590  {
1591 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1592  data *= vec.data;
1593 # else
1594  data = _mm512_mul_ps(data, vec.data);
1595 # endif
1596  return *this;
1597  }
1598 
1603  VectorizedArray &
1604  operator/=(const VectorizedArray &vec)
1605  {
1606 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1607  data /= vec.data;
1608 # else
1609  data = _mm512_div_ps(data, vec.data);
1610 # endif
1611  return *this;
1612  }
1613 
1620  void
1621  load(const float *ptr)
1622  {
1623  data = _mm512_loadu_ps(ptr);
1624  }
1625 
1633  void
1634  store(float *ptr) const
1635  {
1636  _mm512_storeu_ps(ptr, data);
1637  }
1638 
1643  void
1644  streaming_store(float *ptr) const
1645  {
1646  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1647  ExcMessage("Memory not aligned"));
1648  _mm512_stream_ps(ptr, data);
1649  }
1650 
1664  void
1665  gather(const float *base_ptr, const unsigned int *offsets)
1666  {
1667  // unfortunately, there does not appear to be a 512 bit integer load, so
1668  // do it by some reinterpret casts here. this is allowed because the Intel
1669  // API allows aliasing between different vector types.
1670  const __m512 index_val =
1671  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1672  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1673  data = _mm512_i32gather_ps(index, base_ptr, 4);
1674  }
1675 
1689  void
1690  scatter(const unsigned int *offsets, float *base_ptr) const
1691  {
1692  for (unsigned int i = 0; i < 16; ++i)
1693  for (unsigned int j = i + 1; j < 16; ++j)
1694  Assert(offsets[i] != offsets[j],
1695  ExcMessage("Result of scatter undefined if two offset elements"
1696  " point to the same position"));
1697 
1698  // unfortunately, there does not appear to be a 512 bit integer load, so
1699  // do it by some reinterpret casts here. this is allowed because the Intel
1700  // API allows aliasing between different vector types.
1701  const __m512 index_val =
1702  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1703  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1704  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1705  }
1706 
1712  __m512 data;
1713 
1714 private:
 1720  VectorizedArray
 1721  get_sqrt() const
1722  {
1723  VectorizedArray res;
1724  res.data = _mm512_sqrt_ps(data);
1725  return res;
1726  }
1727 
 1733  VectorizedArray
 1734  get_abs() const
1735  {
1736  // to compute the absolute value, perform bitwise andnot with -0. This
1737  // will leave all value and exponent bits unchanged but force the sign
1738  // value to +. Since there is no andnot for AVX512, we interpret the data
1739  // as 32 bit integers and do the andnot on those types (note that andnot
1740  // is a bitwise operation so the data type does not matter)
1741  __m512 mask = _mm512_set1_ps(-0.f);
1742  VectorizedArray res;
1743  res.data = reinterpret_cast<__m512>(
1744  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1745  reinterpret_cast<__m512i>(data)));
1746  return res;
1747  }
1748 
 1754  VectorizedArray
 1755  get_max(const VectorizedArray &other) const
1756  {
1757  VectorizedArray res;
1758  res.data = _mm512_max_ps(data, other.data);
1759  return res;
1760  }
1761 
 1767  VectorizedArray
 1768  get_min(const VectorizedArray &other) const
1769  {
1770  VectorizedArray res;
1771  res.data = _mm512_min_ps(data, other.data);
1772  return res;
1773  }
1774 
1775  // Make a few functions friends.
 1776  template <typename Number2, std::size_t width2>
 1777  friend VectorizedArray<Number2, width2>
 1778  std::sqrt(const VectorizedArray<Number2, width2> &);
 1779  template <typename Number2, std::size_t width2>
 1780  friend VectorizedArray<Number2, width2>
 1781  std::abs(const VectorizedArray<Number2, width2> &);
 1782  template <typename Number2, std::size_t width2>
 1783  friend VectorizedArray<Number2, width2>
 1784  std::max(const VectorizedArray<Number2, width2> &,
 1785           const VectorizedArray<Number2, width2> &);
 1786  template <typename Number2, std::size_t width2>
 1787  friend VectorizedArray<Number2, width2>
 1788  std::min(const VectorizedArray<Number2, width2> &,
 1789           const VectorizedArray<Number2, width2> &);
 1790 };
1791 
1792 
1793 
1797 template <>
1798 inline DEAL_II_ALWAYS_INLINE void
1799 vectorized_load_and_transpose(const unsigned int n_entries,
1800  const float * in,
1801  const unsigned int * offsets,
 1802  VectorizedArray<float, 16> *out)
 1803 {
1804  // Similar to the double case, we perform the work on smaller entities. In
1805  // this case, we start from 128 bit arrays and insert them into a full 512
1806  // bit index. This reduces the code size and register pressure because we do
1807  // shuffles on 4 numbers rather than 16.
1808  const unsigned int n_chunks = n_entries / 4;
1809 
1810  // To avoid warnings about uninitialized variables, need to initialize one
 1811  // variable to a pre-existing value in out, which will never get used in
1812  // the end. Keep the initialization outside the loop because of a bug in
1813  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
1814  // case t3 is initialized to zero (inside/outside of loop), see
1815  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1816  __m512 t0, t1, t2, t3;
1817  if (n_chunks > 0)
1818  t3 = out[0].data;
1819  for (unsigned int i = 0; i < n_chunks; ++i)
1820  {
1821  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1822  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1823  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1824  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1825  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1826  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1827  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1828  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1829  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1830  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1831  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1832  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1833  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1834  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1835  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1836  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1837 
1838  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1839  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1840  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1841  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1842 
1843  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1844  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1845  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1846  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1847  }
1848 
1849  // remainder loop of work that does not divide by 4
1850  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1851  out[i].gather(in + i, offsets);
1852 }
1853 
1854 
1855 
1859 template <>
1860 inline DEAL_II_ALWAYS_INLINE void
1861 vectorized_load_and_transpose(const unsigned int n_entries,
1862  const std::array<float *, 16> &in,
 1863  VectorizedArray<float, 16> *out)
 1864 {
1865  // see the comments in the vectorized_load_and_transpose above
1866 
1867  const unsigned int n_chunks = n_entries / 4;
1868 
1869  __m512 t0, t1, t2, t3;
1870  if (n_chunks > 0)
1871  t3 = out[0].data;
1872  for (unsigned int i = 0; i < n_chunks; ++i)
1873  {
1874  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1875  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1876  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1877  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1878  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1879  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1880  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1881  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1882  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1883  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1884  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1885  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1886  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1887  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1888  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1889  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1890 
1891  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1892  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1893  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1894  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1895 
1896  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1897  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1898  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1899  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1900  }
1901 
1902  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1903  gather(out[i], in, i);
1904 }
1905 
1906 
1907 
1911 template <>
1912 inline DEAL_II_ALWAYS_INLINE void
1913 vectorized_transpose_and_store(const bool add_into,
1914  const unsigned int n_entries,
1915  const VectorizedArray<float, 16> *in,
1916  const unsigned int * offsets,
1917  float * out)
1918 {
1919  const unsigned int n_chunks = n_entries / 4;
1920  for (unsigned int i = 0; i < n_chunks; ++i)
1921  {
1922  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1923  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1924  __m512 t2 =
1925  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1926  __m512 t3 =
1927  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1928  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1929  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1930  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1931  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1932 
1933  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1934  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1935  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1936  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1937  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1938  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1939  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1940  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1941  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1942  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1943  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1944  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1945  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1946  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1947  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1948  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1949 
1950  // Cannot use the same store instructions in both paths of the 'if'
1951  // because the compiler cannot know that there is no aliasing between
1952  // pointers
1953  if (add_into)
1954  {
1955  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1956  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1957  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1958  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1959  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1960  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1961  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1962  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1963  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1964  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1965  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1966  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1967  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1968  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1969  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1970  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1971  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1972  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1973  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1974  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1975  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1976  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1977  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1978  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1979  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1980  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1981  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1982  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1983  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1984  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1985  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1986  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1987  }
1988  else
1989  {
1990  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1991  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1992  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1993  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1994  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1995  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1996  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1997  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1998  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1999  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2000  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2001  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2002  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2003  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2004  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2005  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2006  }
2007  }
2008 
2009  // remainder loop of work that does not divide by 4
2010  if (add_into)
2011  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2012  for (unsigned int v = 0; v < 16; ++v)
2013  out[offsets[v] + i] += in[i][v];
2014  else
2015  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2016  for (unsigned int v = 0; v < 16; ++v)
2017  out[offsets[v] + i] = in[i][v];
2018 }
2019 
2020 
2021 
2025 template <>
2026 inline DEAL_II_ALWAYS_INLINE void
2027 vectorized_transpose_and_store(const bool add_into,
2028  const unsigned int n_entries,
2029  const VectorizedArray<float, 16> *in,
2030  std::array<float *, 16> & out)
2031 {
2032  // see the comments in the vectorized_transpose_and_store above
2033 
2034  const unsigned int n_chunks = n_entries / 4;
2035  for (unsigned int i = 0; i < n_chunks; ++i)
2036  {
2037  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2038  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2039  __m512 t2 =
2040  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2041  __m512 t3 =
2042  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2043  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2044  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2045  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2046  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2047 
2048  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2049  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2050  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2051  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2052  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2053  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2054  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2055  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2056  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2057  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2058  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2059  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2060  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2061  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2062  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2063  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2064 
2065  if (add_into)
2066  {
2067  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2068  _mm_storeu_ps(out[0] + 4 * i, res0);
2069  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2070  _mm_storeu_ps(out[1] + 4 * i, res1);
2071  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2072  _mm_storeu_ps(out[2] + 4 * i, res2);
2073  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2074  _mm_storeu_ps(out[3] + 4 * i, res3);
2075  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2076  _mm_storeu_ps(out[4] + 4 * i, res4);
2077  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2078  _mm_storeu_ps(out[5] + 4 * i, res5);
2079  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2080  _mm_storeu_ps(out[6] + 4 * i, res6);
2081  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2082  _mm_storeu_ps(out[7] + 4 * i, res7);
2083  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2084  _mm_storeu_ps(out[8] + 4 * i, res8);
2085  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2086  _mm_storeu_ps(out[9] + 4 * i, res9);
2087  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2088  _mm_storeu_ps(out[10] + 4 * i, res10);
2089  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2090  _mm_storeu_ps(out[11] + 4 * i, res11);
2091  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2092  _mm_storeu_ps(out[12] + 4 * i, res12);
2093  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2094  _mm_storeu_ps(out[13] + 4 * i, res13);
2095  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2096  _mm_storeu_ps(out[14] + 4 * i, res14);
2097  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2098  _mm_storeu_ps(out[15] + 4 * i, res15);
2099  }
2100  else
2101  {
2102  _mm_storeu_ps(out[0] + 4 * i, res0);
2103  _mm_storeu_ps(out[1] + 4 * i, res1);
2104  _mm_storeu_ps(out[2] + 4 * i, res2);
2105  _mm_storeu_ps(out[3] + 4 * i, res3);
2106  _mm_storeu_ps(out[4] + 4 * i, res4);
2107  _mm_storeu_ps(out[5] + 4 * i, res5);
2108  _mm_storeu_ps(out[6] + 4 * i, res6);
2109  _mm_storeu_ps(out[7] + 4 * i, res7);
2110  _mm_storeu_ps(out[8] + 4 * i, res8);
2111  _mm_storeu_ps(out[9] + 4 * i, res9);
2112  _mm_storeu_ps(out[10] + 4 * i, res10);
2113  _mm_storeu_ps(out[11] + 4 * i, res11);
2114  _mm_storeu_ps(out[12] + 4 * i, res12);
2115  _mm_storeu_ps(out[13] + 4 * i, res13);
2116  _mm_storeu_ps(out[14] + 4 * i, res14);
2117  _mm_storeu_ps(out[15] + 4 * i, res15);
2118  }
2119  }
2120 
2121  if (add_into)
2122  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2123  for (unsigned int v = 0; v < 16; ++v)
2124  out[v][i] += in[i][v];
2125  else
2126  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2127  for (unsigned int v = 0; v < 16; ++v)
2128  out[v][i] = in[i][v];
2129 }
2130 
2131 # endif
2132 
2133 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2134 
2138 template <>
2139 class VectorizedArray<double, 4>
2140  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2141 {
2142 public:
2146  using value_type = double;
2147 
2153  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 4;
2154 
2159  VectorizedArray() = default;
2160 
2164  VectorizedArray(const double scalar)
2165  {
2166  this->operator=(scalar);
2167  }
2168 
2173  VectorizedArray &
2174  operator=(const double x)
2175  {
2176  data = _mm256_set1_pd(x);
2177  return *this;
2178  }
2179 
2184  double &operator[](const unsigned int comp)
2185  {
2186  AssertIndexRange(comp, 4);
2187  return *(reinterpret_cast<double *>(&data) + comp);
2188  }
2189 
2194  const double &operator[](const unsigned int comp) const
2195  {
2196  AssertIndexRange(comp, 4);
2197  return *(reinterpret_cast<const double *>(&data) + comp);
2198  }
2199 
2204  VectorizedArray &
2205  operator+=(const VectorizedArray &vec)
2206  {
2207  // if the compiler supports vector arithmetic, we can simply use +=
2208  // operator on the given data type. this allows the compiler to combine
2209  // additions with multiplication (fused multiply-add) if those
2210  // instructions are available. Otherwise, we need to use the built-in
2211  // intrinsic command for __m256d
2212 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2213  data += vec.data;
2214 # else
2215  data = _mm256_add_pd(data, vec.data);
2216 # endif
2217  return *this;
2218  }
2219 
2224  VectorizedArray &
2225  operator-=(const VectorizedArray &vec)
2226  {
2227 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2228  data -= vec.data;
2229 # else
2230  data = _mm256_sub_pd(data, vec.data);
2231 # endif
2232  return *this;
2233  }
2238  VectorizedArray &
2239  operator*=(const VectorizedArray &vec)
2240  {
2241 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2242  data *= vec.data;
2243 # else
2244  data = _mm256_mul_pd(data, vec.data);
2245 # endif
2246  return *this;
2247  }
2248 
2253  VectorizedArray &
2254  operator/=(const VectorizedArray &vec)
2255  {
2256 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2257  data /= vec.data;
2258 # else
2259  data = _mm256_div_pd(data, vec.data);
2260 # endif
2261  return *this;
2262  }
2263 
2270  void
2271  load(const double *ptr)
2272  {
2273  data = _mm256_loadu_pd(ptr);
2274  }
2275 
2283  void
2284  store(double *ptr) const
2285  {
2286  _mm256_storeu_pd(ptr, data);
2287  }
2288 
2293  void
2294  streaming_store(double *ptr) const
2295  {
2296  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2297  ExcMessage("Memory not aligned"));
2298  _mm256_stream_pd(ptr, data);
2299  }
2300 
2314  void
2315  gather(const double *base_ptr, const unsigned int *offsets)
2316  {
2317 # ifdef __AVX2__
2318  // unfortunately, there does not appear to be a 128 bit integer load, so
2319  // do it by some reinterpret casts here. this is allowed because the Intel
2320  // API allows aliasing between different vector types.
2321  const __m128 index_val =
2322  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2323  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2324  data = _mm256_i32gather_pd(base_ptr, index, 8);
2325 # else
2326  for (unsigned int i = 0; i < 4; ++i)
2327  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2328 # endif
2329  }
2330 
2344  void
2345  scatter(const unsigned int *offsets, double *base_ptr) const
2346  {
2347  // no scatter operation in AVX/AVX2
2348  for (unsigned int i = 0; i < 4; ++i)
2349  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2350  }
2351 
2357  __m256d data;
2358 
2359 private:
 2365  VectorizedArray
 2366  get_sqrt() const
2367  {
2368  VectorizedArray res;
2369  res.data = _mm256_sqrt_pd(data);
2370  return res;
2371  }
2372 
 2378  VectorizedArray
 2379  get_abs() const
2380  {
2381  // to compute the absolute value, perform bitwise andnot with -0. This
2382  // will leave all value and exponent bits unchanged but force the sign
2383  // value to +.
2384  __m256d mask = _mm256_set1_pd(-0.);
2385  VectorizedArray res;
2386  res.data = _mm256_andnot_pd(mask, data);
2387  return res;
2388  }
2389 
 2395  VectorizedArray
 2396  get_max(const VectorizedArray &other) const
2397  {
2398  VectorizedArray res;
2399  res.data = _mm256_max_pd(data, other.data);
2400  return res;
2401  }
2402 
 2408  VectorizedArray
 2409  get_min(const VectorizedArray &other) const
2410  {
2411  VectorizedArray res;
2412  res.data = _mm256_min_pd(data, other.data);
2413  return res;
2414  }
2415 
2416  // Make a few functions friends.
 2417  template <typename Number2, std::size_t width2>
 2418  friend VectorizedArray<Number2, width2>
 2419  std::sqrt(const VectorizedArray<Number2, width2> &);
 2420  template <typename Number2, std::size_t width2>
 2421  friend VectorizedArray<Number2, width2>
 2422  std::abs(const VectorizedArray<Number2, width2> &);
 2423  template <typename Number2, std::size_t width2>
 2424  friend VectorizedArray<Number2, width2>
 2425  std::max(const VectorizedArray<Number2, width2> &,
 2426           const VectorizedArray<Number2, width2> &);
 2427  template <typename Number2, std::size_t width2>
 2428  friend VectorizedArray<Number2, width2>
 2429  std::min(const VectorizedArray<Number2, width2> &,
 2430           const VectorizedArray<Number2, width2> &);
 2431 };
2432 
2433 
2434 
2438 template <>
2439 inline DEAL_II_ALWAYS_INLINE void
2440 vectorized_load_and_transpose(const unsigned int n_entries,
2441  const double * in,
2442  const unsigned int * offsets,
 2443  VectorizedArray<double, 4> *out)
 2444 {
2445  const unsigned int n_chunks = n_entries / 4;
2446  const double * in0 = in + offsets[0];
2447  const double * in1 = in + offsets[1];
2448  const double * in2 = in + offsets[2];
2449  const double * in3 = in + offsets[3];
2450 
2451  for (unsigned int i = 0; i < n_chunks; ++i)
2452  {
2453  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2454  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2455  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2456  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2457  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2458  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2459  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2460  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2461  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2462  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2463  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2464  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2465  }
2466 
2467  // remainder loop of work that does not divide by 4
2468  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2469  out[i].gather(in + i, offsets);
2470 }
2471 
2472 
2473 
2477 template <>
2478 inline DEAL_II_ALWAYS_INLINE void
2479 vectorized_load_and_transpose(const unsigned int n_entries,
2480  const std::array<double *, 4> &in,
 2481  VectorizedArray<double, 4> *out)
 2482 {
2483  // see the comments in the vectorized_load_and_transpose above
2484 
2485  const unsigned int n_chunks = n_entries / 4;
2486  const double * in0 = in[0];
2487  const double * in1 = in[1];
2488  const double * in2 = in[2];
2489  const double * in3 = in[3];
2490 
2491  for (unsigned int i = 0; i < n_chunks; ++i)
2492  {
2493  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2494  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2495  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2496  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2497  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2498  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2499  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2500  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2501  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2502  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2503  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2504  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2505  }
2506 
2507  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2508  gather(out[i], in, i);
2509 }
2510 
2511 
2512 
2516 template <>
2517 inline DEAL_II_ALWAYS_INLINE void
2518 vectorized_transpose_and_store(const bool add_into,
2519  const unsigned int n_entries,
2520  const VectorizedArray<double, 4> *in,
2521  const unsigned int * offsets,
2522  double * out)
2523 {
2524  const unsigned int n_chunks = n_entries / 4;
2525  double * out0 = out + offsets[0];
2526  double * out1 = out + offsets[1];
2527  double * out2 = out + offsets[2];
2528  double * out3 = out + offsets[3];
2529  for (unsigned int i = 0; i < n_chunks; ++i)
2530  {
2531  __m256d u0 = in[4 * i + 0].data;
2532  __m256d u1 = in[4 * i + 1].data;
2533  __m256d u2 = in[4 * i + 2].data;
2534  __m256d u3 = in[4 * i + 3].data;
2535  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2536  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2537  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2538  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2539  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2540  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2541  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2542  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2543 
2544  // Cannot use the same store instructions in both paths of the 'if'
2545  // because the compiler cannot know that there is no aliasing between
2546  // pointers
2547  if (add_into)
2548  {
2549  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2550  _mm256_storeu_pd(out0 + 4 * i, res0);
2551  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2552  _mm256_storeu_pd(out1 + 4 * i, res1);
2553  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2554  _mm256_storeu_pd(out2 + 4 * i, res2);
2555  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2556  _mm256_storeu_pd(out3 + 4 * i, res3);
2557  }
2558  else
2559  {
2560  _mm256_storeu_pd(out0 + 4 * i, res0);
2561  _mm256_storeu_pd(out1 + 4 * i, res1);
2562  _mm256_storeu_pd(out2 + 4 * i, res2);
2563  _mm256_storeu_pd(out3 + 4 * i, res3);
2564  }
2565  }
2566 
2567  // remainder loop of work that does not divide by 4
2568  if (add_into)
2569  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2570  for (unsigned int v = 0; v < 4; ++v)
2571  out[offsets[v] + i] += in[i][v];
2572  else
2573  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2574  for (unsigned int v = 0; v < 4; ++v)
2575  out[offsets[v] + i] = in[i][v];
2576 }
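// A minimal usage sketch of the transposed load/store pair above, assuming AVX
// support so that VectorizedArray<double> has four lanes; the names
// 'scalar_data' and 'n_points' are placeholders, not part of the library.
//
//   constexpr unsigned int n_lanes  = VectorizedArray<double>::size();
//   constexpr unsigned int n_points = 16;
//   double       scalar_data[n_lanes * n_points] = {}; // one row of n_points per lane
//   unsigned int offsets[n_lanes];
//   for (unsigned int v = 0; v < n_lanes; ++v)
//     offsets[v] = v * n_points;
//
//   VectorizedArray<double> packed[n_points];
//   vectorized_load_and_transpose(n_points, scalar_data, offsets, packed);
//   for (unsigned int i = 0; i < n_points; ++i)
//     packed[i] = packed[i] * 2.; // operate on all lanes at once
//   vectorized_transpose_and_store(false, n_points, packed, offsets, scalar_data);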
2577 
2578 
2579 
2583 template <>
2584 inline DEAL_II_ALWAYS_INLINE void
2585 vectorized_transpose_and_store(const bool add_into,
2586  const unsigned int n_entries,
2587  const VectorizedArray<double, 4> *in,
2588  std::array<double *, 4> & out)
2589 {
2590  // see the comments in the vectorized_transpose_and_store above
2591 
2592  const unsigned int n_chunks = n_entries / 4;
2593  double * out0 = out[0];
2594  double * out1 = out[1];
2595  double * out2 = out[2];
2596  double * out3 = out[3];
2597  for (unsigned int i = 0; i < n_chunks; ++i)
2598  {
2599  __m256d u0 = in[4 * i + 0].data;
2600  __m256d u1 = in[4 * i + 1].data;
2601  __m256d u2 = in[4 * i + 2].data;
2602  __m256d u3 = in[4 * i + 3].data;
2603  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2604  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2605  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2606  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2607  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2608  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2609  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2610  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2611 
2612  // Cannot use the same store instructions in both paths of the 'if'
2613  // because the compiler cannot know that there is no aliasing between
2614  // pointers
2615  if (add_into)
2616  {
2617  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2618  _mm256_storeu_pd(out0 + 4 * i, res0);
2619  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2620  _mm256_storeu_pd(out1 + 4 * i, res1);
2621  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2622  _mm256_storeu_pd(out2 + 4 * i, res2);
2623  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2624  _mm256_storeu_pd(out3 + 4 * i, res3);
2625  }
2626  else
2627  {
2628  _mm256_storeu_pd(out0 + 4 * i, res0);
2629  _mm256_storeu_pd(out1 + 4 * i, res1);
2630  _mm256_storeu_pd(out2 + 4 * i, res2);
2631  _mm256_storeu_pd(out3 + 4 * i, res3);
2632  }
2633  }
2634 
2635  // remainder loop of work that does not divide by 4
2636  if (add_into)
2637  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2638  for (unsigned int v = 0; v < 4; ++v)
2639  out[v][i] += in[i][v];
2640  else
2641  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2642  for (unsigned int v = 0; v < 4; ++v)
2643  out[v][i] = in[i][v];
2644 }
2645 
2646 
2647 
2651 template <>
2652 class VectorizedArray<float, 8>
2653  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
2654 {
2655 public:
2659  using value_type = float;
2660 
2666  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 8;
2667 
2672  VectorizedArray() = default;
2673 
2677  VectorizedArray(const float scalar)
2678  {
2679  this->operator=(scalar);
2680  }
2681 
2686  VectorizedArray &
2687  operator=(const float x)
2688  {
2689  data = _mm256_set1_ps(x);
2690  return *this;
2691  }
2692 
2697  float &operator[](const unsigned int comp)
2698  {
2699  AssertIndexRange(comp, 8);
2700  return *(reinterpret_cast<float *>(&data) + comp);
2701  }
2702 
2707  const float &operator[](const unsigned int comp) const
2708  {
2709  AssertIndexRange(comp, 8);
2710  return *(reinterpret_cast<const float *>(&data) + comp);
2711  }
2712 
2717  VectorizedArray &
2718  operator+=(const VectorizedArray &vec)
2719  {
2720  // if the compiler supports vector arithmetic, we can simply use +=
2721  // operator on the given data type. this allows the compiler to combine
2722  // additions with multiplication (fused multiply-add) if those
2723  // instructions are available. Otherwise, we need to use the built-in
2724  // intrinsic command for __m256d
2725 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2726  data += vec.data;
2727 # else
2728  data = _mm256_add_ps(data, vec.data);
2729 # endif
2730  return *this;
2731  }
2732 
2737  VectorizedArray &
2738  operator-=(const VectorizedArray &vec)
2739  {
2740 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2741  data -= vec.data;
2742 # else
2743  data = _mm256_sub_ps(data, vec.data);
2744 # endif
2745  return *this;
2746  }
2751  VectorizedArray &
2752  operator*=(const VectorizedArray &vec)
2753  {
2754 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2755  data *= vec.data;
2756 # else
2757  data = _mm256_mul_ps(data, vec.data);
2758 # endif
2759  return *this;
2760  }
2761 
2766  VectorizedArray &
2767  operator/=(const VectorizedArray &vec)
2768  {
2769 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2770  data /= vec.data;
2771 # else
2772  data = _mm256_div_ps(data, vec.data);
2773 # endif
2774  return *this;
2775  }
2776 
2783  void
2784  load(const float *ptr)
2785  {
2786  data = _mm256_loadu_ps(ptr);
2787  }
2788 
2796  void
2797  store(float *ptr) const
2798  {
2799  _mm256_storeu_ps(ptr, data);
2800  }
2801 
2806  void
2807  streaming_store(float *ptr) const
2808  {
2809  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2810  ExcMessage("Memory not aligned"));
2811  _mm256_stream_ps(ptr, data);
2812  }
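// A minimal usage sketch for streaming_store(): the destination must be
// aligned to the full 32-byte vector width, for instance memory obtained from
// C++17 std::aligned_alloc (whose size argument must be a multiple of the
// alignment). This is a sketch under that assumption, not a recommendation of
// a particular allocation strategy.
//
//   float *buffer = static_cast<float *>(std::aligned_alloc(32, 256 * sizeof(float)));
//   VectorizedArray<float, 8> v = 1.5f;
//   v.streaming_store(buffer); // non-temporal store, bypasses the caches
//   std::free(buffer);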
2813 
2827  void
2828  gather(const float *base_ptr, const unsigned int *offsets)
2829  {
2830 # ifdef __AVX2__
2831  // unfortunately, there does not appear to be a 256 bit integer load, so
2832  // do it by some reinterpret casts here. this is allowed because the Intel
2833  // API allows aliasing between different vector types.
2834  const __m256 index_val =
2835  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2836  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2837  data = _mm256_i32gather_ps(base_ptr, index, 4);
2838 # else
2839  for (unsigned int i = 0; i < 8; ++i)
2840  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2841 # endif
2842  }
2843 
2857  void
2858  scatter(const unsigned int *offsets, float *base_ptr) const
2859  {
2860  // no scatter operation in AVX/AVX2
2861  for (unsigned int i = 0; i < 8; ++i)
2862  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2863  }
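// A minimal usage sketch for the two functions above: gather() pulls indexed
// entries from memory into the eight lanes, scatter() writes them back. The
// array names 'storage' and 'idx' are placeholders.
//
//   float        storage[64] = {};
//   unsigned int idx[8]      = {0, 8, 16, 24, 32, 40, 48, 56};
//   VectorizedArray<float, 8> v;
//   v.gather(storage, idx);  // afterwards v[k] == storage[idx[k]]
//   v += 1.f;
//   v.scatter(idx, storage); // writes storage[idx[k]] = v[k]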
2864 
2870  __m256 data;
2871 
2872 private:
2879  get_sqrt() const
2880  {
2881  VectorizedArray res;
2882  res.data = _mm256_sqrt_ps(data);
2883  return res;
2884  }
2885 
2892  get_abs() const
2893  {
2894  // to compute the absolute value, perform bitwise andnot with -0. This
2895  // will leave all value and exponent bits unchanged but force the sign
2896  // value to +.
2897  __m256 mask = _mm256_set1_ps(-0.f);
2898  VectorizedArray res;
2899  res.data = _mm256_andnot_ps(mask, data);
2900  return res;
2901  }
2902 
2909  get_max(const VectorizedArray &other) const
2910  {
2911  VectorizedArray res;
2912  res.data = _mm256_max_ps(data, other.data);
2913  return res;
2914  }
2915 
2922  get_min(const VectorizedArray &other) const
2923  {
2924  VectorizedArray res;
2925  res.data = _mm256_min_ps(data, other.data);
2926  return res;
2927  }
2928 
2929  // Make a few functions friends.
2930  template <typename Number2, std::size_t width2>
2931  friend VectorizedArray<Number2, width2>
2932  std::sqrt(const VectorizedArray<Number2, width2> &);
2933  template <typename Number2, std::size_t width2>
2934  friend VectorizedArray<Number2, width2>
2935  std::abs(const VectorizedArray<Number2, width2> &);
2936  template <typename Number2, std::size_t width2>
2937  friend VectorizedArray<Number2, width2>
2938  std::max(const VectorizedArray<Number2, width2> &,
2939  const VectorizedArray<Number2, width2> &);
2940  template <typename Number2, std::size_t width2>
2941  friend VectorizedArray<Number2, width2>
2942  std::min(const VectorizedArray<Number2, width2> &,
2943  const VectorizedArray<Number2, width2> &);
2944 };
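// A minimal usage sketch of the specialization above, assuming AVX is
// available so that this class is selected for VectorizedArray<float, 8>.
//
//   float input[8]  = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
//   float output[8];
//   VectorizedArray<float, 8> v;
//   v.load(input);   // unaligned load of eight floats
//   v *= 2.f;        // a single AVX multiplication for all eight lanes
//   v.store(output); // output[k] == 2 * input[k]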
2945 
2946 
2947 
2951 template <>
2952 inline DEAL_II_ALWAYS_INLINE void
2953 vectorized_load_and_transpose(const unsigned int n_entries,
2954  const float * in,
2955  const unsigned int * offsets,
2956  VectorizedArray<float, 8> *out)
2957 {
2958  const unsigned int n_chunks = n_entries / 4;
2959  for (unsigned int i = 0; i < n_chunks; ++i)
2960  {
2961  // To avoid warnings about uninitialized variables, need to initialize
2962  // one variable with zero before using it.
2963  __m256 t0, t1, t2, t3 = {};
2964  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2965  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2966  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2967  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2968  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2969  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2970  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2971  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2972 
2973  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2974  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2975  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2976  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2977  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2978  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2979  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2980  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2981  }
2982 
2983  // remainder loop of work that does not divide by 4
2984  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2985  out[i].gather(in + i, offsets);
2986 }
2987 
2988 
2989 
2993 template <>
2994 inline DEAL_II_ALWAYS_INLINE void
2995 vectorized_load_and_transpose(const unsigned int n_entries,
2996  const std::array<float *, 8> &in,
2997  VectorizedArray<float, 8> *out)
2998 {
2999  // see the comments in the vectorized_load_and_transpose above
3000 
3001  const unsigned int n_chunks = n_entries / 4;
3002  for (unsigned int i = 0; i < n_chunks; ++i)
3003  {
3004  __m256 t0, t1, t2, t3 = {};
3005  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3006  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3007  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3008  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3009  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3010  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3011  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3012  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3013 
3014  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3015  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3016  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3017  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3018  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3019  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3020  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3021  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3022  }
3023 
3024  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3025  gather(out[i], in, i);
3026 }
3027 
3028 
3029 
3033 template <>
3034 inline DEAL_II_ALWAYS_INLINE void
3035 vectorized_transpose_and_store(const bool add_into,
3036  const unsigned int n_entries,
3037  const VectorizedArray<float, 8> *in,
3038  const unsigned int * offsets,
3039  float * out)
3040 {
3041  const unsigned int n_chunks = n_entries / 4;
3042  for (unsigned int i = 0; i < n_chunks; ++i)
3043  {
3044  __m256 u0 = in[4 * i + 0].data;
3045  __m256 u1 = in[4 * i + 1].data;
3046  __m256 u2 = in[4 * i + 2].data;
3047  __m256 u3 = in[4 * i + 3].data;
3048  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3049  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3050  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3051  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3052  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3053  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3054  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3055  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3056  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3057  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3058  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3059  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3060  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3061  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3062  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3063  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3064 
3065  // Cannot use the same store instructions in both paths of the 'if'
3066  // because the compiler cannot know that there is no aliasing between
3067  // pointers
3068  if (add_into)
3069  {
3070  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3071  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3072  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3073  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3074  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3075  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3076  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3077  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3078  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3079  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3080  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3081  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3082  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3083  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3084  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3085  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3086  }
3087  else
3088  {
3089  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3090  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3091  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3092  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3093  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3094  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3095  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3096  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3097  }
3098  }
3099 
3100  // remainder loop of work that does not divide by 4
3101  if (add_into)
3102  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3103  for (unsigned int v = 0; v < 8; ++v)
3104  out[offsets[v] + i] += in[i][v];
3105  else
3106  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3107  for (unsigned int v = 0; v < 8; ++v)
3108  out[offsets[v] + i] = in[i][v];
3109 }
3110 
3111 
3112 
3116 template <>
3117 inline DEAL_II_ALWAYS_INLINE void
3118 vectorized_transpose_and_store(const bool add_into,
3119  const unsigned int n_entries,
3120  const VectorizedArray<float, 8> *in,
3121  std::array<float *, 8> & out)
3122 {
3123  // see the comments in the vectorized_transpose_and_store above
3124 
3125  const unsigned int n_chunks = n_entries / 4;
3126  for (unsigned int i = 0; i < n_chunks; ++i)
3127  {
3128  __m256 u0 = in[4 * i + 0].data;
3129  __m256 u1 = in[4 * i + 1].data;
3130  __m256 u2 = in[4 * i + 2].data;
3131  __m256 u3 = in[4 * i + 3].data;
3132  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3133  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3134  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3135  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3136  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3137  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3138  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3139  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3140  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3141  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3142  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3143  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3144  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3145  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3146  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3147  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3148 
3149  if (add_into)
3150  {
3151  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3152  _mm_storeu_ps(out[0] + 4 * i, res0);
3153  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3154  _mm_storeu_ps(out[1] + 4 * i, res1);
3155  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3156  _mm_storeu_ps(out[2] + 4 * i, res2);
3157  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3158  _mm_storeu_ps(out[3] + 4 * i, res3);
3159  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3160  _mm_storeu_ps(out[4] + 4 * i, res4);
3161  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3162  _mm_storeu_ps(out[5] + 4 * i, res5);
3163  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3164  _mm_storeu_ps(out[6] + 4 * i, res6);
3165  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3166  _mm_storeu_ps(out[7] + 4 * i, res7);
3167  }
3168  else
3169  {
3170  _mm_storeu_ps(out[0] + 4 * i, res0);
3171  _mm_storeu_ps(out[1] + 4 * i, res1);
3172  _mm_storeu_ps(out[2] + 4 * i, res2);
3173  _mm_storeu_ps(out[3] + 4 * i, res3);
3174  _mm_storeu_ps(out[4] + 4 * i, res4);
3175  _mm_storeu_ps(out[5] + 4 * i, res5);
3176  _mm_storeu_ps(out[6] + 4 * i, res6);
3177  _mm_storeu_ps(out[7] + 4 * i, res7);
3178  }
3179  }
3180 
3181  if (add_into)
3182  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3183  for (unsigned int v = 0; v < 8; ++v)
3184  out[v][i] += in[i][v];
3185  else
3186  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3187  for (unsigned int v = 0; v < 8; ++v)
3188  out[v][i] = in[i][v];
3189 }
3190 
3191 # endif
3192 
3193 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3194 
3198 template <>
3199 class VectorizedArray<double, 2>
3200  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
3201 {
3202 public:
3206  using value_type = double;
3207 
3213  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 2;
3214 
3219  VectorizedArray() = default;
3220 
3224  VectorizedArray(const double scalar)
3225  {
3226  this->operator=(scalar);
3227  }
3228 
3233  VectorizedArray &
3234  operator=(const double x)
3235  {
3236  data = _mm_set1_pd(x);
3237  return *this;
3238  }
3239 
3244  double &operator[](const unsigned int comp)
3245  {
3246  AssertIndexRange(comp, 2);
3247  return *(reinterpret_cast<double *>(&data) + comp);
3248  }
3249 
3254  const double &operator[](const unsigned int comp) const
3255  {
3256  AssertIndexRange(comp, 2);
3257  return *(reinterpret_cast<const double *>(&data) + comp);
3258  }
3259 
3264  VectorizedArray &
3265  operator+=(const VectorizedArray &vec)
3266  {
3267 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3268  data += vec.data;
3269 # else
3270  data = _mm_add_pd(data, vec.data);
3271 # endif
3272  return *this;
3273  }
3274 
3279  VectorizedArray &
3280  operator-=(const VectorizedArray &vec)
3281  {
3282 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3283  data -= vec.data;
3284 # else
3285  data = _mm_sub_pd(data, vec.data);
3286 # endif
3287  return *this;
3288  }
3289 
3294  VectorizedArray &
3295  operator*=(const VectorizedArray &vec)
3296  {
3297 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3298  data *= vec.data;
3299 # else
3300  data = _mm_mul_pd(data, vec.data);
3301 # endif
3302  return *this;
3303  }
3304 
3309  VectorizedArray &
3310  operator/=(const VectorizedArray &vec)
3311  {
3312 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3313  data /= vec.data;
3314 # else
3315  data = _mm_div_pd(data, vec.data);
3316 # endif
3317  return *this;
3318  }
3319 
3326  void
3327  load(const double *ptr)
3328  {
3329  data = _mm_loadu_pd(ptr);
3330  }
3331 
3339  void
3340  store(double *ptr) const
3341  {
3342  _mm_storeu_pd(ptr, data);
3343  }
3344 
3349  void
3350  streaming_store(double *ptr) const
3351  {
3352  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3353  ExcMessage("Memory not aligned"));
3354  _mm_stream_pd(ptr, data);
3355  }
3356 
3370  void
3371  gather(const double *base_ptr, const unsigned int *offsets)
3372  {
3373  for (unsigned int i = 0; i < 2; ++i)
3374  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3375  }
3376 
3390  void
3391  scatter(const unsigned int *offsets, double *base_ptr) const
3392  {
3393  for (unsigned int i = 0; i < 2; ++i)
3394  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3395  }
3396 
3402  __m128d data;
3403 
3404 private:
3411  get_sqrt() const
3412  {
3413  VectorizedArray res;
3414  res.data = _mm_sqrt_pd(data);
3415  return res;
3416  }
3417 
3424  get_abs() const
3425  {
3426  // to compute the absolute value, perform
3427  // bitwise andnot with -0. This will leave all
3428  // value and exponent bits unchanged but force
3429  // the sign value to +.
3430  __m128d mask = _mm_set1_pd(-0.);
3431  VectorizedArray res;
3432  res.data = _mm_andnot_pd(mask, data);
3433  return res;
3434  }
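// The same sign-bit trick in scalar form (illustration only): -0.0 has only
// the sign bit set, so clearing that bit yields the absolute value without a
// branch.
//
//   double x = -3.25;
//   std::uint64_t bits;
//   std::memcpy(&bits, &x, sizeof(bits));
//   bits &= ~(1ULL << 63);             // analogous to _mm_andnot_pd(-0., x)
//   std::memcpy(&x, &bits, sizeof(x)); // x is now 3.25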
3435 
3442  get_max(const VectorizedArray &other) const
3443  {
3444  VectorizedArray res;
3445  res.data = _mm_max_pd(data, other.data);
3446  return res;
3447  }
3448 
3455  get_min(const VectorizedArray &other) const
3456  {
3457  VectorizedArray res;
3458  res.data = _mm_min_pd(data, other.data);
3459  return res;
3460  }
3461 
3462  // Make a few functions friends.
3463  template <typename Number2, std::size_t width2>
3464  friend VectorizedArray<Number2, width2>
3465  std::sqrt(const VectorizedArray<Number2, width2> &);
3466  template <typename Number2, std::size_t width2>
3467  friend VectorizedArray<Number2, width2>
3468  std::abs(const VectorizedArray<Number2, width2> &);
3469  template <typename Number2, std::size_t width2>
3470  friend VectorizedArray<Number2, width2>
3471  std::max(const VectorizedArray<Number2, width2> &,
3472  const VectorizedArray<Number2, width2> &);
3473  template <typename Number2, std::size_t width2>
3474  friend VectorizedArray<Number2, width2>
3475  std::min(const VectorizedArray<Number2, width2> &,
3476  const VectorizedArray<Number2, width2> &);
3477 };
3478 
3479 
3480 
3484 template <>
3485 inline DEAL_II_ALWAYS_INLINE void
3486 vectorized_load_and_transpose(const unsigned int n_entries,
3487  const double * in,
3488  const unsigned int * offsets,
3489  VectorizedArray<double, 2> *out)
3490 {
3491  const unsigned int n_chunks = n_entries / 2;
3492  for (unsigned int i = 0; i < n_chunks; ++i)
3493  {
3494  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3495  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3496  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3497  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3498  }
3499 
3500  // remainder loop of work that does not divide by 2
3501  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3502  for (unsigned int v = 0; v < 2; ++v)
3503  out[i][v] = in[offsets[v] + i];
3504 }
3505 
3506 
3507 
3511 template <>
3512 inline DEAL_II_ALWAYS_INLINE void
3513 vectorized_load_and_transpose(const unsigned int n_entries,
3514  const std::array<double *, 2> &in,
3515  VectorizedArray<double, 2> *out)
3516 {
3517  // see the comments in the vectorized_load_and_transpose above
3518 
3519  const unsigned int n_chunks = n_entries / 2;
3520  for (unsigned int i = 0; i < n_chunks; ++i)
3521  {
3522  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3523  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3524  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3525  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3526  }
3527 
3528  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3529  for (unsigned int v = 0; v < 2; ++v)
3530  out[i][v] = in[v][i];
3531 }
3532 
3533 
3534 
3538 template <>
3539 inline DEAL_II_ALWAYS_INLINE void
3540 vectorized_transpose_and_store(const bool add_into,
3541  const unsigned int n_entries,
3542  const VectorizedArray<double, 2> *in,
3543  const unsigned int * offsets,
3544  double * out)
3545 {
3546  const unsigned int n_chunks = n_entries / 2;
3547  if (add_into)
3548  {
3549  for (unsigned int i = 0; i < n_chunks; ++i)
3550  {
3551  __m128d u0 = in[2 * i + 0].data;
3552  __m128d u1 = in[2 * i + 1].data;
3553  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3554  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3555  _mm_storeu_pd(out + 2 * i + offsets[0],
3556  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3557  res0));
3558  _mm_storeu_pd(out + 2 * i + offsets[1],
3559  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3560  res1));
3561  }
3562  // remainder loop of work that does not divide by 2
3563  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3564  for (unsigned int v = 0; v < 2; ++v)
3565  out[offsets[v] + i] += in[i][v];
3566  }
3567  else
3568  {
3569  for (unsigned int i = 0; i < n_chunks; ++i)
3570  {
3571  __m128d u0 = in[2 * i + 0].data;
3572  __m128d u1 = in[2 * i + 1].data;
3573  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3574  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3575  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3576  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3577  }
3578  // remainder loop of work that does not divide by 2
3579  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3580  for (unsigned int v = 0; v < 2; ++v)
3581  out[offsets[v] + i] = in[i][v];
3582  }
3583 }
3584 
3585 
3586 
3590 template <>
3591 inline DEAL_II_ALWAYS_INLINE void
3592 vectorized_transpose_and_store(const bool add_into,
3593  const unsigned int n_entries,
3594  const VectorizedArray<double, 2> *in,
3595  std::array<double *, 2> & out)
3596 {
3597  // see the comments in the vectorized_transpose_and_store above
3598 
3599  const unsigned int n_chunks = n_entries / 2;
3600  if (add_into)
3601  {
3602  for (unsigned int i = 0; i < n_chunks; ++i)
3603  {
3604  __m128d u0 = in[2 * i + 0].data;
3605  __m128d u1 = in[2 * i + 1].data;
3606  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3607  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3608  _mm_storeu_pd(out[0] + 2 * i,
3609  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3610  _mm_storeu_pd(out[1] + 2 * i,
3611  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3612  }
3613 
3614  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3615  for (unsigned int v = 0; v < 2; ++v)
3616  out[v][i] += in[i][v];
3617  }
3618  else
3619  {
3620  for (unsigned int i = 0; i < n_chunks; ++i)
3621  {
3622  __m128d u0 = in[2 * i + 0].data;
3623  __m128d u1 = in[2 * i + 1].data;
3624  __m128d res0 = _mm_unpacklo_pd(u0, u1);
3625  __m128d res1 = _mm_unpackhi_pd(u0, u1);
3626  _mm_storeu_pd(out[0] + 2 * i, res0);
3627  _mm_storeu_pd(out[1] + 2 * i, res1);
3628  }
3629 
3630  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3631  for (unsigned int v = 0; v < 2; ++v)
3632  out[v][i] = in[i][v];
3633  }
3634 }
3635 
3636 
3637 
3641 template <>
3642 class VectorizedArray<float, 4>
3643  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
3644 {
3645 public:
3649  using value_type = float;
3650 
3656  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 4;
3657 
3666  VectorizedArray() = default;
3667 
3671  VectorizedArray(const float scalar)
3672  {
3673  this->operator=(scalar);
3674  }
3675 
3677  VectorizedArray &
3678  operator=(const float x)
3679  {
3680  data = _mm_set1_ps(x);
3681  return *this;
3682  }
3683 
3688  float &operator[](const unsigned int comp)
3689  {
3690  AssertIndexRange(comp, 4);
3691  return *(reinterpret_cast<float *>(&data) + comp);
3692  }
3693 
3698  const float &operator[](const unsigned int comp) const
3699  {
3700  AssertIndexRange(comp, 4);
3701  return *(reinterpret_cast<const float *>(&data) + comp);
3702  }
3703 
3708  VectorizedArray &
3709  operator+=(const VectorizedArray &vec)
3710  {
3711 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3712  data += vec.data;
3713 # else
3714  data = _mm_add_ps(data, vec.data);
3715 # endif
3716  return *this;
3717  }
3718 
3723  VectorizedArray &
3724  operator-=(const VectorizedArray &vec)
3725  {
3726 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3727  data -= vec.data;
3728 # else
3729  data = _mm_sub_ps(data, vec.data);
3730 # endif
3731  return *this;
3732  }
3733 
3738  VectorizedArray &
3739  operator*=(const VectorizedArray &vec)
3740  {
3741 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3742  data *= vec.data;
3743 # else
3744  data = _mm_mul_ps(data, vec.data);
3745 # endif
3746  return *this;
3747  }
3748 
3753  VectorizedArray &
3754  operator/=(const VectorizedArray &vec)
3755  {
3756 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3757  data /= vec.data;
3758 # else
3759  data = _mm_div_ps(data, vec.data);
3760 # endif
3761  return *this;
3762  }
3763 
3770  void
3771  load(const float *ptr)
3772  {
3773  data = _mm_loadu_ps(ptr);
3774  }
3775 
3783  void
3784  store(float *ptr) const
3785  {
3786  _mm_storeu_ps(ptr, data);
3787  }
3788 
3793  void
3794  streaming_store(float *ptr) const
3795  {
3796  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3797  ExcMessage("Memory not aligned"));
3798  _mm_stream_ps(ptr, data);
3799  }
3800 
3814  void
3815  gather(const float *base_ptr, const unsigned int *offsets)
3816  {
3817  for (unsigned int i = 0; i < 4; ++i)
3818  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3819  }
3820 
3834  void
3835  scatter(const unsigned int *offsets, float *base_ptr) const
3836  {
3837  for (unsigned int i = 0; i < 4; ++i)
3838  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3839  }
3840 
3846  __m128 data;
3847 
3848 private:
3855  get_sqrt() const
3856  {
3857  VectorizedArray res;
3858  res.data = _mm_sqrt_ps(data);
3859  return res;
3860  }
3861 
3868  get_abs() const
3869  {
3870  // to compute the absolute value, perform bitwise andnot with -0. This
3871  // will leave all value and exponent bits unchanged but force the sign
3872  // value to +.
3873  __m128 mask = _mm_set1_ps(-0.f);
3874  VectorizedArray res;
3875  res.data = _mm_andnot_ps(mask, data);
3876  return res;
3877  }
3878 
3885  get_max(const VectorizedArray &other) const
3886  {
3887  VectorizedArray res;
3888  res.data = _mm_max_ps(data, other.data);
3889  return res;
3890  }
3891 
3898  get_min(const VectorizedArray &other) const
3899  {
3900  VectorizedArray res;
3901  res.data = _mm_min_ps(data, other.data);
3902  return res;
3903  }
3904 
3905  // Make a few functions friends.
3906  template <typename Number2, std::size_t width2>
3907  friend VectorizedArray<Number2, width2>
3908  std::sqrt(const VectorizedArray<Number2, width2> &);
3909  template <typename Number2, std::size_t width2>
3910  friend VectorizedArray<Number2, width2>
3911  std::abs(const VectorizedArray<Number2, width2> &);
3912  template <typename Number2, std::size_t width2>
3913  friend VectorizedArray<Number2, width2>
3914  std::max(const VectorizedArray<Number2, width2> &,
3915  const VectorizedArray<Number2, width2> &);
3916  template <typename Number2, std::size_t width2>
3917  friend VectorizedArray<Number2, width2>
3918  std::min(const VectorizedArray<Number2, width2> &,
3919  const VectorizedArray<Number2, width2> &);
3920 };
3921 
3922 
3923 
3927 template <>
3928 inline DEAL_II_ALWAYS_INLINE void
3929 vectorized_load_and_transpose(const unsigned int n_entries,
3930  const float * in,
3931  const unsigned int * offsets,
3932  VectorizedArray<float, 4> *out)
3933 {
3934  const unsigned int n_chunks = n_entries / 4;
3935  for (unsigned int i = 0; i < n_chunks; ++i)
3936  {
3937  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3938  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3939  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3940  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3941  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3942  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3943  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3944  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3945  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3946  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3947  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3948  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3949  }
3950 
3951  // remainder loop of work that does not divide by 4
3952  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3953  for (unsigned int v = 0; v < 4; ++v)
3954  out[i][v] = in[offsets[v] + i];
3955 }
3956 
3957 
3958 
3962 template <>
3963 inline DEAL_II_ALWAYS_INLINE void
3964 vectorized_load_and_transpose(const unsigned int n_entries,
3965  const std::array<float *, 4> &in,
3966  VectorizedArray<float, 4> *out)
3967 {
3968  // see the comments in the vectorized_load_and_transpose above
3969 
3970  const unsigned int n_chunks = n_entries / 4;
3971  for (unsigned int i = 0; i < n_chunks; ++i)
3972  {
3973  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
3974  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
3975  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
3976  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
3977  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3978  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3979  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3980  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3981  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3982  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3983  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3984  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3985  }
3986 
3987  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3988  for (unsigned int v = 0; v < 4; ++v)
3989  out[i][v] = in[v][i];
3990 }
3991 
3992 
3993 
3997 template <>
3998 inline DEAL_II_ALWAYS_INLINE void
3999 vectorized_transpose_and_store(const bool add_into,
4000  const unsigned int n_entries,
4001  const VectorizedArray<float, 4> *in,
4002  const unsigned int * offsets,
4003  float * out)
4004 {
4005  const unsigned int n_chunks = n_entries / 4;
4006  for (unsigned int i = 0; i < n_chunks; ++i)
4007  {
4008  __m128 u0 = in[4 * i + 0].data;
4009  __m128 u1 = in[4 * i + 1].data;
4010  __m128 u2 = in[4 * i + 2].data;
4011  __m128 u3 = in[4 * i + 3].data;
4012  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4013  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4014  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4015  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4016  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4017  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4018  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4019  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4020 
4021  // Cannot use the same store instructions in both paths of the 'if'
4022  // because the compiler cannot know that there is no aliasing between
4023  // pointers
4024  if (add_into)
4025  {
4026  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4027  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4028  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4029  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4030  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4031  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4032  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4033  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4034  }
4035  else
4036  {
4037  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4038  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4039  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4040  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4041  }
4042  }
4043 
4044  // remainder loop of work that does not divide by 4
4045  if (add_into)
4046  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4047  for (unsigned int v = 0; v < 4; ++v)
4048  out[offsets[v] + i] += in[i][v];
4049  else
4050  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4051  for (unsigned int v = 0; v < 4; ++v)
4052  out[offsets[v] + i] = in[i][v];
4053 }
4054 
4055 
4056 
4060 template <>
4061 inline DEAL_II_ALWAYS_INLINE void
4062 vectorized_transpose_and_store(const bool add_into,
4063  const unsigned int n_entries,
4064  const VectorizedArray<float, 4> *in,
4065  std::array<float *, 4> & out)
4066 {
4067  // see the comments in the vectorized_transpose_and_store above
4068 
4069  const unsigned int n_chunks = n_entries / 4;
4070  for (unsigned int i = 0; i < n_chunks; ++i)
4071  {
4072  __m128 u0 = in[4 * i + 0].data;
4073  __m128 u1 = in[4 * i + 1].data;
4074  __m128 u2 = in[4 * i + 2].data;
4075  __m128 u3 = in[4 * i + 3].data;
4076  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4077  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4078  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4079  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4080  u0 = _mm_shuffle_ps(t0, t2, 0x88);
4081  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4082  u2 = _mm_shuffle_ps(t1, t3, 0x88);
4083  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4084 
4085  if (add_into)
4086  {
4087  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4088  _mm_storeu_ps(out[0] + 4 * i, u0);
4089  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4090  _mm_storeu_ps(out[1] + 4 * i, u1);
4091  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4092  _mm_storeu_ps(out[2] + 4 * i, u2);
4093  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4094  _mm_storeu_ps(out[3] + 4 * i, u3);
4095  }
4096  else
4097  {
4098  _mm_storeu_ps(out[0] + 4 * i, u0);
4099  _mm_storeu_ps(out[1] + 4 * i, u1);
4100  _mm_storeu_ps(out[2] + 4 * i, u2);
4101  _mm_storeu_ps(out[3] + 4 * i, u3);
4102  }
4103  }
4104 
4105  if (add_into)
4106  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4107  for (unsigned int v = 0; v < 4; ++v)
4108  out[v][i] += in[i][v];
4109  else
4110  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4111  for (unsigned int v = 0; v < 4; ++v)
4112  out[v][i] = in[i][v];
4113 }
4114 
4115 
4116 
4117 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
4118 
4119 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
4120  defined(__VSX__)
4121 
4122 template <>
4123 class VectorizedArray<double, 2>
4124  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
4125 {
4126 public:
4130  using value_type = double;
4131 
4137  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 2;
4138 
4143  VectorizedArray() = default;
4144 
4148  VectorizedArray(const double scalar)
4149  {
4150  this->operator=(scalar);
4151  }
4152 
4157  VectorizedArray &
4158  operator=(const double x)
4159  {
4160  data = vec_splats(x);
4161 
4162  // Some compilers believe that vec_splats sets 'x', but that's not true.
4163  // They then warn about setting a variable and not using it. Suppress the
4164  // warning by "using" the variable:
4165  (void)x;
4166  return *this;
4167  }
4168 
4173  double &operator[](const unsigned int comp)
4174  {
4175  AssertIndexRange(comp, 2);
4176  return *(reinterpret_cast<double *>(&data) + comp);
4177  }
4178 
4183  const double &operator[](const unsigned int comp) const
4184  {
4185  AssertIndexRange(comp, 2);
4186  return *(reinterpret_cast<const double *>(&data) + comp);
4187  }
4188 
4193  VectorizedArray &
4194  operator+=(const VectorizedArray &vec)
4195  {
4196  data = vec_add(data, vec.data);
4197  return *this;
4198  }
4199 
4204  VectorizedArray &
4205  operator-=(const VectorizedArray &vec)
4206  {
4207  data = vec_sub(data, vec.data);
4208  return *this;
4209  }
4210 
4215  VectorizedArray &
4216  operator*=(const VectorizedArray &vec)
4217  {
4218  data = vec_mul(data, vec.data);
4219  return *this;
4220  }
4221 
4226  VectorizedArray &
4227  operator/=(const VectorizedArray &vec)
4228  {
4229  data = vec_div(data, vec.data);
4230  return *this;
4231  }
4232 
4238  void
4239  load(const double *ptr)
4240  {
4241  data = vec_vsx_ld(0, ptr);
4242  }
4243 
4249  void
4250  store(double *ptr) const
4251  {
4252  vec_vsx_st(data, 0, ptr);
4253  }
4254 
4258  void
4259  streaming_store(double *ptr) const
4260  {
4261  store(ptr);
4262  }
4263 
4267  void
4268  gather(const double *base_ptr, const unsigned int *offsets)
4269  {
4270  for (unsigned int i = 0; i < 2; ++i)
4271  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
4272  }
4273 
4277  void
4278  scatter(const unsigned int *offsets, double *base_ptr) const
4279  {
4280  for (unsigned int i = 0; i < 2; ++i)
4281  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
4282  }
4283 
4289  __vector double data;
4290 
4291 private:
4298  get_sqrt() const
4299  {
4300  VectorizedArray res;
4301  res.data = vec_sqrt(data);
4302  return res;
4303  }
4304 
4311  get_abs() const
4312  {
4313  VectorizedArray res;
4314  res.data = vec_abs(data);
4315  return res;
4316  }
4317 
4324  get_max(const VectorizedArray &other) const
4325  {
4326  VectorizedArray res;
4327  res.data = vec_max(data, other.data);
4328  return res;
4329  }
4330 
4337  get_min(const VectorizedArray &other) const
4338  {
4339  VectorizedArray res;
4340  res.data = vec_min(data, other.data);
4341  return res;
4342  }
4343 
4344  // Make a few functions friends.
4345  template <typename Number2, std::size_t width2>
4346  friend VectorizedArray<Number2, width2>
4347  std::sqrt(const VectorizedArray<Number2, width2> &);
4348  template <typename Number2, std::size_t width2>
4349  friend VectorizedArray<Number2, width2>
4350  std::abs(const VectorizedArray<Number2, width2> &);
4351  template <typename Number2, std::size_t width2>
4352  friend VectorizedArray<Number2, width2>
4353  std::max(const VectorizedArray<Number2, width2> &,
4354  const VectorizedArray<Number2, width2> &);
4355  template <typename Number2, std::size_t width2>
4356  friend VectorizedArray<Number2, width2>
4357  std::min(const VectorizedArray<Number2, width2> &,
4358  const VectorizedArray<Number2, width2> &);
4359 };
4360 
4361 
4362 
4363 template <>
4364 class VectorizedArray<float, 4>
4365  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
4366 {
4367 public:
4371  using value_type = float;
4372 
4378  DEAL_II_DEPRECATED static const unsigned int n_array_elements = 4;
4379 
4384  VectorizedArray() = default;
4385 
4389  VectorizedArray(const float scalar)
4390  {
4391  this->operator=(scalar);
4392  }
4393 
4398  VectorizedArray &
4399  operator=(const float x)
4400  {
4401  data = vec_splats(x);
4402 
4403  // Some compilers believe that vec_splats sets 'x', but that's not true.
4404  // They then warn about setting a variable and not using it. Suppress the
4405  // warning by "using" the variable:
4406  (void)x;
4407  return *this;
4408  }
4409 
4414  float &operator[](const unsigned int comp)
4415  {
4416  AssertIndexRange(comp, 4);
4417  return *(reinterpret_cast<float *>(&data) + comp);
4418  }
4419 
4424  const float &operator[](const unsigned int comp) const
4425  {
4426  AssertIndexRange(comp, 4);
4427  return *(reinterpret_cast<const float *>(&data) + comp);
4428  }
4429 
4434  VectorizedArray &
4435  operator+=(const VectorizedArray &vec)
4436  {
4437  data = vec_add(data, vec.data);
4438  return *this;
4439  }
4440 
4445  VectorizedArray &
4446  operator-=(const VectorizedArray &vec)
4447  {
4448  data = vec_sub(data, vec.data);
4449  return *this;
4450  }
4451 
4456  VectorizedArray &
4457  operator*=(const VectorizedArray &vec)
4458  {
4459  data = vec_mul(data, vec.data);
4460  return *this;
4461  }
4462 
4467  VectorizedArray &
4468  operator/=(const VectorizedArray &vec)
4469  {
4470  data = vec_div(data, vec.data);
4471  return *this;
4472  }
4473 
4479  void
4480  load(const float *ptr)
4481  {
4482  data = vec_vsx_ld(0, ptr);
4483  }
4484 
4490  void
4491  store(float *ptr) const
4492  {
4493  vec_vsx_st(data, 0, ptr);
4494  }
4495 
4499  void
4500  streaming_store(float *ptr) const
4501  {
4502  store(ptr);
4503  }
4504 
4508  void
4509  gather(const float *base_ptr, const unsigned int *offsets)
4510  {
4511  for (unsigned int i = 0; i < 4; ++i)
4512  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4513  }
4514 
4518  void
4519  scatter(const unsigned int *offsets, float *base_ptr) const
4520  {
4521  for (unsigned int i = 0; i < 4; ++i)
4522  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4523  }
4524 
4530  __vector float data;
4531 
4532 private:
4539  get_sqrt() const
4540  {
4541  VectorizedArray res;
4542  res.data = vec_sqrt(data);
4543  return res;
4544  }
4545 
4552  get_abs() const
4553  {
4554  VectorizedArray res;
4555  res.data = vec_abs(data);
4556  return res;
4557  }
4558 
4565  get_max(const VectorizedArray &other) const
4566  {
4567  VectorizedArray res;
4568  res.data = vec_max(data, other.data);
4569  return res;
4570  }
4571 
4578  get_min(const VectorizedArray &other) const
4579  {
4580  VectorizedArray res;
4581  res.data = vec_min(data, other.data);
4582  return res;
4583  }
4584 
4585  // Make a few functions friends.
4586  template <typename Number2, std::size_t width2>
4587  friend VectorizedArray<Number2, width2>
4588  std::sqrt(const VectorizedArray<Number2, width2> &);
4589  template <typename Number2, std::size_t width2>
4590  friend VectorizedArray<Number2, width2>
4591  std::abs(const VectorizedArray<Number2, width2> &);
4592  template <typename Number2, std::size_t width2>
4593  friend VectorizedArray<Number2, width2>
4594  std::max(const VectorizedArray<Number2, width2> &,
4595  const VectorizedArray<Number2, width2> &);
4596  template <typename Number2, std::size_t width2>
4597  friend VectorizedArray<Number2, width2>
4598  std::min(const VectorizedArray<Number2, width2> &,
4599  const VectorizedArray<Number2, width2> &);
4600 };
4601 
4602 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
4603  // defined(__VSX__)
4604 
4605 
4606 #endif // DOXYGEN
4607 
4612 
4618 template <typename Number, std::size_t width>
4619 inline DEAL_II_ALWAYS_INLINE bool
4620 operator==(const VectorizedArray<Number, width> &lhs,
4621  const VectorizedArray<Number, width> &rhs)
4622 {
4623  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
4624  if (lhs[i] != rhs[i])
4625  return false;
4626 
4627  return true;
4628 }
4629 
4630 
4636 template <typename Number, std::size_t width>
4637 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4638 operator+(const VectorizedArray<Number, width> &u,
4639  const VectorizedArray<Number, width> &v)
4640 {
4641  VectorizedArray<Number, width> tmp = u;
4642  return tmp += v;
4643 }
4644 
4650 template <typename Number, std::size_t width>
4651 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4652 operator-(const VectorizedArray<Number, width> &u,
4653  const VectorizedArray<Number, width> &v)
4654 {
4655  VectorizedArray<Number, width> tmp = u;
4656  return tmp -= v;
4657 }
4658 
4664 template <typename Number, std::size_t width>
4665 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4666 operator*(const VectorizedArray<Number, width> &u,
4667  const VectorizedArray<Number, width> &v)
4668 {
4669  VectorizedArray<Number, width> tmp = u;
4670  return tmp *= v;
4671 }
4672 
4678 template <typename Number, std::size_t width>
4679 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4680 operator/(const VectorizedArray<Number, width> &u,
4681  const VectorizedArray<Number, width> &v)
4682 {
4683  VectorizedArray<Number, width> tmp = u;
4684  return tmp /= v;
4685 }
4686 
4693 template <typename Number, std::size_t width>
4694 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4695  operator+(const Number &u, const VectorizedArray<Number, width> &v)
4696 {
4697  VectorizedArray<Number, width> tmp = u;
4698  return tmp += v;
4699 }
4700 
4709 template <std::size_t width>
4710 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4711  operator+(const double u, const VectorizedArray<float, width> &v)
4712 {
4713  VectorizedArray<float, width> tmp = static_cast<float>(u);
4714  return tmp += v;
4715 }
4716 
4723 template <typename Number, std::size_t width>
4725  operator+(const VectorizedArray<Number, width> &v, const Number &u)
4726 {
4727  return u + v;
4728 }
4729 
4738 template <std::size_t width>
4740  operator+(const VectorizedArray<float, width> &v, const double u)
4741 {
4742  return u + v;
4743 }
4744 
4751 template <typename Number, std::size_t width>
4752 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4753  operator-(const Number &u, const VectorizedArray<Number, width> &v)
4754 {
4755  VectorizedArray<Number, width> tmp = u;
4756  return tmp -= v;
4757 }
4758 
4767 template <std::size_t width>
4769  operator-(const double u, const VectorizedArray<float, width> &v)
4770 {
4771  VectorizedArray<float, width> tmp = static_cast<float>(u);
4772  return tmp -= v;
4773 }
4774 
4781 template <typename Number, std::size_t width>
4782 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4783  operator-(const VectorizedArray<Number, width> &v, const Number &u)
4784 {
4785  VectorizedArray<Number, width> tmp = u;
4786  return v - tmp;
4787 }
4788 
4797 template <std::size_t width>
4799  operator-(const VectorizedArray<float, width> &v, const double u)
4800 {
4801  VectorizedArray<float, width> tmp = static_cast<float>(u);
4802  return v - tmp;
4803 }
4804 
4811 template <typename Number, std::size_t width>
4812 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4813  operator*(const Number &u, const VectorizedArray<Number, width> &v)
4814 {
4815  VectorizedArray<Number, width> tmp = u;
4816  return tmp *= v;
4817 }
4818 
4827 template <std::size_t width>
4829  operator*(const double u, const VectorizedArray<float, width> &v)
4830 {
4831  VectorizedArray<float, width> tmp = static_cast<float>(u);
4832  return tmp *= v;
4833 }
4834 
4841 template <typename Number, std::size_t width>
4843  operator*(const VectorizedArray<Number, width> &v, const Number &u)
4844 {
4845  return u * v;
4846 }
4847 
4856 template <std::size_t width>
4858  operator*(const VectorizedArray<float, width> &v, const double u)
4859 {
4860  return u * v;
4861 }
4862 
4869 template <typename Number, std::size_t width>
4870 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4871  operator/(const Number &u, const VectorizedArray<Number, width> &v)
4872 {
4873  VectorizedArray<Number, width> tmp = u;
4874  return tmp /= v;
4875 }
4876 
4885 template <std::size_t width>
4887  operator/(const double u, const VectorizedArray<float, width> &v)
4888 {
4889  VectorizedArray<float, width> tmp = static_cast<float>(u);
4890  return tmp /= v;
4891 }
4892 
4899 template <typename Number, std::size_t width>
4900 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4901  operator/(const VectorizedArray<Number, width> &v, const Number &u)
4902 {
4903  VectorizedArray<Number, width> tmp = u;
4904  return v / tmp;
4905 }
4906 
4915 template <std::size_t width>
4917  operator/(const VectorizedArray<float, width> &v, const double u)
4918 {
4919  VectorizedArray<float, width> tmp = static_cast<float>(u);
4920  return v / tmp;
4921 }
4922 
4928 template <typename Number, std::size_t width>
4929 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4930  operator+(const VectorizedArray<Number, width> &u)
4931 {
4932  return u;
4933 }
4934 
4940 template <typename Number, std::size_t width>
4941 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4942  operator-(const VectorizedArray<Number, width> &u)
4943 {
4944  // to get a negative sign, subtract the input from zero (could also
4945  // multiply by -1, but this one is slightly simpler)
4946  return VectorizedArray<Number, width>() - u;
4947 }
4948 
4954 template <typename Number, std::size_t width>
4955 inline std::ostream &
4956 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
4957 {
4958  constexpr unsigned int n = VectorizedArray<Number, width>::size();
4959  for (unsigned int i = 0; i < n - 1; ++i)
4960  out << p[i] << ' ';
4961  out << p[n - 1];
4962 
4963  return out;
4964 }
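// A minimal usage sketch for the free operators and the stream output above;
// the number of printed lanes depends on the vector width selected at
// configure time.
//
//   VectorizedArray<double> a = 2.0;
//   VectorizedArray<double> b = 3.0;
//   const auto c = 0.5 * (a + b) - 1.0; // every lane holds 1.5
//   std::cout << c << std::endl;        // prints one value per lane, space separated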
4965 
4967 
4972 
4973 
4981 enum class SIMDComparison : int
4982 {
4983 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
4984  equal = _CMP_EQ_OQ,
4985  not_equal = _CMP_NEQ_OQ,
4986  less_than = _CMP_LT_OQ,
4987  less_than_or_equal = _CMP_LE_OQ,
4988  greater_than = _CMP_GT_OQ,
4989  greater_than_or_equal = _CMP_GE_OQ
4990 #else
4991  equal,
4992  not_equal,
4993  less_than,
4994  less_than_or_equal,
4995  greater_than,
4996  greater_than_or_equal
4997 #endif
4998 };
4999 
5000 
5064 template <SIMDComparison predicate, typename Number>
5065 DEAL_II_ALWAYS_INLINE inline Number
5066 compare_and_apply_mask(const Number &left,
5067  const Number &right,
5068  const Number &true_value,
5069  const Number &false_value)
5070 {
5071  bool mask;
5072  switch (predicate)
5073  {
5074  case SIMDComparison::equal:
5075  mask = (left == right);
5076  break;
5077  case SIMDComparison::not_equal:
5078  mask = (left != right);
5079  break;
5080  case SIMDComparison::less_than:
5081  mask = (left < right);
5082  break;
5083  case SIMDComparison::less_than_or_equal:
5084  mask = (left <= right);
5085  break;
5086  case SIMDComparison::greater_than:
5087  mask = (left > right);
5088  break;
5089  case SIMDComparison::greater_than_or_equal:
5090  mask = (left >= right);
5091  break;
5092  }
5093 
5094  return mask ? true_value : false_value;
5095 }
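// A minimal usage sketch: compare_and_apply_mask() is a branch-free, lane-wise
// select. For instance, clamping negative entries to zero can be written as
// follows ('x' is assumed to hold the input values).
//
//   VectorizedArray<double> x = -1.0;
//   const auto clamped =
//     compare_and_apply_mask<SIMDComparison::less_than>(
//       x,                           // left
//       VectorizedArray<double>(0.), // right
//       VectorizedArray<double>(0.), // value taken where x <  0
//       x);                          // value taken where x >= 0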
5096 
5097 
5102 template <SIMDComparison predicate, typename Number>
5103 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
5104 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
5105  const VectorizedArray<Number, 1> &right,
5106  const VectorizedArray<Number, 1> &true_value,
5107  const VectorizedArray<Number, 1> &false_value)
5108 {
5109  VectorizedArray<Number, 1> result;
5110  result.data = compare_and_apply_mask<predicate, Number>(left.data,
5111  right.data,
5112  true_value.data,
5113  false_value.data);
5114  return result;
5115 }
5116 
5118 
5119 #ifndef DOXYGEN
5120 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
5121 
5122 template <SIMDComparison predicate>
5123 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
5124 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
5125  const VectorizedArray<float, 16> &right,
5126  const VectorizedArray<float, 16> &true_values,
5127  const VectorizedArray<float, 16> &false_values)
5128 {
5129  const __mmask16 mask =
5130  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
5131  VectorizedArray<float, 16> result;
5132  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
5133  return result;
5134 }
5135 
5136 
5137 
5138 template <SIMDComparison predicate>
5139 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
5140 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
5141  const VectorizedArray<double, 8> &right,
5142  const VectorizedArray<double, 8> &true_values,
5143  const VectorizedArray<double, 8> &false_values)
5144 {
5145  const __mmask16 mask =
5146  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
5147  VectorizedArray<double, 8> result;
5148  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
5149  return result;
5150 }
5151 
5152 # endif
5153 
5154 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5155 
5156 template <SIMDComparison predicate>
5157 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
5158 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
5159  const VectorizedArray<float, 8> &right,
5160  const VectorizedArray<float, 8> &true_values,
5161  const VectorizedArray<float, 8> &false_values)
5162 {
5163  const auto mask =
5164  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
5165 
5166  VectorizedArray<float, 8> result;
5167  result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
5168  _mm256_andnot_ps(mask, false_values.data));
5169  return result;
5170 }
5171 
5172 
5173 template <SIMDComparison predicate>
5174 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
5175 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
5176  const VectorizedArray<double, 4> &right,
5177  const VectorizedArray<double, 4> &true_values,
5178  const VectorizedArray<double, 4> &false_values)
5179 {
5180  const auto mask =
5181  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
5182 
5183  VectorizedArray<double, 4> result;
5184  result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
5185  _mm256_andnot_pd(mask, false_values.data));
5186  return result;
5187 }
5188 
5189 # endif
5190 
5191 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
5192 
5193 template <SIMDComparison predicate>
5194 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
5195 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
5196  const VectorizedArray<float, 4> &right,
5197  const VectorizedArray<float, 4> &true_values,
5198  const VectorizedArray<float, 4> &false_values)
5199 {
5200  __m128 mask;
5201  switch (predicate)
5202  {
5203  case SIMDComparison::equal:
5204  mask = _mm_cmpeq_ps(left.data, right.data);
5205  break;
5206  case SIMDComparison::not_equal:
5207  mask = _mm_cmpneq_ps(left.data, right.data);
5208  break;
5209  case SIMDComparison::less_than:
5210  mask = _mm_cmplt_ps(left.data, right.data);
5211  break;
5212  case SIMDComparison::less_than_or_equal:
5213  mask = _mm_cmple_ps(left.data, right.data);
5214  break;
5215  case SIMDComparison::greater_than:
5216  mask = _mm_cmpgt_ps(left.data, right.data);
5217  break;
5218  case SIMDComparison::greater_than_or_equal:
5219  mask = _mm_cmpge_ps(left.data, right.data);
5220  break;
5221  }
5222 
5223  VectorizedArray<float, 4> result;
5224  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
5225  _mm_andnot_ps(mask, false_values.data));
5226 
5227  return result;
5228 }
5229 
5230 
5231 template <SIMDComparison predicate>
5232 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
5233 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
5234  const VectorizedArray<double, 2> &right,
5235  const VectorizedArray<double, 2> &true_values,
5236  const VectorizedArray<double, 2> &false_values)
5237 {
5238  __m128d mask;
5239  switch (predicate)
5240  {
5241  case SIMDComparison::equal:
5242  mask = _mm_cmpeq_pd(left.data, right.data);
5243  break;
5244  case SIMDComparison::not_equal:
5245  mask = _mm_cmpneq_pd(left.data, right.data);
5246  break;
5247  case SIMDComparison::less_than:
5248  mask = _mm_cmplt_pd(left.data, right.data);
5249  break;
5250  case SIMDComparison::less_than_or_equal:
5251  mask = _mm_cmple_pd(left.data, right.data);
5252  break;
5253  case SIMDComparison::greater_than:
5254  mask = _mm_cmpgt_pd(left.data, right.data);
5255  break;
5256  case SIMDComparison::greater_than_or_equal:
5257  mask = _mm_cmpge_pd(left.data, right.data);
5258  break;
5259  }
5260 
5261  VectorizedArray<double, 2> result;
5262  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
5263  _mm_andnot_pd(mask, false_values.data));
5264 
5265  return result;
5266 }
5267 
5268 # endif
5269 #endif // DOXYGEN
5270 
5271 
5273 
5280 namespace std
5281 {
5289  template <typename Number, std::size_t width>
5290  inline ::VectorizedArray<Number, width>
5291  sin(const ::VectorizedArray<Number, width> &x)
5292  {
5293  // put values in an array and later read in that array with an unaligned
5294  // read. This should save some instructions as compared to directly
5295  // setting the individual elements and also circumvents a compiler
5296  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
5297  // from April 2014, topic "matrix_free/step-48 Test").
5298  Number values[::VectorizedArray<Number, width>::size()];
5299  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5300  ++i)
5301  values[i] = std::sin(x[i]);
5302  ::VectorizedArray<Number, width> out;
5303  out.load(&values[0]);
5304  return out;
5305  }
5306 
5307 
5308 
5316  template <typename Number, std::size_t width>
5317  inline ::VectorizedArray<Number, width>
5318  cos(const ::VectorizedArray<Number, width> &x)
5319  {
5320  Number values[::VectorizedArray<Number, width>::size()];
5321  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5322  ++i)
5323  values[i] = std::cos(x[i]);
5324  ::VectorizedArray<Number, width> out;
5325  out.load(&values[0]);
5326  return out;
5327  }
5328 
5329 
5330 
5338  template <typename Number, std::size_t width>
5339  inline ::VectorizedArray<Number, width>
5340  tan(const ::VectorizedArray<Number, width> &x)
5341  {
5342  Number values[::VectorizedArray<Number, width>::size()];
5343  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5344  ++i)
5345  values[i] = std::tan(x[i]);
5346  ::VectorizedArray<Number, width> out;
5347  out.load(&values[0]);
5348  return out;
5349  }
5350 
5351 
5352 
5360  template <typename Number, std::size_t width>
5361  inline ::VectorizedArray<Number, width>
5362  exp(const ::VectorizedArray<Number, width> &x)
5363  {
5364  Number values[::VectorizedArray<Number, width>::size()];
5365  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
5366  ++i)
5367  values[i] = std::exp(x[i]);
5368  ::VectorizedArray<Number, width> out;
5369  out.load(&values[0]);
5370  return out;
5371  }
5372 
5373 
5374 
5382  template <typename Number, std::size_t width>
5383  inline ::dealii::VectorizedArray<Number, width>
5384  log(const ::dealii::VectorizedArray<Number, width> &x)
5385  {
5386  Number values[::dealii::VectorizedArray<Number, width>::size()];
5387  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5388  ++i)
5389  values[i] = std::log(x[i]);
5390  ::dealii::VectorizedArray<Number, width> out;
5391  out.load(&values[0]);
5392  return out;
5393  }
5394 
5395 
5396 
5404  template <typename Number, std::size_t width>
5405  inline ::dealii::VectorizedArray<Number, width>
5406  sqrt(const ::dealii::VectorizedArray<Number, width> &x)
5407  {
5408  return x.get_sqrt();
5409  }
5410 
5411 
5412 
5420  template <typename Number, std::size_t width>
5421  inline ::dealii::VectorizedArray<Number, width>
5422  pow(const ::dealii::VectorizedArray<Number, width> &x, const Number p)
5423  {
5424  Number values[::dealii::VectorizedArray<Number, width>::size()];
5425  for (unsigned int i = 0; i < ::dealii::VectorizedArray<Number, width>::size();
5426  ++i)
5427  values[i] = std::pow(x[i], p);
5428  ::dealii::VectorizedArray<Number, width> out;
5429  out.load(&values[0]);
5430  return out;
5431  }
5432 
5433 
5434 
5442  template <typename Number, std::size_t width>
5443  inline ::dealii::VectorizedArray<Number, width>
5444  abs(const ::dealii::VectorizedArray<Number, width> &x)
5445  {
5446  return x.get_abs();
5447  }
5448 
5449 
5450 
5458  template <typename Number, std::size_t width>
5459  inline ::dealii::VectorizedArray<Number, width>
5460  max(const ::dealii::VectorizedArray<Number, width> &x,
5461  const ::dealii::VectorizedArray<Number, width> &y)
5462  {
5463  return x.get_max(y);
5464  }
5465 
5466 
5467 
5475  template <typename Number, std::size_t width>
5476  inline ::dealii::VectorizedArray<Number, width>
5477  min(const ::dealii::VectorizedArray<Number, width> &x,
5478  const ::dealii::VectorizedArray<Number, width> &y)
5479  {
5480  return x.get_min(y);
5481  }
5482 
5483 
5484 
5488  template <class T>
5489  struct iterator_traits<dealii::VectorizedArrayIterator<T>>
5490  {
5491  using iterator_category = random_access_iterator_tag;
5492  using value_type = typename T::value_type;
5493  using difference_type = std::ptrdiff_t;
5494  };
5495 
5496 } // namespace std
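
A final editorial sketch: this iterator_traits specialization, together with the begin()/end() lane iterators of VectorizedArray declared earlier in this file, lets standard facilities treat the lanes as a random-access range. The helper name sum_of_lanes is hypothetical.

#include <deal.II/base/vectorization.h>

#include <cstddef>
#include <iterator>

double
sum_of_lanes(const dealii::VectorizedArray<double> &x)
{
  // std::distance consults std::iterator_traits for the iterator category
  // and difference_type of VectorizedArrayIterator.
  const std::ptrdiff_t n_lanes = std::distance(x.begin(), x.end());
  (void)n_lanes; // equals VectorizedArray<double>::size()

  double sum = 0.;
  for (const double lane : x) // range-based iteration over the lanes
    sum += lane;
  return sum;
}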
5497 
5498 #endif