vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2023 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
24 
25 #include <array>
26 #include <cmath>
27 
28 // Note:
29 // The flag DEAL_II_VECTORIZATION_WIDTH_IN_BITS is essentially constructed
30 // according to the following scheme (on x86-based architectures)
31 // #ifdef __AVX512F__
32 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 512
33 // #elif defined (__AVX__)
34 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 256
35 // #elif defined (__SSE2__)
36 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 128
37 // #else
38 // #define DEAL_II_VECTORIZATION_WIDTH_IN_BITS 0
39 // #endif
40 // In addition to checking the flags __AVX512F__, __AVX__ and __SSE2__, a CMake
41 // test, 'check_01_cpu_features.cmake', ensures that these features are not only
42 // present in the compilation unit but also working properly.
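// Usage sketch (editorial addition, not part of the original header): user
// code can branch on the configured SIMD width at compile time, e.g.
//
//   #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256
//   // AVX or wider: VectorizedArray<double> has 4 (or, with AVX-512, 8) lanes
//   #else
//   // SSE2 (2 lanes), 128-bit Neon/AltiVec, or the scalar fallback (1 lane)
//   #endif
//
// The number of lanes can also be queried portably, and without the
// preprocessor, via VectorizedArray<double>::size().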
43 
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
45 
46 // These error messages try to detect the case that deal.II was compiled with
47 // a wider instruction set extension than the current compilation unit, for
48 // example because deal.II was compiled with AVX, but a user project does not
49 // add -march=native or similar flags, making it fall back to SSE2. This leads to
50 // very strange errors as the size of data structures differs between the
51 // compiled deal.II code sitting in libdeal_II.so and the user code if not
52 // detected.
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
54 # error \
55  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
56 # endif
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
58 # error \
59  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
60 # endif
61 
62 # ifdef _MSC_VER
63 # include <intrin.h>
64 # elif defined(__ALTIVEC__)
65 # include <altivec.h>
66 
67 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
68 // them before they make trouble
69 # undef vector
70 # undef pixel
71 # undef bool
72 # elif defined(__ARM_NEON)
73 # include <arm_neon.h>
74 # elif defined(__x86_64__)
75 # include <x86intrin.h>
76 # endif
77 
78 #endif
79 
80 
82 
83 
84 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
85 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
86 
87 template <typename Number, std::size_t width>
88 struct EnableIfScalar<VectorizedArray<Number, width>>
89 {
90  using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
91 };
92 
93 
94 
98 template <typename T>
99 class VectorizedArrayIterator
100 {
101 public:
108  VectorizedArrayIterator(T &data, const std::size_t lane)
109  : data(&data)
110  , lane(lane)
111  {}
112 
116  bool
117  operator==(const VectorizedArrayIterator<T> &other) const
118  {
119  Assert(this->data == other.data,
120  ExcMessage(
121  "You are trying to compare iterators into different arrays."));
122  return this->lane == other.lane;
123  }
124 
128  bool
129  operator!=(const VectorizedArrayIterator<T> &other) const
130  {
131  Assert(this->data == other.data,
132  ExcMessage(
133  "You are trying to compare iterators into different arrays."));
134  return this->lane != other.lane;
135  }
136 
140  VectorizedArrayIterator<T> &
141  operator=(const VectorizedArrayIterator<T> &other) = default;
142 
147  const typename T::value_type &
148  operator*() const
149  {
150  AssertIndexRange(lane, T::size());
151  return (*data)[lane];
152  }
153 
154 
159  template <typename U = T>
160  std::enable_if_t<!std::is_same_v<U, const U>, typename T::value_type> &
161  operator*()
162  {
163  AssertIndexRange(lane, T::size());
164  return (*data)[lane];
165  }
166 
172  VectorizedArrayIterator<T> &
173  operator++()
174  {
175  AssertIndexRange(lane + 1, T::size() + 1);
176  lane++;
177  return *this;
178  }
179 
184  VectorizedArrayIterator<T> &
185  operator+=(const std::size_t offset)
186  {
187  AssertIndexRange(lane + offset, T::size() + 1);
188  lane += offset;
189  return *this;
190  }
191 
197  VectorizedArrayIterator<T> &
198  operator--()
199  {
200  Assert(
201  lane > 0,
202  ExcMessage(
203  "You can't decrement an iterator that is already at the beginning of the range."));
204  --lane;
205  return *this;
206  }
207 
211  VectorizedArrayIterator<T>
212  operator+(const std::size_t &offset) const
213  {
214  AssertIndexRange(lane + offset, T::size() + 1);
215  return VectorizedArrayIterator<T>(*data, lane + offset);
216  }
217 
221  std::ptrdiff_t
222  operator-(const VectorizedArrayIterator<T> &other) const
223  {
224  return static_cast<std::ptrdiff_t>(lane) -
225  static_cast<ptrdiff_t>(other.lane);
226  }
227 
228 private:
232  T *data;
233 
237  std::size_t lane;
238 };
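// Usage sketch (editorial addition, not part of the original header): the
// iterator above walks the lanes of the wrapped array, so a VectorizedArray
// can be traversed like any other range (begin()/end() are provided by
// VectorizedArrayBase below), e.g.
//
//   VectorizedArray<double> v = 3.0;
//   double total = 0.;
//   for (const double lane : v)
//     total += lane;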
239 
240 
241 
251 template <typename T, std::size_t width>
252 class VectorizedArrayBase
253 {
254 public:
258  VectorizedArrayBase() = default;
259 
263  template <typename U>
264  VectorizedArrayBase(const std::initializer_list<U> &list)
265  {
266  auto i0 = this->begin();
267  auto i1 = list.begin();
268 
269  for (; i1 != list.end(); ++i0, ++i1)
270  {
271  Assert(
272  i0 != this->end(),
273  ExcMessage(
274  "Initializer list exceeds size of this VectorizedArray object."));
275 
276  *i0 = *i1;
277  }
278 
279  for (; i0 != this->end(); ++i0)
280  {
281  *i0 = 0.0;
282  }
283  }
284 
288  static constexpr std::size_t
289  size()
290  {
291  return width;
292  }
293 
297  VectorizedArrayIterator<T>
298  begin()
299  {
300  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
301  }
302 
307  VectorizedArrayIterator<const T>
308  begin() const
309  {
310  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
311  }
312 
316  VectorizedArrayIterator<T>
317  end()
318  {
319  return VectorizedArrayIterator<T>(static_cast<T &>(*this), width);
320  }
321 
326  VectorizedArrayIterator<const T>
327  end() const
328  {
329  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
330  width);
331  }
332 };
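// Usage sketch (editorial addition, not part of the original header,
// assuming a build with at least two lanes per double): the base class
// supplies the lane count and the initializer-list constructor that all
// specializations below inherit, e.g.
//
//   VectorizedArray<double> v = {1., 2.};   // remaining lanes are set to 0.0
//   constexpr std::size_t n_lanes = VectorizedArray<double>::size();
//
// A list longer than size() triggers the assertion in the constructor above
// (in debug mode).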
333 
334 
335 
420 template <typename Number, std::size_t width>
421 class VectorizedArray
422  : public VectorizedArrayBase<VectorizedArray<Number, width>, 1>
423 {
424 public:
428  using value_type = Number;
429 
430  static_assert(width == 1,
431  "You specified an illegal width that is not supported.");
432 
437  VectorizedArray() = default;
438 
442  VectorizedArray(const Number scalar)
443  {
444  this->operator=(scalar);
445  }
446 
450  template <typename U>
451  VectorizedArray(const std::initializer_list<U> &list)
452  : VectorizedArrayBase<VectorizedArray<Number, width>, 1>(list)
453  {}
454 
459  VectorizedArray &
460  operator=(const Number scalar) &
461  {
462  data = scalar;
463  return *this;
464  }
465 
471  VectorizedArray &
472  operator=(const Number scalar) && = delete;
473 
479  Number &
480  operator[](const unsigned int comp)
481  {
482  (void)comp;
483  AssertIndexRange(comp, 1);
484  return data;
485  }
486 
492  const Number &
493  operator[](const unsigned int comp) const
494  {
495  (void)comp;
496  AssertIndexRange(comp, 1);
497  return data;
498  }
499 
504  VectorizedArray &
505  operator+=(const VectorizedArray &vec)
506  {
507  data += vec.data;
508  return *this;
509  }
510 
515  VectorizedArray &
516  operator-=(const VectorizedArray &vec)
517  {
518  data -= vec.data;
519  return *this;
520  }
521 
526  VectorizedArray &
527  operator*=(const VectorizedArray &vec)
528  {
529  data *= vec.data;
530  return *this;
531  }
532 
537  VectorizedArray &
538  operator/=(const VectorizedArray &vec)
539  {
540  data /= vec.data;
541  return *this;
542  }
543 
550  template <typename OtherNumber>
552  load(const OtherNumber *ptr)
553  {
554  data = *ptr;
555  }
556 
563  template <typename OtherNumber>
565  store(OtherNumber *ptr) const
566  {
567  *ptr = data;
568  }
569 
617  void
618  streaming_store(Number *ptr) const
619  {
620  *ptr = data;
621  }
622 
636  void
637  gather(const Number *base_ptr, const unsigned int *offsets)
638  {
639  data = base_ptr[offsets[0]];
640  }
641 
655  void
656  scatter(const unsigned int *offsets, Number *base_ptr) const
657  {
658  base_ptr[offsets[0]] = data;
659  }
660 
666  Number
667  sum() const
668  {
669  return data;
670  }
671 
677  Number data;
678 
679 private:
685  VectorizedArray
686  get_sqrt() const
687  {
688  VectorizedArray res;
689  res.data = std::sqrt(data);
690  return res;
691  }
692 
698  VectorizedArray
699  get_abs() const
700  {
701  VectorizedArray res;
702  res.data = std::fabs(data);
703  return res;
704  }
705 
711  VectorizedArray
712  get_max(const VectorizedArray &other) const
713  {
714  VectorizedArray res;
715  res.data = std::max(data, other.data);
716  return res;
717  }
718 
724  VectorizedArray
725  get_min(const VectorizedArray &other) const
726  {
727  VectorizedArray res;
728  res.data = std::min(data, other.data);
729  return res;
730  }
731 
732  // Make a few functions friends.
733  template <typename Number2, std::size_t width2>
736  template <typename Number2, std::size_t width2>
739  template <typename Number2, std::size_t width2>
743  template <typename Number2, std::size_t width2>
747 };
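// Usage sketch (editorial addition, not part of the original header): the
// generic width-1 class behaves exactly like its underlying scalar, so code
// written against the VectorizedArray interface compiles unchanged when no
// instruction-set extension is available, e.g.
//
//   double src[1] = {2.0}, dst[1];
//   VectorizedArray<double, 1> a, b = 3.0;
//   a.load(src);               // a.data == 2.0
//   a += b;                    // lane-wise addition (a single lane here)
//   a.store(dst);              // dst[0] == 5.0
//   const double s = a.sum();  // == 5.0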
748 
749 
750 
762 template <typename Number,
763            std::size_t width =
764              internal::VectorizedArrayWidthSpecifier<Number>::max_width>
765 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
766 make_vectorized_array(const Number &u)
767 {
768  VectorizedArray<Number, width> result = u;
769  return result;
770 }
771 
772 
773 
780 template <typename VectorizedArrayType>
781 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
782 make_vectorized_array(const typename VectorizedArrayType::value_type &u)
783 {
784  static_assert(
785  std::is_same_v<VectorizedArrayType,
786  VectorizedArray<typename VectorizedArrayType::value_type,
787  VectorizedArrayType::size()>>,
788  "VectorizedArrayType is not a VectorizedArray.");
789 
790  VectorizedArrayType result = u;
791  return result;
792 }
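// Usage sketch (editorial addition, not part of the original header):
//
//   // all lanes set to 1.5, using the default (maximal) width for double
//   const auto v = make_vectorized_array(1.5);
//
//   // same value, but with the array type spelled out explicitly
//   const auto w = make_vectorized_array<VectorizedArray<double>>(1.5);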
793 
794 
795 
807 template <typename Number, std::size_t width>
808 inline DEAL_II_ALWAYS_INLINE void
809 gather(VectorizedArray<Number, width> &out,
810  const std::array<Number *, width> &ptrs,
811  const unsigned int offset)
812 {
813  for (unsigned int v = 0; v < width; ++v)
814  out.data[v] = ptrs[v][offset];
815 }
816 
817 
818 
844 template <typename Number, std::size_t width>
845 inline DEAL_II_ALWAYS_INLINE void
846 vectorized_load_and_transpose(const unsigned int n_entries,
847  const Number *in,
848  const unsigned int *offsets,
849  VectorizedArray<Number, width> *out)
850 {
851  for (unsigned int i = 0; i < n_entries; ++i)
852  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
853  out[i][v] = in[offsets[v] + i];
854 }
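// Usage sketch (editorial addition, not part of the original header): lane v
// of out[i] receives in[offsets[v] + i], i.e. the function transposes
// 'width' separate input streams into an array-of-lanes layout:
//
//   // two streams of 3 doubles each, stored back to back (width == 2)
//   double             in[6]      = {1., 2., 3., 10., 20., 30.};
//   const unsigned int offsets[2] = {0, 3};
//   VectorizedArray<double, 2> out[3];
//   vectorized_load_and_transpose(3, in, offsets, out);
//   // out[0] = (1., 10.), out[1] = (2., 20.), out[2] = (3., 30.)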
855 
856 
868 template <typename Number, std::size_t width>
869 inline DEAL_II_ALWAYS_INLINE void
870 vectorized_load_and_transpose(const unsigned int n_entries,
871  const std::array<Number *, width> &in,
872  VectorizedArray<Number, width> *out)
873 {
874  for (unsigned int i = 0; i < n_entries; ++i)
875  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
876  out[i][v] = in[v][i];
877 }
878 
879 
880 
919 template <typename Number, std::size_t width>
920 inline DEAL_II_ALWAYS_INLINE void
921 vectorized_transpose_and_store(const bool add_into,
922  const unsigned int n_entries,
923  const VectorizedArray<Number, width> *in,
924  const unsigned int *offsets,
925  Number *out)
926 {
927  if (add_into)
928  for (unsigned int i = 0; i < n_entries; ++i)
929  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
930  out[offsets[v] + i] += in[i][v];
931  else
932  for (unsigned int i = 0; i < n_entries; ++i)
933  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
934  out[offsets[v] + i] = in[i][v];
935 }
936 
937 
949 template <typename Number, std::size_t width>
950 inline DEAL_II_ALWAYS_INLINE void
951 vectorized_transpose_and_store(const bool add_into,
952  const unsigned int n_entries,
953  const VectorizedArray<Number, width> *in,
954  std::array<Number *, width> &out)
955 {
956  if (add_into)
957  for (unsigned int i = 0; i < n_entries; ++i)
958  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
959  out[v][i] += in[i][v];
960  else
961  for (unsigned int i = 0; i < n_entries; ++i)
962  for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
963  out[v][i] = in[i][v];
964 }
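// Usage sketch (editorial addition, not part of the original header;
// 'field_a' and 'field_b' are hypothetical user arrays): the store functions
// are the inverse of the load functions above, so a typical pattern is
// load-transpose, compute on VectorizedArray data, transpose-store:
//
//   constexpr unsigned int n = 8;
//   std::array<double *, 2> ptrs = {{field_a, field_b}};
//   VectorizedArray<double, 2> tmp[n];
//   vectorized_load_and_transpose(n, ptrs, tmp);
//   for (unsigned int i = 0; i < n; ++i)
//     tmp[i] *= 2.;
//   vectorized_transpose_and_store(false, n, tmp, ptrs);  // false: overwrite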
965 
966 
969 #ifndef DOXYGEN
970 
971 # if defined(DEAL_II_HAVE_ARM_NEON) && defined(__ARM_NEON)
972 
976 template <>
977 class VectorizedArray<double, 2>
978  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
979 {
980 public:
984  using value_type = double;
985 
990  VectorizedArray() = default;
991 
995  VectorizedArray(const double scalar)
996  {
997  this->operator=(scalar);
998  }
999 
1003  template <typename U>
1004  VectorizedArray(const std::initializer_list<U> &list)
1005  : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
1006  {}
1007 
1011  VectorizedArray &
1012  operator=(const double x) &
1013  {
1014  data = vdupq_n_f64(x);
1015  return *this;
1016  }
1017 
1023  VectorizedArray &
1024  operator=(const double scalar) && = delete;
1025 
1029  double &
1030  operator[](const unsigned int comp)
1031  {
1032  return *(reinterpret_cast<double *>(&data) + comp);
1033  }
1034 
1038  const double &
1039  operator[](const unsigned int comp) const
1040  {
1041  return *(reinterpret_cast<const double *>(&data) + comp);
1042  }
1043 
1047  VectorizedArray &
1048  operator+=(const VectorizedArray &vec)
1049  {
1050  data = vaddq_f64(data, vec.data);
1051  return *this;
1052  }
1053 
1057  VectorizedArray &
1058  operator-=(const VectorizedArray &vec)
1059  {
1060  data = vsubq_f64(data, vec.data);
1061  return *this;
1062  }
1063 
1067  VectorizedArray &
1068  operator*=(const VectorizedArray &vec)
1069  {
1070  data = vmulq_f64(data, vec.data);
1071  return *this;
1072  }
1073 
1077  VectorizedArray &
1078  operator/=(const VectorizedArray &vec)
1079  {
1080  data = vdivq_f64(data, vec.data);
1081  return *this;
1082  }
1083 
1089  void
1090  load(const double *ptr)
1091  {
1092  data = vld1q_f64(ptr);
1093  }
1094 
1096  void
1097  load(const float *ptr)
1098  {
1100  for (unsigned int i = 0; i < 2; ++i)
1101  data[i] = ptr[i];
1102  }
1103 
1110  void
1111  store(double *ptr) const
1112  {
1113  vst1q_f64(ptr, data);
1114  }
1115 
1117  void
1118  store(float *ptr) const
1119  {
1121  for (unsigned int i = 0; i < 2; ++i)
1122  ptr[i] = data[i];
1123  }
1124 
1130  void
1131  streaming_store(double *ptr) const
1132  {
1133  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1134  ExcMessage("Memory not aligned"));
1135  vst1q_f64(ptr, data);
1136  }
1137 
1150  void
1151  gather(const double *base_ptr, const unsigned int *offsets)
1152  {
1153  for (unsigned int i = 0; i < 2; ++i)
1154  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1155  }
1156 
1169  void
1170  scatter(const unsigned int *offsets, double *base_ptr) const
1171  {
1172  for (unsigned int i = 0; i < 2; ++i)
1173  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1174  }
1175 
1180  double
1181  sum() const
1182  {
1183  return vaddvq_f64(data);
1184  }
1185 
1191  mutable float64x2_t data;
1192 
1193 private:
1199  get_sqrt() const
1200  {
1201  VectorizedArray res;
1202  res.data = vsqrtq_f64(data);
1203  return res;
1204  }
1205 
1211  get_abs() const
1212  {
1213  VectorizedArray res;
1214  res.data = vabsq_f64(data);
1215  return res;
1216  }
1217 
1223  get_max(const VectorizedArray &other) const
1224  {
1225  VectorizedArray res;
1226  res.data = vmaxq_f64(data, other.data);
1227  return res;
1228  }
1229 
1235  get_min(const VectorizedArray &other) const
1236  {
1237  VectorizedArray res;
1238  res.data = vminq_f64(data, other.data);
1239  return res;
1240  }
1241 
1242  // Make a few functions friends.
1243  template <typename Number2, std::size_t width2>
1246  template <typename Number2, std::size_t width2>
1249  template <typename Number2, std::size_t width2>
1253  template <typename Number2, std::size_t width2>
1257 };
1258 
1262 template <>
1263 class VectorizedArray<float, 4>
1264  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
1265 {
1266 public:
1270  using value_type = float;
1271 
1276  VectorizedArray() = default;
1277 
1281  VectorizedArray(const float scalar)
1282  {
1283  this->operator=(scalar);
1284  }
1285 
1289  template <typename U>
1290  VectorizedArray(const std::initializer_list<U> &list)
1291  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
1292  {}
1293 
1297  VectorizedArray &
1298  operator=(const float x) &
1299  {
1300  data = vdupq_n_f32(x);
1301  return *this;
1302  }
1303 
1309  VectorizedArray &
1310  operator=(const float scalar) && = delete;
1311 
1315  value_type &
1316  operator[](const unsigned int comp)
1317  {
1318  return *(reinterpret_cast<float *>(&data) + comp);
1319  }
1320 
1324  const value_type &
1325  operator[](const unsigned int comp) const
1326  {
1327  return *(reinterpret_cast<const float *>(&data) + comp);
1328  }
1329 
1333  VectorizedArray &
1334  operator+=(const VectorizedArray &vec)
1335  {
1336  data = vaddq_f32(data, vec.data);
1337  return *this;
1338  }
1339 
1343  VectorizedArray &
1344  operator-=(const VectorizedArray &vec)
1345  {
1346  data = vsubq_f32(data, vec.data);
1347  return *this;
1348  }
1349 
1353  VectorizedArray &
1354  operator*=(const VectorizedArray &vec)
1355  {
1356  data = vmulq_f32(data, vec.data);
1357  return *this;
1358  }
1359 
1363  VectorizedArray &
1364  operator/=(const VectorizedArray &vec)
1365  {
1366  data = vdivq_f32(data, vec.data);
1367  return *this;
1368  }
1369 
1375  void
1376  load(const float *ptr)
1377  {
1378  data = vld1q_f32(ptr);
1379  }
1380 
1387  void
1388  store(float *ptr) const
1389  {
1390  vst1q_f32(ptr, data);
1391  }
1392 
1398  void
1399  streaming_store(float *ptr) const
1400  {
1401  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1402  ExcMessage("Memory not aligned"));
1403  vst1q_f32(ptr, data);
1404  }
1405 
1418  void
1419  gather(const float *base_ptr, const unsigned int *offsets)
1420  {
1421  for (unsigned int i = 0; i < 4; ++i)
1422  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1423  }
1424 
1437  void
1438  scatter(const unsigned int *offsets, float *base_ptr) const
1439  {
1440  for (unsigned int i = 0; i < 4; ++i)
1441  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
1442  }
1443 
1448  float
1449  sum() const
1450  {
1451  return vaddvq_f32(data);
1452  }
1453 
1459  mutable float32x4_t data;
1460 
1461 private:
1467  get_sqrt() const
1468  {
1469  VectorizedArray res;
1470  res.data = vsqrtq_f32(data);
1471  return res;
1472  }
1473 
1479  get_abs() const
1480  {
1481  VectorizedArray res;
1482  res.data = vabsq_f32(data);
1483  return res;
1484  }
1485 
1491  get_max(const VectorizedArray &other) const
1492  {
1493  VectorizedArray res;
1494  res.data = vmaxq_f32(data, other.data);
1495  return res;
1496  }
1497 
1503  get_min(const VectorizedArray &other) const
1504  {
1505  VectorizedArray res;
1506  res.data = vminq_f32(data, other.data);
1507  return res;
1508  }
1509 
1510  // Make a few functions friends.
1511  template <typename Number2, std::size_t width2>
1514  template <typename Number2, std::size_t width2>
1517  template <typename Number2, std::size_t width2>
1521  template <typename Number2, std::size_t width2>
1525 };
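// Usage sketch (editorial addition, not part of the original header): all
// specializations share the interface of the generic class, so code like the
// following works unchanged on Arm Neon, SSE2, or AVX builds:
//
//   float buffer[4] = {1.f, 2.f, 3.f, 4.f};
//   VectorizedArray<float, 4> x;
//   x.load(buffer);    // unaligned load is allowed
//   x *= 0.5f;
//   x.store(buffer);   // unaligned store is allowed
//
// streaming_store(), by contrast, requires the destination pointer to be
// aligned to the vector width (16 bytes here), as asserted above.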
1526 
1527 
1528 # endif
1529 
1530 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
1531 
1535 template <>
1536 class VectorizedArray<double, 2>
1537  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
1538 {
1539 public:
1543  using value_type = double;
1544 
1549  VectorizedArray() = default;
1550 
1554  VectorizedArray(const double scalar)
1555  {
1556  this->operator=(scalar);
1557  }
1558 
1562  template <typename U>
1563  VectorizedArray(const std::initializer_list<U> &list)
1564  : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
1565  {}
1566 
1571  VectorizedArray &
1572  operator=(const double x) &
1573  {
1574  data = _mm_set1_pd(x);
1575  return *this;
1576  }
1577 
1583  VectorizedArray &
1584  operator=(const double scalar) && = delete;
1585 
1590  double &
1591  operator[](const unsigned int comp)
1592  {
1593  AssertIndexRange(comp, 2);
1594  return *(reinterpret_cast<double *>(&data) + comp);
1595  }
1596 
1601  const double &
1602  operator[](const unsigned int comp) const
1603  {
1604  AssertIndexRange(comp, 2);
1605  return *(reinterpret_cast<const double *>(&data) + comp);
1606  }
1607 
1612  VectorizedArray &
1613  operator+=(const VectorizedArray &vec)
1614  {
1615 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1616  data += vec.data;
1617 # else
1618  data = _mm_add_pd(data, vec.data);
1619 # endif
1620  return *this;
1621  }
1622 
1627  VectorizedArray &
1628  operator-=(const VectorizedArray &vec)
1629  {
1630 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1631  data -= vec.data;
1632 # else
1633  data = _mm_sub_pd(data, vec.data);
1634 # endif
1635  return *this;
1636  }
1637 
1642  VectorizedArray &
1643  operator*=(const VectorizedArray &vec)
1644  {
1645 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1646  data *= vec.data;
1647 # else
1648  data = _mm_mul_pd(data, vec.data);
1649 # endif
1650  return *this;
1651  }
1652 
1657  VectorizedArray &
1658  operator/=(const VectorizedArray &vec)
1659  {
1660 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1661  data /= vec.data;
1662 # else
1663  data = _mm_div_pd(data, vec.data);
1664 # endif
1665  return *this;
1666  }
1667 
1674  void
1675  load(const double *ptr)
1676  {
1677  data = _mm_loadu_pd(ptr);
1678  }
1679 
1681  void
1682  load(const float *ptr)
1683  {
1685  for (unsigned int i = 0; i < 2; ++i)
1686  data[i] = ptr[i];
1687  }
1688 
1696  void
1697  store(double *ptr) const
1698  {
1699  _mm_storeu_pd(ptr, data);
1700  }
1701 
1703  void
1704  store(float *ptr) const
1705  {
1707  for (unsigned int i = 0; i < 2; ++i)
1708  ptr[i] = data[i];
1709  }
1710 
1716  void
1717  streaming_store(double *ptr) const
1718  {
1719  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1720  ExcMessage("Memory not aligned"));
1721  _mm_stream_pd(ptr, data);
1722  }
1723 
1737  void
1738  gather(const double *base_ptr, const unsigned int *offsets)
1739  {
1740  for (unsigned int i = 0; i < 2; ++i)
1741  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1742  }
1743 
1757  void
1758  scatter(const unsigned int *offsets, double *base_ptr) const
1759  {
1760  for (unsigned int i = 0; i < 2; ++i)
1761  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1762  }
1763 
1768  double
1769  sum() const
1770  {
1771  __m128d t1 = _mm_unpackhi_pd(data, data);
1772  __m128d t2 = _mm_add_pd(data, t1);
1773  return _mm_cvtsd_f64(t2);
1774  }
1775 
1781  __m128d data;
1782 
1783 private:
1790  get_sqrt() const
1791  {
1792  VectorizedArray res;
1793  res.data = _mm_sqrt_pd(data);
1794  return res;
1795  }
1796 
1803  get_abs() const
1804  {
1805  // to compute the absolute value, perform
1806  // bitwise andnot with -0. This will leave all
1807  // value and exponent bits unchanged but force
1808  // the sign value to +.
1809  __m128d mask = _mm_set1_pd(-0.);
1810  VectorizedArray res;
1811  res.data = _mm_andnot_pd(mask, data);
1812  return res;
1813  }
1814 
1821  get_max(const VectorizedArray &other) const
1822  {
1823  VectorizedArray res;
1824  res.data = _mm_max_pd(data, other.data);
1825  return res;
1826  }
1827 
1834  get_min(const VectorizedArray &other) const
1835  {
1836  VectorizedArray res;
1837  res.data = _mm_min_pd(data, other.data);
1838  return res;
1839  }
1840 
1841  // Make a few functions friends.
1842  template <typename Number2, std::size_t width2>
1845  template <typename Number2, std::size_t width2>
1848  template <typename Number2, std::size_t width2>
1852  template <typename Number2, std::size_t width2>
1856 };
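// Usage sketch (editorial addition, not part of the original header):
// gather()/scatter() address the lanes through an index array, which is also
// how the transpose helpers below handle loop remainders, e.g.
//
//   double             storage[10] = {};
//   const unsigned int idx[2]      = {1, 7};
//   VectorizedArray<double, 2> g;
//   g.gather(storage, idx);    // g = (storage[1], storage[7])
//   g += 1.;
//   g.scatter(idx, storage);   // writes the two lanes back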
1857 
1858 
1859 
1863 template <>
1864 inline DEAL_II_ALWAYS_INLINE void
1865 vectorized_load_and_transpose(const unsigned int n_entries,
1866  const double *in,
1867  const unsigned int *offsets,
1868  VectorizedArray<double, 2> *out)
1869 {
1870  const unsigned int n_chunks = n_entries / 2;
1871  for (unsigned int i = 0; i < n_chunks; ++i)
1872  {
1873  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
1874  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
1875  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1876  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1877  }
1878 
1879  // remainder loop of work that does not divide by 2
1880  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1881  for (unsigned int v = 0; v < 2; ++v)
1882  out[i][v] = in[offsets[v] + i];
1883 }
1884 
1885 
1886 
1890 template <>
1891 inline DEAL_II_ALWAYS_INLINE void
1892 vectorized_load_and_transpose(const unsigned int n_entries,
1893  const std::array<double *, 2> &in,
1894  VectorizedArray<double, 2> *out)
1895 {
1896  // see the comments in the vectorized_load_and_transpose above
1897 
1898  const unsigned int n_chunks = n_entries / 2;
1899  for (unsigned int i = 0; i < n_chunks; ++i)
1900  {
1901  __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
1902  __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
1903  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1904  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1905  }
1906 
1907  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1908  for (unsigned int v = 0; v < 2; ++v)
1909  out[i][v] = in[v][i];
1910 }
1911 
1912 
1913 
1917 template <>
1918 inline DEAL_II_ALWAYS_INLINE void
1919 vectorized_transpose_and_store(const bool add_into,
1920  const unsigned int n_entries,
1921  const VectorizedArray<double, 2> *in,
1922  const unsigned int *offsets,
1923  double *out)
1924 {
1925  const unsigned int n_chunks = n_entries / 2;
1926  if (add_into)
1927  {
1928  for (unsigned int i = 0; i < n_chunks; ++i)
1929  {
1930  __m128d u0 = in[2 * i + 0].data;
1931  __m128d u1 = in[2 * i + 1].data;
1932  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1933  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1934  _mm_storeu_pd(out + 2 * i + offsets[0],
1935  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
1936  res0));
1937  _mm_storeu_pd(out + 2 * i + offsets[1],
1938  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
1939  res1));
1940  }
1941  // remainder loop of work that does not divide by 2
1942  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1943  for (unsigned int v = 0; v < 2; ++v)
1944  out[offsets[v] + i] += in[i][v];
1945  }
1946  else
1947  {
1948  for (unsigned int i = 0; i < n_chunks; ++i)
1949  {
1950  __m128d u0 = in[2 * i + 0].data;
1951  __m128d u1 = in[2 * i + 1].data;
1952  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1953  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1954  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
1955  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
1956  }
1957  // remainder loop of work that does not divide by 2
1958  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1959  for (unsigned int v = 0; v < 2; ++v)
1960  out[offsets[v] + i] = in[i][v];
1961  }
1962 }
1963 
1964 
1965 
1969 template <>
1970 inline DEAL_II_ALWAYS_INLINE void
1971 vectorized_transpose_and_store(const bool add_into,
1972  const unsigned int n_entries,
1973  const VectorizedArray<double, 2> *in,
1974  std::array<double *, 2> &out)
1975 {
1976  // see the comments in the vectorized_transpose_and_store above
1977 
1978  const unsigned int n_chunks = n_entries / 2;
1979  if (add_into)
1980  {
1981  for (unsigned int i = 0; i < n_chunks; ++i)
1982  {
1983  __m128d u0 = in[2 * i + 0].data;
1984  __m128d u1 = in[2 * i + 1].data;
1985  __m128d res0 = _mm_unpacklo_pd(u0, u1);
1986  __m128d res1 = _mm_unpackhi_pd(u0, u1);
1987  _mm_storeu_pd(out[0] + 2 * i,
1988  _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
1989  _mm_storeu_pd(out[1] + 2 * i,
1990  _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
1991  }
1992 
1993  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1994  for (unsigned int v = 0; v < 2; ++v)
1995  out[v][i] += in[i][v];
1996  }
1997  else
1998  {
1999  for (unsigned int i = 0; i < n_chunks; ++i)
2000  {
2001  __m128d u0 = in[2 * i + 0].data;
2002  __m128d u1 = in[2 * i + 1].data;
2003  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2004  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2005  _mm_storeu_pd(out[0] + 2 * i, res0);
2006  _mm_storeu_pd(out[1] + 2 * i, res1);
2007  }
2008 
2009  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2010  for (unsigned int v = 0; v < 2; ++v)
2011  out[v][i] = in[i][v];
2012  }
2013 }
2014 
2015 
2016 
2020 template <>
2021 class VectorizedArray<float, 4>
2022  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
2023 {
2024 public:
2028  using value_type = float;
2029 
2034  VectorizedArray() = default;
2035 
2039  VectorizedArray(const float scalar)
2040  {
2041  this->operator=(scalar);
2042  }
2043 
2047  template <typename U>
2048  VectorizedArray(const std::initializer_list<U> &list)
2049  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
2050  {}
2051 
2056  VectorizedArray &
2057  operator=(const float x) &
2058  {
2059  data = _mm_set1_ps(x);
2060  return *this;
2061  }
2062 
2068  VectorizedArray &
2069  operator=(const float scalar) && = delete;
2070 
2075  float &
2076  operator[](const unsigned int comp)
2077  {
2078  AssertIndexRange(comp, 4);
2079  return *(reinterpret_cast<float *>(&data) + comp);
2080  }
2081 
2086  const float &
2087  operator[](const unsigned int comp) const
2088  {
2089  AssertIndexRange(comp, 4);
2090  return *(reinterpret_cast<const float *>(&data) + comp);
2091  }
2092 
2097  VectorizedArray &
2098  operator+=(const VectorizedArray &vec)
2099  {
2100 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2101  data += vec.data;
2102 # else
2103  data = _mm_add_ps(data, vec.data);
2104 # endif
2105  return *this;
2106  }
2107 
2112  VectorizedArray &
2113  operator-=(const VectorizedArray &vec)
2114  {
2115 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2116  data -= vec.data;
2117 # else
2118  data = _mm_sub_ps(data, vec.data);
2119 # endif
2120  return *this;
2121  }
2122 
2127  VectorizedArray &
2128  operator*=(const VectorizedArray &vec)
2129  {
2130 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2131  data *= vec.data;
2132 # else
2133  data = _mm_mul_ps(data, vec.data);
2134 # endif
2135  return *this;
2136  }
2137 
2142  VectorizedArray &
2143  operator/=(const VectorizedArray &vec)
2144  {
2145 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2146  data /= vec.data;
2147 # else
2148  data = _mm_div_ps(data, vec.data);
2149 # endif
2150  return *this;
2151  }
2152 
2159  void
2160  load(const float *ptr)
2161  {
2162  data = _mm_loadu_ps(ptr);
2163  }
2164 
2172  void
2173  store(float *ptr) const
2174  {
2175  _mm_storeu_ps(ptr, data);
2176  }
2177 
2183  void
2184  streaming_store(float *ptr) const
2185  {
2186  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2187  ExcMessage("Memory not aligned"));
2188  _mm_stream_ps(ptr, data);
2189  }
2190 
2204  void
2205  gather(const float *base_ptr, const unsigned int *offsets)
2206  {
2207  for (unsigned int i = 0; i < 4; ++i)
2208  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2209  }
2210 
2224  void
2225  scatter(const unsigned int *offsets, float *base_ptr) const
2226  {
2227  for (unsigned int i = 0; i < 4; ++i)
2228  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2229  }
2230 
2235  float
2236  sum() const
2237  {
2238  __m128 t1 = _mm_movehl_ps(data, data);
2239  __m128 t2 = _mm_add_ps(data, t1);
2240  __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
2241  __m128 t4 = _mm_add_ss(t2, t3);
2242  return _mm_cvtss_f32(t4);
2243  }
2244 
2250  __m128 data;
2251 
2252 private:
2259  get_sqrt() const
2260  {
2261  VectorizedArray res;
2262  res.data = _mm_sqrt_ps(data);
2263  return res;
2264  }
2265 
2272  get_abs() const
2273  {
2274  // to compute the absolute value, perform bitwise andnot with -0. This
2275  // will leave all value and exponent bits unchanged but force the sign
2276  // value to +.
2277  __m128 mask = _mm_set1_ps(-0.f);
2278  VectorizedArray res;
2279  res.data = _mm_andnot_ps(mask, data);
2280  return res;
2281  }
2282 
2289  get_max(const VectorizedArray &other) const
2290  {
2291  VectorizedArray res;
2292  res.data = _mm_max_ps(data, other.data);
2293  return res;
2294  }
2295 
2302  get_min(const VectorizedArray &other) const
2303  {
2304  VectorizedArray res;
2305  res.data = _mm_min_ps(data, other.data);
2306  return res;
2307  }
2308 
2309  // Make a few functions friends.
2310  template <typename Number2, std::size_t width2>
2313  template <typename Number2, std::size_t width2>
2316  template <typename Number2, std::size_t width2>
2320  template <typename Number2, std::size_t width2>
2324 };
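// Usage sketch (editorial addition, not part of the original header; 'pa'
// and 'pb' are hypothetical float pointers): sum() performs the horizontal
// reduction shown above (movehl/shuffle/add), so a four-wide dot-product
// contribution can be written as
//
//   VectorizedArray<float, 4> a, b;
//   a.load(pa);
//   b.load(pb);
//   a *= b;
//   const float dot = a.sum();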
2325 
2326 
2327 
2331 template <>
2332 inline DEAL_II_ALWAYS_INLINE void
2333 vectorized_load_and_transpose(const unsigned int n_entries,
2334  const float *in,
2335  const unsigned int *offsets,
2336  VectorizedArray<float, 4> *out)
2337 {
2338  const unsigned int n_chunks = n_entries / 4;
2339  for (unsigned int i = 0; i < n_chunks; ++i)
2340  {
2341  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2342  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2343  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2344  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2345  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2346  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2347  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2348  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2349  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2350  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2351  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2352  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2353  }
2354 
2355  // remainder loop of work that does not divide by 4
2356  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2357  for (unsigned int v = 0; v < 4; ++v)
2358  out[i][v] = in[offsets[v] + i];
2359 }
2360 
2361 
2362 
2366 template <>
2367 inline DEAL_II_ALWAYS_INLINE void
2368 vectorized_load_and_transpose(const unsigned int n_entries,
2369  const std::array<float *, 4> &in,
2370  VectorizedArray<float, 4> *out)
2371 {
2372  // see the comments in the vectorized_load_and_transpose above
2373 
2374  const unsigned int n_chunks = n_entries / 4;
2375  for (unsigned int i = 0; i < n_chunks; ++i)
2376  {
2377  __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
2378  __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
2379  __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
2380  __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
2381  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2382  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2383  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2384  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2385  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2386  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2387  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2388  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2389  }
2390 
2391  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2392  for (unsigned int v = 0; v < 4; ++v)
2393  out[i][v] = in[v][i];
2394 }
2395 
2396 
2397 
2401 template <>
2402 inline DEAL_II_ALWAYS_INLINE void
2403 vectorized_transpose_and_store(const bool add_into,
2404  const unsigned int n_entries,
2405  const VectorizedArray<float, 4> *in,
2406  const unsigned int *offsets,
2407  float *out)
2408 {
2409  const unsigned int n_chunks = n_entries / 4;
2410  for (unsigned int i = 0; i < n_chunks; ++i)
2411  {
2412  __m128 u0 = in[4 * i + 0].data;
2413  __m128 u1 = in[4 * i + 1].data;
2414  __m128 u2 = in[4 * i + 2].data;
2415  __m128 u3 = in[4 * i + 3].data;
2416  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2417  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2418  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2419  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2420  u0 = _mm_shuffle_ps(t0, t2, 0x88);
2421  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2422  u2 = _mm_shuffle_ps(t1, t3, 0x88);
2423  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2424 
2425  // Cannot use the same store instructions in both paths of the 'if'
2426  // because the compiler cannot know that there is no aliasing between
2427  // pointers
2428  if (add_into)
2429  {
2430  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
2431  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2432  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
2433  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2434  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
2435  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2436  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
2437  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2438  }
2439  else
2440  {
2441  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2442  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2443  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2444  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2445  }
2446  }
2447 
2448  // remainder loop of work that does not divide by 4
2449  if (add_into)
2450  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2451  for (unsigned int v = 0; v < 4; ++v)
2452  out[offsets[v] + i] += in[i][v];
2453  else
2454  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2455  for (unsigned int v = 0; v < 4; ++v)
2456  out[offsets[v] + i] = in[i][v];
2457 }
2458 
2459 
2460 
2464 template <>
2465 inline DEAL_II_ALWAYS_INLINE void
2466 vectorized_transpose_and_store(const bool add_into,
2467  const unsigned int n_entries,
2468  const VectorizedArray<float, 4> *in,
2469  std::array<float *, 4> &out)
2470 {
2471  // see the comments in the vectorized_transpose_and_store above
2472 
2473  const unsigned int n_chunks = n_entries / 4;
2474  for (unsigned int i = 0; i < n_chunks; ++i)
2475  {
2476  __m128 u0 = in[4 * i + 0].data;
2477  __m128 u1 = in[4 * i + 1].data;
2478  __m128 u2 = in[4 * i + 2].data;
2479  __m128 u3 = in[4 * i + 3].data;
2480  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2481  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2482  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2483  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2484  u0 = _mm_shuffle_ps(t0, t2, 0x88);
2485  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2486  u2 = _mm_shuffle_ps(t1, t3, 0x88);
2487  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2488 
2489  if (add_into)
2490  {
2491  u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
2492  _mm_storeu_ps(out[0] + 4 * i, u0);
2493  u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
2494  _mm_storeu_ps(out[1] + 4 * i, u1);
2495  u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
2496  _mm_storeu_ps(out[2] + 4 * i, u2);
2497  u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
2498  _mm_storeu_ps(out[3] + 4 * i, u3);
2499  }
2500  else
2501  {
2502  _mm_storeu_ps(out[0] + 4 * i, u0);
2503  _mm_storeu_ps(out[1] + 4 * i, u1);
2504  _mm_storeu_ps(out[2] + 4 * i, u2);
2505  _mm_storeu_ps(out[3] + 4 * i, u3);
2506  }
2507  }
2508 
2509  if (add_into)
2510  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2511  for (unsigned int v = 0; v < 4; ++v)
2512  out[v][i] += in[i][v];
2513  else
2514  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2515  for (unsigned int v = 0; v < 4; ++v)
2516  out[v][i] = in[i][v];
2517 }
2518 
2519 
2520 
2521 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__)
2522 
2523 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2524 
2528 template <>
2529 class VectorizedArray<double, 4>
2530  : public VectorizedArrayBase<VectorizedArray<double, 4>, 4>
2531 {
2532 public:
2536  using value_type = double;
2537 
2542  VectorizedArray() = default;
2543 
2547  VectorizedArray(const double scalar)
2548  {
2549  this->operator=(scalar);
2550  }
2551 
2555  template <typename U>
2556  VectorizedArray(const std::initializer_list<U> &list)
2557  : VectorizedArrayBase<VectorizedArray<double, 4>, 4>(list)
2558  {}
2559 
2564  VectorizedArray &
2565  operator=(const double x) &
2566  {
2567  data = _mm256_set1_pd(x);
2568  return *this;
2569  }
2570 
2576  VectorizedArray &
2577  operator=(const double scalar) && = delete;
2578 
2583  double &
2584  operator[](const unsigned int comp)
2585  {
2586  AssertIndexRange(comp, 4);
2587  return *(reinterpret_cast<double *>(&data) + comp);
2588  }
2589 
2594  const double &
2595  operator[](const unsigned int comp) const
2596  {
2597  AssertIndexRange(comp, 4);
2598  return *(reinterpret_cast<const double *>(&data) + comp);
2599  }
2600 
2605  VectorizedArray &
2606  operator+=(const VectorizedArray &vec)
2607  {
2608  // if the compiler supports vector arithmetic, we can simply use +=
2609  // operator on the given data type. this allows the compiler to combine
2610  // additions with multiplication (fused multiply-add) if those
2611  // instructions are available. Otherwise, we need to use the built-in
2612  // intrinsic command for __m256d
2613 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2614  data += vec.data;
2615 # else
2616  data = _mm256_add_pd(data, vec.data);
2617 # endif
2618  return *this;
2619  }
2620 
2625  VectorizedArray &
2626  operator-=(const VectorizedArray &vec)
2627  {
2628 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2629  data -= vec.data;
2630 # else
2631  data = _mm256_sub_pd(data, vec.data);
2632 # endif
2633  return *this;
2634  }
2639  VectorizedArray &
2640  operator*=(const VectorizedArray &vec)
2641  {
2642 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2643  data *= vec.data;
2644 # else
2645  data = _mm256_mul_pd(data, vec.data);
2646 # endif
2647  return *this;
2648  }
2649 
2654  VectorizedArray &
2655  operator/=(const VectorizedArray &vec)
2656  {
2657 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2658  data /= vec.data;
2659 # else
2660  data = _mm256_div_pd(data, vec.data);
2661 # endif
2662  return *this;
2663  }
2664 
2671  void
2672  load(const double *ptr)
2673  {
2674  data = _mm256_loadu_pd(ptr);
2675  }
2676 
2678  void
2679  load(const float *ptr)
2680  {
2681  data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2682  }
2683 
2691  void
2692  store(double *ptr) const
2693  {
2694  _mm256_storeu_pd(ptr, data);
2695  }
2696 
2698  void
2699  store(float *ptr) const
2700  {
2701  _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2702  }
2703 
2709  void
2710  streaming_store(double *ptr) const
2711  {
2712  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2713  ExcMessage("Memory not aligned"));
2714  _mm256_stream_pd(ptr, data);
2715  }
2716 
2730  void
2731  gather(const double *base_ptr, const unsigned int *offsets)
2732  {
2733 # if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
2734  // unfortunately, there does not appear to be a 128 bit integer load, so
2735  // do it by some reinterpret casts here. this is allowed because the Intel
2736  // API allows aliasing between different vector types.
2737  const __m128 index_val =
2738  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2739  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2740 
2741  // work around a warning with gcc-12 about an uninitialized initial state
2742  // for gather by starting with a zero guess, even though all lanes will be
2743  // overwritten
2744  __m256d zero = _mm256_setzero_pd();
2745  __m256d mask = _mm256_cmp_pd(zero, zero, _CMP_EQ_OQ);
2746 
2747  data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2748 # else
2749  for (unsigned int i = 0; i < 4; ++i)
2750  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2751 # endif
2752  }
2753 
2767  void
2768  scatter(const unsigned int *offsets, double *base_ptr) const
2769  {
2770  // no scatter operation in AVX/AVX2
2771  for (unsigned int i = 0; i < 4; ++i)
2772  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2773  }
2774 
2779  double
2780  sum() const
2781  {
2782  VectorizedArray<double, 2> t1;
2783  t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
2784  return t1.sum();
2785  }
2786 
2792  __m256d data;
2793 
2794 private:
2799  __m128d
2800  get_lower() const
2801  {
2802  return _mm256_castpd256_pd128(data);
2803  }
2804 
2809  __m128d
2810  get_upper() const
2811  {
2812  return _mm256_extractf128_pd(data, 1);
2813  }
2814 
2821  get_sqrt() const
2822  {
2823  VectorizedArray res;
2824  res.data = _mm256_sqrt_pd(data);
2825  return res;
2826  }
2827 
2834  get_abs() const
2835  {
2836  // to compute the absolute value, perform bitwise andnot with -0. This
2837  // will leave all value and exponent bits unchanged but force the sign
2838  // value to +.
2839  __m256d mask = _mm256_set1_pd(-0.);
2840  VectorizedArray res;
2841  res.data = _mm256_andnot_pd(mask, data);
2842  return res;
2843  }
2844 
2851  get_max(const VectorizedArray &other) const
2852  {
2853  VectorizedArray res;
2854  res.data = _mm256_max_pd(data, other.data);
2855  return res;
2856  }
2857 
2864  get_min(const VectorizedArray &other) const
2865  {
2866  VectorizedArray res;
2867  res.data = _mm256_min_pd(data, other.data);
2868  return res;
2869  }
2870 
2871  // Make a few functions friends.
2872  template <typename Number2, std::size_t width2>
2875  template <typename Number2, std::size_t width2>
2878  template <typename Number2, std::size_t width2>
2882  template <typename Number2, std::size_t width2>
2886 };
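// Usage sketch (editorial addition, not part of the original header;
// 'base_ptr' is a hypothetical double pointer): the interface is identical
// to the 128-bit case, only the lane count changes. With AVX2 and
// DEAL_II_USE_VECTORIZATION_GATHER defined, the gather() call above maps to
// a single hardware gather instruction instead of the scalar fallback loop:
//
//   const unsigned int idx[4] = {0, 3, 6, 9};
//   VectorizedArray<double, 4> g;
//   g.gather(base_ptr, idx);
//   const double s = g.sum();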
2887 
2888 
2889 
2893 template <>
2894 inline DEAL_II_ALWAYS_INLINE void
2895 vectorized_load_and_transpose(const unsigned int n_entries,
2896  const double *in,
2897  const unsigned int *offsets,
2898  VectorizedArray<double, 4> *out)
2899 {
2900  const unsigned int n_chunks = n_entries / 4;
2901  const double *in0 = in + offsets[0];
2902  const double *in1 = in + offsets[1];
2903  const double *in2 = in + offsets[2];
2904  const double *in3 = in + offsets[3];
2905 
2906  for (unsigned int i = 0; i < n_chunks; ++i)
2907  {
2908  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2909  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2910  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2911  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2912  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2913  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2914  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2915  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2916  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2917  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2918  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2919  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2920  }
2921 
2922  // remainder loop of work that does not divide by 4
2923  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2924  out[i].gather(in + i, offsets);
2925 }
2926 
2927 
2928 
2932 template <>
2933 inline DEAL_II_ALWAYS_INLINE void
2934 vectorized_load_and_transpose(const unsigned int n_entries,
2935  const std::array<double *, 4> &in,
2936  VectorizedArray<double, 4> *out)
2937 {
2938  // see the comments in the vectorized_load_and_transpose above
2939 
2940  const unsigned int n_chunks = n_entries / 4;
2941  const double *in0 = in[0];
2942  const double *in1 = in[1];
2943  const double *in2 = in[2];
2944  const double *in3 = in[3];
2945 
2946  for (unsigned int i = 0; i < n_chunks; ++i)
2947  {
2948  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2949  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2950  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2951  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2952  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2953  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2954  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2955  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2956  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2957  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2958  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2959  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2960  }
2961 
2962  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2963  gather(out[i], in, i);
2964 }
2965 
2966 
2967 
2971 template <>
2972 inline DEAL_II_ALWAYS_INLINE void
2973 vectorized_transpose_and_store(const bool add_into,
2974  const unsigned int n_entries,
2975  const VectorizedArray<double, 4> *in,
2976  const unsigned int *offsets,
2977  double *out)
2978 {
2979  const unsigned int n_chunks = n_entries / 4;
2980  double *out0 = out + offsets[0];
2981  double *out1 = out + offsets[1];
2982  double *out2 = out + offsets[2];
2983  double *out3 = out + offsets[3];
2984  for (unsigned int i = 0; i < n_chunks; ++i)
2985  {
2986  __m256d u0 = in[4 * i + 0].data;
2987  __m256d u1 = in[4 * i + 1].data;
2988  __m256d u2 = in[4 * i + 2].data;
2989  __m256d u3 = in[4 * i + 3].data;
2990  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2991  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2992  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2993  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2994  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2995  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2996  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2997  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2998 
2999  // Cannot use the same store instructions in both paths of the 'if'
3000  // because the compiler cannot know that there is no aliasing between
3001  // pointers
3002  if (add_into)
3003  {
3004  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3005  _mm256_storeu_pd(out0 + 4 * i, res0);
3006  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3007  _mm256_storeu_pd(out1 + 4 * i, res1);
3008  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3009  _mm256_storeu_pd(out2 + 4 * i, res2);
3010  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3011  _mm256_storeu_pd(out3 + 4 * i, res3);
3012  }
3013  else
3014  {
3015  _mm256_storeu_pd(out0 + 4 * i, res0);
3016  _mm256_storeu_pd(out1 + 4 * i, res1);
3017  _mm256_storeu_pd(out2 + 4 * i, res2);
3018  _mm256_storeu_pd(out3 + 4 * i, res3);
3019  }
3020  }
3021 
3022  // remainder loop of work that does not divide by 4
3023  if (add_into)
3024  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3025  for (unsigned int v = 0; v < 4; ++v)
3026  out[offsets[v] + i] += in[i][v];
3027  else
3028  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3029  for (unsigned int v = 0; v < 4; ++v)
3030  out[offsets[v] + i] = in[i][v];
3031 }
3032 
3033 
3034 
3038 template <>
3039 inline DEAL_II_ALWAYS_INLINE void
3040 vectorized_transpose_and_store(const bool add_into,
3041  const unsigned int n_entries,
3042  const VectorizedArray<double, 4> *in,
3043  std::array<double *, 4> &out)
3044 {
3045  // see the comments in the vectorized_transpose_and_store above
3046 
3047  const unsigned int n_chunks = n_entries / 4;
3048  double *out0 = out[0];
3049  double *out1 = out[1];
3050  double *out2 = out[2];
3051  double *out3 = out[3];
3052  for (unsigned int i = 0; i < n_chunks; ++i)
3053  {
3054  __m256d u0 = in[4 * i + 0].data;
3055  __m256d u1 = in[4 * i + 1].data;
3056  __m256d u2 = in[4 * i + 2].data;
3057  __m256d u3 = in[4 * i + 3].data;
3058  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3059  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3060  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3061  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3062  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
3063  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
3064  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
3065  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3066 
3067  // Cannot use the same store instructions in both paths of the 'if'
3068  // because the compiler cannot know that there is no aliasing between
3069  // pointers
3070  if (add_into)
3071  {
3072  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3073  _mm256_storeu_pd(out0 + 4 * i, res0);
3074  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3075  _mm256_storeu_pd(out1 + 4 * i, res1);
3076  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3077  _mm256_storeu_pd(out2 + 4 * i, res2);
3078  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3079  _mm256_storeu_pd(out3 + 4 * i, res3);
3080  }
3081  else
3082  {
3083  _mm256_storeu_pd(out0 + 4 * i, res0);
3084  _mm256_storeu_pd(out1 + 4 * i, res1);
3085  _mm256_storeu_pd(out2 + 4 * i, res2);
3086  _mm256_storeu_pd(out3 + 4 * i, res3);
3087  }
3088  }
3089 
3090  // remainder loop of work that does not divide by 4
3091  if (add_into)
3092  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3093  for (unsigned int v = 0; v < 4; ++v)
3094  out[v][i] += in[i][v];
3095  else
3096  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3097  for (unsigned int v = 0; v < 4; ++v)
3098  out[v][i] = in[i][v];
3099 }
3100 
3101 
3102 
3106 template <>
3107 class VectorizedArray<float, 8>
3108  : public VectorizedArrayBase<VectorizedArray<float, 8>, 8>
3109 {
3110 public:
3114  using value_type = float;
3115 
3120  VectorizedArray() = default;
3121 
3125  VectorizedArray(const float scalar)
3126  {
3127  this->operator=(scalar);
3128  }
3129 
3133  template <typename U>
3134  VectorizedArray(const std::initializer_list<U> &list)
3135  : VectorizedArrayBase<VectorizedArray<float, 8>, 8>(list)
3136  {}
3137 
3142  VectorizedArray &
3143  operator=(const float x) &
3144  {
3145  data = _mm256_set1_ps(x);
3146  return *this;
3147  }
3148 
3154  VectorizedArray &
3155  operator=(const float scalar) && = delete;
3156 
3161  float &
3162  operator[](const unsigned int comp)
3163  {
3164  AssertIndexRange(comp, 8);
3165  return *(reinterpret_cast<float *>(&data) + comp);
3166  }
3167 
3172  const float &
3173  operator[](const unsigned int comp) const
3174  {
3175  AssertIndexRange(comp, 8);
3176  return *(reinterpret_cast<const float *>(&data) + comp);
3177  }
3178 
3183  VectorizedArray &
3184  operator+=(const VectorizedArray &vec)
3185  {
3186  // if the compiler supports vector arithmetic, we can simply use +=
3187  // operator on the given data type. this allows the compiler to combine
3188  // additions with multiplication (fused multiply-add) if those
3189  // instructions are available. Otherwise, we need to use the built-in
3190  // intrinsic command for __m256
3191 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3192  data += vec.data;
3193 # else
3194  data = _mm256_add_ps(data, vec.data);
3195 # endif
3196  return *this;
3197  }
3198 
3203  VectorizedArray &
3204  operator-=(const VectorizedArray &vec)
3205  {
3206 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3207  data -= vec.data;
3208 # else
3209  data = _mm256_sub_ps(data, vec.data);
3210 # endif
3211  return *this;
3212  }
3217  VectorizedArray &
3218  operator*=(const VectorizedArray &vec)
3219  {
3220 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3221  data *= vec.data;
3222 # else
3223  data = _mm256_mul_ps(data, vec.data);
3224 # endif
3225  return *this;
3226  }
3227 
3232  VectorizedArray &
3233  operator/=(const VectorizedArray &vec)
3234  {
3235 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3236  data /= vec.data;
3237 # else
3238  data = _mm256_div_ps(data, vec.data);
3239 # endif
3240  return *this;
3241  }
3242 
3249  void
3250  load(const float *ptr)
3251  {
3252  data = _mm256_loadu_ps(ptr);
3253  }
3254 
3262  void
3263  store(float *ptr) const
3264  {
3265  _mm256_storeu_ps(ptr, data);
3266  }
3267 
3273  void
3274  streaming_store(float *ptr) const
3275  {
3276  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
3277  ExcMessage("Memory not aligned"));
3278  _mm256_stream_ps(ptr, data);
3279  }
3280 
3294  void
3295  gather(const float *base_ptr, const unsigned int *offsets)
3296  {
3297 # if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
3298  // unfortunately, there does not appear to be a 256 bit integer load, so
3299  // do it by some reinterpret casts here. this is allowed because the Intel
3300  // API allows aliasing between different vector types.
3301  const __m256 index_val =
3302  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3303  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3304 
3305  // work around a warning with gcc-12 about an uninitialized initial state
3306  // for gather by starting with a zero guess, even though all lanes will be
3307  // overwritten
3308  __m256 zero = _mm256_setzero_ps();
3309  __m256 mask = _mm256_cmp_ps(zero, zero, _CMP_EQ_OQ);
3310 
3311  data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
3312 # else
3313  for (unsigned int i = 0; i < 8; ++i)
3314  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3315 # endif
3316  }
3317 
3331  void
3332  scatter(const unsigned int *offsets, float *base_ptr) const
3333  {
3334  // no scatter operation in AVX/AVX2
3335  for (unsigned int i = 0; i < 8; ++i)
3336  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3337  }
3338 
3343  float
3344  sum() const
3345  {
3346  VectorizedArray<float, 4> t1;
3347  t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
3348  return t1.sum();
3349  }
3350 
3356  __m256 data;
3357 
3358 private:
3363  __m128
3364  get_lower() const
3365  {
3366  return _mm256_castps256_ps128(data);
3367  }
3368 
3373  __m128
3374  get_upper() const
3375  {
3376  return _mm256_extractf128_ps(data, 1);
3377  }
3378 
3384  VectorizedArray
3385  get_sqrt() const
3386  {
3387  VectorizedArray res;
3388  res.data = _mm256_sqrt_ps(data);
3389  return res;
3390  }
3391 
3397  VectorizedArray
3398  get_abs() const
3399  {
3400  // to compute the absolute value, perform bitwise andnot with -0. This
3401  // will leave all value and exponent bits unchanged but force the sign
3402  // value to +.
3403  __m256 mask = _mm256_set1_ps(-0.f);
3404  VectorizedArray res;
3405  res.data = _mm256_andnot_ps(mask, data);
3406  return res;
3407  }
3408 
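// Worked example for the andnot trick in get_abs() above: -0.f has only the
// sign bit set (0x80000000), so _mm256_andnot_ps(mask, data) computes
// ~0x80000000 & x lane by lane, clearing the sign bit while leaving exponent
// and mantissa untouched; e.g. -1.5f (0xbfc00000) becomes 1.5f (0x3fc00000).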
3414  VectorizedArray
3415  get_max(const VectorizedArray &other) const
3416  {
3417  VectorizedArray res;
3418  res.data = _mm256_max_ps(data, other.data);
3419  return res;
3420  }
3421 
3427  VectorizedArray
3428  get_min(const VectorizedArray &other) const
3429  {
3430  VectorizedArray res;
3431  res.data = _mm256_min_ps(data, other.data);
3432  return res;
3433  }
3434 
3435  // Make a few functions friends.
3436  template <typename Number2, std::size_t width2>
3437  friend VectorizedArray<Number2, width2>
3438  std::sqrt(const VectorizedArray<Number2, width2> &);
3439  template <typename Number2, std::size_t width2>
3440  friend VectorizedArray<Number2, width2>
3441  std::abs(const VectorizedArray<Number2, width2> &);
3442  template <typename Number2, std::size_t width2>
3443  friend VectorizedArray<Number2, width2>
3444  std::max(const VectorizedArray<Number2, width2> &,
3445  const VectorizedArray<Number2, width2> &);
3446  template <typename Number2, std::size_t width2>
3447  friend VectorizedArray<Number2, width2>
3448  std::min(const VectorizedArray<Number2, width2> &,
3449  const VectorizedArray<Number2, width2> &);
3450 };
3451 
3452 
3453 
3457 template <>
3458 inline DEAL_II_ALWAYS_INLINE void
3459 vectorized_load_and_transpose(const unsigned int n_entries,
3460  const float *in,
3461  const unsigned int *offsets,
3462  VectorizedArray<float, 8> *out)
3463 {
3464  const unsigned int n_chunks = n_entries / 4;
3465  for (unsigned int i = 0; i < n_chunks; ++i)
3466  {
3467  // To avoid warnings about uninitialized variables, need to initialize
3468  // one variable with zero before using it.
3469  __m256 t0, t1, t2, t3 = {};
3470  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3471  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3472  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3473  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3474  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3475  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3476  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3477  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3478 
3479  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3480  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3481  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3482  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3483  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3484  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3485  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3486  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3487  }
3488 
3489  // remainder loop of work that does not divide by 4
3490  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3491  out[i].gather(in + i, offsets);
3492 }
3493 
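// Sketch of what the specialization above computes, with hypothetical sizes
// (not taken from the source): for n_entries == 5 and lane v < 8 the result
// satisfies out[e][v] == in[offsets[v] + e] for e = 0,...,4; the first chunk
// of four entries goes through the 128-bit loads and shuffles, the fifth
// entry falls into the remainder loop and uses gather().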
3494 
3495 
3499 template <>
3500 inline DEAL_II_ALWAYS_INLINE void
3501 vectorized_load_and_transpose(const unsigned int n_entries,
3502  const std::array<float *, 8> &in,
3503  VectorizedArray<float, 8> *out)
3504 {
3505  // see the comments in the vectorized_load_and_transpose above
3506 
3507  const unsigned int n_chunks = n_entries / 4;
3508  for (unsigned int i = 0; i < n_chunks; ++i)
3509  {
3510  __m256 t0, t1, t2, t3 = {};
3511  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3512  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3513  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3514  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3515  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3516  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3517  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3518  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3519 
3520  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3521  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3522  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3523  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3524  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3525  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3526  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3527  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3528  }
3529 
3530  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3531  gather(out[i], in, i);
3532 }
3533 
3534 
3535 
3539 template <>
3540 inline DEAL_II_ALWAYS_INLINE void
3541 vectorized_transpose_and_store(const bool add_into,
3542  const unsigned int n_entries,
3543  const VectorizedArray<float, 8> *in,
3544  const unsigned int *offsets,
3545  float *out)
3546 {
3547  const unsigned int n_chunks = n_entries / 4;
3548  for (unsigned int i = 0; i < n_chunks; ++i)
3549  {
3550  __m256 u0 = in[4 * i + 0].data;
3551  __m256 u1 = in[4 * i + 1].data;
3552  __m256 u2 = in[4 * i + 2].data;
3553  __m256 u3 = in[4 * i + 3].data;
3554  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3555  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3556  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3557  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3558  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3559  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3560  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3561  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3562  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3563  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3564  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3565  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3566  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3567  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3568  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3569  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3570 
3571  // Cannot use the same store instructions in both paths of the 'if'
3572  // because the compiler cannot know that there is no aliasing between
3573  // pointers
3574  if (add_into)
3575  {
3576  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3577  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3578  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3579  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3580  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3581  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3582  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3583  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3584  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3585  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3586  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3587  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3588  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3589  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3590  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3591  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3592  }
3593  else
3594  {
3595  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3596  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3597  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3598  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3599  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3600  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3601  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3602  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3603  }
3604  }
3605 
3606  // remainder loop of work that does not divide by 4
3607  if (add_into)
3608  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3609  for (unsigned int v = 0; v < 8; ++v)
3610  out[offsets[v] + i] += in[i][v];
3611  else
3612  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3613  for (unsigned int v = 0; v < 8; ++v)
3614  out[offsets[v] + i] = in[i][v];
3615 }
3616 
3617 
3618 
3622 template <>
3623 inline DEAL_II_ALWAYS_INLINE void
3624 vectorized_transpose_and_store(const bool add_into,
3625  const unsigned int n_entries,
3626  const VectorizedArray<float, 8> *in,
3627  std::array<float *, 8> &out)
3628 {
3629  // see the comments in the vectorized_transpose_and_store above
3630 
3631  const unsigned int n_chunks = n_entries / 4;
3632  for (unsigned int i = 0; i < n_chunks; ++i)
3633  {
3634  __m256 u0 = in[4 * i + 0].data;
3635  __m256 u1 = in[4 * i + 1].data;
3636  __m256 u2 = in[4 * i + 2].data;
3637  __m256 u3 = in[4 * i + 3].data;
3638  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3639  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3640  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3641  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3642  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3643  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3644  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3645  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3646  __m128 res0 = _mm256_extractf128_ps(u0, 0);
3647  __m128 res4 = _mm256_extractf128_ps(u0, 1);
3648  __m128 res1 = _mm256_extractf128_ps(u1, 0);
3649  __m128 res5 = _mm256_extractf128_ps(u1, 1);
3650  __m128 res2 = _mm256_extractf128_ps(u2, 0);
3651  __m128 res6 = _mm256_extractf128_ps(u2, 1);
3652  __m128 res3 = _mm256_extractf128_ps(u3, 0);
3653  __m128 res7 = _mm256_extractf128_ps(u3, 1);
3654 
3655  if (add_into)
3656  {
3657  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3658  _mm_storeu_ps(out[0] + 4 * i, res0);
3659  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3660  _mm_storeu_ps(out[1] + 4 * i, res1);
3661  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3662  _mm_storeu_ps(out[2] + 4 * i, res2);
3663  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3664  _mm_storeu_ps(out[3] + 4 * i, res3);
3665  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3666  _mm_storeu_ps(out[4] + 4 * i, res4);
3667  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3668  _mm_storeu_ps(out[5] + 4 * i, res5);
3669  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3670  _mm_storeu_ps(out[6] + 4 * i, res6);
3671  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3672  _mm_storeu_ps(out[7] + 4 * i, res7);
3673  }
3674  else
3675  {
3676  _mm_storeu_ps(out[0] + 4 * i, res0);
3677  _mm_storeu_ps(out[1] + 4 * i, res1);
3678  _mm_storeu_ps(out[2] + 4 * i, res2);
3679  _mm_storeu_ps(out[3] + 4 * i, res3);
3680  _mm_storeu_ps(out[4] + 4 * i, res4);
3681  _mm_storeu_ps(out[5] + 4 * i, res5);
3682  _mm_storeu_ps(out[6] + 4 * i, res6);
3683  _mm_storeu_ps(out[7] + 4 * i, res7);
3684  }
3685  }
3686 
3687  if (add_into)
3688  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3689  for (unsigned int v = 0; v < 8; ++v)
3690  out[v][i] += in[i][v];
3691  else
3692  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3693  for (unsigned int v = 0; v < 8; ++v)
3694  out[v][i] = in[i][v];
3695 }
3696 
3697 # endif
3698 
3699 // for safety, also check that __AVX512F__ is defined in case the user manually
3700 // set some conflicting compile flags which prevent compilation
3701 
3702 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
3703 
3707 template <>
3708 class VectorizedArray<double, 8>
3709  : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
3710 {
3711 public:
3715  using value_type = double;
3716 
3721  VectorizedArray() = default;
3722 
3726  VectorizedArray(const double scalar)
3727  {
3728  this->operator=(scalar);
3729  }
3730 
3734  template <typename U>
3735  VectorizedArray(const std::initializer_list<U> &list)
3736  : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
3737  {}
3738 
3743  VectorizedArray &
3744  operator=(const double x) &
3745  {
3746  data = _mm512_set1_pd(x);
3747  return *this;
3748  }
3749 
3750 
3756  VectorizedArray &
3757  operator=(const double scalar) && = delete;
3758 
3763  double &
3764  operator[](const unsigned int comp)
3765  {
3766  AssertIndexRange(comp, 8);
3767  return *(reinterpret_cast<double *>(&data) + comp);
3768  }
3769 
3774  const double &
3775  operator[](const unsigned int comp) const
3776  {
3777  AssertIndexRange(comp, 8);
3778  return *(reinterpret_cast<const double *>(&data) + comp);
3779  }
3780 
3785  VectorizedArray &
3786  operator+=(const VectorizedArray &vec)
3787  {
3788  // if the compiler supports vector arithmetic, we can simply use +=
3789  // operator on the given data type. this allows the compiler to combine
3790  // additions with multiplication (fused multiply-add) if those
3791  // instructions are available. Otherwise, we need to use the built-in
3792  // intrinsic command for __m512d
3793 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3794  data += vec.data;
3795 # else
3796  data = _mm512_add_pd(data, vec.data);
3797 # endif
3798  return *this;
3799  }
3800 
3805  VectorizedArray &
3806  operator-=(const VectorizedArray &vec)
3807  {
3808 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3809  data -= vec.data;
3810 # else
3811  data = _mm512_sub_pd(data, vec.data);
3812 # endif
3813  return *this;
3814  }
3819  VectorizedArray &
3820  operator*=(const VectorizedArray &vec)
3821  {
3822 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3823  data *= vec.data;
3824 # else
3825  data = _mm512_mul_pd(data, vec.data);
3826 # endif
3827  return *this;
3828  }
3829 
3834  VectorizedArray &
3835  operator/=(const VectorizedArray &vec)
3836  {
3837 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3838  data /= vec.data;
3839 # else
3840  data = _mm512_div_pd(data, vec.data);
3841 # endif
3842  return *this;
3843  }
3844 
3851  void
3852  load(const double *ptr)
3853  {
3854  data = _mm512_loadu_pd(ptr);
3855  }
3856 
3858  void
3859  load(const float *ptr)
3860  {
3861  data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
3862  }
3863 
3871  void
3872  store(double *ptr) const
3873  {
3874  _mm512_storeu_pd(ptr, data);
3875  }
3876 
3878  void
3879  store(float *ptr) const
3880  {
3881  _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
3882  }
3883 
3889  void
3890  streaming_store(double *ptr) const
3891  {
3892  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
3893  ExcMessage("Memory not aligned"));
3894  _mm512_stream_pd(ptr, data);
3895  }
3896 
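// Usage sketch for streaming_store() (illustrative only, variable names made
// up): the destination must be 64-byte aligned, e.g.
//
//   double *dst = static_cast<double *>(std::aligned_alloc(64, 64)); // 8 doubles
//   VectorizedArray<double, 8> w = 1.0;
//   w.streaming_store(dst); // non-temporal store via _mm512_stream_pd
//   std::free(dst);
//
// an unaligned pointer would trigger the Assert above, and the non-temporal
// store itself requires the alignment.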
3910  void
3911  gather(const double *base_ptr, const unsigned int *offsets)
3912  {
3913 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
3914  // unfortunately, there does not appear to be a 256 bit integer load, so
3915  // do it by some reinterpret casts here. this is allowed because the Intel
3916  // API allows aliasing between different vector types.
3917  const __m256 index_val =
3918  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3919  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3920 
3921  // work around a warning with gcc-12 about an uninitialized initial state
3922  // for gather by starting with a zero guess, even though all lanes will be
3923  // overwritten
3924  __m512d zero = {};
3925  __mmask8 mask = 0xFF;
3926 
3927  data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
3928 # else
3929  for (unsigned int i = 0; i < 8; ++i)
3930  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3931 # endif
3932  }
3933 
3947  void
3948  scatter(const unsigned int *offsets, double *base_ptr) const
3949  {
3950 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
3951  for (unsigned int i = 0; i < 8; ++i)
3952  for (unsigned int j = i + 1; j < 8; ++j)
3953  Assert(offsets[i] != offsets[j],
3954  ExcMessage("Result of scatter undefined if two offset elements"
3955  " point to the same position"));
3956 
3957  // unfortunately, there does not appear to be a 256 bit integer load, so
3958  // do it by some reinterpret casts here. this is allowed because the Intel
3959  // API allows aliasing between different vector types.
3960  const __m256 index_val =
3961  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3962  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3963  _mm512_i32scatter_pd(base_ptr, index, data, 8);
3964 # else
3965  for (unsigned int i = 0; i < 8; ++i)
3966  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3967 # endif
3968  }
3969 
3974  double
3975  sum() const
3976  {
3977  VectorizedArray<double, 4> t1;
3978  t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
3979  return t1.sum();
3980  }
3981 
3987  __m512d data;
3988 
3989 private:
3994  __m256d
3995  get_lower() const
3996  {
3997  return _mm512_castpd512_pd256(data);
3998  }
3999 
4004  __m256d
4005  get_upper() const
4006  {
4007  return _mm512_extractf64x4_pd(data, 1);
4008  }
4009 
4015  VectorizedArray
4016  get_sqrt() const
4017  {
4018  VectorizedArray res;
4019  res.data = _mm512_sqrt_pd(data);
4020  return res;
4021  }
4022 
4028  VectorizedArray
4029  get_abs() const
4030  {
4031  // to compute the absolute value, perform bitwise andnot with -0. This
4032  // will leave all value and exponent bits unchanged but force the sign
4033  // value to +. Since there is no andnot for AVX512, we interpret the data
4034  // as 64 bit integers and do the andnot on those types (note that andnot
4035  // is a bitwise operation so the data type does not matter)
4036  __m512d mask = _mm512_set1_pd(-0.);
4037  VectorizedArray res;
4038  res.data = reinterpret_cast<__m512d>(
4039  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
4040  reinterpret_cast<__m512i>(data)));
4041  return res;
4042  }
4043 
4049  VectorizedArray
4050  get_max(const VectorizedArray &other) const
4051  {
4052  VectorizedArray res;
4053  res.data = _mm512_max_pd(data, other.data);
4054  return res;
4055  }
4056 
4062  VectorizedArray
4063  get_min(const VectorizedArray &other) const
4064  {
4065  VectorizedArray res;
4066  res.data = _mm512_min_pd(data, other.data);
4067  return res;
4068  }
4069 
4070  // Make a few functions friends.
4071  template <typename Number2, std::size_t width2>
4072  friend VectorizedArray<Number2, width2>
4073  std::sqrt(const VectorizedArray<Number2, width2> &);
4074  template <typename Number2, std::size_t width2>
4075  friend VectorizedArray<Number2, width2>
4076  std::abs(const VectorizedArray<Number2, width2> &);
4077  template <typename Number2, std::size_t width2>
4078  friend VectorizedArray<Number2, width2>
4079  std::max(const VectorizedArray<Number2, width2> &,
4080  const VectorizedArray<Number2, width2> &);
4081  template <typename Number2, std::size_t width2>
4082  friend VectorizedArray<Number2, width2>
4083  std::min(const VectorizedArray<Number2, width2> &,
4084  const VectorizedArray<Number2, width2> &);
4085 };
4086 
4087 
4088 
4092 template <>
4093 inline DEAL_II_ALWAYS_INLINE void
4094 vectorized_load_and_transpose(const unsigned int n_entries,
4095  const double *in,
4096  const unsigned int *offsets,
4097  VectorizedArray<double, 8> *out)
4098 {
4099  // do not do full transpose because the code is long and will most
4100  // likely not pay off because many processors have two load units
4101  // (for the top 8 instructions) but only 1 permute unit (for the 8
4102  // shuffle/unpack instructions). rather start the transposition on the
4103  // vectorized array of half the size with 256 bits
4104  const unsigned int n_chunks = n_entries / 4;
4105  for (unsigned int i = 0; i < n_chunks; ++i)
4106  {
4107  __m512d t0, t1, t2, t3 = {};
4108 
4109  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
4110  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
4111  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
4112  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
4113  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
4114  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
4115  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
4116  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
4117 
4118  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4119  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4120  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4121  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4122  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4123  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4124  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4125  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4126  }
4127  // remainder loop of work that does not divide by 4
4128  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4129  out[i].gather(in + i, offsets);
4130 }
4131 
4132 
4133 
4137 template <>
4138 inline DEAL_II_ALWAYS_INLINE void
4139 vectorized_load_and_transpose(const unsigned int n_entries,
4140  const std::array<double *, 8> &in,
4141  VectorizedArray<double, 8> *out)
4142 {
4143  const unsigned int n_chunks = n_entries / 4;
4144  for (unsigned int i = 0; i < n_chunks; ++i)
4145  {
4146  __m512d t0, t1, t2, t3 = {};
4147 
4148  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
4149  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
4150  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
4151  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
4152  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
4153  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
4154  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
4155  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
4156 
4157  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4158  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4159  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4160  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4161  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4162  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4163  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4164  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4165  }
4166 
4167  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4168  gather(out[i], in, i);
4169 }
4170 
4171 
4172 
4176 template <>
4177 inline DEAL_II_ALWAYS_INLINE void
4178 vectorized_transpose_and_store(const bool add_into,
4179  const unsigned int n_entries,
4180  const VectorizedArray<double, 8> *in,
4181  const unsigned int *offsets,
4182  double *out)
4183 {
4184  // as for the load, we split the store operations into 256 bit units to
4185  // better balance between code size, shuffle instructions, and stores
4186  const unsigned int n_chunks = n_entries / 4;
4187  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4188  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
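  // For reference (a reading of the two index vectors above, not part of the
  // source): in _mm512_permutex2var_pd(a, idx, b), result element i is
  // a[idx[i]] for idx[i] < 8 and b[idx[i] - 8] otherwise, with
  // _mm512_set_epi64 listing its arguments from element 7 down to element 0.
  // mask1 therefore selects {a0, a1, b0, b1, a4, a5, b4, b5} and mask2
  // selects {a2, a3, b2, b3, a6, a7, b6, b7}, arranging the data so that the
  // subsequent 256-bit extracts yield the transposed rows.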
4189  for (unsigned int i = 0; i < n_chunks; ++i)
4190  {
4191  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4192  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4193  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4194  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4195  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4196  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4197  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4198  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4199  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4200  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4201  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4202  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4203  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4204  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4205  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4206  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4207 
4208  // Cannot use the same store instructions in both paths of the 'if'
4209  // because the compiler cannot know that there is no aliasing
4210  // between pointers
4211  if (add_into)
4212  {
4213  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
4214  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4215  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
4216  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4217  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
4218  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4219  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
4220  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4221  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
4222  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4223  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
4224  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4225  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
4226  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4227  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
4228  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4229  }
4230  else
4231  {
4232  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4233  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4234  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4235  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4236  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4237  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4238  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4239  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4240  }
4241  }
4242 
4243  // remainder loop of work that does not divide by 4
4244  if (add_into)
4245  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4246  for (unsigned int v = 0; v < 8; ++v)
4247  out[offsets[v] + i] += in[i][v];
4248  else
4249  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4250  for (unsigned int v = 0; v < 8; ++v)
4251  out[offsets[v] + i] = in[i][v];
4252 }
4253 
4254 
4255 
4259 template <>
4260 inline DEAL_II_ALWAYS_INLINE void
4261 vectorized_transpose_and_store(const bool add_into,
4262  const unsigned int n_entries,
4263  const VectorizedArray<double, 8> *in,
4264  std::array<double *, 8> &out)
4265 {
4266  // see the comments in the vectorized_transpose_and_store above
4267 
4268  const unsigned int n_chunks = n_entries / 4;
4269  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4270  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4271  for (unsigned int i = 0; i < n_chunks; ++i)
4272  {
4273  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4274  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4275  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4276  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4277  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4278  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4279  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4280  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4281  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4282  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4283  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4284  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4285  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4286  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4287  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4288  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4289 
4290  if (add_into)
4291  {
4292  res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
4293  _mm256_storeu_pd(out[0] + 4 * i, res0);
4294  res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
4295  _mm256_storeu_pd(out[1] + 4 * i, res1);
4296  res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
4297  _mm256_storeu_pd(out[2] + 4 * i, res2);
4298  res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
4299  _mm256_storeu_pd(out[3] + 4 * i, res3);
4300  res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
4301  _mm256_storeu_pd(out[4] + 4 * i, res4);
4302  res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
4303  _mm256_storeu_pd(out[5] + 4 * i, res5);
4304  res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
4305  _mm256_storeu_pd(out[6] + 4 * i, res6);
4306  res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
4307  _mm256_storeu_pd(out[7] + 4 * i, res7);
4308  }
4309  else
4310  {
4311  _mm256_storeu_pd(out[0] + 4 * i, res0);
4312  _mm256_storeu_pd(out[1] + 4 * i, res1);
4313  _mm256_storeu_pd(out[2] + 4 * i, res2);
4314  _mm256_storeu_pd(out[3] + 4 * i, res3);
4315  _mm256_storeu_pd(out[4] + 4 * i, res4);
4316  _mm256_storeu_pd(out[5] + 4 * i, res5);
4317  _mm256_storeu_pd(out[6] + 4 * i, res6);
4318  _mm256_storeu_pd(out[7] + 4 * i, res7);
4319  }
4320  }
4321 
4322  if (add_into)
4323  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4324  for (unsigned int v = 0; v < 8; ++v)
4325  out[v][i] += in[i][v];
4326  else
4327  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4328  for (unsigned int v = 0; v < 8; ++v)
4329  out[v][i] = in[i][v];
4330 }
4331 
4332 
4333 
4337 template <>
4338 class VectorizedArray<float, 16>
4339  : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
4340 {
4341 public:
4345  using value_type = float;
4346 
4351  VectorizedArray() = default;
4352 
4356  VectorizedArray(const float scalar)
4357  {
4358  this->operator=(scalar);
4359  }
4360 
4364  template <typename U>
4365  VectorizedArray(const std::initializer_list<U> &list)
4366  : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
4367  {}
4368 
4373  VectorizedArray &
4374  operator=(const float x) &
4375  {
4376  data = _mm512_set1_ps(x);
4377  return *this;
4378  }
4379 
4385  VectorizedArray &
4386  operator=(const float scalar) && = delete;
4387 
4392  float &
4393  operator[](const unsigned int comp)
4394  {
4395  AssertIndexRange(comp, 16);
4396  return *(reinterpret_cast<float *>(&data) + comp);
4397  }
4398 
4403  const float &
4404  operator[](const unsigned int comp) const
4405  {
4406  AssertIndexRange(comp, 16);
4407  return *(reinterpret_cast<const float *>(&data) + comp);
4408  }
4409 
4414  VectorizedArray &
4415  operator+=(const VectorizedArray &vec)
4416  {
4417  // if the compiler supports vector arithmetic, we can simply use +=
4418  // operator on the given data type. this allows the compiler to combine
4419  // additions with multiplication (fused multiply-add) if those
4420  // instructions are available. Otherwise, we need to use the built-in
4421  // intrinsic command for __m512
4422 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4423  data += vec.data;
4424 # else
4425  data = _mm512_add_ps(data, vec.data);
4426 # endif
4427  return *this;
4428  }
4429 
4434  VectorizedArray &
4435  operator-=(const VectorizedArray &vec)
4436  {
4437 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4438  data -= vec.data;
4439 # else
4440  data = _mm512_sub_ps(data, vec.data);
4441 # endif
4442  return *this;
4443  }
4448  VectorizedArray &
4449  operator*=(const VectorizedArray &vec)
4450  {
4451 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4452  data *= vec.data;
4453 # else
4454  data = _mm512_mul_ps(data, vec.data);
4455 # endif
4456  return *this;
4457  }
4458 
4463  VectorizedArray &
4464  operator/=(const VectorizedArray &vec)
4465  {
4466 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4467  data /= vec.data;
4468 # else
4469  data = _mm512_div_ps(data, vec.data);
4470 # endif
4471  return *this;
4472  }
4473 
4480  void
4481  load(const float *ptr)
4482  {
4483  data = _mm512_loadu_ps(ptr);
4484  }
4485 
4493  void
4494  store(float *ptr) const
4495  {
4496  _mm512_storeu_ps(ptr, data);
4497  }
4498 
4504  void
4505  streaming_store(float *ptr) const
4506  {
4507  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
4508  ExcMessage("Memory not aligned"));
4509  _mm512_stream_ps(ptr, data);
4510  }
4511 
4525  void
4526  gather(const float *base_ptr, const unsigned int *offsets)
4527  {
4528 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
4529  // unfortunately, there does not appear to be a 512 bit integer load, so
4530  // do it by some reinterpret casts here. this is allowed because the Intel
4531  // API allows aliasing between different vector types.
4532  const __m512 index_val =
4533  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4534  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4535 
4536  // work around a warning with gcc-12 about an uninitialized initial state
4537  // for gather by starting with a zero guess, even though all lanes will be
4538  // overwritten
4539  __m512 zero = {};
4540  __mmask16 mask = 0xFFFF;
4541 
4542  data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
4543 # else
4544  for (unsigned int i = 0; i < 16; ++i)
4545  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4546 # endif
4547  }
4548 
4562  void
4563  scatter(const unsigned int *offsets, float *base_ptr) const
4564  {
4565 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
4566  for (unsigned int i = 0; i < 16; ++i)
4567  for (unsigned int j = i + 1; j < 16; ++j)
4568  Assert(offsets[i] != offsets[j],
4569  ExcMessage("Result of scatter undefined if two offset elements"
4570  " point to the same position"));
4571 
4572  // unfortunately, there does not appear to be a 512 bit integer load, so
4573  // do it by some reinterpret casts here. this is allowed because the Intel
4574  // API allows aliasing between different vector types.
4575  const __m512 index_val =
4576  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4577  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4578  _mm512_i32scatter_ps(base_ptr, index, data, 4);
4579 # else
4580  for (unsigned int i = 0; i < 16; ++i)
4581  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4582 # endif
4583  }
4584 
4589  float
4590  sum() const
4591  {
4592  VectorizedArray<float, 8> t1;
4593  t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
4594  return t1.sum();
4595  }
4596 
4602  __m512 data;
4603 
4604 private:
4609  __m256
4610  get_lower() const
4611  {
4612  return _mm512_castps512_ps256(data);
4613  }
4614 
4619  __m256
4620  get_upper() const
4621  {
4622  return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
4623  }
4624 
4630  VectorizedArray
4631  get_sqrt() const
4632  {
4633  VectorizedArray res;
4634  res.data = _mm512_sqrt_ps(data);
4635  return res;
4636  }
4637 
4643  VectorizedArray
4644  get_abs() const
4645  {
4646  // to compute the absolute value, perform bitwise andnot with -0. This
4647  // will leave all value and exponent bits unchanged but force the sign
4648  // value to +. Since there is no andnot for AVX512, we interpret the data
4649  // as 32 bit integers and do the andnot on those types (note that andnot
4650  // is a bitwise operation so the data type does not matter)
4651  __m512 mask = _mm512_set1_ps(-0.f);
4652  VectorizedArray res;
4653  res.data = reinterpret_cast<__m512>(
4654  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
4655  reinterpret_cast<__m512i>(data)));
4656  return res;
4657  }
4658 
4664  VectorizedArray
4665  get_max(const VectorizedArray &other) const
4666  {
4667  VectorizedArray res;
4668  res.data = _mm512_max_ps(data, other.data);
4669  return res;
4670  }
4671 
4677  VectorizedArray
4678  get_min(const VectorizedArray &other) const
4679  {
4680  VectorizedArray res;
4681  res.data = _mm512_min_ps(data, other.data);
4682  return res;
4683  }
4684 
4685  // Make a few functions friends.
4686  template <typename Number2, std::size_t width2>
4687  friend VectorizedArray<Number2, width2>
4688  std::sqrt(const VectorizedArray<Number2, width2> &);
4689  template <typename Number2, std::size_t width2>
4690  friend VectorizedArray<Number2, width2>
4691  std::abs(const VectorizedArray<Number2, width2> &);
4692  template <typename Number2, std::size_t width2>
4693  friend VectorizedArray<Number2, width2>
4694  std::max(const VectorizedArray<Number2, width2> &,
4695  const VectorizedArray<Number2, width2> &);
4696  template <typename Number2, std::size_t width2>
4697  friend VectorizedArray<Number2, width2>
4698  std::min(const VectorizedArray<Number2, width2> &,
4699  const VectorizedArray<Number2, width2> &);
4700 };
4701 
4702 
4703 
4707 template <>
4708 inline DEAL_II_ALWAYS_INLINE void
4709 vectorized_load_and_transpose(const unsigned int n_entries,
4710  const float *in,
4711  const unsigned int *offsets,
4712  VectorizedArray<float, 16> *out)
4713 {
4714  // Similar to the double case, we perform the work on smaller entities. In
4715  // this case, we start from 128 bit arrays and insert them into a full 512
4716  // bit index. This reduces the code size and register pressure because we do
4717  // shuffles on 4 numbers rather than 16.
4718  const unsigned int n_chunks = n_entries / 4;
4719 
4720  // To avoid warnings about uninitialized variables, need to initialize one
4721  // variable to a pre-existing value in out, which will never get used in
4722  // the end. Keep the initialization outside the loop because of a bug in
4723  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
4724  // case t3 is initialized to zero (inside/outside of loop), see
4725  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
4726  __m512 t0, t1, t2, t3;
4727  if (n_chunks > 0)
4728  t3 = out[0].data;
4729  for (unsigned int i = 0; i < n_chunks; ++i)
4730  {
4731  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4732  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4733  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4734  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4735  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4736  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4737  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4738  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4739  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4740  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4741  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4742  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4743  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4744  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4745  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4746  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4747 
4748  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4749  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4750  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4751  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4752 
4753  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4754  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4755  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4756  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4757  }
4758 
4759  // remainder loop of work that does not divide by 4
4760  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4761  out[i].gather(in + i, offsets);
4762 }
4763 
4764 
4765 
4769 template <>
4770 inline DEAL_II_ALWAYS_INLINE void
4771 vectorized_load_and_transpose(const unsigned int n_entries,
4772  const std::array<float *, 16> &in,
4773  VectorizedArray<float, 16> *out)
4774 {
4775  // see the comments in the vectorized_load_and_transpose above
4776 
4777  const unsigned int n_chunks = n_entries / 4;
4778 
4779  __m512 t0, t1, t2, t3;
4780  if (n_chunks > 0)
4781  t3 = out[0].data;
4782  for (unsigned int i = 0; i < n_chunks; ++i)
4783  {
4784  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4785  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4786  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4787  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4788  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4789  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4790  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4791  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4792  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4793  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4794  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4795  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4796  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4797  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4798  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4799  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4800 
4801  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4802  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4803  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4804  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4805 
4806  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4807  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4808  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4809  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4810  }
4811 
4812  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4813  gather(out[i], in, i);
4814 }
4815 
4816 
4817 
4821 template <>
4822 inline DEAL_II_ALWAYS_INLINE void
4823 vectorized_transpose_and_store(const bool add_into,
4824  const unsigned int n_entries,
4825  const VectorizedArray<float, 16> *in,
4826  const unsigned int *offsets,
4827  float *out)
4828 {
4829  const unsigned int n_chunks = n_entries / 4;
4830  for (unsigned int i = 0; i < n_chunks; ++i)
4831  {
4832  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4833  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4834  __m512 t2 =
4835  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4836  __m512 t3 =
4837  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4838  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4839  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4840  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4841  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4842 
4843  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4844  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4845  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4846  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4847  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4848  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4849  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4850  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4851  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4852  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4853  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4854  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4855  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4856  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4857  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4858  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4859 
4860  // Cannot use the same store instructions in both paths of the 'if'
4861  // because the compiler cannot know that there is no aliasing between
4862  // pointers
4863  if (add_into)
4864  {
4865  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4866  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4867  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4868  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4869  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4870  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4871  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4872  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4873  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4874  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4875  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4876  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4877  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4878  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4879  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4880  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4881  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4882  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4883  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4884  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4885  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4886  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4887  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4888  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4889  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4890  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4891  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4892  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4893  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4894  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4895  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4896  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4897  }
4898  else
4899  {
4900  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4901  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4902  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4903  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4904  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4905  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4906  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4907  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4908  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4909  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4910  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4911  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4912  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4913  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4914  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4915  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4916  }
4917  }
4918 
4919  // remainder loop of work that does not divide by 4
4920  if (add_into)
4921  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4922  for (unsigned int v = 0; v < 16; ++v)
4923  out[offsets[v] + i] += in[i][v];
4924  else
4925  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4926  for (unsigned int v = 0; v < 16; ++v)
4927  out[offsets[v] + i] = in[i][v];
4928 }
4929 
4930 
4931 
4935 template <>
4936 inline DEAL_II_ALWAYS_INLINE void
4937 vectorized_transpose_and_store(const bool add_into,
4938  const unsigned int n_entries,
4939  const VectorizedArray<float, 16> *in,
4940  std::array<float *, 16> &out)
4941 {
4942  // see the comments in the vectorized_transpose_and_store above
4943 
4944  const unsigned int n_chunks = n_entries / 4;
4945  for (unsigned int i = 0; i < n_chunks; ++i)
4946  {
4947  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4948  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4949  __m512 t2 =
4950  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4951  __m512 t3 =
4952  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4953  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4954  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4955  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4956  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4957 
4958  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4959  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4960  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4961  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4962  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4963  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4964  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4965  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4966  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4967  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4968  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4969  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4970  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4971  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4972  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4973  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4974 
4975  if (add_into)
4976  {
4977  res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
4978  _mm_storeu_ps(out[0] + 4 * i, res0);
4979  res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
4980  _mm_storeu_ps(out[1] + 4 * i, res1);
4981  res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
4982  _mm_storeu_ps(out[2] + 4 * i, res2);
4983  res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
4984  _mm_storeu_ps(out[3] + 4 * i, res3);
4985  res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
4986  _mm_storeu_ps(out[4] + 4 * i, res4);
4987  res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
4988  _mm_storeu_ps(out[5] + 4 * i, res5);
4989  res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
4990  _mm_storeu_ps(out[6] + 4 * i, res6);
4991  res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
4992  _mm_storeu_ps(out[7] + 4 * i, res7);
4993  res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
4994  _mm_storeu_ps(out[8] + 4 * i, res8);
4995  res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
4996  _mm_storeu_ps(out[9] + 4 * i, res9);
4997  res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
4998  _mm_storeu_ps(out[10] + 4 * i, res10);
4999  res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
5000  _mm_storeu_ps(out[11] + 4 * i, res11);
5001  res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
5002  _mm_storeu_ps(out[12] + 4 * i, res12);
5003  res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
5004  _mm_storeu_ps(out[13] + 4 * i, res13);
5005  res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
5006  _mm_storeu_ps(out[14] + 4 * i, res14);
5007  res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
5008  _mm_storeu_ps(out[15] + 4 * i, res15);
5009  }
5010  else
5011  {
5012  _mm_storeu_ps(out[0] + 4 * i, res0);
5013  _mm_storeu_ps(out[1] + 4 * i, res1);
5014  _mm_storeu_ps(out[2] + 4 * i, res2);
5015  _mm_storeu_ps(out[3] + 4 * i, res3);
5016  _mm_storeu_ps(out[4] + 4 * i, res4);
5017  _mm_storeu_ps(out[5] + 4 * i, res5);
5018  _mm_storeu_ps(out[6] + 4 * i, res6);
5019  _mm_storeu_ps(out[7] + 4 * i, res7);
5020  _mm_storeu_ps(out[8] + 4 * i, res8);
5021  _mm_storeu_ps(out[9] + 4 * i, res9);
5022  _mm_storeu_ps(out[10] + 4 * i, res10);
5023  _mm_storeu_ps(out[11] + 4 * i, res11);
5024  _mm_storeu_ps(out[12] + 4 * i, res12);
5025  _mm_storeu_ps(out[13] + 4 * i, res13);
5026  _mm_storeu_ps(out[14] + 4 * i, res14);
5027  _mm_storeu_ps(out[15] + 4 * i, res15);
5028  }
5029  }
5030 
5031  if (add_into)
5032  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5033  for (unsigned int v = 0; v < 16; ++v)
5034  out[v][i] += in[i][v];
5035  else
5036  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
5037  for (unsigned int v = 0; v < 16; ++v)
5038  out[v][i] = in[i][v];
5039 }
5040 
5041 # endif
5042 
5043 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
5044  defined(__VSX__)
5045 
5046 template <>
5047 class VectorizedArray<double, 2>
5048  : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
5049 {
5050 public:
5054  using value_type = double;
5055 
5060  VectorizedArray() = default;
5061 
5065  VectorizedArray(const double scalar)
5066  {
5067  this->operator=(scalar);
5068  }
5069 
5073  template <typename U>
5074  VectorizedArray(const std::initializer_list<U> &list)
5075  : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
5076  {}
5077 
5082  VectorizedArray &
5083  operator=(const double x) &
5084  {
5085  data = vec_splats(x);
5086 
5087  // Some compilers believe that vec_splats sets 'x', but that's not true.
5088  // They then warn about setting a variable and not using it. Suppress the
5089  // warning by "using" the variable:
5090  (void)x;
5091  return *this;
5092  }
5093 
5099  VectorizedArray &
5100  operator=(const double scalar) && = delete;
5101 
5106  double &
5107  operator[](const unsigned int comp)
5108  {
5109  AssertIndexRange(comp, 2);
5110  return *(reinterpret_cast<double *>(&data) + comp);
5111  }
5112 
5117  const double &
5118  operator[](const unsigned int comp) const
5119  {
5120  AssertIndexRange(comp, 2);
5121  return *(reinterpret_cast<const double *>(&data) + comp);
5122  }
5123 
5128  VectorizedArray &
5129  operator+=(const VectorizedArray &vec)
5130  {
5131  data = vec_add(data, vec.data);
5132  return *this;
5133  }
5134 
5139  VectorizedArray &
5140  operator-=(const VectorizedArray &vec)
5141  {
5142  data = vec_sub(data, vec.data);
5143  return *this;
5144  }
5145 
5150  VectorizedArray &
5151  operator*=(const VectorizedArray &vec)
5152  {
5153  data = vec_mul(data, vec.data);
5154  return *this;
5155  }
5156 
5161  VectorizedArray &
5162  operator/=(const VectorizedArray &vec)
5163  {
5164  data = vec_div(data, vec.data);
5165  return *this;
5166  }
5167 
5173  void
5174  load(const double *ptr)
5175  {
5176  data = vec_vsx_ld(0, ptr);
5177  }
5178 
5184  void
5185  store(double *ptr) const
5186  {
5187  vec_vsx_st(data, 0, ptr);
5188  }
5189 
5194  void
5195  streaming_store(double *ptr) const
5196  {
5197  store(ptr);
5198  }
5199 
5204  void
5205  gather(const double *base_ptr, const unsigned int *offsets)
5206  {
5207  for (unsigned int i = 0; i < 2; ++i)
5208  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
5209  }
5210 
5215  void
5216  scatter(const unsigned int *offsets, double *base_ptr) const
5217  {
5218  for (unsigned int i = 0; i < 2; ++i)
5219  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
5220  }
5221 
5227  __vector double data;
5228 
5229 private:
5235  VectorizedArray
5236  get_sqrt() const
5237  {
5238  VectorizedArray res;
5239  res.data = vec_sqrt(data);
5240  return res;
5241  }
5242 
5248  VectorizedArray
5249  get_abs() const
5250  {
5251  VectorizedArray res;
5252  res.data = vec_abs(data);
5253  return res;
5254  }
5255 
5261  VectorizedArray
5262  get_max(const VectorizedArray &other) const
5263  {
5264  VectorizedArray res;
5265  res.data = vec_max(data, other.data);
5266  return res;
5267  }
5268 
5274  VectorizedArray
5275  get_min(const VectorizedArray &other) const
5276  {
5277  VectorizedArray res;
5278  res.data = vec_min(data, other.data);
5279  return res;
5280  }
5281 
5282  // Make a few functions friends.
5283  template <typename Number2, std::size_t width2>
5284  friend VectorizedArray<Number2, width2>
5285  std::sqrt(const VectorizedArray<Number2, width2> &);
5286  template <typename Number2, std::size_t width2>
5287  friend VectorizedArray<Number2, width2>
5288  std::abs(const VectorizedArray<Number2, width2> &);
5289  template <typename Number2, std::size_t width2>
5290  friend VectorizedArray<Number2, width2>
5291  std::max(const VectorizedArray<Number2, width2> &,
5292  const VectorizedArray<Number2, width2> &);
5293  template <typename Number2, std::size_t width2>
5294  friend VectorizedArray<Number2, width2>
5295  std::min(const VectorizedArray<Number2, width2> &,
5296  const VectorizedArray<Number2, width2> &);
5297 };
5298 
5299 
5300 
5301 template <>
5302 class VectorizedArray<float, 4>
5303  : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
5304 {
5305 public:
5309  using value_type = float;
5310 
5315  VectorizedArray() = default;
5316 
5320  VectorizedArray(const float scalar)
5321  {
5322  this->operator=(scalar);
5323  }
5324 
5328  template <typename U>
5329  VectorizedArray(const std::initializer_list<U> &list)
5330  : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
5331  {}
5332 
5337  VectorizedArray &
5338  operator=(const float x) &
5339  {
5340  data = vec_splats(x);
5341 
5342  // Some compilers believe that vec_splats sets 'x', but that's not true.
5343  // They then warn about setting a variable and not using it. Suppress the
5344  // warning by "using" the variable:
5345  (void)x;
5346  return *this;
5347  }
5348 
5354  VectorizedArray &
5355  operator=(const float scalar) && = delete;
5356 
5361  float &
5362  operator[](const unsigned int comp)
5363  {
5364  AssertIndexRange(comp, 4);
5365  return *(reinterpret_cast<float *>(&data) + comp);
5366  }
5367 
5372  const float &
5373  operator[](const unsigned int comp) const
5374  {
5375  AssertIndexRange(comp, 4);
5376  return *(reinterpret_cast<const float *>(&data) + comp);
5377  }
5378 
5383  VectorizedArray &
5384  operator+=(const VectorizedArray &vec)
5385  {
5386  data = vec_add(data, vec.data);
5387  return *this;
5388  }
5389 
5394  VectorizedArray &
5395  operator-=(const VectorizedArray &vec)
5396  {
5397  data = vec_sub(data, vec.data);
5398  return *this;
5399  }
5400 
5405  VectorizedArray &
5406  operator*=(const VectorizedArray &vec)
5407  {
5408  data = vec_mul(data, vec.data);
5409  return *this;
5410  }
5411 
5416  VectorizedArray &
5417  operator/=(const VectorizedArray &vec)
5418  {
5419  data = vec_div(data, vec.data);
5420  return *this;
5421  }
5422 
5428  void
5429  load(const float *ptr)
5430  {
5431  data = vec_vsx_ld(0, ptr);
5432  }
5433 
5439  void
5440  store(float *ptr) const
5441  {
5442  vec_vsx_st(data, 0, ptr);
5443  }
5444 
5449  void
5450  streaming_store(float *ptr) const
5451  {
5452  store(ptr);
5453  }
5454 
5459  void
5460  gather(const float *base_ptr, const unsigned int *offsets)
5461  {
5462  for (unsigned int i = 0; i < 4; ++i)
5463  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
5464  }
5465 
5470  void
5471  scatter(const unsigned int *offsets, float *base_ptr) const
5472  {
5473  for (unsigned int i = 0; i < 4; ++i)
5474  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
5475  }
5476 
5482  __vector float data;
5483 
5484 private:
5490  VectorizedArray
5491  get_sqrt() const
5492  {
5493  VectorizedArray res;
5494  res.data = vec_sqrt(data);
5495  return res;
5496  }
5497 
5503  VectorizedArray
5504  get_abs() const
5505  {
5506  VectorizedArray res;
5507  res.data = vec_abs(data);
5508  return res;
5509  }
5510 
5516  VectorizedArray
5517  get_max(const VectorizedArray &other) const
5518  {
5519  VectorizedArray res;
5520  res.data = vec_max(data, other.data);
5521  return res;
5522  }
5523 
5529  VectorizedArray
5530  get_min(const VectorizedArray &other) const
5531  {
5532  VectorizedArray res;
5533  res.data = vec_min(data, other.data);
5534  return res;
5535  }
5536 
5537  // Make a few functions friends.
5538  template <typename Number2, std::size_t width2>
5539  friend VectorizedArray<Number2, width2>
5540  std::sqrt(const VectorizedArray<Number2, width2> &);
5541  template <typename Number2, std::size_t width2>
5542  friend VectorizedArray<Number2, width2>
5543  std::abs(const VectorizedArray<Number2, width2> &);
5544  template <typename Number2, std::size_t width2>
5545  friend VectorizedArray<Number2, width2>
5546  std::max(const VectorizedArray<Number2, width2> &,
5547  const VectorizedArray<Number2, width2> &);
5548  template <typename Number2, std::size_t width2>
5549  friend VectorizedArray<Number2, width2>
5550  std::min(const VectorizedArray<Number2, width2> &,
5551  const VectorizedArray<Number2, width2> &);
5552 };
5553 
5554 # endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) &&
5555  // defined(__VSX__)
5556 
5557 
5558 #endif // DOXYGEN
5559 
5560 
5561 
5572 template <typename Number, std::size_t width>
5573 inline DEAL_II_ALWAYS_INLINE bool
5574 operator==(const VectorizedArray<Number, width> &lhs,
5575  const VectorizedArray<Number, width> &rhs)
5576 {
5577  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
5578  if (lhs[i] != rhs[i])
5579  return false;
5580 
5581  return true;
5582 }
5583 
5584 
5590 template <typename Number, std::size_t width>
5591 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5592 operator+(const VectorizedArray<Number, width> &u,
5593  const VectorizedArray<Number, width> &v)
5594 {
5595  VectorizedArray<Number, width> tmp = u;
5596  return tmp += v;
5597 }
5598 
5604 template <typename Number, std::size_t width>
5605 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5606 operator-(const VectorizedArray<Number, width> &u,
5607  const VectorizedArray<Number, width> &v)
5608 {
5609  VectorizedArray<Number, width> tmp = u;
5610  return tmp -= v;
5611 }
5612 
5618 template <typename Number, std::size_t width>
5619 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5620 operator*(const VectorizedArray<Number, width> &u,
5621  const VectorizedArray<Number, width> &v)
5622 {
5623  VectorizedArray<Number, width> tmp = u;
5624  return tmp *= v;
5625 }
5626 
5632 template <typename Number, std::size_t width>
5633 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5634 operator/(const VectorizedArray<Number, width> &u,
5635  const VectorizedArray<Number, width> &v)
5636 {
5637  VectorizedArray<Number, width> tmp = u;
5638  return tmp /= v;
5639 }
5640 
5647 template <typename Number, std::size_t width>
5648 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5649 operator+(const Number &u, const VectorizedArray<Number, width> &v)
5650 {
5651  VectorizedArray<Number, width> tmp = u;
5652  return tmp += v;
5653 }
5654 
5663 template <std::size_t width>
5664 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5665 operator+(const double u, const VectorizedArray<float, width> &v)
5666 {
5667  VectorizedArray<float, width> tmp = static_cast<float>(u);
5668  return tmp += v;
5669 }
5670 
5677 template <typename Number, std::size_t width>
5678 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5679 operator+(const VectorizedArray<Number, width> &v, const Number &u)
5680 {
5681  return u + v;
5682 }
5683 
5692 template <std::size_t width>
5693 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5694 operator+(const VectorizedArray<float, width> &v, const double u)
5695 {
5696  return u + v;
5697 }
5698 
5705 template <typename Number, std::size_t width>
5706 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5707 operator-(const Number &u, const VectorizedArray<Number, width> &v)
5708 {
5709  VectorizedArray<Number, width> tmp = u;
5710  return tmp -= v;
5711 }
5712 
5721 template <std::size_t width>
5722 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5723 operator-(const double u, const VectorizedArray<float, width> &v)
5724 {
5725  VectorizedArray<float, width> tmp = static_cast<float>(u);
5726  return tmp -= v;
5727 }
5728 
5735 template <typename Number, std::size_t width>
5736 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5737 operator-(const VectorizedArray<Number, width> &v, const Number &u)
5738 {
5739  VectorizedArray<Number, width> tmp = u;
5740  return v - tmp;
5741 }
5742 
5751 template <std::size_t width>
5752 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5753 operator-(const VectorizedArray<float, width> &v, const double u)
5754 {
5755  VectorizedArray<float, width> tmp = static_cast<float>(u);
5756  return v - tmp;
5757 }
5758 
5765 template <typename Number, std::size_t width>
5766 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5767 operator*(const Number &u, const VectorizedArray<Number, width> &v)
5768 {
5769  VectorizedArray<Number, width> tmp = u;
5770  return tmp *= v;
5771 }
5772 
5781 template <std::size_t width>
5782 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5783 operator*(const double u, const VectorizedArray<float, width> &v)
5784 {
5785  VectorizedArray<float, width> tmp = static_cast<float>(u);
5786  return tmp *= v;
5787 }
5788 
5795 template <typename Number, std::size_t width>
5796 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5797 operator*(const VectorizedArray<Number, width> &v, const Number &u)
5798 {
5799  return u * v;
5800 }
5801 
5810 template <std::size_t width>
5811 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5812 operator*(const VectorizedArray<float, width> &v, const double u)
5813 {
5814  return u * v;
5815 }
5816 
5823 template <typename Number, std::size_t width>
5824 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5825 operator/(const Number &u, const VectorizedArray<Number, width> &v)
5826 {
5827  VectorizedArray<Number, width> tmp = u;
5828  return tmp /= v;
5829 }
5830 
5839 template <std::size_t width>
5840 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5841 operator/(const double u, const VectorizedArray<float, width> &v)
5842 {
5843  VectorizedArray<float, width> tmp = static_cast<float>(u);
5844  return tmp /= v;
5845 }
5846 
5853 template <typename Number, std::size_t width>
5854 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5855 operator/(const VectorizedArray<Number, width> &v, const Number &u)
5856 {
5857  VectorizedArray<Number, width> tmp = u;
5858  return v / tmp;
5859 }
5860 
5869 template <std::size_t width>
5870 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
5871 operator/(const VectorizedArray<float, width> &v, const double u)
5872 {
5873  VectorizedArray<float, width> tmp = static_cast<float>(u);
5874  return v / tmp;
5875 }
5876 
5882 template <typename Number, std::size_t width>
5883 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5884 operator+(const VectorizedArray<Number, width> &u)
5885 {
5886  return u;
5887 }
5888 
5894 template <typename Number, std::size_t width>
5895 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
5896 operator-(const VectorizedArray<Number, width> &u)
5897 {
5898  // to get a negative sign, subtract the input from zero (could also
5899  // multiply by -1, but this one is slightly simpler)
5900  return VectorizedArray<Number, width>() - u;
5901 }
5902 
5908 template <typename Number, std::size_t width>
5909 inline std::ostream &
5910 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
5911 {
5912  constexpr unsigned int n = VectorizedArray<Number, width>::size();
5913  for (unsigned int i = 0; i < n - 1; ++i)
5914  out << p[i] << ' ';
5915  out << p[n - 1];
5916 
5917  return out;
5918 }
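As a short illustration of the operators above (again only a sketch, not part of the header): a scalar operand is broadcast to all lanes, a double scalar paired with a VectorizedArray<float, width> is first converted to float, and operator<< writes the lanes separated by single spaces.

#include <deal.II/base/vectorization.h>

#include <iostream>

int
main()
{
  dealii::VectorizedArray<float> v = 1.5f; // every lane holds 1.5
  v = 2.0 * v + 1.0;                       // double scalars are cast to float
  v = -v;                                  // unary minus, lane by lane
  std::cout << v << std::endl;             // prints -4 once per lane
}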
5919 
5934 enum class SIMDComparison : int
5935 {
5936 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
5937  equal = _CMP_EQ_OQ,
5938  not_equal = _CMP_NEQ_OQ,
5939  less_than = _CMP_LT_OQ,
5940  less_than_or_equal = _CMP_LE_OQ,
5941  greater_than = _CMP_GT_OQ,
5942  greater_than_or_equal = _CMP_GE_OQ
5943 #else
5944  equal,
5945  not_equal,
5946  less_than,
5947  less_than_or_equal,
5948  greater_than,
5949  greater_than_or_equal
5950 #endif
5951 };
5952 
5953 
6017 template <SIMDComparison predicate, typename Number>
6018 DEAL_II_ALWAYS_INLINE inline Number
6019 compare_and_apply_mask(const Number &left,
6020  const Number &right,
6021  const Number &true_value,
6022  const Number &false_value)
6023 {
6024  bool mask;
6025  switch (predicate)
6026  {
6027  case SIMDComparison::equal:
6028  mask = (left == right);
6029  break;
6030  case SIMDComparison::not_equal:
6031  mask = (left != right);
6032  break;
6033  case SIMDComparison::less_than:
6034  mask = (left < right);
6035  break;
6036  case SIMDComparison::less_than_or_equal:
6037  mask = (left <= right);
6038  break;
6039  case SIMDComparison::greater_than:
6040  mask = (left > right);
6041  break;
6042  case SIMDComparison::greater_than_or_equal:
6043  mask = (left >= right);
6044  break;
6045  }
6046 
6047  return mask ? true_value : false_value;
6048 }
6049 
6050 
6055 template <SIMDComparison predicate, typename Number>
6056 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
6057 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
6058  const VectorizedArray<Number, 1> &right,
6059  const VectorizedArray<Number, 1> &true_value,
6060  const VectorizedArray<Number, 1> &false_value)
6061 {
6062  VectorizedArray<Number, 1> result;
6063  result.data = compare_and_apply_mask<predicate, Number>(left.data,
6064  right.data,
6065  true_value.data,
6066  false_value.data);
6067  return result;
6068 }
6069 
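compare_and_apply_mask performs the comparison lane by lane and selects between the last two arguments, i.e. it is the vectorized replacement for a scalar ternary expression; the SIMD specializations that follow below implement the same contract with intrinsics. A hedged sketch of typical use (the helper name clamp_to_zero is invented for this example):

#include <deal.II/base/vectorization.h>

// Branch-free version of (x < 0) ? 0 : x, applied to every lane.
dealii::VectorizedArray<double>
clamp_to_zero(const dealii::VectorizedArray<double> &x)
{
  const dealii::VectorizedArray<double> zero = 0.;
  return dealii::compare_and_apply_mask<dealii::SIMDComparison::less_than>(
    x, zero, /*true_value=*/zero, /*false_value=*/x);
}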
6072 #ifndef DOXYGEN
6073 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
6074 
6075 template <SIMDComparison predicate>
6076 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
6077 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
6078  const VectorizedArray<float, 16> &right,
6079  const VectorizedArray<float, 16> &true_values,
6080  const VectorizedArray<float, 16> &false_values)
6081 {
6082  const __mmask16 mask =
6083  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
6084  VectorizedArray<float, 16> result;
6085  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
6086  return result;
6087 }
6088 
6089 
6090 
6091 template <SIMDComparison predicate>
6092 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
6093 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
6094  const VectorizedArray<double, 8> &right,
6095  const VectorizedArray<double, 8> &true_values,
6096  const VectorizedArray<double, 8> &false_values)
6097 {
6098  const __mmask8 mask =
6099  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
6100  VectorizedArray<double, 8> result;
6101  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
6102  return result;
6103 }
6104 
6105 # endif
6106 
6107 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
6108 
6109 template <SIMDComparison predicate>
6110 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
6111 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
6112  const VectorizedArray<float, 8> &right,
6113  const VectorizedArray<float, 8> &true_values,
6114  const VectorizedArray<float, 8> &false_values)
6115 {
6116  const auto mask =
6117  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
6118 
6119  VectorizedArray<float, 8> result;
6120  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
6121  return result;
6122 }
6123 
6124 
6125 template <SIMDComparison predicate>
6126 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
6127 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
6128  const VectorizedArray<double, 4> &right,
6129  const VectorizedArray<double, 4> &true_values,
6130  const VectorizedArray<double, 4> &false_values)
6131 {
6132  const auto mask =
6133  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
6134 
6135  VectorizedArray<double, 4> result;
6136  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
6137  return result;
6138 }
6139 
6140 # endif
6141 
6142 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
6143 
6144 template <SIMDComparison predicate>
6145 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
6146 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
6147  const VectorizedArray<float, 4> &right,
6148  const VectorizedArray<float, 4> &true_values,
6149  const VectorizedArray<float, 4> &false_values)
6150 {
6151  __m128 mask;
6152  switch (predicate)
6153  {
6154  case SIMDComparison::equal:
6155  mask = _mm_cmpeq_ps(left.data, right.data);
6156  break;
6157  case SIMDComparison::not_equal:
6158  mask = _mm_cmpneq_ps(left.data, right.data);
6159  break;
6160  case SIMDComparison::less_than:
6161  mask = _mm_cmplt_ps(left.data, right.data);
6162  break;
6163  case SIMDComparison::less_than_or_equal:
6164  mask = _mm_cmple_ps(left.data, right.data);
6165  break;
6166  case SIMDComparison::greater_than:
6167  mask = _mm_cmpgt_ps(left.data, right.data);
6168  break;
6169  case SIMDComparison::greater_than_or_equal:
6170  mask = _mm_cmpge_ps(left.data, right.data);
6171  break;
6172  }
6173 
6174  VectorizedArray<float, 4> result;
6175  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
6176  _mm_andnot_ps(mask, false_values.data));
6177 
6178  return result;
6179 }
6180 
6181 
6182 template <SIMDComparison predicate>
6183 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
6184 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
6185  const VectorizedArray<double, 2> &right,
6186  const VectorizedArray<double, 2> &true_values,
6187  const VectorizedArray<double, 2> &false_values)
6188 {
6189  __m128d mask;
6190  switch (predicate)
6191  {
6192  case SIMDComparison::equal:
6193  mask = _mm_cmpeq_pd(left.data, right.data);
6194  break;
6195  case SIMDComparison::not_equal:
6196  mask = _mm_cmpneq_pd(left.data, right.data);
6197  break;
6198  case SIMDComparison::less_than:
6199  mask = _mm_cmplt_pd(left.data, right.data);
6200  break;
6201  case SIMDComparison::less_than_or_equal:
6202  mask = _mm_cmple_pd(left.data, right.data);
6203  break;
6204  case SIMDComparison::greater_than:
6205  mask = _mm_cmpgt_pd(left.data, right.data);
6206  break;
6207  case SIMDComparison::greater_than_or_equal:
6208  mask = _mm_cmpge_pd(left.data, right.data);
6209  break;
6210  }
6211 
6212  VectorizedArray<double, 2> result;
6213  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
6214  _mm_andnot_pd(mask, false_values.data));
6215 
6216  return result;
6217 }
6218 
6219 # endif
6220 
6221 # if defined(DEAL_II_HAVE_ARM_NEON) && defined(__ARM_NEON)
6222 
6223 template <SIMDComparison predicate>
6224 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
6225 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
6226  const VectorizedArray<float, 4> &right,
6227  const VectorizedArray<float, 4> &true_values,
6228  const VectorizedArray<float, 4> &false_values)
6229 {
6230  uint32x4_t mask;
6231  switch (predicate)
6232  {
6233  case SIMDComparison::equal:
6234  mask = vceqq_f32(left.data, right.data);
6235  break;
6236  case SIMDComparison::not_equal:
6237  mask = vmvnq_u32(vceqq_f32(left.data, right.data));
6238  break;
6239  case SIMDComparison::less_than:
6240  mask = vcltq_f32(left.data, right.data);
6241  break;
6242  case SIMDComparison::less_than_or_equal:
6243  mask = vcleq_f32(left.data, right.data);
6244  break;
6245  case SIMDComparison::greater_than:
6246  mask = vcgtq_f32(left.data, right.data);
6247  break;
6248  case SIMDComparison::greater_than_or_equal:
6249  mask = vcgeq_f32(left.data, right.data);
6250  break;
6251  }
6252 
6253  VectorizedArray<float, 4> result;
6254  result.data = vreinterpretq_f32_u32(vorrq_u32(
6255  vandq_u32(mask, vreinterpretq_u32_f32(true_values.data)),
6256  vandq_u32(vmvnq_u32(mask), vreinterpretq_u32_f32(false_values.data))));
6257 
6258  return result;
6259 }
6260 
6261 
6262 template <SIMDComparison predicate>
6263 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
6264 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
6265  const VectorizedArray<double, 2> &right,
6266  const VectorizedArray<double, 2> &true_values,
6267  const VectorizedArray<double, 2> &false_values)
6268 {
6269  uint64x2_t mask;
6270  switch (predicate)
6271  {
6272  case SIMDComparison::equal:
6273  mask = vceqq_f64(left.data, right.data);
6274  break;
6275  case SIMDComparison::not_equal:
6276  mask = vreinterpretq_u64_u32(
6277  vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(left.data, right.data))));
6278  break;
6279  case SIMDComparison::less_than:
6280  mask = vcltq_f64(left.data, right.data);
6281  break;
6282  case SIMDComparison::less_than_or_equal:
6283  mask = vcleq_f64(left.data, right.data);
6284  break;
6285  case SIMDComparison::greater_than:
6286  mask = vcgtq_f64(left.data, right.data);
6287  break;
6288  case SIMDComparison::greater_than_or_equal:
6289  mask = vcgeq_f64(left.data, right.data);
6290  break;
6291  }
6292 
6293  VectorizedArray<double, 2> result;
6294  result.data = vreinterpretq_f64_u64(vorrq_u64(
6295  vandq_u64(mask, vreinterpretq_u64_f64(true_values.data)),
6296  vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(mask))),
6297  vreinterpretq_u64_f64(false_values.data))));
6298 
6299  return result;
6300 }
6301 
6302 # endif
6303 #endif // DOXYGEN
6304 
6305 
6306 namespace internal
6307 {
6308  template <typename T>
6309  struct VectorizedArrayTrait
6310  {
6314  using value_type = T;
6315 
6319  static constexpr std::size_t
6320  width()
6321  {
6322  return 1;
6323  }
6324 
6328  using vectorized_value_type = VectorizedArray<T>;
6329 
6336  static constexpr std::size_t
6337  stride()
6338  {
6339  return vectorized_value_type::size();
6340  }
6341 
6345  static value_type &
6346  get(value_type &value, unsigned int c)
6347  {
6348  AssertIndexRange(c, 1);
6349  (void)c;
6350 
6351  return value;
6352  }
6353 
6357  static const value_type &
6358  get(const value_type &value, unsigned int c)
6359  {
6360  AssertIndexRange(c, 1);
6361  (void)c;
6362 
6363  return value;
6364  }
6365 
6369  static value_type &
6370  get_from_vectorized(vectorized_value_type &values, unsigned int c)
6371  {
6372  AssertIndexRange(c, stride());
6373 
6374  return values[c];
6375  }
6376 
6381  static const value_type &
6382  get_from_vectorized(const vectorized_value_type &values, unsigned int c)
6383  {
6384  AssertIndexRange(c, stride());
6385 
6386  return values[c];
6387  }
6388  };
6389 
6390  template <typename T, std::size_t width_>
6391  struct VectorizedArrayTrait<VectorizedArray<T, width_>>
6392  {
6396  using value_type = T;
6397 
6401  static constexpr std::size_t
6402  width()
6403  {
6404  return width_;
6405  }
6406 
6410  using vectorized_value_type = VectorizedArray<T, width_>;
6411 
6419  static constexpr std::size_t
6420  stride()
6421  {
6422  return 1;
6423  }
6424 
6428  static value_type &
6429  get(vectorized_value_type &values, unsigned int c)
6430  {
6431  AssertIndexRange(c, width_);
6432 
6433  return values[c];
6434  }
6435 
6439  static const value_type &
6440  get(const vectorized_value_type &values, unsigned int c)
6441  {
6442  AssertIndexRange(c, width_);
6443 
6444  return values[c];
6445  }
6446 
6450  static vectorized_value_type &
6451  get_from_vectorized(vectorized_value_type &values, unsigned int c)
6452  {
6453  (void)c;
6454  AssertIndexRange(c, stride());
6455 
6456  return values;
6457  }
6458 
6463  static const vectorized_value_type &
6464  get_from_vectorized(const vectorized_value_type &values, unsigned int c)
6465  {
6466  (void)c;
6467  AssertIndexRange(c, stride());
6468 
6469  return values;
6470  }
6471  };
6472 } // namespace internal
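internal::VectorizedArrayTrait gives templated code a single interface for plain scalars and VectorizedArray objects: width() is the number of lanes, stride() the number of objects that make up one vectorized value, and get() accesses an individual lane. A sketch of how generic code can use it (the function sum_of_lanes is invented for illustration; since the trait lives in an internal namespace, treat it as an implementation detail outside the library):

#include <deal.II/base/vectorization.h>

// Sum all lanes of either a plain scalar (width() == 1) or a
// VectorizedArray, using the common trait interface.
template <typename T>
typename dealii::internal::VectorizedArrayTrait<T>::value_type
sum_of_lanes(const T &value)
{
  using Trait = dealii::internal::VectorizedArrayTrait<T>;
  typename Trait::value_type sum = 0;
  for (unsigned int lane = 0; lane < Trait::width(); ++lane)
    sum += Trait::get(value, lane);
  return sum;
}

// sum_of_lanes(3.0) returns 3.0, while sum_of_lanes(VectorizedArray<double>(3.0))
// returns 3.0 times the number of lanes.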
6473 
6474 
6475 DEAL_II_NAMESPACE_CLOSE
6476 
6483 namespace std
6484 {
6492  template <typename Number, std::size_t width>
6493  inline ::VectorizedArray<Number, width>
6494  sin(const ::VectorizedArray<Number, width> &x)
6495  {
6496  // put values in an array and later read in that array with an unaligned
6497  // read. This should save some instructions as compared to directly
6498  // setting the individual elements and also circumvents a compiler
6499  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
6500  // from April 2014, topic "matrix_free/step-48 Test").
6501  Number values[::VectorizedArray<Number, width>::size()];
6502  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6503  ++i)
6504  values[i] = std::sin(x[i]);
6505  ::VectorizedArray<Number, width> out;
6506  out.load(&values[0]);
6507  return out;
6508  }
6509 
6510 
6511 
6519  template <typename Number, std::size_t width>
6520  inline ::VectorizedArray<Number, width>
6521  cos(const ::VectorizedArray<Number, width> &x)
6522  {
6523  Number values[::VectorizedArray<Number, width>::size()];
6524  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6525  ++i)
6526  values[i] = std::cos(x[i]);
6527  ::VectorizedArray<Number, width> out;
6528  out.load(&values[0]);
6529  return out;
6530  }
6531 
6532 
6533 
6541  template <typename Number, std::size_t width>
6542  inline ::VectorizedArray<Number, width>
6543  tan(const ::VectorizedArray<Number, width> &x)
6544  {
6545  Number values[::VectorizedArray<Number, width>::size()];
6546  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6547  ++i)
6548  values[i] = std::tan(x[i]);
6549  ::VectorizedArray<Number, width> out;
6550  out.load(&values[0]);
6551  return out;
6552  }
6553 
6554 
6555 
6563  template <typename Number, std::size_t width>
6564  inline ::VectorizedArray<Number, width>
6565  exp(const ::VectorizedArray<Number, width> &x)
6566  {
6567  Number values[::VectorizedArray<Number, width>::size()];
6568  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6569  ++i)
6570  values[i] = std::exp(x[i]);
6571  ::VectorizedArray<Number, width> out;
6572  out.load(&values[0]);
6573  return out;
6574  }
6575 
6576 
6577 
6585  template <typename Number, std::size_t width>
6586  inline ::VectorizedArray<Number, width>
6587  log(const ::VectorizedArray<Number, width> &x)
6588  {
6589  Number values[::VectorizedArray<Number, width>::size()];
6590  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6591  ++i)
6592  values[i] = std::log(x[i]);
6593  ::VectorizedArray<Number, width> out;
6594  out.load(&values[0]);
6595  return out;
6596  }
6597 
6598 
6599 
6607  template <typename Number, std::size_t width>
6608  inline ::VectorizedArray<Number, width>
6609  sqrt(const ::VectorizedArray<Number, width> &x)
6610  {
6611  return x.get_sqrt();
6612  }
6613 
6614 
6615 
6623  template <typename Number, std::size_t width>
6624  inline ::VectorizedArray<Number, width>
6625  pow(const ::VectorizedArray<Number, width> &x, const Number p)
6626  {
6627  Number values[::VectorizedArray<Number, width>::size()];
6628  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6629  ++i)
6630  values[i] = std::pow(x[i], p);
6631  ::VectorizedArray<Number, width> out;
6632  out.load(&values[0]);
6633  return out;
6634  }
6635 
6636 
6637 
6646  template <typename Number, std::size_t width>
6647  inline ::VectorizedArray<Number, width>
6648  pow(const ::VectorizedArray<Number, width> &x,
6649  const ::VectorizedArray<Number, width> &p)
6650  {
6651  Number values[::VectorizedArray<Number, width>::size()];
6652  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size();
6653  ++i)
6654  values[i] = std::pow(x[i], p[i]);
6655  ::VectorizedArray<Number, width> out;
6656  out.load(&values[0]);
6657  return out;
6658  }
6659 
6660 
6661 
6669  template <typename Number, std::size_t width>
6670  inline ::VectorizedArray<Number, width>
6671  abs(const ::VectorizedArray<Number, width> &x)
6672  {
6673  return x.get_abs();
6674  }
6675 
6676 
6677 
6685  template <typename Number, std::size_t width>
6686  inline ::VectorizedArray<Number, width>
6687  max(const ::VectorizedArray<Number, width> &x,
6688  const ::VectorizedArray<Number, width> &y)
6689  {
6690  return x.get_max(y);
6691  }
6692 
6693 
6694 
6702  template <typename Number, std::size_t width>
6703  inline ::VectorizedArray<Number, width>
6704  min(const ::VectorizedArray<Number, width> &x,
6705  const ::VectorizedArray<Number, width> &y)
6706  {
6707  return x.get_min(y);
6708  }
6709 
6710 
6711 
6715  template <class T>
6716  struct iterator_traits<::VectorizedArrayIterator<T>>
6717  {
6718 #ifdef DEAL_II_HAVE_CXX20
6719  using iterator_category = contiguous_iterator_tag;
6720 #else
6721  using iterator_category = random_access_iterator_tag;
6722 #endif
6723  using value_type = typename T::value_type;
6724  using difference_type = std::ptrdiff_t;
6725  };
6726 
6727 } // namespace std
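The overloads in namespace std above make the usual cmath functions work on VectorizedArray arguments by applying the scalar function to each lane, while sqrt, abs, max and min forward to the vectorized get_* member implementations. A usage sketch, not part of the header:

#include <deal.II/base/vectorization.h>

#include <cmath>
#include <iostream>

int
main()
{
  dealii::VectorizedArray<double> x = 0.25; // every lane holds 0.25
  const auto y = std::max(std::sqrt(x), std::sin(x) + std::pow(x, 2.));
  std::cout << y << std::endl;              // same result in every lane
}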
6728 
6729 #endif