Reference documentation for deal.II version Git a0b41b6d0f 2020-02-26 20:08:13 -0600
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/exceptions.h>
23 #include <deal.II/base/template_constraints.h>
24 
25 #include <cmath>
26 
27 // Note:
28 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
29 // according to the following scheme (on x86-based architectures)
30 // #ifdef __AVX512F__
31 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
32 // #elif defined (__AVX__)
33 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
34 // #elif defined (__SSE2__)
35 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
36 // #else
37 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
38 // #endif
39 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
40 // 'check_01_cpu_features.cmake', ensures that these features are not only
41 // present in the compilation unit but also working properly.
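// [Editorial sketch, not part of the original header] Client code can branch
// on the detected level at compile time, e.g.:
//
//   #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2
//   // AVX or wider is available: VectorizedArray<double> packs at least
//   // four lanes per object.
//   #else
//   // SSE2 only, or the single-lane scalar fallback.
//   #endif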
42 
43 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
44 
45 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__SSE2__) && \
46  !defined(__AVX__)
47 # error \
48  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
49 # endif
50 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__SSE2__) && \
51  !defined(__AVX512F__)
52 # error \
53  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
54 # endif
55 
56 # if defined(_MSC_VER)
57 # include <intrin.h>
58 # elif defined(__ALTIVEC__)
59 # include <altivec.h>
60 
61 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
62 // them before they make trouble
63 # undef vector
64 # undef pixel
65 # undef bool
66 # else
67 # include <x86intrin.h>
68 # endif
69 
70 #endif
71 
72 
73 DEAL_II_NAMESPACE_OPEN
74 
75 
76 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
77 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
78 
79 template <typename Number, int width>
80 struct EnableIfScalar<VectorizedArray<Number, width>>
81 {
82  using type = VectorizedArray<typename EnableIfScalar<Number>::type, width>;
83 };
84 
85 
86 
92 template <typename T>
93 class VectorizedArrayIterator
94 {
95 public:
102  VectorizedArrayIterator(T &data, const unsigned int lane)
103  : data(data)
104  , lane(lane)
105  {}
106 
110  bool
111  operator!=(const VectorizedArrayIterator<T> &other) const
112  {
113  return this->lane != other.lane;
114  }
115 
120  const typename T::value_type &operator*() const
121  {
122  return data[lane];
123  }
124 
125 
130  template <typename U = T>
131  typename std::enable_if<!std::is_same<U, const U>::value,
132  typename T::value_type>::type &
133  operator*()
134  {
135  return data[lane];
136  }
137 
143  VectorizedArrayIterator<T> &
144  operator++()
145  {
146  lane++;
147  return *this;
148  }
149 
150 private:
154  T &data;
155 
159  unsigned int lane;
160 };
161 
162 
163 
175 template <typename T>
176 class VectorizedArrayBase
177 {
178 public:
183  static constexpr unsigned int
184  size()
185  {
186  return T::n_array_elements;
187  }
188 
192  VectorizedArrayIterator<T>
193  begin()
194  {
195  return VectorizedArrayIterator<T>(static_cast<T &>(*this), 0);
196  }
197 
202  VectorizedArrayIterator<const T>
203  begin() const
204  {
205  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this), 0);
206  }
207 
211  VectorizedArrayIterator<T>
212  end()
213  {
214  return VectorizedArrayIterator<T>(static_cast<T &>(*this),
215  T::n_array_elements);
216  }
217 
222  VectorizedArrayIterator<const T>
223  end() const
224  {
225  return VectorizedArrayIterator<const T>(static_cast<const T &>(*this),
226  T::n_array_elements);
227  }
228 };
229 
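// [Editorial usage sketch, not part of the original header] The begin()/end()
// functions above allow traversing the lanes of a derived VectorizedArray with
// a range-based for loop:
//
//   VectorizedArray<double> a = 1.0;
//   for (double &lane : a) // visits all n_array_elements lanes
//     lane *= 2.0;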
230 
231 
318 template <typename Number, int width>
319 class VectorizedArray
320  : public VectorizedArrayBase<VectorizedArray<Number, width>>
321 {
322 public:
326  using value_type = Number;
327 
333  static const unsigned int n_array_elements = 1;
334 
335  static_assert(width == n_array_elements,
336  "You specified an illegal width that is not supported.");
337 
342  VectorizedArray() = default;
343 
347  VectorizedArray(const Number scalar)
348  {
349  this->operator=(scalar);
350  }
351 
355  DEAL_II_ALWAYS_INLINE
356  VectorizedArray &
357  operator=(const Number scalar)
358  {
359  data = scalar;
360  return *this;
361  }
362 
367  DEAL_II_ALWAYS_INLINE
368  Number &operator[](const unsigned int comp)
369  {
370  (void)comp;
371  AssertIndexRange(comp, 1);
372  return data;
373  }
374 
379  DEAL_II_ALWAYS_INLINE
380  const Number &operator[](const unsigned int comp) const
381  {
382  (void)comp;
383  AssertIndexRange(comp, 1);
384  return data;
385  }
386 
390  DEAL_II_ALWAYS_INLINE
391  VectorizedArray &
392  operator+=(const VectorizedArray &vec)
393  {
394  data += vec.data;
395  return *this;
396  }
397 
401  DEAL_II_ALWAYS_INLINE
402  VectorizedArray &
403  operator-=(const VectorizedArray &vec)
404  {
405  data -= vec.data;
406  return *this;
407  }
408 
412  DEAL_II_ALWAYS_INLINE
413  VectorizedArray &
414  operator*=(const VectorizedArray &vec)
415  {
416  data *= vec.data;
417  return *this;
418  }
419 
423  DEAL_II_ALWAYS_INLINE
424  VectorizedArray &
425  operator/=(const VectorizedArray &vec)
426  {
427  data /= vec.data;
428  return *this;
429  }
430 
437  DEAL_II_ALWAYS_INLINE
438  void
439  load(const Number *ptr)
440  {
441  data = *ptr;
442  }
443 
450  DEAL_II_ALWAYS_INLINE
451  void
452  store(Number *ptr) const
453  {
454  *ptr = data;
455  }
456 
503  DEAL_II_ALWAYS_INLINE
504  void
505  streaming_store(Number *ptr) const
506  {
507  *ptr = data;
508  }
509 
522  DEAL_II_ALWAYS_INLINE
523  void
524  gather(const Number *base_ptr, const unsigned int *offsets)
525  {
526  data = base_ptr[offsets[0]];
527  }
528 
541  DEAL_II_ALWAYS_INLINE
542  void
543  scatter(const unsigned int *offsets, Number *base_ptr) const
544  {
545  base_ptr[offsets[0]] = data;
546  }
547 
553  Number data;
554 
555 private:
560  DEAL_II_ALWAYS_INLINE
561  VectorizedArray
562  get_sqrt() const
563  {
564  VectorizedArray res;
565  res.data = std::sqrt(data);
566  return res;
567  }
568 
573  DEAL_II_ALWAYS_INLINE
574  VectorizedArray
575  get_abs() const
576  {
577  VectorizedArray res;
578  res.data = std::fabs(data);
579  return res;
580  }
581 
586  DEAL_II_ALWAYS_INLINE
587  VectorizedArray
588  get_max(const VectorizedArray &other) const
589  {
590  VectorizedArray res;
591  res.data = std::max(data, other.data);
592  return res;
593  }
594 
599  DEAL_II_ALWAYS_INLINE
600  VectorizedArray
601  get_min(const VectorizedArray &other) const
602  {
603  VectorizedArray res;
604  res.data = std::min(data, other.data);
605  return res;
606  }
607 
608  // Make a few functions friends.
609  template <typename Number2, int width2>
610  friend VectorizedArray<Number2, width2>
611  std::sqrt(const VectorizedArray<Number2, width2> &);
612  template <typename Number2, int width2>
613  friend VectorizedArray<Number2, width2>
614  std::abs(const VectorizedArray<Number2, width2> &);
615  template <typename Number2, int width2>
616  friend VectorizedArray<Number2, width2>
617  std::max(const VectorizedArray<Number2, width2> &,
618  const VectorizedArray<Number2, width2> &);
619  template <typename Number2, int width2>
620  friend VectorizedArray<Number2, width2>
621  std::min(const VectorizedArray<Number2, width2> &,
622  const VectorizedArray<Number2, width2> &);
623 };
624 
625 
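// [Editorial usage sketch, not part of the original header] The scalar
// fallback above and the intrinsics-based specializations below expose the
// same interface, so code written against it is independent of the width:
//
//   double src[VectorizedArray<double>::n_array_elements]; // assumed filled
//   VectorizedArray<double> x, y = 3.0;
//   x.load(src);  // read n_array_elements contiguous entries
//   x += y;       // lane-wise addition
//   x.store(src); // write the result back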
626 
627 // We need to have a separate declaration for static const members
628 template <typename Number, int width>
629 const unsigned int VectorizedArray<Number, width>::n_array_elements;
630 
631 
632 
637 
638 
645 template <
646  typename Number,
647  int width = internal::VectorizedArrayWidthSpecifier<Number>::max_width>
648 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
649  make_vectorized_array(const Number &u)
650 {
651  VectorizedArray<Number, width> result = u;
652  return result;
653 }
654 
655 
656 
663 template <typename VectorizedArrayType>
664 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
665  make_vectorized_array(const typename VectorizedArrayType::value_type &u)
666 {
667  static_assert(
668  std::is_same<VectorizedArrayType,
669  VectorizedArray<typename VectorizedArrayType::value_type,
670  VectorizedArrayType::n_array_elements>>::value,
671  "VectorizedArrayType is not a VectorizedArray.");
672 
673  VectorizedArrayType result = u;
674  return result;
675 }
676 
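// [Editorial usage sketch, not part of the original header] The two overloads
// above differ only in how the array type is selected:
//
//   auto a = make_vectorized_array(1.5);                         // deduces VectorizedArray<double>
//   auto b = make_vectorized_array<VectorizedArray<float>>(2.f); // type given explicitly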
677 
678 
704 template <typename Number, int width>
705 inline DEAL_II_ALWAYS_INLINE void
706 vectorized_load_and_transpose(const unsigned int n_entries,
707  const Number * in,
708  const unsigned int * offsets,
709  VectorizedArray<Number, width> *out)
710 {
711  for (unsigned int i = 0; i < n_entries; ++i)
712  for (unsigned int v = 0;
713  v < VectorizedArray<Number, width>::n_array_elements;
714  ++v)
715  out[i][v] = in[offsets[v] + i];
716 }
717 
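// [Editorial note, not part of the original header] For a 4-lane array and
// n_entries == 2, the generic loop above performs
//
//   out[0] = {in[offsets[0] + 0], in[offsets[1] + 0], in[offsets[2] + 0], in[offsets[3] + 0]};
//   out[1] = {in[offsets[0] + 1], in[offsets[1] + 1], in[offsets[2] + 1], in[offsets[3] + 1]};
//
// i.e. entry i of the v-th source array ends up in lane v of out[i].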
718 
719 
758 template <typename Number, int width>
759 inline DEAL_II_ALWAYS_INLINE void
760 vectorized_transpose_and_store(const bool add_into,
761  const unsigned int n_entries,
762  const VectorizedArray<Number, width> *in,
763  const unsigned int * offsets,
764  Number * out)
765 {
766  if (add_into)
767  for (unsigned int i = 0; i < n_entries; ++i)
768  for (unsigned int v = 0;
769  v < VectorizedArray<Number, width>::n_array_elements;
770  ++v)
771  out[offsets[v] + i] += in[i][v];
772  else
773  for (unsigned int i = 0; i < n_entries; ++i)
774  for (unsigned int v = 0;
775  v < VectorizedArray<Number, width>::n_array_elements;
776  ++v)
777  out[offsets[v] + i] = in[i][v];
778 }
779 
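// [Editorial usage sketch, not part of the original header; 'in', 'offsets'
// and 'out' are assumed to point to suitably sized arrays] The two transpose
// helpers are typically used as a matched pair around lane-parallel work:
//
//   constexpr unsigned int n_entries = 4;
//   VectorizedArray<double> tmp[n_entries];
//   vectorized_load_and_transpose(n_entries, in, offsets, tmp);
//   for (unsigned int i = 0; i < n_entries; ++i)
//     tmp[i] *= 2.; // operates on all lanes of each entry at once
//   vectorized_transpose_and_store(false, n_entries, tmp, offsets, out);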
780 
782 
783 #ifndef DOXYGEN
784 
785 // for safety, also check that __AVX512F__ is defined in case the user manually
786 // set some conflicting compile flags which prevent compilation
787 
788 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
789 
793 template <>
794 class VectorizedArray<double, 8>
795  : public VectorizedArrayBase<VectorizedArray<double, 8>>
796 {
797 public:
801  using value_type = double;
802 
806  static const unsigned int n_array_elements = 8;
807 
812  VectorizedArray() = default;
813 
817  VectorizedArray(const double scalar)
818  {
819  this->operator=(scalar);
820  }
821 
825  DEAL_II_ALWAYS_INLINE
826  VectorizedArray &
827  operator=(const double x)
828  {
829  data = _mm512_set1_pd(x);
830  return *this;
831  }
832 
836  DEAL_II_ALWAYS_INLINE
837  double &operator[](const unsigned int comp)
838  {
839  AssertIndexRange(comp, 8);
840  return *(reinterpret_cast<double *>(&data) + comp);
841  }
842 
846  DEAL_II_ALWAYS_INLINE
847  const double &operator[](const unsigned int comp) const
848  {
849  AssertIndexRange(comp, 8);
850  return *(reinterpret_cast<const double *>(&data) + comp);
851  }
852 
856  DEAL_II_ALWAYS_INLINE
857  VectorizedArray &
858  operator+=(const VectorizedArray &vec)
859  {
860  // if the compiler supports vector arithmetics, we can simply use +=
861  // operator on the given data type. this allows the compiler to combine
862  // additions with multiplication (fused multiply-add) if those
863  // instructions are available. Otherwise, we need to use the built-in
864  // intrinsic command for __m512d
865 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
866  data += vec.data;
867 # else
868  data = _mm512_add_pd(data, vec.data);
869 # endif
870  return *this;
871  }
872 
876  DEAL_II_ALWAYS_INLINE
877  VectorizedArray &
878  operator-=(const VectorizedArray &vec)
879  {
880 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
881  data -= vec.data;
882 # else
883  data = _mm512_sub_pd(data, vec.data);
884 # endif
885  return *this;
886  }
890  DEAL_II_ALWAYS_INLINE
891  VectorizedArray &
892  operator*=(const VectorizedArray &vec)
893  {
894 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
895  data *= vec.data;
896 # else
897  data = _mm512_mul_pd(data, vec.data);
898 # endif
899  return *this;
900  }
901 
905  DEAL_II_ALWAYS_INLINE
906  VectorizedArray &
907  operator/=(const VectorizedArray &vec)
908  {
909 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
910  data /= vec.data;
911 # else
912  data = _mm512_div_pd(data, vec.data);
913 # endif
914  return *this;
915  }
916 
922  DEAL_II_ALWAYS_INLINE
923  void
924  load(const double *ptr)
925  {
926  data = _mm512_loadu_pd(ptr);
927  }
928 
935  DEAL_II_ALWAYS_INLINE
936  void
937  store(double *ptr) const
938  {
939  _mm512_storeu_pd(ptr, data);
940  }
941 
945  DEAL_II_ALWAYS_INLINE
946  void
947  streaming_store(double *ptr) const
948  {
949  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
950  ExcMessage("Memory not aligned"));
951  _mm512_stream_pd(ptr, data);
952  }
953 
966  DEAL_II_ALWAYS_INLINE
967  void
968  gather(const double *base_ptr, const unsigned int *offsets)
969  {
970  // unfortunately, there does not appear to be a 256 bit integer load, so
971  // do it by some reinterpret casts here. this is allowed because the Intel
972  // API allows aliasing between different vector types.
973  const __m256 index_val =
974  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
975  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
976  data = _mm512_i32gather_pd(index, base_ptr, 8);
977  }
978 
991  DEAL_II_ALWAYS_INLINE
992  void
993  scatter(const unsigned int *offsets, double *base_ptr) const
994  {
995  for (unsigned int i = 0; i < 8; ++i)
996  for (unsigned int j = i + 1; j < 8; ++j)
997  Assert(offsets[i] != offsets[j],
998  ExcMessage("Result of scatter undefined if two offset elements"
999  " point to the same position"));
1000 
1001  // unfortunately, there does not appear to be a 256 bit integer load, so
1002  // do it by some reinterpret casts here. this is allowed because the Intel
1003  // API allows aliasing between different vector types.
1004  const __m256 index_val =
1005  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1006  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1007  _mm512_i32scatter_pd(base_ptr, index, data, 8);
1008  }
1009 
1015  __m512d data;
1016 
1017 private:
1022  DEAL_II_ALWAYS_INLINE
1023  VectorizedArray
1024  get_sqrt() const
1025  {
1026  VectorizedArray res;
1027  res.data = _mm512_sqrt_pd(data);
1028  return res;
1029  }
1030 
1035  DEAL_II_ALWAYS_INLINE
1036  VectorizedArray
1037  get_abs() const
1038  {
1039  // to compute the absolute value, perform bitwise andnot with -0. This
1040  // will leave all value and exponent bits unchanged but force the sign
1041  // value to +. Since there is no andnot for AVX512, we interpret the data
1042  // as 64 bit integers and do the andnot on those types (note that andnot
1043  // is a bitwise operation so the data type does not matter)
1044  __m512d mask = _mm512_set1_pd(-0.);
1045  VectorizedArray res;
1046  res.data = reinterpret_cast<__m512d>(
1047  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1048  reinterpret_cast<__m512i>(data)));
1049  return res;
1050  }
1051 
1056  DEAL_II_ALWAYS_INLINE
1057  VectorizedArray
1058  get_max(const VectorizedArray &other) const
1059  {
1060  VectorizedArray res;
1061  res.data = _mm512_max_pd(data, other.data);
1062  return res;
1063  }
1064 
1069  DEAL_II_ALWAYS_INLINE
1070  VectorizedArray
1071  get_min(const VectorizedArray &other) const
1072  {
1073  VectorizedArray res;
1074  res.data = _mm512_min_pd(data, other.data);
1075  return res;
1076  }
1077 
1078  // Make a few functions friends.
1079  template <typename Number2, int width2>
1080  friend VectorizedArray<Number2, width2>
1081  std::sqrt(const VectorizedArray<Number2, width2> &);
1082  template <typename Number2, int width2>
1083  friend VectorizedArray<Number2, width2>
1084  std::abs(const VectorizedArray<Number2, width2> &);
1085  template <typename Number2, int width2>
1086  friend VectorizedArray<Number2, width2>
1087  std::max(const VectorizedArray<Number2, width2> &,
1088  const VectorizedArray<Number2, width2> &);
1089  template <typename Number2, int width2>
1090  friend VectorizedArray<Number2, width2>
1091  std::min(const VectorizedArray<Number2, width2> &,
1092  const VectorizedArray<Number2, width2> &);
1093 };
1094 
1095 
1096 
1100 template <>
1101 inline DEAL_II_ALWAYS_INLINE void
1102 vectorized_load_and_transpose(const unsigned int n_entries,
1103  const double * in,
1104  const unsigned int * offsets,
1105  VectorizedArray<double, 8> *out)
1106 {
1107  // do not do full transpose because the code is long and will most
1108  // likely not pay off because many processors have two load units
1109  // (for the top 8 instructions) but only 1 permute unit (for the 8
1110  // shuffle/unpack instructions). rather start the transposition on the
1111  // vectorized array of half the size with 256 bits
1112  const unsigned int n_chunks = n_entries / 4;
1113  for (unsigned int i = 0; i < n_chunks; ++i)
1114  {
1115  __m512d t0, t1, t2, t3 = {};
1116 
1117  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1118  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1119  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1120  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1121  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1122  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1123  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1124  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1125 
1126  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1127  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1128  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1129  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1130  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1131  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1132  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1133  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1134  }
1135  // remainder loop of work that does not divide by 4
1136  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1137  out[i].gather(in + i, offsets);
1138 }
1139 
1140 
1141 
1145 template <>
1146 inline DEAL_II_ALWAYS_INLINE void
1147 vectorized_transpose_and_store(const bool add_into,
1148  const unsigned int n_entries,
1149  const VectorizedArray<double, 8> *in,
1150  const unsigned int * offsets,
1151  double * out)
1152 {
1153  // as for the load, we split the store operations into 256 bit units to
1154  // better balance between code size, shuffle instructions, and stores
1155  const unsigned int n_chunks = n_entries / 4;
1156  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1157  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1158  for (unsigned int i = 0; i < n_chunks; ++i)
1159  {
1160  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1161  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1162  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1163  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1164  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1165  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1166  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1167  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1168  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1169  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1170  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1171  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1172  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1173  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1174  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1175  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1176 
1177  // Cannot use the same store instructions in both paths of the 'if'
1178  // because the compiler cannot know that there is no aliasing
1179  // between pointers
1180  if (add_into)
1181  {
1182  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1183  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1184  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1185  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1186  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1187  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1188  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1189  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1190  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1191  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1192  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1193  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1194  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1195  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1196  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1197  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1198  }
1199  else
1200  {
1201  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1202  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1203  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1204  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1205  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1206  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1207  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1208  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1209  }
1210  }
1211 
1212  // remainder loop of work that does not divide by 4
1213  if (add_into)
1214  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1215  for (unsigned int v = 0; v < 8; ++v)
1216  out[offsets[v] + i] += in[i][v];
1217  else
1218  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1219  for (unsigned int v = 0; v < 8; ++v)
1220  out[offsets[v] + i] = in[i][v];
1221 }
1222 
1223 
1224 
1228 template <>
1229 class VectorizedArray<float, 16>
1230  : public VectorizedArrayBase<VectorizedArray<float, 16>>
1231 {
1232 public:
1236  using value_type = float;
1237 
1241  static const unsigned int n_array_elements = 16;
1242 
1247  VectorizedArray() = default;
1248 
1252  VectorizedArray(const float scalar)
1253  {
1254  this->operator=(scalar);
1255  }
1256 
1260  DEAL_II_ALWAYS_INLINE
1261  VectorizedArray &
1262  operator=(const float x)
1263  {
1264  data = _mm512_set1_ps(x);
1265  return *this;
1266  }
1267 
1271  DEAL_II_ALWAYS_INLINE
1272  float &operator[](const unsigned int comp)
1273  {
1274  AssertIndexRange(comp, 16);
1275  return *(reinterpret_cast<float *>(&data) + comp);
1276  }
1277 
1281  DEAL_II_ALWAYS_INLINE
1282  const float &operator[](const unsigned int comp) const
1283  {
1284  AssertIndexRange(comp, 16);
1285  return *(reinterpret_cast<const float *>(&data) + comp);
1286  }
1287 
1291  DEAL_II_ALWAYS_INLINE
1292  VectorizedArray &
1293  operator+=(const VectorizedArray &vec)
1294  {
1295  // if the compiler supports vector arithmetics, we can simply use +=
1296  // operator on the given data type. this allows the compiler to combine
1297  // additions with multiplication (fused multiply-add) if those
1298  // instructions are available. Otherwise, we need to use the built-in
1299  // intrinsic command for __m512
1300 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1301  data += vec.data;
1302 # else
1303  data = _mm512_add_ps(data, vec.data);
1304 # endif
1305  return *this;
1306  }
1307 
1311  DEAL_II_ALWAYS_INLINE
1312  VectorizedArray &
1313  operator-=(const VectorizedArray &vec)
1314  {
1315 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1316  data -= vec.data;
1317 # else
1318  data = _mm512_sub_ps(data, vec.data);
1319 # endif
1320  return *this;
1321  }
1325  DEAL_II_ALWAYS_INLINE
1326  VectorizedArray &
1327  operator*=(const VectorizedArray &vec)
1328  {
1329 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1330  data *= vec.data;
1331 # else
1332  data = _mm512_mul_ps(data, vec.data);
1333 # endif
1334  return *this;
1335  }
1336 
1340  DEAL_II_ALWAYS_INLINE
1341  VectorizedArray &
1342  operator/=(const VectorizedArray &vec)
1343  {
1344 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1345  data /= vec.data;
1346 # else
1347  data = _mm512_div_ps(data, vec.data);
1348 # endif
1349  return *this;
1350  }
1351 
1357  DEAL_II_ALWAYS_INLINE
1358  void
1359  load(const float *ptr)
1360  {
1361  data = _mm512_loadu_ps(ptr);
1362  }
1363 
1370  DEAL_II_ALWAYS_INLINE
1371  void
1372  store(float *ptr) const
1373  {
1374  _mm512_storeu_ps(ptr, data);
1375  }
1376 
1380  DEAL_II_ALWAYS_INLINE
1381  void
1382  streaming_store(float *ptr) const
1383  {
1384  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1385  ExcMessage("Memory not aligned"));
1386  _mm512_stream_ps(ptr, data);
1387  }
1388 
1401  DEAL_II_ALWAYS_INLINE
1402  void
1403  gather(const float *base_ptr, const unsigned int *offsets)
1404  {
1405  // unfortunately, there does not appear to be a 512 bit integer load, so
1406  // do it by some reinterpret casts here. this is allowed because the Intel
1407  // API allows aliasing between different vector types.
1408  const __m512 index_val =
1409  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1410  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1411  data = _mm512_i32gather_ps(index, base_ptr, 4);
1412  }
1413 
1426  DEAL_II_ALWAYS_INLINE
1427  void
1428  scatter(const unsigned int *offsets, float *base_ptr) const
1429  {
1430  for (unsigned int i = 0; i < 16; ++i)
1431  for (unsigned int j = i + 1; j < 16; ++j)
1432  Assert(offsets[i] != offsets[j],
1433  ExcMessage("Result of scatter undefined if two offset elements"
1434  " point to the same position"));
1435 
1436  // unfortunately, there does not appear to be a 512 bit integer load, so
1437  // do it by some reinterpret casts here. this is allowed because the Intel
1438  // API allows aliasing between different vector types.
1439  const __m512 index_val =
1440  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1441  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1442  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1443  }
1444 
1450  __m512 data;
1451 
1452 private:
1457  DEAL_II_ALWAYS_INLINE
1458  VectorizedArray
1459  get_sqrt() const
1460  {
1461  VectorizedArray res;
1462  res.data = _mm512_sqrt_ps(data);
1463  return res;
1464  }
1465 
1470  DEAL_II_ALWAYS_INLINE
1471  VectorizedArray
1472  get_abs() const
1473  {
1474  // to compute the absolute value, perform bitwise andnot with -0. This
1475  // will leave all value and exponent bits unchanged but force the sign
1476  // value to +. Since there is no andnot for AVX512, we interpret the data
1477  // as 32 bit integers and do the andnot on those types (note that andnot
1478  // is a bitwise operation so the data type does not matter)
1479  __m512 mask = _mm512_set1_ps(-0.f);
1480  VectorizedArray res;
1481  res.data = reinterpret_cast<__m512>(
1482  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1483  reinterpret_cast<__m512i>(data)));
1484  return res;
1485  }
1486 
1491  DEAL_II_ALWAYS_INLINE
1492  VectorizedArray
1493  get_max(const VectorizedArray &other) const
1494  {
1495  VectorizedArray res;
1496  res.data = _mm512_max_ps(data, other.data);
1497  return res;
1498  }
1499 
1504  DEAL_II_ALWAYS_INLINE
1505  VectorizedArray
1506  get_min(const VectorizedArray &other) const
1507  {
1508  VectorizedArray res;
1509  res.data = _mm512_min_ps(data, other.data);
1510  return res;
1511  }
1512 
1513  // Make a few functions friends.
1514  template <typename Number2, int width2>
1515  friend VectorizedArray<Number2, width2>
1516  std::sqrt(const VectorizedArray<Number2, width2> &);
1517  template <typename Number2, int width2>
1518  friend VectorizedArray<Number2, width2>
1519  std::abs(const VectorizedArray<Number2, width2> &);
1520  template <typename Number2, int width2>
1521  friend VectorizedArray<Number2, width2>
1522  std::max(const VectorizedArray<Number2, width2> &,
1523  const VectorizedArray<Number2, width2> &);
1524  template <typename Number2, int width2>
1525  friend VectorizedArray<Number2, width2>
1526  std::min(const VectorizedArray<Number2, width2> &,
1527  const VectorizedArray<Number2, width2> &);
1528 };
1529 
1530 
1531 
1535 template <>
1536 inline DEAL_II_ALWAYS_INLINE void
1537 vectorized_load_and_transpose(const unsigned int n_entries,
1538  const float * in,
1539  const unsigned int * offsets,
1540  VectorizedArray<float, 16> *out)
1541 {
1542  // Similar to the double case, we perform the work on smaller entities. In
1543  // this case, we start from 128 bit arrays and insert them into a full 512
1544  // bit index. This reduces the code size and register pressure because we do
1545  // shuffles on 4 numbers rather than 16.
1546  const unsigned int n_chunks = n_entries / 4;
1547 
1548  // To avoid warnings about uninitialized variables, need to initialize one
1549  // variable to a pre-existing value in out, which will never get used in
1550  // the end. Keep the initialization outside the loop because of a bug in
1551  // gcc-9 which generates a "vmovapd" instruction instead of "vmovupd" in
1552  // case t3 is initialized to zero (inside/outside of loop), see
1553  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1554  __m512 t0, t1, t2, t3;
1555  if (n_chunks > 0)
1556  t3 = out[0].data;
1557  for (unsigned int i = 0; i < n_chunks; ++i)
1558  {
1559  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1560  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1561  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1562  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1563  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1564  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1565  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1566  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1567  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1568  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1569  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1570  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1571  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1572  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1573  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1574  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1575 
1576  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1577  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1578  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1579  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1580 
1581  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1582  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1583  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1584  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1585  }
1586 
1587  // remainder loop of work that does not divide by 4
1588  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1589  for (unsigned int v = 0; v < 8; ++v)
1590  out[i].gather(in + i, offsets);
1591 }
1592 
1593 
1594 
1598 template <>
1599 inline DEAL_II_ALWAYS_INLINE void
1600 vectorized_transpose_and_store(const bool add_into,
1601  const unsigned int n_entries,
1602  const VectorizedArray<float, 16> *in,
1603  const unsigned int * offsets,
1604  float * out)
1605 {
1606  const unsigned int n_chunks = n_entries / 4;
1607  for (unsigned int i = 0; i < n_chunks; ++i)
1608  {
1609  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1610  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1611  __m512 t2 =
1612  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1613  __m512 t3 =
1614  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1615  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1616  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1617  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1618  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1619 
1620  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1621  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1622  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1623  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1624  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1625  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1626  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1627  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1628  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1629  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1630  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1631  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1632  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1633  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1634  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1635  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1636 
1637  // Cannot use the same store instructions in both paths of the 'if'
1638  // because the compiler cannot know that there is no aliasing between
1639  // pointers
1640  if (add_into)
1641  {
1642  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1643  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1644  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1645  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1646  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1647  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1648  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1649  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1650  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1651  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1652  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1653  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1654  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1655  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1656  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1657  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1658  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1659  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1660  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1661  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1662  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1663  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1664  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1665  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1666  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1667  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1668  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1669  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1670  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1671  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1672  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1673  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1674  }
1675  else
1676  {
1677  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1678  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1679  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1680  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1681  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1682  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1683  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1684  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1685  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1686  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1687  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1688  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1689  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1690  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1691  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1692  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1693  }
1694  }
1695 
1696  // remainder loop of work that does not divide by 4
1697  if (add_into)
1698  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1699  for (unsigned int v = 0; v < 16; ++v)
1700  out[offsets[v] + i] += in[i][v];
1701  else
1702  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1703  for (unsigned int v = 0; v < 16; ++v)
1704  out[offsets[v] + i] = in[i][v];
1705 }
1706 
1707 # endif
1708 
1709 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1710 
1714 template <>
1715 class VectorizedArray<double, 4>
1716  : public VectorizedArrayBase<VectorizedArray<double, 4>>
1717 {
1718 public:
1722  using value_type = double;
1723 
1727  static const unsigned int n_array_elements = 4;
1728 
1733  VectorizedArray() = default;
1734 
1738  VectorizedArray(const double scalar)
1739  {
1740  this->operator=(scalar);
1741  }
1742 
1746  DEAL_II_ALWAYS_INLINE
1747  VectorizedArray &
1748  operator=(const double x)
1749  {
1750  data = _mm256_set1_pd(x);
1751  return *this;
1752  }
1753 
1757  DEAL_II_ALWAYS_INLINE
1758  double &operator[](const unsigned int comp)
1759  {
1760  AssertIndexRange(comp, 4);
1761  return *(reinterpret_cast<double *>(&data) + comp);
1762  }
1763 
1767  DEAL_II_ALWAYS_INLINE
1768  const double &operator[](const unsigned int comp) const
1769  {
1770  AssertIndexRange(comp, 4);
1771  return *(reinterpret_cast<const double *>(&data) + comp);
1772  }
1773 
1777  DEAL_II_ALWAYS_INLINE
1778  VectorizedArray &
1779  operator+=(const VectorizedArray &vec)
1780  {
1781  // if the compiler supports vector arithmetics, we can simply use +=
1782  // operator on the given data type. this allows the compiler to combine
1783  // additions with multiplication (fused multiply-add) if those
1784  // instructions are available. Otherwise, we need to use the built-in
1785  // intrinsic command for __m256d
1786 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1787  data += vec.data;
1788 # else
1789  data = _mm256_add_pd(data, vec.data);
1790 # endif
1791  return *this;
1792  }
1793 
1797  DEAL_II_ALWAYS_INLINE
1798  VectorizedArray &
1799  operator-=(const VectorizedArray &vec)
1800  {
1801 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1802  data -= vec.data;
1803 # else
1804  data = _mm256_sub_pd(data, vec.data);
1805 # endif
1806  return *this;
1807  }
1811  DEAL_II_ALWAYS_INLINE
1812  VectorizedArray &
1813  operator*=(const VectorizedArray &vec)
1814  {
1815 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1816  data *= vec.data;
1817 # else
1818  data = _mm256_mul_pd(data, vec.data);
1819 # endif
1820  return *this;
1821  }
1822 
1826  DEAL_II_ALWAYS_INLINE
1827  VectorizedArray &
1828  operator/=(const VectorizedArray &vec)
1829  {
1830 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1831  data /= vec.data;
1832 # else
1833  data = _mm256_div_pd(data, vec.data);
1834 # endif
1835  return *this;
1836  }
1837 
1843  DEAL_II_ALWAYS_INLINE
1844  void
1845  load(const double *ptr)
1846  {
1847  data = _mm256_loadu_pd(ptr);
1848  }
1849 
1856  DEAL_II_ALWAYS_INLINE
1857  void
1858  store(double *ptr) const
1859  {
1860  _mm256_storeu_pd(ptr, data);
1861  }
1862 
1866  DEAL_II_ALWAYS_INLINE
1867  void
1868  streaming_store(double *ptr) const
1869  {
1870  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1871  ExcMessage("Memory not aligned"));
1872  _mm256_stream_pd(ptr, data);
1873  }
1874 
1887  DEAL_II_ALWAYS_INLINE
1888  void
1889  gather(const double *base_ptr, const unsigned int *offsets)
1890  {
1891 # ifdef __AVX2__
1892  // unfortunately, there does not appear to be a 128 bit integer load, so
1893  // do it by some reinterpret casts here. this is allowed because the Intel
1894  // API allows aliasing between different vector types.
1895  const __m128 index_val =
1896  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
1897  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
1898  data = _mm256_i32gather_pd(base_ptr, index, 8);
1899 # else
1900  for (unsigned int i = 0; i < 4; ++i)
1901  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1902 # endif
1903  }
1904 
1917  DEAL_II_ALWAYS_INLINE
1918  void
1919  scatter(const unsigned int *offsets, double *base_ptr) const
1920  {
1921  // no scatter operation in AVX/AVX2
1922  for (unsigned int i = 0; i < 4; ++i)
1923  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1924  }
1925 
1931  __m256d data;
1932 
1933 private:
1938  DEAL_II_ALWAYS_INLINE
1939  VectorizedArray
1940  get_sqrt() const
1941  {
1942  VectorizedArray res;
1943  res.data = _mm256_sqrt_pd(data);
1944  return res;
1945  }
1946 
1951  DEAL_II_ALWAYS_INLINE
1952  VectorizedArray
1953  get_abs() const
1954  {
1955  // to compute the absolute value, perform bitwise andnot with -0. This
1956  // will leave all value and exponent bits unchanged but force the sign
1957  // value to +.
1958  __m256d mask = _mm256_set1_pd(-0.);
1959  VectorizedArray res;
1960  res.data = _mm256_andnot_pd(mask, data);
1961  return res;
1962  }
1963 
1968  DEAL_II_ALWAYS_INLINE
1969  VectorizedArray
1970  get_max(const VectorizedArray &other) const
1971  {
1972  VectorizedArray res;
1973  res.data = _mm256_max_pd(data, other.data);
1974  return res;
1975  }
1976 
1981  DEAL_II_ALWAYS_INLINE
1982  VectorizedArray
1983  get_min(const VectorizedArray &other) const
1984  {
1985  VectorizedArray res;
1986  res.data = _mm256_min_pd(data, other.data);
1987  return res;
1988  }
1989 
1990  // Make a few functions friends.
1991  template <typename Number2, int width2>
1992  friend VectorizedArray<Number2, width2>
1993  std::sqrt(const VectorizedArray<Number2, width2> &);
1994  template <typename Number2, int width2>
1995  friend VectorizedArray<Number2, width2>
1996  std::abs(const VectorizedArray<Number2, width2> &);
1997  template <typename Number2, int width2>
1998  friend VectorizedArray<Number2, width2>
1999  std::max(const VectorizedArray<Number2, width2> &,
2000  const VectorizedArray<Number2, width2> &);
2001  template <typename Number2, int width2>
2002  friend VectorizedArray<Number2, width2>
2003  std::min(const VectorizedArray<Number2, width2> &,
2004  const VectorizedArray<Number2, width2> &);
2005 };
2006 
2007 
2008 
2012 template <>
2013 inline DEAL_II_ALWAYS_INLINE void
2014 vectorized_load_and_transpose(const unsigned int n_entries,
2015  const double * in,
2016  const unsigned int * offsets,
2017  VectorizedArray<double, 4> *out)
2018 {
2019  const unsigned int n_chunks = n_entries / 4;
2020  const double * in0 = in + offsets[0];
2021  const double * in1 = in + offsets[1];
2022  const double * in2 = in + offsets[2];
2023  const double * in3 = in + offsets[3];
2024 
2025  for (unsigned int i = 0; i < n_chunks; ++i)
2026  {
2027  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2028  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2029  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2030  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2031  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2032  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2033  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2034  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2035  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2036  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2037  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2038  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2039  }
2040 
2041  // remainder loop of work that does not divide by 4
2042  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2043  out[i].gather(in + i, offsets);
2044 }
2045 
2046 
2047 
2051 template <>
2052 inline DEAL_II_ALWAYS_INLINE void
2053 vectorized_transpose_and_store(const bool add_into,
2054  const unsigned int n_entries,
2055  const VectorizedArray<double, 4> *in,
2056  const unsigned int * offsets,
2057  double * out)
2058 {
2059  const unsigned int n_chunks = n_entries / 4;
2060  double * out0 = out + offsets[0];
2061  double * out1 = out + offsets[1];
2062  double * out2 = out + offsets[2];
2063  double * out3 = out + offsets[3];
2064  for (unsigned int i = 0; i < n_chunks; ++i)
2065  {
2066  __m256d u0 = in[4 * i + 0].data;
2067  __m256d u1 = in[4 * i + 1].data;
2068  __m256d u2 = in[4 * i + 2].data;
2069  __m256d u3 = in[4 * i + 3].data;
2070  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2071  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2072  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2073  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2074  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2075  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2076  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2077  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2078 
2079  // Cannot use the same store instructions in both paths of the 'if'
2080  // because the compiler cannot know that there is no aliasing between
2081  // pointers
2082  if (add_into)
2083  {
2084  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2085  _mm256_storeu_pd(out0 + 4 * i, res0);
2086  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2087  _mm256_storeu_pd(out1 + 4 * i, res1);
2088  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2089  _mm256_storeu_pd(out2 + 4 * i, res2);
2090  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2091  _mm256_storeu_pd(out3 + 4 * i, res3);
2092  }
2093  else
2094  {
2095  _mm256_storeu_pd(out0 + 4 * i, res0);
2096  _mm256_storeu_pd(out1 + 4 * i, res1);
2097  _mm256_storeu_pd(out2 + 4 * i, res2);
2098  _mm256_storeu_pd(out3 + 4 * i, res3);
2099  }
2100  }
2101 
2102  // remainder loop of work that does not divide by 4
2103  if (add_into)
2104  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2105  for (unsigned int v = 0; v < 4; ++v)
2106  out[offsets[v] + i] += in[i][v];
2107  else
2108  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2109  for (unsigned int v = 0; v < 4; ++v)
2110  out[offsets[v] + i] = in[i][v];
2111 }
2112 
2113 
2114 
2118 template <>
2119 class VectorizedArray<float, 8>
2120  : public VectorizedArrayBase<VectorizedArray<float, 8>>
2121 {
2122 public:
2126  using value_type = float;
2127 
2131  static const unsigned int n_array_elements = 8;
2132 
2137  VectorizedArray() = default;
2138 
2142  VectorizedArray(const float scalar)
2143  {
2144  this->operator=(scalar);
2145  }
2146 
2150  DEAL_II_ALWAYS_INLINE
2151  VectorizedArray &
2152  operator=(const float x)
2153  {
2154  data = _mm256_set1_ps(x);
2155  return *this;
2156  }
2157 
2161  DEAL_II_ALWAYS_INLINE
2162  float &operator[](const unsigned int comp)
2163  {
2164  AssertIndexRange(comp, 8);
2165  return *(reinterpret_cast<float *>(&data) + comp);
2166  }
2167 
2171  DEAL_II_ALWAYS_INLINE
2172  const float &operator[](const unsigned int comp) const
2173  {
2174  AssertIndexRange(comp, 8);
2175  return *(reinterpret_cast<const float *>(&data) + comp);
2176  }
2177 
2181  DEAL_II_ALWAYS_INLINE
2182  VectorizedArray &
2183  operator+=(const VectorizedArray &vec)
2184  {
2185  // if the compiler supports vector arithmetics, we can simply use +=
2186  // operator on the given data type. this allows the compiler to combine
2187  // additions with multiplication (fused multiply-add) if those
2188  // instructions are available. Otherwise, we need to use the built-in
2189  // intrinsic command for __m256
2190 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2191  data += vec.data;
2192 # else
2193  data = _mm256_add_ps(data, vec.data);
2194 # endif
2195  return *this;
2196  }
2197 
2201  DEAL_II_ALWAYS_INLINE
2202  VectorizedArray &
2203  operator-=(const VectorizedArray &vec)
2204  {
2205 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2206  data -= vec.data;
2207 # else
2208  data = _mm256_sub_ps(data, vec.data);
2209 # endif
2210  return *this;
2211  }
2215  DEAL_II_ALWAYS_INLINE
2216  VectorizedArray &
2217  operator*=(const VectorizedArray &vec)
2218  {
2219 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2220  data *= vec.data;
2221 # else
2222  data = _mm256_mul_ps(data, vec.data);
2223 # endif
2224  return *this;
2225  }
2226 
2230  DEAL_II_ALWAYS_INLINE
2231  VectorizedArray &
2232  operator/=(const VectorizedArray &vec)
2233  {
2234 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2235  data /= vec.data;
2236 # else
2237  data = _mm256_div_ps(data, vec.data);
2238 # endif
2239  return *this;
2240  }
2241 
2247  DEAL_II_ALWAYS_INLINE
2248  void
2249  load(const float *ptr)
2250  {
2251  data = _mm256_loadu_ps(ptr);
2252  }
2253 
2260  DEAL_II_ALWAYS_INLINE
2261  void
2262  store(float *ptr) const
2263  {
2264  _mm256_storeu_ps(ptr, data);
2265  }
2266 
2270  DEAL_II_ALWAYS_INLINE
2271  void
2272  streaming_store(float *ptr) const
2273  {
2274  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2275  ExcMessage("Memory not aligned"));
2276  _mm256_stream_ps(ptr, data);
2277  }
2278 
2291  DEAL_II_ALWAYS_INLINE
2292  void
2293  gather(const float *base_ptr, const unsigned int *offsets)
2294  {
2295 # ifdef __AVX2__
2296  // unfortunately, there does not appear to be a 256 bit integer load, so
2297  // do it by some reinterpret casts here. this is allowed because the Intel
2298  // API allows aliasing between different vector types.
2299  const __m256 index_val =
2300  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2301  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2302  data = _mm256_i32gather_ps(base_ptr, index, 4);
2303 # else
2304  for (unsigned int i = 0; i < 8; ++i)
2305  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2306 # endif
2307  }
2308 
2321  DEAL_II_ALWAYS_INLINE
2322  void
2323  scatter(const unsigned int *offsets, float *base_ptr) const
2324  {
2325  // no scatter operation in AVX/AVX2
2326  for (unsigned int i = 0; i < 8; ++i)
2327  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2328  }
2329 
2335  __m256 data;
2336 
2337 private:
2342  DEAL_II_ALWAYS_INLINE
2343  VectorizedArray
2344  get_sqrt() const
2345  {
2346  VectorizedArray res;
2347  res.data = _mm256_sqrt_ps(data);
2348  return res;
2349  }
2350 
2355  DEAL_II_ALWAYS_INLINE
2356  VectorizedArray
2357  get_abs() const
2358  {
2359  // to compute the absolute value, perform bitwise andnot with -0. This
2360  // will leave all value and exponent bits unchanged but force the sign
2361  // value to +.
2362  __m256 mask = _mm256_set1_ps(-0.f);
2363  VectorizedArray res;
2364  res.data = _mm256_andnot_ps(mask, data);
2365  return res;
2366  }
2367 
2372  DEAL_II_ALWAYS_INLINE
2373  VectorizedArray
2374  get_max(const VectorizedArray &other) const
2375  {
2376  VectorizedArray res;
2377  res.data = _mm256_max_ps(data, other.data);
2378  return res;
2379  }
2380 
2385  DEAL_II_ALWAYS_INLINE
2386  VectorizedArray
2387  get_min(const VectorizedArray &other) const
2388  {
2389  VectorizedArray res;
2390  res.data = _mm256_min_ps(data, other.data);
2391  return res;
2392  }
2393 
2394  // Make a few functions friends.
2395  template <typename Number2, int width2>
2396  friend VectorizedArray<Number2, width2>
2397  std::sqrt(const VectorizedArray<Number2, width2> &);
2398  template <typename Number2, int width2>
2399  friend VectorizedArray<Number2, width2>
2400  std::abs(const VectorizedArray<Number2, width2> &);
2401  template <typename Number2, int width2>
2402  friend VectorizedArray<Number2, width2>
2403  std::max(const VectorizedArray<Number2, width2> &,
2404  const VectorizedArray<Number2, width2> &);
2405  template <typename Number2, int width2>
2406  friend VectorizedArray<Number2, width2>
2407  std::min(const VectorizedArray<Number2, width2> &,
2408  const VectorizedArray<Number2, width2> &);
2409 };
2410 
2411 
2412 
2416 template <>
2417 inline DEAL_II_ALWAYS_INLINE void
2418 vectorized_load_and_transpose(const unsigned int n_entries,
2419  const float * in,
2420  const unsigned int * offsets,
2421  VectorizedArray<float, 8> *out)
2422 {
2423  const unsigned int n_chunks = n_entries / 4;
2424  for (unsigned int i = 0; i < n_chunks; ++i)
2425  {
2426  // To avoid warnings about uninitialized variables, need to initialize
2427  // one variable with zero before using it.
2428  __m256 t0, t1, t2, t3 = {};
2429  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2430  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2431  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2432  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2433  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2434  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2435  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2436  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2437 
2438  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2439  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2440  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2441  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2442  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2443  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2444  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2445  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2446  }
2447 
2448  // remainder loop of work that does not divide by 4
2449  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2450  out[i].gather(in + i, offsets);
2451 }
2452 
2453 
2454 
2458 template <>
2459 inline DEAL_II_ALWAYS_INLINE void
2460 vectorized_transpose_and_store(const bool add_into,
2461  const unsigned int n_entries,
2462  const VectorizedArray<float, 8> *in,
2463  const unsigned int * offsets,
2464  float * out)
2465 {
2466  const unsigned int n_chunks = n_entries / 4;
2467  for (unsigned int i = 0; i < n_chunks; ++i)
2468  {
2469  __m256 u0 = in[4 * i + 0].data;
2470  __m256 u1 = in[4 * i + 1].data;
2471  __m256 u2 = in[4 * i + 2].data;
2472  __m256 u3 = in[4 * i + 3].data;
2473  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
2474  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
2475  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
2476  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
2477  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
2478  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
2479  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
2480  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
2481  __m128 res0 = _mm256_extractf128_ps(u0, 0);
2482  __m128 res4 = _mm256_extractf128_ps(u0, 1);
2483  __m128 res1 = _mm256_extractf128_ps(u1, 0);
2484  __m128 res5 = _mm256_extractf128_ps(u1, 1);
2485  __m128 res2 = _mm256_extractf128_ps(u2, 0);
2486  __m128 res6 = _mm256_extractf128_ps(u2, 1);
2487  __m128 res3 = _mm256_extractf128_ps(u3, 0);
2488  __m128 res7 = _mm256_extractf128_ps(u3, 1);
2489 
2490  // Cannot use the same store instructions in both paths of the 'if'
2491  // because the compiler cannot know that there is no aliasing between
2492  // pointers
2493  if (add_into)
2494  {
2495  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2496  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2497  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2498  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2499  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2500  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2501  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2502  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2503  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2504  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2505  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2506  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2507  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2508  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2509  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2510  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2511  }
2512  else
2513  {
2514  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2515  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2516  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2517  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2518  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2519  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2520  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2521  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2522  }
2523  }
2524 
2525  // remainder loop of work that does not divide by 4
2526  if (add_into)
2527  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2528  for (unsigned int v = 0; v < 8; ++v)
2529  out[offsets[v] + i] += in[i][v];
2530  else
2531  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2532  for (unsigned int v = 0; v < 8; ++v)
2533  out[offsets[v] + i] = in[i][v];
2534 }
2535 
2536 # endif
2537 
2538 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2539 
2543 template <>
2544 class VectorizedArray<double, 2>
2545  : public VectorizedArrayBase<VectorizedArray<double, 2>>
2546 {
2547 public:
2551  using value_type = double;
2552 
2556  static const unsigned int n_array_elements = 2;
2557 
2562  VectorizedArray() = default;
2563 
2567  VectorizedArray(const double scalar)
2568  {
2569  this->operator=(scalar);
2570  }
2571 
2575  DEAL_II_ALWAYS_INLINE
2576  VectorizedArray &
2577  operator=(const double x)
2578  {
2579  data = _mm_set1_pd(x);
2580  return *this;
2581  }
2582 
2586  DEAL_II_ALWAYS_INLINE
2587  double &operator[](const unsigned int comp)
2588  {
2589  AssertIndexRange(comp, 2);
2590  return *(reinterpret_cast<double *>(&data) + comp);
2591  }
2592 
2596  DEAL_II_ALWAYS_INLINE
2597  const double &operator[](const unsigned int comp) const
2598  {
2599  AssertIndexRange(comp, 2);
2600  return *(reinterpret_cast<const double *>(&data) + comp);
2601  }
2602 
2606  DEAL_II_ALWAYS_INLINE
2607  VectorizedArray &
2608  operator+=(const VectorizedArray &vec)
2609  {
2610 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2611  data += vec.data;
2612 # else
2613  data = _mm_add_pd(data, vec.data);
2614 # endif
2615  return *this;
2616  }
2617 
2621  DEAL_II_ALWAYS_INLINE
2622  VectorizedArray &
2623  operator-=(const VectorizedArray &vec)
2624  {
2625 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2626  data -= vec.data;
2627 # else
2628  data = _mm_sub_pd(data, vec.data);
2629 # endif
2630  return *this;
2631  }
2632 
2636  DEAL_II_ALWAYS_INLINE
2637  VectorizedArray &
2638  operator*=(const VectorizedArray &vec)
2639  {
2640 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2641  data *= vec.data;
2642 # else
2643  data = _mm_mul_pd(data, vec.data);
2644 # endif
2645  return *this;
2646  }
2647 
2651  DEAL_II_ALWAYS_INLINE
2652  VectorizedArray &
2653  operator/=(const VectorizedArray &vec)
2654  {
2655 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2656  data /= vec.data;
2657 # else
2658  data = _mm_div_pd(data, vec.data);
2659 # endif
2660  return *this;
2661  }
2662 
2668  DEAL_II_ALWAYS_INLINE
2669  void
2670  load(const double *ptr)
2671  {
2672  data = _mm_loadu_pd(ptr);
2673  }
2674 
2681  DEAL_II_ALWAYS_INLINE
2682  void
2683  store(double *ptr) const
2684  {
2685  _mm_storeu_pd(ptr, data);
2686  }
2687 
2691  DEAL_II_ALWAYS_INLINE
2692  void
2693  streaming_store(double *ptr) const
2694  {
2695  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2696  ExcMessage("Memory not aligned"));
2697  _mm_stream_pd(ptr, data);
2698  }
2699 
2712  DEAL_II_ALWAYS_INLINE
2713  void
2714  gather(const double *base_ptr, const unsigned int *offsets)
2715  {
2716  for (unsigned int i = 0; i < 2; ++i)
2717  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2718  }
2719 
2732  DEAL_II_ALWAYS_INLINE
2733  void
2734  scatter(const unsigned int *offsets, double *base_ptr) const
2735  {
2736  for (unsigned int i = 0; i < 2; ++i)
2737  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2738  }
2739 
2745  __m128d data;
2746 
2747 private:
2752  DEAL_II_ALWAYS_INLINE
2753  VectorizedArray
2754  get_sqrt() const
2755  {
2756  VectorizedArray res;
2757  res.data = _mm_sqrt_pd(data);
2758  return res;
2759  }
2760 
2765  DEAL_II_ALWAYS_INLINE
2766  VectorizedArray
2767  get_abs() const
2768  {
2769  // to compute the absolute value, perform
2770  // bitwise andnot with -0. This will leave all
2771  // value and exponent bits unchanged but force
2772  // the sign value to +.
2773  __m128d mask = _mm_set1_pd(-0.);
2774  VectorizedArray res;
2775  res.data = _mm_andnot_pd(mask, data);
2776  return res;
2777  }
2778 
2783  DEAL_II_ALWAYS_INLINE
2784  VectorizedArray
2785  get_max(const VectorizedArray &other) const
2786  {
2787  VectorizedArray res;
2788  res.data = _mm_max_pd(data, other.data);
2789  return res;
2790  }
2791 
2796  DEAL_II_ALWAYS_INLINE
2797  VectorizedArray
2798  get_min(const VectorizedArray &other) const
2799  {
2800  VectorizedArray res;
2801  res.data = _mm_min_pd(data, other.data);
2802  return res;
2803  }
2804 
2805  // Make a few functions friends.
2806  template <typename Number2, int width2>
2807  friend VectorizedArray<Number2, width2>
2808  std::sqrt(const VectorizedArray<Number2, width2> &);
2809  template <typename Number2, int width2>
2810  friend VectorizedArray<Number2, width2>
2811  std::abs(const VectorizedArray<Number2, width2> &);
2812  template <typename Number2, int width2>
2813  friend VectorizedArray<Number2, width2>
2814  std::max(const VectorizedArray<Number2, width2> &,
2815           const VectorizedArray<Number2, width2> &);
2816  template <typename Number2, int width2>
2817  friend VectorizedArray<Number2, width2>
2818  std::min(const VectorizedArray<Number2, width2> &,
2819           const VectorizedArray<Number2, width2> &);
2820 };
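
As a usage illustration (not part of the header above), the SSE2 specialization is reached through the same interface as the generic VectorizedArray class. A minimal sketch, assuming an SSE2 build where VectorizedArray<double, 2> wraps a single __m128d; the function name axpy_sketch is purely illustrative:

  #include <deal.II/base/vectorization.h>

  // y[0..1] += a * x[0..1], two lanes at a time
  void axpy_sketch(const double a, const double *x, double *y)
  {
    dealii::VectorizedArray<double, 2> vx, vy;
    vx.load(x);   // unaligned load of two doubles
    vy.load(y);
    vy += a * vx; // scalar * VectorizedArray, then operator+=
    vy.store(y);  // unaligned store back
  }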
2821 
2822 
2823 
2827 template <>
2828 inline DEAL_II_ALWAYS_INLINE void
2829 vectorized_load_and_transpose(const unsigned int n_entries,
2830  const double * in,
2831  const unsigned int * offsets,
2832  VectorizedArray<double, 2> * out)
2833 {
2834  const unsigned int n_chunks = n_entries / 2;
2835  for (unsigned int i = 0; i < n_chunks; ++i)
2836  {
2837  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
2838  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
2839  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
2840  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
2841  }
2842 
2843  // remainder loop of work that does not divide by 2
2844  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2845  for (unsigned int v = 0; v < 2; ++v)
2846  out[i][v] = in[offsets[v] + i];
2847 }
2848 
2849 
2850 
2854 template <>
2855 inline DEAL_II_ALWAYS_INLINE void
2856 vectorized_transpose_and_store(const bool add_into,
2857  const unsigned int n_entries,
2858  const VectorizedArray<double, 2> *in,
2859  const unsigned int * offsets,
2860  double * out)
2861 {
2862  const unsigned int n_chunks = n_entries / 2;
2863  if (add_into)
2864  {
2865  for (unsigned int i = 0; i < n_chunks; ++i)
2866  {
2867  __m128d u0 = in[2 * i + 0].data;
2868  __m128d u1 = in[2 * i + 1].data;
2869  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2870  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2871  _mm_storeu_pd(out + 2 * i + offsets[0],
2872  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
2873  res0));
2874  _mm_storeu_pd(out + 2 * i + offsets[1],
2875  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
2876  res1));
2877  }
2878  // remainder loop of work that does not divide by 2
2879  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2880  for (unsigned int v = 0; v < 2; ++v)
2881  out[offsets[v] + i] += in[i][v];
2882  }
2883  else
2884  {
2885  for (unsigned int i = 0; i < n_chunks; ++i)
2886  {
2887  __m128d u0 = in[2 * i + 0].data;
2888  __m128d u1 = in[2 * i + 1].data;
2889  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2890  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2891  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2892  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2893  }
2894  // remainder loop of work that does not divide by 2
2895  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2896  for (unsigned int v = 0; v < 2; ++v)
2897  out[offsets[v] + i] = in[i][v];
2898  }
2899 }
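
The two specializations above are typically used as a pair: vectorized_load_and_transpose() gathers scalar data from several offset rows into the lanes of VectorizedArray objects, and vectorized_transpose_and_store() writes (or accumulates) the lanes back. A minimal sketch, assuming a caller-provided backing array and offsets invented here for illustration:

  void transpose_roundtrip_sketch(double *storage /* at least 2 * 16 doubles */)
  {
    const unsigned int n_entries  = 8;
    const unsigned int offsets[2] = {0, 16}; // start of lane 0 and lane 1
    dealii::VectorizedArray<double, 2> packed[8];

    // interleave storage[offsets[v] + i] into packed[i][v]
    dealii::vectorized_load_and_transpose(n_entries, storage, offsets, packed);

    for (unsigned int i = 0; i < n_entries; ++i)
      packed[i] *= 2.;

    // write back; 'false' overwrites instead of adding into 'storage'
    dealii::vectorized_transpose_and_store(false, n_entries, packed, offsets, storage);
  }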
2900 
2901 
2902 
2906 template <>
2907 class VectorizedArray<float, 4>
2908  : public VectorizedArrayBase<VectorizedArray<float, 4>>
2909 {
2910 public:
2914  using value_type = float;
2915 
2919  static const unsigned int n_array_elements = 4;
2920 
2929  VectorizedArray() = default;
2930 
2934  VectorizedArray(const float scalar)
2935  {
2936  this->operator=(scalar);
2937  }
2938 
2939  DEAL_II_ALWAYS_INLINE
2940  VectorizedArray &
2941  operator=(const float x)
2942  {
2943  data = _mm_set1_ps(x);
2944  return *this;
2945  }
2946 
2950  DEAL_II_ALWAYS_INLINE
2951  float &operator[](const unsigned int comp)
2952  {
2953  AssertIndexRange(comp, 4);
2954  return *(reinterpret_cast<float *>(&data) + comp);
2955  }
2956 
2960  DEAL_II_ALWAYS_INLINE
2961  const float &operator[](const unsigned int comp) const
2962  {
2963  AssertIndexRange(comp, 4);
2964  return *(reinterpret_cast<const float *>(&data) + comp);
2965  }
2966 
2970  DEAL_II_ALWAYS_INLINE
2971  VectorizedArray &
2972  operator+=(const VectorizedArray &vec)
2973  {
2974 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2975  data += vec.data;
2976 # else
2977  data = _mm_add_ps(data, vec.data);
2978 # endif
2979  return *this;
2980  }
2981 
2985  DEAL_II_ALWAYS_INLINE
2986  VectorizedArray &
2987  operator-=(const VectorizedArray &vec)
2988  {
2989 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2990  data -= vec.data;
2991 # else
2992  data = _mm_sub_ps(data, vec.data);
2993 # endif
2994  return *this;
2995  }
2996 
3000  DEAL_II_ALWAYS_INLINE
3001  VectorizedArray &
3002  operator*=(const VectorizedArray &vec)
3003  {
3004 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3005  data *= vec.data;
3006 # else
3007  data = _mm_mul_ps(data, vec.data);
3008 # endif
3009  return *this;
3010  }
3011 
3015  DEAL_II_ALWAYS_INLINE
3016  VectorizedArray &
3017  operator/=(const VectorizedArray &vec)
3018  {
3019 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3020  data /= vec.data;
3021 # else
3022  data = _mm_div_ps(data, vec.data);
3023 # endif
3024  return *this;
3025  }
3026 
3032  DEAL_II_ALWAYS_INLINE
3033  void
3034  load(const float *ptr)
3035  {
3036  data = _mm_loadu_ps(ptr);
3037  }
3038 
3045  DEAL_II_ALWAYS_INLINE
3046  void
3047  store(float *ptr) const
3048  {
3049  _mm_storeu_ps(ptr, data);
3050  }
3051 
3055  DEAL_II_ALWAYS_INLINE
3056  void
3057  streaming_store(float *ptr) const
3058  {
3059  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3060  ExcMessage("Memory not aligned"));
3061  _mm_stream_ps(ptr, data);
3062  }
3063 
3076  DEAL_II_ALWAYS_INLINE
3077  void
3078  gather(const float *base_ptr, const unsigned int *offsets)
3079  {
3080  for (unsigned int i = 0; i < 4; ++i)
3081  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3082  }
3083 
3096  DEAL_II_ALWAYS_INLINE
3097  void
3098  scatter(const unsigned int *offsets, float *base_ptr) const
3099  {
3100  for (unsigned int i = 0; i < 4; ++i)
3101  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3102  }
3103 
3109  __m128 data;
3110 
3111 private:
3116  DEAL_II_ALWAYS_INLINE
3117  VectorizedArray
3118  get_sqrt() const
3119  {
3120  VectorizedArray res;
3121  res.data = _mm_sqrt_ps(data);
3122  return res;
3123  }
3124 
3129  DEAL_II_ALWAYS_INLINE
3130  VectorizedArray
3131  get_abs() const
3132  {
3133  // to compute the absolute value, perform bitwise andnot with -0. This
3134  // will leave all value and exponent bits unchanged but force the sign
3135  // value to +.
3136  __m128 mask = _mm_set1_ps(-0.f);
3137  VectorizedArray res;
3138  res.data = _mm_andnot_ps(mask, data);
3139  return res;
3140  }
3141 
3146  DEAL_II_ALWAYS_INLINE
3147  VectorizedArray
3148  get_max(const VectorizedArray &other) const
3149  {
3150  VectorizedArray res;
3151  res.data = _mm_max_ps(data, other.data);
3152  return res;
3153  }
3154 
3159  DEAL_II_ALWAYS_INLINE
3160  VectorizedArray
3161  get_min(const VectorizedArray &other) const
3162  {
3163  VectorizedArray res;
3164  res.data = _mm_min_ps(data, other.data);
3165  return res;
3166  }
3167 
3168  // Make a few functions friends.
3169  template <typename Number2, int width2>
3170  friend VectorizedArray<Number2, width2>
3171  std::sqrt(const VectorizedArray<Number2, width2> &);
3172  template <typename Number2, int width2>
3173  friend VectorizedArray<Number2, width2>
3174  std::abs(const VectorizedArray<Number2, width2> &);
3175  template <typename Number2, int width2>
3176  friend VectorizedArray<Number2, width2>
3177  std::max(const VectorizedArray<Number2, width2> &,
3178           const VectorizedArray<Number2, width2> &);
3179  template <typename Number2, int width2>
3180  friend VectorizedArray<Number2, width2>
3181  std::min(const VectorizedArray<Number2, width2> &,
3182           const VectorizedArray<Number2, width2> &);
3183 };
3184 
3185 
3186 
3190 template <>
3191 inline DEAL_II_ALWAYS_INLINE void
3192 vectorized_load_and_transpose(const unsigned int n_entries,
3193  const float * in,
3194  const unsigned int * offsets,
3195  VectorizedArray<float, 4> * out)
3196 {
3197  const unsigned int n_chunks = n_entries / 4;
3198  for (unsigned int i = 0; i < n_chunks; ++i)
3199  {
3200  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3201  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3202  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3203  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3204  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3205  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3206  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3207  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3208  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3209  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3210  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3211  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3212  }
3213 
3214  // remainder loop of work that does not divide by 4
3215  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3216  for (unsigned int v = 0; v < 4; ++v)
3217  out[i][v] = in[offsets[v] + i];
3218 }
3219 
3220 
3221 
3225 template <>
3226 inline DEAL_II_ALWAYS_INLINE void
3227 vectorized_transpose_and_store(const bool add_into,
3228  const unsigned int n_entries,
3229  const VectorizedArray<float, 4> *in,
3230  const unsigned int * offsets,
3231  float * out)
3232 {
3233  const unsigned int n_chunks = n_entries / 4;
3234  for (unsigned int i = 0; i < n_chunks; ++i)
3235  {
3236  __m128 u0 = in[4 * i + 0].data;
3237  __m128 u1 = in[4 * i + 1].data;
3238  __m128 u2 = in[4 * i + 2].data;
3239  __m128 u3 = in[4 * i + 3].data;
3240  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
3241  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
3242  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
3243  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
3244  u0 = _mm_shuffle_ps(t0, t2, 0x88);
3245  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
3246  u2 = _mm_shuffle_ps(t1, t3, 0x88);
3247  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
3248 
3249  // Cannot use the same store instructions in both paths of the 'if'
3250  // because the compiler cannot know that there is no aliasing between
3251  // pointers
3252  if (add_into)
3253  {
3254  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
3255  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3256  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
3257  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3258  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
3259  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3260  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
3261  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3262  }
3263  else
3264  {
3265  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3266  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3267  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3268  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3269  }
3270  }
3271 
3272  // remainder loop of work that does not divide by 4
3273  if (add_into)
3274  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3275  for (unsigned int v = 0; v < 4; ++v)
3276  out[offsets[v] + i] += in[i][v];
3277  else
3278  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3279  for (unsigned int v = 0; v < 4; ++v)
3280  out[offsets[v] + i] = in[i][v];
3281 }
3282 
3283 
3284 
3285 # endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
3286 
3287 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
3288  defined(__VSX__)
3289 
3290 template <>
3291 class VectorizedArray<double, 2>
3292  : public VectorizedArrayBase<VectorizedArray<double, 2>>
3293 {
3294 public:
3298  using value_type = double;
3299 
3303  static const unsigned int n_array_elements = 2;
3304 
3309  VectorizedArray() = default;
3310 
3314  VectorizedArray(const double scalar)
3315  {
3316  this->operator=(scalar);
3317  }
3318 
3322  DEAL_II_ALWAYS_INLINE
3323  VectorizedArray &
3324  operator=(const double x)
3325  {
3326  data = vec_splats(x);
3327 
3328  // Some compilers believe that vec_splats sets 'x', but that's not true.
3329  // They then warn about setting a variable and not using it. Suppress the
3330  // warning by "using" the variable:
3331  (void)x;
3332  return *this;
3333  }
3334 
3338  DEAL_II_ALWAYS_INLINE
3339  double &operator[](const unsigned int comp)
3340  {
3341  AssertIndexRange(comp, 2);
3342  return *(reinterpret_cast<double *>(&data) + comp);
3343  }
3344 
3348  DEAL_II_ALWAYS_INLINE
3349  const double &operator[](const unsigned int comp) const
3350  {
3351  AssertIndexRange(comp, 2);
3352  return *(reinterpret_cast<const double *>(&data) + comp);
3353  }
3354 
3358  DEAL_II_ALWAYS_INLINE
3359  VectorizedArray &
3360  operator+=(const VectorizedArray &vec)
3361  {
3362  data = vec_add(data, vec.data);
3363  return *this;
3364  }
3365 
3369  DEAL_II_ALWAYS_INLINE
3370  VectorizedArray &
3371  operator-=(const VectorizedArray &vec)
3372  {
3373  data = vec_sub(data, vec.data);
3374  return *this;
3375  }
3376 
3380  DEAL_II_ALWAYS_INLINE
3381  VectorizedArray &
3382  operator*=(const VectorizedArray &vec)
3383  {
3384  data = vec_mul(data, vec.data);
3385  return *this;
3386  }
3387 
3391  DEAL_II_ALWAYS_INLINE
3392  VectorizedArray &
3393  operator/=(const VectorizedArray &vec)
3394  {
3395  data = vec_div(data, vec.data);
3396  return *this;
3397  }
3398 
3403  DEAL_II_ALWAYS_INLINE
3404  void
3405  load(const double *ptr)
3406  {
3407  data = vec_vsx_ld(0, ptr);
3408  }
3409 
3414  DEAL_II_ALWAYS_INLINE
3415  void
3416  store(double *ptr) const
3417  {
3418  vec_vsx_st(data, 0, ptr);
3419  }
3420 
3423  DEAL_II_ALWAYS_INLINE
3424  void
3425  streaming_store(double *ptr) const
3426  {
3427  store(ptr);
3428  }
3429 
3432  DEAL_II_ALWAYS_INLINE
3433  void
3434  gather(const double *base_ptr, const unsigned int *offsets)
3435  {
3436  for (unsigned int i = 0; i < 2; ++i)
3437  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3438  }
3439 
3442  DEAL_II_ALWAYS_INLINE
3443  void
3444  scatter(const unsigned int *offsets, double *base_ptr) const
3445  {
3446  for (unsigned int i = 0; i < 2; ++i)
3447  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3448  }
3449 
3455  __vector double data;
3456 
3457 private:
3462  DEAL_II_ALWAYS_INLINE
3463  VectorizedArray
3464  get_sqrt() const
3465  {
3466  VectorizedArray res;
3467  res.data = vec_sqrt(data);
3468  return res;
3469  }
3470 
3475  DEAL_II_ALWAYS_INLINE
3476  VectorizedArray
3477  get_abs() const
3478  {
3479  VectorizedArray res;
3480  res.data = vec_abs(data);
3481  return res;
3482  }
3483 
3488  DEAL_II_ALWAYS_INLINE
3489  VectorizedArray
3490  get_max(const VectorizedArray &other) const
3491  {
3492  VectorizedArray res;
3493  res.data = vec_max(data, other.data);
3494  return res;
3495  }
3496 
3501  DEAL_II_ALWAYS_INLINE
3502  VectorizedArray
3503  get_min(const VectorizedArray &other) const
3504  {
3505  VectorizedArray res;
3506  res.data = vec_min(data, other.data);
3507  return res;
3508  }
3509 
3510  // Make a few functions friends.
3511  template <typename Number2, int width2>
3512  friend VectorizedArray<Number2, width2>
3513  std::sqrt(const VectorizedArray<Number2, width2> &);
3514  template <typename Number2, int width2>
3515  friend VectorizedArray<Number2, width2>
3516  std::abs(const VectorizedArray<Number2, width2> &);
3517  template <typename Number2, int width2>
3518  friend VectorizedArray<Number2, width2>
3519  std::max(const VectorizedArray<Number2, width2> &,
3520           const VectorizedArray<Number2, width2> &);
3521  template <typename Number2, int width2>
3522  friend VectorizedArray<Number2, width2>
3523  std::min(const VectorizedArray<Number2, width2> &,
3524           const VectorizedArray<Number2, width2> &);
3525 };
3526 
3527 
3528 
3529 template <>
3530 class VectorizedArray<float, 4>
3531  : public VectorizedArrayBase<VectorizedArray<float, 4>>
3532 {
3533 public:
3537  using value_type = float;
3538 
3542  static const unsigned int n_array_elements = 4;
3543 
3548  VectorizedArray() = default;
3549 
3553  VectorizedArray(const float scalar)
3554  {
3555  this->operator=(scalar);
3556  }
3557 
3561  DEAL_II_ALWAYS_INLINE
3562  VectorizedArray &
3563  operator=(const float x)
3564  {
3565  data = vec_splats(x);
3566 
3567  // Some compilers believe that vec_splats sets 'x', but that's not true.
3568  // They then warn about setting a variable and not using it. Suppress the
3569  // warning by "using" the variable:
3570  (void)x;
3571  return *this;
3572  }
3573 
3577  DEAL_II_ALWAYS_INLINE
3578  float &operator[](const unsigned int comp)
3579  {
3580  AssertIndexRange(comp, 4);
3581  return *(reinterpret_cast<float *>(&data) + comp);
3582  }
3583 
3587  DEAL_II_ALWAYS_INLINE
3588  const float &operator[](const unsigned int comp) const
3589  {
3590  AssertIndexRange(comp, 4);
3591  return *(reinterpret_cast<const float *>(&data) + comp);
3592  }
3593 
3597  DEAL_II_ALWAYS_INLINE
3598  VectorizedArray &
3599  operator+=(const VectorizedArray &vec)
3600  {
3601  data = vec_add(data, vec.data);
3602  return *this;
3603  }
3604 
3608  DEAL_II_ALWAYS_INLINE
3609  VectorizedArray &
3610  operator-=(const VectorizedArray &vec)
3611  {
3612  data = vec_sub(data, vec.data);
3613  return *this;
3614  }
3615 
3619  DEAL_II_ALWAYS_INLINE
3620  VectorizedArray &
3621  operator*=(const VectorizedArray &vec)
3622  {
3623  data = vec_mul(data, vec.data);
3624  return *this;
3625  }
3626 
3630  DEAL_II_ALWAYS_INLINE
3631  VectorizedArray &
3632  operator/=(const VectorizedArray &vec)
3633  {
3634  data = vec_div(data, vec.data);
3635  return *this;
3636  }
3637 
3642  DEAL_II_ALWAYS_INLINE
3643  void
3644  load(const float *ptr)
3645  {
3646  data = vec_vsx_ld(0, ptr);
3647  }
3648 
3653  DEAL_II_ALWAYS_INLINE
3654  void
3655  store(float *ptr) const
3656  {
3657  vec_vsx_st(data, 0, ptr);
3658  }
3659 
3662  DEAL_II_ALWAYS_INLINE
3663  void
3664  streaming_store(float *ptr) const
3665  {
3666  store(ptr);
3667  }
3668 
3671  DEAL_II_ALWAYS_INLINE
3672  void
3673  gather(const float *base_ptr, const unsigned int *offsets)
3674  {
3675  for (unsigned int i = 0; i < 4; ++i)
3676  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3677  }
3678 
3681  DEAL_II_ALWAYS_INLINE
3682  void
3683  scatter(const unsigned int *offsets, float *base_ptr) const
3684  {
3685  for (unsigned int i = 0; i < 4; ++i)
3686  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3687  }
3688 
3694  __vector float data;
3695 
3696 private:
3701  DEAL_II_ALWAYS_INLINE
3702  VectorizedArray
3703  get_sqrt() const
3704  {
3705  VectorizedArray res;
3706  res.data = vec_sqrt(data);
3707  return res;
3708  }
3709 
3714  DEAL_II_ALWAYS_INLINE
3715  VectorizedArray
3716  get_abs() const
3717  {
3718  VectorizedArray res;
3719  res.data = vec_abs(data);
3720  return res;
3721  }
3722 
3727  DEAL_II_ALWAYS_INLINE
3728  VectorizedArray
3729  get_max(const VectorizedArray &other) const
3730  {
3731  VectorizedArray res;
3732  res.data = vec_max(data, other.data);
3733  return res;
3734  }
3735 
3740  DEAL_II_ALWAYS_INLINE
3741  VectorizedArray
3742  get_min(const VectorizedArray &other) const
3743  {
3744  VectorizedArray res;
3745  res.data = vec_min(data, other.data);
3746  return res;
3747  }
3748 
3749  // Make a few functions friends.
3750  template <typename Number2, int width2>
3751  friend VectorizedArray<Number2, width2>
3752  std::sqrt(const VectorizedArray<Number2, width2> &);
3753  template <typename Number2, int width2>
3754  friend VectorizedArray<Number2, width2>
3755  std::abs(const VectorizedArray<Number2, width2> &);
3756  template <typename Number2, int width2>
3757  friend VectorizedArray<Number2, width2>
3758  std::max(const VectorizedArray<Number2, width2> &,
3759           const VectorizedArray<Number2, width2> &);
3760  template <typename Number2, int width2>
3761  friend VectorizedArray<Number2, width2>
3762  std::min(const VectorizedArray<Number2, width2> &,
3763           const VectorizedArray<Number2, width2> &);
3764 };
3765 
3766 # endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) &&
3767  // defined(__VSX__)
3768 
3769 
3770 #endif // DOXYGEN
3771 
3776 
3782 template <typename Number, int width>
3783 inline DEAL_II_ALWAYS_INLINE bool
3784 operator==(const VectorizedArray<Number, width> &lhs,
3785            const VectorizedArray<Number, width> &rhs)
3786 {
3787  for (unsigned int i = 0; i < VectorizedArray<Number, width>::n_array_elements;
3788  ++i)
3789  if (lhs[i] != rhs[i])
3790  return false;
3791 
3792  return true;
3793 }
3794 
3795 
3801 template <typename Number, int width>
3802 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3803  operator+(const VectorizedArray<Number, width> &u,
3804            const VectorizedArray<Number, width> &v)
3805 {
3806  VectorizedArray<Number, width> tmp = u;
3807  return tmp += v;
3808 }
3809 
3815 template <typename Number, int width>
3816 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3817  operator-(const VectorizedArray<Number, width> &u,
3818            const VectorizedArray<Number, width> &v)
3819 {
3820  VectorizedArray<Number, width> tmp = u;
3821  return tmp -= v;
3822 }
3823 
3829 template <typename Number, int width>
3830 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3831  operator*(const VectorizedArray<Number, width> &u,
3832            const VectorizedArray<Number, width> &v)
3833 {
3834  VectorizedArray<Number, width> tmp = u;
3835  return tmp *= v;
3836 }
3837 
3843 template <typename Number, int width>
3844 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3845  operator/(const VectorizedArray<Number, width> &u,
3846            const VectorizedArray<Number, width> &v)
3847 {
3848  VectorizedArray<Number, width> tmp = u;
3849  return tmp /= v;
3850 }
3851 
3858 template <typename Number, int width>
3859 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3860  operator+(const Number &u, const VectorizedArray<Number, width> &v)
3861 {
3862  VectorizedArray<Number, width> tmp = u;
3863  return tmp += v;
3864 }
3865 
3874 template <int width>
3875 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3876  operator+(const double u, const VectorizedArray<float, width> &v)
3877 {
3878  VectorizedArray<float, width> tmp = static_cast<float>(u);
3879  return tmp += v;
3880 }
3881 
3888 template <typename Number, int width>
3889 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3890  operator+(const VectorizedArray<Number, width> &v, const Number &u)
3891 {
3892  return u + v;
3893 }
3894 
3903 template <int width>
3904 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3905  operator+(const VectorizedArray<float, width> &v, const double u)
3906 {
3907  return u + v;
3908 }
3909 
3916 template <typename Number, int width>
3917 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3918  operator-(const Number &u, const VectorizedArray<Number, width> &v)
3919 {
3920  VectorizedArray<Number, width> tmp = u;
3921  return tmp -= v;
3922 }
3923 
3932 template <int width>
3933 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3934  operator-(const double u, const VectorizedArray<float, width> &v)
3935 {
3936  VectorizedArray<float, width> tmp = static_cast<float>(u);
3937  return tmp -= v;
3938 }
3939 
3946 template <typename Number, int width>
3947 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3948  operator-(const VectorizedArray<Number, width> &v, const Number &u)
3949 {
3950  VectorizedArray<Number, width> tmp = u;
3951  return v - tmp;
3952 }
3953 
3962 template <int width>
3963 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3964  operator-(const VectorizedArray<float, width> &v, const double u)
3965 {
3966  VectorizedArray<float, width> tmp = static_cast<float>(u);
3967  return v - tmp;
3968 }
3969 
3976 template <typename Number, int width>
3977 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3978  operator*(const Number &u, const VectorizedArray<Number, width> &v)
3979 {
3980  VectorizedArray<Number, width> tmp = u;
3981  return tmp *= v;
3982 }
3983 
3992 template <int width>
3993 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3994  operator*(const double u, const VectorizedArray<float, width> &v)
3995 {
3996  VectorizedArray<float, width> tmp = static_cast<float>(u);
3997  return tmp *= v;
3998 }
3999 
4006 template <typename Number, int width>
4007 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4008  operator*(const VectorizedArray<Number, width> &v, const Number &u)
4009 {
4010  return u * v;
4011 }
4012 
4021 template <int width>
4022 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4023  operator*(const VectorizedArray<float, width> &v, const double u)
4024 {
4025  return u * v;
4026 }
4027 
4034 template <typename Number, int width>
4035 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4036  operator/(const Number &u, const VectorizedArray<Number, width> &v)
4037 {
4038  VectorizedArray<Number, width> tmp = u;
4039  return tmp /= v;
4040 }
4041 
4050 template <int width>
4051 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4052  operator/(const double u, const VectorizedArray<float, width> &v)
4053 {
4054  VectorizedArray<float, width> tmp = static_cast<float>(u);
4055  return tmp /= v;
4056 }
4057 
4064 template <typename Number, int width>
4065 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4066  operator/(const VectorizedArray<Number, width> &v, const Number &u)
4067 {
4068  VectorizedArray<Number, width> tmp = u;
4069  return v / tmp;
4070 }
4071 
4080 template <int width>
4081 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
4082  operator/(const VectorizedArray<float, width> &v, const double u)
4083 {
4084  VectorizedArray<float, width> tmp = static_cast<float>(u);
4085  return v / tmp;
4086 }
4087 
4093 template <typename Number, int width>
4094 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4095  operator+(const VectorizedArray<Number, width> &u)
4096 {
4097  return u;
4098 }
4099 
4105 template <typename Number, int width>
4106 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
4107  operator-(const VectorizedArray<Number, width> &u)
4108 {
4109  // to get a negative sign, subtract the input from zero (could also
4110  // multiply by -1, but this one is slightly simpler)
4111  return VectorizedArray<Number, width>() - u;
4112 }
4113 
4119 template <typename Number, int width>
4120 inline std::ostream &
4121 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
4122 {
4123  constexpr unsigned int n = VectorizedArray<Number, width>::n_array_elements;
4124  for (unsigned int i = 0; i < n - 1; ++i)
4125  out << p[i] << ' ';
4126  out << p[n - 1];
4127 
4128  return out;
4129 }
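
For illustration only: streaming a VectorizedArray prints its lanes separated by single spaces, so on a two-lane (SSE2 double) build the sketch below would print something like "2 1.5". The function name print_sketch is invented here:

  #include <deal.II/base/vectorization.h>
  #include <iostream>

  void print_sketch()
  {
    dealii::VectorizedArray<double> v = dealii::make_vectorized_array(1.5);
    v[0] = 2.0;
    std::cout << v << std::endl;
  }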
4130 
4132 
4137 
4138 
4146 enum class SIMDComparison : int
4147 {
4148 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
4149  equal = _CMP_EQ_OQ,
4150  not_equal = _CMP_NEQ_OQ,
4151  less_than = _CMP_LT_OQ,
4152  less_than_or_equal = _CMP_LE_OQ,
4153  greater_than = _CMP_GT_OQ,
4154  greater_than_or_equal = _CMP_GE_OQ
4155 #else
4156  equal,
4157  not_equal,
4158  less_than,
4159  less_than_or_equal,
4160  greater_than,
4161  greater_than_or_equal
4162 #endif
4163 };
4164 
4165 
4229 template <SIMDComparison predicate, typename Number>
4230 DEAL_II_ALWAYS_INLINE inline Number
4231 compare_and_apply_mask(const Number &left,
4232  const Number &right,
4233  const Number &true_value,
4234  const Number &false_value)
4235 {
4236  bool mask;
4237  switch (predicate)
4238  {
4239  case SIMDComparison::equal:
4240  mask = (left == right);
4241  break;
4242  case SIMDComparison::not_equal:
4243  mask = (left != right);
4244  break;
4245  case SIMDComparison::less_than:
4246  mask = (left < right);
4247  break;
4248  case SIMDComparison::less_than_or_equal:
4249  mask = (left <= right);
4250  break;
4251  case SIMDComparison::greater_than:
4252  mask = (left > right);
4253  break;
4254  case SIMDComparison::greater_than_or_equal:
4255  mask = (left >= right);
4256  break;
4257  }
4258 
4259  return mask ? true_value : false_value;
4260 }
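
As a small illustration of this scalar fallback (and of the vectorized overloads that follow below), compare_and_apply_mask() lets one build branch-free, lane-wise selections. A minimal sketch; the helper name clamp_above is invented here:

  // keep each lane of 'v' where it is below 'upper', otherwise replace it
  dealii::VectorizedArray<double>
  clamp_above(const dealii::VectorizedArray<double> &v, const double upper)
  {
    const auto bound = dealii::make_vectorized_array(upper);
    return dealii::compare_and_apply_mask<dealii::SIMDComparison::less_than>(
      v, bound, /*true_value=*/v, /*false_value=*/bound);
  }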
4261 
4262 
4267 template <SIMDComparison predicate, typename Number>
4268 DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
4269 compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
4270  const VectorizedArray<Number, 1> &right,
4271  const VectorizedArray<Number, 1> &true_value,
4272  const VectorizedArray<Number, 1> &false_value)
4273 {
4274  VectorizedArray<Number, 1> result;
4275  result.data = compare_and_apply_mask<predicate, Number>(left.data,
4276  right.data,
4277  true_value.data,
4278  false_value.data);
4279  return result;
4280 }
4281 
4283 
4284 #ifndef DOXYGEN
4285 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
4286 
4287 template <SIMDComparison predicate>
4288 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
4289 compare_and_apply_mask(const VectorizedArray<float, 16> &left,
4290  const VectorizedArray<float, 16> &right,
4291  const VectorizedArray<float, 16> &true_values,
4292  const VectorizedArray<float, 16> &false_values)
4293 {
4294  const __mmask16 mask =
4295  _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
4296  VectorizedArray<float, 16> result;
4297  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
4298  return result;
4299 }
4300 
4301 
4302 
4303 template <SIMDComparison predicate>
4304 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
4305 compare_and_apply_mask(const VectorizedArray<double, 8> &left,
4306  const VectorizedArray<double, 8> &right,
4307  const VectorizedArray<double, 8> &true_values,
4308  const VectorizedArray<double, 8> &false_values)
4309 {
4310  const __mmask16 mask =
4311  _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
4312  VectorizedArray<double, 8> result;
4313  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
4314  return result;
4315 }
4316 
4317 # endif
4318 
4319 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
4320 
4321 template <SIMDComparison predicate>
4322 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
4323 compare_and_apply_mask(const VectorizedArray<float, 8> &left,
4324  const VectorizedArray<float, 8> &right,
4325  const VectorizedArray<float, 8> &true_values,
4326  const VectorizedArray<float, 8> &false_values)
4327 {
4328  const auto mask =
4329  _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
4330 
4331  VectorizedArray<float, 8> result;
4332  result.data = _mm256_or_ps(_mm256_and_ps(mask, true_values.data),
4333  _mm256_andnot_ps(mask, false_values.data));
4334  return result;
4335 }
4336 
4337 
4338 template <SIMDComparison predicate>
4339 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
4340 compare_and_apply_mask(const VectorizedArray<double, 4> &left,
4341  const VectorizedArray<double, 4> &right,
4342  const VectorizedArray<double, 4> &true_values,
4343  const VectorizedArray<double, 4> &false_values)
4344 {
4345  const auto mask =
4346  _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
4347 
4348  VectorizedArray<double, 4> result;
4349  result.data = _mm256_or_pd(_mm256_and_pd(mask, true_values.data),
4350  _mm256_andnot_pd(mask, false_values.data));
4351  return result;
4352 }
4353 
4354 # endif
4355 
4356 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
4357 
4358 template <SIMDComparison predicate>
4359 DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 4>
4360 compare_and_apply_mask(const VectorizedArray<float, 4> &left,
4361  const VectorizedArray<float, 4> &right,
4362  const VectorizedArray<float, 4> &true_values,
4363  const VectorizedArray<float, 4> &false_values)
4364 {
4365  __m128 mask;
4366  switch (predicate)
4367  {
4368  case SIMDComparison::equal:
4369  mask = _mm_cmpeq_ps(left.data, right.data);
4370  break;
4371  case SIMDComparison::not_equal:
4372  mask = _mm_cmpneq_ps(left.data, right.data);
4373  break;
4374  case SIMDComparison::less_than:
4375  mask = _mm_cmplt_ps(left.data, right.data);
4376  break;
4377  case SIMDComparison::less_than_or_equal:
4378  mask = _mm_cmple_ps(left.data, right.data);
4379  break;
4380  case SIMDComparison::greater_than:
4381  mask = _mm_cmpgt_ps(left.data, right.data);
4382  break;
4383  case SIMDComparison::greater_than_or_equal:
4384  mask = _mm_cmpge_ps(left.data, right.data);
4385  break;
4386  }
4387 
4388  VectorizedArray<float, 4> result;
4389  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
4390  _mm_andnot_ps(mask, false_values.data));
4391 
4392  return result;
4393 }
4394 
4395 
4396 template <SIMDComparison predicate>
4397 DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 2>
4398 compare_and_apply_mask(const VectorizedArray<double, 2> &left,
4399  const VectorizedArray<double, 2> &right,
4400  const VectorizedArray<double, 2> &true_values,
4401  const VectorizedArray<double, 2> &false_values)
4402 {
4403  __m128d mask;
4404  switch (predicate)
4405  {
4406  case SIMDComparison::equal:
4407  mask = _mm_cmpeq_pd(left.data, right.data);
4408  break;
4409  case SIMDComparison::not_equal:
4410  mask = _mm_cmpneq_pd(left.data, right.data);
4411  break;
4412  case SIMDComparison::less_than:
4413  mask = _mm_cmplt_pd(left.data, right.data);
4414  break;
4415  case SIMDComparison::less_than_or_equal:
4416  mask = _mm_cmple_pd(left.data, right.data);
4417  break;
4418  case SIMDComparison::greater_than:
4419  mask = _mm_cmpgt_pd(left.data, right.data);
4420  break;
4421  case SIMDComparison::greater_than_or_equal:
4422  mask = _mm_cmpge_pd(left.data, right.data);
4423  break;
4424  }
4425 
4426  VectorizedArray<double, 2> result;
4427  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
4428  _mm_andnot_pd(mask, false_values.data));
4429 
4430  return result;
4431 }
4432 
4433 # endif
4434 #endif // DOXYGEN
4435 
4436 
4437 DEAL_II_NAMESPACE_CLOSE
4438 
4445 namespace std
4446 {
4454  template <typename Number, int width>
4455  inline ::VectorizedArray<Number, width>
4456  sin(const ::VectorizedArray<Number, width> &x)
4457  {
4458  // put values in an array and later read in that array with an unaligned
4459  // read. This should save some instructions as compared to directly
4460  // setting the individual elements and also circumvents a compiler
4461  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
4462  // from April 2014, topic "matrix_free/step-48 Test").
4463  Number values[::VectorizedArray<Number, width>::n_array_elements];
4464  for (unsigned int i = 0;
4465  i < ::VectorizedArray<Number, width>::n_array_elements;
4466  ++i)
4467  values[i] = std::sin(x[i]);
4468  ::VectorizedArray<Number, width> out;
4469  out.load(&values[0]);
4470  return out;
4471  }
4472 
4473 
4474 
4482  template <typename Number, int width>
4483  inline ::VectorizedArray<Number, width>
4484  cos(const ::VectorizedArray<Number, width> &x)
4485  {
4486  Number values[::VectorizedArray<Number, width>::n_array_elements];
4487  for (unsigned int i = 0;
4488  i < ::VectorizedArray<Number, width>::n_array_elements;
4489  ++i)
4490  values[i] = std::cos(x[i]);
4491  ::VectorizedArray<Number, width> out;
4492  out.load(&values[0]);
4493  return out;
4494  }
4495 
4496 
4497 
4505  template <typename Number, int width>
4506  inline ::VectorizedArray<Number, width>
4507  tan(const ::VectorizedArray<Number, width> &x)
4508  {
4509  Number values[::VectorizedArray<Number, width>::n_array_elements];
4510  for (unsigned int i = 0;
4511  i < ::VectorizedArray<Number, width>::n_array_elements;
4512  ++i)
4513  values[i] = std::tan(x[i]);
4514  ::VectorizedArray<Number, width> out;
4515  out.load(&values[0]);
4516  return out;
4517  }
4518 
4519 
4520 
4528  template <typename Number, int width>
4529  inline ::VectorizedArray<Number, width>
4530  exp(const ::VectorizedArray<Number, width> &x)
4531  {
4532  Number values[::VectorizedArray<Number, width>::n_array_elements];
4533  for (unsigned int i = 0;
4534  i < ::VectorizedArray<Number, width>::n_array_elements;
4535  ++i)
4536  values[i] = std::exp(x[i]);
4537  ::VectorizedArray<Number, width> out;
4538  out.load(&values[0]);
4539  return out;
4540  }
4541 
4542 
4543 
4551  template <typename Number, int width>
4552  inline ::VectorizedArray<Number, width>
4553  log(const ::VectorizedArray<Number, width> &x)
4554  {
4555  Number values[::VectorizedArray<Number, width>::n_array_elements];
4556  for (unsigned int i = 0;
4557  i < ::VectorizedArray<Number, width>::n_array_elements;
4558  ++i)
4559  values[i] = std::log(x[i]);
4560  ::VectorizedArray<Number, width> out;
4561  out.load(&values[0]);
4562  return out;
4563  }
4564 
4565 
4566 
4574  template <typename Number, int width>
4575  inline ::VectorizedArray<Number, width>
4576  sqrt(const ::VectorizedArray<Number, width> &x)
4577  {
4578  return x.get_sqrt();
4579  }
4580 
4581 
4582 
4590  template <typename Number, int width>
4591  inline ::VectorizedArray<Number, width>
4592  pow(const ::VectorizedArray<Number, width> &x, const Number p)
4593  {
4594  Number values[::VectorizedArray<Number, width>::n_array_elements];
4595  for (unsigned int i = 0;
4596  i < ::VectorizedArray<Number, width>::n_array_elements;
4597  ++i)
4598  values[i] = std::pow(x[i], p);
4599  ::VectorizedArray<Number, width> out;
4600  out.load(&values[0]);
4601  return out;
4602  }
4603 
4604 
4605 
4613  template <typename Number, int width>
4614  inline ::VectorizedArray<Number, width>
4615  abs(const ::VectorizedArray<Number, width> &x)
4616  {
4617  return x.get_abs();
4618  }
4619 
4620 
4621 
4629  template <typename Number, int width>
4630  inline ::VectorizedArray<Number, width>
4631  max(const ::VectorizedArray<Number, width> &x,
4632  const ::VectorizedArray<Number, width> &y)
4633  {
4634  return x.get_max(y);
4635  }
4636 
4637 
4638 
4646  template <typename Number, int width>
4647  inline ::VectorizedArray<Number, width>
4648  min(const ::VectorizedArray<Number, width> &x,
4649  const ::VectorizedArray<Number, width> &y)
4650  {
4651  return x.get_min(y);
4652  }
4653 
4654 } // namespace std
4655 
4656 #endif
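
A closing illustration (not part of the header above): because the overloads in namespace std mirror the scalar functions, numerical kernels can be written once and instantiated for both plain scalars and VectorizedArray. A minimal sketch; the name gaussian is illustrative:

  #include <deal.II/base/vectorization.h>
  #include <cmath>

  template <typename Number>
  Number gaussian(const Number &x)
  {
    // valid for Number = double as well as Number = dealii::VectorizedArray<double>
    return std::exp(-x * x);
  }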