Reference documentation for deal.II version Git b431077572 2019-08-18 09:09:48 -0400
\(\newcommand{\dealcoloneq}{\mathrel{\vcenter{:}}=}\)
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/exceptions.h>
23 #include <deal.II/base/template_constraints.h>
24 
25 #include <cmath>
26 
27 // Note:
28 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
29 // according to the following scheme (on x86-based architectures)
30 // #ifdef __AVX512F__
31 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
32 // #elif defined (__AVX__)
33 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
34 // #elif defined (__SSE2__)
35 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
36 // #else
37 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
38 // #endif
39 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
40 // 'check_01_cpu_features.cmake', ensures that these feature are not only
41 // present in the compilation unit but also working properly.
42 
43 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
44 
45 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__SSE2__) && \
46  !defined(__AVX__)
47 # error \
48  "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
49 # endif
50 # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__SSE2__) && \
51  !defined(__AVX512F__)
52 # error \
53  "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
54 # endif
55 
56 # if defined(_MSC_VER)
57 # include <intrin.h>
58 # elif defined(__ALTIVEC__)
59 # include <altivec.h>
60 
61 // altivec.h defines vector, pixel, bool, but we do not use them, so undefine
62 // them before they make trouble
63 # undef vector
64 # undef pixel
65 # undef bool
66 # else
67 # include <x86intrin.h>
68 # endif
69 
70 #endif
71 
72 
73 DEAL_II_NAMESPACE_OPEN
74 
75 
76 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
77 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
78 
79 template <typename Number, int width>
80 struct EnableIfScalar<VectorizedArray<Number, width>>
81 {
83 };
84 
85 
86 
172 template <typename Number, int width>
174 {
175 public:
179  using value_type = Number;
180 
186  static const unsigned int n_array_elements = 1;
187 
188  static_assert(width == n_array_elements,
189  "You specified an illegal width that is not supported.");
190 
191  // POD means that there should be no user-defined constructors, destructors
192  // and copy functions (the standard is somewhat relaxed in C++2011, though).
193 
198  VectorizedArray() = default;
199 
203  VectorizedArray(const Number scalar)
204  {
205  this->operator=(scalar);
206  }
207 
211  DEAL_II_ALWAYS_INLINE
213  operator=(const Number scalar)
214  {
215  data = scalar;
216  return *this;
217  }
218 
223  DEAL_II_ALWAYS_INLINE
224  Number &operator[](const unsigned int comp)
225  {
226  (void)comp;
227  AssertIndexRange(comp, 1);
228  return data;
229  }
230 
235  DEAL_II_ALWAYS_INLINE
236  const Number &operator[](const unsigned int comp) const
237  {
238  (void)comp;
239  AssertIndexRange(comp, 1);
240  return data;
241  }
242 
246  DEAL_II_ALWAYS_INLINE
249  {
250  data += vec.data;
251  return *this;
252  }
253 
257  DEAL_II_ALWAYS_INLINE
260  {
261  data -= vec.data;
262  return *this;
263  }
264 
268  DEAL_II_ALWAYS_INLINE
271  {
272  data *= vec.data;
273  return *this;
274  }
275 
279  DEAL_II_ALWAYS_INLINE
282  {
283  data /= vec.data;
284  return *this;
285  }
286 
293  DEAL_II_ALWAYS_INLINE
294  void
295  load(const Number *ptr)
296  {
297  data = *ptr;
298  }
299 
306  DEAL_II_ALWAYS_INLINE
307  void
308  store(Number *ptr) const
309  {
310  *ptr = data;
311  }
312 
359  DEAL_II_ALWAYS_INLINE
360  void
361  streaming_store(Number *ptr) const
362  {
363  *ptr = data;
364  }
365 
378  DEAL_II_ALWAYS_INLINE
379  void
380  gather(const Number *base_ptr, const unsigned int *offsets)
381  {
382  data = base_ptr[offsets[0]];
383  }
384 
397  DEAL_II_ALWAYS_INLINE
398  void
399  scatter(const unsigned int *offsets, Number *base_ptr) const
400  {
401  base_ptr[offsets[0]] = data;
402  }
403 
408  Number data;
409 
410 private:
415  DEAL_II_ALWAYS_INLINE
417  get_sqrt() const
418  {
419  VectorizedArray res;
420  res.data = std::sqrt(data);
421  return res;
422  }
423 
428  DEAL_II_ALWAYS_INLINE
430  get_abs() const
431  {
432  VectorizedArray res;
433  res.data = std::fabs(data);
434  return res;
435  }
436 
441  DEAL_II_ALWAYS_INLINE
443  get_max(const VectorizedArray &other) const
444  {
445  VectorizedArray res;
446  res.data = std::max(data, other.data);
447  return res;
448  }
449 
454  DEAL_II_ALWAYS_INLINE
456  get_min(const VectorizedArray &other) const
457  {
458  VectorizedArray res;
459  res.data = std::min(data, other.data);
460  return res;
461  }
462 
463  // Make a few functions friends.
464  template <typename Number2, int width2>
466  std::sqrt(const VectorizedArray<Number2, width2> &);
467  template <typename Number2, int width2>
469  std::abs(const VectorizedArray<Number2, width2> &);
470  template <typename Number2, int width2>
472  std::max(const VectorizedArray<Number2, width2> &,
474  template <typename Number2, int width2>
476  std::min(const VectorizedArray<Number2, width2> &,
478 };
479 
480 
481 
482 // We need to have a separate declaration for static const members
483 template <typename Number, int width>
485 
486 
487 
494 template <
495  typename Number,
497 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
498  make_vectorized_array(const Number &u)
499 {
501  return result;
502 }
503 
504 
505 
512 template <typename VectorizedArrayType>
513 inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
514  make_vectorized_array(const typename VectorizedArrayType::value_type &u)
515 {
516  static_assert(
517  std::is_same<VectorizedArrayType,
518  VectorizedArray<typename VectorizedArrayType::value_type,
519  VectorizedArrayType::n_array_elements>>::value,
520  "VectorizedArrayType is not a VectorizedArray.");
521 
522  VectorizedArrayType result = u;
523  return result;
524 }
525 
526 
527 
553 template <typename Number, int width>
554 inline DEAL_II_ALWAYS_INLINE void
555 vectorized_load_and_transpose(const unsigned int n_entries,
556  const Number * in,
557  const unsigned int * offsets,
559 {
560  for (unsigned int i = 0; i < n_entries; ++i)
561  for (unsigned int v = 0;
562  v < VectorizedArray<Number, width>::n_array_elements;
563  ++v)
564  out[i][v] = in[offsets[v] + i];
565 }
566 
567 
568 
607 template <typename Number, int width>
608 inline DEAL_II_ALWAYS_INLINE void
609 vectorized_transpose_and_store(const bool add_into,
610  const unsigned int n_entries,
612  const unsigned int * offsets,
613  Number * out)
614 {
615  if (add_into)
616  for (unsigned int i = 0; i < n_entries; ++i)
617  for (unsigned int v = 0;
618  v < VectorizedArray<Number, width>::n_array_elements;
619  ++v)
620  out[offsets[v] + i] += in[i][v];
621  else
622  for (unsigned int i = 0; i < n_entries; ++i)
623  for (unsigned int v = 0;
624  v < VectorizedArray<Number, width>::n_array_elements;
625  ++v)
626  out[offsets[v] + i] = in[i][v];
627 }
628 
629 
630 // for safety, also check that __AVX512F__ is defined in case the user manually
631 // set some conflicting compile flags which prevent compilation
632 
633 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
634 
638 template <>
639 class VectorizedArray<double, 8>
640 {
641 public:
645  using value_type = double;
646 
650  static const unsigned int n_array_elements = 8;
651 
656  VectorizedArray() = default;
657 
661  VectorizedArray(const double scalar)
662  {
663  this->operator=(scalar);
664  }
665 
669  DEAL_II_ALWAYS_INLINE
671  operator=(const double x)
672  {
673  data = _mm512_set1_pd(x);
674  return *this;
675  }
676 
680  DEAL_II_ALWAYS_INLINE
681  double &operator[](const unsigned int comp)
682  {
683  AssertIndexRange(comp, 8);
684  return *(reinterpret_cast<double *>(&data) + comp);
685  }
686 
690  DEAL_II_ALWAYS_INLINE
691  const double &operator[](const unsigned int comp) const
692  {
693  AssertIndexRange(comp, 8);
694  return *(reinterpret_cast<const double *>(&data) + comp);
695  }
696 
700  DEAL_II_ALWAYS_INLINE
702  operator+=(const VectorizedArray &vec)
703  {
704  // if the compiler supports vector arithmetics, we can simply use +=
705  // operator on the given data type. this allows the compiler to combine
706  // additions with multiplication (fused multiply-add) if those
707  // instructions are available. Otherwise, we need to use the built-in
708  // intrinsic command for __m512d
709 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
710  data += vec.data;
711 # else
712  data = _mm512_add_pd(data, vec.data);
713 # endif
714  return *this;
715  }
716 
720  DEAL_II_ALWAYS_INLINE
722  operator-=(const VectorizedArray &vec)
723  {
724 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
725  data -= vec.data;
726 # else
727  data = _mm512_sub_pd(data, vec.data);
728 # endif
729  return *this;
730  }
734  DEAL_II_ALWAYS_INLINE
736  operator*=(const VectorizedArray &vec)
737  {
738 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
739  data *= vec.data;
740 # else
741  data = _mm512_mul_pd(data, vec.data);
742 # endif
743  return *this;
744  }
745 
749  DEAL_II_ALWAYS_INLINE
751  operator/=(const VectorizedArray &vec)
752  {
753 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
754  data /= vec.data;
755 # else
756  data = _mm512_div_pd(data, vec.data);
757 # endif
758  return *this;
759  }
760 
766  DEAL_II_ALWAYS_INLINE
767  void
768  load(const double *ptr)
769  {
770  data = _mm512_loadu_pd(ptr);
771  }
772 
779  DEAL_II_ALWAYS_INLINE
780  void
781  store(double *ptr) const
782  {
783  _mm512_storeu_pd(ptr, data);
784  }
785 
789  DEAL_II_ALWAYS_INLINE
790  void
791  streaming_store(double *ptr) const
792  {
793  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
794  ExcMessage("Memory not aligned"));
795  _mm512_stream_pd(ptr, data);
796  }
797 
810  DEAL_II_ALWAYS_INLINE
811  void
812  gather(const double *base_ptr, const unsigned int *offsets)
813  {
814  // unfortunately, there does not appear to be a 256 bit integer load, so
815  // do it by some reinterpret casts here. this is allowed because the Intel
816  // API allows aliasing between different vector types.
817  const __m256 index_val =
818  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
819  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
820  data = _mm512_i32gather_pd(index, base_ptr, 8);
821  }
822 
835  DEAL_II_ALWAYS_INLINE
836  void
837  scatter(const unsigned int *offsets, double *base_ptr) const
838  {
839  for (unsigned int i = 0; i < 8; ++i)
840  for (unsigned int j = i + 1; j < 8; ++j)
841  Assert(offsets[i] != offsets[j],
842  ExcMessage("Result of scatter undefined if two offset elements"
843  " point to the same position"));
844 
845  // unfortunately, there does not appear to be a 256 bit integer load, so
846  // do it by some reinterpret casts here. this is allowed because the Intel
847  // API allows aliasing between different vector types.
848  const __m256 index_val =
849  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
850  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
851  _mm512_i32scatter_pd(base_ptr, index, data, 8);
852  }
853 
858  __m512d data;
859 
860 private:
865  DEAL_II_ALWAYS_INLINE
867  get_sqrt() const
868  {
869  VectorizedArray res;
870  res.data = _mm512_sqrt_pd(data);
871  return res;
872  }
873 
878  DEAL_II_ALWAYS_INLINE
880  get_abs() const
881  {
882  // to compute the absolute value, perform bitwise andnot with -0. This
883  // will leave all value and exponent bits unchanged but force the sign
884  // value to +. Since there is no andnot for AVX512, we interpret the data
885  // as 64 bit integers and do the andnot on those types (note that andnot
886  // is a bitwise operation so the data type does not matter)
887  __m512d mask = _mm512_set1_pd(-0.);
888  VectorizedArray res;
889  res.data = reinterpret_cast<__m512d>(
890  _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
891  reinterpret_cast<__m512i>(data)));
892  return res;
893  }
894 
899  DEAL_II_ALWAYS_INLINE
901  get_max(const VectorizedArray &other) const
902  {
903  VectorizedArray res;
904  res.data = _mm512_max_pd(data, other.data);
905  return res;
906  }
907 
912  DEAL_II_ALWAYS_INLINE
914  get_min(const VectorizedArray &other) const
915  {
916  VectorizedArray res;
917  res.data = _mm512_min_pd(data, other.data);
918  return res;
919  }
920 
921  // Make a few functions friends.
922  template <typename Number2, int width2>
924  std::sqrt(const VectorizedArray<Number2, width2> &);
925  template <typename Number2, int width2>
927  std::abs(const VectorizedArray<Number2, width2> &);
928  template <typename Number2, int width2>
930  std::max(const VectorizedArray<Number2, width2> &,
932  template <typename Number2, int width2>
934  std::min(const VectorizedArray<Number2, width2> &,
936 };
937 
938 
939 
943 template <>
944 inline DEAL_II_ALWAYS_INLINE void
945 vectorized_load_and_transpose(const unsigned int n_entries,
946  const double * in,
947  const unsigned int * offsets,
949 {
950  // do not do full transpose because the code is long and will most
951  // likely not pay off because many processors have two load units
952  // (for the top 8 instructions) but only 1 permute unit (for the 8
953  // shuffle/unpack instructions). rather start the transposition on the
954  // vectorized array of half the size with 256 bits
955  const unsigned int n_chunks = n_entries / 4;
956  for (unsigned int i = 0; i < n_chunks; ++i)
957  {
958  __m512d t0, t1, t2, t3 = {};
959 
960  t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
961  t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
962  t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
963  t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
964  t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
965  t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
966  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
967  t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
968 
969  __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
970  __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
971  __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
972  __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
973  out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
974  out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
975  out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
976  out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
977  }
978  // remainder loop of work that does not divide by 4
979  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
980  out[i].gather(in + i, offsets);
981 }
982 
983 
984 
988 template <>
989 inline DEAL_II_ALWAYS_INLINE void
990 vectorized_transpose_and_store(const bool add_into,
991  const unsigned int n_entries,
992  const VectorizedArray<double, 8> *in,
993  const unsigned int * offsets,
994  double * out)
995 {
996  // as for the load, we split the store operations into 256 bit units to
997  // better balance between code size, shuffle instructions, and stores
998  const unsigned int n_chunks = n_entries / 4;
999  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1000  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1001  for (unsigned int i = 0; i < n_chunks; ++i)
1002  {
1003  __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1004  __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1005  __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1006  __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1007  __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1008  __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1009  __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1010  __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1011  __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1012  __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1013  __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1014  __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1015  __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1016  __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1017  __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1018  __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1019 
1020  // Cannot use the same store instructions in both paths of the 'if'
1021  // because the compiler cannot know that there is no aliasing
1022  // between pointers
1023  if (add_into)
1024  {
1025  res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1026  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1027  res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1028  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1029  res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1030  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1031  res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1032  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1033  res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1034  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1035  res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1036  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1037  res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1038  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1039  res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1040  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1041  }
1042  else
1043  {
1044  _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1045  _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1046  _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1047  _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1048  _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1049  _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1050  _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1051  _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1052  }
1053  }
1054 
1055  // remainder loop of work that does not divide by 4
1056  if (add_into)
1057  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1058  for (unsigned int v = 0; v < 8; ++v)
1059  out[offsets[v] + i] += in[i][v];
1060  else
1061  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1062  for (unsigned int v = 0; v < 8; ++v)
1063  out[offsets[v] + i] = in[i][v];
1064 }
1065 
1066 
1067 
1071 template <>
1072 class VectorizedArray<float, 16>
1073 {
1074 public:
1078  using value_type = float;
1079 
1083  static const unsigned int n_array_elements = 16;
1084 
1089  VectorizedArray() = default;
1090 
1094  VectorizedArray(const float scalar)
1095  {
1096  this->operator=(scalar);
1097  }
1098 
1102  DEAL_II_ALWAYS_INLINE
1103  VectorizedArray &
1104  operator=(const float x)
1105  {
1106  data = _mm512_set1_ps(x);
1107  return *this;
1108  }
1109 
1113  DEAL_II_ALWAYS_INLINE
1114  float &operator[](const unsigned int comp)
1115  {
1116  AssertIndexRange(comp, 16);
1117  return *(reinterpret_cast<float *>(&data) + comp);
1118  }
1119 
1123  DEAL_II_ALWAYS_INLINE
1124  const float &operator[](const unsigned int comp) const
1125  {
1126  AssertIndexRange(comp, 16);
1127  return *(reinterpret_cast<const float *>(&data) + comp);
1128  }
1129 
1133  DEAL_II_ALWAYS_INLINE
1134  VectorizedArray &
1135  operator+=(const VectorizedArray &vec)
1136  {
1137  // if the compiler supports vector arithmetics, we can simply use +=
1138  // operator on the given data type. this allows the compiler to combine
1139  // additions with multiplication (fused multiply-add) if those
1140  // instructions are available. Otherwise, we need to use the built-in
1141  // intrinsic command for __m512d
1142 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1143  data += vec.data;
1144 # else
1145  data = _mm512_add_ps(data, vec.data);
1146 # endif
1147  return *this;
1148  }
1149 
1153  DEAL_II_ALWAYS_INLINE
1154  VectorizedArray &
1155  operator-=(const VectorizedArray &vec)
1156  {
1157 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1158  data -= vec.data;
1159 # else
1160  data = _mm512_sub_ps(data, vec.data);
1161 # endif
1162  return *this;
1163  }
1167  DEAL_II_ALWAYS_INLINE
1168  VectorizedArray &
1169  operator*=(const VectorizedArray &vec)
1170  {
1171 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1172  data *= vec.data;
1173 # else
1174  data = _mm512_mul_ps(data, vec.data);
1175 # endif
1176  return *this;
1177  }
1178 
1182  DEAL_II_ALWAYS_INLINE
1183  VectorizedArray &
1184  operator/=(const VectorizedArray &vec)
1185  {
1186 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1187  data /= vec.data;
1188 # else
1189  data = _mm512_div_ps(data, vec.data);
1190 # endif
1191  return *this;
1192  }
1193 
1199  DEAL_II_ALWAYS_INLINE
1200  void
1201  load(const float *ptr)
1202  {
1203  data = _mm512_loadu_ps(ptr);
1204  }
1205 
1212  DEAL_II_ALWAYS_INLINE
1213  void
1214  store(float *ptr) const
1215  {
1216  _mm512_storeu_ps(ptr, data);
1217  }
1218 
1222  DEAL_II_ALWAYS_INLINE
1223  void
1224  streaming_store(float *ptr) const
1225  {
1226  Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1227  ExcMessage("Memory not aligned"));
1228  _mm512_stream_ps(ptr, data);
1229  }
1230 
1243  DEAL_II_ALWAYS_INLINE
1244  void
1245  gather(const float *base_ptr, const unsigned int *offsets)
1246  {
1247  // unfortunately, there does not appear to be a 512 bit integer load, so
1248  // do it by some reinterpret casts here. this is allowed because the Intel
1249  // API allows aliasing between different vector types.
1250  const __m512 index_val =
1251  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1252  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1253  data = _mm512_i32gather_ps(index, base_ptr, 4);
1254  }
1255 
1268  DEAL_II_ALWAYS_INLINE
1269  void
1270  scatter(const unsigned int *offsets, float *base_ptr) const
1271  {
1272  for (unsigned int i = 0; i < 16; ++i)
1273  for (unsigned int j = i + 1; j < 16; ++j)
1274  Assert(offsets[i] != offsets[j],
1275  ExcMessage("Result of scatter undefined if two offset elements"
1276  " point to the same position"));
1277 
1278  // unfortunately, there does not appear to be a 512 bit integer load, so
1279  // do it by some reinterpret casts here. this is allowed because the Intel
1280  // API allows aliasing between different vector types.
1281  const __m512 index_val =
1282  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1283  const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1284  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1285  }
1286 
1291  __m512 data;
1292 
1293 private:
1298  DEAL_II_ALWAYS_INLINE
1300  get_sqrt() const
1301  {
1302  VectorizedArray res;
1303  res.data = _mm512_sqrt_ps(data);
1304  return res;
1305  }
1306 
1311  DEAL_II_ALWAYS_INLINE
1313  get_abs() const
1314  {
1315  // to compute the absolute value, perform bitwise andnot with -0. This
1316  // will leave all value and exponent bits unchanged but force the sign
1317  // value to +. Since there is no andnot for AVX512, we interpret the data
1318  // as 32 bit integers and do the andnot on those types (note that andnot
1319  // is a bitwise operation so the data type does not matter)
1320  __m512 mask = _mm512_set1_ps(-0.f);
1321  VectorizedArray res;
1322  res.data = reinterpret_cast<__m512>(
1323  _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1324  reinterpret_cast<__m512i>(data)));
1325  return res;
1326  }
1327 
1332  DEAL_II_ALWAYS_INLINE
1334  get_max(const VectorizedArray &other) const
1335  {
1336  VectorizedArray res;
1337  res.data = _mm512_max_ps(data, other.data);
1338  return res;
1339  }
1340 
1345  DEAL_II_ALWAYS_INLINE
1347  get_min(const VectorizedArray &other) const
1348  {
1349  VectorizedArray res;
1350  res.data = _mm512_min_ps(data, other.data);
1351  return res;
1352  }
1353 
1354  // Make a few functions friends.
1355  template <typename Number2, int width2>
1357  std::sqrt(const VectorizedArray<Number2, width2> &);
1358  template <typename Number2, int width2>
1360  std::abs(const VectorizedArray<Number2, width2> &);
1361  template <typename Number2, int width2>
1363  std::max(const VectorizedArray<Number2, width2> &,
1365  template <typename Number2, int width2>
1367  std::min(const VectorizedArray<Number2, width2> &,
1369 };
1370 
1371 
1372 
1376 template <>
1377 inline DEAL_II_ALWAYS_INLINE void
1378 vectorized_load_and_transpose(const unsigned int n_entries,
1379  const float * in,
1380  const unsigned int * offsets,
1382 {
1383  // Similar to the double case, we perform the work on smaller entities. In
1384  // this case, we start from 128 bit arrays and insert them into a full 512
1385  // bit index. This reduces the code size and register pressure because we do
1386  // shuffles on 4 numbers rather than 16.
1387  const unsigned int n_chunks = n_entries / 4;
1388 
1389  // To avoid warnings about uninitialized variables, need to initialize one
1390  // variable to a pre-exisiting value in out, which will never get used in
1391  // the end. Keep the initialization outside the loop because of a bug in
1392  // gcc-9 which generates a "vmovapd" instruction instead of "vmovupd" in
1393  // case t3 is initialized to zero (inside/outside of loop), see
1394  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
1395  __m512 t0, t1, t2, t3;
1396  if (n_chunks > 0)
1397  t3 = out[0].data;
1398  for (unsigned int i = 0; i < n_chunks; ++i)
1399  {
1400  t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1401  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1402  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1403  t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1404  t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1405  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1406  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1407  t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1408  t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1409  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1410  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1411  t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1412  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1413  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1414  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1415  t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1416 
1417  __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1418  __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1419  __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1420  __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1421 
1422  out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1423  out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1424  out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1425  out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1426  }
1427 
1428  // remainder loop of work that does not divide by 4
1429  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1430  for (unsigned int v = 0; v < 8; ++v)
1431  out[i].gather(in + i, offsets);
1432 }
1433 
1434 
1435 
1439 template <>
1440 inline DEAL_II_ALWAYS_INLINE void
1441 vectorized_transpose_and_store(const bool add_into,
1442  const unsigned int n_entries,
1443  const VectorizedArray<float, 16> *in,
1444  const unsigned int * offsets,
1445  float * out)
1446 {
1447  const unsigned int n_chunks = n_entries / 4;
1448  for (unsigned int i = 0; i < n_chunks; ++i)
1449  {
1450  __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
1451  __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
1452  __m512 t2 =
1453  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
1454  __m512 t3 =
1455  _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
1456  __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
1457  __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
1458  __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
1459  __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
1460 
1461  __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
1462  __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
1463  __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
1464  __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
1465  __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
1466  __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
1467  __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
1468  __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
1469  __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
1470  __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
1471  __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
1472  __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
1473  __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
1474  __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
1475  __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
1476  __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
1477 
1478  // Cannot use the same store instructions in both paths of the 'if'
1479  // because the compiler cannot know that there is no aliasing between
1480  // pointers
1481  if (add_into)
1482  {
1483  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
1484  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1485  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
1486  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1487  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
1488  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1489  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
1490  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1491  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
1492  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1493  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
1494  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1495  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
1496  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1497  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
1498  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1499  res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
1500  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1501  res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
1502  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1503  res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
1504  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1505  res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
1506  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1507  res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
1508  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1509  res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
1510  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1511  res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
1512  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1513  res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
1514  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1515  }
1516  else
1517  {
1518  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
1519  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
1520  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
1521  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
1522  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
1523  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
1524  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
1525  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
1526  _mm_storeu_ps(out + 4 * i + offsets[8], res8);
1527  _mm_storeu_ps(out + 4 * i + offsets[9], res9);
1528  _mm_storeu_ps(out + 4 * i + offsets[10], res10);
1529  _mm_storeu_ps(out + 4 * i + offsets[11], res11);
1530  _mm_storeu_ps(out + 4 * i + offsets[12], res12);
1531  _mm_storeu_ps(out + 4 * i + offsets[13], res13);
1532  _mm_storeu_ps(out + 4 * i + offsets[14], res14);
1533  _mm_storeu_ps(out + 4 * i + offsets[15], res15);
1534  }
1535  }
1536 
1537  // remainder loop of work that does not divide by 4
1538  if (add_into)
1539  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1540  for (unsigned int v = 0; v < 16; ++v)
1541  out[offsets[v] + i] += in[i][v];
1542  else
1543  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1544  for (unsigned int v = 0; v < 16; ++v)
1545  out[offsets[v] + i] = in[i][v];
1546 }
1547 
1548 #endif
1549 
1550 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1551 
1555 template <>
1556 class VectorizedArray<double, 4>
1557 {
1558 public:
1562  using value_type = double;
1563 
1567  static const unsigned int n_array_elements = 4;
1568 
1573  VectorizedArray() = default;
1574 
1578  VectorizedArray(const double scalar)
1579  {
1580  this->operator=(scalar);
1581  }
1582 
1586  DEAL_II_ALWAYS_INLINE
1587  VectorizedArray &
1588  operator=(const double x)
1589  {
1590  data = _mm256_set1_pd(x);
1591  return *this;
1592  }
1593 
1597  DEAL_II_ALWAYS_INLINE
1598  double &operator[](const unsigned int comp)
1599  {
1600  AssertIndexRange(comp, 4);
1601  return *(reinterpret_cast<double *>(&data) + comp);
1602  }
1603 
1607  DEAL_II_ALWAYS_INLINE
1608  const double &operator[](const unsigned int comp) const
1609  {
1610  AssertIndexRange(comp, 4);
1611  return *(reinterpret_cast<const double *>(&data) + comp);
1612  }
1613 
1617  DEAL_II_ALWAYS_INLINE
1618  VectorizedArray &
1619  operator+=(const VectorizedArray &vec)
1620  {
1621  // if the compiler supports vector arithmetics, we can simply use +=
1622  // operator on the given data type. this allows the compiler to combine
1623  // additions with multiplication (fused multiply-add) if those
1624  // instructions are available. Otherwise, we need to use the built-in
1625  // intrinsic command for __m256d
1626 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1627  data += vec.data;
1628 # else
1629  data = _mm256_add_pd(data, vec.data);
1630 # endif
1631  return *this;
1632  }
1633 
1637  DEAL_II_ALWAYS_INLINE
1638  VectorizedArray &
1639  operator-=(const VectorizedArray &vec)
1640  {
1641 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1642  data -= vec.data;
1643 # else
1644  data = _mm256_sub_pd(data, vec.data);
1645 # endif
1646  return *this;
1647  }
1651  DEAL_II_ALWAYS_INLINE
1652  VectorizedArray &
1653  operator*=(const VectorizedArray &vec)
1654  {
1655 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1656  data *= vec.data;
1657 # else
1658  data = _mm256_mul_pd(data, vec.data);
1659 # endif
1660  return *this;
1661  }
1662 
1666  DEAL_II_ALWAYS_INLINE
1667  VectorizedArray &
1668  operator/=(const VectorizedArray &vec)
1669  {
1670 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1671  data /= vec.data;
1672 # else
1673  data = _mm256_div_pd(data, vec.data);
1674 # endif
1675  return *this;
1676  }
1677 
1683  DEAL_II_ALWAYS_INLINE
1684  void
1685  load(const double *ptr)
1686  {
1687  data = _mm256_loadu_pd(ptr);
1688  }
1689 
1696  DEAL_II_ALWAYS_INLINE
1697  void
1698  store(double *ptr) const
1699  {
1700  _mm256_storeu_pd(ptr, data);
1701  }
1702 
1706  DEAL_II_ALWAYS_INLINE
1707  void
1708  streaming_store(double *ptr) const
1709  {
1710  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
1711  ExcMessage("Memory not aligned"));
1712  _mm256_stream_pd(ptr, data);
1713  }
1714 
1727  DEAL_II_ALWAYS_INLINE
1728  void
1729  gather(const double *base_ptr, const unsigned int *offsets)
1730  {
1731 # ifdef __AVX2__
1732  // unfortunately, there does not appear to be a 128 bit integer load, so
1733  // do it by some reinterpret casts here. this is allowed because the Intel
1734  // API allows aliasing between different vector types.
1735  const __m128 index_val =
1736  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
1737  const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
1738  data = _mm256_i32gather_pd(base_ptr, index, 8);
1739 # else
1740  for (unsigned int i = 0; i < 4; ++i)
1741  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1742 # endif
1743  }
1744 
1757  DEAL_II_ALWAYS_INLINE
1758  void
1759  scatter(const unsigned int *offsets, double *base_ptr) const
1760  {
1761  // no scatter operation in AVX/AVX2
1762  for (unsigned int i = 0; i < 4; ++i)
1763  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1764  }
1765 
1770  __m256d data;
1771 
1772 private:
1777  DEAL_II_ALWAYS_INLINE
1779  get_sqrt() const
1780  {
1781  VectorizedArray res;
1782  res.data = _mm256_sqrt_pd(data);
1783  return res;
1784  }
1785 
1790  DEAL_II_ALWAYS_INLINE
1792  get_abs() const
1793  {
1794  // to compute the absolute value, perform bitwise andnot with -0. This
1795  // will leave all value and exponent bits unchanged but force the sign
1796  // value to +.
1797  __m256d mask = _mm256_set1_pd(-0.);
1798  VectorizedArray res;
1799  res.data = _mm256_andnot_pd(mask, data);
1800  return res;
1801  }
1802 
1807  DEAL_II_ALWAYS_INLINE
1809  get_max(const VectorizedArray &other) const
1810  {
1811  VectorizedArray res;
1812  res.data = _mm256_max_pd(data, other.data);
1813  return res;
1814  }
1815 
1820  DEAL_II_ALWAYS_INLINE
1822  get_min(const VectorizedArray &other) const
1823  {
1824  VectorizedArray res;
1825  res.data = _mm256_min_pd(data, other.data);
1826  return res;
1827  }
1828 
1829  // Make a few functions friends.
1830  template <typename Number2, int width2>
1832  std::sqrt(const VectorizedArray<Number2, width2> &);
1833  template <typename Number2, int width2>
1835  std::abs(const VectorizedArray<Number2, width2> &);
1836  template <typename Number2, int width2>
1838  std::max(const VectorizedArray<Number2, width2> &,
1840  template <typename Number2, int width2>
1842  std::min(const VectorizedArray<Number2, width2> &,
1844 };
1845 
1846 
1847 
1851 template <>
1852 inline DEAL_II_ALWAYS_INLINE void
1853 vectorized_load_and_transpose(const unsigned int n_entries,
1854  const double * in,
1855  const unsigned int * offsets,
1857 {
1858  const unsigned int n_chunks = n_entries / 4;
1859  const double * in0 = in + offsets[0];
1860  const double * in1 = in + offsets[1];
1861  const double * in2 = in + offsets[2];
1862  const double * in3 = in + offsets[3];
1863 
1864  for (unsigned int i = 0; i < n_chunks; ++i)
1865  {
1866  __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
1867  __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
1868  __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
1869  __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
1870  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
1871  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
1872  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
1873  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
1874  out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
1875  out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
1876  out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
1877  out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
1878  }
1879 
1880  // remainder loop of work that does not divide by 4
1881  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1882  out[i].gather(in + i, offsets);
1883 }
1884 
1885 
1886 
1890 template <>
1891 inline DEAL_II_ALWAYS_INLINE void
1892 vectorized_transpose_and_store(const bool add_into,
1893  const unsigned int n_entries,
1894  const VectorizedArray<double, 4> *in,
1895  const unsigned int * offsets,
1896  double * out)
1897 {
1898  const unsigned int n_chunks = n_entries / 4;
1899  double * out0 = out + offsets[0];
1900  double * out1 = out + offsets[1];
1901  double * out2 = out + offsets[2];
1902  double * out3 = out + offsets[3];
1903  for (unsigned int i = 0; i < n_chunks; ++i)
1904  {
1905  __m256d u0 = in[4 * i + 0].data;
1906  __m256d u1 = in[4 * i + 1].data;
1907  __m256d u2 = in[4 * i + 2].data;
1908  __m256d u3 = in[4 * i + 3].data;
1909  __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
1910  __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
1911  __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
1912  __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
1913  __m256d res0 = _mm256_unpacklo_pd(t0, t1);
1914  __m256d res1 = _mm256_unpackhi_pd(t0, t1);
1915  __m256d res2 = _mm256_unpacklo_pd(t2, t3);
1916  __m256d res3 = _mm256_unpackhi_pd(t2, t3);
1917 
1918  // Cannot use the same store instructions in both paths of the 'if'
1919  // because the compiler cannot know that there is no aliasing between
1920  // pointers
1921  if (add_into)
1922  {
1923  res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
1924  _mm256_storeu_pd(out0 + 4 * i, res0);
1925  res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
1926  _mm256_storeu_pd(out1 + 4 * i, res1);
1927  res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
1928  _mm256_storeu_pd(out2 + 4 * i, res2);
1929  res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
1930  _mm256_storeu_pd(out3 + 4 * i, res3);
1931  }
1932  else
1933  {
1934  _mm256_storeu_pd(out0 + 4 * i, res0);
1935  _mm256_storeu_pd(out1 + 4 * i, res1);
1936  _mm256_storeu_pd(out2 + 4 * i, res2);
1937  _mm256_storeu_pd(out3 + 4 * i, res3);
1938  }
1939  }
1940 
1941  // remainder loop of work that does not divide by 4
1942  if (add_into)
1943  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1944  for (unsigned int v = 0; v < 4; ++v)
1945  out[offsets[v] + i] += in[i][v];
1946  else
1947  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1948  for (unsigned int v = 0; v < 4; ++v)
1949  out[offsets[v] + i] = in[i][v];
1950 }
1951 
1952 
1953 
1957 template <>
1958 class VectorizedArray<float, 8>
1959 {
1960 public:
1964  using value_type = float;
1965 
1969  static const unsigned int n_array_elements = 8;
1970 
1975  VectorizedArray() = default;
1976 
1980  VectorizedArray(const float scalar)
1981  {
1982  this->operator=(scalar);
1983  }
1984 
1988  DEAL_II_ALWAYS_INLINE
1989  VectorizedArray &
1990  operator=(const float x)
1991  {
1992  data = _mm256_set1_ps(x);
1993  return *this;
1994  }
1995 
1999  DEAL_II_ALWAYS_INLINE
2000  float &operator[](const unsigned int comp)
2001  {
2002  AssertIndexRange(comp, 8);
2003  return *(reinterpret_cast<float *>(&data) + comp);
2004  }
2005 
2009  DEAL_II_ALWAYS_INLINE
2010  const float &operator[](const unsigned int comp) const
2011  {
2012  AssertIndexRange(comp, 8);
2013  return *(reinterpret_cast<const float *>(&data) + comp);
2014  }
2015 
2019  DEAL_II_ALWAYS_INLINE
2020  VectorizedArray &
2021  operator+=(const VectorizedArray &vec)
2022  {
2023  // if the compiler supports vector arithmetics, we can simply use +=
2024  // operator on the given data type. this allows the compiler to combine
2025  // additions with multiplication (fused multiply-add) if those
2026  // instructions are available. Otherwise, we need to use the built-in
2027  // intrinsic command for __m256d
2028 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2029  data += vec.data;
2030 # else
2031  data = _mm256_add_ps(data, vec.data);
2032 # endif
2033  return *this;
2034  }
2035 
2039  DEAL_II_ALWAYS_INLINE
2040  VectorizedArray &
2041  operator-=(const VectorizedArray &vec)
2042  {
2043 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2044  data -= vec.data;
2045 # else
2046  data = _mm256_sub_ps(data, vec.data);
2047 # endif
2048  return *this;
2049  }
2053  DEAL_II_ALWAYS_INLINE
2054  VectorizedArray &
2055  operator*=(const VectorizedArray &vec)
2056  {
2057 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2058  data *= vec.data;
2059 # else
2060  data = _mm256_mul_ps(data, vec.data);
2061 # endif
2062  return *this;
2063  }
2064 
2068  DEAL_II_ALWAYS_INLINE
2069  VectorizedArray &
2070  operator/=(const VectorizedArray &vec)
2071  {
2072 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2073  data /= vec.data;
2074 # else
2075  data = _mm256_div_ps(data, vec.data);
2076 # endif
2077  return *this;
2078  }
2079 
2085  DEAL_II_ALWAYS_INLINE
2086  void
2087  load(const float *ptr)
2088  {
2089  data = _mm256_loadu_ps(ptr);
2090  }
2091 
2098  DEAL_II_ALWAYS_INLINE
2099  void
2100  store(float *ptr) const
2101  {
2102  _mm256_storeu_ps(ptr, data);
2103  }
2104 
2108  DEAL_II_ALWAYS_INLINE
2109  void
2110  streaming_store(float *ptr) const
2111  {
2112  Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2113  ExcMessage("Memory not aligned"));
2114  _mm256_stream_ps(ptr, data);
2115  }
2116 
2129  DEAL_II_ALWAYS_INLINE
2130  void
2131  gather(const float *base_ptr, const unsigned int *offsets)
2132  {
2133 # ifdef __AVX2__
2134  // unfortunately, there does not appear to be a 256 bit integer load, so
2135  // do it by some reinterpret casts here. this is allowed because the Intel
2136  // API allows aliasing between different vector types.
2137  const __m256 index_val =
2138  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2139  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2140  data = _mm256_i32gather_ps(base_ptr, index, 4);
2141 # else
2142  for (unsigned int i = 0; i < 8; ++i)
2143  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2144 # endif
2145  }
2146 
2159  DEAL_II_ALWAYS_INLINE
2160  void
2161  scatter(const unsigned int *offsets, float *base_ptr) const
2162  {
2163  // no scatter operation in AVX/AVX2
2164  for (unsigned int i = 0; i < 8; ++i)
2165  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2166  }
2167 
2172  __m256 data;
2173 
2174 private:
2179  DEAL_II_ALWAYS_INLINE
2181  get_sqrt() const
2182  {
2183  VectorizedArray res;
2184  res.data = _mm256_sqrt_ps(data);
2185  return res;
2186  }
2187 
2192  DEAL_II_ALWAYS_INLINE
2194  get_abs() const
2195  {
2196  // to compute the absolute value, perform bitwise andnot with -0. This
2197  // will leave all value and exponent bits unchanged but force the sign
2198  // value to +.
2199  __m256 mask = _mm256_set1_ps(-0.f);
2200  VectorizedArray res;
2201  res.data = _mm256_andnot_ps(mask, data);
2202  return res;
2203  }
2204 
2209  DEAL_II_ALWAYS_INLINE
2211  get_max(const VectorizedArray &other) const
2212  {
2213  VectorizedArray res;
2214  res.data = _mm256_max_ps(data, other.data);
2215  return res;
2216  }
2217 
2222  DEAL_II_ALWAYS_INLINE
2224  get_min(const VectorizedArray &other) const
2225  {
2226  VectorizedArray res;
2227  res.data = _mm256_min_ps(data, other.data);
2228  return res;
2229  }
2230 
2231  // Make a few functions friends.
2232  template <typename Number2, int width2>
2234  std::sqrt(const VectorizedArray<Number2, width2> &);
2235  template <typename Number2, int width2>
2237  std::abs(const VectorizedArray<Number2, width2> &);
2238  template <typename Number2, int width2>
2240  std::max(const VectorizedArray<Number2, width2> &,
2242  template <typename Number2, int width2>
2244  std::min(const VectorizedArray<Number2, width2> &,
2246 };
2247 
2248 
2249 
2253 template <>
2254 inline DEAL_II_ALWAYS_INLINE void
2255 vectorized_load_and_transpose(const unsigned int n_entries,
2256  const float * in,
2257  const unsigned int * offsets,
2259 {
2260  const unsigned int n_chunks = n_entries / 4;
2261  for (unsigned int i = 0; i < n_chunks; ++i)
2262  {
2263  // To avoid warnings about uninitialized variables, need to initialize
2264  // one variable with zero before using it.
2265  __m256 t0, t1, t2, t3 = {};
2266  t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
2267  t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
2268  t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
2269  t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
2270  t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
2271  t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
2272  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
2273  t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
2274 
2275  __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
2276  __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
2277  __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
2278  __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
2279  out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
2280  out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
2281  out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
2282  out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
2283  }
2284 
2285  // remainder loop of work that does not divide by 4
2286  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2287  out[i].gather(in + i, offsets);
2288 }
2289 
2290 
2291 
2295 template <>
2296 inline DEAL_II_ALWAYS_INLINE void
2297 vectorized_transpose_and_store(const bool add_into,
2298  const unsigned int n_entries,
2299  const VectorizedArray<float, 8> *in,
2300  const unsigned int * offsets,
2301  float * out)
2302 {
2303  const unsigned int n_chunks = n_entries / 4;
2304  for (unsigned int i = 0; i < n_chunks; ++i)
2305  {
2306  __m256 u0 = in[4 * i + 0].data;
2307  __m256 u1 = in[4 * i + 1].data;
2308  __m256 u2 = in[4 * i + 2].data;
2309  __m256 u3 = in[4 * i + 3].data;
2310  __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
2311  __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
2312  __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
2313  __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
2314  u0 = _mm256_shuffle_ps(t0, t2, 0x88);
2315  u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
2316  u2 = _mm256_shuffle_ps(t1, t3, 0x88);
2317  u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
2318  __m128 res0 = _mm256_extractf128_ps(u0, 0);
2319  __m128 res4 = _mm256_extractf128_ps(u0, 1);
2320  __m128 res1 = _mm256_extractf128_ps(u1, 0);
2321  __m128 res5 = _mm256_extractf128_ps(u1, 1);
2322  __m128 res2 = _mm256_extractf128_ps(u2, 0);
2323  __m128 res6 = _mm256_extractf128_ps(u2, 1);
2324  __m128 res3 = _mm256_extractf128_ps(u3, 0);
2325  __m128 res7 = _mm256_extractf128_ps(u3, 1);
2326 
2327  // Cannot use the same store instructions in both paths of the 'if'
2328  // because the compiler cannot know that there is no aliasing between
2329  // pointers
2330  if (add_into)
2331  {
2332  res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2333  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2334  res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2335  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2336  res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2337  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2338  res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2339  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2340  res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2341  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2342  res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2343  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2344  res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2345  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2346  res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2347  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2348  }
2349  else
2350  {
2351  _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2352  _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2353  _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2354  _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2355  _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2356  _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2357  _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2358  _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2359  }
2360  }
2361 
2362  // remainder loop of work that does not divide by 4
2363  if (add_into)
2364  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2365  for (unsigned int v = 0; v < 8; ++v)
2366  out[offsets[v] + i] += in[i][v];
2367  else
2368  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2369  for (unsigned int v = 0; v < 8; ++v)
2370  out[offsets[v] + i] = in[i][v];
2371 }
2372 
2373 #endif
2374 
2375 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2376 
2380 template <>
2381 class VectorizedArray<double, 2>
2382 {
2383 public:
2387  using value_type = double;
2388 
2392  static const unsigned int n_array_elements = 2;
2393 
2398  VectorizedArray() = default;
2399 
2403  VectorizedArray(const double scalar)
2404  {
2405  this->operator=(scalar);
2406  }
2407 
2411  DEAL_II_ALWAYS_INLINE
2412  VectorizedArray &
2413  operator=(const double x)
2414  {
2415  data = _mm_set1_pd(x);
2416  return *this;
2417  }
2418 
2422  DEAL_II_ALWAYS_INLINE
2423  double &operator[](const unsigned int comp)
2424  {
2425  AssertIndexRange(comp, 2);
2426  return *(reinterpret_cast<double *>(&data) + comp);
2427  }
2428 
2432  DEAL_II_ALWAYS_INLINE
2433  const double &operator[](const unsigned int comp) const
2434  {
2435  AssertIndexRange(comp, 2);
2436  return *(reinterpret_cast<const double *>(&data) + comp);
2437  }
2438 
2442  DEAL_II_ALWAYS_INLINE
2443  VectorizedArray &
2444  operator+=(const VectorizedArray &vec)
2445  {
2446 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2447  data += vec.data;
2448 # else
2449  data = _mm_add_pd(data, vec.data);
2450 # endif
2451  return *this;
2452  }
2453 
2457  DEAL_II_ALWAYS_INLINE
2458  VectorizedArray &
2459  operator-=(const VectorizedArray &vec)
2460  {
2461 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2462  data -= vec.data;
2463 # else
2464  data = _mm_sub_pd(data, vec.data);
2465 # endif
2466  return *this;
2467  }
2468 
2472  DEAL_II_ALWAYS_INLINE
2473  VectorizedArray &
2474  operator*=(const VectorizedArray &vec)
2475  {
2476 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2477  data *= vec.data;
2478 # else
2479  data = _mm_mul_pd(data, vec.data);
2480 # endif
2481  return *this;
2482  }
2483 
2487  DEAL_II_ALWAYS_INLINE
2488  VectorizedArray &
2489  operator/=(const VectorizedArray &vec)
2490  {
2491 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2492  data /= vec.data;
2493 # else
2494  data = _mm_div_pd(data, vec.data);
2495 # endif
2496  return *this;
2497  }
2498 
2504  DEAL_II_ALWAYS_INLINE
2505  void
2506  load(const double *ptr)
2507  {
2508  data = _mm_loadu_pd(ptr);
2509  }
2510 
2517  DEAL_II_ALWAYS_INLINE
2518  void
2519  store(double *ptr) const
2520  {
2521  _mm_storeu_pd(ptr, data);
2522  }
2523 
2527  DEAL_II_ALWAYS_INLINE
2528  void
2529  streaming_store(double *ptr) const
2530  {
2531  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2532  ExcMessage("Memory not aligned"));
2533  _mm_stream_pd(ptr, data);
2534  }
2535 
2548  DEAL_II_ALWAYS_INLINE
2549  void
2550  gather(const double *base_ptr, const unsigned int *offsets)
2551  {
2552  for (unsigned int i = 0; i < 2; ++i)
2553  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2554  }
2555 
2568  DEAL_II_ALWAYS_INLINE
2569  void
2570  scatter(const unsigned int *offsets, double *base_ptr) const
2571  {
2572  for (unsigned int i = 0; i < 2; ++i)
2573  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2574  }
2575 
2580  __m128d data;
2581 
2582 private:
2587  DEAL_II_ALWAYS_INLINE
2589  get_sqrt() const
2590  {
2591  VectorizedArray res;
2592  res.data = _mm_sqrt_pd(data);
2593  return res;
2594  }
2595 
2600  DEAL_II_ALWAYS_INLINE
2602  get_abs() const
2603  {
2604  // to compute the absolute value, perform
2605  // bitwise andnot with -0. This will leave all
2606  // value and exponent bits unchanged but force
2607  // the sign value to +.
2608  __m128d mask = _mm_set1_pd(-0.);
2609  VectorizedArray res;
2610  res.data = _mm_andnot_pd(mask, data);
2611  return res;
2612  }
2613 
2618  DEAL_II_ALWAYS_INLINE
2620  get_max(const VectorizedArray &other) const
2621  {
2622  VectorizedArray res;
2623  res.data = _mm_max_pd(data, other.data);
2624  return res;
2625  }
2626 
2631  DEAL_II_ALWAYS_INLINE
2633  get_min(const VectorizedArray &other) const
2634  {
2635  VectorizedArray res;
2636  res.data = _mm_min_pd(data, other.data);
2637  return res;
2638  }
2639 
2640  // Make a few functions friends.
2641  template <typename Number2, int width2>
2643  std::sqrt(const VectorizedArray<Number2, width2> &);
2644  template <typename Number2, int width2>
2646  std::abs(const VectorizedArray<Number2, width2> &);
2647  template <typename Number2, int width2>
2649  std::max(const VectorizedArray<Number2, width2> &,
2651  template <typename Number2, int width2>
2653  std::min(const VectorizedArray<Number2, width2> &,
2655 };
2656 
2657 
2658 
2662 template <>
2663 inline DEAL_II_ALWAYS_INLINE void
2664 vectorized_load_and_transpose(const unsigned int n_entries,
2665  const double * in,
2666  const unsigned int * offsets,
2668 {
2669  const unsigned int n_chunks = n_entries / 2;
2670  for (unsigned int i = 0; i < n_chunks; ++i)
2671  {
2672  __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
2673  __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
2674  out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
2675  out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
2676  }
2677 
2678  // remainder loop of work that does not divide by 2
2679  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2680  for (unsigned int v = 0; v < 2; ++v)
2681  out[i][v] = in[offsets[v] + i];
2682 }
2683 
2684 
2685 
2689 template <>
2690 inline DEAL_II_ALWAYS_INLINE void
2691 vectorized_transpose_and_store(const bool add_into,
2692  const unsigned int n_entries,
2693  const VectorizedArray<double, 2> *in,
2694  const unsigned int * offsets,
2695  double * out)
2696 {
2697  const unsigned int n_chunks = n_entries / 2;
2698  if (add_into)
2699  {
2700  for (unsigned int i = 0; i < n_chunks; ++i)
2701  {
2702  __m128d u0 = in[2 * i + 0].data;
2703  __m128d u1 = in[2 * i + 1].data;
2704  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2705  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2706  _mm_storeu_pd(out + 2 * i + offsets[0],
2707  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
2708  res0));
2709  _mm_storeu_pd(out + 2 * i + offsets[1],
2710  _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
2711  res1));
2712  }
2713  // remainder loop of work that does not divide by 2
2714  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2715  for (unsigned int v = 0; v < 2; ++v)
2716  out[offsets[v] + i] += in[i][v];
2717  }
2718  else
2719  {
2720  for (unsigned int i = 0; i < n_chunks; ++i)
2721  {
2722  __m128d u0 = in[2 * i + 0].data;
2723  __m128d u1 = in[2 * i + 1].data;
2724  __m128d res0 = _mm_unpacklo_pd(u0, u1);
2725  __m128d res1 = _mm_unpackhi_pd(u0, u1);
2726  _mm_storeu_pd(out + 2 * i + offsets[0], res0);
2727  _mm_storeu_pd(out + 2 * i + offsets[1], res1);
2728  }
2729  // remainder loop of work that does not divide by 2
2730  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2731  for (unsigned int v = 0; v < 2; ++v)
2732  out[offsets[v] + i] = in[i][v];
2733  }
2734 }
2735 
2736 
2737 
2741 template <>
2742 class VectorizedArray<float, 4>
2743 {
2744 public:
2748  using value_type = float;
2749 
2753  static const unsigned int n_array_elements = 4;
2754 
2763  VectorizedArray() = default;
2764 
2768  VectorizedArray(const float scalar)
2769  {
2770  this->operator=(scalar);
2771  }
2772 
2773  DEAL_II_ALWAYS_INLINE
2774  VectorizedArray &
2775  operator=(const float x)
2776  {
2777  data = _mm_set1_ps(x);
2778  return *this;
2779  }
2780 
2784  DEAL_II_ALWAYS_INLINE
2785  float &operator[](const unsigned int comp)
2786  {
2787  AssertIndexRange(comp, 4);
2788  return *(reinterpret_cast<float *>(&data) + comp);
2789  }
2790 
2794  DEAL_II_ALWAYS_INLINE
2795  const float &operator[](const unsigned int comp) const
2796  {
2797  AssertIndexRange(comp, 4);
2798  return *(reinterpret_cast<const float *>(&data) + comp);
2799  }
2800 
2804  DEAL_II_ALWAYS_INLINE
2805  VectorizedArray &
2806  operator+=(const VectorizedArray &vec)
2807  {
2808 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2809  data += vec.data;
2810 # else
2811  data = _mm_add_ps(data, vec.data);
2812 # endif
2813  return *this;
2814  }
2815 
2819  DEAL_II_ALWAYS_INLINE
2820  VectorizedArray &
2821  operator-=(const VectorizedArray &vec)
2822  {
2823 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2824  data -= vec.data;
2825 # else
2826  data = _mm_sub_ps(data, vec.data);
2827 # endif
2828  return *this;
2829  }
2830 
2834  DEAL_II_ALWAYS_INLINE
2835  VectorizedArray &
2836  operator*=(const VectorizedArray &vec)
2837  {
2838 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2839  data *= vec.data;
2840 # else
2841  data = _mm_mul_ps(data, vec.data);
2842 # endif
2843  return *this;
2844  }
2845 
2849  DEAL_II_ALWAYS_INLINE
2850  VectorizedArray &
2851  operator/=(const VectorizedArray &vec)
2852  {
2853 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2854  data /= vec.data;
2855 # else
2856  data = _mm_div_ps(data, vec.data);
2857 # endif
2858  return *this;
2859  }
2860 
2866  DEAL_II_ALWAYS_INLINE
2867  void
2868  load(const float *ptr)
2869  {
2870  data = _mm_loadu_ps(ptr);
2871  }
2872 
2879  DEAL_II_ALWAYS_INLINE
2880  void
2881  store(float *ptr) const
2882  {
2883  _mm_storeu_ps(ptr, data);
2884  }
2885 
2889  DEAL_II_ALWAYS_INLINE
2890  void
2891  streaming_store(float *ptr) const
2892  {
2893  Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2894  ExcMessage("Memory not aligned"));
2895  _mm_stream_ps(ptr, data);
2896  }
2897 
2910  DEAL_II_ALWAYS_INLINE
2911  void
2912  gather(const float *base_ptr, const unsigned int *offsets)
2913  {
2914  for (unsigned int i = 0; i < 4; ++i)
2915  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2916  }
2917 
2930  DEAL_II_ALWAYS_INLINE
2931  void
2932  scatter(const unsigned int *offsets, float *base_ptr) const
2933  {
2934  for (unsigned int i = 0; i < 4; ++i)
2935  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2936  }
2937 
2942  __m128 data;
2943 
2944 private:
2949  DEAL_II_ALWAYS_INLINE
2951  get_sqrt() const
2952  {
2953  VectorizedArray res;
2954  res.data = _mm_sqrt_ps(data);
2955  return res;
2956  }
2957 
2962  DEAL_II_ALWAYS_INLINE
2964  get_abs() const
2965  {
2966  // to compute the absolute value, perform bitwise andnot with -0. This
2967  // will leave all value and exponent bits unchanged but force the sign
2968  // value to +.
2969  __m128 mask = _mm_set1_ps(-0.f);
2970  VectorizedArray res;
2971  res.data = _mm_andnot_ps(mask, data);
2972  return res;
2973  }
2974 
2979  DEAL_II_ALWAYS_INLINE
2981  get_max(const VectorizedArray &other) const
2982  {
2983  VectorizedArray res;
2984  res.data = _mm_max_ps(data, other.data);
2985  return res;
2986  }
2987 
2992  DEAL_II_ALWAYS_INLINE
2994  get_min(const VectorizedArray &other) const
2995  {
2996  VectorizedArray res;
2997  res.data = _mm_min_ps(data, other.data);
2998  return res;
2999  }
3000 
3001  // Make a few functions friends.
3002  template <typename Number2, int width2>
3004  std::sqrt(const VectorizedArray<Number2, width2> &);
3005  template <typename Number2, int width2>
3007  std::abs(const VectorizedArray<Number2, width2> &);
3008  template <typename Number2, int width2>
3010  std::max(const VectorizedArray<Number2, width2> &,
3012  template <typename Number2, int width2>
3014  std::min(const VectorizedArray<Number2, width2> &,
3016 };
3017 
3018 
3019 
3023 template <>
3024 inline DEAL_II_ALWAYS_INLINE void
3025 vectorized_load_and_transpose(const unsigned int n_entries,
3026  const float * in,
3027  const unsigned int * offsets,
3029 {
3030  const unsigned int n_chunks = n_entries / 4;
3031  for (unsigned int i = 0; i < n_chunks; ++i)
3032  {
3033  __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
3034  __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
3035  __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
3036  __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
3037  __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
3038  __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
3039  __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
3040  __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
3041  out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
3042  out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
3043  out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
3044  out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
3045  }
3046 
3047  // remainder loop of work that does not divide by 4
3048  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3049  for (unsigned int v = 0; v < 4; ++v)
3050  out[i][v] = in[offsets[v] + i];
3051 }
3052 
3053 
3054 
3058 template <>
3059 inline DEAL_II_ALWAYS_INLINE void
3060 vectorized_transpose_and_store(const bool add_into,
3061  const unsigned int n_entries,
3062  const VectorizedArray<float, 4> *in,
3063  const unsigned int * offsets,
3064  float * out)
3065 {
3066  const unsigned int n_chunks = n_entries / 4;
3067  for (unsigned int i = 0; i < n_chunks; ++i)
3068  {
3069  __m128 u0 = in[4 * i + 0].data;
3070  __m128 u1 = in[4 * i + 1].data;
3071  __m128 u2 = in[4 * i + 2].data;
3072  __m128 u3 = in[4 * i + 3].data;
3073  __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
3074  __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
3075  __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
3076  __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
3077  u0 = _mm_shuffle_ps(t0, t2, 0x88);
3078  u1 = _mm_shuffle_ps(t0, t2, 0xdd);
3079  u2 = _mm_shuffle_ps(t1, t3, 0x88);
3080  u3 = _mm_shuffle_ps(t1, t3, 0xdd);
3081 
3082  // Cannot use the same store instructions in both paths of the 'if'
3083  // because the compiler cannot know that there is no aliasing between
3084  // pointers
3085  if (add_into)
3086  {
3087  u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
3088  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3089  u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
3090  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3091  u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
3092  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3093  u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
3094  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3095  }
3096  else
3097  {
3098  _mm_storeu_ps(out + 4 * i + offsets[0], u0);
3099  _mm_storeu_ps(out + 4 * i + offsets[1], u1);
3100  _mm_storeu_ps(out + 4 * i + offsets[2], u2);
3101  _mm_storeu_ps(out + 4 * i + offsets[3], u3);
3102  }
3103  }
3104 
3105  // remainder loop of work that does not divide by 4
3106  if (add_into)
3107  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3108  for (unsigned int v = 0; v < 4; ++v)
3109  out[offsets[v] + i] += in[i][v];
3110  else
3111  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3112  for (unsigned int v = 0; v < 4; ++v)
3113  out[offsets[v] + i] = in[i][v];
3114 }
3115 
3116 
3117 
3118 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__)
3119 
3120 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
3121  defined(__VSX__)
3122 
3123 template <>
3124 class VectorizedArray<double, 2>
3125 {
3126 public:
3130  using value_type = double;
3131 
3135  static const unsigned int n_array_elements = 2;
3136 
3141  VectorizedArray() = default;
3142 
3146  VectorizedArray(const double scalar)
3147  {
3148  this->operator=(scalar);
3149  }
3150 
3154  DEAL_II_ALWAYS_INLINE
3155  VectorizedArray &
3156  operator=(const double x)
3157  {
3158  data = vec_splats(x);
3159  return *this;
3160  }
3161 
3165  DEAL_II_ALWAYS_INLINE
3166  double &operator[](const unsigned int comp)
3167  {
3168  AssertIndexRange(comp, 2);
3169  return *(reinterpret_cast<double *>(&data) + comp);
3170  }
3171 
3175  DEAL_II_ALWAYS_INLINE
3176  const double &operator[](const unsigned int comp) const
3177  {
3178  AssertIndexRange(comp, 2);
3179  return *(reinterpret_cast<const double *>(&data) + comp);
3180  }
3181 
3185  DEAL_II_ALWAYS_INLINE
3186  VectorizedArray &
3187  operator+=(const VectorizedArray &vec)
3188  {
3189  data = vec_add(data, vec.data);
3190  return *this;
3191  }
3192 
3196  DEAL_II_ALWAYS_INLINE
3197  VectorizedArray &
3198  operator-=(const VectorizedArray &vec)
3199  {
3200  data = vec_sub(data, vec.data);
3201  return *this;
3202  }
3203 
3207  DEAL_II_ALWAYS_INLINE
3208  VectorizedArray &
3209  operator*=(const VectorizedArray &vec)
3210  {
3211  data = vec_mul(data, vec.data);
3212  return *this;
3213  }
3214 
3218  DEAL_II_ALWAYS_INLINE
3219  VectorizedArray &
3220  operator/=(const VectorizedArray &vec)
3221  {
3222  data = vec_div(data, vec.data);
3223  return *this;
3224  }
3225 
3230  DEAL_II_ALWAYS_INLINE
3231  void
3232  load(const double *ptr)
3233  {
3234  data = vec_vsx_ld(0, ptr);
3235  }
3236 
3241  DEAL_II_ALWAYS_INLINE
3242  void
3243  store(double *ptr) const
3244  {
3245  vec_vsx_st(data, 0, ptr);
3246  }
3247 
3250  DEAL_II_ALWAYS_INLINE
3251  void
3252  streaming_store(double *ptr) const
3253  {
3254  store(ptr);
3255  }
3256 
3259  DEAL_II_ALWAYS_INLINE
3260  void
3261  gather(const double *base_ptr, const unsigned int *offsets)
3262  {
3263  for (unsigned int i = 0; i < 2; ++i)
3264  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3265  }
3266 
3269  DEAL_II_ALWAYS_INLINE
3270  void
3271  scatter(const unsigned int *offsets, double *base_ptr) const
3272  {
3273  for (unsigned int i = 0; i < 2; ++i)
3274  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3275  }
3276 
3281  __vector double data;
3282 
3283 private:
3288  DEAL_II_ALWAYS_INLINE
3290  get_sqrt() const
3291  {
3292  VectorizedArray res;
3293  res.data = vec_sqrt(data);
3294  return res;
3295  }
3296 
3301  DEAL_II_ALWAYS_INLINE
3303  get_abs() const
3304  {
3305  VectorizedArray res;
3306  res.data = vec_abs(data);
3307  return res;
3308  }
3309 
3314  DEAL_II_ALWAYS_INLINE
3316  get_max(const VectorizedArray &other) const
3317  {
3318  VectorizedArray res;
3319  res.data = vec_max(data, other.data);
3320  return res;
3321  }
3322 
3327  DEAL_II_ALWAYS_INLINE
3329  get_min(const VectorizedArray &other) const
3330  {
3331  VectorizedArray res;
3332  res.data = vec_min(data, other.data);
3333  return res;
3334  }
3335 
3336  // Make a few functions friends.
3337  template <typename Number2, int width2>
3339  std::sqrt(const VectorizedArray<Number2, width2> &);
3340  template <typename Number2, int width2>
3342  std::abs(const VectorizedArray<Number2, width2> &);
3343  template <typename Number2, int width2>
3345  std::max(const VectorizedArray<Number2, width2> &,
3347  template <typename Number2, int width2>
3349  std::min(const VectorizedArray<Number2, width2> &,
3351 };
3352 
3353 
3354 
3355 template <>
3356 class VectorizedArray<float, 4>
3357 {
3358 public:
3362  using value_type = float;
3363 
3367  static const unsigned int n_array_elements = 4;
3368 
3373  VectorizedArray() = default;
3374 
3378  VectorizedArray(const float scalar)
3379  {
3380  this->operator=(scalar);
3381  }
3382 
3386  DEAL_II_ALWAYS_INLINE
3387  VectorizedArray &
3388  operator=(const float x)
3389  {
3390  data = vec_splats(x);
3391  return *this;
3392  }
3393 
3397  DEAL_II_ALWAYS_INLINE
3398  float &operator[](const unsigned int comp)
3399  {
3400  AssertIndexRange(comp, 4);
3401  return *(reinterpret_cast<float *>(&data) + comp);
3402  }
3403 
3407  DEAL_II_ALWAYS_INLINE
3408  const float &operator[](const unsigned int comp) const
3409  {
3410  AssertIndexRange(comp, 4);
3411  return *(reinterpret_cast<const float *>(&data) + comp);
3412  }
3413 
3417  DEAL_II_ALWAYS_INLINE
3418  VectorizedArray &
3419  operator+=(const VectorizedArray &vec)
3420  {
3421  data = vec_add(data, vec.data);
3422  return *this;
3423  }
3424 
3428  DEAL_II_ALWAYS_INLINE
3429  VectorizedArray &
3430  operator-=(const VectorizedArray &vec)
3431  {
3432  data = vec_sub(data, vec.data);
3433  return *this;
3434  }
3435 
3439  DEAL_II_ALWAYS_INLINE
3440  VectorizedArray &
3441  operator*=(const VectorizedArray &vec)
3442  {
3443  data = vec_mul(data, vec.data);
3444  return *this;
3445  }
3446 
3450  DEAL_II_ALWAYS_INLINE
3451  VectorizedArray &
3452  operator/=(const VectorizedArray &vec)
3453  {
3454  data = vec_div(data, vec.data);
3455  return *this;
3456  }
3457 
3462  DEAL_II_ALWAYS_INLINE
3463  void
3464  load(const float *ptr)
3465  {
3466  data = vec_vsx_ld(0, ptr);
3467  }
3468 
3473  DEAL_II_ALWAYS_INLINE
3474  void
3475  store(float *ptr) const
3476  {
3477  vec_vsx_st(data, 0, ptr);
3478  }
3479 
3482  DEAL_II_ALWAYS_INLINE
3483  void
3484  streaming_store(float *ptr) const
3485  {
3486  store(ptr);
3487  }
3488 
3491  DEAL_II_ALWAYS_INLINE
3492  void
3493  gather(const float *base_ptr, const unsigned int *offsets)
3494  {
3495  for (unsigned int i = 0; i < 4; ++i)
3496  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3497  }
3498 
3501  DEAL_II_ALWAYS_INLINE
3502  void
3503  scatter(const unsigned int *offsets, float *base_ptr) const
3504  {
3505  for (unsigned int i = 0; i < 4; ++i)
3506  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3507  }
3508 
3513  __vector float data;
3514 
3515 private:
3520  DEAL_II_ALWAYS_INLINE
3522  get_sqrt() const
3523  {
3524  VectorizedArray res;
3525  res.data = vec_sqrt(data);
3526  return res;
3527  }
3528 
3533  DEAL_II_ALWAYS_INLINE
3535  get_abs() const
3536  {
3537  VectorizedArray res;
3538  res.data = vec_abs(data);
3539  return res;
3540  }
3541 
3546  DEAL_II_ALWAYS_INLINE
3548  get_max(const VectorizedArray &other) const
3549  {
3550  VectorizedArray res;
3551  res.data = vec_max(data, other.data);
3552  return res;
3553  }
3554 
3559  DEAL_II_ALWAYS_INLINE
3561  get_min(const VectorizedArray &other) const
3562  {
3563  VectorizedArray res;
3564  res.data = vec_min(data, other.data);
3565  return res;
3566  }
3567 
3568  // Make a few functions friends.
3569  template <typename Number2, int width2>
3571  std::sqrt(const VectorizedArray<Number2, width2> &);
3572  template <typename Number2, int width2>
3574  std::abs(const VectorizedArray<Number2, width2> &);
3575  template <typename Number2, int width2>
3577  std::max(const VectorizedArray<Number2, width2> &,
3579  template <typename Number2, int width2>
3581  std::min(const VectorizedArray<Number2, width2> &,
3583 };
3584 
3585 #endif // if DEAL_II_VECTORIZATION_LEVEL >=1 && defined(__ALTIVEC__) &&
3586  // defined(__VSX__)
3587 
3588 
3589 
3595 template <typename Number, int width>
3596 inline DEAL_II_ALWAYS_INLINE bool
3598  const VectorizedArray<Number, width> &rhs)
3599 {
3600  for (unsigned int i = 0; i < VectorizedArray<Number, width>::n_array_elements;
3601  ++i)
3602  if (lhs[i] != rhs[i])
3603  return false;
3604 
3605  return true;
3606 }
3607 
3608 
3614 template <typename Number, int width>
3615 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3618 {
3620  return tmp += v;
3621 }
3622 
3628 template <typename Number, int width>
3629 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3632 {
3634  return tmp -= v;
3635 }
3636 
3642 template <typename Number, int width>
3643 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3646 {
3648  return tmp *= v;
3649 }
3650 
3656 template <typename Number, int width>
3657 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3660 {
3662  return tmp /= v;
3663 }
3664 
3671 template <typename Number, int width>
3672 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3673  operator+(const Number &u, const VectorizedArray<Number, width> &v)
3674 {
3676  return tmp += v;
3677 }
3678 
3687 template <int width>
3688 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3689  operator+(const double u, const VectorizedArray<float, width> &v)
3690 {
3692  return tmp += v;
3693 }
3694 
3701 template <typename Number, int width>
3702 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3703  operator+(const VectorizedArray<Number, width> &v, const Number &u)
3704 {
3705  return u + v;
3706 }
3707 
3716 template <int width>
3717 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3718  operator+(const VectorizedArray<float, width> &v, const double u)
3719 {
3720  return u + v;
3721 }
3722 
3729 template <typename Number, int width>
3730 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3731  operator-(const Number &u, const VectorizedArray<Number, width> &v)
3732 {
3734  return tmp -= v;
3735 }
3736 
3745 template <int width>
3746 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3747  operator-(const double u, const VectorizedArray<float, width> &v)
3748 {
3749  VectorizedArray<float, width> tmp = static_cast<float>(u);
3750  return tmp -= v;
3751 }
3752 
3759 template <typename Number, int width>
3760 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3761  operator-(const VectorizedArray<Number, width> &v, const Number &u)
3762 {
3764  return v - tmp;
3765 }
3766 
3775 template <int width>
3776 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3777  operator-(const VectorizedArray<float, width> &v, const double u)
3778 {
3779  VectorizedArray<float, width> tmp = static_cast<float>(u);
3780  return v - tmp;
3781 }
3782 
3789 template <typename Number, int width>
3790 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3791  operator*(const Number &u, const VectorizedArray<Number, width> &v)
3792 {
3794  return tmp *= v;
3795 }
3796 
3805 template <int width>
3806 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3807  operator*(const double u, const VectorizedArray<float, width> &v)
3808 {
3809  VectorizedArray<float, width> tmp = static_cast<float>(u);
3810  return tmp *= v;
3811 }
3812 
3819 template <typename Number, int width>
3820 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3821  operator*(const VectorizedArray<Number, width> &v, const Number &u)
3822 {
3823  return u * v;
3824 }
3825 
3834 template <int width>
3835 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3836  operator*(const VectorizedArray<float, width> &v, const double u)
3837 {
3838  return u * v;
3839 }
3840 
3847 template <typename Number, int width>
3848 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3849  operator/(const Number &u, const VectorizedArray<Number, width> &v)
3850 {
3852  return tmp /= v;
3853 }
3854 
3863 template <int width>
3864 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3865  operator/(const double u, const VectorizedArray<float, width> &v)
3866 {
3867  VectorizedArray<float, width> tmp = static_cast<float>(u);
3868  return tmp /= v;
3869 }
3870 
3877 template <typename Number, int width>
3878 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3879  operator/(const VectorizedArray<Number, width> &v, const Number &u)
3880 {
3882  return v / tmp;
3883 }
3884 
3893 template <int width>
3894 inline DEAL_II_ALWAYS_INLINE VectorizedArray<float, width>
3895  operator/(const VectorizedArray<float, width> &v, const double u)
3896 {
3897  VectorizedArray<float, width> tmp = static_cast<float>(u);
3898  return v / tmp;
3899 }
3900 
3906 template <typename Number, int width>
3907 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3909 {
3910  return u;
3911 }
3912 
3918 template <typename Number, int width>
3919 inline DEAL_II_ALWAYS_INLINE VectorizedArray<Number, width>
3921 {
3922  // to get a negative sign, subtract the input from zero (could also
3923  // multiply by -1, but this one is slightly simpler)
3924  return VectorizedArray<Number, width>() - u;
3925 }
3926 
3932 template <typename Number, int width>
3933 inline std::ostream &
3934 operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
3935 {
3936  constexpr unsigned int n = VectorizedArray<Number, width>::n_array_elements;
3937  for (unsigned int i = 0; i < n - 1; ++i)
3938  out << p[i] << ' ';
3939  out << p[n - 1];
3940 
3941  return out;
3942 }
3943 
3944 DEAL_II_NAMESPACE_CLOSE
3945 
3952 namespace std
3953 {
3961  template <typename Number, int width>
3962  inline ::VectorizedArray<Number, width>
3963  sin(const ::VectorizedArray<Number, width> &x)
3964  {
3965  // put values in an array and later read in that array with an unaligned
3966  // read. This should save some instructions as compared to directly
3967  // setting the individual elements and also circumvents a compiler
3968  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3969  // from April 2014, topic "matrix_free/step-48 Test").
3971  for (unsigned int i = 0;
3972  i < ::VectorizedArray<Number, width>::n_array_elements;
3973  ++i)
3974  values[i] = std::sin(x[i]);
3976  out.load(&values[0]);
3977  return out;
3978  }
3979 
3980 
3981 
3989  template <typename Number, int width>
3990  inline ::VectorizedArray<Number, width>
3991  cos(const ::VectorizedArray<Number, width> &x)
3992  {
3994  for (unsigned int i = 0;
3995  i < ::VectorizedArray<Number, width>::n_array_elements;
3996  ++i)
3997  values[i] = std::cos(x[i]);
3999  out.load(&values[0]);
4000  return out;
4001  }
4002 
4003 
4004 
4012  template <typename Number, int width>
4013  inline ::VectorizedArray<Number, width>
4014  tan(const ::VectorizedArray<Number, width> &x)
4015  {
4017  for (unsigned int i = 0;
4018  i < ::VectorizedArray<Number, width>::n_array_elements;
4019  ++i)
4020  values[i] = std::tan(x[i]);
4022  out.load(&values[0]);
4023  return out;
4024  }
4025 
4026 
4027 
4035  template <typename Number, int width>
4036  inline ::VectorizedArray<Number, width>
4037  exp(const ::VectorizedArray<Number, width> &x)
4038  {
4040  for (unsigned int i = 0;
4041  i < ::VectorizedArray<Number, width>::n_array_elements;
4042  ++i)
4043  values[i] = std::exp(x[i]);
4045  out.load(&values[0]);
4046  return out;
4047  }
4048 
4049 
4050 
4058  template <typename Number, int width>
4059  inline ::VectorizedArray<Number, width>
4060  log(const ::VectorizedArray<Number, width> &x)
4061  {
4063  for (unsigned int i = 0;
4064  i < ::VectorizedArray<Number, width>::n_array_elements;
4065  ++i)
4066  values[i] = std::log(x[i]);
4068  out.load(&values[0]);
4069  return out;
4070  }
4071 
4072 
4073 
4081  template <typename Number, int width>
4082  inline ::VectorizedArray<Number, width>
4083  sqrt(const ::VectorizedArray<Number, width> &x)
4084  {
4085  return x.get_sqrt();
4086  }
4087 
4088 
4089 
4097  template <typename Number, int width>
4098  inline ::VectorizedArray<Number, width>
4099  pow(const ::VectorizedArray<Number, width> &x, const Number p)
4100  {
4102  for (unsigned int i = 0;
4103  i < ::VectorizedArray<Number, width>::n_array_elements;
4104  ++i)
4105  values[i] = std::pow(x[i], p);
4107  out.load(&values[0]);
4108  return out;
4109  }
4110 
4111 
4112 
4120  template <typename Number, int width>
4121  inline ::VectorizedArray<Number, width>
4122  abs(const ::VectorizedArray<Number, width> &x)
4123  {
4124  return x.get_abs();
4125  }
4126 
4127 
4128 
4136  template <typename Number, int width>
4137  inline ::VectorizedArray<Number, width>
4138  max(const ::VectorizedArray<Number, width> &x,
4139  const ::VectorizedArray<Number, width> &y)
4140  {
4141  return x.get_max(y);
4142  }
4143 
4144 
4145 
4153  template <typename Number, int width>
4154  inline ::VectorizedArray<Number, width>
4155  min(const ::VectorizedArray<Number, width> &x,
4156  const ::VectorizedArray<Number, width> &y)
4157  {
4158  return x.get_min(y);
4159  }
4160 
4161 } // namespace std
4162 
4163 #endif
VectorizedArray< Number, width > sin(const ::VectorizedArray< Number, width > &x)
VectorizedArray & operator+=(const VectorizedArray &vec)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray get_sqrt() const
#define AssertIndexRange(index, range)
Definition: exceptions.h:1637
VectorizedArray< float, width > operator*(const VectorizedArray< float, width > &v, const double u)
VectorizedArray< Number, width > exp(const ::VectorizedArray< Number, width > &x)
VectorizedArray< float, width > operator/(const double u, const VectorizedArray< float, width > &v)
VectorizedArray< float, width > operator+(const double u, const VectorizedArray< float, width > &v)
void streaming_store(Number *ptr) const
VectorizedArray< float, width > operator-(const VectorizedArray< float, width > &v, const double u)
STL namespace.
VectorizedArray & operator*=(const VectorizedArray &vec)
VectorizedArray< float, width > operator-(const double u, const VectorizedArray< float, width > &v)
__global__ void vec_add(Number *val, const Number a, const size_type N)
__global__ void gather(Number *val, const IndexType *indices, const Number *v, const IndexType N)
VectorizedArray< Number, width > log(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &x)
void store(Number *ptr) const
VectorizedArray< float, width > operator*(const double u, const VectorizedArray< float, width > &v)
void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, const VectorizedArray< Number, width > *in, const unsigned int *offsets, Number *out)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray & operator-=(const VectorizedArray &vec)
VectorizedArray< Number, width > max(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &y)
static ::ExceptionBase & ExcMessage(std::string arg1)
void gather(const Number *base_ptr, const unsigned int *offsets)
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &v, const Number &u)
#define Assert(cond, exc)
Definition: exceptions.h:1407
VectorizedArray< Number, width > tan(const ::VectorizedArray< Number, width > &x)
VectorizedArray< Number, width > operator+(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > pow(const ::VectorizedArray< Number, width > &x, const Number p)
void load(const Number *ptr)
void vectorized_load_and_transpose(const unsigned int n_entries, const Number *in, const unsigned int *offsets, VectorizedArray< Number, width > *out)
VectorizedArray get_min(const VectorizedArray &other) const
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u)
bool operator==(const VectorizedArray< Number, width > &lhs, const VectorizedArray< Number, width > &rhs)
VectorizedArray< float, width > operator/(const VectorizedArray< float, width > &v, const double u)
VectorizedArray< Number, width > min(const ::VectorizedArray< Number, width > &x, const ::VectorizedArray< Number, width > &y)
VectorizedArray & operator=(const Number scalar)
VectorizedArray get_max(const VectorizedArray &other) const
VectorizedArray< Number, width > operator*(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray & operator/=(const VectorizedArray &vec)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &u)
VectorizedArrayType make_vectorized_array(const typename VectorizedArrayType::value_type &u)
VectorizedArray< Number, width > operator*(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > operator/(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > abs(const ::VectorizedArray< Number, width > &x)
Number & operator[](const unsigned int comp)
VectorizedArray< Number, width > operator-(const Number &u, const VectorizedArray< Number, width > &v)
VectorizedArray(const Number scalar)
VectorizedArray< Number, width > operator-(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray< Number, width > make_vectorized_array(const Number &u)
const Number & operator[](const unsigned int comp) const
VectorizedArray< Number, width > operator/(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray get_abs() const
void scatter(const unsigned int *offsets, Number *base_ptr) const
VectorizedArray< Number, width > operator*(const VectorizedArray< Number, width > &v, const Number &u)
VectorizedArray< float, width > operator+(const VectorizedArray< float, width > &v, const double u)
VectorizedArray< Number, width > operator+(const VectorizedArray< Number, width > &u, const VectorizedArray< Number, width > &v)
VectorizedArray< Number, width > cos(const ::VectorizedArray< Number, width > &x)