Reference documentation for deal.II version Git 3ecc854 2017-02-27 14:34:24 -0500
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii__vectorization_h
18 #define dealii__vectorization_h
19 
20 #include <deal.II/base/config.h>
21 #include <deal.II/base/exceptions.h>
22 #include <deal.II/base/template_constraints.h>
23 
24 #include <cmath>
25 
26 // Note:
27 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
28 // according to the following scheme
29 // #ifdef __AVX512F__
30 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
31 // #elif defined (__AVX__)
32 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
33 // #elif defined (__SSE2__)
34 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
35 // #else
36 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
37 // #endif
38 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
39 // 'check_01_cpu_features.cmake', ensures that these features are not only
40 // present in the compilation unit but also working properly.
41 
42 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
43 #include <immintrin.h>
44 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
45 #include <emmintrin.h>
46 #endif
47 
48 
49 // forward declarations
50 DEAL_II_NAMESPACE_OPEN
51 template <typename Number> class VectorizedArray;
52 template <typename T> struct EnableIfScalar;
53 DEAL_II_NAMESPACE_CLOSE
54 
55 
56 namespace std
57 {
58  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
59  sqrt(const ::dealii::VectorizedArray<Number> &);
60  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
61  abs(const ::dealii::VectorizedArray<Number> &);
62  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
63  max(const ::dealii::VectorizedArray<Number> &, const ::dealii::VectorizedArray<Number> &);
64  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
65  min (const ::dealii::VectorizedArray<Number> &, const ::dealii::VectorizedArray<Number> &);
66 }
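// [Editorial usage sketch, not part of vectorization.h] The overloads declared
// above let the usual math functions act lane-wise on a VectorizedArray. A
// minimal sketch, assuming this header is included and that
// DEAL_II_NAMESPACE_OPEN/CLOSE bracket the namespace 'dealii':
//
//   dealii::VectorizedArray<double> x;
//   x = 2.0;            // broadcast to all lanes
//   x *= x;             // lane-wise multiply: every lane now holds 4.0
//   x = std::sqrt(x);   // lane-wise square root: every lane now holds 2.0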
67 
68 
69 DEAL_II_NAMESPACE_OPEN
70 
71 
72 namespace internal
73 {
84  template <typename T>
85  struct NumberType<VectorizedArray<T> >
86  {
87  static VectorizedArray<T> value (const T &t)
88  {
89  VectorizedArray<T> tmp;
90  tmp=t;
91  return tmp;
92  }
93  };
94 }
95 
96 
97 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
98 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
99 
100 template<typename Number>
101 struct EnableIfScalar<VectorizedArray<Number> >
102 {
103  typedef VectorizedArray<Number> type;
104 };
105 
106 
107 
108 #ifndef DEAL_II_WITH_CXX11
109 // Specify the types for the implemented multiplications explicitly
110 
111 template <typename Number>
112 struct ProductType<Number, VectorizedArray<Number> >
113 {
114  typedef VectorizedArray<Number> type;
115 };
116 
117 template <typename Number>
118 struct ProductType<VectorizedArray<Number>, Number>
119 {
120  typedef VectorizedArray<Number> type;
121 };
122 
123 // In contrast to scalar types for which the product of a float and a double
124 // variable would be a double variable, the implemented type here really is
125 // VectorizedArray<float>. Since VectorizedArray<double> is only half as
126 // wide as VectorizedArray<float>, we would have to throw away half of the
127 // vector otherwise.
128 template<>
129 struct ProductType<double, VectorizedArray<float> >
130 {
131  typedef VectorizedArray<float> type;
132 };
133 
134 template<>
135 struct ProductType<VectorizedArray<float>, double>
136 {
137  typedef VectorizedArray<float> type;
138 };
139 #endif
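// [Editorial usage sketch, not part of vectorization.h] With the
// specializations above (pre-C++11 builds; the C++11 ProductType machinery
// together with the mixed-precision operator* defined later in this header
// should yield the same result), a double times a VectorizedArray<float>
// deliberately stays in single precision:
//
//   typedef dealii::ProductType<double, dealii::VectorizedArray<float> >::type P;
//   // P is dealii::VectorizedArray<float>; promoting to
//   // dealii::VectorizedArray<double> would discard half of the lanes.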
140 
141 
142 
193 template <typename Number>
194 class VectorizedArray
195 {
196 public:
200  static const unsigned int n_array_elements = 1;
201 
202  // POD means that there should be no user-defined constructors, destructors
203  // and copy functions (the standard is somewhat relaxed in C++2011, though).
204 
208  DEAL_II_ALWAYS_INLINE
209  VectorizedArray &
210  operator = (const Number scalar)
211  {
212  data = scalar;
213  return *this;
214  }
215 
219  DEAL_II_ALWAYS_INLINE
220  Number &
221  operator [] (const unsigned int comp)
222  {
223  (void)comp;
224  AssertIndexRange (comp, 1);
225  return data;
226  }
227 
231  DEAL_II_ALWAYS_INLINE
232  const Number &
233  operator [] (const unsigned int comp) const
234  {
235  (void)comp;
236  AssertIndexRange (comp, 1);
237  return data;
238  }
239 
243  DEAL_II_ALWAYS_INLINE
244  VectorizedArray &
245  operator += (const VectorizedArray &vec)
246  {
247  data+=vec.data;
248  return *this;
249  }
250 
254  DEAL_II_ALWAYS_INLINE
255  VectorizedArray &
256  operator -= (const VectorizedArray &vec)
257  {
258  data-=vec.data;
259  return *this;
260  }
261 
265  DEAL_II_ALWAYS_INLINE
266  VectorizedArray &
267  operator *= (const VectorizedArray &vec)
268  {
269  data*=vec.data;
270  return *this;
271  }
272 
276  DEAL_II_ALWAYS_INLINE
277  VectorizedArray &
278  operator /= (const VectorizedArray &vec)
279  {
280  data/=vec.data;
281  return *this;
282  }
283 
290  DEAL_II_ALWAYS_INLINE
291  void load (const Number *ptr)
292  {
293  data = *ptr;
294  }
295 
302  DEAL_II_ALWAYS_INLINE
303  void store (Number *ptr) const
304  {
305  *ptr = data;
306  }
307 
320  DEAL_II_ALWAYS_INLINE
321  void gather (const Number *base_ptr,
322  const unsigned int *offsets)
323  {
324  data = base_ptr[offsets[0]];
325  }
326 
339  DEAL_II_ALWAYS_INLINE
340  void scatter (const unsigned int *offsets,
341  Number *base_ptr) const
342  {
343  base_ptr[offsets[0]] = data;
344  }
345 
350  Number data;
351 
352 private:
357  DEAL_II_ALWAYS_INLINE
358  VectorizedArray
359  get_sqrt () const
360  {
361  VectorizedArray res;
362  res.data = std::sqrt(data);
363  return res;
364  }
365 
370  DEAL_II_ALWAYS_INLINE
371  VectorizedArray
372  get_abs () const
373  {
374  VectorizedArray res;
375  res.data = std::fabs(data);
376  return res;
377  }
378 
383  DEAL_II_ALWAYS_INLINE
384  VectorizedArray
385  get_max (const VectorizedArray &other) const
386  {
387  VectorizedArray res;
388  res.data = std::max (data, other.data);
389  return res;
390  }
391 
396  DEAL_II_ALWAYS_INLINE
397  VectorizedArray
398  get_min (const VectorizedArray &other) const
399  {
400  VectorizedArray res;
401  res.data = std::min (data, other.data);
402  return res;
403  }
404 
408  template <typename Number2> friend VectorizedArray<Number2>
409  std::sqrt (const VectorizedArray<Number2> &);
410  template <typename Number2> friend VectorizedArray<Number2>
411  std::abs (const VectorizedArray<Number2> &);
412  template <typename Number2> friend VectorizedArray<Number2>
413  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
414  template <typename Number2> friend VectorizedArray<Number2>
415  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
416 };
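// [Editorial usage sketch, not part of vectorization.h] The fallback class
// above holds a single scalar (n_array_elements == 1); the specializations
// further down process 2-16 lanes with the same interface, so a loop like the
// following works unchanged at every vectorization level:
//
//   const unsigned int n = dealii::VectorizedArray<double>::n_array_elements;
//   double buffer[64];
//   for (unsigned int i=0; i<64; ++i)
//     buffer[i] = i;
//   for (unsigned int i=0; i+n<=64; i+=n)
//     {
//       dealii::VectorizedArray<double> v;
//       v.load(buffer+i);    // read n consecutive entries
//       v *= v;              // square each lane
//       v.store(buffer+i);   // write the n entries back
//     }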
417 
418 
419 
426 template <typename Number>
427 inline DEAL_II_ALWAYS_INLINE
428  VectorizedArray<Number>
429 make_vectorized_array (const Number &u)
430 {
431  VectorizedArray<Number> result;
432  result = u;
433  return result;
434 }
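// [Editorial usage sketch, not part of vectorization.h]
// make_vectorized_array() broadcasts one scalar into every lane:
//
//   const dealii::VectorizedArray<float> two = dealii::make_vectorized_array(2.f);
//   // two[v] == 2.f for v = 0, ..., n_array_elements-1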
435 
436 
437 
463 template <typename Number>
464 inline
465 void
466 vectorized_load_and_transpose(const unsigned int n_entries,
467  const Number *in,
468  const unsigned int *offsets,
469  VectorizedArray<Number> *out)
470 {
471  for (unsigned int i=0; i<n_entries; ++i)
472  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
473  out[i][v] = in[offsets[v]+i];
474 }
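// [Editorial usage sketch, not part of vectorization.h] The generic
// transpose-load above fills lane v of out[i] with in[offsets[v]+i], i.e. it
// reads n_array_elements strided chunks and transposes them into lanes. A
// sketch with hypothetical data for a 4-lane case (e.g. AVX doubles), where
// the chunks start 16 entries apart:
//
//   double in[64];
//   for (unsigned int i=0; i<64; ++i)
//     in[i] = i;
//   const unsigned int offsets[4] = {0, 16, 32, 48};
//   dealii::VectorizedArray<double> out[16];
//   dealii::vectorized_load_and_transpose(16, in, offsets, out);
//   // now out[i][v] == in[16*v + i]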
475 
476 
477 
516 template <typename Number>
517 inline
518 void
519 vectorized_transpose_and_store(const bool add_into,
520  const unsigned int n_entries,
521  const VectorizedArray<Number> *in,
522  const unsigned int *offsets,
523  Number *out)
524 {
525  if (add_into)
526  for (unsigned int i=0; i<n_entries; ++i)
527  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
528  out[offsets[v]+i] += in[i][v];
529  else
530  for (unsigned int i=0; i<n_entries; ++i)
531  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
532  out[offsets[v]+i] = in[i][v];
533 }
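// [Editorial usage sketch, not part of vectorization.h] This is the inverse of
// vectorized_load_and_transpose(): lane v of in[i] is written (or, for
// add_into == true, added) to out[offsets[v]+i]. Continuing the hypothetical
// 4-lane example from above:
//
//   double result[64] = {};
//   // overwrite: result[16*v + i] = out[i][v]
//   dealii::vectorized_transpose_and_store(false, 16, out, offsets, result);
//   // accumulate: result[16*v + i] += out[i][v]
//   dealii::vectorized_transpose_and_store(true, 16, out, offsets, result);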
534 
535 
536 
537 // for safety, also check that __AVX512F__ is defined in case the user manually
538 // set some conflicting compile flags which prevent compilation
539 
540 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
541 
545 template <>
546 class VectorizedArray<double>
547 {
548 public:
552  static const unsigned int n_array_elements = 8;
553 
557  DEAL_II_ALWAYS_INLINE
558  VectorizedArray &
559  operator = (const double x)
560  {
561  data = _mm512_set1_pd(x);
562  return *this;
563  }
564 
568  DEAL_II_ALWAYS_INLINE
569  double &
570  operator [] (const unsigned int comp)
571  {
572  AssertIndexRange (comp, 8);
573  return *(reinterpret_cast<double *>(&data)+comp);
574  }
575 
579  DEAL_II_ALWAYS_INLINE
580  const double &
581  operator [] (const unsigned int comp) const
582  {
583  AssertIndexRange (comp, 8);
584  return *(reinterpret_cast<const double *>(&data)+comp);
585  }
586 
590  DEAL_II_ALWAYS_INLINE
591  VectorizedArray &
592  operator += (const VectorizedArray &vec)
593  {
594  // if the compiler supports vector arithmetics, we can simply use +=
595  // operator on the given data type. this allows the compiler to combine
596  // additions with multiplication (fused multiply-add) if those
597  // instructions are available. Otherwise, we need to use the built-in
598  // intrinsic command for __m512d
599 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
600  data += vec.data;
601 #else
602  data = _mm512_add_pd(data,vec.data);
603 #endif
604  return *this;
605  }
606 
610  DEAL_II_ALWAYS_INLINE
611  VectorizedArray &
612  operator -= (const VectorizedArray &vec)
613  {
614 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
615  data -= vec.data;
616 #else
617  data = _mm512_sub_pd(data,vec.data);
618 #endif
619  return *this;
620  }
624  DEAL_II_ALWAYS_INLINE
625  VectorizedArray &
626  operator *= (const VectorizedArray &vec)
627  {
628 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
629  data *= vec.data;
630 #else
631  data = _mm512_mul_pd(data,vec.data);
632 #endif
633  return *this;
634  }
635 
639  DEAL_II_ALWAYS_INLINE
640  VectorizedArray &
641  operator /= (const VectorizedArray &vec)
642  {
643 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
644  data /= vec.data;
645 #else
646  data = _mm512_div_pd(data,vec.data);
647 #endif
648  return *this;
649  }
650 
656  DEAL_II_ALWAYS_INLINE
657  void load (const double *ptr)
658  {
659  data = _mm512_loadu_pd (ptr);
660  }
661 
668  DEAL_II_ALWAYS_INLINE
669  void store (double *ptr) const
670  {
671  _mm512_storeu_pd (ptr, data);
672  }
673 
686  DEAL_II_ALWAYS_INLINE
687  void gather (const double *base_ptr,
688  const unsigned int *offsets)
689  {
690  // unfortunately, there does not appear to be a 256 bit integer load, so
691  // do it by some reinterpret casts here. this is allowed because the Intel
692  // API allows aliasing between different vector types.
693  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
694  const __m256i index = *((__m256i *)(&index_val));
695  data = _mm512_i32gather_pd(index, base_ptr, 8);
696  }
697 
710  DEAL_II_ALWAYS_INLINE
711  void scatter (const unsigned int *offsets,
712  double *base_ptr) const
713  {
714  for (unsigned int i=0; i<8; ++i)
715  for (unsigned int j=i+1; j<8; ++j)
716  Assert(offsets[i] != offsets[j],
717  ExcMessage("Result of scatter undefined if two offset elements"
718  " point to the same position"));
719 
720  // unfortunately, there does not appear to be a 256 bit integer load, so
721  // do it by some reinterpret casts here. this is allowed because the Intel
722  // API allows aliasing between different vector types.
723  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
724  const __m256i index = *((__m256i *)(&index_val));
725  _mm512_i32scatter_pd(base_ptr, index, data, 8);
726  }
727 
732  __m512d data;
733 
734 private:
739  DEAL_II_ALWAYS_INLINE
740  VectorizedArray
741  get_sqrt () const
742  {
743  VectorizedArray res;
744  res.data = _mm512_sqrt_pd(data);
745  return res;
746  }
747 
752  DEAL_II_ALWAYS_INLINE
753  VectorizedArray
754  get_abs () const
755  {
756  // to compute the absolute value, perform bitwise andnot with -0. This
757  // will leave all value and exponent bits unchanged but force the sign
758  // value to +. Since there is no andnot for AVX512, we interpret the data
759  // as 64 bit integers and do the andnot on those types (note that andnot
760  // is a bitwise operation so the data type does not matter)
761  __m512d mask = _mm512_set1_pd (-0.);
762  VectorizedArray res;
763  res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
764  return res;
765  }
766 
771  DEAL_II_ALWAYS_INLINE
772  VectorizedArray
773  get_max (const VectorizedArray &other) const
774  {
775  VectorizedArray res;
776  res.data = _mm512_max_pd (data, other.data);
777  return res;
778  }
779 
784  DEAL_II_ALWAYS_INLINE
785  VectorizedArray
786  get_min (const VectorizedArray &other) const
787  {
788  VectorizedArray res;
789  res.data = _mm512_min_pd (data, other.data);
790  return res;
791  }
792 
796  template <typename Number2> friend VectorizedArray<Number2>
797  std::sqrt (const VectorizedArray<Number2> &);
798  template <typename Number2> friend VectorizedArray<Number2>
799  std::abs (const VectorizedArray<Number2> &);
800  template <typename Number2> friend VectorizedArray<Number2>
801  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
802  template <typename Number2> friend VectorizedArray<Number2>
803  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
804 };
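// [Editorial usage sketch, not part of vectorization.h] gather() and scatter()
// access n_array_elements arbitrary positions; for scatter() the offsets must
// be pairwise distinct, as the assertion above checks. A sketch with
// hypothetical index data for this 8-lane AVX-512 double case, assuming src
// has been filled:
//
//   double src[32], dst[32];
//   const unsigned int idx[8] = {0, 3, 5, 9, 12, 17, 21, 30};
//   dealii::VectorizedArray<double> g;
//   g.gather(src, idx);     // g[v] = src[idx[v]]
//   g.scatter(idx, dst);    // dst[idx[v]] = g[v]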
805 
806 
807 
811 template <>
812 inline
813 void
814 vectorized_load_and_transpose(const unsigned int n_entries,
815  const double *in,
816  const unsigned int *offsets,
817  VectorizedArray<double> *out)
818 {
819  const unsigned int n_chunks = n_entries/4;
820  for (unsigned int outer=0; outer<8; outer += 4)
821  {
822  const double *in0 = in + offsets[0+outer];
823  const double *in1 = in + offsets[1+outer];
824  const double *in2 = in + offsets[2+outer];
825  const double *in3 = in + offsets[3+outer];
826 
827  for (unsigned int i=0; i<n_chunks; ++i)
828  {
829  __m256d u0 = _mm256_loadu_pd(in0+4*i);
830  __m256d u1 = _mm256_loadu_pd(in1+4*i);
831  __m256d u2 = _mm256_loadu_pd(in2+4*i);
832  __m256d u3 = _mm256_loadu_pd(in3+4*i);
833  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
834  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
835  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
836  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
837  *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
838  *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
839  *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
840  *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
841  }
842  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
843  for (unsigned int v=0; v<4; ++v)
844  out[i][outer+v] = in[offsets[v+outer]+i];
845  }
846 }
847 
848 
849 
853 template <>
854 inline
855 void
856 vectorized_transpose_and_store(const bool add_into,
857  const unsigned int n_entries,
858  const VectorizedArray<double> *in,
859  const unsigned int *offsets,
860  double *out)
861 {
862  const unsigned int n_chunks = n_entries/4;
863  // do not do a full transpose because the code would be too long and would
864  // most likely not pay off. rather, do the transposition on vectorized
865  // arrays of half the size, __m256d
866  for (unsigned int outer=0; outer<8; outer += 4)
867  {
868  double *out0 = out + offsets[0+outer];
869  double *out1 = out + offsets[1+outer];
870  double *out2 = out + offsets[2+outer];
871  double *out3 = out + offsets[3+outer];
872  for (unsigned int i=0; i<n_chunks; ++i)
873  {
874  __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
875  __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
876  __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
877  __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
878  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
879  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
880  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
881  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
882  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
883  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
884  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
885  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
886 
887  // Cannot use the same store instructions in both paths of the 'if'
888  // because the compiler cannot know that there is no aliasing between
889  // pointers
890  if (add_into)
891  {
892  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
893  _mm256_storeu_pd(out0+4*i, res0);
894  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
895  _mm256_storeu_pd(out1+4*i, res1);
896  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
897  _mm256_storeu_pd(out2+4*i, res2);
898  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
899  _mm256_storeu_pd(out3+4*i, res3);
900  }
901  else
902  {
903  _mm256_storeu_pd(out0+4*i, res0);
904  _mm256_storeu_pd(out1+4*i, res1);
905  _mm256_storeu_pd(out2+4*i, res2);
906  _mm256_storeu_pd(out3+4*i, res3);
907  }
908  }
909  if (add_into)
910  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
911  for (unsigned int v=0; v<4; ++v)
912  out[offsets[v+outer]+i] += in[i][v+outer];
913  else
914  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
915  for (unsigned int v=0; v<4; ++v)
916  out[offsets[v+outer]+i] = in[i][v+outer];
917  }
918 }
919 
920 
921 
925 template<>
926 class VectorizedArray<float>
927 {
928 public:
932  static const unsigned int n_array_elements = 16;
933 
937  DEAL_II_ALWAYS_INLINE
938  VectorizedArray &
939  operator = (const float x)
940  {
941  data = _mm512_set1_ps(x);
942  return *this;
943  }
944 
948  DEAL_II_ALWAYS_INLINE
949  float &
950  operator [] (const unsigned int comp)
951  {
952  AssertIndexRange (comp, 16);
953  return *(reinterpret_cast<float *>(&data)+comp);
954  }
955 
959  DEAL_II_ALWAYS_INLINE
960  const float &
961  operator [] (const unsigned int comp) const
962  {
963  AssertIndexRange (comp, 16);
964  return *(reinterpret_cast<const float *>(&data)+comp);
965  }
966 
970  DEAL_II_ALWAYS_INLINE
971  VectorizedArray &
972  operator += (const VectorizedArray &vec)
973  {
974  // if the compiler supports vector arithmetics, we can simply use +=
975  // operator on the given data type. this allows the compiler to combine
976  // additions with multiplication (fused multiply-add) if those
977  // instructions are available. Otherwise, we need to use the built-in
978  // intrinsic command for __m512
979 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
980  data += vec.data;
981 #else
982  data = _mm512_add_ps(data,vec.data);
983 #endif
984  return *this;
985  }
986 
990  DEAL_II_ALWAYS_INLINE
991  VectorizedArray &
992  operator -= (const VectorizedArray &vec)
993  {
994 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
995  data -= vec.data;
996 #else
997  data = _mm512_sub_ps(data,vec.data);
998 #endif
999  return *this;
1000  }
1004  DEAL_II_ALWAYS_INLINE
1005  VectorizedArray &
1006  operator *= (const VectorizedArray &vec)
1007  {
1008 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1009  data *= vec.data;
1010 #else
1011  data = _mm512_mul_ps(data,vec.data);
1012 #endif
1013  return *this;
1014  }
1015 
1019  DEAL_II_ALWAYS_INLINE
1020  VectorizedArray &
1021  operator /= (const VectorizedArray &vec)
1022  {
1023 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1024  data /= vec.data;
1025 #else
1026  data = _mm512_div_ps(data,vec.data);
1027 #endif
1028  return *this;
1029  }
1030 
1036  DEAL_II_ALWAYS_INLINE
1037  void load (const float *ptr)
1038  {
1039  data = _mm512_loadu_ps (ptr);
1040  }
1041 
1048  DEAL_II_ALWAYS_INLINE
1049  void store (float *ptr) const
1050  {
1051  _mm512_storeu_ps (ptr, data);
1052  }
1053 
1066  DEAL_II_ALWAYS_INLINE
1067  void gather (const float *base_ptr,
1068  const unsigned int *offsets)
1069  {
1070  // unfortunately, there does not appear to be a 512 bit integer load, so
1071  // do it by some reinterpret casts here. this is allowed because the Intel
1072  // API allows aliasing between different vector types.
1073  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1074  const __m512i index = *((__m512i *)(&index_val));
1075  data = _mm512_i32gather_ps(index, base_ptr, 4);
1076  }
1077 
1090  DEAL_II_ALWAYS_INLINE
1091  void scatter (const unsigned int *offsets,
1092  float *base_ptr) const
1093  {
1094  for (unsigned int i=0; i<16; ++i)
1095  for (unsigned int j=i+1; j<16; ++j)
1096  Assert(offsets[i] != offsets[j],
1097  ExcMessage("Result of scatter undefined if two offset elements"
1098  " point to the same position"));
1099 
1100  // unfortunately, there does not appear to be a 512 bit integer load, so
1101  // do it by some reinterpret casts here. this is allowed because the Intel
1102  // API allows aliasing between different vector types.
1103  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1104  const __m512i index = *((__m512i *)(&index_val));
1105  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1106  }
1107 
1112  __m512 data;
1113 
1114 private:
1115 
1120  DEAL_II_ALWAYS_INLINE
1121  VectorizedArray
1122  get_sqrt () const
1123  {
1124  VectorizedArray res;
1125  res.data = _mm512_sqrt_ps(data);
1126  return res;
1127  }
1128 
1133  DEAL_II_ALWAYS_INLINE
1134  VectorizedArray
1135  get_abs () const
1136  {
1137  // to compute the absolute value, perform bitwise andnot with -0. This
1138  // will leave all value and exponent bits unchanged but force the sign
1139  // value to +. Since there is no andnot for AVX512, we interpret the data
1140  // as 32 bit integers and do the andnot on those types (note that andnot
1141  // is a bitwise operation so the data type does not matter)
1142  __m512 mask = _mm512_set1_ps (-0.f);
1143  VectorizedArray res;
1144  res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
1145  return res;
1146  }
1147 
1152  DEAL_II_ALWAYS_INLINE
1153  VectorizedArray
1154  get_max (const VectorizedArray &other) const
1155  {
1156  VectorizedArray res;
1157  res.data = _mm512_max_ps (data, other.data);
1158  return res;
1159  }
1160 
1165  DEAL_II_ALWAYS_INLINE
1166  VectorizedArray
1167  get_min (const VectorizedArray &other) const
1168  {
1169  VectorizedArray res;
1170  res.data = _mm512_min_ps (data, other.data);
1171  return res;
1172  }
1173 
1177  template <typename Number2> friend VectorizedArray<Number2>
1178  std::sqrt (const VectorizedArray<Number2> &);
1179  template <typename Number2> friend VectorizedArray<Number2>
1180  std::abs (const VectorizedArray<Number2> &);
1181  template <typename Number2> friend VectorizedArray<Number2>
1182  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1183  template <typename Number2> friend VectorizedArray<Number2>
1184  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1185 };
1186 
1187 
1188 
1192 template <>
1193 inline
1194 void
1195 vectorized_load_and_transpose(const unsigned int n_entries,
1196  const float *in,
1197  const unsigned int *offsets,
1198  VectorizedArray<float> *out)
1199 {
1200  const unsigned int n_chunks = n_entries/4;
1201  for (unsigned int outer = 0; outer<16; outer += 8)
1202  {
1203  for (unsigned int i=0; i<n_chunks; ++i)
1204  {
1205  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
1206  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
1207  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
1208  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
1209  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
1210  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
1211  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
1212  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
1213  // To avoid warnings about uninitialized variables, we need to initialize
1214  // one variable with zero before using it.
1215  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1216  t0 = _mm256_insertf128_ps (t3, u0, 0);
1217  t0 = _mm256_insertf128_ps (t0, u4, 1);
1218  t1 = _mm256_insertf128_ps (t3, u1, 0);
1219  t1 = _mm256_insertf128_ps (t1, u5, 1);
1220  t2 = _mm256_insertf128_ps (t3, u2, 0);
1221  t2 = _mm256_insertf128_ps (t2, u6, 1);
1222  t3 = _mm256_insertf128_ps (t3, u3, 0);
1223  t3 = _mm256_insertf128_ps (t3, u7, 1);
1224  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
1225  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
1226  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
1227  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
1228  *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
1229  *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
1230  *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
1231  *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
1232  }
1233  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1234  for (unsigned int v=0; v<8; ++v)
1235  out[i][v+outer] = in[offsets[v+outer]+i];
1236  }
1237 }
1238 
1239 
1240 
1244 template <>
1245 inline
1246 void
1247 vectorized_transpose_and_store(const bool add_into,
1248  const unsigned int n_entries,
1249  const VectorizedArray<float> *in,
1250  const unsigned int *offsets,
1251  float *out)
1252 {
1253  const unsigned int n_chunks = n_entries/4;
1254  for (unsigned int outer = 0; outer<16; outer += 8)
1255  {
1256  for (unsigned int i=0; i<n_chunks; ++i)
1257  {
1258  __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
1259  __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
1260  __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
1261  __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
1262  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
1263  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
1264  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
1265  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
1266  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
1267  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
1268  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
1269  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
1270  __m128 res0 = _mm256_extractf128_ps (u0, 0);
1271  __m128 res4 = _mm256_extractf128_ps (u0, 1);
1272  __m128 res1 = _mm256_extractf128_ps (u1, 0);
1273  __m128 res5 = _mm256_extractf128_ps (u1, 1);
1274  __m128 res2 = _mm256_extractf128_ps (u2, 0);
1275  __m128 res6 = _mm256_extractf128_ps (u2, 1);
1276  __m128 res3 = _mm256_extractf128_ps (u3, 0);
1277  __m128 res7 = _mm256_extractf128_ps (u3, 1);
1278 
1279  // Cannot use the same store instructions in both paths of the 'if'
1280  // because the compiler cannot know that there is no aliasing between
1281  // pointers
1282  if (add_into)
1283  {
1284  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
1285  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1286  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
1287  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1288  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
1289  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1290  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
1291  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1292  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
1293  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1294  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
1295  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1296  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
1297  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1298  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
1299  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1300  }
1301  else
1302  {
1303  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1304  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1305  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1306  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1307  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1308  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1309  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1310  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1311  }
1312  }
1313  if (add_into)
1314  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1315  for (unsigned int v=0; v<8; ++v)
1316  out[offsets[v+outer]+i] += in[i][v+outer];
1317  else
1318  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1319  for (unsigned int v=0; v<8; ++v)
1320  out[offsets[v+outer]+i] = in[i][v+outer];
1321  }
1322 }
1323 
1324 
1325 
1326 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1327 
1331 template <>
1332 class VectorizedArray<double>
1333 {
1334 public:
1338  static const unsigned int n_array_elements = 4;
1339 
1343  DEAL_II_ALWAYS_INLINE
1344  VectorizedArray &
1345  operator = (const double x)
1346  {
1347  data = _mm256_set1_pd(x);
1348  return *this;
1349  }
1350 
1354  DEAL_II_ALWAYS_INLINE
1355  double &
1356  operator [] (const unsigned int comp)
1357  {
1358  AssertIndexRange (comp, 4);
1359  return *(reinterpret_cast<double *>(&data)+comp);
1360  }
1361 
1365  DEAL_II_ALWAYS_INLINE
1366  const double &
1367  operator [] (const unsigned int comp) const
1368  {
1369  AssertIndexRange (comp, 4);
1370  return *(reinterpret_cast<const double *>(&data)+comp);
1371  }
1372 
1376  DEAL_II_ALWAYS_INLINE
1377  VectorizedArray &
1378  operator += (const VectorizedArray &vec)
1379  {
1380  // if the compiler supports vector arithmetics, we can simply use +=
1381  // operator on the given data type. this allows the compiler to combine
1382  // additions with multiplication (fused multiply-add) if those
1383  // instructions are available. Otherwise, we need to use the built-in
1384  // intrinsic command for __m256d
1385 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1386  data += vec.data;
1387 #else
1388  data = _mm256_add_pd(data,vec.data);
1389 #endif
1390  return *this;
1391  }
1392 
1396  DEAL_II_ALWAYS_INLINE
1397  VectorizedArray &
1398  operator -= (const VectorizedArray &vec)
1399  {
1400 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1401  data -= vec.data;
1402 #else
1403  data = _mm256_sub_pd(data,vec.data);
1404 #endif
1405  return *this;
1406  }
1410  DEAL_II_ALWAYS_INLINE
1411  VectorizedArray &
1412  operator *= (const VectorizedArray &vec)
1413  {
1414 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1415  data *= vec.data;
1416 #else
1417  data = _mm256_mul_pd(data,vec.data);
1418 #endif
1419  return *this;
1420  }
1421 
1425  DEAL_II_ALWAYS_INLINE
1426  VectorizedArray &
1427  operator /= (const VectorizedArray &vec)
1428  {
1429 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1430  data /= vec.data;
1431 #else
1432  data = _mm256_div_pd(data,vec.data);
1433 #endif
1434  return *this;
1435  }
1436 
1442  DEAL_II_ALWAYS_INLINE
1443  void load (const double *ptr)
1444  {
1445  data = _mm256_loadu_pd (ptr);
1446  }
1447 
1454  DEAL_II_ALWAYS_INLINE
1455  void store (double *ptr) const
1456  {
1457  _mm256_storeu_pd (ptr, data);
1458  }
1459 
1472  DEAL_II_ALWAYS_INLINE
1473  void gather (const double *base_ptr,
1474  const unsigned int *offsets)
1475  {
1476 #ifdef __AVX2__
1477  // unfortunately, there does not appear to be a 128 bit integer load, so
1478  // do it by some reinterpret casts here. this is allowed because the Intel
1479  // API allows aliasing between different vector types.
1480  const __m128 index_val = _mm_loadu_ps((const float *)offsets);
1481  const __m128i index = *((__m128i *)(&index_val));
1482  data = _mm256_i32gather_pd(base_ptr, index, 8);
1483 #else
1484  for (unsigned int i=0; i<4; ++i)
1485  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
1486 #endif
1487  }
1488 
1501  DEAL_II_ALWAYS_INLINE
1502  void scatter (const unsigned int *offsets,
1503  double *base_ptr) const
1504  {
1505  for (unsigned int i=0; i<4; ++i)
1506  for (unsigned int j=i+1; j<4; ++j)
1507  Assert(offsets[i] != offsets[j],
1508  ExcMessage("Result of scatter undefined if two offset elements"
1509  " point to the same position"));
1510 
1511  // no scatter operation in AVX/AVX2
1512  for (unsigned int i=0; i<4; ++i)
1513  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
1514  }
1515 
1520  __m256d data;
1521 
1522 private:
1527  DEAL_II_ALWAYS_INLINE
1528  VectorizedArray
1529  get_sqrt () const
1530  {
1531  VectorizedArray res;
1532  res.data = _mm256_sqrt_pd(data);
1533  return res;
1534  }
1535 
1540  DEAL_II_ALWAYS_INLINE
1541  VectorizedArray
1542  get_abs () const
1543  {
1544  // to compute the absolute value, perform bitwise andnot with -0. This
1545  // will leave all value and exponent bits unchanged but force the sign
1546  // value to +.
1547  __m256d mask = _mm256_set1_pd (-0.);
1548  VectorizedArray res;
1549  res.data = _mm256_andnot_pd(mask, data);
1550  return res;
1551  }
1552 
1557  DEAL_II_ALWAYS_INLINE
1558  VectorizedArray
1559  get_max (const VectorizedArray &other) const
1560  {
1561  VectorizedArray res;
1562  res.data = _mm256_max_pd (data, other.data);
1563  return res;
1564  }
1565 
1570  DEAL_II_ALWAYS_INLINE
1571  VectorizedArray
1572  get_min (const VectorizedArray &other) const
1573  {
1574  VectorizedArray res;
1575  res.data = _mm256_min_pd (data, other.data);
1576  return res;
1577  }
1578 
1582  template <typename Number2> friend VectorizedArray<Number2>
1583  std::sqrt (const VectorizedArray<Number2> &);
1584  template <typename Number2> friend VectorizedArray<Number2>
1585  std::abs (const VectorizedArray<Number2> &);
1586  template <typename Number2> friend VectorizedArray<Number2>
1587  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1588  template <typename Number2> friend VectorizedArray<Number2>
1589  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1590 };
1591 
1592 
1593 
1597 template <>
1598 inline
1599 void
1600 vectorized_load_and_transpose(const unsigned int n_entries,
1601  const double *in,
1602  const unsigned int *offsets,
1603  VectorizedArray<double> *out)
1604 {
1605  const unsigned int n_chunks = n_entries/4;
1606  const double *in0 = in + offsets[0];
1607  const double *in1 = in + offsets[1];
1608  const double *in2 = in + offsets[2];
1609  const double *in3 = in + offsets[3];
1610 
1611  for (unsigned int i=0; i<n_chunks; ++i)
1612  {
1613  __m256d u0 = _mm256_loadu_pd(in0+4*i);
1614  __m256d u1 = _mm256_loadu_pd(in1+4*i);
1615  __m256d u2 = _mm256_loadu_pd(in2+4*i);
1616  __m256d u3 = _mm256_loadu_pd(in3+4*i);
1617  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1618  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1619  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1620  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1621  out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
1622  out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
1623  out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
1624  out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
1625  }
1626  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1627  for (unsigned int v=0; v<4; ++v)
1628  out[i][v] = in[offsets[v]+i];
1629 }
1630 
1631 
1632 
1636 template <>
1637 inline
1638 void
1639 vectorized_transpose_and_store(const bool add_into,
1640  const unsigned int n_entries,
1641  const VectorizedArray<double> *in,
1642  const unsigned int *offsets,
1643  double *out)
1644 {
1645  const unsigned int n_chunks = n_entries/4;
1646  double *out0 = out + offsets[0];
1647  double *out1 = out + offsets[1];
1648  double *out2 = out + offsets[2];
1649  double *out3 = out + offsets[3];
1650  for (unsigned int i=0; i<n_chunks; ++i)
1651  {
1652  __m256d u0 = in[4*i+0].data;
1653  __m256d u1 = in[4*i+1].data;
1654  __m256d u2 = in[4*i+2].data;
1655  __m256d u3 = in[4*i+3].data;
1656  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1657  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1658  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1659  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1660  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
1661  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
1662  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
1663  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
1664 
1665  // Cannot use the same store instructions in both paths of the 'if'
1666  // because the compiler cannot know that there is no aliasing between
1667  // pointers
1668  if (add_into)
1669  {
1670  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
1671  _mm256_storeu_pd(out0+4*i, res0);
1672  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
1673  _mm256_storeu_pd(out1+4*i, res1);
1674  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
1675  _mm256_storeu_pd(out2+4*i, res2);
1676  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
1677  _mm256_storeu_pd(out3+4*i, res3);
1678  }
1679  else
1680  {
1681  _mm256_storeu_pd(out0+4*i, res0);
1682  _mm256_storeu_pd(out1+4*i, res1);
1683  _mm256_storeu_pd(out2+4*i, res2);
1684  _mm256_storeu_pd(out3+4*i, res3);
1685  }
1686  }
1687  if (add_into)
1688  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1689  for (unsigned int v=0; v<4; ++v)
1690  out[offsets[v]+i] += in[i][v];
1691  else
1692  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1693  for (unsigned int v=0; v<4; ++v)
1694  out[offsets[v]+i] = in[i][v];
1695 }
1696 
1697 
1698 
1702 template<>
1703 class VectorizedArray<float>
1704 {
1705 public:
1709  static const unsigned int n_array_elements = 8;
1710 
1714  DEAL_II_ALWAYS_INLINE
1715  VectorizedArray &
1716  operator = (const float x)
1717  {
1718  data = _mm256_set1_ps(x);
1719  return *this;
1720  }
1721 
1725  DEAL_II_ALWAYS_INLINE
1726  float &
1727  operator [] (const unsigned int comp)
1728  {
1729  AssertIndexRange (comp, 8);
1730  return *(reinterpret_cast<float *>(&data)+comp);
1731  }
1732 
1736  DEAL_II_ALWAYS_INLINE
1737  const float &
1738  operator [] (const unsigned int comp) const
1739  {
1740  AssertIndexRange (comp, 8);
1741  return *(reinterpret_cast<const float *>(&data)+comp);
1742  }
1743 
1747  DEAL_II_ALWAYS_INLINE
1748  VectorizedArray &
1749  operator += (const VectorizedArray &vec)
1750  {
1751  // if the compiler supports vector arithmetics, we can simply use +=
1752  // operator on the given data type. this allows the compiler to combine
1753  // additions with multiplication (fused multiply-add) if those
1754  // instructions are available. Otherwise, we need to use the built-in
1755  // intrinsic command for __m256
1756 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1757  data += vec.data;
1758 #else
1759  data = _mm256_add_ps(data,vec.data);
1760 #endif
1761  return *this;
1762  }
1763 
1767  DEAL_II_ALWAYS_INLINE
1768  VectorizedArray &
1769  operator -= (const VectorizedArray &vec)
1770  {
1771 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1772  data -= vec.data;
1773 #else
1774  data = _mm256_sub_ps(data,vec.data);
1775 #endif
1776  return *this;
1777  }
1781  DEAL_II_ALWAYS_INLINE
1782  VectorizedArray &
1783  operator *= (const VectorizedArray &vec)
1784  {
1785 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1786  data *= vec.data;
1787 #else
1788  data = _mm256_mul_ps(data,vec.data);
1789 #endif
1790  return *this;
1791  }
1792 
1796  DEAL_II_ALWAYS_INLINE
1797  VectorizedArray &
1798  operator /= (const VectorizedArray &vec)
1799  {
1800 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1801  data /= vec.data;
1802 #else
1803  data = _mm256_div_ps(data,vec.data);
1804 #endif
1805  return *this;
1806  }
1807 
1813  DEAL_II_ALWAYS_INLINE
1814  void load (const float *ptr)
1815  {
1816  data = _mm256_loadu_ps (ptr);
1817  }
1818 
1825  DEAL_II_ALWAYS_INLINE
1826  void store (float *ptr) const
1827  {
1828  _mm256_storeu_ps (ptr, data);
1829  }
1830 
1843  DEAL_II_ALWAYS_INLINE
1844  void gather (const float *base_ptr,
1845  const unsigned int *offsets)
1846  {
1847 #ifdef __AVX2__
1848  // unfortunately, there does not appear to be a 256 bit integer load, so
1849  // do it by some reinterpret casts here. this is allowed because the Intel
1850  // API allows aliasing between different vector types.
1851  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
1852  const __m256i index = *((__m256i *)(&index_val));
1853  data = _mm256_i32gather_ps(base_ptr, index, 4);
1854 #else
1855  for (unsigned int i=0; i<8; ++i)
1856  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
1857 #endif
1858  }
1859 
1872  DEAL_II_ALWAYS_INLINE
1873  void scatter (const unsigned int *offsets,
1874  float *base_ptr) const
1875  {
1876  for (unsigned int i=0; i<8; ++i)
1877  for (unsigned int j=i+1; j<8; ++j)
1878  Assert(offsets[i] != offsets[j],
1879  ExcMessage("Result of scatter undefined if two offset elements"
1880  " point to the same position"));
1881 
1882  // no scatter operation in AVX/AVX2
1883  for (unsigned int i=0; i<8; ++i)
1884  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
1885  }
1886 
1891  __m256 data;
1892 
1893 private:
1894 
1899  DEAL_II_ALWAYS_INLINE
1900  VectorizedArray
1901  get_sqrt () const
1902  {
1903  VectorizedArray res;
1904  res.data = _mm256_sqrt_ps(data);
1905  return res;
1906  }
1907 
1912  DEAL_II_ALWAYS_INLINE
1913  VectorizedArray
1914  get_abs () const
1915  {
1916  // to compute the absolute value, perform bitwise andnot with -0. This
1917  // will leave all value and exponent bits unchanged but force the sign
1918  // value to +.
1919  __m256 mask = _mm256_set1_ps (-0.f);
1920  VectorizedArray res;
1921  res.data = _mm256_andnot_ps(mask, data);
1922  return res;
1923  }
1924 
1929  DEAL_II_ALWAYS_INLINE
1930  VectorizedArray
1931  get_max (const VectorizedArray &other) const
1932  {
1933  VectorizedArray res;
1934  res.data = _mm256_max_ps (data, other.data);
1935  return res;
1936  }
1937 
1942  DEAL_II_ALWAYS_INLINE
1943  VectorizedArray
1944  get_min (const VectorizedArray &other) const
1945  {
1946  VectorizedArray res;
1947  res.data = _mm256_min_ps (data, other.data);
1948  return res;
1949  }
1950 
1954  template <typename Number2> friend VectorizedArray<Number2>
1955  std::sqrt (const VectorizedArray<Number2> &);
1956  template <typename Number2> friend VectorizedArray<Number2>
1957  std::abs (const VectorizedArray<Number2> &);
1958  template <typename Number2> friend VectorizedArray<Number2>
1959  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1960  template <typename Number2> friend VectorizedArray<Number2>
1961  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1962 };
1963 
1964 
1965 
1969 template <>
1970 inline
1971 void
1972 vectorized_load_and_transpose(const unsigned int n_entries,
1973  const float *in,
1974  const unsigned int *offsets,
1975  VectorizedArray<float> *out)
1976 {
1977  const unsigned int n_chunks = n_entries/4;
1978  for (unsigned int i=0; i<n_chunks; ++i)
1979  {
1980  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
1981  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
1982  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
1983  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
1984  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
1985  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
1986  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
1987  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
1988  // To avoid warnings about uninitialized variables, we need to initialize
1989  // one variable with zero before using it.
1990  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1991  t0 = _mm256_insertf128_ps (t3, u0, 0);
1992  t0 = _mm256_insertf128_ps (t0, u4, 1);
1993  t1 = _mm256_insertf128_ps (t3, u1, 0);
1994  t1 = _mm256_insertf128_ps (t1, u5, 1);
1995  t2 = _mm256_insertf128_ps (t3, u2, 0);
1996  t2 = _mm256_insertf128_ps (t2, u6, 1);
1997  t3 = _mm256_insertf128_ps (t3, u3, 0);
1998  t3 = _mm256_insertf128_ps (t3, u7, 1);
1999  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
2000  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
2001  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
2002  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
2003  out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
2004  out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
2005  out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
2006  out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
2007  }
2008  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2009  for (unsigned int v=0; v<8; ++v)
2010  out[i][v] = in[offsets[v]+i];
2011 }
2012 
2013 
2014 
2018 template <>
2019 inline
2020 void
2021 vectorized_transpose_and_store(const bool add_into,
2022  const unsigned int n_entries,
2023  const VectorizedArray<float> *in,
2024  const unsigned int *offsets,
2025  float *out)
2026 {
2027  const unsigned int n_chunks = n_entries/4;
2028  for (unsigned int i=0; i<n_chunks; ++i)
2029  {
2030  __m256 u0 = in[4*i+0].data;
2031  __m256 u1 = in[4*i+1].data;
2032  __m256 u2 = in[4*i+2].data;
2033  __m256 u3 = in[4*i+3].data;
2034  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
2035  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
2036  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
2037  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
2038  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
2039  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
2040  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
2041  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
2042  __m128 res0 = _mm256_extractf128_ps (u0, 0);
2043  __m128 res4 = _mm256_extractf128_ps (u0, 1);
2044  __m128 res1 = _mm256_extractf128_ps (u1, 0);
2045  __m128 res5 = _mm256_extractf128_ps (u1, 1);
2046  __m128 res2 = _mm256_extractf128_ps (u2, 0);
2047  __m128 res6 = _mm256_extractf128_ps (u2, 1);
2048  __m128 res3 = _mm256_extractf128_ps (u3, 0);
2049  __m128 res7 = _mm256_extractf128_ps (u3, 1);
2050 
2051  // Cannot use the same store instructions in both paths of the 'if'
2052  // because the compiler cannot know that there is no aliasing between
2053  // pointers
2054  if (add_into)
2055  {
2056  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
2057  _mm_storeu_ps(out+4*i+offsets[0], res0);
2058  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
2059  _mm_storeu_ps(out+4*i+offsets[1], res1);
2060  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
2061  _mm_storeu_ps(out+4*i+offsets[2], res2);
2062  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
2063  _mm_storeu_ps(out+4*i+offsets[3], res3);
2064  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
2065  _mm_storeu_ps(out+4*i+offsets[4], res4);
2066  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
2067  _mm_storeu_ps(out+4*i+offsets[5], res5);
2068  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
2069  _mm_storeu_ps(out+4*i+offsets[6], res6);
2070  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
2071  _mm_storeu_ps(out+4*i+offsets[7], res7);
2072  }
2073  else
2074  {
2075  _mm_storeu_ps(out+4*i+offsets[0], res0);
2076  _mm_storeu_ps(out+4*i+offsets[1], res1);
2077  _mm_storeu_ps(out+4*i+offsets[2], res2);
2078  _mm_storeu_ps(out+4*i+offsets[3], res3);
2079  _mm_storeu_ps(out+4*i+offsets[4], res4);
2080  _mm_storeu_ps(out+4*i+offsets[5], res5);
2081  _mm_storeu_ps(out+4*i+offsets[6], res6);
2082  _mm_storeu_ps(out+4*i+offsets[7], res7);
2083  }
2084  }
2085  if (add_into)
2086  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2087  for (unsigned int v=0; v<8; ++v)
2088  out[offsets[v]+i] += in[i][v];
2089  else
2090  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2091  for (unsigned int v=0; v<8; ++v)
2092  out[offsets[v]+i] = in[i][v];
2093 }
2094 
2095 
2096 
2097 // for safety, also check that __SSE2__ is defined in case the user manually
2098 // set some conflicting compile flags which prevent compilation
2099 
2100 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2101 
2105 template <>
2106 class VectorizedArray<double>
2107 {
2108 public:
2112  static const unsigned int n_array_elements = 2;
2113 
2117  DEAL_II_ALWAYS_INLINE
2118  VectorizedArray &
2119  operator = (const double x)
2120  {
2121  data = _mm_set1_pd(x);
2122  return *this;
2123  }
2124 
2128  DEAL_II_ALWAYS_INLINE
2129  double &
2130  operator [] (const unsigned int comp)
2131  {
2132  AssertIndexRange (comp, 2);
2133  return *(reinterpret_cast<double *>(&data)+comp);
2134  }
2135 
2139  DEAL_II_ALWAYS_INLINE
2140  const double &
2141  operator [] (const unsigned int comp) const
2142  {
2143  AssertIndexRange (comp, 2);
2144  return *(reinterpret_cast<const double *>(&data)+comp);
2145  }
2146 
2150  DEAL_II_ALWAYS_INLINE
2151  VectorizedArray &
2152  operator += (const VectorizedArray &vec)
2153  {
2154 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2155  data += vec.data;
2156 #else
2157  data = _mm_add_pd(data,vec.data);
2158 #endif
2159  return *this;
2160  }
2161 
2165  DEAL_II_ALWAYS_INLINE
2166  VectorizedArray &
2167  operator -= (const VectorizedArray &vec)
2168  {
2169 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2170  data -= vec.data;
2171 #else
2172  data = _mm_sub_pd(data,vec.data);
2173 #endif
2174  return *this;
2175  }
2176 
2180  DEAL_II_ALWAYS_INLINE
2181  VectorizedArray &
2182  operator *= (const VectorizedArray &vec)
2183  {
2184 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2185  data *= vec.data;
2186 #else
2187  data = _mm_mul_pd(data,vec.data);
2188 #endif
2189  return *this;
2190  }
2191 
2195  DEAL_II_ALWAYS_INLINE
2196  VectorizedArray &
2197  operator /= (const VectorizedArray &vec)
2198  {
2199 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2200  data /= vec.data;
2201 #else
2202  data = _mm_div_pd(data,vec.data);
2203 #endif
2204  return *this;
2205  }
2206 
2212  DEAL_II_ALWAYS_INLINE
2213  void load (const double *ptr)
2214  {
2215  data = _mm_loadu_pd (ptr);
2216  }
2217 
2224  DEAL_II_ALWAYS_INLINE
2225  void store (double *ptr) const
2226  {
2227  _mm_storeu_pd (ptr, data);
2228  }
2229 
2242  DEAL_II_ALWAYS_INLINE
2243  void gather (const double *base_ptr,
2244  const unsigned int *offsets)
2245  {
2246  for (unsigned int i=0; i<2; ++i)
2247  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
2248  }
2249 
2262  DEAL_II_ALWAYS_INLINE
2263  void scatter (const unsigned int *offsets,
2264  double *base_ptr) const
2265  {
2266  Assert(offsets[0] != offsets[1],
2267  ExcMessage("Result of scatter undefined if two offset elements"
2268  " point to the same position"));
2269 
2270  for (unsigned int i=0; i<2; ++i)
2271  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
2272  }
2273 
2278  __m128d data;
2279 
2280 private:
2285  DEAL_II_ALWAYS_INLINE
2286  VectorizedArray
2287  get_sqrt () const
2288  {
2289  VectorizedArray res;
2290  res.data = _mm_sqrt_pd(data);
2291  return res;
2292  }
2293 
2298  DEAL_II_ALWAYS_INLINE
2299  VectorizedArray
2300  get_abs () const
2301  {
2302  // to compute the absolute value, perform
2303  // bitwise andnot with -0. This will leave all
2304  // value and exponent bits unchanged but force
2305  // the sign value to +.
2306  __m128d mask = _mm_set1_pd (-0.);
2307  VectorizedArray res;
2308  res.data = _mm_andnot_pd(mask, data);
2309  return res;
2310  }
2311 
2316  DEAL_II_ALWAYS_INLINE
2317  VectorizedArray
2318  get_max (const VectorizedArray &other) const
2319  {
2320  VectorizedArray res;
2321  res.data = _mm_max_pd (data, other.data);
2322  return res;
2323  }
2324 
2329  DEAL_II_ALWAYS_INLINE
2330  VectorizedArray
2331  get_min (const VectorizedArray &other) const
2332  {
2333  VectorizedArray res;
2334  res.data = _mm_min_pd (data, other.data);
2335  return res;
2336  }
2337 
2341  template <typename Number2> friend VectorizedArray<Number2>
2342  std::sqrt (const VectorizedArray<Number2> &);
2343  template <typename Number2> friend VectorizedArray<Number2>
2344  std::abs (const VectorizedArray<Number2> &);
2345  template <typename Number2> friend VectorizedArray<Number2>
2346  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2347  template <typename Number2> friend VectorizedArray<Number2>
2348  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2349 };
2350 
2351 
2352 
2356 template <>
2357 inline
2358 void vectorized_load_and_transpose(const unsigned int n_entries,
2359  const double *in,
2360  const unsigned int *offsets,
2361  VectorizedArray<double> *out)
2362 {
2363  const unsigned int n_chunks = n_entries/2;
2364  for (unsigned int i=0; i<n_chunks; ++i)
2365  {
2366  __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
2367  __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
2368  out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
2369  out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
2370  }
2371  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2372  for (unsigned int v=0; v<2; ++v)
2373  out[i][v] = in[offsets[v]+i];
2374 }
2375 
2376 
2377 
2381 template <>
2382 inline
2383 void
2384 vectorized_transpose_and_store(const bool add_into,
2385  const unsigned int n_entries,
2386  const VectorizedArray<double> *in,
2387  const unsigned int *offsets,
2388  double *out)
2389 {
2390  const unsigned int n_chunks = n_entries/2;
2391  if (add_into)
2392  {
2393  for (unsigned int i=0; i<n_chunks; ++i)
2394  {
2395  __m128d u0 = in[2*i+0].data;
2396  __m128d u1 = in[2*i+1].data;
2397  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2398  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2399  _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
2400  _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
2401  }
2402  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2403  for (unsigned int v=0; v<2; ++v)
2404  out[offsets[v]+i] += in[i][v];
2405  }
2406  else
2407  {
2408  for (unsigned int i=0; i<n_chunks; ++i)
2409  {
2410  __m128d u0 = in[2*i+0].data;
2411  __m128d u1 = in[2*i+1].data;
2412  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2413  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2414  _mm_storeu_pd(out+2*i+offsets[0], res0);
2415  _mm_storeu_pd(out+2*i+offsets[1], res1);
2416  }
2417  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2418  for (unsigned int v=0; v<2; ++v)
2419  out[offsets[v]+i] = in[i][v];
2420  }
2421 }
2422 
2423 
2424 
2428 template <>
2429 class VectorizedArray<float>
2430 {
2431 public:
2435  static const unsigned int n_array_elements = 4;
2436 
2441  DEAL_II_ALWAYS_INLINE
2442  VectorizedArray &
2443  operator = (const float x)
2444  {
2445  data = _mm_set1_ps(x);
2446  return *this;
2447  }
2448 
2452  DEAL_II_ALWAYS_INLINE
2453  float &
2454  operator [] (const unsigned int comp)
2455  {
2456  AssertIndexRange (comp, 4);
2457  return *(reinterpret_cast<float *>(&data)+comp);
2458  }
2459 
2463  DEAL_II_ALWAYS_INLINE
2464  const float &
2465  operator [] (const unsigned int comp) const
2466  {
2467  AssertIndexRange (comp, 4);
2468  return *(reinterpret_cast<const float *>(&data)+comp);
2469  }
2470 
2474  DEAL_II_ALWAYS_INLINE
2475  VectorizedArray &
2476  operator += (const VectorizedArray &vec)
2477  {
2478 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2479  data += vec.data;
2480 #else
2481  data = _mm_add_ps(data,vec.data);
2482 #endif
2483  return *this;
2484  }
2485 
2489  DEAL_II_ALWAYS_INLINE
2490  VectorizedArray &
2491  operator -= (const VectorizedArray &vec)
2492  {
2493 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2494  data -= vec.data;
2495 #else
2496  data = _mm_sub_ps(data,vec.data);
2497 #endif
2498  return *this;
2499  }
2500 
2504  DEAL_II_ALWAYS_INLINE
2505  VectorizedArray &
2506  operator *= (const VectorizedArray &vec)
2507  {
2508 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2509  data *= vec.data;
2510 #else
2511  data = _mm_mul_ps(data,vec.data);
2512 #endif
2513  return *this;
2514  }
2515 
2519  DEAL_II_ALWAYS_INLINE
2520  VectorizedArray &
2521  operator /= (const VectorizedArray &vec)
2522  {
2523 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2524  data /= vec.data;
2525 #else
2526  data = _mm_div_ps(data,vec.data);
2527 #endif
2528  return *this;
2529  }
2530 
2536  DEAL_II_ALWAYS_INLINE
2537  void load (const float *ptr)
2538  {
2539  data = _mm_loadu_ps (ptr);
2540  }
2541 
2548  DEAL_II_ALWAYS_INLINE
2549  void store (float *ptr) const
2550  {
2551  _mm_storeu_ps (ptr, data);
2552  }
2553 
2566  DEAL_II_ALWAYS_INLINE
2567  void gather (const float *base_ptr,
2568  const unsigned int *offsets)
2569  {
2570  for (unsigned int i=0; i<4; ++i)
2571  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
2572  }
2573 
2586  DEAL_II_ALWAYS_INLINE
2587  void scatter (const unsigned int *offsets,
2588  float *base_ptr) const
2589  {
2590  for (unsigned int i=0; i<4; ++i)
2591  for (unsigned int j=i+1; j<4; ++j)
2592  Assert(offsets[i] != offsets[j],
2593  ExcMessage("Result of scatter undefined if two offset elements"
2594  " point to the same position"));
2595 
2596  for (unsigned int i=0; i<4; ++i)
2597  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
2598  }
2599 
2604  __m128 data;
2605 
2606 private:
2611  DEAL_II_ALWAYS_INLINE
2612  VectorizedArray
2613  get_sqrt () const
2614  {
2615  VectorizedArray res;
2616  res.data = _mm_sqrt_ps(data);
2617  return res;
2618  }
2619 
2624  DEAL_II_ALWAYS_INLINE
2625  VectorizedArray
2626  get_abs () const
2627  {
2628  // to compute the absolute value, perform a bitwise andnot with -0.f. This
2629  // leaves all mantissa and exponent bits unchanged but forces the sign
2630  // bit to +.
2631  __m128 mask = _mm_set1_ps (-0.f);
2632  VectorizedArray res;
2633  res.data = _mm_andnot_ps(mask, data);
2634  return res;
2635  }
2636 
2641  DEAL_II_ALWAYS_INLINE
2642  VectorizedArray
2643  get_max (const VectorizedArray &other) const
2644  {
2645  VectorizedArray res;
2646  res.data = _mm_max_ps (data, other.data);
2647  return res;
2648  }
2649 
2654  DEAL_II_ALWAYS_INLINE
2655  VectorizedArray
2656  get_min (const VectorizedArray &other) const
2657  {
2658  VectorizedArray res;
2659  res.data = _mm_min_ps (data, other.data);
2660  return res;
2661  }
2662 
2666  template <typename Number2> friend VectorizedArray<Number2>
2667  std::sqrt (const VectorizedArray<Number2> &);
2668  template <typename Number2> friend VectorizedArray<Number2>
2669  std::abs (const VectorizedArray<Number2> &);
2670  template <typename Number2> friend VectorizedArray<Number2>
2671  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2672  template <typename Number2> friend VectorizedArray<Number2>
2673  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2674 };
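As a brief illustration of the interface just defined (a sketch, not part of the header; names are made up):

#include <deal.II/base/vectorization.h>

void float_sse2_demo ()
{
  dealii::VectorizedArray<float> a, b;
  a = 3.f;                         // broadcast via _mm_set1_ps
  for (unsigned int v=0; v<4; ++v)
    b[v] = float(v);               // per-lane write through operator[]
  a += b;                          // a single _mm_add_ps for all four lanes
  float result[4];
  a.store (result);                // unaligned store; result == {3,4,5,6}
}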
2675 
2676 
2677 
2681 template <>
2682 inline
2683 void vectorized_load_and_transpose(const unsigned int n_entries,
2684  const float *in,
2685  const unsigned int *offsets,
2686  VectorizedArray<float> *out)
2687 {
2688  const unsigned int n_chunks = n_entries/4;
2689  for (unsigned int i=0; i<n_chunks; ++i)
2690  {
2691  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
2692  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
2693  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
2694  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
2695  __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
2696  __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
2697  __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
2698  __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
2699  out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
2700  out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
2701  out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
2702  out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
2703  }
2704  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2705  for (unsigned int v=0; v<4; ++v)
2706  out[i][v] = in[offsets[v]+i];
2707 }
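The four pairs of _mm_shuffle_ps calls above implement the classic 4x4 register transpose: the masks 0x44/0xee interleave the low/high 64-bit halves of two registers, and 0x88/0xdd then select the even/odd elements. The net effect is the same index mapping as the scalar remainder loop, i.e. conceptually:

// reference semantics of the intrinsic path (scalar sketch)
for (unsigned int i=0; i<n_entries; ++i)
  for (unsigned int v=0; v<4; ++v)
    out[i][v] = in[offsets[v]+i];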
2708 
2709 
2710 
2714 template <>
2715 inline
2716 void
2717 vectorized_transpose_and_store(const bool add_into,
2718  const unsigned int n_entries,
2719  const VectorizedArray<float> *in,
2720  const unsigned int *offsets,
2721  float *out)
2722 {
2723  const unsigned int n_chunks = n_entries/4;
2724  for (unsigned int i=0; i<n_chunks; ++i)
2725  {
2726  __m128 u0 = in[4*i+0].data;
2727  __m128 u1 = in[4*i+1].data;
2728  __m128 u2 = in[4*i+2].data;
2729  __m128 u3 = in[4*i+3].data;
2730  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2731  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2732  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2733  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2734  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2735  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2736  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2737  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2738 
2739  // Cannot use the same store instructions in both paths of the 'if'
2740  // because the compiler cannot know that there is no aliasing between
2741  // pointers
2742  if (add_into)
2743  {
2744  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2745  _mm_storeu_ps(out+4*i+offsets[0], u0);
2746  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2747  _mm_storeu_ps(out+4*i+offsets[1], u1);
2748  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2749  _mm_storeu_ps(out+4*i+offsets[2], u2);
2750  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2751  _mm_storeu_ps(out+4*i+offsets[3], u3);
2752  }
2753  else
2754  {
2755  _mm_storeu_ps(out+4*i+offsets[0], u0);
2756  _mm_storeu_ps(out+4*i+offsets[1], u1);
2757  _mm_storeu_ps(out+4*i+offsets[2], u2);
2758  _mm_storeu_ps(out+4*i+offsets[3], u3);
2759  }
2760  }
2761  if (add_into)
2762  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2763  for (unsigned int v=0; v<4; ++v)
2764  out[offsets[v]+i] += in[i][v];
2765  else
2766  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2767  for (unsigned int v=0; v<4; ++v)
2768  out[offsets[v]+i] = in[i][v];
2769 }
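A typical round trip through the two float helpers might look as follows (a sketch with made-up sizes and offsets; any lane-wise computation can stand in for the squaring):

#include <deal.II/base/vectorization.h>

void round_trip_demo ()
{
  const unsigned int n_entries = 5;   // deliberately not a multiple of 4
  float src[4*n_entries], dst[4*n_entries];
  for (unsigned int i=0; i<4*n_entries; ++i)
    src[i] = float(i);
  const unsigned int offsets[4] = {0, n_entries, 2*n_entries, 3*n_entries};

  dealii::VectorizedArray<float> work[n_entries];
  dealii::vectorized_load_and_transpose (n_entries, src, offsets, work);
  for (unsigned int i=0; i<n_entries; ++i)
    work[i] = work[i] * work[i];      // some vectorized computation
  dealii::vectorized_transpose_and_store (false, n_entries, work, offsets, dst);
  // dst[offsets[v]+i] now holds the square of src[offsets[v]+i]
}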
2770 
2771 
2772 
2773 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2774 
2775 
2781 template <typename Number>
2782 inline DEAL_II_ALWAYS_INLINE
2783 VectorizedArray<Number>
2784 operator + (const VectorizedArray<Number> &u,
2785  const VectorizedArray<Number> &v)
2786 {
2787  VectorizedArray<Number> tmp = u;
2788  return tmp+=v;
2789 }
2790 
2796 template <typename Number>
2797 inline DEAL_II_ALWAYS_INLINE
2798 VectorizedArray<Number>
2799 operator - (const VectorizedArray<Number> &u,
2800  const VectorizedArray<Number> &v)
2801 {
2802  VectorizedArray<Number> tmp = u;
2803  return tmp-=v;
2804 }
2805 
2811 template <typename Number>
2812 inline DEAL_II_ALWAYS_INLINE
2813 VectorizedArray<Number>
2814 operator * (const VectorizedArray<Number> &u,
2815  const VectorizedArray<Number> &v)
2816 {
2817  VectorizedArray<Number> tmp = u;
2818  return tmp*=v;
2819 }
2820 
2826 template <typename Number>
2827 inline DEAL_II_ALWAYS_INLINE
2828 VectorizedArray<Number>
2829 operator / (const VectorizedArray<Number> &u,
2830  const VectorizedArray<Number> &v)
2831 {
2832  VectorizedArray<Number> tmp = u;
2833  return tmp/=v;
2834 }
2835 
2842 template <typename Number>
2843 inline DEAL_II_ALWAYS_INLINE
2844 VectorizedArray<Number>
2845 operator + (const Number &u,
2846  const VectorizedArray<Number> &v)
2847 {
2848  VectorizedArray<Number> tmp;
2849  tmp = u;
2850  return tmp+=v;
2851 }
2852 
2861 inline DEAL_II_ALWAYS_INLINE
2862 VectorizedArray<float>
2863 operator + (const double &u,
2864  const VectorizedArray<float> &v)
2865 {
2866  VectorizedArray<float> tmp;
2867  tmp = u;
2868  return tmp+=v;
2869 }
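This overload exists so that double constants (the default type of literals such as 2.0) can be combined with float arrays without a compile error; the result stays single precision. For instance (illustrative):

void mixed_precision_demo ()
{
  dealii::VectorizedArray<float> v;
  v = 1.f;
  // the double constant is converted to float and broadcast; w holds
  // 3.f in every lane and is still a VectorizedArray<float>
  dealii::VectorizedArray<float> w = 2.0 + v;
  (void)w;
}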
2870 
2877 template <typename Number>
2878 inline DEAL_II_ALWAYS_INLINE
2879 VectorizedArray<Number>
2880 operator + (const VectorizedArray<Number> &v,
2881  const Number &u)
2882 {
2883  return u + v;
2884 }
2885 
2894 inline DEAL_II_ALWAYS_INLINE
2895 VectorizedArray<float>
2896 operator + (const VectorizedArray<float> &v,
2897  const double &u)
2898 {
2899  return u + v;
2900 }
2901 
2908 template <typename Number>
2909 inline DEAL_II_ALWAYS_INLINE
2910 VectorizedArray<Number>
2911 operator - (const Number &u,
2912  const VectorizedArray<Number> &v)
2913 {
2914  VectorizedArray<Number> tmp;
2915  tmp = u;
2916  return tmp-=v;
2917 }
2918 
2927 inline DEAL_II_ALWAYS_INLINE
2928 VectorizedArray<float>
2929 operator - (const double &u,
2930  const VectorizedArray<float> &v)
2931 {
2932  VectorizedArray<float> tmp;
2933  tmp = float(u);
2934  return tmp-=v;
2935 }
2936 
2943 template <typename Number>
2944 inline DEAL_II_ALWAYS_INLINE
2945 VectorizedArray<Number>
2946 operator - (const VectorizedArray<Number> &v,
2947  const Number &u)
2948 {
2949  VectorizedArray<Number> tmp;
2950  tmp = u;
2951  return v-tmp;
2952 }
2953 
2962 inline DEAL_II_ALWAYS_INLINE
2963 VectorizedArray<float>
2964 operator - (const VectorizedArray<float> &v,
2965  const double &u)
2966 {
2967  VectorizedArray<float> tmp;
2968  tmp = float(u);
2969  return v-tmp;
2970 }
2971 
2978 template <typename Number>
2979 inline DEAL_II_ALWAYS_INLINE
2980 VectorizedArray<Number>
2981 operator * (const Number &u,
2982  const VectorizedArray<Number> &v)
2983 {
2984  VectorizedArray<Number> tmp;
2985  tmp = u;
2986  return tmp*=v;
2987 }
2988 
2997 inline DEAL_II_ALWAYS_INLINE
2998 VectorizedArray<float>
2999 operator * (const double &u,
3000  const VectorizedArray<float> &v)
3001 {
3002  VectorizedArray<float> tmp;
3003  tmp = float(u);
3004  return tmp*=v;
3005 }
3006 
3013 template <typename Number>
3014 inline DEAL_II_ALWAYS_INLINE
3015 VectorizedArray<Number>
3016 operator * (const VectorizedArray<Number> &v,
3017  const Number &u)
3018 {
3019  return u * v;
3020 }
3021 
3030 inline DEAL_II_ALWAYS_INLINE
3031 VectorizedArray<float>
3032 operator * (const VectorizedArray<float> &v,
3033  const double &u)
3034 {
3035  return u * v;
3036 }
3037 
3044 template <typename Number>
3045 inline DEAL_II_ALWAYS_INLINE
3046 VectorizedArray<Number>
3047 operator / (const Number &u,
3048  const VectorizedArray<Number> &v)
3049 {
3050  VectorizedArray<Number> tmp;
3051  tmp = u;
3052  return tmp/=v;
3053 }
3054 
3063 inline DEAL_II_ALWAYS_INLINE
3064 VectorizedArray<float>
3065 operator / (const double &u,
3066  const VectorizedArray<float> &v)
3067 {
3068  VectorizedArray<float> tmp;
3069  tmp = float(u);
3070  return tmp/=v;
3071 }
3072 
3079 template <typename Number>
3080 inline DEAL_II_ALWAYS_INLINE
3081 VectorizedArray<Number>
3082 operator / (const VectorizedArray<Number> &v,
3083  const Number &u)
3084 {
3085  VectorizedArray<Number> tmp;
3086  tmp = u;
3087  return v/tmp;
3088 }
3089 
3098 inline DEAL_II_ALWAYS_INLINE
3099 VectorizedArray<float>
3100 operator / (const VectorizedArray<float> &v,
3101  const double &u)
3102 {
3103  VectorizedArray<float> tmp;
3104  tmp = float(u);
3105  return v/tmp;
3106 }
3107 
3113 template <typename Number>
3114 inline DEAL_II_ALWAYS_INLINE
3115 VectorizedArray<Number>
3116 operator + (const VectorizedArray<Number> &u)
3117 {
3118  return u;
3119 }
3120 
3126 template <typename Number>
3127 inline DEAL_II_ALWAYS_INLINE
3128 VectorizedArray<Number>
3129 operator - (const VectorizedArray<Number> &u)
3130 {
3131  // to get a negative sign, subtract the input from zero (could also
3132  // multiply by -1, but this one is slightly simpler)
3133  return VectorizedArray<Number>()-u;
3134 }
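Taken together, these overloads let scalars and vectorized arrays mix freely, so code written against VectorizedArray reads like scalar code. A hypothetical helper as a sketch (axpy is not part of this header):

template <typename Number>
dealii::VectorizedArray<Number>
axpy (const Number a,
      const dealii::VectorizedArray<Number> &x,
      const dealii::VectorizedArray<Number> &y)
{
  // 'a * x' picks the operator*(Number, VectorizedArray<Number>) defined
  // above, broadcasting a into all lanes; '+' then adds lane-wise
  return a * x + y;
}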
3135 
3136 
3137 DEAL_II_NAMESPACE_CLOSE
3138 
3139 
3146 namespace std
3147 {
3155  template <typename Number>
3156  inline
3157  ::VectorizedArray<Number>
3158  sin (const ::VectorizedArray<Number> &x)
3159  {
3160  // put values in an array and later read that array back with an unaligned
3161  // load. This should save some instructions as compared to directly
3162  // setting the individual elements and also circumvents a compiler
3163  // optimization bug in gcc-4.6 with SSE2 (see also the deal.II developers
3164  // list from April 2014, topic "matrix_free/step-48 Test").
3165  Number values[::VectorizedArray<Number>::n_array_elements];
3166  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3167  values[i] = std::sin(x[i]);
3168  ::VectorizedArray<Number> out;
3169  out.load(&values[0]);
3170  return out;
3171  }
3172 
3173 
3174 
3182  template <typename Number>
3183  inline
3184  ::VectorizedArray<Number>
3185  cos (const ::VectorizedArray<Number> &x)
3186  {
3187  Number values[::VectorizedArray<Number>::n_array_elements];
3188  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3189  values[i] = std::cos(x[i]);
3190  ::VectorizedArray<Number> out;
3191  out.load(&values[0]);
3192  return out;
3193  }
3194 
3195 
3196 
3204  template <typename Number>
3205  inline
3206  ::VectorizedArray<Number>
3207  tan (const ::VectorizedArray<Number> &x)
3208  {
3209  Number values[::VectorizedArray<Number>::n_array_elements];
3210  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3211  values[i] = std::tan(x[i]);
3212  ::VectorizedArray<Number> out;
3213  out.load(&values[0]);
3214  return out;
3215  }
3216 
3217 
3218 
3226  template <typename Number>
3227  inline
3228  ::VectorizedArray<Number>
3229  exp (const ::VectorizedArray<Number> &x)
3230  {
3231  Number values[::VectorizedArray<Number>::n_array_elements];
3232  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3233  values[i] = std::exp(x[i]);
3234  ::VectorizedArray<Number> out;
3235  out.load(&values[0]);
3236  return out;
3237  }
3238 
3239 
3240 
3248  template <typename Number>
3249  inline
3250  ::VectorizedArray<Number>
3251  log (const ::VectorizedArray<Number> &x)
3252  {
3253  Number values[::VectorizedArray<Number>::n_array_elements];
3254  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3255  values[i] = std::log(x[i]);
3256  ::VectorizedArray<Number> out;
3257  out.load(&values[0]);
3258  return out;
3259  }
3260 
3261 
3262 
3270  template <typename Number>
3271  inline
3272  ::VectorizedArray<Number>
3273  sqrt (const ::VectorizedArray<Number> &x)
3274  {
3275  return x.get_sqrt();
3276  }
3277 
3278 
3279 
3287  template <typename Number>
3288  inline
3289  ::VectorizedArray<Number>
3290  pow (const ::VectorizedArray<Number> &x,
3291  const Number p)
3292  {
3293  Number values[::VectorizedArray<Number>::n_array_elements];
3294  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3295  values[i] = std::pow(x[i], p);
3296  ::VectorizedArray<Number> out;
3297  out.load(&values[0]);
3298  return out;
3299  }
3300 
3301 
3302 
3310  template <typename Number>
3311  inline
3312  ::VectorizedArray<Number>
3313  abs (const ::VectorizedArray<Number> &x)
3314  {
3315  return x.get_abs();
3316  }
3317 
3318 
3319 
3327  template <typename Number>
3328  inline
3329  ::VectorizedArray<Number>
3330  max (const ::VectorizedArray<Number> &x,
3331  const ::VectorizedArray<Number> &y)
3332  {
3333  return x.get_max(y);
3334  }
3335 
3336 
3337 
3345  template <typename Number>
3346  inline
3347  ::VectorizedArray<Number>
3348  min (const ::VectorizedArray<Number> &x,
3349  const ::VectorizedArray<Number> &y)
3350  {
3351  return x.get_min(y);
3352  }
3353 
3354 }
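Usage of these overloads mirrors the scalar std functions (a sketch; the values are illustrative):

#include <deal.II/base/vectorization.h>
#include <cmath>

void std_overload_demo ()
{
  dealii::VectorizedArray<double> x;
  x = 0.25;                        // every lane holds 0.25
  const auto s = std::sin (x);     // element-wise, via the scalar loop above
  const auto r = std::sqrt (x);    // maps to hardware sqrt where available
  const auto m = std::max (s, r);  // lane-wise maximum
  (void)m;
}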
3355 
3356 #endif