Reference documentation for deal.II version Git 20e5cff 2017-01-17 12:52:29 -0500
vectorization.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2015 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#ifndef dealii__vectorization_h
#define dealii__vectorization_h

#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>

#include <cmath>

// Note:
// The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
// according to the following scheme
// #ifdef __AVX512F__
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
// #elif defined (__AVX__)
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
// #elif defined (__SSE2__)
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
// #else
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
// #endif
// In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
// 'check_01_cpu_features.cmake', ensures that these features are not only
// present in the compilation unit but also working properly.

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
#include <immintrin.h>
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
#include <emmintrin.h>
#endif
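
// Example of how the resulting vector width can be queried at compile time
// (an illustrative sketch, not part of this header): the VectorizedArray
// class defined below exposes the lane count as n_array_elements, e.g. 8
// doubles with AVX-512, 4 with AVX, 2 with SSE2, and 1 without vectorization.
//
//   static_assert (dealii::VectorizedArray<double>::n_array_elements >= 1,
//                  "the scalar fallback always provides one lane");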


// forward declarations
DEAL_II_NAMESPACE_OPEN
template <typename Number> class VectorizedArray;
template <typename T> struct EnableIfScalar;
DEAL_II_NAMESPACE_CLOSE


namespace std
{
  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
  sqrt(const ::dealii::VectorizedArray<Number> &);
  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
  abs(const ::dealii::VectorizedArray<Number> &);
  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
  max(const ::dealii::VectorizedArray<Number> &, const ::dealii::VectorizedArray<Number> &);
  template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
  min(const ::dealii::VectorizedArray<Number> &, const ::dealii::VectorizedArray<Number> &);
}


DEAL_II_NAMESPACE_OPEN


// Enable the EnableIfScalar type trait for VectorizedArray<Number> such
// that it can be used as a Number type in Tensor<rank,dim,Number>, etc.

template <typename Number>
struct EnableIfScalar<VectorizedArray<Number> >
{
  typedef VectorizedArray<Number> type;
};


// General template for VectorizedArray: holds a single scalar and falls
// back to plain scalar arithmetic. The architecture-specific
// specializations below override this with SIMD registers.
template <typename Number>
class VectorizedArray
{
public:
  // Number of elements in the array (one for the scalar fallback).
  static const unsigned int n_array_elements = 1;

  // POD means that there should be no user-defined constructors, destructors
  // and copy functions (the standard is somewhat relaxed in C++11, though).

  // Broadcast a scalar to all elements.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const Number scalar)
  {
    data = scalar;
    return *this;
  }

  // Read-write access to a single element.
  DEAL_II_ALWAYS_INLINE
  Number &
  operator [] (const unsigned int comp)
  {
    (void)comp;
    AssertIndexRange (comp, 1);
    return data;
  }

  // Read-only access to a single element.
  DEAL_II_ALWAYS_INLINE
  const Number &
  operator [] (const unsigned int comp) const
  {
    (void)comp;
    AssertIndexRange (comp, 1);
    return data;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    data += vec.data;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
    data -= vec.data;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
    data *= vec.data;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
    data /= vec.data;
    return *this;
  }

  // Load n_array_elements values from memory, starting at ptr.
  DEAL_II_ALWAYS_INLINE
  void load (const Number *ptr)
  {
    data = *ptr;
  }

  // Store n_array_elements values to memory, starting at ptr.
  DEAL_II_ALWAYS_INLINE
  void store (Number *ptr) const
  {
    *ptr = data;
  }

  // Load element v from base_ptr[offsets[v]].
  DEAL_II_ALWAYS_INLINE
  void gather (const Number       *base_ptr,
               const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  // Store element v to base_ptr[offsets[v]].
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                Number             *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }

  // The actual data field.
  Number data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = std::sqrt(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    VectorizedArray res;
    res.data = std::fabs(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = std::max (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = std::min (data, other.data);
    return res;
  }

  // Make the std:: overloads declared above friends so they can call the
  // private member functions.
  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};



// Create a vectorized array with all entries set to the given scalar.
template <typename Number>
inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
make_vectorized_array (const Number &u)
{
  VectorizedArray<Number> result;
  result = u;
  return result;
}
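
// Usage sketch (illustration only, not part of the library): VectorizedArray
// acts like a scalar whose operations apply to all lanes at once. The names
// below are local to this example.
//
//   using namespace dealii;
//
//   void example (const double *src, double *dst)
//   {
//     VectorizedArray<double> x, y = make_vectorized_array (3.0);
//     x.load (src);                  // read n_array_elements doubles
//     x += y;                        // lane-wise addition
//     x = std::sqrt (x);             // overload declared at the top of this file
//     const double first = x[0];     // access an individual lane
//     (void)first;
//     x.store (dst);                 // write n_array_elements doubles
//   }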



// Load n_array_elements*n_entries values from n_array_elements source
// arrays (starting at in+offsets[v]) and transpose them into the array of
// vectorized data, so that out[i][v] == in[offsets[v]+i].
template <typename Number>
inline
void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const Number            *in,
                              const unsigned int      *offsets,
                              VectorizedArray<Number> *out)
{
  for (unsigned int i=0; i<n_entries; ++i)
    for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
      out[i][v] = in[offsets[v]+i];
}
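
// Concrete example (a sketch; values chosen for illustration): with
// n_array_elements == 4, n_entries == 2, and offsets = {0, 10, 20, 30},
//
//   const unsigned int offsets[4] = {0, 10, 20, 30};
//   VectorizedArray<double> out[2];
//   vectorized_load_and_transpose (2, in, offsets, out);
//
// reads in[0..1], in[10..11], in[20..21], in[30..31] and yields
//   out[0] = {in[0], in[10], in[20], in[30]},
//   out[1] = {in[1], in[11], in[21], in[31]}.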



// The converse operation: transpose the array of vectorized data back into
// n_array_elements separate arrays starting at out+offsets[v], either
// adding into (add_into == true) or overwriting the existing values.
template <typename Number>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<Number> *in,
                               const unsigned int            *offsets,
                               Number                        *out)
{
  if (add_into)
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] = in[i][v];
}
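
// Round-trip sketch (illustration only): the two functions are inverses
// when add_into == false, so for any valid, non-overlapping offsets
//
//   VectorizedArray<double> tmp[n];
//   vectorized_load_and_transpose (n, data, offsets, tmp);
//   vectorized_transpose_and_store (false, n, tmp, offsets, data);
//
// leaves data unchanged, while passing add_into == true instead doubles the
// entries addressed by offsets.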



// for safety, also check that __AVX512F__ is defined in case the user manually
// set some conflicting compile flags which prevent compilation

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)

// Specialization of VectorizedArray for double and AVX-512.
template <>
class VectorizedArray<double>
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m512d
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm512_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm512_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm512_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm512_div_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm512_loadu_pd (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm512_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm512_i32gather_pd(index, base_ptr, 8);
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    for (unsigned int i=0; i<8; ++i)
      for (unsigned int j=i+1; j<8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
  }

  // The actual data field.
  __m512d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm512_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all mantissa and exponent bits unchanged but force the sign
    // bit to +. Since there is no andnot for AVX-512 floating point types, we
    // interpret the data as 64 bit integers and do the andnot on those types
    // (note that andnot is a bitwise operation so the data type does not
    // matter)
    __m512d mask = _mm512_set1_pd (-0.);
    VectorizedArray res;
    res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};
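
// A scalar sketch of the sign-bit trick used in get_abs() above
// (illustration only, not part of the library): -0.0 has only the sign bit
// set, so an andnot with it clears the sign bit and leaves mantissa and
// exponent untouched, i.e. computes fabs.
//
//   #include <cstdint>
//   #include <cstring>
//
//   double abs_via_andnot (double x)
//   {
//     const double  mask = -0.;
//     std::uint64_t xi, mi;
//     std::memcpy (&xi, &x, sizeof(double));
//     std::memcpy (&mi, &mask, sizeof(double));
//     xi = ~mi & xi;              // andnot(mask, x): clear the sign bit
//     std::memcpy (&x, &xi, sizeof(double));
//     return x;                   // == std::fabs(x)
//   }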



// Specialization for double and AVX-512: transpose in 256 bit halves rather
// than across the full 512 bit register (a worked example of the 4x4 kernel
// follows below).
template <>
inline
void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double            *in,
                              const unsigned int      *offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      const double *in0 = in + offsets[0+outer];
      const double *in1 = in + offsets[1+outer];
      const double *in2 = in + offsets[2+outer];
      const double *in3 = in + offsets[3+outer];

      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = _mm256_loadu_pd(in0+4*i);
          __m256d u1 = _mm256_loadu_pd(in1+4*i);
          __m256d u2 = _mm256_loadu_pd(in2+4*i);
          __m256d u3 = _mm256_loadu_pd(in3+4*i);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
          *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
        }
      // remainder loop for the entries not divisible by four
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<4; ++v)
          out[i][outer+v] = in[offsets[v+outer]+i];
    }
}
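
// Worked example of the 4x4 transpose kernel above (illustration only): for
// rows u0 = {a0,a1,a2,a3}, u1 = {b0,...}, u2 = {c0,...}, u3 = {d0,...},
//
//   t0 = permute2f128(u0,u2,0x20) = {a0,a1,c0,c1}
//   t1 = permute2f128(u1,u3,0x20) = {b0,b1,d0,d1}
//   t2 = permute2f128(u0,u2,0x31) = {a2,a3,c2,c3}
//   t3 = permute2f128(u1,u3,0x31) = {b2,b3,d2,d3}
//
//   unpacklo(t0,t1) = {a0,b0,c0,d0}    unpackhi(t0,t1) = {a1,b1,c1,d1}
//   unpacklo(t2,t3) = {a2,b2,c2,d2}    unpackhi(t2,t3) = {a3,b3,c3,d3}
//
// which is exactly the transposed 4x4 block.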



template <>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int            *offsets,
                               double                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  // do not do a full 8x8 transpose because the code would be too long and
  // will most likely not pay off. rather do the transposition on the
  // half-size vector type, __m256d
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      double *out0 = out + offsets[0+outer];
      double *out1 = out + offsets[1+outer];
      double *out2 = out + offsets[2+outer];
      double *out3 = out + offsets[3+outer];
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
          __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
          __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
          __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          __m256d res0 = _mm256_unpacklo_pd (t0, t1);
          __m256d res1 = _mm256_unpackhi_pd (t0, t1);
          __m256d res2 = _mm256_unpacklo_pd (t2, t3);
          __m256d res3 = _mm256_unpackhi_pd (t2, t3);

          // Cannot use the same store instructions in both paths of the 'if'
          // because the compiler cannot know that there is no aliasing between
          // pointers
          if (add_into)
            {
              res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
              _mm256_storeu_pd(out0+4*i, res0);
              res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
              _mm256_storeu_pd(out1+4*i, res1);
              res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
              _mm256_storeu_pd(out2+4*i, res2);
              res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
              _mm256_storeu_pd(out3+4*i, res3);
            }
          else
            {
              _mm256_storeu_pd(out0+4*i, res0);
              _mm256_storeu_pd(out1+4*i, res1);
              _mm256_storeu_pd(out2+4*i, res2);
              _mm256_storeu_pd(out3+4*i, res3);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}



// Specialization of VectorizedArray for float and AVX-512.
template <>
class VectorizedArray<float>
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 16;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 16);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 16);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m512
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm512_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm512_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm512_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm512_div_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm512_loadu_ps (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm512_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
    // unfortunately, there does not appear to be a 512 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    data = _mm512_i32gather_ps(index, base_ptr, 4);
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<16; ++i)
      for (unsigned int j=i+1; j<16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // unfortunately, there does not appear to be a 512 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
  }

  // The actual data field.
  __m512 data;

private:

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm512_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all mantissa and exponent bits unchanged but force the sign
    // bit to +. Since there is no andnot for AVX-512 floating point types, we
    // interpret the data as 32 bit integers and do the andnot on those types
    // (note that andnot is a bitwise operation so the data type does not
    // matter)
    __m512 mask = _mm512_set1_ps (-0.f);
    VectorizedArray res;
    res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};



// Specialization for float and AVX-512: transpose in 256 bit halves.
template <>
inline
void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float            *in,
                              const unsigned int     *offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
          __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
          __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
          __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
          __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
          __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
          __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
          __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
          // To avoid warnings about uninitialized variables, need to initialize
          // one variable with zero before using it.
          __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
          t0 = _mm256_insertf128_ps (t3, u0, 0);
          t0 = _mm256_insertf128_ps (t0, u4, 1);
          t1 = _mm256_insertf128_ps (t3, u1, 0);
          t1 = _mm256_insertf128_ps (t1, u5, 1);
          t2 = _mm256_insertf128_ps (t3, u2, 0);
          t2 = _mm256_insertf128_ps (t2, u6, 1);
          t3 = _mm256_insertf128_ps (t3, u3, 0);
          t3 = _mm256_insertf128_ps (t3, u7, 1);
          __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
          __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
          __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
          __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
          *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
          *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
          *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
          *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
        }
      // remainder loop for the entries not divisible by four
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<8; ++v)
          out[i][v+outer] = in[offsets[v+outer]+i];
    }
}



template <>
inline
void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int           *offsets,
                               float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
          __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
          __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
          __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
          __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
          __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
          __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
          __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
          u0 = _mm256_shuffle_ps (t0, t2, 0x88);
          u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
          u2 = _mm256_shuffle_ps (t1, t3, 0x88);
          u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
          __m128 res0 = _mm256_extractf128_ps (u0, 0);
          __m128 res4 = _mm256_extractf128_ps (u0, 1);
          __m128 res1 = _mm256_extractf128_ps (u1, 0);
          __m128 res5 = _mm256_extractf128_ps (u1, 1);
          __m128 res2 = _mm256_extractf128_ps (u2, 0);
          __m128 res6 = _mm256_extractf128_ps (u2, 1);
          __m128 res3 = _mm256_extractf128_ps (u3, 0);
          __m128 res7 = _mm256_extractf128_ps (u3, 1);

          // Cannot use the same store instructions in both paths of the 'if'
          // because the compiler cannot know that there is no aliasing between
          // pointers
          if (add_into)
            {
              res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
          else
            {
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}



#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)

// Specialization of VectorizedArray for double and AVX.
template <>
class VectorizedArray<double>
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m256d
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm256_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm256_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm256_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm256_div_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm256_loadu_pd (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm256_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // unfortunately, there does not appear to be a 128 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m128 index_val = _mm_loadu_ps((const float *)offsets);
    const __m128i index = *((__m128i *)(&index_val));
    data = _mm256_i32gather_pd(base_ptr, index, 8);
#else
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    for (unsigned int i=0; i<4; ++i)
      for (unsigned int j=i+1; j<4; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // no scatter operation in AVX/AVX2
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }

  // The actual data field.
  __m256d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm256_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all mantissa and exponent bits unchanged but force the sign
    // bit to +.
    __m256d mask = _mm256_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};



// Specialization for double and AVX.
template <>
inline
void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double            *in,
                              const unsigned int      *offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  const double *in0 = in + offsets[0];
  const double *in1 = in + offsets[1];
  const double *in2 = in + offsets[2];
  const double *in3 = in + offsets[3];

  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = _mm256_loadu_pd(in0+4*i);
      __m256d u1 = _mm256_loadu_pd(in1+4*i);
      __m256d u2 = _mm256_loadu_pd(in2+4*i);
      __m256d u3 = _mm256_loadu_pd(in3+4*i);
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
      out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
      out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
      out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}



template <>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int            *offsets,
                               double                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  double *out0 = out + offsets[0];
  double *out1 = out + offsets[1];
  double *out2 = out + offsets[2];
  double *out3 = out + offsets[3];
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = in[4*i+0].data;
      __m256d u1 = in[4*i+1].data;
      __m256d u2 = in[4*i+2].data;
      __m256d u3 = in[4*i+3].data;
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd (t0, t1);
      __m256d res1 = _mm256_unpackhi_pd (t0, t1);
      __m256d res2 = _mm256_unpacklo_pd (t2, t3);
      __m256d res3 = _mm256_unpackhi_pd (t2, t3);

      // Cannot use the same store instructions in both paths of the 'if'
      // because the compiler cannot know that there is no aliasing between
      // pointers
      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
          _mm256_storeu_pd(out0+4*i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
          _mm256_storeu_pd(out1+4*i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
          _mm256_storeu_pd(out2+4*i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
          _mm256_storeu_pd(out3+4*i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0+4*i, res0);
          _mm256_storeu_pd(out1+4*i, res1);
          _mm256_storeu_pd(out2+4*i, res2);
          _mm256_storeu_pd(out3+4*i, res3);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] = in[i][v];
}



// Specialization of VectorizedArray for float and AVX.
template <>
class VectorizedArray<float>
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m256
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm256_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm256_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm256_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm256_div_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm256_loadu_ps (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm256_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm256_i32gather_ps(base_ptr, index, 4);
#else
    for (unsigned int i=0; i<8; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<8; ++i)
      for (unsigned int j=i+1; j<8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // no scatter operation in AVX/AVX2
    for (unsigned int i=0; i<8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }

  // The actual data field.
  __m256 data;

private:

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm256_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all mantissa and exponent bits unchanged but force the sign
    // bit to +.
    __m256 mask = _mm256_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};



// Specialization for float and AVX.
template <>
inline
void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float            *in,
                              const unsigned int     *offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
      __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
      __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
      __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
      // To avoid warnings about uninitialized variables, need to initialize
      // one variable with zero before using it.
      __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
      t0 = _mm256_insertf128_ps (t3, u0, 0);
      t0 = _mm256_insertf128_ps (t0, u4, 1);
      t1 = _mm256_insertf128_ps (t3, u1, 0);
      t1 = _mm256_insertf128_ps (t1, u5, 1);
      t2 = _mm256_insertf128_ps (t3, u2, 0);
      t2 = _mm256_insertf128_ps (t2, u6, 1);
      t3 = _mm256_insertf128_ps (t3, u3, 0);
      t3 = _mm256_insertf128_ps (t3, u7, 1);
      __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
      __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
      __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
      __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
      out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<8; ++v)
      out[i][v] = in[offsets[v]+i];
}



template <>
inline
void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int           *offsets,
                               float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256 u0 = in[4*i+0].data;
      __m256 u1 = in[4*i+1].data;
      __m256 u2 = in[4*i+2].data;
      __m256 u3 = in[4*i+3].data;
      __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
      u0 = _mm256_shuffle_ps (t0, t2, 0x88);
      u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
      u2 = _mm256_shuffle_ps (t1, t3, 0x88);
      u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps (u0, 0);
      __m128 res4 = _mm256_extractf128_ps (u0, 1);
      __m128 res1 = _mm256_extractf128_ps (u1, 0);
      __m128 res5 = _mm256_extractf128_ps (u1, 1);
      __m128 res2 = _mm256_extractf128_ps (u2, 0);
      __m128 res6 = _mm256_extractf128_ps (u2, 1);
      __m128 res3 = _mm256_extractf128_ps (u3, 0);
      __m128 res7 = _mm256_extractf128_ps (u3, 1);

      // Cannot use the same store instructions in both paths of the 'if'
      // because the compiler cannot know that there is no aliasing between
      // pointers
      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] = in[i][v];
}



// for safety, also check that __SSE2__ is defined in case the user manually
// set some conflicting compile flags which prevent compilation

#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)

// Specialization of VectorizedArray for double and SSE2.
template <>
class VectorizedArray<double>
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 2;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 2);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 2);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm_div_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm_loadu_pd (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<2; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    Assert(offsets[0] != offsets[1],
           ExcMessage("Result of scatter undefined if two offset elements"
                      " point to the same position"));

    for (unsigned int i=0; i<2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }

  // The actual data field.
  __m128d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all mantissa and exponent bits unchanged but force the sign
    // bit to +.
    __m128d mask = _mm_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};



// Specialization for double and SSE2.
template <>
inline
void vectorized_load_and_transpose(const unsigned int       n_entries,
                                   const double            *in,
                                   const unsigned int      *offsets,
                                   VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/2;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
      __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
      out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
      out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
    }
  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<2; ++v)
      out[i][v] = in[offsets[v]+i];
}



template <>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int            *offsets,
                               double                        *out)
{
  const unsigned int n_chunks = n_entries/2;
  if (add_into)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
          _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] += in[i][v];
    }
  else
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], res0);
          _mm_storeu_pd(out+2*i+offsets[1], res1);
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] = in[i][v];
    }
}



// Specialization of VectorizedArray for float and SSE2.
template <>
class VectorizedArray<float>
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm_div_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm_loadu_ps (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<4; ++i)
      for (unsigned int j=i+1; j<4; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }

  // The actual data field.
  __m128 data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all mantissa and exponent bits unchanged but force the sign
    // bit to +.
    __m128 mask = _mm_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};
2613 
2614 
2615 
2619 template <>
2620 inline
2621 void vectorized_load_and_transpose(const unsigned int n_entries,
2622  const float *in,
2623  const unsigned int *offsets,
2624  VectorizedArray<float> *out)
2625 {
2626  const unsigned int n_chunks = n_entries/4;
2627  for (unsigned int i=0; i<n_chunks; ++i)
2628  {
2629  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
2630  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
2631  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
2632  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
2633  __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
2634  __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
2635  __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
2636  __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
2637  out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
2638  out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
2639  out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
2640  out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
2641  }
2642  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2643  for (unsigned int v=0; v<4; ++v)
2644  out[i][v] = in[offsets[v]+i];
2645 }
2646 
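A usage sketch for the specialization above (array sizes and offsets invented for illustration): read n_entries = 8 consecutive floats from each of four sub-arrays and transpose them so that afterwards out[i][v] == in[offsets[v]+i]:

#include <deal.II/base/vectorization.h>

void transpose_in(const float *in)
{
  const unsigned int offsets[4] = {0, 100, 200, 300};
  dealii::VectorizedArray<float> out[8];
  dealii::vectorized_load_and_transpose(8, in, offsets, out);
}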
2647 
2648 
2652 template <>
2653 inline
2654 void
2655 vectorized_transpose_and_store(const bool add_into,
2656  const unsigned int n_entries,
2657  const VectorizedArray<float> *in,
2658  const unsigned int *offsets,
2659  float *out)
2660 {
2661  const unsigned int n_chunks = n_entries/4;
2662  for (unsigned int i=0; i<n_chunks; ++i)
2663  {
2664  __m128 u0 = in[4*i+0].data;
2665  __m128 u1 = in[4*i+1].data;
2666  __m128 u2 = in[4*i+2].data;
2667  __m128 u3 = in[4*i+3].data;
2668  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2669  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2670  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2671  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2672  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2673  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2674  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2675  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2676 
2677  // Cannot use the same store instructions in both paths of the 'if'
2678  // because the compiler cannot know that there is no aliasing between
2679  // pointers
2680  if (add_into)
2681  {
2682  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2683  _mm_storeu_ps(out+4*i+offsets[0], u0);
2684  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2685  _mm_storeu_ps(out+4*i+offsets[1], u1);
2686  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2687  _mm_storeu_ps(out+4*i+offsets[2], u2);
2688  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2689  _mm_storeu_ps(out+4*i+offsets[3], u3);
2690  }
2691  else
2692  {
2693  _mm_storeu_ps(out+4*i+offsets[0], u0);
2694  _mm_storeu_ps(out+4*i+offsets[1], u1);
2695  _mm_storeu_ps(out+4*i+offsets[2], u2);
2696  _mm_storeu_ps(out+4*i+offsets[3], u3);
2697  }
2698  }
2699  if (add_into)
2700  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2701  for (unsigned int v=0; v<4; ++v)
2702  out[offsets[v]+i] += in[i][v];
2703  else
2704  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2705  for (unsigned int v=0; v<4; ++v)
2706  out[offsets[v]+i] = in[i][v];
2707 }
2708 
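The matching inverse, with the same invented offsets: write the eight vectorized entries back into the four strided sub-arrays. With add_into == true the values are accumulated, out[offsets[v]+i] += in[i][v], instead of assigned:

#include <deal.II/base/vectorization.h>

void transpose_out(const dealii::VectorizedArray<float> *in, float *out)
{
  const unsigned int offsets[4] = {0, 100, 200, 300};
  dealii::vectorized_transpose_and_store(true, 8, in, offsets, out);
}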
2709 
2710 
2711 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2712 
2713 
2719 template <typename Number>
2720 inline DEAL_II_ALWAYS_INLINE
2721 VectorizedArray<Number>
2722 operator + (const VectorizedArray<Number> &u,
2723  const VectorizedArray<Number> &v)
2724 {
2725  VectorizedArray<Number> tmp = u;
2726  return tmp+=v;
2727 }
2728 
2734 template <typename Number>
2735 inline DEAL_II_ALWAYS_INLINE
2736 VectorizedArray<Number>
2737 operator - (const VectorizedArray<Number> &u,
2738  const VectorizedArray<Number> &v)
2739 {
2740  VectorizedArray<Number> tmp = u;
2741  return tmp-=v;
2742 }
2743 
2749 template <typename Number>
2750 inline DEAL_II_ALWAYS_INLINE
2751 VectorizedArray<Number>
2752 operator * (const VectorizedArray<Number> &u,
2753  const VectorizedArray<Number> &v)
2754 {
2755  VectorizedArray<Number> tmp = u;
2756  return tmp*=v;
2757 }
2758 
2764 template <typename Number>
2765 inline DEAL_II_ALWAYS_INLINE
2766 VectorizedArray<Number>
2767 operator / (const VectorizedArray<Number> &u,
2768  const VectorizedArray<Number> &v)
2769 {
2770  VectorizedArray<Number> tmp = u;
2771  return tmp/=v;
2772 }
2773 
2780 template <typename Number>
2781 inline DEAL_II_ALWAYS_INLINE
2782 VectorizedArray<Number>
2783 operator + (const Number &u,
2784  const VectorizedArray<Number> &v)
2785 {
2786  VectorizedArray<Number> tmp;
2787  tmp = u;
2788  return tmp+=v;
2789 }
2790 
2799 inline DEAL_II_ALWAYS_INLINE
2800 VectorizedArray<float>
2801 operator + (const double &u,
2802  const VectorizedArray<float> &v)
2803 {
2804  VectorizedArray<float> tmp;
2805  tmp = u;
2806  return tmp+=v;
2807 }
2808 
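A sketch of why this extra overload (and its siblings below) exists: with only the generic template, the compiler would have to deduce Number as both double (from the scalar) and float (from the array), so the call would not compile. The overload converts the scalar to float and keeps the result in single precision:

#include <deal.II/base/vectorization.h>

dealii::VectorizedArray<float> shift(const dealii::VectorizedArray<float> &v)
{
  // resolves to operator+(const double &, const VectorizedArray<float> &)
  return 2.5 + v;
}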
2815 template <typename Number>
2816 inline DEAL_II_ALWAYS_INLINE
2817 VectorizedArray<Number>
2818 operator + (const VectorizedArray<Number> &v,
2819  const Number &u)
2820 {
2821  return u + v;
2822 }
2823 
2832 inline DEAL_II_ALWAYS_INLINE
2833 VectorizedArray<float>
2834 operator + (const VectorizedArray<float> &v,
2835  const double &u)
2836 {
2837  return u + v;
2838 }
2839 
2846 template <typename Number>
2847 inline DEAL_II_ALWAYS_INLINE
2848 VectorizedArray<Number>
2849 operator - (const Number &u,
2850  const VectorizedArray<Number> &v)
2851 {
2852  VectorizedArray<Number> tmp;
2853  tmp = u;
2854  return tmp-=v;
2855 }
2856 
2865 inline DEAL_II_ALWAYS_INLINE
2866 VectorizedArray<float>
2867 operator - (const double &u,
2868  const VectorizedArray<float> &v)
2869 {
2870  VectorizedArray<float> tmp;
2871  tmp = float(u);
2872  return tmp-=v;
2873 }
2874 
2881 template <typename Number>
2882 inline DEAL_II_ALWAYS_INLINE
2883 VectorizedArray<Number>
2884 operator - (const VectorizedArray<Number> &v,
2885  const Number &u)
2886 {
2887  VectorizedArray<Number> tmp;
2888  tmp = u;
2889  return v-tmp;
2890 }
2891 
2900 inline DEAL_II_ALWAYS_INLINE
2901 VectorizedArray<float>
2902 operator - (const VectorizedArray<float> &v,
2903  const double &u)
2904 {
2905  VectorizedArray<float> tmp;
2906  tmp = float(u);
2907  return v-tmp;
2908 }
2909 
2916 template <typename Number>
2917 inline DEAL_II_ALWAYS_INLINE
2918 VectorizedArray<Number>
2919 operator * (const Number &u,
2920  const VectorizedArray<Number> &v)
2921 {
2922  VectorizedArray<Number> tmp;
2923  tmp = u;
2924  return tmp*=v;
2925 }
2926 
2935 inline DEAL_II_ALWAYS_INLINE
2936 VectorizedArray<float>
2937 operator * (const double &u,
2938  const VectorizedArray<float> &v)
2939 {
2940  VectorizedArray<float> tmp;
2941  tmp = float(u);
2942  return tmp*=v;
2943 }
2944 
2951 template <typename Number>
2952 inline DEAL_II_ALWAYS_INLINE
2953 VectorizedArray<Number>
2954 operator * (const VectorizedArray<Number> &v,
2955  const Number &u)
2956 {
2957  return u * v;
2958 }
2959 
2968 inline DEAL_II_ALWAYS_INLINE
2969 VectorizedArray<float>
2970 operator * (const VectorizedArray<float> &v,
2971  const double &u)
2972 {
2973  return u * v;
2974 }
2975 
2982 template <typename Number>
2983 inline DEAL_II_ALWAYS_INLINE
2984 VectorizedArray<Number>
2985 operator / (const Number &u,
2986  const VectorizedArray<Number> &v)
2987 {
2988  VectorizedArray<Number> tmp;
2989  tmp = u;
2990  return tmp/=v;
2991 }
2992 
3001 inline DEAL_II_ALWAYS_INLINE
3002 VectorizedArray<float>
3003 operator / (const double &u,
3004  const VectorizedArray<float> &v)
3005 {
3006  VectorizedArray<float> tmp;
3007  tmp = float(u);
3008  return tmp/=v;
3009 }
3010 
3017 template <typename Number>
3018 inline DEAL_II_ALWAYS_INLINE
3019 VectorizedArray<Number>
3020 operator / (const VectorizedArray<Number> &v,
3021  const Number &u)
3022 {
3023  VectorizedArray<Number> tmp;
3024  tmp = u;
3025  return v/tmp;
3026 }
3027 
3036 inline DEAL_II_ALWAYS_INLINE
3037 VectorizedArray<float>
3038 operator / (const VectorizedArray<float> &v,
3039  const double &u)
3040 {
3041  VectorizedArray<float> tmp;
3042  tmp = float(u);
3043  return v/tmp;
3044 }
3045 
3051 template <typename Number>
3052 inline DEAL_II_ALWAYS_INLINE
3053 VectorizedArray<Number>
3054 operator + (const VectorizedArray<Number> &u)
3055 {
3056  return u;
3057 }
3058 
3064 template <typename Number>
3065 inline DEAL_II_ALWAYS_INLINE
3066 VectorizedArray<Number>
3067 operator - (const VectorizedArray<Number> &u)
3068 {
3069  // to get a negative sign, subtract the input from zero (could also
3070  // multiply by -1, but this one is slightly simpler)
3071  return VectorizedArray<Number>()-u;
3072 }
3073 
3074 
3075 DEAL_II_NAMESPACE_CLOSE
3076 
3077 
3084 namespace std
3085 {
3093  template <typename Number>
3094  inline
3095  ::VectorizedArray<Number>
3096  sin (const ::VectorizedArray<Number> &x)
3097  {
3098  // Put the values into an array and then read the array back with an
3099  // unaligned load. This should save some instructions compared to
3100  // setting the individual elements directly and also circumvents a
3101  // compiler optimization bug in gcc-4.6 with SSE2 (see also the deal.II
3102  // developers list from April 2014, topic "matrix_free/step-48 Test").
3103  Number values[::VectorizedArray<Number>::n_array_elements];
3104  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3105  values[i] = std::sin(x[i]);
3106  ::VectorizedArray<Number> out;
3107  out.load(&values[0]);
3108  return out;
3109  }
3110 
3111 
3112 
3120  template <typename Number>
3121  inline
3122  ::VectorizedArray<Number>
3123  cos (const ::VectorizedArray<Number> &x)
3124  {
3125  Number values[::VectorizedArray<Number>::n_array_elements];
3126  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3127  values[i] = std::cos(x[i]);
3128  ::VectorizedArray<Number> out;
3129  out.load(&values[0]);
3130  return out;
3131  }
3132 
3133 
3134 
3142  template <typename Number>
3143  inline
3144  ::VectorizedArray<Number>
3145  tan (const ::VectorizedArray<Number> &x)
3146  {
3147  Number values[::VectorizedArray<Number>::n_array_elements];
3148  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3149  values[i] = std::tan(x[i]);
3150  ::VectorizedArray<Number> out;
3151  out.load(&values[0]);
3152  return out;
3153  }
3154 
3155 
3156 
3164  template <typename Number>
3165  inline
3166  ::VectorizedArray<Number>
3167  exp (const ::VectorizedArray<Number> &x)
3168  {
3169  Number values[::VectorizedArray<Number>::n_array_elements];
3170  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3171  values[i] = std::exp(x[i]);
3172  ::VectorizedArray<Number> out;
3173  out.load(&values[0]);
3174  return out;
3175  }
3176 
3177 
3178 
3186  template <typename Number>
3187  inline
3188  ::VectorizedArray<Number>
3189  log (const ::VectorizedArray<Number> &x)
3190  {
3191  Number values[::VectorizedArray<Number>::n_array_elements];
3192  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3193  values[i] = std::log(x[i]);
3194  ::VectorizedArray<Number> out;
3195  out.load(&values[0]);
3196  return out;
3197  }
3198 
3199 
3200 
3208  template <typename Number>
3209  inline
3210  ::VectorizedArray<Number>
3211  sqrt (const ::VectorizedArray<Number> &x)
3212  {
3213  return x.get_sqrt();
3214  }
3215 
3216 
3217 
3225  template <typename Number>
3226  inline
3227  ::VectorizedArray<Number>
3228  pow (const ::VectorizedArray<Number> &x,
3229  const Number p)
3230  {
3231  Number values[::VectorizedArray<Number>::n_array_elements];
3232  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3233  values[i] = std::pow(x[i], p);
3234  ::VectorizedArray<Number> out;
3235  out.load(&values[0]);
3236  return out;
3237  }
3238 
3239 
3240 
3248  template <typename Number>
3249  inline
3250  ::VectorizedArray<Number>
3251  abs (const ::VectorizedArray<Number> &x)
3252  {
3253  return x.get_abs();
3254  }
3255 
3256 
3257 
3265  template <typename Number>
3266  inline
3267  ::VectorizedArray<Number>
3268  max (const ::VectorizedArray<Number> &x,
3269  const ::VectorizedArray<Number> &y)
3270  {
3271  return x.get_max(y);
3272  }
3273 
3274 
3275 
3283  template <typename Number>
3284  inline
3285  ::VectorizedArray<Number>
3286  min (const ::VectorizedArray<Number> &x,
3287  const ::VectorizedArray<Number> &y)
3288  {
3289  return x.get_min(y);
3290  }
3291 
3292 }
3293 
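Two usage sketches for the std overloads above (function names invented for illustration; assumes the header is included as deal.II/base/vectorization.h). The transcendental functions evaluate the scalar routine lane by lane and repack via load(), while min/max map to single SSE instructions for float:

#include <deal.II/base/vectorization.h>
#include <cmath>

dealii::VectorizedArray<double> sine(const dealii::VectorizedArray<double> &x)
{
  return std::sin(x); // per-lane std::sin(x[i]), repacked into one array
}

dealii::VectorizedArray<float> clamp01(const dealii::VectorizedArray<float> &x)
{
  dealii::VectorizedArray<float> lo, hi;
  lo = 0.f; // operator=(Number) broadcasts the scalar to all lanes
  hi = 1.f;
  return std::min(std::max(x, lo), hi); // _mm_max_ps, then _mm_min_ps
}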
3294 #endif