Reference documentation for deal.II version Git ca6ed5c 2017-11-20 14:53:00 +0100
vectorization.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2017 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#ifndef dealii_vectorization_h
#define dealii_vectorization_h

#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/base/template_constraints.h>

#include <cmath>

// Note:
// The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
// according to the following scheme:
// #ifdef __AVX512F__
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
// #elif defined (__AVX__)
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
// #elif defined (__SSE2__)
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
// #else
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
// #endif
// In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
// 'check_01_cpu_features.cmake', ensures that these features are not only
// present in the compilation unit but also working properly.

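// Example (an illustrative sketch added here, not part of the original
// header): downstream code can branch on this level macro to learn the
// vector width that the VectorizedArray specializations below will provide.
//
// @code
// #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3
//   // AVX-512: VectorizedArray<double> has 8 lanes, VectorizedArray<float> 16
// #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 2
//   // AVX: 4 lanes for double, 8 for float
// #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1
//   // SSE2: 2 lanes for double, 4 for float
// #else
//   // no explicit vectorization: VectorizedArray<Number> wraps a single scalar
// #endif
// @endcode
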
#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && !defined(__AVX__)
#error "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#endif
#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && !defined(__AVX512F__)
#error "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
#endif

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
#include <immintrin.h>
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
#include <emmintrin.h>
#endif


DEAL_II_NAMESPACE_OPEN

// forward declaration (needed by the helpers below)
template <typename Number> class VectorizedArray;


namespace internal
{
  // Helper that either passes a VectorizedArray through unchanged or
  // broadcasts a scalar into all of its lanes. (The declaration line of this
  // specialization was lost in extraction; the name NumberType is assumed
  // here from the matching primary template in deal.II/base/numbers.h.)
  template <typename T>
  struct NumberType<VectorizedArray<T> >
  {
    static const VectorizedArray<T> &value (const VectorizedArray<T> &t)
    {
      return t;
    }

    static VectorizedArray<T> value (const T &t)
    {
      VectorizedArray<T> tmp;
      tmp = t;
      return tmp;
    }
  };
}


// Enable the EnableIfScalar type trait for VectorizedArray<Number> such
// that it can be used as a Number type in Tensor<rank,dim,Number>, etc.

template <typename Number>
struct EnableIfScalar<VectorizedArray<Number> >
{
  typedef VectorizedArray<Number> type;
};


// Generic fallback of VectorizedArray with a single lane; used whenever no
// processor-specific specialization below applies.
template <typename Number>
class VectorizedArray
{
public:
  // Number of elements in the array.
  static const unsigned int n_array_elements = 1;

  // POD means that there should be no user-defined constructors, destructors
  // and copy functions (the standard is somewhat relaxed in C++11, though).

  // Broadcast a scalar to all lanes.
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const Number scalar)
  {
    data = scalar;
    return *this;
  }

  // Read-write access to the comp-th lane.
  DEAL_II_ALWAYS_INLINE
  Number &
  operator [] (const unsigned int comp)
  {
    (void)comp;
    AssertIndexRange (comp, 1);
    return data;
  }

  // Read-only access to the comp-th lane.
  DEAL_II_ALWAYS_INLINE
  const Number &
  operator [] (const unsigned int comp) const
  {
    (void)comp;
    AssertIndexRange (comp, 1);
    return data;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    data += vec.data;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
    data -= vec.data;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
    data *= vec.data;
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
    data /= vec.data;
    return *this;
  }

  // Load n_array_elements values starting at ptr.
  DEAL_II_ALWAYS_INLINE
  void load (const Number *ptr)
  {
    data = *ptr;
  }

  // Store n_array_elements values to ptr.
  DEAL_II_ALWAYS_INLINE
  void store (Number *ptr) const
  {
    *ptr = data;
  }

  // Fill the lanes from base_ptr[offsets[v]].
  DEAL_II_ALWAYS_INLINE
  void gather (const Number *base_ptr,
               const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  // Write the lanes to base_ptr[offsets[v]].
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                Number *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }

  // The actual data field.
  Number data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = std::sqrt(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    VectorizedArray res;
    res.data = std::fabs(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = std::max (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = std::min (data, other.data);
    return res;
  }

  // Allow the std:: overloads for VectorizedArray to call the private members.
  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


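// Example (an illustrative sketch, not part of the original header): basic
// use of VectorizedArray; 'ptr' is assumed to point to an array of at least
// n_array_elements doubles. All lanes are processed with a single instruction
// on hardware where a wider specialization below is active.
//
// @code
//   VectorizedArray<double> a, b;
//   a = 3.0;       // broadcast 3.0 to all lanes
//   b.load(ptr);   // read n_array_elements doubles from 'ptr'
//   a += b;        // lane-wise addition
//   a[0] = 1.5;    // scalar access to an individual lane
//   a.store(ptr);  // write the lanes back to memory
// @endcode
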
// Create a vectorized array with the given scalar broadcast to all lanes.
template <typename Number>
inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
make_vectorized_array (const Number &u)
{
  VectorizedArray<Number> result;
  result = u;
  return result;
}


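// Example (illustrative, not from the original header): create an array with
// the value 2.5 in all lanes and scale it lane-wise.
//
// @code
//   VectorizedArray<double> two_and_a_half = make_vectorized_array(2.5);
//   VectorizedArray<double> five = two_and_a_half * make_vectorized_array(2.0);
// @endcode
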
// Gather n_entries values for each lane v from in[offsets[v]] onwards and
// transpose them into the array of vectorized data.
template <typename Number>
inline
void
vectorized_load_and_transpose(const unsigned int n_entries,
                              const Number *in,
                              const unsigned int *offsets,
                              VectorizedArray<Number> *out)
{
  for (unsigned int i=0; i<n_entries; ++i)
    for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
      out[i][v] = in[offsets[v]+i];
}


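// Worked example (illustrative, not from the original header): with the SSE2
// specialization (n_array_elements == 2), n_entries == 2, and offsets ==
// {0, 10}, the call gathers two contiguous chunks of 'in' (hypothetical
// arrays) and transposes them so that lane v of out[i] holds in[offsets[v]+i]:
//
// @code
//   //   out[0] = { in[0], in[10] }
//   //   out[1] = { in[1], in[11] }
//   vectorized_load_and_transpose(2, in, offsets, out);
// @endcode
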
template <typename Number>
inline
void
vectorized_transpose_and_store(const bool add_into,
                               const unsigned int n_entries,
                               const VectorizedArray<Number> *in,
                               const unsigned int *offsets,
                               Number *out)
{
  if (add_into)
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] = in[i][v];
}


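// Round-trip example (illustrative, not from the original header; n, in,
// offsets, tmp, out are hypothetical): the store is the exact inverse of the
// load when add_into == false; with add_into == true the transposed values
// are accumulated into 'out' instead.
//
// @code
//   vectorized_load_and_transpose(n, in, offsets, tmp);  // tmp[i][v] = in[offsets[v]+i]
//   vectorized_transpose_and_store(false, n, tmp, offsets, out);
//   // now out[offsets[v]+i] == in[offsets[v]+i] for all i < n and all lanes v
// @endcode
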
// for safety, also check that __AVX512F__ is defined in case the user manually
// set some conflicting compile flags which prevent compilation

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)

// Specialization of VectorizedArray for double using AVX-512 (8 lanes).
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplication (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m512d
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm512_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm512_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm512_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm512_div_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm512_loadu_pd (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm512_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const double *base_ptr,
               const unsigned int *offsets)
  {
    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm512_i32gather_pd(index, base_ptr, 8);
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double *base_ptr) const
  {
    for (unsigned int i=0; i<8; ++i)
      for (unsigned int j=i+1; j<8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
  }

  __m512d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm512_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // bit to +. Since there is no andnot for the AVX-512 floating point
    // types, we interpret the data as 64 bit integers and do the andnot on
    // those types (note that andnot is a bitwise operation so the data type
    // does not matter)
    __m512d mask = _mm512_set1_pd (-0.);
    VectorizedArray res;
    res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


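// Aside (an illustrative sketch, not part of the original header): the
// andnot-with-(-0.0) trick used in get_abs() above works because -0.0 has
// only the sign bit set, so clearing exactly that bit yields |x|. A scalar
// equivalent using the same bit manipulation:
//
// @code
//   #include <cstdint>
//   #include <cstring>
//
//   double scalar_abs (double x)
//   {
//     std::uint64_t bits, sign_mask;
//     const double minus_zero = -0.0;
//     std::memcpy(&bits, &x, 8);
//     std::memcpy(&sign_mask, &minus_zero, 8); // 0x8000000000000000
//     bits &= ~sign_mask;                      // andnot: clear the sign bit only
//     std::memcpy(&x, &bits, 8);
//     return x;
//   }
// @endcode
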
template <>
inline
void
vectorized_load_and_transpose(const unsigned int n_entries,
                              const double *in,
                              const unsigned int *offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  // transpose in two blocks of four lanes each, using the same 4x4 kernel
  // as in the AVX case below
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      const double *in0 = in + offsets[0+outer];
      const double *in1 = in + offsets[1+outer];
      const double *in2 = in + offsets[2+outer];
      const double *in3 = in + offsets[3+outer];

      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = _mm256_loadu_pd(in0+4*i);
          __m256d u1 = _mm256_loadu_pd(in1+4*i);
          __m256d u2 = _mm256_loadu_pd(in2+4*i);
          __m256d u3 = _mm256_loadu_pd(in3+4*i);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
          *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
        }
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<4; ++v)
          out[i][outer+v] = in[offsets[v+outer]+i];
    }
}


template <>
inline
void
vectorized_transpose_and_store(const bool add_into,
                               const unsigned int n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int *offsets,
                               double *out)
{
  const unsigned int n_chunks = n_entries/4;
  // do not do the full 8x8 transpose because the code would be too long and
  // will most likely not pay off. rather do the transposition on the smaller
  // vector size, __m256d, in two blocks of four lanes
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      double *out0 = out + offsets[0+outer];
      double *out1 = out + offsets[1+outer];
      double *out2 = out + offsets[2+outer];
      double *out3 = out + offsets[3+outer];
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
          __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
          __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
          __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          __m256d res0 = _mm256_unpacklo_pd (t0, t1);
          __m256d res1 = _mm256_unpackhi_pd (t0, t1);
          __m256d res2 = _mm256_unpacklo_pd (t2, t3);
          __m256d res3 = _mm256_unpackhi_pd (t2, t3);

          // Cannot use the same store instructions in both paths of the 'if'
          // because the compiler cannot know that there is no aliasing between
          // pointers
          if (add_into)
            {
              res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
              _mm256_storeu_pd(out0+4*i, res0);
              res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
              _mm256_storeu_pd(out1+4*i, res1);
              res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
              _mm256_storeu_pd(out2+4*i, res2);
              res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
              _mm256_storeu_pd(out3+4*i, res3);
            }
          else
            {
              _mm256_storeu_pd(out0+4*i, res0);
              _mm256_storeu_pd(out1+4*i, res1);
              _mm256_storeu_pd(out2+4*i, res2);
              _mm256_storeu_pd(out3+4*i, res3);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}


// Specialization of VectorizedArray for float using AVX-512 (16 lanes).
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 16;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 16);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 16);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplication (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m512
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm512_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm512_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm512_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm512_div_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm512_loadu_ps (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm512_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const float *base_ptr,
               const unsigned int *offsets)
  {
    // unfortunately, there does not appear to be a 512 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    data = _mm512_i32gather_ps(index, base_ptr, 4);
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float *base_ptr) const
  {
    for (unsigned int i=0; i<16; ++i)
      for (unsigned int j=i+1; j<16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // unfortunately, there does not appear to be a 512 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
  }

  __m512 data;

private:

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm512_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // bit to +. Since there is no andnot for the AVX-512 floating point
    // types, we interpret the data as 32 bit integers and do the andnot on
    // those types (note that andnot is a bitwise operation so the data type
    // does not matter)
    __m512 mask = _mm512_set1_ps (-0.f);
    VectorizedArray res;
    res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


template <>
inline
void
vectorized_load_and_transpose(const unsigned int n_entries,
                              const float *in,
                              const unsigned int *offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  // transpose in two blocks of eight lanes each, using the same 8x4 kernel
  // as in the AVX case below
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
          __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
          __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
          __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
          __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
          __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
          __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
          __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
          // To avoid warnings about uninitialized variables, need to initialize
          // one variable with zero before using it.
          __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
          t0 = _mm256_insertf128_ps (t3, u0, 0);
          t0 = _mm256_insertf128_ps (t0, u4, 1);
          t1 = _mm256_insertf128_ps (t3, u1, 0);
          t1 = _mm256_insertf128_ps (t1, u5, 1);
          t2 = _mm256_insertf128_ps (t3, u2, 0);
          t2 = _mm256_insertf128_ps (t2, u6, 1);
          t3 = _mm256_insertf128_ps (t3, u3, 0);
          t3 = _mm256_insertf128_ps (t3, u7, 1);
          __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
          __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
          __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
          __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
          *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
          *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
          *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
          *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
        }
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<8; ++v)
          out[i][v+outer] = in[offsets[v+outer]+i];
    }
}


template <>
inline
void
vectorized_transpose_and_store(const bool add_into,
                               const unsigned int n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int *offsets,
                               float *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
          __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
          __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
          __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
          __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
          __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
          __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
          __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
          u0 = _mm256_shuffle_ps (t0, t2, 0x88);
          u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
          u2 = _mm256_shuffle_ps (t1, t3, 0x88);
          u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
          __m128 res0 = _mm256_extractf128_ps (u0, 0);
          __m128 res4 = _mm256_extractf128_ps (u0, 1);
          __m128 res1 = _mm256_extractf128_ps (u1, 0);
          __m128 res5 = _mm256_extractf128_ps (u1, 1);
          __m128 res2 = _mm256_extractf128_ps (u2, 0);
          __m128 res6 = _mm256_extractf128_ps (u2, 1);
          __m128 res3 = _mm256_extractf128_ps (u3, 0);
          __m128 res7 = _mm256_extractf128_ps (u3, 1);

          // Cannot use the same store instructions in both paths of the 'if'
          // because the compiler cannot know that there is no aliasing between
          // pointers
          if (add_into)
            {
              res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
          else
            {
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}


#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)

// Specialization of VectorizedArray for double using AVX (4 lanes).
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplication (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m256d
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm256_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm256_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm256_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm256_div_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm256_loadu_pd (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm256_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const double *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // unfortunately, there does not appear to be a 128 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m128 index_val = _mm_loadu_ps((const float *)offsets);
    const __m128i index = *((__m128i *)(&index_val));
    data = _mm256_i32gather_pd(base_ptr, index, 8);
#else
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double *base_ptr) const
  {
    // no scatter operation in AVX/AVX2
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }

  __m256d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm256_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // bit to +.
    __m256d mask = _mm256_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


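// Example (an illustrative sketch, not part of the original header): gather
// and scatter move lanes between a vectorized register and arbitrarily
// indexed memory locations. Here, 'data' is assumed to be a plain double
// array with at least 13 valid entries; the offsets must be distinct for
// scatter to be well-defined.
//
// @code
//   const unsigned int offsets[4] = {0, 4, 8, 12};
//   VectorizedArray<double> x;
//   x.gather(data, offsets);         // x[v] = data[offsets[v]]
//   x *= make_vectorized_array(2.0); // lane-wise scaling
//   x.scatter(offsets, data);        // data[offsets[v]] = x[v]
// @endcode
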
template <>
inline
void
vectorized_load_and_transpose(const unsigned int n_entries,
                              const double *in,
                              const unsigned int *offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  const double *in0 = in + offsets[0];
  const double *in1 = in + offsets[1];
  const double *in2 = in + offsets[2];
  const double *in3 = in + offsets[3];

  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = _mm256_loadu_pd(in0+4*i);
      __m256d u1 = _mm256_loadu_pd(in1+4*i);
      __m256d u2 = _mm256_loadu_pd(in2+4*i);
      __m256d u3 = _mm256_loadu_pd(in3+4*i);
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
      out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
      out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
      out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}


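// Aside (an illustrative note, not part of the original header): the kernel
// above implements a 4x4 transpose in two stages. With u0 = {a0,a1,a2,a3},
// u1 = {b0,b1,b2,b3}, u2 = {c0,c1,c2,c3}, u3 = {d0,d1,d2,d3}:
//
// @code
//   // stage 1: swap 128-bit halves across registers
//   //   t0 = {a0,a1,c0,c1}   t1 = {b0,b1,d0,d1}
//   //   t2 = {a2,a3,c2,c3}   t3 = {b2,b3,d2,d3}
//   // stage 2: interleave 64-bit entries within each 128-bit lane
//   //   out0 = {a0,b0,c0,d0}   out1 = {a1,b1,c1,d1}
//   //   out2 = {a2,b2,c2,d2}   out3 = {a3,b3,c3,d3}
// @endcode
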
template <>
inline
void
vectorized_transpose_and_store(const bool add_into,
                               const unsigned int n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int *offsets,
                               double *out)
{
  const unsigned int n_chunks = n_entries/4;
  double *out0 = out + offsets[0];
  double *out1 = out + offsets[1];
  double *out2 = out + offsets[2];
  double *out3 = out + offsets[3];
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = in[4*i+0].data;
      __m256d u1 = in[4*i+1].data;
      __m256d u2 = in[4*i+2].data;
      __m256d u3 = in[4*i+3].data;
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd (t0, t1);
      __m256d res1 = _mm256_unpackhi_pd (t0, t1);
      __m256d res2 = _mm256_unpacklo_pd (t2, t3);
      __m256d res3 = _mm256_unpackhi_pd (t2, t3);

      // Cannot use the same store instructions in both paths of the 'if'
      // because the compiler cannot know that there is no aliasing between
      // pointers
      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
          _mm256_storeu_pd(out0+4*i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
          _mm256_storeu_pd(out1+4*i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
          _mm256_storeu_pd(out2+4*i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
          _mm256_storeu_pd(out3+4*i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0+4*i, res0);
          _mm256_storeu_pd(out1+4*i, res1);
          _mm256_storeu_pd(out2+4*i, res2);
          _mm256_storeu_pd(out3+4*i, res3);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] = in[i][v];
}


// Specialization of VectorizedArray for float using AVX (8 lanes).
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetic, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplication (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m256
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm256_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm256_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm256_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm256_div_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm256_loadu_ps (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm256_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const float *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm256_i32gather_ps(base_ptr, index, 4);
#else
    for (unsigned int i=0; i<8; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float *base_ptr) const
  {
    // no scatter operation in AVX/AVX2
    for (unsigned int i=0; i<8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }

  __m256 data;

private:

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm256_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // bit to +.
    __m256 mask = _mm256_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


template <>
inline
void
vectorized_load_and_transpose(const unsigned int n_entries,
                              const float *in,
                              const unsigned int *offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
      __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
      __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
      __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
      // To avoid warnings about uninitialized variables, need to initialize
      // one variable with zero before using it.
      __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
      t0 = _mm256_insertf128_ps (t3, u0, 0);
      t0 = _mm256_insertf128_ps (t0, u4, 1);
      t1 = _mm256_insertf128_ps (t3, u1, 0);
      t1 = _mm256_insertf128_ps (t1, u5, 1);
      t2 = _mm256_insertf128_ps (t3, u2, 0);
      t2 = _mm256_insertf128_ps (t2, u6, 1);
      t3 = _mm256_insertf128_ps (t3, u3, 0);
      t3 = _mm256_insertf128_ps (t3, u7, 1);
      __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
      __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
      __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
      __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
      out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<8; ++v)
      out[i][v] = in[offsets[v]+i];
}


template <>
inline
void
vectorized_transpose_and_store(const bool add_into,
                               const unsigned int n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int *offsets,
                               float *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256 u0 = in[4*i+0].data;
      __m256 u1 = in[4*i+1].data;
      __m256 u2 = in[4*i+2].data;
      __m256 u3 = in[4*i+3].data;
      __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
      u0 = _mm256_shuffle_ps (t0, t2, 0x88);
      u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
      u2 = _mm256_shuffle_ps (t1, t3, 0x88);
      u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps (u0, 0);
      __m128 res4 = _mm256_extractf128_ps (u0, 1);
      __m128 res1 = _mm256_extractf128_ps (u1, 0);
      __m128 res5 = _mm256_extractf128_ps (u1, 1);
      __m128 res2 = _mm256_extractf128_ps (u2, 0);
      __m128 res6 = _mm256_extractf128_ps (u2, 1);
      __m128 res3 = _mm256_extractf128_ps (u3, 0);
      __m128 res7 = _mm256_extractf128_ps (u3, 1);

      // Cannot use the same store instructions in both paths of the 'if'
      // because the compiler cannot know that there is no aliasing between
      // pointers
      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] = in[i][v];
}


// for safety, also check that __SSE2__ is defined in case the user manually
// set some conflicting compile flags which prevent compilation

#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)

// Specialization of VectorizedArray for double using SSE2 (2 lanes).
template <>
class VectorizedArray<double>
{
public:
  static const unsigned int n_array_elements = 2;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 2);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 2);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm_div_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm_loadu_pd (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm_storeu_pd (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const double *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<2; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double *base_ptr) const
  {
    for (unsigned int i=0; i<2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }

  __m128d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // bit to +.
    __m128d mask = _mm_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


template <>
inline
void vectorized_load_and_transpose(const unsigned int n_entries,
                                   const double *in,
                                   const unsigned int *offsets,
                                   VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/2;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
      __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
      out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
      out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
    }
  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<2; ++v)
      out[i][v] = in[offsets[v]+i];
}


template <>
inline
void
vectorized_transpose_and_store(const bool add_into,
                               const unsigned int n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int *offsets,
                               double *out)
{
  const unsigned int n_chunks = n_entries/2;
  if (add_into)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
          _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] += in[i][v];
    }
  else
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], res0);
          _mm_storeu_pd(out+2*i+offsets[1], res1);
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] = in[i][v];
    }
}


// Specialization of VectorizedArray for float using SSE2 (4 lanes).
template <>
class VectorizedArray<float>
{
public:
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm_div_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm_loadu_ps (ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm_storeu_ps (ptr, data);
  }

  DEAL_II_ALWAYS_INLINE
  void gather (const float *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float *base_ptr) const
  {
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }

  __m128 data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // bit to +.
    __m128 mask = _mm_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


template <>
inline
void vectorized_load_and_transpose(const unsigned int n_entries,
                                   const float *in,
                                   const unsigned int *offsets,
                                   VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
      __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
      __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
      __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
      out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}


2649 template <>
2650 inline
2651 void
2652 vectorized_transpose_and_store(const bool add_into,
2653  const unsigned int n_entries,
2654  const VectorizedArray<float> *in,
2655  const unsigned int *offsets,
2656  float *out)
2657 {
2658  const unsigned int n_chunks = n_entries/4;
2659  for (unsigned int i=0; i<n_chunks; ++i)
2660  {
2661  __m128 u0 = in[4*i+0].data;
2662  __m128 u1 = in[4*i+1].data;
2663  __m128 u2 = in[4*i+2].data;
2664  __m128 u3 = in[4*i+3].data;
2665  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2666  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2667  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2668  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2669  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2670  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2671  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2672  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2673 
2674  // Cannot use the same store instructions in both paths of the 'if'
2675  // because the compiler cannot know that there is no aliasing between
2676  // pointers
2677  if (add_into)
2678  {
2679  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2680  _mm_storeu_ps(out+4*i+offsets[0], u0);
2681  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2682  _mm_storeu_ps(out+4*i+offsets[1], u1);
2683  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2684  _mm_storeu_ps(out+4*i+offsets[2], u2);
2685  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2686  _mm_storeu_ps(out+4*i+offsets[3], u3);
2687  }
2688  else
2689  {
2690  _mm_storeu_ps(out+4*i+offsets[0], u0);
2691  _mm_storeu_ps(out+4*i+offsets[1], u1);
2692  _mm_storeu_ps(out+4*i+offsets[2], u2);
2693  _mm_storeu_ps(out+4*i+offsets[3], u3);
2694  }
2695  }
2696  if (add_into)
2697  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2698  for (unsigned int v=0; v<4; ++v)
2699  out[offsets[v]+i] += in[i][v];
2700  else
2701  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2702  for (unsigned int v=0; v<4; ++v)
2703  out[offsets[v]+i] = in[i][v];
2704 }
2705 
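// Usage sketch (illustration only): the inverse of the function above.
// With add_into == true the transposed data is accumulated into 'out'
// instead of overwriting it, which is why the 'if' above needs two
// separate store sequences.
//
//   const unsigned int n = 5;
//   VectorizedArray<float> in[n];  // filled elsewhere
//   float out[4*n] = {};
//   const unsigned int offsets[4] = {0, n, 2*n, 3*n};
//   vectorized_transpose_and_store (false, n, in, offsets, out);
//   // now out[offsets[v] + i] == in[i][v]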
2706 
2707 
2708 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2709 
2710 
2716 template <typename Number>
2717 inline DEAL_II_ALWAYS_INLINE
2718 bool
2719 operator == (const VectorizedArray<Number> &lhs,
2720  const VectorizedArray<Number> &rhs)
2721 {
2722  for (unsigned int i=0; i<VectorizedArray<Number>::n_array_elements; ++i)
2723  if (lhs[i] != rhs[i])
2724  return false;
2725 
2726  return true;
2727 }
2728 
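// Usage sketch (illustration only): the comparison returns a single bool
// that is true only if *all* lanes agree.
//
//   VectorizedArray<double> a, b;
//   a = 1.0;
//   b = 1.0;
//   const bool all_equal = (a == b); // true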
2729 
2735 template <typename Number>
2736 inline DEAL_II_ALWAYS_INLINE
2737 VectorizedArray<Number>
2738 operator + (const VectorizedArray<Number> &u,
2739  const VectorizedArray<Number> &v)
2740 {
2741  VectorizedArray<Number> tmp = u;
2742  return tmp+=v;
2743 }
2744 
2750 template <typename Number>
2751 inline DEAL_II_ALWAYS_INLINE
2752 VectorizedArray<Number>
2753 operator - (const VectorizedArray<Number> &u,
2754  const VectorizedArray<Number> &v)
2755 {
2756  VectorizedArray<Number> tmp = u;
2757  return tmp-=v;
2758 }
2759 
2765 template <typename Number>
2766 inline DEAL_II_ALWAYS_INLINE
2767 VectorizedArray<Number>
2768 operator * (const VectorizedArray<Number> &u,
2769  const VectorizedArray<Number> &v)
2770 {
2771  VectorizedArray<Number> tmp = u;
2772  return tmp*=v;
2773 }
2774 
2780 template <typename Number>
2781 inline DEAL_II_ALWAYS_INLINE
2782 VectorizedArray<Number>
2783 operator / (const VectorizedArray<Number> &u,
2784  const VectorizedArray<Number> &v)
2785 {
2786  VectorizedArray<Number> tmp = u;
2787  return tmp/=v;
2788 }
2789 
2796 template <typename Number>
2797 inline DEAL_II_ALWAYS_INLINE
2798 VectorizedArray<Number>
2799 operator + (const Number &u,
2800  const VectorizedArray<Number> &v)
2801 {
2802  VectorizedArray<Number> tmp;
2803  tmp = u;
2804  return tmp+=v;
2805 }
2806 
2815 inline DEAL_II_ALWAYS_INLINE
2816 VectorizedArray<float>
2817 operator + (const double &u,
2818  const VectorizedArray<float> &v)
2819 {
2820  VectorizedArray<float> tmp;
2821  tmp = u;
2822  return tmp+=v;
2823 }
2824 
2831 template <typename Number>
2832 inline DEAL_II_ALWAYS_INLINE
2833 VectorizedArray<Number>
2834 operator + (const VectorizedArray<Number> &v,
2835  const Number &u)
2836 {
2837  return u + v;
2838 }
2839 
2848 inline DEAL_II_ALWAYS_INLINE
2849 VectorizedArray<float>
2850 operator + (const VectorizedArray<float> &v,
2851  const double &u)
2852 {
2853  return u + v;
2854 }
2855 
2862 template <typename Number>
2863 inline DEAL_II_ALWAYS_INLINE
2864 VectorizedArray<Number>
2865 operator - (const Number &u,
2866  const VectorizedArray<Number> &v)
2867 {
2868  VectorizedArray<Number> tmp;
2869  tmp = u;
2870  return tmp-=v;
2871 }
2872 
2881 inline DEAL_II_ALWAYS_INLINE
2882 VectorizedArray<float>
2883 operator - (const double &u,
2884  const VectorizedArray<float> &v)
2885 {
2886  VectorizedArray<float> tmp;
2887  tmp = float(u);
2888  return tmp-=v;
2889 }
2890 
2897 template <typename Number>
2898 inline DEAL_II_ALWAYS_INLINE
2899 VectorizedArray<Number>
2900 operator - (const VectorizedArray<Number> &v,
2901  const Number &u)
2902 {
2903  VectorizedArray<Number> tmp;
2904  tmp = u;
2905  return v-tmp;
2906 }
2907 
2916 inline DEAL_II_ALWAYS_INLINE
2917 VectorizedArray<float>
2918 operator - (const VectorizedArray<float> &v,
2919  const double &u)
2920 {
2921  VectorizedArray<float> tmp;
2922  tmp = float(u);
2923  return v-tmp;
2924 }
2925 
2932 template <typename Number>
2933 inline DEAL_II_ALWAYS_INLINE
2934 VectorizedArray<Number>
2935 operator * (const Number &u,
2936  const VectorizedArray<Number> &v)
2937 {
2938  VectorizedArray<Number> tmp;
2939  tmp = u;
2940  return tmp*=v;
2941 }
2942 
2951 inline DEAL_II_ALWAYS_INLINE
2952 VectorizedArray<float>
2953 operator * (const double &u,
2954  const VectorizedArray<float> &v)
2955 {
2956  VectorizedArray<float> tmp;
2957  tmp = float(u);
2958  return tmp*=v;
2959 }
2960 
2967 template <typename Number>
2968 inline DEAL_II_ALWAYS_INLINE
2969 VectorizedArray<Number>
2970 operator * (const VectorizedArray<Number> &v,
2971  const Number &u)
2972 {
2973  return u * v;
2974 }
2975 
2984 inline DEAL_II_ALWAYS_INLINE
2985 VectorizedArray<float>
2986 operator * (const VectorizedArray<float> &v,
2987  const double &u)
2988 {
2989  return u * v;
2990 }
2991 
2998 template <typename Number>
2999 inline DEAL_II_ALWAYS_INLINE
3000 VectorizedArray<Number>
3001 operator / (const Number &u,
3002  const VectorizedArray<Number> &v)
3003 {
3004  VectorizedArray<Number> tmp;
3005  tmp = u;
3006  return tmp/=v;
3007 }
3008 
3017 inline DEAL_II_ALWAYS_INLINE
3018 VectorizedArray<float>
3019 operator / (const double &u,
3020  const VectorizedArray<float> &v)
3021 {
3022  VectorizedArray<float> tmp;
3023  tmp = float(u);
3024  return tmp/=v;
3025 }
3026 
3033 template <typename Number>
3034 inline DEAL_II_ALWAYS_INLINE
3035 VectorizedArray<Number>
3036 operator / (const VectorizedArray<Number> &v,
3037  const Number &u)
3038 {
3039  VectorizedArray<Number> tmp;
3040  tmp = u;
3041  return v/tmp;
3042 }
3043 
3052 inline DEAL_II_ALWAYS_INLINE
3053 VectorizedArray<float>
3054 operator / (const VectorizedArray<float> &v,
3055  const double &u)
3056 {
3057  VectorizedArray<float> tmp;
3058  tmp = float(u);
3059  return v/tmp;
3060 }
3061 
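// Usage sketch (illustration only): the overloads above let scalars mix
// freely with VectorizedArray operands; a double scalar combined with a
// VectorizedArray<float> is converted to float instead of widening the
// whole expression to double.
//
//   VectorizedArray<float> x;
//   x = 3.f;
//   VectorizedArray<float> y = 2.0 * x + 1.0; // broadcasts 2.0f and 1.0f
//   VectorizedArray<float> z = 1.0 / x - y;   // every lane: 1/3 - 7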
3067 template <typename Number>
3068 inline DEAL_II_ALWAYS_INLINE
3069 VectorizedArray<Number>
3070 operator + (const VectorizedArray<Number> &u)
3071 {
3072  return u;
3073 }
3074 
3080 template <typename Number>
3081 inline DEAL_II_ALWAYS_INLINE
3082 VectorizedArray<Number>
3083 operator - (const VectorizedArray<Number> &u)
3084 {
3085  // to get a negative sign, subtract the input from zero (could also
3086  // multiply by -1, but this one is slightly simpler)
3087  return VectorizedArray<Number>()-u;
3088 }
3089 
3090 
3091 DEAL_II_NAMESPACE_CLOSE
3092 
3093 
3100 namespace std
3101 {
3109  template <typename Number>
3110  inline
3111  ::VectorizedArray<Number>
3112  sin (const ::VectorizedArray<Number> &x)
3113  {
3114  // put values in an array and later read in that array with an unaligned
3115  // read. This should save some instructions as compared to directly
3116  // setting the individual elements and also circumvents a compiler
3117  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3118  // from April 2014, topic "matrix_free/step-48 Test").
3119  Number values[::VectorizedArray<Number>::n_array_elements];
3120  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3121  values[i] = std::sin(x[i]);
3122  ::VectorizedArray<Number> out;
3123  out.load(&values[0]);
3124  return out;
3125  }
3126 
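  // Usage sketch (illustration only): the overloads in this namespace apply
  // the scalar function lane by lane, so the result is exactly what a loop
  // over the entries would produce.
  //
  //   VectorizedArray<double> x;
  //   x = 0.5;
  //   VectorizedArray<double> s = std::sin (x); // every lane holds sin(0.5)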
3127 
3128 
3136  template <typename Number>
3137  inline
3138  ::VectorizedArray<Number>
3139  cos (const ::VectorizedArray<Number> &x)
3140  {
3141  Number values[::VectorizedArray<Number>::n_array_elements];
3142  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3143  values[i] = std::cos(x[i]);
3144  ::VectorizedArray<Number> out;
3145  out.load(&values[0]);
3146  return out;
3147  }
3148 
3149 
3150 
3158  template <typename Number>
3159  inline
3160  ::VectorizedArray<Number>
3161  tan (const ::VectorizedArray<Number> &x)
3162  {
3163  Number values[::VectorizedArray<Number>::n_array_elements];
3164  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3165  values[i] = std::tan(x[i]);
3166  ::VectorizedArray<Number> out;
3167  out.load(&values[0]);
3168  return out;
3169  }
3170 
3171 
3172 
3180  template <typename Number>
3181  inline
3182  ::VectorizedArray<Number>
3183  exp (const ::VectorizedArray<Number> &x)
3184  {
3185  Number values[::VectorizedArray<Number>::n_array_elements];
3186  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3187  values[i] = std::exp(x[i]);
3188  ::VectorizedArray<Number> out;
3189  out.load(&values[0]);
3190  return out;
3191  }
3192 
3193 
3194 
3202  template <typename Number>
3203  inline
3204  ::VectorizedArray<Number>
3205  log (const ::VectorizedArray<Number> &x)
3206  {
3207  Number values[::VectorizedArray<Number>::n_array_elements];
3208  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3209  values[i] = std::log(x[i]);
3210  ::VectorizedArray<Number> out;
3211  out.load(&values[0]);
3212  return out;
3213  }
3214 
3215 
3216 
3224  template <typename Number>
3225  inline
3226  ::VectorizedArray<Number>
3227  sqrt (const ::VectorizedArray<Number> &x)
3228  {
3229  return x.get_sqrt();
3230  }
3231 
3232 
3233 
3241  template <typename Number>
3242  inline
3243  ::VectorizedArray<Number>
3244  pow (const ::VectorizedArray<Number> &x,
3245  const Number p)
3246  {
3247  Number values[::VectorizedArray<Number>::n_array_elements];
3248  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3249  values[i] = std::pow(x[i], p);
3250  ::VectorizedArray<Number> out;
3251  out.load(&values[0]);
3252  return out;
3253  }
3254 
3255 
3256 
3264  template <typename Number>
3265  inline
3266  ::VectorizedArray<Number>
3267  abs (const ::VectorizedArray<Number> &x)
3268  {
3269  return x.get_abs();
3270  }
3271 
3272 
3273 
3281  template <typename Number>
3282  inline
3283  ::VectorizedArray<Number>
3284  max (const ::VectorizedArray<Number> &x,
3285  const ::VectorizedArray<Number> &y)
3286  {
3287  return x.get_max(y);
3288  }
3289 
3290 
3291 
3299  template <typename Number>
3300  inline
3301  ::VectorizedArray<Number>
3302  min (const ::VectorizedArray<Number> &x,
3303  const ::VectorizedArray<Number> &y)
3304  {
3305  return x.get_min(y);
3306  }
3307 
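  // Usage sketch (illustration only): sqrt, abs, max, and min dispatch to
  // the hardware-specific get_sqrt()/get_abs()/get_max()/get_min() members
  // and therefore stay fully vectorized, unlike the loop-based overloads
  // above.
  //
  //   VectorizedArray<double> a, b;
  //   a = -1.0;
  //   b = 2.0;
  //   VectorizedArray<double> m = std::max (std::abs (a), b); // each lane: 2.0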
3308 }
3309 
3310 #endif