Reference documentation for deal.II version Git 053e5b2 2017-07-21 22:33:36 -0600
vectorization.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #ifndef dealii__vectorization_h
18 #define dealii__vectorization_h
19 
20 #include <deal.II/base/config.h>
21 #include <deal.II/base/exceptions.h>
22 #include <deal.II/base/template_constraints.h>
23 
24 #include <cmath>
25 
26 // Note:
27 // The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
28 // according to the following scheme
29 // #ifdef __AVX512F__
30 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
31 // #elif defined (__AVX__)
32 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
33 // #elif defined (__SSE2__)
34 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
35 // #else
36 // #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
37 // #endif
38 // In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
39 // 'check_01_cpu_features.cmake', ensures that these features are not only
40 // present in the compilation unit but also working properly.
41 
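// --- Illustrative note (added sketch, not part of the library header) ------
// User code usually branches on DEAL_II_COMPILER_VECTORIZATION_LEVEL rather
// than on the raw instruction-set macros, for example:
//
//   #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2
//     // AVX or AVX-512 available: VectorizedArray<double> has 4 or 8 lanes
//   #else
//     // SSE2 or plain scalar fallback
//   #endif
//
// The lane count is always available as VectorizedArray<T>::n_array_elements,
// so most code never needs to inspect the level explicitly.
// ----------------------------------------------------------------------------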
42 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && !defined(__SSE2__)
43 #error "Mismatch in vectorization capabilities: SSE2 was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
44 #endif
45 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && !defined(__AVX__)
46 #error "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
47 #endif
48 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && !defined(__AVX512F__)
49 #error "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
50 #endif
51 
52 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
53 #include <immintrin.h>
54 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
55 #include <emmintrin.h>
56 #endif
57 
58 
59 DEAL_II_NAMESPACE_OPEN
60 
61 
62 namespace internal
63 {
74  template <typename T>
76  {
77  static VectorizedArray<T> value (const T &t)
 78  {
 79  VectorizedArray<T> tmp;
 80  tmp=t;
81  return tmp;
82  }
83  };
84 }
85 
86 
87 // Enable the EnableIfScalar type trait for VectorizedArray<Number> such
88 // that it can be used as a Number type in Tensor<rank,dim,Number>, etc.
89 
90 template <typename Number>
91 struct EnableIfScalar<VectorizedArray<Number> >
 92 {
 93  typedef VectorizedArray<Number> type;
 94 };
95 
96 
97 
148 template <typename Number>
149 class VectorizedArray
150 {
151 public:
155  static const unsigned int n_array_elements = 1;
156 
 157  // POD means that there should be no user-defined constructors, destructors,
 158  // or copy functions (the standard is somewhat relaxed in C++11, though).
159 
 163  DEAL_II_ALWAYS_INLINE
 164  VectorizedArray &
 165  operator = (const Number scalar)
166  {
167  data = scalar;
168  return *this;
169  }
170 
174  DEAL_II_ALWAYS_INLINE
175  Number &
176  operator [] (const unsigned int comp)
177  {
178  (void)comp;
179  AssertIndexRange (comp, 1);
180  return data;
181  }
182 
186  DEAL_II_ALWAYS_INLINE
187  const Number &
188  operator [] (const unsigned int comp) const
189  {
190  (void)comp;
191  AssertIndexRange (comp, 1);
192  return data;
193  }
194 
 198  DEAL_II_ALWAYS_INLINE
 199  VectorizedArray &
 200  operator += (const VectorizedArray &vec)
 201  {
202  data+=vec.data;
203  return *this;
204  }
205 
 209  DEAL_II_ALWAYS_INLINE
 210  VectorizedArray &
 211  operator -= (const VectorizedArray &vec)
 212  {
213  data-=vec.data;
214  return *this;
215  }
216 
 220  DEAL_II_ALWAYS_INLINE
 221  VectorizedArray &
 222  operator *= (const VectorizedArray &vec)
 223  {
224  data*=vec.data;
225  return *this;
226  }
227 
 231  DEAL_II_ALWAYS_INLINE
 232  VectorizedArray &
 233  operator /= (const VectorizedArray &vec)
 234  {
235  data/=vec.data;
236  return *this;
237  }
238 
245  DEAL_II_ALWAYS_INLINE
246  void load (const Number *ptr)
247  {
248  data = *ptr;
249  }
250 
257  DEAL_II_ALWAYS_INLINE
258  void store (Number *ptr) const
259  {
260  *ptr = data;
261  }
262 
275  DEAL_II_ALWAYS_INLINE
276  void gather (const Number *base_ptr,
277  const unsigned int *offsets)
278  {
279  data = base_ptr[offsets[0]];
280  }
281 
294  DEAL_II_ALWAYS_INLINE
295  void scatter (const unsigned int *offsets,
296  Number *base_ptr) const
297  {
298  base_ptr[offsets[0]] = data;
299  }
300 
305  Number data;
306 
307 private:
 312  DEAL_II_ALWAYS_INLINE
 313  VectorizedArray
 314  get_sqrt () const
315  {
316  VectorizedArray res;
317  res.data = std::sqrt(data);
318  return res;
319  }
320 
 325  DEAL_II_ALWAYS_INLINE
 326  VectorizedArray
 327  get_abs () const
328  {
329  VectorizedArray res;
330  res.data = std::fabs(data);
331  return res;
332  }
333 
 338  DEAL_II_ALWAYS_INLINE
 339  VectorizedArray
 340  get_max (const VectorizedArray &other) const
341  {
342  VectorizedArray res;
343  res.data = std::max (data, other.data);
344  return res;
345  }
346 
 351  DEAL_II_ALWAYS_INLINE
 352  VectorizedArray
 353  get_min (const VectorizedArray &other) const
354  {
355  VectorizedArray res;
356  res.data = std::min (data, other.data);
357  return res;
358  }
359 
363  template <typename Number2> friend VectorizedArray<Number2>
364  std::sqrt (const VectorizedArray<Number2> &);
365  template <typename Number2> friend VectorizedArray<Number2>
366  std::abs (const VectorizedArray<Number2> &);
367  template <typename Number2> friend VectorizedArray<Number2>
368  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
369  template <typename Number2> friend VectorizedArray<Number2>
370  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
371 };
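// --- Illustrative usage sketch (added, not part of the library header) -----
// A minimal example of the interface defined by VectorizedArray: broadcast
// assignment, compound arithmetic, gather through offsets, and lane-wise
// access. The function name and arguments are made up for illustration; the
// pointers are assumed to provide n_array_elements valid entries
// (respectively at the given offsets).
template <typename Number>
inline void
vectorized_array_usage_sketch (const Number *contiguous_src,
                               const Number *strided_src,
                               const unsigned int *offsets,
                               Number *dst)
{
  VectorizedArray<Number> a, b;
  a.load (contiguous_src);          // read n_array_elements consecutive values
  b.gather (strided_src, offsets);  // read one value per lane from strided_src[offsets[v]]
  a += b;                           // element-wise addition
  a *= a;                           // element-wise multiplication
  b = Number(2);                    // broadcast a scalar to all lanes
  a -= b;
  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
    dst[v] = a[v];                  // lane-wise access via operator[]
}
// ----------------------------------------------------------------------------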
372 
373 
374 
381 template <typename Number>
382 inline DEAL_II_ALWAYS_INLINE
383 VectorizedArray<Number>
384 make_vectorized_array (const Number &u)
385 {
386  VectorizedArray<Number> result;
387  result = u;
388  return result;
389 }
390 
391 
392 
418 template <typename Number>
419 inline
420 void
421 vectorized_load_and_transpose(const unsigned int n_entries,
422  const Number *in,
 423  const unsigned int *offsets,
 424  VectorizedArray<Number> *out)
 425 {
426  for (unsigned int i=0; i<n_entries; ++i)
427  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
428  out[i][v] = in[offsets[v]+i];
429 }
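// --- Illustrative note (added, not part of the library header) -------------
// Access pattern of vectorized_load_and_transpose(): lane v of out[i] is
// filled from in[offsets[v]+i], i.e. each lane reads n_entries consecutive
// values starting at its own offset. For example, with 2 lanes, n_entries==3
// and offsets=={0,100}:
//
//   out[0] = { in[0], in[100] }
//   out[1] = { in[1], in[101] }
//   out[2] = { in[2], in[102] }
//
// vectorized_transpose_and_store() below is the inverse operation; with
// add_into==true it accumulates into the output instead of overwriting it.
// ----------------------------------------------------------------------------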
430 
431 
432 
471 template <typename Number>
472 inline
473 void
474 vectorized_transpose_and_store(const bool add_into,
475  const unsigned int n_entries,
476  const VectorizedArray<Number> *in,
477  const unsigned int *offsets,
478  Number *out)
479 {
480  if (add_into)
481  for (unsigned int i=0; i<n_entries; ++i)
482  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
483  out[offsets[v]+i] += in[i][v];
484  else
485  for (unsigned int i=0; i<n_entries; ++i)
486  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
487  out[offsets[v]+i] = in[i][v];
488 }
489 
490 
491 
492 // for safety, also check that __AVX512F__ is defined in case the user manually
493 // set some conflicting compile flags which prevent compilation
494 
495 #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
496 
500 template <>
501 class VectorizedArray<double>
502 {
503 public:
507  static const unsigned int n_array_elements = 8;
508 
 512  DEAL_II_ALWAYS_INLINE
 513  VectorizedArray &
 514  operator = (const double x)
515  {
516  data = _mm512_set1_pd(x);
517  return *this;
518  }
519 
523  DEAL_II_ALWAYS_INLINE
524  double &
525  operator [] (const unsigned int comp)
526  {
527  AssertIndexRange (comp, 8);
528  return *(reinterpret_cast<double *>(&data)+comp);
529  }
530 
534  DEAL_II_ALWAYS_INLINE
535  const double &
536  operator [] (const unsigned int comp) const
537  {
538  AssertIndexRange (comp, 8);
539  return *(reinterpret_cast<const double *>(&data)+comp);
540  }
541 
 545  DEAL_II_ALWAYS_INLINE
 546  VectorizedArray &
 547  operator += (const VectorizedArray &vec)
548  {
549  // if the compiler supports vector arithmetics, we can simply use +=
550  // operator on the given data type. this allows the compiler to combine
551  // additions with multiplication (fused multiply-add) if those
552  // instructions are available. Otherwise, we need to use the built-in
553  // intrinsic command for __m512d
554 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
555  data += vec.data;
556 #else
557  data = _mm512_add_pd(data,vec.data);
558 #endif
559  return *this;
560  }
561 
 565  DEAL_II_ALWAYS_INLINE
 566  VectorizedArray &
 567  operator -= (const VectorizedArray &vec)
568  {
569 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
570  data -= vec.data;
571 #else
572  data = _mm512_sub_pd(data,vec.data);
573 #endif
574  return *this;
575  }
 579  DEAL_II_ALWAYS_INLINE
 580  VectorizedArray &
 581  operator *= (const VectorizedArray &vec)
582  {
583 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
584  data *= vec.data;
585 #else
586  data = _mm512_mul_pd(data,vec.data);
587 #endif
588  return *this;
589  }
590 
 594  DEAL_II_ALWAYS_INLINE
 595  VectorizedArray &
 596  operator /= (const VectorizedArray &vec)
597  {
598 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
599  data /= vec.data;
600 #else
601  data = _mm512_div_pd(data,vec.data);
602 #endif
603  return *this;
604  }
605 
611  DEAL_II_ALWAYS_INLINE
612  void load (const double *ptr)
613  {
614  data = _mm512_loadu_pd (ptr);
615  }
616 
623  DEAL_II_ALWAYS_INLINE
624  void store (double *ptr) const
625  {
626  _mm512_storeu_pd (ptr, data);
627  }
628 
641  DEAL_II_ALWAYS_INLINE
642  void gather (const double *base_ptr,
643  const unsigned int *offsets)
644  {
645  // unfortunately, there does not appear to be a 256 bit integer load, so
646  // do it by some reinterpret casts here. this is allowed because the Intel
647  // API allows aliasing between different vector types.
648  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
649  const __m256i index = *((__m256i *)(&index_val));
650  data = _mm512_i32gather_pd(index, base_ptr, 8);
651  }
652 
665  DEAL_II_ALWAYS_INLINE
666  void scatter (const unsigned int *offsets,
667  double *base_ptr) const
668  {
669  for (unsigned int i=0; i<8; ++i)
670  for (unsigned int j=i+1; j<8; ++j)
671  Assert(offsets[i] != offsets[j],
672  ExcMessage("Result of scatter undefined if two offset elements"
673  " point to the same position"));
674 
675  // unfortunately, there does not appear to be a 256 bit integer load, so
676  // do it by some reinterpret casts here. this is allowed because the Intel
677  // API allows aliasing between different vector types.
678  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
679  const __m256i index = *((__m256i *)(&index_val));
680  _mm512_i32scatter_pd(base_ptr, index, data, 8);
681  }
682 
687  __m512d data;
688 
689 private:
 694  DEAL_II_ALWAYS_INLINE
 695  VectorizedArray
 696  get_sqrt () const
697  {
698  VectorizedArray res;
699  res.data = _mm512_sqrt_pd(data);
700  return res;
701  }
702 
 707  DEAL_II_ALWAYS_INLINE
 708  VectorizedArray
 709  get_abs () const
710  {
711  // to compute the absolute value, perform bitwise andnot with -0. This
712  // will leave all value and exponent bits unchanged but force the sign
713  // value to +. Since there is no andnot for AVX512, we interpret the data
714  // as 64 bit integers and do the andnot on those types (note that andnot
715  // is a bitwise operation so the data type does not matter)
716  __m512d mask = _mm512_set1_pd (-0.);
717  VectorizedArray res;
718  res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
719  return res;
720  }
721 
 726  DEAL_II_ALWAYS_INLINE
 727  VectorizedArray
 728  get_max (const VectorizedArray &other) const
729  {
730  VectorizedArray res;
731  res.data = _mm512_max_pd (data, other.data);
732  return res;
733  }
734 
 739  DEAL_II_ALWAYS_INLINE
 740  VectorizedArray
 741  get_min (const VectorizedArray &other) const
742  {
743  VectorizedArray res;
744  res.data = _mm512_min_pd (data, other.data);
745  return res;
746  }
747 
751  template <typename Number2> friend VectorizedArray<Number2>
752  std::sqrt (const VectorizedArray<Number2> &);
753  template <typename Number2> friend VectorizedArray<Number2>
754  std::abs (const VectorizedArray<Number2> &);
755  template <typename Number2> friend VectorizedArray<Number2>
756  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
757  template <typename Number2> friend VectorizedArray<Number2>
758  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
759 };
760 
761 
762 
766 template <>
767 inline
768 void
769 vectorized_load_and_transpose(const unsigned int n_entries,
770  const double *in,
 771  const unsigned int *offsets,
 772  VectorizedArray<double> *out)
 773 {
774  const unsigned int n_chunks = n_entries/4;
775  for (unsigned int outer=0; outer<8; outer += 4)
776  {
777  const double *in0 = in + offsets[0+outer];
778  const double *in1 = in + offsets[1+outer];
779  const double *in2 = in + offsets[2+outer];
780  const double *in3 = in + offsets[3+outer];
781 
782  for (unsigned int i=0; i<n_chunks; ++i)
783  {
784  __m256d u0 = _mm256_loadu_pd(in0+4*i);
785  __m256d u1 = _mm256_loadu_pd(in1+4*i);
786  __m256d u2 = _mm256_loadu_pd(in2+4*i);
787  __m256d u3 = _mm256_loadu_pd(in3+4*i);
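 // the following performs a 4x4 transpose of double data:
 // _mm256_permute2f128_pd combines the lower/upper 128-bit halves of the
 // u0/u2 and u1/u3 pairs, and _mm256_unpacklo_pd/_mm256_unpackhi_pd then
 // interleave the 64-bit lanes, so each result register holds one value
 // from each of the four input streams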
788  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
789  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
790  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
791  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
792  *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
793  *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
794  *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
795  *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
796  }
797  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
798  for (unsigned int v=0; v<4; ++v)
799  out[i][outer+v] = in[offsets[v+outer]+i];
800  }
801 }
802 
803 
804 
808 template <>
809 inline
810 void
811 vectorized_transpose_and_store(const bool add_into,
812  const unsigned int n_entries,
813  const VectorizedArray<double> *in,
814  const unsigned int *offsets,
815  double *out)
816 {
817  const unsigned int n_chunks = n_entries/4;
 818  // do not do a full transpose because the code would be too long and will
 819  // most likely not pay off. rather do the transposition on the half-width
 820  // vectorized array type, __m256d
821  for (unsigned int outer=0; outer<8; outer += 4)
822  {
823  double *out0 = out + offsets[0+outer];
824  double *out1 = out + offsets[1+outer];
825  double *out2 = out + offsets[2+outer];
826  double *out3 = out + offsets[3+outer];
827  for (unsigned int i=0; i<n_chunks; ++i)
828  {
829  __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
830  __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
831  __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
832  __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
833  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
834  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
835  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
836  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
837  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
838  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
839  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
840  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
841 
842  // Cannot use the same store instructions in both paths of the 'if'
843  // because the compiler cannot know that there is no aliasing between
844  // pointers
845  if (add_into)
846  {
847  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
848  _mm256_storeu_pd(out0+4*i, res0);
849  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
850  _mm256_storeu_pd(out1+4*i, res1);
851  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
852  _mm256_storeu_pd(out2+4*i, res2);
853  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
854  _mm256_storeu_pd(out3+4*i, res3);
855  }
856  else
857  {
858  _mm256_storeu_pd(out0+4*i, res0);
859  _mm256_storeu_pd(out1+4*i, res1);
860  _mm256_storeu_pd(out2+4*i, res2);
861  _mm256_storeu_pd(out3+4*i, res3);
862  }
863  }
864  if (add_into)
865  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
866  for (unsigned int v=0; v<4; ++v)
867  out[offsets[v+outer]+i] += in[i][v+outer];
868  else
869  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
870  for (unsigned int v=0; v<4; ++v)
871  out[offsets[v+outer]+i] = in[i][v+outer];
872  }
873 }
874 
875 
876 
880 template <>
881 class VectorizedArray<float>
882 {
883 public:
887  static const unsigned int n_array_elements = 16;
888 
 892  DEAL_II_ALWAYS_INLINE
 893  VectorizedArray &
 894  operator = (const float x)
895  {
896  data = _mm512_set1_ps(x);
897  return *this;
898  }
899 
903  DEAL_II_ALWAYS_INLINE
904  float &
905  operator [] (const unsigned int comp)
906  {
907  AssertIndexRange (comp, 16);
908  return *(reinterpret_cast<float *>(&data)+comp);
909  }
910 
914  DEAL_II_ALWAYS_INLINE
915  const float &
916  operator [] (const unsigned int comp) const
917  {
918  AssertIndexRange (comp, 16);
919  return *(reinterpret_cast<const float *>(&data)+comp);
920  }
921 
 925  DEAL_II_ALWAYS_INLINE
 926  VectorizedArray &
 927  operator += (const VectorizedArray &vec)
928  {
929  // if the compiler supports vector arithmetics, we can simply use +=
930  // operator on the given data type. this allows the compiler to combine
931  // additions with multiplication (fused multiply-add) if those
932  // instructions are available. Otherwise, we need to use the built-in
 933  // intrinsic command for __m512
934 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
935  data += vec.data;
936 #else
937  data = _mm512_add_ps(data,vec.data);
938 #endif
939  return *this;
940  }
941 
 945  DEAL_II_ALWAYS_INLINE
 946  VectorizedArray &
 947  operator -= (const VectorizedArray &vec)
948  {
949 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
950  data -= vec.data;
951 #else
952  data = _mm512_sub_ps(data,vec.data);
953 #endif
954  return *this;
955  }
 959  DEAL_II_ALWAYS_INLINE
 960  VectorizedArray &
 961  operator *= (const VectorizedArray &vec)
962  {
963 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
964  data *= vec.data;
965 #else
966  data = _mm512_mul_ps(data,vec.data);
967 #endif
968  return *this;
969  }
970 
 974  DEAL_II_ALWAYS_INLINE
 975  VectorizedArray &
 976  operator /= (const VectorizedArray &vec)
977  {
978 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
979  data /= vec.data;
980 #else
981  data = _mm512_div_ps(data,vec.data);
982 #endif
983  return *this;
984  }
985 
991  DEAL_II_ALWAYS_INLINE
992  void load (const float *ptr)
993  {
994  data = _mm512_loadu_ps (ptr);
995  }
996 
1003  DEAL_II_ALWAYS_INLINE
1004  void store (float *ptr) const
1005  {
1006  _mm512_storeu_ps (ptr, data);
1007  }
1008 
1021  DEAL_II_ALWAYS_INLINE
1022  void gather (const float *base_ptr,
1023  const unsigned int *offsets)
1024  {
1025  // unfortunately, there does not appear to be a 512 bit integer load, so
1026  // do it by some reinterpret casts here. this is allowed because the Intel
1027  // API allows aliasing between different vector types.
1028  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1029  const __m512i index = *((__m512i *)(&index_val));
1030  data = _mm512_i32gather_ps(index, base_ptr, 4);
1031  }
1032 
1045  DEAL_II_ALWAYS_INLINE
1046  void scatter (const unsigned int *offsets,
1047  float *base_ptr) const
1048  {
1049  for (unsigned int i=0; i<16; ++i)
1050  for (unsigned int j=i+1; j<16; ++j)
1051  Assert(offsets[i] != offsets[j],
1052  ExcMessage("Result of scatter undefined if two offset elements"
1053  " point to the same position"));
1054 
1055  // unfortunately, there does not appear to be a 512 bit integer load, so
1056  // do it by some reinterpret casts here. this is allowed because the Intel
1057  // API allows aliasing between different vector types.
1058  const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
1059  const __m512i index = *((__m512i *)(&index_val));
1060  _mm512_i32scatter_ps(base_ptr, index, data, 4);
1061  }
1062 
1067  __m512 data;
1068 
1069 private:
1070 
 1075  DEAL_II_ALWAYS_INLINE
 1076  VectorizedArray
 1077  get_sqrt () const
1078  {
1079  VectorizedArray res;
1080  res.data = _mm512_sqrt_ps(data);
1081  return res;
1082  }
1083 
 1088  DEAL_II_ALWAYS_INLINE
 1089  VectorizedArray
 1090  get_abs () const
1091  {
1092  // to compute the absolute value, perform bitwise andnot with -0. This
1093  // will leave all value and exponent bits unchanged but force the sign
1094  // value to +. Since there is no andnot for AVX512, we interpret the data
1095  // as 32 bit integers and do the andnot on those types (note that andnot
1096  // is a bitwise operation so the data type does not matter)
1097  __m512 mask = _mm512_set1_ps (-0.f);
1098  VectorizedArray res;
1099  res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
1100  return res;
1101  }
1102 
 1107  DEAL_II_ALWAYS_INLINE
 1108  VectorizedArray
 1109  get_max (const VectorizedArray &other) const
1110  {
1111  VectorizedArray res;
1112  res.data = _mm512_max_ps (data, other.data);
1113  return res;
1114  }
1115 
 1120  DEAL_II_ALWAYS_INLINE
 1121  VectorizedArray
 1122  get_min (const VectorizedArray &other) const
1123  {
1124  VectorizedArray res;
1125  res.data = _mm512_min_ps (data, other.data);
1126  return res;
1127  }
1128 
1132  template <typename Number2> friend VectorizedArray<Number2>
1133  std::sqrt (const VectorizedArray<Number2> &);
1134  template <typename Number2> friend VectorizedArray<Number2>
1135  std::abs (const VectorizedArray<Number2> &);
1136  template <typename Number2> friend VectorizedArray<Number2>
1137  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1138  template <typename Number2> friend VectorizedArray<Number2>
1139  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1140 };
1141 
1142 
1143 
1147 template <>
1148 inline
1149 void
1150 vectorized_load_and_transpose(const unsigned int n_entries,
1151  const float *in,
 1152  const unsigned int *offsets,
 1153  VectorizedArray<float> *out)
 1154 {
1155  const unsigned int n_chunks = n_entries/4;
1156  for (unsigned int outer = 0; outer<16; outer += 8)
1157  {
1158  for (unsigned int i=0; i<n_chunks; ++i)
1159  {
1160  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
1161  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
1162  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
1163  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
1164  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
1165  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
1166  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
1167  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
1168  // To avoid warnings about uninitialized variables, need to initialize
1169  // one variable with zero before using it.
1170  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1171  t0 = _mm256_insertf128_ps (t3, u0, 0);
1172  t0 = _mm256_insertf128_ps (t0, u4, 1);
1173  t1 = _mm256_insertf128_ps (t3, u1, 0);
1174  t1 = _mm256_insertf128_ps (t1, u5, 1);
1175  t2 = _mm256_insertf128_ps (t3, u2, 0);
1176  t2 = _mm256_insertf128_ps (t2, u6, 1);
1177  t3 = _mm256_insertf128_ps (t3, u3, 0);
1178  t3 = _mm256_insertf128_ps (t3, u7, 1);
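 // the shuffle masks below transpose 4x4 float blocks within each 128-bit
 // lane: 0x44/0xee pick the lower/upper pairs of two registers, and
 // 0x88/0xdd then pick the even/odd elements, so each output register ends
 // up with one value from each of the four interleaved inputs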
1179  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
1180  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
1181  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
1182  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
1183  *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
1184  *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
1185  *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
1186  *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
1187  }
1188  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1189  for (unsigned int v=0; v<8; ++v)
1190  out[i][v+outer] = in[offsets[v+outer]+i];
1191  }
1192 }
1193 
1194 
1195 
1199 template <>
1200 inline
1201 void
1202 vectorized_transpose_and_store(const bool add_into,
1203  const unsigned int n_entries,
1204  const VectorizedArray<float> *in,
1205  const unsigned int *offsets,
1206  float *out)
1207 {
1208  const unsigned int n_chunks = n_entries/4;
1209  for (unsigned int outer = 0; outer<16; outer += 8)
1210  {
1211  for (unsigned int i=0; i<n_chunks; ++i)
1212  {
1213  __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
1214  __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
1215  __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
1216  __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
1217  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
1218  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
1219  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
1220  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
1221  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
1222  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
1223  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
1224  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
1225  __m128 res0 = _mm256_extractf128_ps (u0, 0);
1226  __m128 res4 = _mm256_extractf128_ps (u0, 1);
1227  __m128 res1 = _mm256_extractf128_ps (u1, 0);
1228  __m128 res5 = _mm256_extractf128_ps (u1, 1);
1229  __m128 res2 = _mm256_extractf128_ps (u2, 0);
1230  __m128 res6 = _mm256_extractf128_ps (u2, 1);
1231  __m128 res3 = _mm256_extractf128_ps (u3, 0);
1232  __m128 res7 = _mm256_extractf128_ps (u3, 1);
1233 
1234  // Cannot use the same store instructions in both paths of the 'if'
1235  // because the compiler cannot know that there is no aliasing between
1236  // pointers
1237  if (add_into)
1238  {
1239  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
1240  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1241  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
1242  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1243  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
1244  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1245  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
1246  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1247  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
1248  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1249  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
1250  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1251  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
1252  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1253  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
1254  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1255  }
1256  else
1257  {
1258  _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
1259  _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
1260  _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
1261  _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
1262  _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
1263  _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
1264  _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
1265  _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
1266  }
1267  }
1268  if (add_into)
1269  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1270  for (unsigned int v=0; v<8; ++v)
1271  out[offsets[v+outer]+i] += in[i][v+outer];
1272  else
1273  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1274  for (unsigned int v=0; v<8; ++v)
1275  out[offsets[v+outer]+i] = in[i][v+outer];
1276  }
1277 }
1278 
1279 
1280 
1281 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
1282 
1286 template <>
1287 class VectorizedArray<double>
1288 {
1289 public:
1293  static const unsigned int n_array_elements = 4;
1294 
1298  DEAL_II_ALWAYS_INLINE
1299  VectorizedArray &
1300  operator = (const double x)
1301  {
1302  data = _mm256_set1_pd(x);
1303  return *this;
1304  }
1305 
1309  DEAL_II_ALWAYS_INLINE
1310  double &
1311  operator [] (const unsigned int comp)
1312  {
1313  AssertIndexRange (comp, 4);
1314  return *(reinterpret_cast<double *>(&data)+comp);
1315  }
1316 
1320  DEAL_II_ALWAYS_INLINE
1321  const double &
1322  operator [] (const unsigned int comp) const
1323  {
1324  AssertIndexRange (comp, 4);
1325  return *(reinterpret_cast<const double *>(&data)+comp);
1326  }
1327 
1331  DEAL_II_ALWAYS_INLINE
1332  VectorizedArray &
1333  operator += (const VectorizedArray &vec)
1334  {
1335  // if the compiler supports vector arithmetics, we can simply use +=
1336  // operator on the given data type. this allows the compiler to combine
1337  // additions with multiplication (fused multiply-add) if those
1338  // instructions are available. Otherwise, we need to use the built-in
1339  // intrinsic command for __m256d
1340 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1341  data += vec.data;
1342 #else
1343  data = _mm256_add_pd(data,vec.data);
1344 #endif
1345  return *this;
1346  }
1347 
1351  DEAL_II_ALWAYS_INLINE
1352  VectorizedArray &
1353  operator -= (const VectorizedArray &vec)
1354  {
1355 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1356  data -= vec.data;
1357 #else
1358  data = _mm256_sub_pd(data,vec.data);
1359 #endif
1360  return *this;
1361  }
1365  DEAL_II_ALWAYS_INLINE
1366  VectorizedArray &
1367  operator *= (const VectorizedArray &vec)
1368  {
1369 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1370  data *= vec.data;
1371 #else
1372  data = _mm256_mul_pd(data,vec.data);
1373 #endif
1374  return *this;
1375  }
1376 
1380  DEAL_II_ALWAYS_INLINE
1381  VectorizedArray &
1382  operator /= (const VectorizedArray &vec)
1383  {
1384 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1385  data /= vec.data;
1386 #else
1387  data = _mm256_div_pd(data,vec.data);
1388 #endif
1389  return *this;
1390  }
1391 
1397  DEAL_II_ALWAYS_INLINE
1398  void load (const double *ptr)
1399  {
1400  data = _mm256_loadu_pd (ptr);
1401  }
1402 
1409  DEAL_II_ALWAYS_INLINE
1410  void store (double *ptr) const
1411  {
1412  _mm256_storeu_pd (ptr, data);
1413  }
1414 
1427  DEAL_II_ALWAYS_INLINE
1428  void gather (const double *base_ptr,
1429  const unsigned int *offsets)
1430  {
1431 #ifdef __AVX2__
1432  // unfortunately, there does not appear to be a 128 bit integer load, so
1433  // do it by some reinterpret casts here. this is allowed because the Intel
1434  // API allows aliasing between different vector types.
1435  const __m128 index_val = _mm_loadu_ps((const float *)offsets);
1436  const __m128i index = *((__m128i *)(&index_val));
1437  data = _mm256_i32gather_pd(base_ptr, index, 8);
1438 #else
1439  for (unsigned int i=0; i<4; ++i)
1440  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
1441 #endif
1442  }
1443 
1456  DEAL_II_ALWAYS_INLINE
1457  void scatter (const unsigned int *offsets,
1458  double *base_ptr) const
1459  {
1460  // no scatter operation in AVX/AVX2
1461  for (unsigned int i=0; i<4; ++i)
1462  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
1463  }
1464 
1469  __m256d data;
1470 
1471 private:
 1476  DEAL_II_ALWAYS_INLINE
 1477  VectorizedArray
 1478  get_sqrt () const
1479  {
1480  VectorizedArray res;
1481  res.data = _mm256_sqrt_pd(data);
1482  return res;
1483  }
1484 
 1489  DEAL_II_ALWAYS_INLINE
 1490  VectorizedArray
 1491  get_abs () const
1492  {
1493  // to compute the absolute value, perform bitwise andnot with -0. This
1494  // will leave all value and exponent bits unchanged but force the sign
1495  // value to +.
1496  __m256d mask = _mm256_set1_pd (-0.);
1497  VectorizedArray res;
1498  res.data = _mm256_andnot_pd(mask, data);
1499  return res;
1500  }
1501 
 1506  DEAL_II_ALWAYS_INLINE
 1507  VectorizedArray
 1508  get_max (const VectorizedArray &other) const
1509  {
1510  VectorizedArray res;
1511  res.data = _mm256_max_pd (data, other.data);
1512  return res;
1513  }
1514 
 1519  DEAL_II_ALWAYS_INLINE
 1520  VectorizedArray
 1521  get_min (const VectorizedArray &other) const
1522  {
1523  VectorizedArray res;
1524  res.data = _mm256_min_pd (data, other.data);
1525  return res;
1526  }
1527 
1531  template <typename Number2> friend VectorizedArray<Number2>
1532  std::sqrt (const VectorizedArray<Number2> &);
1533  template <typename Number2> friend VectorizedArray<Number2>
1534  std::abs (const VectorizedArray<Number2> &);
1535  template <typename Number2> friend VectorizedArray<Number2>
1536  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1537  template <typename Number2> friend VectorizedArray<Number2>
1538  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1539 };
1540 
1541 
1542 
1546 template <>
1547 inline
1548 void
1549 vectorized_load_and_transpose(const unsigned int n_entries,
1550  const double *in,
 1551  const unsigned int *offsets,
 1552  VectorizedArray<double> *out)
 1553 {
1554  const unsigned int n_chunks = n_entries/4;
1555  const double *in0 = in + offsets[0];
1556  const double *in1 = in + offsets[1];
1557  const double *in2 = in + offsets[2];
1558  const double *in3 = in + offsets[3];
1559 
1560  for (unsigned int i=0; i<n_chunks; ++i)
1561  {
1562  __m256d u0 = _mm256_loadu_pd(in0+4*i);
1563  __m256d u1 = _mm256_loadu_pd(in1+4*i);
1564  __m256d u2 = _mm256_loadu_pd(in2+4*i);
1565  __m256d u3 = _mm256_loadu_pd(in3+4*i);
1566  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1567  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1568  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1569  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1570  out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
1571  out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
1572  out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
1573  out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
1574  }
1575  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1576  for (unsigned int v=0; v<4; ++v)
1577  out[i][v] = in[offsets[v]+i];
1578 }
1579 
1580 
1581 
1585 template <>
1586 inline
1587 void
1588 vectorized_transpose_and_store(const bool add_into,
1589  const unsigned int n_entries,
1590  const VectorizedArray<double> *in,
1591  const unsigned int *offsets,
1592  double *out)
1593 {
1594  const unsigned int n_chunks = n_entries/4;
1595  double *out0 = out + offsets[0];
1596  double *out1 = out + offsets[1];
1597  double *out2 = out + offsets[2];
1598  double *out3 = out + offsets[3];
1599  for (unsigned int i=0; i<n_chunks; ++i)
1600  {
1601  __m256d u0 = in[4*i+0].data;
1602  __m256d u1 = in[4*i+1].data;
1603  __m256d u2 = in[4*i+2].data;
1604  __m256d u3 = in[4*i+3].data;
1605  __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
1606  __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
1607  __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
1608  __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
1609  __m256d res0 = _mm256_unpacklo_pd (t0, t1);
1610  __m256d res1 = _mm256_unpackhi_pd (t0, t1);
1611  __m256d res2 = _mm256_unpacklo_pd (t2, t3);
1612  __m256d res3 = _mm256_unpackhi_pd (t2, t3);
1613 
1614  // Cannot use the same store instructions in both paths of the 'if'
1615  // because the compiler cannot know that there is no aliasing between
1616  // pointers
1617  if (add_into)
1618  {
1619  res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
1620  _mm256_storeu_pd(out0+4*i, res0);
1621  res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
1622  _mm256_storeu_pd(out1+4*i, res1);
1623  res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
1624  _mm256_storeu_pd(out2+4*i, res2);
1625  res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
1626  _mm256_storeu_pd(out3+4*i, res3);
1627  }
1628  else
1629  {
1630  _mm256_storeu_pd(out0+4*i, res0);
1631  _mm256_storeu_pd(out1+4*i, res1);
1632  _mm256_storeu_pd(out2+4*i, res2);
1633  _mm256_storeu_pd(out3+4*i, res3);
1634  }
1635  }
1636  if (add_into)
1637  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1638  for (unsigned int v=0; v<4; ++v)
1639  out[offsets[v]+i] += in[i][v];
1640  else
1641  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1642  for (unsigned int v=0; v<4; ++v)
1643  out[offsets[v]+i] = in[i][v];
1644 }
1645 
1646 
1647 
1651 template <>
1652 class VectorizedArray<float>
1653 {
1654 public:
1658  static const unsigned int n_array_elements = 8;
1659 
1663  DEAL_II_ALWAYS_INLINE
1664  VectorizedArray &
1665  operator = (const float x)
1666  {
1667  data = _mm256_set1_ps(x);
1668  return *this;
1669  }
1670 
1674  DEAL_II_ALWAYS_INLINE
1675  float &
1676  operator [] (const unsigned int comp)
1677  {
1678  AssertIndexRange (comp, 8);
1679  return *(reinterpret_cast<float *>(&data)+comp);
1680  }
1681 
1685  DEAL_II_ALWAYS_INLINE
1686  const float &
1687  operator [] (const unsigned int comp) const
1688  {
1689  AssertIndexRange (comp, 8);
1690  return *(reinterpret_cast<const float *>(&data)+comp);
1691  }
1692 
1696  DEAL_II_ALWAYS_INLINE
1697  VectorizedArray &
1698  operator += (const VectorizedArray &vec)
1699  {
1700  // if the compiler supports vector arithmetics, we can simply use +=
1701  // operator on the given data type. this allows the compiler to combine
1702  // additions with multiplication (fused multiply-add) if those
1703  // instructions are available. Otherwise, we need to use the built-in
 1704  // intrinsic command for __m256
1705 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1706  data += vec.data;
1707 #else
1708  data = _mm256_add_ps(data,vec.data);
1709 #endif
1710  return *this;
1711  }
1712 
1716  DEAL_II_ALWAYS_INLINE
1717  VectorizedArray &
1718  operator -= (const VectorizedArray &vec)
1719  {
1720 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1721  data -= vec.data;
1722 #else
1723  data = _mm256_sub_ps(data,vec.data);
1724 #endif
1725  return *this;
1726  }
1730  DEAL_II_ALWAYS_INLINE
1731  VectorizedArray &
1732  operator *= (const VectorizedArray &vec)
1733  {
1734 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1735  data *= vec.data;
1736 #else
1737  data = _mm256_mul_ps(data,vec.data);
1738 #endif
1739  return *this;
1740  }
1741 
1745  DEAL_II_ALWAYS_INLINE
1746  VectorizedArray &
1747  operator /= (const VectorizedArray &vec)
1748  {
1749 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1750  data /= vec.data;
1751 #else
1752  data = _mm256_div_ps(data,vec.data);
1753 #endif
1754  return *this;
1755  }
1756 
1762  DEAL_II_ALWAYS_INLINE
1763  void load (const float *ptr)
1764  {
1765  data = _mm256_loadu_ps (ptr);
1766  }
1767 
1774  DEAL_II_ALWAYS_INLINE
1775  void store (float *ptr) const
1776  {
1777  _mm256_storeu_ps (ptr, data);
1778  }
1779 
1792  DEAL_II_ALWAYS_INLINE
1793  void gather (const float *base_ptr,
1794  const unsigned int *offsets)
1795  {
1796 #ifdef __AVX2__
1797  // unfortunately, there does not appear to be a 256 bit integer load, so
1798  // do it by some reinterpret casts here. this is allowed because the Intel
1799  // API allows aliasing between different vector types.
1800  const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
1801  const __m256i index = *((__m256i *)(&index_val));
1802  data = _mm256_i32gather_ps(base_ptr, index, 4);
1803 #else
1804  for (unsigned int i=0; i<8; ++i)
1805  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
1806 #endif
1807  }
1808 
1821  DEAL_II_ALWAYS_INLINE
1822  void scatter (const unsigned int *offsets,
1823  float *base_ptr) const
1824  {
1825  // no scatter operation in AVX/AVX2
1826  for (unsigned int i=0; i<8; ++i)
1827  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
1828  }
1829 
1834  __m256 data;
1835 
1836 private:
1837 
 1842  DEAL_II_ALWAYS_INLINE
 1843  VectorizedArray
 1844  get_sqrt () const
1845  {
1846  VectorizedArray res;
1847  res.data = _mm256_sqrt_ps(data);
1848  return res;
1849  }
1850 
 1855  DEAL_II_ALWAYS_INLINE
 1856  VectorizedArray
 1857  get_abs () const
1858  {
1859  // to compute the absolute value, perform bitwise andnot with -0. This
1860  // will leave all value and exponent bits unchanged but force the sign
1861  // value to +.
1862  __m256 mask = _mm256_set1_ps (-0.f);
1863  VectorizedArray res;
1864  res.data = _mm256_andnot_ps(mask, data);
1865  return res;
1866  }
1867 
 1872  DEAL_II_ALWAYS_INLINE
 1873  VectorizedArray
 1874  get_max (const VectorizedArray &other) const
1875  {
1876  VectorizedArray res;
1877  res.data = _mm256_max_ps (data, other.data);
1878  return res;
1879  }
1880 
 1885  DEAL_II_ALWAYS_INLINE
 1886  VectorizedArray
 1887  get_min (const VectorizedArray &other) const
1888  {
1889  VectorizedArray res;
1890  res.data = _mm256_min_ps (data, other.data);
1891  return res;
1892  }
1893 
1897  template <typename Number2> friend VectorizedArray<Number2>
1898  std::sqrt (const VectorizedArray<Number2> &);
1899  template <typename Number2> friend VectorizedArray<Number2>
1900  std::abs (const VectorizedArray<Number2> &);
1901  template <typename Number2> friend VectorizedArray<Number2>
1902  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1903  template <typename Number2> friend VectorizedArray<Number2>
1904  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
1905 };
1906 
1907 
1908 
1912 template <>
1913 inline
1914 void
1915 vectorized_load_and_transpose(const unsigned int n_entries,
1916  const float *in,
 1917  const unsigned int *offsets,
 1918  VectorizedArray<float> *out)
 1919 {
1920  const unsigned int n_chunks = n_entries/4;
1921  for (unsigned int i=0; i<n_chunks; ++i)
1922  {
1923  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
1924  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
1925  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
1926  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
1927  __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
1928  __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
1929  __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
1930  __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
1931  // To avoid warnings about uninitialized variables, need to initialize
1932  // one variable with zero before using it.
1933  __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
1934  t0 = _mm256_insertf128_ps (t3, u0, 0);
1935  t0 = _mm256_insertf128_ps (t0, u4, 1);
1936  t1 = _mm256_insertf128_ps (t3, u1, 0);
1937  t1 = _mm256_insertf128_ps (t1, u5, 1);
1938  t2 = _mm256_insertf128_ps (t3, u2, 0);
1939  t2 = _mm256_insertf128_ps (t2, u6, 1);
1940  t3 = _mm256_insertf128_ps (t3, u3, 0);
1941  t3 = _mm256_insertf128_ps (t3, u7, 1);
1942  __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
1943  __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
1944  __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
1945  __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
1946  out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
1947  out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
1948  out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
1949  out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
1950  }
1951  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
1952  for (unsigned int v=0; v<8; ++v)
1953  out[i][v] = in[offsets[v]+i];
1954 }
1955 
1956 
1957 
1961 template <>
1962 inline
1963 void
1964 vectorized_transpose_and_store(const bool add_into,
1965  const unsigned int n_entries,
1966  const VectorizedArray<float> *in,
1967  const unsigned int *offsets,
1968  float *out)
1969 {
1970  const unsigned int n_chunks = n_entries/4;
1971  for (unsigned int i=0; i<n_chunks; ++i)
1972  {
1973  __m256 u0 = in[4*i+0].data;
1974  __m256 u1 = in[4*i+1].data;
1975  __m256 u2 = in[4*i+2].data;
1976  __m256 u3 = in[4*i+3].data;
1977  __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
1978  __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
1979  __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
1980  __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
1981  u0 = _mm256_shuffle_ps (t0, t2, 0x88);
1982  u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
1983  u2 = _mm256_shuffle_ps (t1, t3, 0x88);
1984  u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
1985  __m128 res0 = _mm256_extractf128_ps (u0, 0);
1986  __m128 res4 = _mm256_extractf128_ps (u0, 1);
1987  __m128 res1 = _mm256_extractf128_ps (u1, 0);
1988  __m128 res5 = _mm256_extractf128_ps (u1, 1);
1989  __m128 res2 = _mm256_extractf128_ps (u2, 0);
1990  __m128 res6 = _mm256_extractf128_ps (u2, 1);
1991  __m128 res3 = _mm256_extractf128_ps (u3, 0);
1992  __m128 res7 = _mm256_extractf128_ps (u3, 1);
1993 
1994  // Cannot use the same store instructions in both paths of the 'if'
1995  // because the compiler cannot know that there is no aliasing between
1996  // pointers
1997  if (add_into)
1998  {
1999  res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
2000  _mm_storeu_ps(out+4*i+offsets[0], res0);
2001  res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
2002  _mm_storeu_ps(out+4*i+offsets[1], res1);
2003  res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
2004  _mm_storeu_ps(out+4*i+offsets[2], res2);
2005  res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
2006  _mm_storeu_ps(out+4*i+offsets[3], res3);
2007  res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
2008  _mm_storeu_ps(out+4*i+offsets[4], res4);
2009  res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
2010  _mm_storeu_ps(out+4*i+offsets[5], res5);
2011  res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
2012  _mm_storeu_ps(out+4*i+offsets[6], res6);
2013  res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
2014  _mm_storeu_ps(out+4*i+offsets[7], res7);
2015  }
2016  else
2017  {
2018  _mm_storeu_ps(out+4*i+offsets[0], res0);
2019  _mm_storeu_ps(out+4*i+offsets[1], res1);
2020  _mm_storeu_ps(out+4*i+offsets[2], res2);
2021  _mm_storeu_ps(out+4*i+offsets[3], res3);
2022  _mm_storeu_ps(out+4*i+offsets[4], res4);
2023  _mm_storeu_ps(out+4*i+offsets[5], res5);
2024  _mm_storeu_ps(out+4*i+offsets[6], res6);
2025  _mm_storeu_ps(out+4*i+offsets[7], res7);
2026  }
2027  }
2028  if (add_into)
2029  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2030  for (unsigned int v=0; v<8; ++v)
2031  out[offsets[v]+i] += in[i][v];
2032  else
2033  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2034  for (unsigned int v=0; v<8; ++v)
2035  out[offsets[v]+i] = in[i][v];
2036 }
2037 
2038 
2039 
2040 // for safety, also check that __SSE2__ is defined in case the user manually
2041 // set some conflicting compile flags which prevent compilation
2042 
2043 #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
2044 
2048 template <>
2049 class VectorizedArray<double>
2050 {
2051 public:
2055  static const unsigned int n_array_elements = 2;
2056 
2060  DEAL_II_ALWAYS_INLINE
2061  VectorizedArray &
2062  operator = (const double x)
2063  {
2064  data = _mm_set1_pd(x);
2065  return *this;
2066  }
2067 
2071  DEAL_II_ALWAYS_INLINE
2072  double &
2073  operator [] (const unsigned int comp)
2074  {
2075  AssertIndexRange (comp, 2);
2076  return *(reinterpret_cast<double *>(&data)+comp);
2077  }
2078 
2082  DEAL_II_ALWAYS_INLINE
2083  const double &
2084  operator [] (const unsigned int comp) const
2085  {
2086  AssertIndexRange (comp, 2);
2087  return *(reinterpret_cast<const double *>(&data)+comp);
2088  }
2089 
2093  DEAL_II_ALWAYS_INLINE
2094  VectorizedArray &
2095  operator += (const VectorizedArray &vec)
2096  {
2097 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2098  data += vec.data;
2099 #else
2100  data = _mm_add_pd(data,vec.data);
2101 #endif
2102  return *this;
2103  }
2104 
2108  DEAL_II_ALWAYS_INLINE
2109  VectorizedArray &
2110  operator -= (const VectorizedArray &vec)
2111  {
2112 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2113  data -= vec.data;
2114 #else
2115  data = _mm_sub_pd(data,vec.data);
2116 #endif
2117  return *this;
2118  }
2119 
2123  DEAL_II_ALWAYS_INLINE
2124  VectorizedArray &
2125  operator *= (const VectorizedArray &vec)
2126  {
2127 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2128  data *= vec.data;
2129 #else
2130  data = _mm_mul_pd(data,vec.data);
2131 #endif
2132  return *this;
2133  }
2134 
2138  DEAL_II_ALWAYS_INLINE
2139  VectorizedArray &
2140  operator /= (const VectorizedArray &vec)
2141  {
2142 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2143  data /= vec.data;
2144 #else
2145  data = _mm_div_pd(data,vec.data);
2146 #endif
2147  return *this;
2148  }
2149 
2155  DEAL_II_ALWAYS_INLINE
2156  void load (const double *ptr)
2157  {
2158  data = _mm_loadu_pd (ptr);
2159  }
2160 
2167  DEAL_II_ALWAYS_INLINE
2168  void store (double *ptr) const
2169  {
2170  _mm_storeu_pd (ptr, data);
2171  }
2172 
2185  DEAL_II_ALWAYS_INLINE
2186  void gather (const double *base_ptr,
2187  const unsigned int *offsets)
2188  {
2189  for (unsigned int i=0; i<2; ++i)
2190  *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
2191  }
2192 
2205  DEAL_II_ALWAYS_INLINE
2206  void scatter (const unsigned int *offsets,
2207  double *base_ptr) const
2208  {
2209  for (unsigned int i=0; i<2; ++i)
2210  base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
2211  }
2212 
2217  __m128d data;
2218 
2219 private:
 2224  DEAL_II_ALWAYS_INLINE
 2225  VectorizedArray
 2226  get_sqrt () const
2227  {
2228  VectorizedArray res;
2229  res.data = _mm_sqrt_pd(data);
2230  return res;
2231  }
2232 
 2237  DEAL_II_ALWAYS_INLINE
 2238  VectorizedArray
 2239  get_abs () const
2240  {
2241  // to compute the absolute value, perform
2242  // bitwise andnot with -0. This will leave all
2243  // value and exponent bits unchanged but force
2244  // the sign value to +.
2245  __m128d mask = _mm_set1_pd (-0.);
2246  VectorizedArray res;
2247  res.data = _mm_andnot_pd(mask, data);
2248  return res;
2249  }
2250 
 2255  DEAL_II_ALWAYS_INLINE
 2256  VectorizedArray
 2257  get_max (const VectorizedArray &other) const
2258  {
2259  VectorizedArray res;
2260  res.data = _mm_max_pd (data, other.data);
2261  return res;
2262  }
2263 
 2268  DEAL_II_ALWAYS_INLINE
 2269  VectorizedArray
 2270  get_min (const VectorizedArray &other) const
2271  {
2272  VectorizedArray res;
2273  res.data = _mm_min_pd (data, other.data);
2274  return res;
2275  }
2276 
2280  template <typename Number2> friend VectorizedArray<Number2>
2281  std::sqrt (const VectorizedArray<Number2> &);
2282  template <typename Number2> friend VectorizedArray<Number2>
2283  std::abs (const VectorizedArray<Number2> &);
2284  template <typename Number2> friend VectorizedArray<Number2>
2285  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2286  template <typename Number2> friend VectorizedArray<Number2>
2287  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2288 };
2289 
2290 
2291 
2295 template <>
2296 inline
2297 void vectorized_load_and_transpose(const unsigned int n_entries,
2298  const double *in,
 2299  const unsigned int *offsets,
 2300  VectorizedArray<double> *out)
 2301 {
2302  const unsigned int n_chunks = n_entries/2;
2303  for (unsigned int i=0; i<n_chunks; ++i)
2304  {
2305  __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
2306  __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
2307  out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
2308  out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
2309  }
2310  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2311  for (unsigned int v=0; v<2; ++v)
2312  out[i][v] = in[offsets[v]+i];
2313 }
2314 
2315 
2316 
2320 template <>
2321 inline
2322 void
2323 vectorized_transpose_and_store(const bool add_into,
2324  const unsigned int n_entries,
2325  const VectorizedArray<double> *in,
2326  const unsigned int *offsets,
2327  double *out)
2328 {
2329  const unsigned int n_chunks = n_entries/2;
2330  if (add_into)
2331  {
2332  for (unsigned int i=0; i<n_chunks; ++i)
2333  {
2334  __m128d u0 = in[2*i+0].data;
2335  __m128d u1 = in[2*i+1].data;
2336  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2337  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2338  _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
2339  _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
2340  }
2341  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2342  for (unsigned int v=0; v<2; ++v)
2343  out[offsets[v]+i] += in[i][v];
2344  }
2345  else
2346  {
2347  for (unsigned int i=0; i<n_chunks; ++i)
2348  {
2349  __m128d u0 = in[2*i+0].data;
2350  __m128d u1 = in[2*i+1].data;
2351  __m128d res0 = _mm_unpacklo_pd (u0, u1);
2352  __m128d res1 = _mm_unpackhi_pd (u0, u1);
2353  _mm_storeu_pd(out+2*i+offsets[0], res0);
2354  _mm_storeu_pd(out+2*i+offsets[1], res1);
2355  }
2356  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
2357  for (unsigned int v=0; v<2; ++v)
2358  out[offsets[v]+i] = in[i][v];
2359  }
2360 }
2361 
2362 
2363 
2367 template <>
2368 class VectorizedArray<float>
2369 {
2370 public:
2374  static const unsigned int n_array_elements = 4;
2375 
2380  DEAL_II_ALWAYS_INLINE
2381  VectorizedArray &
2382  operator = (const float x)
2383  {
2384  data = _mm_set1_ps(x);
2385  return *this;
2386  }
2387 
2391  DEAL_II_ALWAYS_INLINE
2392  float &
2393  operator [] (const unsigned int comp)
2394  {
2395  AssertIndexRange (comp, 4);
2396  return *(reinterpret_cast<float *>(&data)+comp);
2397  }
2398 
2402  DEAL_II_ALWAYS_INLINE
2403  const float &
2404  operator [] (const unsigned int comp) const
2405  {
2406  AssertIndexRange (comp, 4);
2407  return *(reinterpret_cast<const float *>(&data)+comp);
2408  }
2409 
2413  DEAL_II_ALWAYS_INLINE
2414  VectorizedArray &
2415  operator += (const VectorizedArray &vec)
2416  {
2417 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2418  data += vec.data;
2419 #else
2420  data = _mm_add_ps(data,vec.data);
2421 #endif
2422  return *this;
2423  }
2424 
2428  DEAL_II_ALWAYS_INLINE
2429  VectorizedArray &
2430  operator -= (const VectorizedArray &vec)
2431  {
2432 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2433  data -= vec.data;
2434 #else
2435  data = _mm_sub_ps(data,vec.data);
2436 #endif
2437  return *this;
2438  }
2439 
2443  DEAL_II_ALWAYS_INLINE
2444  VectorizedArray &
2445  operator *= (const VectorizedArray &vec)
2446  {
2447 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2448  data *= vec.data;
2449 #else
2450  data = _mm_mul_ps(data,vec.data);
2451 #endif
2452  return *this;
2453  }
2454 
2458  DEAL_II_ALWAYS_INLINE
2459  VectorizedArray &
2460  operator /= (const VectorizedArray &vec)
2461  {
2462 #ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2463  data /= vec.data;
2464 #else
2465  data = _mm_div_ps(data,vec.data);
2466 #endif
2467  return *this;
2468  }
2469 
2475  DEAL_II_ALWAYS_INLINE
2476  void load (const float *ptr)
2477  {
2478  data = _mm_loadu_ps (ptr);
2479  }
2480 
2487  DEAL_II_ALWAYS_INLINE
2488  void store (float *ptr) const
2489  {
2490  _mm_storeu_ps (ptr, data);
2491  }
2492 
2505  DEAL_II_ALWAYS_INLINE
2506  void gather (const float *base_ptr,
2507  const unsigned int *offsets)
2508  {
2509  for (unsigned int i=0; i<4; ++i)
2510  *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
2511  }
2512 
2525  DEAL_II_ALWAYS_INLINE
2526  void scatter (const unsigned int *offsets,
2527  float *base_ptr) const
2528  {
2529  for (unsigned int i=0; i<4; ++i)
2530  base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
2531  }
2532 
2537  __m128 data;
2538 
2539 private:
 2544  DEAL_II_ALWAYS_INLINE
 2545  VectorizedArray
 2546  get_sqrt () const
2547  {
2548  VectorizedArray res;
2549  res.data = _mm_sqrt_ps(data);
2550  return res;
2551  }
2552 
 2557  DEAL_II_ALWAYS_INLINE
 2558  VectorizedArray
 2559  get_abs () const
2560  {
2561  // to compute the absolute value, perform bitwise andnot with -0. This
2562  // will leave all value and exponent bits unchanged but force the sign
2563  // value to +.
2564  __m128 mask = _mm_set1_ps (-0.f);
2565  VectorizedArray res;
2566  res.data = _mm_andnot_ps(mask, data);
2567  return res;
2568  }
2569 
 2574  DEAL_II_ALWAYS_INLINE
 2575  VectorizedArray
 2576  get_max (const VectorizedArray &other) const
2577  {
2578  VectorizedArray res;
2579  res.data = _mm_max_ps (data, other.data);
2580  return res;
2581  }
2582 
 2587  DEAL_II_ALWAYS_INLINE
 2588  VectorizedArray
 2589  get_min (const VectorizedArray &other) const
2590  {
2591  VectorizedArray res;
2592  res.data = _mm_min_ps (data, other.data);
2593  return res;
2594  }
2595 
2599  template <typename Number2> friend VectorizedArray<Number2>
2600  std::sqrt (const VectorizedArray<Number2> &);
2601  template <typename Number2> friend VectorizedArray<Number2>
2602  std::abs (const VectorizedArray<Number2> &);
2603  template <typename Number2> friend VectorizedArray<Number2>
2604  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2605  template <typename Number2> friend VectorizedArray<Number2>
2606  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
2607 };
2608 
2609 
2610 
2614 template <>
2615 inline
2616 void vectorized_load_and_transpose(const unsigned int n_entries,
2617  const float *in,
 2618  const unsigned int *offsets,
 2619  VectorizedArray<float> *out)
 2620 {
2621  const unsigned int n_chunks = n_entries/4;
2622  for (unsigned int i=0; i<n_chunks; ++i)
2623  {
2624  __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
2625  __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
2626  __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
2627  __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
2628  __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
2629  __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
2630  __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
2631  __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
2632  out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
2633  out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
2634  out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
2635  out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
2636  }
2637  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2638  for (unsigned int v=0; v<4; ++v)
2639  out[i][v] = in[offsets[v]+i];
2640 }
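The loop above is a classic 4x4 transpose built from _mm_shuffle_ps: four vectors that each hold four consecutive entries of one source array are rearranged into four vectors that each hold one entry from every source array, so that out[i][v] == in[offsets[v]+i]. A brief sketch of how this function might be called (buffer layout, sizes, and names are hypothetical, assuming four lanes):

#include <deal.II/base/vectorization.h>

void example_load_and_transpose ()
{
  // Four scalar arrays of length 8 stored back-to-back: array v starts at 8*v.
  const unsigned int n_entries = 8;
  const unsigned int offsets[4] = {0, 8, 16, 24};
  float in[32];
  for (unsigned int i=0; i<32; ++i)
    in[i] = static_cast<float>(i);

  dealii::VectorizedArray<float> out[8];
  dealii::vectorized_load_and_transpose (n_entries, in, offsets, out);
  // out[i][v] == in[offsets[v] + i]; for example out[3][2] == 19.f
}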
2641 
2642 
2643 
2647 template <>
2648 inline
2649 void
2650 vectorized_transpose_and_store(const bool add_into,
2651  const unsigned int n_entries,
2652  const VectorizedArray<float> *in,
2653  const unsigned int *offsets,
2654  float *out)
2655 {
2656  const unsigned int n_chunks = n_entries/4;
2657  for (unsigned int i=0; i<n_chunks; ++i)
2658  {
2659  __m128 u0 = in[4*i+0].data;
2660  __m128 u1 = in[4*i+1].data;
2661  __m128 u2 = in[4*i+2].data;
2662  __m128 u3 = in[4*i+3].data;
2663  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2664  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2665  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2666  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2667  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2668  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2669  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2670  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2671 
2672  // Cannot use the same store instructions in both paths of the 'if'
2673  // because the compiler cannot know that there is no aliasing between
2674  // pointers
2675  if (add_into)
2676  {
2677  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2678  _mm_storeu_ps(out+4*i+offsets[0], u0);
2679  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2680  _mm_storeu_ps(out+4*i+offsets[1], u1);
2681  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2682  _mm_storeu_ps(out+4*i+offsets[2], u2);
2683  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2684  _mm_storeu_ps(out+4*i+offsets[3], u3);
2685  }
2686  else
2687  {
2688  _mm_storeu_ps(out+4*i+offsets[0], u0);
2689  _mm_storeu_ps(out+4*i+offsets[1], u1);
2690  _mm_storeu_ps(out+4*i+offsets[2], u2);
2691  _mm_storeu_ps(out+4*i+offsets[3], u3);
2692  }
2693  }
2694  if (add_into)
2695  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2696  for (unsigned int v=0; v<4; ++v)
2697  out[offsets[v]+i] += in[i][v];
2698  else
2699  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2700  for (unsigned int v=0; v<4; ++v)
2701  out[offsets[v]+i] = in[i][v];
2702 }
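This is the inverse operation: the lanes of each VectorizedArray<float> are transposed back into the four scalar arrays addressed through offsets, either overwriting (add_into == false) or accumulating into (add_into == true) the existing values. A small self-contained sketch with made-up sizes, again assuming four lanes:

#include <deal.II/base/vectorization.h>

void example_transpose_and_store ()
{
  const unsigned int n_entries = 8;
  const unsigned int offsets[4] = {0, 8, 16, 24};

  dealii::VectorizedArray<float> in[8];
  for (unsigned int i=0; i<n_entries; ++i)
    in[i] = 1.0f*i;                    // broadcast i to all four lanes

  float out[32] = {};
  // First call overwrites, second call accumulates on top.
  dealii::vectorized_transpose_and_store (false, n_entries, in, offsets, out);
  dealii::vectorized_transpose_and_store (true,  n_entries, in, offsets, out);
  // Now out[offsets[v] + i] == 2.f*i for every lane v and index i.
}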
2703 
2704 
2705 
2706 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2707 
2708 
2714 template <typename Number>
2715 inline DEAL_II_ALWAYS_INLINE
2716 VectorizedArray<Number>
2717 operator + (const VectorizedArray<Number> &u,
2718  const VectorizedArray<Number> &v)
2719 {
2720  VectorizedArray<Number> tmp = u;
2721  return tmp+=v;
2722 }
2723 
2729 template <typename Number>
2730 inline DEAL_II_ALWAYS_INLINE
2731 VectorizedArray<Number>
2732 operator - (const VectorizedArray<Number> &u,
2733  const VectorizedArray<Number> &v)
2734 {
2735  VectorizedArray<Number> tmp = u;
2736  return tmp-=v;
2737 }
2738 
2744 template <typename Number>
2745 inline DEAL_II_ALWAYS_INLINE
2746 VectorizedArray<Number>
2747 operator * (const VectorizedArray<Number> &u,
2748  const VectorizedArray<Number> &v)
2749 {
2750  VectorizedArray<Number> tmp = u;
2751  return tmp*=v;
2752 }
2753 
2759 template <typename Number>
2760 inline DEAL_II_ALWAYS_INLINE
2761 VectorizedArray<Number>
2762 operator / (const VectorizedArray<Number> &u,
2763  const VectorizedArray<Number> &v)
2764 {
2765  VectorizedArray<Number> tmp = u;
2766  return tmp/=v;
2767 }
2768 
2775 template <typename Number>
2776 inline DEAL_II_ALWAYS_INLINE
2777 VectorizedArray<Number>
2778 operator + (const Number &u,
2779  const VectorizedArray<Number> &v)
2780 {
2781  VectorizedArray<Number> tmp;
2782  tmp = u;
2783  return tmp+=v;
2784 }
2785 
2794 inline DEAL_II_ALWAYS_INLINE
2795 VectorizedArray<float>
2796 operator + (const double &u,
2797  const VectorizedArray<float> &v)
2798 {
2799  VectorizedArray<float> tmp;
2800  tmp = u;
2801  return tmp+=v;
2802 }
2803 
2810 template <typename Number>
2811 inline DEAL_II_ALWAYS_INLINE
2812 VectorizedArray<Number>
2813 operator + (const VectorizedArray<Number> &v,
2814  const Number &u)
2815 {
2816  return u + v;
2817 }
2818 
2827 inline DEAL_II_ALWAYS_INLINE
2828 VectorizedArray<float>
2829 operator + (const VectorizedArray<float> &v,
2830  const double &u)
2831 {
2832  return u + v;
2833 }
2834 
2841 template <typename Number>
2842 inline DEAL_II_ALWAYS_INLINE
2843 VectorizedArray<Number>
2844 operator - (const Number &u,
2845  const VectorizedArray<Number> &v)
2846 {
2847  VectorizedArray<Number> tmp;
2848  tmp = u;
2849  return tmp-=v;
2850 }
2851 
2860 inline DEAL_II_ALWAYS_INLINE
2861 VectorizedArray<float>
2862 operator - (const double &u,
2863  const VectorizedArray<float> &v)
2864 {
2865  VectorizedArray<float> tmp;
2866  tmp = float(u);
2867  return tmp-=v;
2868 }
2869 
2876 template <typename Number>
2877 inline DEAL_II_ALWAYS_INLINE
2878 VectorizedArray<Number>
2879 operator - (const VectorizedArray<Number> &v,
2880  const Number &u)
2881 {
2882  VectorizedArray<Number> tmp;
2883  tmp = u;
2884  return v-tmp;
2885 }
2886 
2895 inline DEAL_II_ALWAYS_INLINE
2896 VectorizedArray<float>
2897 operator - (const VectorizedArray<float> &v,
2898  const double &u)
2899 {
2900  VectorizedArray<float> tmp;
2901  tmp = float(u);
2902  return v-tmp;
2903 }
2904 
2911 template <typename Number>
2912 inline DEAL_II_ALWAYS_INLINE
2913 VectorizedArray<Number>
2914 operator * (const Number &u,
2915  const VectorizedArray<Number> &v)
2916 {
2917  VectorizedArray<Number> tmp;
2918  tmp = u;
2919  return tmp*=v;
2920 }
2921 
2930 inline DEAL_II_ALWAYS_INLINE
2931 VectorizedArray<float>
2932 operator * (const double &u,
2933  const VectorizedArray<float> &v)
2934 {
2935  VectorizedArray<float> tmp;
2936  tmp = float(u);
2937  return tmp*=v;
2938 }
2939 
2946 template <typename Number>
2947 inline DEAL_II_ALWAYS_INLINE
2948 VectorizedArray<Number>
2949 operator * (const VectorizedArray<Number> &v,
2950  const Number &u)
2951 {
2952  return u * v;
2953 }
2954 
2963 inline DEAL_II_ALWAYS_INLINE
2964 VectorizedArray<float>
2965 operator * (const VectorizedArray<float> &v,
2966  const double &u)
2967 {
2968  return u * v;
2969 }
2970 
2977 template <typename Number>
2978 inline DEAL_II_ALWAYS_INLINE
2979 VectorizedArray<Number>
2980 operator / (const Number &u,
2981  const VectorizedArray<Number> &v)
2982 {
2983  VectorizedArray<Number> tmp;
2984  tmp = u;
2985  return tmp/=v;
2986 }
2987 
2996 inline DEAL_II_ALWAYS_INLINE
2997 VectorizedArray<float>
2998 operator / (const double &u,
2999  const VectorizedArray<float> &v)
3000 {
3001  VectorizedArray<float> tmp;
3002  tmp = float(u);
3003  return tmp/=v;
3004 }
3005 
3012 template <typename Number>
3013 inline DEAL_II_ALWAYS_INLINE
3014 VectorizedArray<Number>
3015 operator / (const VectorizedArray<Number> &v,
3016  const Number &u)
3017 {
3018  VectorizedArray<Number> tmp;
3019  tmp = u;
3020  return v/tmp;
3021 }
3022 
3031 inline DEAL_II_ALWAYS_INLINE
3032 VectorizedArray<float>
3033 operator / (const VectorizedArray<float> &v,
3034  const double &u)
3035 {
3036  VectorizedArray<float> tmp;
3037  tmp = float(u);
3038  return v/tmp;
3039 }
3040 
3046 template <typename Number>
3047 inline DEAL_II_ALWAYS_INLINE
3048 VectorizedArray<Number>
3049 operator + (const VectorizedArray<Number> &u)
3050 {
3051  return u;
3052 }
3053 
3059 template <typename Number>
3060 inline DEAL_II_ALWAYS_INLINE
3061 VectorizedArray<Number>
3062 operator - (const VectorizedArray<Number> &u)
3063 {
3064  // to get a negative sign, subtract the input from zero (could also
3065  // multiply by -1, but this one is slightly simpler)
3066  return VectorizedArray<Number>()-u;
3067 }
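Taken together, these overloads let scalar operands mix freely with VectorizedArray operands: the scalar is broadcast to all lanes and the operation then proceeds element-wise, with the double-with-VectorizedArray<float> variants narrowing the scalar to float first. A short illustrative sketch (the function name is made up, not part of the library):

#include <deal.II/base/vectorization.h>

dealii::VectorizedArray<float>
example_operators (const dealii::VectorizedArray<float> &v)
{
  // 2*v_i + 1 in each lane: the scalars are broadcast before the operation
  dealii::VectorizedArray<float> w = 2.0f*v + 1.0f;
  // element-wise negation and division
  return -w/v;
}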
3068 
3069 
3070 DEAL_II_NAMESPACE_CLOSE
3071 
3072 
3079 namespace std
3080 {
3088  template <typename Number>
3089  inline
3090  ::VectorizedArray<Number>
3091  sin (const ::VectorizedArray<Number> &x)
3092  {
3093  // put values in an array and later read that array back with an unaligned
3094  // load. This should save some instructions as compared to directly
3095  // setting the individual elements and also circumvents a compiler
3096  // optimization bug in gcc-4.6 with SSE2 (see also the deal.II developers
3097  // list from April 2014, topic "matrix_free/step-48 Test").
3098  Number values[::VectorizedArray<Number>::n_array_elements];
3099  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3100  values[i] = std::sin(x[i]);
3101  ::VectorizedArray<Number> out;
3102  out.load(&values[0]);
3103  return out;
3104  }
3105 
3106 
3107 
3115  template <typename Number>
3116  inline
3117  ::VectorizedArray<Number>
3118  cos (const ::VectorizedArray<Number> &x)
3119  {
3120  Number values[::VectorizedArray<Number>::n_array_elements];
3121  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3122  values[i] = std::cos(x[i]);
3123  ::VectorizedArray<Number> out;
3124  out.load(&values[0]);
3125  return out;
3126  }
3127 
3128 
3129 
3137  template <typename Number>
3138  inline
3139  ::VectorizedArray<Number>
3140  tan (const ::VectorizedArray<Number> &x)
3141  {
3142  Number values[::VectorizedArray<Number>::n_array_elements];
3143  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3144  values[i] = std::tan(x[i]);
3145  ::VectorizedArray<Number> out;
3146  out.load(&values[0]);
3147  return out;
3148  }
3149 
3150 
3151 
3159  template <typename Number>
3160  inline
3161  ::VectorizedArray<Number>
3162  exp (const ::VectorizedArray<Number> &x)
3163  {
3164  Number values[::VectorizedArray<Number>::n_array_elements];
3165  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3166  values[i] = std::exp(x[i]);
3167  ::VectorizedArray<Number> out;
3168  out.load(&values[0]);
3169  return out;
3170  }
3171 
3172 
3173 
3181  template <typename Number>
3182  inline
3183  ::VectorizedArray<Number>
3184  log (const ::VectorizedArray<Number> &x)
3185  {
3186  Number values[::VectorizedArray<Number>::n_array_elements];
3187  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3188  values[i] = std::log(x[i]);
3189  ::VectorizedArray<Number> out;
3190  out.load(&values[0]);
3191  return out;
3192  }
3193 
3194 
3195 
3203  template <typename Number>
3204  inline
3205  ::VectorizedArray<Number>
3206  sqrt (const ::VectorizedArray<Number> &x)
3207  {
3208  return x.get_sqrt();
3209  }
3210 
3211 
3212 
3220  template <typename Number>
3221  inline
3222  ::VectorizedArray<Number>
3223  pow (const ::VectorizedArray<Number> &x,
3224  const Number p)
3225  {
3226  Number values[::VectorizedArray<Number>::n_array_elements];
3227  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3228  values[i] = std::pow(x[i], p);
3229  ::VectorizedArray<Number> out;
3230  out.load(&values[0]);
3231  return out;
3232  }
3233 
3234 
3235 
3243  template <typename Number>
3244  inline
3245  ::VectorizedArray<Number>
3246  abs (const ::VectorizedArray<Number> &x)
3247  {
3248  return x.get_abs();
3249  }
3250 
3251 
3252 
3260  template <typename Number>
3261  inline
3262  ::VectorizedArray<Number>
3263  max (const ::VectorizedArray<Number> &x,
3264  const ::VectorizedArray<Number> &y)
3265  {
3266  return x.get_max(y);
3267  }
3268 
3269 
3270 
3278  template <typename Number>
3279  inline
3280  ::VectorizedArray<Number>
3281  min (const ::VectorizedArray<Number> &x,
3282  const ::VectorizedArray<Number> &y)
3283  {
3284  return x.get_min(y);
3285  }
3286 
3287 }
3288 
3289 #endif
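For reference, a brief sketch of these std overloads in use (the function name is made up): sin, cos, tan, exp, log and pow evaluate lane by lane through a temporary array, while sqrt, abs, max and min forward to the corresponding vector instructions through the private get_* members of VectorizedArray.

#include <deal.II/base/vectorization.h>
#include <cmath>

dealii::VectorizedArray<double>
example_std_math (const dealii::VectorizedArray<double> &x)
{
  // evaluated lane by lane via a temporary scalar array
  dealii::VectorizedArray<double> a = std::sin (x);
  // abs and sqrt map to single vector operations per lane group
  dealii::VectorizedArray<double> b = std::sqrt (std::abs (x));
  // element-wise maximum of the two results
  return std::max (a, b);
}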