Reference documentation for deal.II version Git aa2075a 2017-04-21 00:33:12 +0200
vectorization.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2017 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#ifndef dealii__vectorization_h
#define dealii__vectorization_h

#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/base/template_constraints.h>

#include <cmath>
// Note:
// The flag DEAL_II_COMPILER_VECTORIZATION_LEVEL is essentially constructed
// according to the following scheme:
// #ifdef __AVX512F__
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 3
// #elif defined (__AVX__)
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2
// #elif defined (__SSE2__)
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1
// #else
// #define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0
// #endif
// In addition to checking the flags __AVX__ and __SSE2__, a CMake test,
// 'check_01_cpu_features.cmake', ensures that these features are not only
// present in the compilation unit but are also working properly.

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 // AVX, AVX-512
#include <immintrin.h>
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1 // SSE2
#include <emmintrin.h>
#endif
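
// A minimal usage sketch (illustrative, not part of the original header):
// user code can branch on the detected level at compile time. The macro is
// the real deal.II configuration macro; the function itself is hypothetical.
//
//   const char *vectorization_level_name ()
//   {
//   #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3
//     return "AVX-512";
//   #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 2
//     return "AVX";
//   #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1
//     return "SSE2";
//   #else
//     return "scalar";
//   #endif
//   }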


DEAL_II_NAMESPACE_OPEN


namespace internal
{
  // Convert a scalar into a VectorizedArray with all lanes set to that
  // value; this is needed because VectorizedArray is a POD type without
  // constructors.
  template <typename T>
  struct NumberType<VectorizedArray<T> >
  {
    static VectorizedArray<T> value (const T &t)
    {
      VectorizedArray<T> tmp;
      tmp=t;
      return tmp;
    }
  };
}


// Enable the EnableIfScalar type trait for VectorizedArray<Number> such
// that it can be used as a Number type in Tensor<rank,dim,Number>, etc.

template<typename Number>
struct EnableIfScalar<VectorizedArray<Number> >
{
  typedef VectorizedArray<typename EnableIfScalar<Number>::type> type;
};
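
// Illustrative sketch (assuming deal.II's Tensor class, which is not part of
// this header): with the trait above, VectorizedArray can act as the scalar
// type of a tensor, so every tensor component stores one SIMD lane package:
//
//   #include <deal.II/base/tensor.h>
//
//   Tensor<1,3,VectorizedArray<double> > grad;
//   grad[0] = make_vectorized_array (1.5);   // broadcast into component 0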


/**
 * A class that represents a short array of numbers on which vectorized
 * (SIMD) operations can be performed. This general template acts as a
 * fall-back with a single element; the specializations below pack 2, 4, 8,
 * or 16 elements into one SSE2, AVX, or AVX-512 register, depending on the
 * data type and the instruction set selected at compile time.
 */
template <typename Number>
class VectorizedArray
{
public:
  /**
   * Number of elements in the array; one for this generic fall-back class.
   */
  static const unsigned int n_array_elements = 1;

  // POD means that there should be no user-defined constructors, destructors
  // and copy functions (the standard is somewhat relaxed in C++2011, though).

  /**
   * Assign a scalar to all elements of the array.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const Number scalar)
  {
    data = scalar;
    return *this;
  }

  /**
   * Read-write access to a single element.
   */
  DEAL_II_ALWAYS_INLINE
  Number &
  operator [] (const unsigned int comp)
  {
    (void)comp;
    AssertIndexRange (comp, 1);
    return data;
  }

  /**
   * Read-only access to a single element.
   */
  DEAL_II_ALWAYS_INLINE
  const Number &
  operator [] (const unsigned int comp) const
  {
    (void)comp;
    AssertIndexRange (comp, 1);
    return data;
  }

  /**
   * Addition.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray<Number> &vec)
  {
    data+=vec.data;
    return *this;
  }

  /**
   * Subtraction.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray<Number> &vec)
  {
    data-=vec.data;
    return *this;
  }

  /**
   * Multiplication.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray<Number> &vec)
  {
    data*=vec.data;
    return *this;
  }

  /**
   * Division.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray<Number> &vec)
  {
    data/=vec.data;
    return *this;
  }

  /**
   * Load n_array_elements of data from memory, starting at the given
   * address. The pointer need not be aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const Number *ptr)
  {
    data = *ptr;
  }

  /**
   * Write the content of this array to memory, starting at the given
   * address. The pointer need not be aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (Number *ptr) const
  {
    *ptr = data;
  }

  /**
   * Load n_array_elements from memory, gathered at the given offsets from
   * the base pointer: this->operator[](v) = base_ptr[offsets[v]].
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const Number       *base_ptr,
               const unsigned int *offsets)
  {
    data = base_ptr[offsets[0]];
  }

  /**
   * Write the content of this array to memory, scattered at the given
   * offsets from the base pointer:
   * base_ptr[offsets[v]] = this->operator[](v).
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                Number             *base_ptr) const
  {
    base_ptr[offsets[0]] = data;
  }

  /**
   * Actual data field. It is public to allow the compiler to generate
   * efficient code, but should be treated as an implementation detail.
   */
  Number data;

private:
  /**
   * Return the square root of this field; accessed through the std::sqrt
   * friend function below.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = std::sqrt(data);
    return res;
  }

  /**
   * Return the absolute value of this field; accessed through std::abs.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    VectorizedArray res;
    res.data = std::fabs(data);
    return res;
  }

  /**
   * Return the component-wise maximum of this field and another one;
   * accessed through std::max.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = std::max (data, other.data);
    return res;
  }

  /**
   * Return the component-wise minimum of this field and another one;
   * accessed through std::min.
   */
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = std::min (data, other.data);
    return res;
  }

  /**
   * Make a few mathematical functions friends so they can call the private
   * member functions above.
   */
  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};
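
// Usage sketch (illustrative): load/store copy n_array_elements contiguous
// values between memory and the array, with no alignment requirement on the
// pointer:
//
//   std::vector<double> v (VectorizedArray<double>::n_array_elements, 1.0);
//   VectorizedArray<double> x;
//   x.load (v.data());    // x[lane] == 1.0 in every lane
//   x += x;
//   x.store (v.data());   // v now contains 2.0 in every entry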


/**
 * Create a vectorized array that sets all entries in the array to the given
 * scalar.
 */
template <typename Number>
inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
make_vectorized_array (const Number &u)
{
  VectorizedArray<Number> result;
  result = u;
  return result;
}
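
// Usage sketch (illustrative): broadcast a scalar into all lanes; how many
// lanes there are depends on the instruction set selected at compile time:
//
//   VectorizedArray<double> x = make_vectorized_array (2.5);
//   for (unsigned int v=0; v<VectorizedArray<double>::n_array_elements; ++v)
//     Assert (x[v] == 2.5, ExcInternalError());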


/**
 * Load n_array_elements data streams from the array @p in, starting at the
 * offsets given for each stream, and transpose them into the array of
 * vectorized data @p out, such that out[i][v] == in[offsets[v]+i].
 */
template <typename Number>
inline
void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const Number            *in,
                              const unsigned int      *offsets,
                              VectorizedArray<Number> *out)
{
  for (unsigned int i=0; i<n_entries; ++i)
    for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
      out[i][v] = in[offsets[v]+i];
}
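
// Layout sketch (illustrative, assuming four lanes as with AVX doubles):
// with offsets = {0, 10, 20, 30}, the call below performs an
// array-of-structs to struct-of-arrays transposition, i.e. afterwards
// out[i][v] == in[10*v + i]:
//
//   double in[40];                                // 4 streams of length 10
//   VectorizedArray<double> out[10];
//   const unsigned int offsets[4] = {0, 10, 20, 30};
//   vectorized_load_and_transpose (10, in, offsets, out);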


/**
 * The inverse operation of vectorized_load_and_transpose: transpose the
 * array of vectorized data @p in back into n_array_elements data streams at
 * the given offsets in @p out, such that out[offsets[v]+i] receives (or, if
 * @p add_into is true, accumulates) in[i][v].
 */
template <typename Number>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<Number> *in,
                               const unsigned int            *offsets,
                               Number                        *out)
{
  if (add_into)
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=0; i<n_entries; ++i)
      for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
        out[offsets[v]+i] = in[i][v];
}
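
// Usage sketch (illustrative, continuing the example above): write the
// vectorized data back into the four scalar streams, either overwriting or
// accumulating into the previous content:
//
//   vectorized_transpose_and_store (false, 10, out, offsets, in); // out[..] =
//   vectorized_transpose_and_store (true,  10, out, offsets, in); // out[..] +=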


// for safety, also check that __AVX512F__ is defined in case the user manually
// set some conflicting compile flags which prevent compilation

#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)

/**
 * Specialization of VectorizedArray for double and AVX-512.
 */
template <>
class VectorizedArray<double>
{
public:
  /**
   * Number of elements in the array: eight doubles in a 512-bit register.
   */
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm512_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetics, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m512d
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm512_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm512_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm512_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm512_div_pd(data,vec.data);
#endif
    return *this;
  }

  /**
   * Load n_array_elements of data from memory, starting at the given
   * address; the pointer need not be aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm512_loadu_pd (ptr);
  }

  /**
   * Write the content of this array to memory, starting at the given
   * address; the pointer need not be aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm512_storeu_pd (ptr, data);
  }

  /**
   * Gather: this->operator[](v) = base_ptr[offsets[v]].
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm512_i32gather_pd(index, base_ptr, 8);
  }

  /**
   * Scatter: base_ptr[offsets[v]] = this->operator[](v). The offsets must be
   * pairwise distinct.
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    for (unsigned int i=0; i<8; ++i)
      for (unsigned int j=i+1; j<8; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    _mm512_i32scatter_pd(base_ptr, index, data, 8);
  }

  /**
   * Actual data field.
   */
  __m512d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm512_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // value to +. Since there is no andnot for AVX512, we interpret the data
    // as 64 bit integers and do the andnot on those types (note that andnot
    // is a bitwise operation so the data type does not matter)
    __m512d mask = _mm512_set1_pd (-0.);
    VectorizedArray res;
    res.data = (__m512d)_mm512_andnot_epi64 ((__m512i)mask, (__m512i)data);
    return res;
  }
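
  // Scalar analogue of the andnot trick used above (illustrative sketch,
  // not part of the original header): clearing the sign bit of an IEEE-754
  // double yields its absolute value.
  //
  //   #include <cstdint>
  //   #include <cstring>
  //
  //   double abs_via_sign_bit (double x)
  //   {
  //     std::uint64_t bits;
  //     std::memcpy (&bits, &x, sizeof bits);   // bit pattern of x
  //     bits &= ~(std::uint64_t(1) << 63);      // andnot with the -0.0 mask
  //     std::memcpy (&x, &bits, sizeof bits);
  //     return x;
  //   }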

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};
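
// Usage sketch for gather/scatter (illustrative): collect eight doubles from
// scattered positions, operate on them, and write them back. The offsets
// passed to scatter must be pairwise distinct, as asserted above:
//
//   double src[32], dst[32];
//   const unsigned int idx[8] = {0, 3, 5, 8, 12, 17, 23, 30};
//   VectorizedArray<double> x;
//   x.gather (src, idx);    // x[v] == src[idx[v]]
//   x *= x;
//   x.scatter (idx, dst);   // dst[idx[v]] = x[v]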


/**
 * Specialization of vectorized_load_and_transpose for double and AVX-512.
 */
template <>
inline
void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double            *in,
                              const unsigned int      *offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      const double *in0 = in + offsets[0+outer];
      const double *in1 = in + offsets[1+outer];
      const double *in2 = in + offsets[2+outer];
      const double *in3 = in + offsets[3+outer];

      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = _mm256_loadu_pd(in0+4*i);
          __m256d u1 = _mm256_loadu_pd(in1+4*i);
          __m256d u2 = _mm256_loadu_pd(in2+4*i);
          __m256d u3 = _mm256_loadu_pd(in3+4*i);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
          *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
          *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
        }
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<4; ++v)
          out[i][outer+v] = in[offsets[v+outer]+i];
    }
}


/**
 * Specialization of vectorized_transpose_and_store for double and AVX-512.
 */
template <>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int            *offsets,
                               double                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  // do not do a full 8-wide transpose because the code would be too long and
  // would most likely not pay off. rather do the transposition on the
  // half-sized vectorized arrays of type __m256d
  for (unsigned int outer=0; outer<8; outer += 4)
    {
      double *out0 = out + offsets[0+outer];
      double *out1 = out + offsets[1+outer];
      double *out2 = out + offsets[2+outer];
      double *out3 = out + offsets[3+outer];
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
          __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
          __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
          __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
          __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
          __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
          __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
          __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
          __m256d res0 = _mm256_unpacklo_pd (t0, t1);
          __m256d res1 = _mm256_unpackhi_pd (t0, t1);
          __m256d res2 = _mm256_unpacklo_pd (t2, t3);
          __m256d res3 = _mm256_unpackhi_pd (t2, t3);

          // Cannot use the same store instructions in both paths of the 'if'
          // because the compiler cannot know that there is no aliasing between
          // pointers
          if (add_into)
            {
              res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
              _mm256_storeu_pd(out0+4*i, res0);
              res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
              _mm256_storeu_pd(out1+4*i, res1);
              res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
              _mm256_storeu_pd(out2+4*i, res2);
              res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
              _mm256_storeu_pd(out3+4*i, res3);
            }
          else
            {
              _mm256_storeu_pd(out0+4*i, res0);
              _mm256_storeu_pd(out1+4*i, res1);
              _mm256_storeu_pd(out2+4*i, res2);
              _mm256_storeu_pd(out3+4*i, res3);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<4; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}


/**
 * Specialization of VectorizedArray for float and AVX-512.
 */
template<>
class VectorizedArray<float>
{
public:
  /**
   * Number of elements in the array: sixteen floats in a 512-bit register.
   */
  static const unsigned int n_array_elements = 16;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm512_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 16);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 16);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetics, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m512
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm512_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm512_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm512_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm512_div_ps(data,vec.data);
#endif
    return *this;
  }

  /**
   * Load n_array_elements of data from memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm512_loadu_ps (ptr);
  }

  /**
   * Write the content of this array to memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm512_storeu_ps (ptr, data);
  }

  /**
   * Gather: this->operator[](v) = base_ptr[offsets[v]].
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
    // unfortunately, there does not appear to be a 512 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    data = _mm512_i32gather_ps(index, base_ptr, 4);
  }

  /**
   * Scatter: base_ptr[offsets[v]] = this->operator[](v). The offsets must be
   * pairwise distinct.
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<16; ++i)
      for (unsigned int j=i+1; j<16; ++j)
        Assert(offsets[i] != offsets[j],
               ExcMessage("Result of scatter undefined if two offset elements"
                          " point to the same position"));

    // unfortunately, there does not appear to be a 512 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
    const __m512i index = *((__m512i *)(&index_val));
    _mm512_i32scatter_ps(base_ptr, index, data, 4);
  }

  /**
   * Actual data field.
   */
  __m512 data;

private:

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm512_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // value to +. Since there is no andnot for AVX512, we interpret the data
    // as 32 bit integers and do the andnot on those types (note that andnot
    // is a bitwise operation so the data type does not matter)
    __m512 mask = _mm512_set1_ps (-0.f);
    VectorizedArray res;
    res.data = (__m512)_mm512_andnot_epi32 ((__m512i)mask, (__m512i)data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm512_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


/**
 * Specialization of vectorized_load_and_transpose for float and AVX-512.
 */
template <>
inline
void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float            *in,
                              const unsigned int     *offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
          __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
          __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
          __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
          __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
          __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
          __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
          __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
          // To avoid warnings about uninitialized variables, need to initialize
          // one variable with zero before using it.
          __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
          t0 = _mm256_insertf128_ps (t3, u0, 0);
          t0 = _mm256_insertf128_ps (t0, u4, 1);
          t1 = _mm256_insertf128_ps (t3, u1, 0);
          t1 = _mm256_insertf128_ps (t1, u5, 1);
          t2 = _mm256_insertf128_ps (t3, u2, 0);
          t2 = _mm256_insertf128_ps (t2, u6, 1);
          t3 = _mm256_insertf128_ps (t3, u3, 0);
          t3 = _mm256_insertf128_ps (t3, u7, 1);
          __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
          __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
          __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
          __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
          *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
          *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
          *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
          *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
        }
      for (unsigned int i=4*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<8; ++v)
          out[i][v+outer] = in[offsets[v+outer]+i];
    }
}


/**
 * Specialization of vectorized_transpose_and_store for float and AVX-512.
 */
template <>
inline
void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int           *offsets,
                               float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int outer = 0; outer<16; outer += 8)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
          __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
          __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
          __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
          __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
          __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
          __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
          __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
          u0 = _mm256_shuffle_ps (t0, t2, 0x88);
          u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
          u2 = _mm256_shuffle_ps (t1, t3, 0x88);
          u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
          __m128 res0 = _mm256_extractf128_ps (u0, 0);
          __m128 res4 = _mm256_extractf128_ps (u0, 1);
          __m128 res1 = _mm256_extractf128_ps (u1, 0);
          __m128 res5 = _mm256_extractf128_ps (u1, 1);
          __m128 res2 = _mm256_extractf128_ps (u2, 0);
          __m128 res6 = _mm256_extractf128_ps (u2, 1);
          __m128 res3 = _mm256_extractf128_ps (u3, 0);
          __m128 res7 = _mm256_extractf128_ps (u3, 1);

          // Cannot use the same store instructions in both paths of the 'if'
          // because the compiler cannot know that there is no aliasing between
          // pointers
          if (add_into)
            {
              res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
          else
            {
              _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
              _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
              _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
              _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
              _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
              _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
              _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
              _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
            }
        }
      if (add_into)
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] += in[i][v+outer];
      else
        for (unsigned int i=4*n_chunks; i<n_entries; ++i)
          for (unsigned int v=0; v<8; ++v)
            out[offsets[v+outer]+i] = in[i][v+outer];
    }
}


#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)

/**
 * Specialization of VectorizedArray for double and AVX.
 */
template <>
class VectorizedArray<double>
{
public:
  /**
   * Number of elements in the array: four doubles in a 256-bit register.
   */
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm256_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetics, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m256d
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm256_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm256_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm256_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm256_div_pd(data,vec.data);
#endif
    return *this;
  }

  /**
   * Load n_array_elements of data from memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm256_loadu_pd (ptr);
  }

  /**
   * Write the content of this array to memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm256_storeu_pd (ptr, data);
  }

  /**
   * Gather: this->operator[](v) = base_ptr[offsets[v]]. Uses a hardware
   * gather instruction with AVX2, a scalar loop otherwise.
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // unfortunately, there does not appear to be a 128 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m128 index_val = _mm_loadu_ps((const float *)offsets);
    const __m128i index = *((__m128i *)(&index_val));
    data = _mm256_i32gather_pd(base_ptr, index, 8);
#else
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }

  /**
   * Scatter: base_ptr[offsets[v]] = this->operator[](v).
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    // no scatter operation in AVX/AVX2
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }

  /**
   * Actual data field.
   */
  __m256d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm256_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // value to +.
    __m256d mask = _mm256_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm256_andnot_pd(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


/**
 * Specialization of vectorized_load_and_transpose for double and AVX.
 */
template <>
inline
void
vectorized_load_and_transpose(const unsigned int       n_entries,
                              const double            *in,
                              const unsigned int      *offsets,
                              VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/4;
  const double *in0 = in + offsets[0];
  const double *in1 = in + offsets[1];
  const double *in2 = in + offsets[2];
  const double *in3 = in + offsets[3];

  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = _mm256_loadu_pd(in0+4*i);
      __m256d u1 = _mm256_loadu_pd(in1+4*i);
      __m256d u2 = _mm256_loadu_pd(in2+4*i);
      __m256d u3 = _mm256_loadu_pd(in3+4*i);
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      out[4*i+0].data = _mm256_unpacklo_pd (t0, t1);
      out[4*i+1].data = _mm256_unpackhi_pd (t0, t1);
      out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
      out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}
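
// What the intrinsics above compute (illustrative): a 4x4 transpose of
// doubles. _mm256_permute2f128_pd with masks 0x20/0x31 exchanges the 128-bit
// halves of two registers, and unpacklo/unpackhi then interleave within the
// halves. The whole function is therefore equivalent to the plain-C loop
//
//   for (unsigned int i=0; i<n_entries; ++i)
//     for (unsigned int v=0; v<4; ++v)
//       out[i][v] = in[offsets[v]+i];
//
// executed four entries at a time, with the scalar remainder loop covering
// n_entries not divisible by 4.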


/**
 * Specialization of vectorized_transpose_and_store for double and AVX.
 */
template <>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int            *offsets,
                               double                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  double *out0 = out + offsets[0];
  double *out1 = out + offsets[1];
  double *out2 = out + offsets[2];
  double *out3 = out + offsets[3];
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256d u0 = in[4*i+0].data;
      __m256d u1 = in[4*i+1].data;
      __m256d u2 = in[4*i+2].data;
      __m256d u3 = in[4*i+3].data;
      __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
      __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
      __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
      __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
      __m256d res0 = _mm256_unpacklo_pd (t0, t1);
      __m256d res1 = _mm256_unpackhi_pd (t0, t1);
      __m256d res2 = _mm256_unpacklo_pd (t2, t3);
      __m256d res3 = _mm256_unpackhi_pd (t2, t3);

      // Cannot use the same store instructions in both paths of the 'if'
      // because the compiler cannot know that there is no aliasing between
      // pointers
      if (add_into)
        {
          res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
          _mm256_storeu_pd(out0+4*i, res0);
          res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
          _mm256_storeu_pd(out1+4*i, res1);
          res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
          _mm256_storeu_pd(out2+4*i, res2);
          res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
          _mm256_storeu_pd(out3+4*i, res3);
        }
      else
        {
          _mm256_storeu_pd(out0+4*i, res0);
          _mm256_storeu_pd(out1+4*i, res1);
          _mm256_storeu_pd(out2+4*i, res2);
          _mm256_storeu_pd(out3+4*i, res3);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<4; ++v)
        out[offsets[v]+i] = in[i][v];
}


/**
 * Specialization of VectorizedArray for float and AVX.
 */
template<>
class VectorizedArray<float>
{
public:
  /**
   * Number of elements in the array: eight floats in a 256-bit register.
   */
  static const unsigned int n_array_elements = 8;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm256_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 8);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
    // if the compiler supports vector arithmetics, we can simply use the +=
    // operator on the given data type. this allows the compiler to combine
    // additions with multiplications (fused multiply-add) if those
    // instructions are available. Otherwise, we need to use the built-in
    // intrinsic command for __m256
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm256_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm256_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm256_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm256_div_ps(data,vec.data);
#endif
    return *this;
  }

  /**
   * Load n_array_elements of data from memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm256_loadu_ps (ptr);
  }

  /**
   * Write the content of this array to memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm256_storeu_ps (ptr, data);
  }

  /**
   * Gather: this->operator[](v) = base_ptr[offsets[v]]. Uses a hardware
   * gather instruction with AVX2, a scalar loop otherwise.
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
#ifdef __AVX2__
    // unfortunately, there does not appear to be a 256 bit integer load, so
    // do it by some reinterpret casts here. this is allowed because the Intel
    // API allows aliasing between different vector types.
    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
    const __m256i index = *((__m256i *)(&index_val));
    data = _mm256_i32gather_ps(base_ptr, index, 4);
#else
    for (unsigned int i=0; i<8; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
#endif
  }

  /**
   * Scatter: base_ptr[offsets[v]] = this->operator[](v).
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    // no scatter operation in AVX/AVX2
    for (unsigned int i=0; i<8; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }

  /**
   * Actual data field.
   */
  __m256 data;

private:

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm256_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // value to +.
    __m256 mask = _mm256_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm256_andnot_ps(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm256_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


/**
 * Specialization of vectorized_load_and_transpose for float and AVX.
 */
template <>
inline
void
vectorized_load_and_transpose(const unsigned int      n_entries,
                              const float            *in,
                              const unsigned int     *offsets,
                              VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4]);
      __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5]);
      __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6]);
      __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7]);
      // To avoid warnings about uninitialized variables, need to initialize
      // one variable with zero before using it.
      __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
      t0 = _mm256_insertf128_ps (t3, u0, 0);
      t0 = _mm256_insertf128_ps (t0, u4, 1);
      t1 = _mm256_insertf128_ps (t3, u1, 0);
      t1 = _mm256_insertf128_ps (t1, u5, 1);
      t2 = _mm256_insertf128_ps (t3, u2, 0);
      t2 = _mm256_insertf128_ps (t2, u6, 1);
      t3 = _mm256_insertf128_ps (t3, u3, 0);
      t3 = _mm256_insertf128_ps (t3, u7, 1);
      __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
      __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
      __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
      __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
      out[4*i+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<8; ++v)
      out[i][v] = in[offsets[v]+i];
}


/**
 * Specialization of vectorized_transpose_and_store for float and AVX.
 */
template <>
inline
void
vectorized_transpose_and_store(const bool                    add_into,
                               const unsigned int            n_entries,
                               const VectorizedArray<float> *in,
                               const unsigned int           *offsets,
                               float                        *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m256 u0 = in[4*i+0].data;
      __m256 u1 = in[4*i+1].data;
      __m256 u2 = in[4*i+2].data;
      __m256 u3 = in[4*i+3].data;
      __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
      __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
      __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
      __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
      u0 = _mm256_shuffle_ps (t0, t2, 0x88);
      u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
      u2 = _mm256_shuffle_ps (t1, t3, 0x88);
      u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
      __m128 res0 = _mm256_extractf128_ps (u0, 0);
      __m128 res4 = _mm256_extractf128_ps (u0, 1);
      __m128 res1 = _mm256_extractf128_ps (u1, 0);
      __m128 res5 = _mm256_extractf128_ps (u1, 1);
      __m128 res2 = _mm256_extractf128_ps (u2, 0);
      __m128 res6 = _mm256_extractf128_ps (u2, 1);
      __m128 res3 = _mm256_extractf128_ps (u3, 0);
      __m128 res7 = _mm256_extractf128_ps (u3, 1);

      // Cannot use the same store instructions in both paths of the 'if'
      // because the compiler cannot know that there is no aliasing between
      // pointers
      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), res0);
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), res1);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), res2);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), res3);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4]), res4);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5]), res5);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6]), res6);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7]), res7);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
      else
        {
          _mm_storeu_ps(out+4*i+offsets[0], res0);
          _mm_storeu_ps(out+4*i+offsets[1], res1);
          _mm_storeu_ps(out+4*i+offsets[2], res2);
          _mm_storeu_ps(out+4*i+offsets[3], res3);
          _mm_storeu_ps(out+4*i+offsets[4], res4);
          _mm_storeu_ps(out+4*i+offsets[5], res5);
          _mm_storeu_ps(out+4*i+offsets[6], res6);
          _mm_storeu_ps(out+4*i+offsets[7], res7);
        }
    }
  if (add_into)
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] += in[i][v];
  else
    for (unsigned int i=4*n_chunks; i<n_entries; ++i)
      for (unsigned int v=0; v<8; ++v)
        out[offsets[v]+i] = in[i][v];
}


// for safety, also check that __SSE2__ is defined in case the user manually
// set some conflicting compile flags which prevent compilation

#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)

/**
 * Specialization of VectorizedArray for double and SSE2.
 */
template <>
class VectorizedArray<double>
{
public:
  /**
   * Number of elements in the array: two doubles in a 128-bit register.
   */
  static const unsigned int n_array_elements = 2;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const double x)
  {
    data = _mm_set1_pd(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  double &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 2);
    return *(reinterpret_cast<double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 2);
    return *(reinterpret_cast<const double *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm_add_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm_sub_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm_mul_pd(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm_div_pd(data,vec.data);
#endif
    return *this;
  }

  /**
   * Load n_array_elements of data from memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const double *ptr)
  {
    data = _mm_loadu_pd (ptr);
  }

  /**
   * Write the content of this array to memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (double *ptr) const
  {
    _mm_storeu_pd (ptr, data);
  }

  /**
   * Gather: this->operator[](v) = base_ptr[offsets[v]].
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const double       *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<2; ++i)
      *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
  }

  /**
   * Scatter: base_ptr[offsets[v]] = this->operator[](v).
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                double             *base_ptr) const
  {
    for (unsigned int i=0; i<2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
  }

  /**
   * Actual data field.
   */
  __m128d data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm_sqrt_pd(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform
    // bitwise andnot with -0. This will leave all
    // value and exponent bits unchanged but force
    // the sign value to +.
    __m128d mask = _mm_set1_pd (-0.);
    VectorizedArray res;
    res.data = _mm_andnot_pd(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_max_pd (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_min_pd (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


/**
 * Specialization of vectorized_load_and_transpose for double and SSE2.
 */
template <>
inline
void vectorized_load_and_transpose(const unsigned int       n_entries,
                                   const double            *in,
                                   const unsigned int      *offsets,
                                   VectorizedArray<double> *out)
{
  const unsigned int n_chunks = n_entries/2;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
      __m128d u1 = _mm_loadu_pd(in+2*i+offsets[1]);
      out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
      out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
    }
  for (unsigned int i=2*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<2; ++v)
      out[i][v] = in[offsets[v]+i];
}


/**
 * Specialization of vectorized_transpose_and_store for double and SSE2.
 */
template <>
inline
void
vectorized_transpose_and_store(const bool                     add_into,
                               const unsigned int             n_entries,
                               const VectorizedArray<double> *in,
                               const unsigned int            *offsets,
                               double                        *out)
{
  const unsigned int n_chunks = n_entries/2;
  if (add_into)
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
          _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] += in[i][v];
    }
  else
    {
      for (unsigned int i=0; i<n_chunks; ++i)
        {
          __m128d u0 = in[2*i+0].data;
          __m128d u1 = in[2*i+1].data;
          __m128d res0 = _mm_unpacklo_pd (u0, u1);
          __m128d res1 = _mm_unpackhi_pd (u0, u1);
          _mm_storeu_pd(out+2*i+offsets[0], res0);
          _mm_storeu_pd(out+2*i+offsets[1], res1);
        }
      for (unsigned int i=2*n_chunks; i<n_entries; ++i)
        for (unsigned int v=0; v<2; ++v)
          out[offsets[v]+i] = in[i][v];
    }
}


/**
 * Specialization of VectorizedArray for float and SSE2.
 */
template <>
class VectorizedArray<float>
{
public:
  /**
   * Number of elements in the array: four floats in a 128-bit register.
   */
  static const unsigned int n_array_elements = 4;

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator = (const float x)
  {
    data = _mm_set1_ps(x);
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  float &
  operator [] (const unsigned int comp)
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator [] (const unsigned int comp) const
  {
    AssertIndexRange (comp, 4);
    return *(reinterpret_cast<const float *>(&data)+comp);
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator += (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data += vec.data;
#else
    data = _mm_add_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator -= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data -= vec.data;
#else
    data = _mm_sub_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator *= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data *= vec.data;
#else
    data = _mm_mul_ps(data,vec.data);
#endif
    return *this;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator /= (const VectorizedArray &vec)
  {
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    data /= vec.data;
#else
    data = _mm_div_ps(data,vec.data);
#endif
    return *this;
  }

  /**
   * Load n_array_elements of data from memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void load (const float *ptr)
  {
    data = _mm_loadu_ps (ptr);
  }

  /**
   * Write the content of this array to memory; the pointer need not be
   * aligned.
   */
  DEAL_II_ALWAYS_INLINE
  void store (float *ptr) const
  {
    _mm_storeu_ps (ptr, data);
  }

  /**
   * Gather: this->operator[](v) = base_ptr[offsets[v]].
   */
  DEAL_II_ALWAYS_INLINE
  void gather (const float        *base_ptr,
               const unsigned int *offsets)
  {
    for (unsigned int i=0; i<4; ++i)
      *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
  }

  /**
   * Scatter: base_ptr[offsets[v]] = this->operator[](v).
   */
  DEAL_II_ALWAYS_INLINE
  void scatter (const unsigned int *offsets,
                float              *base_ptr) const
  {
    for (unsigned int i=0; i<4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
  }

  /**
   * Actual data field.
   */
  __m128 data;

private:
  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_sqrt () const
  {
    VectorizedArray res;
    res.data = _mm_sqrt_ps(data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_abs () const
  {
    // to compute the absolute value, perform bitwise andnot with -0. This
    // will leave all value and exponent bits unchanged but force the sign
    // value to +.
    __m128 mask = _mm_set1_ps (-0.f);
    VectorizedArray res;
    res.data = _mm_andnot_ps(mask, data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_max (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_max_ps (data, other.data);
    return res;
  }

  DEAL_II_ALWAYS_INLINE
  VectorizedArray
  get_min (const VectorizedArray &other) const
  {
    VectorizedArray res;
    res.data = _mm_min_ps (data, other.data);
    return res;
  }

  template <typename Number2> friend VectorizedArray<Number2>
  std::sqrt (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::abs (const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::max (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
  template <typename Number2> friend VectorizedArray<Number2>
  std::min (const VectorizedArray<Number2> &, const VectorizedArray<Number2> &);
};


/**
 * Specialization of vectorized_load_and_transpose for float and SSE2.
 */
template <>
inline
void vectorized_load_and_transpose(const unsigned int      n_entries,
                                   const float            *in,
                                   const unsigned int     *offsets,
                                   VectorizedArray<float> *out)
{
  const unsigned int n_chunks = n_entries/4;
  for (unsigned int i=0; i<n_chunks; ++i)
    {
      __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
      __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1]);
      __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2]);
      __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3]);
      __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
      __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
      __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
      __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
      out[4*i+0].data = _mm_shuffle_ps (v0, v2, 0x88);
      out[4*i+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
      out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
      out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
    }
  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
    for (unsigned int v=0; v<4; ++v)
      out[i][v] = in[offsets[v]+i];
}
2631 
2632 
2633 
2637 template <>
2638 inline
2639 void
2640 vectorized_transpose_and_store(const bool add_into,
2641  const unsigned int n_entries,
2642  const VectorizedArray<float> *in,
2643  const unsigned int *offsets,
2644  float *out)
2645 {
2646  const unsigned int n_chunks = n_entries/4;
2647  for (unsigned int i=0; i<n_chunks; ++i)
2648  {
2649  __m128 u0 = in[4*i+0].data;
2650  __m128 u1 = in[4*i+1].data;
2651  __m128 u2 = in[4*i+2].data;
2652  __m128 u3 = in[4*i+3].data;
2653  __m128 t0 = _mm_shuffle_ps (u0, u1, 0x44);
2654  __m128 t1 = _mm_shuffle_ps (u0, u1, 0xee);
2655  __m128 t2 = _mm_shuffle_ps (u2, u3, 0x44);
2656  __m128 t3 = _mm_shuffle_ps (u2, u3, 0xee);
2657  u0 = _mm_shuffle_ps (t0, t2, 0x88);
2658  u1 = _mm_shuffle_ps (t0, t2, 0xdd);
2659  u2 = _mm_shuffle_ps (t1, t3, 0x88);
2660  u3 = _mm_shuffle_ps (t1, t3, 0xdd);
2661 
2662  // Cannot use the same store instructions in both paths of the 'if'
2663  // because the compiler cannot know that there is no aliasing between
2664  // pointers
2665  if (add_into)
2666  {
2667  u0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0]), u0);
2668  _mm_storeu_ps(out+4*i+offsets[0], u0);
2669  u1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1]), u1);
2670  _mm_storeu_ps(out+4*i+offsets[1], u1);
2671  u2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2]), u2);
2672  _mm_storeu_ps(out+4*i+offsets[2], u2);
2673  u3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3]), u3);
2674  _mm_storeu_ps(out+4*i+offsets[3], u3);
2675  }
2676  else
2677  {
2678  _mm_storeu_ps(out+4*i+offsets[0], u0);
2679  _mm_storeu_ps(out+4*i+offsets[1], u1);
2680  _mm_storeu_ps(out+4*i+offsets[2], u2);
2681  _mm_storeu_ps(out+4*i+offsets[3], u3);
2682  }
2683  }
2684  if (add_into)
2685  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2686  for (unsigned int v=0; v<4; ++v)
2687  out[offsets[v]+i] += in[i][v];
2688  else
2689  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
2690  for (unsigned int v=0; v<4; ++v)
2691  out[offsets[v]+i] = in[i][v];
2692 }
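// Usage sketch combining the two transpose helpers (hypothetical buffers
// src and dst of sufficient size): load interleaved data, scale it lane
// by lane, and store it back without accumulation.
//
//   const unsigned int offsets[4] = {0, 16, 32, 48};
//   VectorizedArray<float> work[10];
//   vectorized_load_and_transpose (10, src, offsets, work);
//   for (unsigned int i=0; i<10; ++i)
//     work[i] *= make_vectorized_array (2.f);
//   vectorized_transpose_and_store (false, 10, work, offsets, dst);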
2693 
2694 
2695 
2696 #endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0
2697 
2698 
2704 template <typename Number>
2705 inline DEAL_II_ALWAYS_INLINE
2706 VectorizedArray<Number>
2707 operator + (const VectorizedArray<Number> &u,
2708  const VectorizedArray<Number> &v)
2709 {
2710  VectorizedArray<Number> tmp = u;
2711  return tmp+=v;
2712 }
2713 
2719 template <typename Number>
2720 inline DEAL_II_ALWAYS_INLINE
2721 VectorizedArray<Number>
2722 operator - (const VectorizedArray<Number> &u,
2723  const VectorizedArray<Number> &v)
2724 {
2725  VectorizedArray<Number> tmp = u;
2726  return tmp-=v;
2727 }
2728 
2734 template <typename Number>
2735 inline DEAL_II_ALWAYS_INLINE
2736 VectorizedArray<Number>
2737 operator * (const VectorizedArray<Number> &u,
2738  const VectorizedArray<Number> &v)
2739 {
2740  VectorizedArray<Number> tmp = u;
2741  return tmp*=v;
2742 }
2743 
2749 template <typename Number>
2750 inline DEAL_II_ALWAYS_INLINE
2751 VectorizedArray<Number>
2752 operator / (const VectorizedArray<Number> &u,
2753  const VectorizedArray<Number> &v)
2754 {
2755  VectorizedArray<Number> tmp = u;
2756  return tmp/=v;
2757 }
2758 
2765 template <typename Number>
2766 inline DEAL_II_ALWAYS_INLINE
2767 VectorizedArray<Number>
2768 operator + (const Number &u,
2769  const VectorizedArray<Number> &v)
2770 {
2771  VectorizedArray<Number> tmp;
2772  tmp = u;
2773  return tmp+=v;
2774 }
2775 
2784 inline DEAL_II_ALWAYS_INLINE
2785 VectorizedArray<float>
2786 operator + (const double &u,
2787  const VectorizedArray<float> &v)
2788 {
2789  VectorizedArray<float> tmp;
2790  tmp = u;
2791  return tmp+=v;
2792 }
2793 
2800 template <typename Number>
2801 inline DEAL_II_ALWAYS_INLINE
2802 VectorizedArray<Number>
2803 operator + (const VectorizedArray<Number> &v,
2804  const Number &u)
2805 {
2806  return u + v;
2807 }
2808 
2817 inline DEAL_II_ALWAYS_INLINE
2818 VectorizedArray<float>
2819 operator + (const VectorizedArray<float> &v,
2820  const double &u)
2821 {
2822  return u + v;
2823 }
2824 
2831 template <typename Number>
2832 inline DEAL_II_ALWAYS_INLINE
2833 VectorizedArray<Number>
2834 operator - (const Number &u,
2835  const VectorizedArray<Number> &v)
2836 {
2837  VectorizedArray<Number> tmp;
2838  tmp = u;
2839  return tmp-=v;
2840 }
2841 
2850 inline DEAL_II_ALWAYS_INLINE
2851 VectorizedArray<float>
2852 operator - (const double &u,
2853  const VectorizedArray<float> &v)
2854 {
2855  VectorizedArray<float> tmp;
2856  tmp = float(u);
2857  return tmp-=v;
2858 }
2859 
2866 template <typename Number>
2867 inline DEAL_II_ALWAYS_INLINE
2868 VectorizedArray<Number>
2869 operator - (const VectorizedArray<Number> &v,
2870  const Number &u)
2871 {
2872  VectorizedArray<Number> tmp;
2873  tmp = u;
2874  return v-tmp;
2875 }
2876 
2885 inline DEAL_II_ALWAYS_INLINE
2886 VectorizedArray<float>
2887 operator - (const VectorizedArray<float> &v,
2888  const double &u)
2889 {
2890  VectorizedArray<float> tmp;
2891  tmp = float(u);
2892  return v-tmp;
2893 }
2894 
2901 template <typename Number>
2902 inline DEAL_II_ALWAYS_INLINE
2903 VectorizedArray<Number>
2904 operator * (const Number &u,
2905  const VectorizedArray<Number> &v)
2906 {
2907  VectorizedArray<Number> tmp;
2908  tmp = u;
2909  return tmp*=v;
2910 }
2911 
2920 inline DEAL_II_ALWAYS_INLINE
2921 VectorizedArray<float>
2922 operator * (const double &u,
2923  const VectorizedArray<float> &v)
2924 {
2925  VectorizedArray<float> tmp;
2926  tmp = float(u);
2927  return tmp*=v;
2928 }
2929 
2936 template <typename Number>
2937 inline DEAL_II_ALWAYS_INLINE
2938 VectorizedArray<Number>
2939 operator * (const VectorizedArray<Number> &v,
2940  const Number &u)
2941 {
2942  return u * v;
2943 }
2944 
2953 inline DEAL_II_ALWAYS_INLINE
2954 VectorizedArray<float>
2955 operator * (const VectorizedArray<float> &v,
2956  const double &u)
2957 {
2958  return u * v;
2959 }
2960 
2967 template <typename Number>
2968 inline DEAL_II_ALWAYS_INLINE
2969 VectorizedArray<Number>
2970 operator / (const Number &u,
2971  const VectorizedArray<Number> &v)
2972 {
2973  VectorizedArray<Number> tmp;
2974  tmp = u;
2975  return tmp/=v;
2976 }
2977 
2986 inline DEAL_II_ALWAYS_INLINE
2987 VectorizedArray<float>
2988 operator / (const double &u,
2989  const VectorizedArray<float> &v)
2990 {
2991  VectorizedArray<float> tmp;
2992  tmp = float(u);
2993  return tmp/=v;
2994 }
2995 
3002 template <typename Number>
3003 inline DEAL_II_ALWAYS_INLINE
3004 VectorizedArray<Number>
3005 operator / (const VectorizedArray<Number> &v,
3006  const Number &u)
3007 {
3008  VectorizedArray<Number> tmp;
3009  tmp = u;
3010  return v/tmp;
3011 }
3012 
3021 inline DEAL_II_ALWAYS_INLINE
3022 VectorizedArray<float>
3023 operator / (const VectorizedArray<float> &v,
3024  const double &u)
3025 {
3026  VectorizedArray<float> tmp;
3027  tmp = float(u);
3028  return v/tmp;
3029 }
3030 
3036 template <typename Number>
3037 inline DEAL_II_ALWAYS_INLINE
3038 VectorizedArray<Number>
3039 operator + (const VectorizedArray<Number> &u)
3040 {
3041  return u;
3042 }
3043 
3049 template <typename Number>
3050 inline DEAL_II_ALWAYS_INLINE
3051 VectorizedArray<Number>
3052 operator - (const VectorizedArray<Number> &u)
3053 {
3054  // to get a negative sign, subtract the input from zero (could also
3055  // multiply by -1, but this one is slightly simpler)
3056  return VectorizedArray<Number>()-u;
3057 }
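// Usage sketch (hypothetical values): together, the overloads above allow
// mixed scalar/vector expressions without explicit broadcasts; each scalar
// operand is implicitly expanded to all lanes.
//
//   VectorizedArray<double> x = make_vectorized_array (3.0);
//   VectorizedArray<double> y = 2.0*x + x/4.0 - 1.0;   // per-lane arithmetic
//   y = -y;                                            // unary minus, lane-wise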
3058 
3059 
3060 DEAL_II_NAMESPACE_CLOSE
3061 
3062 
3069 namespace std
3070 {
3078  template <typename Number>
3079  inline
3080  ::VectorizedArray<Number>
3081  sin (const ::VectorizedArray<Number> &x)
3082  {
3083  // put values in an array and later read in that array with an unaligned
3084  // read. This should save some instructions as compared to directly
3085  // setting the individual elements and also circumvents a compiler
3086  // optimization bug in gcc-4.6 with SSE2 (see also deal.II developers list
3087  // from April 2014, topic "matrix_free/step-48 Test").
3088  Number values[::VectorizedArray<Number>::n_array_elements];
3089  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3090  values[i] = std::sin(x[i]);
3091  ::VectorizedArray<Number> out;
3092  out.load(&values[0]);
3093  return out;
3094  }
3095 
3096 
3097 
3105  template <typename Number>
3106  inline
3107  ::VectorizedArray<Number>
3108  cos (const ::VectorizedArray<Number> &x)
3109  {
3110  Number values[::VectorizedArray<Number>::n_array_elements];
3111  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3112  values[i] = std::cos(x[i]);
3113  ::VectorizedArray<Number> out;
3114  out.load(&values[0]);
3115  return out;
3116  }
3117 
3118 
3119 
3127  template <typename Number>
3128  inline
3129  ::VectorizedArray<Number>
3130  tan (const ::VectorizedArray<Number> &x)
3131  {
3132  Number values[::VectorizedArray<Number>::n_array_elements];
3133  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3134  values[i] = std::tan(x[i]);
3135  ::VectorizedArray<Number> out;
3136  out.load(&values[0]);
3137  return out;
3138  }
3139 
3140 
3141 
3149  template <typename Number>
3150  inline
3151  ::VectorizedArray<Number>
3152  exp (const ::VectorizedArray<Number> &x)
3153  {
3154  Number values[::VectorizedArray<Number>::n_array_elements];
3155  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3156  values[i] = std::exp(x[i]);
3157  ::VectorizedArray<Number> out;
3158  out.load(&values[0]);
3159  return out;
3160  }
3161 
3162 
3163 
3171  template <typename Number>
3172  inline
3173  ::VectorizedArray<Number>
3174  log (const ::VectorizedArray<Number> &x)
3175  {
3176  Number values[::VectorizedArray<Number>::n_array_elements];
3177  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3178  values[i] = std::log(x[i]);
3179  ::VectorizedArray<Number> out;
3180  out.load(&values[0]);
3181  return out;
3182  }
3183 
3184 
3185 
3193  template <typename Number>
3194  inline
3195  ::VectorizedArray<Number>
3196  sqrt (const ::VectorizedArray<Number> &x)
3197  {
3198  return x.get_sqrt();
3199  }
3200 
3201 
3202 
3210  template <typename Number>
3211  inline
3212  ::VectorizedArray<Number>
3213  pow (const ::VectorizedArray<Number> &x,
3214  const Number p)
3215  {
3216  Number values[::VectorizedArray<Number>::n_array_elements];
3217  for (unsigned int i=0; i<::VectorizedArray<Number>::n_array_elements; ++i)
3218  values[i] = std::pow(x[i], p);
3219  ::VectorizedArray<Number> out;
3220  out.load(&values[0]);
3221  return out;
3222  }
3223 
3224 
3225 
3233  template <typename Number>
3234  inline
3235  ::VectorizedArray<Number>
3236  abs (const ::VectorizedArray<Number> &x)
3237  {
3238  return x.get_abs();
3239  }
3240 
3241 
3242 
3250  template <typename Number>
3251  inline
3252  ::VectorizedArray<Number>
3253  max (const ::VectorizedArray<Number> &x,
3254  const ::VectorizedArray<Number> &y)
3255  {
3256  return x.get_max(y);
3257  }
3258 
3259 
3260 
3268  template <typename Number>
3269  inline
3270  ::VectorizedArray<Number>
3271  min (const ::VectorizedArray<Number> &x,
3272  const ::VectorizedArray<Number> &y)
3273  {
3274  return x.get_min(y);
3275  }
3276 
3277 }
3278 
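// Usage sketch for the std overloads above (hypothetical values): each
// function applies its scalar counterpart to every lane.
//
//   VectorizedArray<double> x = make_vectorized_array (0.25);
//   VectorizedArray<double> s = std::sin (x);    // lane-wise sine
//   VectorizedArray<double> r = std::sqrt (x);   // hardware sqrt where available
//   VectorizedArray<double> m = std::max (s, r); // lane-wise maximum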
3279 #endif