17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
55 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
59 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
64 # elif defined(__ALTIVEC__)
72 # elif defined(__ARM_NEON)
73 # include <arm_neon.h>
74 # elif defined(__x86_64__)
75 # include <x86intrin.h>
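// The rest of this header provides VectorizedArray<Number, width>
// specializations for the instruction-set extensions detected above. A
// minimal usage sketch (illustrative only; the lane count depends on the
// platform and is reported by VectorizedArray<double>::size()):
//
//   dealii::VectorizedArray<double> a, b;
//   a = 1.0;       // broadcast the scalar to every lane
//   b.load(ptr);   // unaligned load of size() contiguous doubles from ptr
//   a += b;        // element-wise arithmetic on all lanes at once
//   a.store(ptr);  // unaligned store back to memory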
87 template <typename Number, std::size_t width>
121 "You are trying to compare iterators into different arrays."));
133 "You are trying to compare iterators into different arrays."));
147 const typename T::value_type &
159 template <typename U = T>
160 std::enable_if_t<!std::is_same_v<U, const U>, typename T::value_type> &
203 "You can't decrement an iterator that is already at the beginning of the range."));
224 return static_cast<std::ptrdiff_t>(lane) -
225 static_cast<std::ptrdiff_t>(other.lane);
251 template <typename T, std::size_t width>
263 template <typename U>
266 auto i0 = this->begin();
267 auto i1 = list.begin();
269 for (; i1 != list.end(); ++i0, ++i1)
274 "Initializer list exceeds size of this VectorizedArray object."));
279 for (; i0 != this->end(); ++i0)
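// The constructor above copies the initializer list into the leading lanes
// and the trailing loop pads the lanes not covered by the list (with zeros in
// this implementation), so an initializer list may have at most size()
// entries; e.g.
//   VectorizedArray<double> v = {1.0, 2.0};
// is valid whenever size() >= 2, and a longer list trips the Assert above.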
288 static constexpr std::size_t
420 template <typename Number, std::size_t width>
430 static_assert(width == 1,
431 "You specified an illegal width that is not supported.");
450 template <typename U>
550 template <typename OtherNumber>
563 template <typename OtherNumber>
637 gather(const Number *base_ptr, const unsigned int *offsets)
639 data = base_ptr[offsets[0]];
656 scatter(const unsigned int *offsets, Number *base_ptr) const
658 base_ptr[offsets[0]] = data;
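// In this generic width-1 fallback, gather() and scatter() degenerate to a
// single indexed access: gather reads base_ptr[offsets[0]] into the one lane
// and scatter writes it back, mirroring lane by lane what the wider
// specializations below do with SIMD instructions.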
733 template <typename Number2, std::size_t width2>
736 template <typename Number2, std::size_t width2>
739 template <typename Number2, std::size_t width2>
743 template <typename Number2, std::size_t width2>
762 template <typename Number,
780 template <typename VectorizedArrayType>
785 std::is_same_v<VectorizedArrayType,
787 VectorizedArrayType::size()>>,
788 "VectorizedArrayType is not a VectorizedArray.");
790 VectorizedArrayType result = u;
807 template <typename Number, std::size_t width>
810 const std::array<Number *, width> &ptrs,
811 const unsigned int offset)
813 for (unsigned int v = 0; v < width; ++v)
814 out.data[v] = ptrs[v][offset];
844 template <typename Number, std::size_t width>
848 const unsigned int *offsets,
851 for (unsigned int i = 0; i < n_entries; ++i)
852 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
853 out[i][v] = in[offsets[v] + i];
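// The generic loop above spells out the index transformation that every
// vectorized_load_and_transpose() specialization below implements: lane v of
// out[i] receives in[offsets[v] + i], i.e. n_entries values from size()
// separate offset streams are transposed into an array-of-SIMD layout. The
// intrinsics-based overloads only accelerate this exact access pattern.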
868 template <typename Number, std::size_t width>
871 const std::array<Number *, width> &in,
874 for (unsigned int i = 0; i < n_entries; ++i)
875 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
876 out[i][v] = in[v][i];
919 template <typename Number, std::size_t width>
922 const unsigned int n_entries,
924 const unsigned int *offsets,
928 for (unsigned int i = 0; i < n_entries; ++i)
929 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
930 out[offsets[v] + i] += in[i][v];
932 for (unsigned int i = 0; i < n_entries; ++i)
933 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
934 out[offsets[v] + i] = in[i][v];
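// vectorized_transpose_and_store() is the inverse operation: lane v of in[i]
// goes to out[offsets[v] + i]. As the two loops above show, the add_into flag
// selects between accumulating into the destination (+=) and plain assignment.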
949 template <typename Number, std::size_t width>
952 const unsigned int n_entries,
954 std::array<Number *, width> &out)
957 for (unsigned int i = 0; i < n_entries; ++i)
958 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
959 out[v][i] += in[i][v];
961 for (unsigned int i = 0; i < n_entries; ++i)
962 for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
963 out[v][i] = in[i][v];
971 # if defined(DEAL_II_HAVE_ARM_NEON) && defined(__ARM_NEON)
1003 template <typename U>
1014 data = vdupq_n_f64(x);
1024 operator=(const double scalar) && = delete;
1032 return *(reinterpret_cast<double *>(&data) + comp);
1041 return *(reinterpret_cast<const double *>(&data) + comp);
1090 load(const double *ptr)
1092 data = vld1q_f64(ptr);
1097 load(const float *ptr)
1100 for (unsigned int i = 0; i < 2; ++i)
1111 store(double *ptr) const
1113 vst1q_f64(ptr, data);
1118 store(float *ptr) const
1121 for (unsigned int i = 0; i < 2; ++i)
1133 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1135 vst1q_f64(ptr, data);
1151 gather(const double *base_ptr, const unsigned int *offsets)
1153 for (unsigned int i = 0; i < 2; ++i)
1154 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1170 scatter(const unsigned int *offsets, double *base_ptr) const
1172 for (unsigned int i = 0; i < 2; ++i)
1173 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1183 return vaddvq_f64(data);
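// vaddvq_f64 is NEON's "add across vector" instruction: it returns the sum of
// both double lanes, which is exactly the horizontal reduction needed here.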
1191 mutable float64x2_t data;
1243 template <typename Number2, std::size_t width2>
1246 template <typename Number2, std::size_t width2>
1249 template <typename Number2, std::size_t width2>
1253 template <typename Number2, std::size_t width2>
1289 template <typename U>
1300 data = vdupq_n_f32(x);
1310 operator=(const float scalar) && = delete;
1318 return *(reinterpret_cast<float *>(&data) + comp);
1327 return *(reinterpret_cast<const float *>(&data) + comp);
1376 load(const float *ptr)
1378 data = vld1q_f32(ptr);
1388 store(float *ptr) const
1390 vst1q_f32(ptr, data);
1401 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1403 vst1q_f32(ptr, data);
1419 gather(const float *base_ptr, const unsigned int *offsets)
1421 for (unsigned int i = 0; i < 4; ++i)
1422 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
1438 scatter(const unsigned int *offsets, float *base_ptr) const
1440 for (unsigned int i = 0; i < 4; ++i)
1441 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
1451 return vaddvq_f32(data);
1459 mutable float32x4_t data;
1511 template <typename Number2, std::size_t width2>
1514 template <typename Number2, std::size_t width2>
1517 template <typename Number2, std::size_t width2>
1521 template <typename Number2, std::size_t width2>
1530 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
1562 template <typename U>
1574 data = _mm_set1_pd(x);
1584 operator=(const double scalar) && = delete;
1594 return *(reinterpret_cast<double *>(&data) + comp);
1605 return *(reinterpret_cast<const double *>(&data) + comp);
1615 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1630 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1645 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1660 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1675 load(const double *ptr)
1677 data = _mm_loadu_pd(ptr);
1682 load(const float *ptr)
1685 for (unsigned int i = 0; i < 2; ++i)
1697 store(double *ptr) const
1699 _mm_storeu_pd(ptr, data);
1704 store(float *ptr) const
1707 for (unsigned int i = 0; i < 2; ++i)
1719 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
1721 _mm_stream_pd(ptr, data);
1738 gather(const double *base_ptr, const unsigned int *offsets)
1740 for (unsigned int i = 0; i < 2; ++i)
1741 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
1758 scatter(const unsigned int *offsets, double *base_ptr) const
1760 for (unsigned int i = 0; i < 2; ++i)
1761 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
1771 __m128d t1 = _mm_unpackhi_pd(data, data);
1772 __m128d t2 = _mm_add_pd(data, t1);
1773 return _mm_cvtsd_f64(t2);
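// Horizontal add without relying on SSE3: _mm_unpackhi_pd(data, data)
// broadcasts the upper lane, the _mm_add_pd then yields {lo + hi, hi + hi},
// and _mm_cvtsd_f64 extracts the lower element, i.e. the sum of both lanes.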
1809 __m128d mask = _mm_set1_pd(-0.);
1811 res.data = _mm_andnot_pd(mask, data);
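// -0.0 has only the sign bit set, so _mm_andnot_pd(mask, data) clears the
// sign bit of every lane and leaves the magnitude untouched: a branch-free
// absolute value. The AVX and AVX-512 versions further down use the same
// sign-mask trick.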
1842 template <typename Number2, std::size_t width2>
1845 template <typename Number2, std::size_t width2>
1848 template <typename Number2, std::size_t width2>
1852 template <typename Number2, std::size_t width2>
1867 const unsigned int *offsets,
1870 const unsigned int n_chunks = n_entries / 2;
1871 for (unsigned int i = 0; i < n_chunks; ++i)
1873 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
1874 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
1875 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1876 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1880 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1881 for (unsigned int v = 0; v < 2; ++v)
1882 out[i][v] = in[offsets[v] + i];
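// The chunk loop above is a 2x2 transpose: _mm_unpacklo_pd(u0, u1) packs the
// first element of both input streams and _mm_unpackhi_pd(u0, u1) the second,
// producing the same lane order that the scalar remainder loop establishes
// for the leftover entries.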
1893 const std::array<double *, 2> &in,
1898 const unsigned int n_chunks = n_entries / 2;
1899 for (unsigned int i = 0; i < n_chunks; ++i)
1901 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
1902 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
1903 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
1904 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
1907 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1908 for (unsigned int v = 0; v < 2; ++v)
1909 out[i][v] = in[v][i];
1920 const unsigned int n_entries,
1922 const unsigned int *offsets,
1925 const unsigned int n_chunks = n_entries / 2;
1928 for (unsigned int i = 0; i < n_chunks; ++i)
1930 __m128d u0 = in[2 * i + 0].data;
1931 __m128d u1 = in[2 * i + 1].data;
1932 __m128d res0 = _mm_unpacklo_pd(u0, u1);
1933 __m128d res1 = _mm_unpackhi_pd(u0, u1);
1934 _mm_storeu_pd(out + 2 * i + offsets[0],
1935 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
1937 _mm_storeu_pd(out + 2 * i + offsets[1],
1938 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
1942 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1943 for (unsigned int v = 0; v < 2; ++v)
1944 out[offsets[v] + i] += in[i][v];
1948 for (unsigned int i = 0; i < n_chunks; ++i)
1950 __m128d u0 = in[2 * i + 0].data;
1951 __m128d u1 = in[2 * i + 1].data;
1952 __m128d res0 = _mm_unpacklo_pd(u0, u1);
1953 __m128d res1 = _mm_unpackhi_pd(u0, u1);
1954 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
1955 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
1958 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1959 for (unsigned int v = 0; v < 2; ++v)
1960 out[offsets[v] + i] = in[i][v];
1972 const unsigned int n_entries,
1974 std::array<double *, 2> &out)
1978 const unsigned int n_chunks = n_entries / 2;
1981 for (unsigned int i = 0; i < n_chunks; ++i)
1983 __m128d u0 = in[2 * i + 0].data;
1984 __m128d u1 = in[2 * i + 1].data;
1985 __m128d res0 = _mm_unpacklo_pd(u0, u1);
1986 __m128d res1 = _mm_unpackhi_pd(u0, u1);
1987 _mm_storeu_pd(out[0] + 2 * i,
1988 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
1989 _mm_storeu_pd(out[1] + 2 * i,
1990 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
1993 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
1994 for (unsigned int v = 0; v < 2; ++v)
1995 out[v][i] += in[i][v];
1999 for (unsigned int i = 0; i < n_chunks; ++i)
2001 __m128d u0 = in[2 * i + 0].data;
2002 __m128d u1 = in[2 * i + 1].data;
2003 __m128d res0 = _mm_unpacklo_pd(u0, u1);
2004 __m128d res1 = _mm_unpackhi_pd(u0, u1);
2005 _mm_storeu_pd(out[0] + 2 * i, res0);
2006 _mm_storeu_pd(out[1] + 2 * i, res1);
2009 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
2010 for (unsigned int v = 0; v < 2; ++v)
2011 out[v][i] = in[i][v];
2047 template <typename U>
2059 data = _mm_set1_ps(x);
2069 operator=(const float scalar) && = delete;
2079 return *(reinterpret_cast<float *>(&data) + comp);
2090 return *(reinterpret_cast<const float *>(&data) + comp);
2100 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2115 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2130 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2145 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2160 load(const float *ptr)
2162 data = _mm_loadu_ps(ptr);
2173 store(float *ptr) const
2175 _mm_storeu_ps(ptr, data);
2186 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
2188 _mm_stream_ps(ptr, data);
2205 gather(const float *base_ptr, const unsigned int *offsets)
2207 for (unsigned int i = 0; i < 4; ++i)
2208 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2225 scatter(const unsigned int *offsets, float *base_ptr) const
2227 for (unsigned int i = 0; i < 4; ++i)
2228 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
2238 __m128 t1 = _mm_movehl_ps(data, data);
2239 __m128 t2 = _mm_add_ps(data, t1);
2240 __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
2241 __m128 t4 = _mm_add_ss(t2, t3);
2242 return _mm_cvtss_f32(t4);
2277 __m128 mask = _mm_set1_ps(-0.f);
2279 res.data = _mm_andnot_ps(mask, data);
2310 template <typename Number2, std::size_t width2>
2313 template <typename Number2, std::size_t width2>
2316 template <typename Number2, std::size_t width2>
2320 template <typename Number2, std::size_t width2>
2335 const unsigned int *offsets,
2338 const unsigned int n_chunks = n_entries / 4;
2339 for (unsigned int i = 0; i < n_chunks; ++i)
2341 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
2342 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
2343 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
2344 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
2345 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2346 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2347 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2348 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2349 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2350 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2351 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2352 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2356 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2357 for (unsigned int v = 0; v < 4; ++v)
2358 out[i][v] = in[offsets[v] + i];
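// These shuffle masks implement the classic 4x4 float transpose (the same
// pattern as _MM_TRANSPOSE4_PS): 0x44/0xee split each pair of inputs into low
// and high halves, and 0x88/0xdd then pick the even respectively odd elements,
// so out[4 * i + k] collects element k of all four input streams.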
2369 const std::array<float *, 4> &in,
2374 const unsigned int n_chunks = n_entries / 4;
2375 for (unsigned int i = 0; i < n_chunks; ++i)
2377 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
2378 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
2379 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
2380 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
2381 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
2382 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
2383 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
2384 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
2385 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
2386 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
2387 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
2388 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
2391 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2392 for (unsigned int v = 0; v < 4; ++v)
2393 out[i][v] = in[v][i];
2404 const unsigned int n_entries,
2406 const unsigned int *offsets,
2409 const unsigned int n_chunks = n_entries / 4;
2410 for (unsigned int i = 0; i < n_chunks; ++i)
2412 __m128 u0 = in[4 * i + 0].data;
2413 __m128 u1 = in[4 * i + 1].data;
2414 __m128 u2 = in[4 * i + 2].data;
2415 __m128 u3 = in[4 * i + 3].data;
2416 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2417 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2418 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2419 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2420 u0 = _mm_shuffle_ps(t0, t2, 0x88);
2421 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2422 u2 = _mm_shuffle_ps(t1, t3, 0x88);
2423 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2430 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
2431 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2432 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
2433 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2434 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
2435 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2436 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
2437 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2441 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
2442 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
2443 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
2444 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
2450 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2451 for (unsigned int v = 0; v < 4; ++v)
2452 out[offsets[v] + i] += in[i][v];
2454 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2455 for (unsigned int v = 0; v < 4; ++v)
2456 out[offsets[v] + i] = in[i][v];
2467 const unsigned int n_entries,
2469 std::array<float *, 4> &out)
2473 const unsigned int n_chunks = n_entries / 4;
2474 for (unsigned int i = 0; i < n_chunks; ++i)
2476 __m128 u0 = in[4 * i + 0].data;
2477 __m128 u1 = in[4 * i + 1].data;
2478 __m128 u2 = in[4 * i + 2].data;
2479 __m128 u3 = in[4 * i + 3].data;
2480 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
2481 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
2482 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
2483 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
2484 u0 = _mm_shuffle_ps(t0, t2, 0x88);
2485 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
2486 u2 = _mm_shuffle_ps(t1, t3, 0x88);
2487 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
2491 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
2492 _mm_storeu_ps(out[0] + 4 * i, u0);
2493 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
2494 _mm_storeu_ps(out[1] + 4 * i, u1);
2495 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
2496 _mm_storeu_ps(out[2] + 4 * i, u2);
2497 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
2498 _mm_storeu_ps(out[3] + 4 * i, u3);
2502 _mm_storeu_ps(out[0] + 4 * i, u0);
2503 _mm_storeu_ps(out[1] + 4 * i, u1);
2504 _mm_storeu_ps(out[2] + 4 * i, u2);
2505 _mm_storeu_ps(out[3] + 4 * i, u3);
2510 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2511 for (unsigned int v = 0; v < 4; ++v)
2512 out[v][i] += in[i][v];
2514 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2515 for (unsigned int v = 0; v < 4; ++v)
2516 out[v][i] = in[i][v];
2523 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2555 template <typename U>
2567 data = _mm256_set1_pd(x);
2577 operator=(const double scalar) && = delete;
2587 return *(reinterpret_cast<double *>(&data) + comp);
2598 return *(reinterpret_cast<const double *>(&data) + comp);
2613 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2628 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2642 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2657 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2672 load(const double *ptr)
2674 data = _mm256_loadu_pd(ptr);
2679 load(const float *ptr)
2681 data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2692 store(double *ptr) const
2694 _mm256_storeu_pd(ptr, data);
2699 store(float *ptr) const
2701 _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2712 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2714 _mm256_stream_pd(ptr, data);
2731 gather(const double *base_ptr, const unsigned int *offsets)
2733 # if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
2737 const __m128 index_val =
2738 _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2739 const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2744 __m256d zero = _mm256_setzero_pd();
2747 data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2749 for (unsigned int i = 0; i < 4; ++i)
2750 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
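// Both branches fill the same lanes: the masked AVX2 gather above issues a
// single indexed-load instruction, while the fallback loop reads
// base_ptr[offsets[i]] element by element. DEAL_II_USE_VECTORIZATION_GATHER
// only selects which of the two variants gets compiled.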
2768 scatter(const unsigned int *offsets, double *base_ptr) const
2771 for (unsigned int i = 0; i < 4; ++i)
2772 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2783 t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
2802 return _mm256_castpd256_pd128(data);
2812 return _mm256_extractf128_pd(data, 1);
2839 __m256d mask = _mm256_set1_pd(-0.);
2841 res.data = _mm256_andnot_pd(mask, data);
2872 template <typename Number2, std::size_t width2>
2875 template <typename Number2, std::size_t width2>
2878 template <typename Number2, std::size_t width2>
2882 template <typename Number2, std::size_t width2>
2897 const unsigned int *offsets,
2900 const unsigned int n_chunks = n_entries / 4;
2901 const double *in0 = in + offsets[0];
2902 const double *in1 = in + offsets[1];
2903 const double *in2 = in + offsets[2];
2904 const double *in3 = in + offsets[3];
2906 for (unsigned int i = 0; i < n_chunks; ++i)
2908 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2909 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2910 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2911 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2912 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2913 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2914 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2915 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2916 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2917 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2918 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2919 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2923 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2924 out[i].gather(in + i, offsets);
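// The 4x4 double transpose above works in two stages: _mm256_permute2f128_pd
// with selectors 0x20/0x31 regroups the 128-bit halves of the four loads, and
// the unpacklo/unpackhi pair then interleaves within each half, so
// out[4 * i + k] holds element k of all four input streams. Leftover entries
// fall back to the plain gather() path.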
2935 const std::array<double *, 4> &in,
2940 const unsigned int n_chunks = n_entries / 4;
2941 const double *in0 = in[0];
2942 const double *in1 = in[1];
2943 const double *in2 = in[2];
2944 const double *in3 = in[3];
2946 for (unsigned int i = 0; i < n_chunks; ++i)
2948 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2949 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2950 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2951 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2952 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2953 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2954 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2955 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2956 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2957 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2958 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2959 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2962 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2974 const unsigned int n_entries,
2976 const unsigned int *offsets,
2979 const unsigned int n_chunks = n_entries / 4;
2980 double *out0 = out + offsets[0];
2981 double *out1 = out + offsets[1];
2982 double *out2 = out + offsets[2];
2983 double *out3 = out + offsets[3];
2984 for (unsigned int i = 0; i < n_chunks; ++i)
2986 __m256d u0 = in[4 * i + 0].data;
2987 __m256d u1 = in[4 * i + 1].data;
2988 __m256d u2 = in[4 * i + 2].data;
2989 __m256d u3 = in[4 * i + 3].data;
2990 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2991 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2992 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2993 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2994 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2995 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2996 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2997 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3004 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3005 _mm256_storeu_pd(out0 + 4 * i, res0);
3006 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3007 _mm256_storeu_pd(out1 + 4 * i, res1);
3008 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3009 _mm256_storeu_pd(out2 + 4 * i, res2);
3010 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3011 _mm256_storeu_pd(out3 + 4 * i, res3);
3015 _mm256_storeu_pd(out0 + 4 * i, res0);
3016 _mm256_storeu_pd(out1 + 4 * i, res1);
3017 _mm256_storeu_pd(out2 + 4 * i, res2);
3018 _mm256_storeu_pd(out3 + 4 * i, res3);
3024 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3025 for (unsigned int v = 0; v < 4; ++v)
3026 out[offsets[v] + i] += in[i][v];
3028 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3029 for (unsigned int v = 0; v < 4; ++v)
3030 out[offsets[v] + i] = in[i][v];
3041 const unsigned int n_entries,
3043 std::array<double *, 4> &out)
3047 const unsigned int n_chunks = n_entries / 4;
3048 double *out0 = out[0];
3049 double *out1 = out[1];
3050 double *out2 = out[2];
3051 double *out3 = out[3];
3052 for (unsigned int i = 0; i < n_chunks; ++i)
3054 __m256d u0 = in[4 * i + 0].data;
3055 __m256d u1 = in[4 * i + 1].data;
3056 __m256d u2 = in[4 * i + 2].data;
3057 __m256d u3 = in[4 * i + 3].data;
3058 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
3059 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
3060 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
3061 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
3062 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
3063 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
3064 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
3065 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
3072 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
3073 _mm256_storeu_pd(out0 + 4 * i, res0);
3074 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
3075 _mm256_storeu_pd(out1 + 4 * i, res1);
3076 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
3077 _mm256_storeu_pd(out2 + 4 * i, res2);
3078 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
3079 _mm256_storeu_pd(out3 + 4 * i, res3);
3083 _mm256_storeu_pd(out0 + 4 * i, res0);
3084 _mm256_storeu_pd(out1 + 4 * i, res1);
3085 _mm256_storeu_pd(out2 + 4 * i, res2);
3086 _mm256_storeu_pd(out3 + 4 * i, res3);
3092 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3093 for (unsigned int v = 0; v < 4; ++v)
3094 out[v][i] += in[i][v];
3096 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3097 for (unsigned int v = 0; v < 4; ++v)
3098 out[v][i] = in[i][v];
3133 template <typename U>
3145 data = _mm256_set1_ps(x);
3155 operator=(const float scalar) && = delete;
3165 return *(reinterpret_cast<float *>(&data) + comp);
3176 return *(reinterpret_cast<const float *>(&data) + comp);
3191 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3206 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3220 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3235 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3250 load(const float *ptr)
3252 data = _mm256_loadu_ps(ptr);
3263 store(float *ptr) const
3265 _mm256_storeu_ps(ptr, data);
3276 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
3278 _mm256_stream_ps(ptr, data);
3295 gather(const float *base_ptr, const unsigned int *offsets)
3297 # if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
3301 const __m256 index_val =
3302 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3303 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3308 __m256 zero = _mm256_setzero_ps();
3311 data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
3313 for (unsigned int i = 0; i < 8; ++i)
3314 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
3332 scatter(const unsigned int *offsets, float *base_ptr) const
3335 for (unsigned int i = 0; i < 8; ++i)
3336 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3347 t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
3366 return _mm256_castps256_ps128(data);
3376 return _mm256_extractf128_ps(data, 1);
3403 __m256 mask = _mm256_set1_ps(-0.f);
3405 res.data = _mm256_andnot_ps(mask, data);
3436 template <typename Number2, std::size_t width2>
3439 template <typename Number2, std::size_t width2>
3442 template <typename Number2, std::size_t width2>
3446 template <typename Number2, std::size_t width2>
3461 const unsigned int *offsets,
3464 const unsigned int n_chunks = n_entries / 4;
3465 for (unsigned int i = 0; i < n_chunks; ++i)
3469 __m256 t0, t1, t2, t3 = {};
3470 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3471 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3472 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3473 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3474 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3475 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3476 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3477 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3479 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3480 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3481 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3482 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3483 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3484 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3485 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3486 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3490 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3491 out[i].gather(in + i, offsets);
3502 const std::array<float *, 8> &in,
3507 const unsigned int n_chunks = n_entries / 4;
3508 for (unsigned int i = 0; i < n_chunks; ++i)
3510 __m256 t0, t1, t2, t3 = {};
3511 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3512 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3513 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3514 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3515 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3516 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3517 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3518 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3520 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3521 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3522 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3523 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3524 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3525 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3526 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3527 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3530 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3542 const unsigned int n_entries,
3544 const unsigned int *offsets,
3547 const unsigned int n_chunks = n_entries / 4;
3548 for (unsigned int i = 0; i < n_chunks; ++i)
3550 __m256 u0 = in[4 * i + 0].data;
3551 __m256 u1 = in[4 * i + 1].data;
3552 __m256 u2 = in[4 * i + 2].data;
3553 __m256 u3 = in[4 * i + 3].data;
3554 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3555 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3556 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3557 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3558 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3559 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3560 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3561 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3562 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3563 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3564 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3565 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3566 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3567 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3568 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3569 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3576 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3577 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3578 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3579 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3580 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3581 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3582 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3583 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3584 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3585 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3586 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3587 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3588 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3589 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3590 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3591 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3595 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3596 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3597 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3598 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3599 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3600 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3601 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3602 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3608 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3609 for (unsigned int v = 0; v < 8; ++v)
3610 out[offsets[v] + i] += in[i][v];
3612 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3613 for (unsigned int v = 0; v < 8; ++v)
3614 out[offsets[v] + i] = in[i][v];
3625 const unsigned int n_entries,
3627 std::array<float *, 8> &out)
3631 const unsigned int n_chunks = n_entries / 4;
3632 for (unsigned int i = 0; i < n_chunks; ++i)
3634 __m256 u0 = in[4 * i + 0].data;
3635 __m256 u1 = in[4 * i + 1].data;
3636 __m256 u2 = in[4 * i + 2].data;
3637 __m256 u3 = in[4 * i + 3].data;
3638 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3639 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3640 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3641 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3642 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3643 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3644 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3645 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3646 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3647 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3648 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3649 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3650 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3651 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3652 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3653 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3657 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3658 _mm_storeu_ps(out[0] + 4 * i, res0);
3659 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3660 _mm_storeu_ps(out[1] + 4 * i, res1);
3661 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3662 _mm_storeu_ps(out[2] + 4 * i, res2);
3663 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3664 _mm_storeu_ps(out[3] + 4 * i, res3);
3665 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3666 _mm_storeu_ps(out[4] + 4 * i, res4);
3667 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3668 _mm_storeu_ps(out[5] + 4 * i, res5);
3669 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3670 _mm_storeu_ps(out[6] + 4 * i, res6);
3671 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3672 _mm_storeu_ps(out[7] + 4 * i, res7);
3676 _mm_storeu_ps(out[0] + 4 * i, res0);
3677 _mm_storeu_ps(out[1] + 4 * i, res1);
3678 _mm_storeu_ps(out[2] + 4 * i, res2);
3679 _mm_storeu_ps(out[3] + 4 * i, res3);
3680 _mm_storeu_ps(out[4] + 4 * i, res4);
3681 _mm_storeu_ps(out[5] + 4 * i, res5);
3682 _mm_storeu_ps(out[6] + 4 * i, res6);
3683 _mm_storeu_ps(out[7] + 4 * i, res7);
3688 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3689 for (unsigned int v = 0; v < 8; ++v)
3690 out[v][i] += in[i][v];
3692 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3693 for (unsigned int v = 0; v < 8; ++v)
3694 out[v][i] = in[i][v];
3702 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
3734 template <typename U>
3746 data = _mm512_set1_pd(x);
3757 operator=(const double scalar) && = delete;
3767 return *(reinterpret_cast<double *>(&data) + comp);
3778 return *(reinterpret_cast<const double *>(&data) + comp);
3793 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3808 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3822 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3837 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3852 load(const double *ptr)
3854 data = _mm512_loadu_pd(ptr);
3859 load(const float *ptr)
3861 data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
3872 store(double *ptr) const
3874 _mm512_storeu_pd(ptr, data);
3879 store(float *ptr) const
3881 _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
3892 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
3894 _mm512_stream_pd(ptr, data);
3911 gather(const double *base_ptr, const unsigned int *offsets)
3913 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
3917 const __m256 index_val =
3918 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3919 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3925 __mmask8 mask = 0xFF;
3927 data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
3929 for (unsigned int i = 0; i < 8; ++i)
3930 *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3948 scatter(const unsigned int *offsets, double *base_ptr) const
3950 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
3951 for (unsigned int i = 0; i < 8; ++i)
3952 for (unsigned int j = i + 1; j < 8; ++j)
3953 Assert(offsets[i] != offsets[j],
3954 ExcMessage("Result of scatter undefined if two offset elements"
3955 " point to the same position"));
3960 const __m256 index_val =
3961 _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
3962 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
3963 _mm512_i32scatter_pd(base_ptr, index, data, 8);
3965 for (unsigned int i = 0; i < 8; ++i)
3966 base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
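// The nested Assert loop above enforces the precondition that all offsets be
// distinct: as the message says, the hardware scatter gives no useful
// guarantee about which lane ends up stored when two offsets collide, whereas
// the element-wise fallback simply writes the lanes in loop order.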
3978 t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
3997 return _mm512_castpd512_pd256(data);
4007 return _mm512_extractf64x4_pd(data, 1);
4036 __m512d mask = _mm512_set1_pd(-0.);
4038 res.data = reinterpret_cast<__m512d>(
4039 _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
4040 reinterpret_cast<__m512i>(data)));
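// Same sign-mask trick as in the SSE and AVX versions, but routed through the
// integer domain: with plain AVX-512F there is no _mm512_andnot_pd (that
// intrinsic needs the DQ extension), so the code casts to __m512i, clears the
// sign bits with _mm512_andnot_epi64, and casts back.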
4071 template <typename Number2, std::size_t width2>
4074 template <typename Number2, std::size_t width2>
4077 template <typename Number2, std::size_t width2>
4081 template <typename Number2, std::size_t width2>
4096 const unsigned int *offsets,
4104 const unsigned int n_chunks = n_entries / 4;
4105 for (unsigned int i = 0; i < n_chunks; ++i)
4107 __m512d t0, t1, t2, t3 = {};
4109 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
4110 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
4111 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
4112 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
4113 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
4114 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
4115 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
4116 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
4118 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4119 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4120 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4121 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4122 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4123 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4124 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4125 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4128 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4129 out[i].gather(in + i, offsets);
4140 const std::array<double *, 8> &in,
4143 const unsigned int n_chunks = n_entries / 4;
4144 for (unsigned int i = 0; i < n_chunks; ++i)
4146 __m512d t0, t1, t2, t3 = {};
4148 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
4149 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
4150 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
4151 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
4152 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
4153 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
4154 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
4155 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
4157 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
4158 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
4159 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
4160 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
4161 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
4162 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
4163 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
4164 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
4167 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4179 const unsigned int n_entries,
4181 const unsigned int *offsets,
4186 const unsigned int n_chunks = n_entries / 4;
4187 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4188 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4189 for (unsigned int i = 0; i < n_chunks; ++i)
4191 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4192 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4193 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4194 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4195 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4196 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4197 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4198 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4199 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4200 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4201 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4202 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4203 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4204 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4205 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4206 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4213 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
4214 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4215 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
4216 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4217 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
4218 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4219 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
4220 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4221 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
4222 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4223 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
4224 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4225 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
4226 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4227 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
4228 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4232 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
4233 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
4234 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
4235 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
4236 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
4237 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
4238 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
4239 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
4245 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4246 for (unsigned int v = 0; v < 8; ++v)
4247 out[offsets[v] + i] += in[i][v];
4249 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4250 for (unsigned int v = 0; v < 8; ++v)
4251 out[offsets[v] + i] = in[i][v];
4262 const unsigned int n_entries,
4264 std::array<double *, 8> &out)
4268 const unsigned int n_chunks = n_entries / 4;
4269 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
4270 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
4271 for (unsigned int i = 0; i < n_chunks; ++i)
4273 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
4274 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
4275 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4276 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
4277 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
4278 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
4279 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
4280 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
4281 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
4282 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
4283 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
4284 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
4285 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
4286 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
4287 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
4288 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
4292 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
4293 _mm256_storeu_pd(out[0] + 4 * i, res0);
4294 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
4295 _mm256_storeu_pd(out[1] + 4 * i, res1);
4296 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
4297 _mm256_storeu_pd(out[2] + 4 * i, res2);
4298 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
4299 _mm256_storeu_pd(out[3] + 4 * i, res3);
4300 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
4301 _mm256_storeu_pd(out[4] + 4 * i, res4);
4302 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
4303 _mm256_storeu_pd(out[5] + 4 * i, res5);
4304 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
4305 _mm256_storeu_pd(out[6] + 4 * i, res6);
4306 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
4307 _mm256_storeu_pd(out[7] + 4 * i, res7);
4311 _mm256_storeu_pd(out[0] + 4 * i, res0);
4312 _mm256_storeu_pd(out[1] + 4 * i, res1);
4313 _mm256_storeu_pd(out[2] + 4 * i, res2);
4314 _mm256_storeu_pd(out[3] + 4 * i, res3);
4315 _mm256_storeu_pd(out[4] + 4 * i, res4);
4316 _mm256_storeu_pd(out[5] + 4 * i, res5);
4317 _mm256_storeu_pd(out[6] + 4 * i, res6);
4318 _mm256_storeu_pd(out[7] + 4 * i, res7);
4323 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4324 for (unsigned int v = 0; v < 8; ++v)
4325 out[v][i] += in[i][v];
4327 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4328 for (unsigned int v = 0; v < 8; ++v)
4329 out[v][i] = in[i][v];
4364 template <typename U>
4376 data = _mm512_set1_ps(x);
4386 operator=(const float scalar) && = delete;
4396 return *(reinterpret_cast<float *>(&data) + comp);
4407 return *(reinterpret_cast<const float *>(&data) + comp);
4422 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4437 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4451 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4466 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
4481 load(const float *ptr)
4483 data = _mm512_loadu_ps(ptr);
4494 store(float *ptr) const
4496 _mm512_storeu_ps(ptr, data);
4507 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
4509 _mm512_stream_ps(ptr, data);
4526 gather(const float *base_ptr, const unsigned int *offsets)
4528 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
4532 const __m512 index_val =
4533 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4534 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4540 __mmask16 mask = 0xFFFF;
4542 data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
4544 for (unsigned int i = 0; i < 16; ++i)
4545 *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4563 scatter(const unsigned int *offsets, float *base_ptr) const
4565 # ifdef DEAL_II_USE_VECTORIZATION_GATHER
4566 for (unsigned int i = 0; i < 16; ++i)
4567 for (unsigned int j = i + 1; j < 16; ++j)
4568 Assert(offsets[i] != offsets[j],
4569 ExcMessage("Result of scatter undefined if two offset elements"
4570 " point to the same position"));
4575 const __m512 index_val =
4576 _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
4577 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
4578 _mm512_i32scatter_ps(base_ptr, index, data, 4);
4580 for (unsigned int i = 0; i < 16; ++i)
4581 base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4593 t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
4612 return _mm512_castps512_ps256(data);
4622 return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
4651 __m512 mask = _mm512_set1_ps(-0.f);
4653 res.data = reinterpret_cast<__m512>(
4654 _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
4655 reinterpret_cast<__m512i>(data)));
4686 template <typename Number2, std::size_t width2>
4689 template <typename Number2, std::size_t width2>
4692 template <typename Number2, std::size_t width2>
4696 template <typename Number2, std::size_t width2>
4711 const unsigned int *offsets,
4718 const unsigned int n_chunks = n_entries / 4;
4726 __m512 t0, t1, t2, t3;
4729 for (unsigned int i = 0; i < n_chunks; ++i)
4731 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
4732 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
4733 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
4734 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
4735 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
4736 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
4737 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
4738 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
4739 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
4740 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
4741 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
4742 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
4743 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
4744 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
4745 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
4746 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
4748 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4749 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4750 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4751 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4753 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4754 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4755 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4756 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4760 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4761 out[i].gather(in + i, offsets);
4772 const std::array<float *, 16> &in,
4777 const unsigned int n_chunks = n_entries / 4;
4779 __m512 t0, t1, t2, t3;
4782 for (unsigned int i = 0; i < n_chunks; ++i)
4784 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
4785 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
4786 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
4787 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
4788 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
4789 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
4790 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
4791 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
4792 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
4793 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
4794 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
4795 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
4796 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
4797 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
4798 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
4799 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
4801 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
4802 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
4803 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
4804 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
4806 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
4807 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
4808 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
4809 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
4812 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4824 const unsigned int n_entries,
4826 const unsigned int *offsets,
4829 const unsigned int n_chunks = n_entries / 4;
4830 for (unsigned int i = 0; i < n_chunks; ++i)
4832 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
4833 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
4835 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
4837 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
4838 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
4839 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
4840 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
4841 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
4843 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
4844 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
4845 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
4846 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
4847 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
4848 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
4849 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
4850 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
4851 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
4852 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
4853 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
4854 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
4855 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
4856 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
4857 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
4858 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
4865 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
4866 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4867 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
4868 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4869 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
4870 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4871 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
4872 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4873 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
4874 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4875 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
4876 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4877 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
4878 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4879 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
4880 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4881 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
4882 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4883 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
4884 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4885 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
4886 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4887 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
4888 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4889 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
4890 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4891 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
4892 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4893 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
4894 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4895 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
4896 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4900 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
4901 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
4902 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
4903 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
4904 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
4905 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
4906 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
4907 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
4908 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
4909 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
4910 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
4911 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
4912 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
4913 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
4914 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
4915 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
4921 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4922 for (unsigned int v = 0; v < 16; ++v)
4923 out[offsets[v] + i] += in[i][v];
4925 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4926 for (unsigned int v = 0; v < 16; ++v)
4927 out[offsets[v] + i] = in[i][v];
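/**
 * The shuffle sequence used in the specialization above (and in the variant
 * that follows) is the classical 4x4 transpose idiom, applied to each 128-bit
 * sub-lane of the 512-bit registers. A minimal SSE sketch of the same idiom
 * (illustrative only; the variable names r0..r3 are made up here):
 *
 * @code
 * // r0..r3 hold the rows (a0 a1 a2 a3), (b0 b1 b2 b3), (c0 ...), (d0 ...)
 * __m128 t0 = _mm_shuffle_ps(r0, r1, 0x44); // (a0 a1 b0 b1)
 * __m128 t1 = _mm_shuffle_ps(r0, r1, 0xee); // (a2 a3 b2 b3)
 * __m128 t2 = _mm_shuffle_ps(r2, r3, 0x44); // (c0 c1 d0 d1)
 * __m128 t3 = _mm_shuffle_ps(r2, r3, 0xee); // (c2 c3 d2 d3)
 * __m128 u0 = _mm_shuffle_ps(t0, t2, 0x88); // (a0 b0 c0 d0), i.e. column 0
 * __m128 u1 = _mm_shuffle_ps(t0, t2, 0xdd); // (a1 b1 c1 d1), i.e. column 1
 * __m128 u2 = _mm_shuffle_ps(t1, t3, 0x88); // (a2 b2 c2 d2), i.e. column 2
 * __m128 u3 = _mm_shuffle_ps(t1, t3, 0xdd); // (a3 b3 c3 d3), i.e. column 3
 * @endcode
 */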
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool                        add_into,
                               const unsigned int                n_entries,
                               const VectorizedArray<float, 16> *in,
                               std::array<float *, 16>          &out)
{
  // as above, but the output is addressed through one pointer per lane
  const unsigned int n_chunks = n_entries / 4;
  for (unsigned int i = 0; i < n_chunks; ++i)
    {
      __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
      __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
      __m512 t2 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
      __m512 t3 =
        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
      __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
      __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
      __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
      __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);

      __m128 res0  = _mm512_extractf32x4_ps(u0, 0);
      __m128 res4  = _mm512_extractf32x4_ps(u0, 1);
      __m128 res8  = _mm512_extractf32x4_ps(u0, 2);
      __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
      __m128 res1  = _mm512_extractf32x4_ps(u1, 0);
      __m128 res5  = _mm512_extractf32x4_ps(u1, 1);
      __m128 res9  = _mm512_extractf32x4_ps(u1, 2);
      __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
      __m128 res2  = _mm512_extractf32x4_ps(u2, 0);
      __m128 res6  = _mm512_extractf32x4_ps(u2, 1);
      __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
      __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
      __m128 res3  = _mm512_extractf32x4_ps(u3, 0);
      __m128 res7  = _mm512_extractf32x4_ps(u3, 1);
      __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
      __m128 res15 = _mm512_extractf32x4_ps(u3, 3);

      if (add_into)
        {
          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
          _mm_storeu_ps(out[0] + 4 * i, res0);
          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
      else
        {
          _mm_storeu_ps(out[0] + 4 * i, res0);
          _mm_storeu_ps(out[1] + 4 * i, res1);
          _mm_storeu_ps(out[2] + 4 * i, res2);
          _mm_storeu_ps(out[3] + 4 * i, res3);
          _mm_storeu_ps(out[4] + 4 * i, res4);
          _mm_storeu_ps(out[5] + 4 * i, res5);
          _mm_storeu_ps(out[6] + 4 * i, res6);
          _mm_storeu_ps(out[7] + 4 * i, res7);
          _mm_storeu_ps(out[8] + 4 * i, res8);
          _mm_storeu_ps(out[9] + 4 * i, res9);
          _mm_storeu_ps(out[10] + 4 * i, res10);
          _mm_storeu_ps(out[11] + 4 * i, res11);
          _mm_storeu_ps(out[12] + 4 * i, res12);
          _mm_storeu_ps(out[13] + 4 * i, res13);
          _mm_storeu_ps(out[14] + 4 * i, res14);
          _mm_storeu_ps(out[15] + 4 * i, res15);
        }
    }

  if (add_into)
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] += in[i][v];
  else
    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
      for (unsigned int v = 0; v < 16; ++v)
        out[v][i] = in[i][v];
}
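/**
 * Example of how the two specializations above might be called (an
 * illustrative sketch; the buffer size, the offsets, and the pointer layout
 * are made up for the example):
 *
 * @code
 * constexpr unsigned int     n_entries = 8;
 * VectorizedArray<float, 16> in[n_entries]; // filled elsewhere
 * float                      result[16 * n_entries];
 *
 * // Variant 1: one output array addressed through per-lane offsets
 * unsigned int offsets[16];
 * for (unsigned int v = 0; v < 16; ++v)
 *   offsets[v] = v * n_entries;
 * vectorized_transpose_and_store(false, n_entries, in, offsets, result);
 *
 * // Variant 2: one pointer per lane
 * std::array<float *, 16> ptrs;
 * for (unsigned int v = 0; v < 16; ++v)
 *   ptrs[v] = result + v * n_entries;
 * vectorized_transpose_and_store(false, n_entries, in, ptrs);
 * @endcode
 */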
# endif

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
   defined(__VSX__)

  // Specialization of VectorizedArray for double with 2 lanes, implemented
  // with the POWER/VSX intrinsics; only the members present in this excerpt
  // are shown.

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)

  VectorizedArray &
  operator=(const double x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const double scalar) && = delete;

  double &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  const double &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  void
  load(const double *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(double *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  __vector double data;
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  // Corresponding specialization of VectorizedArray for float with 4 lanes,
  // again using the POWER/VSX intrinsics.

  template <typename U>
  VectorizedArray(const std::initializer_list<U> &list)

  VectorizedArray &
  operator=(const float x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  float &
  operator[](const unsigned int comp)
  {
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  const float &
  operator[](const unsigned int comp) const
  {
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  __vector float data;
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
  template <typename Number2, std::size_t width2>
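/**
 * gather() and scatter() in the specializations above follow the same
 * contract as the generic class: lane i reads from, respectively writes to,
 * base_ptr[offsets[i]]. A brief usage sketch (illustrative only; the array
 * contents are made up):
 *
 * @code
 * double             src[8]     = {0., 10., 20., 30., 40., 50., 60., 70.};
 * const unsigned int offsets[2] = {1, 6};
 *
 * VectorizedArray<double, 2> v;
 * v.gather(src, offsets);  // v[0] == 10.,    v[1] == 60.
 *
 * double dst[8] = {};
 * v.scatter(offsets, dst); // dst[1] == 10.,  dst[6] == 60.
 * @endcode
 */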
template <typename Number, std::size_t width>
inline bool
operator==(const VectorizedArray<Number, width> &lhs,
           const VectorizedArray<Number, width> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
template <typename Number, std::size_t width>
inline std::ostream &
operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
{
  constexpr unsigned int n = VectorizedArray<Number, width>::size();
  for (unsigned int i = 0; i < n - 1; ++i)
    out << p[i] << ' ';
  out << p[n - 1];

  return out;
}
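/**
 * A short usage sketch for the output operator above (illustrative only; the
 * width-1 fallback is always available):
 *
 * @code
 * VectorizedArray<double, 1> v;
 * v = 1.5;                     // broadcast assignment to all lanes
 * std::cout << v << std::endl; // prints the lanes separated by spaces
 * @endcode
 */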
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
template <SIMDComparison predicate, typename Number>
inline Number
compare_and_apply_mask(const Number &left,
                       const Number &right,
                       const Number &true_value,
                       const Number &false_value)
{
  // Scalar fallback: evaluate the comparison selected by the compile-time
  // predicate and return one of the two given values accordingly.
  bool mask = false;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = (left == right); break;
      case SIMDComparison::not_equal:             mask = (left != right); break;
      case SIMDComparison::less_than:             mask = (left < right);  break;
      case SIMDComparison::less_than_or_equal:    mask = (left <= right); break;
      case SIMDComparison::greater_than:          mask = (left > right);  break;
      case SIMDComparison::greater_than_or_equal: mask = (left >= right); break;
    }

  return mask ? true_value : false_value;
}
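/**
 * compare_and_apply_mask() implements a branchless, per-lane select. A scalar
 * usage sketch (illustrative only; assumes a double variable x):
 *
 * @code
 * // clamp x from below by zero, i.e. (x < 0) ? 0 : x
 * const double clamped =
 *   compare_and_apply_mask<SIMDComparison::less_than>(x, 0., 0., x);
 * @endcode
 *
 * The overloads below perform the same selection simultaneously on all lanes
 * of a VectorizedArray.
 */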
template <SIMDComparison predicate, typename Number>
inline VectorizedArray<Number, 1>
compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
                       const VectorizedArray<Number, 1> &right,
                       const VectorizedArray<Number, 1> &true_value,
                       const VectorizedArray<Number, 1> &false_value)
{
  VectorizedArray<Number, 1> result;
  result.data = compare_and_apply_mask<predicate, Number>(
    left.data, right.data, true_value.data, false_value.data);
  return result;
}
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

template <SIMDComparison predicate>
inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}
template <SIMDComparison predicate>
inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  // the double variant works on 8 lanes, so the comparison yields a __mmask8
  const __mmask8 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}
# endif

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

template <SIMDComparison predicate>
inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const __m256 mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 8> result;
  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
  return result;
}

template <SIMDComparison predicate>
inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const __m256d mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 4> result;
  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
  return result;
}
# endif

# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

template <SIMDComparison predicate>
  // (The SSE2 specialization for VectorizedArray<float, 4> first computes a
  // comparison mask with the _mm_cmp*_ps intrinsic matching the predicate and
  // then blends by hand, since SSE2 has no blend instruction:)
  result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
                          _mm_andnot_ps(mask, false_values.data));

template <SIMDComparison predicate>
  // (analogous double-precision variant for VectorizedArray<double, 2>:)
  result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
                          _mm_andnot_pd(mask, false_values.data));
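/**
 * SSE2 offers no blend instruction, so the specializations above select
 * between true_values and false_values with the and/andnot/or idiom. A
 * self-contained sketch of that idiom (illustrative only):
 *
 * @code
 * // lanes in which mask is all-ones take a, all other lanes take b
 * inline __m128
 * select_sse2(const __m128 mask, const __m128 a, const __m128 b)
 * {
 *   return _mm_or_ps(_mm_and_ps(mask, a), _mm_andnot_ps(mask, b));
 * }
 * @endcode
 */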
# endif

# if defined(DEAL_II_HAVE_ARM_NEON) && defined(__ARM_NEON)

template <SIMDComparison predicate>
  // (The NEON specialization for VectorizedArray<float, 4> computes a
  // uint32x4_t comparison mask and then selects through bitwise operations:)
  result.data = vreinterpretq_f32_u32(vorrq_u32(
    vandq_u32(mask, vreinterpretq_u32_f32(true_values.data)),
    vandq_u32(vmvnq_u32(mask), vreinterpretq_u32_f32(false_values.data))));

template <SIMDComparison predicate>
  // (Double-precision variant for VectorizedArray<double, 2>; the not_equal
  // case inverts the equality mask through 32-bit lanes because NEON has no
  // 64-bit bitwise-not:)
  mask = vreinterpretq_u64_u32(
    vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(left.data, right.data))));

  result.data = vreinterpretq_f64_u64(vorrq_u64(
    vandq_u64(mask, vreinterpretq_u64_f64(true_values.data)),
    vandq_u64(vreinterpretq_u64_u32(vmvnq_u32(vreinterpretq_u32_u64(mask))),
              vreinterpretq_u64_f64(false_values.data))));
  template <typename T>
  static constexpr std::size_t
  static constexpr std::size_t

  template <typename T, std::size_t width_>
  static constexpr std::size_t
  static constexpr std::size_t
template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
sin(const ::VectorizedArray<Number, width> &x)
{
  // evaluate the standard function one lane at a time, collect the results in
  // a temporary array, and load that array into the returned object
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::sin(x[i]);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
cos(const ::VectorizedArray<Number, width> &x)
{
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::cos(x[i]);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
tan(const ::VectorizedArray<Number, width> &x)
{
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::tan(x[i]);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
exp(const ::VectorizedArray<Number, width> &x)
{
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::exp(x[i]);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
log(const ::VectorizedArray<Number, width> &x)
{
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::log(x[i]);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}
template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
sqrt(const ::VectorizedArray<Number, width> &x)
{
  return x.get_sqrt();
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
pow(const ::VectorizedArray<Number, width> &x, const Number p)
{
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::pow(x[i], p);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
pow(const ::VectorizedArray<Number, width> &x,
    const ::VectorizedArray<Number, width> &p)
{
  Number values[::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0; i < ::VectorizedArray<Number, width>::size(); ++i)
    values[i] = std::pow(x[i], p[i]);
  ::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}
template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
abs(const ::VectorizedArray<Number, width> &x)
{
  return x.get_abs();
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
max(const ::VectorizedArray<Number, width> &x,
    const ::VectorizedArray<Number, width> &y)
{
  return x.get_max(y);
}

template <typename Number, std::size_t width>
inline ::VectorizedArray<Number, width>
min(const ::VectorizedArray<Number, width> &x,
    const ::VectorizedArray<Number, width> &y)
{
  return x.get_min(y);
}
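/**
 * The overloads above make the usual cmath functions applicable to all lanes
 * of a VectorizedArray at once. A short usage sketch (illustrative only; uses
 * the always-available width-1 fallback):
 *
 * @code
 * VectorizedArray<double, 1> x;
 * x = 0.25;
 * const auto y = std::sqrt(x) + std::sin(x); // evaluated lane by lane
 * @endcode
 */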
6718 #ifdef DEAL_II_HAVE_CXX20