17 #ifndef dealii_vectorization_h
18 #define dealii_vectorization_h
44 #if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0
53 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && !defined(__AVX__)
55 "Mismatch in vectorization capabilities: AVX was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
57 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && !defined(__AVX512F__)
59 "Mismatch in vectorization capabilities: AVX-512F was detected during configuration of deal.II and switched on, but it is apparently not available for the file you are trying to compile at the moment. Check compilation flags controlling the instruction set, such as -march=native."
64 # elif defined(__ALTIVEC__)
73 # include <x86intrin.h>
85 template <typename Number, std::size_t width>
119 "You are trying to compare iterators into different arrays."));
131 "You are trying to compare iterators into different arrays."));
145 const typename T::value_type &
157 template <typename U = T>
158 std::enable_if_t<!std::is_same<U, const U>::value, typename T::value_type> &
201 "You can't decrement an iterator that is already at the beginning of the range."));
222 return static_cast<std::ptrdiff_t>(lane) -
223        static_cast<std::ptrdiff_t>(other.lane);
249 template <typename T, std::size_t width>
261 template <typename U>
264 auto i0 = this->begin();
265 auto i1 = list.begin();
267 for (; i1 != list.end(); ++i0, ++i1)
272 "Initializer list exceeds size of this VectorizedArray object."));
277 for (; i0 != this->end(); ++i0)
286 static constexpr std::size_t
418 template <typename Number, std::size_t width>
428 static_assert(width == 1,
429               "You specified an illegal width that is not supported.");
448 template <typename U>
548 template <typename OtherNumber>
561 template <typename OtherNumber>
635 gather(const Number *base_ptr, const unsigned int *offsets)
637 data = base_ptr[offsets[0]];
654 scatter(const unsigned int *offsets, Number *base_ptr) const
656 base_ptr[offsets[0]] = data;
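// Explanatory note (added): in this width-1 fallback, gather() and scatter()
// degenerate to a single indexed read and write -- only offsets[0] is used,
// since the "vector" holds exactly one lane. A minimal usage sketch, assuming
// hypothetical arrays `values` and `idx` that are not part of this header:
//
//   VectorizedArray<double, 1> v;
//   v.gather(values, idx);   // v[0] = values[idx[0]]
//   v.scatter(idx, values);  // values[idx[0]] = v[0]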
720 template <typename Number2, std::size_t width2>
723 template <typename Number2, std::size_t width2>
726 template <typename Number2, std::size_t width2>
730 template <typename Number2, std::size_t width2>
749 template <typename Number,
767 template <typename VectorizedArrayType>
772 std::is_same<VectorizedArrayType,
774 VectorizedArrayType::size()>>::value,
775 "VectorizedArrayType is not a VectorizedArray.");
777 VectorizedArrayType result = u;
794 template <typename Number, std::size_t width>
797 const std::array<Number *, width> &ptrs,
798 const unsigned int offset)
800 for (unsigned int v = 0; v < width; ++v)
801   out.data[v] = ptrs[v][offset];
831 template <typename Number, std::size_t width>
835 const unsigned int *offsets,
838 for (unsigned int i = 0; i < n_entries; ++i)
839   for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
840 out[i][v] = in[offsets[v] + i];
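// Explanatory note (added): this generic fallback of
// vectorized_load_and_transpose() gathers lane v of entry i from
// in[offsets[v] + i], i.e. it transposes one contiguous range per lane
// (selected by `offsets`) into n_entries VectorizedArray objects. The SIMD
// specializations further down implement the same contract with
// shuffle/unpack intrinsics.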
855 template <typename Number, std::size_t width>
858 const std::array<Number *, width> &in,
861 for (unsigned int i = 0; i < n_entries; ++i)
862   for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
863 out[i][v] = in[v][i];
906 template <typename Number, std::size_t width>
909 const unsigned int n_entries,
911 const unsigned int *offsets,
915 for (unsigned int i = 0; i < n_entries; ++i)
916   for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
917     out[offsets[v] + i] += in[i][v];
919 for (unsigned int i = 0; i < n_entries; ++i)
920   for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
921 out[offsets[v] + i] = in[i][v];
936 template <typename Number, std::size_t width>
939 const unsigned int n_entries,
941 std::array<Number *, width> &out)
944 for (unsigned int i = 0; i < n_entries; ++i)
945   for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
946     out[v][i] += in[i][v];
948 for (unsigned int i = 0; i < n_entries; ++i)
949   for (unsigned int v = 0; v < VectorizedArray<Number, width>::size(); ++v)
950 out[v][i] = in[i][v];
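// Explanatory note (added): vectorized_transpose_and_store() is the inverse
// operation. The accumulating branch above (out[v][i] += in[i][v]) is chosen
// by the function's boolean flag (named add_into in deal.II), while the other
// branch overwrites the destination; every SIMD specialization below keeps
// the same two code paths.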
961 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
993 template <typename U>
1005 data = _mm512_set1_pd(x);
1016 operator=(const double scalar) && = delete;
1026 return *(reinterpret_cast<double *>(&data) + comp);
1037 return *(reinterpret_cast<const double *>(&data) + comp);
1052 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1067 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1081 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1096 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1111 load(const double *ptr)
1113 data = _mm512_loadu_pd(ptr);
1118 load(const float *ptr)
1120 data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
1131 store(double *ptr) const
1133 _mm512_storeu_pd(ptr, data);
1138 store(float *ptr) const
1140 _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
1151 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1153 _mm512_stream_pd(ptr, data);
1170 gather(const double *base_ptr, const unsigned int *offsets)
1175 const __m256 index_val =
1176   _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1177 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1183 __mmask8 mask = 0xFF;
1185 data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
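// Explanatory note (added): the 32-bit offsets are loaded as one raw 256-bit
// block (via _mm256_loadu_ps) and reinterpreted as the __m256i index vector
// expected by _mm512_mask_i32gather_pd; with the all-ones mask 0xFF each of
// the eight lanes is gathered from base_ptr[offsets[v]].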
1202 scatter(const unsigned int *offsets, double *base_ptr) const
1204 for (unsigned int i = 0; i < 8; ++i)
1205   for (unsigned int j = i + 1; j < 8; ++j)
1206     Assert(offsets[i] != offsets[j],
1207            ExcMessage("Result of scatter undefined if two offset elements"
1208                       " point to the same position"));
1213 const __m256 index_val =
1214   _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
1215 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
1216 _mm512_i32scatter_pd(base_ptr, index, data, 8);
1253 __m512d mask = _mm512_set1_pd(-0.);
1255 res.data = reinterpret_cast<__m512d>(
1256   _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
1257                       reinterpret_cast<__m512i>(data)));
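// Explanatory note (added): the absolute value of a packed double is obtained
// by clearing the sign bit: -0.0 has only the sign bit set, so andnot-ing the
// data with that mask leaves the magnitude bits untouched and forces every
// lane's sign to +.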
1288 template <typename Number2, std::size_t width2>
1291 template <typename Number2, std::size_t width2>
1294 template <typename Number2, std::size_t width2>
1298 template <typename Number2, std::size_t width2>
1313 const unsigned int *offsets,
1321 const unsigned int n_chunks = n_entries / 4;
1322 for (unsigned int i = 0; i < n_chunks; ++i)
1324 __m512d t0, t1, t2, t3 = {};
1326 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
1327 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
1328 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
1329 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
1330 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
1331 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
1332 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
1333 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
1335 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1336 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1337 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1338 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1339 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1340 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1341 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1342 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1345 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1346   out[i].gather(in + i, offsets);
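// Explanatory note (added): the loop above processes the 8 double lanes in
// chunks of 4 entries. Two 256-bit halves are packed into one 512-bit
// register (_mm512_insertf64x4), combined across registers with
// _mm512_shuffle_f64x2, and finally interleaved with unpacklo/unpackhi so
// that out[4*i+k] holds entry k of all eight lanes. Entries beyond the last
// full chunk fall back to the element-wise gather().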
1357 const std::array<double *, 8> &in,
1360 const unsigned int n_chunks = n_entries / 4;
1361 for (unsigned int i = 0; i < n_chunks; ++i)
1363 __m512d t0, t1, t2, t3 = {};
1365 t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
1366 t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
1367 t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
1368 t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
1369 t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
1370 t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
1371 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
1372 t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
1374 __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
1375 __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
1376 __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
1377 __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
1378 out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
1379 out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
1380 out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
1381 out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
1384 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1396 const unsigned int n_entries,
1398 const unsigned int * offsets,
1403 const unsigned int n_chunks = n_entries / 4;
1404 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1405 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1406 for (unsigned int i = 0; i < n_chunks; ++i)
1408 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1409 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1410 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1411 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1412 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1413 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1414 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1415 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1416 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1417 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1418 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1419 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1420 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1421 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1422 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1423 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1430 res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
1431 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1432 res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
1433 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1434 res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
1435 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1436 res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
1437 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1438 res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
1439 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1440 res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
1441 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1442 res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
1443 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1444 res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
1445 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1449 _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
1450 _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
1451 _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
1452 _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
1453 _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
1454 _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
1455 _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
1456 _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
1462 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1463   for (unsigned int v = 0; v < 8; ++v)
1464     out[offsets[v] + i] += in[i][v];
1466 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1467   for (unsigned int v = 0; v < 8; ++v)
1468 out[offsets[v] + i] = in[i][v];
1479 const unsigned int n_entries,
1481 std::array<double *, 8> & out)
1485 const unsigned int n_chunks = n_entries / 4;
1486 __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
1487 __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
1488 for (unsigned int i = 0; i < n_chunks; ++i)
1490 __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
1491 __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
1492 __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1493 __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
1494 __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
1495 __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
1496 __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
1497 __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
1498 __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
1499 __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
1500 __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
1501 __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
1502 __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
1503 __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
1504 __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
1505 __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
1509 res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
1510 _mm256_storeu_pd(out[0] + 4 * i, res0);
1511 res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
1512 _mm256_storeu_pd(out[1] + 4 * i, res1);
1513 res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
1514 _mm256_storeu_pd(out[2] + 4 * i, res2);
1515 res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
1516 _mm256_storeu_pd(out[3] + 4 * i, res3);
1517 res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
1518 _mm256_storeu_pd(out[4] + 4 * i, res4);
1519 res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
1520 _mm256_storeu_pd(out[5] + 4 * i, res5);
1521 res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
1522 _mm256_storeu_pd(out[6] + 4 * i, res6);
1523 res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
1524 _mm256_storeu_pd(out[7] + 4 * i, res7);
1528 _mm256_storeu_pd(out[0] + 4 * i, res0);
1529 _mm256_storeu_pd(out[1] + 4 * i, res1);
1530 _mm256_storeu_pd(out[2] + 4 * i, res2);
1531 _mm256_storeu_pd(out[3] + 4 * i, res3);
1532 _mm256_storeu_pd(out[4] + 4 * i, res4);
1533 _mm256_storeu_pd(out[5] + 4 * i, res5);
1534 _mm256_storeu_pd(out[6] + 4 * i, res6);
1535 _mm256_storeu_pd(out[7] + 4 * i, res7);
1540 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1541   for (unsigned int v = 0; v < 8; ++v)
1542     out[v][i] += in[i][v];
1544 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1545   for (unsigned int v = 0; v < 8; ++v)
1546 out[v][i] = in[i][v];
1581 template <typename U>
1593 data = _mm512_set1_ps(x);
1603 operator=(const float scalar) && = delete;
1613 return *(reinterpret_cast<float *>(&data) + comp);
1624 return *(reinterpret_cast<const float *>(&data) + comp);
1639 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1654 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1668 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1683 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
1698 load(const float *ptr)
1700 data = _mm512_loadu_ps(ptr);
1711 store(float *ptr) const
1713 _mm512_storeu_ps(ptr, data);
1724 Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
1726 _mm512_stream_ps(ptr, data);
1743 gather(const float *base_ptr, const unsigned int *offsets)
1748 const __m512 index_val =
1749   _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1750 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1756 __mmask16 mask = 0xFFFF;
1758 data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
1775 scatter(const unsigned int *offsets, float *base_ptr) const
1777 for (unsigned int i = 0; i < 16; ++i)
1778   for (unsigned int j = i + 1; j < 16; ++j)
1779     Assert(offsets[i] != offsets[j],
1780            ExcMessage("Result of scatter undefined if two offset elements"
1781                       " point to the same position"));
1786 const __m512 index_val =
1787   _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
1788 const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
1789 _mm512_i32scatter_ps(base_ptr, index, data, 4);
1826 __m512 mask = _mm512_set1_ps(-0.f);
1828 res.data = reinterpret_cast<__m512>(
1829   _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
1830                       reinterpret_cast<__m512i>(data)));
1861 template <typename Number2, std::size_t width2>
1864 template <typename Number2, std::size_t width2>
1867 template <typename Number2, std::size_t width2>
1871 template <typename Number2, std::size_t width2>
1886 const unsigned int *offsets,
1893 const unsigned int n_chunks = n_entries / 4;
1901 __m512 t0, t1, t2, t3;
1904 for (unsigned int i = 0; i < n_chunks; ++i)
1906 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
1907 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
1908 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
1909 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
1910 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
1911 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
1912 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
1913 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
1914 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
1915 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
1916 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
1917 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
1918 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
1919 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
1920 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
1921 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
1923 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1924 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1925 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1926 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1928 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1929 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1930 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1931 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1935 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1936   out[i].gather(in + i, offsets);
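// Explanatory note (added): the 16-lane float version works on 4x4
// sub-blocks: four 128-bit loads per lane group are packed with
// _mm512_insertf32x4 and then transposed with two rounds of
// _mm512_shuffle_ps; entries past the last full chunk again use the
// scalar-index gather() path.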
1947 const std::array<float *, 16> &in,
1952 const unsigned int n_chunks = n_entries / 4;
1954 __m512 t0, t1, t2, t3;
1957 for (unsigned int i = 0; i < n_chunks; ++i)
1959 t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
1960 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
1961 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
1962 t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
1963 t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
1964 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
1965 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
1966 t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
1967 t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
1968 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
1969 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
1970 t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
1971 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
1972 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
1973 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
1974 t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
1976 __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
1977 __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
1978 __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
1979 __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
1981 out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
1982 out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
1983 out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
1984 out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
1987 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
1999 const unsigned int n_entries,
2001 const unsigned int * offsets,
2004 const unsigned int n_chunks = n_entries / 4;
2005 for (unsigned int i = 0; i < n_chunks; ++i)
2007 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2008 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2010 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2012 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2013 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2014 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2015 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2016 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2018 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2019 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2020 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2021 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2022 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2023 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2024 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2025 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2026 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2027 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2028 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2029 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2030 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2031 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2032 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2033 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2040 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
2041 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2042 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
2043 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2044 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
2045 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2046 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
2047 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2048 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
2049 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2050 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
2051 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2052 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
2053 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2054 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
2055 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2056 res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
2057 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2058 res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
2059 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2060 res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
2061 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2062 res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
2063 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2064 res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
2065 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2066 res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
2067 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2068 res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
2069 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2070 res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
2071 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2075 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
2076 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
2077 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
2078 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
2079 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
2080 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
2081 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
2082 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
2083 _mm_storeu_ps(out + 4 * i + offsets[8], res8);
2084 _mm_storeu_ps(out + 4 * i + offsets[9], res9);
2085 _mm_storeu_ps(out + 4 * i + offsets[10], res10);
2086 _mm_storeu_ps(out + 4 * i + offsets[11], res11);
2087 _mm_storeu_ps(out + 4 * i + offsets[12], res12);
2088 _mm_storeu_ps(out + 4 * i + offsets[13], res13);
2089 _mm_storeu_ps(out + 4 * i + offsets[14], res14);
2090 _mm_storeu_ps(out + 4 * i + offsets[15], res15);
2096 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2097   for (unsigned int v = 0; v < 16; ++v)
2098     out[offsets[v] + i] += in[i][v];
2100 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2101   for (unsigned int v = 0; v < 16; ++v)
2102 out[offsets[v] + i] = in[i][v];
2113 const unsigned int n_entries,
2115 std::array<float *, 16> & out)
2119 const unsigned int n_chunks = n_entries / 4;
2120 for (unsigned int i = 0; i < n_chunks; ++i)
2122 __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
2123 __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
2125 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
2127 _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
2128 __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
2129 __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
2130 __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
2131 __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
2133 __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
2134 __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
2135 __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
2136 __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
2137 __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
2138 __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
2139 __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
2140 __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
2141 __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
2142 __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
2143 __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
2144 __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
2145 __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
2146 __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
2147 __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
2148 __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
2152 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
2153 _mm_storeu_ps(out[0] + 4 * i, res0);
2154 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
2155 _mm_storeu_ps(out[1] + 4 * i, res1);
2156 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
2157 _mm_storeu_ps(out[2] + 4 * i, res2);
2158 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
2159 _mm_storeu_ps(out[3] + 4 * i, res3);
2160 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
2161 _mm_storeu_ps(out[4] + 4 * i, res4);
2162 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
2163 _mm_storeu_ps(out[5] + 4 * i, res5);
2164 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
2165 _mm_storeu_ps(out[6] + 4 * i, res6);
2166 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
2167 _mm_storeu_ps(out[7] + 4 * i, res7);
2168 res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
2169 _mm_storeu_ps(out[8] + 4 * i, res8);
2170 res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
2171 _mm_storeu_ps(out[9] + 4 * i, res9);
2172 res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
2173 _mm_storeu_ps(out[10] + 4 * i, res10);
2174 res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
2175 _mm_storeu_ps(out[11] + 4 * i, res11);
2176 res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
2177 _mm_storeu_ps(out[12] + 4 * i, res12);
2178 res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
2179 _mm_storeu_ps(out[13] + 4 * i, res13);
2180 res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
2181 _mm_storeu_ps(out[14] + 4 * i, res14);
2182 res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
2183 _mm_storeu_ps(out[15] + 4 * i, res15);
2187 _mm_storeu_ps(out[0] + 4 * i, res0);
2188 _mm_storeu_ps(out[1] + 4 * i, res1);
2189 _mm_storeu_ps(out[2] + 4 * i, res2);
2190 _mm_storeu_ps(out[3] + 4 * i, res3);
2191 _mm_storeu_ps(out[4] + 4 * i, res4);
2192 _mm_storeu_ps(out[5] + 4 * i, res5);
2193 _mm_storeu_ps(out[6] + 4 * i, res6);
2194 _mm_storeu_ps(out[7] + 4 * i, res7);
2195 _mm_storeu_ps(out[8] + 4 * i, res8);
2196 _mm_storeu_ps(out[9] + 4 * i, res9);
2197 _mm_storeu_ps(out[10] + 4 * i, res10);
2198 _mm_storeu_ps(out[11] + 4 * i, res11);
2199 _mm_storeu_ps(out[12] + 4 * i, res12);
2200 _mm_storeu_ps(out[13] + 4 * i, res13);
2201 _mm_storeu_ps(out[14] + 4 * i, res14);
2202 _mm_storeu_ps(out[15] + 4 * i, res15);
2207 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2208   for (unsigned int v = 0; v < 16; ++v)
2209     out[v][i] += in[i][v];
2211 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2212   for (unsigned int v = 0; v < 16; ++v)
2213 out[v][i] = in[i][v];
2218 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
2250 template <typename U>
2262 data = _mm256_set1_pd(x);
2272 operator=(const double scalar) && = delete;
2282 return *(reinterpret_cast<double *>(&data) + comp);
2293 return *(reinterpret_cast<const double *>(&data) + comp);
2308 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2323 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2337 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2352 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2367 load(const double *ptr)
2369 data = _mm256_loadu_pd(ptr);
2374 load(const float *ptr)
2376 data = _mm256_cvtps_pd(_mm_loadu_ps(ptr));
2387 store(double *ptr) const
2389 _mm256_storeu_pd(ptr, data);
2394 store(float *ptr) const
2396 _mm_storeu_ps(ptr, _mm256_cvtpd_ps(data));
2407 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2409 _mm256_stream_pd(ptr, data);
2426 gather(const double *base_ptr, const unsigned int *offsets)
2432 const __m128 index_val =
2433   _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
2434 const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
2439 __m256d zero = _mm256_setzero_pd();
2442 data = _mm256_mask_i32gather_pd(zero, base_ptr, index, mask, 8);
2444 for (unsigned int i = 0; i < 4; ++i)
2445   *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
2463 scatter(const unsigned int *offsets, double *base_ptr) const
2466 for (unsigned int i = 0; i < 4; ++i)
2467   base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
2502 __m256d mask = _mm256_set1_pd(-0.);
2504 res.data = _mm256_andnot_pd(mask, data);
2535 template <typename Number2, std::size_t width2>
2538 template <typename Number2, std::size_t width2>
2541 template <typename Number2, std::size_t width2>
2545 template <typename Number2, std::size_t width2>
2560 const unsigned int * offsets,
2563 const unsigned int n_chunks = n_entries / 4;
2564 const double * in0 = in + offsets[0];
2565 const double * in1 = in + offsets[1];
2566 const double * in2 = in + offsets[2];
2567 const double * in3 = in + offsets[3];
2569 for (unsigned int i = 0; i < n_chunks; ++i)
2571 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2572 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2573 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2574 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2575 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2576 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2577 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2578 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2579 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2580 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2581 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2582 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2586 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2587   out[i].gather(in + i, offsets);
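// Explanatory note (added): with 4 double lanes a full 4x4 transpose needs
// only one cross-lane step (_mm256_permute2f128_pd swaps the 128-bit halves)
// followed by _mm256_unpacklo_pd/_mm256_unpackhi_pd to interleave within
// each half.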
2598 const std::array<double *, 4> &in,
2603 const unsigned int n_chunks = n_entries / 4;
2604 const double * in0 = in[0];
2605 const double * in1 = in[1];
2606 const double * in2 = in[2];
2607 const double * in3 = in[3];
2609 for (unsigned int i = 0; i < n_chunks; ++i)
2611 __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
2612 __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
2613 __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
2614 __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
2615 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2616 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2617 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2618 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2619 out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
2620 out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
2621 out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
2622 out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
2625 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2637 const unsigned int n_entries,
2639 const unsigned int * offsets,
2642 const unsigned int n_chunks = n_entries / 4;
2643 double * out0 = out + offsets[0];
2644 double * out1 = out + offsets[1];
2645 double * out2 = out + offsets[2];
2646 double * out3 = out + offsets[3];
2647 for (unsigned int i = 0; i < n_chunks; ++i)
2649 __m256d u0 = in[4 * i + 0].data;
2650 __m256d u1 = in[4 * i + 1].data;
2651 __m256d u2 = in[4 * i + 2].data;
2652 __m256d u3 = in[4 * i + 3].data;
2653 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2654 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2655 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2656 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2657 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2658 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2659 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2660 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2667 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2668 _mm256_storeu_pd(out0 + 4 * i, res0);
2669 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2670 _mm256_storeu_pd(out1 + 4 * i, res1);
2671 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2672 _mm256_storeu_pd(out2 + 4 * i, res2);
2673 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2674 _mm256_storeu_pd(out3 + 4 * i, res3);
2678 _mm256_storeu_pd(out0 + 4 * i, res0);
2679 _mm256_storeu_pd(out1 + 4 * i, res1);
2680 _mm256_storeu_pd(out2 + 4 * i, res2);
2681 _mm256_storeu_pd(out3 + 4 * i, res3);
2687 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2688   for (unsigned int v = 0; v < 4; ++v)
2689     out[offsets[v] + i] += in[i][v];
2691 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2692   for (unsigned int v = 0; v < 4; ++v)
2693 out[offsets[v] + i] = in[i][v];
2704 const unsigned int n_entries,
2706 std::array<double *, 4> & out)
2710 const unsigned int n_chunks = n_entries / 4;
2711 double * out0 = out[0];
2712 double * out1 = out[1];
2713 double * out2 = out[2];
2714 double * out3 = out[3];
2715 for (unsigned int i = 0; i < n_chunks; ++i)
2717 __m256d u0 = in[4 * i + 0].data;
2718 __m256d u1 = in[4 * i + 1].data;
2719 __m256d u2 = in[4 * i + 2].data;
2720 __m256d u3 = in[4 * i + 3].data;
2721 __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
2722 __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
2723 __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
2724 __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
2725 __m256d res0 = _mm256_unpacklo_pd(t0, t1);
2726 __m256d res1 = _mm256_unpackhi_pd(t0, t1);
2727 __m256d res2 = _mm256_unpacklo_pd(t2, t3);
2728 __m256d res3 = _mm256_unpackhi_pd(t2, t3);
2735 res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
2736 _mm256_storeu_pd(out0 + 4 * i, res0);
2737 res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
2738 _mm256_storeu_pd(out1 + 4 * i, res1);
2739 res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
2740 _mm256_storeu_pd(out2 + 4 * i, res2);
2741 res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
2742 _mm256_storeu_pd(out3 + 4 * i, res3);
2746 _mm256_storeu_pd(out0 + 4 * i, res0);
2747 _mm256_storeu_pd(out1 + 4 * i, res1);
2748 _mm256_storeu_pd(out2 + 4 * i, res2);
2749 _mm256_storeu_pd(out3 + 4 * i, res3);
2755 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2756   for (unsigned int v = 0; v < 4; ++v)
2757     out[v][i] += in[i][v];
2759 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
2760   for (unsigned int v = 0; v < 4; ++v)
2761 out[v][i] = in[i][v];
2796 template <typename U>
2808 data = _mm256_set1_ps(x);
2818 operator=(const float scalar) && = delete;
2828 return *(reinterpret_cast<float *>(&data) + comp);
2839 return *(reinterpret_cast<const float *>(&data) + comp);
2854 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2869 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2883 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2898 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
2913 load(const float *ptr)
2915 data = _mm256_loadu_ps(ptr);
2926 store(float *ptr) const
2928 _mm256_storeu_ps(ptr, data);
2939 Assert(reinterpret_cast<std::size_t>(ptr) % 32 == 0,
2941 _mm256_stream_ps(ptr, data);
2958 gather(const float *base_ptr, const unsigned int *offsets)
2964 const __m256 index_val =
2965   _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
2966 const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
2971 __m256 zero = _mm256_setzero_ps();
2974 data = _mm256_mask_i32gather_ps(zero, base_ptr, index, mask, 4);
2976 for (unsigned int i = 0; i < 8; ++i)
2977   *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
2995 scatter(const unsigned int *offsets, float *base_ptr) const
2998 for (unsigned int i = 0; i < 8; ++i)
2999   base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
3034 __m256 mask = _mm256_set1_ps(-0.f);
3036 res.data = _mm256_andnot_ps(mask, data);
3067 template <typename Number2, std::size_t width2>
3070 template <typename Number2, std::size_t width2>
3073 template <typename Number2, std::size_t width2>
3077 template <typename Number2, std::size_t width2>
3092 const unsigned int *offsets,
3095 const unsigned int n_chunks = n_entries / 4;
3096 for (unsigned int i = 0; i < n_chunks; ++i)
3100 __m256 t0, t1, t2, t3 = {};
3101 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
3102 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
3103 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
3104 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
3105 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
3106 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
3107 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
3108 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
3110 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3111 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3112 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3113 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3114 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3115 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3116 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3117 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3121 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3122   out[i].gather(in + i, offsets);
3133 const std::array<float *, 8> &in,
3138 const unsigned int n_chunks = n_entries / 4;
3139 for (unsigned int i = 0; i < n_chunks; ++i)
3141 __m256 t0, t1, t2, t3 = {};
3142 t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
3143 t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
3144 t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
3145 t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
3146 t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
3147 t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
3148 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
3149 t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
3151 __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
3152 __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
3153 __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
3154 __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
3155 out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
3156 out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
3157 out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
3158 out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
3161 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3173 const unsigned int n_entries,
3175 const unsigned int * offsets,
3178 const unsigned int n_chunks = n_entries / 4;
3179 for (unsigned int i = 0; i < n_chunks; ++i)
3181 __m256 u0 = in[4 * i + 0].data;
3182 __m256 u1 = in[4 * i + 1].data;
3183 __m256 u2 = in[4 * i + 2].data;
3184 __m256 u3 = in[4 * i + 3].data;
3185 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3186 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3187 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3188 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3189 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3190 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3191 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3192 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3193 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3194 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3195 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3196 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3197 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3198 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3199 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3200 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3207 res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
3208 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3209 res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
3210 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3211 res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
3212 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3213 res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
3214 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3215 res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
3216 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3217 res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
3218 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3219 res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
3220 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3221 res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
3222 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3226 _mm_storeu_ps(out + 4 * i + offsets[0], res0);
3227 _mm_storeu_ps(out + 4 * i + offsets[1], res1);
3228 _mm_storeu_ps(out + 4 * i + offsets[2], res2);
3229 _mm_storeu_ps(out + 4 * i + offsets[3], res3);
3230 _mm_storeu_ps(out + 4 * i + offsets[4], res4);
3231 _mm_storeu_ps(out + 4 * i + offsets[5], res5);
3232 _mm_storeu_ps(out + 4 * i + offsets[6], res6);
3233 _mm_storeu_ps(out + 4 * i + offsets[7], res7);
3239 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3240   for (unsigned int v = 0; v < 8; ++v)
3241     out[offsets[v] + i] += in[i][v];
3243 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3244   for (unsigned int v = 0; v < 8; ++v)
3245 out[offsets[v] + i] = in[i][v];
3256 const unsigned int n_entries,
3258 std::array<float *, 8> & out)
3262 const unsigned int n_chunks = n_entries / 4;
3263 for (unsigned int i = 0; i < n_chunks; ++i)
3265 __m256 u0 = in[4 * i + 0].data;
3266 __m256 u1 = in[4 * i + 1].data;
3267 __m256 u2 = in[4 * i + 2].data;
3268 __m256 u3 = in[4 * i + 3].data;
3269 __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
3270 __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
3271 __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
3272 __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
3273 u0 = _mm256_shuffle_ps(t0, t2, 0x88);
3274 u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
3275 u2 = _mm256_shuffle_ps(t1, t3, 0x88);
3276 u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
3277 __m128 res0 = _mm256_extractf128_ps(u0, 0);
3278 __m128 res4 = _mm256_extractf128_ps(u0, 1);
3279 __m128 res1 = _mm256_extractf128_ps(u1, 0);
3280 __m128 res5 = _mm256_extractf128_ps(u1, 1);
3281 __m128 res2 = _mm256_extractf128_ps(u2, 0);
3282 __m128 res6 = _mm256_extractf128_ps(u2, 1);
3283 __m128 res3 = _mm256_extractf128_ps(u3, 0);
3284 __m128 res7 = _mm256_extractf128_ps(u3, 1);
3288 res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
3289 _mm_storeu_ps(out[0] + 4 * i, res0);
3290 res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
3291 _mm_storeu_ps(out[1] + 4 * i, res1);
3292 res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
3293 _mm_storeu_ps(out[2] + 4 * i, res2);
3294 res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
3295 _mm_storeu_ps(out[3] + 4 * i, res3);
3296 res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
3297 _mm_storeu_ps(out[4] + 4 * i, res4);
3298 res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
3299 _mm_storeu_ps(out[5] + 4 * i, res5);
3300 res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
3301 _mm_storeu_ps(out[6] + 4 * i, res6);
3302 res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
3303 _mm_storeu_ps(out[7] + 4 * i, res7);
3307 _mm_storeu_ps(out[0] + 4 * i, res0);
3308 _mm_storeu_ps(out[1] + 4 * i, res1);
3309 _mm_storeu_ps(out[2] + 4 * i, res2);
3310 _mm_storeu_ps(out[3] + 4 * i, res3);
3311 _mm_storeu_ps(out[4] + 4 * i, res4);
3312 _mm_storeu_ps(out[5] + 4 * i, res5);
3313 _mm_storeu_ps(out[6] + 4 * i, res6);
3314 _mm_storeu_ps(out[7] + 4 * i, res7);
3319 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3320   for (unsigned int v = 0; v < 8; ++v)
3321     out[v][i] += in[i][v];
3323 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
3324   for (unsigned int v = 0; v < 8; ++v)
3325 out[v][i] = in[i][v];
3330 # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
3362 template <typename U>
3374 data = _mm_set1_pd(x);
3384 operator=(const double scalar) && = delete;
3394 return *(reinterpret_cast<double *>(&data) + comp);
3405 return *(reinterpret_cast<const double *>(&data) + comp);
3415 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3430 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3445 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3460 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3475 load(const double *ptr)
3477 data = _mm_loadu_pd(ptr);
3482 load(const float *ptr)
3485 for (unsigned int i = 0; i < 2; ++i)
3497 store(double *ptr) const
3499 _mm_storeu_pd(ptr, data);
3504 store(float *ptr) const
3507 for (unsigned int i = 0; i < 2; ++i)
3519 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3521 _mm_stream_pd(ptr, data);
3538 gather(const double *base_ptr, const unsigned int *offsets)
3540 for (unsigned int i = 0; i < 2; ++i)
3541   *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
3558 scatter(const unsigned int *offsets, double *base_ptr) const
3560 for (unsigned int i = 0; i < 2; ++i)
3561   base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
3597 __m128d mask = _mm_set1_pd(-0.);
3599 res.data = _mm_andnot_pd(mask, data);
3630 template <typename Number2, std::size_t width2>
3633 template <typename Number2, std::size_t width2>
3636 template <typename Number2, std::size_t width2>
3640 template <typename Number2, std::size_t width2>
3655 const unsigned int *offsets,
3658 const unsigned int n_chunks = n_entries / 2;
3659 for (unsigned int i = 0; i < n_chunks; ++i)
3661 __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
3662 __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
3663 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3664 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3668 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3669   for (unsigned int v = 0; v < 2; ++v)
3670 out[i][v] = in[offsets[v] + i];
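// Explanatory note (added): with only two double lanes the "transpose" is a
// single interleave: _mm_unpacklo_pd picks the first entry of both lanes and
// _mm_unpackhi_pd the second; leftover entries are copied element by element.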
3681 const std::array<double *, 2> &in,
3686 const unsigned int n_chunks = n_entries / 2;
3687 for (unsigned int i = 0; i < n_chunks; ++i)
3689 __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
3690 __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
3691 out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
3692 out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
3695 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3696   for (unsigned int v = 0; v < 2; ++v)
3697 out[i][v] = in[v][i];
3708 const unsigned int n_entries,
3710 const unsigned int * offsets,
3713 const unsigned int n_chunks = n_entries / 2;
3716 for (unsigned int i = 0; i < n_chunks; ++i)
3718 __m128d u0 = in[2 * i + 0].data;
3719 __m128d u1 = in[2 * i + 1].data;
3720 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3721 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3722 _mm_storeu_pd(out + 2 * i + offsets[0],
3723 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
3725 _mm_storeu_pd(out + 2 * i + offsets[1],
3726 _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
3730 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3731   for (unsigned int v = 0; v < 2; ++v)
3732     out[offsets[v] + i] += in[i][v];
3736 for (unsigned int i = 0; i < n_chunks; ++i)
3738 __m128d u0 = in[2 * i + 0].data;
3739 __m128d u1 = in[2 * i + 1].data;
3740 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3741 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3742 _mm_storeu_pd(out + 2 * i + offsets[0], res0);
3743 _mm_storeu_pd(out + 2 * i + offsets[1], res1);
3746 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3747   for (unsigned int v = 0; v < 2; ++v)
3748 out[offsets[v] + i] = in[i][v];
3760 const unsigned int n_entries,
3762 std::array<double *, 2> & out)
3766 const unsigned int n_chunks = n_entries / 2;
3769 for (unsigned int i = 0; i < n_chunks; ++i)
3771 __m128d u0 = in[2 * i + 0].data;
3772 __m128d u1 = in[2 * i + 1].data;
3773 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3774 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3775 _mm_storeu_pd(out[0] + 2 * i,
3776 _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
3777 _mm_storeu_pd(out[1] + 2 * i,
3778 _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
3781 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3782   for (unsigned int v = 0; v < 2; ++v)
3783     out[v][i] += in[i][v];
3787 for (unsigned int i = 0; i < n_chunks; ++i)
3789 __m128d u0 = in[2 * i + 0].data;
3790 __m128d u1 = in[2 * i + 1].data;
3791 __m128d res0 = _mm_unpacklo_pd(u0, u1);
3792 __m128d res1 = _mm_unpackhi_pd(u0, u1);
3793 _mm_storeu_pd(out[0] + 2 * i, res0);
3794 _mm_storeu_pd(out[1] + 2 * i, res1);
3797 for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
3798   for (unsigned int v = 0; v < 2; ++v)
3799 out[v][i] = in[i][v];
3839 template <typename U>
3848 data = _mm_set1_ps(x);
3858 operator=(const float scalar) && = delete;
3868 return *(reinterpret_cast<float *>(&data) + comp);
3879 return *(reinterpret_cast<const float *>(&data) + comp);
3889 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3904 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3919 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3934 # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
3949 load(const float *ptr)
3951 data = _mm_loadu_ps(ptr);
3962 store(float *ptr) const
3964 _mm_storeu_ps(ptr, data);
3975 Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
3977 _mm_stream_ps(ptr, data);
3994 gather(const float *base_ptr, const unsigned int *offsets)
3996 for (unsigned int i = 0; i < 4; ++i)
3997   *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
4014 scatter(const unsigned int *offsets, float *base_ptr) const
4016 for (unsigned int i = 0; i < 4; ++i)
4017   base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
4052 __m128 mask = _mm_set1_ps(-0.f);
4054 res.data = _mm_andnot_ps(mask, data);
4085 template <typename Number2, std::size_t width2>
4088 template <typename Number2, std::size_t width2>
4091 template <typename Number2, std::size_t width2>
4095 template <typename Number2, std::size_t width2>
4110 const unsigned int *offsets,
4113 const unsigned int n_chunks = n_entries / 4;
4114 for (unsigned int i = 0; i < n_chunks; ++i)
4116 __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
4117 __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
4118 __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
4119 __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
4120 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4121 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4122 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4123 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4124 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4125 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4126 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4127 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4131 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4132   for (unsigned int v = 0; v < 4; ++v)
4133 out[i][v] = in[offsets[v] + i];
4144 const std::array<float *, 4> &in,
4149 const unsigned int n_chunks = n_entries / 4;
4150 for (unsigned int i = 0; i < n_chunks; ++i)
4152 __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
4153 __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
4154 __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
4155 __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
4156 __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
4157 __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
4158 __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
4159 __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
4160 out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
4161 out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
4162 out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
4163 out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
4166 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4167   for (unsigned int v = 0; v < 4; ++v)
4168 out[i][v] = in[v][i];
4179 const unsigned int n_entries,
4181 const unsigned int * offsets,
4184 const unsigned int n_chunks = n_entries / 4;
4185 for (unsigned int i = 0; i < n_chunks; ++i)
4187 __m128 u0 = in[4 * i + 0].data;
4188 __m128 u1 = in[4 * i + 1].data;
4189 __m128 u2 = in[4 * i + 2].data;
4190 __m128 u3 = in[4 * i + 3].data;
4191 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4192 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4193 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4194 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4195 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4196 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4197 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4198 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4205 u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
4206 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4207 u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
4208 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4209 u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
4210 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4211 u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
4212 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4216 _mm_storeu_ps(out + 4 * i + offsets[0], u0);
4217 _mm_storeu_ps(out + 4 * i + offsets[1], u1);
4218 _mm_storeu_ps(out + 4 * i + offsets[2], u2);
4219 _mm_storeu_ps(out + 4 * i + offsets[3], u3);
4225 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4226   for (unsigned int v = 0; v < 4; ++v)
4227     out[offsets[v] + i] += in[i][v];
4229 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4230   for (unsigned int v = 0; v < 4; ++v)
4231 out[offsets[v] + i] = in[i][v];
4242 const unsigned int n_entries,
4244 std::array<float *, 4> & out)
4248 const unsigned int n_chunks = n_entries / 4;
4249 for (unsigned int i = 0; i < n_chunks; ++i)
4251 __m128 u0 = in[4 * i + 0].data;
4252 __m128 u1 = in[4 * i + 1].data;
4253 __m128 u2 = in[4 * i + 2].data;
4254 __m128 u3 = in[4 * i + 3].data;
4255 __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
4256 __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
4257 __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
4258 __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
4259 u0 = _mm_shuffle_ps(t0, t2, 0x88);
4260 u1 = _mm_shuffle_ps(t0, t2, 0xdd);
4261 u2 = _mm_shuffle_ps(t1, t3, 0x88);
4262 u3 = _mm_shuffle_ps(t1, t3, 0xdd);
4266 u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
4267 _mm_storeu_ps(out[0] + 4 * i, u0);
4268 u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
4269 _mm_storeu_ps(out[1] + 4 * i, u1);
4270 u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
4271 _mm_storeu_ps(out[2] + 4 * i, u2);
4272 u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
4273 _mm_storeu_ps(out[3] + 4 * i, u3);
4277 _mm_storeu_ps(out[0] + 4 * i, u0);
4278 _mm_storeu_ps(out[1] + 4 * i, u1);
4279 _mm_storeu_ps(out[2] + 4 * i, u2);
4280 _mm_storeu_ps(out[3] + 4 * i, u3);
4285 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4286   for (unsigned int v = 0; v < 4; ++v)
4287     out[v][i] += in[i][v];
4289 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
4290   for (unsigned int v = 0; v < 4; ++v)
4291 out[v][i] = in[i][v];
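// Usage sketch (not part of this header): the transpose kernels above are
// typically used as a pair -- gather several scalar arrays into vectorized
// form, work on all lanes at once, then scatter the results back. The function
// name, the array sizes, and the scaling factor below are made up for
// illustration, and the sketch assumes a build where VectorizedArray<float, 4>
// is available (e.g. with SSE enabled):
#include <deal.II/base/vectorization.h>

#include <array>

void
example_transpose_roundtrip()
{
  using dealii::VectorizedArray;

  constexpr unsigned int n_entries = 10;
  std::array<float, n_entries> a0{}, a1{}, a2{}, a3{};
  std::array<float *, 4> pointers = {{a0.data(), a1.data(), a2.data(), a3.data()}};

  // Gather into n_entries vectorized values: packed[i][v] == pointers[v][i].
  VectorizedArray<float, 4> packed[n_entries];
  dealii::vectorized_load_and_transpose(n_entries, pointers, packed);

  // Operate on all four lanes at once.
  for (unsigned int i = 0; i < n_entries; ++i)
    packed[i] = packed[i] * 2.f + 1.f;

  // 'false' overwrites the output arrays; 'true' would accumulate into them.
  dealii::vectorized_transpose_and_store(false, n_entries, packed, pointers);
}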
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
   defined(__VSX__)
// Members of the VectorizedArray<double, 2> specialization for VSX-enabled
// PowerPC processors (two double-precision lanes per vector register):

  template <typename U>
  DEAL_II_ALWAYS_INLINE
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
  {}

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const double x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const double scalar) && = delete;

  DEAL_II_ALWAYS_INLINE
  double &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const double &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 2);
    return *(reinterpret_cast<const double *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  void
  load(const double *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(double *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  gather(const double *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 2; ++i)
      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, double *base_ptr) const
  {
    for (unsigned int i = 0; i < 2; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
  }

  // The actual vector-register payload.
  __vector double data;
  // Grant the mathematical functions in namespace std, which are implemented
  // in terms of the data member, access to this class:
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  // (end of the VectorizedArray<double, 2> specialization)
// Members of the VectorizedArray<float, 4> specialization for VSX-enabled
// PowerPC processors (four single-precision lanes per vector register):

  template <typename U>
  DEAL_II_ALWAYS_INLINE
  VectorizedArray(const std::initializer_list<U> &list)
    : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
  {}

  DEAL_II_ALWAYS_INLINE
  VectorizedArray &
  operator=(const float x) &
  {
    data = vec_splats(x);
    return *this;
  }

  VectorizedArray &
  operator=(const float scalar) && = delete;

  DEAL_II_ALWAYS_INLINE
  float &
  operator[](const unsigned int comp)
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  const float &
  operator[](const unsigned int comp) const
  {
    AssertIndexRange(comp, 4);
    return *(reinterpret_cast<const float *>(&data) + comp);
  }

  DEAL_II_ALWAYS_INLINE
  void
  load(const float *ptr)
  {
    data = vec_vsx_ld(0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  store(float *ptr) const
  {
    vec_vsx_st(data, 0, ptr);
  }

  DEAL_II_ALWAYS_INLINE
  void
  gather(const float *base_ptr, const unsigned int *offsets)
  {
    for (unsigned int i = 0; i < 4; ++i)
      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
  }

  DEAL_II_ALWAYS_INLINE
  void
  scatter(const unsigned int *offsets, float *base_ptr) const
  {
    for (unsigned int i = 0; i < 4; ++i)
      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
  }

  // The actual vector-register payload.
  __vector float data;
  // As above, the std:: math functions are friends of this class:
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::sqrt(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::abs(const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::max(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  template <typename Number2, std::size_t width2>
  friend VectorizedArray<Number2, width2>
  std::min(const VectorizedArray<Number2, width2> &,
           const VectorizedArray<Number2, width2> &);
  // (end of the VectorizedArray<float, 4> specialization)
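// Usage sketch (not part of this header): element access, load/store, and
// gather/scatter look the same for every specialization. The offsets, buffer
// names, and function name below are made up; the sketch assumes a build in
// which VectorizedArray<double, 2> exists (e.g. VSX or SSE2):
#include <deal.II/base/vectorization.h>

void
example_gather_scatter(const double *src, double *dst)
{
  dealii::VectorizedArray<double, 2> v;

  // Read src[3] and src[7] into the two lanes, then scale them.
  const unsigned int offsets[2] = {3, 7};
  v.gather(src, offsets);
  v = v * 0.5;

  // Write the lanes back to dst[3] and dst[7].
  v.scatter(offsets, dst);
}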
template <typename Number, std::size_t width>
inline DEAL_II_ALWAYS_INLINE bool
operator==(const VectorizedArray<Number, width> &lhs,
           const VectorizedArray<Number, width> &rhs)
{
  for (unsigned int i = 0; i < VectorizedArray<Number, width>::size(); ++i)
    if (lhs[i] != rhs[i])
      return false;

  return true;
}
// What follows are the element-wise arithmetic operators for VectorizedArray:
// operator+, operator-, operator*, and operator/ for two VectorizedArray
// arguments, for a VectorizedArray combined with a scalar Number (in either
// order), the convenience variants that combine VectorizedArray<float> with a
// double scalar, and the unary operator+ and operator-. Each of them is
// declared with
//
//   template <typename Number, std::size_t width>
//
// (or template <std::size_t width> for the float/double mixtures) and is
// implemented in terms of the compound assignment operators of the class. The
// block ends with the output operator that prints the lanes separated by
// single spaces:

template <typename Number, std::size_t width>
inline std::ostream &
operator<<(std::ostream &out, const VectorizedArray<Number, width> &p)
{
  constexpr unsigned int n = VectorizedArray<Number, width>::size();
  for (unsigned int i = 0; i < n - 1; ++i)
    out << p[i] << ' ';
  out << p[n - 1];

  return out;
}
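// Usage sketch (not part of this header): the operators above allow mixing
// vectorized and scalar operands freely; the function name is made up and the
// width is whatever the build configuration selects by default:
#include <deal.II/base/vectorization.h>

#include <iostream>

void
example_operators()
{
  dealii::VectorizedArray<double> a = 2.0; // all lanes set to 2.0
  dealii::VectorizedArray<double> b = 3.0;

  // Element-wise arithmetic with scalars and vectorized values mixed.
  const auto c = 0.5 * (a + b) - 1.0;

  // Prints the lanes separated by spaces.
  std::cout << c << std::endl;
}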
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline Number
compare_and_apply_mask(const Number &left,
                       const Number &right,
                       const Number &true_value,
                       const Number &false_value)
{
  bool mask;
  switch (predicate)
    {
      case SIMDComparison::equal:                 mask = (left == right); break;
      case SIMDComparison::not_equal:             mask = (left != right); break;
      case SIMDComparison::less_than:             mask = (left < right);  break;
      case SIMDComparison::less_than_or_equal:    mask = (left <= right); break;
      case SIMDComparison::greater_than:          mask = (left > right);  break;
      case SIMDComparison::greater_than_or_equal: mask = (left >= right); break;
    }

  return mask ? true_value : false_value;
}

// Specialization for VectorizedArray<Number, 1>, forwarding to the scalar
// version above:
template <SIMDComparison predicate, typename Number>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<Number, 1>
compare_and_apply_mask(const VectorizedArray<Number, 1> &left,
                       const VectorizedArray<Number, 1> &right,
                       const VectorizedArray<Number, 1> &true_value,
                       const VectorizedArray<Number, 1> &false_value)
{
  VectorizedArray<Number, 1> result;
  result.data = compare_and_apply_mask<predicate, Number>(
    left.data, right.data, true_value.data, false_value.data);
  return result;
}
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 16>
compare_and_apply_mask(const VectorizedArray<float, 16> &left,
                       const VectorizedArray<float, 16> &right,
                       const VectorizedArray<float, 16> &true_values,
                       const VectorizedArray<float, 16> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_ps_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 16> result;
  result.data = _mm512_mask_mov_ps(false_values.data, mask, true_values.data);
  return result;
}


template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 8>
compare_and_apply_mask(const VectorizedArray<double, 8> &left,
                       const VectorizedArray<double, 8> &right,
                       const VectorizedArray<double, 8> &true_values,
                       const VectorizedArray<double, 8> &false_values)
{
  const __mmask16 mask =
    _mm512_cmp_pd_mask(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 8> result;
  result.data = _mm512_mask_mov_pd(false_values.data, mask, true_values.data);
  return result;
}
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)

template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<float, 8>
compare_and_apply_mask(const VectorizedArray<float, 8> &left,
                       const VectorizedArray<float, 8> &right,
                       const VectorizedArray<float, 8> &true_values,
                       const VectorizedArray<float, 8> &false_values)
{
  const __m256 mask =
    _mm256_cmp_ps(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<float, 8> result;
  result.data = _mm256_blendv_ps(false_values.data, true_values.data, mask);
  return result;
}


template <SIMDComparison predicate>
DEAL_II_ALWAYS_INLINE inline VectorizedArray<double, 4>
compare_and_apply_mask(const VectorizedArray<double, 4> &left,
                       const VectorizedArray<double, 4> &right,
                       const VectorizedArray<double, 4> &true_values,
                       const VectorizedArray<double, 4> &false_values)
{
  const __m256d mask =
    _mm256_cmp_pd(left.data, right.data, static_cast<int>(predicate));
  VectorizedArray<double, 4> result;
  result.data = _mm256_blendv_pd(false_values.data, true_values.data, mask);
  return result;
}
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)

// The SSE2 specializations of compare_and_apply_mask for
// VectorizedArray<float, 4> and VectorizedArray<double, 2> first compute a
// comparison mask with the intrinsic matching the predicate (a switch over
// _mm_cmpeq_ps, _mm_cmpneq_ps, _mm_cmplt_ps, _mm_cmple_ps, _mm_cmpgt_ps,
// _mm_cmpge_ps, resp. the corresponding _pd variants; not reproduced in this
// fragment) and then blend the two value vectors through bit operations,
// since SSE2 lacks a blend instruction:
//
//   result.data = _mm_or_ps(_mm_and_ps(mask, true_values.data),
//                           _mm_andnot_ps(mask, false_values.data));
//
//   result.data = _mm_or_pd(_mm_and_pd(mask, true_values.data),
//                           _mm_andnot_pd(mask, false_values.data));
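// Usage sketch (not part of this header): compare_and_apply_mask is the
// branch-free way to express lane-wise selection. The helper below (a made-up
// name) clamps every lane of x from below by the corresponding lane of
// 'lower', i.e. it computes a lane-wise maximum:
#include <deal.II/base/vectorization.h>

template <typename Number, std::size_t width>
dealii::VectorizedArray<Number, width>
clamp_below(const dealii::VectorizedArray<Number, width> &x,
            const dealii::VectorizedArray<Number, width> &lower)
{
  // Lanes where x < lower receive 'lower'; all other lanes keep 'x'.
  return dealii::compare_and_apply_mask<dealii::SIMDComparison::less_than>(
    x, lower, lower, x);
}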
// Helper trait in namespace internal: uniform lane access for plain scalars
// (treated as width 1) and for VectorizedArray objects of any width.
template <typename T>
struct VectorizedArrayTrait
{
  static constexpr std::size_t width = 1;

  static const T &
  get(const T &value, unsigned int c)
  {
    AssertDimension(c, 0);
    return value;
  }
};

template <typename T, std::size_t width_>
struct VectorizedArrayTrait<VectorizedArray<T, width_>>
{
  static constexpr std::size_t width = width_;

  static const T &
  get(const VectorizedArray<T, width_> &values, unsigned int c)
  {
    AssertIndexRange(c, width_);
    return values[c];
  }
};
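// Usage sketch (not part of this header): the trait lets the same code iterate
// over the lanes of a plain scalar and of a VectorizedArray. The function name
// is made up, and the sketch assumes the trait interface shown above
// (internal::VectorizedArrayTrait with a static 'width' and 'get()'):
#include <deal.II/base/vectorization.h>

template <typename T>
double
sum_of_lanes(const T &value)
{
  double sum = 0;
  for (unsigned int lane = 0;
       lane < dealii::internal::VectorizedArrayTrait<T>::width;
       ++lane)
    sum += dealii::internal::VectorizedArrayTrait<T>::get(value, lane);
  return sum;
}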
// Overloads of the standard mathematical functions in namespace std that act
// lane by lane on VectorizedArray arguments:

template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
sin(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::sin(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
cos(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::cos(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
tan(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::tan(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
exp(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::exp(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
log(const ::dealii::VectorizedArray<Number, width> &x)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::log(x[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
sqrt(const ::dealii::VectorizedArray<Number, width> &x)
{
  return x.get_sqrt();
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
pow(const ::dealii::VectorizedArray<Number, width> &x, const Number p)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::pow(x[i], p);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
pow(const ::dealii::VectorizedArray<Number, width> &x,
    const ::dealii::VectorizedArray<Number, width> &p)
{
  Number values[::dealii::VectorizedArray<Number, width>::size()];
  for (unsigned int i = 0;
       i < ::dealii::VectorizedArray<Number, width>::size();
       ++i)
    values[i] = std::pow(x[i], p[i]);
  ::dealii::VectorizedArray<Number, width> out;
  out.load(&values[0]);
  return out;
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
abs(const ::dealii::VectorizedArray<Number, width> &x)
{
  return x.get_abs();
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
max(const ::dealii::VectorizedArray<Number, width> &x,
    const ::dealii::VectorizedArray<Number, width> &y)
{
  return x.get_max(y);
}


template <typename Number, std::size_t width>
inline ::dealii::VectorizedArray<Number, width>
min(const ::dealii::VectorizedArray<Number, width> &x,
    const ::dealii::VectorizedArray<Number, width> &y)
{
  return x.get_min(y);
}
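// Usage sketch (not part of this header): the std:: overloads act lane by
// lane, so vectorized code can call the usual math functions unchanged. The
// function name and the numbers are made up:
#include <deal.II/base/vectorization.h>

#include <cmath>

void
example_std_overloads()
{
  dealii::VectorizedArray<double> x = 0.25;

  const auto y = std::sin(x) + std::cos(x);  // lane-wise sine and cosine
  const auto z = std::pow(std::abs(y), 2.0); // lane-wise |y|^2
  const auto w = std::max(y, z);             // lane-wise maximum
  (void)w;
}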