evaluation_kernels_face.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2017 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#ifndef dealii_matrix_free_evaluation_kernels_face_h
17#define dealii_matrix_free_evaluation_kernels_face_h
18
19#include <deal.II/base/config.h>
20
25
31
32
34
35
36namespace internal
37{
38 template <bool symmetric_evaluate,
39 int dim,
40 int fe_degree,
41 int n_q_points_1d,
42 typename Number>
44 {
45 // We enable a transformation to collocation for derivatives if it gives
46 // correct results (first two conditions), if it is the most efficient
47 // choice in terms of operation counts (third condition) and if we were
48 // able to initialize the fields in shape_info.templates.h from the
49 // polynomials (fourth condition).
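// (With the collocation approach, the values are first interpolated to the
// quadrature points and the tangential derivatives are then computed in the
// Lagrange basis defined on those points, see the symmetric_evaluate branch
// in evaluate_in_face() below; this reduces the number of 1D sweeps when
// n_q_points_1d is close to fe_degree + 1.)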
50 using Number2 =
52
55 dim - 1,
56 fe_degree + 1,
57 n_q_points_1d,
58 Number,
59 Number2>;
60
61 static Eval
64 const unsigned int subface_index,
65 const unsigned int direction)
66 {
68 return Eval(data.shape_values_eo,
71 data.fe_degree + 1,
72 data.n_q_points_1d);
73 else if (subface_index >= GeometryInfo<dim>::max_children_per_cell)
74 return Eval(data.shape_values,
75 data.shape_gradients,
76 data.shape_hessians,
77 data.fe_degree + 1,
78 data.n_q_points_1d);
79 else
80 {
81 const unsigned int index =
82 direction == 0 ? subface_index % 2 : subface_index / 2;
83 return Eval(data.values_within_subface[index],
86 data.fe_degree + 1,
87 data.n_q_points_1d);
88 }
89 }
90
91 static void
93 const unsigned int n_components,
96 Number *values_dofs,
97 Number *values_quad,
98 Number *gradients_quad,
99 Number *hessians_quad,
100 Number *scratch_data,
101 const unsigned int subface_index)
102 {
103 Eval eval0 = create_evaluator_tensor_product(data, subface_index, 0);
104 Eval eval1 = create_evaluator_tensor_product(data, subface_index, 1);
105
106 const std::size_t n_dofs = fe_degree > -1 ?
107 Utilities::pow(fe_degree + 1, dim - 1) :
108 Utilities::pow(data.fe_degree + 1, dim - 1);
109 const std::size_t n_q_points =
110 fe_degree > -1 ? Utilities::pow(n_q_points_1d, dim - 1) :
111 Utilities::pow(data.n_q_points_1d, dim - 1);
112
113 // keep a copy of the original pointer for the case of the Hessians
114 Number *values_dofs_ptr = values_dofs;
115
118 for (unsigned int c = 0; c < n_components; ++c)
119 {
120 switch (dim)
121 {
122 case 3:
123 eval0.template values<0, true, false>(values_dofs,
124 values_quad);
125 eval1.template values<1, true, false>(values_quad,
126 values_quad);
127 break;
128 case 2:
129 eval0.template values<0, true, false>(values_dofs,
130 values_quad);
131 break;
132 case 1:
133 values_quad[0] = values_dofs[0];
134 break;
135 default:
137 }
138 // Note: we always keep storage of values, 1st and 2nd derivatives
139 // in an array
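// (per component, values_dofs holds the face values, the first normal
//  derivative, and the second normal derivative in consecutive blocks of
//  n_dofs entries each, hence the stride of 3 * n_dofs below)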
140 values_dofs += 3 * n_dofs;
141 values_quad += n_q_points;
142 }
144 for (unsigned int c = 0; c < n_components; ++c)
145 {
146 switch (dim)
147 {
148 case 3:
149 if (symmetric_evaluate &&
150 use_collocation_evaluation(fe_degree, n_q_points_1d))
151 {
152 eval0.template values<0, true, false>(values_dofs,
153 values_quad);
154 eval0.template values<1, true, false>(values_quad,
155 values_quad);
157 dim - 1,
158 n_q_points_1d,
159 n_q_points_1d,
160 Number,
161 Number2>
164 values_quad, gradients_quad);
166 values_quad, gradients_quad + 1);
167 }
168 else
169 {
170 // grad x
171 eval0.template gradients<0, true, false>(values_dofs,
172 scratch_data);
173 eval1.template values<1, true, false, 3>(scratch_data,
174 gradients_quad);
175
176 // grad y
177 eval0.template values<0, true, false>(values_dofs,
178 scratch_data);
180 scratch_data, gradients_quad + 1);
181
183 eval1.template values<1, true, false>(scratch_data,
184 values_quad);
185 }
186 // grad z
187 eval0.template values<0, true, false>(values_dofs + n_dofs,
188 scratch_data);
189 eval1.template values<1, true, false, 3>(scratch_data,
190 gradients_quad + 2);
191
192 break;
193 case 2:
194 eval0.template values<0, true, false, 2>(values_dofs + n_dofs,
195 gradients_quad + 1);
196 eval0.template gradients<0, true, false, 2>(values_dofs,
197 gradients_quad);
199 eval0.template values<0, true, false>(values_dofs,
200 values_quad);
201 break;
202 case 1:
203 values_quad[0] = values_dofs[0];
204 gradients_quad[0] = values_dofs[1];
205 break;
206 default:
208 }
209 values_dofs += 3 * n_dofs;
210 values_quad += n_q_points;
211 gradients_quad += dim * n_q_points;
212 }
213
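// The face Hessians are stored as dim * (dim + 1) / 2 packed symmetric
// components per quadrature point; in 3D the order produced below is
// xx, yy, zz, xy, xz, yz (offsets 0 to 5 times n_q_points).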
215 {
216 values_dofs = values_dofs_ptr;
217 for (unsigned int c = 0; c < n_components; ++c)
218 {
219 switch (dim)
220 {
221 case 3:
222 // grad xx
223 eval0.template hessians<0, true, false>(values_dofs,
224 scratch_data);
225 eval1.template values<1, true, false>(scratch_data,
226 hessians_quad);
227
228 // grad yy
229 eval0.template values<0, true, false>(values_dofs,
230 scratch_data);
231 eval1.template hessians<1, true, false>(scratch_data,
232 hessians_quad +
233 n_q_points);
234
235 // grad zz
236 eval0.template values<0, true, false>(values_dofs +
237 2 * n_dofs,
238 scratch_data);
239 eval1.template values<1, true, false>(scratch_data,
240 hessians_quad +
241 2 * n_q_points);
242
243 // grad xy
244 eval0.template gradients<0, true, false>(values_dofs,
245 scratch_data);
246 eval1.template gradients<1, true, false>(scratch_data,
247 hessians_quad +
248 3 * n_q_points);
249
250 // grad xz
251 eval0.template gradients<0, true, false>(values_dofs +
252 n_dofs,
253 scratch_data);
254 eval1.template values<1, true, false>(scratch_data,
255 hessians_quad +
256 4 * n_q_points);
257
258 // grad yz
259 eval0.template values<0, true, false>(values_dofs + n_dofs,
260 scratch_data);
261 eval1.template gradients<1, true, false>(scratch_data,
262 hessians_quad +
263 5 * n_q_points);
264
265 break;
266 case 2:
267 // grad xx
268 eval0.template hessians<0, true, false>(values_dofs,
269 hessians_quad);
270 // grad yy
272 values_dofs + 2 * n_dofs, hessians_quad + n_q_points);
273 // grad xy
275 values_dofs + n_dofs, hessians_quad + 2 * n_q_points);
276 break;
277 case 1:
278 hessians_quad[0] = values_dofs[2];
279 break;
280 default:
282 }
283 values_dofs += 3 * n_dofs;
284 hessians_quad += dim * (dim + 1) / 2 * n_q_points;
285 }
286 }
287 }
288
289 static void
291 const unsigned int n_components,
294 Number *values_dofs,
295 Number *values_quad,
296 Number *gradients_quad,
297 Number *hessians_quad,
298 Number *scratch_data,
299 const unsigned int subface_index)
300 {
301 Eval eval0 = create_evaluator_tensor_product(data, subface_index, 0);
302 Eval eval1 = create_evaluator_tensor_product(data, subface_index, 1);
303
304 const std::size_t n_dofs =
305 fe_degree > -1 ?
306 Utilities::pow(fe_degree + 1, dim - 1) :
307 (dim > 1 ? Utilities::fixed_power<dim - 1>(data.fe_degree + 1) : 1);
308 const std::size_t n_q_points =
309 fe_degree > -1 ? Utilities::pow(n_q_points_1d, dim - 1) :
310 Utilities::pow(data.n_q_points_1d, dim - 1);
311
312 // keep a copy of the original pointer for the case of the Hessians
313 Number *values_dofs_ptr = values_dofs;
314
317 for (unsigned int c = 0; c < n_components; ++c)
318 {
319 switch (dim)
320 {
321 case 3:
322 eval1.template values<1, false, false>(values_quad,
323 values_quad);
324 eval0.template values<0, false, false>(values_quad,
325 values_dofs);
326 break;
327 case 2:
328 eval0.template values<0, false, false>(values_quad,
329 values_dofs);
330 break;
331 case 1:
332 values_dofs[0] = values_quad[0];
333 break;
334 default:
336 }
337 values_dofs += 3 * n_dofs;
338 values_quad += n_q_points;
339 }
341 for (unsigned int c = 0; c < n_components; ++c)
342 {
343 switch (dim)
344 {
345 case 3:
346 // grad z
347 eval1.template values<1, false, false, 3>(gradients_quad + 2,
348 scratch_data);
349 eval0.template values<0, false, false>(scratch_data,
350 values_dofs + n_dofs);
351 if (symmetric_evaluate &&
352 use_collocation_evaluation(fe_degree, n_q_points_1d))
353 {
355 dim - 1,
356 n_q_points_1d,
357 n_q_points_1d,
358 Number,
359 Number2>
363 gradients_quad + 1, values_quad);
364 else
366 gradients_quad + 1, values_quad);
368 gradients_quad, values_quad);
369 eval0.template values<1, false, false>(values_quad,
370 values_quad);
371 eval0.template values<0, false, false>(values_quad,
372 values_dofs);
373 }
374 else
375 {
377 {
378 eval1.template values<1, false, false>(values_quad,
379 scratch_data);
381 gradients_quad + 1, scratch_data);
382 }
383 else
385 gradients_quad + 1, scratch_data);
386
387 // grad y
388 eval0.template values<0, false, false>(scratch_data,
389 values_dofs);
390
391 // grad x
392 eval1.template values<1, false, false, 3>(gradients_quad,
393 scratch_data);
394 eval0.template gradients<0, false, true>(scratch_data,
395 values_dofs);
396 }
397 break;
398 case 2:
399 eval0.template values<0, false, false, 2>(gradients_quad + 1,
400 values_dofs +
401 n_dofs);
402 eval0.template gradients<0, false, false, 2>(gradients_quad,
403 values_dofs);
405 eval0.template values<0, false, true>(values_quad,
406 values_dofs);
407 break;
408 case 1:
409 values_dofs[0] = values_quad[0];
410 values_dofs[1] = gradients_quad[0];
411 break;
412 default:
414 }
415 values_dofs += 3 * n_dofs;
416 values_quad += n_q_points;
417 gradients_quad += dim * n_q_points;
418 }
419
421 {
422 values_dofs = values_dofs_ptr;
423 for (unsigned int c = 0; c < n_components; ++c)
424 {
425 switch (dim)
426 {
427 case 3:
428 // grad xx
429 eval1.template values<1, false, false>(hessians_quad,
430 scratch_data);
433 eval0.template hessians<0, false, true>(scratch_data,
434 values_dofs);
435 else
436 eval0.template hessians<0, false, false>(scratch_data,
437 values_dofs);
438
439 // grad yy
440 eval1.template hessians<1, false, false>(hessians_quad +
441 n_q_points,
442 scratch_data);
443 eval0.template values<0, false, true>(scratch_data,
444 values_dofs);
445
446 // grad zz
447 eval1.template values<1, false, false>(hessians_quad +
448 2 * n_q_points,
449 scratch_data);
450 eval0.template values<0, false, false>(scratch_data,
451 values_dofs +
452 2 * n_dofs);
453
454 // grad xy
455 eval1.template gradients<1, false, false>(hessians_quad +
456 3 * n_q_points,
457 scratch_data);
458 eval0.template gradients<0, false, true>(scratch_data,
459 values_dofs);
460
461 // grad xz
462 eval1.template values<1, false, false>(hessians_quad +
463 4 * n_q_points,
464 scratch_data);
466 eval0.template gradients<0, false, true>(scratch_data,
467 values_dofs +
468 n_dofs);
469 else
470 eval0.template gradients<0, false, false>(scratch_data,
471 values_dofs +
472 n_dofs);
473
474 // grad yz
475 eval1.template gradients<1, false, false>(hessians_quad +
476 5 * n_q_points,
477 scratch_data);
478 eval0.template values<0, false, true>(scratch_data,
479 values_dofs + n_dofs);
480
481 break;
482 case 2:
483 // grad xx
486 eval0.template hessians<0, false, true>(hessians_quad,
487 values_dofs);
488 else
489 eval0.template hessians<0, false, false>(hessians_quad,
490 values_dofs);
491
492 // grad yy
494 hessians_quad + n_q_points, values_dofs + 2 * n_dofs);
495 // grad xy
498 hessians_quad + 2 * n_q_points, values_dofs + n_dofs);
499 else
501 hessians_quad + 2 * n_q_points, values_dofs + n_dofs);
502 break;
503 case 1:
504 values_dofs[2] = hessians_quad[0];
506 values_dofs[0] = 0;
508 values_dofs[1] = 0;
509 break;
510 default:
512 }
513 values_dofs += 3 * n_dofs;
514 hessians_quad += dim * (dim + 1) / 2 * n_q_points;
515 }
516 }
517 }
518 };
519
520
521
522 template <int dim, int fe_degree, int n_q_points_1d, typename Number>
524 {
525 using Number2 =
527
532 template <bool do_integrate>
533 static inline void
537 &shape_data,
538 Number *values_dofs_in,
539 Number *values,
540 Number *gradients,
541 Number *scratch_data,
542 const unsigned int subface_index,
543 const unsigned int face_direction)
544 {
545 AssertDimension(shape_data.size(), 2);
546
547 const int degree = fe_degree != -1 ? fe_degree : shape_data[0].fe_degree;
548 const int n_rows_n = degree + 1;
549 const int n_rows_t = degree;
553 {{n_rows_t, n_rows_t, n_rows_n}}}};
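// (Raviart-Thomas bases are anisotropic: each vector component has
//  n_rows_n = degree + 1 points in its normal direction and
//  n_rows_t = degree points in the tangential directions, which is what
//  the dofs_per_direction table above encodes.)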
554
555 (void)scratch_data;
556 (void)subface_index;
557 // TODO: This is currently not implemented, but the test
558 // matrix_vector_rt_face_03 apparently works without it -> check
559 // if (subface_index < GeometryInfo<dim - 1>::max_children_per_cell)
560 // DEAL_II_NOT_IMPLEMENTED();
561
563 dim - 1,
564 (fe_degree > 0 ? fe_degree : 0),
565 n_q_points_1d,
566 Number,
567 Number2>;
568
569 std::array<int, dim> values_dofs_offsets = {};
570 for (unsigned int comp = 0; comp < dim - 1; ++comp)
571 {
572 if (dim == 2)
575 3 * dofs_per_direction[comp][(face_direction + 1) % dim];
576 else
579 3 * dofs_per_direction[comp][(face_direction + 1) % dim] *
581 }
582
583 // Jacobians on faces are reordered to enable simple access with the
584 // regular evaluators; to get the RT Piola transform right, we need to
585 // pass through the values_dofs array in the correct, permuted order
586 std::array<unsigned int, dim> components;
587 for (unsigned int comp = 0; comp < dim; ++comp)
588 components[comp] = (face_direction + comp + 1) % dim;
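// (for example, face_direction == 1 in 3D yields the processing order
//  {2, 0, 1}, so the component normal to the face is always handled last)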
589
590 for (const unsigned int comp : components)
591 {
592 Number *values_dofs = values_dofs_in + values_dofs_offsets[comp];
593
594 std::array<int, 2> n_blocks{
596 (dim > 2 ? dofs_per_direction[comp][(face_direction + 2) % dim] :
597 1)}};
598
599 if constexpr (dim == 3)
600 {
602 dim - 1,
603 n_q_points_1d,
604 n_q_points_1d,
605 Number,
606 Number2>
607 eval_g({},
608 shape_data[0].shape_gradients_collocation_eo.data(),
609 {});
610 if (!do_integrate)
611 {
613 fe_degree,
614 n_q_points_1d,
615 true>
616 eval;
617 // Evaluate in 3d
618 if (n_blocks[0] == n_rows_n)
619 {
620 eval.template normal<0>(shape_data[0],
621 values_dofs,
622 values);
623 eval.template tangential<1, 0>(shape_data[1],
624 values,
625 values);
626
628 {
629 eval.template normal<0>(shape_data[0],
630 values_dofs +
631 n_blocks[0] * n_blocks[1],
632 scratch_data);
634 scratch_data,
635 gradients + 2);
636 }
637 }
638 else if (n_blocks[1] == n_rows_n)
639 {
640 eval.template normal<1>(shape_data[0],
641 values_dofs,
642 values);
643 eval.template tangential<0, 1>(shape_data[1],
644 values,
645 values);
646
648 {
649 eval.template normal<1>(shape_data[0],
650 values_dofs +
651 n_blocks[0] * n_blocks[1],
652 scratch_data);
654 scratch_data,
655 gradients + 2);
656 }
657 }
658 else
659 {
660 Eval eval(shape_data[1].shape_values_eo.data(), {}, {});
661 eval.template values<0, true, false>(values_dofs, values);
662 eval.template values<1, true, false>(values, values);
664 {
665 eval.template values<0, true, false>(values_dofs +
666 n_blocks[0] *
667 n_blocks[1],
668 scratch_data);
670 scratch_data, gradients + 2);
671 }
672 }
674 {
676 gradients);
678 gradients +
679 1);
680 }
681 }
682 else
683 {
685 fe_degree,
686 n_q_points_1d,
687 false>
688 eval;
689 // Integrate in 3d
691 {
694 gradients, values);
695 else
697 gradients, values);
698 eval_g.template gradients<1, false, true, dim>(gradients +
699 1,
700 values);
701 }
702 if (n_blocks[0] == n_rows_n)
703 {
704 eval.template tangential<1, 0>(shape_data[1],
705 values,
706 values);
707 eval.template normal<0>(shape_data[0],
708 values,
709 values_dofs);
710
712 {
714 gradients + 2,
715 scratch_data);
716 eval.template normal<0>(shape_data[0],
717 scratch_data,
718 values_dofs +
719 n_blocks[0] * n_blocks[1]);
720 }
721 }
722 else if (n_blocks[1] == n_rows_n)
723 {
724 eval.template tangential<0, 1>(shape_data[1],
725 values,
726 values);
727 eval.template normal<1>(shape_data[0],
728 values,
729 values_dofs);
730
732 {
734 gradients + 2,
735 scratch_data);
736 eval.template normal<1>(shape_data[0],
737 scratch_data,
738 values_dofs +
739 n_blocks[0] * n_blocks[1]);
740 }
741 }
742 else
743 {
744 Eval eval_iso(shape_data[1].shape_values_eo.data(),
745 {},
746 {});
747 eval_iso.template values<1, false, false>(values, values);
748 eval_iso.template values<0, false, false>(values,
749 values_dofs);
751 {
753 gradients + 2, scratch_data);
755 scratch_data,
756 values_dofs + n_blocks[0] * n_blocks[1]);
757 }
758 }
759 }
760 }
761 else
762 {
764 dim - 1,
765 fe_degree + 1,
766 n_q_points_1d,
767 Number,
768 Number2>;
769 if (!do_integrate)
770 {
771 // Evaluate in 2d
772 if (n_blocks[0] == n_rows_n)
773 {
774 EvalN eval(shape_data[0].shape_values_eo,
775 shape_data[0].shape_gradients_eo,
776 {});
777 eval.template values<0, true, false>(values_dofs, values);
779 {
781 values_dofs, gradients);
783 values_dofs + n_rows_n, gradients + 1);
784 }
785 }
786 else
787 {
788 Eval eval(shape_data[1].shape_values_eo,
789 shape_data[1].shape_gradients_eo,
790 {});
791 eval.template values<0, true, false>(values_dofs, values);
793 {
795 values_dofs, gradients);
797 values_dofs + n_rows_t, gradients + 1);
798 }
799 }
800 }
801 else
802 {
803 // Integrate in 2d
804 if (n_blocks[0] == n_rows_n)
805 {
806 EvalN eval(shape_data[0].shape_values_eo,
807 shape_data[0].shape_gradients_eo,
808 {});
810 eval.template values<0, false, false>(values,
811 values_dofs);
813 {
816 gradients, values_dofs);
817 else
819 gradients, values_dofs);
821 gradients + 1, values_dofs + n_rows_n);
822 }
823 }
824 else
825 {
826 Eval eval(shape_data[1].shape_values_eo,
827 shape_data[1].shape_gradients_eo,
828 {});
830 eval.template values<0, false, false>(values,
831 values_dofs);
833 {
836 gradients, values_dofs);
837 else
839 gradients, values_dofs);
841 gradients + 1, values_dofs + n_rows_t);
842 }
843 }
844 }
845 }
846 values += Utilities::pow(n_q_points_1d, dim - 1);
847 gradients += dim * Utilities::pow(n_q_points_1d, dim - 1);
848 }
849 }
850 };
851
852
853
854 template <int dim, int fe_degree, typename Number>
856 {
857 using Number2 =
859
860 template <bool do_evaluate, bool add_into_output>
861 static void
862 interpolate(const unsigned int n_components,
865 const Number *input,
866 Number *output,
867 const unsigned int face_no)
868 {
869 Assert(static_cast<unsigned int>(fe_degree) ==
870 shape_info.data.front().fe_degree ||
871 fe_degree == -1,
875 n_components, input, output, flags, face_no, shape_info);
876 else
878 n_components,
879 input,
880 output,
881 flags,
882 face_no,
883 shape_info.data.front().fe_degree + 1,
884 shape_info.data.front().shape_data_on_face,
886 3 * shape_info.dofs_per_component_on_face);
887 }
888
892 template <bool do_evaluate, bool add_into_output>
893 static void
895 const unsigned int n_components,
898 const Number *input,
899 Number *output,
900 const unsigned int face_no)
901 {
902 Assert(static_cast<unsigned int>(fe_degree + 1) ==
903 shape_info.data.front().n_q_points_1d ||
904 fe_degree == -1,
906
908 n_components,
909 input,
910 output,
911 flags,
912 face_no,
913 shape_info.data.front().quadrature.size(),
914 shape_info.data.front().quadrature_data_on_face,
915 shape_info.n_q_points,
916 shape_info.n_q_points_face);
917 }
918
919 private:
920 template <bool do_evaluate, bool add_into_output, int face_direction = 0>
921 static void
922 interpolate_generic(const unsigned int n_components,
923 const Number *input,
924 Number *output,
926 const unsigned int face_no,
927 const unsigned int n_points_1d,
928 const std::array<AlignedVector<Number2>, 2> &shape_data,
929 const unsigned int dofs_per_component_on_cell,
930 const unsigned int dofs_per_component_on_face)
931 {
932 if (face_direction == face_no / 2)
933 {
934 constexpr int stride_ = Utilities::pow(fe_degree + 1, face_direction);
935
936 const int n_rows = fe_degree != -1 ? fe_degree + 1 : n_points_1d;
937 const int stride = Utilities::pow(n_rows, face_direction);
938 const std::array<int, 2> n_blocks{
939 {(dim > 1 ? n_rows : 1), (dim > 2 ? n_rows : 1)}};
940 std::array<int, 2> steps;
941 if constexpr (face_direction == 0)
942 steps = {{n_rows, 0}};
943 else if constexpr (face_direction == 1 && dim == 2)
944 steps = {{1, 0}};
945 else if constexpr (face_direction == 1)
946 // in 3d, the coordinate system is zx, not xz -> switch indices
947 steps = {{n_rows * n_rows, -n_rows * n_rows * n_rows + 1}};
948 else if constexpr (face_direction == 2)
949 steps = {{1, 0}};
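// The 'steps' pairs describe how to walk over the starting indices of the
// 1D lines normal to the face that interpolate_to_face() processes:
// steps[0] is the increment between consecutive lines in the inner block
// loop and steps[1] is the additional jump after n_blocks[0] lines. For
// face_direction == 1 in 3D, the inner loop runs over z with stride
// n_rows * n_rows and the negative second entry rewinds to the next x
// index, consistent with the zx ordering noted above.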
950
951 for (unsigned int c = 0; c < n_components; ++c)
952 {
953 if (flag & EvaluationFlags::hessians)
954 interpolate_to_face<fe_degree + 1,
955 stride_,
956 do_evaluate,
958 2>(shape_data[face_no % 2].begin(),
959 n_blocks,
960 steps,
961 input,
962 output,
963 n_rows,
964 stride);
965 else if (flag & EvaluationFlags::gradients)
966 interpolate_to_face<fe_degree + 1,
967 stride_,
968 do_evaluate,
970 1>(shape_data[face_no % 2].begin(),
971 n_blocks,
972 steps,
973 input,
974 output,
975 n_rows,
976 stride);
977 else
978 interpolate_to_face<fe_degree + 1,
979 stride_,
980 do_evaluate,
982 0>(shape_data[face_no % 2].begin(),
983 n_blocks,
984 steps,
985 input,
986 output,
987 n_rows,
988 stride);
989 if (do_evaluate)
990 {
991 input += dofs_per_component_on_cell;
992 output += dofs_per_component_on_face;
993 }
994 else
995 {
996 output += dofs_per_component_on_cell;
997 input += dofs_per_component_on_face;
998 }
999 }
1000 }
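// if the compile-time face_direction does not yet match the normal
// direction of the requested face, recurse with the next direction so that
// 'stride_' stays a compile-time constant for fe_degree > -1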
1001 else if (face_direction < dim)
1002 {
1003 interpolate_generic<do_evaluate,
1005 std::min(face_direction + 1, dim - 1)>(
1006 n_components,
1007 input,
1008 output,
1009 flag,
1010 face_no,
1012 shape_data,
1013 dofs_per_component_on_cell,
1014 dofs_per_component_on_face);
1015 }
1016 }
1017
1018 template <bool do_evaluate,
1019 bool add_into_output,
1020 int face_direction = 0,
1021 int max_derivative = 0>
1022 static void
1024 const unsigned int n_components,
1025 const Number *input,
1026 Number *output,
1028 const unsigned int face_no,
1030 {
1031 if (dim == 1)
1032 {
1033 // This should never happen since FE_RaviartThomasNodal is not
1034 // defined for dim = 1. The early return prevents compiler warnings
1035 // about infinite recursion.
1037 return;
1038 }
1039
1040 bool increase_max_der = false;
1041 if ((flag & EvaluationFlags::hessians && max_derivative < 2) ||
1043 increase_max_der = true;
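// when gradients or Hessians are requested, the first or second derivative
// normal to the face must be interpolated as well; this is done by
// restarting with a larger max_derivative template argument below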
1044
1046 {
1047 constexpr int stride1 = Utilities::pow(fe_degree + 1, face_direction);
1048 constexpr int stride0 = Utilities::pow(fe_degree, face_direction);
1049 constexpr int stride2 = fe_degree * (fe_degree + 1);
1050
1051 const int degree =
1052 fe_degree != -1 ? fe_degree : shape_info.data[0].fe_degree;
1053 const int n_rows_n = degree + 1;
1054 const int n_rows_t = degree;
1055
1056 std::array<int, 3> strides{{1, 1, 1}};
1057 if (face_direction > 0)
1058 {
1059 strides[0] =
1061 strides[1] = n_rows_t * (face_direction == 3 ? n_rows_n : 1);
1063 }
1065 {{{n_rows_n, n_rows_t, n_rows_t}},
1067 {{n_rows_t, n_rows_t, n_rows_n}}}};
1068
1069 std::array<int, 2> steps, n_blocks;
1070
1071 if constexpr (face_direction == 0)
1072 steps = {{degree + (face_direction == 0), 0}};
1073 else if constexpr (face_direction == 1 && dim == 2)
1074 steps = {{1, 0}};
1075 else if constexpr (face_direction == 1)
1076 // in 3d, the coordinate system is zx, not xz -> switch indices
1077 steps = {
1079 else if constexpr (face_direction == 2)
1080 steps = {{1, 0}};
1081
1082 n_blocks[0] = dofs_per_direction[0][(face_direction + 1) % dim];
1083 n_blocks[1] =
1084 dim > 2 ? dofs_per_direction[0][(face_direction + 2) % dim] : 1;
1085
1087 (fe_degree != -1 ? (fe_degree + (face_direction == 0)) : 0),
1088 ((face_direction < 2) ? stride1 : stride2),
1089 do_evaluate,
1091 max_derivative>(shape_info.data[face_direction != 0]
1092 .shape_data_on_face[face_no % 2]
1093 .begin(),
1094 n_blocks,
1095 steps,
1096 input,
1097 output,
1098 degree + (face_direction == 0),
1099 strides[0]);
1100
1101 if (do_evaluate)
1102 {
1103 input += n_rows_n * Utilities::pow(n_rows_t, dim - 1);
1104 output += 3 * n_blocks[0] * n_blocks[1];
1105 }
1106 else
1107 {
1108 output += n_rows_n * Utilities::pow(n_rows_t, dim - 1);
1109 input += 3 * n_blocks[0] * n_blocks[1];
1110 }
1111
1112 // the steps must only be changed for face direction 0
1113 if constexpr (face_direction == 0)
1114 steps = {{degree, 0}};
1115
1116 n_blocks[0] = dofs_per_direction[1][(face_direction + 1) % dim];
1117 n_blocks[1] =
1118 dim > 2 ? dofs_per_direction[1][(face_direction + 2) % dim] : 1;
1119
1121 (fe_degree != -1 ? (fe_degree + (face_direction == 1)) : 0),
1122 ((face_direction < 2) ? stride0 : stride2),
1123 do_evaluate,
1125 max_derivative>(shape_info.data[face_direction != 1]
1126 .shape_data_on_face[face_no % 2]
1127 .begin(),
1128 n_blocks,
1129 steps,
1130 input,
1131 output,
1132 degree + (face_direction == 1),
1133 strides[1]);
1134
1135 if constexpr (dim > 2)
1136 {
1137 if (do_evaluate)
1138 {
1139 input += n_rows_n * Utilities::pow(n_rows_t, dim - 1);
1140 output += 3 * n_blocks[0] * n_blocks[1];
1141 }
1142 else
1143 {
1144 output += n_rows_n * Utilities::pow(n_rows_t, dim - 1);
1145 input += 3 * n_blocks[0] * n_blocks[1];
1146 }
1147
1148 if constexpr (face_direction == 0)
1149 steps = {{degree, 0}};
1150 else if constexpr (face_direction == 1)
1151 // in 3d, the coordinate system is zx, not xz -> switch indices
1152 steps = {
1154 else if constexpr (face_direction == 2)
1155 steps = {{1, 0}};
1156
1157 n_blocks[0] = dofs_per_direction[2][(face_direction + 1) % dim];
1158 n_blocks[1] = dofs_per_direction[2][(face_direction + 2) % dim];
1159
1161 (fe_degree != -1 ? (fe_degree + (face_direction == 2)) : 0),
1162 stride0,
1163 do_evaluate,
1165 max_derivative>(shape_info.data[face_direction != 2]
1166 .shape_data_on_face[face_no % 2]
1167 .begin(),
1168 n_blocks,
1169 steps,
1170 input,
1171 output,
1172 degree + (face_direction == 2),
1173 strides[2]);
1174 }
1175 }
1176 else if (face_direction == face_no / 2)
1177 {
1178 // Only increase max_derivative
1179 interpolate_raviart_thomas<do_evaluate,
1182 std::min(max_derivative + 1, 2)>(
1183 n_components, input, output, flag, face_no, shape_info);
1184 }
1185 else if (face_direction < dim)
1186 {
1187 if (increase_max_der)
1188 {
1189 interpolate_raviart_thomas<do_evaluate,
1191 std::min(face_direction + 1, dim - 1),
1192 std::min(max_derivative + 1, 2)>(
1193 n_components, input, output, flag, face_no, shape_info);
1194 }
1195 else
1196 {
1197 interpolate_raviart_thomas<do_evaluate,
1199 std::min(face_direction + 1, dim - 1),
1201 n_components, input, output, flag, face_no, shape_info);
1202 }
1203 }
1204 }
1205 };
1206
1207
1208
1210 // internal helper function for reading data; generic version for arbitrary types
1210 template <typename VectorizedArrayType, typename Number2>
1211 void
1212 do_vectorized_read(const Number2 *src_ptr, VectorizedArrayType &dst)
1213 {
1214 for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
1215 dst[v] = src_ptr[v];
1216 }
1217
1218
1219
1220 // internal helper function for reading data; specialized version where we
1221 // can use a dedicated load function
1222 template <typename Number, std::size_t width>
1223 void
1225 {
1226 dst.load(src_ptr);
1227 }
1228
1229
1230
1231 // internal helper function for reading data; generic version for arbitrary types
1232 template <typename VectorizedArrayType, typename Number2>
1233 void
1235 const unsigned int *indices,
1236 VectorizedArrayType &dst)
1237 {
1238 for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
1239 dst[v] = src_ptr[indices[v]];
1240 }
1241
1242
1243
1244 // internal helper function for reading data; specialized version where we
1245 // can use a dedicated gather function
1246 template <typename Number, std::size_t width>
1247 void
1249 const unsigned int *indices,
1251 {
1252 dst.gather(src_ptr, indices);
1253 }
1254
1255
1256
1257 // internal helper function for adding data into a result array; generic version for arbitrary types
1258 template <typename VectorizedArrayType, typename Number2>
1259 void
1260 do_vectorized_add(const VectorizedArrayType src, Number2 *dst_ptr)
1261 {
1262 for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
1263 dst_ptr[v] += src[v];
1264 }
1265
1266
1267
1268 // internal helper function for adding data into a result array; specialized
1269 // version where we can use dedicated load and store functions
1270 template <typename Number, std::size_t width>
1271 void
1273 {
1275 tmp.load(dst_ptr);
1276 (tmp + src).store(dst_ptr);
1277 }
1278
1279
1280
1281 // internal helper function for scattering and adding data; generic version for arbitrary types
1282 template <typename VectorizedArrayType, typename Number2>
1283 void
1284 do_vectorized_scatter_add(const VectorizedArrayType src,
1285 const unsigned int *indices,
1286 Number2 *dst_ptr)
1287 {
1288 for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
1289 dst_ptr[indices[v]] += src[v];
1290 }
1291
1292
1293
1294 // internal helper function for scattering and adding data; specialized
1295 // version where we can use dedicated gather and scatter functions
1296 template <typename Number, std::size_t width>
1297 void
1299 const unsigned int *indices,
1300 Number *dst_ptr)
1301 {
1302#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS < 512
1303 for (unsigned int v = 0; v < width; ++v)
1304 dst_ptr[indices[v]] += src[v];
1305#else
1307 tmp.gather(dst_ptr, indices);
1308 (tmp + src).scatter(indices, dst_ptr);
1309#endif
1310 }
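// Each of the helpers above comes in two flavors: a generic fallback that
// loops over the lanes of the vectorized array one by one and a
// specialization for VectorizedArray<Number> that uses the dedicated
// load/gather and store/scatter member functions.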
1311
1312
1313
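// Apply the permutation 'orientation' to the quadrature-point data of
// values, gradients, and Hessians on a face: when integrating, data is
// gathered from the permuted positions, when evaluating it is scattered to
// them; 'tmp_values' serves as scratch storage of length n_q_points.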
1314 template <typename Number>
1315 void
1316 adjust_for_face_orientation(const unsigned int dim,
1317 const unsigned int n_components,
1319 const unsigned int *orientation,
1320 const bool integrate,
1321 const std::size_t n_q_points,
1322 Number *tmp_values,
1323 Number *values_quad,
1324 Number *gradients_quad,
1325 Number *hessians_quad)
1326 {
1327 for (unsigned int c = 0; c < n_components; ++c)
1328 {
1329 if (flag & EvaluationFlags::values)
1330 {
1331 if (integrate)
1332 for (unsigned int q = 0; q < n_q_points; ++q)
1333 tmp_values[q] = values_quad[c * n_q_points + orientation[q]];
1334 else
1335 for (unsigned int q = 0; q < n_q_points; ++q)
1336 tmp_values[orientation[q]] = values_quad[c * n_q_points + q];
1337 for (unsigned int q = 0; q < n_q_points; ++q)
1338 values_quad[c * n_q_points + q] = tmp_values[q];
1339 }
1340 if (flag & EvaluationFlags::gradients)
1341 for (unsigned int d = 0; d < dim; ++d)
1342 {
1343 if (integrate)
1344 for (unsigned int q = 0; q < n_q_points; ++q)
1345 tmp_values[q] =
1346 gradients_quad[(c * n_q_points + orientation[q]) * dim + d];
1347 else
1348 for (unsigned int q = 0; q < n_q_points; ++q)
1349 tmp_values[orientation[q]] =
1350 gradients_quad[(c * n_q_points + q) * dim + d];
1351 for (unsigned int q = 0; q < n_q_points; ++q)
1352 gradients_quad[(c * n_q_points + q) * dim + d] = tmp_values[q];
1353 }
1354 if (flag & EvaluationFlags::hessians)
1355 {
1356 const unsigned int hdim = (dim * (dim + 1)) / 2;
1357 for (unsigned int d = 0; d < hdim; ++d)
1358 {
1359 if (integrate)
1360 for (unsigned int q = 0; q < n_q_points; ++q)
1361 tmp_values[q] = hessians_quad[(c * hdim + d) * n_q_points +
1362 orientation[q]];
1363 else
1364 for (unsigned int q = 0; q < n_q_points; ++q)
1365 tmp_values[orientation[q]] =
1366 hessians_quad[(c * hdim + d) * n_q_points + q];
1367 for (unsigned int q = 0; q < n_q_points; ++q)
1368 hessians_quad[(c * hdim + d) * n_q_points + q] =
1369 tmp_values[q];
1370 }
1371 }
1372 }
1373 }
1374
1375
1376
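// Same as adjust_for_face_orientation(), but the permutation is applied to
// a single lane 'v' of the vectorized data; this is used when the faces
// within one batch have different orientations.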
1377 template <typename Number, typename VectorizedArrayType>
1378 void
1380 const unsigned int dim,
1381 const unsigned int n_components,
1382 const unsigned int v,
1384 const unsigned int *orientation,
1385 const bool integrate,
1386 const std::size_t n_q_points,
1387 Number *tmp_values,
1388 VectorizedArrayType *values_quad,
1389 VectorizedArrayType *gradients_quad = nullptr,
1390 VectorizedArrayType *hessians_quad = nullptr)
1391 {
1392 for (unsigned int c = 0; c < n_components; ++c)
1393 {
1394 if (flag & EvaluationFlags::values)
1395 {
1396 if (integrate)
1397 for (unsigned int q = 0; q < n_q_points; ++q)
1398 tmp_values[q] = values_quad[c * n_q_points + orientation[q]][v];
1399 else
1400 for (unsigned int q = 0; q < n_q_points; ++q)
1401 tmp_values[orientation[q]] = values_quad[c * n_q_points + q][v];
1402 for (unsigned int q = 0; q < n_q_points; ++q)
1403 values_quad[c * n_q_points + q][v] = tmp_values[q];
1404 }
1405 if (flag & EvaluationFlags::gradients)
1406 for (unsigned int d = 0; d < dim; ++d)
1407 {
1408 Assert(gradients_quad != nullptr, ExcInternalError());
1409 if (integrate)
1410 for (unsigned int q = 0; q < n_q_points; ++q)
1411 tmp_values[q] =
1412 gradients_quad[(c * n_q_points + orientation[q]) * dim + d]
1413 [v];
1414 else
1415 for (unsigned int q = 0; q < n_q_points; ++q)
1416 tmp_values[orientation[q]] =
1417 gradients_quad[(c * n_q_points + q) * dim + d][v];
1418 for (unsigned int q = 0; q < n_q_points; ++q)
1419 gradients_quad[(c * n_q_points + q) * dim + d][v] =
1420 tmp_values[q];
1421 }
1422 if (flag & EvaluationFlags::hessians)
1423 {
1424 Assert(hessians_quad != nullptr, ExcInternalError());
1425 const unsigned int hdim = (dim * (dim + 1)) / 2;
1426 for (unsigned int d = 0; d < hdim; ++d)
1427 {
1428 if (integrate)
1429 for (unsigned int q = 0; q < n_q_points; ++q)
1430 tmp_values[q] = hessians_quad[(c * hdim + d) * n_q_points +
1431 orientation[q]][v];
1432 else
1433 for (unsigned int q = 0; q < n_q_points; ++q)
1434 tmp_values[orientation[q]] =
1435 hessians_quad[(c * hdim + d) * n_q_points + q][v];
1436 for (unsigned int q = 0; q < n_q_points; ++q)
1437 hessians_quad[(c * hdim + d) * n_q_points + q][v] =
1438 tmp_values[q];
1439 }
1440 }
1441 }
1442 }
1443
1444
1445
1446 template <int dim, typename Number>
1448 {
1449 static bool
1450 evaluate_tensor_none(const unsigned int n_components,
1452 const Number *values_dofs,
1454 {
1455 const auto &shape_info = fe_eval.get_shape_info();
1456 const auto &shape_data = shape_info.data.front();
1457 using Number2 =
1459
1460 Assert((fe_eval.get_dof_access_index() ==
1462 fe_eval.is_interior_face() == false) == false,
1464
1465 const unsigned int face_no = fe_eval.get_face_no();
1466 const unsigned int face_orientation = fe_eval.get_face_orientation();
1467 const std::size_t n_dofs = shape_info.dofs_per_component_on_cell;
1468 const std::size_t n_q_points = shape_info.n_q_points_faces[face_no];
1469
1470 using Eval =
1472
1474 {
1475 const auto *const shape_values =
1476 &shape_data.shape_values_face(face_no, face_orientation, 0);
1477
1478 auto *values_quad_ptr = fe_eval.begin_values();
1479 auto *values_dofs_actual_ptr = values_dofs;
1480
1481 Eval eval(shape_values, nullptr, nullptr, n_dofs, n_q_points);
1482 for (unsigned int c = 0; c < n_components; ++c)
1483 {
1486
1487 values_quad_ptr += n_q_points;
1488 values_dofs_actual_ptr += n_dofs;
1489 }
1490 }
1491
1493 {
1494 auto *gradients_quad_ptr = fe_eval.begin_gradients();
1495 const auto *values_dofs_actual_ptr = values_dofs;
1496
1497 std::array<const Number2 *, dim> shape_gradients;
1498 for (unsigned int d = 0; d < dim; ++d)
1499 shape_gradients[d] =
1500 &shape_data.shape_gradients_face(face_no, face_orientation, d, 0);
1501
1502 for (unsigned int c = 0; c < n_components; ++c)
1503 {
1504 for (unsigned int d = 0; d < dim; ++d)
1505 {
1506 Eval eval(
1507 nullptr, shape_gradients[d], nullptr, n_dofs, n_q_points);
1508
1511 }
1512 gradients_quad_ptr += n_q_points * dim;
1513 values_dofs_actual_ptr += n_dofs;
1514 }
1515 }
1516
1519
1520 return true;
1521 }
1522
1523 template <int fe_degree>
1524#ifndef DEBUG
1526#endif
1527 static void
1528 project_to_face(const unsigned int n_components,
1530 const Number *values_dofs,
1532 const bool use_vectorization,
1533 Number *temp,
1534 Number *scratch_data)
1535 {
1536 const auto &shape_info = fe_eval.get_shape_info();
1537
1538 if (use_vectorization == false)
1539 {
1540 const auto &shape_data = shape_info.data.front();
1541
1542 const unsigned int dofs_per_comp_face =
1543 fe_degree > -1 ?
1544 Utilities::pow(fe_degree + 1, dim - 1) :
1545 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
1546 const unsigned int dofs_per_face = n_components * dofs_per_comp_face;
1547
1548 for (unsigned int v = 0; v < Number::size(); ++v)
1549 {
1550 // an invalid_unsigned_int cell id marks an unused lane; this only
1551 // happens for exterior faces in the ECL loop (where some faces
1552 // might be at the boundary while others are not)
1553 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
1554 {
1555 for (unsigned int i = 0; i < 3 * dofs_per_face; ++i)
1556 temp[i][v] = 0;
1557 continue;
1558 }
1559
1563 shape_info,
1564 values_dofs,
1565 scratch_data,
1566 fe_eval.get_face_no(v));
1567
1568 for (unsigned int i = 0; i < 3 * dofs_per_face; ++i)
1569 temp[i][v] = scratch_data[i][v];
1570 }
1571 }
1572 else
1576 shape_info,
1577 values_dofs,
1578 temp,
1579 fe_eval.get_face_no());
1580 }
1581
1582
1583 template <int fe_degree, int n_q_points_1d>
1584#ifndef DEBUG
1586#endif
1587 static void
1588 evaluate_in_face(const unsigned int n_components,
1591 Number *temp,
1592 Number *scratch_data)
1593 {
1594 const auto &shape_info = fe_eval.get_shape_info();
1595 const auto &shape_data = shape_info.data.front();
1596
1597 const unsigned int subface_index = fe_eval.get_subface_index();
1598 constexpr unsigned int n_q_points_1d_actual =
1599 fe_degree > -1 ? n_q_points_1d : 0;
1600
1601 if (shape_info.element_type == MatrixFreeFunctions::tensor_raviart_thomas)
1602 {
1604 fe_degree,
1606 Number>::
1609 fe_eval.get_shape_info().data,
1610 temp,
1611 fe_eval.begin_values(),
1612 fe_eval.begin_gradients(),
1613 scratch_data,
1614 subface_index,
1615 fe_eval.get_face_no() / 2);
1616 }
1617 else if (fe_degree > -1 &&
1619 shape_info.element_type <= MatrixFreeFunctions::tensor_symmetric)
1621 dim,
1622 fe_degree,
1624 Number>::evaluate_in_face(n_components,
1626 shape_data,
1627 temp,
1628 fe_eval.begin_values(),
1629 fe_eval
1630 .begin_gradients(),
1631 fe_eval.begin_hessians(),
1632 scratch_data,
1633 subface_index);
1634 else
1636 dim,
1637 fe_degree,
1639 Number>::evaluate_in_face(n_components,
1641 shape_data,
1642 temp,
1643 fe_eval.begin_values(),
1644 fe_eval
1645 .begin_gradients(),
1646 fe_eval.begin_hessians(),
1647 scratch_data,
1648 subface_index);
1649 }
1650
1651#ifndef DEBUG
1653#endif
1654 static void
1656 const unsigned int n_components,
1659 const bool use_vectorization,
1660 Number *temp)
1661 {
1662 const auto &shape_info = fe_eval.get_shape_info();
1663
1664 if (use_vectorization == false)
1665 {
1666 for (unsigned int v = 0; v < Number::size(); ++v)
1667 {
1668 // an invalid_unsigned_int cell id marks an unused lane; this only
1669 // happens for exterior faces in the ECL loop (where some faces
1670 // might be at the boundary while others are not)
1671 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
1672 continue;
1673
1674 if (fe_eval.get_face_orientation(v) != 0)
1676 dim,
1677 n_components,
1678 v,
1680 &shape_info.face_orientations_quad(
1681 fe_eval.get_face_orientation(v), 0),
1682 false,
1683 shape_info.n_q_points_face,
1684 &temp[0][0],
1685 fe_eval.begin_values(),
1686 fe_eval.begin_gradients(),
1687 fe_eval.begin_hessians());
1688 }
1689 }
1690 else if (fe_eval.get_face_orientation() != 0)
1692 dim,
1693 n_components,
1695 &shape_info.face_orientations_quad(fe_eval.get_face_orientation(), 0),
1696 false,
1697 shape_info.n_q_points_face,
1698 temp,
1699 fe_eval.begin_values(),
1700 fe_eval.begin_gradients(),
1701 fe_eval.begin_hessians());
1702 }
1703
1704
1705
1706 template <int fe_degree, int n_q_points_1d>
1707 static bool
1708 evaluate_tensor(const unsigned int n_components,
1710 const Number *values_dofs,
1712 {
1713 const auto &shape_info = fe_eval.get_shape_info();
1714 const auto &shape_data = shape_info.data.front();
1715
1716 const unsigned int dofs_per_comp_face =
1717 fe_degree > -1 ?
1718 Utilities::pow(fe_degree + 1, dim - 1) :
1719 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
1720
1721 // Note: we always keep storage of values, 1st and 2nd derivatives in an
1722 // array, so reserve space for all three here
1723 Number *temp = fe_eval.get_scratch_data().begin();
1724 Number *scratch_data = temp + 3 * n_components * dofs_per_comp_face;
1725
1726 bool use_vectorization = true;
1727 if (fe_eval.get_dof_access_index() ==
1729 fe_eval.is_interior_face() == false) // exterior faces in the ECL loop
1730 for (unsigned int v = 0; v < Number::size(); ++v)
1731 if (fe_eval.get_cell_ids()[v] != numbers::invalid_unsigned_int &&
1732 fe_eval.get_face_no(v) != fe_eval.get_face_no(0))
1733 use_vectorization = false;
1734
1735 project_to_face<fe_degree>(n_components,
1737 values_dofs,
1738 fe_eval,
1740 temp,
1741 scratch_data);
1742
1744 n_components, evaluation_flag, fe_eval, temp, scratch_data);
1745
1746 if (dim == 3)
1748 n_components, evaluation_flag, fe_eval, use_vectorization, temp);
1749
1750 return false;
1751 }
1752
1753 template <int fe_degree, int n_q_points_1d>
1754 static bool
1755 run(const unsigned int n_components,
1757 const Number *values_dofs,
1759 {
1760 const auto &shape_info = fe_eval.get_shape_info();
1761
1762 if (shape_info.element_type == MatrixFreeFunctions::tensor_none)
1763 return evaluate_tensor_none(n_components,
1765 values_dofs,
1766 fe_eval);
1767 else
1770 values_dofs,
1771 fe_eval);
1772 }
1773 };
1774
1775
1776
1777 template <int dim, typename Number>
1779 {
1780 template <int fe_degree>
1781 static bool
1782 run(const unsigned int n_components,
1784 const Number *values_dofs,
1786 {
1787 const auto &shape_info = fe_eval.get_shape_info();
1788 const auto &shape_data = shape_info.data.front();
1789
1790 const unsigned int dofs_per_comp_face =
1791 fe_degree > -1 ?
1792 Utilities::pow(fe_degree + 1, dim - 1) :
1793 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
1794
1795 // Note: we always keep storage of values, 1st and 2nd derivatives in an
1796 // array, so reserve space for all three here
1797 Number *temp = fe_eval.get_scratch_data().begin();
1798 Number *scratch_data = temp + 3 * n_components * dofs_per_comp_face;
1799
1800 bool use_vectorization = true;
1801 if (fe_eval.get_dof_access_index() ==
1803 fe_eval.is_interior_face() == false) // exterior faces in the ECL loop
1804 for (unsigned int v = 0; v < Number::size(); ++v)
1805 if (fe_eval.get_cell_ids()[v] != numbers::invalid_unsigned_int &&
1806 fe_eval.get_face_no(v) != fe_eval.get_face_no(0))
1807 use_vectorization = false;
1808
1812 values_dofs,
1813 fe_eval,
1815 temp,
1816 scratch_data);
1817
1818 return false;
1819 }
1820 };
1821
1822
1823
1824 template <int dim, typename Number>
1826 {
1827 template <int fe_degree, int n_q_points_1d>
1828 static bool
1829 run(const unsigned int n_components,
1832 {
1833 const auto &shape_info = fe_eval.get_shape_info();
1834 const auto &shape_data = shape_info.data.front();
1835
1836 const unsigned int dofs_per_comp_face =
1837 fe_degree > -1 ?
1838 Utilities::pow(fe_degree + 1, dim - 1) :
1839 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
1840
1841 // Note: we always keep storage of values, 1st and 2nd derivatives in an
1842 // array, so reserve space for all three here
1843 Number *temp = fe_eval.get_scratch_data().begin();
1844 Number *scratch_data = temp + 3 * n_components * dofs_per_comp_face;
1845
1848 n_components, evaluation_flag, fe_eval, temp, scratch_data);
1849
1850 return false;
1851 }
1852 };
1853
1854
1855
1856 template <int dim, typename Number>
1858 {
1859 static bool
1861 const unsigned int n_components,
1863 Number *values_dofs,
1865 const bool sum_into_values)
1866 {
1867 const auto &shape_info = fe_eval.get_shape_info();
1868 const auto &shape_data = shape_info.data.front();
1869 using Number2 =
1871
1872 Assert((fe_eval.get_dof_access_index() ==
1874 fe_eval.is_interior_face() == false) == false,
1876
1877 const unsigned int face_no = fe_eval.get_face_no();
1878 const unsigned int face_orientation = fe_eval.get_face_orientation();
1879 const std::size_t n_dofs = shape_info.dofs_per_component_on_cell;
1880 const std::size_t n_q_points = shape_info.n_q_points_faces[face_no];
1881
1882 using Eval =
1884
1886 {
1887 const auto *const shape_values =
1888 &shape_data.shape_values_face(face_no, face_orientation, 0);
1889
1890 auto *values_quad_ptr = fe_eval.begin_values();
1891 auto *values_dofs_actual_ptr = values_dofs;
1892
1893 Eval eval(shape_values, nullptr, nullptr, n_dofs, n_q_points);
1894 for (unsigned int c = 0; c < n_components; ++c)
1895 {
1896 if (sum_into_values)
1899 else
1902 values_quad_ptr += n_q_points;
1903 values_dofs_actual_ptr += n_dofs;
1904 }
1905 }
1906
1908 {
1909 auto *gradients_quad_ptr = fe_eval.begin_gradients();
1910 auto *values_dofs_actual_ptr = values_dofs;
1911
1912 std::array<const Number2 *, dim> shape_gradients;
1913 for (unsigned int d = 0; d < dim; ++d)
1914 shape_gradients[d] =
1915 &shape_data.shape_gradients_face(face_no, face_orientation, d, 0);
1916
1917 for (unsigned int c = 0; c < n_components; ++c)
1918 {
1919 for (unsigned int d = 0; d < dim; ++d)
1920 {
1921 Eval eval(
1922 nullptr, shape_gradients[d], nullptr, n_dofs, n_q_points);
1923
1924 if (!sum_into_values &&
1928 else
1931 }
1932 gradients_quad_ptr += n_q_points * dim;
1933 values_dofs_actual_ptr += n_dofs;
1934 }
1935 }
1936
1939
1940 return true;
1941 }
1942
1943#ifndef DEBUG
1945#endif
1946 static void
1948 const unsigned int n_components,
1951 const bool use_vectorization,
1952 Number *temp)
1953 {
1954 const auto &shape_info = fe_eval.get_shape_info();
1955
1956 if (use_vectorization == false)
1957 {
1958 for (unsigned int v = 0; v < Number::size(); ++v)
1959 {
1960 // an invalid_unsigned_int cell id marks an unused lane; this only
1961 // happens for exterior faces in the ECL loop (where some faces
1962 // might be at the boundary while others are not)
1963 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
1964 continue;
1965
1966 if (fe_eval.get_face_orientation(v) != 0)
1968 dim,
1969 n_components,
1970 v,
1972 &fe_eval.get_shape_info().face_orientations_quad(
1973 fe_eval.get_face_orientation(v), 0),
1974 true,
1975 shape_info.n_q_points_face,
1976 &temp[0][0],
1977 fe_eval.begin_values(),
1978 fe_eval.begin_gradients(),
1979 fe_eval.begin_hessians());
1980 }
1981 }
1982 else if (fe_eval.get_face_orientation() != 0)
1984 dim,
1985 n_components,
1987 &fe_eval.get_shape_info().face_orientations_quad(
1988 fe_eval.get_face_orientation(), 0),
1989 true,
1990 shape_info.n_q_points_face,
1991 temp,
1992 fe_eval.begin_values(),
1993 fe_eval.begin_gradients(),
1994 fe_eval.begin_hessians());
1995 }
1996
1997 template <int fe_degree, int n_q_points_1d>
1998#ifndef DEBUG
2000#endif
2001 static void
2002 integrate_in_face(const unsigned int n_components,
2005 Number *temp,
2006 Number *scratch_data)
2007 {
2008 const auto &shape_info = fe_eval.get_shape_info();
2009 const auto &shape_data = shape_info.data.front();
2010
2011 const unsigned int n_q_points_1d_actual =
2012 fe_degree > -1 ? n_q_points_1d : 0;
2013 const unsigned int subface_index = fe_eval.get_subface_index();
2014
2015 if (shape_info.element_type == MatrixFreeFunctions::tensor_raviart_thomas)
2016 {
2018 fe_degree,
2020 Number>::
2023 fe_eval.get_shape_info().data,
2024 temp,
2025 fe_eval.begin_values(),
2026 fe_eval.begin_gradients(),
2027 scratch_data,
2028 subface_index,
2029 fe_eval.get_face_no() / 2);
2030 }
2031 else if (fe_degree > -1 &&
2032 fe_eval.get_subface_index() >=
2034 shape_info.element_type <= MatrixFreeFunctions::tensor_symmetric)
2036 true,
2037 dim,
2038 fe_degree,
2040 Number>::integrate_in_face(n_components,
2042 shape_data,
2043 temp,
2044 fe_eval.begin_values(),
2045 fe_eval.begin_gradients(),
2046 fe_eval.begin_hessians(),
2047 scratch_data,
2048 subface_index);
2049 else
2051 false,
2052 dim,
2053 fe_degree,
2055 Number>::integrate_in_face(n_components,
2057 shape_data,
2058 temp,
2059 fe_eval.begin_values(),
2060 fe_eval.begin_gradients(),
2061 fe_eval.begin_hessians(),
2062 scratch_data,
2063 subface_index);
2064 }
2065
2066 template <int fe_degree>
2067#ifndef DEBUG
2069#endif
2070 static void
2071 collect_from_face(const unsigned int n_components,
2073 Number *values_dofs,
2075 const bool use_vectorization,
2076 const Number *temp,
2077 Number *scratch_data,
2078 const bool sum_into_values)
2079 {
2080 const auto &shape_info = fe_eval.get_shape_info();
2081 const auto &shape_data = shape_info.data.front();
2082
2083 const unsigned int dofs_per_comp_face =
2084 fe_degree > -1 ?
2085 Utilities::pow(fe_degree + 1, dim - 1) :
2086 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
2087 const unsigned int dofs_per_face = n_components * dofs_per_comp_face;
2088
2089 if (use_vectorization == false)
2090 {
2091 for (unsigned int v = 0; v < Number::size(); ++v)
2092 {
2093 // an invalid_unsigned_int cell id marks an unused lane; this only
2094 // happens for exterior faces in the ECL loop (where some faces
2095 // might be at the boundary while others are not)
2096 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
2097 continue;
2098
2102 shape_info,
2103 temp,
2104 scratch_data,
2105 fe_eval.get_face_no(v));
2106
2107 if (sum_into_values)
2108 for (unsigned int i = 0; i < 3 * dofs_per_face; ++i)
2109 values_dofs[i][v] += scratch_data[i][v];
2110 else
2111 for (unsigned int i = 0; i < 3 * dofs_per_face; ++i)
2112 values_dofs[i][v] = scratch_data[i][v];
2113 }
2114 }
2115 else
2116 {
2117 if (sum_into_values)
2121 shape_info,
2122 temp,
2123 values_dofs,
2124 fe_eval.get_face_no());
2125 else
2129 shape_info,
2130 temp,
2131 values_dofs,
2132 fe_eval.get_face_no());
2133 }
2134 }
2135
2136 template <int fe_degree, int n_q_points_1d>
2137 static bool
2138 integrate_tensor(const unsigned int n_components,
2140 Number *values_dofs,
2142 const bool sum_into_values)
2143 {
2144 const auto &shape_info = fe_eval.get_shape_info();
2145 const auto &shape_data = shape_info.data.front();
2146
2147 const unsigned int dofs_per_comp_face =
2148 fe_degree > -1 ?
2149 Utilities::pow(fe_degree + 1, dim - 1) :
2150 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
2151
2152 Number *temp = fe_eval.get_scratch_data().begin();
2153 Number *scratch_data = temp + 3 * n_components * dofs_per_comp_face;
2154
2155 bool use_vectorization = true;
2156
2157 if (fe_eval.get_dof_access_index() ==
2159 fe_eval.is_interior_face() == false) // exterior faces in the ECL loop
2161 fe_eval.get_cell_ids()[0] != numbers::invalid_unsigned_int &&
2162 std::all_of(fe_eval.get_cell_ids().begin() + 1,
2163 fe_eval.get_cell_ids().end(),
2164 [&](const auto &v) {
2165 return v == fe_eval.get_cell_ids()[0] ||
2166 v == numbers::invalid_unsigned_int;
2167 });
2168
2169 if (dim == 3)
2171 n_components, integration_flag, fe_eval, use_vectorization, temp);
2172
2174 n_components, integration_flag, fe_eval, temp, scratch_data);
2175
2176 collect_from_face<fe_degree>(n_components,
2178 values_dofs,
2179 fe_eval,
2181 temp,
2182 scratch_data,
2184
2185 return false;
2186 }
2187
2188 template <int fe_degree, int n_q_points_1d>
2189 static bool
2190 run(const unsigned int n_components,
2192 Number *values_dofs,
2194 const bool sum_into_values)
2195 {
2196 const auto &shape_info = fe_eval.get_shape_info();
2197
2198 if (shape_info.element_type == MatrixFreeFunctions::tensor_none)
2199 return integrate_tensor_none(n_components,
2201 values_dofs,
2202 fe_eval,
2204 else
2207 values_dofs,
2208 fe_eval,
2210 }
2211 };
2212
2213
2214
2215 template <int dim, typename Number>
2217 {
2218 template <int fe_degree>
2219 static bool
2220 run(const unsigned int n_components,
2222 Number *values_dofs,
2224 const bool sum_into_values)
2225 {
2226 const auto &shape_info = fe_eval.get_shape_info();
2227 const auto &shape_data = shape_info.data.front();
2228
2229 const unsigned int dofs_per_comp_face =
2230 fe_degree > -1 ?
2231 Utilities::pow(fe_degree + 1, dim - 1) :
2232 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
2233
2234 Number *temp = fe_eval.get_scratch_data().begin();
2235 Number *scratch_data = temp + 3 * n_components * dofs_per_comp_face;
2236
2237 bool use_vectorization = true;
2238
2239 if (fe_eval.get_dof_access_index() ==
2241 fe_eval.is_interior_face() == false) // exterior faces in the ECL loop
2243 fe_eval.get_cell_ids()[0] != numbers::invalid_unsigned_int &&
2244 std::all_of(fe_eval.get_cell_ids().begin() + 1,
2245 fe_eval.get_cell_ids().end(),
2246 [&](const auto &v) {
2247 return v == fe_eval.get_cell_ids()[0] ||
2248 v == numbers::invalid_unsigned_int;
2249 });
2250
2254 values_dofs,
2255 fe_eval,
2257 temp,
2258 scratch_data,
2260
2261 return false;
2262 }
2263 };
2264
2265
2266
2267 template <int dim, typename Number>
2269 {
2270 template <int fe_degree, int n_q_points_1d>
2271 static bool
2272 run(const unsigned int n_components,
2274
2276 {
2277 const auto &shape_info = fe_eval.get_shape_info();
2278 const auto &shape_data = shape_info.data.front();
2279
2280 const unsigned int dofs_per_comp_face =
2281 fe_degree > -1 ?
2282 Utilities::pow(fe_degree + 1, dim - 1) :
2283 Utilities::fixed_power<dim - 1>(shape_data.fe_degree + 1);
2284
2285 Number *temp = fe_eval.get_scratch_data().begin();
2286 Number *scratch_data = temp + 3 * n_components * dofs_per_comp_face;
2287
2290 n_components, integration_flag, fe_eval, temp, scratch_data);
2291
2292 return false;
2293 }
2294 };
2295
2296
2297
2298 template <int n_face_orientations,
2299 typename Processor,
2300 typename EvaluationData,
2301 const bool check_face_orientations = false>
2302 void
2304 Processor &proc,
2305 const unsigned int n_components,
2307 typename Processor::Number2_ *global_vector_ptr,
2309 const EvaluationData &fe_eval,
2310 typename Processor::VectorizedArrayType_ *temp1)
2311 {
2312 constexpr int dim = Processor::dim_;
2313 constexpr int fe_degree = Processor::fe_degree_;
2314 using VectorizedArrayType = typename Processor::VectorizedArrayType_;
2315 constexpr int n_lanes = VectorizedArrayType::size();
2316
2317 using Number = typename Processor::Number_;
2318 using Number2_ = typename Processor::Number2_;
2319
2320 const auto &shape_data = fe_eval.get_shape_info().data.front();
2321 constexpr bool integrate = Processor::do_integrate;
2322 const unsigned int face_no = fe_eval.get_face_no();
2323 const auto &dof_info = fe_eval.get_dof_info();
2324 const unsigned int cell = fe_eval.get_cell_or_face_batch_id();
2325 const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index =
2326 fe_eval.get_dof_access_index();
2327 AssertIndexRange(cell,
2328 dof_info.index_storage_variants[dof_access_index].size());
2329 constexpr unsigned int dofs_per_face =
2330 Utilities::pow(fe_degree + 1, dim - 1);
2331 const unsigned int subface_index = fe_eval.get_subface_index();
2332
2333 const unsigned int n_filled_lanes =
2334 dof_info.n_vectorization_lanes_filled[dof_access_index][cell];
2335
2336 bool all_faces_are_same = n_filled_lanes == n_lanes;
2337 if (n_face_orientations == n_lanes)
2338 for (unsigned int v = 1; v < n_lanes; ++v)
2339 if (fe_eval.get_face_no(v) != fe_eval.get_face_no(0) ||
2340 fe_eval.get_face_orientation(v) != fe_eval.get_face_orientation(0))
2341 {
2342 all_faces_are_same = false;
2343 break;
2344 }
2345
2346 // check for re-orientation ...
2347 std::array<const unsigned int *, n_face_orientations> orientation = {};
2348
2349 if (dim == 3 && n_face_orientations == n_lanes && !all_faces_are_same &&
2350 fe_eval.is_interior_face() == 0)
2351 for (unsigned int v = 0; v < n_lanes; ++v)
2352 {
2353 // an invalid_unsigned_int cell id marks an unused lane; this only
2354 // happens for exterior faces in the ECL loop (where some faces
2355 // might be at the boundary while others are not)
2356 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
2357 continue;
2358
2359 if (shape_data.nodal_at_cell_boundaries &&
2360 fe_eval.get_face_orientation(v) != 0)
2361 {
2362 // ... and in case we detect a re-orientation, go to the other
2363 // version of this function that actually allows for this
2364 if (subface_index == GeometryInfo<dim>::max_children_per_cell &&
2365 check_face_orientations == false)
2366 {
2367 fe_face_evaluation_process_and_io<n_face_orientations,
2368 Processor,
2370 true>(proc,
2371 n_components,
2374 sm_ptr,
2375 fe_eval,
2376 temp1);
2377 return;
2378 }
2379 orientation[v] = &fe_eval.get_shape_info().face_orientations_dofs(
2380 fe_eval.get_face_orientation(v), 0);
2381 }
2382 }
2383 else if (dim == 3 && fe_eval.get_face_orientation() != 0)
2384 {
2385 // go to the other version of this function
2386 if (subface_index == GeometryInfo<dim>::max_children_per_cell &&
2387 check_face_orientations == false)
2388 {
2389 fe_face_evaluation_process_and_io<n_face_orientations,
2390 Processor,
2391 EvaluationData,
2392 true>(proc,
2393 n_components,
2394 evaluation_flag,
2395 global_vector_ptr,
2396 sm_ptr,
2397 fe_eval,
2398 temp1);
2399 return;
2400 }
2401 for (unsigned int v = 0; v < n_face_orientations; ++v)
2402 orientation[v] = &fe_eval.get_shape_info().face_orientations_dofs(
2403 fe_eval.get_face_orientation(), 0);
2404 }
2405
2406 // we know that the gradient weights for the Hermite case on the
2407 // right (side==1) are the negatives of those on the left (side==0),
2408 // so we only read out one of them.
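// The value on the face is taken from the dof layer directly at the face,
// while the normal derivative is formed as a weighted difference of the two
// dof layers closest to the face; grad_weight holds the corresponding 1d
// weight, picked at different positions for evaluation and integration so
// that the two operations are transposes of each other.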
2409 VectorizedArrayType grad_weight =
2410 shape_data
2411 .shape_data_on_face[0][fe_degree + (integrate ? (2 - face_no % 2) :
2412 (1 + face_no % 2))];
2413
2414 // face_to_cell_index_hermite
2415 std::array<const unsigned int *, n_face_orientations> index_array_hermite =
2416 {};
2417 if (fe_degree > 1 && (evaluation_flag & EvaluationFlags::gradients))
2418 {
2419 if (n_face_orientations == 1)
2420 index_array_hermite[0] =
2421 &fe_eval.get_shape_info().face_to_cell_index_hermite(face_no, 0);
2422 else
2423 {
2424 for (unsigned int v = 0; v < n_lanes; ++v)
2425 {
2426 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
2427 continue;
2428
2429 const auto face_no = fe_eval.get_face_no(v);
2430
2431 grad_weight[v] =
2432 shape_data.shape_data_on_face[0][fe_degree +
2433 (integrate ?
2434 (2 - (face_no % 2)) :
2435 (1 + (face_no % 2)))][0];
2436
2437 index_array_hermite[v] =
2438 &fe_eval.get_shape_info().face_to_cell_index_hermite(face_no,
2439 0);
2440 }
2441 }
2442 }
2443
2444 // face_to_cell_index_nodal
2445 std::array<const unsigned int *, n_face_orientations> index_array_nodal =
2446 {};
2447 if (shape_data.nodal_at_cell_boundaries == true)
2448 {
2449 if (n_face_orientations == 1)
2450 index_array_nodal[0] =
2451 &fe_eval.get_shape_info().face_to_cell_index_nodal(face_no, 0);
2452 else
2453 {
2454 for (unsigned int v = 0; v < n_lanes; ++v)
2455 {
2456 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
2457 continue;
2458
2459 const auto face_no = fe_eval.get_face_no(v);
2460
2461 index_array_nodal[v] =
2462 &fe_eval.get_shape_info().face_to_cell_index_nodal(face_no,
2463 0);
2464 }
2465 }
2466 }
2467
2468
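// Map a face-local dof index i to its re-oriented position for lane v; the
// identity map is used whenever no orientation table has been stored for
// that lane.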
2469 const auto reorientate = [&](const unsigned int v, const unsigned int i) {
2470 return (!check_face_orientations || orientation[v] == nullptr) ?
2471 i :
2472 orientation[v][i];
2473 };
2474
2475 const unsigned int cell_index =
2476 dof_access_index == MatrixFreeFunctions::DoFInfo::dof_access_cell ?
2477 fe_eval.get_cell_ids()[0] :
2478 cell * n_lanes;
2479 const unsigned int *dof_indices =
2480 &dof_info.dof_indices_contiguous[dof_access_index][cell_index];
2481
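// Loop over the vector components; per component, temp1 provides room for
// three blocks of dofs_per_face entries (the first two holding the value and
// normal-derivative data of the Hermite layout), which is why the pointer is
// advanced by 3 * dofs_per_face at the end of the loop and why the callers
// reserve 3 * n_components * dofs_per_face entries of scratch space.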
2482 for (unsigned int comp = 0; comp < n_components; ++comp)
2483 {
2484 const std::size_t index_offset =
2485 dof_info.component_dof_indices_offset
2486 [fe_eval.get_active_fe_index()]
2487 [fe_eval.get_first_selected_component()] +
2488 comp * Utilities::pow(fe_degree + 1, dim);
2489
2490 // case 1: contiguous and interleaved indices
2491 if (n_face_orientations == 1 &&
2492 dof_info.index_storage_variants[dof_access_index][cell] ==
2493 MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
2494 interleaved_contiguous)
2495 {
2496 AssertDimension(
2497 dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
2498 n_lanes);
2499 Number2_ *vector_ptr =
2500 global_vector_ptr + dof_indices[0] + index_offset * n_lanes;
2501
2502 if (fe_degree > 1 && (evaluation_flag & EvaluationFlags::gradients))
2503 {
2504 for (unsigned int i = 0; i < dofs_per_face; ++i)
2505 {
2506 Assert(n_face_orientations == 1, ExcNotImplemented());
2507
2508 const unsigned int ind1 = index_array_hermite[0][2 * i];
2509 const unsigned int ind2 = index_array_hermite[0][2 * i + 1];
2510 const unsigned int i_ = reorientate(0, i);
2511 proc.hermite_grad_vectorized(temp1[i_],
2512 temp1[i_ + dofs_per_face],
2513 vector_ptr + ind1 * n_lanes,
2514 vector_ptr + ind2 * n_lanes,
2515 grad_weight);
2516 }
2517 }
2518 else
2519 {
2520 for (unsigned int i = 0; i < dofs_per_face; ++i)
2521 {
2522 Assert(n_face_orientations == 1, ExcNotImplemented());
2523
2524 const unsigned int i_ = reorientate(0, i);
2525 const unsigned int ind = index_array_nodal[0][i];
2526 proc.value_vectorized(temp1[i_],
2527 vector_ptr + ind * n_lanes);
2528 }
2529 }
2530 }
2531
2532 // case 2: contiguous and interleaved indices with fixed stride
2533 else if (n_face_orientations == 1 &&
2534 dof_info.index_storage_variants[dof_access_index][cell] ==
2535 MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
2536 interleaved_contiguous_strided)
2537 {
2538 AssertDimension(
2539 dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
2540 n_lanes);
2541 Number2_ *vector_ptr = global_vector_ptr + index_offset * n_lanes;
2542 if (fe_degree > 1 && (evaluation_flag & EvaluationFlags::gradients))
2543 {
2544 for (unsigned int i = 0; i < dofs_per_face; ++i)
2545 {
2546 Assert(n_face_orientations == 1, ExcNotImplemented());
2547
2548 const unsigned int i_ = reorientate(0, i);
2549 const unsigned int ind1 =
2550 index_array_hermite[0][2 * i] * n_lanes;
2551 const unsigned int ind2 =
2552 index_array_hermite[0][2 * i + 1] * n_lanes;
2553 proc.hermite_grad_vectorized_indexed(
2554 temp1[i_],
2555 temp1[i_ + dofs_per_face],
2556 vector_ptr + ind1,
2557 vector_ptr + ind2,
2558 grad_weight,
2559 dof_indices,
2560 dof_indices);
2561 }
2562 }
2563 else
2564 {
2565 for (unsigned int i = 0; i < dofs_per_face; ++i)
2566 {
2567 Assert(n_face_orientations == 1, ExcNotImplemented());
2568
2569 const unsigned int i_ = reorientate(0, i);
2570 const unsigned int ind = index_array_nodal[0][i] * n_lanes;
2571 proc.value_vectorized_indexed(temp1[i_],
2572 vector_ptr + ind,
2573 dof_indices);
2574 }
2575 }
2576 }
2577
2578 // case 3: contiguous and interleaved indices with mixed stride
2579 else if (n_face_orientations == 1 &&
2580 dof_info.index_storage_variants[dof_access_index][cell] ==
2581 MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
2582 interleaved_contiguous_mixed_strides)
2583 {
2584 const unsigned int *strides =
2585 &dof_info.dof_indices_interleave_strides[dof_access_index]
2586 [cell * n_lanes];
2587 unsigned int indices[n_lanes];
2588 for (unsigned int v = 0; v < n_lanes; ++v)
2589 indices[v] = dof_indices[v] + index_offset * strides[v];
2590 const unsigned int n_filled_lanes =
2591 dof_info.n_vectorization_lanes_filled[dof_access_index][cell];
2592
2593 if (fe_degree > 1 && (evaluation_flag & EvaluationFlags::gradients))
2594 {
2595 if (n_filled_lanes == n_lanes)
2596 for (unsigned int i = 0; i < dofs_per_face; ++i)
2597 {
2598 Assert(n_face_orientations == 1, ExcNotImplemented());
2599
2600 const unsigned int i_ = reorientate(0, i);
2601 unsigned int ind1[n_lanes];
2602 DEAL_II_OPENMP_SIMD_PRAGMA
2603 for (unsigned int v = 0; v < n_lanes; ++v)
2604 ind1[v] = indices[v] +
2605 index_array_hermite[0][2 * i] * strides[v];
2606 unsigned int ind2[n_lanes];
2607 DEAL_II_OPENMP_SIMD_PRAGMA
2608 for (unsigned int v = 0; v < n_lanes; ++v)
2609 ind2[v] =
2610 indices[v] +
2611 // TODO
2612 index_array_hermite[0][2 * i + 1] * strides[v];
2613 proc.hermite_grad_vectorized_indexed(
2614 temp1[i_],
2615 temp1[i_ + dofs_per_face],
2616 global_vector_ptr,
2617 global_vector_ptr,
2618 grad_weight,
2619 ind1,
2620 ind2);
2621 }
2622 else
2623 {
2624 if (integrate == false)
2625 for (unsigned int i = 0; i < 2 * dofs_per_face; ++i)
2626 temp1[i] = VectorizedArrayType();
2627
2628 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2629 for (unsigned int i = 0; i < dofs_per_face; ++i)
2630 {
2631 const unsigned int i_ =
2632 reorientate(n_face_orientations == 1 ? 0 : v, i);
2633 proc.hermite_grad(
2634 temp1[i_][v],
2635 temp1[i_ + dofs_per_face][v],
2636 global_vector_ptr
2637 [indices[v] +
2638 index_array_hermite
2639 [n_face_orientations == 1 ? 0 : v][2 * i] *
2640 strides[v]],
2641 global_vector_ptr
2642 [indices[v] +
2643 index_array_hermite[n_face_orientations == 1 ?
2644 0 :
2645 v][2 * i + 1] *
2646 strides[v]],
2647 grad_weight[n_face_orientations == 1 ? 0 : v]);
2648 }
2649 }
2650 }
2651 else
2652 {
2653 if (n_filled_lanes == n_lanes)
2654 for (unsigned int i = 0; i < dofs_per_face; ++i)
2655 {
2656 Assert(n_face_orientations == 1, ExcInternalError());
2657 unsigned int ind[n_lanes];
2658 DEAL_II_OPENMP_SIMD_PRAGMA
2659 for (unsigned int v = 0; v < n_lanes; ++v)
2660 ind[v] =
2661 indices[v] + index_array_nodal[0][i] * strides[v];
2662 const unsigned int i_ = reorientate(0, i);
2663 proc.value_vectorized_indexed(temp1[i_],
2664 global_vector_ptr,
2665 ind);
2666 }
2667 else
2668 {
2669 if (integrate == false)
2670 for (unsigned int i = 0; i < dofs_per_face; ++i)
2671 temp1[i] = VectorizedArrayType();
2672
2673 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2674 for (unsigned int i = 0; i < dofs_per_face; ++i)
2675 proc.value(
2676 temp1[reorientate(n_face_orientations == 1 ? 0 : v,
2677 i)][v],
2678 global_vector_ptr
2679 [indices[v] +
2680 index_array_nodal[n_face_orientations == 1 ? 0 : v]
2681 [i] *
2682 strides[v]]);
2683 }
2684 }
2685 }
2686
2687 // case 4: contiguous indices without interleaving
2688 else if (n_face_orientations > 1 ||
2689 dof_info.index_storage_variants[dof_access_index][cell] ==
2690 MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
2691 contiguous)
2692 {
2693 Number2_ *vector_ptr = global_vector_ptr + index_offset;
2694
2695 const bool vectorization_possible =
2696 all_faces_are_same && (sm_ptr == nullptr);
2697
2698 std::array<Number2_ *, n_lanes> vector_ptrs;
2699 std::array<unsigned int, n_lanes> reordered_indices;
2700
2701 if (vectorization_possible == false)
2702 {
2703 vector_ptrs = {};
2704 if (n_face_orientations == 1)
2705 {
2706 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2707 if (sm_ptr == nullptr)
2708 {
2709 vector_ptrs[v] = vector_ptr + dof_indices[v];
2710 }
2711 else
2712 {
2713 const auto &temp =
2714 dof_info
2715 .dof_indices_contiguous_sm[dof_access_index]
2716 [cell * n_lanes + v];
2717 vector_ptrs[v] = const_cast<Number2_ *>(
2718 sm_ptr->operator[](temp.first).data() +
2719 temp.second + index_offset);
2720 }
2721 }
2722 else if (n_face_orientations == n_lanes)
2723 {
2724 const auto &cells = fe_eval.get_cell_ids();
2725 for (unsigned int v = 0; v < n_lanes; ++v)
2726 if (cells[v] != numbers::invalid_unsigned_int)
2727 {
2728 if (sm_ptr == nullptr)
2729 {
2730 vector_ptrs[v] =
2731 vector_ptr +
2732 dof_info
2733 .dof_indices_contiguous[dof_access_index]
2734 [cells[v]];
2735 }
2736 else
2737 {
2738 const auto &temp =
2739 dof_info
2740 .dof_indices_contiguous_sm[dof_access_index]
2741 [cells[v]];
2742 vector_ptrs[v] = const_cast<Number2_ *>(
2743 sm_ptr->operator[](temp.first).data() +
2744 temp.second + index_offset);
2745 }
2746 }
2747 }
2748 else
2749 {
2750 DEAL_II_ASSERT_UNREACHABLE();
2751 }
2752 }
2753 else if (n_face_orientations == n_lanes)
2754 {
2755 for (unsigned int v = 0; v < n_lanes; ++v)
2756 reordered_indices[v] =
2757 dof_info.dof_indices_contiguous[dof_access_index]
2758 [fe_eval.get_cell_ids()[v]];
2759 dof_indices = reordered_indices.data();
2760 }
2761
2762 if (fe_degree > 1 && (evaluation_flag & EvaluationFlags::gradients))
2763 {
2764 if (vectorization_possible)
2765 for (unsigned int i = 0; i < dofs_per_face; ++i)
2766 {
2767 const unsigned int ind1 = index_array_hermite[0][2 * i];
2768 const unsigned int ind2 =
2769 index_array_hermite[0][2 * i + 1];
2770 const unsigned int i_ = reorientate(0, i);
2771
2772 proc.hermite_grad_vectorized_indexed(
2773 temp1[i_],
2774 temp1[i_ + dofs_per_face],
2775 vector_ptr + ind1,
2776 vector_ptr + ind2,
2777 grad_weight,
2778 dof_indices,
2779 dof_indices);
2780 }
2781 else if (n_face_orientations == 1)
2782 for (unsigned int i = 0; i < dofs_per_face; ++i)
2783 {
2784 const unsigned int ind1 = index_array_hermite[0][2 * i];
2785 const unsigned int ind2 =
2786 index_array_hermite[0][2 * i + 1];
2787 const unsigned int i_ = reorientate(0, i);
2788
2789 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2790 proc.hermite_grad(temp1[i_][v],
2791 temp1[i_ + dofs_per_face][v],
2792 vector_ptrs[v][ind1],
2793 vector_ptrs[v][ind2],
2794 grad_weight[v]);
2795
2796 if (integrate == false)
2797 for (unsigned int v = n_filled_lanes; v < n_lanes; ++v)
2798 {
2799 temp1[i][v] = 0.0;
2800 temp1[i + dofs_per_face][v] = 0.0;
2801 }
2802 }
2803 else
2804 {
2805 if (integrate == false && n_filled_lanes < n_lanes)
2806 for (unsigned int i = 0; i < dofs_per_face; ++i)
2807 temp1[i] = temp1[i + dofs_per_face] = Number();
2808
2809 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2810 for (unsigned int i = 0; i < dofs_per_face; ++i)
2811 proc.hermite_grad(
2812 temp1[reorientate(v, i)][v],
2813 temp1[reorientate(v, i) + dofs_per_face][v],
2814 vector_ptrs[v][index_array_hermite[v][2 * i]],
2815 vector_ptrs[v][index_array_hermite[v][2 * i + 1]],
2816 grad_weight[v]);
2817 }
2818 }
2819 else
2820 {
2821 if (vectorization_possible)
2822 for (unsigned int i = 0; i < dofs_per_face; ++i)
2823 {
2824 const unsigned int ind = index_array_nodal[0][i];
2825 const unsigned int i_ = reorientate(0, i);
2826
2827 proc.value_vectorized_indexed(temp1[i_],
2828 vector_ptr + ind,
2829 dof_indices);
2830 }
2831 else if (n_face_orientations == 1)
2832 for (unsigned int i = 0; i < dofs_per_face; ++i)
2833 {
2834 const unsigned int ind = index_array_nodal[0][i];
2835 const unsigned int i_ = reorientate(0, i);
2836
2837 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2838 proc.value(temp1[i_][v], vector_ptrs[v][ind]);
2839
2840 if (integrate == false)
2841 for (unsigned int v = n_filled_lanes; v < n_lanes; ++v)
2842 temp1[i_][v] = 0.0;
2843 }
2844 else
2845 {
2846 if (integrate == false && n_filled_lanes < n_lanes)
2847 for (unsigned int i = 0; i < dofs_per_face; ++i)
2848 temp1[i] = Number();
2849
2850 for (unsigned int v = 0; v < n_filled_lanes; ++v)
2851 for (unsigned int i = 0; i < dofs_per_face; ++i)
2852 proc.value(temp1[reorientate(v, i)][v],
2853 vector_ptrs[v][index_array_nodal[v][i]]);
2854 }
2855 }
2856 }
2857 else
2858 {
2859 // We should not end up here, this should be caught by
2860 // FEFaceEvaluationImplGatherEvaluateSelector::supports()
2861 DEAL_II_ASSERT_UNREACHABLE();
2862 }
2863 temp1 += 3 * dofs_per_face;
2864 }
2865 }
2866
2867
2868
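// The selector below implements the fused gather + evaluate path that user
// code reaches through FEFaceEvaluation::gather_evaluate(). A minimal usage
// sketch (the MatrixFree object 'matrix_free', the vector 'src', and the
// face range are placeholders, not part of this file):
//
//   FEFaceEvaluation<dim, fe_degree> phi(matrix_free, /*is_interior_face=*/true);
//   for (unsigned int face = face_range.first; face < face_range.second; ++face)
//     {
//       phi.reinit(face);
//       phi.gather_evaluate(src,
//                           EvaluationFlags::values | EvaluationFlags::gradients);
//       // ... use phi.get_value(q) and phi.get_gradient(q) ...
//     }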
2869 template <int dim, typename Number2, typename VectorizedArrayType>
2870 struct FEFaceEvaluationImplGatherEvaluateSelector
2871 {
2872 using Number = typename VectorizedArrayType::value_type;
2873
2874 template <int fe_degree, int n_q_points_1d>
2875 static bool
2876 run(const unsigned int n_components,
2877 const EvaluationFlags::EvaluationFlags evaluation_flag,
2878 const Number2 *src_ptr,
2879 const std::vector<ArrayView<const Number2>> *sm_ptr,
2880 FEEvaluationData<dim, VectorizedArrayType, true> &fe_eval)
2881 {
2882 Assert(fe_degree > -1, ExcInternalError());
2883 Assert(fe_eval.get_shape_info().element_type <=
2886
2887 const unsigned int dofs_per_face = Utilities::pow(fe_degree + 1, dim - 1);
2888
2889 VectorizedArrayType *temp = fe_eval.get_scratch_data().begin();
2890 VectorizedArrayType *scratch_data =
2891 temp + 3 * n_components * dofs_per_face;
2892
2893 Processor<fe_degree> p;
2894
2895 if (fe_eval.get_dof_access_index() ==
2896 MatrixFreeFunctions::DoFInfo::dof_access_cell &&
2897 fe_eval.is_interior_face() == false)
2898 fe_face_evaluation_process_and_io<VectorizedArrayType::size()>(
2899 p, n_components, evaluation_flag, src_ptr, sm_ptr, fe_eval, temp);
2900 else
2901 fe_face_evaluation_process_and_io<1>(
2902 p, n_components, evaluation_flag, src_ptr, sm_ptr, fe_eval, temp);
2903
2904 const unsigned int subface_index = fe_eval.get_subface_index();
2905
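// Faces against refined neighbors (subface_index set) cannot use the
// even-odd symmetrized 1d kernels, hence the dispatch between the symmetric
// and the general variant of the in-face evaluator.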
2906 if (subface_index >= GeometryInfo<dim>::max_children_per_cell)
2907 FEFaceEvaluationImpl<true,
2908 dim,
2909 fe_degree,
2910 n_q_points_1d,
2911 VectorizedArrayType>::
2912 evaluate_in_face(n_components,
2913 evaluation_flag,
2914 fe_eval.get_shape_info().data.front(),
2915 temp,
2916 fe_eval.begin_values(),
2917 fe_eval.begin_gradients(),
2918 fe_eval.begin_hessians(),
2919 scratch_data,
2920 subface_index);
2921 else
2922 FEFaceEvaluationImpl<false,
2923 dim,
2924 fe_degree,
2925 n_q_points_1d,
2926 VectorizedArrayType>::
2927 evaluate_in_face(n_components,
2928 evaluation_flag,
2929 fe_eval.get_shape_info().data.front(),
2930 temp,
2931 fe_eval.begin_values(),
2932 fe_eval.begin_gradients(),
2933 fe_eval.begin_hessians(),
2934 scratch_data,
2935 subface_index);
2936
2937 // re-orientation for cases not possible with above algorithm
2938 if (subface_index < GeometryInfo<dim>::max_children_per_cell)
2939 {
2940 if (fe_eval.get_dof_access_index() ==
2941 MatrixFreeFunctions::DoFInfo::dof_access_cell &&
2942 fe_eval.is_interior_face() == false)
2943 {
2944 for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
2945 {
2946 // the loop breaks once an invalid_unsigned_int is hit for
2947 // all cases except the exterior faces in the ECL loop (where
2948 // some faces might be at the boundaries but others not)
2949 if (fe_eval.get_cell_ids()[v] ==
2950 numbers::invalid_unsigned_int)
2951 continue;
2952
2953 if (fe_eval.get_face_orientation(v) != 0)
2954 adjust_for_face_orientation_per_lane(
2955 dim,
2956 n_components,
2957 v,
2958 evaluation_flag,
2959 &fe_eval.get_shape_info().face_orientations_quad(
2960 fe_eval.get_face_orientation(v), 0),
2961 false,
2962 Utilities::pow(n_q_points_1d, dim - 1),
2963 &temp[0][0],
2964 fe_eval.begin_values(),
2965 fe_eval.begin_gradients(),
2966 fe_eval.begin_hessians());
2967 }
2968 }
2969 else if (fe_eval.get_face_orientation() != 0)
2970 adjust_for_face_orientation(
2971 dim,
2972 n_components,
2973 evaluation_flag,
2974 &fe_eval.get_shape_info().face_orientations_quad(
2975 fe_eval.get_face_orientation(), 0),
2976 false,
2977 Utilities::pow(n_q_points_1d, dim - 1),
2978 temp,
2979 fe_eval.begin_values(),
2980 fe_eval.begin_gradients(),
2981 fe_eval.begin_hessians());
2982 }
2983
2984 return false;
2985 }
2986
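// supports() decides whether the fast gather/evaluate path may be used at
// all: it requires shape functions that are nodal at the cell boundaries
// (and of Hermite type if gradients are requested), a valid vector pointer,
// and at least contiguous index storage; otherwise the caller falls back to
// the separate read_dof_values() + evaluate() path.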
2987 template <typename Number3>
2988 static bool
2989 supports(const EvaluationFlags::EvaluationFlags evaluation_flag,
2990 const MatrixFreeFunctions::ShapeInfo<Number3> &shape_info,
2991 const Number2 *vector_ptr,
2992 MatrixFreeFunctions::DoFInfo::IndexStorageVariants storage)
2993 {
2994 const unsigned int fe_degree = shape_info.data.front().fe_degree;
2995 if (fe_degree < 1 || !shape_info.data.front().nodal_at_cell_boundaries ||
2997 (fe_degree < 2 ||
2998 shape_info.data.front().element_type !=
3001 vector_ptr == nullptr ||
3002 shape_info.data.front().element_type >
3004 storage <
3006 return false;
3007 else
3008 return true;
3009 }
3010
3011 private:
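// Processor for the gather/evaluate direction: the global vector is the
// source and the temporary face array the destination. The case labels
// 1a)-3b) in the comments refer to the storage variants distinguished in
// fe_face_evaluation_process_and_io() above ('a' for Hermite value/gradient
// pairs, 'b' for values only).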
3012 template <int fe_degree>
3013 struct Processor
3014 {
3015 static const bool do_integrate = false;
3016 static const int dim_ = dim;
3017 static const int fe_degree_ = fe_degree;
3018 using VectorizedArrayType_ = VectorizedArrayType;
3020 using Number2_ = const Number2;
3021
3022 template <typename T0, typename T1, typename T2>
3023 void
3024 hermite_grad_vectorized(T0 &temp_1,
3025 T0 &temp_2,
3026 const T1 src_ptr_1,
3027 const T1 src_ptr_2,
3028 const T2 &grad_weight)
3029 {
3030 do_vectorized_read(src_ptr_1, temp_1);
3031 do_vectorized_read(src_ptr_2, temp_2);
3032 temp_2 = grad_weight * (temp_1 - temp_2);
3033 }
3034
3035 template <typename T1, typename T2>
3036 void
3037 value_vectorized(T1 &temp, const T2 src_ptr)
3038 {
3039 do_vectorized_read(src_ptr, temp);
3040 }
3041
3042 template <typename T0, typename T1, typename T2, typename T3>
3043 void
3044 hermite_grad_vectorized_indexed(T0 &temp_1,
3045 T0 &temp_2,
3046 const T1 src_ptr_1,
3047 const T1 src_ptr_2,
3048 const T2 &grad_weight,
3049 const T3 &indices_1,
3050 const T3 &indices_2)
3051 {
3052 do_vectorized_gather(src_ptr_1, indices_1, temp_1);
3053 do_vectorized_gather(src_ptr_2, indices_2, temp_2);
3054 temp_2 = grad_weight * (temp_1 - temp_2);
3055 }
3056
3057 template <typename T0, typename T1, typename T2>
3058 void
3059 value_vectorized_indexed(T0 &temp, const T1 src_ptr, const T2 &indices)
3060 {
3061 do_vectorized_gather(src_ptr, indices, temp);
3062 }
3063
3064 template <typename T0, typename T1, typename T2>
3065 void
3066 hermite_grad(T0 &temp_1,
3067 T0 &temp_2,
3068 const T1 &src_ptr_1,
3069 const T1 &src_ptr_2,
3070 const T2 &grad_weight)
3071 {
3072 // case 3a)
3073 temp_1 = src_ptr_1;
3074 temp_2 = grad_weight * (temp_1 - src_ptr_2);
3075 }
3076
3077 template <typename T1, typename T2>
3078 void
3079 value(T1 &temp, const T2 &src_ptr)
3080 {
3081 // case 3b)
3082 temp = src_ptr;
3083 }
3084 };
3085 };
3086
3087
3088
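// Counterpart of the selector above for the integration direction: the face
// contributions are first integrated in face coordinates and then
// scatter-added into the global vector, as reached from user code through
// FEFaceEvaluation::integrate_scatter().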
3089 template <int dim, typename Number2, typename VectorizedArrayType>
3090 struct FEFaceEvaluationImplIntegrateScatterSelector
3091 {
3092 using Number = typename VectorizedArrayType::value_type;
3093
3094 template <int fe_degree, int n_q_points_1d>
3095 static bool
3096 run(const unsigned int n_components,
3097 const EvaluationFlags::EvaluationFlags integration_flag,
3098 Number2 *dst_ptr,
3099 const std::vector<ArrayView<const Number2>> *sm_ptr,
3100 FEEvaluationData<dim, VectorizedArrayType, true> &fe_eval)
3101 {
3102 Assert(fe_degree > -1, ExcInternalError());
3103 Assert(fe_eval.get_shape_info().element_type <=
3106
3107 const unsigned int dofs_per_face = Utilities::pow(fe_degree + 1, dim - 1);
3108
3109 VectorizedArrayType *temp = fe_eval.get_scratch_data().begin();
3110 VectorizedArrayType *scratch_data =
3111 temp + 3 * n_components * dofs_per_face;
3112
3113 const unsigned int subface_index = fe_eval.get_subface_index();
3114
3115 // re-orientation for cases not possible with the io function below
3116 if (subface_index < GeometryInfo<dim>::max_children_per_cell)
3117 {
3118 if (fe_eval.get_dof_access_index() ==
3119 MatrixFreeFunctions::DoFInfo::dof_access_cell &&
3120 fe_eval.is_interior_face() == false)
3121 for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
3122 {
3123 // the loop breaks once an invalid_unsigned_int is hit for
3124 // all cases except the exterior faces in the ECL loop (where
3125 // some faces might be at the boundaries but others not)
3126 if (fe_eval.get_cell_ids()[v] == numbers::invalid_unsigned_int)
3127 continue;
3128
3129 if (fe_eval.get_face_orientation(v) != 0)
3130 adjust_for_face_orientation_per_lane(
3131 dim,
3132 n_components,
3133 v,
3134 integration_flag,
3135 &fe_eval.get_shape_info().face_orientations_quad(
3136 fe_eval.get_face_orientation(v), 0),
3137 true,
3138 Utilities::pow(n_q_points_1d, dim - 1),
3139 &temp[0][0],
3140 fe_eval.begin_values(),
3141 fe_eval.begin_gradients(),
3142 fe_eval.begin_hessians());
3143 }
3144 else if (fe_eval.get_face_orientation() != 0)
3145 adjust_for_face_orientation(
3146 dim,
3147 n_components,
3148 integration_flag,
3149 &fe_eval.get_shape_info().face_orientations_quad(
3150 fe_eval.get_face_orientation(), 0),
3151 true,
3152 Utilities::pow(n_q_points_1d, dim - 1),
3153 temp,
3154 fe_eval.begin_values(),
3155 fe_eval.begin_gradients(),
3156 fe_eval.begin_hessians());
3157 }
3158
3159 if (fe_degree > -1 && fe_eval.get_subface_index() >=
3160 GeometryInfo<dim - 1>::max_children_per_cell)
3161 FEFaceEvaluationImpl<true,
3162 dim,
3163 fe_degree,
3164 n_q_points_1d,
3165 VectorizedArrayType>::
3166 integrate_in_face(n_components,
3167 integration_flag,
3168 fe_eval.get_shape_info().data.front(),
3169 temp,
3170 fe_eval.begin_values(),
3171 fe_eval.begin_gradients(),
3172 fe_eval.begin_hessians(),
3173 scratch_data,
3174 subface_index);
3175 else
3176 FEFaceEvaluationImpl<false,
3177 dim,
3178 fe_degree,
3179 n_q_points_1d,
3180 VectorizedArrayType>::
3181 integrate_in_face(n_components,
3182 integration_flag,
3183 fe_eval.get_shape_info().data.front(),
3184 temp,
3185 fe_eval.begin_values(),
3186 fe_eval.begin_gradients(),
3187 fe_eval.begin_hessians(),
3188 scratch_data,
3189 subface_index);
3190
3191 Processor<fe_degree> p;
3192
3193 if (fe_eval.get_dof_access_index() ==
3194 MatrixFreeFunctions::DoFInfo::dof_access_cell &&
3195 fe_eval.is_interior_face() == false)
3196 fe_face_evaluation_process_and_io<VectorizedArrayType::size()>(
3197 p, n_components, integration_flag, dst_ptr, sm_ptr, fe_eval, temp);
3198 else
3199 fe_face_evaluation_process_and_io<1>(
3200 p, n_components, integration_flag, dst_ptr, sm_ptr, fe_eval, temp);
3201
3202 return false;
3203 }
3204
3205 private:
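// Processor for the integrate/scatter direction: performs the transposed
// operations of the reader above and accumulates (+=) into the global
// vector, so that contributions from different faces and cells sum up
// correctly.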
3206 template <int fe_degree>
3207 struct Processor
3208 {
3209 static const bool do_integrate = true;
3210 static const int dim_ = dim;
3211 static const int fe_degree_ = fe_degree;
3212 using VectorizedArrayType_ = VectorizedArrayType;
3214 using Number2_ = Number2;
3215
3216 template <typename T0, typename T1, typename T2, typename T3, typename T4>
3217 void
3218 hermite_grad_vectorized(const T0 &temp_1,
3219 const T1 &temp_2,
3220 T2 dst_ptr_1,
3221 T3 dst_ptr_2,
3222 const T4 &grad_weight)
3223 {
3224 // case 1a)
3225 const VectorizedArrayType val = temp_1 - grad_weight * temp_2;
3226 const VectorizedArrayType grad = grad_weight * temp_2;
3227 do_vectorized_add(val, dst_ptr_1);
3228 do_vectorized_add(grad, dst_ptr_2);
3229 }
3230
3231 template <typename T0, typename T1>
3232 void
3233 value_vectorized(const T0 &temp, T1 dst_ptr)
3234 {
3235 // case 1b)
3236 do_vectorized_add(temp, dst_ptr);
3237 }
3238
3239 template <typename T0, typename T1, typename T2, typename T3>
3240 void
3241 hermite_grad_vectorized_indexed(const T0 &temp_1,
3242 const T0 &temp_2,
3243 T1 dst_ptr_1,
3244 T1 dst_ptr_2,
3245 const T2 &grad_weight,
3246 const T3 &indices_1,
3247 const T3 &indices_2)
3248 {
3249 // case 2a)
3250 const VectorizedArrayType val = temp_1 - grad_weight * temp_2;
3251 const VectorizedArrayType grad = grad_weight * temp_2;
3252 do_vectorized_scatter_add(val, indices_1, dst_ptr_1);
3253 do_vectorized_scatter_add(grad, indices_2, dst_ptr_2);
3254 }
3255
3256 template <typename T0, typename T1, typename T2>
3257 void
3258 value_vectorized_indexed(const T0 &temp, T1 dst_ptr, const T2 &indices)
3259 {
3260 // case 2b)
3261 do_vectorized_scatter_add(temp, indices, dst_ptr);
3262 }
3263
3264 template <typename T0, typename T1, typename T2>
3265 void
3266 hermite_grad(const T0 &temp_1,
3267 const T0 &temp_2,
3268 T1 &dst_ptr_1,
3269 T1 &dst_ptr_2,
3270 const T2 &grad_weight)
3271 {
3272 // case 3a)
3273 const Number val = temp_1 - grad_weight * temp_2;
3274 const Number grad = grad_weight * temp_2;
3275 dst_ptr_1 += val;
3276 dst_ptr_2 += grad;
3277 }
3278
3279 template <typename T0, typename T1>
3280 void
3281 value(const T0 &temp, T1 &dst_ptr)
3282 {
3283 // case 3b)
3284 dst_ptr += temp;
3285 }
3286 };
3287 };
3288} // end of namespace internal
3289
3290
3291 DEAL_II_NAMESPACE_CLOSE
3292
3293#endif