Let's start with initial values and choose zero for both the value @f$u@f$ as
well as its time derivative, the velocity @f$v@f$:

@code
  template <int dim>
  class InitialValuesU : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> & /*p*/,
                         const unsigned int component = 0) const override
    {
      Assert(component == 0, ExcIndexRange(component, 0, 1));
      return 0;
    }
  };

  template <int dim>
  class InitialValuesV : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> & /*p*/,
                         const unsigned int component = 0) const override
    {
      Assert(component == 0, ExcIndexRange(component, 0, 1));
      return 0;
    }
  };
@endcode
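As an aside, since both of these classes simply return zero, one could do
without them entirely. A minimal sketch of the alternative (not what this
program does, in the interest of showing the general pattern) would be to pass
the library's Functions::ZeroFunction to the projection in run() directly:

@code
  // Sketch: Functions::ZeroFunction<dim> represents the constant zero
  // function and could stand in for InitialValuesU and InitialValuesV.
  VectorTools::project(dof_handler,
                       constraints,
                       QGauss<dim>(fe.degree + 1),
                       Functions::ZeroFunction<dim>(),
                       old_solution_u);
@endcode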
Secondly, we have the right hand side forcing term. Boring as we are, we
choose zero here as well:

@code
  template <int dim>
  class RightHandSide : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> & /*p*/,
                         const unsigned int component = 0) const override
    {
      Assert(component == 0, ExcIndexRange(component, 0, 1));
      return 0;
    }
  };
@endcode
Finally, we have boundary values for @f$u@f$ and @f$v@f$. They are as described
in the introduction, one being the time derivative of the other:

@code
  template <int dim>
  class BoundaryValuesU : public Function<dim>
  {
  public:
    virtual double value(const Point<dim>  &p,
                         const unsigned int component = 0) const override
    {
      Assert(component == 0, ExcIndexRange(component, 0, 1));

      if ((this->get_time() <= 0.5) && (p[0] < 0) && (p[1] < 1. / 3) &&
          (p[1] > -1. / 3))
        return std::sin(this->get_time() * 4 * numbers::PI);
      else
        return 0;
    }
  };

  template <int dim>
  class BoundaryValuesV : public Function<dim>
  {
  public:
    virtual double value(const Point<dim>  &p,
                         const unsigned int component = 0) const override
    {
      Assert(component == 0, ExcIndexRange(component, 0, 1));

      if ((this->get_time() <= 0.5) && (p[0] < 0) && (p[1] < 1. / 3) &&
          (p[1] > -1. / 3))
        return (std::cos(this->get_time() * 4 * numbers::PI) * 4 * numbers::PI);
      else
        return 0;
    }
  };
@endcode
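A quick consistency check shows that the two classes indeed encode a function
and its time derivative: on the part of the boundary where the source acts,
BoundaryValuesU prescribes @f$u(t)=\sin(4\pi t)@f$, and differentiating in time
gives
@f[
  v(t) = \frac{\partial u}{\partial t} = 4\pi \cos(4\pi t),
@f]
which is exactly the value BoundaryValuesV returns.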
<a name="step_23-ImplementationofthecodeWaveEquationcodeclass"></a>
<h3>Implementation of the <code>WaveEquation</code> class</h3>

The implementation of the actual logic is fairly short, since we
relegate things like assembling the matrices and right hand side vectors
to the library. The rest boils down to not much more than 130 lines of
actual code, a significant fraction of which is boilerplate code that can
be taken from previous example programs (e.g. the functions that solve
linear systems, or that generate output).
Let's start with the constructor (for an explanation of the choice of
time step, see the section on Courant, Friedrichs, and Lewy in the
introduction):

@code
  template <int dim>
  WaveEquation<dim>::WaveEquation()
    : fe(1)
    , dof_handler(triangulation)
    , time_step(1. / 64)
    , time(time_step)
    , timestep_number(1)
    , theta(0.5)
  {}
@endcode
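A quick plausibility check of this time step (the arithmetic is ours, based on
the mesh created in setup_system() below): the domain @f$[-1,1]^2@f$ is refined
globally 7 times, so the mesh width is
@f[
  h = \frac{2}{2^7} = \frac{1}{64},
@f]
and since the wave speed in this problem is @f$c=1@f$, the choice
@f$k = 1/64 = h@f$ stays within the @f$k \le h/c@f$ guideline discussed in the
introduction.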
<a name="step_23-WaveEquationsetup_system"></a>
<h4>WaveEquation::setup_system</h4>

The next function is the one that sets up the mesh, DoFHandler, and
matrices and vectors at the beginning of the program, i.e. before the
first time step. The first few lines are pretty much standard if you've
read through the tutorial programs at least up to @ref step_6 "step-6":

@code
  template <int dim>
  void WaveEquation<dim>::setup_system()
  {
    GridGenerator::hyper_cube(triangulation, -1, 1);
    triangulation.refine_global(7);

    std::cout << "Number of active cells: " << triangulation.n_active_cells()
              << std::endl;

    dof_handler.distribute_dofs(fe);

    std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
              << std::endl
              << std::endl;

    DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp);
    sparsity_pattern.copy_from(dsp);
@endcode
Then comes a block where we have to initialize the 3 matrices we need
in the course of the program: the mass matrix, the Laplace matrix, and
the matrix @f$M+k^2\theta^2 A@f$ used when solving for @f$U^n@f$ in each time
step.

When setting up these matrices, note that they all make use of the same
sparsity pattern object. Finally, the reason why matrices and sparsity
patterns are separate objects in deal.II (unlike in many other finite
element or linear algebra classes) becomes clear: in a significant
fraction of applications, one has to hold several matrices that happen
to have the same sparsity pattern, and there is no reason for them not
to share this information, rather than re-building and wasting memory
on it several times.

After initializing all of these matrices, we call library functions
that build the Laplace and mass matrices. All they need is a DoFHandler
object and a quadrature formula object that is to be used for numerical
integration. Note that in many respects these functions are better than
what we would usually do in application programs, for example because
they automatically parallelize building the matrices if multiple
processors are available in a machine: for more information see the
documentation of WorkStream or the
@ref threads "Parallel computing with multiple processors"
module. The matrices for solving linear systems will be filled in the
run() method because we need to re-apply boundary conditions every time
step.

@code
    mass_matrix.reinit(sparsity_pattern);
    laplace_matrix.reinit(sparsity_pattern);
    matrix_u.reinit(sparsity_pattern);
    matrix_v.reinit(sparsity_pattern);

    MatrixCreator::create_mass_matrix(dof_handler,
                                      QGauss<dim>(fe.degree + 1),
                                      mass_matrix);
    MatrixCreator::create_laplace_matrix(dof_handler,
                                         QGauss<dim>(fe.degree + 1),
                                         laplace_matrix);
@endcode
The rest of the function is spent on setting vector sizes to the
correct value. The final line closes the hanging node constraints
object. Since we work on a uniformly refined mesh, no constraints exist
or have been computed (i.e. there was no need to call
DoFTools::make_hanging_node_constraints as in other programs), but we
need a constraints object in one place further down below anyway.

@code
    solution_u.reinit(dof_handler.n_dofs());
    solution_v.reinit(dof_handler.n_dofs());
    old_solution_u.reinit(dof_handler.n_dofs());
    old_solution_v.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());

    constraints.close();
  }
@endcode
<a name="step_23-WaveEquationsolve_uandWaveEquationsolve_v"></a>
<h4>WaveEquation::solve_u and WaveEquation::solve_v</h4>

The next two functions deal with solving the linear systems associated
with the equations for @f$U^n@f$ and @f$V^n@f$. Both are not particularly
interesting as they pretty much follow the scheme used in all the
previous tutorial programs.

One can experiment a little with preconditioners for the two matrices
we have to invert. As it turns out, however, for the matrices at hand
here, using Jacobi or SSOR preconditioners reduces the number of
iterations necessary to solve the linear system slightly, but due to the
cost of applying the preconditioner it is no win in terms of run-time. It
is not much of a loss either, but let's keep it simple and just do
without:

@code
  template <int dim>
  void WaveEquation<dim>::solve_u()
  {
    SolverControl            solver_control(1000, 1e-8 * system_rhs.l2_norm());
    SolverCG<Vector<double>> cg(solver_control);

    cg.solve(matrix_u, solution_u, system_rhs, PreconditionIdentity());

    std::cout << "   u-equation: " << solver_control.last_step()
              << " CG iterations." << std::endl;
  }

  template <int dim>
  void WaveEquation<dim>::solve_v()
  {
    SolverControl            solver_control(1000, 1e-8 * system_rhs.l2_norm());
    SolverCG<Vector<double>> cg(solver_control);

    cg.solve(matrix_v, solution_v, system_rhs, PreconditionIdentity());

    std::cout << "   v-equation: " << solver_control.last_step()
              << " CG iterations." << std::endl;
  }
@endcode
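Should you want to try one of the preconditioners mentioned above anyway, the
change is small. A sketch for solve_u(), using SSOR with relaxation parameter
1.2 (the value is our choice, as in step-6):

@code
  // Sketch: replace PreconditionIdentity() by an SSOR preconditioner.
  PreconditionSSOR<SparseMatrix<double>> preconditioner;
  preconditioner.initialize(matrix_u, 1.2);  // 1.2: SSOR relaxation parameter

  cg.solve(matrix_u, solution_u, system_rhs, preconditioner);
@endcode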
<a name="step_23-WaveEquationoutput_results"></a>
<h4>WaveEquation::output_results</h4>

Likewise, the following function is pretty much what we've done
before. The only thing worth mentioning is how here we generate a string
representation of the time step number padded with leading zeros to 3
character length using the Utilities::int_to_string function's second
argument:

@code
  template <int dim>
  void WaveEquation<dim>::output_results() const
  {
    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution_u, "U");
    data_out.add_data_vector(solution_v, "V");

    data_out.build_patches();

    const std::string filename =
      "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
@endcode

Like @ref step_15 "step-15", since we write output at every time step (and the
system we have to solve is relatively easy), we instruct DataOut to use the
zlib compression algorithm that is optimized for speed instead of disk
usage since otherwise plotting the output becomes a bottleneck:

@code
    DataOutBase::VtkFlags vtk_flags;
    vtk_flags.compression_level = DataOutBase::CompressionLevel::best_speed;
    data_out.set_flags(vtk_flags);
    std::ofstream output(filename);
    data_out.write_vtu(output);
  }
@endcode
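Each time step thus produces one <code>.vtu</code> file. If one wants to view
the files as a time series in ParaView with the correct time attached to each
file, one can additionally write a <code>.pvd</code> record listing all files
together with their simulation times. A minimal sketch, not part of this
program (a real implementation would store <code>times_and_names</code> as a
class member so it accumulates over the whole run):

@code
  // Sketch: accumulate (time, filename) pairs over the run ...
  static std::vector<std::pair<double, std::string>> times_and_names;
  times_and_names.emplace_back(time, filename);

  // ... and rewrite a ParaView .pvd master file referencing all of them.
  std::ofstream pvd_output("solution.pvd");
  DataOutBase::write_pvd_record(pvd_output, times_and_names);
@endcode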
<a name="step_23-WaveEquationrun"></a>
<h4>WaveEquation::run</h4>

The following is really the only interesting function of the program. It
contains the loop over all time steps, but before we get to that we have
to set up the grid, DoFHandler, and matrices. In addition, we have to
somehow get started with initial values. To this end, we use the
VectorTools::project function that takes an object that describes a
continuous function and computes the @f$L^2@f$ projection of this function
onto the finite element space described by the DoFHandler object. Can't
be any simpler than that:

@code
  template <int dim>
  void WaveEquation<dim>::run()
  {
    setup_system();

    VectorTools::project(dof_handler,
                         constraints,
                         QGauss<dim>(fe.degree + 1),
                         InitialValuesU<dim>(),
                         old_solution_u);
    VectorTools::project(dof_handler,
                         constraints,
                         QGauss<dim>(fe.degree + 1),
                         InitialValuesV<dim>(),
                         old_solution_v);
@endcode
The next thing is to loop over all the time steps until we reach the
end time (@f$T=5@f$ in this case). In each time step, we first have to
solve for @f$U^n@f$, using the equation
@f$(M^n + k^2\theta^2 A^n)U^n = (M^{n,n-1} - k^2\theta(1-\theta) A^{n,n-1})U^{n-1} + kM^{n,n-1}V^{n-1} + k\theta \left[k\theta F^n + k(1-\theta) F^{n-1} \right]@f$. Note
that we use the same mesh for all time steps, so that @f$M^n=M^{n,n-1}=M@f$
and @f$A^n=A^{n,n-1}=A@f$. What we therefore have to do first is to add up
@f$MU^{n-1} - k^2\theta(1-\theta) AU^{n-1} + kMV^{n-1}@f$ and the forcing
terms, and put the result into the <code>system_rhs</code> vector. (For
these additions, we need a temporary vector that we declare before the
loop to avoid repeated memory allocations in each time step.)

The one thing to realize here is how we communicate the time variable
to the object describing the right hand side: each object derived from
the Function class has a time field that can be set using the
Function::set_time function and read through Function::get_time. Using
this mechanism, all functions of space and time are therefore
considered functions of space evaluated at a particular time. This
matches well what we typically need in finite element programs, where
we almost always work on a single time step at a time, and where it
never happens that, for example, one would like to evaluate a
space-time function for all times at any given spatial location.
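As a small illustration of this mechanism (a standalone sketch, not code from
the program):

@code
  // A Function<dim> object is a function of space whose evaluation
  // depends on the time set beforehand:
  BoundaryValuesU<2> g;
  g.set_time(0.25);
  const double g_at_p = g.value(Point<2>(-1., 0.));  // evaluates g(x, t=0.25)
@endcode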
@code
    Vector<double> tmp(solution_u.size());
    Vector<double> forcing_terms(solution_u.size());

    for (; time <= 5; time += time_step, ++timestep_number)
      {
        std::cout << "Time step " << timestep_number << " at t=" << time
                  << std::endl;

        mass_matrix.vmult(system_rhs, old_solution_u);

        mass_matrix.vmult(tmp, old_solution_v);
        system_rhs.add(time_step, tmp);

        laplace_matrix.vmult(tmp, old_solution_u);
        system_rhs.add(-theta * (1 - theta) * time_step * time_step, tmp);

        RightHandSide<dim> rhs_function;
        rhs_function.set_time(time);
        VectorTools::create_right_hand_side(dof_handler,
                                            QGauss<dim>(fe.degree + 1),
                                            rhs_function,
                                            tmp);
        forcing_terms = tmp;
        forcing_terms *= theta * time_step;

        rhs_function.set_time(time - time_step);
        VectorTools::create_right_hand_side(dof_handler,
                                            QGauss<dim>(fe.degree + 1),
                                            rhs_function,
                                            tmp);

        forcing_terms.add((1 - theta) * time_step, tmp);

        system_rhs.add(theta * time_step, forcing_terms);
@endcode
After so constructing the right hand side vector of the first
equation, all we have to do is apply the correct boundary
values. As for the right hand side, this is a space-time function
evaluated at a particular time, which we interpolate at boundary
nodes and then use the result to apply boundary values as we
usually do. The result is then handed off to the solve_u() function:

@code
        {
          BoundaryValuesU<dim> boundary_values_u_function;
          boundary_values_u_function.set_time(time);

          std::map<types::global_dof_index, double> boundary_values;
          VectorTools::interpolate_boundary_values(dof_handler,
                                                   0,
                                                   boundary_values_u_function,
                                                   boundary_values);
@endcode
The matrix for solve_u() is the same in every time step, so one
could think that it is enough to do this only once at the
beginning of the simulation. However, since we need to apply
boundary values to the linear system (which eliminate some matrix
rows and columns and give contributions to the right hand side),
we have to refill the matrix in every time step before we
actually apply boundary data. The actual content is very simple:
it is the sum of the mass matrix and a weighted Laplace matrix:

@code
          matrix_u.copy_from(mass_matrix);
          matrix_u.add(theta * theta * time_step * time_step, laplace_matrix);
          MatrixTools::apply_boundary_values(boundary_values,
                                             matrix_u,
                                             solution_u,
                                             system_rhs);
        }
        solve_u();
@endcode
The second step, i.e. solving for @f$V^n@f$, works similarly, except
that this time the matrix on the left is the mass matrix (which we
copy again in order to be able to apply boundary conditions), and
the right hand side is @f$MV^{n-1} - k\left[ \theta A U^n +
(1-\theta) AU^{n-1}\right]@f$ plus forcing terms. Boundary values
are applied in the same way as before, except that now we have to
use the BoundaryValuesV class:

@code
        laplace_matrix.vmult(system_rhs, solution_u);
        system_rhs *= -theta * time_step;

        mass_matrix.vmult(tmp, old_solution_v);
        system_rhs += tmp;

        laplace_matrix.vmult(tmp, old_solution_u);
        system_rhs.add(-time_step * (1 - theta), tmp);

        system_rhs += forcing_terms;

        {
          BoundaryValuesV<dim> boundary_values_v_function;
          boundary_values_v_function.set_time(time);

          std::map<types::global_dof_index, double> boundary_values;
          VectorTools::interpolate_boundary_values(dof_handler,
                                                   0,
                                                   boundary_values_v_function,
                                                   boundary_values);
          matrix_v.copy_from(mass_matrix);
          MatrixTools::apply_boundary_values(boundary_values,
                                             matrix_v,
                                             solution_v,
                                             system_rhs);
        }
        solve_v();
@endcode
Finally, after both solution components have been computed, we
output the result, compute the energy in the solution, and go on to
the next time step after shifting the present solution into the
vectors that hold the solution at the previous time step. Note the
function SparseMatrix::matrix_norm_square that can compute
@f$\left<V^n,MV^n\right>@f$ and @f$\left<U^n,AU^n\right>@f$ in one step,
saving us the expense of a temporary vector and several lines of
code:

@code
        output_results();

        std::cout << "   Total energy: "
                  << (mass_matrix.matrix_norm_square(solution_v) +
                      laplace_matrix.matrix_norm_square(solution_u)) /
                       2
                  << std::endl;

        old_solution_u = solution_u;
        old_solution_v = solution_v;
      }
  }
@endcode
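As an aside, here is what the first of those two energy terms would look like
without SparseMatrix::matrix_norm_square, just to show what the function saves
us (a sketch, not code from the program):

@code
  // Sketch: the long form of mass_matrix.matrix_norm_square(solution_v).
  Vector<double> tmp_energy(solution_v.size());
  mass_matrix.vmult(tmp_energy, solution_v);   // tmp_energy = M V^n
  const double vMv = solution_v * tmp_energy;  // <V^n, M V^n>
@endcode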
<a name="step_23-Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>

What remains is the main function of the program. There is nothing here
that hasn't been shown in several of the previous programs:

@code
int main()
{
  try
    {
      using namespace Step23;

      WaveEquation<2> wave_equation_solver;
      wave_equation_solver.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
@endcode
<a name="step_23-Results"></a><h1>Results</h1>

When the program is run, it produces the following output:
@code
Number of active cells: 16384
Number of degrees of freedom: 16641

Time step 1 at t=0.015625
   u-equation: 8 CG iterations.
   v-equation: 22 CG iterations.
   Total energy: 1.17887
Time step 2 at t=0.03125
   u-equation: 8 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 2.9655
Time step 3 at t=0.046875
   u-equation: 8 CG iterations.
   v-equation: 21 CG iterations.
   Total energy: 4.33761
Time step 4 at t=0.0625
   u-equation: 7 CG iterations.
   v-equation: 21 CG iterations.
   Total energy: 5.35499
Time step 5 at t=0.078125
   u-equation: 7 CG iterations.
   v-equation: 21 CG iterations.
   Total energy: 6.18652
Time step 6 at t=0.09375
   u-equation: 7 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 6.6799

[...]

Time step 31 at t=0.484375
   u-equation: 7 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 21.9068
Time step 32 at t=0.5
   u-equation: 7 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 23.3394
Time step 33 at t=0.515625
   u-equation: 7 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 23.1019

[...]

Time step 319 at t=4.98438
   u-equation: 7 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 23.1019
Time step 320 at t=5
   u-equation: 7 CG iterations.
   v-equation: 20 CG iterations.
   Total energy: 23.1019
@endcode
What we see immediately is that the energy is constant at least after
@f$t=\frac 12@f$ (until which the boundary source term @f$g@f$ is nonzero, injecting
energy into the system).

In addition to the screen output, the program writes the solution of each time
step to an output file. If we process them adequately and paste them into a
movie, we get the following:

<img src="https://www.dealii.org/images/steps/developer/step-23.movie.gif" alt="Animation of the solution of step 23.">

The movie shows the generated wave nicely traveling through the domain and back,
being reflected at the clamped boundary. Some numerical noise is trailing the
wave, an artifact of a too-large mesh size that can be reduced by reducing the
mesh width and the time step.
<a name="step-23-extensions"></a>
<a name="step_23-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

If you want to explore a bit, try out some of the following things:
<ul>
  <li>Varying @f$\theta@f$. This gives different time stepping schemes, some of
  which are stable while others are not. Take a look at how the energy
  evolves as a function of time.

  <li>Different initial and boundary conditions, right hand sides.

  <li>More complicated domains or more refined meshes. Remember that the time
  step needs to be bounded by the mesh width, so changing the mesh should
  always involve also changing the time step. We will come back to this issue
  in @ref step_24 "step-24".
  <li>Variable coefficients: In real media, the wave speed is often
  variable. In particular, the "real" wave equation in realistic media would
  read
  @f[
     \rho(x) \frac{\partial^2 u}{\partial t^2}
     -
     \nabla \cdot
     a(x) \nabla u = f,
  @f]
  where @f$\rho(x)@f$ is the density of the material, and @f$a(x)@f$ is related to the
  stiffness coefficient. The wave speed is then @f$c=\sqrt{a/\rho}@f$.

  To make such a change, we would have to compute the mass and Laplace
  matrices with a variable coefficient. Fortunately, this isn't too hard: the
  MatrixCreator::create_mass_matrix and MatrixCreator::create_laplace_matrix
  functions accept an optional coefficient argument, and the necessary
  changes are therefore relatively small. On the other hand, care must be
  taken again to make sure the time step is within the allowed range.
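  For illustration, a sketch of what this might look like; the coefficient
  class and its values are made up for this example:

@code
  // Hypothetical stiffness coefficient a(x): a faster medium for x>0.
  template <int dim>
  class StiffnessCoefficient : public Function<dim>
  {
  public:
    virtual double value(const Point<dim> &p,
                         const unsigned int /*component*/ = 0) const override
    {
      return (p[0] > 0 ? 4. : 1.);
    }
  };

  // Pass the coefficient when building the Laplace matrix:
  const StiffnessCoefficient<dim> a;
  MatrixCreator::create_laplace_matrix(dof_handler,
                                       QGauss<dim>(fe.degree + 1),
                                       laplace_matrix,
                                       &a);
@endcode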
  <li>In the in-code comments, we discussed the fact that the matrices for
  solving for @f$U^n@f$ and @f$V^n@f$ need to be reset in every time step because of
  boundary conditions, even though the actual content does not change. It is
  possible to avoid copying by not eliminating columns in the linear systems,
  which is implemented by appending a @p false argument to the call:
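@code
    MatrixTools::apply_boundary_values(boundary_values,
                                       matrix_u,
                                       solution_u,
                                       system_rhs,
                                       false);  // do not eliminate columns
@endcode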
  <li>deal.II being a library that supports adaptive meshes, it would of course be
  nice if this program supported changing the mesh every few time steps. Given the
  structure of the solution — a wave that travels through the domain —
  it would seem appropriate if we only refined the mesh where the wave currently is,
  and not simply everywhere. It is intuitively clear that we should be able to
  save a significant number of cells this way. (Though upon further thought one
  realizes that this is really only the case in the initial stages of the simulation.
  After some time, for wave phenomena, the domain is filled with reflections of
  the initial wave going in every direction and filling every corner of the domain,
  at which point there is in general little to be gained from local mesh refinement.)
  To make adaptively changing meshes possible, there are basically two routes.
  The "correct" way would be to go back to the weak form we get using Rothe's
  method. For example, the first of the two equations to be solved in each time
  step looked like this:
  @f{eqnarray*}{
    (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
    (u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla u^{n-1},\nabla \varphi)
    +
    k(v^{n-1},\varphi)
    + k^2\theta
    \left[
    \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
    \right].
  @f}
  Now, note that we solve for @f$u^n@f$ on mesh @f${\mathbb T}^n@f$, and
  consequently the test functions @f$\varphi@f$ have to be from the space
  @f$V_h^n@f$ as well. As discussed in the introduction, terms like
  @f$(u^{n-1},\varphi)@f$ then require us to integrate the solution of the
  previous step (which may have been computed on a different mesh
  @f${\mathbb T}^{n-1}@f$) against the test functions of the current mesh,
  leading to a matrix @f$M^{n,n-1}@f$. This process of integrating shape
  functions from different meshes is, at best, awkward. It can be done,
  but because it is difficult to ensure that @f${\mathbb T}^{n-1}@f$ and
  @f${\mathbb T}^{n}@f$ differ by at most one level of refinement, one
  has to recursively match cells from both meshes. It is feasible to
  do this, but it leads to lengthy and not entirely obvious code.
  The second approach is the following: whenever we change the mesh,
  we simply interpolate the solution from the last time step on the old
  mesh to the new mesh, using the SolutionTransfer class. In other words,
  instead of the equation above, we would solve
  @f{eqnarray*}{
    (u^n,\varphi) + k^2\theta^2(\nabla u^n,\nabla \varphi) &=&
    (I^n u^{n-1},\varphi) - k^2\theta(1-\theta)(\nabla I^n u^{n-1},\nabla \varphi)
    +
    k(I^n v^{n-1},\varphi)
    + k^2\theta
    \left[
    \theta (f^n,\varphi) + (1-\theta) (f^{n-1},\varphi)
    \right],
  @f}
  where @f$I^n@f$ interpolates a given function onto mesh @f${\mathbb T}^n@f$.
  This is a much simpler approach because, in each time step, we no
  longer have to worry whether @f$u^{n-1},v^{n-1}@f$ were computed on the
  same mesh as we are using now or on a different mesh. Consequently,
  the only changes to the code necessary are the addition of a function
  that computes the error, marks cells for refinement, sets up a
  SolutionTransfer object, transfers the solution to the new mesh, and
  rebuilds matrices and right hand side vectors on the new mesh (a sketch
  of this mesh-change step follows after this list). Neither
  the functions building the matrices and right hand sides, nor the
  solvers need to be changed.
  While this second approach is, strictly speaking,
  not quite correct in the Rothe framework (it introduces an additional source
  of error, namely the interpolation), it is nevertheless what
  almost everyone solving time dependent equations does. We will use this
  method in @ref step_31 "step-31", for example.
</ul>
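In code, the mesh-change step of this second approach might look roughly like
the following sketch (the refinement criterion is omitted and all names are
placeholders; see the SolutionTransfer documentation for the authoritative
interface):

@code
  // Sketch: interpolate old_solution_u/old_solution_v to a refined mesh.
  std::vector<Vector<double>> previous_solutions = {old_solution_u,
                                                    old_solution_v};

  SolutionTransfer<dim> solution_transfer(dof_handler);

  triangulation.prepare_coarsening_and_refinement();
  solution_transfer.prepare_for_coarsening_and_refinement(previous_solutions);
  triangulation.execute_coarsening_and_refinement();

  dof_handler.distribute_dofs(fe);

  std::vector<Vector<double>> interpolated_solutions(2);
  for (Vector<double> &v : interpolated_solutions)
    v.reinit(dof_handler.n_dofs());
  solution_transfer.interpolate(previous_solutions, interpolated_solutions);

  old_solution_u = interpolated_solutions[0];
  old_solution_v = interpolated_solutions[1];
@endcode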
<a name="step_23-PlainProg"></a>
<h1> The plain program</h1>
@include "step-23.cc"