Reference documentation for deal.II version GIT relicensing-437-g81ec864850 2024-04-19 07:30:02+00:00
step-14.h
1) const
745 *   {
746 *   std::ofstream out(output_name_base + "-" +
747 *   std::to_string(this->refinement_cycle) + ".svg");
748 *   GridOut().write_svg(dof_handler.get_triangulation(), out);
749 *   }
750 *   } // namespace Evaluation
751 *  
752 *  
753 * @endcode
754 *
755 *
756 * <a name="step_14-TheLaplacesolverclasses"></a>
757 * <h3>The Laplace solver classes</h3>
758 *
759
760 *
761 * Next are the actual solver classes. Again, we discuss only the
762 * differences to the previous program.
763 *
764 * @code
765 *   namespace LaplaceSolver
766 *   {
767 * @endcode
768 *
769 *
770 * <a name="step_14-TheLaplacesolverbaseclass"></a>
771 * <h4>The Laplace solver base class</h4>
772 *
773
774 *
775 * This class is almost unchanged, with the exception that it declares two
776 * more functions: <code>output_solution</code> will be used to generate
777 * output files from the actual solutions computed by derived classes, and
778 * the <code>set_refinement_cycle</code> function, by which the testing
779 * framework stores the number of the current refinement cycle in a member
780 * variable of this class; this number is later used to generate filenames for the
781 * solution output.
782 *
783 * @code
784 *   template <int dim>
785 *   class Base
786 *   {
787 *   public:
788 *   Base(Triangulation<dim> &coarse_grid);
789 *   virtual ~Base() = default;
790 *  
791 *   virtual void solve_problem() = 0;
792 *   virtual void postprocess(
793 *   const Evaluation::EvaluationBase<dim> &postprocessor) const = 0;
794 *   virtual void refine_grid() = 0;
795 *   virtual unsigned int n_dofs() const = 0;
796 *  
797 *   virtual void set_refinement_cycle(const unsigned int cycle);
798 *  
799 *   virtual void output_solution() const = 0;
800 *  
801 *   protected:
802 *   const SmartPointer<Triangulation<dim>> triangulation;
803 *  
804 *   unsigned int refinement_cycle;
805 *   };
806 *  
807 *  
808 *   template <int dim>
809 *   Base<dim>::Base(Triangulation<dim> &coarse_grid)
810 *   : triangulation(&coarse_grid)
811 *   , refinement_cycle(numbers::invalid_unsigned_int)
812 *   {}
813 *  
814 *  
815 *  
816 *   template <int dim>
817 *   void Base<dim>::set_refinement_cycle(const unsigned int cycle)
818 *   {
819 *   refinement_cycle = cycle;
820 *   }
821 *  
822 *  
823 * @endcode
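 *
 * As a brief illustration of how this interface is meant to be driven (the
 * actual loop lives in the driver code near the end of this program; the
 * snippet below is only a sketch, and the function name
 * <code>run_refinement_loop</code> is made up for the example):
 *
 * @code
 *   // Sketch of a refinement loop driving any Base-derived solver:
 *   template <int dim>
 *   void run_refinement_loop(LaplaceSolver::Base<dim> &solver,
 *                            const unsigned int        n_cycles)
 *   {
 *     for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
 *       {
 *         solver.set_refinement_cycle(cycle);
 *         solver.solve_problem();
 *         solver.output_solution();
 *         // ... run Evaluation::EvaluationBase-derived postprocessors here ...
 *         if (cycle + 1 < n_cycles)
 *           solver.refine_grid();
 *       }
 *   }
 * @endcode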
824 *
825 *
826 * <a name="step_14-TheLaplaceSolverclass"></a>
827 * <h4>The Laplace Solver class</h4>
828 *
829
830 *
831 * Likewise, the <code>Solver</code> class is entirely unchanged and will
832 * thus not be discussed.
833 *
834 * @code
835 *   template <int dim>
836 *   class Solver : public virtual Base<dim>
837 *   {
838 *   public:
839 *   Solver(Triangulation<dim> &triangulation,
840 *   const FiniteElement<dim> &fe,
841 *   const Quadrature<dim> &quadrature,
842 *   const Quadrature<dim - 1> &face_quadrature,
843 *   const Function<dim> &boundary_values);
844 *   virtual ~Solver() override;
845 *  
846 *   virtual void solve_problem() override;
847 *  
848 *   virtual void postprocess(
849 *   const Evaluation::EvaluationBase<dim> &postprocessor) const override;
850 *  
851 *   virtual unsigned int n_dofs() const override;
852 *  
853 *   protected:
854 *   const SmartPointer<const FiniteElement<dim>> fe;
855 *   const SmartPointer<const Quadrature<dim>> quadrature;
856 *   const SmartPointer<const Quadrature<dim - 1>> face_quadrature;
857 *   DoFHandler<dim> dof_handler;
858 *   Vector<double> solution;
859 *   const SmartPointer<const Function<dim>> boundary_values;
860 *  
861 *   virtual void assemble_rhs(Vector<double> &rhs) const = 0;
862 *  
863 *   private:
864 *   struct LinearSystem
865 *   {
866 *   LinearSystem(const DoFHandler<dim> &dof_handler);
867 *  
868 *   void solve(Vector<double> &solution) const;
869 *  
870 *   AffineConstraints<double> hanging_node_constraints;
871 *   SparsityPattern sparsity_pattern;
872 *   SparseMatrix<double> matrix;
873 *   Vector<double> rhs;
874 *   };
875 *  
876 *  
877 * @endcode
878 *
879 * The remainder of the class is essentially a copy of @ref step_13 "step-13"
880 * as well, including the data structures and functions
881 * necessary to compute the linear system in parallel using the
882 * WorkStream framework:
883 *
884 * @code
885 *   struct AssemblyScratchData
886 *   {
887 *   AssemblyScratchData(const FiniteElement<dim> &fe,
888 *   const Quadrature<dim> &quadrature);
889 *   AssemblyScratchData(const AssemblyScratchData &scratch_data);
890 *  
891 *   FEValues<dim> fe_values;
892 *   };
893 *  
894 *   struct AssemblyCopyData
895 *   {
896 *   FullMatrix<double> cell_matrix;
897 *   std::vector<types::global_dof_index> local_dof_indices;
898 *   };
899 *  
900 *  
901 *   void assemble_linear_system(LinearSystem &linear_system);
902 *  
903 *   void local_assemble_matrix(
904 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
905 *   AssemblyScratchData &scratch_data,
906 *   AssemblyCopyData &copy_data) const;
907 *  
908 *  
909 *   void copy_local_to_global(const AssemblyCopyData &copy_data,
910 *   LinearSystem &linear_system) const;
911 *   };
912 *  
913 *  
914 *  
915 *   template <int dim>
916 *   Solver<dim>::Solver(Triangulation<dim> &triangulation,
917 *   const FiniteElement<dim> &fe,
918 *   const Quadrature<dim> &quadrature,
919 *   const Quadrature<dim - 1> &face_quadrature,
920 *   const Function<dim> &boundary_values)
921 *   : Base<dim>(triangulation)
922 *   , fe(&fe)
923 *   , quadrature(&quadrature)
924 *   , face_quadrature(&face_quadrature)
925 *   , dof_handler(triangulation)
926 *   , boundary_values(&boundary_values)
927 *   {}
928 *  
929 *  
930 *   template <int dim>
931 *   Solver<dim>::~Solver()
932 *   {
933 *   dof_handler.clear();
934 *   }
935 *  
936 *  
937 *   template <int dim>
938 *   void Solver<dim>::solve_problem()
939 *   {
940 *   dof_handler.distribute_dofs(*fe);
941 *   solution.reinit(dof_handler.n_dofs());
942 *  
943 *   LinearSystem linear_system(dof_handler);
944 *   assemble_linear_system(linear_system);
945 *   linear_system.solve(solution);
946 *   }
947 *  
948 *  
949 *   template <int dim>
950 *   void Solver<dim>::postprocess(
951 *   const Evaluation::EvaluationBase<dim> &postprocessor) const
952 *   {
953 *   postprocessor(dof_handler, solution);
954 *   }
955 *  
956 *  
957 *   template <int dim>
958 *   unsigned int Solver<dim>::n_dofs() const
959 *   {
960 *   return dof_handler.n_dofs();
961 *   }
962 *  
963 *  
964 * @endcode
965 *
966 * The following few functions and constructors are verbatim
967 * copies taken from @ref step_13 "step-13":
968 *
969 * @code
970 *   template <int dim>
971 *   void Solver<dim>::assemble_linear_system(LinearSystem &linear_system)
972 *   {
973 *   Threads::Task<void> rhs_task =
974 *   Threads::new_task(&Solver<dim>::assemble_rhs, *this, linear_system.rhs);
975 *  
976 *   auto worker =
977 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
978 *   AssemblyScratchData &scratch_data,
979 *   AssemblyCopyData &copy_data) {
980 *   this->local_assemble_matrix(cell, scratch_data, copy_data);
981 *   };
982 *  
983 *   auto copier = [this, &linear_system](const AssemblyCopyData &copy_data) {
984 *   this->copy_local_to_global(copy_data, linear_system);
985 *   };
986 *  
987 *   WorkStream::run(dof_handler.begin_active(),
988 *   dof_handler.end(),
989 *   worker,
990 *   copier,
991 *   AssemblyScratchData(*fe, *quadrature),
992 *   AssemblyCopyData());
993 *   linear_system.hanging_node_constraints.condense(linear_system.matrix);
994 *  
995 *   std::map<types::global_dof_index, double> boundary_value_map;
996 *   VectorTools::interpolate_boundary_values(dof_handler,
997 *   0,
998 *   *boundary_values,
999 *   boundary_value_map);
1000 *  
1001 *   rhs_task.join();
1002 *   linear_system.hanging_node_constraints.condense(linear_system.rhs);
1003 *  
1004 *   MatrixTools::apply_boundary_values(boundary_value_map,
1005 *   linear_system.matrix,
1006 *   solution,
1007 *   linear_system.rhs);
1008 *   }
1009 *  
1010 *  
1011 *   template <int dim>
1012 *   Solver<dim>::AssemblyScratchData::AssemblyScratchData(
1013 *   const FiniteElement<dim> &fe,
1014 *   const Quadrature<dim> &quadrature)
1015 *   : fe_values(fe, quadrature, update_gradients | update_JxW_values)
1016 *   {}
1017 *  
1018 *  
1019 *   template <int dim>
1020 *   Solver<dim>::AssemblyScratchData::AssemblyScratchData(
1021 *   const AssemblyScratchData &scratch_data)
1022 *   : fe_values(scratch_data.fe_values.get_fe(),
1023 *   scratch_data.fe_values.get_quadrature(),
1024 *   scratch_data.fe_values.get_update_flags())
1025 *   {}
1026 *  
1027 *  
1028 *   template <int dim>
1029 *   void Solver<dim>::local_assemble_matrix(
1030 *   const typename DoFHandler<dim>::active_cell_iterator &cell,
1031 *   AssemblyScratchData &scratch_data,
1032 *   AssemblyCopyData &copy_data) const
1033 *   {
1034 *   const unsigned int dofs_per_cell = fe->n_dofs_per_cell();
1035 *   const unsigned int n_q_points = quadrature->size();
1036 *  
1037 *   copy_data.cell_matrix.reinit(dofs_per_cell, dofs_per_cell);
1038 *  
1039 *   copy_data.local_dof_indices.resize(dofs_per_cell);
1040 *  
1041 *   scratch_data.fe_values.reinit(cell);
1042 *  
1043 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1044 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1045 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
1046 *   copy_data.cell_matrix(i, j) +=
1047 *   (scratch_data.fe_values.shape_grad(i, q_point) *
1048 *   scratch_data.fe_values.shape_grad(j, q_point) *
1049 *   scratch_data.fe_values.JxW(q_point));
1050 *  
1051 *   cell->get_dof_indices(copy_data.local_dof_indices);
1052 *   }
1053 *  
1054 *  
1055 *  
1056 *   template <int dim>
1057 *   void Solver<dim>::copy_local_to_global(const AssemblyCopyData &copy_data,
1058 *   LinearSystem &linear_system) const
1059 *   {
1060 *   for (unsigned int i = 0; i < copy_data.local_dof_indices.size(); ++i)
1061 *   for (unsigned int j = 0; j < copy_data.local_dof_indices.size(); ++j)
1062 *   linear_system.matrix.add(copy_data.local_dof_indices[i],
1063 *   copy_data.local_dof_indices[j],
1064 *   copy_data.cell_matrix(i, j));
1065 *   }
1066 *  
1067 *  
1068 * @endcode
1069 *
1070 * Now for the functions that implement actions in the linear
1071 * system class. First, the constructor initializes all data
1072 * elements to their correct sizes, and sets up a number of
1073 * additional data structures, such as constraints due to hanging
1074 * nodes. Since setting up the hanging node constraints and finding out about
1075 * the nonzero elements of the matrix are independent tasks, we do them
1076 * in parallel (if the library was configured to use concurrency,
1077 * at least; otherwise, the actions are performed
1078 * sequentially). Note that we start only one task, and do the
1079 * second action in the main thread. Since only one task is
1080 * created, we don't use the <code>Threads::TaskGroup</code>
1081 * class here, but rather use the single task object
1082 * directly to wait for this particular task to finish. The
1083 * approach is generally the same as the one we have used in
1084 * <code>Solver::assemble_linear_system()</code> above.
1085 *
1086
1087 *
1088 * Note that taking the address of the
1089 * <code>DoFTools::make_hanging_node_constraints</code> function
1090 * is a little tricky, since there are actually three functions of
1091 * this name, one for each supported space dimension. Taking
1092 * addresses of overloaded functions is somewhat complicated in
1093 * C++, since the address-of operator <code>&</code> in that case
1094 * returns a set of values (the addresses of all
1095 * functions with that name), and selecting the right one is then
1096 * the next step. If the context dictates which one to take (for
1097 * example by assigning to a function pointer of known type), then
1098 * the compiler can do that by itself, but if this set of pointers
1099 * is given as the argument to a function that takes a template
1100 * parameter, the compiler cannot decide which one to
1101 * pick. We therefore have to make it clear to the
1102 * compiler which one we would like to have; for this, we could
1103 * use a cast, but for more clarity, we assign it to a temporary
1104 * variable <code>mhnc_p</code> (short for <code>pointer to
1105 * make_hanging_node_constraints</code>) of the right type, and
1106 * then use this pointer instead.
1107 *
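 * The same disambiguation issue can be reproduced in a few lines of
 * standalone C++, independent of deal.II; the overload set
 * <code>print</code> below is purely illustrative:
 *
 * @code
 *   #include <iostream>
 *
 *   // Two functions sharing one name, standing in for the several
 *   // make_hanging_node_constraints() functions mentioned above:
 *   void print(int i)    { std::cout << "int: " << i << '\n'; }
 *   void print(double d) { std::cout << "double: " << d << '\n'; }
 *
 *   // A function template accepting any callable. Given the bare name
 *   // `print`, template argument deduction cannot tell which overload is meant.
 *   template <typename F, typename Arg>
 *   void call(F f, Arg a) { f(a); }
 *
 *   int main()
 *   {
 *     // Assigning to a pointer of known type selects one overload...
 *     void (*print_int)(int) = &print;
 *     call(print_int, 42);
 *
 *     // ...as does an explicit cast:
 *     call(static_cast<void (*)(double)>(&print), 3.14);
 *   }
 * @endcode
 *
 * With this in mind, here is the constructor of the
 * <code>LinearSystem</code> class:
 *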
1108 * @code
1109 *   template <int dim>
1110 *   Solver<dim>::LinearSystem::LinearSystem(const DoFHandler<dim> &dof_handler)
1111 *   {
1112 *   hanging_node_constraints.clear();
1113 *  
1114 *   void (*mhnc_p)(const DoFHandler<dim> &, AffineConstraints<double> &) =
1115 *   &DoFTools::make_hanging_node_constraints;
1116 *  
1117 * @endcode
1118 *
1119 * Start a side task then continue on the main thread
1120 *
1121 * @code
1122 *   Threads::Task<void> side_task =
1123 *   Threads::new_task(mhnc_p, dof_handler, hanging_node_constraints);
1124 *  
1125 *   DynamicSparsityPattern dsp(dof_handler.n_dofs(), dof_handler.n_dofs());
1126 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
1127 *  
1128 *  
1129 *  
1130 * @endcode
1131 *
1132 * Wait for the side task to be done before going further
1133 *
1134 * @code
1135 *   side_task.join();
1136 *  
1137 *   hanging_node_constraints.close();
1138 *   hanging_node_constraints.condense(dsp);
1139 *   sparsity_pattern.copy_from(dsp);
1140 *  
1141 *   matrix.reinit(sparsity_pattern);
1142 *   rhs.reinit(dof_handler.n_dofs());
1143 *   }
1144 *  
1145 *  
1146 *  
1147 *   template <int dim>
1148 *   void Solver<dim>::LinearSystem::solve(Vector<double> &solution) const
1149 *   {
1150 *   SolverControl solver_control(5000, 1e-12);
1151 *   SolverCG<Vector<double>> cg(solver_control);
1152 *  
1153 *   PreconditionSSOR<SparseMatrix<double>> preconditioner;
1154 *   preconditioner.initialize(matrix, 1.2);
1155 *  
1156 *   cg.solve(matrix, solution, rhs, preconditioner);
1157 *  
1158 *   hanging_node_constraints.distribute(solution);
1159 *   }
1160 *  
1161 *  
1162 *  
1163 * @endcode
1164 *
1165 *
1166 * <a name="step_14-ThePrimalSolverclass"></a>
1167 * <h4>The PrimalSolver class</h4>
1168 *
1169
1170 *
1171 * The <code>PrimalSolver</code> class is also mostly unchanged except for
1172 * implementing the <code>output_solution</code> function. We keep the
1173 * <code>GlobalRefinement</code> and <code>RefinementKelly</code> classes
1174 * in this program, and they can then rely on the default implementation
1175 * of this function which simply outputs the primal solution. The class
1176 * implementing dual weighted error estimators will overload this function
1177 * itself, to also output the dual solution.
1178 *
1179 * @code
1180 *   template <int dim>
1181 *   class PrimalSolver : public Solver<dim>
1182 *   {
1183 *   public:
1184 *   PrimalSolver(Triangulation<dim> &triangulation,
1185 *   const FiniteElement<dim> &fe,
1186 *   const Quadrature<dim> &quadrature,
1187 *   const Quadrature<dim - 1> &face_quadrature,
1188 *   const Function<dim> &rhs_function,
1189 *   const Function<dim> &boundary_values);
1190 *  
1191 *   virtual void output_solution() const override;
1192 *  
1193 *   protected:
1194 *   const SmartPointer<const Function<dim>> rhs_function;
1195 *   virtual void assemble_rhs(Vector<double> &rhs) const override;
1196 *   };
1197 *  
1198 *  
1199 *   template <int dim>
1200 *   PrimalSolver<dim>::PrimalSolver(Triangulation<dim> &triangulation,
1201 *   const FiniteElement<dim> &fe,
1202 *   const Quadrature<dim> &quadrature,
1203 *   const Quadrature<dim - 1> &face_quadrature,
1204 *   const Function<dim> &rhs_function,
1205 *   const Function<dim> &boundary_values)
1206 *   : Base<dim>(triangulation)
1207 *   , Solver<dim>(triangulation,
1208 *   fe,
1209 *   quadrature,
1210 *   face_quadrature,
1211 *   boundary_values)
1212 *   , rhs_function(&rhs_function)
1213 *   {}
1214 *  
1215 *  
1216 *  
1217 *   template <int dim>
1218 *   void PrimalSolver<dim>::output_solution() const
1219 *   {
1220 *   DataOut<dim> data_out;
1221 *   data_out.attach_dof_handler(this->dof_handler);
1222 *   data_out.add_data_vector(this->solution, "solution");
1223 *   data_out.build_patches();
1224 *  
1225 *   std::ofstream out("solution-" + std::to_string(this->refinement_cycle) +
1226 *   ".vtu");
1227 *   data_out.write(out, DataOutBase::vtu);
1228 *   }
1229 *  
1230 *  
1231 *  
1232 *   template <int dim>
1233 *   void PrimalSolver<dim>::assemble_rhs(Vector<double> &rhs) const
1234 *   {
1235 *   FEValues<dim> fe_values(*this->fe,
1236 *   *this->quadrature,
1237 *   update_values | update_quadrature_points |
1238 *   update_JxW_values);
1239 *  
1240 *   const unsigned int dofs_per_cell = this->fe->n_dofs_per_cell();
1241 *   const unsigned int n_q_points = this->quadrature->size();
1242 *  
1243 *   Vector<double> cell_rhs(dofs_per_cell);
1244 *   std::vector<double> rhs_values(n_q_points);
1245 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1246 *  
1247 *   for (const auto &cell : this->dof_handler.active_cell_iterators())
1248 *   {
1249 *   cell_rhs = 0;
1250 *  
1251 *   fe_values.reinit(cell);
1252 *  
1253 *   rhs_function->value_list(fe_values.get_quadrature_points(),
1254 *   rhs_values);
1255 *  
1256 *   for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
1257 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1258 *   cell_rhs(i) += (fe_values.shape_value(i, q_point) * // phi_i(x_q)
1259 *   rhs_values[q_point] * // f(x_q)
1260 *   fe_values.JxW(q_point)); // dx
1261 *  
1262 *   cell->get_dof_indices(local_dof_indices);
1263 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1264 *   rhs(local_dof_indices[i]) += cell_rhs(i);
1265 *   }
1266 *   }
1267 *  
1268 *  
1269 * @endcode
1270 *
1271 *
1272 * <a name="step_14-TheRefinementGlobalandRefinementKellyclasses"></a>
1273 * <h4>The RefinementGlobal and RefinementKelly classes</h4>
1274 *
1275
1276 *
1277 * For the following two classes, the same applies as for most of the
1278 * above: the class is taken from the previous example as-is:
1279 *
1280 * @code
1281 *   template <int dim>
1282 *   class RefinementGlobal : public PrimalSolver<dim>
1283 *   {
1284 *   public:
1285 *   RefinementGlobal(Triangulation<dim> &coarse_grid,
1286 *   const FiniteElement<dim> &fe,
1287 *   const Quadrature<dim> &quadrature,
1288 *   const Quadrature<dim - 1> &face_quadrature,
1289 *   const Function<dim> &rhs_function,
1290 *   const Function<dim> &boundary_values);
1291 *  
1292 *   virtual void refine_grid() override;
1293 *   };
1294 *  
1295 *  
1296 *  
1297 *   template <int dim>
1298 *   RefinementGlobal<dim>::RefinementGlobal(
1299 *   Triangulation<dim> &coarse_grid,
1300 *   const FiniteElement<dim> &fe,
1301 *   const Quadrature<dim> &quadrature,
1302 *   const Quadrature<dim - 1> &face_quadrature,
1303 *   const Function<dim> &rhs_function,
1304 *   const Function<dim> &boundary_values)
1305 *   : Base<dim>(coarse_grid)
1306 *   , PrimalSolver<dim>(coarse_grid,
1307 *   fe,
1308 *   quadrature,
1309 *   face_quadrature,
1310 *   rhs_function,
1311 *   boundary_values)
1312 *   {}
1313 *  
1314 *  
1315 *  
1316 *   template <int dim>
1317 *   void RefinementGlobal<dim>::refine_grid()
1318 *   {
1319 *   this->triangulation->refine_global(1);
1320 *   }
1321 *  
1322 *  
1323 *  
1324 *   template <int dim>
1325 *   class RefinementKelly : public PrimalSolver<dim>
1326 *   {
1327 *   public:
1328 *   RefinementKelly(Triangulation<dim> &coarse_grid,
1329 *   const FiniteElement<dim> &fe,
1330 *   const Quadrature<dim> &quadrature,
1331 *   const Quadrature<dim - 1> &face_quadrature,
1332 *   const Function<dim> &rhs_function,
1333 *   const Function<dim> &boundary_values);
1334 *  
1335 *   virtual void refine_grid() override;
1336 *   };
1337 *  
1338 *  
1339 *  
1340 *   template <int dim>
1341 *   RefinementKelly<dim>::RefinementKelly(
1342 *   Triangulation<dim> &coarse_grid,
1343 *   const FiniteElement<dim> &fe,
1344 *   const Quadrature<dim> &quadrature,
1345 *   const Quadrature<dim - 1> &face_quadrature,
1346 *   const Function<dim> &rhs_function,
1347 *   const Function<dim> &boundary_values)
1348 *   : Base<dim>(coarse_grid)
1349 *   , PrimalSolver<dim>(coarse_grid,
1350 *   fe,
1351 *   quadrature,
1352 *   face_quadrature,
1353 *   rhs_function,
1354 *   boundary_values)
1355 *   {}
1356 *  
1357 *  
1358 *  
1359 *   template <int dim>
1360 *   void RefinementKelly<dim>::refine_grid()
1361 *   {
1362 *   Vector<float> estimated_error_per_cell(
1363 *   this->triangulation->n_active_cells());
1364 *   KellyErrorEstimator<dim>::estimate(
1365 *   this->dof_handler,
1366 *   QGauss<dim - 1>(this->fe->degree + 1),
1367 *   std::map<types::boundary_id, const Function<dim> *>(),
1368 *   this->solution,
1369 *   estimated_error_per_cell);
1370 *   GridRefinement::refine_and_coarsen_fixed_number(*this->triangulation,
1371 *   estimated_error_per_cell,
1372 *   0.3,
1373 *   0.03);
1374 *   this->triangulation->execute_coarsening_and_refinement();
1375 *   }
1376 *  
1377 *  
1378 *  
1379 * @endcode
1380 *
1381 *
1382 * <a name="step_14-TheRefinementWeightedKellyclass"></a>
1383 * <h4>The RefinementWeightedKelly class</h4>
1384 *
1385
1386 *
1387 * This class is a variant of the previous one, in that it allows us to
1388 * weight the refinement indicators we get from the library's Kelly
1389 * indicator by some function. We include this class since the goal of
1390 * this example program is to demonstrate automatic refinement criteria
1391 * even for complex output quantities such as point values or stresses. If
1392 * we did not solve a dual problem and compute the weights thereof, we
1393 * would probably be tempted to give a hand-crafted weighting to the
1394 * indicators to account for the fact that we are going to evaluate these
1395 * quantities. This class accepts such a weighting function as argument to
1396 * its constructor:
1397 *
1398 * @code
1399 *   template <int dim>
1400 *   class RefinementWeightedKelly : public PrimalSolver<dim>
1401 *   {
1402 *   public:
1403 *   RefinementWeightedKelly(Triangulation<dim> &coarse_grid,
1404 *   const FiniteElement<dim> &fe,
1405 *   const Quadrature<dim> &quadrature,
1406 *   const Quadrature<dim - 1> &face_quadrature,
1407 *   const Function<dim> &rhs_function,
1408 *   const Function<dim> &boundary_values,
1409 *   const Function<dim> &weighting_function);
1410 *  
1411 *   virtual void refine_grid() override;
1412 *  
1413 *   private:
1414 *   const SmartPointer<const Function<dim>> weighting_function;
1415 *   };
1416 *  
1417 *  
1418 *  
1419 *   template <int dim>
1420 *   RefinementWeightedKelly<dim>::RefinementWeightedKelly(
1421 *   Triangulation<dim> &coarse_grid,
1422 *   const FiniteElement<dim> &fe,
1423 *   const Quadrature<dim> &quadrature,
1424 *   const Quadrature<dim - 1> &face_quadrature,
1425 *   const Function<dim> &rhs_function,
1426 *   const Function<dim> &boundary_values,
1427 *   const Function<dim> &weighting_function)
1428 *   : Base<dim>(coarse_grid)
1429 *   , PrimalSolver<dim>(coarse_grid,
1430 *   fe,
1431 *   quadrature,
1432 *   face_quadrature,
1433 *   rhs_function,
1434 *   boundary_values)
1435 *   , weighting_function(&weighting_function)
1436 *   {}
1437 *  
1438 *  
1439 *  
1440 * @endcode
1441 *
1442 * Now, here comes the main function, including the weighting:
1443 *
1444 * @code
1445 *   template <int dim>
1446 *   void RefinementWeightedKelly<dim>::refine_grid()
1447 *   {
1448 * @endcode
1449 *
1450 * First compute some residual based error indicators for all cells by a
1451 * method already implemented in the library. What exactly we compute
1452 * here is described in more detail in the documentation of that class.
1453 *
1454 * @code
1455 *   Vector<float> estimated_error_per_cell(
1456 *   this->triangulation->n_active_cells());
1457 *   std::map<types::boundary_id, const Function<dim> *> dummy_function_map;
1458 *   KellyErrorEstimator<dim>::estimate(this->dof_handler,
1459 *   *this->face_quadrature,
1460 *   dummy_function_map,
1461 *   this->solution,
1462 *   estimated_error_per_cell);
1463 *  
1464 * @endcode
1465 *
1466 * Next weigh each entry in the vector of indicators by the value of the
1467 * function given to the constructor, evaluated at the cell center. We
1468 * need to write the result into the vector entry that corresponds to the
1469 * current cell, which we can obtain by asking the cell what its index
1470 * among all active cells is using CellAccessor::active_cell_index(). (In
1471 * reality, this index is zero for the first cell we handle in the loop,
1472 * one for the second cell, etc., and we could as well just keep track of
1473 * this index using an integer counter; but using
1474 * CellAccessor::active_cell_index() makes this more explicit.)
1475 *
1476 * @code
1477 *   for (const auto &cell : this->dof_handler.active_cell_iterators())
1478 *   estimated_error_per_cell(cell->active_cell_index()) *=
1479 *   weighting_function->value(cell->center());
1480 *  
1481 *   GridRefinement::refine_and_coarsen_fixed_number(*this->triangulation,
1482 *   estimated_error_per_cell,
1483 *   0.3,
1484 *   0.03);
1485 *   this->triangulation->execute_coarsening_and_refinement();
1486 *   }
1487 *  
1488 *   } // namespace LaplaceSolver
1489 *  
1490 *  
1491 * @endcode
1492 *
1493 *
1494 * <a name="step_14-Equationdata"></a>
1495 * <h3>Equation data</h3>
1496 *
1497
1498 *
1499 * In this example program, we work with the same data sets as in the
1500 * previous one, but as it may so happen that someone wants to run the
1501 * program with different boundary values and right hand side functions, or
1502 * on a different grid, we show a simple technique to do exactly that. For
1503 * more clarity, we furthermore pack everything that has to do with equation
1504 * data into a namespace of its own.
1505 *
1506
1507 *
1508 * The underlying assumption is that this is a research program, and that
1509 * we often have a number of test cases that consist of a domain, a
1510 * right hand side, boundary values, possibly a specified coefficient, and a
1511 * number of other parameters. They often all vary at the same time when
1512 * switching from one example to another. To make handling such sets of
1513 * problem description parameters simple is the goal of the following.
1514 *
1515
1516 *
1517 * Basically, the idea is this: let us have a structure for each set of
1518 * data, in which we pack everything that describes a test case: here, these
1519 * are two nested classes, one called <code>BoundaryValues</code> for the
1520 * boundary values of the exact solution, and one called
1521 * <code>RightHandSide</code>, and then a way to generate the coarse
1522 * grid. Since the solution of the previous example program looked like
1523 * curved ridges, we use this name here for the enclosing class. Note that
1524 * the names of the two inner classes have to be the same for all enclosing
1525 * test case classes, and also that we have attached the dimension template
1526 * argument to the enclosing class rather than to the inner ones, to make
1527 * further processing simpler. (From a language viewpoint, a namespace
1528 * would be better to encapsulate these inner classes, rather than a
1529 * structure. However, namespaces cannot be given as template arguments, so
1530 * we use a structure to allow a second object to select from within its
1531 * given argument. The enclosing structure, of course, has no member
1532 * variables apart from the classes it declares, and a static function to
1533 * generate the coarse mesh; it will in general never be instantiated.)
1534 *
1535
1536 *
1537 * The idea is then the following (this is the right time to also take a
1538 * brief look at the code below): we can generate objects for boundary
1539 * values and right hand side by simply giving the name of the outer class
1540 * as a template argument to a class which we call here
1541 * <code>Data::SetUp</code>, and it then creates objects for the inner
1542 * classes. In this case, to get all that characterizes the curved ridge
1543 * solution, we would simply generate an instance of
1544 * <code>Data::SetUp@<Data::CurvedRidge@></code>, and everything we need to
1545 * know about the solution would be static member variables and functions of
1546 * that object.
1547 *
1548
1549 *
1550 * This approach might seem like overkill in this case, but will become very
1551 * handy once a certain set up is not only characterized by Dirichlet
1552 * boundary values and a right hand side function, but in addition by
1553 * material properties, Neumann values, different boundary descriptors,
1554 * etc. In that case, the <code>SetUp</code> class might consist of a dozen
1555 * or more objects, and each descriptor class (like the
1556 * <code>CurvedRidges</code> class below) would have to provide them. Then,
1557 * you will be happy to be able to change from one set of data to another by
1558 * only changing the template argument to the <code>SetUp</code> class at
1559 * one place, rather than at many.
1560 *
1561
1562 *
1563 * With this framework for different test cases, we are almost finished, but
1564 * one thing remains: by now we can select statically, by changing one
1565 * template argument, which data set to choose. In order to be able to do
1566 * that dynamically, i.e. at run time, we need a base class. This we provide
1567 * in the obvious way, see below, with virtual abstract functions. It forces
1568 * us to introduce a second template parameter <code>dim</code> which we
1569 * need for the base class (which could be avoided using some template
1570 * magic, but we omit that), but that's all.
1571 *
1572
1573 *
1574 * Adding new test cases is now simple: you don't have to touch the framework
1575 * classes; all that is needed is a structure like the
1576 * <code>CurvedRidges</code> one.
1577 *
1578 * @code
1579 *   namespace Data
1580 *   {
1581 * @endcode
1582 *
1583 *
1584 * <a name="step_14-TheSetUpBaseandSetUpclasses"></a>
1585 * <h4>The SetUpBase and SetUp classes</h4>
1586 *
1587
1588 *
1589 * Based on the above description, the <code>SetUpBase</code> class then
1590 * looks as follows. To allow using the <code>SmartPointer</code> class
1591 * with this class, we derive it from the <code>Subscriptor</code> class.
1592 *
1593 * @code
1594 *   template <int dim>
1595 *   struct SetUpBase : public Subscriptor
1596 *   {
1597 *   virtual const Function<dim> &get_boundary_values() const = 0;
1598 *  
1599 *   virtual const Function<dim> &get_right_hand_side() const = 0;
1600 *  
1601 *   virtual void
1602 *   create_coarse_grid(Triangulation<dim> &coarse_grid) const = 0;
1603 *   };
1604 *  
1605 *  
1606 * @endcode
1607 *
1608 * And now for the derived class that takes the template argument as
1609 * explained above.
1610 *
1611
1612 *
1613 * Here we pack the data elements into private variables, and allow access
1614 * to them through the methods of the base class.
1615 *
1616 * @code
1617 *   template <class Traits, int dim>
1618 *   struct SetUp : public SetUpBase<dim>
1619 *   {
1620 *   virtual const Function<dim> &get_boundary_values() const override;
1621 *  
1622 *   virtual const Function<dim> &get_right_hand_side() const override;
1623 *  
1624 *  
1625 *   virtual void
1626 *   create_coarse_grid(Triangulation<dim> &coarse_grid) const override;
1627 *  
1628 *   private:
1629 *   static const typename Traits::BoundaryValues boundary_values;
1630 *   static const typename Traits::RightHandSide right_hand_side;
1631 *   };
1632 *  
1633 * @endcode
1634 *
1635 * We have to provide definitions for the static member variables of the
1636 * above class:
1637 *
1638 * @code
1639 *   template <class Traits, int dim>
1640 *   const typename Traits::BoundaryValues SetUp<Traits, dim>::boundary_values;
1641 *   template <class Traits, int dim>
1642 *   const typename Traits::RightHandSide SetUp<Traits, dim>::right_hand_side;
1643 *  
1644 * @endcode
1645 *
1646 * And definitions of the member functions:
1647 *
1648 * @code
1649 *   template <class Traits, int dim>
1650 *   const Function<dim> &SetUp<Traits, dim>::get_boundary_values() const
1651 *   {
1652 *   return boundary_values;
1653 *   }
1654 *  
1655 *  
1656 *   template <class Traits, int dim>
1657 *   const Function<dim> &SetUp<Traits, dim>::get_right_hand_side() const
1658 *   {
1659 *   return right_hand_side;
1660 *   }
1661 *  
1662 *  
1663 *   template <class Traits, int dim>
1664 *   void SetUp<Traits, dim>::create_coarse_grid(
1665 *   Triangulation<dim> &coarse_grid) const
1666 *   {
1667 *   Traits::create_coarse_grid(coarse_grid);
1668 *   }
1669 *  
1670 *  
1671 * @endcode
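 *
 * Before turning to the concrete test cases, here is a short sketch of how
 * these classes are meant to be used. The actual selection of a test case
 * happens in the driver code near the end of this program; the function name
 * below is made up for illustration, and the test case classes
 * <code>Exercise_2_3</code> and <code>CurvedRidges</code> are the ones
 * defined in the following subsections:
 *
 * @code
 *   void sketch_of_test_case_selection()  // hypothetical helper, for illustration
 *   {
 *     constexpr unsigned int dim = 2;
 *
 *     // Select a test case dynamically, through a pointer to the common
 *     // base class (similar to what the driver code does later):
 *     std::unique_ptr<Data::SetUpBase<dim>> data =
 *       std::make_unique<Data::SetUp<Data::Exercise_2_3<dim>, dim>>();
 *
 *     Triangulation<dim> triangulation;
 *     data->create_coarse_grid(triangulation);
 *
 *     // These would then be handed to the solver classes defined earlier:
 *     const Function<dim> &rhs_function    = data->get_right_hand_side();
 *     const Function<dim> &boundary_values = data->get_boundary_values();
 *
 *     // Switching to the curved ridges test case only requires changing the
 *     // template argument:
 *     //   std::make_unique<Data::SetUp<Data::CurvedRidges<dim>, dim>>();
 *   }
 * @endcode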
1672 *
1673 *
1674 * <a name="step_14-TheCurvedRidgesclass"></a>
1675 * <h4>The CurvedRidges class</h4>
1676 *
1677
1678 *
1679 * The class that is used to describe the boundary values and right hand
1680 * side of the <code>curved ridge</code> problem already used in the
1681 * @ref step_13 "step-13" example program is then like so:
1682 *
1683 * @code
1684 *   template <int dim>
1685 *   struct CurvedRidges
1686 *   {
1687 *   class BoundaryValues : public Function<dim>
1688 *   {
1689 *   public:
1690 *   virtual double value(const Point<dim> &p,
1691 *   const unsigned int component) const;
1692 *   };
1693 *  
1694 *  
1695 *   class RightHandSide : public Function<dim>
1696 *   {
1697 *   public:
1698 *   virtual double value(const Point<dim> &p,
1699 *   const unsigned int component) const;
1700 *   };
1701 *  
1702 *   static void create_coarse_grid(Triangulation<dim> &coarse_grid);
1703 *   };
1704 *  
1705 *  
1706 *   template <int dim>
1707 *   double CurvedRidges<dim>::BoundaryValues::value(
1708 *   const Point<dim> &p,
1709 *   const unsigned int /*component*/) const
1710 *   {
1711 *   double q = p(0);
1712 *   for (unsigned int i = 1; i < dim; ++i)
1713 *   q += std::sin(10 * p(i) + 5 * p(0) * p(0));
1714 *   const double exponential = std::exp(q);
1715 *   return exponential;
1716 *   }
1717 *  
1718 *  
1719 *  
1720 *   template <int dim>
1721 *   double CurvedRidges<dim>::RightHandSide::value(
1722 *   const Point<dim> &p,
1723 *   const unsigned int /*component*/) const
1724 *   {
1725 *   double q = p(0);
1726 *   for (unsigned int i = 1; i < dim; ++i)
1727 *   q += std::sin(10 * p(i) + 5 * p(0) * p(0));
1728 *   const double u = std::exp(q);
1729 *   double t1 = 1, t2 = 0, t3 = 0;
1730 *   for (unsigned int i = 1; i < dim; ++i)
1731 *   {
1732 *   t1 += std::cos(10 * p(i) + 5 * p(0) * p(0)) * 10 * p(0);
1733 *   t2 += 10 * std::cos(10 * p(i) + 5 * p(0) * p(0)) -
1734 *   100 * std::sin(10 * p(i) + 5 * p(0) * p(0)) * p(0) * p(0);
1735 *   t3 += 100 * std::cos(10 * p(i) + 5 * p(0) * p(0)) *
1736 *   std::cos(10 * p(i) + 5 * p(0) * p(0)) -
1737 *   100 * std::sin(10 * p(i) + 5 * p(0) * p(0));
1738 *   }
1739 *   t1 = t1 * t1;
1740 *  
1741 *   return -u * (t1 + t2 + t3);
1742 *   }
1743 *  
1744 *  
1745 *   template <int dim>
1746 *   void CurvedRidges<dim>::create_coarse_grid(Triangulation<dim> &coarse_grid)
1747 *   {
1748 *   GridGenerator::hyper_cube(coarse_grid, -1, 1);
1749 *   coarse_grid.refine_global(2);
1750 *   }
1751 *  
1752 *  
1753 * @endcode
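 *
 * As a side note, written out as formulas (these are read off directly from
 * the code above, with the coordinates written 1-based as
 * @f$x_1,\ldots,x_d@f$): the boundary values encode the exact solution
 * @f[
 *   u(\mathbf x) = e^{q(\mathbf x)},
 *   \qquad
 *   q(\mathbf x) = x_1 + \sum_{i=2}^{d} \sin\left(10 x_i + 5 x_1^2\right),
 * @f]
 * and the right hand side is its negative Laplacian,
 * @f[
 *   f = -\Delta u = -e^{q} \left( |\nabla q|^2 + \Delta q \right),
 * @f]
 * whose terms are exactly what the variables <code>t1</code>,
 * <code>t2</code>, and <code>t3</code> in
 * <code>RightHandSide::value()</code> accumulate via the chain rule.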
1754 *
1755 *
1756 * <a name="step_14-TheExercise_2_3class"></a>
1757 * <h4>The Exercise_2_3 class</h4>
1758 *
1759
1760 *
1761 * This example program was written while giving practical courses for a
1762 * lecture on adaptive finite element methods and duality based error
1763 * estimates. For these courses, we had one exercise, which required
1764 * solving the Laplace equation with constant right hand side on a square
1765 * domain with a square hole in the center, and zero boundary
1766 * values. Since the implementation of the properties of this problem is
1767 * so particularly simple here, let's do it. As the number of the exercise
1768 * was 2.3, we take the liberty of retaining this name for the class as well.
1769 *
1770 * @code
1771 *   template <int dim>
1772 *   struct Exercise_2_3
1773 *   {
1774 * @endcode
1775 *
1776 * We need a class to denote the boundary values of the problem. In this
1777 * case, this is simple: it's the zero function, so we don't even declare a
1778 * class, just an alias:
1779 *
1780 * @code
1781 *   using BoundaryValues = Functions::ZeroFunction<dim>;
1782 *  
1783 * @endcode
1784 *
1785 * Second, a class that denotes the right hand side. Since it is
1786 * constant, we just subclass the corresponding class of the library and are
1787 * done:
1788 *
1789 * @code
1790 *   class RightHandSide : public Functions::ConstantFunction<dim>
1791 *   {
1792 *   public:
1793 *   RightHandSide()
1794 *   : Functions::ConstantFunction<dim>(1.)
1795 *   {}
1796 *   };
1797 *  
1798 * @endcode
1799 *
1800 * Finally a function to generate the coarse grid. This is somewhat more
1801 * complicated here, see immediately below.
1802 *
1803 * @code
1804 *   static void create_coarse_grid(Triangulation<dim> &coarse_grid);
1805 *   };
1806 *  
1807 *  
1808 * @endcode
1809 *
1810 * As stated above, the grid for this example is the square [-1,1]^2 with
1811 * the square [-1/2,1/2]^2 as a hole in it. We create the coarse grid as 4
1812 * times 4 cells with the middle four ones missing. To understand how
1813 * exactly the mesh is going to look, it may be simplest to just look
1814 * at the "Results" section of this tutorial program first. In general,
1815 * if you'd like to understand more about creating meshes either from
1816 * scratch by hand, as we do here, or using other techniques, you
1817 * should take a look at @ref step_49 "step-49".
1818 *
1819
1820 *
1821 * Of course, the example has an extension to 3d, but since this function
1822 * cannot be written in a dimension independent way we choose not to
1823 * implement this here, but rather only specialize the template for
1824 * dim=2. If you compile the program for 3d, you'll get a message from the
1825 * linker that this function is not implemented for 3d, and needs to be
1826 * provided.
1827 *
1828
1829 *
1830 * For the creation of this geometry, the library has no predefined
1831 * method. In this case, the geometry is still simple enough to do the
1832 * creation by hand, rather than using a mesh generator.
1833 *
1834 * @code
1835 *   template <>
1836 *   void Exercise_2_3<2>::create_coarse_grid(Triangulation<2> &coarse_grid)
1837 *   {
1838 * @endcode
1839 *
1840 * We first define the space dimension, to allow those parts of the
1841 * function that are actually dimension independent to use this
1842 * variable. That makes it simpler if you later take this as a starting
1843 * point to implement a 3d version of this mesh. The next step is then
1844 * to have a list of vertices. Here, they are 24 (5 times 5, with the
1845 * middle one omitted). It is probably best to draw a sketch here.
1846 *
1847 * @code
1848 *   const unsigned int dim = 2;
1849 *  
1850 *   const std::vector<Point<2>> vertices = {
1851 *   {-1.0, -1.0}, {-0.5, -1.0}, {+0.0, -1.0}, {+0.5, -1.0}, {+1.0, -1.0},
1852 *   {-1.0, -0.5}, {-0.5, -0.5}, {+0.0, -0.5}, {+0.5, -0.5}, {+1.0, -0.5},
1853 *   {-1.0, +0.0}, {-0.5, +0.0}, {+0.5, +0.0}, {+1.0, +0.0},
1854 *   {-1.0, +0.5}, {-0.5, +0.5}, {+0.0, +0.5}, {+0.5, +0.5}, {+1.0, +0.5},
1855 *   {-1.0, +1.0}, {-0.5, +1.0}, {+0.0, +1.0}, {+0.5, +1.0}, {+1.0, +1.0}};
1856 *  
1857 * @endcode
1858 *
1859 * Next, we have to define the cells and the vertices they contain.
1860 *
1861 * @code
1862 *   const std::vector<std::array<int, GeometryInfo<dim>::vertices_per_cell>>
1863 *   cell_vertices = {{{0, 1, 5, 6}},
1864 *   {{1, 2, 6, 7}},
1865 *   {{2, 3, 7, 8}},
1866 *   {{3, 4, 8, 9}},
1867 *   {{5, 6, 10, 11}},
1868 *   {{8, 9, 12, 13}},
1869 *   {{10, 11, 14, 15}},
1870 *   {{12, 13, 17, 18}},
1871 *   {{14, 15, 19, 20}},
1872 *   {{15, 16, 20, 21}},
1873 *   {{16, 17, 21, 22}},
1874 *   {{17, 18, 22, 23}}};
1875 *  
1876 *   const unsigned int n_cells = cell_vertices.size();
1877 *  
1878 * @endcode
1879 *
1880 * Again, we generate a C++ vector type from this, but this time by
1881 * looping over the cells (yes, this is boring). Additionally, we set
1882 * the material indicator to zero for all the cells:
1883 *
1884 * @code
1885 *   std::vector<CellData<dim>> cells(n_cells, CellData<dim>());
1886 *   for (unsigned int i = 0; i < n_cells; ++i)
1887 *   {
1888 *   for (unsigned int j = 0; j < cell_vertices[i].size(); ++j)
1889 *   cells[i].vertices[j] = cell_vertices[i][j];
1890 *   cells[i].material_id = 0;
1891 *   }
1892 *  
1893 * @endcode
1894 *
1895 * Finally pass all this information to the library to generate a
1896 * triangulation. The right call for this is
1897 * Triangulation::create_triangulation(), but that function wants
1898 * its input in a format in which cells are consistently oriented
1899 * in some way. It turns out that the mesh we describe with the
1900 * `vertices` and `cells` objects above already is consistently
1901 * oriented, but if you modify the code in some way it may not
1902 * be any more, and so it is good practice to call a function
1903 * that ensures it is -- GridTools::consistently_order_cells()
1904 * does this.
1905 *
1906
1907 *
1908 * The last parameter of the call to Triangulation::create_triangulation()
1909 * below describes what we want to do about boundary and manifold
1910 * indicators on boundary faces. Here, we don't want to do anything
1911 * specific (in particular, we are fine with labeling all boundaries
1912 * with boundary indicator zero), and so we call the function with
1913 * an empty object as the last argument:
1914 *
1915 * @code
1916 *   GridTools::consistently_order_cells(cells);
1917 *   coarse_grid.create_triangulation(vertices, cells, SubCellData());
1918 *  
1919 * @endcode
1920 *
1921 * And since we want the evaluation point (3/4,3/4) in this example
1922 * to be a grid point, we refine once globally:
1923 *
1924 * @code
1925 *   coarse_grid.refine_global(1);
1926 *   }
1927 *   } // namespace Data
1928 *  
1929 * @endcode
1930 *
1931 *
1932 * <a name="step_14-Discussion"></a>
1933 * <h4>Discussion</h4>
1934 *
1935
1936 *
1937 * As you have now read through this framework, you may be wondering why we
1938 * have not chosen to implement the classes implementing a certain setup
1939 * (like the <code>CurvedRidges</code> class) directly as classes derived
1940 * from <code>Data::SetUpBase</code>. Indeed, we could have done very well
1941 * so. The only reason is that then we would have to have member variables
1942 * for the solution and right hand side classes in the
1943 * <code>CurvedRidges</code> class, as well as member functions overriding
1944 * the abstract functions of the base class to give access to these member
1945 * variables. The sole reason for the <code>SetUp</code> class is to relieve
1946 * us of the need to repeat these member variables and functions in
1947 * all such classes. In some way, the template
1948 * mechanism here only provides a way to have default implementations for a
1949 * number of functions that depend on external quantities and can thus not
1950 * be provided using normal virtual functions, at least not without the help
1951 * of templates.
1952 *
1953
1954 *
1955 * However, there might be good reasons to actually implement classes
1956 * derived from <code>Data::SetUpBase</code>, for example if the solution or
1957 * right hand side classes require constructors that take arguments, which
1958 * the <code>Data::SetUpBase</code> class cannot provide. In that case,
1959 * subclassing is a worthwhile strategy. Other possibilities for special
1960 * cases are to derive from <code>Data::SetUp@<SomeSetUp@></code> where
1961 * <code>SomeSetUp</code> denotes a class, or even to explicitly specialize
1962 * <code>Data::SetUp@<SomeSetUp@></code>. The latter allows us to transparently
1963 * use the way the <code>SetUp</code> class is used for other set-ups, but
1964 * with special actions taken for special arguments.
1965 *
1966
1967 *
1968 * A final observation favoring the approach taken here is the following: we
1969 * have found numerous times that when starting a project, the number of
1970 * parameters (usually boundary values, right hand side, coarse grid, just
1971 * as here) was small, and the number of test cases was small as well. One
1972 * then starts out by handcoding them into a number of <code>switch</code>
1973 * statements. Over time, projects grow, and so does the number of test
1974 * cases. The number of <code>switch</code> statements grows with that, and
1975 * their length as well, and it becomes easy to end up with impossible
1976 * combinations where domains, boundary values, and right hand sides do not fit
1977 * together any more, until one loses the overview of the whole
1978 * structure. Encapsulating everything belonging to a certain test case into
1979 * a structure of its own has proven worthwhile for this, as it keeps
1980 * everything that belongs to one test case in one place. Furthermore, it
1981 * allows us to put these things all in one or more files that are only devoted
1982 * to test cases and their data, without having to bring their actual
1983 * implementation into contact with the rest of the program.
1984 *
1985
1986 *
1987 *
1988
1989 *
1990 *
1991 * <a name="step_14-Dualfunctionals"></a>
1992 * <h3>Dual functionals</h3>
1993 *
1994
1995 *
1996 * As with the other components of the program, we put everything we need to
1997 * describe dual functionals into a namespace of its own, and define an
1998 * abstract base class that provides the interface the class solving the
1999 * dual problem needs for its work.
2000 *
2001
2002 *
2003 * We will then implement two such classes, for the evaluation of a point
2004 * value and of the derivative of the solution at that point. For these
2005 * functionals we already have the corresponding evaluation objects, so they
2006 * are complementary.
2007 *
2008 * @code
2009 *   namespace DualFunctional
2010 *   {
2011 * @endcode
2012 *
2013 *
2014 * <a name="step_14-TheDualFunctionalBaseclass"></a>
2015 * <h4>The DualFunctionalBase class</h4>
2016 *
2017
2018 *
2019 * First start with the base class for dual functionals. Since for linear
2020 * problems the characteristics of the dual problem play a role only in
2021 * the right hand side, we only need to provide a function that
2022 * assembles the right hand side for a given discretization:
2023 *
2024 * @code
2025 *   template <int dim>
2026 *   class DualFunctionalBase : public Subscriptor
2027 *   {
2028 *   public:
2029 *   virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
2030 *   Vector<double> &rhs) const = 0;
2031 *   };
2032 *  
2033 *  
2034 * @endcode
2035 *
2036 *
2037 * <a name="step_14-ThedualfunctionalPointValueEvaluationclass"></a>
2038 * <h4>The dual functional PointValueEvaluation class</h4>
2039 *
2040
2041 *
2042 * As a first application, we consider the functional corresponding to the
2043 * evaluation of the solution's value at a given point which again we
2044 * assume to be a vertex. Apart from the constructor that takes and stores
2045 * the evaluation point, this class consists only of the function that
2046 * implements assembling the right hand side.
2047 *
2048 * @code
2049 *   template <int dim>
2050 *   class PointValueEvaluation : public DualFunctionalBase<dim>
2051 *   {
2052 *   public:
2053 *   PointValueEvaluation(const Point<dim> &evaluation_point);
2054 *  
2055 *   virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
2056 *   Vector<double> &rhs) const override;
2057 *  
2058 *   DeclException1(
2059 *   ExcEvaluationPointNotFound,
2060 *   Point<dim>,
2061 *   << "The evaluation point " << arg1
2062 *   << " was not found among the vertices of the present grid.");
2063 *  
2064 *   protected:
2065 *   const Point<dim> evaluation_point;
2066 *   };
2067 *  
2068 *  
2069 *   template <int dim>
2070 *   PointValueEvaluation<dim>::PointValueEvaluation(
2071 *   const Point<dim> &evaluation_point)
2072 *   : evaluation_point(evaluation_point)
2073 *   {}
2074 *  
2075 *  
2076 * @endcode
2077 *
2078 * As for doing the main purpose of the class, assembling the right hand
2079 * side, let us first consider what is necessary: The right hand side of
2080 * the dual problem is a vector of values J(phi_i), where J is the error
2081 * functional, and phi_i is the i-th shape function. Here, J is the
2082 * evaluation at the point x0, i.e. J(phi_i)=phi_i(x0).
2083 *
2084
2085 *
2086 * Now, we have assumed that the evaluation point is a vertex. Thus, for
2087 * the usual finite elements we might be using in this program, we can
2088 * take for granted that at such a point exactly one shape function is
2089 * nonzero, and in particular has the value one. Thus, we set the right
2090 * hand side vector to all zeros, then look for the shape function
2091 * associated with that point and set the corresponding value of the right
2092 * hand side vector to one:
2093 *
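 * Stated as a formula, the vector assembled below therefore has the entries
 * @f[
 *   (\text{rhs})_i = J(\varphi_i) = \varphi_i(x_0) =
 *   \begin{cases}
 *     1 & \text{if } \varphi_i \text{ is associated with the vertex } x_0, \\
 *     0 & \text{otherwise,}
 *   \end{cases}
 * @f]
 * i.e. it has exactly one nonzero entry:
 *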
2094 * @code
2095 *   template <int dim>
2096 *   void
2097 *   PointValueEvaluation<dim>::assemble_rhs(const DoFHandler<dim> &dof_handler,
2098 *   Vector<double> &rhs) const
2099 *   {
2100 * @endcode
2101 *
2102 * So, first set everything to zeros...
2103 *
2104 * @code
2105 *   rhs.reinit(dof_handler.n_dofs());
2106 *  
2107 * @endcode
2108 *
2109 * ...then loop over cells and find the evaluation point among the
2110 * vertices (or very close to a vertex, which may happen due to floating
2111 * point round-off):
2112 *
2113 * @code
2114 *   for (const auto &cell : dof_handler.active_cell_iterators())
2115 *   for (const auto vertex : cell->vertex_indices())
2116 *   if (cell->vertex(vertex).distance(evaluation_point) <
2117 *   cell->diameter() * 1e-8)
2118 *   {
2119 * @endcode
2120 *
2121 * Ok, found, so set corresponding entry, and leave function
2122 * since we are finished:
2123 *
2124 * @code
2125 *   rhs(cell->vertex_dof_index(vertex, 0)) = 1;
2126 *   return;
2127 *   }
2128 *  
2129 * @endcode
2130 *
2131 * Finally, a sanity check: if we somehow got here, then we must have
2132 * missed the evaluation point, so raise an exception unconditionally:
2133 *
2134 * @code
2135 *   AssertThrow(false, ExcEvaluationPointNotFound(evaluation_point));
2136 *   }
2137 *  
2138 *  
2139 * @endcode
2140 *
2141 *
2142 * <a name="step_14-ThedualfunctionalPointXDerivativeEvaluationclass"></a>
2143 * <h4>The dual functional PointXDerivativeEvaluation class</h4>
2144 *
2145
2146 *
2147 * As second application, we again consider the evaluation of the
2148 * x-derivative of the solution at one point. Again, the declaration of
2149 * the class and the implementation of its constructor are not too
2150 * interesting:
2151 *
2152 * @code
2153 *   template <int dim>
2154 *   class PointXDerivativeEvaluation : public DualFunctionalBase<dim>
2155 *   {
2156 *   public:
2157 *   PointXDerivativeEvaluation(const Point<dim> &evaluation_point);
2158 *  
2159 *   virtual void assemble_rhs(const DoFHandler<dim> &dof_handler,
2160 *   Vector<double> &rhs) const override;
2161 *  
2162 *   DeclException1(
2163 *   ExcEvaluationPointNotFound,
2164 *   Point<dim>,
2165 *   << "The evaluation point " << arg1
2166 *   << " was not found among the vertices of the present grid.");
2167 *  
2168 *   protected:
2169 *   const Point<dim> evaluation_point;
2170 *   };
2171 *  
2172 *  
2173 *   template <int dim>
2174 *   PointXDerivativeEvaluation<dim>::PointXDerivativeEvaluation(
2175 *   const Point<dim> &evaluation_point)
2176 *   : evaluation_point(evaluation_point)
2177 *   {}
2178 *  
2179 *  
2180 * @endcode
2181 *
2182 * What is interesting is the implementation of this functional: here,
2183 * J(phi_i)=d/dx phi_i(x0).
2184 *
2185
2186 *
2187 * We could, as in the implementation of the respective evaluation object,
2188 * take the average of the gradients of each shape function phi_i at this
2189 * evaluation point. However, we take a slightly different approach: we
2190 * simply take the average over all cells that surround this point. The
2191 * question of which cells <code>surround</code> the evaluation point is
2192 * made dependent on the mesh width by including those cells for which the
2193 * distance of the cell's midpoint to the evaluation point is less than
2194 * the cell's diameter.
2195 *
2196
2197 *
2198 * Taking the average of the gradient over the area/volume of these cells
2199 * leads to a dual solution which is very close to the one which would
2200 * result from the point evaluation of the gradient. It is simple to
2201 * justify theoretically that this does not change the method
2202 * significantly.
2203 *
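 * Written as a formula (this merely restates what the code below does): if
 * @f$\Omega_{x_0}@f$ denotes the union of all cells whose center lies within
 * one cell diameter of the evaluation point @f$x_0@f$, then the assembled
 * right hand side entries are
 * @f[
 *   J(\varphi_i) \approx \frac{1}{|\Omega_{x_0}|}
 *   \int_{\Omega_{x_0}} \frac{\partial \varphi_i}{\partial x} \, \mathrm{d}x,
 * @f]
 * i.e. the mean value of the @f$x@f$-derivative of each shape function over
 * this neighborhood, rather than its point value at @f$x_0@f$.
 *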
2204 * @code
2205 *   template <int dim>
2206 *   void PointXDerivativeEvaluation<dim>::assemble_rhs(
2207 *   const DoFHandler<dim> &dof_handler,
2208 *   Vector<double> &rhs) const
2209 *   {
2210 * @endcode
2211 *
2212 * Again, first set all entries to zero:
2213 *
2214 * @code
2215 *   rhs.reinit(dof_handler.n_dofs());
2216 *  
2217 * @endcode
2218 *
2219 * Initialize a <code>FEValues</code> object with a quadrature formula,
2220 * have abbreviations for the number of quadrature points and shape
2221 * functions...
2222 *
2223 * @code
2224 *   QGauss<dim> quadrature(dof_handler.get_fe().degree + 1);
2225 *   FEValues<dim> fe_values(dof_handler.get_fe(),
2226 *   quadrature,
2227 *   update_gradients | update_quadrature_points |
2228 *   update_JxW_values);
2229 *   const unsigned int n_q_points = fe_values.n_quadrature_points;
2230 *   const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell;
2231 *  
2232 * @endcode
2233 *
2234 * ...and have two objects that are used to store the global indices of
2235 * the degrees of freedom on a cell, and the values of the gradients of
2236 * the shape functions at the quadrature points:
2237 *
2238 * @code
2239 *   Vector<double> cell_rhs(dofs_per_cell);
2240 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
2241 *  
2242 * @endcode
2243 *
2244 * Finally have a variable in which we will sum up the area/volume of
2245 * the cells over which we integrate, by integrating the unit functions
2246 * on these cells:
2247 *
2248 * @code
2249 *   double total_volume = 0;
2250 *  
2251 * @endcode
2252 *
2253 * Then start the loop over all cells, and select those cells which are
2254 * close enough to the evaluation point:
2255 *
2256 * @code
2257 *   for (const auto &cell : dof_handler.active_cell_iterators())
2258 *   if (cell->center().distance(evaluation_point) <= cell->diameter())
2259 *   {
2260 * @endcode
2261 *
2262 * If we have found such a cell, then initialize the
2263 * <code>FEValues</code> object and integrate the x-component of
2264 * the gradient of each shape function, as well as the unit
2265 * function for the total area/volume.
2266 *
2267 * @code
2268 *   fe_values.reinit(cell);
2269 *   cell_rhs = 0;
2270 *  
2271 *   for (unsigned int q = 0; q < n_q_points; ++q)
2272 *   {
2273 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
2274 *   cell_rhs(i) +=
2275 *   fe_values.shape_grad(i, q)[0] // (d/dx phi_i(x_q))
2276 *   * fe_values.JxW(q); // * dx
2277 *   total_volume += fe_values.JxW(q);
2278 *   }
2279 *  
2280 * @endcode
2281 *
2282 * If we have the local contributions, distribute them to the
2283 * global vector:
2284 *
2285 * @code
2286 *   cell->get_dof_indices(local_dof_indices);
2287 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
2288 *   rhs(local_dof_indices[i]) += cell_rhs(i);
2289 *   }
2290 *  
2291 * @endcode
2292 *
2293 * After we have looped over all cells, check whether we have found any
2294 * at all, by making sure that their volume is non-zero. If not, then
2295 * the results will be botched, as the right hand side should then still
2296 * be zero, so throw an exception:
2297 *
2298 * @code
2299 *   AssertThrow(total_volume > 0,
2300 *   ExcEvaluationPointNotFound(evaluation_point));
2301 *  
2302 * @endcode
2303 *
2304 * Finally, we have by now only integrated the gradients of the shape
2305 * functions, not taking their mean value. We fix this by dividing by
2306 * the measure of the volume over which we have integrated:
2307 *
2308 * @code
2309 *   rhs /= total_volume;
2310 *   }
2311 *  
2312 *  
2313 *   } // namespace DualFunctional
2314 *  
2315 *  
2316 * @endcode
2317 *
2318 *
2319 * <a name="step_14-ExtendingtheLaplaceSolvernamespace"></a>
2320 * <h3>Extending the LaplaceSolver namespace</h3>
2321 *
2322 * @code
2323 *   namespace LaplaceSolver
2324 *   {
2325 * @endcode
2326 *
2327 *
2328 * <a name="step_14-TheDualSolverclass"></a>
2329 * <h4>The DualSolver class</h4>
2330 *
2331
2332 *
2333 * In the same way as the <code>PrimalSolver</code> class above, we now
2334 * implement a <code>DualSolver</code>. It has all the same features; the
2335 * only difference is that it does not take a function object denoting the
2336 * right hand side, but instead takes a <code>DualFunctionalBase</code>
2337 * object that will assemble the right hand side vector of the dual
2338 * problem. The rest of the class is rather trivial.
2339 *
2340
2341 *
2342 * Since both primal and dual solver will use the same triangulation, but
2343 * different discretizations, it now becomes clear why we have made the
2344 * inheritance from the <code>Base</code> class virtual: since the final
2345 * class will be derived from both <code>PrimalSolver</code> and
2346 * <code>DualSolver</code>, it would contain two <code>Base</code>
2347 * subobjects had we not marked the inheritance as virtual. Since in many
2348 * applications the base class stores much more information than just the
2349 * triangulation, all of which needs to be shared between primal and dual
2350 * solvers, we usually do not want to carry two copies of this base class.
2351 *
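 * As an aside, the following minimal sketch (plain C++, with hypothetical
 * class names, independent of the classes in this program) illustrates why
 * the inheritance needs to be virtual:
 *
 * @code
 *   struct B { int shared_resource; };
 *
 *   struct P : virtual B {}; // plays the role of the primal branch
 *   struct D : virtual B {}; // plays the role of the dual branch
 *
 *   struct Joined : P, D {}; // contains only a single B subobject
 *
 *   // Without 'virtual' above, 'Joined' would contain two distinct B
 *   // subobjects and the following access would be ambiguous:
 *   int f(Joined &j) { return j.shared_resource; }
 * @endcode
 *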
2352 * @code
2353 *   template <int dim>
2354 *   class DualSolver : public Solver<dim>
2355 *   {
2356 *   public:
2357 *   DualSolver(
2358 *   Triangulation<dim> &triangulation,
2359 *   const FiniteElement<dim> &fe,
2360 *   const Quadrature<dim> &quadrature,
2361 *   const Quadrature<dim - 1> &face_quadrature,
2362 *   const DualFunctional::DualFunctionalBase<dim> &dual_functional);
2363 *  
2364 *   protected:
2365 *   const SmartPointer<const DualFunctional::DualFunctionalBase<dim>>
2366 *   dual_functional;
2367 *   virtual void assemble_rhs(Vector<double> &rhs) const override;
2368 *  
2369 *   static const Functions::ZeroFunction<dim> boundary_values;
2370 *   };
2371 *  
2372 *   template <int dim>
2373 *   const Functions::ZeroFunction<dim> DualSolver<dim>::boundary_values;
2374 *  
2375 *   template <int dim>
2376 *   DualSolver<dim>::DualSolver(
2377 *   Triangulation<dim> &triangulation,
2378 *   const FiniteElement<dim> &fe,
2379 *   const Quadrature<dim> &quadrature,
2380 *   const Quadrature<dim - 1> &face_quadrature,
2381 *   const DualFunctional::DualFunctionalBase<dim> &dual_functional)
2382 *   : Base<dim>(triangulation)
2383 *   , Solver<dim>(triangulation,
2384 *   fe,
2385 *   quadrature,
2386 *   face_quadrature,
2387 *   boundary_values)
2388 *   , dual_functional(&dual_functional)
2389 *   {}
2390 *  
2391 *  
2392 *  
2393 *   template <int dim>
2394 *   void DualSolver<dim>::assemble_rhs(Vector<double> &rhs) const
2395 *   {
2396 *   dual_functional->assemble_rhs(this->dof_handler, rhs);
2397 *   }
2398 *  
2399 *  
2400 * @endcode
2401 *
2402 *
2403 * <a name="step_14-TheWeightedResidualclass"></a>
2404 * <h4>The WeightedResidual class</h4>
2405 *
2406
2407 *
2408 * Here finally comes the main class of this program, the one that
2409 * implements the dual weighted residual error estimator. It joins the
2410 * primal and dual solver classes to use them for the computation of
2411 * primal and dual solutions, and implements the error representation
2412 * formula that is used as error estimate and as refinement criterion.
2413 *
2414
2415 *
2416 * The first few functions of this class mostly override the
2417 * respective functions of the base class:
2418 *
2419 * @code
2420 *   template <int dim>
2421 *   class WeightedResidual : public PrimalSolver<dim>, public DualSolver<dim>
2422 *   {
2423 *   public:
2424 *   WeightedResidual(
2425 *   Triangulation<dim> &coarse_grid,
2426 *   const FiniteElement<dim> &primal_fe,
2427 *   const FiniteElement<dim> &dual_fe,
2428 *   const Quadrature<dim> &quadrature,
2429 *   const Quadrature<dim - 1> &face_quadrature,
2430 *   const Function<dim> &rhs_function,
2431 *   const Function<dim> &boundary_values,
2432 *   const DualFunctional::DualFunctionalBase<dim> &dual_functional);
2433 *  
2434 *   virtual void solve_problem() override;
2435 *  
2436 *   virtual void postprocess(
2437 *   const Evaluation::EvaluationBase<dim> &postprocessor) const override;
2438 *  
2439 *   virtual unsigned int n_dofs() const override;
2440 *  
2441 *   virtual void refine_grid() override;
2442 *  
2443 *   virtual void output_solution() const override;
2444 *  
2445 *   private:
2446 * @endcode
2447 *
2448 * In the private section, we have two functions that are used to call
2449 * the <code>solve_problem</code> functions of the primal and dual base
2450 * classes. These two functions will be called in parallel by the
2451 * <code>solve_problem</code> function of this class.
2452 *
2453 * @code
2454 *   void solve_primal_problem();
2455 *   void solve_dual_problem();
2456 * @endcode
2457 *
2458 * Then declare an abbreviation for active cell iterators, so that we
2459 * do not have to write this lengthy name over and over again:
2460 *
2461
2462 *
2463 *
2464 * @code
2465 *   using active_cell_iterator =
2466 *   typename DoFHandler<dim>::active_cell_iterator;
2467 *  
2468 * @endcode
2469 *
2470 * Next, declare a data type that we will use to store the contribution
2471 * of faces to the error estimator. The idea is that we could compute the
2472 * face terms from each of the two cells adjacent to a face, as they are
2473 * the same when viewed from both sides. What we will do instead is to
2474 * compute them only once, based on rules explained below that determine
2475 * which of the two adjacent cells is in charge of doing so. We then store
2476 * the contribution of each face in a map from faces to their values, and
2477 * only collect the contributions for each cell by looping over the
2478 * cells a second time and grabbing the values from the map.
2479 *
2480
2481 *
2482 * The data type of this map is declared here:
2483 *
2484 * @code
2485 *   using FaceIntegrals =
2486 *   typename std::map<typename DoFHandler<dim>::face_iterator, double>;
2487 *  
2488 * @endcode
2489 *
2490 * In the computation of the error estimates on cells and faces, we need
2491 * a number of helper objects, such as <code>FEValues</code> and
2492 * <code>FEFaceValues</code> objects, but also temporary arrays that
2493 * store, for example, the values and gradients of primal and dual
2494 * solutions. These fields are needed in the three functions that do the
2495 * integration on cells, on regular faces, and on irregular faces, respectively.
2496 *
2497
2498 *
2499 * There are three reasonable ways to provide these fields: first, as
2500 * local variables in the function that needs them; second, as member
2501 * variables of this class; third, as arguments passed to that function.
2502 *
2503
2504 *
2505 * These three alternatives all have drawbacks: the third has the
2506 * drawback that the number of these objects is not negligible and would
2507 * make calling these functions a lengthy enterprise. The second has the
2508 * drawback that it disallows parallelization, since the threads that
2509 * will compute the error estimate each need their own copies of these
2510 * variables, so member variables of the enclosing class will not
2511 * work. The first approach, although straightforward, has a subtle but
2512 * important drawback: we will call these functions over and over again,
2513 * perhaps many thousands of times; it turns out that allocating
2514 * vectors and other objects that need memory from the heap is an
2515 * expensive business in terms of run-time, and even more so
2516 * when several threads are involved. It is thus
2517 * significantly better to allocate the memory only once, and recycle
2518 * the objects as often as possible.
2519 *
2520
2521 *
2522 * What to do? Our answer is to use a variant of the third strategy.
2523 * In fact, this is exactly what the WorkStream concept is supposed to
2524 * do (we have already introduced it above, but see also @ref threads).
2525 * To avoid having to give these functions a dozen or so arguments, we
2526 * pack all these variables into two structures, one used for the
2527 * computations on cells, the other for those on faces. Both are then
2528 * joined into the WeightedResidualScratchData class that will serve as
2529 * the "scratch data" class of the WorkStream concept:
2530 *
2531 * @code
2532 *   struct CellData
2533 *   {
2534 *   FEValues<dim> fe_values;
2535 *   const SmartPointer<const Function<dim>> right_hand_side;
2536 *  
2537 *   std::vector<double> cell_residual;
2538 *   std::vector<double> rhs_values;
2539 *   std::vector<double> dual_weights;
2540 *   std::vector<double> cell_laplacians;
2541 *   CellData(const FiniteElement<dim> &fe,
2542 *   const Quadrature<dim> &quadrature,
2543 *   const Function<dim> &right_hand_side);
2544 *   CellData(const CellData &cell_data);
2545 *   };
2546 *  
2547 *   struct FaceData
2548 *   {
2549 *   FEFaceValues<dim> fe_face_values_cell;
2550 *   FEFaceValues<dim> fe_face_values_neighbor;
2551 *   FESubfaceValues<dim> fe_subface_values_cell;
2552 *  
2553 *   std::vector<double> jump_residual;
2554 *   std::vector<double> dual_weights;
2555 *   typename std::vector<Tensor<1, dim>> cell_grads;
2556 *   typename std::vector<Tensor<1, dim>> neighbor_grads;
2557 *   FaceData(const FiniteElement<dim> &fe,
2558 *   const Quadrature<dim - 1> &face_quadrature);
2559 *   FaceData(const FaceData &face_data);
2560 *   };
2561 *  
2562 *   struct WeightedResidualScratchData
2563 *   {
2564 *   WeightedResidualScratchData(
2565 *   const FiniteElement<dim> &primal_fe,
2566 *   const Quadrature<dim> &primal_quadrature,
2567 *   const Quadrature<dim - 1> &primal_face_quadrature,
2568 *   const Function<dim> &rhs_function,
2569 *   const Vector<double> &primal_solution,
2570 *   const Vector<double> &dual_weights);
2571 *  
2572 *   WeightedResidualScratchData(
2573 *   const WeightedResidualScratchData &scratch_data);
2574 *  
2575 *   CellData cell_data;
2576 *   FaceData face_data;
2577 *   Vector<double> primal_solution;
2578 *   Vector<double> dual_weights;
2579 *   };
2580 *  
2581 *  
2582 * @endcode
2583 *
2584 * WorkStream::run generally wants both a scratch object and a copy
2585 * object. Here, for reasons similar to what we had in @ref step_9 "step-9" when
2586 * discussing the computation of an approximation of the gradient, we
2587 * don't actually need a "copy data" structure. Since WorkStream insists
2588 * on having one of these, we just declare an empty structure that does
2589 * nothing other than being there.
2590 *
2591 * @code
2592 *   struct WeightedResidualCopyData
2593 *   {};
2594 *  
2595 *  
2596 *  
2597 * @endcode
2598 *
2599 * Regarding the evaluation of the error estimator, we have one driver
2600 * function that uses WorkStream::run() to call the second function on
2601 * every cell:
2602 *
2603 * @code
2604 *   void estimate_error(Vector<float> &error_indicators) const;
2605 *  
2606 *   void estimate_on_one_cell(const active_cell_iterator &cell,
2607 *   WeightedResidualScratchData &scratch_data,
2608 *   WeightedResidualCopyData &copy_data,
2609 *   Vector<float> &error_indicators,
2610 *   FaceIntegrals &face_integrals) const;
2611 *  
2612 * @endcode
2613 *
2614 * Then we have functions that do the actual integration of the error
2615 * representation formula. They will treat the terms on the cell
2616 * interiors, on those faces that have no hanging nodes, and on those
2617 * faces with hanging nodes, respectively:
2618 *
2619 * @code
2620 *   void integrate_over_cell(const active_cell_iterator &cell,
2621 *   const Vector<double> &primal_solution,
2622 *   const Vector<double> &dual_weights,
2623 *   CellData &cell_data,
2624 *   Vector<float> &error_indicators) const;
2625 *  
2626 *   void integrate_over_regular_face(const active_cell_iterator &cell,
2627 *   const unsigned int face_no,
2628 *   const Vector<double> &primal_solution,
2629 *   const Vector<double> &dual_weights,
2630 *   FaceData &face_data,
2631 *   FaceIntegrals &face_integrals) const;
2632 *   void integrate_over_irregular_face(const active_cell_iterator &cell,
2633 *   const unsigned int face_no,
2634 *   const Vector<double> &primal_solution,
2635 *   const Vector<double> &dual_weights,
2636 *   FaceData &face_data,
2637 *   FaceIntegrals &face_integrals) const;
2638 *   };
2639 *  
2640 *  
2641 *  
2642 * @endcode
2643 *
2644 * In the implementation of this class, we first have the constructors of
2645 * the <code>CellData</code> and <code>FaceData</code> member classes, and
2646 * the <code>WeightedResidual</code> constructor. They only initialize
2647 * fields to their correct lengths, so we do not have to discuss them in
2648 * too much detail:
2649 *
2650 * @code
2651 *   template <int dim>
2652 *   WeightedResidual<dim>::CellData::CellData(
2653 *   const FiniteElement<dim> &fe,
2654 *   const Quadrature<dim> &quadrature,
2655 *   const Function<dim> &right_hand_side)
2656 *   : fe_values(fe,
2657 *   quadrature,
2658 *   update_values | update_hessians | update_quadrature_points |
2659 *   update_JxW_values)
2660 *   , right_hand_side(&right_hand_side)
2661 *   , cell_residual(quadrature.size())
2662 *   , rhs_values(quadrature.size())
2663 *   , dual_weights(quadrature.size())
2664 *   , cell_laplacians(quadrature.size())
2665 *   {}
2666 *  
2667 *  
2668 *  
2669 *   template <int dim>
2670 *   WeightedResidual<dim>::CellData::CellData(const CellData &cell_data)
2671 *   : fe_values(cell_data.fe_values.get_fe(),
2672 *   cell_data.fe_values.get_quadrature(),
2673 *   update_values | update_hessians | update_quadrature_points |
2674 *   update_JxW_values)
2675 *   , right_hand_side(cell_data.right_hand_side)
2676 *   , cell_residual(cell_data.cell_residual)
2677 *   , rhs_values(cell_data.rhs_values)
2678 *   , dual_weights(cell_data.dual_weights)
2679 *   , cell_laplacians(cell_data.cell_laplacians)
2680 *   {}
2681 *  
2682 *  
2683 *  
2684 *   template <int dim>
2685 *   WeightedResidual<dim>::FaceData::FaceData(
2686 *   const FiniteElement<dim> &fe,
2687 *   const Quadrature<dim - 1> &face_quadrature)
2688 *   : fe_face_values_cell(fe,
2689 *   face_quadrature,
2690 *   update_values | update_gradients | update_JxW_values |
2691 *   update_normal_vectors)
2692 *   , fe_face_values_neighbor(fe,
2693 *   face_quadrature,
2694 *   update_values | update_gradients | update_JxW_values |
2695 *   update_normal_vectors)
2696 *   , fe_subface_values_cell(fe, face_quadrature, update_gradients)
2697 *   {
2698 *   const unsigned int n_face_q_points = face_quadrature.size();
2699 *  
2700 *   jump_residual.resize(n_face_q_points);
2701 *   dual_weights.resize(n_face_q_points);
2702 *   cell_grads.resize(n_face_q_points);
2703 *   neighbor_grads.resize(n_face_q_points);
2704 *   }
2705 *  
2706 *  
2707 *  
2708 *   template <int dim>
2709 *   WeightedResidual<dim>::FaceData::FaceData(const FaceData &face_data)
2710 *   : fe_face_values_cell(face_data.fe_face_values_cell.get_fe(),
2711 *   face_data.fe_face_values_cell.get_quadrature(),
2712 *   update_values | update_gradients | update_JxW_values |
2713 *   update_normal_vectors)
2714 *   , fe_face_values_neighbor(
2715 *   face_data.fe_face_values_neighbor.get_fe(),
2716 *   face_data.fe_face_values_neighbor.get_quadrature(),
2717 *   update_values | update_gradients | update_JxW_values |
2718 *   update_normal_vectors)
2719 *   , fe_subface_values_cell(
2720 *   face_data.fe_subface_values_cell.get_fe(),
2721 *   face_data.fe_subface_values_cell.get_quadrature(),
2722 *   update_gradients)
2723 *   , jump_residual(face_data.jump_residual)
2724 *   , dual_weights(face_data.dual_weights)
2725 *   , cell_grads(face_data.cell_grads)
2726 *   , neighbor_grads(face_data.neighbor_grads)
2727 *   {}
2728 *  
2729 *  
2730 *  
2731 *   template <int dim>
2732 *   WeightedResidual<dim>::WeightedResidualScratchData::
2733 *   WeightedResidualScratchData(
2734 *   const FiniteElement<dim> &primal_fe,
2735 *   const Quadrature<dim> &primal_quadrature,
2736 *   const Quadrature<dim - 1> &primal_face_quadrature,
2737 *   const Function<dim> &rhs_function,
2738 *   const Vector<double> &primal_solution,
2739 *   const Vector<double> &dual_weights)
2740 *   : cell_data(primal_fe, primal_quadrature, rhs_function)
2741 *   , face_data(primal_fe, primal_face_quadrature)
2742 *   , primal_solution(primal_solution)
2743 *   , dual_weights(dual_weights)
2744 *   {}
2745 *  
2746 *   template <int dim>
2747 *   WeightedResidual<dim>::WeightedResidualScratchData::
2748 *   WeightedResidualScratchData(
2749 *   const WeightedResidualScratchData &scratch_data)
2750 *   : cell_data(scratch_data.cell_data)
2751 *   , face_data(scratch_data.face_data)
2752 *   , primal_solution(scratch_data.primal_solution)
2753 *   , dual_weights(scratch_data.dual_weights)
2754 *   {}
2755 *  
2756 *  
2757 *  
2758 *   template <int dim>
2759 *   WeightedResidual<dim>::WeightedResidual(
2760 *   Triangulation<dim> &coarse_grid,
2761 *   const FiniteElement<dim> &primal_fe,
2762 *   const FiniteElement<dim> &dual_fe,
2763 *   const Quadrature<dim> &quadrature,
2764 *   const Quadrature<dim - 1> &face_quadrature,
2765 *   const Function<dim> &rhs_function,
2766 *   const Function<dim> &bv,
2767 *   const DualFunctional::DualFunctionalBase<dim> &dual_functional)
2768 *   : Base<dim>(coarse_grid)
2769 *   , PrimalSolver<dim>(coarse_grid,
2770 *   primal_fe,
2771 *   quadrature,
2772 *   face_quadrature,
2773 *   rhs_function,
2774 *   bv)
2775 *   , DualSolver<dim>(coarse_grid,
2776 *   dual_fe,
2777 *   quadrature,
2778 *   face_quadrature,
2779 *   dual_functional)
2780 *   {}
2781 *  
2782 *  
2783 * @endcode
2784 *
2785 * The next five functions are boring, as they simply relay their work to
2786 * the base classes. The first calls the primal and dual solvers in
2787 * parallel, while postprocessing the solution and retrieving the number
2788 * of degrees of freedom is done by the primal class.
2789 *
2790 * @code
2791 *   template <int dim>
2792 *   void WeightedResidual<dim>::solve_problem()
2793 *   {
2794 *   Threads::TaskGroup<void> tasks;
2795 *   tasks +=
2796 *   Threads::new_task(&WeightedResidual<dim>::solve_primal_problem, *this);
2797 *   tasks +=
2798 *   Threads::new_task(&WeightedResidual<dim>::solve_dual_problem, *this);
2799 *   tasks.join_all();
2800 *   }
2801 *  
2802 *  
2803 *   template <int dim>
2804 *   void WeightedResidual<dim>::solve_primal_problem()
2805 *   {
2806 *   PrimalSolver<dim>::solve_problem();
2807 *   }
2808 *  
2809 *   template <int dim>
2810 *   void WeightedResidual<dim>::solve_dual_problem()
2811 *   {
2812 *   DualSolver<dim>::solve_problem();
2813 *   }
2814 *  
2815 *  
2816 *   template <int dim>
2817 *   void WeightedResidual<dim>::postprocess(
2818 *   const Evaluation::EvaluationBase<dim> &postprocessor) const
2819 *   {
2820 *   PrimalSolver<dim>::postprocess(postprocessor);
2821 *   }
2822 *  
2823 *  
2824 *   template <int dim>
2825 *   unsigned int WeightedResidual<dim>::n_dofs() const
2826 *   {
2827 *   return PrimalSolver<dim>::n_dofs();
2828 *   }
2829 *  
2830 *  
2831 *  
2832 * @endcode
2833 *
2834 * Now, it is becoming more interesting: the <code>refine_grid()</code>
2835 * function asks the error estimator to compute the cell-wise error
2836 * indicators, then uses their absolute values for mesh refinement.
2837 *
2838 * @code
2839 *   template <int dim>
2840 *   void WeightedResidual<dim>::refine_grid()
2841 *   {
2842 * @endcode
2843 *
2844 * First call the function that computes the cell-wise and global error:
2845 *
2846 * @code
2847 *   Vector<float> error_indicators(this->triangulation->n_active_cells());
2848 *   estimate_error(error_indicators);
2849 *  
2850 * @endcode
2851 *
2852 * Then note that marking cells for refinement or coarsening only works
2853 * if all indicators are positive, to allow their comparison. Thus, drop
2854 * the signs on all these indicators:
2855 *
2856 * @code
2857 *   for (float &error_indicator : error_indicators)
2858 *   error_indicator = std::fabs(error_indicator);
2859 *  
2860 * @endcode
2861 *
2862 * Finally, we can select between different strategies for
2863 * refinement. The default here is to refine those cells with the
2864 * largest error indicators that together account for 80 per cent of
2865 * the error, while we coarsen those with the smallest indicators that
2866 * together account for the bottom 2 per cent of the error.
2867 *
2868 * @code
2869 *   GridRefinement::refine_and_coarsen_fixed_fraction(*this->triangulation,
2870 *   error_indicators,
2871 *   0.8,
2872 *   0.02);
2873 *   this->triangulation->execute_coarsening_and_refinement();
2874 *   }
2875 *  
2876 *  
2877 * @endcode
2878 *
2879 * Since we want to output both the primal and the dual solution, we
2880 * overload the <code>output_solution</code> function. The only
2881 * interesting feature of this function is that the primal and dual
2882 * solutions are defined on different finite element spaces, which is not
2883 * the format the <code>DataOut</code> class expects. Thus, we have to
2884 * transfer them to a common finite element space. Since we only want to
2885 * look at the solutions qualitatively, we content ourselves with
2886 * interpolating the dual solution to the (smaller) primal space. For the
2887 * interpolation, there is a library function that takes an
2888 * AffineConstraints object including the hanging node
2889 * constraints. The rest is standard.
2890 *
2891 * @code
2892 *   template <int dim>
2893 *   void WeightedResidual<dim>::output_solution() const
2894 *   {
2895 *   AffineConstraints<double> primal_hanging_node_constraints;
2896 *   DoFTools::make_hanging_node_constraints(PrimalSolver<dim>::dof_handler,
2897 *   primal_hanging_node_constraints);
2898 *   primal_hanging_node_constraints.close();
2899 *   Vector<double> dual_solution(PrimalSolver<dim>::dof_handler.n_dofs());
2900 *   FETools::interpolate(DualSolver<dim>::dof_handler,
2901 *   DualSolver<dim>::solution,
2902 *   PrimalSolver<dim>::dof_handler,
2903 *   primal_hanging_node_constraints,
2904 *   dual_solution);
2905 *  
2906 *   DataOut<dim> data_out;
2907 *   data_out.attach_dof_handler(PrimalSolver<dim>::dof_handler);
2908 *  
2909 * @endcode
2910 *
2911 * Add the data vectors for which we want output. Add them both; the
2912 * <code>DataOut</code> functions can handle as many data vectors as you
2913 * wish to write to output:
2914 *
2915 * @code
2916 *   data_out.add_data_vector(PrimalSolver<dim>::solution, "primal_solution");
2917 *   data_out.add_data_vector(dual_solution, "dual_solution");
2918 *  
2919 *   data_out.build_patches();
2920 *  
2921 *   std::ofstream out("solution-" + std::to_string(this->refinement_cycle) +
2922 *   ".vtu");
2923 *   data_out.write(out, DataOutBase::vtu);
2924 *   }
2925 *  
2926 *  
2927 * @endcode
2928 *
2929 *
2930 * <a name="step_14-Estimatingerrors"></a>
2931 * <h3>Estimating errors</h3>
2932 *
2933
2934 *
2935 *
2936 * <a name="step_14-Errorestimationdriverfunctions"></a>
2937 * <h4>Error estimation driver functions</h4>
2938 *
2939
2940 *
2941 * As for the actual computation of error estimates, let's start with the
2942 * function that drives all this, i.e. calls those functions that actually
2943 * do the work, and finally collects the results.
2944 *
2945 * @code
2946 *   template <int dim>
2947 *   void
2948 *   WeightedResidual<dim>::estimate_error(Vector<float> &error_indicators) const
2949 *   {
2950 * @endcode
2951 *
2952 * The first task in computing the error is to set up vectors that
2953 * denote the primal solution, and the weights (z-z_h)=(z-I_hz), both in
2954 * the finite element space in which we have computed the dual
2955 * solution. For this, we have to interpolate the primal solution into the
2956 * dual finite element space, and to subtract from the computed dual
2957 * solution its interpolation into the primal finite element
2958 * space. Fortunately, the library provides functions for the
2959 * interpolation into larger or smaller finite element spaces, so this
2960 * is mostly straightforward.
2961 *
2962
2963 *
2964 * First, let's do that for the primal solution: it is cell-wise
2965 * interpolated into the finite element space in which we have solved
2966 * the dual problem. But, as in the
2967 * <code>WeightedResidual::output_solution</code> function, we first need
2968 * to create an AffineConstraints object including the hanging node
2969 * constraints, this time of the dual finite element space.
2970 *
2971 * @code
2972 *   AffineConstraints<double> dual_hanging_node_constraints;
2973 *   DoFTools::make_hanging_node_constraints(DualSolver<dim>::dof_handler,
2974 *   dual_hanging_node_constraints);
2975 *   dual_hanging_node_constraints.close();
2976 *   Vector<double> primal_solution(DualSolver<dim>::dof_handler.n_dofs());
2977 *   FETools::interpolate(PrimalSolver<dim>::dof_handler,
2978 *   PrimalSolver<dim>::solution,
2979 *   DualSolver<dim>::dof_handler,
2980 *   dual_hanging_node_constraints,
2981 *   primal_solution);
2982 *  
2983 * @endcode
2984 *
2985 * Then, for computing the interpolation of the numerically approximated
2986 * dual solution z into the finite element space of the primal solution
2987 * and subtracting it from z, use the
2988 * <code>FETools::interpolation_difference</code> function, which gives
2989 * (z-I_hz) in the element space of the dual solution.
2990 *
2991 * @code
2992 *   AffineConstraints<double> primal_hanging_node_constraints;
2993 *   DoFTools::make_hanging_node_constraints(PrimalSolver<dim>::dof_handler,
2994 *   primal_hanging_node_constraints);
2995 *   primal_hanging_node_constraints.close();
2996 *   Vector<double> dual_weights(DualSolver<dim>::dof_handler.n_dofs());
2997 *   FETools::interpolation_difference(DualSolver<dim>::dof_handler,
2998 *   dual_hanging_node_constraints,
2999 *   DualSolver<dim>::solution,
3000 *   PrimalSolver<dim>::dof_handler,
3001 *   primal_hanging_node_constraints,
3002 *   dual_weights);
3003 *  
3004 * @endcode
3005 *
3006 * Note that this could probably have been done more efficiently, since
3007 * those constraints have already been used when assembling matrix and
3008 * right hand side for the primal problem and writing out the dual
3009 * solution. We leave the optimization of the program in this respect as
3010 * an exercise.
3011 *
3012
3013 *
3014 * Having computed the dual weights, we now proceed with computing the
3015 * cell and face residuals of the primal solution. First we set up a map
3016 * from face iterators to their jump term contributions to
3017 * the error estimator. The reason is that we compute the jump terms
3018 * only once, from one side of each face, and want to collect them only
3019 * afterwards when looping over all cells a second time.
3020 *
3021
3022 *
3023 * We initialize this map with a value of -1e20 for all faces,
3024 * since this value will stand out in the results if something should go
3025 * wrong and we fail to compute the value for a face for some
3026 * reason. Secondly, this initialization already makes the std::map
3027 * object allocate all the entries it may possibly need. This is important
3028 * since we will write into this structure from parallel threads,
3029 * and doing so would not be thread-safe if the map needed to allocate
3030 * memory and thereby reshape its data structure. In other words, the
3031 * initialization relieves us of the need to synchronize
3032 * the threads through a mutex each time they write to (and modify the
3033 * structure of) this map.
3034 *
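 * The following is a minimal, deal.II-independent sketch of this idea
 * (with made-up names and sizes): all map entries are created up front,
 * and the threads afterwards only overwrite the values of existing
 * entries, so the map's internal structure is never modified
 * concurrently:
 *
 * @code
 *   #include <cassert>
 *   #include <map>
 *   #include <thread>
 *
 *   int main()
 *   {
 *     std::map<int, double> contributions;
 *     for (int key = 0; key < 1000; ++key)
 *       contributions[key] = -1e20; // sentinel value; also creates the entry
 *
 *     // Each thread writes only to entries that already exist, using
 *     // find() so that no insertion (and hence no re-balancing) happens.
 *     auto worker = [&contributions](const int begin, const int end) {
 *       for (int key = begin; key < end; ++key)
 *         contributions.find(key)->second = 2.0 * key;
 *     };
 *
 *     std::thread t1(worker, 0, 500);
 *     std::thread t2(worker, 500, 1000);
 *     t1.join();
 *     t2.join();
 *
 *     assert(contributions.at(123) == 246.0);
 *   }
 * @endcode
 *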
3035 * @code
3036 *   FaceIntegrals face_integrals;
3037 *   for (const auto &cell :
3038 *   DualSolver<dim>::dof_handler.active_cell_iterators())
3039 *   for (const auto &face : cell->face_iterators())
3040 *   face_integrals[face] = -1e20;
3041 *  
3042 *   auto worker = [this,
3043 *   &error_indicators,
3044 *   &face_integrals](const active_cell_iterator &cell,
3045 *   WeightedResidualScratchData &scratch_data,
3046 *   WeightedResidualCopyData &copy_data) {
3047 *   this->estimate_on_one_cell(
3048 *   cell, scratch_data, copy_data, error_indicators, face_integrals);
3049 *   };
3050 *  
3051 *   auto do_nothing_copier =
3052 *   std::function<void(const WeightedResidualCopyData &)>();
3053 *  
3054 * @endcode
3055 *
3056 * Then hand it all off to WorkStream::run() to compute the
3057 * estimators for all cells in parallel:
3058 *
3059 * @code
3060 *   WorkStream::run(
3061 *   DualSolver<dim>::dof_handler.begin_active(),
3062 *   DualSolver<dim>::dof_handler.end(),
3063 *   worker,
3064 *   do_nothing_copier,
3065 *   WeightedResidualScratchData(*DualSolver<dim>::fe,
3066 *   *DualSolver<dim>::quadrature,
3067 *   *DualSolver<dim>::face_quadrature,
3068 *   *this->rhs_function,
3069 *   primal_solution,
3070 *   dual_weights),
3071 *   WeightedResidualCopyData());
3072 *  
3073 * @endcode
3074 *
3075 * Once the error contributions are computed, sum them up. For this,
3076 * note that the cell terms are already set, and that only the edge
3077 * terms need to be collected. Thus, loop over all cells and their
3078 * faces, make sure that the contributions of each of the faces are
3079 * there, and add them up. Only take minus one half of the jump term,
3080 * since the other half will be taken by the neighboring cell.
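 *
 * Schematically, the quantity being assembled here for each cell K is the
 * error representation discussed in the introduction,
 * @f[
 *   \eta_K = \left(f+\Delta u_h, z-I_h z\right)_K
 *            - \frac 12 \left(\jump{\partial_n u_h},
 *              z-I_h z\right)_{\partial K\setminus\partial\Omega},
 * @f]
 * where the cell term has already been stored in
 * <code>error_indicators</code> by <code>integrate_over_cell()</code>, and
 * the face terms (without the factor 1/2) are stored in
 * <code>face_integrals</code>.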
3081 *
3082 * @code
3083 *   unsigned int present_cell = 0;
3084 *   for (const auto &cell :
3085 *   DualSolver<dim>::dof_handler.active_cell_iterators())
3086 *   {
3087 *   for (const auto &face : cell->face_iterators())
3088 *   {
3089 *   Assert(face_integrals.find(face) != face_integrals.end(),
3090 *   ExcInternalError());
3091 *   error_indicators(present_cell) -= 0.5 * face_integrals[face];
3092 *   }
3093 *   ++present_cell;
3094 *   }
3095 *   std::cout << " Estimated error: "
3096 *   << std::accumulate(error_indicators.begin(),
3097 *   error_indicators.end(),
3098 *   0.)
3099 *   << std::endl;
3100 *   }
3101 *  
3102 *  
3103 * @endcode
3104 *
3105 *
3106 * <a name="step_14-Estimatingonasinglecell"></a>
3107 * <h4>Estimating on a single cell</h4>
3108 *
3109
3110 *
3111 * Next we have the function that is called to estimate the error on a
3112 * single cell. The function may be called multiple times if the library was
3113 * configured to use multithreading. Here it goes:
3114 *
3115 * @code
3116 *   template <int dim>
3117 *   void WeightedResidual<dim>::estimate_on_one_cell(
3118 *   const active_cell_iterator &cell,
3119 *   WeightedResidualScratchData &scratch_data,
3120 *   WeightedResidualCopyData &copy_data,
3121 *   Vector<float> &error_indicators,
3122 *   FaceIntegrals &face_integrals) const
3123 *   {
3124 * @endcode
3125 *
3126 * Because of WorkStream, estimate_on_one_cell requires a CopyData object
3127 * even if it is not used. The next line silences a warning about this
3128 * unused variable.
3129 *
3130 * @code
3131 *   (void)copy_data;
3132 *  
3133 * @endcode
3134 *
3135 * First task on each cell is to compute the cell residual
3136 * contributions of this cell, and put them into the
3137 * <code>error_indicators</code> variable:
3138 *
3139 * @code
3140 *   integrate_over_cell(cell,
3141 *   scratch_data.primal_solution,
3142 *   scratch_data.dual_weights,
3143 *   scratch_data.cell_data,
3144 *   error_indicators);
3145 *  
3146 * @endcode
3147 *
3148 * After computing the cell terms, turn to the face terms. For this,
3149 * loop over all faces of the present cell, and see whether
3150 * something needs to be computed on it:
3151 *
3152 * @code
3153 *   for (const auto face_no : cell->face_indices())
3154 *   {
3155 * @endcode
3156 *
3157 * First, if this face is part of the boundary, then there is
3158 * nothing to do. However, to make things easier when summing up
3159 * the contributions of the faces of cells, we enter this face
3160 * into the list of faces with a zero contribution to the error.
3161 *
3162 * @code
3163 *   if (cell->face(face_no)->at_boundary())
3164 *   {
3165 *   face_integrals[cell->face(face_no)] = 0;
3166 *   continue;
3167 *   }
3168 *  
3169 * @endcode
3170 *
3171 * Next, note that since we want to compute the jump terms on
3172 * each face only once although we access it twice (if it is not
3173 * at the boundary), we have to define some rules for which cell is
3174 * responsible for computing on a face:
3175 *
3176
3177 *
3178 * First, if the neighboring cell is on the same level as this
3179 * one, i.e. neither further refined nor coarser, then the one
3180 * with the lower index within this level does the work. In
3181 * other words: if the other one has a lower index, then skip
3182 * work on this face:
3183 *
3184 * @code
3185 *   if ((cell->neighbor(face_no)->has_children() == false) &&
3186 *   (cell->neighbor(face_no)->level() == cell->level()) &&
3187 *   (cell->neighbor(face_no)->index() < cell->index()))
3188 *   continue;
3189 *  
3190 * @endcode
3191 *
3192 * Likewise, we always work from the coarser cell if this and
3193 * its neighbor differ in refinement. Thus, if the neighboring
3194 * cell is less refined than the present one, then do nothing
3195 * since we integrate over the subfaces when we visit the coarse
3196 * cell.
3197 *
3198 * @code
3199 *   if (cell->at_boundary(face_no) == false)
3200 *   if (cell->neighbor(face_no)->level() < cell->level())
3201 *   continue;
3202 *  
3203 *  
3204 * @endcode
3205 *
3206 * Now we know that we are in charge here, so actually compute
3207 * the face jump terms. If the face is a regular one, i.e. the
3208 * other side's cell is neither coarser nor finer than this
3209 * cell, then call one function, and if the cell on the other
3210 * side is further refined, then use another function. Note that
3211 * the case that the cell on the other side is coarser cannot
3212 * happen since we have decided above that we handle this case
3213 * when we pass over that other cell.
3214 *
3215 * @code
3216 *   if (cell->face(face_no)->has_children() == false)
3217 *   integrate_over_regular_face(cell,
3218 *   face_no,
3219 *   scratch_data.primal_solution,
3220 *   scratch_data.dual_weights,
3221 *   scratch_data.face_data,
3222 *   face_integrals);
3223 *   else
3224 *   integrate_over_irregular_face(cell,
3225 *   face_no,
3226 *   scratch_data.primal_solution,
3227 *   scratch_data.dual_weights,
3228 *   scratch_data.face_data,
3229 *   face_integrals);
3230 *   }
3231 *   }
3232 *  
3233 *  
3234 * @endcode
3235 *
3236 *
3237 * <a name="step_14-Computingcelltermerrorcontributions"></a>
3238 * <h4>Computing cell term error contributions</h4>
3239 *
3240
3241 *
3242 * As for the actual computation of the error contributions, first turn to
3243 * the cell terms:
3244 *
3245 * @code
3246 *   template <int dim>
3247 *   void WeightedResidual<dim>::integrate_over_cell(
3248 *   const active_cell_iterator &cell,
3249 *   const Vector<double> &primal_solution,
3250 *   const Vector<double> &dual_weights,
3251 *   CellData &cell_data,
3252 *   Vector<float> &error_indicators) const
3253 *   {
3254 * @endcode
3255 *
3256 * The tasks to be done are what appears natural from looking at the
3257 * error estimation formula: first get the right hand side and Laplacian
3258 * of the numerical solution at the quadrature points for the cell
3259 * residual,
3260 *
3261 * @code
3262 *   cell_data.fe_values.reinit(cell);
3263 *   cell_data.right_hand_side->value_list(
3264 *   cell_data.fe_values.get_quadrature_points(), cell_data.rhs_values);
3265 *   cell_data.fe_values.get_function_laplacians(primal_solution,
3266 *   cell_data.cell_laplacians);
3267 *  
3268 * @endcode
3269 *
3270 * ...then get the dual weights...
3271 *
3272 * @code
3273 *   cell_data.fe_values.get_function_values(dual_weights,
3274 *   cell_data.dual_weights);
3275 *  
3276 * @endcode
3277 *
3278 * ...and finally build the sum over all quadrature points and store it
3279 * with the present cell:
3280 *
3281 * @code
3282 *   double sum = 0;
3283 *   for (unsigned int p = 0; p < cell_data.fe_values.n_quadrature_points; ++p)
3284 *   sum += ((cell_data.rhs_values[p] + cell_data.cell_laplacians[p]) *
3285 *   cell_data.dual_weights[p] * cell_data.fe_values.JxW(p));
3286 *   error_indicators(cell->active_cell_index()) += sum;
3287 *   }
3288 *  
3289 *  
3290 * @endcode
3291 *
3292 *
3293 * <a name="step_14-Computingedgetermerrorcontributions1"></a>
3294 * <h4>Computing edge term error contributions -- 1</h4>
3295 *
3296
3297 *
3298 * On the other hand, computation of the edge terms for the error estimate
3299 * is not so simple. First, we have to distinguish between faces with and
3300 * without hanging nodes. Because it is the simpler case, we first consider
3301 * faces without hanging nodes (let's call this the `regular'
3302 * case):
3303 *
3304 * @code
3305 *   template <int dim>
3306 *   void WeightedResidual<dim>::integrate_over_regular_face(
3307 *   const active_cell_iterator &cell,
3308 *   const unsigned int face_no,
3309 *   const Vector<double> &primal_solution,
3310 *   const Vector<double> &dual_weights,
3311 *   FaceData &face_data,
3312 *   FaceIntegrals &face_integrals) const
3313 *   {
3314 *   const unsigned int n_q_points =
3315 *   face_data.fe_face_values_cell.n_quadrature_points;
3316 *  
3317 * @endcode
3318 *
3319 * The first step is to get the values of the gradients at the
3320 * quadrature points of the finite element field on the present
3321 * cell. For this, initialize the <code>FEFaceValues</code> object
3322 * corresponding to this side of the face, and extract the gradients
3323 * using that object.
3324 *
3325 * @code
3326 *   face_data.fe_face_values_cell.reinit(cell, face_no);
3327 *   face_data.fe_face_values_cell.get_function_gradients(
3328 *   primal_solution, face_data.cell_grads);
3329 *  
3330 * @endcode
3331 *
3332 * The second step is then to extract the gradients of the finite
3333 * element solution at the quadrature points on the other side of the
3334 * face, i.e. from the neighboring cell.
3335 *
3336
3337 *
3338 * For this, first do a sanity check: make sure that the neighbor
3339 * actually exists (yes, we should not have come here if the neighbor
3340 * did not exist, but in complicated software there are bugs, so better
3341 * check this), and throw an error if this is not the case.
3342 *
3343 * @code
3344 *   Assert(cell->neighbor(face_no).state() == IteratorState::valid,
3345 *   ExcInternalError());
3346 * @endcode
3347 *
3348 * If we have that, then we need to find out with which face of the
3349 * neighboring cell we have to work, i.e. the <code>how-many'th</code>
3350 * neighbor the present cell is of the cell behind the present face. For
3351 * this, there is a function, and we put the result into a variable with
3352 * the name <code>neighbor_neighbor</code>:
3353 *
3354 * @code
3355 *   const unsigned int neighbor_neighbor =
3356 *   cell->neighbor_of_neighbor(face_no);
3357 * @endcode
3358 *
3359 * Then define an abbreviation for the neighbor cell, initialize the
3360 * <code>FEFaceValues</code> object on that cell, and extract the
3361 * gradients on that cell:
3362 *
3363 * @code
3364 *   const active_cell_iterator neighbor = cell->neighbor(face_no);
3365 *   face_data.fe_face_values_neighbor.reinit(neighbor, neighbor_neighbor);
3366 *   face_data.fe_face_values_neighbor.get_function_gradients(
3367 *   primal_solution, face_data.neighbor_grads);
3368 *  
3369 * @endcode
3370 *
3371 * Now that we have the gradients on this and the neighboring cell,
3372 * compute the jump residual by multiplying the jump in the gradient
3373 * with the normal vector:
3374 *
3375 * @code
3376 *   for (unsigned int p = 0; p < n_q_points; ++p)
3377 *   face_data.jump_residual[p] =
3378 *   ((face_data.cell_grads[p] - face_data.neighbor_grads[p]) *
3379 *   face_data.fe_face_values_cell.normal_vector(p));
3380 *  
3381 * @endcode
3382 *
3383 * Next get the dual weights for this face:
3384 *
3385 * @code
3386 *   face_data.fe_face_values_cell.get_function_values(dual_weights,
3387 *   face_data.dual_weights);
3388 *  
3389 * @endcode
3390 *
3391 * Finally, we have to compute the sum over jump residuals, dual
3392 * weights, and quadrature weights, to get the result for this face:
3393 *
3394 * @code
3395 *   double face_integral = 0;
3396 *   for (unsigned int p = 0; p < n_q_points; ++p)
3397 *   face_integral +=
3398 *   (face_data.jump_residual[p] * face_data.dual_weights[p] *
3399 *   face_data.fe_face_values_cell.JxW(p));
3400 *  
3401 * @endcode
3402 *
3403 * Double check that the element already exists and that it was not
3404 * already written to...
3405 *
3406 * @code
3407 *   Assert(face_integrals.find(cell->face(face_no)) != face_integrals.end(),
3408 *   ExcInternalError());
3409 *   Assert(face_integrals[cell->face(face_no)] == -1e20, ExcInternalError());
3410 *  
3411 * @endcode
3412 *
3413 * ...then store the computed value at its assigned location. Note that
3414 * the stored value does not contain the factor 1/2 that appears in the
3415 * error representation. The reason is that this factor is not present
3416 * if we sum over all faces of the triangulation; it only appears if we
3417 * write the sum as one over all cells and all faces of each cell, in
3418 * which case every interior face is visited twice. We take account of
3419 * this by using the factor -1/2 later, when we sum up the contributions
3420 * for each cell individually.
3421 *
3422 * @code
3423 *   face_integrals[cell->face(face_no)] = face_integral;
3424 *   }
3425 *  
3426 *  
3427 * @endcode
3428 *
3429 *
3430 * <a name="step_14-Computingedgetermerrorcontributions2"></a>
3431 * <h4>Computing edge term error contributions -- 2</h4>
3432 *
3433
3434 *
3435 * We are still missing the case of faces with hanging nodes. This is what
3436 * is covered in this function:
3437 *
3438 * @code
3439 *   template <int dim>
3440 *   void WeightedResidual<dim>::integrate_over_irregular_face(
3441 *   const active_cell_iterator &cell,
3442 *   const unsigned int face_no,
3443 *   const Vector<double> &primal_solution,
3444 *   const Vector<double> &dual_weights,
3445 *   FaceData &face_data,
3446 *   FaceIntegrals &face_integrals) const
3447 *   {
3448 * @endcode
3449 *
3450 * First again two abbreviations, and some consistency checks that
3451 * the function is called only on faces for which it is supposed to be
3452 * called:
3453 *
3454 * @code
3455 *   const unsigned int n_q_points =
3456 *   face_data.fe_face_values_cell.n_quadrature_points;
3457 *  
3458 *   const typename DoFHandler<dim>::face_iterator face = cell->face(face_no);
3459 *   const typename DoFHandler<dim>::cell_iterator neighbor =
3460 *   cell->neighbor(face_no);
3461 *   Assert(neighbor.state() == IteratorState::valid, ExcInternalError());
3462 *   Assert(neighbor->has_children(), ExcInternalError());
3463 *   (void)neighbor;
3464 *  
3465 * @endcode
3466 *
3467 * Then find out which neighbor the present cell is of the adjacent
3468 * cell. Note that we will operate on the children of this adjacent
3469 * cell, but that their orientation is the same as that of their mother,
3470 * i.e. the neighbor direction is the same.
3471 *
3472 * @code
3473 *   const unsigned int neighbor_neighbor =
3474 *   cell->neighbor_of_neighbor(face_no);
3475 *  
3476 * @endcode
3477 *
3478 * Then simply do everything we did in the previous function for one
3479 * face for all the sub-faces now:
3480 *
3481 * @code
3482 *   for (unsigned int subface_no = 0; subface_no < face->n_children();
3483 *   ++subface_no)
3484 *   {
3485 * @endcode
3486 *
3487 * Start with some checks again: get an iterator pointing to the
3488 * cell behind the present subface and check whether its face is a
3489 * subface of the one we are considering. If that were not the case,
3490 * then there would either be a bug in the
3491 * <code>neighbor_neighbor</code> function called above, or -- worse
3492 * -- some function in the library would not keep to some underlying
3493 * assumptions about cells, their children, and their faces. In any
3494 * case, even though this assertion should not be triggered, it does
3495 * no harm to be cautious, and in optimized mode the
3496 * assertion will be removed anyway.
3497 *
3498 * @code
3499 *   const active_cell_iterator neighbor_child =
3500 *   cell->neighbor_child_on_subface(face_no, subface_no);
3501 *   Assert(neighbor_child->face(neighbor_neighbor) ==
3502 *   cell->face(face_no)->child(subface_no),
3503 *   ExcInternalError());
3504 *  
3505 * @endcode
3506 *
3507 * Now start the work by again getting the gradient of the solution
3508 * first at this side of the interface,
3509 *
3510 * @code
3511 *   face_data.fe_subface_values_cell.reinit(cell, face_no, subface_no);
3512 *   face_data.fe_subface_values_cell.get_function_gradients(
3513 *   primal_solution, face_data.cell_grads);
3514 * @endcode
3515 *
3516 * then at the other side,
3517 *
3518 * @code
3519 *   face_data.fe_face_values_neighbor.reinit(neighbor_child,
3520 *   neighbor_neighbor);
3521 *   face_data.fe_face_values_neighbor.get_function_gradients(
3522 *   primal_solution, face_data.neighbor_grads);
3523 *  
3524 * @endcode
3525 *
3526 * and finally building the jump residuals. Since we take the normal
3527 * vector from the other cell this time, reverse the sign of the
3528 * first term compared to the other function:
3529 *
3530 * @code
3531 *   for (unsigned int p = 0; p < n_q_points; ++p)
3532 *   face_data.jump_residual[p] =
3533 *   ((face_data.neighbor_grads[p] - face_data.cell_grads[p]) *
3534 *   face_data.fe_face_values_neighbor.normal_vector(p));
3535 *  
3536 * @endcode
3537 *
3538 * Then get dual weights:
3539 *
3540 * @code
3541 *   face_data.fe_face_values_neighbor.get_function_values(
3542 *   dual_weights, face_data.dual_weights);
3543 *  
3544 * @endcode
3545 *
3546 * At last, sum up the contribution of this sub-face, and set it in
3547 * the global map:
3548 *
3549 * @code
3550 *   double face_integral = 0;
3551 *   for (unsigned int p = 0; p < n_q_points; ++p)
3552 *   face_integral +=
3553 *   (face_data.jump_residual[p] * face_data.dual_weights[p] *
3554 *   face_data.fe_face_values_neighbor.JxW(p));
3555 *   face_integrals[neighbor_child->face(neighbor_neighbor)] =
3556 *   face_integral;
3557 *   }
3558 *  
3559 * @endcode
3560 *
3561 * Once the contributions of all sub-faces are computed, loop over all
3562 * sub-faces to collect and store them with the mother face for simple
3563 * use when later collecting the error terms of cells. Again make safety
3564 * checks that the entries for the sub-faces have been computed and do
3565 * not carry an invalid value.
3566 *
3567 * @code
3568 *   double sum = 0;
3569 *   for (unsigned int subface_no = 0; subface_no < face->n_children();
3570 *   ++subface_no)
3571 *   {
3572 *   Assert(face_integrals.find(face->child(subface_no)) !=
3573 *   face_integrals.end(),
3574 *   ExcInternalError());
3575 *   Assert(face_integrals[face->child(subface_no)] != -1e20,
3576 *   ExcInternalError());
3577 *  
3578 *   sum += face_integrals[face->child(subface_no)];
3579 *   }
3580 * @endcode
3581 *
3582 * Finally store the value with the parent face.
3583 *
3584 * @code
3585 *   face_integrals[face] = sum;
3586 *   }
3587 *  
3588 *   } // namespace LaplaceSolver
3589 *  
3590 *  
3591 * @endcode
3592 *
3593 *
3594 * <a name="step_14-Asimulationframework"></a>
3595 * <h3>A simulation framework</h3>
3596 *
3597
3598 *
3599 * In the previous example program, we had two functions that were used
3600 * to drive the process of solving on successively finer grids. We extend
3601 * this here to allow for a number of parameters to be passed to these
3602 * functions, and put all of that into a framework class.
3603 *
3604
3605 *
3606 * You will have noted that this program is built up of a number of small
3607 * parts (evaluation functions, solver classes implementing various
3608 * refinement methods, different dual functionals, different problem and
3609 * data descriptions), which makes the program relatively simple to extend,
3610 * but also allows solving a large number of different problems by
3611 * replacing one part by another. We reflect this flexibility by declaring a
3612 * structure in the following framework class that holds a number of
3613 * parameters that may be set to test various combinations of the parts of
3614 * this program, and which can be used to test it on various problems and
3615 * discretizations in a simple way.
3616 *
3617 * @code
3618 *   template <int dim>
3619 *   struct Framework
3620 *   {
3621 *   public:
3622 * @endcode
3623 *
3624 * First, we declare two abbreviations for simple use of the respective
3625 * data types:
3626 *
3627 * @code
3628 *   using Evaluator = Evaluation::EvaluationBase<dim>;
3629 *   using EvaluatorList = std::list<Evaluator *>;
3630 *  
3631 *  
3632 * @endcode
3633 *
3634 * Then we have the structure which declares all the parameters that may
3635 * be set. In the default constructor of the structure, these values are
3636 * all set to default values, for simple use.
3637 *
3638 * @code
3639 *   struct ProblemDescription
3640 *   {
3641 * @endcode
3642 *
3643 * First allow for the degrees of the piecewise polynomials by which the
3644 * primal and dual problems will be discretized. They default to (bi-,
3645 * tri-)linear ansatz functions for the primal, and (bi-, tri-)quadratic
3646 * ones for the dual problem. If a refinement criterion is chosen that
3647 * does not need the solution of a dual problem, the value of the dual
3648 * finite element degree is of course ignored.
3649 *
3650 * @code
3651 *   unsigned int primal_fe_degree;
3652 *   unsigned int dual_fe_degree;
3653 *  
3654 * @endcode
3655 *
3656 * Then have an object that describes the problem type, i.e. right hand
3657 * side, domain, boundary values, etc. The pointer needed here defaults
3658 * to the Null pointer, i.e. you will have to set it in actual instances
3659 * of this object to make it useful.
3660 *
3661 * @code
3662 *   std::unique_ptr<const Data::SetUpBase<dim>> data;
3663 *  
3664 * @endcode
3665 *
3666 * Since we allow the use of different refinement criteria (global
3667 * refinement, refinement by the Kelly error indicator, possibly with a
3668 * weight, and using the dual estimator), define a number of enumeration
3669 * values, and subsequently a variable of that type. It will default to
3670 * <code>dual_weighted_error_estimator</code>.
3671 *
3672 * @code
3673 *   enum RefinementCriterion
3674 *   {
3675 *   dual_weighted_error_estimator,
3676 *   global_refinement,
3677 *   kelly_indicator,
3678 *   weighted_kelly_indicator
3679 *   };
3680 *  
3681 *   RefinementCriterion refinement_criterion;
3682 *  
3683 * @endcode
3684 *
3685 * Next, an object that describes the dual functional. It is only needed
3686 * if the dual weighted residual refinement is chosen, and also defaults
3687 * to a Null pointer.
3688 *
3689 * @code
3690 *   std::unique_ptr<const DualFunctional::DualFunctionalBase<dim>>
3691 *   dual_functional;
3692 *  
3693 * @endcode
3694 *
3695 * Then a list of evaluation objects. Its default value is empty,
3696 * i.e. no evaluation objects.
3697 *
3698 * @code
3699 *   EvaluatorList evaluator_list;
3700 *  
3701 * @endcode
3702 *
3703 * Next to last, a function that is used as a weight to the
3704 * <code>RefinementWeightedKelly</code> class. The default value of this
3705 * pointer is zero, but you have to set it to some other value if you
3706 * want to use the <code>weighted_kelly_indicator</code> refinement
3707 * criterion.
3708 *
3709 * @code
3710 *   std::unique_ptr<const Function<dim>> kelly_weight;
3711 *  
3712 * @endcode
3713 *
3714 * Finally, we have a variable that denotes the maximum number of
3715 * degrees of freedom we allow for the (primal) discretization. If it is
3716 * exceeded, we stop the process of alternately solving and
3717 * refining the mesh. Its default value is 20,000.
3718 *
3719 * @code
3720 *   unsigned int max_degrees_of_freedom;
3721 *  
3722 * @endcode
3723 *
3724 * Finally the default constructor of this class:
3725 *
3726 * @code
3727 *   ProblemDescription();
3728 *   };
3729 *  
3730 * @endcode
3731 *
3732 * The driver framework class only has one method, which alternates
3733 * between solving and mesh refinement, and does some other small tasks
3734 * in between. Since it does not need data besides the parameters given
3735 * to it, we make it static:
3736 *
3737 * @code
3738 *   static void run(const ProblemDescription &descriptor);
3739 *   };
3740 *  
3741 *  
3742 * @endcode
3743 *
3744 * As for the implementation, first the constructor of the parameter object,
3745 * setting all values to their defaults:
3746 *
3747 * @code
3748 *   template <int dim>
3749 *   Framework<dim>::ProblemDescription::ProblemDescription()
3750 *   : primal_fe_degree(1)
3751 *   , dual_fe_degree(2)
3752 *   , refinement_criterion(dual_weighted_error_estimator)
3753 *   , max_degrees_of_freedom(20000)
3754 *   {}
3755 *  
3756 *  
3757 *  
3758 * @endcode
3759 *
3760 * Then the function which drives the whole process:
3761 *
3762 * @code
3763 *   template <int dim>
3764 *   void Framework<dim>::run(const ProblemDescription &descriptor)
3765 *   {
3766 * @endcode
3767 *
3768 * First create a triangulation from the given data object,
3769 *
3770 * @code
3771 *   Triangulation<dim> triangulation(
3772 *   Triangulation<dim>::smoothing_on_refinement);
3773 *   descriptor.data->create_coarse_grid(triangulation);
3774 *  
3775 * @endcode
3776 *
3777 * then a set of finite elements and appropriate quadrature formula:
3778 *
3779 * @code
3780 *   const FE_Q<dim> primal_fe(descriptor.primal_fe_degree);
3781 *   const FE_Q<dim> dual_fe(descriptor.dual_fe_degree);
3782 *   const QGauss<dim> quadrature(descriptor.dual_fe_degree + 1);
3783 *   const QGauss<dim - 1> face_quadrature(descriptor.dual_fe_degree + 1);
3784 *  
3785 * @endcode
3786 *
3787 * Next, select one of the classes implementing different refinement
3788 * criteria.
3789 *
3790 * @code
3791 *   std::unique_ptr<LaplaceSolver::Base<dim>> solver;
3792 *   switch (descriptor.refinement_criterion)
3793 *   {
3794 *   case ProblemDescription::dual_weighted_error_estimator:
3795 *   {
3796 *   solver = std::make_unique<LaplaceSolver::WeightedResidual<dim>>(
3797 *   triangulation,
3798 *   primal_fe,
3799 *   dual_fe,
3800 *   quadrature,
3801 *   face_quadrature,
3802 *   descriptor.data->get_right_hand_side(),
3803 *   descriptor.data->get_boundary_values(),
3804 *   *descriptor.dual_functional);
3805 *   break;
3806 *   }
3807 *  
3808 *   case ProblemDescription::global_refinement:
3809 *   {
3810 *   solver = std::make_unique<LaplaceSolver::RefinementGlobal<dim>>(
3811 *   triangulation,
3812 *   primal_fe,
3813 *   quadrature,
3814 *   face_quadrature,
3815 *   descriptor.data->get_right_hand_side(),
3816 *   descriptor.data->get_boundary_values());
3817 *   break;
3818 *   }
3819 *  
3820 *   case ProblemDescription::kelly_indicator:
3821 *   {
3822 *   solver = std::make_unique<LaplaceSolver::RefinementKelly<dim>>(
3823 *   triangulation,
3824 *   primal_fe,
3825 *   quadrature,
3826 *   face_quadrature,
3827 *   descriptor.data->get_right_hand_side(),
3828 *   descriptor.data->get_boundary_values());
3829 *   break;
3830 *   }
3831 *  
3832 *   case ProblemDescription::weighted_kelly_indicator:
3833 *   {
3834 *   solver =
3835 *   std::make_unique<LaplaceSolver::RefinementWeightedKelly<dim>>(
3836 *   triangulation,
3837 *   primal_fe,
3838 *   quadrature,
3839 *   face_quadrature,
3840 *   descriptor.data->get_right_hand_side(),
3841 *   descriptor.data->get_boundary_values(),
3842 *   *descriptor.kelly_weight);
3843 *   break;
3844 *   }
3845 *  
3846 *   default:
3847 *   AssertThrow(false, ExcInternalError());
3848 *   }
3849 *  
3850 * @endcode
3851 *
3852 * Now that all objects are in place, run the main loop. The stopping
3853 * criterion is implemented at the bottom of the loop.
3854 *
3855
3856 *
3857 * In the loop, first set the new cycle number, then solve the problem,
3858 * output its solution(s), apply the evaluation objects to it, then decide
3859 * whether we want to refine the mesh further and solve again on this
3860 * mesh, or jump out of the loop.
3861 *
3862 * @code
3863 *   for (unsigned int step = 0; true; ++step)
3864 *   {
3865 *   std::cout << "Refinement cycle: " << step << std::endl;
3866 *  
3867 *   solver->set_refinement_cycle(step);
3868 *   solver->solve_problem();
3869 *   solver->output_solution();
3870 *  
3871 *   std::cout << " Number of degrees of freedom: " << solver->n_dofs()
3872 *   << std::endl;
3873 *  
3874 *   for (const auto &evaluator : descriptor.evaluator_list)
3875 *   {
3876 *   evaluator->set_refinement_cycle(step);
3877 *   solver->postprocess(*evaluator);
3878 *   }
3879 *  
3880 *  
3881 *   if (solver->n_dofs() < descriptor.max_degrees_of_freedom)
3882 *   solver->refine_grid();
3883 *   else
3884 *   break;
3885 *   }
3886 *  
3887 * @endcode
3888 *
3889 * Clean up the screen after the loop has run:
3890 *
3891 * @code
3892 *   std::cout << std::endl;
3893 *   }
3894 *  
3895 *   } // namespace Step14
3896 *  
3897 *  
3898 *  
3899 * @endcode
3900 *
3901 *
3902 * <a name="step_14-Themainfunction"></a>
3903 * <h3>The main function</h3>
3904 *
3905
3906 *
3907 * Here finally comes the main function. It drives the whole process by
3908 * specifying a set of parameters to be used for the simulation (polynomial
3909 * degrees, evaluation and dual functionals, etc), and passes them packed into
3910 * a structure to the framework class above.
3911 *
3912 * @code
3913 *   int main()
3914 *   {
3915 *   try
3916 *   {
3917 *   using namespace Step14;
3918 *  
3919 * @endcode
3920 *
3921 * Describe the problem we want to solve here by passing a descriptor
3922 * object to the function doing the rest of the work:
3923 *
3924 * @code
3925 *   const unsigned int dim = 2;
3926 *   Framework<dim>::ProblemDescription descriptor;
3927 *  
3928 * @endcode
3929 *
3930 * First set the refinement criterion we wish to use:
3931 *
3932 * @code
3933 *   descriptor.refinement_criterion =
3934 *   Framework<dim>::ProblemDescription::dual_weighted_error_estimator;
3935 * @endcode
3936 *
3937 * Here, we could just as well have used <code>global_refinement</code> or
3938 * <code>weighted_kelly_indicator</code>. Note that the information
3939 * given about dual finite elements, dual functional, etc. is only
3940 * important for the given choice of refinement criterion, and is
3941 * ignored otherwise.
3942 *
3943
3944 *
3945 * Then set the polynomial degrees of the primal and dual problems. We
3946 * choose here bi-linear and bi-quadratic elements:
3947 *
3948 * @code
3949 *   descriptor.primal_fe_degree = 1;
3950 *   descriptor.dual_fe_degree = 2;
3951 *  
3952 * @endcode
3953 *
3954 * Then set the description of the test case, i.e. domain, boundary
3955 * values, and right hand side. These are prepackaged in classes. We
3956 * take here the description of <code>Exercise_2_3</code>, but you can
3957 * also use <code>CurvedRidges@<dim@></code>:
3958 *
3959 * @code
3960 *   descriptor.data =
3961 *   std::make_unique<Data::SetUp<Data::Exercise_2_3<dim>, dim>>();
3962 *  
3963 * @endcode
3964 *
3965 * Next, first set a dual functional and then a list of evaluation
3966 * objects. As default, we choose the evaluation of the value at an
3967 * evaluation point, represented by the classes named
3968 * <code>PointValueEvaluation</code> in the namespaces of evaluation and
3969 * dual functional classes. You can also choose the
3970 * <code>PointXDerivativeEvaluation</code> classes for the x-derivative
3971 * instead of the value at the evaluation point.
3972 *
3973
3974 *
3975 * Note that dual functional and evaluation objects should
3976 * match. However, you can give as many evaluation functionals as you
3977 * want, so you can have both the point value and the derivative evaluated
3978 * after each step (a sketch of this follows the code below). One such
3979 * additional evaluation is to output the grid in each step.
3980 *
3981 * @code
3982 *   const Point<dim> evaluation_point(0.75, 0.75);
3983 *   descriptor.dual_functional =
3984 *   std::make_unique<DualFunctional::PointValueEvaluation<dim>>(
3985 *   evaluation_point);
3986 *  
3987 *   Evaluation::PointValueEvaluation<dim> postprocessor1(evaluation_point);
3988 *   Evaluation::GridOutput<dim> postprocessor2("grid");
3989 *  
3990 *   descriptor.evaluator_list.push_back(&postprocessor1);
3991 *   descriptor.evaluator_list.push_back(&postprocessor2);
3992 *  
3993 * @endcode
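*
* If we wanted, for example, to also evaluate the x-derivative after each
* step, we could additionally register a corresponding evaluation object.
* As a sketch (assuming that this evaluation class takes the evaluation
* point as its only constructor argument, just like the point value class
* above), this would read:
*
* @code
*   Evaluation::PointXDerivativeEvaluation<dim> postprocessor3(
*     evaluation_point);
*
*   descriptor.evaluator_list.push_back(&postprocessor3);
* @endcode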
3994 *
3995 * Set the maximal number of degrees of freedom after which we want the
3996 * program to stop refining the mesh further:
3997 *
3998 * @code
3999 *   descriptor.max_degrees_of_freedom = 20000;
4000 *  
4001 * @endcode
4002 *
4003 * Finally pass the descriptor object to a function that runs the entire
4004 * solution with it:
4005 *
4006 * @code
4007 *   Framework<dim>::run(descriptor);
4008 *   }
4009 *  
4010 * @endcode
4011 *
4012 * Catch exceptions to give information about things that failed:
4013 *
4014 * @code
4015 *   catch (std::exception &exc)
4016 *   {
4017 *   std::cerr << std::endl
4018 *   << std::endl
4019 *   << "----------------------------------------------------"
4020 *   << std::endl;
4021 *   std::cerr << "Exception on processing: " << std::endl
4022 *   << exc.what() << std::endl
4023 *   << "Aborting!" << std::endl
4024 *   << "----------------------------------------------------"
4025 *   << std::endl;
4026 *   return 1;
4027 *   }
4028 *   catch (...)
4029 *   {
4030 *   std::cerr << std::endl
4031 *   << std::endl
4032 *   << "----------------------------------------------------"
4033 *   << std::endl;
4034 *   std::cerr << "Unknown exception!" << std::endl
4035 *   << "Aborting!" << std::endl
4036 *   << "----------------------------------------------------"
4037 *   << std::endl;
4038 *   return 1;
4039 *   }
4040 *  
4041 *   return 0;
4042 *   }
4043 * @endcode
4044<a name="step_14-Results"></a><h1>Results</h1>
4045
4046
4047<a name="step_14-Pointvalues"></a><h3>Point values</h3>
4048
4049
4050
4051This program offers a lot of possibilities to play around with. We can thus
4052only show a small part of all the possible results that can be obtained
4053with the help of this program. However, you are encouraged to just try
4054it out, by changing the settings in the main program. Here, we start
4055by simply letting it run, unmodified:
4056@code
4057Refinement cycle: 0
4058 Number of degrees of freedom: 72
4059 Point value: 0.03243
4060 Estimated error: 0.000702385
4061Refinement cycle: 1
4062 Number of degrees of freedom: 67
4063 Point value: 0.0324827
4064 Estimated error: 0.000888953
4065Refinement cycle: 2
4066 Number of degrees of freedom: 130
4067 Point value: 0.0329619
4068 Estimated error: 0.000454606
4069Refinement cycle: 3
4070 Number of degrees of freedom: 307
4071 Point value: 0.0331934
4072 Estimated error: 0.000241254
4073Refinement cycle: 4
4074 Number of degrees of freedom: 718
4075 Point value: 0.0333675
4076 Estimated error: 7.4912e-05
4077Refinement cycle: 5
4078 Number of degrees of freedom: 1665
4079 Point value: 0.0334083
4080 Estimated error: 3.69111e-05
4081Refinement cycle: 6
4082 Number of degrees of freedom: 3975
4083 Point value: 0.033431
4084 Estimated error: 1.54218e-05
4085Refinement cycle: 7
4086 Number of degrees of freedom: 8934
4087 Point value: 0.0334406
4088 Estimated error: 6.28359e-06
4089Refinement cycle: 8
4090 Number of degrees of freedom: 21799
4091 Point value: 0.0334444
4092@endcode
4093
4094
4095First let's look at what the program actually computed. On the seventh
4096grid, the primal and dual numerical solutions look like this (using a
4097color scheme intended to evoke the snow-capped mountains of
4098Colorado that the original author of this program now calls
4099home):
4100<table align="center">
4101 <tr>
4102 <td width="50%">
4103 <img src="https://www.dealii.org/images/steps/developer/step-14.point-value.solution-7.9.2.png" alt="">
4104 </td>
4105 <td width="50%">
4106 <img src="https://www.dealii.org/images/steps/developer/step-14.point-value.solution-7-dual.9.2.png" alt="">
4107 </td>
4108 </tr>
4109</table>
4110Apparently, the region at the bottom left is so unimportant for the
4111point value evaluation at the top right that the grid is left entirely
4112unrefined there, even though the solution has singularities at the inner
4113corner of that cell! Due
4114to the symmetry of right hand side and domain, the solution should
4115actually look as it does at the top right in all four corners, but the mesh
4116refinement criterion involving the dual solution chose to refine them
4117differently -- because we said that we really only care about a single
4118function value somewhere at the top right.
4119
4120
4121
4122Here are some of the meshes that are produced in refinement cycles 0,
41232, 4 (top row), and 5, 7, and 8 (bottom row):
4124
4125<table width="80%" align="center">
4126 <tr>
4127 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-0.9.2.png" alt="" width="100%"></td>
4128 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-2.9.2.png" alt="" width="100%"></td>
4129 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-4.9.2.png" alt="" width="100%"></td>
4130 </tr>
4131 <tr>
4132 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-5.9.2.png" alt="" width="100%"></td>
4133 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-7.9.2.png" alt="" width="100%"></td>
4134 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.grid-8.9.2.png" alt="" width="100%"></td>
4135 </tr>
4136</table>
4137
4138Note the subtle interplay between resolving the corner singularities
4139and resolving around the point of evaluation. It would be rather
4140difficult to generate such a mesh by hand, as this would require
4141judging quantitatively how strongly each of the four corner singularities
4142should be resolved, and weighing that against the resolution in the
4143vicinity of the evaluation point.
4144
4145
4146
4147The program prints the point value and the estimated error in this
4148quantity. By extrapolating these values, we can guess that the exact value
4149is somewhere close to 0.0334473, plus or minus 0.0000001 (note that we get
4150almost 6 valid digits from only 22,000 (primal) degrees of
4151freedom). This number cannot be obtained from the value of the
4152functional alone; rather, I have used the assumption that the error
4153estimator is mostly exact, and added the estimated error to
4154the computed value to get an approximation of the true
4155value. Computing with more degrees of freedom shows that this
4156assumption is indeed valid.
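To make this concrete with the numbers printed above: in refinement cycle 7,
the program reports @f$J(u_h)=0.0334406@f$ and an estimated error of about
@f$6.3\cdot 10^{-6}@f$, so the corrected value @f$J(u_h)+\eta\approx 0.0334469@f$
is already within roughly @f$5\cdot 10^{-7}@f$ of the quoted value 0.0334473;
the corrected values of the earlier cycles approach this number from below.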
4157
4158
4159
4160From the computed results, we can generate two graphs: one that shows
4161the convergence of the error @f$J(u)-J(u_h)@f$ (taking the
4162extrapolated value as correct) in the point value, and the value that
4163we get by adding up the computed value @f$J(u_h)@f$ and the estimated
4164error @f$\eta@f$ (if the error estimator @f$\eta@f$ were exact, then the value
4165@f$J(u_h)+\eta@f$ would equal the exact point value, and the error
4166in this quantity would always be zero; however, since the error
4167estimator is only a (good) approximation to the true error, we can
4168only reduce the size of the error this way). In this graph, we also
4169indicate the complexity @f${\cal O}(1/N)@f$ to show that mesh refinement
4170acts optimally in this case. The second chart compares
4171true and estimated error, and shows that the two are actually very
4172close to each other, even for such a complicated quantity as the point
4173value:
4174
4175
4176<table width="80%" align="center">
4177 <tr>
4178 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.error.png" alt="" width="100%"></td>
4179 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-value.error-estimation.png" alt="" width="100%"></td>
4180 </tr>
4181</table>
4182
4183
4184<a name="step_14-Comparingrefinementcriteria"></a><h3>Comparing refinement criteria</h3>
4185
4186
4187
4188Since we have invested considerable effort in the mesh
4189refinement driven by the dual weighted error estimator (for solving
4190the dual problem, and for evaluating the error representation), it is
4191worthwhile asking whether that effort was successful. To this end, we
4192first compare the achieved error levels for different mesh refinement
4193criteria; for the weight in the Kelly indicator, we have chosen the
4194function @f$1/(r^2+0.1^2)@f$, where @f$r@f$ is the distance to the
4195evaluation point (it can be shown that this is the optimal weight if
4196we neglect the effects of boundaries). To generate this data, simply
4197change the value of the mesh refinement criterion variable in the
4198main program.
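A minimal sketch of that change (the enumeration values are the ones
declared in the <code>ProblemDescription</code> structure above):
@code
  // Compare against, e.g., plain Kelly-indicator-driven refinement:
  descriptor.refinement_criterion =
    Framework<dim>::ProblemDescription::kelly_indicator;

  // ...or against uniform, global mesh refinement:
  // descriptor.refinement_criterion =
  //   Framework<dim>::ProblemDescription::global_refinement;
@endcode
For the weighted Kelly indicator, the weight additionally has to be made
available through <code>descriptor.kelly_weight</code>. A hypothetical
implementation of the weight @f$1/(r^2+0.1^2)@f$ mentioned above could look
like the following (the class name is made up for illustration):
@code
  template <int dim>
  class PointDistanceWeight : public Function<dim>
  {
  public:
    explicit PointDistanceWeight(const Point<dim> &center)
      : center(center)
    {}

    virtual double value(const Point<dim>  &p,
                         const unsigned int /*component*/ = 0) const override
    {
      // 1/(r^2 + 0.1^2), with r the distance to the evaluation point:
      return 1. / (center.distance_square(p) + 0.1 * 0.1);
    }

  private:
    const Point<dim> center;
  };
@endcode
The resulting comparison of error levels looks like this: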
4199
4200<img src="https://www.dealii.org/images/steps/developer/step-14.point-value.error-comparison.png" alt="">
4201
4202
4203
4204Checking these numbers, we see that for global refinement, the error
4205decays like @f${\cal O}(1/(\sqrt{N}\log N))@f$, and for the dual
4206estimator like @f${\cal O}(1/N)@f$. Generally speaking, we see that the dual
4207weighted error estimator is better than the other refinement
4208indicators, at least when compared with those that have a similarly
4209regular behavior. The Kelly indicator produces smaller errors, but
4210jumps around the plot rather irregularly, with the error also
4211changing sign sometimes. Therefore, its behavior does not allow us to
4212extrapolate the results to larger values of @f$N@f$. Furthermore, if we
4213trust the error estimates of the dual weighted error estimator, the
4214results can be improved by adding the estimated error to the computed
4215values. In terms of reliability, the weighted estimator is thus better
4216than the Kelly indicator, although the latter sometimes produces
4217smaller errors.
4218
4219
4220
4221<a name="step_14-Evaluationofpointstresses"></a><h3>Evaluation of point stresses</h3>
4222
4223
4224
4225Besides evaluating the values of the solution at a certain point, the
4226program also offers the possibility to evaluate the x-derivatives at a
4227certain point, and also to tailor mesh refinement for this. To let the
4228program compute these quantities, simply replace the two occurrences of
4229<code>PointValueEvaluation</code> in the main function by
4230<code>PointXDerivativeEvaluation</code>.
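In the main function, this replacement amounts to something like the
following sketch (assuming, as above, that both classes take the
evaluation point as their only constructor argument):
@code
  descriptor.dual_functional =
    std::make_unique<DualFunctional::PointXDerivativeEvaluation<dim>>(
      evaluation_point);

  Evaluation::PointXDerivativeEvaluation<dim> postprocessor1(evaluation_point);
@endcode
Letting the program run then produces the following output: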
4231@code
4232Refinement cycle: 0
4233 Number of degrees of freedom: 72
4234 Point x-derivative: -0.0719397
4235 Estimated error: -0.0126173
4236Refinement cycle: 1
4237 Number of degrees of freedom: 61
4238 Point x-derivative: -0.0707956
4239 Estimated error: -0.00774316
4240Refinement cycle: 2
4241 Number of degrees of freedom: 131
4242 Point x-derivative: -0.0568671
4243 Estimated error: -0.00313426
4244Refinement cycle: 3
4245 Number of degrees of freedom: 247
4246 Point x-derivative: -0.053033
4247 Estimated error: -0.00136114
4248Refinement cycle: 4
4249 Number of degrees of freedom: 532
4250 Point x-derivative: -0.0526429
4251 Estimated error: -0.000558868
4252Refinement cycle: 5
4253 Number of degrees of freedom: 1267
4254 Point x-derivative: -0.0526955
4255 Estimated error: -0.000220116
4256Refinement cycle: 6
4257 Number of degrees of freedom: 2864
4258 Point x-derivative: -0.0527495
4259 Estimated error: -9.46731e-05
4260Refinement cycle: 7
4261 Number of degrees of freedom: 6409
4262 Point x-derivative: -0.052785
4263 Estimated error: -4.21543e-05
4264Refinement cycle: 8
4265 Number of degrees of freedom: 14183
4266 Point x-derivative: -0.0528028
4267 Estimated error: -2.04241e-05
4268Refinement cycle: 9
4269 Number of degrees of freedom: 29902
4270 Point x-derivative: -0.052814
4271@endcode
4272
4273
4274
4275The solution looks roughly the same as before (the exact solution of
4276course <em>is</em> the same, only the grid changed a little), but the
4277dual solution is now different. A close-up around the point of
4278evaluation shows this:
4279<table align="center">
4280 <tr>
4281 <td width="50%">
4282 <img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.solution-7-dual.png" alt="">
4283 </td>
4284 <td width="50%">
4285 <img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.solution-7-dual-close-up.png" alt="">
4286 </td></tr>
4287</table>
4288This time, the grids in refinement cycles 0, 5, 6, 7, 8, and 9 look
4289like this:
4290
4291<table align="center" width="80%">
4292 <tr>
4293 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-0.9.2.png" alt="" width="100%"></td>
4294 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-5.9.2.png" alt="" width="100%"></td>
4295 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-6.9.2.png" alt="" width="100%"></td>
4296 </tr>
4297 <tr>
4298 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-7.9.2.png" alt="" width="100%"></td>
4299 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-8.9.2.png" alt="" width="100%"></td>
4300 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.grid-9.9.2.png" alt="" width="100%"></td>
4301 </tr>
4302</table>
4303
4304Note the asymmetry of the grids compared with those we obtained for
4305the point evaluation. This is due to the fact that the domain and the primal
4306solution may be symmetric about the diagonal, but the @f$x@f$-derivative is
4307not, and the latter enters the refinement criterion.
4308
4309
4310
4311Then, it is interesting to compare the actually computed values of the
4312quantity of interest (i.e. the x-derivative of the solution at one
4313point) with a reference value of -0.0528223... plus or minus
43140.0000005. We get this reference value by computing on a finer grid after
4315a few more mesh refinement steps, with approximately 130,000 cells.
4316Recall that if the error is @f${\cal O}(1/N)@f$ in the optimal case, then
4317taking a mesh with ten times more cells gives us one additional digit
4318in the result.
4319
4320
4321
4322In the left part of the following chart, you again see the convergence
4323of the error towards this extrapolated value, while on the right you
4324see a comparison of true and estimated error:
4325
4326<table width="80%" align="center">
4327 <tr>
4328 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.error.png" alt="" width="100%"></td>
4329 <td><img src="https://www.dealii.org/images/steps/developer/step-14.point-derivative.error-estimation.png" alt="" width="100%"></td>
4330 </tr>
4331</table>
4332
4333After an initial phase where the true error changes its sign, the
4334estimated error again matches it quite well. Also note the dramatic
4335improvement in the error when using the estimated error to correct the
4336computed value of @f$J(u_h)@f$.
4337
4338
4339
4340<a name="step_14-step13revisited"></a><h3>step-13 revisited</h3>
4341
4342
4343
4344If instead of the <code>Exercise_2_3</code> data set, we choose
4345<code>CurvedRidges</code> in the main function, and choose @f$(0.5,0.5)@f$
4346as the evaluation point, then we can redo the
4347computations of the previous example program, to compare whether the
4348results obtained with the help of the dual weighted error estimator
4349are better than those we had previously.
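As a sketch, these two changes in the main function would read (using the
same <code>Data::SetUp</code> mechanism as for the other test case):
@code
  const Point<dim> evaluation_point(0.5, 0.5);

  descriptor.data =
    std::make_unique<Data::SetUp<Data::CurvedRidges<dim>, dim>>();
@endcode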
4350
4351
4352
4353First, the meshes after 9 adaptive refinement cycles obtained with
4354the point evaluation and derivative evaluation refinement
4355criteria, respectively, look like this:
4356
4357<table width="80%" align="center">
4358 <tr>
4359 <td><img src="https://www.dealii.org/images/steps/developer/step-14.step-13.point-value.png" alt="" width="100%"></td>
4360 <td><img src="https://www.dealii.org/images/steps/developer/step-14.step-13.point-derivative.png" alt="" width="100%"></td>
4361 </tr>
4362</table>
4363
4364The features of the solution can still be seen in the mesh, but since the
4365solution is smooth, the singularities of the dual solution entirely
4366dominate the mesh refinement criterion, and lead to strongly
4367concentrated meshes. The solution after the seventh refinement step looks
4368like the following:
4369
4370<table width="40%" align="center">
4371 <tr>
4372 <td><img src="https://www.dealii.org/images/steps/developer/step-14.step-13.solution-7.9.2.png" alt="" width="100%"></td>
4373 </tr>
4374</table>
4375
4376Obviously, the solution is worse in some places, but the mesh
4377refinement process should have ensured that these places are not
4378important for computing the point value.
4379
4380
4381
4382
4383The next point is to compare the new (duality based) mesh refinement
4384criterion with the old ones. These are the results:
4385
4386<img src="https://www.dealii.org/images/steps/developer/step-14.step-13.error-comparison.png" alt="">
4387
4388
4389
4390The results are, well, somewhat mixed. First, the Kelly indicator
4391disqualifies itself by its unsteady behavior, changing the sign of the
4392error several times, and with increasing errors under mesh
4393refinement. The dual weighted error estimator shows a monotone decrease
4394in the error, and is better than the weighted Kelly and global
4395refinement, but the margin is not as large as expected. This is, here,
4396due to the fact that global refinement can exploit the regular
4397structure of the meshes around the point of evaluation, which leads to
4398a better order of convergence for the point error. However, if we had
4399a mesh that is not locally rectangular, for example because we had to
4400approximate curved boundaries, or if the coefficients were not
4401constant, then this advantage of globally refined meshes would
4402vanish, while the good performance of the duality-based estimator
4403would remain.
4404
4405
4406
4407
4408<a name="step_14-Conclusionsandoutlook"></a><h3>Conclusions and outlook</h3>
4409
4410
4411
4412The results here do not clearly demonstrate the superiority of the
4413dual weighted error estimation approach for mesh refinement over other
4414mesh refinement criteria, such as the Kelly indicator. This is due to
4415the relative simplicity of the applications shown here. If you are not
4416yet convinced that this approach is indeed superior, you are invited
4417to browse through the literature indicated in the introduction, where
4418plenty of examples are provided in which the dual weighted approach can
4419reduce the necessary numerical work by orders of magnitude, making
4420it the only way to compute certain quantities to reasonable
4421accuracy at all.
4422
4423
4424
4425Quite apart from any objections you may raise against its use as a mesh
4426refinement criterion, consider that accurate knowledge of the error in
4427the quantity one wants to compute is of great use in itself, since we can
4428stop computations as soon as we are satisfied with the accuracy. Using more
4429traditional approaches, it is very difficult to get accurate estimates
4430for arbitrary quantities, except for, maybe, the error in the energy
4431norm, and we then have no guarantee that the result we computed
4432satisfies any requirements on its accuracy. Also, as was shown for the
4433evaluation of point values and derivatives, the error estimate can be
4434used to extrapolate the results, yielding much higher accuracy in the
4435quantity we want to know.
4436
4437
4438
4439Leaving these mathematical considerations aside, we have tried to write the
4440program in a modular way, such that implementing another test case, or
4441another evaluation and dual functional, is simple. You are encouraged
4442to take the program as a basis for your own experiments, and to play a
4443little.
4444 *
4445 *
4446<a name="step_14-PlainProg"></a>
4447<h1> The plain program</h1>
4448@include "step-14.cc"
4449*/