step-22.h
1342 *   Tensor<1, dim> RightHandSide<dim>::value(const Point<dim> & /*p*/) const
1343 *   {
1344 *   return Tensor<1, dim>();
1345 *   }
1346 *  
1347 *  
1348 *   template <int dim>
1349 *   void RightHandSide<dim>::value_list(const std::vector<Point<dim>> &vp,
1350 *   std::vector<Tensor<1, dim>> &values) const
1351 *   {
1352 *   for (unsigned int c = 0; c < vp.size(); ++c)
1353 *   {
1354 *   values[c] = RightHandSide<dim>::value(vp[c]);
1355 *   }
1356 *   }
1357 *  
1358 *  
1359 * @endcode
1360 *
1361 *
1362 * <a name="step_22-Linearsolversandpreconditioners"></a>
1363 * <h3>Linear solvers and preconditioners</h3>
1364 *
1365
1366 *
1367 * The linear solvers and preconditioners are discussed extensively in the
1368 * introduction. Here, we create the respective objects that will be used.
1369 *
1370
1371 *
1372 *
1373 * <a name="step_22-ThecodeInverseMatrixcodeclasstemplate"></a>
1374 * <h4>The <code>InverseMatrix</code> class template</h4>
1375 * The <code>InverseMatrix</code> class represents the data structure for an
1376 * inverse matrix. Unlike @ref step_20 "step-20", we implement this with a class instead of
1377 * the helper function inverse_linear_operator() because we will apply this
1378 * class to different kinds of matrices that will require different preconditioners
1379 * (in @ref step_20 "step-20" we only used a non-identity preconditioner for the mass
1380 * matrix). The types of matrix and preconditioner are passed to this class
1381 * via template parameters, and matrix and preconditioner objects of these
1382 * types will then be passed to the constructor when an
1383 * <code>InverseMatrix</code> object is created. The member function
1384 * <code>vmult</code> is obtained by solving a linear system:
1385 *
1386 * @code
1387 *   template <class MatrixType, class PreconditionerType>
1388 *   class InverseMatrix : public Subscriptor
1389 *   {
1390 *   public:
1391 *   InverseMatrix(const MatrixType &m,
1392 *   const PreconditionerType &preconditioner);
1393 *  
1394 *   void vmult(Vector<double> &dst, const Vector<double> &src) const;
1395 *  
1396 *   private:
1397 *   const SmartPointer<const MatrixType> matrix;
1398 *   const SmartPointer<const PreconditionerType> preconditioner;
1399 *   };
1400 *  
1401 *  
1402 *   template <class MatrixType, class PreconditionerType>
1403 *   InverseMatrix<MatrixType, PreconditionerType>::InverseMatrix(
1404 *   const MatrixType &m,
1405 *   const PreconditionerType &preconditioner)
1406 *   : matrix(&m)
1407 *   , preconditioner(&preconditioner)
1408 *   {}
1409 *  
1410 *  
1411 * @endcode
1412 *
1413 * This is the implementation of the <code>vmult</code> function.
1414 *
1415
1416 *
1417 * In this class we use a rather large tolerance for the solver control. The
1418 * reason for this is that the function is used very frequently, and hence,
1419 * any additional effort to make the residual in the CG solve smaller makes
1420 * the solution more expensive. Note that we use this class not only as a
1421 * preconditioner for the Schur complement, but also when forming the
1422 * inverse of the Laplace matrix &ndash; which is hence directly responsible
1423 * for the accuracy of the solution itself, so we cannot choose too large a
1424 * tolerance, either.
1425 *
1426 * @code
1427 *   template <class MatrixType, class PreconditionerType>
1428 *   void InverseMatrix<MatrixType, PreconditionerType>::vmult(
1429 *   Vector<double> &dst,
1430 *   const Vector<double> &src) const
1431 *   {
1432 *   SolverControl solver_control(src.size(), 1e-6 * src.l2_norm());
1433 *   SolverCG<Vector<double>> cg(solver_control);
1434 *  
1435 *   dst = 0;
1436 *  
1437 *   cg.solve(*matrix, dst, src, *preconditioner);
1438 *   }
1439 *  
1440 *  
1441 * @endcode
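
As a usage sketch (not part of the tutorial program; the matrix <code>A</code>
and the preconditioner <code>ilu</code> are hypothetical stand-ins for objects
set up elsewhere), applying an <code>InverseMatrix</code> object looks like
this:

@code
  // Hedged sketch: A and ilu are assumed to be initialized elsewhere.
  SparseMatrix<double> A;
  SparseILU<double>    ilu;

  InverseMatrix<SparseMatrix<double>, SparseILU<double>> A_inverse(A, ilu);

  Vector<double> b(A.n());   // right hand side, assumed to be filled elsewhere
  Vector<double> x(A.n());
  A_inverse.vmult(x, b);     // computes x = A^{-1} b via the CG solve above
@endcode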
1442 *
1443 *
1444 * <a name="step_22-ThecodeSchurComplementcodeclasstemplate"></a>
1445 * <h4>The <code>SchurComplement</code> class template</h4>
1446 *
1447
1448 *
1449 * This class implements the Schur complement discussed in the introduction.
1450 * It is analogous to the one in @ref step_20 "step-20", though we now call it with a
1451 * template parameter <code>PreconditionerType</code> in order to access that
1452 * type when specifying the respective type of the inverse matrix class. As a
1453 * consequence of the definition above, the declaration
1454 * <code>InverseMatrix</code> now contains the second template parameter for
1455 * a preconditioner class as above, which affects the
1456 * <code>SmartPointer</code> object <code>A_inverse</code> as well.
1457 *
1458 * @code
1459 *   template <class PreconditionerType>
1460 *   class SchurComplement : public Subscriptor
1461 *   {
1462 *   public:
1463 *   SchurComplement(
1464 *   const BlockSparseMatrix<double> &system_matrix,
1465 *   const InverseMatrix<SparseMatrix<double>, PreconditionerType> &A_inverse);
1466 *  
1467 *   void vmult(Vector<double> &dst, const Vector<double> &src) const;
1468 *  
1469 *   private:
1470 *   const SmartPointer<const BlockSparseMatrix<double>> system_matrix;
1471 *   const SmartPointer<
1472 *   const InverseMatrix<SparseMatrix<double>, PreconditionerType>>
1473 *   A_inverse;
1474 *  
1475 *   mutable Vector<double> tmp1, tmp2;
1476 *   };
1477 *  
1478 *  
1479 *  
1480 *   template <class PreconditionerType>
1481 *   SchurComplement<PreconditionerType>::SchurComplement(
1482 *   const BlockSparseMatrix<double> &system_matrix,
1483 *   const InverseMatrix<SparseMatrix<double>, PreconditionerType> &A_inverse)
1484 *   : system_matrix(&system_matrix)
1485 *   , A_inverse(&A_inverse)
1486 *   , tmp1(system_matrix.block(0, 0).m())
1487 *   , tmp2(system_matrix.block(0, 0).m())
1488 *   {}
1489 *  
1490 *  
1491 *   template <class PreconditionerType>
1492 *   void
1493 *   SchurComplement<PreconditionerType>::vmult(Vector<double> &dst,
1494 *   const Vector<double> &src) const
1495 *   {
1496 *   system_matrix->block(0, 1).vmult(tmp1, src);
1497 *   A_inverse->vmult(tmp2, tmp1);
1498 *   system_matrix->block(1, 0).vmult(dst, tmp2);
1499 *   }
1500 *  
1501 *  
1502 * @endcode
1503 *
1504 *
1505 * <a name="step_22-StokesProblemclassimplementation"></a>
1506 * <h3>StokesProblem class implementation</h3>
1507 *
1508
1509 *
1510 *
1511 * <a name="step_22-StokesProblemStokesProblem"></a>
1512 * <h4>StokesProblem::StokesProblem</h4>
1513 *
1514
1515 *
1516 * The constructor of this class looks very similar to the one of
1517 * @ref step_20 "step-20". The constructor initializes the variables for the polynomial
1518 * degree, triangulation, finite element system and the dof handler. The
1519 * underlying polynomial functions are of order <code>degree+1</code> for
1520 * the vector-valued velocity components and of order <code>degree</code>
1521 * for the pressure. This gives the LBB-stable element pair
1522 * @f$Q_{degree+1}^d\times Q_{degree}@f$, often referred to as the Taylor-Hood
1523 * element.
1524 *
1525
1526 *
1527 * Note that we initialize the triangulation with a MeshSmoothing argument,
1528 * which ensures that the refinement of cells is done in such a way that the
1529 * approximation of the PDE solution remains well-behaved (problems arise if
1530 * grids are too unstructured); see the documentation of
1531 * <code>Triangulation::MeshSmoothing</code> for details.
1532 *
1533 * @code
1534 *   template <int dim>
1535 *   StokesProblem<dim>::StokesProblem(const unsigned int degree)
1536 *   : degree(degree)
1537 *   , triangulation(Triangulation<dim>::maximum_smoothing)
1538 *   , fe(FE_Q<dim>(degree + 1) ^ dim, FE_Q<dim>(degree))
1539 *   , dof_handler(triangulation)
1540 *   {}
1541 *  
1542 *  
1543 * @endcode
1544 *
1545 *
1546 * <a name="step_22-StokesProblemsetup_dofs"></a>
1547 * <h4>StokesProblem::setup_dofs</h4>
1548 *
1549
1550 *
1551 * Given a mesh, this function associates the degrees of freedom with it and
1552 * creates the corresponding matrices and vectors. At the beginning it also
1553 * releases the pointer to the preconditioner object (if the shared pointer
1554 * pointed at anything at all at this point), since the preconditioner will
1555 * definitely not be needed any more after this point and will have to be
1556 * re-computed after assembling the matrix; it also unties the sparse
1557 * matrices from their sparsity pattern objects.
1558 *
1559
1560 *
1561 * We then proceed with distributing degrees of freedom and renumbering
1562 * them: In order to make the ILU preconditioner (in 3d) work efficiently,
1563 * it is important to enumerate the degrees of freedom in such a way that it
1564 * reduces the bandwidth of the matrix, or maybe more importantly: in such a
1565 * way that the ILU is as close as possible to a real LU decomposition. On
1566 * the other hand, we need to preserve the block structure of velocity and
1567 * pressure already seen in @ref step_20 "step-20" and @ref step_21 "step-21". This is done in two
1568 * steps: First, all dofs are renumbered to improve the ILU and then we
1569 * renumber once again by components. Since
1570 * <code>DoFRenumbering::component_wise</code> does not touch the
1571 * renumbering within the individual blocks, the basic renumbering from the
1572 * first step remains. As for how to renumber the degrees of freedom to improve
1573 * the ILU: deal.II has a number of algorithms that attempt to find
1574 * orderings to improve ILUs, or reduce the bandwidth of matrices, or
1575 * optimize some other aspect. The DoFRenumbering namespace shows a
1576 * comparison of the results we obtain with several of these algorithms
1577 * based on the testcase discussed here in this tutorial program. Here, we
1578 * will use the traditional Cuthill-McKee algorithm already used in some of
1579 * the previous tutorial programs. In the <a href="#improved-ilu">section
1580 * on improved ILU</a> we're going to discuss this issue in more detail.
1581 *
1582
1583 *
1584 * There is one more change compared to previous tutorial programs: There is
1585 * no reason to sort the <code>dim</code> velocity components
1586 * individually. In fact, rather than first enumerating all @f$x@f$-velocities,
1587 * then all @f$y@f$-velocities, etc, we would like to keep all velocities at the
1588 * same location together and only separate between velocities (all
1589 * components) and pressures. By default, this is not what the
1590 * DoFRenumbering::component_wise function does: it treats each vector
1591 * component separately; what we have to do is group several components into
1592 * "blocks" and pass this block structure to that function. Consequently, we
1593 * allocate a vector <code>block_component</code> with as many elements as
1594 * there are components and describe all velocity components to correspond
1595 * to block 0, while the pressure component will form block 1:
1596 *
1597 * @code
1598 *   template <int dim>
1599 *   void StokesProblem<dim>::setup_dofs()
1600 *   {
1601 *   A_preconditioner.reset();
1602 *   system_matrix.clear();
1603 *   preconditioner_matrix.clear();
1604 *  
1605 *   dof_handler.distribute_dofs(fe);
1606 *   DoFRenumbering::Cuthill_McKee(dof_handler);
1607 *  
1608 *   std::vector<unsigned int> block_component(dim + 1, 0);
1609 *   block_component[dim] = 1;
1610 *   DoFRenumbering::component_wise(dof_handler, block_component);
1611 *  
1612 * @endcode
1613 *
1614 * Now comes the implementation of Dirichlet boundary conditions, which
1615 * should be evident after the discussion in the introduction. All that
1616 * changed is that the function already appears in the setup functions,
1617 * whereas we were used to seeing it in some assembly routine. Further down
1618 * below where we set up the mesh, we will associate the top boundary
1619 * where we impose Dirichlet boundary conditions with boundary indicator
1620 * 1. We will have to pass this boundary indicator as second argument to
1621 * the function below interpolating boundary values. There is one more
1622 * thing, though. The function describing the Dirichlet conditions was
1623 * defined for all components, both velocity and pressure. However, the
1624 * Dirichlet conditions are to be set for the velocity only. To this end,
1625 * we use a ComponentMask that only selects the velocity components. The
1626 * component mask is obtained from the finite element by specifying the
1627 * particular components we want. Since we use adaptively refined grids,
1628 * the affine constraints object needs to be first filled with hanging node
1629 * constraints generated from the DoF handler. Note the order of the two
1630 * functions &mdash; we first compute the hanging node constraints, and
1631 * then insert the boundary values into the constraints object. This makes
1632 * sure that we respect H<sup>1</sup> conformity on boundaries with
1633 * hanging nodes (in three space dimensions), where the hanging node needs
1634 * to dominate the Dirichlet boundary values.
1635 *
1636 * @code
1637 *   {
1638 *   constraints.clear();
1639 *  
1640 *   const FEValuesExtractors::Vector velocities(0);
1641 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
1642 *   VectorTools::interpolate_boundary_values(dof_handler,
1643 *   1,
1644 *   BoundaryValues<dim>(),
1645 *   constraints,
1646 *   fe.component_mask(velocities));
1647 *   }
1648 *  
1649 *   constraints.close();
1650 *  
1651 * @endcode
1652 *
1653 * In analogy to @ref step_20 "step-20", we count the dofs in the individual components.
1654 * We could do this in the same way as there, but we want to operate on
1655 * the block structure we used already for the renumbering: The function
1656 * <code>DoFTools::count_dofs_per_fe_block</code> does the same as
1657 * <code>DoFTools::count_dofs_per_fe_component</code>, but now grouped as
1658 * velocity and pressure block via <code>block_component</code>.
1659 *
1660 * @code
1661 *   const std::vector<types::global_dof_index> dofs_per_block =
1662 *   DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
1663 *   const types::global_dof_index n_u = dofs_per_block[0];
1664 *   const types::global_dof_index n_p = dofs_per_block[1];
1665 *  
1666 *   std::cout << " Number of active cells: " << triangulation.n_active_cells()
1667 *   << std::endl
1668 *   << " Number of degrees of freedom: " << dof_handler.n_dofs()
1669 *   << " (" << n_u << '+' << n_p << ')' << std::endl;
1670 *  
1671 * @endcode
1672 *
1673 * The next task is to allocate a sparsity pattern for the system matrix we
1674 * will create and one for the preconditioner matrix. We could do this in
1675 * the same way as in @ref step_20 "step-20", i.e. directly build an object of type
1676 * SparsityPattern through DoFTools::make_sparsity_pattern. However, there
1677 * is a major reason not to do so:
1678 * In 3d, the function DoFTools::max_couplings_between_dofs yields a
1679 * conservative but rather large number for the coupling between the
1680 * individual dofs, so that the memory initially provided for the creation
1681 * of the sparsity pattern of the matrix is far too much -- so much, in fact,
1682 * that the initial sparsity pattern won't even fit into the physical memory
1683 * of most systems for moderately-sized 3d problems; see also the
1684 * discussion in @ref step_18 "step-18". Instead, we first build temporary objects that use
1685 * a different data structure that doesn't require allocating more memory
1686 * than necessary but isn't suitable for use as a basis of SparseMatrix or
1687 * BlockSparseMatrix objects; in a second step we then copy these objects
1688 * into objects of type BlockSparsityPattern. This is entirely analogous to
1689 * what we already did in @ref step_11 "step-11" and @ref step_18 "step-18". In particular, we make use of
1690 * the fact that we will never write into the @f$(1,1)@f$ block of the system
1691 * matrix and that this is the only block to be filled for the
1692 * preconditioner matrix.
1693 *
1694
1695 *
1696 * All this is done inside new scopes, which means that the memory of
1697 * <code>dsp</code> will be released once the information has been copied to
1698 * <code>sparsity_pattern</code>.
1699 *
1700 * @code
1701 *   {
1702 *   BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);
1703 *  
1704 *   Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
1705 *   for (unsigned int c = 0; c < dim + 1; ++c)
1706 *   for (unsigned int d = 0; d < dim + 1; ++d)
1707 *   if (!((c == dim) && (d == dim)))
1708 *   coupling[c][d] = DoFTools::always;
1709 *   else
1710 *   coupling[c][d] = DoFTools::none;
1711 *  
1712 *   DoFTools::make_sparsity_pattern(
1713 *   dof_handler, coupling, dsp, constraints, false);
1714 *  
1715 *   sparsity_pattern.copy_from(dsp);
1716 *   }
1717 *  
1718 *   {
1719 *   BlockDynamicSparsityPattern preconditioner_dsp(dofs_per_block,
1720 *   dofs_per_block);
1721 *  
1722 *   Table<2, DoFTools::Coupling> preconditioner_coupling(dim + 1, dim + 1);
1723 *   for (unsigned int c = 0; c < dim + 1; ++c)
1724 *   for (unsigned int d = 0; d < dim + 1; ++d)
1725 *   if (((c == dim) && (d == dim)))
1726 *   preconditioner_coupling[c][d] = DoFTools::always;
1727 *   else
1728 *   preconditioner_coupling[c][d] = DoFTools::none;
1729 *  
1730 *   DoFTools::make_sparsity_pattern(dof_handler,
1731 *   preconditioner_coupling,
1732 *   preconditioner_dsp,
1733 *   constraints,
1734 *   false);
1735 *  
1736 *   preconditioner_sparsity_pattern.copy_from(preconditioner_dsp);
1737 *   }
1738 *  
1739 * @endcode
1740 *
1741 * Finally, the system matrix, the preconditioner matrix, the solution and
1742 * the right hand side vector are created from the block structure similar
1743 * to the approach in @ref step_20 "step-20":
1744 *
1745 * @code
1746 *   system_matrix.reinit(sparsity_pattern);
1747 *   preconditioner_matrix.reinit(preconditioner_sparsity_pattern);
1748 *  
1749 *   solution.reinit(dofs_per_block);
1750 *   system_rhs.reinit(dofs_per_block);
1751 *   }
1752 *  
1753 *  
1754 * @endcode
1755 *
1756 *
1757 * <a name="step_22-StokesProblemassemble_system"></a>
1758 * <h4>StokesProblem::assemble_system</h4>
1759 *
1760
1761 *
1762 * The assembly process follows the discussion in @ref step_20 "step-20" and in the
1763 * introduction. We use the well-known abbreviations for the data structures
1764 * that hold the local matrices, right hand side, and global numbering of the
1765 * degrees of freedom for the present cell.
1766 *
1767 * @code
1768 *   template <int dim>
1769 *   void StokesProblem<dim>::assemble_system()
1770 *   {
1771 *   system_matrix = 0;
1772 *   system_rhs = 0;
1773 *   preconditioner_matrix = 0;
1774 *  
1775 *   QGauss<dim> quadrature_formula(degree + 2);
1776 *  
1777 *   FEValues<dim> fe_values(fe,
1778 *   quadrature_formula,
1779 *   update_values | update_quadrature_points |
1780 *   update_JxW_values | update_gradients);
1781 *  
1782 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1783 *  
1784 *   const unsigned int n_q_points = quadrature_formula.size();
1785 *  
1786 *   FullMatrix<double> local_matrix(dofs_per_cell, dofs_per_cell);
1787 *   FullMatrix<double> local_preconditioner_matrix(dofs_per_cell,
1788 *   dofs_per_cell);
1789 *   Vector<double> local_rhs(dofs_per_cell);
1790 *  
1791 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1792 *  
1793 *   const RightHandSide<dim> right_hand_side;
1794 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points, Tensor<1, dim>());
1795 *  
1796 * @endcode
1797 *
1798 * Next, we need two objects that work as extractors for the FEValues
1799 * object. Their use is explained in detail in the report on @ref
1800 * vector_valued :
1801 *
1802 * @code
1803 *   const FEValuesExtractors::Vector velocities(0);
1804 *   const FEValuesExtractors::Scalar pressure(dim);
1805 *  
1806 * @endcode
1807 *
1808 * As an extension over @ref step_20 "step-20" and @ref step_21 "step-21", we include a few optimizations
1809 * that make assembly much faster for this particular problem. The
1810 * improvements are based on the observation that we do a few calculations
1811 * too many times when we do as in @ref step_20 "step-20": The symmetric gradient actually
1812 * has <code>dofs_per_cell</code> different values per quadrature point, but
1813 * we extract it <code>dofs_per_cell*dofs_per_cell</code> times from the
1814 * FEValues object - for both the loop over <code>i</code> and the inner
1815 * loop over <code>j</code>. In 3d, that means evaluating it @f$89^2=7921@f$
1816 * instead of @f$89@f$ times, a not insignificant difference.
1817 *
1818
1819 *
1820 * So what we're going to do here is to avoid such repeated calculations
1821 * by getting a vector of rank-2 tensors (and similarly for the divergence
1822 * and the basis function value on pressure) at the quadrature point prior
1823 * to starting the loop over the dofs on the cell. First, we create the
1824 * respective objects that will hold these values. Then, we start the loop
1825 * over all cells and the loop over the quadrature points, where we first
1826 * extract these values. There is one more optimization we implement here:
1827 * the local matrix (as well as the global one) is going to be symmetric,
1828 * since all the operations involved are symmetric with respect to @f$i@f$ and
1829 * @f$j@f$. This is implemented by simply running the inner loop not to
1830 * <code>dofs_per_cell</code>, but only up to <code>i</code>, the index of
1831 * the outer loop.
1832 *
1833 * @code
1834 *   std::vector<SymmetricTensor<2, dim>> symgrad_phi_u(dofs_per_cell);
1835 *   std::vector<double> div_phi_u(dofs_per_cell);
1836 *   std::vector<Tensor<1, dim>> phi_u(dofs_per_cell);
1837 *   std::vector<double> phi_p(dofs_per_cell);
1838 *  
1839 *   for (const auto &cell : dof_handler.active_cell_iterators())
1840 *   {
1841 *   fe_values.reinit(cell);
1842 *   local_matrix = 0;
1843 *   local_preconditioner_matrix = 0;
1844 *   local_rhs = 0;
1845 *  
1846 *   right_hand_side.value_list(fe_values.get_quadrature_points(),
1847 *   rhs_values);
1848 *  
1849 *   for (unsigned int q = 0; q < n_q_points; ++q)
1850 *   {
1851 *   for (unsigned int k = 0; k < dofs_per_cell; ++k)
1852 *   {
1853 *   symgrad_phi_u[k] =
1854 *   fe_values[velocities].symmetric_gradient(k, q);
1855 *   div_phi_u[k] = fe_values[velocities].divergence(k, q);
1856 *   phi_u[k] = fe_values[velocities].value(k, q);
1857 *   phi_p[k] = fe_values[pressure].value(k, q);
1858 *   }
1859 *  
1860 * @endcode
1861 *
1862 * Now finally for the bilinear forms of both the system matrix and
1863 * the matrix we use for the preconditioner. Recall that the
1864 * formulas for these two are
1865 * @f{align*}{
1866 * A_{ij} &= a(\varphi_i,\varphi_j)
1867 * \\ &= \underbrace{2(\varepsilon(\varphi_{i,\textbf{u}}),
1868 * \varepsilon(\varphi_{j,\textbf{u}}))_{\Omega}}
1869 * _{(1)}
1870 * \;
1871 * \underbrace{- (\textrm{div}\; \varphi_{i,\textbf{u}},
1872 * \varphi_{j,p})_{\Omega}}
1873 * _{(2)}
1874 * \;
1875 * \underbrace{- (\varphi_{i,p},
1876 * \textrm{div}\;
1877 * \varphi_{j,\textbf{u}})_{\Omega}}
1878 * _{(3)}
1879 * @f}
1880 * and
1881 * @f{align*}{
1882 * M_{ij} &= \underbrace{(\varphi_{i,p},
1883 * \varphi_{j,p})_{\Omega}}
1884 * _{(4)},
1885 * @f}
1886 * respectively, where @f$\varphi_{i,\textbf{u}}@f$ and @f$\varphi_{i,p}@f$
1887 * are the velocity and pressure components of the @f$i@f$th shape
1888 * function. The various terms above are then easily recognized in
1889 * the following implementation:
1890 *
1891 * @code
1892 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1893 *   {
1894 *   for (unsigned int j = 0; j <= i; ++j)
1895 *   {
1896 *   local_matrix(i, j) +=
1897 *   (2 * (symgrad_phi_u[i] * symgrad_phi_u[j]) // (1)
1898 *   - div_phi_u[i] * phi_p[j] // (2)
1899 *   - phi_p[i] * div_phi_u[j]) // (3)
1900 *   * fe_values.JxW(q); // * dx
1901 *  
1902 *   local_preconditioner_matrix(i, j) +=
1903 *   (phi_p[i] * phi_p[j]) // (4)
1904 *   * fe_values.JxW(q); // * dx
1905 *   }
1906 * @endcode
1907 *
1908 * Note that in the implementation of (1) above, `operator*`
1909 * is overloaded for symmetric tensors, yielding the scalar
1910 * product between the two tensors.
1911 *
1912
1913 *
1914 * For the right-hand side, we need to multiply the (vector of)
1915 * velocity shape functions with the vector of body force
1916 * right-hand side components, both evaluated at the current
1917 * quadrature point. We have implemented the body forces as a
1918 * `TensorFunction<1,dim>`, so its values at quadrature points
1919 * are already tensors for which the application of `operator*`
1920 * against the velocity components of the shape function results
1921 * in the dot product, as intended.
1922 *
1923 * @code
1924 *   local_rhs(i) += phi_u[i] // phi_u_i(x_q)
1925 *   * rhs_values[q] // * f(x_q)
1926 *   * fe_values.JxW(q); // * dx
1927 *   }
1928 *   }
1929 *  
1930 * @endcode
1931 *
1932 * Before we can write the local data into the global matrix (and
1933 * simultaneously use the AffineConstraints object to apply
1934 * Dirichlet boundary conditions and eliminate hanging node constraints,
1935 * as we discussed in the introduction), we have to be careful about one
1936 * thing, though. We have only built half of the local matrices
1937 * because of symmetry, but we're going to save the full matrices
1938 * in order to use the standard functions for solving. This is done
1939 * by flipping the indices in case we are pointing into the empty part
1940 * of the local matrices.
1941 *
1942 * @code
1943 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1944 *   for (unsigned int j = i + 1; j < dofs_per_cell; ++j)
1945 *   {
1946 *   local_matrix(i, j) = local_matrix(j, i);
1947 *   local_preconditioner_matrix(i, j) =
1948 *   local_preconditioner_matrix(j, i);
1949 *   }
1950 *  
1951 *   cell->get_dof_indices(local_dof_indices);
1952 *   constraints.distribute_local_to_global(local_matrix,
1953 *   local_rhs,
1954 *   local_dof_indices,
1955 *   system_matrix,
1956 *   system_rhs);
1957 *   constraints.distribute_local_to_global(local_preconditioner_matrix,
1958 *   local_dof_indices,
1959 *   preconditioner_matrix);
1960 *   }
1961 *  
1962 * @endcode
1963 *
1964 * Before we solve this linear system, we generate a
1965 * preconditioner for the velocity-velocity matrix, i.e.,
1966 * <code>block(0,0)</code> in the system matrix. As mentioned above, this
1967 * depends on the spatial dimension. Since the two classes described by
1968 * the <code>InnerPreconditioner::type</code> alias have the same
1969 * interface, we do not have to do anything different whether we want to
1970 * use a sparse direct solver or an ILU:
1971 *
1972 * @code
1973 *   std::cout << " Computing preconditioner..." << std::endl << std::flush;
1974 *  
1975 *   A_preconditioner =
1976 *   std::make_shared<typename InnerPreconditioner<dim>::type>();
1977 *   A_preconditioner->initialize(
1978 *   system_matrix.block(0, 0),
1979 *   typename InnerPreconditioner<dim>::type::AdditionalData());
1980 *   }
1981 *  
1982 *  
1983 *  
1984 * @endcode
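
For reference, the <code>InnerPreconditioner</code> structure used here is
declared near the top of the program (outside the excerpt shown above); it
selects a sparse direct solver in 2d and an ILU in 3d, essentially as follows:

@code
  template <int dim>
  struct InnerPreconditioner;

  // In 2d, a sparse direct solve of the velocity block is affordable:
  template <>
  struct InnerPreconditioner<2>
  {
    using type = SparseDirectUMFPACK;
  };

  // In 3d, we fall back to an incomplete LU decomposition:
  template <>
  struct InnerPreconditioner<3>
  {
    using type = SparseILU<double>;
  };
@endcode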
1985 *
1986 *
1987 * <a name="step_22-StokesProblemsolve"></a>
1988 * <h4>StokesProblem::solve</h4>
1989 *
1990
1991 *
1992 * After the discussion in the introduction and the definition of the
1993 * respective classes above, the implementation of the <code>solve</code>
1994 * function is rather straightforward and done in a similar way as in
1995 * @ref step_20 "step-20". To start with, we need an object of the
1996 * <code>InverseMatrix</code> class that represents the inverse of the
1997 * matrix A. As described in the introduction, the inverse is generated with
1998 * the help of an inner preconditioner of type
1999 * <code>InnerPreconditioner::type</code>.
2000 *
2001 * @code
2002 *   template <int dim>
2003 *   void StokesProblem<dim>::solve()
2004 *   {
2005 *   const InverseMatrix<SparseMatrix<double>,
2006 *   typename InnerPreconditioner<dim>::type>
2007 *   A_inverse(system_matrix.block(0, 0), *A_preconditioner);
2008 *   Vector<double> tmp(solution.block(0).size());
2009 *  
2010 * @endcode
2011 *
2012 * This is as in @ref step_20 "step-20". We generate the right hand side @f$B A^{-1} F - G@f$
2013 * for the Schur complement and an object that represents the respective
2014 * linear operation @f$B A^{-1} B^T@f$, now with a template parameter
2015 * indicating the preconditioner - in accordance with the definition of
2016 * the class.
2017 *
2018 * @code
2019 *   {
2020 *   Vector<double> schur_rhs(solution.block(1).size());
2021 *   A_inverse.vmult(tmp, system_rhs.block(0));
2022 *   system_matrix.block(1, 0).vmult(schur_rhs, tmp);
2023 *   schur_rhs -= system_rhs.block(1);
2024 *  
2025 *   SchurComplement<typename InnerPreconditioner<dim>::type> schur_complement(
2026 *   system_matrix, A_inverse);
2027 *  
2028 * @endcode
2029 *
2030 * The usual control structures for the solver call are created...
2031 *
2032 * @code
2033 *   SolverControl solver_control(solution.block(1).size(),
2034 *   1e-6 * schur_rhs.l2_norm());
2035 *   SolverCG<Vector<double>> cg(solver_control);
2036 *  
2037 * @endcode
2038 *
2039 * Now to the preconditioner to the Schur complement. As explained in
2040 * the introduction, the preconditioning is done by a @ref GlossMassMatrix "mass matrix" in the
2041 * pressure variable.
2042 *
2043
2044 *
2045 * Actually, the solver needs to have the preconditioner in the form
2046 * @f$P^{-1}@f$, so we need to create an inverse operation. Once again, we
2047 * use an object of the class <code>InverseMatrix</code>, which
2048 * implements the <code>vmult</code> operation that is needed by the
2049 * solver. In this case, we have to invert the pressure mass matrix. As
2050 * it already turned out in earlier tutorial programs, the inversion of
2051 * a mass matrix is a rather cheap and straightforward operation
2052 * (compared to, e.g., a Laplace matrix). The CG method with ILU
2053 * preconditioning converges in 5-10 steps, independently of the mesh
2054 * size. This is precisely what we do here: We choose another ILU
2055 * preconditioner and take it along to the InverseMatrix object via the
2056 * corresponding template parameter. A CG solver is then called within
2057 * the vmult operation of the inverse matrix.
2058 *
2059
2060 *
2061 * An alternative that is cheaper to build, but needs more iterations
2062 * afterwards, would be to choose an SSOR preconditioner with factor
2063 * 1.2. It needs about twice the number of iterations, but the costs for
2064 * its generation are almost negligible.
2065 *
2066 * @code
2067 *   SparseILU<double> preconditioner;
2068 *   preconditioner.initialize(preconditioner_matrix.block(1, 1),
2069 *   SparseILU<double>::AdditionalData());
2070 *  
2071 *   InverseMatrix<SparseMatrix<double>, SparseILU<double>> m_inverse(
2072 *   preconditioner_matrix.block(1, 1), preconditioner);
2073 *  
2074 * @endcode
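
If one wanted to try the SSOR alternative mentioned above, a sketch (not part
of the actual program) might look like this:

@code
  // Hedged sketch: SSOR with relaxation factor 1.2 instead of the ILU above.
  PreconditionSSOR<SparseMatrix<double>> preconditioner;
  preconditioner.initialize(preconditioner_matrix.block(1, 1), 1.2);

  InverseMatrix<SparseMatrix<double>, PreconditionSSOR<SparseMatrix<double>>>
    m_inverse(preconditioner_matrix.block(1, 1), preconditioner);
@endcode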
2075 *
2076 * With the Schur complement and an efficient preconditioner at hand, we
2077 * can solve the respective equation for the pressure (i.e. block 1 in
2078 * the solution vector) in the usual way:
2079 *
2080 * @code
2081 *   cg.solve(schur_complement, solution.block(1), schur_rhs, m_inverse);
2082 *  
2083 * @endcode
2084 *
2085 * After this first solution step, the hanging node constraints have to
2086 * be distributed to the solution in order to achieve a consistent
2087 * pressure field.
2088 *
2089 * @code
2090 *   constraints.distribute(solution);
2091 *  
2092 *   std::cout << " " << solver_control.last_step()
2093 *   << " outer CG Schur complement iterations for pressure"
2094 *   << std::endl;
2095 *   }
2096 *  
2097 * @endcode
2098 *
2099 * As in @ref step_20 "step-20", we finally need to solve for the velocity equation where
2100 * we plug in the solution to the pressure equation. This involves only
2101 * objects we already know - so we simply multiply @f$p@f$ by @f$B^T@f$, subtract
2102 * the right hand side and multiply by the inverse of @f$A@f$. At the end, we
2103 * need to distribute the constraints from hanging nodes in order to
2104 * obtain a consistent flow field:
2105 *
2106 * @code
2107 *   {
2108 *   system_matrix.block(0, 1).vmult(tmp, solution.block(1));
2109 *   tmp *= -1;
2110 *   tmp += system_rhs.block(0);
2111 *  
2112 *   A_inverse.vmult(solution.block(0), tmp);
2113 *  
2114 *   constraints.distribute(solution);
2115 *   }
2116 *   }
2117 *  
2118 *  
2119 * @endcode
2120 *
2121 *
2122 * <a name="step_22-StokesProblemoutput_results"></a>
2123 * <h4>StokesProblem::output_results</h4>
2124 *
2125
2126 *
2127 * The next function generates graphical output. In this example, we are
2128 * going to use the VTK file format. We attach names to the individual
2129 * variables in the problem: <code>velocity</code> to the <code>dim</code>
2130 * components of velocity and <code>pressure</code> to the pressure.
2131 *
2132
2133 *
2134 * Not all visualization programs have the ability to group individual
2135 * vector components into a vector to provide vector plots; in particular,
2136 * this holds for some VTK-based visualization programs. In this case, the
2137 * logical grouping of components into vectors should already be described
2138 * in the file containing the data. In other words, what we need to do is
2139 * provide our output writers with a way to know which of the components of
2140 * the finite element logically form a vector (with @f$d@f$ components in @f$d@f$
2141 * space dimensions) rather than letting them assume that we simply have a
2142 * bunch of scalar fields. This is achieved using the members of the
2143 * <code>DataComponentInterpretation</code> namespace: as with the vector of
2144 * solution names, we create a vector in which the first <code>dim</code> components refer
2145 * to the velocities and are given the tag
2146 * DataComponentInterpretation::component_is_part_of_vector; we
2147 * finally push one tag
2148 * DataComponentInterpretation::component_is_scalar to describe
2149 * the grouping of the pressure variable.
2150 *
2151
2152 *
2153 * The rest of the function is then the same as in @ref step_20 "step-20".
2154 *
2155 * @code
2156 *   template <int dim>
2157 *   void
2158 *   StokesProblem<dim>::output_results(const unsigned int refinement_cycle) const
2159 *   {
2160 *   std::vector<std::string> solution_names(dim, "velocity");
2161 *   solution_names.emplace_back("pressure");
2162 *  
2163 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2164 *   data_component_interpretation(
2165 *   dim, DataComponentInterpretation::component_is_part_of_vector);
2166 *   data_component_interpretation.push_back(
2167 *   DataComponentInterpretation::component_is_scalar);
2168 *  
2169 *   DataOut<dim> data_out;
2170 *   data_out.attach_dof_handler(dof_handler);
2171 *   data_out.add_data_vector(solution,
2172 *   solution_names,
2173 *   DataOut<dim>::type_dof_data,
2174 *   data_component_interpretation);
2175 *   data_out.build_patches();
2176 *  
2177 *   std::ofstream output(
2178 *   "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtk");
2179 *   data_out.write_vtk(output);
2180 *   }
2181 *  
2182 *  
2183 * @endcode
2184 *
2185 *
2186 * <a name="step_22-StokesProblemrefine_mesh"></a>
2187 * <h4>StokesProblem::refine_mesh</h4>
2188 *
2189
2190 *
2191 * This is the last interesting function of the <code>StokesProblem</code>
2192 * class. As indicated by its name, it takes the solution to the problem
2193 * and refines the mesh where this is needed. The procedure is the same as
2194 * in the respective step in @ref step_6 "step-6", with the exception that we base the
2195 * refinement only on the change in pressure, i.e., we call the Kelly error
2196 * estimator with a mask object of type ComponentMask that selects the
2197 * single scalar component for the pressure that we are interested in (we
2198 * get such a mask from the finite element class by specifying the component
2199 * we want). Additionally, we do not coarsen the grid again:
2200 *
2201 * @code
2202 *   template <int dim>
2203 *   void StokesProblem<dim>::refine_mesh()
2204 *   {
2205 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
2206 *  
2207 *   const FEValuesExtractors::Scalar pressure(dim);
2208 *   KellyErrorEstimator<dim>::estimate(
2209 *   dof_handler,
2210 *   QGauss<dim - 1>(degree + 1),
2211 *   std::map<types::boundary_id, const Function<dim> *>(),
2212 *   solution,
2213 *   estimated_error_per_cell,
2214 *   fe.component_mask(pressure));
2215 *  
2216 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
2217 *   estimated_error_per_cell,
2218 *   0.3,
2219 *   0.0);
2220 *   triangulation.execute_coarsening_and_refinement();
2221 *   }
2222 *  
2223 *  
2224 * @endcode
2225 *
2226 *
2227 * <a name="step_22-StokesProblemrun"></a>
2228 * <h4>StokesProblem::run</h4>
2229 *
2230
2231 *
2232 * The last step in the <code>StokesProblem</code> class is, as usual, the function that
2233 * generates the initial grid and calls the other functions in the
2234 * respective order.
2235 *
2236
2237 *
2238 * We start off with a rectangle of size @f$4 \times 1@f$ (in 2d) or @f$4 \times 1
2239 * \times 1@f$ (in 3d), placed in @f$\mathbb{R}^2@f$ or @f$\mathbb{R}^3@f$ as @f$(-2,2)\times(-1,0)@f$ or
2240 * @f$(-2,2)\times(0,1)\times(-1,0)@f$, respectively. It is natural to start
2241 * with equal mesh size in each direction, so we subdivide the initial
2242 * rectangle four times in the first coordinate direction. To limit the
2243 * scope of the variables involved in the creation of the mesh to the range
2244 * where we actually need them, we put the entire block between a pair of
2245 * braces:
2246 *
2247 * @code
2248 *   template <int dim>
2249 *   void StokesProblem<dim>::run()
2250 *   {
2251 *   {
2252 *   std::vector<unsigned int> subdivisions(dim, 1);
2253 *   subdivisions[0] = 4;
2254 *  
2255 *   const Point<dim> bottom_left = (dim == 2 ?
2256 *   Point<dim>(-2, -1) : // 2d case
2257 *   Point<dim>(-2, 0, -1)); // 3d case
2258 *  
2259 *   const Point<dim> top_right = (dim == 2 ?
2260 *   Point<dim>(2, 0) : // 2d case
2261 *   Point<dim>(2, 1, 0)); // 3d case
2262 *  
2263 *   GridGenerator::subdivided_hyper_rectangle(triangulation,
2264 *   subdivisions,
2265 *   bottom_left,
2266 *   top_right);
2267 *   }
2268 *  
2269 * @endcode
2270 *
2271 * A boundary indicator of 1 is set for all boundaries that are subject to
2272 * Dirichlet boundary conditions, i.e. for faces that are located at 0 in
2273 * the last coordinate direction. See the example description above for
2274 * details.
2275 *
2276 * @code
2277 *   for (const auto &cell : triangulation.active_cell_iterators())
2278 *   for (const auto &face : cell->face_iterators())
2279 *   if (face->center()[dim - 1] == 0)
2280 *   face->set_all_boundary_ids(1);
2281 *  
2282 *  
2283 * @endcode
2284 *
2285 * We then apply an initial refinement before solving for the first
2286 * time. In 3d, there are going to be more degrees of freedom, so we
2287 * refine less there:
2288 *
2289 * @code
2290 *   triangulation.refine_global(4 - dim);
2291 *  
2292 * @endcode
2293 *
2294 * As first seen in @ref step_6 "step-6", we cycle over the different refinement levels
2295 * and refine (except for the first cycle), set up the degrees of freedom
2296 * and matrices, assemble, solve and create output:
2297 *
2298 * @code
2299 *   for (unsigned int refinement_cycle = 0; refinement_cycle < 6;
2300 *   ++refinement_cycle)
2301 *   {
2302 *   std::cout << "Refinement cycle " << refinement_cycle << std::endl;
2303 *  
2304 *   if (refinement_cycle > 0)
2305 *   refine_mesh();
2306 *  
2307 *   setup_dofs();
2308 *  
2309 *   std::cout << " Assembling..." << std::endl << std::flush;
2310 *   assemble_system();
2311 *  
2312 *   std::cout << " Solving..." << std::flush;
2313 *   solve();
2314 *  
2315 *   output_results(refinement_cycle);
2316 *  
2317 *   std::cout << std::endl;
2318 *   }
2319 *   }
2320 *   } // namespace Step22
2321 *  
2322 *  
2323 * @endcode
2324 *
2325 *
2326 * <a name="step_22-Thecodemaincodefunction"></a>
2327 * <h3>The <code>main</code> function</h3>
2328 *
2329
2330 *
2331 * The main function is the same as in @ref step_20 "step-20". We pass the element degree as
2332 * a parameter and choose the space dimension at the well-known template slot.
2333 *
2334 * @code
2335 *   int main()
2336 *   {
2337 *   try
2338 *   {
2339 *   using namespace Step22;
2340 *  
2341 *   StokesProblem<2> flow_problem(1);
2342 *   flow_problem.run();
2343 *   }
2344 *   catch (std::exception &exc)
2345 *   {
2346 *   std::cerr << std::endl
2347 *   << std::endl
2348 *   << "----------------------------------------------------"
2349 *   << std::endl;
2350 *   std::cerr << "Exception on processing: " << std::endl
2351 *   << exc.what() << std::endl
2352 *   << "Aborting!" << std::endl
2353 *   << "----------------------------------------------------"
2354 *   << std::endl;
2355 *  
2356 *   return 1;
2357 *   }
2358 *   catch (...)
2359 *   {
2360 *   std::cerr << std::endl
2361 *   << std::endl
2362 *   << "----------------------------------------------------"
2363 *   << std::endl;
2364 *   std::cerr << "Unknown exception!" << std::endl
2365 *   << "Aborting!" << std::endl
2366 *   << "----------------------------------------------------"
2367 *   << std::endl;
2368 *   return 1;
2369 *   }
2370 *  
2371 *   return 0;
2372 *   }
2373 * @endcode
<a name="step_22-Results"></a><h1>Results</h1>


<a name="step_22-Outputoftheprogramandgraphicalvisualization"></a><h3>Output of the program and graphical visualization</h3>


<a name="step_22-2Dcalculations"></a><h4>2D calculations</h4>


Running the program with the space dimension set to 2 in the <code>main</code>
function yields the following output (in "release mode"; see also
<a href="https://www.math.colostate.edu/~bangerth/videos.676.18.html">video lecture 18</a>):
@code
examples/step-22> make run
Refinement cycle 0
 Number of active cells: 64
 Number of degrees of freedom: 679 (594+85)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 1
 Number of active cells: 160
 Number of degrees of freedom: 1683 (1482+201)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 2
 Number of active cells: 376
 Number of degrees of freedom: 3813 (3370+443)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 3
 Number of active cells: 880
 Number of degrees of freedom: 8723 (7722+1001)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 4
 Number of active cells: 2008
 Number of degrees of freedom: 19383 (17186+2197)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 5
 Number of active cells: 4288
 Number of degrees of freedom: 40855 (36250+4605)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure
@endcode

The entire computation above takes about 2 seconds on a reasonably
quick (by 2015 standards) machine.

What we see immediately from this is that the number of (outer)
iterations does not increase as we refine the mesh. This confirms the
statement in the introduction that preconditioning the Schur
complement with the mass matrix indeed yields a matrix spectrally
equivalent to the identity matrix (i.e. with eigenvalues bounded above
and below independently of the mesh size or the relative sizes of
cells). In other words, the mass matrix and the Schur complement are
spectrally equivalent.
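Written out, spectral equivalence of the pressure mass matrix @f$M_p@f$ and the
Schur complement @f$S@f$ means that there are constants @f$c,C>0@f$, independent
of the mesh size @f$h@f$, such that
@f{eqnarray*}{
  c\,(M_p q, q) \;\le\; (S q, q) \;\le\; C\,(M_p q, q)
@f}
for all pressure vectors @f$q@f$. Consequently, the eigenvalues of
@f$M_p^{-1}S@f$ lie in the fixed interval @f$[c,C]@f$, and the number of CG
iterations stays bounded under mesh refinement.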

In the images below, we show the grids for the first six refinement
steps in the program. Observe how the grid is refined in regions
where the solution rapidly changes: On the upper boundary, we have
Dirichlet boundary conditions that are -1 in the left half of the line
and 1 in the right one, so there is an abrupt change at @f$x=0@f$. Likewise,
there are changes from Dirichlet to Neumann data in the two upper
corners, so there is need for refinement there as well:

<table width="60%" align="center">
 <tr>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-0.png" alt="">
 </td>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-1.png" alt="">
 </td>
 </tr>
 <tr>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-2.png" alt="">
 </td>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-3.png" alt="">
 </td>
 </tr>
 <tr>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-4.png" alt="">
 </td>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-5.png" alt="">
 </td>
 </tr>
</table>

Finally, following is a plot of the flow field. It shows fluid
transported along with the moving upper boundary and being replaced by
material coming from below:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.solution.png" alt="">

This plot uses the capability of VTK-based visualization programs (in
this case of VisIt) to show vector data; this is the result of us
declaring the velocity components of the finite element in use to be a
set of vector components, rather than independent scalar components in
the <code>StokesProblem@<dim@>::%output_results</code> function of this
tutorial program.



<a name="step_22-3Dcalculations"></a><h4>3D calculations</h4>


In 3d, the screen output of the program looks like this:

@code
Refinement cycle 0
 Number of active cells: 32
 Number of degrees of freedom: 1356 (1275+81)
 Assembling...
 Computing preconditioner...
 Solving... 13 outer CG Schur complement iterations for pressure.

Refinement cycle 1
 Number of active cells: 144
 Number of degrees of freedom: 5088 (4827+261)
 Assembling...
 Computing preconditioner...
 Solving... 14 outer CG Schur complement iterations for pressure.

Refinement cycle 2
 Number of active cells: 704
 Number of degrees of freedom: 22406 (21351+1055)
 Assembling...
 Computing preconditioner...
 Solving... 14 outer CG Schur complement iterations for pressure.

Refinement cycle 3
 Number of active cells: 3168
 Number of degrees of freedom: 93176 (89043+4133)
 Assembling...
 Computing preconditioner...
 Solving... 15 outer CG Schur complement iterations for pressure.

Refinement cycle 4
 Number of active cells: 11456
 Number of degrees of freedom: 327808 (313659+14149)
 Assembling...
 Computing preconditioner...
 Solving... 15 outer CG Schur complement iterations for pressure.

Refinement cycle 5
 Number of active cells: 45056
 Number of degrees of freedom: 1254464 (1201371+53093)
 Assembling...
 Computing preconditioner...
 Solving... 14 outer CG Schur complement iterations for pressure.
@endcode

Again, we see that the number of outer iterations does not increase as
we refine the mesh. Nevertheless, the compute time increases
significantly: for each of the refinement cycles above, it takes about
0.14 seconds, 0.63 seconds, 4.8 seconds, 35 seconds, 2 minutes and 33 seconds,
and 13 minutes and 12 seconds. This overall superlinear (in the number of
unknowns) increase in runtime is due to the fact that our inner solver is not
@f${\cal O}(N)@f$: a simple experiment shows that as we keep refining the mesh, the
average number of ILU-preconditioned CG iterations to invert the
velocity-velocity block @f$A@f$ increases.

We will address the question of how we might improve our solver <a
href="#improved-solver">below</a>.

As for the graphical output, the grids generated during the solution
process look as follows:

<table width="60%" align="center">
 <tr>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-0.png" alt="">
 </td>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-1.png" alt="">
 </td>
 </tr>
 <tr>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-2.png" alt="">
 </td>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-3.png" alt="">
 </td>
 </tr>
 <tr>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-4.png" alt="">
 </td>
 <td align="center">
 <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-5.png" alt="">
 </td>
 </tr>
</table>

Again, they show essentially the location of singularities introduced
by boundary conditions. The vector field computed makes for an
interesting graph:

<img src="https://www.dealii.org/images/steps/developer/step-22.3d.solution.png" alt="">

Also shown are isocontours of the pressure variable, which exhibit the
singularity at the point of discontinuous velocity boundary conditions.



<a name="step_22-Sparsitypattern"></a><h3>Sparsity pattern</h3>


As explained during the generation of the sparsity pattern, it is
important to have the numbering of degrees of freedom in mind when
using preconditioners like incomplete LU decompositions. This is most
conveniently visualized using the distribution of nonzero elements in
the @ref GlossStiffnessMatrix "stiffness matrix".

If we don't do anything special to renumber degrees of freedom (i.e.,
without using DoFRenumbering::Cuthill_McKee, but using
DoFRenumbering::component_wise to ensure that degrees of freedom are
appropriately sorted into their corresponding blocks of the matrix and
vector), then we get the following image after the first adaptive
refinement in two dimensions:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.sparsity-nor.png" alt="">

In order to generate such a graph, you have to insert a piece of
code like the following at the end of the setup step.
@code
 {
   std::ofstream out ("sparsity_pattern.gpl");
   sparsity_pattern.print_gnuplot(out);
 }
@endcode

It is clearly visible that the nonzero entries are spread over almost the
whole matrix. This makes preconditioning by ILU inefficient: ILU performs a
Gaussian elimination (LU decomposition) without fill-in elements, which means
that the more tentative fill-ins it has to leave out, the worse its
approximation of the complete decomposition becomes.

In this program, we have thus chosen a more advanced renumbering of
components. The renumbering with DoFRenumbering::Cuthill_McKee and grouping
the components into velocity and pressure yields the following output:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.sparsity-ren.png" alt="">

It is apparent that the situation has improved a lot. Most of the elements are
now concentrated around the diagonal in the (0,0) block in the matrix. Similar
effects are also visible for the other blocks. In this case, the ILU
decomposition will be much closer to the full LU decomposition, which improves
the quality of the preconditioner. (It may be interesting to note that the
sparse direct solver UMFPACK does some %internal renumbering of the equations
before actually generating a sparse LU decomposition; that procedure leads to
a very similar pattern to the one we got from the Cuthill-McKee algorithm.)

Finally, we want to have a closer
look at a sparsity pattern in 3D. We show only the (0,0) block of the
matrix, again after one adaptive refinement. Apart from the fact that the matrix
size has increased, it is also visible that there are many more entries
in the matrix. Moreover, even for the optimized renumbering, there will be a
considerable amount of tentative fill-in elements. This illustrates why UMFPACK
is not a good choice in 3D - a full decomposition needs many new entries that
eventually won't fit into the physical memory (RAM):

<img src="https://www.dealii.org/images/steps/developer/step-22.3d.sparsity_uu-ren.png" alt="">
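
To generate such a plot of a single block, one can print the corresponding
sub-pattern in analogy to the snippet above:
@code
 {
   std::ofstream out ("sparsity_pattern_block00.gpl");
   sparsity_pattern.block(0, 0).print_gnuplot(out);
 }
@endcode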
2655
2656
2657
2658<a name="step_22-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2659
2660
2661<a name="step-22-improved-solver">
2662<a name="step_22-Improvedlinearsolverin3D"></a><h4>Improved linear solver in 3D</h4>
2663
2664</a>
2665
2666We have seen in the section of computational results that the number of outer
2667iterations does not depend on the mesh size, which is optimal in a sense of
2668scalability. This does, however, not apply to the solver as a whole, as
2669mentioned above:
2670We did not look at the number of inner iterations when generating the inverse of
2671the matrix @f$A@f$ and the mass matrix @f$M_p@f$. Of course, this is unproblematic in
2672the 2D case where we precondition @f$A@f$ with a direct solver and the
2673<code>vmult</code> operation of the inverse matrix structure will converge in
2674one single CG step, but this changes in 3D where we only use an ILU
2675preconditioner. There, the number of required preconditioned CG steps to
2676invert @f$A@f$ increases as the mesh is refined, and each <code>vmult</code>
2677operation involves on average approximately 14, 23, 36, 59, 75 and 101 inner
2678CG iterations in the refinement steps shown above. (On the other hand,
2679the number of iterations for applying the inverse pressure mass matrix is
2680always around five, both in two and three dimensions.) To summarize, most work
2681is spent on solving linear systems with the same matrix @f$A@f$ over and over again.
2682What makes this look even worse is the fact that we
2683actually invert a matrix that is about 95 percent the size of the total system
2684matrix and stands for 85 percent of the non-zero entries in the sparsity
2685pattern. Hence, the natural question is whether it is reasonable to solve a
2686linear system with matrix @f$A@f$ for about 15 times when calculating the solution
2687to the block system.
2688
2689The answer is, of course, that we can do that in a few other (most of the time
2690better) ways.
2691Nevertheless, it has to be remarked that an indefinite system as the one
2692at hand puts indeed much higher
2693demands on the linear algebra than standard elliptic problems as we have seen
2694in the early tutorial programs. The improvements are still rather
2695unsatisfactory, if one compares with an elliptic problem of similar
2696size. Either way, we will introduce below a number of improvements to the
2697linear solver, a discussion that we will re-consider again with additional
2698options in the @ref step_31 "step-31" program.
2699
2700<a name="step-22-improved-ilu">
2701<a name="step_22-BetterILUdecompositionbysmartreordering"></a><h5>Better ILU decomposition by smart reordering</h5>
2702
2703</a>
2704A first attempt to improve the speed of the linear solution process is to choose
2705a dof reordering that makes the ILU closer to a full LU decomposition, as
2706already mentioned in the in-code comments. The DoFRenumbering namespace compares
2707several choices for the renumbering of dofs for the Stokes equations. The best
2708result regarding computing time was found for the King ordering, which is
2709accessed through the call DoFRenumbering::boost::king_ordering. With that
2710ordering, the inner solver needs considerably fewer operations, e.g. about 62
2711inner CG iterations for the inversion of @f$A@f$ at cycle 4 compared to about 75
2712iterations with the standard Cuthill-McKee algorithm. Also, the computing time
2713at cycle 4 decreased from about 17 to 11 minutes for the <code>solve()</code>
2714call. However, the King ordering (and the orderings provided by the
2715DoFRenumbering::boost namespace in general) has a serious drawback: it uses
2716much more memory than the built-in deal.II versions, since it acts on abstract
2717graphs rather than the geometry provided by the triangulation. In the present
2718case, the renumbering takes about 5 times as much memory, which makes the
2719algorithm infeasible for the last cycle in 3D with 1.2 million
2720unknowns.
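
A minimal sketch of how one might try this out, assuming the structure of the
<code>setup_dofs()</code> function of this program (only the first renumbering
call changes; the subsequent component-wise renumbering is kept):
@code
// in setup_dofs(): replace the default Cuthill-McKee renumbering ...
//   DoFRenumbering::Cuthill_McKee(dof_handler);
// ... by the King ordering from BOOST:
DoFRenumbering::boost::king_ordering(dof_handler);

// the grouping into velocity and pressure blocks stays as before
DoFRenumbering::component_wise(dof_handler, block_component);
@endcode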
2721
2722<a name="step_22-BetterpreconditionerfortheinnerCGsolver"></a><h5>Better preconditioner for the inner CG solver</h5>
2723
2724Another idea to improve the situation even more would be to choose a
2725preconditioner that makes CG for the (0,0) matrix @f$A@f$ converge in a
2726mesh-independent number of iterations, say 10 to 30. We have seen such a
2727candidate in @ref step_16 "step-16": multigrid.
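
In this program, the type of the preconditioner for @f$A@f$ is selected through
the <code>InnerPreconditioner</code> traits struct defined near the top of the
file, so this is where a multigrid preconditioner would be plugged in. The
following is a sketch of the plug-in point only; the multigrid object itself
would be set up along the lines of @ref step_16 "step-16" and is not shown here:
@code
// the 3D specialization currently selects an ILU; a multigrid
// preconditioner type for A would be substituted here (placeholder only)
template <>
struct InnerPreconditioner<3>
{
  using type = SparseILU<double>; // <-- replace by a multigrid preconditioner
};
@endcode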
2728
2729<a name="step_22-BlockSchurcomplementpreconditioner"></a><h5>Block Schur complement preconditioner</h5>
2730
2731<a name="step-22-block-schur"></a>
2732Even with a good preconditioner for @f$A@f$, we still
2733need to solve the same linear system repeatedly (with different
2734right hand sides, though) in order to make the Schur complement solve
2735converge. The approach we are going to discuss here is how the inner iteration
2736and the outer iteration can be combined; as long as we persist in calculating
2737the Schur complement, there is no way around this repeated solve.
2738
2739The alternative is to attack the block system at once and use an approximate
2740Schur complement as an efficient preconditioner. The idea is as
2741follows: If we find a block preconditioner @f$P@f$ such that the matrix
2742@f{eqnarray*}{
2743 P^{-1}\left(\begin{array}{cc}
2744 A & B^T \\ B & 0
2745 \end{array}\right)
2746@f}
2747is simple, then an iterative solver with that preconditioner will converge in a
2748few iterations. Using the Schur complement @f$S = B A^{-1} B^T@f$, one finds that
2749@f{eqnarray*}{
2750 P^{-1}
2751 =
2752 \left(\begin{array}{cc}
2753 A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
2754 \end{array}\right)
2755@f}
2756would appear to be a good choice since
2757@f{eqnarray*}{
2758 P^{-1}\left(\begin{array}{cc}
2759 A & B^T \\ B & 0
2760 \end{array}\right)
2761 =
2762 \left(\begin{array}{cc}
2763 A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
2764 \end{array}\right)\cdot \left(\begin{array}{cc}
2765 A & B^T \\ B & 0
2766 \end{array}\right)
2767 =
2768 \left(\begin{array}{cc}
2769 I & A^{-1} B^T \\ 0 & I
2770 \end{array}\right).
2771@f}
2772This is the approach taken in the paper by Silvester and Wathen referenced
2773in the introduction (with the exception that Silvester and Wathen use
2774right preconditioning). In this case, a Krylov-based iterative method would
2775converge in at most two steps if exact inverses of @f$A@f$ and @f$S@f$ were applied,
2776since all the eigenvalues are one and the minimal polynomial of the
2777preconditioned matrix has degree at most two. Below, we will
2778discuss the choice of an adequate solver for this problem. First, we are
2779going to have a closer look at the implementation of the preconditioner.
2780
2781Since @f$P@f$ is only meant to be a preconditioner, we shall use approximations to
2782the inverse of the Schur complement @f$S@f$ and the matrix @f$A@f$. Hence, the Schur
2783complement will be approximated by the pressure mass matrix @f$M_p@f$, and we use
2784a preconditioner for @f$A@f$ (without an InverseMatrix class around it) to
2785approximate @f$A^{-1}@f$.
2786
2787Here comes the class that implements the block Schur
2788complement preconditioner. The <code>vmult</code> operation for block vectors
2789according to the derivation above can be specified by three successive
2790operations:
2791@code
2792template <class PreconditionerA, class PreconditionerMp>
2793class BlockSchurPreconditioner : public Subscriptor
2794{
2795 public:
2796 BlockSchurPreconditioner (const BlockSparseMatrix<double> &S,
2797 const InverseMatrix<SparseMatrix<double>,PreconditionerMp> &Mpinv,
2798 const PreconditionerA &Apreconditioner);
2799
2800 void vmult (BlockVector<double> &dst,
2801 const BlockVector<double> &src) const;
2802
2803 private:
2804 const SmartPointer<const BlockSparseMatrix<double> > system_matrix;
2805 const SmartPointer<const InverseMatrix<SparseMatrix<double>,
2806 PreconditionerMp > > m_inverse;
2807 const PreconditionerA &a_preconditioner;
2808
2809 mutable Vector<double> tmp;
2810
2811};
2812
2813template <class PreconditionerA, class PreconditionerMp>
2814BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::BlockSchurPreconditioner(
2815 const BlockSparseMatrix<double> &S,
2816 const InverseMatrix<SparseMatrix<double>,PreconditionerMp> &Mpinv,
2817 const PreconditionerA &Apreconditioner
2818 )
2819 :
2820 system_matrix (&S),
2821 m_inverse (&Mpinv),
2822 a_preconditioner (Apreconditioner),
2823 tmp (S.block(1,1).m())
2824{}
2825
2826 // Now the interesting function, the multiplication of
2827 // the preconditioner with a BlockVector.
2828template <class PreconditionerA, class PreconditionerMp>
2829void BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::vmult (
2830 BlockVector<double> &dst,
2831 const BlockVector<double> &src) const
2832{
2833 // Form u_new = A^{-1} u
2834 a_preconditioner.vmult (dst.block(0), src.block(0));
2835 // Form tmp = - B u_new + p
2836 // (<code>SparseMatrix::residual</code>
2837 // does precisely this)
2838 system_matrix->block(1,0).residual(tmp, dst.block(0), src.block(1));
2839 // Change sign in tmp
2840 tmp *= -1;
2841 // Multiply by approximate Schur complement
2842 // (i.e. a pressure mass matrix)
2843 m_inverse->vmult (dst.block(1), tmp);
2844}
2845@endcode
2846
2847Since we act on the whole block system now, we have to live with one
2848disadvantage: we need to perform the solver iterations on
2849the full block system instead of the smaller pressure space.
2850
2851Now we turn to the question of which solver we should use for the block
2852system. The first observation is that the preconditioned system cannot
2853be solved with CG since it is neither positive definite nor symmetric.
2854
2855The deal.II libraries implement several solvers that are appropriate for the
2856problem at hand. One choice is the solver @ref SolverBicgstab "BiCGStab", which
2857was used for the solution of the unsymmetric advection problem in @ref step_9 "step-9". The
2858second option, the one we are going to choose, is @ref SolverGMRES "GMRES"
2859(generalized minimum residual). Both methods have their pros and cons - there
2860are problems where one of the two candidates clearly outperforms the other, and
2861vice versa.
2862<a href="http://en.wikipedia.org/wiki/GMRES#Comparison_with_other_solvers">Wikipedia</a>'s
2863article on the GMRES method gives a comparative presentation.
2864A more comprehensive and well-founded comparison can be read e.g. in the book by
2865J.W. Demmel (Applied Numerical Linear Algebra, SIAM, 1997, section 6.6.6).
2866
2867For our specific problem with the ILU preconditioner for @f$A@f$, we certainly need
2868to perform hundreds of iterations on the block system for large problem sizes
2869(we won't beat CG!). Actually, this disfavors GMRES: During the GMRES
2870iterations, a basis of Krylov vectors is successively built up and some
2871operations are performed on these vectors. The more vectors are in this basis,
2872the more operations and memory will be needed. The number of operations scales
2873as @f${\cal O}(n + k^2)@f$ and memory as @f${\cal O}(kn)@f$, where @f$k@f$ is the number of
2874vectors in the Krylov basis and @f$n@f$ the size of the (block) matrix.
2875To not let these demands grow excessively, deal.II limits the size @f$k@f$ of the
2876basis to 30 vectors by default.
2877Then, the basis is rebuilt. This implementation of the GMRES method is called
2878GMRES(k), with default @f$k=30@f$. What we have gained by this restriction,
2879namely a bound on operations and memory requirements, is offset by
2880the fact that we use an incomplete basis - this increases the number of
2881required iterations.
2882
2883BiCGStab, on the other hand, won't get slower when many iterations are needed
2884(one iteration uses only results from one preceding step and
2885not all the steps as in GMRES). Besides the fact that BiCGStab is more expensive per
2886step since two matrix-vector products are needed (compared to one for
2887CG or GMRES), there is one main reason which makes BiCGStab not appropriate for
2888this problem: the preconditioner applies the inverse of the pressure
2889mass matrix by using the InverseMatrix class. Since the application of the
2890inverse matrix to a vector is done only in an approximate way (an exact inverse
2891is too expensive), this will also affect the solver. In the case of BiCGStab,
2892the Krylov vectors will not be orthogonal due to that perturbation. While
2893this is uncritical for a small number of steps (up to about 50), it ruins the
2894performance of the solver once these perturbations have grown to a significant
2895magnitude in the course of the iterations.
2896
2897We did some experiments with BiCGStab and found it to
2898be faster than GMRES up to refinement cycle 3 (in 3D), but it became very slow
2899for cycles 4 and 5 (even slower than the original Schur complement), so the
2900solver is useless in this situation. Choosing a sharper tolerance for the
2901inverse matrix class (<code>1e-10*src.l2_norm()</code> instead of
2902<code>1e-6*src.l2_norm()</code>) made BiCGStab perform well also for cycle 4,
2903but did not change the failure on the very large problems.
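
For reference, the tolerance in question is the one used in the
<code>vmult()</code> function of the <code>InverseMatrix</code> class shown
above. A minimal sketch of the tightened variant (only the constant in the
SolverControl line changes with respect to the program):
@code
template <class MatrixType, class PreconditionerType>
void InverseMatrix<MatrixType, PreconditionerType>::vmult(
  Vector<double>       &dst,
  const Vector<double> &src) const
{
  // sharper relative tolerance than the 1e-6 used in the program above
  SolverControl            solver_control(src.size(), 1e-10 * src.l2_norm());
  SolverCG<Vector<double>> cg(solver_control);

  dst = 0;
  cg.solve(*matrix, dst, src, *preconditioner);
}
@endcode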
2904
2905GMRES is of course also affected by the approximate inverses, but it is not as
2906sensitive to orthogonality and retains relatively good performance also for
2907large sizes, see the results below.
2908
2909With this said, we turn to the realization of the solver call with GMRES with
2910@f$k=100@f$ temporary vectors:
2911
2912@code
2913 const SparseMatrix<double> &pressure_mass_matrix
2914 = preconditioner_matrix.block(1,1);
2915 SparseILU<double> pmass_preconditioner;
2916 pmass_preconditioner.initialize (pressure_mass_matrix,
2917 SparseILU<double>::AdditionalData());
2918
2919 InverseMatrix<SparseMatrix<double>,SparseILU<double> >
2920 m_inverse (pressure_mass_matrix, pmass_preconditioner);
2921
2922 BlockSchurPreconditioner<typename InnerPreconditioner<dim>::type,
2923 SparseILU<double> >
2924 preconditioner (system_matrix, m_inverse, *A_preconditioner);
2925
2926 SolverControl solver_control (system_matrix.m(),
2927 1e-6*system_rhs.l2_norm());
2928 GrowingVectorMemory<BlockVector<double> > vector_memory;
2929 SolverGMRES<BlockVector<double> >::AdditionalData gmres_data;
2930 gmres_data.max_basis_size = 100;
2931
2932 SolverGMRES<BlockVector<double> > gmres(solver_control, vector_memory,
2933 gmres_data);
2934
2935 gmres.solve(system_matrix, solution, system_rhs,
2936 preconditioner);
2937
2938 constraints.distribute (solution);
2939
2940 std::cout << " "
2941 << solver_control.last_step()
2942 << " block GMRES iterations";
2943@endcode
2944
2945Obviously, one needs to add the include file @ref SolverGMRES
2946"<deal.II/lac/solver_gmres.h>" in order to make this run.
2947We call the solver with a BlockVector template argument in order to enable
2948GMRES to operate on block vectors and matrices.
2949Note also that we need to set the (1,1) block in the system
2950matrix to zero (we stored the pressure mass matrix there, which is not part of the
2951problem) after we copied the information to another matrix.
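
If, as described, the pressure mass matrix had been assembled into the (1,1)
block of the system matrix, a sketch of that copy-then-zero step could look as
follows (the member name <code>pressure_mass_matrix</code> is ours, not the
program's):
@code
// keep a copy of the pressure mass matrix stored in the (1,1) block ...
pressure_mass_matrix.reinit(sparsity_pattern.block(1, 1));
pressure_mass_matrix.copy_from(system_matrix.block(1, 1));

// ... and then zero out that block, since it is not part of the
// actual Stokes system matrix
system_matrix.block(1, 1) = 0;
@endcode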
2952
2953Using the Timer class, we collect some statistics that compare the runtime
2954of the block solver with that of the implementation discussed above.
2955Besides solving with the two options, we also check whether the solutions
2956of the two variants are close to each other (i.e., whether this solver indeed
2957gives the same solution as we had before) by computing the infinity
2958norm of the difference of the solution vectors.
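
A minimal sketch of how such a comparison might be coded, assuming both
solution vectors are at hand (the variable name <code>block_solution</code>
is ours):
@code
Timer timer;

// time the block-preconditioned GMRES solve
timer.restart();
gmres.solve(system_matrix, block_solution, system_rhs, preconditioner);
timer.stop();
std::cout << "  Block Schur preconditioner: [" << timer.cpu_time() << " s]"
          << std::endl;

// compare against the Schur complement solution computed before
BlockVector<double> difference(block_solution);
difference -= solution;
std::cout << "  l_infinity difference between solution vectors: "
          << difference.linfty_norm() << std::endl;
@endcode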
2959
2960Let's first see the results in 2D:
2961@code
2962Refinement cycle 0
2963 Number of active cells: 64
2964 Number of degrees of freedom: 679 (594+85) [0.00162792 s]
2965 Assembling... [0.00108981 s]
2966 Computing preconditioner... [0.0025959 s]
2967 Solving...
2968 Schur complement: 11 outer CG iterations for p [0.00479603s ]
2969 Block Schur preconditioner: 12 GMRES iterations [0.00441718 s]
2970 l_infinity difference between solution vectors: 5.38258e-07
2971
2972Refinement cycle 1
2973 Number of active cells: 160
2974 Number of degrees of freedom: 1683 (1482+201) [0.00345707 s]
2975 Assembling... [0.00237417 s]
2976 Computing preconditioner... [0.00605702 s]
2977 Solving...
2978 Schur complement: 11 outer CG iterations for p [0.0123992s ]
2979 Block Schur preconditioner: 12 GMRES iterations [0.011909 s]
2980 l_infinity difference between solution vectors: 1.74658e-05
2981
2982Refinement cycle 2
2983 Number of active cells: 376
2984 Number of degrees of freedom: 3813 (3370+443) [0.00729299 s]
2985 Assembling... [0.00529909 s]
2986 Computing preconditioner... [0.0167508 s]
2987 Solving...
2988 Schur complement: 11 outer CG iterations for p [0.031672s ]
2989 Block Schur preconditioner: 12 GMRES iterations [0.029232 s]
2990 l_infinity difference between solution vectors: 7.81569e-06
2991
2992Refinement cycle 3
2993 Number of active cells: 880
2994 Number of degrees of freedom: 8723 (7722+1001) [0.017709 s]
2995 Assembling... [0.0126002 s]
2996 Computing preconditioner... [0.0435679 s]
2997 Solving...
2998 Schur complement: 11 outer CG iterations for p [0.0971651s ]
2999 Block Schur preconditioner: 12 GMRES iterations [0.0992041 s]
3000 l_infinity difference between solution vectors: 1.87249e-05
3001
3002Refinement cycle 4
3003 Number of active cells: 2008
3004 Number of degrees of freedom: 19383 (17186+2197) [0.039988 s]
3005 Assembling... [0.028281 s]
3006 Computing preconditioner... [0.118314 s]
3007 Solving...
3008 Schur complement: 11 outer CG iterations for p [0.252133s ]
3009 Block Schur preconditioner: 13 GMRES iterations [0.269125 s]
3010 l_infinity difference between solution vectors: 6.38657e-05
3011
3012Refinement cycle 5
3013 Number of active cells: 4288
3014 Number of degrees of freedom: 40855 (36250+4605) [0.0880702 s]
3015 Assembling... [0.0603511 s]
3016 Computing preconditioner... [0.278339 s]
3017 Solving...
3018 Schur complement: 11 outer CG iterations for p [0.53846s ]
3019 Block Schur preconditioner: 13 GMRES iterations [0.578667 s]
3020 l_infinity difference between solution vectors: 0.000173363
3021@endcode
3022
3023We see that there is no huge difference in the solution time between the
3024block Schur complement preconditioner solver and the Schur complement
3025itself. The reason is simple: we used a direct solver as preconditioner for
3026@f$A@f$ - so we cannot expect any gain by avoiding the inner iterations. We see
3027that the number of iterations has slightly increased for GMRES, but all in
3028all the two choices are fairly similar.
3029
3030The picture of course changes in 3D:
3031
3032@code
3033Refinement cycle 0
3034 Number of active cells: 32
3035 Number of degrees of freedom: 1356 (1275+81) [0.00845218 s]
3036 Assembling... [0.019372 s]
3037 Computing preconditioner... [0.00712395 s]
3038 Solving...
3039 Schur complement: 13 outer CG iterations for p [0.0320101s ]
3040 Block Schur preconditioner: 22 GMRES iterations [0.0048759 s]
3041 l_infinity difference between solution vectors: 2.15942e-05
3042
3043Refinement cycle 1
3044 Number of active cells: 144
3045 Number of degrees of freedom: 5088 (4827+261) [0.0346942 s]
3046 Assembling... [0.0857739 s]
3047 Computing preconditioner... [0.0465031 s]
3048 Solving...
3049 Schur complement: 14 outer CG iterations for p [0.349258s ]
3050 Block Schur preconditioner: 35 GMRES iterations [0.048759 s]
3051 l_infinity difference between solution vectors: 1.77657e-05
3052
3053Refinement cycle 2
3054 Number of active cells: 704
3055 Number of degrees of freedom: 22406 (21351+1055) [0.175669 s]
3056 Assembling... [0.437447 s]
3057 Computing preconditioner... [0.286435 s]
3058 Solving...
3059 Schur complement: 14 outer CG iterations for p [3.65519s ]
3060 Block Schur preconditioner: 63 GMRES iterations [0.497787 s]
3061 l_infinity difference between solution vectors: 5.08078e-05
3062
3063Refinement cycle 3
3064 Number of active cells: 3168
3065 Number of degrees of freedom: 93176 (89043+4133) [0.790985 s]
3066 Assembling... [1.97598 s]
3067 Computing preconditioner... [1.4325 s]
3068 Solving...
3069 Schur complement: 15 outer CG iterations for p [29.9666s ]
3070 Block Schur preconditioner: 128 GMRES iterations [5.02645 s]
3071 l_infinity difference between solution vectors: 0.000119671
3072
3073Refinement cycle 4
3074 Number of active cells: 11456
3075 Number of degrees of freedom: 327808 (313659+14149) [3.44995 s]
3076 Assembling... [7.54772 s]
3077 Computing preconditioner... [5.46306 s]
3078 Solving...
3079 Schur complement: 15 outer CG iterations for p [139.987s ]
3080 Block Schur preconditioner: 255 GMRES iterations [38.0946 s]
3081 l_infinity difference between solution vectors: 0.00020793
3082
3083Refinement cycle 5
3084 Number of active cells: 45056
3085 Number of degrees of freedom: 1254464 (1201371+53093) [19.6795 s]
3086 Assembling... [28.6586 s]
3087 Computing preconditioner... [22.401 s]
3088 Solving...
3089 Schur complement: 14 outer CG iterations for p [796.767s ]
3090 Block Schur preconditioner: 524 GMRES iterations [355.597 s]
3091 l_infinity difference between solution vectors: 0.000501219
3092@endcode
3093
3094Here, the block preconditioned solver is clearly superior to the Schur
3095complement, but the advantage diminishes as the number of mesh points grows. This is
3096because GMRES(k) scales worse with the problem size than CG, as we discussed
3097above. Nonetheless, the improvement by a factor of 3-6 for moderate problem
3098sizes is quite impressive.
3099
3100
3101<a name="step_22-Combiningtheblockpreconditionerandmultigrid"></a><h5>Combining the block preconditioner and multigrid</h5>
3102
3103An ultimate linear solver for this problem could be imagined as a
3104combination of an optimal
3105preconditioner for @f$A@f$ (e.g. multigrid) and the block preconditioner
3106described above, which is the approach taken in the @ref step_31 "step-31"
3107and @ref step_32 "step-32" tutorial programs (where we use an algebraic multigrid
3108method) and @ref step_56 "step-56" (where we use a geometric multigrid method).
3109
3110
3111<a name="step_22-Noblockmatricesandvectors"></a><h5>No block matrices and vectors</h5>
3112
3113Another possibility worth considering is to not set up a block
3114system, but rather solve the system of velocity and pressure all at once. The
3115options are a direct solver with UMFPACK (2D) or GMRES with ILU
3116preconditioning (3D). It should be straightforward to try that.
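
For the 2D case, a minimal sketch of such a monolithic direct solve (this
assumes the (1,1) block of <code>system_matrix</code> holds the actual - zero -
Stokes block, see the remark about the pressure mass matrix above):
@code
#include <deal.II/lac/sparse_direct.h>

// factorize the full velocity-pressure matrix and apply its inverse
SparseDirectUMFPACK direct_solver;
direct_solver.initialize(system_matrix);
direct_solver.vmult(solution, system_rhs);

constraints.distribute(solution);
@endcode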
3117
3118
3119
3120<a name="step_22-Moreinterestingtestcases"></a><h4>More interesting testcases</h4>
3121
3122
3123The program can of course also serve as a basis to compute the flow in more
3124interesting cases. The original motivation to write this program was for it to
3125be a starting point for some geophysical flow problems, such as the
3126movement of magma under places where continental plates drift apart (for
3127example mid-ocean ridges). Of course, in such places, the geometry is more
3128complicated than the examples shown above, but it is not hard to accommodate
3129that.
3130
3131For example, by using the following modification of the boundary values
3132function
3133@code
3134template <int dim>
3135double
3136BoundaryValues<dim>::value (const Point<dim> &p,
3137 const unsigned int component) const
3138{
3139 Assert (component < this->n_components,
3140 ExcIndexRange (component, 0, this->n_components));
3141
3142 const double x_offset = std::atan(p[1]*4)/3;
3143
3144 if (component == 0)
3145 return (p[0] < x_offset ? -1 : (p[0] > x_offset ? 1 : 0));
3146 return 0;
3147}
3148@endcode
3149and the following way to generate the mesh as the domain
3150@f$[-2,2]\times[-2,2]\times[-1,0]@f$
3151@code
3152 std::vector<unsigned int> subdivisions (dim, 1);
3153 subdivisions[0] = 4;
3154 if (dim>2)
3155 subdivisions[1] = 4;
3156
3157 const Point<dim> bottom_left = (dim == 2 ?
3158 Point<dim>(-2,-1) :
3159 Point<dim>(-2,-2,-1));
3160 const Point<dim> top_right = (dim == 2 ?
3161 Point<dim>(2,0) :
3162 Point<dim>(2,2,0));
3163
3164 GridGenerator::subdivided_hyper_rectangle (triangulation,
3165 subdivisions,
3166 bottom_left,
3167 top_right);
3168@endcode
3169then we get images where the fault line is curved:
3170<table width="60%" align="center">
3171 <tr>
3172 <td align="center">
3173 <img src="https://www.dealii.org/images/steps/developer/step-22.3d-extension.png" alt="">
3174 </td>
3175 <td align="center">
3176 <img src="https://www.dealii.org/images/steps/developer/step-22.3d-grid-extension.png" alt="">
3177 </td>
3178 </tr>
3179</table>
3180 *
3181 *
3182<a name="step_22-PlainProg"></a>
3183<h1> The plain program</h1>
3184@include "step-22.cc"
3185*/