Reference documentation for deal.II version Git 26c8cac9c6 2020-01-27 13:06:59 +0100
symmetric_tensor.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_symmetric_tensor_h
17 #define dealii_symmetric_tensor_h
18 
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/numbers.h>
23 #include <deal.II/base/table_indices.h>
24 #include <deal.II/base/template_constraints.h>
25 #include <deal.II/base/tensor.h>
26 
27 #include <algorithm>
28 #include <array>
29 #include <functional>
30 
31 DEAL_II_NAMESPACE_OPEN
32 
33 // Forward declaration
34 #ifndef DOXYGEN
35 template <int rank, int dim, typename Number = double>
36 class SymmetricTensor;
37 #endif
38 
39 template <int dim, typename Number>
40 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
41  unit_symmetric_tensor();
42 
43 template <int dim, typename Number>
44 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number>
45  deviator_tensor();
46 
47 template <int dim, typename Number>
48 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number>
49  identity_tensor();
50 
51 template <int dim, typename Number>
52 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
53  invert(const SymmetricTensor<2, dim, Number> &);
54 
55 template <int dim, typename Number>
56 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number>
57  invert(const SymmetricTensor<4, dim, Number> &);
58 
59 template <int dim2, typename Number>
60 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number
61  trace(const SymmetricTensor<2, dim2, Number> &);
62 
63 template <int dim, typename Number>
64 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
65  deviator(const SymmetricTensor<2, dim, Number> &);
66 
67 template <int dim, typename Number>
68 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number
69  determinant(const SymmetricTensor<2, dim, Number> &);
70 
71 
72 
73 namespace internal
74 {
79  namespace SymmetricTensorImplementation
80  {
85  template <int rank, int dim, typename Number>
86  struct Inverse;
87  } // namespace SymmetricTensorImplementation
88 
93  namespace SymmetricTensorAccessors
94  {
101  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE TableIndices<2>
102  merge(const TableIndices<2> &previous_indices,
103  const unsigned int new_index,
104  const unsigned int position)
105  {
106  AssertIndexRange(position, 2);
107 
108  if (position == 0)
109  return {new_index, numbers::invalid_unsigned_int};
110  else
111  return {previous_indices[0], new_index};
112  }
113 
114 
115 
122  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE TableIndices<4>
123  merge(const TableIndices<4> &previous_indices,
124  const unsigned int new_index,
125  const unsigned int position)
126  {
127  AssertIndexRange(position, 4);
128 
129  switch (position)
130  {
131  case 0:
132  return {new_index,
133  numbers::invalid_unsigned_int,
134  numbers::invalid_unsigned_int,
135  numbers::invalid_unsigned_int};
136  case 1:
137  return {previous_indices[0],
138  new_index,
139  numbers::invalid_unsigned_int,
140  numbers::invalid_unsigned_int};
141  case 2:
142  return {previous_indices[0],
143  previous_indices[1],
144  new_index,
145  numbers::invalid_unsigned_int};
146  case 3:
147  return {previous_indices[0],
148  previous_indices[1],
149  previous_indices[2],
150  new_index};
151  default:
152  Assert(false, ExcInternalError());
153  return {};
154  }
155  }
156 
157 
166  template <int rank1,
167  int rank2,
168  int dim,
169  typename Number,
170  typename OtherNumber = Number>
171  struct double_contraction_result
172  {
173  using value_type = typename ProductType<Number, OtherNumber>::type;
174  using type =
175  ::SymmetricTensor<rank1 + rank2 - 4, dim, value_type>;
176  };
177 
178 
187  template <int dim, typename Number, typename OtherNumber>
188  struct double_contraction_result<2, 2, dim, Number, OtherNumber>
189  {
190  using type = typename ProductType<Number, OtherNumber>::type;
191  };
192 
193 
194 
207  template <int rank, int dim, typename Number>
208  struct StorageType;
209 
213  template <int dim, typename Number>
214  struct StorageType<2, dim, Number>
215  {
220  static const unsigned int n_independent_components =
221  (dim * dim + dim) / 2;
222 
226  using base_tensor_type = Tensor<1, n_independent_components, Number>;
227  };
228 
229 
230 
234  template <int dim, typename Number>
235  struct StorageType<4, dim, Number>
236  {
242  static const unsigned int n_rank2_components = (dim * dim + dim) / 2;
243 
247  static const unsigned int n_independent_components =
248  (n_rank2_components *
249  StorageType<2, dim, Number>::n_independent_components);
250 
257  using base_tensor_type = Tensor<2, n_rank2_components, Number>;
258  };
259 
260 
261 
266  template <int rank, int dim, bool constness, typename Number>
267  struct AccessorTypes;
268 
275  template <int rank, int dim, typename Number>
276  struct AccessorTypes<rank, dim, true, Number>
277  {
278  using tensor_type = const ::SymmetricTensor<rank, dim, Number>;
279 
280  using reference = Number;
281  };
282 
289  template <int rank, int dim, typename Number>
290  struct AccessorTypes<rank, dim, false, Number>
291  {
292  using tensor_type = ::SymmetricTensor<rank, dim, Number>;
293 
294  using reference = Number &;
295  };
296 
297 
332  template <int rank, int dim, bool constness, int P, typename Number>
333  class Accessor
334  {
335  public:
339  using reference =
340  typename AccessorTypes<rank, dim, constness, Number>::reference;
341  using tensor_type =
342  typename AccessorTypes<rank, dim, constness, Number>::tensor_type;
343 
344  private:
363  constexpr Accessor(tensor_type & tensor,
364  const TableIndices<rank> &previous_indices);
365 
369  constexpr DEAL_II_ALWAYS_INLINE
370  Accessor(const Accessor &) = default;
371 
372  public:
376  DEAL_II_CONSTEXPR Accessor<rank, dim, constness, P - 1, Number>
377  operator[](const unsigned int i);
378 
382  constexpr Accessor<rank, dim, constness, P - 1, Number>
383  operator[](const unsigned int i) const;
384 
385  private:
389  tensor_type & tensor;
390  const TableIndices<rank> previous_indices;
391 
392  // Declare some other classes as friends. Make sure to work around bugs
393  // in some compilers:
394  template <int, int, typename>
395  friend class ::SymmetricTensor;
396  template <int, int, bool, int, typename>
397  friend class Accessor;
398 #ifndef DEAL_II_TEMPL_SPEC_FRIEND_BUG
399  friend class ::SymmetricTensor<rank, dim, Number>;
400  friend class Accessor<rank, dim, constness, P + 1, Number>;
401 #endif
402  };
403 
404 
405 
415  template <int rank, int dim, bool constness, typename Number>
416  class Accessor<rank, dim, constness, 1, Number>
417  {
418  public:
422  using reference =
423  typename AccessorTypes<rank, dim, constness, Number>::reference;
424  using tensor_type =
425  typename AccessorTypes<rank, dim, constness, Number>::tensor_type;
426 
427  private:
449  constexpr Accessor(tensor_type & tensor,
450  const TableIndices<rank> &previous_indices);
451 
455  constexpr DEAL_II_ALWAYS_INLINE
456  Accessor(const Accessor &) = default;
457 
458  public:
462  DEAL_II_CONSTEXPR reference operator[](const unsigned int);
463 
467  constexpr reference operator[](const unsigned int) const;
468 
469  private:
473  tensor_type & tensor;
474  const TableIndices<rank> previous_indices;
475 
476  // Declare some other classes as friends. Make sure to work around bugs
477  // in some compilers:
478  template <int, int, typename>
479  friend class ::SymmetricTensor;
480  template <int, int, bool, int, typename>
481  friend class SymmetricTensorAccessors::Accessor;
482 #ifndef DEAL_II_TEMPL_SPEC_FRIEND_BUG
483  friend class ::SymmetricTensor<rank, dim, Number>;
484  friend class SymmetricTensorAccessors::
485  Accessor<rank, dim, constness, 2, Number>;
486 #endif
487  };
488  } // namespace SymmetricTensorAccessors
489 } // namespace internal
490 
491 
492 
556 template <int rank_, int dim, typename Number>
557 class SymmetricTensor
558 {
559 public:
560  static_assert(rank_ % 2 == 0, "A SymmetricTensor must have even rank!");
561 
570  static const unsigned int dimension = dim;
571 
575  static const unsigned int rank = rank_;
576 
582  static constexpr unsigned int n_independent_components =
583  internal::SymmetricTensorAccessors::StorageType<rank_, dim, Number>::
584  n_independent_components;
585 
589  constexpr DEAL_II_ALWAYS_INLINE
590  SymmetricTensor() = default;
591 
605  template <typename OtherNumber>
606  explicit SymmetricTensor(const Tensor<2, dim, OtherNumber> &t);
607 
623  DEAL_II_CONSTEXPR
624  SymmetricTensor(const Number (&array)[n_independent_components]);
625 
631  template <typename OtherNumber>
632  constexpr explicit SymmetricTensor(
633  const SymmetricTensor<rank_, dim, OtherNumber> &initializer);
634 
638  Number *
639  begin_raw();
640 
644  const Number *
645  begin_raw() const;
646 
650  Number *
651  end_raw();
652 
657  const Number *
658  end_raw() const;
659 
666  template <typename OtherNumber>
667  DEAL_II_CONSTEXPR SymmetricTensor &
668  operator=(const SymmetricTensor<rank_, dim, OtherNumber> &rhs);
669 
676  DEAL_II_CONSTEXPR SymmetricTensor &
677  operator=(const Number &d);
678 
683  constexpr operator Tensor<rank_, dim, Number>() const;
684 
688  constexpr bool
689  operator==(const SymmetricTensor &) const;
690 
694  constexpr bool
695  operator!=(const SymmetricTensor &) const;
696 
700  template <typename OtherNumber>
701  DEAL_II_CONSTEXPR SymmetricTensor &
702  operator+=(const SymmetricTensor<rank_, dim, OtherNumber> &);
703 
707  template <typename OtherNumber>
708  DEAL_II_CONSTEXPR SymmetricTensor &
709  operator-=(const SymmetricTensor<rank_, dim, OtherNumber> &);
710 
715  template <typename OtherNumber>
716  DEAL_II_CONSTEXPR SymmetricTensor &
717  operator*=(const OtherNumber &factor);
718 
722  template <typename OtherNumber>
723  DEAL_II_CONSTEXPR SymmetricTensor &
724  operator/=(const OtherNumber &factor);
725 
729  DEAL_II_CONSTEXPR SymmetricTensor
730  operator-() const;
731 
756  template <typename OtherNumber>
757  DEAL_II_CONSTEXPR typename internal::SymmetricTensorAccessors::
758  double_contraction_result<rank_, 2, dim, Number, OtherNumber>::type
759  operator*(const SymmetricTensor<2, dim, OtherNumber> &s) const;
760 
765  template <typename OtherNumber>
766  DEAL_II_CONSTEXPR typename internal::SymmetricTensorAccessors::
767  double_contraction_result<rank_, 4, dim, Number, OtherNumber>::type
768  operator*(const SymmetricTensor<4, dim, OtherNumber> &s) const;
769 
773  DEAL_II_CONSTEXPR Number &
774  operator()(const TableIndices<rank_> &indices);
775 
779  DEAL_II_CONSTEXPR const Number &
780  operator()(const TableIndices<rank_> &indices) const;
781 
786  constexpr internal::SymmetricTensorAccessors::
787  Accessor<rank_, dim, true, rank_ - 1, Number>
788  operator[](const unsigned int row) const;
789 
794  DEAL_II_CONSTEXPR internal::SymmetricTensorAccessors::
795  Accessor<rank_, dim, false, rank_ - 1, Number>
796  operator[](const unsigned int row);
797 
803  constexpr const Number &operator[](const TableIndices<rank_> &indices) const;
804 
810  DEAL_II_CONSTEXPR Number &operator[](const TableIndices<rank_> &indices);
811 
817  DEAL_II_CONSTEXPR const Number &
818  access_raw_entry(const unsigned int unrolled_index) const;
819 
825  DEAL_II_CONSTEXPR Number &
826  access_raw_entry(const unsigned int unrolled_index);
827 
837  typename numbers::NumberTraits<Number>::real_type
838  norm() const;
839 
847  static constexpr unsigned int
848  component_to_unrolled_index(const TableIndices<rank_> &indices);
849 
855  static constexpr TableIndices<rank_>
856  unrolled_to_component_indices(const unsigned int i);
857 
870  DEAL_II_CONSTEXPR void
871  clear();
872 
877  static constexpr std::size_t
878  memory_consumption();
879 
884  template <class Archive>
885  void
886  serialize(Archive &ar, const unsigned int version);
887 
888 private:
892  using base_tensor_descriptor =
893  internal::SymmetricTensorAccessors::StorageType<rank_, dim, Number>;
894 
898  using base_tensor_type = typename base_tensor_descriptor::base_tensor_type;
899 
903  base_tensor_type data;
904 
905  // Make all other symmetric tensors friends.
906  template <int, int, typename>
907  friend class SymmetricTensor;
908 
909  // Make a few more functions friends.
910  template <int dim2, typename Number2>
911  friend DEAL_II_CONSTEXPR Number2
912  trace(const SymmetricTensor<2, dim2, Number2> &d);
913 
914  template <int dim2, typename Number2>
915  friend DEAL_II_CONSTEXPR Number2
916  determinant(const SymmetricTensor<2, dim2, Number2> &t);
917 
918  template <int dim2, typename Number2>
919  friend DEAL_II_CONSTEXPR SymmetricTensor<2, dim2, Number2>
920  deviator(const SymmetricTensor<2, dim2, Number2> &t);
921 
922  template <int dim2, typename Number2>
923  friend DEAL_II_CONSTEXPR SymmetricTensor<2, dim2, Number2>
924  unit_symmetric_tensor();
925 
926  template <int dim2, typename Number2>
927  friend DEAL_II_CONSTEXPR SymmetricTensor<4, dim2, Number2>
928  deviator_tensor();
929 
930  template <int dim2, typename Number2>
931  friend DEAL_II_CONSTEXPR SymmetricTensor<4, dim2, Number2>
932  identity_tensor();
933 
934 
935  // Make a few helper classes friends as well.
936  friend struct internal::SymmetricTensorImplementation::
937  Inverse<2, dim, Number>;
938 
939  friend struct internal::SymmetricTensorImplementation::
940  Inverse<4, dim, Number>;
941 };
942 
943 
944 
945 // ------------------------- inline functions ------------------------
946 
947 #ifndef DOXYGEN
948 
949 // provide declarations for static members
950 template <int rank, int dim, typename Number>
951 const unsigned int SymmetricTensor<rank, dim, Number>::dimension;
952 
953 template <int rank_, int dim, typename Number>
954 constexpr unsigned int
955  SymmetricTensor<rank_, dim, Number>::n_independent_components;
956 
957 namespace internal
958 {
959  namespace SymmetricTensorAccessors
960  {
961  template <int rank_, int dim, bool constness, int P, typename Number>
962  constexpr DEAL_II_ALWAYS_INLINE
963  Accessor<rank_, dim, constness, P, Number>::Accessor(
964  tensor_type & tensor,
965  const TableIndices<rank_> &previous_indices)
966  : tensor(tensor)
967  , previous_indices(previous_indices)
968  {}
969 
970 
971 
972  template <int rank_, int dim, bool constness, int P, typename Number>
973  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
974  Accessor<rank_, dim, constness, P - 1, Number>
975  Accessor<rank_, dim, constness, P, Number>::
976  operator[](const unsigned int i)
977  {
978  return Accessor<rank_, dim, constness, P - 1, Number>(
979  tensor, merge(previous_indices, i, rank_ - P));
980  }
981 
982 
983 
984  template <int rank_, int dim, bool constness, int P, typename Number>
985  constexpr DEAL_II_ALWAYS_INLINE
986  Accessor<rank_, dim, constness, P - 1, Number>
987  Accessor<rank_, dim, constness, P, Number>::
988  operator[](const unsigned int i) const
989  {
990  return Accessor<rank_, dim, constness, P - 1, Number>(
991  tensor, merge(previous_indices, i, rank_ - P));
992  }
993 
994 
995 
996  template <int rank_, int dim, bool constness, typename Number>
997  constexpr DEAL_II_ALWAYS_INLINE
998  Accessor<rank_, dim, constness, 1, Number>::Accessor(
999  tensor_type & tensor,
1000  const TableIndices<rank_> &previous_indices)
1001  : tensor(tensor)
1002  , previous_indices(previous_indices)
1003  {}
1004 
1005 
1006 
1007  template <int rank_, int dim, bool constness, typename Number>
1008  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1009  typename Accessor<rank_, dim, constness, 1, Number>::reference
1010  Accessor<rank_, dim, constness, 1, Number>::
1011  operator[](const unsigned int i)
1012  {
1013  return tensor(merge(previous_indices, i, rank_ - 1));
1014  }
1015 
1016 
1017  template <int rank_, int dim, bool constness, typename Number>
1018  constexpr DEAL_II_ALWAYS_INLINE
1019  typename Accessor<rank_, dim, constness, 1, Number>::reference
1020  Accessor<rank_, dim, constness, 1, Number>::
1021  operator[](const unsigned int i) const
1022  {
1023  return tensor(merge(previous_indices, i, rank_ - 1));
1024  }
1025  } // namespace SymmetricTensorAccessors
1026 } // namespace internal
1027 
1028 
1029 
1030 template <int rank_, int dim, typename Number>
1031 template <typename OtherNumber>
1032 inline DEAL_II_ALWAYS_INLINE
1033  SymmetricTensor<rank_, dim, Number>::SymmetricTensor(
1034  const Tensor<2, dim, OtherNumber> &t)
1035 {
1036  static_assert(rank == 2, "This function is only implemented for rank==2");
1037  for (unsigned int d = 0; d < dim; ++d)
1038  for (unsigned int e = 0; e < d; ++e)
1039  Assert(t[d][e] == t[e][d], ExcInternalError());
1040 
1041  for (unsigned int d = 0; d < dim; ++d)
1042  data[d] = t[d][d];
1043 
1044  for (unsigned int d = 0, c = 0; d < dim; ++d)
1045  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1046  data[dim + c] = t[d][e];
1047 }
1048 
1049 
1050 
1051 template <int rank_, int dim, typename Number>
1052 template <typename OtherNumber>
1053 constexpr DEAL_II_ALWAYS_INLINE
1054  SymmetricTensor<rank_, dim, Number>::SymmetricTensor(
1055  const SymmetricTensor<rank_, dim, OtherNumber> &initializer)
1056  : data(initializer.data)
1057 {}
1058 
1059 
1060 
1061 template <int rank_, int dim, typename Number>
1062 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1063  SymmetricTensor<rank_, dim, Number>::SymmetricTensor(
1064  const Number (&array)[n_independent_components])
1065  : data(
1066  *reinterpret_cast<const typename base_tensor_type::array_type *>(array))
1067 {
1068  // ensure that the reinterpret_cast above actually works
1069  Assert(sizeof(typename base_tensor_type::array_type) == sizeof(array),
1070  ExcInternalError());
1071 }
1072 
1073 
1074 
1075 template <int rank_, int dim, typename Number>
1076 template <typename OtherNumber>
1077 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1078  SymmetricTensor<rank_, dim, Number> &
1079  SymmetricTensor<rank_, dim, Number>::
1080  operator=(const SymmetricTensor<rank_, dim, OtherNumber> &t)
1081 {
1082  data = t.data;
1083  return *this;
1084 }
1085 
1086 
1087 
1088 template <int rank_, int dim, typename Number>
1089 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1090  SymmetricTensor<rank_, dim, Number> &
1091  SymmetricTensor<rank_, dim, Number>::operator=(const Number &d)
1092 {
1093  Assert(numbers::value_is_zero(d),
1094  ExcMessage("Only assignment with zero is allowed"));
1095  (void)d;
1096 
1097  data = internal::NumberType<Number>::value(0.0);
1098 
1099  return *this;
1100 }
1101 
1102 
1103 namespace internal
1104 {
1105  namespace SymmetricTensorImplementation
1106  {
1107  template <int dim, typename Number>
1108  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1109  ::Tensor<2, dim, Number>
1110  convert_to_tensor(const ::SymmetricTensor<2, dim, Number> &s)
1111  {
1112  ::Tensor<2, dim, Number> t;
1113 
1114  // diagonal entries are stored first
1115  for (unsigned int d = 0; d < dim; ++d)
1116  t[d][d] = s.access_raw_entry(d);
1117 
1118  // off-diagonal entries come next, row by row
1119  for (unsigned int d = 0, c = 0; d < dim; ++d)
1120  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1121  {
1122  t[d][e] = s.access_raw_entry(dim + c);
1123  t[e][d] = s.access_raw_entry(dim + c);
1124  }
1125  return t;
1126  }
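 // As an illustration of the storage scheme used above (a sketch, not part
 // of the library source): for dim==3 the six independent components of a
 // rank-2 symmetric tensor are laid out with the diagonal first and the
 // off-diagonal entries row by row,
 //
 //   data[0] = t[0][0],  data[1] = t[1][1],  data[2] = t[2][2],
 //   data[3] = t[0][1],  data[4] = t[0][2],  data[5] = t[1][2],
 //
 // so convert_to_tensor() copies data[d] onto the diagonal and mirrors
 // data[dim + c] into the two symmetric off-diagonal positions.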
1127 
1128 
1129  template <int dim, typename Number>
1130  DEAL_II_CONSTEXPR ::Tensor<4, dim, Number>
1131  convert_to_tensor(const ::SymmetricTensor<4, dim, Number> &st)
1132  {
1133  // utilize the symmetry properties of SymmetricTensor<4,dim>
1134  // discussed in the class documentation to avoid accessing all
1135  // independent elements of the input tensor more than once
1136  ::Tensor<4, dim, Number> t;
1137 
1138  for (unsigned int i = 0; i < dim; ++i)
1139  for (unsigned int j = i; j < dim; ++j)
1140  for (unsigned int k = 0; k < dim; ++k)
1141  for (unsigned int l = k; l < dim; ++l)
1142  t[TableIndices<4>(i, j, k, l)] = t[TableIndices<4>(i, j, l, k)] =
1143  t[TableIndices<4>(j, i, k, l)] =
1144  t[TableIndices<4>(j, i, l, k)] =
1145  st[TableIndices<4>(i, j, k, l)];
1146 
1147  return t;
1148  }
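 // To see what the loop nest above does, consider dim==2 (a sketch): the
 // only entries read from 'st' are those with i<=j and k<=l, e.g.
 // st[0][1][0][1]. That single value is written to the full Tensor at the
 // four positions t[0][1][0][1], t[0][1][1][0], t[1][0][0][1] and
 // t[1][0][1][0], which is exactly the (ij)<->(ji), (kl)<->(lk) symmetry a
 // SymmetricTensor<4,dim> assumes.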
1149 
1150 
1151  template <typename Number>
1152  struct Inverse<2, 1, Number>
1153  {
1154  DEAL_II_CONSTEXPR static inline DEAL_II_ALWAYS_INLINE
1155  ::SymmetricTensor<2, 1, Number>
1156  value(const ::SymmetricTensor<2, 1, Number> &t)
1157  {
1158  ::SymmetricTensor<2, 1, Number> tmp;
1159 
1160  tmp[0][0] = 1.0 / t[0][0];
1161 
1162  return tmp;
1163  }
1164  };
1165 
1166 
1167  template <typename Number>
1168  struct Inverse<2, 2, Number>
1169  {
1170  DEAL_II_CONSTEXPR static inline DEAL_II_ALWAYS_INLINE
1171  ::SymmetricTensor<2, 2, Number>
1172  value(const ::SymmetricTensor<2, 2, Number> &t)
1173  {
1174  ::SymmetricTensor<2, 2, Number> tmp;
1175 
1176  // Sympy result: ([
1177  // [ t11/(t00*t11 - t01**2), -t01/(t00*t11 - t01**2)],
1178  // [-t01/(t00*t11 - t01**2), t00/(t00*t11 - t01**2)] ])
1179  const TableIndices<2> idx_00(0, 0);
1180  const TableIndices<2> idx_01(0, 1);
1181  const TableIndices<2> idx_11(1, 1);
1182  const Number inv_det_t =
1183  1.0 / (t[idx_00] * t[idx_11] - t[idx_01] * t[idx_01]);
1184  tmp[idx_00] = t[idx_11];
1185  tmp[idx_01] = -t[idx_01];
1186  tmp[idx_11] = t[idx_00];
1187  tmp *= inv_det_t;
1188 
1189  return tmp;
1190  }
1191  };
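 // A quick numerical check of the closed-form 2x2 inverse above (a sketch;
 // the concrete values are made up for illustration): for
 //
 //   t = [[2, 1],
 //        [1, 4]]        (t[0][0]=2, t[1][1]=4, t[0][1]=1)
 //
 // the determinant is 2*4 - 1*1 = 7, so the rank-2 invert() overload
 // declared at the top of this file returns
 //
 //   1/7 * [[ 4, -1],
 //          [-1,  2]]
 //
 // which matches tmp above: tmp[idx_00]=t11, tmp[idx_01]=-t01,
 // tmp[idx_11]=t00, all scaled by inv_det_t.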
1192 
1193 
1194  template <typename Number>
1195  struct Inverse<2, 3, Number>
1196  {
1197  DEAL_II_CONSTEXPR static ::SymmetricTensor<2, 3, Number>
1198  value(const ::SymmetricTensor<2, 3, Number> &t)
1199  {
1200  ::SymmetricTensor<2, 3, Number> tmp;
1201 
1202  // Sympy result: ([
1203  // [ (t11*t22 - t12**2)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1204  // 2*t01*t02*t12 - t02**2*t11),
1205  // (-t01*t22 + t02*t12)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1206  // 2*t01*t02*t12 - t02**2*t11),
1207  // (t01*t12 - t02*t11)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1208  // 2*t01*t02*t12 - t02**2*t11)],
1209  // [ (-t01*t22 + t02*t12)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1210  // 2*t01*t02*t12 - t02**2*t11),
1211  // (t00*t22 - t02**2)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1212  // 2*t01*t02*t12 - t02**2*t11),
1213  // (t00*t12 - t01*t02)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1214  // 2*t01*t02*t12 + t02**2*t11)],
1215  // [ (t01*t12 - t02*t11)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1216  // 2*t01*t02*t12 - t02**2*t11),
1217  // (t00*t12 - t01*t02)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1218  // 2*t01*t02*t12 + t02**2*t11),
1219  // (-t00*t11 + t01**2)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1220  // 2*t01*t02*t12 + t02**2*t11)] ])
1221  //
1222  // =
1223  //
1224  // [ (t11*t22 - t12**2)/det_t,
1225  // (-t01*t22 + t02*t12)/det_t,
1226  // (t01*t12 - t02*t11)/det_t],
1227  // [ (-t01*t22 + t02*t12)/det_t,
1228  // (t00*t22 - t02**2)/det_t,
1229  // (-t00*t12 + t01*t02)/det_t],
1230  // [ (t01*t12 - t02*t11)/det_t,
1231  // (-t00*t12 + t01*t02)/det_t,
1232  // (t00*t11 - t01**2)/det_t] ])
1233  //
1234  // with det_t = (t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1235  // 2*t01*t02*t12 - t02**2*t11)
1236  const TableIndices<2> idx_00(0, 0);
1237  const TableIndices<2> idx_01(0, 1);
1238  const TableIndices<2> idx_02(0, 2);
1239  const TableIndices<2> idx_11(1, 1);
1240  const TableIndices<2> idx_12(1, 2);
1241  const TableIndices<2> idx_22(2, 2);
1242  const Number inv_det_t =
1243  1.0 / (t[idx_00] * t[idx_11] * t[idx_22] -
1244  t[idx_00] * t[idx_12] * t[idx_12] -
1245  t[idx_01] * t[idx_01] * t[idx_22] +
1246  2.0 * t[idx_01] * t[idx_02] * t[idx_12] -
1247  t[idx_02] * t[idx_02] * t[idx_11]);
1248  tmp[idx_00] = t[idx_11] * t[idx_22] - t[idx_12] * t[idx_12];
1249  tmp[idx_01] = -t[idx_01] * t[idx_22] + t[idx_02] * t[idx_12];
1250  tmp[idx_02] = t[idx_01] * t[idx_12] - t[idx_02] * t[idx_11];
1251  tmp[idx_11] = t[idx_00] * t[idx_22] - t[idx_02] * t[idx_02];
1252  tmp[idx_12] = -t[idx_00] * t[idx_12] + t[idx_01] * t[idx_02];
1253  tmp[idx_22] = t[idx_00] * t[idx_11] - t[idx_01] * t[idx_01];
1254  tmp *= inv_det_t;
1255 
1256  return tmp;
1257  }
1258  };
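 // In more compact notation, the expressions above are simply the cofactor
 // (adjugate) formula
 //
 //   t^{-1} = adj(t) / det(t),
 //
 // where each tmp[idx_ij] is the (i,j) cofactor of the symmetric 3x3 matrix
 // t, and inv_det_t is 1/det(t) written out explicitly for a symmetric
 // matrix (so that t01, t02, t12 each appear twice).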
1259 
1260 
1261  template <typename Number>
1262  struct Inverse<4, 1, Number>
1263  {
1264  DEAL_II_CONSTEXPR static inline ::SymmetricTensor<4, 1, Number>
1265  value(const ::SymmetricTensor<4, 1, Number> &t)
1266  {
1267  ::SymmetricTensor<4, 1, Number> tmp;
1268  tmp.data[0][0] = 1.0 / t.data[0][0];
1269  return tmp;
1270  }
1271  };
1272 
1273 
1274  template <typename Number>
1275  struct Inverse<4, 2, Number>
1276  {
1277  DEAL_II_CONSTEXPR static inline ::SymmetricTensor<4, 2, Number>
1278  value(const ::SymmetricTensor<4, 2, Number> &t)
1279  {
1280  ::SymmetricTensor<4, 2, Number> tmp;
1281 
1282  // Inverting this tensor is a little more complicated than necessary,
1283  // since we store the data of 't' as a 3x3 matrix t.data, but the
1284  // product between a rank-4 and a rank-2 tensor is really not the
1285  // product between this matrix and the 3-vector of a rhs, but rather
1286  //
1287  // B.vec = t.data * mult * A.vec
1288  //
1289  // where mult is a 3x3 matrix with entries [[1,0,0],[0,1,0],[0,0,2]] to
1290  // capture the fact that we need to add up both the c_ij12*a_12 and the
1291  // c_ij21*a_21 terms.
1292  //
1293  // In addition, in this scheme, the identity tensor has the matrix
1294  // representation mult^-1.
1295  //
1296  // The inverse of 't' therefore has the matrix representation
1297  //
1298  // inv.data = mult^-1 * t.data^-1 * mult^-1
1299  //
1300  // in order to compute it, let's first compute the inverse of t.data and
1301  // put it into tmp.data; at the end of the function we then scale the
1302  // last row and column of the inverse by 1/2, corresponding to the left
1303  // and right multiplication with mult^-1.
1304  const Number t4 = t.data[0][0] * t.data[1][1],
1305  t6 = t.data[0][0] * t.data[1][2],
1306  t8 = t.data[0][1] * t.data[1][0],
1307  t00 = t.data[0][2] * t.data[1][0],
1308  t01 = t.data[0][1] * t.data[2][0],
1309  t04 = t.data[0][2] * t.data[2][0],
1310  t07 = 1.0 / (t4 * t.data[2][2] - t6 * t.data[2][1] -
1311  t8 * t.data[2][2] + t00 * t.data[2][1] +
1312  t01 * t.data[1][2] - t04 * t.data[1][1]);
1313  tmp.data[0][0] =
1314  (t.data[1][1] * t.data[2][2] - t.data[1][2] * t.data[2][1]) * t07;
1315  tmp.data[0][1] =
1316  -(t.data[0][1] * t.data[2][2] - t.data[0][2] * t.data[2][1]) * t07;
1317  tmp.data[0][2] =
1318  -(-t.data[0][1] * t.data[1][2] + t.data[0][2] * t.data[1][1]) * t07;
1319  tmp.data[1][0] =
1320  -(t.data[1][0] * t.data[2][2] - t.data[1][2] * t.data[2][0]) * t07;
1321  tmp.data[1][1] = (t.data[0][0] * t.data[2][2] - t04) * t07;
1322  tmp.data[1][2] = -(t6 - t00) * t07;
1323  tmp.data[2][0] =
1324  -(-t.data[1][0] * t.data[2][1] + t.data[1][1] * t.data[2][0]) * t07;
1325  tmp.data[2][1] = -(t.data[0][0] * t.data[2][1] - t01) * t07;
1326  tmp.data[2][2] = (t4 - t8) * t07;
1327 
1328  // scale last row and column as mentioned
1329  // above
1330  tmp.data[2][0] /= 2;
1331  tmp.data[2][1] /= 2;
1332  tmp.data[0][2] /= 2;
1333  tmp.data[1][2] /= 2;
1334  tmp.data[2][2] /= 4;
1335 
1336  return tmp;
1337  }
1338  };
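 // A consistency check for the scaling at the end (a sketch): the comment
 // above notes that the rank-4 identity tensor has the matrix
 // representation mult^-1 = diag(1, 1, 1/2). Feeding it through this
 // function first gives t.data^-1 = diag(1, 1, 2); halving the last row and
 // column and quartering the (2,2) entry turns that back into
 // diag(1, 1, 1/2), i.e. the inverse of the identity tensor is again the
 // identity tensor, as it must be since I : A = A for every symmetric A.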
1339 
1340 
1341  template <typename Number>
1342  struct Inverse<4, 3, Number>
1343  {
1344  static ::SymmetricTensor<4, 3, Number>
1345  value(const ::SymmetricTensor<4, 3, Number> &t)
1346  {
1347  ::SymmetricTensor<4, 3, Number> tmp = t;
1348 
1349  // This function follows the exact same scheme as the 2d case, except
1350  // that hardcoding the inverse of a 6x6 matrix is pretty wasteful.
1351  // Instead, we use the Gauss-Jordan algorithm implemented for
1352  // FullMatrix. For historical reasons the following code is copied from
1353  // there, with the tangential benefit that we do not need to copy the
1354  // tensor entries to and from the FullMatrix.
1355  const unsigned int N = 6;
1356 
1357  // First get an estimate of the size of the elements of this matrix,
1358  // for later checks whether the pivot element is large enough, or
1359  // whether we have to fear that the matrix is not regular.
1360  Number diagonal_sum = internal::NumberType<Number>::value(0.0);
1361  for (unsigned int i = 0; i < N; ++i)
1362  diagonal_sum += std::fabs(tmp.data[i][i]);
1363  const Number typical_diagonal_element =
1364  diagonal_sum / static_cast<double>(N);
1365  (void)typical_diagonal_element;
1366 
1367  unsigned int p[N];
1368  for (unsigned int i = 0; i < N; ++i)
1369  p[i] = i;
1370 
1371  for (unsigned int j = 0; j < N; ++j)
1372  {
1373  // Pivot search: search that part of the line on and right of the
1374  // diagonal for the largest element.
1375  Number max = std::fabs(tmp.data[j][j]);
1376  unsigned int r = j;
1377  for (unsigned int i = j + 1; i < N; ++i)
1378  if (std::fabs(tmp.data[i][j]) > max)
1379  {
1380  max = std::fabs(tmp.data[i][j]);
1381  r = i;
1382  }
1383 
1384  // Check whether the pivot is too small
1385  Assert(max > 1.e-16 * typical_diagonal_element,
1386  ExcMessage("This tensor seems to be noninvertible"));
1387 
1388  // Row interchange
1389  if (r > j)
1390  {
1391  for (unsigned int k = 0; k < N; ++k)
1392  std::swap(tmp.data[j][k], tmp.data[r][k]);
1393 
1394  std::swap(p[j], p[r]);
1395  }
1396 
1397  // Transformation
1398  const Number hr = 1. / tmp.data[j][j];
1399  tmp.data[j][j] = hr;
1400  for (unsigned int k = 0; k < N; ++k)
1401  {
1402  if (k == j)
1403  continue;
1404  for (unsigned int i = 0; i < N; ++i)
1405  {
1406  if (i == j)
1407  continue;
1408  tmp.data[i][k] -= tmp.data[i][j] * tmp.data[j][k] * hr;
1409  }
1410  }
1411  for (unsigned int i = 0; i < N; ++i)
1412  {
1413  tmp.data[i][j] *= hr;
1414  tmp.data[j][i] *= -hr;
1415  }
1416  tmp.data[j][j] = hr;
1417  }
1418 
1419  // Column interchange
1420  Number hv[N];
1421  for (unsigned int i = 0; i < N; ++i)
1422  {
1423  for (unsigned int k = 0; k < N; ++k)
1424  hv[p[k]] = tmp.data[i][k];
1425  for (unsigned int k = 0; k < N; ++k)
1426  tmp.data[i][k] = hv[k];
1427  }
1428 
1429  // Scale rows and columns. The mult matrix
1430  // here is diag[1, 1, 1, 1/2, 1/2, 1/2].
1431  for (unsigned int i = 3; i < 6; ++i)
1432  for (unsigned int j = 0; j < 3; ++j)
1433  tmp.data[i][j] /= 2;
1434 
1435  for (unsigned int i = 0; i < 3; ++i)
1436  for (unsigned int j = 3; j < 6; ++j)
1437  tmp.data[i][j] /= 2;
1438 
1439  for (unsigned int i = 3; i < 6; ++i)
1440  for (unsigned int j = 3; j < 6; ++j)
1441  tmp.data[i][j] /= 4;
1442 
1443  return tmp;
1444  }
1445  };
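 // Usage-wise this helper behaves exactly like the closed-form cases above
 // (a sketch, with C standing for any invertible rank-4 symmetric tensor
 // in 3d):
 //
 //   const SymmetricTensor<4, 3> C_inv = invert(C);
 //   // C_inv * C and C * C_inv are then the rank-4 identity tensor up to
 //   // round-off, where '*' is the double contraction defined in this file.
 //
 // The only difference is that the 6x6 matrix representation is inverted by
 // in-place Gauss-Jordan elimination with partial pivoting instead of a
 // hard-coded formula.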
1446 
1447  } // namespace SymmetricTensorImplementation
1448 } // namespace internal
1449 
1450 
1451 
1452 template <int rank_, int dim, typename Number>
1453 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>::
1454  operator Tensor<rank_, dim, Number>() const
1455 {
1456  return internal::SymmetricTensorImplementation::convert_to_tensor(*this);
1457 }
1458 
1459 
1460 
1461 template <int rank_, int dim, typename Number>
1462 constexpr bool
1463  SymmetricTensor<rank_, dim, Number>::
1464  operator==(const SymmetricTensor<rank_, dim, Number> &t) const
1465 {
1466  return data == t.data;
1467 }
1468 
1469 
1470 
1471 template <int rank_, int dim, typename Number>
1472 constexpr bool
1473  SymmetricTensor<rank_, dim, Number>::
1474  operator!=(const SymmetricTensor<rank_, dim, Number> &t) const
1475 {
1476  return data != t.data;
1477 }
1478 
1479 
1480 
1481 template <int rank_, int dim, typename Number>
1482 template <typename OtherNumber>
1483 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1484  SymmetricTensor<rank_, dim, Number> &
1485  SymmetricTensor<rank_, dim, Number>::
1486  operator+=(const SymmetricTensor<rank_, dim, OtherNumber> &t)
1487 {
1488  data += t.data;
1489  return *this;
1490 }
1491 
1492 
1493 
1494 template <int rank_, int dim, typename Number>
1495 template <typename OtherNumber>
1496 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1497  SymmetricTensor<rank_, dim, Number> &
1498  SymmetricTensor<rank_, dim, Number>::
1499  operator-=(const SymmetricTensor<rank_, dim, OtherNumber> &t)
1500 {
1501  data -= t.data;
1502  return *this;
1503 }
1504 
1505 
1506 
1507 template <int rank_, int dim, typename Number>
1508 template <typename OtherNumber>
1509 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1510  SymmetricTensor<rank_, dim, Number> &
1511  SymmetricTensor<rank_, dim, Number>::operator*=(const OtherNumber &d)
1512 {
1513  data *= d;
1514  return *this;
1515 }
1516 
1517 
1518 
1519 template <int rank_, int dim, typename Number>
1520 template <typename OtherNumber>
1521 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1522  SymmetricTensor<rank_, dim, Number> &
1523  SymmetricTensor<rank_, dim, Number>::operator/=(const OtherNumber &d)
1524 {
1525  data /= d;
1526  return *this;
1527 }
1528 
1529 
1530 
1531 template <int rank_, int dim, typename Number>
1532 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1533  SymmetricTensor<rank_, dim, Number>
1534  SymmetricTensor<rank_, dim, Number>::operator-() const
1535 {
1536  SymmetricTensor tmp = *this;
1537  tmp.data = -tmp.data;
1538  return tmp;
1539 }
1540 
1541 
1542 
1543 template <int rank_, int dim, typename Number>
1544 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE void
1545 SymmetricTensor<rank_, dim, Number>::clear()
1546 {
1547  data.clear();
1548 }
1549 
1550 
1551 
1552 template <int rank_, int dim, typename Number>
1553 constexpr std::size_t
1554 SymmetricTensor<rank_, dim, Number>::memory_consumption()
1555 {
1556  // all memory consists of statically allocated memory of the current
1557  // object, no pointers
1558  return sizeof(SymmetricTensor<rank_, dim, Number>);
1559 }
1560 
1561 
1562 
1563 namespace internal
1564 {
1565  template <int dim, typename Number, typename OtherNumber = Number>
1566  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1567  typename SymmetricTensorAccessors::
1568  double_contraction_result<2, 2, dim, Number, OtherNumber>::type
1569  perform_double_contraction(
1570  const typename SymmetricTensorAccessors::StorageType<2, dim, Number>::
1571  base_tensor_type &data,
1572  const typename SymmetricTensorAccessors::
1573  StorageType<2, dim, OtherNumber>::base_tensor_type &sdata)
1574  {
1575  using result_type = typename SymmetricTensorAccessors::
1576  double_contraction_result<2, 2, dim, Number, OtherNumber>::type;
1577 
1578  switch (dim)
1579  {
1580  case 1:
1581  return data[0] * sdata[0];
1582  default:
1583  // Start with the non-diagonal part to avoid some multiplications by
1584  // 2.
1585 
1586  result_type sum = data[dim] * sdata[dim];
1587  for (unsigned int d = dim + 1; d < (dim * (dim + 1) / 2); ++d)
1588  sum += data[d] * sdata[d];
1589  sum += sum; // sum = sum * 2.;
1590 
1591  // Now add the contributions from the diagonal
1592  for (unsigned int d = 0; d < dim; ++d)
1593  sum += data[d] * sdata[d];
1594  return sum;
1595  }
1596  }
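  // What the branch above computes (a sketch for dim==3): with the storage
  // order data[0..2] = diagonal and data[3..5] = off-diagonal entries, the
  // double contraction of two symmetric rank-2 tensors a and b is
  //
  //   a : b = sum_i a_ii*b_ii + 2 * sum_{i<j} a_ij*b_ij.
  //
  // The off-diagonal products are accumulated first and doubled via
  // "sum += sum", and the diagonal products are added afterwards, which
  // avoids multiplying each off-diagonal term by 2 individually.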
1597 
1598 
1599 
1600  template <int dim, typename Number, typename OtherNumber = Number>
1601  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1602  typename SymmetricTensorAccessors::
1603  double_contraction_result<4, 2, dim, Number, OtherNumber>::type
1604  perform_double_contraction(
1605  const typename SymmetricTensorAccessors::StorageType<4, dim, Number>::
1606  base_tensor_type &data,
1607  const typename SymmetricTensorAccessors::
1608  StorageType<2, dim, OtherNumber>::base_tensor_type &sdata)
1609  {
1610  using result_type = typename SymmetricTensorAccessors::
1611  double_contraction_result<4, 2, dim, Number, OtherNumber>::type;
1612  using value_type = typename SymmetricTensorAccessors::
1613  double_contraction_result<4, 2, dim, Number, OtherNumber>::value_type;
1614 
1615  const unsigned int data_dim = SymmetricTensorAccessors::
1616  StorageType<2, dim, value_type>::n_independent_components;
1617  value_type tmp[data_dim]{};
1618  for (unsigned int i = 0; i < data_dim; ++i)
1619  tmp[i] =
1620  perform_double_contraction<dim, Number, OtherNumber>(data[i], sdata);
1621  return result_type(tmp);
1622  }
1623 
1624 
1625 
1626  template <int dim, typename Number, typename OtherNumber = Number>
1627  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1628  typename SymmetricTensorAccessors::StorageType<
1629  2,
1630  dim,
1631  typename SymmetricTensorAccessors::
1632  double_contraction_result<2, 4, dim, Number, OtherNumber>::value_type>::
1633  base_tensor_type
1634  perform_double_contraction(
1635  const typename SymmetricTensorAccessors::StorageType<2, dim, Number>::
1636  base_tensor_type &data,
1637  const typename SymmetricTensorAccessors::
1638  StorageType<4, dim, OtherNumber>::base_tensor_type &sdata)
1639  {
1640  using value_type = typename SymmetricTensorAccessors::
1641  double_contraction_result<2, 4, dim, Number, OtherNumber>::value_type;
1642  using base_tensor_type = typename SymmetricTensorAccessors::
1643  StorageType<2, dim, value_type>::base_tensor_type;
1644 
1645  base_tensor_type tmp;
1646  for (unsigned int i = 0; i < tmp.dimension; ++i)
1647  {
1648  // Start with the non-diagonal part
1649  value_type sum = data[dim] * sdata[dim][i];
1650  for (unsigned int d = dim + 1; d < (dim * (dim + 1) / 2); ++d)
1651  sum += data[d] * sdata[d][i];
1652  sum += sum; // sum = sum * 2.;
1653 
1654  // Now add the contributions from the diagonal
1655  for (unsigned int d = 0; d < dim; ++d)
1656  sum += data[d] * sdata[d][i];
1657  tmp[i] = sum;
1658  }
1659  return tmp;
1660  }
1661 
1662 
1663 
1664  template <int dim, typename Number, typename OtherNumber = Number>
1665  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1666  typename SymmetricTensorAccessors::StorageType<
1667  4,
1668  dim,
1669  typename SymmetricTensorAccessors::
1670  double_contraction_result<4, 4, dim, Number, OtherNumber>::value_type>::
1671  base_tensor_type
1672  perform_double_contraction(
1673  const typename SymmetricTensorAccessors::StorageType<4, dim, Number>::
1674  base_tensor_type &data,
1675  const typename SymmetricTensorAccessors::
1676  StorageType<4, dim, OtherNumber>::base_tensor_type &sdata)
1677  {
1678  using value_type = typename SymmetricTensorAccessors::
1679  double_contraction_result<4, 4, dim, Number, OtherNumber>::value_type;
1680  using base_tensor_type = typename SymmetricTensorAccessors::
1681  StorageType<4, dim, value_type>::base_tensor_type;
1682 
1683  const unsigned int data_dim = SymmetricTensorAccessors::
1684  StorageType<2, dim, value_type>::n_independent_components;
1685  base_tensor_type tmp;
1686  for (unsigned int i = 0; i < data_dim; ++i)
1687  for (unsigned int j = 0; j < data_dim; ++j)
1688  {
1689  // Start with the non-diagonal part
1690  for (unsigned int d = dim; d < (dim * (dim + 1) / 2); ++d)
1691  tmp[i][j] += data[i][d] * sdata[d][j];
1692  tmp[i][j] += tmp[i][j]; // tmp[i][j] = tmp[i][j] * 2;
1693 
1694  // Now add the contributions from the diagonal
1695  for (unsigned int d = 0; d < dim; ++d)
1696  tmp[i][j] += data[i][d] * sdata[d][j];
1697  }
1698  return tmp;
1699  }
1700 
1701 } // end of namespace internal
1702 
1703 
1704 
1705 template <int rank_, int dim, typename Number>
1706 template <typename OtherNumber>
1707 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
1708  typename internal::SymmetricTensorAccessors::
1709  double_contraction_result<rank_, 2, dim, Number, OtherNumber>::type
1710  SymmetricTensor<rank_, dim, Number>::
1711  operator*(const SymmetricTensor<2, dim, OtherNumber> &s) const
1712 {
1713  // need to have two different function calls
1714  // because a scalar and rank-2 tensor are not
1715  // the same data type (see internal function
1716  // above)
1717  return internal::perform_double_contraction<dim, Number, OtherNumber>(data,
1718  s.data);
1719 }
1720 
1721 
1722 
1723 template <int rank_, int dim, typename Number>
1724 template <typename OtherNumber>
1725 DEAL_II_CONSTEXPR inline typename internal::SymmetricTensorAccessors::
1726  double_contraction_result<rank_, 4, dim, Number, OtherNumber>::type
1727  SymmetricTensor<rank_, dim, Number>::
1728  operator*(const SymmetricTensor<4, dim, OtherNumber> &s) const
1729 {
1730  typename internal::SymmetricTensorAccessors::
1731  double_contraction_result<rank_, 4, dim, Number, OtherNumber>::type tmp;
1732  tmp.data =
1733  internal::perform_double_contraction<dim, Number, OtherNumber>(data,
1734  s.data);
1735  return tmp;
1736 }
1737 
1738 
1739 
1740 // internal namespace to switch between the
1741 // access of different tensors. There used to
1742 // be explicit instantiations before for
1743 // different ranks and dimensions, but since
1744 // we now allow for templates on the data
1745 // type, and since we cannot partially
1746 // specialize the implementation, this got
1747 // into a separate namespace
1748 namespace internal
1749 {
1750  // The variables within this struct will be referenced in the next functions.
1751  // It is a workaround that allows returning a reference to a static variable
1752  // while allowing constexpr evaluation of the function.
1753  // It has to be defined outside the function because constexpr functions
1754  // cannot define static variables.
1755  // A similar struct has also been defined in tensor.h
1756  template <typename Type>
1757  struct Uninitialized
1758  {
1759  static Type value;
1760  };
1761 
1762  template <typename Type>
1763  Type Uninitialized<Type>::value;
1764 
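  // The pattern this enables looks as follows (a sketch of the idiom, not a
  // function defined in this file): a constexpr function that must formally
  // return a reference on a code path that can never be taken at run time
  // can write
  //
  //   template <typename T>
  //   DEAL_II_CONSTEXPR T &
  //   unreachable_reference()
  //   {
  //     Assert(false, ExcInternalError());
  //     return Uninitialized<T>::value; // dummy reference, never used
  //   }
  //
  // because Uninitialized<T>::value is a namespace-scope object rather than
  // a function-local static, which constexpr functions may not declare.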
1765  template <int dim, typename Number>
1766  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number &
1767  symmetric_tensor_access(const TableIndices<2> &indices,
1768  typename SymmetricTensorAccessors::
1769  StorageType<2, dim, Number>::base_tensor_type &data)
1770  {
1771  // 1d is very simple and done first
1772  if (dim == 1)
1773  return data[0];
1774 
1775  // first treat the main diagonal elements, which are stored consecutively
1776  // at the beginning
1777  if (indices[0] == indices[1])
1778  return data[indices[0]];
1779 
1780  // the rest is messier and requires a few switches.
1781  switch (dim)
1782  {
1783  case 2:
1784  // at least for the 2x2 case it is reasonably simple
1785  Assert(((indices[0] == 1) && (indices[1] == 0)) ||
1786  ((indices[0] == 0) && (indices[1] == 1)),
1787  ExcInternalError());
1788  return data[2];
1789 
1790  default:
1791  // to do the rest, sort our indices before comparing
1792  {
1793  TableIndices<2> sorted_indices(std::min(indices[0], indices[1]),
1794  std::max(indices[0], indices[1]));
1795  for (unsigned int d = 0, c = 0; d < dim; ++d)
1796  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1797  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
1798  return data[dim + c];
1799  Assert(false, ExcInternalError());
1800  }
1801  }
1802 
1803  // The code should never reach there.
1804  // Returns a dummy reference to a dummy variable just to make the
1805  // compiler happy.
1806  return Uninitialized<Number>::value;
1807  }
1808 
1809 
1810 
1811  template <int dim, typename Number>
1812  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE const Number &
1813  symmetric_tensor_access(const TableIndices<2> &indices,
1814  const typename SymmetricTensorAccessors::
1815  StorageType<2, dim, Number>::base_tensor_type &data)
1816  {
1817  // 1d is very simple and done first
1818  if (dim == 1)
1819  return data[0];
1820 
1821  // first treat the main diagonal elements, which are stored consecutively
1822  // at the beginning
1823  if (indices[0] == indices[1])
1824  return data[indices[0]];
1825 
1826  // the rest is messier and requires a few switches.
1827  switch (dim)
1828  {
1829  case 2:
1830  // at least for the 2x2 case it is reasonably simple
1831  Assert(((indices[0] == 1) && (indices[1] == 0)) ||
1832  ((indices[0] == 0) && (indices[1] == 1)),
1833  ExcInternalError());
1834  return data[2];
1835 
1836  default:
1837  // to do the rest, sort our indices before comparing
1838  {
1839  TableIndices<2> sorted_indices(std::min(indices[0], indices[1]),
1840  std::max(indices[0], indices[1]));
1841  for (unsigned int d = 0, c = 0; d < dim; ++d)
1842  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1843  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
1844  return data[dim + c];
1845  Assert(false, ExcInternalError());
1846  }
1847  }
1848 
1849  // The code should never reach there.
1850  // Returns a dummy reference to a dummy variable just to make the
1851  // compiler happy.
1852  return Uninitialized<Number>::value;
1853  }
1854 
1855 
1856 
1857  template <int dim, typename Number>
1858  DEAL_II_CONSTEXPR inline Number &
1859  symmetric_tensor_access(const TableIndices<4> &indices,
1860  typename SymmetricTensorAccessors::
1861  StorageType<4, dim, Number>::base_tensor_type &data)
1862  {
1863  switch (dim)
1864  {
1865  case 1:
1866  return data[0][0];
1867 
1868  case 2:
1869  // each entry of the tensor can be thought of as an entry in a
1870  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1871  // rank-2 tensors. this is the format in which we store rank-4
1872  // tensors. determine which position the present entry is
1873  // stored in
1874  {
1875  constexpr std::size_t base_index[2][2] = {{0, 2}, {2, 1}};
1876  return data[base_index[indices[0]][indices[1]]]
1877  [base_index[indices[2]][indices[3]]];
1878  }
1879  case 3:
1880  // each entry of the tensor can be thought of as an entry in a
1881  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1882  // rank-2 tensors. this is the format in which we store rank-4
1883  // tensors. determine which position the present entry is
1884  // stored in
1885  {
1886  constexpr std::size_t base_index[3][3] = {{0, 3, 4},
1887  {3, 1, 5},
1888  {4, 5, 2}};
1889  return data[base_index[indices[0]][indices[1]]]
1890  [base_index[indices[2]][indices[3]]];
1891  }
1892 
1893  default:
1894  Assert(false, ExcNotImplemented());
1895  }
1896 
1897  // The code should never reach there.
1898  // Returns a dummy reference to a dummy variable just to make the
1899  // compiler happy.
1900  return Uninitialized<Number>::value;
1901  }
1902 
1903 
1904  template <int dim, typename Number>
1905  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE const Number &
1906  symmetric_tensor_access(const TableIndices<4> &indices,
1907  const typename SymmetricTensorAccessors::
1908  StorageType<4, dim, Number>::base_tensor_type &data)
1909  {
1910  switch (dim)
1911  {
1912  case 1:
1913  return data[0][0];
1914 
1915  case 2:
1916  // each entry of the tensor can be thought of as an entry in a
1917  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1918  // rank-2 tensors. this is the format in which we store rank-4
1919  // tensors. determine which position the present entry is
1920  // stored in
1921  {
1922  constexpr std::size_t base_index[2][2] = {{0, 2}, {2, 1}};
1923  return data[base_index[indices[0]][indices[1]]]
1924  [base_index[indices[2]][indices[3]]];
1925  }
1926  case 3:
1927  // each entry of the tensor can be thought of as an entry in a
1928  // matrix that maps the rolled-out rank-2 tensors into rolled-out
1929  // rank-2 tensors. this is the format in which we store rank-4
1930  // tensors. determine which position the present entry is
1931  // stored in
1932  {
1933  constexpr std::size_t base_index[3][3] = {{0, 3, 4},
1934  {3, 1, 5},
1935  {4, 5, 2}};
1936  return data[base_index[indices[0]][indices[1]]]
1937  [base_index[indices[2]][indices[3]]];
1938  }
1939 
1940  default:
1941  Assert(false, ExcNotImplemented());
1942  }
1943 
1944  // The code should never reach there.
1945  // Returns a dummy reference to a dummy variable just to make the
1946  // compiler happy.
1947  return Uninitialized<Number>::value;
1948  }
1949 
1950 } // end of namespace internal
1951 
1952 
1953 
1954 template <int rank_, int dim, typename Number>
1955 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number &
1956  SymmetricTensor<rank_, dim, Number>::
1957  operator()(const TableIndices<rank_> &indices)
1958 {
1959  for (unsigned int r = 0; r < rank; ++r)
1960  AssertIndexRange(indices[r], dimension);
1961  return internal::symmetric_tensor_access<dim, Number>(indices, data);
1962 }
1963 
1964 
1965 
1966 template <int rank_, int dim, typename Number>
1967 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE const Number &
1968  SymmetricTensor<rank_, dim, Number>::
1969  operator()(const TableIndices<rank_> &indices) const
1970 {
1971  for (unsigned int r = 0; r < rank; ++r)
1972  AssertIndexRange(indices[r], dimension);
1973  return internal::symmetric_tensor_access<dim, Number>(indices, data);
1974 }
1975 
1976 
1977 
1978 namespace internal
1979 {
1980  namespace SymmetricTensorImplementation
1981  {
1982  template <int rank_>
1983  constexpr TableIndices<rank_>
1984  get_partially_filled_indices(const unsigned int row,
1985  const std::integral_constant<int, 2> &)
1986  {
1987  return TableIndices<rank_>(row, numbers::invalid_unsigned_int);
1988  }
1989 
1990 
1991  template <int rank_>
1992  constexpr TableIndices<rank_>
1993  get_partially_filled_indices(const unsigned int row,
1994  const std::integral_constant<int, 4> &)
1995  {
1996  return TableIndices<rank_>(row,
1997  numbers::invalid_unsigned_int,
1998  numbers::invalid_unsigned_int,
1999  numbers::invalid_unsigned_int);
2000  }
2001  } // namespace SymmetricTensorImplementation
2002 } // namespace internal
2003 
2004 
2005 template <int rank_, int dim, typename Number>
2006 constexpr DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors::
2007  Accessor<rank_, dim, true, rank_ - 1, Number>
2008  SymmetricTensor<rank_, dim, Number>::
2009  operator[](const unsigned int row) const
2010 {
2011  return internal::SymmetricTensorAccessors::
2012  Accessor<rank_, dim, true, rank_ - 1, Number>(
2013  *this,
2014  internal::SymmetricTensorImplementation::get_partially_filled_indices<
2015  rank_>(row, std::integral_constant<int, rank_>()));
2016 }
2017 
2018 
2019 
2020 template <int rank_, int dim, typename Number>
2021 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE internal::
2022  SymmetricTensorAccessors::Accessor<rank_, dim, false, rank_ - 1, Number>
2023  SymmetricTensor<rank_, dim, Number>::operator[](const unsigned int row)
2024 {
2025  return internal::SymmetricTensorAccessors::
2026  Accessor<rank_, dim, false, rank_ - 1, Number>(
2027  *this,
2028  internal::SymmetricTensorImplementation::get_partially_filled_indices<
2029  rank_>(row, std::integral_constant<int, rank_>()));
2030 }
2031 
2032 
2033 
2034 template <int rank_, int dim, typename Number>
2035 constexpr DEAL_II_ALWAYS_INLINE const Number &
2036  SymmetricTensor<rank_, dim, Number>::
2037  operator[](const TableIndices<rank_> &indices) const
2038 {
2039  return operator()(indices);
2040 }
2041 
2042 
2043 
2044 template <int rank_, int dim, typename Number>
2045 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number &
2046  SymmetricTensor<rank_, dim, Number>::
2047  operator[](const TableIndices<rank_> &indices)
2048 {
2049  return operator()(indices);
2050 }
2051 
2052 
2053 
2054 template <int rank_, int dim, typename Number>
2055 inline Number *
2056 SymmetricTensor<rank_, dim, Number>::begin_raw()
2057 {
2058  return std::addressof(this->access_raw_entry(0));
2059 }
2060 
2061 
2062 
2063 template <int rank_, int dim, typename Number>
2064 inline const Number *
2065 SymmetricTensor<rank_, dim, Number>::begin_raw() const
2066 {
2067  return std::addressof(this->access_raw_entry(0));
2068 }
2069 
2070 
2071 
2072 template <int rank_, int dim, typename Number>
2073 inline Number *
2074 SymmetricTensor<rank_, dim, Number>::end_raw()
2075 {
2076  return begin_raw() + n_independent_components;
2077 }
2078 
2079 
2080 
2081 template <int rank_, int dim, typename Number>
2082 inline const Number *
2083 SymmetricTensor<rank_, dim, Number>::end_raw() const
2084 {
2085  return begin_raw() + n_independent_components;
2086 }
2087 
2088 
2089 
2090 namespace internal
2091 {
2092  namespace SymmetricTensorImplementation
2093  {
2094  template <int dim, typename Number>
2095  constexpr unsigned int
2096  entry_to_indices(const ::SymmetricTensor<2, dim, Number> &,
2097  const unsigned int index)
2098  {
2099  return index;
2100  }
2101 
2102 
2103  template <int dim, typename Number>
2104  constexpr ::TableIndices<2>
2105  entry_to_indices(const ::SymmetricTensor<4, dim, Number> &,
2106  const unsigned int index)
2107  {
2110  }
2111 
2112  } // namespace SymmetricTensorImplementation
2113 } // namespace internal
2114 
2115 
2116 
2117 template <int rank_, int dim, typename Number>
2118 DEAL_II_CONSTEXPR inline const Number &
2119 SymmetricTensor<rank_, dim, Number>::access_raw_entry(
2120  const unsigned int index) const
2121 {
2122  AssertIndexRange(index, n_independent_components);
2123  return data[internal::SymmetricTensorImplementation::entry_to_indices(*this,
2124  index)];
2125 }
2126 
2127 
2128 
2129 template <int rank_, int dim, typename Number>
2130 DEAL_II_CONSTEXPR inline Number &
2131 SymmetricTensor<rank_, dim, Number>::access_raw_entry(const unsigned int index)
2132 {
2133  AssertIndexRange(index, n_independent_components);
2134  return data[internal::SymmetricTensorImplementation::entry_to_indices(*this,
2135  index)];
2136 }
2137 
2138 
2139 
2140 namespace internal
2141 {
2142  template <int dim, typename Number>
2143  DEAL_II_CONSTEXPR inline typename numbers::NumberTraits<Number>::real_type
2144  compute_norm(const typename SymmetricTensorAccessors::
2145  StorageType<2, dim, Number>::base_tensor_type &data)
2146  {
2147  switch (dim)
2148  {
2149  case 1:
2150  return numbers::NumberTraits<Number>::abs(data[0]);
2151 
2152  case 2:
2153  return std::sqrt(
2154  numbers::NumberTraits<Number>::abs_square(data[0]) +
2155  numbers::NumberTraits<Number>::abs_square(data[1]) +
2156  2. * numbers::NumberTraits<Number>::abs_square(data[2]));
2157 
2158  case 3:
2159  return std::sqrt(
2160  numbers::NumberTraits<Number>::abs_square(data[0]) +
2161  numbers::NumberTraits<Number>::abs_square(data[1]) +
2162  numbers::NumberTraits<Number>::abs_square(data[2]) +
2163  2. * numbers::NumberTraits<Number>::abs_square(data[3]) +
2164  2. * numbers::NumberTraits<Number>::abs_square(data[4]) +
2165  2. * numbers::NumberTraits<Number>::abs_square(data[5]));
2166 
2167  default:
2168  {
2169  typename numbers::NumberTraits<Number>::real_type return_value =
2170  typename numbers::NumberTraits<Number>::real_type();
2171 
2172  for (unsigned int d = 0; d < dim; ++d)
2173  return_value +=
2174  numbers::NumberTraits<Number>::abs_square(data[d]);
2175  for (unsigned int d = dim; d < (dim * dim + dim) / 2; ++d)
2176  return_value +=
2177  2. * numbers::NumberTraits<Number>::abs_square(data[d]);
2178 
2179  return std::sqrt(return_value);
2180  }
2181  }
2182  }
2183 
2184 
2185 
2186  template <int dim, typename Number>
2187  DEAL_II_CONSTEXPR inline typename numbers::NumberTraits<Number>::real_type
2188  compute_norm(const typename SymmetricTensorAccessors::
2189  StorageType<4, dim, Number>::base_tensor_type &data)
2190  {
2191  switch (dim)
2192  {
2193  case 1:
2194  return numbers::NumberTraits<Number>::abs(data[0][0]);
2195 
2196  default:
2197  {
2198  typename numbers::NumberTraits<Number>::real_type return_value =
2199  typename numbers::NumberTraits<Number>::real_type();
2200 
2201  const unsigned int n_independent_components = data.dimension;
2202 
2203  for (unsigned int i = 0; i < dim; ++i)
2204  for (unsigned int j = 0; j < dim; ++j)
2205  return_value +=
2206  numbers::NumberTraits<Number>::abs_square(data[i][j]);
2207  for (unsigned int i = 0; i < dim; ++i)
2208  for (unsigned int j = dim; j < n_independent_components; ++j)
2209  return_value +=
2210  2. * numbers::NumberTraits<Number>::abs_square(data[i][j]);
2211  for (unsigned int i = dim; i < n_independent_components; ++i)
2212  for (unsigned int j = 0; j < dim; ++j)
2213  return_value +=
2214  2. * numbers::NumberTraits<Number>::abs_square(data[i][j]);
2215  for (unsigned int i = dim; i < n_independent_components; ++i)
2216  for (unsigned int j = dim; j < n_independent_components; ++j)
2217  return_value +=
2218  4. * numbers::NumberTraits<Number>::abs_square(data[i][j]);
2219 
2220  return std::sqrt(return_value);
2221  }
2222  }
2223  }
2224 
2225 } // end of namespace internal
2226 
2227 
2228 
2229 template <int rank_, int dim, typename Number>
2230 DEAL_II_CONSTEXPR typename numbers::NumberTraits<Number>::real_type
2231 SymmetricTensor<rank_, dim, Number>::norm() const
2232 {
2233  return internal::compute_norm<dim, Number>(data);
2234 }
2235 
2236 
2237 
2238 namespace internal
2239 {
2240  namespace SymmetricTensorImplementation
2241  {
2242  // a function to do the unrolling from a set of indices to a
2243  // scalar index into the array in which we store the elements of
2244  // a symmetric tensor
2245  //
2246  // this function is for rank-2 tensors
2247  template <int dim>
2248  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE unsigned int
2249  component_to_unrolled_index(const TableIndices<2> &indices)
2250  {
2251  AssertIndexRange(indices[0], dim);
2252  AssertIndexRange(indices[1], dim);
2253 
2254  switch (dim)
2255  {
2256  case 1:
2257  {
2258  return 0;
2259  }
2260 
2261  case 2:
2262  {
2263  constexpr unsigned int table[2][2] = {{0, 2}, {2, 1}};
2264  return table[indices[0]][indices[1]];
2265  }
2266 
2267  case 3:
2268  {
2269  constexpr unsigned int table[3][3] = {{0, 3, 4},
2270  {3, 1, 5},
2271  {4, 5, 2}};
2272  return table[indices[0]][indices[1]];
2273  }
2274 
2275  case 4:
2276  {
2277  constexpr unsigned int table[4][4] = {{0, 4, 5, 6},
2278  {4, 1, 7, 8},
2279  {5, 7, 2, 9},
2280  {6, 8, 9, 3}};
2281  return table[indices[0]][indices[1]];
2282  }
2283 
2284  default:
2285  // for the remainder, manually figure out the numbering
2286  {
2287  if (indices[0] == indices[1])
2288  return indices[0];
2289 
2290  TableIndices<2> sorted_indices(indices);
2291  sorted_indices.sort();
2292 
2293  for (unsigned int d = 0, c = 0; d < dim; ++d)
2294  for (unsigned int e = d + 1; e < dim; ++e, ++c)
2295  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
2296  return dim + c;
2297 
2298  // should never get here:
2299  Assert(false, ExcInternalError());
2300  return 0;
2301  }
2302  }
2303  }
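  // For example (a sketch for dim==3): the table above maps
  //
  //   (0,0)->0, (1,1)->1, (2,2)->2,
  //   (0,1)=(1,0)->3, (0,2)=(2,0)->4, (1,2)=(2,1)->5,
  //
  // i.e. diagonal components come first, followed by the upper-triangular
  // off-diagonal components row by row; the generic default branch
  // reproduces exactly this numbering for larger dim.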
2304 
2305  // a function to do the unrolling from a set of indices to a
2306  // scalar index into the array in which we store the elements of
2307  // a symmetric tensor
2308  //
2309  // this function is for tensors of ranks not already handled
2310  // above
2311  template <int dim, int rank_>
2312  DEAL_II_CONSTEXPR inline unsigned int
2313  component_to_unrolled_index(const TableIndices<rank_> &indices)
2314  {
2315  (void)indices;
2316  Assert(false, ExcNotImplemented());
2317  return numbers::invalid_unsigned_int;
2318  }
2319  } // namespace SymmetricTensorImplementation
2320 } // namespace internal
2321 
2322 
2323 template <int rank_, int dim, typename Number>
2324 constexpr unsigned int
2325 SymmetricTensor<rank_, dim, Number>::component_to_unrolled_index(
2326  const TableIndices<rank_> &indices)
2327 {
2328  return internal::SymmetricTensorImplementation::component_to_unrolled_index<
2329  dim>(indices);
2330 }
2331 
2332 
2333 
2334 namespace internal
2335 {
2336  namespace SymmetricTensorImplementation
2337  {
2338  // a function to do the inverse of the unrolling from a set of
2339  // indices to a scalar index into the array in which we store
2340  // the elements of a symmetric tensor. in other words, it goes
2341  // from the scalar index into the array to a set of indices of
2342  // the tensor
2343  //
2344  // this function is for rank-2 tensors
2345  template <int dim>
2346  DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE TableIndices<2>
2347  unrolled_to_component_indices(const unsigned int i,
2348  const std::integral_constant<int, 2> &)
2349  {
2350  Assert(
2351  (i < ::SymmetricTensor<2, dim, double>::n_independent_components),
2352  ExcIndexRange(
2353  i,
2354  0,
2355  ::SymmetricTensor<2, dim, double>::n_independent_components));
2356  switch (dim)
2357  {
2358  case 1:
2359  {
2360  return {0, 0};
2361  }
2362 
2363  case 2:
2364  {
2365  const TableIndices<2> table[3] = {TableIndices<2>(0, 0),
2366  TableIndices<2>(1, 1),
2367  TableIndices<2>(0, 1)};
2368  return table[i];
2369  }
2370 
2371  case 3:
2372  {
2373  const TableIndices<2> table[6] = {TableIndices<2>(0, 0),
2374  TableIndices<2>(1, 1),
2375  TableIndices<2>(2, 2),
2376  TableIndices<2>(0, 1),
2377  TableIndices<2>(0, 2),
2378  TableIndices<2>(1, 2)};
2379  return table[i];
2380  }
2381 
2382  default:
2383  if (i < dim)
2384  return {i, i};
2385 
2386  for (unsigned int d = 0, c = 0; d < dim; ++d)
2387  for (unsigned int e = d + 1; e < dim; ++e, ++c)
2388  if (c == i)
2389  return {d, e};
2390 
2391  // should never get here:
2392  Assert(false, ExcInternalError());
2393  return {0, 0};
2394  }
2395  }
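  // This is the inverse of component_to_unrolled_index() above: for any
  // unrolled index i < (dim*dim + dim)/2 one gets back the sorted index
  // pair, e.g. in 3d index 5 maps to (1,2) and (1,2) maps back to 5 (a
  // sketch; the SymmetricTensor member functions further below simply
  // forward to these helpers).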
2396 
2397  // a function to do the inverse of the unrolling from a set of
2398  // indices to a scalar index into the array in which we store
2399  // the elements of a symmetric tensor. in other words, it goes
2400  // from the scalar index into the array to a set of indices of
2401  // the tensor
2402  //
2403  // this function is for tensors of a rank not already handled
2404  // above
2405  template <int dim, int rank_>
2406  DEAL_II_CONSTEXPR inline
2407  typename std::enable_if<rank_ != 2, TableIndices<rank_>>::type
2408  unrolled_to_component_indices(const unsigned int i,
2409  const std::integral_constant<int, rank_> &)
2410  {
2411  (void)i;
2412  Assert(
2413  (i <
2414  ::SymmetricTensor<rank_, dim, double>::n_independent_components),
2415  ExcIndexRange(i,
2416  0,
2417  ::SymmetricTensor<rank_, dim, double>::
2418  n_independent_components));
2419  Assert(false, ExcNotImplemented());
2420  return TableIndices<rank_>();
2421  }
2422 
2423  } // namespace SymmetricTensorImplementation
2424 } // namespace internal
2425 
2426 template <int rank_, int dim, typename Number>
2427 constexpr DEAL_II_ALWAYS_INLINE TableIndices<rank_>
2428 SymmetricTensor<rank_, dim, Number>::unrolled_to_component_indices(
2429  const unsigned int i)
2430 {
2431  return internal::SymmetricTensorImplementation::unrolled_to_component_indices<
2432  dim>(i, std::integral_constant<int, rank_>());
2433 }
2434 
2435 
2436 
2437 template <int rank_, int dim, typename Number>
2438 template <class Archive>
2439 inline void
2440 SymmetricTensor<rank_, dim, Number>::serialize(Archive &ar, const unsigned int)
2441 {
2442  ar &data;
2443 }
2444 
2445 
2446 #endif // DOXYGEN
2447 
2448 /* ----------------- Non-member functions operating on tensors. ------------ */
2449 
2450 
2463 template <int rank_, int dim, typename Number, typename OtherNumber>
2464 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
2465  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2466  operator+(const SymmetricTensor<rank_, dim, Number> & left,
2467  const SymmetricTensor<rank_, dim, OtherNumber> &right)
2468 {
2469  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2470  tmp = left;
2471  tmp += right;
2472  return tmp;
2473 }
2474 
2475 
2488 template <int rank_, int dim, typename Number, typename OtherNumber>
2489 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
2490  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2491  operator-(const SymmetricTensor<rank_, dim, Number> & left,
2492  const SymmetricTensor<rank_, dim, OtherNumber> &right)
2493 {
2494  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2495  tmp = left;
2496  tmp -= right;
2497  return tmp;
2498 }
2499 
2500 
2508 template <int rank_, int dim, typename Number, typename OtherNumber>
2509 constexpr DEAL_II_ALWAYS_INLINE
2510  Tensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2511  operator+(const SymmetricTensor<rank_, dim, Number> &left,
2512  const Tensor<rank_, dim, OtherNumber> & right)
2513 {
2514  return Tensor<rank_, dim, Number>(left) + right;
2515 }
2516 
2517 
2525 template <int rank_, int dim, typename Number, typename OtherNumber>
2526 constexpr DEAL_II_ALWAYS_INLINE
2527  Tensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2528  operator+(const Tensor<rank_, dim, Number> & left,
2529  const SymmetricTensor<rank_, dim, OtherNumber> &right)
2530 {
2531  return left + Tensor<rank_, dim, OtherNumber>(right);
2532 }
2533 
2534 
2542 template <int rank_, int dim, typename Number, typename OtherNumber>
2543 constexpr DEAL_II_ALWAYS_INLINE
2544  Tensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2545  operator-(const SymmetricTensor<rank_, dim, Number> &left,
2546  const Tensor<rank_, dim, OtherNumber> & right)
2547 {
2548  return Tensor<rank_, dim, Number>(left) - right;
2549 }
2550 
2551 
2559 template <int rank_, int dim, typename Number, typename OtherNumber>
2560 constexpr DEAL_II_ALWAYS_INLINE
2561  Tensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
2562  operator-(const Tensor<rank_, dim, Number> & left,
2563  const SymmetricTensor<rank_, dim, OtherNumber> &right)
2564 {
2565  return left - Tensor<rank_, dim, OtherNumber>(right);
2566 }
2567 
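// A usage sketch (not part of this header): the mixed operators above return a
// general Tensor, since the sum or difference of a symmetric and a general
// tensor is in general not symmetric:
//
//   SymmetricTensor<2, 3> s = unit_symmetric_tensor<3>();
//   Tensor<2, 3>          t;
//   t[0][1] = 1.;
//   const Tensor<2, 3> u = s + t;
//   // u[0][0] == 1., u[0][1] == 1., u[1][0] == 0.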
2568 
2569 
2583 template <int dim, typename Number>
2584 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number
2585 determinant(const SymmetricTensor<2, dim, Number> &t)
2586 {
2587  switch (dim)
2588  {
2589  case 1:
2590  return t.data[0];
2591  case 2:
2592  return (t.data[0] * t.data[1] - t.data[2] * t.data[2]);
2593  case 3:
2594  {
2595  // in analogy to general tensors, but
2596  // there's something to be simplified for
2597  // the present case
2598  const Number tmp = t.data[3] * t.data[4] * t.data[5];
2599  return (tmp + tmp + t.data[0] * t.data[1] * t.data[2] -
2600  t.data[0] * t.data[5] * t.data[5] -
2601  t.data[1] * t.data[4] * t.data[4] -
2602  t.data[2] * t.data[3] * t.data[3]);
2603  }
2604  default:
2605  Assert(false, ExcNotImplemented());
2606  return internal::NumberType<Number>::value(0.0);
2607  }
2608 }
2609 
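// A usage sketch (not part of this header): in 2d the stored components are
// data[0] = t[0][0], data[1] = t[1][1], data[2] = t[0][1], so the case above
// is the familiar formula det(t) = t[0][0]*t[1][1] - t[0][1]*t[0][1]:
//
//   SymmetricTensor<2, 2> t;
//   t[0][0] = 2.;
//   t[1][1] = 3.;
//   t[0][1] = 1.;
//   const double det = determinant(t);   // 2*3 - 1*1 == 5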
2610 
2611 
2621 template <int dim, typename Number>
2622 constexpr DEAL_II_ALWAYS_INLINE Number
2623 third_invariant(const SymmetricTensor<2, dim, Number> &t)
2624 {
2625  return determinant(t);
2626 }
2627 
2628 
2629 
2637 template <int dim, typename Number>
2638 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE Number
2639 trace(const SymmetricTensor<2, dim, Number> &d)
2640 {
2641  Number t = d.data[0];
2642  for (unsigned int i = 1; i < dim; ++i)
2643  t += d.data[i];
2644  return t;
2645 }
2646 
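// A usage sketch (not part of this header): the trace is the sum of the
// diagonal entries, e.g.
//
//   const SymmetricTensor<2, 3> t = unit_symmetric_tensor<3>();
//   const double tr = trace(t);   // tr == 3.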
2647 
2657 template <int dim, typename Number>
2658 constexpr Number
2659 first_invariant(const SymmetricTensor<2, dim, Number> &t)
2660 {
2661  return trace(t);
2662 }
2663 
2664 
2677 template <typename Number>
2678 constexpr DEAL_II_ALWAYS_INLINE Number
2679 second_invariant(const SymmetricTensor<2, 1, Number> &)
2680 {
2681  return internal::NumberType<Number>::value(0.0);
2682 }
2683 
2684 
2685 
2705 template <typename Number>
2706 constexpr DEAL_II_ALWAYS_INLINE Number
2707 second_invariant(const SymmetricTensor<2, 2, Number> &t)
2708 {
2709  return t[0][0] * t[1][1] - t[0][1] * t[0][1];
2710 }
2711 
2712 
2713 
2723 template <typename Number>
2724 constexpr DEAL_II_ALWAYS_INLINE Number
2725 second_invariant(const SymmetricTensor<2, 3, Number> &t)
2726 {
2727  return (t[0][0] * t[1][1] + t[1][1] * t[2][2] + t[2][2] * t[0][0] -
2728  t[0][1] * t[0][1] - t[0][2] * t[0][2] - t[1][2] * t[1][2]);
2729 }
2730 
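// A usage sketch (not part of this header): the expression above is the sum of
// the three principal 2x2 minors of t; for the identity in 3d one gets
//
//   const SymmetricTensor<2, 3> t = unit_symmetric_tensor<3>();
//   const double II = second_invariant(t);   // 1 + 1 + 1 == 3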
2731 
2732 
2741 template <typename Number>
2742 std::array<Number, 1>
2743 eigenvalues(const SymmetricTensor<2, 1, Number> &T);
2744 
2745 
2746 
2770 template <typename Number>
2771 std::array<Number, 2>
2772 eigenvalues(const SymmetricTensor<2, 2, Number> &T);
2773 
2774 
2775 
2798 template <typename Number>
2799 std::array<Number, 3>
2800 eigenvalues(const SymmetricTensor<2, 3, Number> &T);
2801 
2802 
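// A usage sketch (not part of this header), assuming the eigenvalues are
// returned sorted in descending order:
//
//   SymmetricTensor<2, 3> t;
//   t[0][0] = 1.;
//   t[1][1] = 3.;
//   t[2][2] = 2.;
//   const std::array<double, 3> lambda = eigenvalues(t);
//   // lambda == {3., 2., 1.}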
2803 
2804 namespace internal
2805 {
2806  namespace SymmetricTensorImplementation
2807  {
2847  template <int dim, typename Number>
2848  void
2849  tridiagonalize(const ::SymmetricTensor<2, dim, Number> &A,
2850  ::Tensor<2, dim, Number> & Q,
2851  std::array<Number, dim> & d,
2852  std::array<Number, dim - 1> & e);
2853 
2854 
2855 
2897  template <int dim, typename Number>
2898  std::array<std::pair<Number, Tensor<1, dim, Number>>, dim>
2899  ql_implicit_shifts(const ::SymmetricTensor<2, dim, Number> &A);
2900 
2901 
2902 
2944  template <int dim, typename Number>
2945  std::array<std::pair<Number, Tensor<1, dim, Number>>, dim>
2946  jacobi(::SymmetricTensor<2, dim, Number> A);
2947 
2948 
2949 
2965  template <typename Number>
2966  std::array<std::pair<Number, Tensor<1, 2, Number>>, 2>
2967  hybrid(const ::SymmetricTensor<2, 2, Number> &A);
2968 
2969 
2970 
3005  template <typename Number>
3006  std::array<std::pair<Number, Tensor<1, 3, Number>>, 3>
3007  hybrid(const ::SymmetricTensor<2, 3, Number> &A);
3008 
3013  template <int dim, typename Number>
3014  struct SortEigenValuesVectors
3015  {
3016  using EigValsVecs = std::pair<Number, Tensor<1, dim, Number>>;
3017  bool
3018  operator()(const EigValsVecs &lhs, const EigValsVecs &rhs)
3019  {
3020  return lhs.first > rhs.first;
3021  }
3022  };
3023 
3024  } // namespace SymmetricTensorImplementation
3025 
3026 } // namespace internal
3027 
3028 
3029 
3030 // The line below is to ensure that doxygen puts the full description
3031 // of this global enumeration into the documentation
3032 // See https://stackoverflow.com/a/1717984
3061 enum struct SymmetricTensorEigenvectorMethod
3062 {
3072  hybrid,
  ql_implicit_shifts,
3090  jacobi
3091 };
3092 
3093 
3094 
3124 template <int dim, typename Number>
3125 std::array<std::pair<Number, Tensor<1, dim, Number>>,
3126  std::integral_constant<int, dim>::value>
3127 eigenvectors(const SymmetricTensor<2, dim, Number> &T,
3128  const SymmetricTensorEigenvectorMethod method =
3129  SymmetricTensorEigenvectorMethod::ql_implicit_shifts);
3130 
3131 
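// A usage sketch (not part of this header): eigenvectors() returns
// (eigenvalue, eigenvector) pairs; the comparator above sorts them by
// descending eigenvalue:
//
//   SymmetricTensor<2, 2> t;
//   t[0][0] = 2.;
//   t[1][1] = 1.;
//   const auto eig = eigenvectors(t);
//   // eig[0].first == 2., eig[0].second is parallel to (1, 0)
//   // eig[1].first == 1., eig[1].second is parallel to (0, 1)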
3132 
3142 template <int rank_, int dim, typename Number>
3143 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3144 transpose(const SymmetricTensor<rank_, dim, Number> &t)
3145 {
3146  return t;
3147 }
3148 
3149 
3150 
3160 template <int dim, typename Number>
3161 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3162 deviator(const SymmetricTensor<2, dim, Number> &t)
3163 {
3164  SymmetricTensor<2, dim, Number> tmp = t;
3165 
3166  // subtract scaled trace from the diagonal
3167  const Number tr = trace(t) / dim;
3168  for (unsigned int i = 0; i < dim; ++i)
3169  tmp.data[i] -= tr;
3170 
3171  return tmp;
3172 }
3173 
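// A usage sketch (not part of this header): the deviator removes the
// volumetric part of a tensor, so its trace vanishes:
//
//   SymmetricTensor<2, 3> t;
//   t[0][0] = 4.;
//   t[1][1] = 1.;
//   t[2][2] = 1.;
//   t[0][1] = 2.;
//   const SymmetricTensor<2, 3> dev = deviator(t);
//   // trace(dev) == 0; dev[0][0] == 2., dev[1][1] == dev[2][2] == -1., dev[0][1] == 2.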
3174 
3175 
3183 template <int dim, typename Number>
3184 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3185 unit_symmetric_tensor()
3186 {
3187  // create a default constructed matrix filled with
3188  // zeros, then set the diagonal elements to one
3189  SymmetricTensor<2, dim, Number> tmp;
3190  switch (dim)
3191  {
3192  case 1:
3193  tmp.data[0] = Number(1);
3194  break;
3195  case 2:
3196  tmp.data[0] = tmp.data[1] = Number(1);
3197  break;
3198  case 3:
3199  tmp.data[0] = tmp.data[1] = tmp.data[2] = Number(1);
3200  break;
3201  default:
3202  for (unsigned int d = 0; d < dim; ++d)
3203  tmp.data[d] = Number(1);
3204  }
3205  return tmp;
3206 }
3207 
3208 
3209 
3218 template <int dim>
3219 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim>
3220 unit_symmetric_tensor()
3221 {
3222  return unit_symmetric_tensor<dim, double>();
3223 }
3224 
3225 
3226 
3241 template <int dim, typename Number>
3242 DEAL_II_CONSTEXPR inline SymmetricTensor<4, dim, Number>
3243 deviator_tensor()
3244 {
3245  SymmetricTensor<4, dim, Number> tmp;
3246 
3247  // fill the elements treating the diagonal
3248  for (unsigned int i = 0; i < dim; ++i)
3249  for (unsigned int j = 0; j < dim; ++j)
3250  tmp.data[i][j] = Number((i == j ? 1 : 0) - 1. / dim);
3251 
3252  // then fill the ones that copy over the
3253  // non-diagonal elements. note that during
3254  // the double-contraction, we handle the
3255  // off-diagonal elements twice, so simply
3256  // copying requires a weight of 1/2
3257  for (unsigned int i = dim;
3258  i < internal::SymmetricTensorAccessors::StorageType<4, dim, Number>::
3259  n_rank2_components;
3260  ++i)
3261  tmp.data[i][i] = Number(0.5);
3262 
3263  return tmp;
3264 }
3265 
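// A usage sketch (not part of this header): double-contracting the rank-4
// tensor returned here with a symmetric rank-2 tensor is expected to give the
// same result as calling deviator() directly:
//
//   SymmetricTensor<2, 3> t;
//   t[0][0] = 4.;
//   t[1][1] = 1.;
//   t[2][2] = 1.;
//   const SymmetricTensor<2, 3> dev1 = deviator_tensor<3>() * t;
//   const SymmetricTensor<2, 3> dev2 = deviator(t);
//   // dev1 == dev2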
3266 
3267 
3282 template <int dim>
3283 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim>
3284 deviator_tensor()
3285 {
3286  return deviator_tensor<dim, double>();
3287 }
3288 
3289 
3290 
3313 template <int dim, typename Number>
3314 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim, Number>
3315 identity_tensor()
3316 {
3317  SymmetricTensor<4, dim, Number> tmp;
3318 
3319  // fill the elements treating the diagonal
3320  for (unsigned int i = 0; i < dim; ++i)
3321  tmp.data[i][i] = Number(1);
3322 
3323  // then fill the ones that copy over the
3324  // non-diagonal elements. note that during
3325  // the double-contraction, we handle the
3326  // off-diagonal elements twice, so simply
3327  // copying requires a weight of 1/2
3328  for (unsigned int i = dim;
3329  i < internal::SymmetricTensorAccessors::StorageType<4, dim, Number>::
3330  n_rank2_components;
3331  ++i)
3332  tmp.data[i][i] = Number(0.5);
3333 
3334  return tmp;
3335 }
3336 
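// A usage sketch (not part of this header): the rank-4 identity tensor maps
// every symmetric rank-2 tensor onto itself under double contraction:
//
//   SymmetricTensor<2, 3> t;
//   t[0][0] = 1.;
//   t[0][2] = 2.;
//   const SymmetricTensor<2, 3> u = identity_tensor<3>() * t;
//   // u == t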
3337 
3338 
3360 template <int dim>
3361 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<4, dim>
3362 identity_tensor()
3363 {
3364  return identity_tensor<dim, double>();
3365 }
3366 
3367 
3368 
3379 template <int dim, typename Number>
3380 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3381 invert(const SymmetricTensor<2, dim, Number> &t)
3382 {
3383  return internal::SymmetricTensorImplementation::Inverse<2, dim, Number>::
3384  value(t);
3385 }
3386 
3387 
3388 
3400 template <int dim, typename Number>
3401 constexpr SymmetricTensor<4, dim, Number>
3402 invert(const SymmetricTensor<4, dim, Number> &t)
3403 {
3404  return internal::SymmetricTensorImplementation::Inverse<4, dim, Number>::
3405  value(t);
3406 }
3407 
3408 
3409 
3424 template <int dim, typename Number>
3425 DEAL_II_CONSTEXPR inline SymmetricTensor<4, dim, Number>
3426 outer_product(const SymmetricTensor<2, dim, Number> &t1,
3427  const SymmetricTensor<2, dim, Number> &t2)
3428 {
3429  SymmetricTensor<4, dim, Number> tmp;
3430 
3431  // fill only the elements really needed
3432  for (unsigned int i = 0; i < dim; ++i)
3433  for (unsigned int j = i; j < dim; ++j)
3434  for (unsigned int k = 0; k < dim; ++k)
3435  for (unsigned int l = k; l < dim; ++l)
3436  tmp[i][j][k][l] = t1[i][j] * t2[k][l];
3437 
3438  return tmp;
3439 }
3440 
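// A usage sketch (not part of this header): the outer product of the unit
// tensor with itself has components delta_ij * delta_kl, so contracting it
// with any symmetric tensor t gives trace(t) times the unit tensor:
//
//   const SymmetricTensor<2, 3> I   = unit_symmetric_tensor<3>();
//   const SymmetricTensor<4, 3> IxI = outer_product(I, I);
//   SymmetricTensor<2, 3> t;
//   t[0][0] = 2.;
//   t[1][1] = 3.;
//   const SymmetricTensor<2, 3> u = IxI * t;   // u == 5. * I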
3441 
3442 
3451 template <int dim, typename Number>
3452 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3453 symmetrize(const Tensor<2, dim, Number> &t)
3454 {
3455  SymmetricTensor<2, dim, Number> result;
3456  for (unsigned int d = 0; d < dim; ++d)
3457  result[d][d] = t[d][d];
3458  Number half = 0.5;
3459  for (unsigned int d = 0; d < dim; ++d)
3460  for (unsigned int e = d + 1; e < dim; ++e)
3461  result[d][e] = (t[d][e] + t[e][d]) * half;
3462  return result;
3463 }
3464 
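// A usage sketch (not part of this header): symmetrize() returns the symmetric
// part (t + transpose(t)) / 2 of a general rank-2 tensor:
//
//   Tensor<2, 2> t;
//   t[0][1] = 1.;   // t is not symmetric: t[1][0] == 0.
//   const SymmetricTensor<2, 2> s = symmetrize(t);
//   // s[0][1] == s[1][0] == 0.5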
3465 
3466 
3474 template <int rank_, int dim, typename Number>
3475 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
3476  SymmetricTensor<rank_, dim, Number>
3477  operator*(const SymmetricTensor<rank_, dim, Number> &t, const Number &factor)
3478 {
3479  SymmetricTensor<rank_, dim, Number> tt = t;
3480  tt *= factor;
3481  return tt;
3482 }
3483 
3484 
3485 
3493 template <int rank_, int dim, typename Number>
3494 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3495  operator*(const Number &factor, const SymmetricTensor<rank_, dim, Number> &t)
3496 {
3497  // simply forward to the other operator
3498  return t * factor;
3499 }
3500 
3501 
3502 
3528 template <int rank_, int dim, typename Number, typename OtherNumber>
3529 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<
3530  rank_,
3531  dim,
3532  typename ProductType<Number,
3533  typename EnableIfScalar<OtherNumber>::type>::type>
3534 operator*(const SymmetricTensor<rank_, dim, Number> &t,
3535  const OtherNumber & factor)
3536 {
3537  // form the product. we have to convert the two factors into the final
3538  // type via explicit casts because, for awkward reasons, the C++
3539  // standard committee saw it fit to not define an
3540  // operator*(float,std::complex<double>)
3541  // (as well as with switched arguments and double<->float).
3542  using product_type = typename ProductType<Number, OtherNumber>::type;
3543  SymmetricTensor<rank_, dim, product_type> tt(t);
3544  tt *= product_type(factor);
3545  return tt;
3546 }
3547 
3548 
3549 
3558 template <int rank_, int dim, typename Number, typename OtherNumber>
3559 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<
3560  rank_,
3561  dim,
3562  typename ProductType<OtherNumber,
3563  typename EnableIfScalar<Number>::type>::type>
3564 operator*(const Number & factor,
3565  const SymmetricTensor<rank_, dim, OtherNumber> &t)
3566 {
3567  // simply forward to the other operator with switched arguments
3568  return (t * factor);
3569 }
3570 
3571 
3572 
3578 template <int rank_, int dim, typename Number, typename OtherNumber>
3579 DEAL_II_CONSTEXPR inline SymmetricTensor<
3580  rank_,
3581  dim,
3582  typename ProductType<Number,
3583  typename EnableIfScalar<OtherNumber>::type>::type>
3584 operator/(const SymmetricTensor<rank_, dim, Number> &t,
3585  const OtherNumber & factor)
3586 {
3587  SymmetricTensor<rank_, dim, typename ProductType<Number, OtherNumber>::type>
3588  tt = t;
3589  tt /= factor;
3590  return tt;
3591 }
3592 
3593 
3594 
3601 template <int rank_, int dim>
3602 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim>
3603  operator*(const SymmetricTensor<rank_, dim> &t, const double factor)
3604 {
3605  SymmetricTensor<rank_, dim> tt = t;
3606  tt *= factor;
3607  return tt;
3608 }
3609 
3610 
3611 
3618 template <int rank_, int dim>
3619 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim>
3620  operator*(const double factor, const SymmetricTensor<rank_, dim> &t)
3621 {
3622  SymmetricTensor<rank_, dim> tt = t;
3623  tt *= factor;
3624  return tt;
3625 }
3626 
3627 
3628 
3634 template <int rank_, int dim>
3635 DEAL_II_CONSTEXPR inline SymmetricTensor<rank_, dim>
3636 operator/(const SymmetricTensor<rank_, dim> &t, const double factor)
3637 {
3638  SymmetricTensor<rank_, dim> tt = t;
3639  tt /= factor;
3640  return tt;
3641 }
3642 
3652 template <int dim, typename Number, typename OtherNumber>
3653 constexpr DEAL_II_ALWAYS_INLINE typename ProductType<Number, OtherNumber>::type
3654 scalar_product(const SymmetricTensor<2, dim, Number> & t1,
3655  const SymmetricTensor<2, dim, OtherNumber> &t2)
3656 {
3657  return (t1 * t2);
3658 }
3659 
3660 
3670 template <int dim, typename Number, typename OtherNumber>
3671 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
3672  typename ProductType<Number, OtherNumber>::type
3673  scalar_product(const SymmetricTensor<2, dim, Number> &t1,
3674  const Tensor<2, dim, OtherNumber> & t2)
3675 {
3676  typename ProductType<Number, OtherNumber>::type s = internal::NumberType<
3677  typename ProductType<Number, OtherNumber>::type>::value(0.0);
3678  for (unsigned int i = 0; i < dim; ++i)
3679  for (unsigned int j = 0; j < dim; ++j)
3680  s += t1[i][j] * t2[i][j];
3681  return s;
3682 }
3683 
3684 
3694 template <int dim, typename Number, typename OtherNumber>
3695 constexpr DEAL_II_ALWAYS_INLINE typename ProductType<Number, OtherNumber>::type
3696 scalar_product(const Tensor<2, dim, Number> & t1,
3697  const SymmetricTensor<2, dim, OtherNumber> &t2)
3698 {
3699  return scalar_product(t2, t1);
3700 }
3701 
3702 
3718 template <typename Number, typename OtherNumber>
3719 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE void double_contract(
3720  SymmetricTensor<2, 1, typename ProductType<Number, OtherNumber>::type> &tmp,
3721  const SymmetricTensor<4, 1, Number> & t,
3722  const SymmetricTensor<2, 1, OtherNumber> & s)
3723 {
3724  tmp[0][0] = t[0][0][0][0] * s[0][0];
3725 }
3726 
3727 
3728 
3744 template <typename Number, typename OtherNumber>
3745 DEAL_II_CONSTEXPR inline void double_contract(
3746  SymmetricTensor<2, 1, typename ProductType<Number, OtherNumber>::type> &tmp,
3747  const SymmetricTensor<2, 1, Number> & s,
3748  const SymmetricTensor<4, 1, OtherNumber> & t)
3749 {
3750  tmp[0][0] = t[0][0][0][0] * s[0][0];
3751 }
3752 
3753 
3754 
3769 template <typename Number, typename OtherNumber>
3770 DEAL_II_CONSTEXPR inline void double_contract(
3771  SymmetricTensor<2, 2, typename ProductType<Number, OtherNumber>::type> &tmp,
3772  const SymmetricTensor<4, 2, Number> & t,
3773  const SymmetricTensor<2, 2, OtherNumber> & s)
3774 {
3775  const unsigned int dim = 2;
3776 
3777  for (unsigned int i = 0; i < dim; ++i)
3778  for (unsigned int j = i; j < dim; ++j)
3779  tmp[i][j] = t[i][j][0][0] * s[0][0] + t[i][j][1][1] * s[1][1] +
3780  2 * t[i][j][0][1] * s[0][1];
3781 }
3782 
3783 
3784 
3800 template <typename Number, typename OtherNumber>
3801 DEAL_II_CONSTEXPR inline void double_contract(
3802  SymmetricTensor<2, 2, typename ProductType<Number, OtherNumber>::type> &tmp,
3803  const SymmetricTensor<2, 2, Number> & s,
3804  const SymmetricTensor<4, 2, OtherNumber> & t)
3805 {
3806  const unsigned int dim = 2;
3807 
3808  for (unsigned int i = 0; i < dim; ++i)
3809  for (unsigned int j = i; j < dim; ++j)
3810  tmp[i][j] = s[0][0] * t[0][0][i][j] + s[1][1] * t[1][1][i][j] +
3811  2 * s[0][1] * t[0][1][i][j];
3812 }
3813 
3814 
3815 
3831 template <typename Number, typename OtherNumber>
3832 DEAL_II_CONSTEXPR inline void double_contract(
3833  SymmetricTensor<2, 3, typename ProductType<Number, OtherNumber>::type> &tmp,
3834  const SymmetricTensor<4, 3, Number> & t,
3835  const SymmetricTensor<2, 3, OtherNumber> & s)
3836 {
3837  const unsigned int dim = 3;
3838 
3839  for (unsigned int i = 0; i < dim; ++i)
3840  for (unsigned int j = i; j < dim; ++j)
3841  tmp[i][j] = t[i][j][0][0] * s[0][0] + t[i][j][1][1] * s[1][1] +
3842  t[i][j][2][2] * s[2][2] + 2 * t[i][j][0][1] * s[0][1] +
3843  2 * t[i][j][0][2] * s[0][2] + 2 * t[i][j][1][2] * s[1][2];
3844 }
3845 
3846 
3847 
3863 template <typename Number, typename OtherNumber>
3864 DEAL_II_CONSTEXPR inline void double_contract(
3865  SymmetricTensor<2, 3, typename ProductType<Number, OtherNumber>::type> &tmp,
3866  const SymmetricTensor<2, 3, Number> & s,
3867  const SymmetricTensor<4, 3, OtherNumber> & t)
3868 {
3869  const unsigned int dim = 3;
3870 
3871  for (unsigned int i = 0; i < dim; ++i)
3872  for (unsigned int j = i; j < dim; ++j)
3873  tmp[i][j] = s[0][0] * t[0][0][i][j] + s[1][1] * t[1][1][i][j] +
3874  s[2][2] * t[2][2][i][j] + 2 * s[0][1] * t[0][1][i][j] +
3875  2 * s[0][2] * t[0][2][i][j] + 2 * s[1][2] * t[1][2][i][j];
3876 }
3877 
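// A usage sketch (not part of this header): double_contract() writes the
// double contraction of a rank-4 and a rank-2 symmetric tensor into its first
// argument; with the rank-4 identity tensor the input is reproduced:
//
//   const SymmetricTensor<4, 3> C = identity_tensor<3>();
//   SymmetricTensor<2, 3> eps;
//   eps[0][0] = 1.;
//   eps[0][1] = 0.5;
//   SymmetricTensor<2, 3> sigma;
//   double_contract(sigma, C, eps);   // sigma == eps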
3878 
3879 
3887 template <int dim, typename Number, typename OtherNumber>
3888 DEAL_II_CONSTEXPR
3889  Tensor<1, dim, typename ProductType<Number, OtherNumber>::type>
3890  operator*(const SymmetricTensor<2, dim, Number> &src1,
3891  const Tensor<1, dim, OtherNumber> & src2)
3892 {
3893  Tensor<1, dim, typename ProductType<Number, OtherNumber>::type> dest;
3894  for (unsigned int i = 0; i < dim; ++i)
3895  for (unsigned int j = 0; j < dim; ++j)
3896  dest[i] += src1[i][j] * src2[j];
3897  return dest;
3898 }
3899 
3900 
3908 template <int dim, typename Number, typename OtherNumber>
3909 DEAL_II_CONSTEXPR inline DEAL_II_ALWAYS_INLINE
3910  Tensor<1, dim, typename ProductType<Number, OtherNumber>::type>
3911  operator*(const Tensor<1, dim, Number> &src1, const SymmetricTensor<2, dim, OtherNumber> &src2)
3912 {
3913  // this is easy for symmetric tensors:
3914  return src2 * src1;
3915 }
3916 
3917 
3918 
3939 template <int rank_1,
3940  int rank_2,
3941  int dim,
3942  typename Number,
3943  typename OtherNumber>
3944 constexpr DEAL_II_ALWAYS_INLINE
3945  typename Tensor<rank_1 + rank_2 - 2,
3946  dim,
3947  typename ProductType<Number, OtherNumber>::type>::tensor_type
3948  operator*(const Tensor<rank_1, dim, Number> & src1,
3949  const SymmetricTensor<rank_2, dim, OtherNumber> &src2)
3950 {
3951  return src1 * Tensor<rank_2, dim, OtherNumber>(src2);
3952 }
3953 
3954 
3955 
3976 template <int rank_1,
3977  int rank_2,
3978  int dim,
3979  typename Number,
3980  typename OtherNumber>
3981 constexpr DEAL_II_ALWAYS_INLINE
3982  typename Tensor<rank_1 + rank_2 - 2,
3983  dim,
3984  typename ProductType<Number, OtherNumber>::type>::tensor_type
3985  operator*(const SymmetricTensor<rank_1, dim, Number> &src1,
3986  const Tensor<rank_2, dim, OtherNumber> & src2)
3987 {
3988  return Tensor<rank_1, dim, Number>(src1) * src2;
3989 }
3990 
3991 
3992 
4002 template <int dim, typename Number>
4003 inline std::ostream &
4004 operator<<(std::ostream &out, const SymmetricTensor<2, dim, Number> &t)
4005 {
4006  // make our lives a bit simpler by outputting
4007  // the tensor through the operator for the
4008  // general Tensor class
4009  Tensor<2, dim, Number> tt;
4010 
4011  for (unsigned int i = 0; i < dim; ++i)
4012  for (unsigned int j = 0; j < dim; ++j)
4013  tt[i][j] = t[i][j];
4014 
4015  return out << tt;
4016 }
4017 
4018 
4019 
4029 template <int dim, typename Number>
4030 inline std::ostream &
4031 operator<<(std::ostream &out, const SymmetricTensor<4, dim, Number> &t)
4032 {
4033  // make our lives a bit simpler by outputting
4034  // the tensor through the operator for the
4035  // general Tensor class
4036  Tensor<4, dim, Number> tt;
4037 
4038  for (unsigned int i = 0; i < dim; ++i)
4039  for (unsigned int j = 0; j < dim; ++j)
4040  for (unsigned int k = 0; k < dim; ++k)
4041  for (unsigned int l = 0; l < dim; ++l)
4042  tt[i][j][k][l] = t[i][j][k][l];
4043 
4044  return out << tt;
4045 }
4046 
4047 
4048 DEAL_II_NAMESPACE_CLOSE
4049 
4050 #endif