Reference documentation for deal.II version Git 9e557027ad 2021-09-25 18:07:42 +0200
symmetric_tensor.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_symmetric_tensor_h
17 #define dealii_symmetric_tensor_h
18 
19 
20 #include <deal.II/base/config.h>
21 
22 #include <deal.II/base/numbers.h>
25 #include <deal.II/base/tensor.h>
26 
27 #include <algorithm>
28 #include <array>
29 #include <functional>
30 
32 
33 // Forward declaration
34 #ifndef DOXYGEN
35 template <int rank, int dim, typename Number = double>
36 class SymmetricTensor;
37 #endif
38 
45 template <int dim, typename Number = double>
48 
77 template <int dim, typename Number = double>
80 
118 template <int dim, typename Number = double>
120  identity_tensor();
121 
122 template <int dim, typename Number>
125 
126 template <int dim, typename Number>
129 
130 template <int dim2, typename Number>
131 constexpr inline DEAL_II_ALWAYS_INLINE Number
133 
134 template <int dim, typename Number>
137 
138 template <int dim, typename Number>
141 
142 
143 
144 namespace internal
145 {
146  // Workaround: The following 4 overloads are necessary to be able to
147  // compile the library with Apple Clang 8 and older. We should remove
148  // these overloads again when we bump the minimal required version to
149  // something later than clang-3.6 / Apple Clang 6.3.
150  template <int rank, int dim, typename T, typename U>
151  struct ProductTypeImpl<SymmetricTensor<rank, dim, T>, std::complex<U>>
152  {
153  using type =
154  SymmetricTensor<rank,
155  dim,
156  std::complex<typename ProductType<T, U>::type>>;
157  };
158 
159  template <int rank, int dim, typename T, typename U>
160  struct ProductTypeImpl<SymmetricTensor<rank, dim, std::complex<T>>,
161  std::complex<U>>
162  {
163  using type =
164  SymmetricTensor<rank,
165  dim,
166  std::complex<typename ProductType<T, U>::type>>;
167  };
168 
169  template <typename T, int rank, int dim, typename U>
170  struct ProductTypeImpl<std::complex<T>, SymmetricTensor<rank, dim, U>>
171  {
172  using type =
173  SymmetricTensor<rank,
174  dim,
175  std::complex<typename ProductType<T, U>::type>>;
176  };
177 
178  template <int rank, int dim, typename T, typename U>
179  struct ProductTypeImpl<std::complex<T>,
180  SymmetricTensor<rank, dim, std::complex<U>>>
181  {
182  using type =
183  SymmetricTensor<rank,
184  dim,
185  std::complex<typename ProductType<T, U>::type>>;
186  };
187  // end workaround
188 
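The effect of these ProductTypeImpl specializations can be verified at compile time. A minimal sketch, assuming the usual deal.II headers are available (the test itself is illustrative):

  #include <deal.II/base/symmetric_tensor.h>
  #include <deal.II/base/template_constraints.h>
  #include <complex>
  #include <type_traits>
  using namespace dealii;

  // The product of a real-valued rank-2 symmetric tensor with a complex
  // scalar is a complex-valued symmetric tensor of the same rank and dim.
  static_assert(
    std::is_same<ProductType<SymmetricTensor<2, 3, double>,
                             std::complex<double>>::type,
                 SymmetricTensor<2, 3, std::complex<double>>>::value,
    "the scalar type is promoted to std::complex<double>");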
193  namespace SymmetricTensorImplementation
194  {
199  template <int rank, int dim, typename Number>
200  struct Inverse;
201  } // namespace SymmetricTensorImplementation
202 
207  namespace SymmetricTensorAccessors
208  {
216  merge(const TableIndices<2> &previous_indices,
217  const unsigned int new_index,
218  const unsigned int position)
219  {
220  AssertIndexRange(position, 2);
221 
222  if (position == 0)
223  return {new_index, numbers::invalid_unsigned_int};
224  else
225  return {previous_indices[0], new_index};
226  }
227 
228 
229 
237  merge(const TableIndices<4> &previous_indices,
238  const unsigned int new_index,
239  const unsigned int position)
240  {
241  AssertIndexRange(position, 4);
242 
243  switch (position)
244  {
245  case 0:
246  return {new_index,
249  numbers::invalid_unsigned_int};
250  case 1:
251  return {previous_indices[0],
252  new_index,
254  numbers::invalid_unsigned_int};
255  case 2:
256  return {previous_indices[0],
257  previous_indices[1],
258  new_index,
259  numbers::invalid_unsigned_int};
260  case 3:
261  return {previous_indices[0],
262  previous_indices[1],
263  previous_indices[2],
264  new_index};
265  default:
266  Assert(false, ExcInternalError());
267  return {};
268  }
269  }
270 
271 
278  template <int rank1,
279  int rank2,
280  int dim,
281  typename Number,
282  typename OtherNumber = Number>
284  {
286  using type =
287  ::SymmetricTensor<rank1 + rank2 - 4, dim, value_type>;
288  };
289 
290 
297  template <int dim, typename Number, typename OtherNumber>
298  struct double_contraction_result<2, 2, dim, Number, OtherNumber>
299  {
301  };
302 
303 
304 
317  template <int rank, int dim, typename Number>
318  struct StorageType;
319 
323  template <int dim, typename Number>
324  struct StorageType<2, dim, Number>
325  {
330  static const unsigned int n_independent_components =
331  (dim * dim + dim) / 2;
332 
337  };
338 
339 
340 
344  template <int dim, typename Number>
345  struct StorageType<4, dim, Number>
346  {
352  static const unsigned int n_rank2_components = (dim * dim + dim) / 2;
353 
357  static const unsigned int n_independent_components =
358  (n_rank2_components *
360 
368  };
369 
370 
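The same counts are exposed through the public SymmetricTensor interface; a minimal compile-time sketch of the (dim*dim + dim)/2 formula above:

  #include <deal.II/base/symmetric_tensor.h>
  using namespace dealii;

  // A rank-2 symmetric tensor stores the diagonal plus the upper triangle:
  // (dim*dim + dim)/2 entries, i.e. 1, 3, and 6 for dim = 1, 2, 3.
  static_assert(SymmetricTensor<2, 1>::n_independent_components == 1, "");
  static_assert(SymmetricTensor<2, 2>::n_independent_components == 3, "");
  static_assert(SymmetricTensor<2, 3>::n_independent_components == 6, "");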
371 
376  template <int rank, int dim, bool constness, typename Number>
378 
385  template <int rank, int dim, typename Number>
386  struct AccessorTypes<rank, dim, true, Number>
387  {
388  using tensor_type = const ::SymmetricTensor<rank, dim, Number>;
389 
390  using reference = Number;
391  };
392 
399  template <int rank, int dim, typename Number>
400  struct AccessorTypes<rank, dim, false, Number>
401  {
403 
404  using reference = Number &;
405  };
406 
407 
440  template <int rank, int dim, bool constness, int P, typename Number>
441  class Accessor
442  {
443  public:
447  using reference =
449  using tensor_type =
451 
452  private:
471  constexpr Accessor(tensor_type & tensor,
472  const TableIndices<rank> &previous_indices);
473 
477  constexpr DEAL_II_ALWAYS_INLINE
478  Accessor(const Accessor &) = default;
479 
480  public:
484  constexpr Accessor<rank, dim, constness, P - 1, Number>
485  operator[](const unsigned int i);
486 
490  constexpr Accessor<rank, dim, constness, P - 1, Number>
491  operator[](const unsigned int i) const;
492 
493  private:
499 
500  // Declare some other classes as friends. Make sure to work around bugs
501  // in some compilers:
502  template <int, int, typename>
503  friend class ::SymmetricTensor;
504  template <int, int, bool, int, typename>
505  friend class Accessor;
506  friend class ::SymmetricTensor<rank, dim, Number>;
507  friend class Accessor<rank, dim, constness, P + 1, Number>;
508  };
509 
510 
511 
519  template <int rank, int dim, bool constness, typename Number>
520  class Accessor<rank, dim, constness, 1, Number>
521  {
522  public:
526  using reference =
528  using tensor_type =
530 
531  private:
553  constexpr Accessor(tensor_type & tensor,
554  const TableIndices<rank> &previous_indices);
555 
559  constexpr DEAL_II_ALWAYS_INLINE
560  Accessor(const Accessor &) = default;
561 
562  public:
566  constexpr reference
567  operator[](const unsigned int);
568 
572  constexpr reference
573  operator[](const unsigned int) const;
574 
575  private:
581 
582  // Declare some other classes as friends. Make sure to work around bugs
583  // in some compilers:
584  template <int, int, typename>
585  friend class ::SymmetricTensor;
586  template <int, int, bool, int, typename>
588  friend class ::SymmetricTensor<rank, dim, Number>;
589  friend class SymmetricTensorAccessors::
590  Accessor<rank, dim, constness, 2, Number>;
591  };
592  } // namespace SymmetricTensorAccessors
593 } // namespace internal
594 
595 
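Chained operator[] calls walk through one Accessor object per index and end up at the same stored entry as a call with a complete TableIndices object. A minimal usage sketch, assuming the deal.II headers are available (names and values are illustrative):

  #include <deal.II/base/symmetric_tensor.h>
  #include <deal.II/base/table_indices.h>
  using namespace dealii;

  void accessor_example()
  {
    SymmetricTensor<2, 3> t;
    t[1][2] = 3.14;                   // chained Accessor objects
    t(TableIndices<2>(2, 1)) = 2.72;  // same entry; index order is irrelevant

    // Because the tensor is symmetric, both index orders refer to one
    // stored value, so the second assignment overwrites the first:
    // t[1][2] == t[2][1] == 2.72 here.
  }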
596 
669 template <int rank_, int dim, typename Number>
671 {
672 public:
673  static_assert(rank_ % 2 == 0, "A SymmetricTensor must have even rank!");
674 
683  static const unsigned int dimension = dim;
684 
688  static const unsigned int rank = rank_;
689 
695  static constexpr unsigned int n_independent_components =
697  n_independent_components;
698 
702  constexpr DEAL_II_ALWAYS_INLINE
703  SymmetricTensor() = default;
704 
718  template <typename OtherNumber>
719  explicit SymmetricTensor(const Tensor<2, dim, OtherNumber> &t);
720 
736  constexpr SymmetricTensor(const Number (&array)[n_independent_components]);
737 
743  template <typename OtherNumber>
744  constexpr explicit SymmetricTensor(
745  const SymmetricTensor<rank_, dim, OtherNumber> &initializer);
746 
755  DEAL_II_DEPRECATED_EARLY
756  Number *
757  begin_raw();
758 
767  DEAL_II_DEPRECATED_EARLY
768  const Number *
769  begin_raw() const;
770 
779  DEAL_II_DEPRECATED_EARLY
780  Number *
781  end_raw();
782 
792  DEAL_II_DEPRECATED_EARLY
793  const Number *
794  end_raw() const;
795 
802  template <typename OtherNumber>
803  constexpr SymmetricTensor &
804  operator=(const SymmetricTensor<rank_, dim, OtherNumber> &rhs);
805 
812  constexpr SymmetricTensor &
813  operator=(const Number &d);
814 
819  constexpr operator Tensor<rank_, dim, Number>() const;
820 
824  constexpr bool
825  operator==(const SymmetricTensor &) const;
826 
830  constexpr bool
831  operator!=(const SymmetricTensor &) const;
832 
836  template <typename OtherNumber>
837  constexpr SymmetricTensor &
838  operator+=(const SymmetricTensor<rank_, dim, OtherNumber> &);
839 
843  template <typename OtherNumber>
844  constexpr SymmetricTensor &
845  operator-=(const SymmetricTensor<rank_, dim, OtherNumber> &);
846 
851  template <typename OtherNumber>
852  constexpr SymmetricTensor &
853  operator*=(const OtherNumber &factor);
854 
858  template <typename OtherNumber>
859  constexpr SymmetricTensor &
860  operator/=(const OtherNumber &factor);
861 
865  constexpr SymmetricTensor
866  operator-() const;
867 
894  template <typename OtherNumber>
898 
903  template <typename OtherNumber>
907 
911  constexpr Number &
912  operator()(const TableIndices<rank_> &indices);
913 
917  constexpr const Number &
918  operator()(const TableIndices<rank_> &indices) const;
919 
924  constexpr internal::SymmetricTensorAccessors::
925  Accessor<rank_, dim, true, rank_ - 1, Number>
926  operator[](const unsigned int row) const;
927 
932  constexpr internal::SymmetricTensorAccessors::
933  Accessor<rank_, dim, false, rank_ - 1, Number>
934  operator[](const unsigned int row);
935 
941  constexpr const Number &
942  operator[](const TableIndices<rank_> &indices) const;
943 
949  constexpr Number &
950  operator[](const TableIndices<rank_> &indices);
951 
958  constexpr const Number &
959  access_raw_entry(const unsigned int unrolled_index) const;
960 
967  constexpr Number &
968  access_raw_entry(const unsigned int unrolled_index);
969 
980  norm() const;
981 
989  static constexpr unsigned int
990  component_to_unrolled_index(const TableIndices<rank_> &indices);
991 
997  static constexpr TableIndices<rank_>
998  unrolled_to_component_indices(const unsigned int i);
999 
1012  constexpr void
1013  clear();
1014 
1019  static constexpr std::size_t
1021 
1027  template <class Archive>
1028  void
1029  serialize(Archive &ar, const unsigned int version);
1030 
1031 private:
1035  using base_tensor_descriptor =
1037 
1041  using base_tensor_type = typename base_tensor_descriptor::base_tensor_type;
1042 
1047 
1048  // Make all other symmetric tensors friends.
1049  template <int, int, typename>
1050  friend class SymmetricTensor;
1051 
1052  // Make a few more functions friends.
1053  template <int dim2, typename Number2>
1054  friend constexpr Number2
1056 
1057  template <int dim2, typename Number2>
1058  friend DEAL_II_CONSTEXPR Number2
1060 
1061  template <int dim2, typename Number2>
1062  friend constexpr SymmetricTensor<2, dim2, Number2>
1064 
1065  template <int dim2, typename Number2>
1068 
1069  template <int dim2, typename Number2>
1071  deviator_tensor();
1072 
1073  template <int dim2, typename Number2>
1075  identity_tensor();
1076 
1077 
1078  // Make a few helper classes friends as well.
1080  Inverse<2, dim, Number>;
1081 
1083  Inverse<4, dim, Number>;
1084 };
1085 
1086 
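A short usage sketch of the interface declared above, assuming the deal.II headers are available (the function name and the values are illustrative):

  #include <deal.II/base/symmetric_tensor.h>
  using namespace dealii;

  void basic_usage()
  {
    SymmetricTensor<2, 3> a;   // default constructor; components start at zero
    a[0][0] = 1.;
    a[0][1] = 2.;              // also sets the (1,0) entry

    SymmetricTensor<2, 3> b = a;
    b *= 2.;                   // operator*= with a scalar
    b -= a;                    // now b == a again

    const double frobenius = a.norm();  // sqrt(a : a)
    a = 0.;                    // only assignment from zero is allowed
    (void)frobenius;
    (void)b;
  }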
1087 
1088 // ------------------------- inline functions ------------------------
1089 
1090 #ifndef DOXYGEN
1091 
1092 // provide declarations for static members
1093 template <int rank, int dim, typename Number>
1094 const unsigned int SymmetricTensor<rank, dim, Number>::dimension;
1095 
1096 template <int rank_, int dim, typename Number>
1097 constexpr unsigned int
1098  SymmetricTensor<rank_, dim, Number>::n_independent_components;
1099 
1100 namespace internal
1101 {
1102  namespace SymmetricTensorAccessors
1103  {
1104  template <int rank_, int dim, bool constness, int P, typename Number>
1105  constexpr DEAL_II_ALWAYS_INLINE
1106  Accessor<rank_, dim, constness, P, Number>::Accessor(
1107  tensor_type & tensor,
1108  const TableIndices<rank_> &previous_indices)
1109  : tensor(tensor)
1110  , previous_indices(previous_indices)
1111  {}
1112 
1113 
1114 
1115  template <int rank_, int dim, bool constness, int P, typename Number>
1116  constexpr inline DEAL_II_ALWAYS_INLINE
1117  Accessor<rank_, dim, constness, P - 1, Number>
1118  Accessor<rank_, dim, constness, P, Number>::operator[](
1119  const unsigned int i)
1120  {
1121  return Accessor<rank_, dim, constness, P - 1, Number>(
1122  tensor, merge(previous_indices, i, rank_ - P));
1123  }
1124 
1125 
1126 
1127  template <int rank_, int dim, bool constness, int P, typename Number>
1128  constexpr DEAL_II_ALWAYS_INLINE
1129  Accessor<rank_, dim, constness, P - 1, Number>
1130  Accessor<rank_, dim, constness, P, Number>::operator[](
1131  const unsigned int i) const
1132  {
1133  return Accessor<rank_, dim, constness, P - 1, Number>(
1134  tensor, merge(previous_indices, i, rank_ - P));
1135  }
1136 
1137 
1138 
1139  template <int rank_, int dim, bool constness, typename Number>
1140  constexpr DEAL_II_ALWAYS_INLINE
1141  Accessor<rank_, dim, constness, 1, Number>::Accessor(
1142  tensor_type & tensor,
1143  const TableIndices<rank_> &previous_indices)
1144  : tensor(tensor)
1145  , previous_indices(previous_indices)
1146  {}
1147 
1148 
1149 
1150  template <int rank_, int dim, bool constness, typename Number>
1151  constexpr inline DEAL_II_ALWAYS_INLINE
1152  typename Accessor<rank_, dim, constness, 1, Number>::reference
1153  Accessor<rank_, dim, constness, 1, Number>::operator[](
1154  const unsigned int i)
1155  {
1156  return tensor(merge(previous_indices, i, rank_ - 1));
1157  }
1158 
1159 
1160  template <int rank_, int dim, bool constness, typename Number>
1161  constexpr DEAL_II_ALWAYS_INLINE
1162  typename Accessor<rank_, dim, constness, 1, Number>::reference
1163  Accessor<rank_, dim, constness, 1, Number>::operator[](
1164  const unsigned int i) const
1165  {
1166  return tensor(merge(previous_indices, i, rank_ - 1));
1167  }
1168  } // namespace SymmetricTensorAccessors
1169 } // namespace internal
1170 
1171 
1172 
1173 template <int rank_, int dim, typename Number>
1174 template <typename OtherNumber>
1175 inline DEAL_II_ALWAYS_INLINE
1177  const Tensor<2, dim, OtherNumber> &t)
1178 {
1179  static_assert(rank == 2, "This function is only implemented for rank==2");
1180  for (unsigned int d = 0; d < dim; ++d)
1181  for (unsigned int e = 0; e < d; ++e)
1182  Assert(t[d][e] == t[e][d],
1183  ExcMessage("The incoming Tensor must be exactly symmetric."));
1184 
1185  for (unsigned int d = 0; d < dim; ++d)
1186  data[d] = t[d][d];
1187 
1188  for (unsigned int d = 0, c = 0; d < dim; ++d)
1189  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1190  data[dim + c] = t[d][e];
1191 }
1192 
1193 
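The loops above fix the internal storage order used throughout this file: the dim diagonal entries come first, followed by the upper-triangle entries row by row. A minimal sketch of a round trip through a full Tensor (illustrative values):

  #include <deal.II/base/symmetric_tensor.h>
  #include <deal.II/base/tensor.h>
  using namespace dealii;

  void storage_order_example()
  {
    Tensor<2, 3> full;
    full[0][0] = 1.; full[1][1] = 2.; full[2][2] = 3.;
    full[0][1] = full[1][0] = 4.;   // must be exactly symmetric
    full[0][2] = full[2][0] = 5.;
    full[1][2] = full[2][1] = 6.;

    const SymmetricTensor<2, 3> s(full);

    // Raw storage: diagonal first, then the upper triangle row by row,
    // i.e. {1, 2, 3, 4, 5, 6} for the tensor above:
    //   s.access_raw_entry(0..2) -> 1, 2, 3
    //   s.access_raw_entry(3..5) -> 4, 5, 6
  }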
1194 
1195 template <int rank_, int dim, typename Number>
1196 template <typename OtherNumber>
1197 constexpr DEAL_II_ALWAYS_INLINE
1199  const SymmetricTensor<rank_, dim, OtherNumber> &initializer)
1200  : data(initializer.data)
1201 {}
1202 
1203 
1204 
1205 template <int rank_, int dim, typename Number>
1206 constexpr inline DEAL_II_ALWAYS_INLINE
1208  const Number (&array)[n_independent_components])
1209  : data(
1210  *reinterpret_cast<const typename base_tensor_type::array_type *>(array))
1211 {
1212  // ensure that the reinterpret_cast above actually works
1213  Assert(sizeof(typename base_tensor_type::array_type) == sizeof(array),
1214  ExcInternalError());
1215 }
1216 
1217 
1218 
1219 template <int rank_, int dim, typename Number>
1220 template <typename OtherNumber>
1224 {
1225  data = t.data;
1226  return *this;
1227 }
1228 
1229 
1230 
1231 template <int rank_, int dim, typename Number>
1234 {
1236  ExcMessage("Only assignment with zero is allowed"));
1237  (void)d;
1238 
1240 
1241  return *this;
1242 }
1243 
1244 
1245 namespace internal
1246 {
1247  namespace SymmetricTensorImplementation
1248  {
1249  template <int dim, typename Number>
1250  constexpr inline DEAL_II_ALWAYS_INLINE ::Tensor<2, dim, Number>
1251  convert_to_tensor(const ::SymmetricTensor<2, dim, Number> &s)
1252  {
1254 
1255  // diagonal entries are stored first
1256  for (unsigned int d = 0; d < dim; ++d)
1257  t[d][d] = s.access_raw_entry(d);
1258 
1259  // off-diagonal entries come next, row by row
1260  for (unsigned int d = 0, c = 0; d < dim; ++d)
1261  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1262  {
1263  t[d][e] = s.access_raw_entry(dim + c);
1264  t[e][d] = s.access_raw_entry(dim + c);
1265  }
1266  return t;
1267  }
1268 
1269 
1270  template <int dim, typename Number>
1271  constexpr ::Tensor<4, dim, Number>
1272  convert_to_tensor(const ::SymmetricTensor<4, dim, Number> &st)
1273  {
1274  // utilize the symmetry properties of SymmetricTensor<4,dim>
1275  // discussed in the class documentation to avoid accessing all
1276  // independent elements of the input tensor more than once
1278 
1279  for (unsigned int i = 0; i < dim; ++i)
1280  for (unsigned int j = i; j < dim; ++j)
1281  for (unsigned int k = 0; k < dim; ++k)
1282  for (unsigned int l = k; l < dim; ++l)
1283  t[TableIndices<4>(i, j, k, l)] = t[TableIndices<4>(i, j, l, k)] =
1284  t[TableIndices<4>(j, i, k, l)] =
1285  t[TableIndices<4>(j, i, l, k)] =
1286  st[TableIndices<4>(i, j, k, l)];
1287 
1288  return t;
1289  }
1290 
1291 
1292  template <typename Number>
1293  struct Inverse<2, 1, Number>
1294  {
1295  constexpr static inline DEAL_II_ALWAYS_INLINE
1296  ::SymmetricTensor<2, 1, Number>
1297  value(const ::SymmetricTensor<2, 1, Number> &t)
1298  {
1300 
1301  tmp[0][0] = 1.0 / t[0][0];
1302 
1303  return tmp;
1304  }
1305  };
1306 
1307 
1308  template <typename Number>
1309  struct Inverse<2, 2, Number>
1310  {
1311  constexpr static inline DEAL_II_ALWAYS_INLINE
1312  ::SymmetricTensor<2, 2, Number>
1313  value(const ::SymmetricTensor<2, 2, Number> &t)
1314  {
1316 
1317  // Sympy result: ([
1318  // [ t11/(t00*t11 - t01**2), -t01/(t00*t11 - t01**2)],
1319  // [-t01/(t00*t11 - t01**2), t00/(t00*t11 - t01**2)] ])
1320  const TableIndices<2> idx_00(0, 0);
1321  const TableIndices<2> idx_01(0, 1);
1322  const TableIndices<2> idx_11(1, 1);
1323  const Number inv_det_t =
1324  1.0 / (t[idx_00] * t[idx_11] - t[idx_01] * t[idx_01]);
1325  tmp[idx_00] = t[idx_11];
1326  tmp[idx_01] = -t[idx_01];
1327  tmp[idx_11] = t[idx_00];
1328  tmp *= inv_det_t;
1329 
1330  return tmp;
1331  }
1332  };
1333 
1334 
1335  template <typename Number>
1336  struct Inverse<2, 3, Number>
1337  {
1338  constexpr static ::SymmetricTensor<2, 3, Number>
1339  value(const ::SymmetricTensor<2, 3, Number> &t)
1340  {
1342 
1343  // Sympy result: ([
1344  // [ (t11*t22 - t12**2)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1345  // 2*t01*t02*t12 - t02**2*t11),
1346  // (-t01*t22 + t02*t12)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1347  // 2*t01*t02*t12 - t02**2*t11),
1348  // (t01*t12 - t02*t11)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1349  // 2*t01*t02*t12 - t02**2*t11)],
1350  // [ (-t01*t22 + t02*t12)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1351  // 2*t01*t02*t12 - t02**2*t11),
1352  // (t00*t22 - t02**2)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1353  // 2*t01*t02*t12 - t02**2*t11),
1354  // (t00*t12 - t01*t02)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1355  // 2*t01*t02*t12 + t02**2*t11)],
1356  // [ (t01*t12 - t02*t11)/(t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1357  // 2*t01*t02*t12 - t02**2*t11),
1358  // (t00*t12 - t01*t02)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1359  // 2*t01*t02*t12 + t02**2*t11),
1360  // (-t00*t11 + t01**2)/(-t00*t11*t22 + t00*t12**2 + t01**2*t22 -
1361  // 2*t01*t02*t12 + t02**2*t11)] ])
1362  //
1363  // =
1364  //
1365  // [ (t11*t22 - t12**2)/det_t,
1366  // (-t01*t22 + t02*t12)/det_t,
1367  // (t01*t12 - t02*t11)/det_t],
1368  // [ (-t01*t22 + t02*t12)/det_t,
1369  // (t00*t22 - t02**2)/det_t,
1370  // (-t00*t12 + t01*t02)/det_t],
1371  // [ (t01*t12 - t02*t11)/det_t,
1372  // (-t00*t12 + t01*t02)/det_t,
1373  // (t00*t11 - t01**2)/det_t] ])
1374  //
1375  // with det_t = (t00*t11*t22 - t00*t12**2 - t01**2*t22 +
1376  // 2*t01*t02*t12 - t02**2*t11)
1377  const TableIndices<2> idx_00(0, 0);
1378  const TableIndices<2> idx_01(0, 1);
1379  const TableIndices<2> idx_02(0, 2);
1380  const TableIndices<2> idx_11(1, 1);
1381  const TableIndices<2> idx_12(1, 2);
1382  const TableIndices<2> idx_22(2, 2);
1383  const Number inv_det_t =
1384  1.0 / (t[idx_00] * t[idx_11] * t[idx_22] -
1385  t[idx_00] * t[idx_12] * t[idx_12] -
1386  t[idx_01] * t[idx_01] * t[idx_22] +
1387  2.0 * t[idx_01] * t[idx_02] * t[idx_12] -
1388  t[idx_02] * t[idx_02] * t[idx_11]);
1389  tmp[idx_00] = t[idx_11] * t[idx_22] - t[idx_12] * t[idx_12];
1390  tmp[idx_01] = -t[idx_01] * t[idx_22] + t[idx_02] * t[idx_12];
1391  tmp[idx_02] = t[idx_01] * t[idx_12] - t[idx_02] * t[idx_11];
1392  tmp[idx_11] = t[idx_00] * t[idx_22] - t[idx_02] * t[idx_02];
1393  tmp[idx_12] = -t[idx_00] * t[idx_12] + t[idx_01] * t[idx_02];
1394  tmp[idx_22] = t[idx_00] * t[idx_11] - t[idx_01] * t[idx_01];
1395  tmp *= inv_det_t;
1396 
1397  return tmp;
1398  }
1399  };
1400 
1401 
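These specializations back the public invert() function for rank-2 symmetric tensors. A minimal usage sketch with illustrative values:

  #include <deal.II/base/symmetric_tensor.h>
  #include <deal.II/base/tensor.h>
  using namespace dealii;

  void invert_rank2_example()
  {
    SymmetricTensor<2, 3> t;
    t[0][0] = 4.; t[1][1] = 5.; t[2][2] = 6.;
    t[0][1] = 1.;

    const SymmetricTensor<2, 3> t_inv = invert(t);

    // Applying t and then t_inv to a vector returns the vector,
    // up to roundoff:
    Tensor<1, 3> v;
    v[0] = 1.; v[1] = 2.; v[2] = 3.;
    const Tensor<1, 3> w = t_inv * (t * v);   // w ~= v
    (void)w;
  }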
1402  template <typename Number>
1403  struct Inverse<4, 1, Number>
1404  {
1405  constexpr static inline ::SymmetricTensor<4, 1, Number>
1406  value(const ::SymmetricTensor<4, 1, Number> &t)
1407  {
1409  tmp.data[0][0] = 1.0 / t.data[0][0];
1410  return tmp;
1411  }
1412  };
1413 
1414 
1415  template <typename Number>
1416  struct Inverse<4, 2, Number>
1417  {
1418  constexpr static inline ::SymmetricTensor<4, 2, Number>
1419  value(const ::SymmetricTensor<4, 2, Number> &t)
1420  {
1422 
1423  // Inverting this tensor is a little more complicated than necessary,
1424  // since we store the data of 't' as a 3x3 matrix t.data, but the
1425  // product between a rank-4 and a rank-2 tensor is really not the
1426  // product between this matrix and the 3-vector of a rhs, but rather
1427  //
1428  // B.vec = t.data * mult * A.vec
1429  //
1430  // where mult is a 3x3 matrix with entries [[1,0,0],[0,1,0],[0,0,2]] to
1431  // capture the fact that we need to add up both the c_ij12*a_12 and the
1432  // c_ij21*a_21 terms.
1433  //
1434  // In addition, in this scheme, the identity tensor has the matrix
1435  // representation mult^-1.
1436  //
1437  // The inverse of 't' therefore has the matrix representation
1438  //
1439  // inv.data = mult^-1 * t.data^-1 * mult^-1
1440  //
1441  // in order to compute it, let's first compute the inverse of t.data and
1442  // put it into tmp.data; at the end of the function we then scale the
1443  // last row and column of the inverse by 1/2, corresponding to the left
1444  // and right multiplication with mult^-1.
1445  const Number t4 = t.data[0][0] * t.data[1][1],
1446  t6 = t.data[0][0] * t.data[1][2],
1447  t8 = t.data[0][1] * t.data[1][0],
1448  t00 = t.data[0][2] * t.data[1][0],
1449  t01 = t.data[0][1] * t.data[2][0],
1450  t04 = t.data[0][2] * t.data[2][0],
1451  t07 = 1.0 / (t4 * t.data[2][2] - t6 * t.data[2][1] -
1452  t8 * t.data[2][2] + t00 * t.data[2][1] +
1453  t01 * t.data[1][2] - t04 * t.data[1][1]);
1454  tmp.data[0][0] =
1455  (t.data[1][1] * t.data[2][2] - t.data[1][2] * t.data[2][1]) * t07;
1456  tmp.data[0][1] =
1457  -(t.data[0][1] * t.data[2][2] - t.data[0][2] * t.data[2][1]) * t07;
1458  tmp.data[0][2] =
1459  -(-t.data[0][1] * t.data[1][2] + t.data[0][2] * t.data[1][1]) * t07;
1460  tmp.data[1][0] =
1461  -(t.data[1][0] * t.data[2][2] - t.data[1][2] * t.data[2][0]) * t07;
1462  tmp.data[1][1] = (t.data[0][0] * t.data[2][2] - t04) * t07;
1463  tmp.data[1][2] = -(t6 - t00) * t07;
1464  tmp.data[2][0] =
1465  -(-t.data[1][0] * t.data[2][1] + t.data[1][1] * t.data[2][0]) * t07;
1466  tmp.data[2][1] = -(t.data[0][0] * t.data[2][1] - t01) * t07;
1467  tmp.data[2][2] = (t4 - t8) * t07;
1468 
1469  // scale last row and column as mentioned
1470  // above
1471  tmp.data[2][0] /= 2;
1472  tmp.data[2][1] /= 2;
1473  tmp.data[0][2] /= 2;
1474  tmp.data[1][2] /= 2;
1475  tmp.data[2][2] /= 4;
1476 
1477  return tmp;
1478  }
1479  };
1480 
1481 
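The rescaling of the last row and column above implements the two factors of mult^-1: in matrix form the double contraction C : A reads C_mat * M * A_vec with M = diag(1, 1, 2) in 2d, so the inverse tensor has matrix M^-1 * C_mat^-1 * M^-1. A minimal consistency sketch (the rank-4 identity tensor is its own inverse):

  #include <deal.II/base/symmetric_tensor.h>
  using namespace dealii;

  void invert_rank4_example()
  {
    // The rank-4 identity tensor maps every rank-2 symmetric tensor to
    // itself, so inverting it must give it back.
    const SymmetricTensor<4, 2> id  = identity_tensor<2>();
    const SymmetricTensor<4, 2> inv = invert(id);

    // (inv - id).norm() is zero up to roundoff.
    (void)inv;
  }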
1482  template <typename Number>
1483  struct Inverse<4, 3, Number>
1484  {
1485  static ::SymmetricTensor<4, 3, Number>
1486  value(const ::SymmetricTensor<4, 3, Number> &t)
1487  {
1489 
1490  // This function follows the exact same scheme as the 2d case, except
1491  // that hardcoding the inverse of a 6x6 matrix is pretty wasteful.
1492  // Instead, we use the Gauss-Jordan algorithm implemented for
1493  // FullMatrix. For historical reasons the following code is copied from
1494  // there, with the tangential benefit that we do not need to copy the
1495  // tensor entries to and from the FullMatrix.
1496  const unsigned int N = 6;
1497 
1498  // First get an estimate of the size of the elements of this matrix,
1499  // so that we can later check whether a pivot element is large enough,
1500  // or whether we have to fear that the matrix is not regular.
1501  Number diagonal_sum = internal::NumberType<Number>::value(0.0);
1502  for (unsigned int i = 0; i < N; ++i)
1503  diagonal_sum += std::fabs(tmp.data[i][i]);
1504  const Number typical_diagonal_element =
1505  diagonal_sum / static_cast<double>(N);
1506  (void)typical_diagonal_element;
1507 
1508  unsigned int p[N];
1509  for (unsigned int i = 0; i < N; ++i)
1510  p[i] = i;
1511 
1512  for (unsigned int j = 0; j < N; ++j)
1513  {
1514  // Pivot search: search that part of the line on and right of the
1515  // diagonal for the largest element.
1516  Number max = std::fabs(tmp.data[j][j]);
1517  unsigned int r = j;
1518  for (unsigned int i = j + 1; i < N; ++i)
1519  if (std::fabs(tmp.data[i][j]) > max)
1520  {
1521  max = std::fabs(tmp.data[i][j]);
1522  r = i;
1523  }
1524 
1525  // Check whether the pivot is too small
1526  Assert(max > 1.e-16 * typical_diagonal_element,
1527  ExcMessage("This tensor seems to be noninvertible"));
1528 
1529  // Row interchange
1530  if (r > j)
1531  {
1532  for (unsigned int k = 0; k < N; ++k)
1533  std::swap(tmp.data[j][k], tmp.data[r][k]);
1534 
1535  std::swap(p[j], p[r]);
1536  }
1537 
1538  // Transformation
1539  const Number hr = 1. / tmp.data[j][j];
1540  tmp.data[j][j] = hr;
1541  for (unsigned int k = 0; k < N; ++k)
1542  {
1543  if (k == j)
1544  continue;
1545  for (unsigned int i = 0; i < N; ++i)
1546  {
1547  if (i == j)
1548  continue;
1549  tmp.data[i][k] -= tmp.data[i][j] * tmp.data[j][k] * hr;
1550  }
1551  }
1552  for (unsigned int i = 0; i < N; ++i)
1553  {
1554  tmp.data[i][j] *= hr;
1555  tmp.data[j][i] *= -hr;
1556  }
1557  tmp.data[j][j] = hr;
1558  }
1559 
1560  // Column interchange
1561  Number hv[N];
1562  for (unsigned int i = 0; i < N; ++i)
1563  {
1564  for (unsigned int k = 0; k < N; ++k)
1565  hv[p[k]] = tmp.data[i][k];
1566  for (unsigned int k = 0; k < N; ++k)
1567  tmp.data[i][k] = hv[k];
1568  }
1569 
1570  // Scale rows and columns. The mult matrix
1571  // here is diag[1, 1, 1, 1/2, 1/2, 1/2].
1572  for (unsigned int i = 3; i < 6; ++i)
1573  for (unsigned int j = 0; j < 3; ++j)
1574  tmp.data[i][j] /= 2;
1575 
1576  for (unsigned int i = 0; i < 3; ++i)
1577  for (unsigned int j = 3; j < 6; ++j)
1578  tmp.data[i][j] /= 2;
1579 
1580  for (unsigned int i = 3; i < 6; ++i)
1581  for (unsigned int j = 3; j < 6; ++j)
1582  tmp.data[i][j] /= 4;
1583 
1584  return tmp;
1585  }
1586  };
1587 
1588  } // namespace SymmetricTensorImplementation
1589 } // namespace internal
1590 
1591 
1592 
1593 template <int rank_, int dim, typename Number>
1594 constexpr DEAL_II_ALWAYS_INLINE
1596  const
1597 {
1598  return internal::SymmetricTensorImplementation::convert_to_tensor(*this);
1599 }
1600 
1601 
1602 
1603 template <int rank_, int dim, typename Number>
1604 constexpr bool
1606  const SymmetricTensor<rank_, dim, Number> &t) const
1607 {
1608  return data == t.data;
1609 }
1610 
1611 
1612 
1613 template <int rank_, int dim, typename Number>
1614 constexpr bool
1616  const SymmetricTensor<rank_, dim, Number> &t) const
1617 {
1618  return data != t.data;
1619 }
1620 
1621 
1622 
1623 template <int rank_, int dim, typename Number>
1624 template <typename OtherNumber>
1628 {
1629  data += t.data;
1630  return *this;
1631 }
1632 
1633 
1634 
1635 template <int rank_, int dim, typename Number>
1636 template <typename OtherNumber>
1640 {
1641  data -= t.data;
1642  return *this;
1643 }
1644 
1645 
1646 
1647 template <int rank_, int dim, typename Number>
1648 template <typename OtherNumber>
1651 {
1652  data *= d;
1653  return *this;
1654 }
1655 
1656 
1657 
1658 template <int rank_, int dim, typename Number>
1659 template <typename OtherNumber>
1662 {
1663  data /= d;
1664  return *this;
1665 }
1666 
1667 
1668 
1669 template <int rank_, int dim, typename Number>
1672 {
1673  SymmetricTensor tmp = *this;
1674  tmp.data = -tmp.data;
1675  return tmp;
1676 }
1677 
1678 
1679 
1680 template <int rank_, int dim, typename Number>
1681 constexpr inline DEAL_II_ALWAYS_INLINE void
1683 {
1684  data.clear();
1685 }
1686 
1687 
1688 
1689 template <int rank_, int dim, typename Number>
1690 constexpr std::size_t
1692 {
1693  // all memory consists of statically allocated memory of the current
1694  // object, no pointers
1695  return sizeof(SymmetricTensor<rank_, dim, Number>);
1696 }
1697 
1698 
1699 
1700 namespace internal
1701 {
1702  template <int dim, typename Number, typename OtherNumber = Number>
1706  perform_double_contraction(
1707  const typename SymmetricTensorAccessors::StorageType<2, dim, Number>::
1708  base_tensor_type &data,
1709  const typename SymmetricTensorAccessors::
1710  StorageType<2, dim, OtherNumber>::base_tensor_type &sdata)
1711  {
1712  using result_type = typename SymmetricTensorAccessors::
1714 
1715  switch (dim)
1716  {
1717  case 1:
1718  return data[0] * sdata[0];
1719  default:
1720  // Start with the non-diagonal part to avoid some multiplications by
1721  // 2.
1722 
1723  result_type sum = data[dim] * sdata[dim];
1724  for (unsigned int d = dim + 1; d < (dim * (dim + 1) / 2); ++d)
1725  sum += data[d] * sdata[d];
1726  sum += sum; // sum = sum * 2.;
1727 
1728  // Now add the contributions from the diagonal
1729  for (unsigned int d = 0; d < dim; ++d)
1730  sum += data[d] * sdata[d];
1731  return sum;
1732  }
1733  }
1734 
1735 
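The sum += sum step works because, in this storage order, each off-diagonal entry is stored once but contributes twice to the full contraction: a : b = sum_i a_ii*b_ii + 2*sum_{i<j} a_ij*b_ij. A small worked sketch in 2d (values are illustrative):

  #include <deal.II/base/symmetric_tensor.h>
  using namespace dealii;

  void double_contraction_example()
  {
    SymmetricTensor<2, 2> a, b;
    a[0][0] = 1.; a[1][1] = 2.; a[0][1] = 3.;
    b[0][0] = 4.; b[1][1] = 5.; b[0][1] = 6.;

    // a : b = 1*4 + 2*5 + 2*(3*6) = 50; the off-diagonal term is counted
    // twice, which is exactly the "sum += sum" step above.
    const double contraction = a * b;   // == 50
    (void)contraction;
  }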
1736 
1737  template <int dim, typename Number, typename OtherNumber = Number>
1741  perform_double_contraction(
1742  const typename SymmetricTensorAccessors::StorageType<4, dim, Number>::
1743  base_tensor_type &data,
1744  const typename SymmetricTensorAccessors::
1745  StorageType<2, dim, OtherNumber>::base_tensor_type &sdata)
1746  {
1747  using result_type = typename SymmetricTensorAccessors::
1749  using value_type = typename SymmetricTensorAccessors::
1751 
1752  const unsigned int data_dim = SymmetricTensorAccessors::
1753  StorageType<2, dim, value_type>::n_independent_components;
1754  value_type tmp[data_dim]{};
1755  for (unsigned int i = 0; i < data_dim; ++i)
1756  tmp[i] =
1757  perform_double_contraction<dim, Number, OtherNumber>(data[i], sdata);
1758  return result_type(tmp);
1759  }
1760 
1761 
1762 
1763  template <int dim, typename Number, typename OtherNumber = Number>
1765  typename SymmetricTensorAccessors::StorageType<
1766  2,
1767  dim,
1770  base_tensor_type
1771  perform_double_contraction(
1772  const typename SymmetricTensorAccessors::StorageType<2, dim, Number>::
1773  base_tensor_type &data,
1774  const typename SymmetricTensorAccessors::
1775  StorageType<4, dim, OtherNumber>::base_tensor_type &sdata)
1776  {
1777  using value_type = typename SymmetricTensorAccessors::
1779  using base_tensor_type = typename SymmetricTensorAccessors::
1780  StorageType<2, dim, value_type>::base_tensor_type;
1781 
1782  base_tensor_type tmp;
1783  for (unsigned int i = 0; i < tmp.dimension; ++i)
1784  {
1785  // Start with the non-diagonal part
1786  value_type sum = data[dim] * sdata[dim][i];
1787  for (unsigned int d = dim + 1; d < (dim * (dim + 1) / 2); ++d)
1788  sum += data[d] * sdata[d][i];
1789  sum += sum; // sum = sum * 2.;
1790 
1791  // Now add the contributions from the diagonal
1792  for (unsigned int d = 0; d < dim; ++d)
1793  sum += data[d] * sdata[d][i];
1794  tmp[i] = sum;
1795  }
1796  return tmp;
1797  }
1798 
1799 
1800 
1801  template <int dim, typename Number, typename OtherNumber = Number>
1803  typename SymmetricTensorAccessors::StorageType<
1804  4,
1805  dim,
1808  base_tensor_type
1809  perform_double_contraction(
1810  const typename SymmetricTensorAccessors::StorageType<4, dim, Number>::
1811  base_tensor_type &data,
1812  const typename SymmetricTensorAccessors::
1813  StorageType<4, dim, OtherNumber>::base_tensor_type &sdata)
1814  {
1815  using value_type = typename SymmetricTensorAccessors::
1817  using base_tensor_type = typename SymmetricTensorAccessors::
1818  StorageType<4, dim, value_type>::base_tensor_type;
1819 
1820  const unsigned int data_dim = SymmetricTensorAccessors::
1821  StorageType<2, dim, value_type>::n_independent_components;
1822  base_tensor_type tmp;
1823  for (unsigned int i = 0; i < data_dim; ++i)
1824  for (unsigned int j = 0; j < data_dim; ++j)
1825  {
1826  // Start with the non-diagonal part
1827  for (unsigned int d = dim; d < (dim * (dim + 1) / 2); ++d)
1828  tmp[i][j] += data[i][d] * sdata[d][j];
1829  tmp[i][j] += tmp[i][j]; // tmp[i][j] = tmp[i][j] * 2;
1830 
1831  // Now add the contributions from the diagonal
1832  for (unsigned int d = 0; d < dim; ++d)
1833  tmp[i][j] += data[i][d] * sdata[d][j];
1834  }
1835  return tmp;
1836  }
1837 
1838 } // end of namespace internal
1839 
1840 
1841 
1842 template <int rank_, int dim, typename Number>
1843 template <typename OtherNumber>
1848  const SymmetricTensor<2, dim, OtherNumber> &s) const
1849 {
1850  // need to have two different function calls
1851  // because a scalar and rank-2 tensor are not
1852  // the same data type (see internal function
1853  // above)
1854  return internal::perform_double_contraction<dim, Number, OtherNumber>(data,
1855  s.data);
1856 }
1857 
1858 
1859 
1860 template <int rank_, int dim, typename Number>
1861 template <typename OtherNumber>
1865  const SymmetricTensor<4, dim, OtherNumber> &s) const
1866 {
1869  tmp.data =
1870  internal::perform_double_contraction<dim, Number, OtherNumber>(data,
1871  s.data);
1872  return tmp;
1873 }
1874 
1875 
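Both member operators implement double contractions; the rank-4 variant is what a constitutive law of the form sigma = C : eps uses. A minimal sketch, assuming unit_symmetric_tensor(), identity_tensor(), and outer_product() from deal.II (values are illustrative):

  #include <deal.II/base/symmetric_tensor.h>
  using namespace dealii;

  void hooke_law_example()
  {
    // Isotropic elasticity tensor: C = lambda I x I + 2 mu Id.
    const double lambda = 1.0, mu = 0.5;
    const SymmetricTensor<4, 3> C =
      lambda * outer_product(unit_symmetric_tensor<3>(),
                             unit_symmetric_tensor<3>()) +
      2. * mu * identity_tensor<3>();

    SymmetricTensor<2, 3> eps;
    eps[0][0] = 1e-3;
    eps[0][1] = 2e-3;

    // Double contraction of a rank-4 with a rank-2 symmetric tensor:
    const SymmetricTensor<2, 3> sigma = C * eps;
    (void)sigma;
  }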
1876 
1877 // internal namespace to switch between the
1878 // access of different tensors. There used to
1879 // be explicit instantiations before for
1880 // different ranks and dimensions, but since
1881 // we now allow for templates on the data
1882 // type, and since we cannot partially
1883 // specialize the implementation, this got
1884 // into a separate namespace
1885 namespace internal
1886 {
1887  // The variables within this struct will be referenced in the next functions.
1888  // It is a workaround that allows returning a reference to a static variable
1889  // while allowing constexpr evaluation of the function.
1890  // It has to be defined outside the function because constexpr functions
1891  // cannot define static variables.
1892  // A similar struct has also been defined in tensor.h
1893  template <typename Type>
1894  struct Uninitialized
1895  {
1896  static Type value;
1897  };
1898 
1899  template <typename Type>
1901 
1902  template <int dim, typename Number>
1903  constexpr inline DEAL_II_ALWAYS_INLINE Number &
1904  symmetric_tensor_access(const TableIndices<2> &indices,
1905  typename SymmetricTensorAccessors::
1906  StorageType<2, dim, Number>::base_tensor_type &data)
1907  {
1908  // 1d is very simple and done first
1909  if (dim == 1)
1910  return data[0];
1911 
1912  // first treat the main diagonal elements, which are stored consecutively
1913  // at the beginning
1914  if (indices[0] == indices[1])
1915  return data[indices[0]];
1916 
1917  // the rest is messier and requires a few switches.
1918  switch (dim)
1919  {
1920  case 2:
1921  // at least for the 2x2 case it is reasonably simple
1922  Assert(((indices[0] == 1) && (indices[1] == 0)) ||
1923  ((indices[0] == 0) && (indices[1] == 1)),
1924  ExcInternalError());
1925  return data[2];
1926 
1927  default:
1928  // to do the rest, sort our indices before comparing
1929  {
1930  TableIndices<2> sorted_indices(std::min(indices[0], indices[1]),
1931  std::max(indices[0], indices[1]));
1932  for (unsigned int d = 0, c = 0; d < dim; ++d)
1933  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1934  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
1935  return data[dim + c];
1936  Assert(false, ExcInternalError());
1937  }
1938  }
1939 
1940  // The code should never reach this point.
1941  // Return a dummy reference to a dummy variable just to make the
1942  // compiler happy.
1944  }
1945 
1946 
1947 
1948  template <int dim, typename Number>
1949  constexpr inline DEAL_II_ALWAYS_INLINE const Number &
1950  symmetric_tensor_access(const TableIndices<2> &indices,
1951  const typename SymmetricTensorAccessors::
1952  StorageType<2, dim, Number>::base_tensor_type &data)
1953  {
1954  // 1d is very simple and done first
1955  if (dim == 1)
1956  return data[0];
1957 
1958  // first treat the main diagonal elements, which are stored consecutively
1959  // at the beginning
1960  if (indices[0] == indices[1])
1961  return data[indices[0]];
1962 
1963  // the rest is messier and requires a few switches.
1964  switch (dim)
1965  {
1966  case 2:
1967  // at least for the 2x2 case it is reasonably simple
1968  Assert(((indices[0] == 1) && (indices[1] == 0)) ||
1969  ((indices[0] == 0) && (indices[1] == 1)),
1970  ExcInternalError());
1971  return data[2];
1972 
1973  default:
1974  // to do the rest, sort our indices before comparing
1975  {
1976  TableIndices<2> sorted_indices(std::min(indices[0], indices[1]),
1977  std::max(indices[0], indices[1]));
1978  for (unsigned int d = 0, c = 0; d < dim; ++d)
1979  for (unsigned int e = d + 1; e < dim; ++e, ++c)
1980  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
1981  return data[dim + c];
1982  Assert(false, ExcInternalError());
1983  }
1984  }
1985 
1986  // The code should never reach this point.
1987  // Return a dummy reference to a dummy variable just to make the
1988  // compiler happy.
1990  }
1991 
1992 
1993 
1994  template <int dim, typename Number>
1995  constexpr inline Number &
1996  symmetric_tensor_access(const TableIndices<4> &indices,
1997  typename SymmetricTensorAccessors::
1998  StorageType<4, dim, Number>::base_tensor_type &data)
1999  {
2000  switch (dim)
2001  {
2002  case 1:
2003  return data[0][0];
2004 
2005  case 2:
2006  // each entry of the tensor can be thought of as an entry in a
2007  // matrix that maps the rolled-out rank-2 tensors into rolled-out
2008  // rank-2 tensors. this is the format in which we store rank-4
2009  // tensors. determine which position the present entry is
2010  // stored in
2011  {
2012  constexpr std::size_t base_index[2][2] = {{0, 2}, {2, 1}};
2013  return data[base_index[indices[0]][indices[1]]]
2014  [base_index[indices[2]][indices[3]]];
2015  }
2016  case 3:
2017  // each entry of the tensor can be thought of as an entry in a
2018  // matrix that maps the rolled-out rank-2 tensors into rolled-out
2019  // rank-2 tensors. this is the format in which we store rank-4
2020  // tensors. determine which position the present entry is
2021  // stored in
2022  {
2023  constexpr std::size_t base_index[3][3] = {{0, 3, 4},
2024  {3, 1, 5},
2025  {4, 5, 2}};
2026  return data[base_index[indices[0]][indices[1]]]
2027  [base_index[indices[2]][indices[3]]];
2028  }
2029 
2030  default:
2031  Assert(false, ExcNotImplemented());
2032  }
2033 
2034  // The code should never reach this point.
2035  // Return a dummy reference to a dummy variable just to make the
2036  // compiler happy.
2038  }
2039 
2040 
2041  template <int dim, typename Number>
2042  constexpr inline DEAL_II_ALWAYS_INLINE const Number &
2043  symmetric_tensor_access(const TableIndices<4> &indices,
2044  const typename SymmetricTensorAccessors::
2045  StorageType<4, dim, Number>::base_tensor_type &data)
2046  {
2047  switch (dim)
2048  {
2049  case 1:
2050  return data[0][0];
2051 
2052  case 2:
2053  // each entry of the tensor can be thought of as an entry in a
2054  // matrix that maps the rolled-out rank-2 tensors into rolled-out
2055  // rank-2 tensors. this is the format in which we store rank-4
2056  // tensors. determine which position the present entry is
2057  // stored in
2058  {
2059  constexpr std::size_t base_index[2][2] = {{0, 2}, {2, 1}};
2060  return data[base_index[indices[0]][indices[1]]]
2061  [base_index[indices[2]][indices[3]]];
2062  }
2063  case 3:
2064  // each entry of the tensor can be thought of as an entry in a
2065  // matrix that maps the rolled-out rank-2 tensors into rolled-out
2066  // rank-2 tensors. this is the format in which we store rank-4
2067  // tensors. determine which position the present entry is
2068  // stored in
2069  {
2070  constexpr std::size_t base_index[3][3] = {{0, 3, 4},
2071  {3, 1, 5},
2072  {4, 5, 2}};
2073  return data[base_index[indices[0]][indices[1]]]
2074  [base_index[indices[2]][indices[3]]];
2075  }
2076 
2077  default:
2078  Assert(false, ExcNotImplemented());
2079  }
2080 
2081  // The code should never reach this point.
2082  // Return a dummy reference to a dummy variable just to make the
2083  // compiler happy.
2085  }
2086 
2087 } // end of namespace internal
2088 
2089 
2090 
2091 template <int rank_, int dim, typename Number>
2092 constexpr inline DEAL_II_ALWAYS_INLINE Number &
2094  const TableIndices<rank_> &indices)
2095 {
2096  for (unsigned int r = 0; r < rank; ++r)
2097  AssertIndexRange(indices[r], dimension);
2098  return internal::symmetric_tensor_access<dim, Number>(indices, data);
2099 }
2100 
2101 
2102 
2103 template <int rank_, int dim, typename Number>
2104 constexpr inline DEAL_II_ALWAYS_INLINE const Number &
2106  const TableIndices<rank_> &indices) const
2107 {
2108  for (unsigned int r = 0; r < rank; ++r)
2109  AssertIndexRange(indices[r], dimension);
2110  return internal::symmetric_tensor_access<dim, Number>(indices, data);
2111 }
2112 
2113 
2114 
2115 namespace internal
2116 {
2117  namespace SymmetricTensorImplementation
2118  {
2119  template <int rank_>
2120  constexpr TableIndices<rank_>
2121  get_partially_filled_indices(const unsigned int row,
2122  const std::integral_constant<int, 2> &)
2123  {
2125  }
2126 
2127 
2128  template <int rank_>
2129  constexpr TableIndices<rank_>
2130  get_partially_filled_indices(const unsigned int row,
2131  const std::integral_constant<int, 4> &)
2132  {
2133  return TableIndices<rank_>(row,
2137  }
2138  } // namespace SymmetricTensorImplementation
2139 } // namespace internal
2140 
2141 
2142 template <int rank_, int dim, typename Number>
2143 constexpr DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors::
2144  Accessor<rank_, dim, true, rank_ - 1, Number>
2145  SymmetricTensor<rank_, dim, Number>::operator[](const unsigned int row) const
2146 {
2147  return internal::SymmetricTensorAccessors::
2148  Accessor<rank_, dim, true, rank_ - 1, Number>(
2149  *this,
2150  internal::SymmetricTensorImplementation::get_partially_filled_indices<
2151  rank_>(row, std::integral_constant<int, rank_>()));
2152 }
2153 
2154 
2155 
2156 template <int rank_, int dim, typename Number>
2157 constexpr inline DEAL_II_ALWAYS_INLINE internal::SymmetricTensorAccessors::
2158  Accessor<rank_, dim, false, rank_ - 1, Number>
2159  SymmetricTensor<rank_, dim, Number>::operator[](const unsigned int row)
2160 {
2161  return internal::SymmetricTensorAccessors::
2162  Accessor<rank_, dim, false, rank_ - 1, Number>(
2163  *this,
2164  internal::SymmetricTensorImplementation::get_partially_filled_indices<
2165  rank_>(row, std::integral_constant<int, rank_>()));
2166 }
2167 
2168 
2169 
2170 template <int rank_, int dim, typename Number>
2171 constexpr DEAL_II_ALWAYS_INLINE const Number &
2173  const TableIndices<rank_> &indices) const
2174 {
2175  return operator()(indices);
2176 }
2177 
2178 
2179 
2180 template <int rank_, int dim, typename Number>
2181 constexpr inline DEAL_II_ALWAYS_INLINE Number &
2183  const TableIndices<rank_> &indices)
2184 {
2185  return operator()(indices);
2186 }
2187 
2188 
2189 
2190 template <int rank_, int dim, typename Number>
2191 inline Number *
2193 {
2194  return std::addressof(this->access_raw_entry(0));
2195 }
2196 
2197 
2198 
2199 template <int rank_, int dim, typename Number>
2200 inline const Number *
2202 {
2203  return std::addressof(this->access_raw_entry(0));
2204 }
2205 
2206 
2207 
2208 template <int rank_, int dim, typename Number>
2209 inline Number *
2211 {
2212  return begin_raw() + n_independent_components;
2213 }
2214 
2215 
2216 
2217 template <int rank_, int dim, typename Number>
2218 inline const Number *
2220 {
2221  return begin_raw() + n_independent_components;
2222 }
2223 
2224 
2225 
2226 namespace internal
2227 {
2228  namespace SymmetricTensorImplementation
2229  {
2230  template <int dim, typename Number>
2231  constexpr unsigned int
2232  entry_to_indices(const ::SymmetricTensor<2, dim, Number> &,
2233  const unsigned int index)
2234  {
2235  return index;
2236  }
2237 
2238 
2239  template <int dim, typename Number>
2240  constexpr ::TableIndices<2>
2241  entry_to_indices(const ::SymmetricTensor<4, dim, Number> &,
2242  const unsigned int index)
2243  {
2246  }
2247 
2248  } // namespace SymmetricTensorImplementation
2249 } // namespace internal
2250 
2251 
2252 
2253 template <int rank_, int dim, typename Number>
2254 constexpr inline const Number &
2256  const unsigned int index) const
2257 {
2258  AssertIndexRange(index, n_independent_components);
2259  return data[internal::SymmetricTensorImplementation::entry_to_indices(*this,
2260  index)];
2261 }
2262 
2263 
2264 
2265 template <int rank_, int dim, typename Number>
2266 constexpr inline Number &
2268 {
2269  AssertIndexRange(index, n_independent_components);
2270  return data[internal::SymmetricTensorImplementation::entry_to_indices(*this,
2271  index)];
2272 }
2273 
2274 
2275 
2276 namespace internal
2277 {
2278  template <int dim, typename Number>
2279  constexpr inline typename numbers::NumberTraits<Number>::real_type
2280  compute_norm(const typename SymmetricTensorAccessors::
2281  StorageType<2, dim, Number>::base_tensor_type &data)
2282  {
2283  switch (dim)
2284  {
2285  case 1:
2286  return numbers::NumberTraits<Number>::abs(data[0]);
2287 
2288  case 2:
2289  return std::sqrt(
2293 
2294  case 3:
2295  return std::sqrt(
2302 
2303  default:
2304  {
2305  typename numbers::NumberTraits<Number>::real_type return_value =
2307 
2308  for (unsigned int d = 0; d < dim; ++d)
2309  return_value +=
2311  for (unsigned int d = dim; d < (dim * dim + dim) / 2; ++d)
2312  return_value +=
2314 
2315  return std::sqrt(return_value);
2316  }
2317  }
2318  }
2319 
2320 
2321 
2322  template <int dim, typename Number>
2323  constexpr inline typename numbers::NumberTraits<Number>::real_type
2324  compute_norm(const typename SymmetricTensorAccessors::
2325  StorageType<4, dim, Number>::base_tensor_type &data)
2326  {
2327  switch (dim)
2328  {
2329  case 1:
2330  return numbers::NumberTraits<Number>::abs(data[0][0]);
2331 
2332  default:
2333  {
2334  typename numbers::NumberTraits<Number>::real_type return_value =
2336 
2337  const unsigned int n_independent_components = data.dimension;
2338 
2339  for (unsigned int i = 0; i < dim; ++i)
2340  for (unsigned int j = 0; j < dim; ++j)
2341  return_value +=
2343  for (unsigned int i = 0; i < dim; ++i)
2344  for (unsigned int j = dim; j < n_independent_components; ++j)
2345  return_value +=
2347  for (unsigned int i = dim; i < n_independent_components; ++i)
2348  for (unsigned int j = 0; j < dim; ++j)
2349  return_value +=
2351  for (unsigned int i = dim; i < n_independent_components; ++i)
2352  for (unsigned int j = dim; j < n_independent_components; ++j)
2353  return_value +=
2355 
2356  return std::sqrt(return_value);
2357  }
2358  }
2359  }
2360 
2361 } // end of namespace internal
2362 
2363 
2364 
2365 template <int rank_, int dim, typename Number>
2368 {
2369  return internal::compute_norm<dim, Number>(data);
2370 }
2371 
2372 
2373 
2374 namespace internal
2375 {
2376  namespace SymmetricTensorImplementation
2377  {
2378  // a function to do the unrolling from a set of indices to a
2379  // scalar index into the array in which we store the elements of
2380  // a symmetric tensor
2381  //
2382  // this function is for rank-2 tensors
2383  template <int dim>
2384  constexpr inline DEAL_II_ALWAYS_INLINE unsigned int
2385  component_to_unrolled_index(const TableIndices<2> &indices)
2386  {
2387  AssertIndexRange(indices[0], dim);
2388  AssertIndexRange(indices[1], dim);
2389 
2390  switch (dim)
2391  {
2392  case 1:
2393  {
2394  return 0;
2395  }
2396 
2397  case 2:
2398  {
2399  constexpr unsigned int table[2][2] = {{0, 2}, {2, 1}};
2400  return table[indices[0]][indices[1]];
2401  }
2402 
2403  case 3:
2404  {
2405  constexpr unsigned int table[3][3] = {{0, 3, 4},
2406  {3, 1, 5},
2407  {4, 5, 2}};
2408  return table[indices[0]][indices[1]];
2409  }
2410 
2411  case 4:
2412  {
2413  constexpr unsigned int table[4][4] = {{0, 4, 5, 6},
2414  {4, 1, 7, 8},
2415  {5, 7, 2, 9},
2416  {6, 8, 9, 3}};
2417  return table[indices[0]][indices[1]];
2418  }
2419 
2420  default:
2421  // for the remainder, manually figure out the numbering
2422  {
2423  if (indices[0] == indices[1])
2424  return indices[0];
2425 
2426  TableIndices<2> sorted_indices(indices);
2427  sorted_indices.sort();
2428 
2429  for (unsigned int d = 0, c = 0; d < dim; ++d)
2430  for (unsigned int e = d + 1; e < dim; ++e, ++c)
2431  if ((sorted_indices[0] == d) && (sorted_indices[1] == e))
2432  return dim + c;
2433 
2434  // should never get here:
2435  Assert(false, ExcInternalError());
2436  return 0;
2437  }
2438  }
2439  }
2440 
2441  // a function to do the unrolling from a set of indices to a
2442  // scalar index into the array in which we store the elements of
2443  // a symmetric tensor
2444  //
2445  // this function is for tensors of ranks not already handled
2446  // above
2447  template <int dim, int rank_>
2448  constexpr inline unsigned int
2449  component_to_unrolled_index(const TableIndices<rank_> &indices)
2450  {
2451  (void)indices;
2452  Assert(false, ExcNotImplemented());
2454  }
2455  } // namespace SymmetricTensorImplementation
2456 } // namespace internal
2457 
2458 
2459 template <int rank_, int dim, typename Number>
2460 constexpr unsigned int
2462  const TableIndices<rank_> &indices)
2463 {
2464  return internal::SymmetricTensorImplementation::component_to_unrolled_index<
2465  dim>(indices);
2466 }
2467 
2468 
2469 
2470 namespace internal
2471 {
2472  namespace SymmetricTensorImplementation
2473  {
2474  // a function to do the inverse of the unrolling from a set of
2475  // indices to a scalar index into the array in which we store
2476  // the elements of a symmetric tensor. in other words, it goes
2477  // from the scalar index into the array to a set of indices of
2478  // the tensor
2479  //
2480  // this function is for rank-2 tensors
2481  template <int dim>
2482  constexpr inline DEAL_II_ALWAYS_INLINE TableIndices<2>
2483  unrolled_to_component_indices(const unsigned int i,
2484  const std::integral_constant<int, 2> &)
2485  {
2486  Assert(
2488  ExcIndexRange(
2489  i,
2490  0,
2492  switch (dim)
2493  {
2494  case 1:
2495  {
2496  return {0, 0};
2497  }
2498 
2499  case 2:
2500  {
2501  const TableIndices<2> table[3] = {TableIndices<2>(0, 0),
2502  TableIndices<2>(1, 1),
2503  TableIndices<2>(0, 1)};
2504  return table[i];
2505  }
2506 
2507  case 3:
2508  {
2509  const TableIndices<2> table[6] = {TableIndices<2>(0, 0),
2510  TableIndices<2>(1, 1),
2511  TableIndices<2>(2, 2),
2512  TableIndices<2>(0, 1),
2513  TableIndices<2>(0, 2),
2514  TableIndices<2>(1, 2)};
2515  return table[i];
2516  }
2517 
2518  default:
2519  if (i < dim)
2520  return {i, i};
2521 
2522  for (unsigned int d = 0, c = dim; d < dim; ++d)
2523  for (unsigned int e = d + 1; e < dim; ++e, ++c)
2524  if (c == i)
2525  return {d, e};
2526 
2527  // should never get here:
2528  Assert(false, ExcInternalError());
2529  return {0, 0};
2530  }
2531  }
2532 
2533  // a function to do the inverse of the unrolling from a set of
2534  // indices to a scalar index into the array in which we store
2535  // the elements of a symmetric tensor. in other words, it goes
2536  // from the scalar index into the array to a set of indices of
2537  // the tensor
2538  //
2539  // this function is for tensors of a rank not already handled
2540  // above
2541  template <int dim, int rank_>
2542  constexpr inline
2543  typename std::enable_if<rank_ != 2, TableIndices<rank_>>::type
2544  unrolled_to_component_indices(const unsigned int i,
2545  const std::integral_constant<int, rank_> &)
2546  {
2547  (void)i;
2548  Assert(
2549  (i <
2551  ExcIndexRange(i,
2552  0,
2554  n_independent_components));
2555  Assert(false, ExcNotImplemented());
2556  return TableIndices<rank_>();
2557  }
2558 
2559  } // namespace SymmetricTensorImplementation
2560 } // namespace internal
2561 
2562 template <int rank_, int dim, typename Number>
2565  const unsigned int i)
2566 {
2567  return internal::SymmetricTensorImplementation::unrolled_to_component_indices<
2568  dim>(i, std::integral_constant<int, rank_>());
2569 }
2570 
2571 
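The two static functions are inverses of each other; for dim = 3 the rank-2 numbering is (0,0)->0, (1,1)->1, (2,2)->2, (0,1)->3, (0,2)->4, (1,2)->5, as in the tables above. A minimal sketch:

  #include <deal.II/base/symmetric_tensor.h>
  #include <deal.II/base/table_indices.h>
  using namespace dealii;

  void unrolling_example()
  {
    using S = SymmetricTensor<2, 3>;

    const unsigned int n =
      S::component_to_unrolled_index(TableIndices<2>(1, 2));
    // n == 5: diagonal entries occupy 0..2, then (0,1)->3, (0,2)->4, (1,2)->5

    const TableIndices<2> ij = S::unrolled_to_component_indices(5);
    // ij == (1, 2), i.e. the round trip recovers the sorted index pair
    (void)n;
    (void)ij;
  }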
2572 
2573 template <int rank_, int dim, typename Number>
2574 template <class Archive>
2575 inline void
2576 SymmetricTensor<rank_, dim, Number>::serialize(Archive &ar, const unsigned int)
2577 {
2578  ar &data;
2579 }
2580 
2581 
2582 #endif // DOXYGEN
2583 
2584 /* ----------------- Non-member functions operating on tensors. ------------ */
2585 
2586 
2599 template <int rank_, int dim, typename Number, typename OtherNumber>
2600 constexpr inline DEAL_II_ALWAYS_INLINE
2604 {
2606  tmp = left;
2607  tmp += right;
2608  return tmp;
2609 }
2610 
2611 
2624 template <int rank_, int dim, typename Number, typename OtherNumber>
2625 constexpr inline DEAL_II_ALWAYS_INLINE
2629 {
2631  tmp = left;
2632  tmp -= right;
2633  return tmp;
2634 }
2635 
2636 
2644 template <int rank_, int dim, typename Number, typename OtherNumber>
2645 constexpr DEAL_II_ALWAYS_INLINE
2648  const Tensor<rank_, dim, OtherNumber> & right)
2649 {
2650  return Tensor<rank_, dim, Number>(left) + right;
2651 }
2652 
2653 
2661 template <int rank_, int dim, typename Number, typename OtherNumber>
2662 constexpr DEAL_II_ALWAYS_INLINE
2666 {
2667  return left + Tensor<rank_, dim, OtherNumber>(right);
2668 }
2669 
2670 
2678 template <int rank_, int dim, typename Number, typename OtherNumber>
2679 constexpr DEAL_II_ALWAYS_INLINE
2682  const Tensor<rank_, dim, OtherNumber> & right)
2683 {
2684  return Tensor<rank_, dim, Number>(left) - right;
2685 }
2686 
2687 
2695 template <int rank_, int dim, typename Number, typename OtherNumber>
2696 constexpr DEAL_II_ALWAYS_INLINE
2700 {
2701  return left - Tensor<rank_, dim, OtherNumber>(right);
2702 }
2703 
2704 
2705 
2719 template <int dim, typename Number>
2722 {
2723  switch (dim)
2724  {
2725  case 1:
2726  return t.data[0];
2727  case 2:
2728  return (t.data[0] * t.data[1] - t.data[2] * t.data[2]);
2729  case 3:
2730  {
2731  // in analogy to general tensors, but
2732  // there's something to be simplified for
2733  // the present case
2734  const Number tmp = t.data[3] * t.data[4] * t.data[5];
2735  return (tmp + tmp + t.data[0] * t.data[1] * t.data[2] -
2736  t.data[0] * t.data[5] * t.data[5] -
2737  t.data[1] * t.data[4] * t.data[4] -
2738  t.data[2] * t.data[3] * t.data[3]);
2739  }
2740  default:
2741  Assert(false, ExcNotImplemented());
2742  return internal::NumberType<Number>::value(0.0);
2743  }
2744 }
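
// Illustrative usage sketch (an aside, not part of the header above): the
// determinant of a rank-2 symmetric tensor is computed from the six stored
// entries only. The helper function name below is purely for illustration.

#include <deal.II/base/symmetric_tensor.h>
using namespace dealii;

void determinant_example()
{
  SymmetricTensor<2, 3> t;
  t[0][0] = 2.;
  t[1][1] = 3.;
  t[2][2] = 4.;
  t[0][1] = 1.;                       // the same stored entry serves t[1][0]
  const double det = determinant(t);  // 4 * (2*3 - 1*1) = 20
  (void)det;
}
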
2745 
2746 
2747 
2759 template <int dim, typename Number>
2760 constexpr DEAL_II_ALWAYS_INLINE Number
2761 third_invariant(const SymmetricTensor<2, dim, Number> &t)
2762 {
2763  return determinant(t);
2764 }
2765 
2766 
2767 
2777 template <int dim, typename Number>
2778 constexpr inline DEAL_II_ALWAYS_INLINE Number
2779 trace(const SymmetricTensor<2, dim, Number> &d)
2780 {
2781  Number t = d.data[0];
2782  for (unsigned int i = 1; i < dim; ++i)
2783  t += d.data[i];
2784  return t;
2785 }
2786 
2787 
2799 template <int dim, typename Number>
2800 constexpr Number
2801 first_invariant(const SymmetricTensor<2, dim, Number> &t)
2802 {
2803  return trace(t);
2804 }
2805 
2806 
2818 template <typename Number>
2819 constexpr DEAL_II_ALWAYS_INLINE Number
2820 second_invariant(const SymmetricTensor<2, 1, Number> &)
2821 {
2822  return internal::NumberType<Number>::value(0.0);
2823 }
2824 
2825 
2826 
2845 template <typename Number>
2846 constexpr DEAL_II_ALWAYS_INLINE Number
2848 {
2849  return t[0][0] * t[1][1] - t[0][1] * t[0][1];
2850 }
2851 
2852 
2853 
2862 template <typename Number>
2863 constexpr DEAL_II_ALWAYS_INLINE Number
2864 second_invariant(const SymmetricTensor<2, 3, Number> &t)
2865 {
2866  return (t[0][0] * t[1][1] + t[1][1] * t[2][2] + t[2][2] * t[0][0] -
2867  t[0][1] * t[0][1] - t[0][2] * t[0][2] - t[1][2] * t[1][2]);
2868 }
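
// Illustrative usage sketch (an aside, not part of the header above): in 3d
// the three invariants are the coefficients of the characteristic polynomial
// lambda^3 - I1*lambda^2 + I2*lambda - I3 = 0. Same include/namespace as the
// sketch further up; the helper name is illustrative only.

void invariants_example(const SymmetricTensor<2, 3> &t)
{
  const double I1 = first_invariant(t);   // equals trace(t)
  const double I2 = second_invariant(t);
  const double I3 = third_invariant(t);   // equals determinant(t)
  (void)I1;
  (void)I2;
  (void)I3;
}
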
2869 
2870 
2871 
2879 template <typename Number>
2880 std::array<Number, 1>
2881 eigenvalues(const SymmetricTensor<2, 1, Number> &T);
2882 
2883 
2884 
2907 template <typename Number>
2908 std::array<Number, 2>
2909 eigenvalues(const SymmetricTensor<2, 2, Number> &T);
2910 
2911 
2912 
2935 template <typename Number>
2936 std::array<Number, 3>
2937 eigenvalues(const SymmetricTensor<2, 3, Number> &T);
2938 
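
// Illustrative usage sketch (an aside, not part of the header above):
// eigenvalues() returns the eigenvalues in descending order. Same
// include/namespace as the sketches further up.

void eigenvalues_example()
{
  SymmetricTensor<2, 2> t;
  t[0][0] = 2.;
  t[1][1] = 2.;
  t[0][1] = 1.;
  const std::array<double, 2> lambda = eigenvalues(t);  // {3., 1.}
  (void)lambda;
}
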
2939 
2940 
2941 namespace internal
2942 {
2943  namespace SymmetricTensorImplementation
2944  {
2982  template <int dim, typename Number>
2983  void
2984  tridiagonalize(const ::SymmetricTensor<2, dim, Number> &A,
2985  ::Tensor<2, dim, Number> & Q,
2986  std::array<Number, dim> & d,
2987  std::array<Number, dim - 1> & e);
2988 
2989 
2990 
3030  template <int dim, typename Number>
3031  std::array<std::pair<Number, Tensor<1, dim, Number>>, dim>
3032  ql_implicit_shifts(const ::SymmetricTensor<2, dim, Number> &A);
3033 
3034 
3035 
3075  template <int dim, typename Number>
3076  std::array<std::pair<Number, Tensor<1, dim, Number>>, dim>
3077  jacobi(::SymmetricTensor<2, dim, Number> A);
3078 
3079 
3080 
3094  template <typename Number>
3095  std::array<std::pair<Number, Tensor<1, 2, Number>>, 2>
3096  hybrid(const ::SymmetricTensor<2, 2, Number> &A);
3097 
3098 
3099 
3132  template <typename Number>
3133  std::array<std::pair<Number, Tensor<1, 3, Number>>, 3>
3134  hybrid(const ::SymmetricTensor<2, 3, Number> &A);
3135 
3140  template <int dim, typename Number>
3141  struct SortEigenValuesVectors
3142  {
3143  using EigValsVecs = std::pair<Number, Tensor<1, dim, Number>>;
3144  bool
3145  operator()(const EigValsVecs &lhs, const EigValsVecs &rhs)
3146  {
3147  return lhs.first > rhs.first;
3148  }
3149  };
3150 
3151  } // namespace SymmetricTensorImplementation
3152 
3153 } // namespace internal
3154 
3155 
3156 
3157 // The line below is to ensure that doxygen puts the full description
3158 // of this global enumeration into the documentation
3159 // See https://stackoverflow.com/a/1717984
3188 enum struct SymmetricTensorEigenvectorMethod
3189 {
3199  hybrid,
3209  ql_implicit_shifts,
3217  jacobi
3218 };
3219 
3220 
3221 
3250 template <int dim, typename Number>
3251 std::array<std::pair<Number, Tensor<1, dim, Number>>,
3252  std::integral_constant<int, dim>::value>
3253 eigenvectors(const SymmetricTensor<2, dim, Number> &T,
3254  const SymmetricTensorEigenvectorMethod method =
3255  SymmetricTensorEigenvectorMethod::ql_implicit_shifts);
3256 
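
// Illustrative usage sketch (an aside, not part of the header above):
// eigenvectors() returns eigenvalue/eigenvector pairs sorted by descending
// eigenvalue; the second argument selects the algorithm. Same
// include/namespace as the sketches further up.

void eigenvectors_example(const SymmetricTensor<2, 3> &t)
{
  const auto eig = eigenvectors(t, SymmetricTensorEigenvectorMethod::jacobi);
  const double        lambda_max = eig[0].first;   // largest eigenvalue
  const Tensor<1, 3> &v_max      = eig[0].second;  // corresponding eigenvector
  (void)lambda_max;
  (void)v_max;
}
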
3257 
3258 
3267 template <int rank_, int dim, typename Number>
3268 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3269 transpose(const SymmetricTensor<rank_, dim, Number> &t)
3270 {
3271  return t;
3272 }
3273 
3274 
3275 
3286 template <int dim, typename Number>
3287 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3288 deviator(const SymmetricTensor<2, dim, Number> &t)
3289 {
3290  SymmetricTensor<2, dim, Number> tmp = t;
3291 
3292  // subtract scaled trace from the diagonal
3293  const Number tr = trace(t) / dim;
3294  for (unsigned int i = 0; i < dim; ++i)
3295  tmp.data[i] -= tr;
3296 
3297  return tmp;
3298 }
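
// Illustrative usage sketch (an aside, not part of the header above): the
// deviator of a tensor is trace-free up to round-off. Same include/namespace
// as the sketches further up.

void deviator_example(const SymmetricTensor<2, 3> &t)
{
  const SymmetricTensor<2, 3> dev = deviator(t);
  const double                tr  = trace(dev);  // zero up to round-off
  (void)tr;
}
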
3299 
3300 
3301 
3302 template <int dim, typename Number>
3303 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3304 unit_symmetric_tensor()
3305 {
3306  // create a default constructed matrix filled with
3307  // zeros, then set the diagonal elements to one
3308  SymmetricTensor<2, dim, Number> tmp;
3309  switch (dim)
3310  {
3311  case 1:
3312  tmp.data[0] = internal::NumberType<Number>::value(1.);
3313  break;
3314  case 2:
3315  tmp.data[0] = tmp.data[1] = internal::NumberType<Number>::value(1.);
3316  break;
3317  case 3:
3318  tmp.data[0] = tmp.data[1] = tmp.data[2] =
3319  internal::NumberType<Number>::value(1.);
3320  break;
3321  default:
3322  for (unsigned int d = 0; d < dim; ++d)
3323  tmp.data[d] = internal::NumberType<Number>::value(1.);
3324  }
3325  return tmp;
3326 }
3327 
3328 
3329 
3330 template <int dim, typename Number>
3331 constexpr inline SymmetricTensor<4, dim, Number>
3332 deviator_tensor()
3333 {
3334  SymmetricTensor<4, dim, Number> tmp;
3335 
3336  // fill the elements treating the diagonal
3337  for (unsigned int i = 0; i < dim; ++i)
3338  for (unsigned int j = 0; j < dim; ++j)
3339  tmp.data[i][j] =
3340  internal::NumberType<Number>::value((i == j ? 1. : 0.) - 1. / dim);
3341 
3342  // then fill the ones that copy over the
3343  // non-diagonal elements. note that during
3344  // the double-contraction, we handle the
3345  // off-diagonal elements twice, so simply
3346  // copying requires a weight of 1/2
3347  for (unsigned int i = dim;
3348  i < internal::SymmetricTensorAccessors::StorageType<4, dim, Number>::
3349  n_rank2_components;
3350  ++i)
3351  tmp.data[i][i] = internal::NumberType<Number>::value(0.5);
3352 
3353  return tmp;
3354 }
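
// Illustrative usage sketch (an aside, not part of the header above): double
// contraction with the rank-4 deviator_tensor() reproduces deviator(). Same
// include/namespace as the sketches further up.

void deviator_tensor_example(const SymmetricTensor<2, 3> &t)
{
  const SymmetricTensor<4, 3> P = deviator_tensor<3>();
  const SymmetricTensor<2, 3> a = P * t;        // double contraction
  const SymmetricTensor<2, 3> b = deviator(t);  // same entries as 'a'
  (void)a;
  (void)b;
}
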
3355 
3356 
3357 
3358 template <int dim, typename Number>
3359 constexpr inline SymmetricTensor<4, dim, Number>
3360 identity_tensor()
3361 {
3362  SymmetricTensor<4, dim, Number> tmp;
3363 
3364  // fill the elements treating the diagonal
3365  for (unsigned int i = 0; i < dim; ++i)
3366  tmp.data[i][i] = internal::NumberType<Number>::value(1.);
3367 
3368  // then fill the ones that copy over the
3369  // non-diagonal elements. note that during
3370  // the double-contraction, we handle the
3371  // off-diagonal elements twice, so simply
3372  // copying requires a weight of 1/2
3373  for (unsigned int i = dim;
3374  i < internal::SymmetricTensorAccessors::StorageType<4, dim, Number>::
3375  n_rank2_components;
3376  ++i)
3377  tmp.data[i][i] = internal::NumberType<Number>::value(0.5);
3378 
3379  return tmp;
3380 }
3381 
3382 
3383 
3393 template <int dim, typename Number>
3394 constexpr SymmetricTensor<2, dim, Number>
3395 invert(const SymmetricTensor<2, dim, Number> &t)
3396 {
3397  return internal::SymmetricTensorImplementation::Inverse<2, dim, Number>::
3398  value(t);
3399 }
3400 
3401 
3402 
3413 template <int dim, typename Number>
3414 constexpr SymmetricTensor<4, dim, Number>
3415 invert(const SymmetricTensor<4, dim, Number> &t)
3416 {
3417  return internal::SymmetricTensorImplementation::Inverse<4, dim, Number>::
3418  value(t);
3419 }
3420 
3421 
3422 
3444 template <int dim, typename Number>
3445 constexpr inline SymmetricTensor<4, dim, Number>
3446 outer_product(const SymmetricTensor<2, dim, Number> &t1,
3447  const SymmetricTensor<2, dim, Number> &t2)
3448 {
3449  SymmetricTensor<4, dim, Number> tmp;
3450 
3451  // fill only the elements really needed
3452  for (unsigned int i = 0; i < dim; ++i)
3453  for (unsigned int j = i; j < dim; ++j)
3454  for (unsigned int k = 0; k < dim; ++k)
3455  for (unsigned int l = k; l < dim; ++l)
3456  tmp[i][j][k][l] = t1[i][j] * t2[k][l];
3457 
3458  return tmp;
3459 }
3460 
3461 
3462 
3470 template <int dim, typename Number>
3471 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<2, dim, Number>
3472 symmetrize(const Tensor<2, dim, Number> &t)
3473 {
3474  SymmetricTensor<2, dim, Number> result;
3475  for (unsigned int d = 0; d < dim; ++d)
3476  result[d][d] = t[d][d];
3477 
3478  const Number half = internal::NumberType<Number>::value(0.5);
3479  for (unsigned int d = 0; d < dim; ++d)
3480  for (unsigned int e = d + 1; e < dim; ++e)
3481  result[d][e] = (t[d][e] + t[e][d]) * half;
3482  return result;
3483 }
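
// Illustrative usage sketch (an aside, not part of the header above):
// symmetrize() returns (F + F^T)/2 stored as a SymmetricTensor; the Tensor
// class is already available through this header's own includes. Same
// include/namespace as the sketches further up.

void symmetrize_example()
{
  Tensor<2, 2> F;
  F[0][0] = 1.;
  F[0][1] = 2.;
  F[1][0] = 4.;
  F[1][1] = 3.;
  const SymmetricTensor<2, 2> S = symmetrize(F);  // S[0][1] == 3.
  (void)S;
}
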
3484 
3485 
3486 
3494 template <int rank_, int dim, typename Number>
3495 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3496 operator*(const SymmetricTensor<rank_, dim, Number> &t, const Number &factor)
3497 {
3498  SymmetricTensor<rank_, dim, Number> tt = t;
3499  tt *= factor;
3500  return tt;
3501 }
3502 
3503 
3504 
3512 template <int rank_, int dim, typename Number>
3513 constexpr DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim, Number>
3514 operator*(const Number &factor, const SymmetricTensor<rank_, dim, Number> &t)
3515 {
3516  // simply forward to the other operator
3517  return t * factor;
3518 }
3519 
3520 
3521 
3547 template <int rank_, int dim, typename Number, typename OtherNumber>
3548 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<
3549  rank_,
3550  dim,
3551  typename ProductType<Number,
3552  typename EnableIfScalar<OtherNumber>::type>::type>
3553 operator*(const SymmetricTensor<rank_, dim, Number> &t,
3554  const OtherNumber &                        factor)
3555 {
3556  // form the product. we have to convert the two factors into the final
3557  // type via explicit casts because, for awkward reasons, the C++
3558  // standard committee saw it fit to not define an
3559  // operator*(float,std::complex<double>)
3560  // (as well as with switched arguments and double<->float).
3561  using product_type = typename ProductType<Number, OtherNumber>::type;
3562  SymmetricTensor<rank_, dim, product_type> tt(t);
3563  tt *= product_type(factor);
3564  return tt;
3565 }
3566 
3567 
3568 
3577 template <int rank_, int dim, typename Number, typename OtherNumber>
3578 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<
3579  rank_,
3580  dim,
3581  typename ProductType<OtherNumber,
3582  typename EnableIfScalar<Number>::type>::type>
3583 operator*(const Number &                                  factor,
3584  const SymmetricTensor<rank_, dim, OtherNumber> &t)
3585 {
3586  // simply forward to the other operator with switched arguments
3587  return (t * factor);
3588 }
3589 
3590 
3591 
3597 template <int rank_, int dim, typename Number, typename OtherNumber>
3598 constexpr inline SymmetricTensor<
3599  rank_,
3600  dim,
3601  typename ProductType<Number,
3602  typename EnableIfScalar<OtherNumber>::type>::type>
3603 operator/(const SymmetricTensor<rank_, dim, Number> &t,
3604  const OtherNumber &                        factor)
3605 {
3606  using product_type = typename ProductType<Number, OtherNumber>::type;
3607  SymmetricTensor<rank_, dim, product_type> tt(t);
3608  tt /= product_type(factor);
3609  return tt;
3610 }
3611 
3612 
3613 
3620 template <int rank_, int dim>
3621 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim>
3622 operator*(const SymmetricTensor<rank_, dim> &t, const double factor)
3623 {
3624  SymmetricTensor<rank_, dim> tt(t);
3625  tt *= factor;
3626  return tt;
3627 }
3628 
3629 
3630 
3637 template <int rank_, int dim>
3638 constexpr inline DEAL_II_ALWAYS_INLINE SymmetricTensor<rank_, dim>
3639 operator*(const double factor, const SymmetricTensor<rank_, dim> &t)
3640 {
3641  SymmetricTensor<rank_, dim> tt(t);
3642  tt *= factor;
3643  return tt;
3644 }
3645 
3646 
3647 
3653 template <int rank_, int dim>
3654 constexpr inline SymmetricTensor<rank_, dim>
3655 operator/(const SymmetricTensor<rank_, dim> &t, const double factor)
3656 {
3657  SymmetricTensor<rank_, dim> tt(t);
3658  tt /= factor;
3659  return tt;
3660 }
3661 
3671 template <int dim, typename Number, typename OtherNumber>
3672 constexpr DEAL_II_ALWAYS_INLINE typename ProductType<Number, OtherNumber>::type
3673 scalar_product(const SymmetricTensor<2, dim, Number> &     t1,
3674  const SymmetricTensor<2, dim, OtherNumber> &t2)
3675 {
3676  return (t1 * t2);
3677 }
3678 
3679 
3693 template <int dim, typename Number, typename OtherNumber>
3694 constexpr inline DEAL_II_ALWAYS_INLINE
3695  typename ProductType<Number, OtherNumber>::type
3696  scalar_product(const SymmetricTensor<2, dim, Number> &t1,
3697  const Tensor<2, dim, OtherNumber> &     t2)
3698 {
3699  typename ProductType<Number, OtherNumber>::type s = internal::NumberType<
3700  typename ProductType<Number, OtherNumber>::type>::value(0.0);
3701  for (unsigned int i = 0; i < dim; ++i)
3702  for (unsigned int j = 0; j < dim; ++j)
3703  s += t1[i][j] * t2[i][j];
3704  return s;
3705 }
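
// Illustrative usage sketch (an aside, not part of the header above):
// scalar_product(a, b) is the full double contraction sum_ij a_ij*b_ij, so
// off-diagonal entries of symmetric arguments count twice. Same
// include/namespace as the sketches further up.

void scalar_product_example()
{
  SymmetricTensor<2, 2> a;
  a[0][0] = 1.;
  a[1][1] = 2.;
  a[0][1] = 3.;
  const double s = scalar_product(a, a);  // 1 + 4 + 2*9 = 23
  (void)s;
}
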
3706 
3707 
3721 template <int dim, typename Number, typename OtherNumber>
3722 constexpr DEAL_II_ALWAYS_INLINE typename ProductType<Number, OtherNumber>::type
3723 scalar_product(const Tensor<2, dim, Number> &              t1,
3724  const SymmetricTensor<2, dim, OtherNumber> &t2)
3725 {
3726  return scalar_product(t2, t1);
3727 }
3728 
3729 
3744 template <typename Number, typename OtherNumber>
3745 constexpr inline DEAL_II_ALWAYS_INLINE void
3746 double_contract(
3747  SymmetricTensor<2, 1, typename ProductType<Number, OtherNumber>::type> &tmp,
3748  const SymmetricTensor<4, 1, Number> &                                   t,
3749  const SymmetricTensor<2, 1, OtherNumber> &                              s)
3750 {
3751  tmp[0][0] = t[0][0][0][0] * s[0][0];
3752 }
3753 
3754 
3755 
3770 template <typename Number, typename OtherNumber>
3771 constexpr inline void
3772 double_contract(
3773  SymmetricTensor<2, 1, typename ProductType<Number, OtherNumber>::type> &tmp,
3774  const SymmetricTensor<2, 1, Number> &                                   s,
3775  const SymmetricTensor<4, 1, OtherNumber> &                              t)
3776 {
3777  tmp[0][0] = t[0][0][0][0] * s[0][0];
3778 }
3779 
3780 
3781 
3796 template <typename Number, typename OtherNumber>
3797 constexpr inline void
3798 double_contract(
3799  SymmetricTensor<2, 2, typename ProductType<Number, OtherNumber>::type> &tmp,
3800  const SymmetricTensor<4, 2, Number> &                                   t,
3801  const SymmetricTensor<2, 2, OtherNumber> &                              s)
3802 {
3803  const unsigned int dim = 2;
3804 
3805  for (unsigned int i = 0; i < dim; ++i)
3806  for (unsigned int j = i; j < dim; ++j)
3807  tmp[i][j] = t[i][j][0][0] * s[0][0] + t[i][j][1][1] * s[1][1] +
3808  2 * t[i][j][0][1] * s[0][1];
3809 }
3810 
3811 
3812 
3827 template <typename Number, typename OtherNumber>
3828 constexpr inline void
3829 double_contract(
3830  SymmetricTensor<2, 2, typename ProductType<Number, OtherNumber>::type> &tmp,
3831  const SymmetricTensor<2, 2, Number> &                                   s,
3832  const SymmetricTensor<4, 2, OtherNumber> &                              t)
3833 {
3834  const unsigned int dim = 2;
3835 
3836  for (unsigned int i = 0; i < dim; ++i)
3837  for (unsigned int j = i; j < dim; ++j)
3838  tmp[i][j] = s[0][0] * t[0][0][i][j] + s[1][1] * t[1][1][i][j] +
3839  2 * s[0][1] * t[0][1][i][j];
3840 }
3841 
3842 
3843 
3858 template <typename Number, typename OtherNumber>
3859 constexpr inline void
3860 double_contract(
3861  SymmetricTensor<2, 3, typename ProductType<Number, OtherNumber>::type> &tmp,
3862  const SymmetricTensor<4, 3, Number> &                                   t,
3863  const SymmetricTensor<2, 3, OtherNumber> &                              s)
3864 {
3865  const unsigned int dim = 3;
3866 
3867  for (unsigned int i = 0; i < dim; ++i)
3868  for (unsigned int j = i; j < dim; ++j)
3869  tmp[i][j] = t[i][j][0][0] * s[0][0] + t[i][j][1][1] * s[1][1] +
3870  t[i][j][2][2] * s[2][2] + 2 * t[i][j][0][1] * s[0][1] +
3871  2 * t[i][j][0][2] * s[0][2] + 2 * t[i][j][1][2] * s[1][2];
3872 }
3873 
3874 
3875 
3890 template <typename Number, typename OtherNumber>
3891 constexpr inline void
3892 double_contract(
3893  SymmetricTensor<2, 3, typename ProductType<Number, OtherNumber>::type> &tmp,
3894  const SymmetricTensor<2, 3, Number> &                                   s,
3895  const SymmetricTensor<4, 3, OtherNumber> &                              t)
3896 {
3897  const unsigned int dim = 3;
3898 
3899  for (unsigned int i = 0; i < dim; ++i)
3900  for (unsigned int j = i; j < dim; ++j)
3901  tmp[i][j] = s[0][0] * t[0][0][i][j] + s[1][1] * t[1][1][i][j] +
3902  s[2][2] * t[2][2][i][j] + 2 * s[0][1] * t[0][1][i][j] +
3903  2 * s[0][2] * t[0][2][i][j] + 2 * s[1][2] * t[1][2][i][j];
3904 }
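
// Illustrative usage sketch (an aside, not part of the header above): the
// double_contract() overloads compute tmp_ij = t_ijkl * s_kl in place, the
// typical "stress = C : strain" operation. Same include/namespace as the
// sketches further up.

void double_contract_example(const SymmetricTensor<4, 3> &C,
                             const SymmetricTensor<2, 3> &eps)
{
  SymmetricTensor<2, 3> sigma;
  double_contract(sigma, C, eps);  // equivalent to sigma = C * eps
  (void)sigma;
}
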
3905 
3906 
3907 
3914 template <int dim, typename Number, typename OtherNumber>
3915 constexpr Tensor<1, dim, typename ProductType<Number, OtherNumber>::type>
3916 operator*(const SymmetricTensor<2, dim, Number> &src1,
3917  const Tensor<1, dim, OtherNumber> &     src2)
3918 {
3919  Tensor<1, dim, typename ProductType<Number, OtherNumber>::type> dest;
3920  for (unsigned int i = 0; i < dim; ++i)
3921  for (unsigned int j = 0; j < dim; ++j)
3922  dest[i] += src1[i][j] * src2[j];
3923  return dest;
3924 }
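
// Illustrative usage sketch (an aside, not part of the header above):
// contracting a symmetric rank-2 tensor with a vector yields an ordinary
// Tensor<1,dim>, e.g. the traction sigma*n on a surface with normal n. Same
// include/namespace as the sketches further up.

void traction_example(const SymmetricTensor<2, 3> &sigma,
                      const Tensor<1, 3> &         n)
{
  const Tensor<1, 3> traction = sigma * n;
  (void)traction;
}
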
3925 
3926 
3933 template <int dim, typename Number, typename OtherNumber>
3934 constexpr Tensor<1, dim, typename ProductType<Number, OtherNumber>::type>
3935 operator*(const Tensor<1, dim, Number> &              src1,
3936  const SymmetricTensor<2, dim, OtherNumber> &src2)
3937 {
3938  // this is easy for symmetric tensors:
3939  return src2 * src1;
3940 }
3941 
3942 
3943 
3963 template <int rank_1,
3964  int rank_2,
3965  int dim,
3966  typename Number,
3967  typename OtherNumber>
3968 constexpr DEAL_II_ALWAYS_INLINE
3969  typename Tensor<rank_1 + rank_2 - 2,
3970  dim,
3971  typename ProductType<Number, OtherNumber>::type>::tensor_type
3972  operator*(const Tensor<rank_1, dim, Number> &              src1,
3973  const SymmetricTensor<rank_2, dim, OtherNumber> &src2)
3974 {
3975  return src1 * Tensor<rank_2, dim, OtherNumber>(src2);
3976 }
3977 
3978 
3979 
3999 template <int rank_1,
4000  int rank_2,
4001  int dim,
4002  typename Number,
4003  typename OtherNumber>
4004 constexpr DEAL_II_ALWAYS_INLINE
4005  typename Tensor<rank_1 + rank_2 - 2,
4006  dim,
4007  typename ProductType<Number, OtherNumber>::type>::tensor_type
4008  operator*(const SymmetricTensor<rank_1, dim, Number> &src1,
4009  const Tensor<rank_2, dim, OtherNumber> &         src2)
4010 {
4011  return Tensor<rank_1, dim, Number>(src1) * src2;
4012 }
4013 
4014 
4015 
4025 template <int dim, typename Number>
4026 inline std::ostream &
4027 operator<<(std::ostream &out, const SymmetricTensor<2, dim, Number> &t)
4028 {
4029  // make our lives a bit simpler by outputting
4030  // the tensor through the operator for the
4031  // general Tensor class
4032  Tensor<2, dim, Number> tt;
4033 
4034  for (unsigned int i = 0; i < dim; ++i)
4035  for (unsigned int j = 0; j < dim; ++j)
4036  tt[i][j] = t[i][j];
4037 
4038  return out << tt;
4039 }
4040 
4041 
4042 
4052 template <int dim, typename Number>
4053 inline std::ostream &
4054 operator<<(std::ostream &out, const SymmetricTensor<4, dim, Number> &t)
4055 {
4056  // make our lives a bit simpler by outputting
4057  // the tensor through the operator for the
4058  // general Tensor class
4059  Tensor<4, dim, Number> tt;
4060 
4061  for (unsigned int i = 0; i < dim; ++i)
4062  for (unsigned int j = 0; j < dim; ++j)
4063  for (unsigned int k = 0; k < dim; ++k)
4064  for (unsigned int l = 0; l < dim; ++l)
4065  tt[i][j][k][l] = t[i][j][k][l];
4066 
4067  return out << tt;
4068 }
4069 
4070 
4071 DEAL_II_NAMESPACE_CLOSE
4072 
4073 #endif