Reference documentation for deal.II version Git f0d1c24e5f 2021-10-18 08:09:39 -0400
symengine_optimizer.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2020 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_differentiation_sd_symengine_optimizer_h
17 #define dealii_differentiation_sd_symengine_optimizer_h
18 
19 #include <deal.II/base/config.h>
20 
21 #ifdef DEAL_II_WITH_SYMENGINE
22 
23 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
24 // Low level
25 # include <symengine/basic.h>
26 # include <symengine/dict.h>
27 # include <symengine/symengine_exception.h>
28 # include <symengine/symengine_rcp.h>
29 
30 // Optimization
31 # include <symengine/lambda_double.h>
32 # include <symengine/visitor.h>
33 # ifdef HAVE_SYMENGINE_LLVM
34 # include <symengine/llvm_double.h>
35 # endif
36 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
37 
38 # include <deal.II/base/exceptions.h>
39 # include <deal.II/base/logstream.h>
40 # include <deal.II/base/utilities.h>
41 
47 
48 # include <boost/serialization/split_member.hpp>
49 # include <boost/type_traits.hpp>
50 
51 # include <algorithm>
52 # include <map>
53 # include <memory>
54 # include <type_traits>
55 # include <utility>
56 # include <vector>
57 
58 
59 DEAL_II_NAMESPACE_OPEN
60 
61 
62 namespace Differentiation
63 {
64  namespace SD
65  {
75  DeclExceptionMsg(ExcSymEngineLLVMNotAvailable,
76  "SymEngine has not been built with LLVM support.");
77 
82  DeclExceptionMsg(ExcSymEngineLLVMReturnTypeNotSupported,
83  "The SymEngine LLVM optimizer does not (yet) support the "
84  "selected return type.");
85 
87 
88 
89  // Forward declarations
90  template <typename ReturnType>
91  class BatchOptimizer;
92 
93 
99  enum class OptimizerType
100  {
104  dictionary,
109  lambda,
114  llvm
115  };
116 
117 
121  template <class StreamType>
122  inline StreamType &
123  operator<<(StreamType &s, OptimizerType o)
124  {
125  if (o == OptimizerType::dictionary)
126  s << "dictionary";
127  else if (o == OptimizerType::lambda)
128  s << "lambda";
129  else if (o == OptimizerType::llvm)
130  s << "llvm";
131  else
132  {
133  Assert(false, ExcMessage("Unknown optimization method."));
134  }
135 
136  return s;
137  }
138 
139 
145  enum class OptimizationFlags : unsigned char
146  {
150  optimize_default = 0,
154  optimize_cse = 0x0001,
159  optimize_aggressive = 0x0002,
163  optimize_all = optimize_cse | optimize_aggressive
164  };
165 
166 
175  // This operator exists since if it did not then the result of the bit-or
176  // <tt>operator |</tt> would be an integer which would in turn trigger a
177  // compiler warning when we tried to assign it to an object of type
178  // OptimizationFlags.
179  inline OptimizationFlags
180  operator|(const OptimizationFlags f1, const OptimizationFlags f2)
181  {
182  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) |
183  static_cast<unsigned int>(f2));
184  }
185 
186 
191  inline OptimizationFlags &
192  operator|=(OptimizationFlags &f1, const OptimizationFlags f2)
193  {
194  f1 = f1 | f2;
195  return f1;
196  }
197 
198 
207  // This operator exists since if it did not then the result of the bit-and
208  // <tt>operator &</tt> would be an integer which would in turn trigger a
209  // compiler warning when we tried to assign it to an object of type
210  // OptimizationFlags.
211  inline OptimizationFlags
212  operator&(const OptimizationFlags f1, const OptimizationFlags f2)
213  {
214  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) &
215  static_cast<unsigned int>(f2));
216  }
217 
218 
223  inline OptimizationFlags &
224  operator&=(OptimizationFlags &f1, const OptimizationFlags f2)
225  {
226  f1 = f1 & f2;
227  return f1;
228  }
229 
230 
231  namespace internal
232  {
237  inline bool
238  use_symbolic_CSE(const enum OptimizationFlags &flags)
239  {
240  return static_cast<int>(flags & OptimizationFlags::optimize_cse);
241  }
242 
247  inline int
248  get_LLVM_optimization_level(const enum OptimizationFlags &flags)
249  {
250  // With the LLVM compiler there exists the opportunity to tune
251  // the level of optimizations performed during compilation.
252  // By default SymEngine sets this at "opt_level=2", which one
253  // presumes targets -O2. Here we are a bit more specific about
254  // what we want it to do:
255  // - Normal compilation: -O2 (default settings)
256  // - Aggressive mode: -O3 (the whole lot!)
257  // In theory we could also target
258  // - Debug mode: -O0 (no optimizations)
259  // but this doesn't make much sense since SymEngine is a
260  // tested external library.
261  const bool use_agg_opt =
262  static_cast<int>(flags & OptimizationFlags::optimize_aggressive);
263  const int opt_level = (use_agg_opt ? 3 : 2);
264  return opt_level;
265  }
266  } // namespace internal
267 
268 
273  template <class StreamType>
274  inline StreamType &
275  operator<<(StreamType &s, OptimizationFlags o)
276  {
277  s << " OptimizationFlags|";
278  if (static_cast<unsigned int>(o & OptimizationFlags::optimize_cse))
279  s << "cse|";
280 
281  // LLVM optimization level
282  s << "-O" +
283  Utilities::to_string(
284  internal::get_LLVM_optimization_level(o)) +
285  "|";
286 
287  return s;
288  }
289 
290 
291  namespace internal
292  {
302  template <typename ReturnType, typename T = void>
303  struct DictionaryOptimizer;
304 
305 
315  template <typename ReturnType, typename T = void>
316  struct LambdaOptimizer;
317 
318 
319 # ifdef HAVE_SYMENGINE_LLVM
320 
329  template <typename ReturnType, typename T = void>
330  struct LLVMOptimizer;
331 # endif // HAVE_SYMENGINE_LLVM
332 
333 
349  template <typename ReturnType, typename Optimizer, typename T = void>
350  struct OptimizerHelper;
351 
352 
353 # ifndef DOXYGEN
354 
355 
356  /* ----------- Specializations for the Optimizers ----------- */
357 
358 
359  // A helper struct to aid with type trait detection for the optimizers that
360  // will be defined next.
361  template <typename ReturnType_, typename T = void>
362  struct SupportedOptimizerTypeTraits
363  {
364  static const bool is_supported = false;
365 
366  using ReturnType = void;
367  };
368 
369 
370 
371  // Specialization for arithmetic types
372  template <typename ReturnType_>
373  struct SupportedOptimizerTypeTraits<
374  ReturnType_,
375  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
376  {
377  static const bool is_supported = true;
378 
379  using ReturnType =
380  typename std::conditional<std::is_same<ReturnType_, float>::value,
381  float,
382  double>::type;
383  };
384 
385 
386 
387  // Specialization for complex arithmetic types
388  template <typename ReturnType_>
389  struct SupportedOptimizerTypeTraits<
390  ReturnType_,
391  typename std::enable_if<
392  boost::is_complex<ReturnType_>::value &&
393  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
394  {
395  static const bool is_supported = true;
396 
397  using ReturnType = typename std::conditional<
398  std::is_same<ReturnType_, std::complex<float>>::value,
399  std::complex<float>,
400  std::complex<double>>::type;
401  };
402 
403 
404 
405  template <typename ReturnType_>
406  struct DictionaryOptimizer<
407  ReturnType_,
408  typename std::enable_if<
409  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
410  {
411  using ReturnType =
412  typename SupportedOptimizerTypeTraits<ReturnType_>::ReturnType;
413  using OptimizerType =
414  internal::DictionarySubstitutionVisitor<ReturnType, SD::Expression>;
415 
416 
425  static void
426  initialize(OptimizerType & optimizer,
427  const SymEngine::vec_basic & independent_symbols,
428  const SymEngine::vec_basic & dependent_functions,
429  const enum OptimizationFlags &optimization_flags)
430  {
431  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
432  optimizer.init(independent_symbols,
433  dependent_functions,
434  use_symbolic_cse);
435  }
436 
437 
438 
443  template <class Archive>
444  static void
445  save(Archive & archive,
446  const unsigned int version,
447  OptimizerType & optimizer)
448  {
449  optimizer.save(archive, version);
450  }
451 
452 
453 
458  template <class Archive>
459  static void
460  load(Archive & archive,
461  const unsigned int version,
462  OptimizerType & optimizer,
463  const SymEngine::vec_basic & /*independent_symbols*/,
464  const SymEngine::vec_basic & /*dependent_functions*/,
465  const enum OptimizationFlags & /*optimization_flags*/)
466  {
467  optimizer.load(archive, version);
468  }
469 
470 
471 
487  template <typename Stream>
488  static void
489  print(Stream & stream,
490  const OptimizerType &optimizer,
491  const bool print_independent_symbols = false,
492  const bool print_dependent_functions = false,
493  const bool print_cse_reductions = true)
494  {
495  optimizer.print(stream,
496  print_independent_symbols,
497  print_dependent_functions,
498  print_cse_reductions);
499  }
500  };
501 
502 
503 
504  template <typename ReturnType_>
505  struct LambdaOptimizer<
506  ReturnType_,
507  typename std::enable_if<
508  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
509  {
510  using ReturnType =
511  typename std::conditional<!boost::is_complex<ReturnType_>::value,
512  double,
513  std::complex<double>>::type;
514  using OptimizerType = typename std::conditional<
515  !boost::is_complex<ReturnType_>::value,
516  SymEngine::LambdaRealDoubleVisitor,
517  SymEngine::LambdaComplexDoubleVisitor>::type;
518 
519 
528  static void
529  initialize(OptimizerType & optimizer,
530  const SymEngine::vec_basic & independent_symbols,
531  const SymEngine::vec_basic & dependent_functions,
532  const enum OptimizationFlags &optimization_flags)
533  {
534  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
535  optimizer.init(independent_symbols,
536  dependent_functions,
537  use_symbolic_cse);
538  }
539 
540 
541 
546  template <class Archive>
547  static void
548  save(Archive & /*archive*/,
549  const unsigned int /*version*/,
550  OptimizerType & /*optimizer*/)
551  {}
552 
553 
558  template <class Archive>
559  static void
560  load(Archive & /*archive*/,
561  const unsigned int /*version*/,
562  OptimizerType & optimizer,
563  const SymEngine::vec_basic & independent_symbols,
564  const SymEngine::vec_basic & dependent_functions,
565  const enum OptimizationFlags &optimization_flags)
566  {
567  initialize(optimizer,
568  independent_symbols,
569  dependent_functions,
570  optimization_flags);
571  }
572 
573 
574 
590  template <typename StreamType>
591  static void
592  print(StreamType & /*stream*/,
593  const OptimizerType & /*optimizer*/,
594  const bool /*print_independent_symbols*/ = false,
595  const bool /*print_dependent_functions*/ = false,
596  const bool /*print_cse_reductions*/ = true)
597  {
598  // No built-in print function
599  }
600  };
601 
602 
603 
604 # ifdef HAVE_SYMENGINE_LLVM
605  template <typename ReturnType_>
606  struct LLVMOptimizer<
607  ReturnType_,
608  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
609  {
610  using ReturnType =
611  typename std::conditional<std::is_same<ReturnType_, float>::value,
612  float,
613  double>::type;
614  using OptimizerType =
615  typename std::conditional<std::is_same<ReturnType_, float>::value,
616  SymEngine::LLVMFloatVisitor,
617  SymEngine::LLVMDoubleVisitor>::type;
618 
623  static const bool supported_by_LLVM = true;
624 
625 
634  static void
635  initialize(OptimizerType & optimizer,
636  const SymEngine::vec_basic & independent_symbols,
637  const SymEngine::vec_basic & dependent_functions,
638  const enum OptimizationFlags &optimization_flags)
639  {
640  const int opt_level = get_LLVM_optimization_level(optimization_flags);
641  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
642  optimizer.init(independent_symbols,
643  dependent_functions,
644  use_symbolic_cse,
645  opt_level);
646  }
647 
648 
649 
654  template <class Archive>
655  static void
656  save(Archive &archive,
657  const unsigned int /*version*/,
658  OptimizerType &optimizer)
659  {
660  const std::string llvm_compiled_function = optimizer.dumps();
661  archive & llvm_compiled_function;
662  }
663 
664 
665 
670  template <class Archive>
671  static void
672  load(Archive &archive,
673  const unsigned int /*version*/,
674  OptimizerType &optimizer,
675  const SymEngine::vec_basic & /*independent_symbols*/,
676  const SymEngine::vec_basic & /*dependent_functions*/,
677  const enum OptimizationFlags & /*optimization_flags*/)
678  {
679  std::string llvm_compiled_function;
680  archive & llvm_compiled_function;
681  optimizer.loads(llvm_compiled_function);
682  }
683 
684 
685 
701  template <typename StreamType>
702  static void
703  print(StreamType & /*stream*/,
704  const OptimizerType & /*optimizer*/,
705  const bool /*print_independent_symbols*/ = false,
706  const bool /*print_dependent_functions*/ = false,
707  const bool /*print_cse_reductions*/ = true)
708  {
709  // No built-in print function
710  }
711  };
712 
713 
714  // There is no LLVM optimizer built with complex number support.
715  // So we fall back to the LambdaDouble case as a type (required
716  // at compile time), but offer no implementation. We expect that
717  // the calling class does not create this type: This can be done by
718  // checking the `supported_by_LLVM` flag.
719  template <typename ReturnType_>
720  struct LLVMOptimizer<
721  ReturnType_,
722  typename std::enable_if<
723  boost::is_complex<ReturnType_>::value &&
724  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
725  {
726  // Since there is no working implementation, these are dummy types
727  // that help with templating in the calling function.
728  using ReturnType = typename LambdaOptimizer<ReturnType_>::ReturnType;
729  using OptimizerType =
730  typename LambdaOptimizer<ReturnType_>::OptimizerType;
731 
736  static const bool supported_by_LLVM = false;
737 
738 
747  static void
748  initialize(OptimizerType & /*optimizer*/,
749  const SymEngine::vec_basic & /*independent_symbols*/,
750  const SymEngine::vec_basic & /*dependent_functions*/,
751  const enum OptimizationFlags & /*optimization_flags*/)
752  {
753  AssertThrow(false, ExcNotImplemented());
754  }
755 
756 
757 
762  template <class Archive>
763  static void
764  save(Archive & /*archive*/,
765  const unsigned int /*version*/,
766  OptimizerType & /*optimizer*/)
767  {
768  AssertThrow(false, ExcNotImplemented());
769  }
770 
771 
772 
777  template <class Archive>
778  static void
779  load(Archive & /*archive*/,
780  const unsigned int /*version*/,
781  OptimizerType & /*optimizer*/,
782  const SymEngine::vec_basic & /*independent_symbols*/,
783  const SymEngine::vec_basic & /*dependent_functions*/,
784  const enum OptimizationFlags & /*optimization_flags*/)
785  {
786  AssertThrow(false, ExcNotImplemented());
787  }
788 
789 
790 
806  template <typename StreamType>
807  static void
808  print(StreamType & /*stream*/,
809  const OptimizerType & /*optimizer*/,
810  const bool /*print_independent_symbols*/ = false,
811  const bool /*print_dependent_functions*/ = false,
812  const bool /*print_cse_reductions*/ = true)
813  {
814  AssertThrow(false, ExcNotImplemented());
815  }
816  };
817 # endif // HAVE_SYMENGINE_LLVM
818 
819 
820  /* ----------- Specializations for OptimizerHelper ----------- */
821 
822 
823  template <typename ReturnType, typename Optimizer>
824  struct OptimizerHelper<ReturnType,
825  Optimizer,
826  typename std::enable_if<std::is_same<
827  ReturnType,
828  typename Optimizer::ReturnType>::value>::type>
829  {
838  static void
839  initialize(typename Optimizer::OptimizerType *optimizer,
840  const SymEngine::vec_basic & independent_symbols,
841  const SymEngine::vec_basic & dependent_functions,
842  const enum OptimizationFlags & optimization_flags)
843  {
844  Assert(optimizer, ExcNotInitialized());
845 
846  // Some optimizers don't have the same interface for
847  // initialization, so we filter them out through the specializations
848  // of the Optimizer class
849  Optimizer::initialize(*optimizer,
850  independent_symbols,
851  dependent_functions,
852  optimization_flags);
853  }
854 
855 
856 
870  static void
871  substitute(typename Optimizer::OptimizerType *optimizer,
872  std::vector<ReturnType> & output_values,
873  const std::vector<ReturnType> & substitution_values)
874  {
875  Assert(optimizer, ExcNotInitialized());
876  optimizer->call(output_values.data(), substitution_values.data());
877  }
878 
879 
880 
885  template <class Archive>
886  static void
887  save(Archive & archive,
888  const unsigned int version,
889  typename Optimizer::OptimizerType *optimizer)
890  {
891  Assert(optimizer, ExcNotInitialized());
892 
893  // Some optimizers don't have the same interface for
894  // serialization, so we filter them out through the specializations
895  // of the Optimizer class
896  Optimizer::save(archive, version, *optimizer);
897  }
898 
899 
900 
905  template <class Archive>
906  static void
907  load(Archive & archive,
908  const unsigned int version,
909  typename Optimizer::OptimizerType *optimizer,
910  const SymEngine::vec_basic & independent_symbols,
911  const SymEngine::vec_basic & dependent_functions,
912  const enum OptimizationFlags & optimization_flags)
913  {
914  Assert(optimizer, ExcNotInitialized());
915 
916  // Some optimizers don't have the same interface for
917  // serialization, so we filter them out through the specializations
918  // of the Optimizer class
919  Optimizer::load(archive,
920  version,
921  *optimizer,
922  independent_symbols,
923  dependent_functions,
924  optimization_flags);
925  }
926 
927 
928 
944  template <typename Stream>
945  static void
946  print(Stream & stream,
947  typename Optimizer::OptimizerType *optimizer,
948  const bool print_independent_symbols = false,
949  const bool print_dependent_functions = false,
950  const bool print_cse_reductions = true)
951  {
952  Assert(optimizer, ExcNotInitialized());
953 
954  // Some optimizers don't have a print function, so
955  // we filter them out through the specializations of
956  // the Optimizer class
957  Optimizer::print(stream,
958  *optimizer,
959  print_independent_symbols,
960  print_dependent_functions,
961  print_cse_reductions);
962  }
963  };
964 
965  template <typename ReturnType, typename Optimizer>
966  struct OptimizerHelper<ReturnType,
967  Optimizer,
968  typename std::enable_if<!std::is_same<
969  ReturnType,
970  typename Optimizer::ReturnType>::value>::type>
971  {
980  static void
981  initialize(typename Optimizer::OptimizerType *optimizer,
982  const SymEngine::vec_basic & independent_symbols,
983  const SymEngine::vec_basic & dependent_functions,
984  const enum OptimizationFlags & optimization_flags)
985  {
986  Assert(optimizer, ExcNotInitialized());
987 
988  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
989  optimizer->init(independent_symbols,
990  dependent_functions,
991  use_symbolic_cse);
992  }
993 
994 
995 
1009  static void
1010  substitute(typename Optimizer::OptimizerType *optimizer,
1011  std::vector<ReturnType> & output_values,
1012  const std::vector<ReturnType> & substitution_values)
1013  {
1014  Assert(optimizer, ExcNotInitialized());
1015 
1016  // Intermediate values to accommodate the difference in
1017  // value types.
1018  std::vector<typename Optimizer::ReturnType> int_outputs(
1019  output_values.size());
1020  std::vector<typename Optimizer::ReturnType> int_inputs(
1021  substitution_values.size());
1022 
1023  std::copy(substitution_values.begin(),
1024  substitution_values.end(),
1025  int_inputs.begin());
1026  optimizer->call(int_outputs.data(), int_inputs.data());
1027  std::copy(int_outputs.begin(),
1028  int_outputs.end(),
1029  output_values.begin());
1030  }
1031 
1032 
1033 
1038  template <class Archive>
1039  static void
1040  save(Archive & archive,
1041  const unsigned int version,
1042  typename Optimizer::OptimizerType *optimizer)
1043  {
1044  Assert(optimizer, ExcNotInitialized());
1045  Optimizer::save(archive, version, *optimizer);
1046  }
1047 
1048 
1049 
1054  template <class Archive>
1055  static void
1056  load(Archive & archive,
1057  const unsigned int version,
1058  typename Optimizer::OptimizerType *optimizer,
1059  const SymEngine::vec_basic & independent_symbols,
1060  const SymEngine::vec_basic & dependent_functions,
1061  const enum OptimizationFlags & optimization_flags)
1062  {
1063  Assert(optimizer, ExcNotInitialized());
1064 
1065  // Some optimizers don't have the same interface for
1066  // serialization, so we filter them out through the specializations
1067  // of the Optimizer class
1068  Optimizer::load(archive,
1069  version,
1070  *optimizer,
1071  independent_symbols,
1072  dependent_functions,
1073  optimization_flags);
1074  }
1075 
1076 
1077 
1093  template <typename Stream>
1094  static void
1095  print(Stream & stream,
1096  typename Optimizer::OptimizerType *optimizer,
1097  const bool print_cse_reductions = true,
1098  const bool print_independent_symbols = false,
1099  const bool print_dependent_functions = false)
1100  {
1101  Assert(optimizer, ExcNotInitialized());
1102 
1103  optimizer->print(stream,
1104  print_independent_symbols,
1105  print_dependent_functions,
1106  print_cse_reductions);
1107  }
1108  };
1109 
1110 # endif // DOXYGEN
1111 
1112 
1113  /* -------------------- Utility functions ---------------------- */
1114 
1115 
1137  template <typename NumberType,
1138  int rank,
1139  int dim,
1140  template <int, int, typename>
1141  class TensorType>
1142  TensorType<rank, dim, NumberType>
1143  tensor_evaluate_optimized(
1144  const TensorType<rank, dim, Expression> &symbol_tensor,
1145  const std::vector<NumberType> & cached_evaluation,
1146  const BatchOptimizer<NumberType> & optimizer)
1147  {
1148  TensorType<rank, dim, NumberType> out;
1149  for (unsigned int i = 0; i < out.n_independent_components; ++i)
1150  {
1151  const TableIndices<rank> indices(
1152  out.unrolled_to_component_indices(i));
1153  out[indices] =
1154  optimizer.extract(symbol_tensor[indices], cached_evaluation);
1155  }
1156  return out;
1157  }
1158 
1159 
1182  template <typename NumberType, int dim>
1183  SymmetricTensor<4, dim, NumberType>
1184  tensor_evaluate_optimized(
1185  const SymmetricTensor<4, dim, Expression> &symbol_tensor,
1186  const std::vector<NumberType> & cached_evaluation,
1187  const BatchOptimizer<NumberType> & optimizer)
1188  {
1189  SymmetricTensor<4, dim, NumberType> out;
1190  for (unsigned int i = 0;
1191  i < SymmetricTensor<2, dim>::n_independent_components;
1192  ++i)
1193  for (unsigned int j = 0;
1194  j < SymmetricTensor<2, dim>::n_independent_components;
1195  ++j)
1196  {
1197  const TableIndices<4> indices =
1198  make_rank_4_tensor_indices<dim>(i, j);
1199  out[indices] =
1200  optimizer.extract(symbol_tensor[indices], cached_evaluation);
1201  }
1202  return out;
1203  }
1204 
1205 
1223  template <typename NumberType, typename T>
1224  void
1225  register_functions(BatchOptimizer<NumberType> &optimizer,
1226  const T & function)
1227  {
1228  optimizer.register_function(function);
1229  }
1230 
1231 
1249  template <typename NumberType, typename T>
1250  void
1251  register_functions(BatchOptimizer<NumberType> &optimizer,
1252  const std::vector<T> & functions)
1253  {
1254  for (const auto &function : functions)
1255  register_functions(optimizer, function);
1256  }
1257 
1258 
1278  template <typename NumberType, typename T, typename... Args>
1279  void
1280  register_functions(BatchOptimizer<NumberType> &optimizer,
1281  const T & function,
1282  const Args &...other_functions)
1283  {
1284  register_functions(optimizer, function);
1285  register_functions(optimizer, other_functions...);
1286  }
1287 
1288 
1300  template <int rank,
1301  int dim,
1302  template <int, int, typename>
1303  class TensorType>
1304  types::symbol_vector
1305  unroll_to_expression_vector(
1306  const TensorType<rank, dim, Expression> &symbol_tensor)
1307  {
1308  types::symbol_vector out;
1309  out.reserve(symbol_tensor.n_independent_components);
1310  for (unsigned int i = 0; i < symbol_tensor.n_independent_components;
1311  ++i)
1312  {
1313  const TableIndices<rank> indices(
1314  symbol_tensor.unrolled_to_component_indices(i));
1315  out.push_back(symbol_tensor[indices].get_RCP());
1316  }
1317  return out;
1318  }
1319 
1320 
1330  template <int dim>
1331  types::symbol_vector
1332  unroll_to_expression_vector(
1333  const SymmetricTensor<4, dim, Expression> &symbol_tensor)
1334  {
1335  types::symbol_vector out;
1336  out.reserve(symbol_tensor.n_independent_components);
1337  for (unsigned int i = 0;
1338  i < SymmetricTensor<2, dim>::n_independent_components;
1339  ++i)
1340  for (unsigned int j = 0;
1341  j < SymmetricTensor<2, dim>::n_independent_components;
1342  ++j)
1343  {
1344  const TableIndices<4> indices =
1345  make_rank_4_tensor_indices<dim>(i, j);
1346  out.push_back(symbol_tensor[indices].get_RCP());
1347  }
1348  return out;
1349  }
1350 
1351  } // namespace internal
1352 
1353 
1354 
1445  template <typename ReturnType>
1446  class BatchOptimizer
1447  {
1448  public:
1456  BatchOptimizer();
1457 
1475  BatchOptimizer(const enum OptimizerType & optimization_method,
1476  const enum OptimizationFlags &optimization_flags =
1477  OptimizationFlags::optimize_all);
1478 
1488  BatchOptimizer(const BatchOptimizer &other/*,
1489  const bool copy_initialized = true*/);
1490 
1494  BatchOptimizer(BatchOptimizer &&) = default;
1495 
1499  ~BatchOptimizer() = default;
1500 
1510  template <typename Stream>
1511  void
1512  print(Stream &stream, const bool print_cse = false) const;
1513 
1522  template <class Archive>
1523  void
1524  save(Archive &archive, const unsigned int version) const;
1525 
1539  template <class Archive>
1540  void
1541  load(Archive &archive, const unsigned int version);
1542 
1543 # ifdef DOXYGEN
1544 
1565  template <class Archive>
1566  void
1567  serialize(Archive &archive, const unsigned int version);
1568 # else
1569  // This macro defines the serialize() method that is compatible with
1570  // the templated save() and load() method that have been implemented.
1571  BOOST_SERIALIZATION_SPLIT_MEMBER()
1572 # endif
1573 
1578 
1584  void
1585  register_symbols(const types::substitution_map &substitution_map);
1586 
1592  void
1593  register_symbols(const SymEngine::map_basic_basic &substitution_map);
1594 
1605  void
1606  register_symbols(const types::symbol_vector &symbols);
1607 
1618  void
1619  register_symbols(const SymEngine::vec_basic &symbols);
1620 
1625  types::substitution_map
1626  get_independent_symbols() const;
1627 
1633  std::size_t
1634  n_independent_variables() const;
1635 
1637 
1642 
1647  void
1648  register_function(const Expression &function);
1649 
1654  template <int rank, int dim>
1655  void
1656  register_function(const Tensor<rank, dim, Expression> &function_tensor);
1657 
1662  template <int rank, int dim>
1663  void
1664  register_function(
1665  const SymmetricTensor<rank, dim, Expression> &function_tensor);
1666 
1671  void
1672  register_functions(const types::symbol_vector &functions);
1673 
1678  void
1679  register_functions(const SymEngine::vec_basic &functions);
1680 
1690  template <typename T>
1691  void
1692  register_functions(const std::vector<T> &functions);
1693 
1707  template <typename T, typename... Args>
1708  void
1709  register_functions(const T &functions, const Args &...other_functions);
1710 
1715  const types::symbol_vector &
1716  get_dependent_functions() const;
1717 
1724  std::size_t
1725  n_dependent_variables() const;
1726 
1728 
1733 
1745  void
1746  set_optimization_method(const enum OptimizerType & optimization_method,
1747  const enum OptimizationFlags &optimization_flags =
1748  OptimizationFlags::optimize_all);
1749 
1754  enum OptimizerType
1755  optimization_method() const;
1756 
1761  enum OptimizationFlags
1762  optimization_flags() const;
1763 
1769  bool
1770  use_symbolic_CSE() const;
1771 
1787  void
1788  optimize();
1789 
1794  bool
1795  optimized() const;
1796 
1798 
1803 
1813  void
1814  substitute(const types::substitution_map &substitution_map) const;
1815 
1825  void
1826  substitute(const SymEngine::map_basic_basic &substitution_map) const;
1827 
1838  void
1839  substitute(const types::symbol_vector & symbols,
1840  const std::vector<ReturnType> &values) const;
1841 
1852  void
1853  substitute(const SymEngine::vec_basic & symbols,
1854  const std::vector<ReturnType> &values) const;
1855 
1861  bool
1862  values_substituted() const;
1863 
1865 
1870 
1893  const std::vector<ReturnType> &
1894  evaluate() const;
1895 
1903  ReturnType
1904  evaluate(const Expression &func) const;
1905 
1914  std::vector<ReturnType>
1915  evaluate(const std::vector<Expression> &funcs) const;
1916 
1925  template <int rank, int dim>
1926  Tensor<rank, dim, ReturnType>
1927  evaluate(const Tensor<rank, dim, Expression> &funcs) const;
1928 
1929 
1938  template <int rank, int dim>
1939  SymmetricTensor<rank, dim, ReturnType>
1940  evaluate(const SymmetricTensor<rank, dim, Expression> &funcs) const;
1941 
1942 
1950  ReturnType
1951  extract(const Expression & func,
1952  const std::vector<ReturnType> &cached_evaluation) const;
1953 
1954 
1962  std::vector<ReturnType>
1963  extract(const std::vector<Expression> &funcs,
1964  const std::vector<ReturnType> &cached_evaluation) const;
1965 
1966 
1974  template <int rank, int dim>
1975  Tensor<rank, dim, ReturnType>
1976  extract(const Tensor<rank, dim, Expression> &funcs,
1977  const std::vector<ReturnType> & cached_evaluation) const;
1978 
1979 
1987  template <int rank, int dim>
1988  SymmetricTensor<rank, dim, ReturnType>
1989  extract(const SymmetricTensor<rank, dim, Expression> &funcs,
1990  const std::vector<ReturnType> &cached_evaluation) const;
1991 
1993 
1994  private:
1998  enum OptimizerType method;
1999 
2004  enum OptimizationFlags flags;
2005 
2015  types::substitution_map independent_variables_symbols;
2016 
2022  types::symbol_vector dependent_variables_functions;
2023 
2028  bool
2029  is_valid_nonunique_dependent_variable(
2030  const SD::Expression &function) const;
2031 
2036  bool
2037  is_valid_nonunique_dependent_variable(
2038  const SymEngine::RCP<const SymEngine::Basic> &function) const;
2039 
2054  mutable std::vector<ReturnType> dependent_variables_output;
2055 
2064  using map_dependent_expression_to_vector_entry_t =
2065  std::map<SD::Expression,
2066  std::size_t,
2067  SD::types::internal::ExpressionKeyLess>;
2068 
2073  map_dependent_expression_to_vector_entry_t map_dep_expr_vec_entry;
2074 
2081  mutable std::unique_ptr<SymEngine::Visitor> optimizer;
2082 
2091  mutable bool ready_for_value_extraction;
2092 
2097  mutable bool has_been_serialized;
2098 
2102  void
2103  register_scalar_function(const SD::Expression &function);
2104 
2109  void
2110  register_vector_functions(const types::symbol_vector &functions);
2111 
2115  void
2116  create_optimizer(std::unique_ptr<SymEngine::Visitor> &optimizer);
2117 
2134  void
2135  substitute(const std::vector<ReturnType> &substitution_values) const;
2136  };
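The public interface above is typically exercised in four stages: register the independent symbols, register the dependent expressions, call optimize(), and then repeatedly substitute() and evaluate(). A minimal sketch, not part of this header, assuming deal.II was configured with SymEngine, that the umbrella header deal.II/differentiation/sd.h is available, and using the symbol constructor of SD::Expression:

    #include <deal.II/differentiation/sd.h>

    using namespace dealii;
    namespace SD = Differentiation::SD;

    void example()
    {
      // Independent symbols and one dependent function f(x, y).
      const SD::Expression x("x");
      const SD::Expression y("y");
      const SD::Expression f = x * y + SD::sin(x);

      // Lambda optimizer with common subexpression elimination.
      SD::BatchOptimizer<double> optimizer(SD::OptimizerType::lambda,
                                           SD::OptimizationFlags::optimize_cse);
      optimizer.register_symbols(SD::types::symbol_vector{x, y});
      optimizer.register_function(f);
      optimizer.optimize(); // one-off cost: build the evaluator

      // Possibly many times: bind numeric values, then evaluate.
      optimizer.substitute(SD::types::symbol_vector{x, y},
                           std::vector<double>{1.0, 2.0});
      const double f_value = optimizer.evaluate(f);
      (void)f_value;
    }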
2137 
2138 
2139 
2140  /* -------------------- inline and template functions ------------------ */
2141 
2142 
2143 # ifndef DOXYGEN
2144 
2145 
2146  template <typename ReturnType>
2147  template <typename Stream>
2148  void
2149  BatchOptimizer<ReturnType>::print(Stream &stream,
2150  const bool /*print_cse*/) const
2151  {
2152  // Settings
2153  stream << "Method? " << optimization_method() << "\n";
2154  stream << "Flags: " << optimization_flags() << "\n";
2155  stream << "Optimized? " << (optimized() ? "Yes" : "No") << "\n";
2156  stream << "Values substituted? " << values_substituted() << "\n\n";
2157 
2158  // Independent variables
2159  stream << "Symbols (" << n_independent_variables()
2160  << " independent variables):"
2161  << "\n";
2162  int cntr = 0;
2163  for (SD::types::substitution_map::const_iterator it =
2164  independent_variables_symbols.begin();
2165  it != independent_variables_symbols.end();
2166  ++it, ++cntr)
2167  {
2168  stream << cntr << ": " << it->first << "\n";
2169  }
2170  stream << "\n" << std::flush;
2171 
2172  // Dependent functions
2173  stream << "Functions (" << n_dependent_variables()
2174  << " dependent variables):"
2175  << "\n";
2176  cntr = 0;
2177  for (typename SD::types::symbol_vector::const_iterator it =
2178  dependent_variables_functions.begin();
2179  it != dependent_variables_functions.end();
2180  ++it, ++cntr)
2181  {
2182  stream << cntr << ": " << (*it) << "\n";
2183  }
2184  stream << "\n" << std::flush;
2185 
2186  // Common subexpression
2187  if (optimized() == true && use_symbolic_CSE() == true)
2188  {
2189  Assert(optimizer, ExcNotInitialized());
2190  const bool print_cse_reductions = true;
2191  const bool print_independent_symbols = false;
2192  const bool print_dependent_functions = false;
2193 
2194  if (optimization_method() == OptimizerType::dictionary)
2195  {
2196  Assert(dynamic_cast<typename internal::DictionaryOptimizer<
2197  ReturnType>::OptimizerType *>(optimizer.get()),
2198  ExcMessage("Cannot cast optimizer to Dictionary type."));
2199 
2200  internal::OptimizerHelper<
2201  ReturnType,
2202  internal::DictionaryOptimizer<ReturnType>>::
2203  print(stream,
2204  dynamic_cast<typename internal::DictionaryOptimizer<
2205  ReturnType>::OptimizerType *>(optimizer.get()),
2206  print_independent_symbols,
2207  print_dependent_functions,
2208  print_cse_reductions);
2209 
2210  stream << "\n" << std::flush;
2211  }
2212  else if (optimization_method() == OptimizerType::lambda)
2213  {
2214  Assert(dynamic_cast<typename internal::LambdaOptimizer<
2215  ReturnType>::OptimizerType *>(optimizer.get()),
2216  ExcMessage("Cannot cast optimizer to Lambda type."));
2217 
2218  internal::OptimizerHelper<ReturnType,
2219  internal::LambdaOptimizer<ReturnType>>::
2220  print(stream,
2221  dynamic_cast<typename internal::LambdaOptimizer<
2222  ReturnType>::OptimizerType *>(optimizer.get()),
2223  print_independent_symbols,
2224  print_dependent_functions,
2225  print_cse_reductions);
2226  }
2227 # ifdef HAVE_SYMENGINE_LLVM
2228  else if (optimization_method() == OptimizerType::llvm)
2229  {
2230  Assert(dynamic_cast<typename internal::LLVMOptimizer<
2231  ReturnType>::OptimizerType *>(optimizer.get()),
2232  ExcMessage("Cannot cast optimizer to LLVM type."));
2233 
2234  internal::OptimizerHelper<ReturnType,
2235  internal::LLVMOptimizer<ReturnType>>::
2236  print(stream,
2237  dynamic_cast<typename internal::LLVMOptimizer<
2238  ReturnType>::OptimizerType *>(optimizer.get()),
2239  print_independent_symbols,
2240  print_dependent_functions,
2241  print_cse_reductions);
2242  }
2243 # endif // HAVE_SYMENGINE_LLVM
2244  else
2245  {
2246  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2247  }
2248  }
2249 
2250  if (values_substituted())
2251  {
2252  stream << "Evaluated functions:"
2253  << "\n";
2254  stream << std::flush;
2255  cntr = 0;
2256  for (typename std::vector<ReturnType>::const_iterator it =
2257  dependent_variables_output.begin();
2258  it != dependent_variables_output.end();
2259  ++it, ++cntr)
2260  {
2261  stream << cntr << ": " << (*it) << "\n";
2262  }
2263  stream << "\n" << std::flush;
2264  }
2265  }
2266 
2267 
2268 
2269  template <typename ReturnType>
2270  template <class Archive>
2271  void
2272  BatchOptimizer<ReturnType>::save(Archive & ar,
2273  const unsigned int version) const
2274  {
2275  // Serialize enum classes...
2276  {
2277  const auto m =
2278  static_cast<typename std::underlying_type<OptimizerType>::type>(
2279  method);
2280  ar &m;
2281  }
2282  {
2283  const auto f =
2284  static_cast<typename std::underlying_type<OptimizationFlags>::type>(
2285  flags);
2286  ar &f;
2287  }
2288 
2289  // Important: Independent variables must always be
2290  // serialized before the dependent variables.
2291  ar &independent_variables_symbols;
2292  ar &dependent_variables_functions;
2293 
2294  ar &dependent_variables_output;
2295  ar &map_dep_expr_vec_entry;
2296  ar &ready_for_value_extraction;
2297 
2298  // Mark that we've saved this class at some point.
2299  has_been_serialized = true;
2300  ar &has_been_serialized;
2301 
2302  // When we serialize the optimizer itself, we have to (unfortunately)
2303  // provide it with sufficient information to rebuild itself from scratch.
2304  // This is because only two of the three optimization classes support
2305  // real serialization (i.e. have save/load capability).
2306  const SD::types::symbol_vector symbol_vec =
2307  Utilities::extract_symbols(independent_variables_symbols);
2308  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2309  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2310  ReturnType>::OptimizerType *>(optimizer.get()))
2311  {
2312  Assert(optimization_method() == OptimizerType::dictionary,
2313  ExcInternalError());
2314  internal::OptimizerHelper<
2315  ReturnType,
2316  internal::DictionaryOptimizer<ReturnType>>::save(ar, version, opt);
2317  }
2318  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2319  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2320  ReturnType>::OptimizerType *>(optimizer.get()))
2321  {
2322  Assert(optimization_method() == OptimizerType::lambda,
2323  ExcInternalError());
2324  internal::OptimizerHelper<
2325  ReturnType,
2326  internal::LambdaOptimizer<ReturnType>>::save(ar, version, opt);
2327  }
2328 # ifdef HAVE_SYMENGINE_LLVM
2329  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2330  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2331  ReturnType>::OptimizerType *>(optimizer.get()))
2332  {
2333  Assert(optimization_method() == OptimizerType::llvm,
2334  ExcInternalError());
2335  internal::OptimizerHelper<
2336  ReturnType,
2337  internal::LLVMOptimizer<ReturnType>>::save(ar, version, opt);
2338  }
2339 # endif
2340  else
2341  {
2342  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2343  }
2344  }
2345 
2346 
2347 
2348  template <typename ReturnType>
2349  template <class Archive>
2350  void
2351  BatchOptimizer<ReturnType>::load(Archive &ar, const unsigned int version)
2352  {
2353  Assert(independent_variables_symbols.empty(), ExcInternalError());
2354  Assert(dependent_variables_functions.empty(), ExcInternalError());
2355  Assert(dependent_variables_output.empty(), ExcInternalError());
2356  Assert(map_dep_expr_vec_entry.empty(), ExcInternalError());
2357  Assert(ready_for_value_extraction == false, ExcInternalError());
2358 
2359  // Deserialize enum classes...
2360  {
2361  typename std::underlying_type<OptimizerType>::type m;
2362  ar & m;
2363  method = static_cast<OptimizerType>(m);
2364  }
2365  {
2366  typename std::underlying_type<OptimizationFlags>::type f;
2367  ar & f;
2368  flags = static_cast<OptimizationFlags>(f);
2369  }
2370 
2371  // Important: Independent variables must always be
2372  // deserialized before the dependent variables.
2373  ar &independent_variables_symbols;
2374  ar &dependent_variables_functions;
2375 
2376  ar &dependent_variables_output;
2377  ar &map_dep_expr_vec_entry;
2378  ar &ready_for_value_extraction;
2379 
2380  ar &has_been_serialized;
2381 
2382  // If we're reading in data, then create the optimizer
2383  // and then deserialize it.
2384  Assert(!optimizer, ExcInternalError());
2385 
2386  // Create and configure the optimizer
2387  create_optimizer(optimizer);
2388  Assert(optimizer, ExcNotInitialized());
2389 
2390  // When we deserialize the optimizer itself, we have to (unfortunately)
2391  // provide it with sufficient information to rebuild itself from scratch.
2392  // This is because only two of the three optimization classes support
2393  // real serialization (i.e. have save/load capability).
2394  const SD::types::symbol_vector symbol_vec =
2395  Utilities::extract_symbols(independent_variables_symbols);
2396  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2397  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2398  ReturnType>::OptimizerType *>(optimizer.get()))
2399  {
2400  Assert(optimization_method() == OptimizerType::dictionary,
2401  ExcInternalError());
2402  internal::OptimizerHelper<ReturnType,
2403  internal::DictionaryOptimizer<ReturnType>>::
2404  load(ar,
2405  version,
2406  opt,
2407  Utilities::convert_expression_vector_to_basic_vector(
2408  symbol_vec),
2409  Utilities::convert_expression_vector_to_basic_vector(
2410  dependent_variables_functions),
2411  optimization_flags());
2412  }
2413  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2414  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2415  ReturnType>::OptimizerType *>(optimizer.get()))
2416  {
2417  Assert(optimization_method() == OptimizerType::lambda,
2418  ExcInternalError());
2419  internal::OptimizerHelper<ReturnType,
2420  internal::LambdaOptimizer<ReturnType>>::
2421  load(ar,
2422  version,
2423  opt,
2424  Utilities::convert_expression_vector_to_basic_vector(
2425  symbol_vec),
2426  Utilities::convert_expression_vector_to_basic_vector(
2427  dependent_variables_functions),
2428  optimization_flags());
2429  }
2430 # ifdef HAVE_SYMENGINE_LLVM
2431  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2432  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2433  ReturnType>::OptimizerType *>(optimizer.get()))
2434  {
2435  Assert(optimization_method() == OptimizerType::llvm,
2436  ExcInternalError());
2437  internal::OptimizerHelper<ReturnType,
2438  internal::LLVMOptimizer<ReturnType>>::
2439  load(ar,
2440  version,
2441  opt,
2442  Utilities::convert_expression_vector_to_basic_vector(
2443  symbol_vec),
2444  Utilities::convert_expression_vector_to_basic_vector(
2445  dependent_variables_functions),
2446  optimization_flags());
2447  }
2448 # endif
2449  else
2450  {
2451  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2452  }
2453  }
2454 
2455 
2456 
2457  template <typename ReturnType>
2458  template <int rank, int dim>
2459  void
2460  BatchOptimizer<ReturnType>::register_function(
2461  const Tensor<rank, dim, Expression> &function_tensor)
2462  {
2463  Assert(optimized() == false,
2464  ExcMessage(
2465  "Cannot register functions once the optimizer is finalised."));
2466 
2467  register_vector_functions(
2468  internal::unroll_to_expression_vector(function_tensor));
2469  }
2470 
2471 
2472 
2473  template <typename ReturnType>
2474  template <int rank, int dim>
2475  void
2476  BatchOptimizer<ReturnType>::register_function(
2477  const SymmetricTensor<rank, dim, Expression> &function_tensor)
2478  {
2479  Assert(optimized() == false,
2480  ExcMessage(
2481  "Cannot register functions once the optimizer is finalised."));
2482 
2483  register_vector_functions(
2484  internal::unroll_to_expression_vector(function_tensor));
2485  }
2486 
2487 
2488 
2489  template <typename ReturnType>
2490  template <typename T, typename... Args>
2491  void
2492  BatchOptimizer<ReturnType>::register_functions(
2493  const T &functions,
2494  const Args &...other_functions)
2495  {
2496  internal::register_functions(*this, functions);
2497  internal::register_functions(*this, other_functions...);
2498  }
2499 
2500 
2501 
2502  template <typename ReturnType>
2503  template <typename T>
2504  void
2505  BatchOptimizer<ReturnType>::register_functions(
2506  const std::vector<T> &functions)
2507  {
2508  internal::register_functions(*this, functions);
2509  }
2510 
2511 
2512 
2513  template <typename ReturnType>
2514  template <int rank, int dim>
2515  Tensor<rank, dim, ReturnType>
2516  BatchOptimizer<ReturnType>::extract(
2517  const Tensor<rank, dim, Expression> &funcs,
2518  const std::vector<ReturnType> & cached_evaluation) const
2519  {
2520  Assert(
2521  values_substituted() == true,
2522  ExcMessage(
2523  "The optimizer is not configured to perform evaluation. "
2524  "This action can only be performed after substitute() has been called."));
2525 
2526  return internal::tensor_evaluate_optimized(funcs,
2527  cached_evaluation,
2528  *this);
2529  }
2530 
2531 
2532 
2533  template <typename ReturnType>
2534  template <int rank, int dim>
2535  Tensor<rank, dim, ReturnType>
2536  BatchOptimizer<ReturnType>::evaluate(
2537  const Tensor<rank, dim, Expression> &funcs) const
2538  {
2539  return extract(funcs, dependent_variables_output);
2540  }
2541 
2542 
2543 
2544  template <typename ReturnType>
2545  template <int rank, int dim>
2546  SymmetricTensor<rank, dim, ReturnType>
2547  BatchOptimizer<ReturnType>::extract(
2548  const SymmetricTensor<rank, dim, Expression> &funcs,
2549  const std::vector<ReturnType> & cached_evaluation) const
2550  {
2551  Assert(
2552  values_substituted() == true,
2553  ExcMessage(
2554  "The optimizer is not configured to perform evaluation. "
2555  "This action can only be performed after substitute() has been called."));
2556 
2557  return internal::tensor_evaluate_optimized(funcs,
2558  cached_evaluation,
2559  *this);
2560  }
2561 
2562 
2563 
2564  template <typename ReturnType>
2565  template <int rank, int dim>
2566  SymmetricTensor<rank, dim, ReturnType>
2567  BatchOptimizer<ReturnType>::evaluate(
2568  const SymmetricTensor<rank, dim, Expression> &funcs) const
2569  {
2570  return extract(funcs, dependent_variables_output);
2571  }
2572 
2573 # endif // DOXYGEN
2574 
2575  } // namespace SD
2576 } // namespace Differentiation
2577 
2578 
2579 DEAL_II_NAMESPACE_CLOSE
2580 
2581 #endif // DEAL_II_WITH_SYMENGINE
2582 
2583 #endif
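BOOST_SERIALIZATION_SPLIT_MEMBER() in the class above wires the save()/load() pair into the usual Boost.Serialization mechanism, so a fully configured optimizer can be written to an archive and restored later; load() rebuilds or reloads the underlying dictionary, lambda, or LLVM optimizer as described in its implementation. A hedged sketch; the file name and archive type are arbitrary choices for illustration:

    #include <boost/archive/text_iarchive.hpp>
    #include <boost/archive/text_oarchive.hpp>

    #include <fstream>

    // 'optimizer' is a fully set up SD::BatchOptimizer<double>, as in the
    // earlier usage sketch.
    {
      std::ofstream                 out("batch_optimizer.dat");
      boost::archive::text_oarchive oa(out);
      oa << optimizer; // dispatches to BatchOptimizer::save()
    }

    // Restore into a freshly constructed object (load() expects an
    // otherwise empty optimizer).
    {
      SD::BatchOptimizer<double>    loaded;
      std::ifstream                 in("batch_optimizer.dat");
      boost::archive::text_iarchive ia(in);
      ia >> loaded; // dispatches to BatchOptimizer::load()
    }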