Reference documentation for deal.II version 9.3.0
symengine_optimizer.h
Go to the documentation of this file.
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2020 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_differentiation_sd_symengine_optimizer_h
17 #define dealii_differentiation_sd_symengine_optimizer_h
18 
19 #include <deal.II/base/config.h>
20 
21 #ifdef DEAL_II_WITH_SYMENGINE
22 
 23 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
 24 // Low level
25 # include <symengine/basic.h>
26 # include <symengine/dict.h>
27 # include <symengine/symengine_exception.h>
28 # include <symengine/symengine_rcp.h>
29 
30 // Optimization
31 # include <symengine/lambda_double.h>
32 # include <symengine/visitor.h>
33 # ifdef HAVE_SYMENGINE_LLVM
34 # include <symengine/llvm_double.h>
35 # endif
 36 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
 37 
38 # include <deal.II/base/exceptions.h>
39 # include <deal.II/base/logstream.h>
40 # include <deal.II/base/utilities.h>
41 
 42 # include <deal.II/differentiation/sd/symengine_number_types.h>
 43 # include <deal.II/differentiation/sd/symengine_number_visitor_internal.h>
 44 # include <deal.II/differentiation/sd/symengine_scalar_operations.h>
 45 # include <deal.II/differentiation/sd/symengine_tensor_operations.h>
 46 # include <deal.II/differentiation/sd/symengine_types.h>
 47 # include <deal.II/differentiation/sd/symengine_utilities.h>
48 # include <boost/serialization/split_member.hpp>
49 # include <boost/type_traits.hpp>
50 
51 # include <algorithm>
52 # include <map>
53 # include <memory>
54 # include <type_traits>
55 # include <utility>
56 # include <vector>
57 
58 
 60 DEAL_II_NAMESPACE_OPEN
61 
62 namespace Differentiation
63 {
64  namespace SD
65  {
 75  DeclExceptionMsg(ExcSymEngineLLVMNotAvailable,
 76                   "SymEngine has not been built with LLVM support.");
 77 
 82  DeclExceptionMsg(ExcSymEngineLLVMReturnTypeNotSupported,
 83                   "The SymEngine LLVM optimizer does not (yet) support the "
 84                   "selected return type.");
85 
87 
88 
89  // Forward declarations
90  template <typename ReturnType>
 91  class BatchOptimizer;
 92 
93 
99  enum class OptimizerType
100  {
104  dictionary,
109  lambda,
114  llvm
115  };
116 
117 
121  template <class StreamType>
122  inline StreamType &
123  operator<<(StreamType &s, OptimizerType o)
124  {
125  if (o == OptimizerType::dictionary)
126  s << "dictionary";
127  else if (o == OptimizerType::lambda)
128  s << "lambda";
129  else if (o == OptimizerType::llvm)
130  s << "llvm";
131  else
132  {
133  Assert(false, ExcMessage("Unknown optimization method."));
134  }
135 
136  return s;
137  }
138 
139 
145  enum class OptimizationFlags : unsigned char
146  {
150  optimize_default = 0,
154  optimize_cse = 0x0001,
159  optimize_aggressive = 0x0002,
 163  optimize_all = optimize_cse | optimize_aggressive
 164  };
165 
166 
175  // This operator exists since if it did not then the result of the bit-or
176  // <tt>operator |</tt> would be an integer which would in turn trigger a
177  // compiler warning when we tried to assign it to an object of type
178  // OptimizationFlags.
179  inline OptimizationFlags
 180  operator|(const OptimizationFlags f1, const OptimizationFlags f2)
 181  {
182  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) |
183  static_cast<unsigned int>(f2));
184  }
185 
186 
191  inline OptimizationFlags &
 192  operator|=(OptimizationFlags &f1, const OptimizationFlags f2)
 193  {
194  f1 = f1 | f2;
195  return f1;
196  }
197 
198 
207  // This operator exists since if it did not then the result of the bit-or
208  // <tt>operator |</tt> would be an integer which would in turn trigger a
209  // compiler warning when we tried to assign it to an object of type
210  // OptimizationFlags.
 211  inline OptimizationFlags operator&(const OptimizationFlags f1,
 212                                     const OptimizationFlags f2)
213  {
214  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) &
215  static_cast<unsigned int>(f2));
216  }
217 
218 
223  inline OptimizationFlags &
 224  operator&=(OptimizationFlags &f1, const OptimizationFlags f2)
 225  {
226  f1 = f1 & f2;
227  return f1;
228  }
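 // Illustrative sketch (not part of the original header): with the operators
 // above, optimization settings can be composed and queried in a strongly
 // typed way, for example:
 //
 //   OptimizationFlags flags = OptimizationFlags::optimize_cse;
 //   flags |= OptimizationFlags::optimize_aggressive;
 //
 //   const bool cse_requested =
 //     static_cast<unsigned int>(flags & OptimizationFlags::optimize_cse) != 0;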
229 
230 
231  namespace internal
232  {
237  inline bool
 238  use_symbolic_CSE(const enum OptimizationFlags &flags)
 239  {
240  return static_cast<int>(flags & OptimizationFlags::optimize_cse);
241  }
242 
247  inline int
 248  get_LLVM_optimization_level(const enum OptimizationFlags &flags)
 249  {
250  // With the LLVM compiler there exists the opportunity to tune
251  // the level of optimizations performed during compilation.
252  // By default SymEngine sets this at "opt_level=2", which one
253  // presumes targets -O2. Here we are a bit more specific about
 254  // what we want it to do:
 255  // - Normal compilation: -O2 (default settings)
 256  // - Aggressive mode: -O3 (the whole lot!)
257  // In theory we could also target
258  // - Debug mode: -O0 (no optimizations)
259  // but this doesn't make much sense since SymEngine is a
260  // tested external library.
261  const bool use_agg_opt =
262  static_cast<int>(flags & OptimizationFlags::optimize_aggressive);
263  const int opt_level = (use_agg_opt ? 3 : 2);
264  return opt_level;
265  }
266  } // namespace internal
267 
268 
273  template <class StreamType>
274  inline StreamType &
275  operator<<(StreamType &s, OptimizationFlags o)
276  {
277  s << " OptimizationFlags|";
278  if (static_cast<unsigned int>(o & OptimizationFlags::optimize_cse))
279  s << "cse|";
280 
281  // LLVM optimization level
282  s << "-O" +
 283  Utilities::to_string(
 284  internal::get_LLVM_optimization_level(o)) +
 285  "|";
286 
287  return s;
288  }
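 // Illustrative sketch (not part of the original header): streaming a flag
 // set built as above through this operator yields a compact summary, e.g.
 //
 //   std::ostringstream out; // requires <sstream>
 //   out << (OptimizationFlags::optimize_cse |
 //           OptimizationFlags::optimize_aggressive);
 //   // out.str() is now " OptimizationFlags|cse|-O3|"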
289 
290 
291  namespace internal
292  {
302  template <typename ReturnType, typename T = void>
 303  struct DictionaryOptimizer;
 304 
305 
315  template <typename ReturnType, typename T = void>
 316  struct LambdaOptimizer;
 317 
318 
319 # ifdef HAVE_SYMENGINE_LLVM
320 
329  template <typename ReturnType, typename T = void>
330  struct LLVMOptimizer;
331 # endif // HAVE_SYMENGINE_LLVM
332 
333 
349  template <typename ReturnType, typename Optimizer, typename T = void>
 350  struct OptimizerHelper;
 351 
352 
353 # ifndef DOXYGEN
354 
355 
356  /* ----------- Specializations for the Optimizers ----------- */
357 
358 
359  // A helper struct to type trait detection for the optimizers that
360  // will be defined next.
361  template <typename ReturnType_, typename T = void>
362  struct SupportedOptimizerTypeTraits
363  {
364  static const bool is_supported = false;
365 
366  using ReturnType = void;
367  };
368 
369 
370 
371  // Specialization for arithmetic types
372  template <typename ReturnType_>
373  struct SupportedOptimizerTypeTraits<
374  ReturnType_,
375  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
376  {
377  static const bool is_supported = true;
378 
379  using ReturnType =
380  typename std::conditional<std::is_same<ReturnType_, float>::value,
381  float,
382  double>::type;
383  };
384 
385 
386 
387  // Specialization for complex arithmetic types
388  template <typename ReturnType_>
389  struct SupportedOptimizerTypeTraits<
390  ReturnType_,
391  typename std::enable_if<
392  boost::is_complex<ReturnType_>::value &&
393  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
394  {
395  static const bool is_supported = true;
396 
397  using ReturnType = typename std::conditional<
398  std::is_same<ReturnType_, std::complex<float>>::value,
399  std::complex<float>,
400  std::complex<double>>::type;
401  };
402 
403 
404 
405  template <typename ReturnType_>
406  struct DictionaryOptimizer<
407  ReturnType_,
408  typename std::enable_if<
409  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
410  {
411  using ReturnType =
412  typename SupportedOptimizerTypeTraits<ReturnType_>::ReturnType;
413  using OptimizerType =
 414  DictionarySubstitutionVisitor<ReturnType, SD::Expression>;
 415 
416 
425  static void
426  initialize(OptimizerType & optimizer,
427  const SymEngine::vec_basic & independent_symbols,
428  const SymEngine::vec_basic & dependent_functions,
429  const enum OptimizationFlags &optimization_flags)
430  {
431  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
432  optimizer.init(independent_symbols,
433  dependent_functions,
434  use_symbolic_cse);
435  }
436 
437 
438 
443  template <class Archive>
444  static void
445  save(Archive & archive,
446  const unsigned int version,
447  OptimizerType & optimizer)
448  {
449  optimizer.save(archive, version);
450  }
451 
452 
453 
458  template <class Archive>
459  static void
460  load(Archive & archive,
461  const unsigned int version,
462  OptimizerType & optimizer,
463  const SymEngine::vec_basic & /*independent_symbols*/,
464  const SymEngine::vec_basic & /*dependent_functions*/,
465  const enum OptimizationFlags & /*optimization_flags*/)
466  {
467  optimizer.load(archive, version);
468  }
469 
470 
471 
487  template <typename Stream>
488  static void
489  print(Stream & stream,
490  const OptimizerType &optimizer,
491  const bool print_independent_symbols = false,
492  const bool print_dependent_functions = false,
493  const bool print_cse_reductions = true)
494  {
495  optimizer.print(stream,
496  print_independent_symbols,
497  print_dependent_functions,
498  print_cse_reductions);
499  }
500  };
501 
502 
503 
504  template <typename ReturnType_>
505  struct LambdaOptimizer<
506  ReturnType_,
507  typename std::enable_if<
508  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
509  {
510  using ReturnType =
511  typename std::conditional<!boost::is_complex<ReturnType_>::value,
512  double,
513  std::complex<double>>::type;
514  using OptimizerType = typename std::conditional<
515  !boost::is_complex<ReturnType_>::value,
516  SymEngine::LambdaRealDoubleVisitor,
517  SymEngine::LambdaComplexDoubleVisitor>::type;
518 
519 
528  static void
529  initialize(OptimizerType & optimizer,
530  const SymEngine::vec_basic & independent_symbols,
531  const SymEngine::vec_basic & dependent_functions,
532  const enum OptimizationFlags &optimization_flags)
533  {
534  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
535  optimizer.init(independent_symbols,
536  dependent_functions,
537  use_symbolic_cse);
538  }
539 
540 
541 
546  template <class Archive>
547  static void
548  save(Archive & /*archive*/,
549  const unsigned int /*version*/,
550  OptimizerType & /*optimizer*/)
551  {}
552 
553 
558  template <class Archive>
559  static void
560  load(Archive & /*archive*/,
561  const unsigned int /*version*/,
562  OptimizerType & optimizer,
563  const SymEngine::vec_basic & independent_symbols,
564  const SymEngine::vec_basic & dependent_functions,
565  const enum OptimizationFlags &optimization_flags)
566  {
567  initialize(optimizer,
568  independent_symbols,
569  dependent_functions,
570  optimization_flags);
571  }
572 
573 
574 
590  template <typename StreamType>
591  static void
592  print(StreamType & /*stream*/,
593  const OptimizerType & /*optimizer*/,
594  const bool /*print_independent_symbols*/ = false,
595  const bool /*print_dependent_functions*/ = false,
596  const bool /*print_cse_reductions*/ = true)
597  {
598  // No built-in print function
599  }
600  };
601 
602 
603 
604 # ifdef HAVE_SYMENGINE_LLVM
605  template <typename ReturnType_>
606  struct LLVMOptimizer<
607  ReturnType_,
608  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
609  {
610  using ReturnType =
611  typename std::conditional<std::is_same<ReturnType_, float>::value,
612  float,
613  double>::type;
614  using OptimizerType =
615  typename std::conditional<std::is_same<ReturnType_, float>::value,
616  SymEngine::LLVMFloatVisitor,
617  SymEngine::LLVMDoubleVisitor>::type;
618 
623  static const bool supported_by_LLVM = true;
624 
625 
634  static void
635  initialize(OptimizerType & optimizer,
636  const SymEngine::vec_basic & independent_symbols,
637  const SymEngine::vec_basic & dependent_functions,
638  const enum OptimizationFlags &optimization_flags)
639  {
640  const int opt_level = get_LLVM_optimization_level(optimization_flags);
641  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
642  optimizer.init(independent_symbols,
643  dependent_functions,
644  use_symbolic_cse,
645  opt_level);
646  }
647 
648 
649 
654  template <class Archive>
655  static void
656  save(Archive &archive,
657  const unsigned int /*version*/,
658  OptimizerType &optimizer)
659  {
660  const std::string llvm_compiled_function = optimizer.dumps();
661  archive & llvm_compiled_function;
662  }
663 
664 
665 
670  template <class Archive>
671  static void
672  load(Archive &archive,
673  const unsigned int /*version*/,
674  OptimizerType &optimizer,
675  const SymEngine::vec_basic & /*independent_symbols*/,
676  const SymEngine::vec_basic & /*dependent_functions*/,
677  const enum OptimizationFlags & /*optimization_flags*/)
678  {
679  std::string llvm_compiled_function;
680  archive & llvm_compiled_function;
681  optimizer.loads(llvm_compiled_function);
682  }
683 
684 
685 
701  template <typename StreamType>
702  static void
703  print(StreamType & /*stream*/,
704  const OptimizerType & /*optimizer*/,
705  const bool /*print_independent_symbols*/ = false,
706  const bool /*print_dependent_functions*/ = false,
707  const bool /*print_cse_reductions*/ = true)
708  {
709  // No built-in print function
710  }
711  };
712 
713 
714  // There is no LLVM optimizer built with complex number support.
715  // So we fall back to the LambdaDouble case as a type (required
716  // at compile time), but offer no implementation. We expect that
717  // the calling class does not create this type: This can be done by
718  // checking the `supported_by_LLVM` flag.
719  template <typename ReturnType_>
720  struct LLVMOptimizer<
721  ReturnType_,
722  typename std::enable_if<
723  boost::is_complex<ReturnType_>::value &&
724  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
725  {
726  // Since there is no working implementation, these are dummy types
727  // that help with templating in the calling function.
728  using ReturnType = typename LambdaOptimizer<ReturnType_>::ReturnType;
729  using OptimizerType =
 730  typename LambdaOptimizer<ReturnType_>::OptimizerType;
 731 
736  static const bool supported_by_LLVM = false;
737 
738 
747  static void
748  initialize(OptimizerType & /*optimizer*/,
749  const SymEngine::vec_basic & /*independent_symbols*/,
750  const SymEngine::vec_basic & /*dependent_functions*/,
751  const enum OptimizationFlags & /*optimization_flags*/)
752  {
753  AssertThrow(false, ExcNotImplemented());
754  }
755 
756 
757 
762  template <class Archive>
763  static void
764  save(Archive & /*archive*/,
765  const unsigned int /*version*/,
766  OptimizerType & /*optimizer*/)
767  {
768  AssertThrow(false, ExcNotImplemented());
769  }
770 
771 
772 
777  template <class Archive>
778  static void
779  load(Archive & /*archive*/,
780  const unsigned int /*version*/,
781  OptimizerType & /*optimizer*/,
782  const SymEngine::vec_basic & /*independent_symbols*/,
783  const SymEngine::vec_basic & /*dependent_functions*/,
784  const enum OptimizationFlags & /*optimization_flags*/)
785  {
786  AssertThrow(false, ExcNotImplemented());
787  }
788 
789 
790 
806  template <typename StreamType>
807  static void
808  print(StreamType & /*stream*/,
809  const OptimizerType & /*optimizer*/,
810  const bool /*print_independent_symbols*/ = false,
811  const bool /*print_dependent_functions*/ = false,
812  const bool /*print_cse_reductions*/ = true)
813  {
814  AssertThrow(false, ExcNotImplemented());
815  }
816  };
817 # endif // HAVE_SYMENGINE_LLVM
818 
819 
820  /* ----------- Specializations for OptimizerHelper ----------- */
821 
822 
823  template <typename ReturnType, typename Optimizer>
824  struct OptimizerHelper<ReturnType,
825  Optimizer,
826  typename std::enable_if<std::is_same<
827  ReturnType,
828  typename Optimizer::ReturnType>::value>::type>
829  {
838  static void
839  initialize(typename Optimizer::OptimizerType *optimizer,
840  const SymEngine::vec_basic & independent_symbols,
841  const SymEngine::vec_basic & dependent_functions,
842  const enum OptimizationFlags & optimization_flags)
843  {
844  Assert(optimizer, ExcNotInitialized());
845 
846  // Some optimizers don't have the same interface for
847  // initialization, we filter them out through the specializations
848  // of the Optimizer class
849  Optimizer::initialize(*optimizer,
850  independent_symbols,
851  dependent_functions,
852  optimization_flags);
853  }
854 
855 
856 
870  static void
871  substitute(typename Optimizer::OptimizerType *optimizer,
872  std::vector<ReturnType> & output_values,
873  const std::vector<ReturnType> & substitution_values)
874  {
875  Assert(optimizer, ExcNotInitialized());
876  optimizer->call(output_values.data(), substitution_values.data());
877  }
878 
879 
880 
885  template <class Archive>
886  static void
887  save(Archive & archive,
888  const unsigned int version,
889  typename Optimizer::OptimizerType *optimizer)
890  {
891  Assert(optimizer, ExcNotInitialized());
892 
893  // Some optimizers don't have the same interface for
894  // serialization, we filter them out through the specializations
895  // of the Optimizer class
896  Optimizer::save(archive, version, *optimizer);
897  }
898 
899 
900 
905  template <class Archive>
906  static void
907  load(Archive & archive,
908  const unsigned int version,
909  typename Optimizer::OptimizerType *optimizer,
910  const SymEngine::vec_basic & independent_symbols,
911  const SymEngine::vec_basic & dependent_functions,
912  const enum OptimizationFlags & optimization_flags)
913  {
914  Assert(optimizer, ExcNotInitialized());
915 
916  // Some optimizers don't have the same interface for
917  // serialization, we filter them out through the specializations
918  // of the Optimizer class
919  Optimizer::load(archive,
920  version,
921  *optimizer,
922  independent_symbols,
923  dependent_functions,
924  optimization_flags);
925  }
926 
927 
928 
944  template <typename Stream>
945  static void
946  print(Stream & stream,
947  typename Optimizer::OptimizerType *optimizer,
948  const bool print_independent_symbols = false,
949  const bool print_dependent_functions = false,
950  const bool print_cse_reductions = true)
951  {
952  Assert(optimizer, ExcNotInitialized());
953 
954  // Some optimizers don't have a print function, so
955  // we filter them out through the specializations of
956  // the Optimizer class
957  Optimizer::print(stream,
958  *optimizer,
959  print_independent_symbols,
960  print_dependent_functions,
961  print_cse_reductions);
962  }
963  };
964 
965  template <typename ReturnType, typename Optimizer>
966  struct OptimizerHelper<ReturnType,
967  Optimizer,
968  typename std::enable_if<!std::is_same<
969  ReturnType,
970  typename Optimizer::ReturnType>::value>::type>
971  {
980  static void
981  initialize(typename Optimizer::OptimizerType *optimizer,
982  const SymEngine::vec_basic & independent_symbols,
983  const SymEngine::vec_basic & dependent_functions,
984  const enum OptimizationFlags & optimization_flags)
985  {
986  Assert(optimizer, ExcNotInitialized());
987 
988  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
989  optimizer->init(independent_symbols,
990  dependent_functions,
991  use_symbolic_cse);
992  }
993 
994 
995 
1009  static void
1010  substitute(typename Optimizer::OptimizerType *optimizer,
1011  std::vector<ReturnType> & output_values,
1012  const std::vector<ReturnType> & substitution_values)
1013  {
1014  Assert(optimizer, ExcNotInitialized());
1015 
1016  // Intermediate values to accommodate the difference in
1017  // value types.
1018  std::vector<typename Optimizer::ReturnType> int_outputs(
1019  output_values.size());
1020  std::vector<typename Optimizer::ReturnType> int_inputs(
1021  substitution_values.size());
1022 
1023  std::copy(substitution_values.begin(),
1024  substitution_values.end(),
1025  int_inputs.begin());
1026  optimizer->call(int_outputs.data(), int_inputs.data());
1027  std::copy(int_outputs.begin(),
1028  int_outputs.end(),
1029  output_values.begin());
1030  }
1031 
1032 
1033 
1038  template <class Archive>
1039  static void
1040  save(Archive & archive,
1041  const unsigned int version,
1042  typename Optimizer::OptimizerType *optimizer)
1043  {
1044  Assert(optimizer, ExcNotInitialized());
1045  Optimizer::save(archive, version, *optimizer);
1046  }
1047 
1048 
1049 
1054  template <class Archive>
1055  static void
1056  load(Archive & archive,
1057  const unsigned int version,
1058  typename Optimizer::OptimizerType *optimizer,
1059  const SymEngine::vec_basic & independent_symbols,
1060  const SymEngine::vec_basic & dependent_functions,
1061  const enum OptimizationFlags & optimization_flags)
1062  {
1063  Assert(optimizer, ExcNotInitialized());
1064 
1065  // Some optimizers don't have the same interface for
1066  // serialization, we filter them out through the specializations
1067  // of the Optimizer class
1068  Optimizer::load(archive,
1069  version,
1070  *optimizer,
1071  independent_symbols,
1072  dependent_functions,
1073  optimization_flags);
1074  }
1075 
1076 
1077 
1093  template <typename Stream>
1094  static void
1095  print(Stream & stream,
1096  typename Optimizer::OptimizerType *optimizer,
1097  const bool print_cse_reductions = true,
1098  const bool print_independent_symbols = false,
1099  const bool print_dependent_functions = false)
1100  {
1101  Assert(optimizer, ExcNotInitialized());
1102 
1103  optimizer->print(stream,
1104  print_independent_symbols,
1105  print_dependent_functions,
1106  print_cse_reductions);
1107  }
1108  };
1109 
1110 # endif // DOXYGEN
1111 
1112 
1113  /* -------------------- Utility functions ---------------------- */
1114 
1115 
1137  template <typename NumberType,
1138  int rank,
1139  int dim,
1140  template <int, int, typename> class TensorType>
1141  TensorType<rank, dim, NumberType>
 1142  tensor_evaluate_optimized(
 1143  const TensorType<rank, dim, Expression> &symbol_tensor,
1144  const std::vector<NumberType> & cached_evaluation,
1145  const BatchOptimizer<NumberType> & optimizer)
1146  {
1147  TensorType<rank, dim, NumberType> out;
1148  for (unsigned int i = 0; i < out.n_independent_components; ++i)
1149  {
1150  const TableIndices<rank> indices(
1151  out.unrolled_to_component_indices(i));
1152  out[indices] =
1153  optimizer.extract(symbol_tensor[indices], cached_evaluation);
1154  }
1155  return out;
1156  }
1157 
1158 
1181  template <typename NumberType, int dim>
 1182  SymmetricTensor<4, dim, NumberType>
 1183  tensor_evaluate_optimized(
 1184  const SymmetricTensor<4, dim, Expression> &symbol_tensor,
1185  const std::vector<NumberType> & cached_evaluation,
1186  const BatchOptimizer<NumberType> & optimizer)
1187  {
 1188  SymmetricTensor<4, dim, NumberType> out;
 1189  for (unsigned int i = 0;
1190  i < SymmetricTensor<2, dim>::n_independent_components;
1191  ++i)
1192  for (unsigned int j = 0;
1193  j < SymmetricTensor<2, dim>::n_independent_components;
1194  ++j)
1195  {
1196  const TableIndices<4> indices =
1197  make_rank_4_tensor_indices<dim>(i, j);
1198  out[indices] =
1199  optimizer.extract(symbol_tensor[indices], cached_evaluation);
1200  }
1201  return out;
1202  }
1203 
1204 
1222  template <typename NumberType, typename T>
1223  void
 1224  register_functions(BatchOptimizer<NumberType> &optimizer,
 1225  const T & function)
1226  {
1227  optimizer.register_function(function);
1228  }
1229 
1230 
1248  template <typename NumberType, typename T>
1249  void
 1250  register_functions(BatchOptimizer<NumberType> &optimizer,
 1251  const std::vector<T> & functions)
1252  {
1253  for (const auto &function : functions)
1254  register_functions(optimizer, function);
1255  }
1256 
1257 
1277  template <typename NumberType, typename T, typename... Args>
1278  void
 1279  register_functions(BatchOptimizer<NumberType> &optimizer,
 1280  const T & function,
1281  const Args &... other_functions)
1282  {
1283  register_functions(optimizer, function);
1284  register_functions(optimizer, other_functions...);
1285  }
1286 
1287 
1299  template <int rank,
1300  int dim,
1301  template <int, int, typename> class TensorType>
 1302  types::symbol_vector
 1303  unroll_to_expression_vector(
 1304  const TensorType<rank, dim, Expression> &symbol_tensor)
1305  {
 1306  types::symbol_vector out;
 1307  out.reserve(symbol_tensor.n_independent_components);
1308  for (unsigned int i = 0; i < symbol_tensor.n_independent_components;
1309  ++i)
1310  {
1311  const TableIndices<rank> indices(
1312  symbol_tensor.unrolled_to_component_indices(i));
1313  out.push_back(symbol_tensor[indices].get_RCP());
1314  }
1315  return out;
1316  }
1317 
1318 
1328  template <int dim>
 1329  types::symbol_vector
 1330  unroll_to_expression_vector(
 1331  const SymmetricTensor<4, dim, Expression> &symbol_tensor)
1332  {
 1333  types::symbol_vector out;
 1334  out.reserve(symbol_tensor.n_independent_components);
1335  for (unsigned int i = 0;
1336  i < SymmetricTensor<2, dim>::n_independent_components;
1337  ++i)
1338  for (unsigned int j = 0;
1339  j < SymmetricTensor<2, dim>::n_independent_components;
1340  ++j)
1341  {
1342  const TableIndices<4> indices =
1343  make_rank_4_tensor_indices<dim>(i, j);
1344  out.push_back(symbol_tensor[indices].get_RCP());
1345  }
1346  return out;
1347  }
1348 
1349  } // namespace internal
1350 
1351 
1352 
1443  template <typename ReturnType>
1444  class BatchOptimizer
1445  {
1446  public:
1454  BatchOptimizer();
1455 
1473  BatchOptimizer(const enum OptimizerType & optimization_method,
1474  const enum OptimizationFlags &optimization_flags =
 1475  OptimizationFlags::optimize_all);
 1476 
1486  BatchOptimizer(const BatchOptimizer &other/*,
1487  const bool copy_initialized = true*/);
1488 
1492  BatchOptimizer(BatchOptimizer &&) = default;
1493 
1497  ~BatchOptimizer() = default;
1498 
1508  template <typename Stream>
1509  void
1510  print(Stream &stream, const bool print_cse = false) const;
1511 
1520  template <class Archive>
1521  void
1522  save(Archive &archive, const unsigned int version) const;
1523 
1537  template <class Archive>
1538  void
1539  load(Archive &archive, const unsigned int version);
1540 
1541 # ifdef DOXYGEN
1542 
1563  template <class Archive>
1564  void
1565  serialize(Archive &archive, const unsigned int version);
1566 # else
1567  // This macro defines the serialize() method that is compatible with
1568  // the templated save() and load() method that have been implemented.
1569  BOOST_SERIALIZATION_SPLIT_MEMBER()
1570 # endif
1571 
1576 
1582  void
1583  register_symbols(const types::substitution_map &substitution_map);
1584 
1590  void
1591  register_symbols(const SymEngine::map_basic_basic &substitution_map);
1592 
1603  void
1604  register_symbols(const types::symbol_vector &symbols);
1605 
1616  void
1617  register_symbols(const SymEngine::vec_basic &symbols);
1618 
 1623  types::substitution_map
 1624  get_independent_symbols() const;
1625 
1631  std::size_t
1632  n_independent_variables() const;
1633 
1635 
1640 
1645  void
1646  register_function(const Expression &function);
1647 
1652  template <int rank, int dim>
1653  void
1654  register_function(const Tensor<rank, dim, Expression> &function_tensor);
1655 
1660  template <int rank, int dim>
1661  void
1662  register_function(
1663  const SymmetricTensor<rank, dim, Expression> &function_tensor);
1664 
1669  void
 1670  register_functions(const types::symbol_vector &functions);
 1671 
1676  void
1677  register_functions(const SymEngine::vec_basic &functions);
1678 
1688  template <typename T>
1689  void
1690  register_functions(const std::vector<T> &functions);
1691 
1705  template <typename T, typename... Args>
1706  void
1707  register_functions(const T &functions, const Args &... other_functions);
1708 
1713  const types::symbol_vector &
1714  get_dependent_functions() const;
1715 
1722  std::size_t
1723  n_dependent_variables() const;
1724 
1726 
1731 
1743  void
1744  set_optimization_method(const enum OptimizerType & optimization_method,
1745  const enum OptimizationFlags &optimization_flags =
 1746  OptimizationFlags::optimize_all);
 1747 
1752  enum OptimizerType
1753  optimization_method() const;
1754 
1759  enum OptimizationFlags
1760  optimization_flags() const;
1761 
1767  bool
1768  use_symbolic_CSE() const;
1769 
1785  void
1786  optimize();
1787 
1792  bool
1793  optimized() const;
1794 
1796 
1801 
1811  void
1812  substitute(const types::substitution_map &substitution_map) const;
1813 
1823  void
1824  substitute(const SymEngine::map_basic_basic &substitution_map) const;
1825 
1836  void
1837  substitute(const types::symbol_vector & symbols,
1838  const std::vector<ReturnType> &values) const;
1839 
1850  void
1851  substitute(const SymEngine::vec_basic & symbols,
1852  const std::vector<ReturnType> &values) const;
1853 
1859  bool
1860  values_substituted() const;
1861 
1863 
1868 
1891  const std::vector<ReturnType> &
1892  evaluate() const;
1893 
1901  ReturnType
1902  evaluate(const Expression &func) const;
1903 
1912  std::vector<ReturnType>
1913  evaluate(const std::vector<Expression> &funcs) const;
1914 
1923  template <int rank, int dim>
 1924  Tensor<rank, dim, ReturnType>
 1925  evaluate(const Tensor<rank, dim, Expression> &funcs) const;
1926 
1927 
1936  template <int rank, int dim>
 1937  SymmetricTensor<rank, dim, ReturnType>
 1938  evaluate(const SymmetricTensor<rank, dim, Expression> &funcs) const;
1939 
1940 
1948  ReturnType
1949  extract(const Expression & func,
1950  const std::vector<ReturnType> &cached_evaluation) const;
1951 
1952 
1960  std::vector<ReturnType>
1961  extract(const std::vector<Expression> &funcs,
1962  const std::vector<ReturnType> &cached_evaluation) const;
1963 
1964 
1972  template <int rank, int dim>
 1973  Tensor<rank, dim, ReturnType>
 1974  extract(const Tensor<rank, dim, Expression> &funcs,
 1975  const std::vector<ReturnType> & cached_evaluation) const;
1976 
1977 
1985  template <int rank, int dim>
 1986  SymmetricTensor<rank, dim, ReturnType>
 1987  extract(const SymmetricTensor<rank, dim, Expression> &funcs,
 1988  const std::vector<ReturnType> &cached_evaluation) const;
1989 
1991 
1992  private:
1996  enum OptimizerType method;
1997 
2002  enum OptimizationFlags flags;
2003 
 2014  types::substitution_map independent_variables_symbols;
 2021  types::symbol_vector dependent_variables_functions;
2026  bool
2027  is_valid_nonunique_dependent_variable(
2028  const SD::Expression &function) const;
2029 
2034  bool
2035  is_valid_nonunique_dependent_variable(
2036  const SymEngine::RCP<const SymEngine::Basic> &function) const;
2037 
2052  mutable std::vector<ReturnType> dependent_variables_output;
2053 
 2062  using map_dependent_expression_to_vector_entry_t =
 2063  std::map<SD::Expression,
 2064  std::size_t,
 2065  SD::types::internal::ExpressionKeyLess>;
 2066 
 2072  map_dependent_expression_to_vector_entry_t map_dep_expr_vec_entry;
2079  mutable std::unique_ptr<SymEngine::Visitor> optimizer;
2080 
 2090  mutable bool ready_for_value_extraction;
2095  mutable bool has_been_serialized;
2096 
2100  void
2101  register_scalar_function(const SD::Expression &function);
2102 
2107  void
2108  register_vector_functions(const types::symbol_vector &functions);
2109 
2113  void
2114  create_optimizer(std::unique_ptr<SymEngine::Visitor> &optimizer);
2115 
2132  void
2133  substitute(const std::vector<ReturnType> &substitution_values) const;
2134  };
2135 
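 // Illustrative usage sketch (not part of the original header). The symbol
 // names and numeric values below are made up; make_symbol() and the
 // Expression arithmetic come from the deal.II symbolic differentiation
 // headers included above (symengine_scalar_operations.h and
 // symengine_number_types.h).
 //
 //   using namespace Differentiation::SD;
 //
 //   // Independent variables and the dependent expressions built from them.
 //   const Expression x = make_symbol("x");
 //   const Expression y = make_symbol("y");
 //   const Expression f = x * y;
 //   const Expression g = x + y;
 //
 //   // Configure a batch optimizer that uses compiled lambda functions
 //   // together with common subexpression elimination.
 //   BatchOptimizer<double> optimizer(OptimizerType::lambda,
 //                                    OptimizationFlags::optimize_cse);
 //
 //   // Declare the inputs and outputs, then build the accelerated
 //   // evaluation path (a one-off, potentially expensive, step).
 //   optimizer.register_symbols(types::symbol_vector{x, y});
 //   optimizer.register_functions(f, g);
 //   optimizer.optimize();
 //
 //   // Substitute concrete values and evaluate the registered functions.
 //   optimizer.substitute(types::symbol_vector{x, y},
 //                        std::vector<double>{1.0, 2.0});
 //   const double f_value = optimizer.evaluate(f); // 2.0
 //   const double g_value = optimizer.evaluate(g); // 3.0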
2136 
2137 
2138  /* -------------------- inline and template functions ------------------ */
2139 
2140 
2141 # ifndef DOXYGEN
2142 
2143 
2144  template <typename ReturnType>
2145  template <typename Stream>
2146  void
2147  BatchOptimizer<ReturnType>::print(Stream &stream,
2148  const bool /*print_cse*/) const
2149  {
2150  // Settings
2151  stream << "Method? " << optimization_method() << "\n";
2152  stream << "Flags: " << optimization_flags() << "\n";
2153  stream << "Optimized? " << (optimized() ? "Yes" : "No") << "\n";
2154  stream << "Values substituted? " << values_substituted() << "\n\n";
2155 
2156  // Independent variables
2157  stream << "Symbols (" << n_independent_variables()
2158  << " independent variables):"
2159  << "\n";
2160  int cntr = 0;
2161  for (SD::types::substitution_map::const_iterator it =
2162  independent_variables_symbols.begin();
2163  it != independent_variables_symbols.end();
2164  ++it, ++cntr)
2165  {
2166  stream << cntr << ": " << it->first << "\n";
2167  }
2168  stream << "\n" << std::flush;
2169 
2170  // Dependent functions
2171  stream << "Functions (" << n_dependent_variables()
2172  << " dependent variables):"
2173  << "\n";
2174  cntr = 0;
2175  for (typename SD::types::symbol_vector::const_iterator it =
2176  dependent_variables_functions.begin();
2177  it != dependent_variables_functions.end();
2178  ++it, ++cntr)
2179  {
2180  stream << cntr << ": " << (*it) << "\n";
2181  }
2182  stream << "\n" << std::flush;
2183 
2184  // Common subexpression
2185  if (optimized() == true && use_symbolic_CSE() == true)
2186  {
2187  Assert(optimizer, ExcNotInitialized());
2188  const bool print_cse_reductions = true;
2189  const bool print_independent_symbols = false;
2190  const bool print_dependent_functions = false;
2191 
2192  if (optimization_method() == OptimizerType::dictionary)
2193  {
2194  Assert(dynamic_cast<typename internal::DictionaryOptimizer<
2195  ReturnType>::OptimizerType *>(optimizer.get()),
2196  ExcMessage("Cannot cast optimizer to Dictionary type."));
2197 
 2198  internal::OptimizerHelper<
 2199  ReturnType,
 2200  internal::DictionaryOptimizer<ReturnType>>::
 2201  print(stream,
2202  dynamic_cast<typename internal::DictionaryOptimizer<
2203  ReturnType>::OptimizerType *>(optimizer.get()),
2204  print_independent_symbols,
2205  print_dependent_functions,
2206  print_cse_reductions);
2207 
2208  stream << "\n" << std::flush;
2209  }
2210  else if (optimization_method() == OptimizerType::lambda)
2211  {
2212  Assert(dynamic_cast<typename internal::LambdaOptimizer<
2213  ReturnType>::OptimizerType *>(optimizer.get()),
2214  ExcMessage("Cannot cast optimizer to Lambda type."));
2215 
2216  internal::OptimizerHelper<ReturnType,
 2217  internal::LambdaOptimizer<ReturnType>>::
 2218  print(stream,
2219  dynamic_cast<typename internal::LambdaOptimizer<
2220  ReturnType>::OptimizerType *>(optimizer.get()),
2221  print_independent_symbols,
2222  print_dependent_functions,
2223  print_cse_reductions);
2224  }
2225 # ifdef HAVE_SYMENGINE_LLVM
2226  else if (optimization_method() == OptimizerType::llvm)
2227  {
2228  Assert(dynamic_cast<typename internal::LLVMOptimizer<
2229  ReturnType>::OptimizerType *>(optimizer.get()),
2230  ExcMessage("Cannot cast optimizer to LLVM type."));
2231 
2232  internal::OptimizerHelper<ReturnType,
2233  internal::LLVMOptimizer<ReturnType>>::
2234  print(stream,
2235  dynamic_cast<typename internal::LLVMOptimizer<
2236  ReturnType>::OptimizerType *>(optimizer.get()),
2237  print_independent_symbols,
2238  print_dependent_functions,
2239  print_cse_reductions);
2240  }
2241 # endif // HAVE_SYMENGINE_LLVM
2242  else
2243  {
2244  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2245  }
2246  }
2247 
2248  if (values_substituted())
2249  {
2250  stream << "Evaluated functions:"
2251  << "\n";
2252  stream << std::flush;
2253  cntr = 0;
2254  for (typename std::vector<ReturnType>::const_iterator it =
2255  dependent_variables_output.begin();
2256  it != dependent_variables_output.end();
2257  ++it, ++cntr)
2258  {
2259  stream << cntr << ": " << (*it) << "\n";
2260  }
2261  stream << "\n" << std::flush;
2262  }
2263  }
2264 
2265 
2266 
2267  template <typename ReturnType>
2268  template <class Archive>
2269  void
2270  BatchOptimizer<ReturnType>::save(Archive & ar,
2271  const unsigned int version) const
2272  {
2273  // Serialize enum classes...
2274  {
2275  const auto m =
2276  static_cast<typename std::underlying_type<OptimizerType>::type>(
2277  method);
2278  ar &m;
2279  }
2280  {
2281  const auto f =
2282  static_cast<typename std::underlying_type<OptimizationFlags>::type>(
2283  flags);
2284  ar &f;
2285  }
2286 
2287  // Important: Independent variables must always be
2288  // serialized before the dependent variables.
2289  ar &independent_variables_symbols;
2290  ar &dependent_variables_functions;
2291 
2292  ar &dependent_variables_output;
2293  ar &map_dep_expr_vec_entry;
2294  ar &ready_for_value_extraction;
2295 
2296  // Mark that we've saved this class at some point.
2297  has_been_serialized = true;
2298  ar &has_been_serialized;
2299 
2300  // When we serialize the optimizer itself, we have to (unfortunately)
2301  // provide it with sufficient information to rebuild itself from scratch.
2302  // This is because only two of the three optimization classes support
2303  // real serialization (i.e. have save/load capability).
2304  const SD::types::symbol_vector symbol_vec =
2305  Utilities::extract_symbols(independent_variables_symbols);
 2306  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
 2307  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2308  ReturnType>::OptimizerType *>(optimizer.get()))
2309  {
2310  Assert(optimization_method() == OptimizerType::dictionary,
2311  ExcInternalError());
 2312  internal::OptimizerHelper<
 2313  ReturnType,
2314  internal::DictionaryOptimizer<ReturnType>>::save(ar, version, opt);
2315  }
 2316  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
 2317  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2318  ReturnType>::OptimizerType *>(optimizer.get()))
2319  {
2320  Assert(optimization_method() == OptimizerType::lambda,
2321  ExcInternalError());
 2322  internal::OptimizerHelper<
 2323  ReturnType,
2324  internal::LambdaOptimizer<ReturnType>>::save(ar, version, opt);
2325  }
2326 # ifdef HAVE_SYMENGINE_LLVM
 2327  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
 2328  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2329  ReturnType>::OptimizerType *>(optimizer.get()))
2330  {
2331  Assert(optimization_method() == OptimizerType::llvm,
2332  ExcInternalError());
 2333  internal::OptimizerHelper<
 2334  ReturnType,
2335  internal::LLVMOptimizer<ReturnType>>::save(ar, version, opt);
2336  }
2337 # endif
2338  else
2339  {
2340  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2341  }
2342  }
2343 
2344 
2345 
2346  template <typename ReturnType>
2347  template <class Archive>
2348  void
2349  BatchOptimizer<ReturnType>::load(Archive &ar, const unsigned int version)
2350  {
2351  Assert(independent_variables_symbols.empty(), ExcInternalError());
2352  Assert(dependent_variables_functions.empty(), ExcInternalError());
2353  Assert(dependent_variables_output.empty(), ExcInternalError());
2354  Assert(map_dep_expr_vec_entry.empty(), ExcInternalError());
2355  Assert(ready_for_value_extraction == false, ExcInternalError());
2356 
2357  // Deserialize enum classes...
2358  {
2359  typename std::underlying_type<OptimizerType>::type m;
2360  ar & m;
2361  method = static_cast<OptimizerType>(m);
2362  }
2363  {
2364  typename std::underlying_type<OptimizationFlags>::type f;
2365  ar & f;
2366  flags = static_cast<OptimizationFlags>(f);
2367  }
2368 
2369  // Important: Independent variables must always be
2370  // deserialized before the dependent variables.
2371  ar &independent_variables_symbols;
2372  ar &dependent_variables_functions;
2373 
2374  ar &dependent_variables_output;
2375  ar &map_dep_expr_vec_entry;
2376  ar &ready_for_value_extraction;
2377 
2378  ar &has_been_serialized;
2379 
2380  // If we're reading in data, then create the optimizer
2381  // and then deserialize it.
2382  Assert(!optimizer, ExcInternalError());
2383 
2384  // Create and configure the optimizer
2385  create_optimizer(optimizer);
2386  Assert(optimizer, ExcNotInitialized());
2387 
2388  // When we deserialize the optimizer itself, we have to (unfortunately)
2389  // provide it with sufficient information to rebuild itself from scratch.
2390  // This is because only two of the three optimization classes support
2391  // real serialization (i.e. have save/load capability).
2392  const SD::types::symbol_vector symbol_vec =
2393  Utilities::extract_symbols(independent_variables_symbols);
 2394  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
 2395  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2396  ReturnType>::OptimizerType *>(optimizer.get()))
2397  {
2398  Assert(optimization_method() == OptimizerType::dictionary,
2399  ExcInternalError());
2400  internal::OptimizerHelper<ReturnType,
 2401  internal::DictionaryOptimizer<ReturnType>>::
 2402  load(ar,
2403  version,
2404  opt,
 2405  Utilities::convert_expression_vector_to_basic_vector(
 2406  symbol_vec),
 2407  Utilities::convert_expression_vector_to_basic_vector(
 2408  dependent_variables_functions),
2409  optimization_flags());
2410  }
 2411  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
 2412  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2413  ReturnType>::OptimizerType *>(optimizer.get()))
2414  {
2415  Assert(optimization_method() == OptimizerType::lambda,
2416  ExcInternalError());
2417  internal::OptimizerHelper<ReturnType,
 2418  internal::LambdaOptimizer<ReturnType>>::
 2419  load(ar,
2420  version,
2421  opt,
 2422  Utilities::convert_expression_vector_to_basic_vector(
 2423  symbol_vec),
 2424  Utilities::convert_expression_vector_to_basic_vector(
 2425  dependent_variables_functions),
2426  optimization_flags());
2427  }
2428 # ifdef HAVE_SYMENGINE_LLVM
 2429  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
 2430  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2431  ReturnType>::OptimizerType *>(optimizer.get()))
2432  {
2433  Assert(optimization_method() == OptimizerType::llvm,
2434  ExcInternalError());
2435  internal::OptimizerHelper<ReturnType,
2436  internal::LLVMOptimizer<ReturnType>>::
2437  load(ar,
2438  version,
2439  opt,
 2440  Utilities::convert_expression_vector_to_basic_vector(
 2441  symbol_vec),
 2442  Utilities::convert_expression_vector_to_basic_vector(
 2443  dependent_variables_functions),
2444  optimization_flags());
2445  }
2446 # endif
2447  else
2448  {
2449  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2450  }
2451  }
2452 
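 // Illustrative sketch (not part of the original header): because
 // BOOST_SERIALIZATION_SPLIT_MEMBER() ties the save()/load() members above
 // into Boost's serialize() interface, a fully set up optimizer can be
 // round-tripped through a Boost archive. The archive type, the streams and
 // the ReturnType (double) below are arbitrary choices for the example; the
 // loading object must be freshly default-constructed.
 //
 //   #include <boost/archive/text_iarchive.hpp>
 //   #include <boost/archive/text_oarchive.hpp>
 //   #include <sstream>
 //
 //   std::ostringstream oss;
 //   {
 //     boost::archive::text_oarchive oa(oss);
 //     const BatchOptimizer<double> &const_ref = optimizer;
 //     oa << const_ref; // invokes BatchOptimizer::save()
 //   }
 //
 //   BatchOptimizer<double> deserialized_optimizer;
 //   std::istringstream iss(oss.str());
 //   {
 //     boost::archive::text_iarchive ia(iss);
 //     ia >> deserialized_optimizer; // invokes BatchOptimizer::load()
 //   }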
2453 
2454 
2455  template <typename ReturnType>
2456  template <int rank, int dim>
2457  void
 2458  BatchOptimizer<ReturnType>::register_function(
 2459  const Tensor<rank, dim, Expression> &function_tensor)
2460  {
2461  Assert(optimized() == false,
2462  ExcMessage(
2463  "Cannot register functions once the optimizer is finalised."));
2464 
2465  register_vector_functions(
2466  internal::unroll_to_expression_vector(function_tensor));
2467  }
2468 
2469 
2470 
2471  template <typename ReturnType>
2472  template <int rank, int dim>
2473  void
 2474  BatchOptimizer<ReturnType>::register_function(
 2475  const SymmetricTensor<rank, dim, Expression> &function_tensor)
2476  {
2477  Assert(optimized() == false,
2478  ExcMessage(
2479  "Cannot register functions once the optimizer is finalised."));
2480 
2481  register_vector_functions(
2482  internal::unroll_to_expression_vector(function_tensor));
2483  }
2484 
2485 
2486 
2487  template <typename ReturnType>
2488  template <typename T, typename... Args>
2489  void
 2490  BatchOptimizer<ReturnType>::register_functions(
 2491  const T &functions,
2492  const Args &... other_functions)
2493  {
2494  internal::register_functions(*this, functions);
2495  internal::register_functions(*this, other_functions...);
2496  }
2497 
2498 
2499 
2500  template <typename ReturnType>
2501  template <typename T>
2502  void
 2503  BatchOptimizer<ReturnType>::register_functions(
 2504  const std::vector<T> &functions)
2505  {
2506  internal::register_functions(*this, functions);
2507  }
2508 
2509 
2510 
2511  template <typename ReturnType>
2512  template <int rank, int dim>
 2513  Tensor<rank, dim, ReturnType>
 2514  BatchOptimizer<ReturnType>::extract(
 2515  const Tensor<rank, dim, Expression> &funcs,
2516  const std::vector<ReturnType> & cached_evaluation) const
2517  {
2518  Assert(
2519  values_substituted() == true,
2520  ExcMessage(
2521  "The optimizer is not configured to perform evaluation. "
 2522  "This action can only be performed after substitute() has been called."));
2523 
 2524  return internal::tensor_evaluate_optimized(funcs,
 2525  cached_evaluation,
2526  *this);
2527  }
2528 
2529 
2530 
2531  template <typename ReturnType>
2532  template <int rank, int dim>
 2533  Tensor<rank, dim, ReturnType>
 2534  BatchOptimizer<ReturnType>::evaluate(
 2535  const Tensor<rank, dim, Expression> &funcs) const
2536  {
2537  return extract(funcs, dependent_variables_output);
2538  }
2539 
2540 
2541 
2542  template <typename ReturnType>
2543  template <int rank, int dim>
 2544  SymmetricTensor<rank, dim, ReturnType>
 2545  BatchOptimizer<ReturnType>::extract(
 2546  const SymmetricTensor<rank, dim, Expression> &funcs,
 2547  const std::vector<ReturnType> & cached_evaluation) const
2548  {
2549  Assert(
2550  values_substituted() == true,
2551  ExcMessage(
2552  "The optimizer is not configured to perform evaluation. "
 2553  "This action can only be performed after substitute() has been called."));
2554 
 2555  return internal::tensor_evaluate_optimized(funcs,
 2556  cached_evaluation,
2557  *this);
2558  }
2559 
2560 
2561 
2562  template <typename ReturnType>
2563  template <int rank, int dim>
 2564  SymmetricTensor<rank, dim, ReturnType>
 2565  BatchOptimizer<ReturnType>::evaluate(
 2566  const SymmetricTensor<rank, dim, Expression> &funcs) const
2567  {
2568  return extract(funcs, dependent_variables_output);
2569  }
2570 
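 // Illustrative sketch (not part of the original header): evaluate() looks up
 // already-substituted results, while extract() reuses a cached result vector
 // so that several quantities can be retrieved from a single call to
 // evaluate(). "F" below stands for a hypothetical rank-2 tensor of
 // expressions that was previously registered as a dependent function.
 //
 //   // After optimizer.substitute(...):
 //   const std::vector<double> &cached = optimizer.evaluate();
 //   const Tensor<2, dim, double> F_direct = optimizer.evaluate(F);
 //   const Tensor<2, dim, double> F_cached = optimizer.extract(F, cached);
 //   // F_direct and F_cached hold identical values.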
2571 # endif // DOXYGEN
2572 
2573  } // namespace SD
2574 } // namespace Differentiation
2575 
2576 
 2578 DEAL_II_NAMESPACE_CLOSE
2579 #endif // DEAL_II_WITH_SYMENGINE
2580 
2581 #endif