Reference documentation for deal.II version Git 122d3af54c 2020-10-24 18:39:52 -0400
symengine_optimizer.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_differentiation_sd_symengine_optimizer_h
17 #define dealii_differentiation_sd_symengine_optimizer_h
18 
19 #include <deal.II/base/config.h>
20 
21 #ifdef DEAL_II_WITH_SYMENGINE
22 
23 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
24 // Low level
25 # include <symengine/basic.h>
26 # include <symengine/dict.h>
27 # include <symengine/symengine_exception.h>
28 # include <symengine/symengine_rcp.h>
29 
30 // Optimization
31 # include <symengine/lambda_double.h>
32 # include <symengine/visitor.h>
33 # ifdef HAVE_SYMENGINE_LLVM
34 # include <symengine/llvm_double.h>
35 # endif
36 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
37 
38 # include <deal.II/base/exceptions.h>
39 # include <deal.II/base/logstream.h>
40 # include <deal.II/base/utilities.h>
41 
47 
48 # include <boost/serialization/split_member.hpp>
49 # include <boost/type_traits.hpp>
50 
51 # include <algorithm>
52 # include <map>
53 # include <memory>
54 # include <type_traits>
55 # include <utility>
56 # include <vector>
57 
58 
59 DEAL_II_NAMESPACE_OPEN
60 
61 
62 namespace Differentiation
63 {
64  namespace SD
65  {
75  DeclExceptionMsg(ExcSymEngineLLVMNotAvailable,
76  "SymEngine has not been built with LLVM support.");
77 
82  DeclExceptionMsg(ExcSymEngineLLVMReturnTypeNotSupported,
83  "The SymEngine LLVM optimizer does not (yet) support the "
84  "selected return type.");
85 
87 
88 
89  // Forward declarations
90  template <typename ReturnType>
91  class BatchOptimizer;
92 
93 
99  enum class OptimizerType
100  {
104  dictionary,
109  lambda,
114  llvm
115  };
116 
117 
121  template <class StreamType>
122  inline StreamType &
123  operator<<(StreamType &s, OptimizerType o)
124  {
125  if (o == OptimizerType::dictionary)
126  s << "dictionary";
127  else if (o == OptimizerType::lambda)
128  s << "lambda";
129  else if (o == OptimizerType::llvm)
130  s << "llvm";
131  else
132  {
133  Assert(false, ExcMessage("Unknown optimization method."));
134  }
135 
136  return s;
137  }
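As a brief usage sketch (not part of this header), the stream inserter above just prints the name of the selected method; the snippet below assumes only this header and a standard output stream.

#include <deal.II/differentiation/sd/symengine_optimizer.h>
#include <iostream>

int main()
{
  using dealii::Differentiation::SD::OptimizerType;
  std::cout << OptimizerType::dictionary << '\n'; // prints "dictionary"
  std::cout << OptimizerType::llvm << '\n';       // prints "llvm"
}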
138 
139 
145  enum class OptimizationFlags : unsigned char
146  {
150  optimize_default = 0,
154  optimize_cse = 0x0001,
159  optimize_aggressive = 0x0002,
163  optimize_all = optimize_cse | optimize_aggressive
164  };
165 
166 
175  // This operator exists since if it did not then the result of the bit-or
176  // <tt>operator |</tt> would be an integer which would in turn trigger a
177  // compiler warning when we tried to assign it to an object of type
178  // OptimizationFlags.
179  inline OptimizationFlags
180  operator|(const OptimizationFlags f1, const OptimizationFlags f2)
181  {
182  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) |
183  static_cast<unsigned int>(f2));
184  }
185 
186 
191  inline OptimizationFlags &
192  operator|=(OptimizationFlags &f1, const OptimizationFlags f2)
193  {
194  f1 = f1 | f2;
195  return f1;
196  }
197 
198 
207  // This operator exists since if it did not then the result of the bit-or
208  // <tt>operator |</tt> would be an integer which would in turn trigger a
209  // compiler warning when we tried to assign it to an object of type
210  // OptimizationFlags.
211  inline OptimizationFlags operator&(const OptimizationFlags f1,
212  const OptimizationFlags f2)
213  {
214  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) &
215  static_cast<unsigned int>(f2));
216  }
217 
218 
223  inline OptimizationFlags &
224  operator&=(OptimizationFlags &f1, const OptimizationFlags f2)
225  {
226  f1 = f1 & f2;
227  return f1;
228  }
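A minimal sketch of how the OptimizationFlags operators defined above might be combined and queried; it is illustrative only and uses nothing beyond the declarations in this file.

#include <deal.II/differentiation/sd/symengine_optimizer.h>

int main()
{
  using namespace dealii::Differentiation::SD;

  // Start with CSE only, then also request aggressive optimization.
  OptimizationFlags flags = OptimizationFlags::optimize_cse;
  flags |= OptimizationFlags::optimize_aggressive; // both optimization bits set

  // The bitwise 'and' is how individual flags are queried.
  const bool cse_requested =
    static_cast<int>(flags & OptimizationFlags::optimize_cse) != 0;
  return cse_requested ? 0 : 1;
}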
229 
230 
231  namespace internal
232  {
237  inline bool
238  use_symbolic_CSE(const enum OptimizationFlags &flags)
239  {
240  return static_cast<int>(flags & OptimizationFlags::optimize_cse);
241  }
242 
247  inline int
248  get_LLVM_optimization_level(const enum OptimizationFlags &flags)
249  {
250  // With the LLVM compiler there exists the opportunity to tune
251  // the level of optimizations performed during compilation.
252  // By default SymEngine sets this at "opt_level=2", which one
253  // presumes targets -O2. Here we are a bit more specific about
254  // what we want it to do:
255  // - Normal compilation: -O2 (default settings)
256  // - Aggressive mode: -O3 (the whole lot!)
257  // In theory we could also target
258  // - Debug mode: -O0 (no optimizations)
259  // but this doesn't make much sense since SymEngine is a
260  // tested external library.
261  const bool use_agg_opt =
262  static_cast<int>(flags & OptimizationFlags::optimize_aggressive);
263  const int opt_level = (use_agg_opt ? 3 : 2);
264  return opt_level;
265  }
266  } // namespace internal
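To make the flag-to-compiler-level mapping above concrete, here is a small illustrative check. The internal:: functions are implementation details, so this is purely a sketch of the behavior documented in the comment: -O2 by default, -O3 when optimize_aggressive is set.

#include <deal.II/differentiation/sd/symengine_optimizer.h>
#include <cassert>

int main()
{
  namespace SD = dealii::Differentiation::SD;
  // Default flags map to LLVM opt_level 2 (-O2) ...
  assert(SD::internal::get_LLVM_optimization_level(
           SD::OptimizationFlags::optimize_default) == 2);
  // ... and aggressive mode maps to opt_level 3 (-O3).
  assert(SD::internal::get_LLVM_optimization_level(
           SD::OptimizationFlags::optimize_aggressive) == 3);
}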
267 
268 
273  template <class StreamType>
274  inline StreamType &
275  operator<<(StreamType &s, OptimizationFlags o)
276  {
277  s << " OptimizationFlags|";
278  if (static_cast<unsigned int>(o & OptimizationFlags::optimize_cse))
279  s << "cse|";
280 
281  // LLVM optimization level
282  s << "-O" +
283  Utilities::to_string(
284  internal::get_LLVM_optimization_level(o)) +
285  "|";
286 
287  return s;
288  }
289 
290 
291  namespace internal
292  {
302  template <typename ReturnType, typename T = void>
303  struct DictionaryOptimizer;
304 
305 
315  template <typename ReturnType, typename T = void>
316  struct LambdaOptimizer;
317 
318 
319 # ifdef HAVE_SYMENGINE_LLVM
320 
329  template <typename ReturnType, typename T = void>
330  struct LLVMOptimizer;
331 # endif // HAVE_SYMENGINE_LLVM
332 
333 
349  template <typename ReturnType, typename Optimizer, typename T = void>
350  struct OptimizerHelper;
351 
352 
353 # ifndef DOXYGEN
354 
355 
356  /* ----------- Specializations for the Optimizers ----------- */
357 
358 
359  // A helper struct to type trait detection for the optimizers that
360  // will be defined next.
361  template <typename ReturnType_, typename T = void>
362  struct SupportedOptimizerTypeTraits
363  {
364  static const bool is_supported = false;
365 
366  using ReturnType = void;
367  };
368 
369 
370 
371  // Specialization for arithmetic types
372  template <typename ReturnType_>
373  struct SupportedOptimizerTypeTraits<
374  ReturnType_,
375  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
376  {
377  static const bool is_supported = true;
378 
379  using ReturnType =
380  typename std::conditional<std::is_same<ReturnType_, float>::value,
381  float,
382  double>::type;
383  };
384 
385 
386 
387  // Specialization for complex arithmetic types
388  template <typename ReturnType_>
389  struct SupportedOptimizerTypeTraits<
390  ReturnType_,
391  typename std::enable_if<
392  boost::is_complex<ReturnType_>::value &&
393  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
394  {
395  static const bool is_supported = true;
396 
397  using ReturnType = typename std::conditional<
398  std::is_same<ReturnType_, std::complex<float>>::value,
399  std::complex<float>,
400  std::complex<double>>::type;
401  };
402 
403 
404 
405  template <typename ReturnType_>
406  struct DictionaryOptimizer<
407  ReturnType_,
408  typename std::enable_if<
409  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
410  {
411  using ReturnType =
412  typename SupportedOptimizerTypeTraits<ReturnType_>::ReturnType;
413  using OptimizerType =
414  internal::DictionarySubstitutionVisitor<ReturnType, SD::Expression>;
415 
416 
425  static void
426  initialize(OptimizerType & optimizer,
427  const SymEngine::vec_basic & independent_symbols,
428  const SymEngine::vec_basic & dependent_functions,
429  const enum OptimizationFlags &optimization_flags)
430  {
431  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
432  optimizer.init(independent_symbols,
433  dependent_functions,
434  use_symbolic_cse);
435  }
436 
437 
438 
443  template <class Archive>
444  static void
445  save(Archive & archive,
446  const unsigned int version,
447  OptimizerType & optimizer)
448  {
449  optimizer.save(archive, version);
450  }
451 
452 
453 
458  template <class Archive>
459  static void
460  load(Archive & archive,
461  const unsigned int version,
462  OptimizerType & optimizer,
463  const SymEngine::vec_basic & /*independent_symbols*/,
464  const SymEngine::vec_basic & /*dependent_functions*/,
465  const enum OptimizationFlags & /*optimization_flags*/)
466  {
467  optimizer.load(archive, version);
468  }
469 
470 
471 
487  template <typename Stream>
488  static void
489  print(Stream & stream,
490  const OptimizerType &optimizer,
491  const bool print_independent_symbols = false,
492  const bool print_dependent_functions = false,
493  const bool print_cse_reductions = true)
494  {
495  optimizer.print(stream,
496  print_independent_symbols,
497  print_dependent_functions,
498  print_cse_reductions);
499  }
500  };
501 
502 
503 
504  template <typename ReturnType_>
505  struct LambdaOptimizer<
506  ReturnType_,
507  typename std::enable_if<
508  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
509  {
510  using ReturnType =
511  typename std::conditional<!boost::is_complex<ReturnType_>::value,
512  double,
513  std::complex<double>>::type;
514  using OptimizerType = typename std::conditional<
515  !boost::is_complex<ReturnType_>::value,
516  SymEngine::LambdaRealDoubleVisitor,
517  SymEngine::LambdaComplexDoubleVisitor>::type;
518 
519 
528  static void
529  initialize(OptimizerType & optimizer,
530  const SymEngine::vec_basic & independent_symbols,
531  const SymEngine::vec_basic & dependent_functions,
532  const enum OptimizationFlags &optimization_flags)
533  {
534  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
535  optimizer.init(independent_symbols,
536  dependent_functions,
537  use_symbolic_cse);
538  }
539 
540 
541 
546  template <class Archive>
547  static void
548  save(Archive & /*archive*/,
549  const unsigned int /*version*/,
550  OptimizerType & /*optimizer*/)
551  {}
552 
553 
558  template <class Archive>
559  static void
560  load(Archive & /*archive*/,
561  const unsigned int /*version*/,
562  OptimizerType & optimizer,
563  const SymEngine::vec_basic & independent_symbols,
564  const SymEngine::vec_basic & dependent_functions,
565  const enum OptimizationFlags &optimization_flags)
566  {
567  initialize(optimizer,
568  independent_symbols,
569  dependent_functions,
570  optimization_flags);
571  }
572 
573 
574 
590  template <typename StreamType>
591  static void
592  print(StreamType & /*stream*/,
593  const OptimizerType & /*optimizer*/,
594  const bool /*print_independent_symbols*/ = false,
595  const bool /*print_dependent_functions*/ = false,
596  const bool /*print_cse_reductions*/ = true)
597  {
598  // No built-in print function
599  }
600  };
601 
602 
603 
604 # ifdef HAVE_SYMENGINE_LLVM
605  template <typename ReturnType_>
606  struct LLVMOptimizer<
607  ReturnType_,
608  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
609  {
610  using ReturnType =
611  typename std::conditional<std::is_same<ReturnType_, float>::value,
612  float,
613  double>::type;
614  using OptimizerType =
615  typename std::conditional<std::is_same<ReturnType_, float>::value,
616  SymEngine::LLVMFloatVisitor,
617  SymEngine::LLVMDoubleVisitor>::type;
618 
623  static const bool supported_by_LLVM = true;
624 
625 
634  static void
635  initialize(OptimizerType & optimizer,
636  const SymEngine::vec_basic & independent_symbols,
637  const SymEngine::vec_basic & dependent_functions,
638  const enum OptimizationFlags &optimization_flags)
639  {
640  const int opt_level = get_LLVM_optimization_level(optimization_flags);
641  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
642  optimizer.init(independent_symbols,
643  dependent_functions,
644  use_symbolic_cse,
645  opt_level);
646  }
647 
648 
649 
654  template <class Archive>
655  static void
656  save(Archive &archive,
657  const unsigned int /*version*/,
658  OptimizerType &optimizer)
659  {
660  const std::string llvm_compiled_function = optimizer.dumps();
661  archive & llvm_compiled_function;
662  }
663 
664 
665 
670  template <class Archive>
671  static void
672  load(Archive &archive,
673  const unsigned int /*version*/,
674  OptimizerType &optimizer,
675  const SymEngine::vec_basic & /*independent_symbols*/,
676  const SymEngine::vec_basic & /*dependent_functions*/,
677  const enum OptimizationFlags & /*optimization_flags*/)
678  {
679  std::string llvm_compiled_function;
680  archive & llvm_compiled_function;
681  optimizer.loads(llvm_compiled_function);
682  }
683 
684 
685 
701  template <typename StreamType>
702  static void
703  print(StreamType & /*stream*/,
704  const OptimizerType & /*optimizer*/,
705  const bool /*print_independent_symbols*/ = false,
706  const bool /*print_dependent_functions*/ = false,
707  const bool /*print_cse_reductions*/ = true)
708  {
709  // No built-in print function
710  }
711  };
712 
713 
714  // There is no LLVM optimizer built with complex number support.
715  // So we fall back to the LambdaDouble case as a type (required
716  // at compile time), but offer no implementation. We expect that
717  // the calling class does not create this type: This can be done by
718  // checking the `supported_by_LLVM` flag.
719  template <typename ReturnType_>
720  struct LLVMOptimizer<
721  ReturnType_,
722  typename std::enable_if<
723  boost::is_complex<ReturnType_>::value &&
724  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
725  {
726  // Since there is no working implementation, these are dummy types
727  // that help with templating in the calling function.
728  using ReturnType = typename LambdaOptimizer<ReturnType_>::ReturnType;
729  using OptimizerType =
730  typename LambdaOptimizer<ReturnType_>::OptimizerType;
731 
736  static const bool supported_by_LLVM = false;
737 
738 
747  static void
748  initialize(OptimizerType & /*optimizer*/,
749  const SymEngine::vec_basic & /*independent_symbols*/,
750  const SymEngine::vec_basic & /*dependent_functions*/,
751  const enum OptimizationFlags & /*optimization_flags*/)
752  {
753  AssertThrow(false, ExcNotImplemented());
754  }
755 
756 
757 
762  template <class Archive>
763  static void
764  save(Archive & /*archive*/,
765  const unsigned int /*version*/,
766  OptimizerType & /*optimizer*/)
767  {
768  AssertThrow(false, ExcNotImplemented());
769  }
770 
771 
772 
777  template <class Archive>
778  static void
779  load(Archive & /*archive*/,
780  const unsigned int /*version*/,
781  OptimizerType & /*optimizer*/,
782  const SymEngine::vec_basic & /*independent_symbols*/,
783  const SymEngine::vec_basic & /*dependent_functions*/,
784  const enum OptimizationFlags & /*optimization_flags*/)
785  {
786  AssertThrow(false, ExcNotImplemented());
787  }
788 
789 
790 
806  template <typename StreamType>
807  static void
808  print(StreamType & /*stream*/,
809  const OptimizerType & /*optimizer*/,
810  const bool /*print_independent_symbols*/ = false,
811  const bool /*print_dependent_functions*/ = false,
812  const bool /*print_cse_reductions*/ = true)
813  {
814  AssertThrow(false, ExcNotImplemented());
815  }
816  };
817 # endif // HAVE_SYMENGINE_LLVM
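The comment above notes that complex return types have no LLVM implementation and that calling code is expected to consult the supported_by_LLVM flag before constructing the type. A hedged sketch of such a guard follows; llvm_optimizer_available() is a hypothetical helper, not library code, and the real dispatch lives in BatchOptimizer itself.

#include <deal.II/differentiation/sd/symengine_optimizer.h>

// Returns whether the LLVM batch optimizer can be used for the given
// (supported) scalar type. Falls back to 'false' when SymEngine was built
// without LLVM support.
template <typename ReturnType>
bool llvm_optimizer_available()
{
#ifdef HAVE_SYMENGINE_LLVM
  return dealii::Differentiation::SD::internal::LLVMOptimizer<
    ReturnType>::supported_by_LLVM;
#else
  return false;
#endif
}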
818 
819 
820  /* ----------- Specializations for OptimizerHelper ----------- */
821 
822 
823  template <typename ReturnType, typename Optimizer>
824  struct OptimizerHelper<ReturnType,
825  Optimizer,
826  typename std::enable_if<std::is_same<
827  ReturnType,
828  typename Optimizer::ReturnType>::value>::type>
829  {
838  static void
839  initialize(typename Optimizer::OptimizerType *optimizer,
840  const SymEngine::vec_basic & independent_symbols,
841  const SymEngine::vec_basic & dependent_functions,
842  const enum OptimizationFlags & optimization_flags)
843  {
844  Assert(optimizer, ExcNotInitialized());
845 
846  // Some optimizers don't have the same interface for
847  // initialization, so we filter them out through the specializations
848  // of the Optimizer class
849  Optimizer::initialize(*optimizer,
850  independent_symbols,
851  dependent_functions,
852  optimization_flags);
853  }
854 
855 
856 
870  static void
871  substitute(typename Optimizer::OptimizerType *optimizer,
872  std::vector<ReturnType> & output_values,
873  const std::vector<ReturnType> & substitution_values)
874  {
875  Assert(optimizer, ExcNotInitialized());
876  optimizer->call(output_values.data(), substitution_values.data());
877  }
878 
879 
880 
885  template <class Archive>
886  static void
887  save(Archive & archive,
888  const unsigned int version,
889  typename Optimizer::OptimizerType *optimizer)
890  {
891  Assert(optimizer, ExcNotInitialized());
892 
893  // Some optimizers don't have the same interface for
894  // serialization, so we filter them out through the specializations
895  // of the Optimizer class
896  Optimizer::save(archive, version, *optimizer);
897  }
898 
899 
900 
905  template <class Archive>
906  static void
907  load(Archive & archive,
908  const unsigned int version,
909  typename Optimizer::OptimizerType *optimizer,
910  const SymEngine::vec_basic & independent_symbols,
911  const SymEngine::vec_basic & dependent_functions,
912  const enum OptimizationFlags & optimization_flags)
913  {
914  Assert(optimizer, ExcNotInitialized());
915 
916  // Some optimizers don't have the same interface for
917  // serialization, so we filter them out through the specializations
918  // of the Optimizer class
919  Optimizer::load(archive,
920  version,
921  *optimizer,
922  independent_symbols,
923  dependent_functions,
924  optimization_flags);
925  }
926 
927 
928 
944  template <typename Stream>
945  static void
946  print(Stream & stream,
947  typename Optimizer::OptimizerType *optimizer,
948  const bool print_independent_symbols = false,
949  const bool print_dependent_functions = false,
950  const bool print_cse_reductions = true)
951  {
952  Assert(optimizer, ExcNotInitialized());
953 
954  // Some optimizers don't have a print function, so
955  // we filter them out through the specializations of
956  // the Optimizer class
957  Optimizer::print(stream,
958  *optimizer,
959  print_independent_symbols,
960  print_dependent_functions,
961  print_cse_reductions);
962  }
963  };
964 
965  template <typename ReturnType, typename Optimizer>
966  struct OptimizerHelper<ReturnType,
967  Optimizer,
968  typename std::enable_if<!std::is_same<
969  ReturnType,
970  typename Optimizer::ReturnType>::value>::type>
971  {
980  static void
981  initialize(typename Optimizer::OptimizerType *optimizer,
982  const SymEngine::vec_basic & independent_symbols,
983  const SymEngine::vec_basic & dependent_functions,
984  const enum OptimizationFlags & optimization_flags)
985  {
986  Assert(optimizer, ExcNotInitialized());
987 
988  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
989  optimizer->init(independent_symbols,
990  dependent_functions,
991  use_symbolic_cse);
992  }
993 
994 
995 
1009  static void
1010  substitute(typename Optimizer::OptimizerType *optimizer,
1011  std::vector<ReturnType> & output_values,
1012  const std::vector<ReturnType> & substitution_values)
1013  {
1014  Assert(optimizer, ExcNotInitialized());
1015 
1016  // Intermediate values to accommodate the difference in
1017  // value types.
1018  std::vector<typename Optimizer::ReturnType> int_outputs(
1019  output_values.size());
1020  std::vector<typename Optimizer::ReturnType> int_inputs(
1021  substitution_values.size());
1022 
1023  std::copy(substitution_values.begin(),
1024  substitution_values.end(),
1025  int_inputs.begin());
1026  optimizer->call(int_outputs.data(), int_inputs.data());
1027  std::copy(int_outputs.begin(),
1028  int_outputs.end(),
1029  output_values.begin());
1030  }
1031 
1032 
1033 
1038  template <class Archive>
1039  static void
1040  save(Archive & archive,
1041  const unsigned int version,
1042  typename Optimizer::OptimizerType *optimizer)
1043  {
1044  Assert(optimizer, ExcNotInitialized());
1045  Optimizer::save(archive, version, *optimizer);
1046  }
1047 
1048 
1049 
1054  template <class Archive>
1055  static void
1056  load(Archive & archive,
1057  const unsigned int version,
1058  typename Optimizer::OptimizerType *optimizer,
1059  const SymEngine::vec_basic & independent_symbols,
1060  const SymEngine::vec_basic & dependent_functions,
1061  const enum OptimizationFlags & optimization_flags)
1062  {
1063  Assert(optimizer, ExcNotInitialized());
1064 
1065  // Some optimizers don't have the same interface for
1066  // serialization, so we filter them out through the specializations
1067  // of the Optimizer class
1068  Optimizer::load(archive,
1069  version,
1070  *optimizer,
1071  independent_symbols,
1072  dependent_functions,
1073  optimization_flags);
1074  }
1075 
1076 
1077 
1093  template <typename Stream>
1094  static void
1095  print(Stream & stream,
1096  typename Optimizer::OptimizerType *optimizer,
1097  const bool print_cse_reductions = true,
1098  const bool print_independent_symbols = false,
1099  const bool print_dependent_functions = false)
1100  {
1101  Assert(optimizer, ExcNotInitialized());
1102 
1103  optimizer->print(stream,
1104  print_independent_symbols,
1105  print_dependent_functions,
1106  print_cse_reductions);
1107  }
1108  };
1109 
1110 # endif // DOXYGEN
1111 
1112 
1113  /* -------------------- Utility functions ---------------------- */
1114 
1115 
1132  template <typename NumberType,
1133  int rank,
1134  int dim,
1135  template <int, int, typename> class TensorType>
1136  TensorType<rank, dim, NumberType>
1137  tensor_evaluate_optimized(
1138  const TensorType<rank, dim, Expression> &symbol_tensor,
1139  const BatchOptimizer<NumberType> & optimizer)
1140  {
1141  TensorType<rank, dim, NumberType> out;
1142  for (unsigned int i = 0; i < out.n_independent_components; ++i)
1143  {
1144  const TableIndices<rank> indices(
1145  out.unrolled_to_component_indices(i));
1146  out[indices] = optimizer.evaluate(symbol_tensor[indices]);
1147  }
1148  return out;
1149  }
1150 
1151 
1169  template <typename NumberType, int dim>
1170  SymmetricTensor<4, dim, NumberType>
1171  tensor_evaluate_optimized(
1172  const SymmetricTensor<4, dim, Expression> &symbol_tensor,
1173  const BatchOptimizer<NumberType> & optimizer)
1174  {
1175  SymmetricTensor<4, dim, NumberType> out;
1176  for (unsigned int i = 0;
1177  i < SymmetricTensor<2, dim>::n_independent_components;
1178  ++i)
1179  for (unsigned int j = 0;
1180  j < SymmetricTensor<2, dim>::n_independent_components;
1181  ++j)
1182  {
1183  const TableIndices<4> indices =
1184  make_rank_4_tensor_indices<dim>(i, j);
1185  out[indices] = optimizer.evaluate(symbol_tensor[indices]);
1186  }
1187  return out;
1188  }
1189 
1190 
1208  template <typename NumberType, typename T>
1209  void
1210  register_functions(BatchOptimizer<NumberType> &optimizer,
1211  const T & function)
1212  {
1213  optimizer.register_function(function);
1214  }
1215 
1216 
1234  template <typename NumberType, typename T>
1235  void
1236  register_functions(BatchOptimizer<NumberType> &optimizer,
1237  const std::vector<T> & functions)
1238  {
1239  for (const auto &function : functions)
1240  register_functions(optimizer, function);
1241  }
1242 
1243 
1263  template <typename NumberType, typename T, typename... Args>
1264  void
1265  register_functions(BatchOptimizer<NumberType> &optimizer,
1266  const T & function,
1267  const Args &... other_functions)
1268  {
1269  register_functions(optimizer, function);
1270  register_functions(optimizer, other_functions...);
1271  }
1272 
1273 
1285  template <int rank,
1286  int dim,
1287  template <int, int, typename> class TensorType>
1288  types::symbol_vector
1289  unroll_to_expression_vector(
1290  const TensorType<rank, dim, Expression> &symbol_tensor)
1291  {
1292  types::symbol_vector out;
1293  out.reserve(symbol_tensor.n_independent_components);
1294  for (unsigned int i = 0; i < symbol_tensor.n_independent_components;
1295  ++i)
1296  {
1297  const TableIndices<rank> indices(
1298  symbol_tensor.unrolled_to_component_indices(i));
1299  out.push_back(symbol_tensor[indices].get_RCP());
1300  }
1301  return out;
1302  }
1303 
1304 
1314  template <int dim>
1315  types::symbol_vector
1316  unroll_to_expression_vector(
1317  const SymmetricTensor<4, dim, Expression> &symbol_tensor)
1318  {
1319  types::symbol_vector out;
1320  out.reserve(symbol_tensor.n_independent_components);
1321  for (unsigned int i = 0;
1322  i < SymmetricTensor<2, dim>::n_independent_components;
1323  ++i)
1324  for (unsigned int j = 0;
1325  j < SymmetricTensor<2, dim>::n_independent_components;
1326  ++j)
1327  {
1328  const TableIndices<4> indices =
1329  make_rank_4_tensor_indices<dim>(i, j);
1330  out.push_back(symbol_tensor[indices].get_RCP());
1331  }
1332  return out;
1333  }
1334 
1335  } // namespace internal
1336 
1337 
1338 
1429  template <typename ReturnType>
1430  class BatchOptimizer
1431  {
1432  public:
1440  BatchOptimizer();
1441 
1459  BatchOptimizer(const enum OptimizerType & optimization_method,
1460  const enum OptimizationFlags &optimization_flags =
1461  OptimizationFlags::optimize_all);
1462 
1472  BatchOptimizer(const BatchOptimizer &other/*,
1473  const bool copy_initialized = true*/);
1474 
1478  BatchOptimizer(BatchOptimizer &&) = default;
1479 
1483  ~BatchOptimizer() = default;
1484 
1494  template <typename Stream>
1495  void
1496  print(Stream &stream, const bool print_cse = false) const;
1497 
1505  template <class Archive>
1506  void
1507  save(Archive &archive, const unsigned int version) const;
1508 
1521  template <class Archive>
1522  void
1523  load(Archive &archive, const unsigned int version);
1524 
1525 # ifdef DOXYGEN
1526 
1546  template <class Archive>
1547  void
1548  serialize(Archive &archive, const unsigned int version);
1549 # else
1550  // This macro defines the serialize() method that is compatible with
1551  // the templated save() and load() method that have been implemented.
1552  BOOST_SERIALIZATION_SPLIT_MEMBER()
1553 # endif
1554 
1559 
1565  void
1566  register_symbols(const types::substitution_map &substitution_map);
1567 
1573  void
1574  register_symbols(const SymEngine::map_basic_basic &substitution_map);
1575 
1586  void
1587  register_symbols(const types::symbol_vector &symbols);
1588 
1599  void
1600  register_symbols(const SymEngine::vec_basic &symbols);
1601 
1606  const types::substitution_map &
1607  get_independent_symbols() const;
1608 
1614  std::size_t
1615  n_independent_variables() const;
1616 
1618 
1623 
1628  void
1629  register_function(const Expression &function);
1630 
1635  template <int rank, int dim>
1636  void
1637  register_function(const Tensor<rank, dim, Expression> &function_tensor);
1638 
1643  template <int rank, int dim>
1644  void
1645  register_function(
1646  const SymmetricTensor<rank, dim, Expression> &function_tensor);
1647 
1652  void
1653  register_functions(const types::symbol_vector &functions);
1654 
1659  void
1660  register_functions(const SymEngine::vec_basic &functions);
1661 
1671  template <typename T>
1672  void
1673  register_functions(const std::vector<T> &functions);
1674 
1688  template <typename T, typename... Args>
1689  void
1690  register_functions(const T &functions, const Args &... other_functions);
1691 
1696  const types::symbol_vector &
1697  get_dependent_functions() const;
1698 
1705  std::size_t
1706  n_dependent_variables() const;
1707 
1709 
1714 
1726  void
1727  set_optimization_method(const enum OptimizerType & optimization_method,
1728  const enum OptimizationFlags &optimization_flags =
1729  OptimizationFlags::optimize_all);
1730 
1735  enum OptimizerType
1736  optimization_method() const;
1737 
1742  enum OptimizationFlags
1743  optimization_flags() const;
1744 
1750  bool
1751  use_symbolic_CSE() const;
1752 
1768  void
1769  optimize();
1770 
1775  bool
1776  optimized() const;
1777 
1779 
1784 
1794  void
1795  substitute(const types::substitution_map &substitution_map) const;
1796 
1806  void
1807  substitute(const SymEngine::map_basic_basic &substitution_map) const;
1808 
1819  void
1820  substitute(const types::symbol_vector & symbols,
1821  const std::vector<ReturnType> &values) const;
1822 
1833  void
1834  substitute(const SymEngine::vec_basic & symbols,
1835  const std::vector<ReturnType> &values) const;
1836 
1842  bool
1843  values_substituted() const;
1844 
1846 
1851 
1872  const std::vector<ReturnType> &
1873  evaluate() const;
1874 
1882  ReturnType
1883  evaluate(const Expression &func) const;
1884 
1893  std::vector<ReturnType>
1894  evaluate(const std::vector<Expression> &funcs) const;
1895 
1904  template <int rank, int dim>
1905  Tensor<rank, dim, ReturnType>
1906  evaluate(const Tensor<rank, dim, Expression> &funcs) const;
1907 
1908 
1917  template <int rank, int dim>
1918  SymmetricTensor<rank, dim, ReturnType>
1919  evaluate(const SymmetricTensor<rank, dim, Expression> &funcs) const;
1920 
1922 
1923  private:
1927  enum OptimizerType method;
1928 
1933  enum OptimizationFlags flags;
1934 
1944  types::substitution_map independent_variables_symbols;
1945 
1951  types::symbol_vector dependent_variables_functions;
1952 
1957  bool
1958  is_valid_nonunique_dependent_variable(
1959  const SD::Expression &function) const;
1960 
1965  bool
1966  is_valid_nonunique_dependent_variable(
1967  const SymEngine::RCP<const SymEngine::Basic> &function) const;
1968 
1983  mutable std::vector<ReturnType> dependent_variables_output;
1984 
1993  using map_dependent_expression_to_vector_entry_t =
1994  std::map<SD::Expression,
1995  std::size_t,
1996  SD::types::internal::ExpressionKeyLess>;
1997 
2002  map_dependent_expression_to_vector_entry_t map_dep_expr_vec_entry;
2003 
2010  mutable std::unique_ptr<SymEngine::Visitor> optimizer;
2011 
2020  mutable bool ready_for_value_extraction;
2021 
2026  mutable bool has_been_serialized;
2027 
2031  void
2032  register_scalar_function(const SD::Expression &function);
2033 
2038  void
2039  register_vector_functions(const types::symbol_vector &functions);
2040 
2044  void
2045  create_optimizer(std::unique_ptr<SymEngine::Visitor> &optimizer);
2046 
2063  void
2064  substitute(const std::vector<ReturnType> &substitution_values) const;
2065  };
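Putting the interface above together, the following is a hedged end-to-end sketch of the intended workflow: register symbols and functions, optimize once, then substitute and evaluate repeatedly. It assumes the umbrella header deal.II/differentiation/sd.h, SD::make_symbol() from the scalar-operations header, and symbolic arithmetic on SD::Expression; the member calls are limited to those declared above.

#include <deal.II/differentiation/sd.h>
#include <iostream>
#include <vector>

int main()
{
  using namespace dealii;
  namespace SD = Differentiation::SD;

  // Independent variables and dependent functions.
  const SD::Expression x = SD::make_symbol("x");
  const SD::Expression y = SD::make_symbol("y");
  const SD::Expression f = x * y;
  const SD::Expression g = x * x + y;

  // One-off setup: choose a method, register everything, then compile.
  SD::BatchOptimizer<double> optimizer(SD::OptimizerType::lambda,
                                       SD::OptimizationFlags::optimize_all);
  optimizer.register_symbols(SD::types::symbol_vector{x, y});
  optimizer.register_functions(f, g);
  optimizer.optimize();

  // Repeated use: substitute numeric values, then extract results.
  optimizer.substitute(SD::types::symbol_vector{x, y},
                       std::vector<double>{2.0, 3.0});
  std::cout << optimizer.evaluate(f) << ' '   // 6
            << optimizer.evaluate(g) << '\n'; // 7
}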
2066 
2067 
2068 
2069  /* -------------------- inline and template functions ------------------ */
2070 
2071 
2072 # ifndef DOXYGEN
2073 
2074 
2075  template <typename ReturnType>
2076  template <typename Stream>
2077  void
2078  BatchOptimizer<ReturnType>::print(Stream &stream,
2079  const bool /*print_cse*/) const
2080  {
2081  // Settings
2082  stream << "Method? " << optimization_method() << "\n";
2083  stream << "Flags: " << optimization_flags() << "\n";
2084  stream << "Optimized? " << (optimized() ? "Yes" : "No") << "\n";
2085  stream << "Values substituted? " << values_substituted() << "\n\n";
2086 
2087  // Independent variables
2088  stream << "Symbols (" << n_independent_variables()
2089  << " independent variables):"
2090  << "\n";
2091  int cntr = 0;
2092  for (SD::types::substitution_map::const_iterator it =
2093  independent_variables_symbols.begin();
2094  it != independent_variables_symbols.end();
2095  ++it, ++cntr)
2096  {
2097  stream << cntr << ": " << it->first << "\n";
2098  }
2099  stream << "\n" << std::flush;
2100 
2101  // Dependent functions
2102  stream << "Functions (" << n_dependent_variables()
2103  << " dependent variables):"
2104  << "\n";
2105  cntr = 0;
2106  for (typename SD::types::symbol_vector::const_iterator it =
2107  dependent_variables_functions.begin();
2108  it != dependent_variables_functions.end();
2109  ++it, ++cntr)
2110  {
2111  stream << cntr << ": " << (*it) << "\n";
2112  }
2113  stream << "\n" << std::flush;
2114 
2115  // Common subexpression
2116  if (optimized() == true && use_symbolic_CSE() == true)
2117  {
2118  Assert(optimizer, ExcNotInitialized());
2119  const bool print_cse_reductions = true;
2120  const bool print_independent_symbols = false;
2121  const bool print_dependent_functions = false;
2122 
2123  if (optimization_method() == OptimizerType::dictionary)
2124  {
2125  Assert(dynamic_cast<typename internal::DictionaryOptimizer<
2126  ReturnType>::OptimizerType *>(optimizer.get()),
2127  ExcMessage("Cannot cast optimizer to Dictionary type."));
2128 
2129  internal::OptimizerHelper<
2130  ReturnType,
2131  internal::DictionaryOptimizer<ReturnType>>::
2132  print(stream,
2133  dynamic_cast<typename internal::DictionaryOptimizer<
2134  ReturnType>::OptimizerType *>(optimizer.get()),
2135  print_independent_symbols,
2136  print_dependent_functions,
2137  print_cse_reductions);
2138 
2139  stream << "\n" << std::flush;
2140  }
2141  else if (optimization_method() == OptimizerType::lambda)
2142  {
2143  Assert(dynamic_cast<typename internal::LambdaOptimizer<
2144  ReturnType>::OptimizerType *>(optimizer.get()),
2145  ExcMessage("Cannot cast optimizer to Lambda type."));
2146 
2147  internal::OptimizerHelper<ReturnType,
2148  internal::LambdaOptimizer<ReturnType>>::
2149  print(stream,
2150  dynamic_cast<typename internal::LambdaOptimizer<
2151  ReturnType>::OptimizerType *>(optimizer.get()),
2152  print_independent_symbols,
2153  print_dependent_functions,
2154  print_cse_reductions);
2155  }
2156 # ifdef HAVE_SYMENGINE_LLVM
2157  else if (optimization_method() == OptimizerType::llvm)
2158  {
2159  Assert(dynamic_cast<typename internal::LLVMOptimizer<
2160  ReturnType>::OptimizerType *>(optimizer.get()),
2161  ExcMessage("Cannot cast optimizer to LLVM type."));
2162 
2163  internal::OptimizerHelper<ReturnType,
2164  internal::LLVMOptimizer<ReturnType>>::
2165  print(stream,
2166  dynamic_cast<typename internal::LLVMOptimizer<
2167  ReturnType>::OptimizerType *>(optimizer.get()),
2168  print_independent_symbols,
2169  print_dependent_functions,
2170  print_cse_reductions);
2171  }
2172 # endif // HAVE_SYMENGINE_LLVM
2173  else
2174  {
2175  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2176  }
2177  }
2178 
2179  if (values_substituted())
2180  {
2181  stream << "Evaluated functions:"
2182  << "\n";
2183  stream << std::flush;
2184  cntr = 0;
2185  for (typename std::vector<ReturnType>::const_iterator it =
2186  dependent_variables_output.begin();
2187  it != dependent_variables_output.end();
2188  ++it, ++cntr)
2189  {
2190  stream << cntr << ": " << (*it) << "\n";
2191  }
2192  stream << "\n" << std::flush;
2193  }
2194  }
2195 
2196 
2197 
2198  template <typename ReturnType>
2199  template <class Archive>
2200  void
2201  BatchOptimizer<ReturnType>::save(Archive & ar,
2202  const unsigned int version) const
2203  {
2204  // Serialize enum classes...
2205  {
2206  const auto m =
2207  static_cast<typename std::underlying_type<OptimizerType>::type>(
2208  method);
2209  ar &m;
2210  }
2211  {
2212  const auto f =
2213  static_cast<typename std::underlying_type<OptimizationFlags>::type>(
2214  flags);
2215  ar &f;
2216  }
2217 
2218  // Important: Independent variables must always be
2219  // serialized before the dependent variables.
2220  ar &independent_variables_symbols;
2221  ar &dependent_variables_functions;
2222 
2223  ar &dependent_variables_output;
2224  ar &map_dep_expr_vec_entry;
2225  ar &ready_for_value_extraction;
2226 
2227  // Mark that we've saved this class at some point.
2228  has_been_serialized = true;
2229  ar &has_been_serialized;
2230 
2231  // When we serialize the optimizer itself, we have to (unfortunately)
2232  // provide it with sufficient information to rebuild itself from scratch.
2233  // This is because only two of the three optimization classes support
2234  // real serialization (i.e. have save/load capability).
2235  const SD::types::symbol_vector symbol_vec =
2236  Utilities::extract_symbols(independent_variables_symbols);
2237  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2238  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2239  ReturnType>::OptimizerType *>(optimizer.get()))
2240  {
2241  Assert(optimization_method() == OptimizerType::dictionary,
2242  ExcInternalError());
2243  internal::OptimizerHelper<
2244  ReturnType,
2245  internal::DictionaryOptimizer<ReturnType>>::save(ar, version, opt);
2246  }
2247  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2248  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2249  ReturnType>::OptimizerType *>(optimizer.get()))
2250  {
2251  Assert(optimization_method() == OptimizerType::lambda,
2252  ExcInternalError());
2253  internal::OptimizerHelper<
2254  ReturnType,
2255  internal::LambdaOptimizer<ReturnType>>::save(ar, version, opt);
2256  }
2257 # ifdef HAVE_SYMENGINE_LLVM
2258  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2259  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2260  ReturnType>::OptimizerType *>(optimizer.get()))
2261  {
2262  Assert(optimization_method() == OptimizerType::llvm,
2263  ExcInternalError());
2264  internal::OptimizerHelper<
2265  ReturnType,
2266  internal::LLVMOptimizer<ReturnType>>::save(ar, version, opt);
2267  }
2268 # endif
2269  else
2270  {
2271  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2272  }
2273  }
2274 
2275 
2276 
2277  template <typename ReturnType>
2278  template <class Archive>
2279  void
2280  BatchOptimizer<ReturnType>::load(Archive &ar, const unsigned int version)
2281  {
2282  Assert(independent_variables_symbols.empty(), ExcInternalError());
2283  Assert(dependent_variables_functions.empty(), ExcInternalError());
2284  Assert(dependent_variables_output.empty(), ExcInternalError());
2285  Assert(map_dep_expr_vec_entry.empty(), ExcInternalError());
2286  Assert(ready_for_value_extraction == false, ExcInternalError());
2287 
2288  // Deserialize enum classes...
2289  {
2290  typename std::underlying_type<OptimizerType>::type m;
2291  ar & m;
2292  method = static_cast<OptimizerType>(m);
2293  }
2294  {
2295  typename std::underlying_type<OptimizationFlags>::type f;
2296  ar & f;
2297  flags = static_cast<OptimizationFlags>(f);
2298  }
2299 
2300  // Important: Independent variables must always be
2301  // deserialized before the dependent variables.
2302  ar &independent_variables_symbols;
2303  ar &dependent_variables_functions;
2304 
2305  ar &dependent_variables_output;
2306  ar &map_dep_expr_vec_entry;
2307  ar &ready_for_value_extraction;
2308 
2309  ar &has_been_serialized;
2310 
2311  // If we're reading in data, then create the optimizer
2312  // and then deserialize it.
2313  Assert(!optimizer, ExcInternalError());
2314 
2315  // Create and configure the optimizer
2316  create_optimizer(optimizer);
2317  Assert(optimizer, ExcNotInitialized());
2318 
2319  // When we deserialize the optimizer itself, we have to (unfortunately)
2320  // provide it with sufficient information to rebuild itself from scratch.
2321  // This is because only two of the three optimization classes support
2322  // real serialization (i.e. have save/load capability).
2323  const SD::types::symbol_vector symbol_vec =
2324  Utilities::extract_symbols(independent_variables_symbols);
2325  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2326  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2327  ReturnType>::OptimizerType *>(optimizer.get()))
2328  {
2329  Assert(optimization_method() == OptimizerType::dictionary,
2330  ExcInternalError());
2331  internal::OptimizerHelper<ReturnType,
2332  internal::DictionaryOptimizer<ReturnType>>::
2333  load(ar,
2334  version,
2335  opt,
2336  Utilities::convert_expression_vector_to_basic_vector(
2337  symbol_vec),
2338  Utilities::convert_expression_vector_to_basic_vector(
2339  dependent_variables_functions),
2340  optimization_flags());
2341  }
2342  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2343  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2344  ReturnType>::OptimizerType *>(optimizer.get()))
2345  {
2346  Assert(optimization_method() == OptimizerType::lambda,
2347  ExcInternalError());
2348  internal::OptimizerHelper<ReturnType,
2349  internal::LambdaOptimizer<ReturnType>>::
2350  load(ar,
2351  version,
2352  opt,
2353  Utilities::convert_expression_vector_to_basic_vector(
2354  symbol_vec),
2355  Utilities::convert_expression_vector_to_basic_vector(
2356  dependent_variables_functions),
2357  optimization_flags());
2358  }
2359 # ifdef HAVE_SYMENGINE_LLVM
2360  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2361  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2362  ReturnType>::OptimizerType *>(optimizer.get()))
2363  {
2364  Assert(optimization_method() == OptimizerType::llvm,
2365  ExcInternalError());
2366  internal::OptimizerHelper<ReturnType,
2367  internal::LLVMOptimizer<ReturnType>>::
2368  load(ar,
2369  version,
2370  opt,
2371  Utilities::convert_expression_vector_to_basic_vector(
2372  symbol_vec),
2373  Utilities::convert_expression_vector_to_basic_vector(
2374  dependent_variables_functions),
2375  optimization_flags());
2376  }
2377 # endif
2378  else
2379  {
2380  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2381  }
2382  }
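Because save() and load() above are split via BOOST_SERIALIZATION_SPLIT_MEMBER(), a BatchOptimizer can be streamed through Boost archives. The following is a hedged sketch of a round trip; it assumes Boost.Serialization text archives, and the restored object must be freshly constructed, as the assertions at the top of load() require.

#include <deal.II/differentiation/sd.h>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <fstream>

using dealii::Differentiation::SD::BatchOptimizer;

void save_to_disk(const BatchOptimizer<double> &optimizer)
{
  std::ofstream                 file("batch_optimizer.dat");
  boost::archive::text_oarchive oa(file);
  oa << optimizer; // dispatches to BatchOptimizer::save()
}

void load_from_disk(BatchOptimizer<double> &optimizer)
{
  std::ifstream                 file("batch_optimizer.dat");
  boost::archive::text_iarchive ia(file);
  ia >> optimizer; // dispatches to BatchOptimizer::load()
}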
2383 
2384 
2385 
2386  template <typename ReturnType>
2387  template <int rank, int dim>
2388  void
2389  BatchOptimizer<ReturnType>::register_function(
2390  const Tensor<rank, dim, Expression> &function_tensor)
2391  {
2392  Assert(optimized() == false,
2393  ExcMessage(
2394  "Cannot register functions once the optimizer is finalised."));
2395 
2396  register_vector_functions(
2397  internal::unroll_to_expression_vector(function_tensor));
2398  }
2399 
2400 
2401 
2402  template <typename ReturnType>
2403  template <int rank, int dim>
2404  void
2405  BatchOptimizer<ReturnType>::register_function(
2406  const SymmetricTensor<rank, dim, Expression> &function_tensor)
2407  {
2408  Assert(optimized() == false,
2409  ExcMessage(
2410  "Cannot register functions once the optimizer is finalised."));
2411 
2412  register_vector_functions(
2413  internal::unroll_to_expression_vector(function_tensor));
2414  }
2415 
2416 
2417 
2418  template <typename ReturnType>
2419  template <typename T, typename... Args>
2420  void
2421  BatchOptimizer<ReturnType>::register_functions(
2422  const T &functions,
2423  const Args &... other_functions)
2424  {
2425  internal::register_functions(*this, functions);
2426  internal::register_functions(*this, other_functions...);
2427  }
2428 
2429 
2430 
2431  template <typename ReturnType>
2432  template <typename T>
2433  void
2434  BatchOptimizer<ReturnType>::register_functions(
2435  const std::vector<T> &functions)
2436  {
2437  internal::register_functions(*this, functions);
2438  }
2439 
2440 
2441 
2442  template <typename ReturnType>
2443  template <int rank, int dim>
2444  Tensor<rank, dim, ReturnType>
2445  BatchOptimizer<ReturnType>::evaluate(
2446  const Tensor<rank, dim, Expression> &funcs) const
2447  {
2448  Assert(
2449  values_substituted() == true,
2450  ExcMessage(
2451  "The optimizer is not configured to perform evaluation. "
2452  "This action can only be performed after substitute() has been called."));
2453 
2454  return internal::tensor_evaluate_optimized(funcs, *this);
2455  }
2456 
2457 
2458 
2459  template <typename ReturnType>
2460  template <int rank, int dim>
2461  SymmetricTensor<rank, dim, ReturnType>
2462  BatchOptimizer<ReturnType>::evaluate(
2463  const SymmetricTensor<rank, dim, Expression> &funcs) const
2464  {
2465  Assert(
2466  values_substituted() == true,
2467  ExcMessage(
2468  "The optimizer is not configured to perform evaluation. "
2469  "This action can only be performed after substitute() has been called."));
2470 
2471  return internal::tensor_evaluate_optimized(funcs, *this);
2472  }
2473 
2474 # endif // DOXYGEN
2475 
2476  } // namespace SD
2477 } // namespace Differentiation
2478 
2479 
2480 DEAL_II_NAMESPACE_CLOSE
2481 
2482 #endif // DEAL_II_WITH_SYMENGINE
2483 
2484 #endif