symengine_optimizer.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2020 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_differentiation_sd_symengine_optimizer_h
17 #define dealii_differentiation_sd_symengine_optimizer_h
18 
19 #include <deal.II/base/config.h>
20 
21 #ifdef DEAL_II_WITH_SYMENGINE
22 
23 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
24 // Low level
25 # include <symengine/basic.h>
26 # include <symengine/dict.h>
27 # include <symengine/symengine_exception.h>
28 # include <symengine/symengine_rcp.h>
29 
30 // Optimization
31 # include <symengine/lambda_double.h>
32 # include <symengine/visitor.h>
33 # ifdef HAVE_SYMENGINE_LLVM
34 # include <symengine/llvm_double.h>
35 # endif
36 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
37 
38 # include <deal.II/base/exceptions.h>
39 # include <deal.II/base/logstream.h>
40 # include <deal.II/base/utilities.h>
41 
42 # include <deal.II/differentiation/sd/symengine_number_types.h>
43 # include <deal.II/differentiation/sd/symengine_number_visitor_internal.h>
44 # include <deal.II/differentiation/sd/symengine_tensor_operations.h>
45 # include <deal.II/differentiation/sd/symengine_types.h>
46 # include <deal.II/differentiation/sd/symengine_utilities.h>
47 
48 # include <boost/serialization/split_member.hpp>
49 # include <boost/type_traits.hpp>
50 
51 # include <algorithm>
52 # include <map>
53 # include <memory>
54 # include <type_traits>
55 # include <utility>
56 # include <vector>
57 
58 
59 DEAL_II_NAMESPACE_OPEN
60 
61 
62 namespace Differentiation
63 {
64  namespace SD
65  {
75  DeclExceptionMsg(ExcSymEngineLLVMNotAvailable,
76  "SymEngine has not been built with LLVM support.");
77 
82  DeclExceptionMsg(ExcSymEngineLLVMReturnTypeNotSupported,
83  "The SymEngine LLVM optimizer does not (yet) support the "
84  "selected return type.");
85 
87 
88 
89  // Forward declarations
90  template <typename ReturnType>
91  class BatchOptimizer;
92 
93 
99  enum class OptimizerType
100  {
104  dictionary,
109  lambda,
114  llvm
115  };
116 
117 
121  template <class StreamType>
122  inline StreamType &
123  operator<<(StreamType &s, OptimizerType o)
124  {
125  if (o == OptimizerType::dictionary)
126  s << "dictionary";
127  else if (o == OptimizerType::lambda)
128  s << "lambda";
129  else if (o == OptimizerType::llvm)
130  s << "llvm";
131  else
132  {
133  Assert(false, ExcMessage("Unknown optimization method."));
134  }
135 
136  return s;
137  }
138 
139 
145  enum class OptimizationFlags : unsigned char
146  {
150  optimize_default = 0,
154  optimize_cse = 0x0001,
159  optimize_aggressive = 0x0002,
163  optimize_all = optimize_cse | optimize_aggressive
164  };
165 
166 
175  // This operator exists since if it did not then the result of the bit-or
176  // <tt>operator |</tt> would be an integer which would in turn trigger a
177  // compiler warning when we tried to assign it to an object of type
178  // OptimizationFlags.
179  inline OptimizationFlags
180  operator|(const OptimizationFlags f1, const OptimizationFlags f2)
181  {
182  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) |
183  static_cast<unsigned int>(f2));
184  }
185 
186 
191  inline OptimizationFlags &
192  operator|=(OptimizationFlags &f1, const OptimizationFlags f2)
193  {
194  f1 = f1 | f2;
195  return f1;
196  }
197 
198 
207  // This operator exists since if it did not then the result of the bit-or
208  // <tt>operator |</tt> would be an integer which would in turn trigger a
209  // compiler warning when we tried to assign it to an object of type
210  // OptimizationFlags.
211  inline OptimizationFlags
212  operator&(const OptimizationFlags f1, const OptimizationFlags f2)
213  {
214  return static_cast<OptimizationFlags>(static_cast<unsigned int>(f1) &
215  static_cast<unsigned int>(f2));
216  }
217 
218 
223  inline OptimizationFlags &
224  operator&=(OptimizationFlags &f1, const OptimizationFlags f2)
225  {
226  f1 = f1 & f2;
227  return f1;
228  }
229 
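 // As an illustration (a hypothetical snippet, not taken from this file),
 // the operators above allow optimization settings to be composed and
 // queried in the usual bit-mask style:
 //
 //   OptimizationFlags flags = OptimizationFlags::optimize_cse;
 //   flags |= OptimizationFlags::optimize_aggressive;
 //
 //   const bool cse_requested =
 //     static_cast<unsigned int>(flags & OptimizationFlags::optimize_cse) != 0;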
230 
231  namespace internal
232  {
237  inline bool
238  use_symbolic_CSE(const enum OptimizationFlags &flags)
239  {
240  return static_cast<int>(flags & OptimizationFlags::optimize_cse);
241  }
242 
247  inline int
248  get_LLVM_optimization_level(const enum OptimizationFlags &flags)
249  {
250  // With the LLVM compiler there exists the opportunity to tune
251  // the level of optimizations performed during compilation.
252  // By default SymEngine sets this at "opt_level=2", which one
253  // presumes targets -O2. Here we are a bit more specific about
254  // what we want it to do:
255  // - Normal compilation: -O2 (default settings)
256  // - Aggressive mode: -O3 (the whole lot!)
257  // In theory we could also target
258  // - Debug mode: -O0 (no optimizations)
259  // but this doesn't make much sense since SymEngine is a
260  // tested external library.
261  const bool use_agg_opt =
262  static_cast<int>(flags & OptimizationFlags::optimize_aggressive);
263  const int opt_level = (use_agg_opt ? 3 : 2);
264  return opt_level;
265  }
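 // For example, get_LLVM_optimization_level(OptimizationFlags::optimize_default)
 // returns 2 (i.e. "-O2"), while a flag set that includes
 // OptimizationFlags::optimize_aggressive yields 3 ("-O3").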
266  } // namespace internal
267 
268 
273  template <class StreamType>
274  inline StreamType &
275  operator<<(StreamType &s, OptimizationFlags o)
276  {
277  s << " OptimizationFlags|";
278  if (static_cast<unsigned int>(o & OptimizationFlags::optimize_cse))
279  s << "cse|";
280 
281  // LLVM optimization level
282  s << "-O" +
283  dealii::Utilities::to_string(static_cast<unsigned int>(
284  internal::get_LLVM_optimization_level(o))) +
285  "|";
286 
287  return s;
288  }
289 
290 
291  namespace internal
292  {
302  template <typename ReturnType, typename T = void>
303  struct DictionaryOptimizer;
304 
305 
315  template <typename ReturnType, typename T = void>
316  struct LambdaOptimizer;
317 
318 
319 # ifdef HAVE_SYMENGINE_LLVM
329  template <typename ReturnType, typename T = void>
330  struct LLVMOptimizer;
331 # endif // HAVE_SYMENGINE_LLVM
332 
333 
349  template <typename ReturnType, typename Optimizer, typename T = void>
350  struct OptimizerHelper;
351 
352 
353 # ifndef DOXYGEN
354 
355 
356  /* ----------- Specializations for the Optimizers ----------- */
357 
358 
359  // A helper struct to type trait detection for the optimizers that
360  // will be defined next.
361  template <typename ReturnType_, typename T = void>
362  struct SupportedOptimizerTypeTraits
363  {
364  static const bool is_supported = false;
365 
366  using ReturnType = void;
367  };
368 
369 
370 
371  // Specialization for arithmetic types
372  template <typename ReturnType_>
373  struct SupportedOptimizerTypeTraits<
374  ReturnType_,
375  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
376  {
377  static const bool is_supported = true;
378 
379  using ReturnType =
380  typename std::conditional<std::is_same<ReturnType_, float>::value,
381  float,
382  double>::type;
383  };
384 
385 
386 
387  // Specialization for complex arithmetic types
388  template <typename ReturnType_>
389  struct SupportedOptimizerTypeTraits<
390  ReturnType_,
391  typename std::enable_if<
392  boost::is_complex<ReturnType_>::value &&
393  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
394  {
395  static const bool is_supported = true;
396 
397  using ReturnType = typename std::conditional<
398  std::is_same<ReturnType_, std::complex<float>>::value,
399  std::complex<float>,
400  std::complex<double>>::type;
401  };
402 
403 
404 
405  template <typename ReturnType_>
406  struct DictionaryOptimizer<
407  ReturnType_,
408  typename std::enable_if<
409  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
410  {
411  using ReturnType =
412  typename SupportedOptimizerTypeTraits<ReturnType_>::ReturnType;
413  using OptimizerType =
414  internal::DictionarySubstitutionVisitor<ReturnType, SD::Expression>;
415 
416 
425  static void
426  initialize(OptimizerType & optimizer,
427  const SymEngine::vec_basic & independent_symbols,
428  const SymEngine::vec_basic & dependent_functions,
429  const enum OptimizationFlags &optimization_flags)
430  {
431  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
432  optimizer.init(independent_symbols,
433  dependent_functions,
434  use_symbolic_cse);
435  }
436 
437 
438 
443  template <class Archive>
444  static void
445  save(Archive & archive,
446  const unsigned int version,
447  OptimizerType & optimizer)
448  {
449  optimizer.save(archive, version);
450  }
451 
452 
453 
458  template <class Archive>
459  static void
460  load(Archive & archive,
461  const unsigned int version,
462  OptimizerType & optimizer,
463  const SymEngine::vec_basic & /*independent_symbols*/,
464  const SymEngine::vec_basic & /*dependent_functions*/,
465  const enum OptimizationFlags & /*optimization_flags*/)
466  {
467  optimizer.load(archive, version);
468  }
469 
470 
471 
487  template <typename Stream>
488  static void
489  print(Stream & stream,
490  const OptimizerType &optimizer,
491  const bool print_independent_symbols = false,
492  const bool print_dependent_functions = false,
493  const bool print_cse_reductions = true)
494  {
495  optimizer.print(stream,
496  print_independent_symbols,
497  print_dependent_functions,
498  print_cse_reductions);
499  }
500  };
501 
502 
503 
504  template <typename ReturnType_>
505  struct LambdaOptimizer<
506  ReturnType_,
507  typename std::enable_if<
508  SupportedOptimizerTypeTraits<ReturnType_>::is_supported>::type>
509  {
510  using ReturnType =
511  typename std::conditional<!boost::is_complex<ReturnType_>::value,
512  double,
513  std::complex<double>>::type;
514  using OptimizerType = typename std::conditional<
515  !boost::is_complex<ReturnType_>::value,
516  SymEngine::LambdaRealDoubleVisitor,
517  SymEngine::LambdaComplexDoubleVisitor>::type;
518 
519 
528  static void
529  initialize(OptimizerType & optimizer,
530  const SymEngine::vec_basic & independent_symbols,
531  const SymEngine::vec_basic & dependent_functions,
532  const enum OptimizationFlags &optimization_flags)
533  {
534  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
535  optimizer.init(independent_symbols,
536  dependent_functions,
537  use_symbolic_cse);
538  }
539 
540 
541 
546  template <class Archive>
547  static void
548  save(Archive & /*archive*/,
549  const unsigned int /*version*/,
550  OptimizerType & /*optimizer*/)
551  {}
552 
553 
558  template <class Archive>
559  static void
560  load(Archive & /*archive*/,
561  const unsigned int /*version*/,
562  OptimizerType & optimizer,
563  const SymEngine::vec_basic & independent_symbols,
564  const SymEngine::vec_basic & dependent_functions,
565  const enum OptimizationFlags &optimization_flags)
566  {
567  initialize(optimizer,
568  independent_symbols,
569  dependent_functions,
570  optimization_flags);
571  }
572 
573 
574 
590  template <typename StreamType>
591  static void
592  print(StreamType & /*stream*/,
593  const OptimizerType & /*optimizer*/,
594  const bool /*print_independent_symbols*/ = false,
595  const bool /*print_dependent_functions*/ = false,
596  const bool /*print_cse_reductions*/ = true)
597  {
598  // No built-in print function
599  }
600  };
601 
602 
603 
604 # ifdef HAVE_SYMENGINE_LLVM
605  template <typename ReturnType_>
606  struct LLVMOptimizer<
607  ReturnType_,
608  typename std::enable_if<std::is_arithmetic<ReturnType_>::value>::type>
609  {
610  using ReturnType =
611  typename std::conditional<std::is_same<ReturnType_, float>::value,
612  float,
613  double>::type;
614  using OptimizerType =
615  typename std::conditional<std::is_same<ReturnType_, float>::value,
616  SymEngine::LLVMFloatVisitor,
617  SymEngine::LLVMDoubleVisitor>::type;
618 
623  static const bool supported_by_LLVM = true;
624 
625 
634  static void
635  initialize(OptimizerType & optimizer,
636  const SymEngine::vec_basic & independent_symbols,
637  const SymEngine::vec_basic & dependent_functions,
638  const enum OptimizationFlags &optimization_flags)
639  {
640  const int opt_level = get_LLVM_optimization_level(optimization_flags);
641  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
642  optimizer.init(independent_symbols,
643  dependent_functions,
644  use_symbolic_cse,
645  opt_level);
646  }
647 
648 
649 
654  template <class Archive>
655  static void
656  save(Archive &archive,
657  const unsigned int /*version*/,
658  OptimizerType &optimizer)
659  {
660  const std::string llvm_compiled_function = optimizer.dumps();
661  archive & llvm_compiled_function;
662  }
663 
664 
665 
670  template <class Archive>
671  static void
672  load(Archive &archive,
673  const unsigned int /*version*/,
674  OptimizerType &optimizer,
675  const SymEngine::vec_basic & /*independent_symbols*/,
676  const SymEngine::vec_basic & /*dependent_functions*/,
677  const enum OptimizationFlags & /*optimization_flags*/)
678  {
679  std::string llvm_compiled_function;
680  archive & llvm_compiled_function;
681  optimizer.loads(llvm_compiled_function);
682  }
683 
684 
685 
701  template <typename StreamType>
702  static void
703  print(StreamType & /*stream*/,
704  const OptimizerType & /*optimizer*/,
705  const bool /*print_independent_symbols*/ = false,
706  const bool /*print_dependent_functions*/ = false,
707  const bool /*print_cse_reductions*/ = true)
708  {
709  // No built-in print function
710  }
711  };
712 
713 
714  // There is no LLVM optimizer built with complex number support.
715  // So we fall back to the LambdaDouble case as a type (required
716  // at compile time), but offer no implementation. We expect that
717  // the calling class does not create this type: This can be done by
718  // checking the `supported_by_LLVM` flag.
719  template <typename ReturnType_>
720  struct LLVMOptimizer<
721  ReturnType_,
722  typename std::enable_if<
723  boost::is_complex<ReturnType_>::value &&
724  std::is_arithmetic<typename ReturnType_::value_type>::value>::type>
725  {
726  // Since there is no working implementation, these are dummy types
727  // that help with templating in the calling function.
728  using ReturnType = typename LambdaOptimizer<ReturnType_>::ReturnType;
729  using OptimizerType =
730  typename LambdaOptimizer<ReturnType_>::OptimizerType;
731 
736  static const bool supported_by_LLVM = false;
737 
738 
747  static void
748  initialize(OptimizerType & /*optimizer*/,
749  const SymEngine::vec_basic & /*independent_symbols*/,
750  const SymEngine::vec_basic & /*dependent_functions*/,
751  const enum OptimizationFlags & /*optimization_flags*/)
752  {
753  AssertThrow(false, ExcNotImplemented());
754  }
755 
756 
757 
762  template <class Archive>
763  static void
764  save(Archive & /*archive*/,
765  const unsigned int /*version*/,
766  OptimizerType & /*optimizer*/)
767  {
768  AssertThrow(false, ExcNotImplemented());
769  }
770 
771 
772 
777  template <class Archive>
778  static void
779  load(Archive & /*archive*/,
780  const unsigned int /*version*/,
781  OptimizerType & /*optimizer*/,
782  const SymEngine::vec_basic & /*independent_symbols*/,
783  const SymEngine::vec_basic & /*dependent_functions*/,
784  const enum OptimizationFlags & /*optimization_flags*/)
785  {
786  AssertThrow(false, ExcNotImplemented());
787  }
788 
789 
790 
806  template <typename StreamType>
807  static void
808  print(StreamType & /*stream*/,
809  const OptimizerType & /*optimizer*/,
810  const bool /*print_independent_symbols*/ = false,
811  const bool /*print_dependent_functions*/ = false,
812  const bool /*print_cse_reductions*/ = true)
813  {
814  AssertThrow(false, ExcNotImplemented());
815  }
816  };
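 // As the comment above this specialization notes, callers are expected to
 // test the `supported_by_LLVM` flag before selecting the LLVM path. An
 // illustrative (hypothetical) guard could look like:
 //
 //   if (internal::LLVMOptimizer<ReturnType>::supported_by_LLVM == false)
 //     AssertThrow(false, ExcSymEngineLLVMReturnTypeNotSupported());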
817 # endif // HAVE_SYMENGINE_LLVM
818 
819 
820  /* ----------- Specializations for OptimizerHelper ----------- */
821 
822 
823  template <typename ReturnType, typename Optimizer>
824  struct OptimizerHelper<ReturnType,
825  Optimizer,
826  typename std::enable_if<std::is_same<
827  ReturnType,
828  typename Optimizer::ReturnType>::value>::type>
829  {
838  static void
839  initialize(typename Optimizer::OptimizerType *optimizer,
840  const SymEngine::vec_basic & independent_symbols,
841  const SymEngine::vec_basic & dependent_functions,
842  const enum OptimizationFlags & optimization_flags)
843  {
844  Assert(optimizer, ExcNotInitialized());
845 
846  // Some optimizers don't have the same interface for
847  // initialization, we filter them out through the specializations
848  // of the Optimizer class
849  Optimizer::initialize(*optimizer,
850  independent_symbols,
851  dependent_functions,
852  optimization_flags);
853  }
854 
855 
856 
870  static void
871  substitute(typename Optimizer::OptimizerType *optimizer,
872  std::vector<ReturnType> & output_values,
873  const std::vector<ReturnType> & substitution_values)
874  {
875  Assert(optimizer, ExcNotInitialized());
876  optimizer->call(output_values.data(), substitution_values.data());
877  }
878 
879 
880 
885  template <class Archive>
886  static void
887  save(Archive & archive,
888  const unsigned int version,
889  typename Optimizer::OptimizerType *optimizer)
890  {
891  Assert(optimizer, ExcNotInitialized());
892 
893  // Some optimizers don't have the same interface for
894  // serialization, we filter them out through the specializations
895  // of the Optimizer class
896  Optimizer::save(archive, version, *optimizer);
897  }
898 
899 
900 
905  template <class Archive>
906  static void
907  load(Archive & archive,
908  const unsigned int version,
909  typename Optimizer::OptimizerType *optimizer,
910  const SymEngine::vec_basic & independent_symbols,
911  const SymEngine::vec_basic & dependent_functions,
912  const enum OptimizationFlags & optimization_flags)
913  {
914  Assert(optimizer, ExcNotInitialized());
915 
916  // Some optimizers don't have the same interface for
917  // serialization, we filter them out through the specializations
918  // of the Optimizer class
919  Optimizer::load(archive,
920  version,
921  *optimizer,
922  independent_symbols,
923  dependent_functions,
924  optimization_flags);
925  }
926 
927 
928 
944  template <typename Stream>
945  static void
946  print(Stream & stream,
947  typename Optimizer::OptimizerType *optimizer,
948  const bool print_independent_symbols = false,
949  const bool print_dependent_functions = false,
950  const bool print_cse_reductions = true)
951  {
952  Assert(optimizer, ExcNotInitialized());
953 
954  // Some optimizers don't have a print function, so
955  // we filter them out through the specializations of
956  // the Optimizer class
957  Optimizer::print(stream,
958  *optimizer,
959  print_independent_symbols,
960  print_dependent_functions,
961  print_cse_reductions);
962  }
963  };
964 
965  template <typename ReturnType, typename Optimizer>
966  struct OptimizerHelper<ReturnType,
967  Optimizer,
968  typename std::enable_if<!std::is_same<
969  ReturnType,
970  typename Optimizer::ReturnType>::value>::type>
971  {
980  static void
981  initialize(typename Optimizer::OptimizerType *optimizer,
982  const SymEngine::vec_basic & independent_symbols,
983  const SymEngine::vec_basic & dependent_functions,
984  const enum OptimizationFlags & optimization_flags)
985  {
986  Assert(optimizer, ExcNotInitialized());
987 
988  const bool use_symbolic_cse = use_symbolic_CSE(optimization_flags);
989  optimizer->init(independent_symbols,
990  dependent_functions,
991  use_symbolic_cse);
992  }
993 
994 
995 
1009  static void
1010  substitute(typename Optimizer::OptimizerType *optimizer,
1011  std::vector<ReturnType> & output_values,
1012  const std::vector<ReturnType> & substitution_values)
1013  {
1014  Assert(optimizer, ExcNotInitialized());
1015 
1016  // Intermediate values to accommodate the difference in
1017  // value types.
1018  std::vector<typename Optimizer::ReturnType> int_outputs(
1019  output_values.size());
1020  std::vector<typename Optimizer::ReturnType> int_inputs(
1021  substitution_values.size());
1022 
1023  std::copy(substitution_values.begin(),
1024  substitution_values.end(),
1025  int_inputs.begin());
1026  optimizer->call(int_outputs.data(), int_inputs.data());
1027  std::copy(int_outputs.begin(),
1028  int_outputs.end(),
1029  output_values.begin());
1030  }
1031 
1032 
1033 
1038  template <class Archive>
1039  static void
1040  save(Archive & archive,
1041  const unsigned int version,
1042  typename Optimizer::OptimizerType *optimizer)
1043  {
1044  Assert(optimizer, ExcNotInitialized());
1045  Optimizer::save(archive, version, *optimizer);
1046  }
1047 
1048 
1049 
1054  template <class Archive>
1055  static void
1056  load(Archive & archive,
1057  const unsigned int version,
1058  typename Optimizer::OptimizerType *optimizer,
1059  const SymEngine::vec_basic & independent_symbols,
1060  const SymEngine::vec_basic & dependent_functions,
1061  const enum OptimizationFlags & optimization_flags)
1062  {
1063  Assert(optimizer, ExcNotInitialized());
1064 
1065  // Some optimizers don't have the same interface for
1066  // serialization, we filter them out through the specializations
1067  // of the Optimizer class
1068  Optimizer::load(archive,
1069  version,
1070  *optimizer,
1071  independent_symbols,
1072  dependent_functions,
1073  optimization_flags);
1074  }
1075 
1076 
1077 
1093  template <typename Stream>
1094  static void
1095  print(Stream & stream,
1096  typename Optimizer::OptimizerType *optimizer,
1097  const bool print_cse_reductions = true,
1098  const bool print_independent_symbols = false,
1099  const bool print_dependent_functions = false)
1100  {
1101  Assert(optimizer, ExcNotInitialized());
1102 
1103  optimizer->print(stream,
1104  print_independent_symbols,
1105  print_dependent_functions,
1106  print_cse_reductions);
1107  }
1108  };
1109 
1110 # endif // DOXYGEN
1111 
1112 
1113  /* -------------------- Utility functions ---------------------- */
1114 
1115 
1137  template <typename NumberType,
1138  int rank,
1139  int dim,
1140  template <int, int, typename>
1141  class TensorType>
1142  TensorType<rank, dim, NumberType>
1143  tensor_evaluate_optimized(
1144  const TensorType<rank, dim, Expression> &symbol_tensor,
1145  const std::vector<NumberType> & cached_evaluation,
1146  const BatchOptimizer<NumberType> & optimizer)
1147  {
1148  TensorType<rank, dim, NumberType> out;
1149  for (unsigned int i = 0; i < out.n_independent_components; ++i)
1150  {
1151  const TableIndices<rank> indices(
1152  out.unrolled_to_component_indices(i));
1153  out[indices] =
1154  optimizer.extract(symbol_tensor[indices], cached_evaluation);
1155  }
1156  return out;
1157  }
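 // A brief sketch of how this helper is typically driven (the names `opt`
 // and `F` are hypothetical): once a BatchOptimizer `opt` has had values
 // substituted, a registered tensor of expressions `F` can be converted
 // back into a tensor of numbers via
 //
 //   const std::vector<double> &cache = opt.evaluate();
 //   const Tensor<2, dim, double> F_values =
 //     tensor_evaluate_optimized(F, cache, opt);
 //
 // This is precisely what the tensor-valued overloads of
 // BatchOptimizer<ReturnType>::extract() do (see the inline definitions
 // towards the end of this file).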
1158 
1159 
1182  template <typename NumberType, int dim>
1183  SymmetricTensor<4, dim, NumberType>
1184  tensor_evaluate_optimized(
1185  const SymmetricTensor<4, dim, Expression> &symbol_tensor,
1186  const std::vector<NumberType> & cached_evaluation,
1187  const BatchOptimizer<NumberType> & optimizer)
1188  {
1189  SymmetricTensor<4, dim, NumberType> out;
1190  for (unsigned int i = 0;
1191  i < SymmetricTensor<2, dim>::n_independent_components;
1192  ++i)
1193  for (unsigned int j = 0;
1194  j < SymmetricTensor<2, dim>::n_independent_components;
1195  ++j)
1196  {
1197  const TableIndices<4> indices =
1198  make_rank_4_tensor_indices<dim>(i, j);
1199  out[indices] =
1200  optimizer.extract(symbol_tensor[indices], cached_evaluation);
1201  }
1202  return out;
1203  }
1204 
1205 
1223  template <typename NumberType, typename T>
1224  void
1225  register_functions(BatchOptimizer<NumberType> &optimizer,
1226  const T & function)
1227  {
1228  optimizer.register_function(function);
1229  }
1230 
1231 
1249  template <typename NumberType, typename T>
1250  void
1251  register_functions(BatchOptimizer<NumberType> &optimizer,
1252  const std::vector<T> & functions)
1253  {
1254  for (const auto &function : functions)
1255  register_functions(optimizer, function);
1256  }
1257 
1258 
1278  template <typename NumberType, typename T, typename... Args>
1279  void
1280  register_functions(BatchOptimizer<NumberType> &optimizer,
1281  const T & function,
1282  const Args &...other_functions)
1283  {
1284  register_functions(optimizer, function);
1285  register_functions(optimizer, other_functions...);
1286  }
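 // As an illustration, the three overloads above let a caller register a
 // mixed set of dependent functions in a single call. With a scalar
 // Expression `psi` and a std::vector<Expression> `more_funcs`
 // (hypothetical names):
 //
 //   internal::register_functions(optimizer, psi, more_funcs);
 //
 // forwards each argument in turn to BatchOptimizer::register_function()
 // or BatchOptimizer::register_functions().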
1287 
1288 
1300  template <int rank,
1301  int dim,
1302  template <int, int, typename>
1303  class TensorType>
1304  types::symbol_vector
1305  unroll_to_expression_vector(
1306  const TensorType<rank, dim, Expression> &symbol_tensor)
1307  {
1308  types::symbol_vector out;
1309  out.reserve(symbol_tensor.n_independent_components);
1310  for (unsigned int i = 0; i < symbol_tensor.n_independent_components;
1311  ++i)
1312  {
1313  const TableIndices<rank> indices(
1314  symbol_tensor.unrolled_to_component_indices(i));
1315  out.push_back(symbol_tensor[indices].get_RCP());
1316  }
1317  return out;
1318  }
1319 
1320 
1330  template <int dim>
1331  types::symbol_vector
1332  unroll_to_expression_vector(
1333  const SymmetricTensor<4, dim, Expression> &symbol_tensor)
1334  {
1335  types::symbol_vector out;
1336  out.reserve(symbol_tensor.n_independent_components);
1337  for (unsigned int i = 0;
1338  i < SymmetricTensor<2, dim>::n_independent_components;
1339  ++i)
1340  for (unsigned int j = 0;
1341  j < SymmetricTensor<2, dim>::n_independent_components;
1342  ++j)
1343  {
1344  const TableIndices<4> indices =
1345  make_rank_4_tensor_indices<dim>(i, j);
1346  out.push_back(symbol_tensor[indices].get_RCP());
1347  }
1348  return out;
1349  }
1350 
1351  } // namespace internal
1352 
1353 
1354 
1445  template <typename ReturnType>
1446  class BatchOptimizer
1447  {
1448  public:
1456  BatchOptimizer();
1457 
1475  BatchOptimizer(const enum OptimizerType &optimization_method,
1476  const enum OptimizationFlags &optimization_flags =
1477  OptimizationFlags::optimize_all);
1478 
1488  BatchOptimizer(const BatchOptimizer &other);
1489 
1493  BatchOptimizer(BatchOptimizer &&) = default;
1494 
1498  ~BatchOptimizer() = default;
1499 
1510  void
1511  copy_from(const BatchOptimizer &other);
1512 
1522  template <typename Stream>
1523  void
1524  print(Stream &stream, const bool print_cse = false) const;
1525 
1534  template <class Archive>
1535  void
1536  save(Archive &archive, const unsigned int version) const;
1537 
1551  template <class Archive>
1552  void
1553  load(Archive &archive, const unsigned int version);
1554 
1555 # ifdef DOXYGEN
1577  template <class Archive>
1578  void
1579  serialize(Archive &archive, const unsigned int version);
1580 # else
1581  // This macro defines the serialize() method that is compatible with
1582  // the templated save() and load() method that have been implemented.
1583  BOOST_SERIALIZATION_SPLIT_MEMBER()
1584 # endif
1585 
1590 
1596  void
1597  register_symbols(const types::substitution_map &substitution_map);
1598 
1604  void
1605  register_symbols(const SymEngine::map_basic_basic &substitution_map);
1606 
1617  void
1618  register_symbols(const types::symbol_vector &symbols);
1619 
1630  void
1631  register_symbols(const SymEngine::vec_basic &symbols);
1632 
1637  types::symbol_vector
1638  get_independent_symbols() const;
1639 
1645  std::size_t
1646  n_independent_variables() const;
1647 
1649 
1654 
1659  void
1660  register_function(const Expression &function);
1661 
1666  template <int rank, int dim>
1667  void
1668  register_function(const Tensor<rank, dim, Expression> &function_tensor);
1669 
1674  template <int rank, int dim>
1675  void
1676  register_function(
1677  const SymmetricTensor<rank, dim, Expression> &function_tensor);
1678 
1683  void
1684  register_functions(const types::symbol_vector &functions);
1685 
1690  void
1691  register_functions(const SymEngine::vec_basic &functions);
1692 
1702  template <typename T>
1703  void
1704  register_functions(const std::vector<T> &functions);
1705 
1719  template <typename T, typename... Args>
1720  void
1721  register_functions(const T &functions, const Args &...other_functions);
1722 
1727  const types::symbol_vector &
1728  get_dependent_functions() const;
1729 
1736  std::size_t
1737  n_dependent_variables() const;
1738 
1740 
1745 
1757  void
1758  set_optimization_method(const enum OptimizerType &optimization_method,
1759  const enum OptimizationFlags &optimization_flags =
1760  OptimizationFlags::optimize_all);
1761 
1766  enum OptimizerType
1767  optimization_method() const;
1768 
1773  enum OptimizationFlags
1774  optimization_flags() const;
1775 
1781  bool
1782  use_symbolic_CSE() const;
1783 
1799  void
1800  optimize();
1801 
1806  bool
1807  optimized() const;
1808 
1810 
1815 
1825  void
1826  substitute(const types::substitution_map &substitution_map) const;
1827 
1837  void
1838  substitute(const SymEngine::map_basic_basic &substitution_map) const;
1839 
1850  void
1851  substitute(const types::symbol_vector & symbols,
1852  const std::vector<ReturnType> &values) const;
1853 
1864  void
1865  substitute(const SymEngine::vec_basic & symbols,
1866  const std::vector<ReturnType> &values) const;
1867 
1873  bool
1874  values_substituted() const;
1875 
1877 
1882 
1905  const std::vector<ReturnType> &
1906  evaluate() const;
1907 
1915  ReturnType
1916  evaluate(const Expression &func) const;
1917 
1926  std::vector<ReturnType>
1927  evaluate(const std::vector<Expression> &funcs) const;
1928 
1937  template <int rank, int dim>
1938  Tensor<rank, dim, ReturnType>
1939  evaluate(const Tensor<rank, dim, Expression> &funcs) const;
1940 
1941 
1950  template <int rank, int dim>
1951  SymmetricTensor<rank, dim, ReturnType>
1952  evaluate(const SymmetricTensor<rank, dim, Expression> &funcs) const;
1953 
1954 
1962  ReturnType
1963  extract(const Expression & func,
1964  const std::vector<ReturnType> &cached_evaluation) const;
1965 
1966 
1974  std::vector<ReturnType>
1975  extract(const std::vector<Expression> &funcs,
1976  const std::vector<ReturnType> &cached_evaluation) const;
1977 
1978 
1986  template <int rank, int dim>
1987  Tensor<rank, dim, ReturnType>
1988  extract(const Tensor<rank, dim, Expression> &funcs,
1989  const std::vector<ReturnType> & cached_evaluation) const;
1990 
1991 
1999  template <int rank, int dim>
2000  SymmetricTensor<rank, dim, ReturnType>
2001  extract(const SymmetricTensor<rank, dim, Expression> &funcs,
2002  const std::vector<ReturnType> &cached_evaluation) const;
2003 
2005 
2006  private:
2010  enum OptimizerType method;
2011 
2016  enum OptimizationFlags flags;
2017 
2027  types::substitution_map independent_variables_symbols;
2028 
2034  types::symbol_vector dependent_variables_functions;
2035 
2040  bool
2041  is_valid_nonunique_dependent_variable(
2042  const SD::Expression &function) const;
2043 
2048  bool
2049  is_valid_nonunique_dependent_variable(
2050  const SymEngine::RCP<const SymEngine::Basic> &function) const;
2051 
2066  mutable std::vector<ReturnType> dependent_variables_output;
2067 
2076  using map_dependent_expression_to_vector_entry_t =
2077  std::map<SD::Expression,
2078  std::size_t,
2079  SD::types::internal::ExpressionKeyLess>;
2080 
2085  map_dependent_expression_to_vector_entry_t map_dep_expr_vec_entry;
2086 
2093  mutable std::unique_ptr<SymEngine::Visitor> optimizer;
2094 
2103  mutable bool ready_for_value_extraction;
2104 
2109  mutable bool has_been_serialized;
2110 
2114  void
2115  register_scalar_function(const SD::Expression &function);
2116 
2121  void
2122  register_vector_functions(const types::symbol_vector &functions);
2123 
2127  void
2128  create_optimizer(std::unique_ptr<SymEngine::Visitor> &optimizer);
2129 
2146  void
2147  substitute(const std::vector<ReturnType> &substitution_values) const;
2148  };
2149 
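 // A minimal usage sketch for BatchOptimizer (illustrative only; it assumes
 // the usual SD helper functions make_symbol(), make_symbol_map() and
 // make_substitution_map() declared elsewhere in this module, and the names
 // x, y, f and optimizer are hypothetical):
 //
 //   using namespace Differentiation;
 //
 //   const SD::Expression x = SD::make_symbol("x");
 //   const SD::Expression y = SD::make_symbol("y");
 //   const SD::Expression f = x * y + SD::sin(x);
 //
 //   SD::BatchOptimizer<double> optimizer(SD::OptimizerType::lambda,
 //                                        SD::OptimizationFlags::optimize_cse);
 //   optimizer.register_symbols(SD::make_symbol_map(x, y));
 //   optimizer.register_functions(f);
 //   optimizer.optimize(); // potentially expensive, but done only once
 //
 //   // Evaluate for one set of symbol values:
 //   optimizer.substitute(SD::make_substitution_map(
 //     std::make_pair(x, 2.0), std::make_pair(y, 3.0)));
 //   const double value = optimizer.evaluate(f);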
2150 
2151 
2152  /* -------------------- inline and template functions ------------------ */
2153 
2154 
2155 # ifndef DOXYGEN
2156 
2157 
2158  template <typename ReturnType>
2159  template <typename Stream>
2160  void
2161  BatchOptimizer<ReturnType>::print(Stream &stream,
2162  const bool /*print_cse*/) const
2163  {
2164  // Settings
2165  stream << "Method? " << optimization_method() << '\n';
2166  stream << "Flags: " << optimization_flags() << '\n';
2167  stream << "Optimized? " << (optimized() ? "Yes" : "No") << '\n';
2168  stream << "Values substituted? " << values_substituted() << "\n\n";
2169 
2170  // Independent variables
2171  stream << "Symbols (" << n_independent_variables()
2172  << " independent variables):" << '\n';
2173  int cntr = 0;
2174  for (SD::types::substitution_map::const_iterator it =
2175  independent_variables_symbols.begin();
2176  it != independent_variables_symbols.end();
2177  ++it, ++cntr)
2178  {
2179  stream << cntr << ": " << it->first << '\n';
2180  }
2181  stream << '\n' << std::flush;
2182 
2183  // Dependent functions
2184  stream << "Functions (" << n_dependent_variables()
2185  << " dependent variables):" << '\n';
2186  cntr = 0;
2187  for (typename SD::types::symbol_vector::const_iterator it =
2188  dependent_variables_functions.begin();
2189  it != dependent_variables_functions.end();
2190  ++it, ++cntr)
2191  {
2192  stream << cntr << ": " << (*it) << '\n';
2193  }
2194  stream << '\n' << std::flush;
2195 
2196  // Common subexpression
2197  if (optimized() == true && use_symbolic_CSE() == true)
2198  {
2199  Assert(optimizer, ExcNotInitialized());
2200  const bool print_cse_reductions = true;
2201  const bool print_independent_symbols = false;
2202  const bool print_dependent_functions = false;
2203 
2204  if (optimization_method() == OptimizerType::dictionary)
2205  {
2206  Assert(dynamic_cast<typename internal::DictionaryOptimizer<
2207  ReturnType>::OptimizerType *>(optimizer.get()),
2208  ExcMessage("Cannot cast optimizer to Dictionary type."));
2209 
2210  internal::OptimizerHelper<
2211  ReturnType,
2212  internal::DictionaryOptimizer<ReturnType>>::
2213  print(stream,
2214  dynamic_cast<typename internal::DictionaryOptimizer<
2215  ReturnType>::OptimizerType *>(optimizer.get()),
2216  print_independent_symbols,
2217  print_dependent_functions,
2218  print_cse_reductions);
2219 
2220  stream << '\n' << std::flush;
2221  }
2222  else if (optimization_method() == OptimizerType::lambda)
2223  {
2224  Assert(dynamic_cast<typename internal::LambdaOptimizer<
2225  ReturnType>::OptimizerType *>(optimizer.get()),
2226  ExcMessage("Cannot cast optimizer to Lambda type."));
2227 
2228  internal::OptimizerHelper<ReturnType,
2229  internal::LambdaOptimizer<ReturnType>>::
2230  print(stream,
2231  dynamic_cast<typename internal::LambdaOptimizer<
2232  ReturnType>::OptimizerType *>(optimizer.get()),
2233  print_independent_symbols,
2234  print_dependent_functions,
2235  print_cse_reductions);
2236  }
2237 # ifdef HAVE_SYMENGINE_LLVM
2238  else if (optimization_method() == OptimizerType::llvm)
2239  {
2240  Assert(dynamic_cast<typename internal::LLVMOptimizer<
2241  ReturnType>::OptimizerType *>(optimizer.get()),
2242  ExcMessage("Cannot cast optimizer to LLVM type."));
2243 
2244  internal::OptimizerHelper<ReturnType,
2245  internal::LLVMOptimizer<ReturnType>>::
2246  print(stream,
2247  dynamic_cast<typename internal::LLVMOptimizer<
2248  ReturnType>::OptimizerType *>(optimizer.get()),
2249  print_independent_symbols,
2250  print_dependent_functions,
2251  print_cse_reductions);
2252  }
2253 # endif // HAVE_SYMENGINE_LLVM
2254  else
2255  {
2256  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2257  }
2258  }
2259 
2260  if (values_substituted())
2261  {
2262  stream << "Evaluated functions:" << '\n';
2263  stream << std::flush;
2264  cntr = 0;
2265  for (typename std::vector<ReturnType>::const_iterator it =
2266  dependent_variables_output.begin();
2267  it != dependent_variables_output.end();
2268  ++it, ++cntr)
2269  {
2270  stream << cntr << ": " << (*it) << '\n';
2271  }
2272  stream << '\n' << std::flush;
2273  }
2274  }
2275 
2276 
2277 
2278  template <typename ReturnType>
2279  template <class Archive>
2280  void
2281  BatchOptimizer<ReturnType>::save(Archive & ar,
2282  const unsigned int version) const
2283  {
2284  // Serialize enum classes...
2285  {
2286  const auto m =
2287  static_cast<typename std::underlying_type<OptimizerType>::type>(
2288  method);
2289  ar &m;
2290  }
2291  {
2292  const auto f =
2293  static_cast<typename std::underlying_type<OptimizationFlags>::type>(
2294  flags);
2295  ar &f;
2296  }
2297 
2298  // Important: Independent variables must always be
2299  // serialized before the dependent variables.
2300  ar &independent_variables_symbols;
2301  ar &dependent_variables_functions;
2302 
2303  ar &dependent_variables_output;
2304  ar &map_dep_expr_vec_entry;
2305  ar &ready_for_value_extraction;
2306 
2307  // Mark that we've saved this class at some point.
2308  has_been_serialized = true;
2309  ar &has_been_serialized;
2310 
2311  // When we serialize the optimizer itself, we have to (unfortunately)
2312  // provide it with sufficient information to rebuild itself from scratch.
2313  // This is because only two of the three optimization classes support
2314  // real serialization (i.e. have save/load capability).
2315  const SD::types::symbol_vector symbol_vec =
2316  Utilities::extract_symbols(independent_variables_symbols);
2317  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2318  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2319  ReturnType>::OptimizerType *>(optimizer.get()))
2320  {
2321  Assert(optimization_method() == OptimizerType::dictionary,
2322  ExcInternalError());
2323  internal::OptimizerHelper<
2324  ReturnType,
2325  internal::DictionaryOptimizer<ReturnType>>::save(ar, version, opt);
2326  }
2327  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2328  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2329  ReturnType>::OptimizerType *>(optimizer.get()))
2330  {
2331  Assert(optimization_method() == OptimizerType::lambda,
2332  ExcInternalError());
2333  internal::OptimizerHelper<
2334  ReturnType,
2335  internal::LambdaOptimizer<ReturnType>>::save(ar, version, opt);
2336  }
2337 # ifdef HAVE_SYMENGINE_LLVM
2338  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2339  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2340  ReturnType>::OptimizerType *>(optimizer.get()))
2341  {
2342  Assert(optimization_method() == OptimizerType::llvm,
2343  ExcInternalError());
2344  internal::OptimizerHelper<
2345  ReturnType,
2346  internal::LLVMOptimizer<ReturnType>>::save(ar, version, opt);
2347  }
2348 # endif
2349  else
2350  {
2351  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2352  }
2353  }
2354 
2355 
2356 
2357  template <typename ReturnType>
2358  template <class Archive>
2359  void
2360  BatchOptimizer<ReturnType>::load(Archive &ar, const unsigned int version)
2361  {
2362  Assert(independent_variables_symbols.empty(), ExcInternalError());
2363  Assert(dependent_variables_functions.empty(), ExcInternalError());
2364  Assert(dependent_variables_output.empty(), ExcInternalError());
2365  Assert(map_dep_expr_vec_entry.empty(), ExcInternalError());
2366  Assert(ready_for_value_extraction == false, ExcInternalError());
2367 
2368  // Deserialize enum classes...
2369  {
2370  typename std::underlying_type<OptimizerType>::type m;
2371  ar & m;
2372  method = static_cast<OptimizerType>(m);
2373  }
2374  {
2375  typename std::underlying_type<OptimizationFlags>::type f;
2376  ar & f;
2377  flags = static_cast<OptimizationFlags>(f);
2378  }
2379 
2380  // Important: Independent variables must always be
2381  // deserialized before the dependent variables.
2382  ar &independent_variables_symbols;
2383  ar &dependent_variables_functions;
2384 
2385  ar &dependent_variables_output;
2386  ar &map_dep_expr_vec_entry;
2387  ar &ready_for_value_extraction;
2388 
2389  ar &has_been_serialized;
2390 
2391  // If we're reading in data, then create the optimizer
2392  // and then deserialize it.
2393  Assert(!optimizer, ExcInternalError());
2394 
2395  // Create and configure the optimizer
2396  create_optimizer(optimizer);
2397  Assert(optimizer, ExcNotInitialized());
2398 
2399  // When we deserialize the optimizer itself, we have to (unfortunately)
2400  // provide it with sufficient information to rebuild itself from scratch.
2401  // This is because only two of the three optimization classes support
2402  // real serialization (i.e. have save/load capability).
2403  const SD::types::symbol_vector symbol_vec =
2404  Utilities::extract_symbols(independent_variables_symbols);
2405  if (typename internal::DictionaryOptimizer<ReturnType>::OptimizerType
2406  *opt = dynamic_cast<typename internal::DictionaryOptimizer<
2407  ReturnType>::OptimizerType *>(optimizer.get()))
2408  {
2409  Assert(optimization_method() == OptimizerType::dictionary,
2410  ExcInternalError());
2411  internal::OptimizerHelper<ReturnType,
2412  internal::DictionaryOptimizer<ReturnType>>::
2413  load(ar,
2414  version,
2415  opt,
2416  Utilities::convert_expression_vector_to_basic_vector(
2417  symbol_vec),
2418  Utilities::convert_expression_vector_to_basic_vector(
2419  dependent_variables_functions),
2420  optimization_flags());
2421  }
2422  else if (typename internal::LambdaOptimizer<ReturnType>::OptimizerType
2423  *opt = dynamic_cast<typename internal::LambdaOptimizer<
2424  ReturnType>::OptimizerType *>(optimizer.get()))
2425  {
2426  Assert(optimization_method() == OptimizerType::lambda,
2427  ExcInternalError());
2428  internal::OptimizerHelper<ReturnType,
2429  internal::LambdaOptimizer<ReturnType>>::
2430  load(ar,
2431  version,
2432  opt,
2433  Utilities::convert_expression_vector_to_basic_vector(
2434  symbol_vec),
2435  Utilities::convert_expression_vector_to_basic_vector(
2436  dependent_variables_functions),
2437  optimization_flags());
2438  }
2439 # ifdef HAVE_SYMENGINE_LLVM
2440  else if (typename internal::LLVMOptimizer<ReturnType>::OptimizerType
2441  *opt = dynamic_cast<typename internal::LLVMOptimizer<
2442  ReturnType>::OptimizerType *>(optimizer.get()))
2443  {
2444  Assert(optimization_method() == OptimizerType::llvm,
2445  ExcInternalError());
2446  internal::OptimizerHelper<ReturnType,
2447  internal::LLVMOptimizer<ReturnType>>::
2448  load(ar,
2449  version,
2450  opt,
2451  Utilities::convert_expression_vector_to_basic_vector(
2452  symbol_vec),
2453  Utilities::convert_expression_vector_to_basic_vector(
2454  dependent_variables_functions),
2455  optimization_flags());
2456  }
2457 # endif
2458  else
2459  {
2460  AssertThrow(false, ExcMessage("Unknown optimizer type."));
2461  }
2462  }
2463 
2464 
2465 
2466  template <typename ReturnType>
2467  template <int rank, int dim>
2468  void
2469  BatchOptimizer<ReturnType>::register_function(
2470  const Tensor<rank, dim, Expression> &function_tensor)
2471  {
2472  Assert(optimized() == false,
2473  ExcMessage(
2474  "Cannot register functions once the optimizer is finalised."));
2475 
2476  register_vector_functions(
2477  internal::unroll_to_expression_vector(function_tensor));
2478  }
2479 
2480 
2481 
2482  template <typename ReturnType>
2483  template <int rank, int dim>
2484  void
2485  BatchOptimizer<ReturnType>::register_function(
2486  const SymmetricTensor<rank, dim, Expression> &function_tensor)
2487  {
2488  Assert(optimized() == false,
2489  ExcMessage(
2490  "Cannot register functions once the optimizer is finalised."));
2491 
2492  register_vector_functions(
2493  internal::unroll_to_expression_vector(function_tensor));
2494  }
2495 
2496 
2497 
2498  template <typename ReturnType>
2499  template <typename T, typename... Args>
2500  void
2501  BatchOptimizer<ReturnType>::register_functions(
2502  const T &functions,
2503  const Args &...other_functions)
2504  {
2505  internal::register_functions(*this, functions);
2506  internal::register_functions(*this, other_functions...);
2507  }
2508 
2509 
2510 
2511  template <typename ReturnType>
2512  template <typename T>
2513  void
2514  BatchOptimizer<ReturnType>::register_functions(
2515  const std::vector<T> &functions)
2516  {
2517  internal::register_functions(*this, functions);
2518  }
2519 
2520 
2521 
2522  template <typename ReturnType>
2523  template <int rank, int dim>
2524  Tensor<rank, dim, ReturnType>
2525  BatchOptimizer<ReturnType>::extract(
2526  const Tensor<rank, dim, Expression> &funcs,
2527  const std::vector<ReturnType> & cached_evaluation) const
2528  {
2529  return internal::tensor_evaluate_optimized(funcs,
2530  cached_evaluation,
2531  *this);
2532  }
2533 
2534 
2535 
2536  template <typename ReturnType>
2537  template <int rank, int dim>
2538  Tensor<rank, dim, ReturnType>
2539  BatchOptimizer<ReturnType>::evaluate(
2540  const Tensor<rank, dim, Expression> &funcs) const
2541  {
2542  Assert(
2543  values_substituted() == true,
2544  ExcMessage(
2545  "The optimizer is not configured to perform evaluation. "
2546  "This action can only be performed after substitute() has been called."));
2547 
2548  return extract(funcs, dependent_variables_output);
2549  }
2550 
2551 
2552 
2553  template <typename ReturnType>
2554  template <int rank, int dim>
2555  SymmetricTensor<rank, dim, ReturnType>
2556  BatchOptimizer<ReturnType>::extract(
2557  const SymmetricTensor<rank, dim, Expression> &funcs,
2558  const std::vector<ReturnType> & cached_evaluation) const
2559  {
2560  return internal::tensor_evaluate_optimized(funcs,
2561  cached_evaluation,
2562  *this);
2563  }
2564 
2565 
2566 
2567  template <typename ReturnType>
2568  template <int rank, int dim>
2569  SymmetricTensor<rank, dim, ReturnType>
2570  BatchOptimizer<ReturnType>::evaluate(
2571  const SymmetricTensor<rank, dim, Expression> &funcs) const
2572  {
2573  Assert(
2574  values_substituted() == true,
2575  ExcMessage(
2576  "The optimizer is not configured to perform evaluation. "
2577  "This action can only be performed after substitute() has been called."));
2578 
2579  return extract(funcs, dependent_variables_output);
2580  }
2581 
2582 # endif // DOXYGEN
2583 
2584  } // namespace SD
2585 } // namespace Differentiation
2586 
2587 
2588 DEAL_II_NAMESPACE_CLOSE
2589 
2590 #endif // DEAL_II_WITH_SYMENGINE
2591 
2592 #endif