cuda_vector.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2016 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/cuda.h>
#include <deal.II/base/cuda_size.h>
#include <deal.II/base/exceptions.h>

#include <deal.II/lac/cuda_kernels.h>
#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/read_write_vector.h>

#include <cmath>

#ifdef DEAL_II_WITH_CUDA

DEAL_II_NAMESPACE_OPEN

namespace LinearAlgebra
{
  namespace CUDAWrappers
  {
    using ::dealii::CUDAWrappers::block_size;
    using ::dealii::CUDAWrappers::chunk_size;

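    // block_size and chunk_size are the launch constants from cuda_size.h:
    // kernels run with block_size threads per block, and each block is
    // responsible for chunk_size * block_size vector entries. This is what
    // the recurring grid-size formula
    //   n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size)
    // rounds up for.
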
    template <typename Number>
    Vector<Number>::Vector()
      : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
      , n_elements(0)
    {}


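    // Device memory is held in a std::unique_ptr whose deleter is
    // Utilities::CUDA::delete_device_data, so allocations are released
    // automatically when the vector goes out of scope.
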
    template <typename Number>
    Vector<Number>::Vector(const Vector<Number> &V)
      : val(Utilities::CUDA::allocate_device_data<Number>(V.n_elements),
            Utilities::CUDA::delete_device_data<Number>)
      , n_elements(V.n_elements)
    {
      // Copy the values.
      const cudaError_t error_code = cudaMemcpy(val.get(),
                                                V.val.get(),
                                                n_elements * sizeof(Number),
                                                cudaMemcpyDeviceToDevice);
      AssertCuda(error_code);
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator=(const Vector<Number> &V)
    {
      if (n_elements < V.n_elements)
        reinit(V.n_elements, true);
      else
        n_elements = V.n_elements;
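
      // Note that if *this was at least as large as V, the device buffer is
      // reused and only the logical size shrinks; memory is reallocated only
      // when the vector has to grow.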

      // Copy the values.
      const cudaError_t error_code = cudaMemcpy(val.get(),
                                                V.val.get(),
                                                n_elements * sizeof(Number),
                                                cudaMemcpyDeviceToDevice);
      AssertCuda(error_code);

      return *this;
    }


    template <typename Number>
    Vector<Number>::Vector(const size_type n)
      : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
      , n_elements(0)
    {
      reinit(n, false);
    }


    template <typename Number>
    void
    Vector<Number>::reinit(const size_type n, const bool omit_zeroing_entries)
    {
      // Resize the underlying array if necessary
      if (n == 0)
        val.reset();
      else if (n != n_elements)
        val.reset(Utilities::CUDA::allocate_device_data<Number>(n));
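
      // Note that calling reinit() with the current size keeps the existing
      // device allocation; a new buffer is only allocated when the size
      // actually changes.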

      // If necessary set the elements to zero
      if (omit_zeroing_entries == false)
        {
          const cudaError_t error_code =
            cudaMemset(val.get(), 0, n * sizeof(Number));
          AssertCuda(error_code);
        }
      n_elements = n;
    }


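    // A minimal usage sketch (the names are illustrative only):
    //
    //   LinearAlgebra::CUDAWrappers::Vector<double> v;
    //   v.reinit(100);       // allocate and zero 100 entries on the device
    //   v.reinit(200, true); // grow; entries are left uninitialized
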
    template <typename Number>
    void
    Vector<Number>::reinit(const VectorSpaceVector<Number> &V,
                           const bool omit_zeroing_entries)
    {
      reinit(V.size(), omit_zeroing_entries);
    }


    template <typename Number>
    void
    Vector<Number>::import(
      const ReadWriteVector<Number> &V,
      VectorOperation::values        operation,
      std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>)
    {
      if (operation == VectorOperation::insert)
        {
          const cudaError_t error_code = cudaMemcpy(val.get(),
                                                    V.begin(),
                                                    n_elements * sizeof(Number),
                                                    cudaMemcpyHostToDevice);
          AssertCuda(error_code);
        }
      else if (operation == VectorOperation::add)
        {
          // Create a temporary vector on the device
          Number *    tmp;
          cudaError_t error_code =
            cudaMalloc(&tmp, n_elements * sizeof(Number));
          AssertCuda(error_code);

          // Copy the vector from the host to the temporary vector on the
          // device
          error_code = cudaMemcpy(tmp,
                                  V.begin(),
                                  n_elements * sizeof(Number),
                                  cudaMemcpyHostToDevice);
          AssertCuda(error_code);

          // Add the two vectors
          const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
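
          // Each block covers chunk_size * block_size entries, so this is
          // the grid size rounded up to span all n_elements.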

          kernel::vector_bin_op<Number, kernel::Binop_Addition>
            <<<n_blocks, block_size>>>(val.get(), tmp, n_elements);
          AssertCudaKernel();

          // Delete the temporary vector
          Utilities::CUDA::free(tmp);
        }
      else
        AssertThrow(false, ExcNotImplemented());
    }


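    // A host-to-device transfer sketch (the names and the fill step are
    // illustrative only):
    //
    //   LinearAlgebra::ReadWriteVector<double> rw(v.size());
    //   // ... fill rw on the host ...
    //   v.import(rw, VectorOperation::insert); // overwrite device values
    //   v.import(rw, VectorOperation::add);    // accumulate into them
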
    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator=(const Number s)
    {
      Assert(s == Number(), ExcMessage("Only 0 can be assigned to a vector."));
      (void)s;

      const cudaError_t error_code =
        cudaMemset(val.get(), 0, n_elements * sizeof(Number));
      AssertCuda(error_code);

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator*=(const Number factor)
    {
      AssertIsFinite(factor);
      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::vec_scale<Number>
        <<<n_blocks, block_size>>>(val.get(), factor, n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator/=(const Number factor)
    {
      AssertIsFinite(factor);
      Assert(factor != Number(0.), ExcZero());
      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::vec_scale<Number>
        <<<n_blocks, block_size>>>(val.get(), 1. / factor, n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator+=(const VectorSpaceVector<Number> &V)
    {
      // Check that casting will work
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
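      // (A failing dynamic_cast of a reference throws std::bad_cast, so the
      // cast above is checked even in release mode, where the Assert is
      // compiled out.)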
      Assert(down_V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);

      kernel::vector_bin_op<Number, kernel::Binop_Addition>
        <<<n_blocks, block_size>>>(val.get(), down_V.val.get(), n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator-=(const VectorSpaceVector<Number> &V)
    {
      // Check that casting will work
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(down_V.size() == this->size(),
             ExcMessage(
               "Cannot subtract two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);

      kernel::vector_bin_op<Number, kernel::Binop_Subtraction>
        <<<n_blocks, block_size>>>(val.get(), down_V.val.get(), n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Number
    Vector<Number>::operator*(const VectorSpaceVector<Number> &V) const
    {
      // Check that casting will work
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(down_V.size() == this->size(),
             ExcMessage(
               "Cannot compute the scalar product of two vectors with "
               "different numbers of elements."));

      Number *    result_device;
      cudaError_t error_code =
        cudaMalloc(&result_device, n_elements * sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);
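
      // Only the first entry of the scratch buffer needs to be zeroed: the
      // reduction kernel accumulates the final result in result_device[0],
      // which is all that is copied back to the host below.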

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::double_vector_reduction<Number, kernel::DotProduct<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  down_V.val.get(),
                                                  static_cast<unsigned int>(
                                                    n_elements));

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    void
    Vector<Number>::add(const Number a)
    {
      AssertIsFinite(a);
      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::vec_add<Number>
        <<<n_blocks, block_size>>>(val.get(), a, n_elements);
      AssertCudaKernel();
    }


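    // add(a, V) computes *this += a * V.
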
    template <typename Number>
    void
    Vector<Number>::add(const Number a, const VectorSpaceVector<Number> &V)
    {
      AssertIsFinite(a);

      // Check that casting will work.
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(down_V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::add_aV<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, down_V.val.get(), n_elements);
      AssertCudaKernel();
    }


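    // add(a, V, b, W) computes the linear combination *this += a*V + b*W in
    // a single kernel launch.
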
    template <typename Number>
    void
    Vector<Number>::add(const Number                     a,
                        const VectorSpaceVector<Number> &V,
                        const Number                     b,
                        const VectorSpaceVector<Number> &W)
    {
      AssertIsFinite(a);
      AssertIsFinite(b);

      // Check that casting will work.
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(down_V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      // Check that casting will work.
      Assert(dynamic_cast<const Vector<Number> *>(&W) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast W. If it fails, an exception is thrown.
      const Vector<Number> &down_W = dynamic_cast<const Vector<Number> &>(W);
      Assert(down_W.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::add_aVbW<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, down_V.val.get(), b, down_W.val.get(), n_elements);
      AssertCudaKernel();
    }


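    // sadd(s, a, V) computes the scaled addition *this = s * (*this) + a * V.
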
    template <typename Number>
    void
    Vector<Number>::sadd(const Number                     s,
                         const Number                     a,
                         const VectorSpaceVector<Number> &V)
    {
      AssertIsFinite(s);
      AssertIsFinite(a);

      // Check that casting will work.
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(down_V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::sadd<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        s, val.get(), a, down_V.val.get(), n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    void
    Vector<Number>::scale(const VectorSpaceVector<Number> &scaling_factors)
    {
      // Check that casting will work.
      Assert(dynamic_cast<const Vector<Number> *>(&scaling_factors) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast scaling_factors. If it fails, an exception is thrown.
      const Vector<Number> &down_scaling_factors =
        dynamic_cast<const Vector<Number> &>(scaling_factors);
      Assert(down_scaling_factors.size() == this->size(),
             ExcMessage(
               "Cannot scale two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::scale<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), down_scaling_factors.val.get(), n_elements);
      AssertCudaKernel();
    }


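    // equ(a, V) assigns *this = a * V.
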
    template <typename Number>
    void
    Vector<Number>::equ(const Number a, const VectorSpaceVector<Number> &V)
    {
      AssertIsFinite(a);

      // Check that casting will work.
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V. If it fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(
        down_V.size() == this->size(),
        ExcMessage(
          "Cannot assign two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::equ<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, down_V.val.get(), n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    bool
    Vector<Number>::all_zero() const
    {
      return linfty_norm() == 0;
    }


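    // The reductions below (mean_value, l1_norm, linfty_norm) all follow the
    // same pattern: zero a one-element scratch buffer on the device, launch
    // a reduction kernel that accumulates into it, and copy the scalar back
    // to the host.
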
    template <typename Number>
    typename Vector<Number>::value_type
    Vector<Number>::mean_value() const
    {
      Number *    result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::reduction<Number, kernel::ElemSum<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result /
             static_cast<typename Vector<Number>::value_type>(n_elements);
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::l1_norm() const
    {
      Number *    result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::reduction<Number, kernel::L1Norm<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::l2_norm() const
    {
      return std::sqrt(norm_sqr());
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::norm_sqr() const
    {
      return (*this) * (*this);
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::linfty_norm() const
    {
      Number *    result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::reduction<Number, kernel::LInfty<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result;
    }


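    // add_and_dot(a, V, W) fuses two operations into one kernel launch: it
    // updates *this += a * V and returns the scalar product of the updated
    // vector with W, which avoids reading *this from device memory twice.
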
    template <typename Number>
    Number
    Vector<Number>::add_and_dot(const Number                     a,
                                const VectorSpaceVector<Number> &V,
                                const VectorSpaceVector<Number> &W)
    {
      AssertIsFinite(a);

      // Check that casting will work
      Assert(dynamic_cast<const Vector<Number> *>(&V) != nullptr,
             ExcVectorTypeNotCompatible());
      Assert(dynamic_cast<const Vector<Number> *>(&W) != nullptr,
             ExcVectorTypeNotCompatible());

      // Downcast V and W. If either fails, an exception is thrown.
      const Vector<Number> &down_V = dynamic_cast<const Vector<Number> &>(V);
      Assert(down_V.size() == this->size(),
             ExcMessage("Vector V has the wrong size."));
      const Vector<Number> &down_W = dynamic_cast<const Vector<Number> &>(W);
      Assert(down_W.size() == this->size(),
             ExcMessage("Vector W has the wrong size."));

      Number *    result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::add_and_dot<Number>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  down_V.val.get(),
                                                  down_W.val.get(),
                                                  a,
                                                  n_elements);

      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    void
    Vector<Number>::print(std::ostream &     out,
                          const unsigned int precision,
                          const bool         scientific,
                          const bool) const
    {
      AssertThrow(out.fail() == false, ExcIO());
      std::ios::fmtflags old_flags     = out.flags();
      unsigned int       old_precision = out.precision(precision);

      out.precision(precision);
      if (scientific)
        out.setf(std::ios::scientific, std::ios::floatfield);
      else
        out.setf(std::ios::fixed, std::ios::floatfield);

      out << "IndexSet: ";
      complete_index_set(n_elements).print(out);
      out << std::endl;

      // Copy the vector to the host
      std::vector<Number> cpu_val(n_elements);
      Utilities::CUDA::copy_to_host(val.get(), cpu_val);
      for (unsigned int i = 0; i < n_elements; ++i)
        out << cpu_val[i] << std::endl;
      out << std::flush;

      AssertThrow(out.fail() == false, ExcIO());
      // reset output format
      out.flags(old_flags);
      out.precision(old_precision);
    }


    template <typename Number>
    std::size_t
    Vector<Number>::memory_consumption() const
    {
      std::size_t memory = sizeof(*this);
      memory += sizeof(Number) * static_cast<std::size_t>(n_elements);

      return memory;
    }


    // Explicit instantiations
    template class Vector<float>;
    template class Vector<double>;
  } // namespace CUDAWrappers
} // namespace LinearAlgebra

DEAL_II_NAMESPACE_CLOSE

#endif