cuda_vector.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2016 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/cuda.h>
#include <deal.II/base/exceptions.h>

#include <deal.II/lac/cuda_kernels.h>
#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/read_write_vector.h>

#include <cmath>

#ifdef DEAL_II_WITH_CUDA

DEAL_II_NAMESPACE_OPEN

namespace LinearAlgebra
{
  namespace CUDAWrappers
  {
    using ::CUDAWrappers::block_size;
    using ::CUDAWrappers::chunk_size;

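    // All kernels in this file are launched with
    //   n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size)
    // blocks of block_size threads, i.e., the ceiling of n_elements divided
    // by the chunk_size * block_size elements one block works on.
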
    template <typename Number>
    Vector<Number>::Vector()
      : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
      , n_elements(0)
    {}


    template <typename Number>
    Vector<Number>::Vector(const Vector<Number> &V)
      : val(Utilities::CUDA::allocate_device_data<Number>(V.n_elements),
            Utilities::CUDA::delete_device_data<Number>)
      , n_elements(V.n_elements)
    {
      // Copy the values.
      const cudaError_t error_code = cudaMemcpy(val.get(),
                                                V.val.get(),
                                                n_elements * sizeof(Number),
                                                cudaMemcpyDeviceToDevice);
      AssertCuda(error_code);
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator=(const Vector<Number> &V)
    {
      if (n_elements < V.n_elements)
        reinit(V.n_elements, true);
      else
        n_elements = V.n_elements;

      // Copy the values.
      const cudaError_t error_code = cudaMemcpy(val.get(),
                                                V.val.get(),
                                                n_elements * sizeof(Number),
                                                cudaMemcpyDeviceToDevice);
      AssertCuda(error_code);

      return *this;
    }


    template <typename Number>
    Vector<Number>::Vector(const size_type n)
      : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
      , n_elements(0)
    {
      reinit(n, false);
    }


    template <typename Number>
    void
    Vector<Number>::reinit(const size_type n, const bool omit_zeroing_entries)
    {
      // Resize the underlying array if necessary
      if (n == 0)
        val.reset();
      else if (n != n_elements)
        val.reset(Utilities::CUDA::allocate_device_data<Number>(n));

      // If necessary set the elements to zero
      if (omit_zeroing_entries == false)
        {
          const cudaError_t error_code =
            cudaMemset(val.get(), 0, n * sizeof(Number));
          AssertCuda(error_code);
        }
      n_elements = n;
    }


    template <typename Number>
    void
    Vector<Number>::reinit(const Vector<Number> &V,
                           const bool            omit_zeroing_entries)
    {
      reinit(V.size(), omit_zeroing_entries);
    }


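    // In import_elements() below, VectorOperation::insert overwrites the
    // device values with the host values of V, while VectorOperation::add
    // copies V into a temporary device buffer and adds it to the current
    // values; other operations are not implemented.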
    template <typename Number>
    void
    Vector<Number>::import_elements(
      const ReadWriteVector<Number> &V,
      const VectorOperation::values  operation,
      const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase> &)
    {
      if (operation == VectorOperation::insert)
        {
          const cudaError_t error_code = cudaMemcpy(val.get(),
                                                    V.begin(),
                                                    n_elements * sizeof(Number),
                                                    cudaMemcpyHostToDevice);
          AssertCuda(error_code);
        }
      else if (operation == VectorOperation::add)
        {
          // Create a temporary vector on the device
          Number     *tmp;
          cudaError_t error_code =
            cudaMalloc(&tmp, n_elements * sizeof(Number));
          AssertCuda(error_code);

          // Copy the vector from the host to the temporary vector on the device
          error_code = cudaMemcpy(tmp,
                                  V.begin(),
                                  n_elements * sizeof(Number),
                                  cudaMemcpyHostToDevice);
          AssertCuda(error_code);

          // Add the two vectors
          const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);

          kernel::vector_bin_op<Number, kernel::Binop_Addition>
            <<<n_blocks, block_size>>>(val.get(), tmp, n_elements);
          AssertCudaKernel();

          // Delete the temporary vector
          Utilities::CUDA::free(tmp);
        }
      else
        AssertThrow(false, ExcNotImplemented());
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator=(const Number s)
    {
      Assert(s == Number(), ExcMessage("Only 0 can be assigned to a vector."));
      (void)s;

      const cudaError_t error_code =
        cudaMemset(val.get(), 0, n_elements * sizeof(Number));
      AssertCuda(error_code);

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator*=(const Number factor)
    {
      AssertIsFinite(factor);
      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::vec_scale<Number>
        <<<n_blocks, block_size>>>(val.get(), factor, n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator/=(const Number factor)
    {
      AssertIsFinite(factor);
      Assert(factor != Number(0.), ExcZero());
      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::vec_scale<Number>
        <<<n_blocks, block_size>>>(val.get(), 1. / factor, n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator+=(const Vector<Number> &V)
    {
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements"));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);

      kernel::vector_bin_op<Number, kernel::Binop_Addition>
        <<<n_blocks, block_size>>>(val.get(), V.val.get(), n_elements);
      AssertCudaKernel();

      return *this;
    }


    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator-=(const Vector<Number> &V)
    {
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);

      kernel::vector_bin_op<Number, kernel::Binop_Subtraction>
        <<<n_blocks, block_size>>>(val.get(), V.val.get(), n_elements);
      AssertCudaKernel();

      return *this;
    }


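    // The dot product and the norm and mean-value functions below all follow
    // the same pattern: a reduction kernel accumulates its scalar result into
    // the zero-initialized device location result_device, which is then
    // copied back to the host and freed again.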
    template <typename Number>
    Number
    Vector<Number>::operator*(const Vector<Number> &V) const
    {
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements"));

      Number     *result_device;
      cudaError_t error_code =
        cudaMalloc(&result_device, n_elements * sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::double_vector_reduction<Number, kernel::DotProduct<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  V.val.get(),
                                                  static_cast<unsigned int>(
                                                    n_elements));

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    void
    Vector<Number>::add(const Number a)
    {
      AssertIsFinite(a);
      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::vec_add<Number>
        <<<n_blocks, block_size>>>(val.get(), a, n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    void
    Vector<Number>::add(const Number a, const Vector<Number> &V)
    {
      AssertIsFinite(a);

      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::add_aV<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, V.val.get(), n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    void
    Vector<Number>::add(const Number          a,
                        const Vector<Number> &V,
                        const Number          b,
                        const Vector<Number> &W)
    {
      AssertIsFinite(a);
      AssertIsFinite(b);

      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      Assert(W.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::add_aVbW<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, V.val.get(), b, W.val.get(), n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    void
    Vector<Number>::sadd(const Number          s,
                         const Number          a,
                         const Vector<Number> &V)
    {
      AssertIsFinite(s);
      AssertIsFinite(a);

      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::sadd<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        s, val.get(), a, V.val.get(), n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    void
    Vector<Number>::scale(const Vector<Number> &scaling_factors)
    {
      Assert(scaling_factors.size() == this->size(),
             ExcMessage(
               "Cannot scale two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::scale<Number>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(val.get(),
                                                  scaling_factors.val.get(),
                                                  n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    void
    Vector<Number>::equ(const Number a, const Vector<Number> &V)
    {
      AssertIsFinite(a);

      Assert(
        V.size() == this->size(),
        ExcMessage(
          "Cannot assign two vectors with different numbers of elements."));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::equ<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(val.get(),
                                                                   a,
                                                                   V.val.get(),
                                                                   n_elements);
      AssertCudaKernel();
    }


    template <typename Number>
    bool
    Vector<Number>::all_zero() const
    {
      return (linfty_norm() == 0) ? true : false;
    }


    template <typename Number>
    typename Vector<Number>::value_type
    Vector<Number>::mean_value() const
    {
      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::reduction<Number, kernel::ElemSum<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result /
             static_cast<typename Vector<Number>::value_type>(n_elements);
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::l1_norm() const
    {
      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::reduction<Number, kernel::L1Norm<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::l2_norm() const
    {
      return std::sqrt(norm_sqr());
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::norm_sqr() const
    {
      return (*this) * (*this);
    }


    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::linfty_norm() const
    {
      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::reduction<Number, kernel::LInfty<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      // Free the memory on the device
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    Number
    Vector<Number>::add_and_dot(const Number          a,
                                const Vector<Number> &V,
                                const Vector<Number> &W)
    {
      AssertIsFinite(a);

      Assert(V.size() == this->size(),
             ExcMessage("Vector V has the wrong size."));
      Assert(W.size() == this->size(),
             ExcMessage("Vector W has the wrong size."));

      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + (n_elements - 1) / (chunk_size * block_size);
      kernel::add_and_dot<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        result_device, val.get(), V.val.get(), W.val.get(), a, n_elements);

      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      Utilities::CUDA::free(result_device);

      return result;
    }


    template <typename Number>
    void
    Vector<Number>::print(std::ostream      &out,
                          const unsigned int precision,
                          const bool         scientific,
                          const bool) const
    {
      AssertThrow(out.fail() == false, ExcIO());
      std::ios::fmtflags old_flags     = out.flags();
      unsigned int       old_precision = out.precision(precision);

      out.precision(precision);
      if (scientific)
        out.setf(std::ios::scientific, std::ios::floatfield);
      else
        out.setf(std::ios::fixed, std::ios::floatfield);

      out << "IndexSet: ";
      complete_index_set(n_elements).print(out);
      out << std::endl;

      // Copy the vector to the host
      std::vector<Number> cpu_val(n_elements);
      Utilities::CUDA::copy_to_host(val.get(), cpu_val);
      for (unsigned int i = 0; i < n_elements; ++i)
        out << cpu_val[i] << std::endl;
      out << std::flush;

      AssertThrow(out.fail() == false, ExcIO());
      // reset output format
      out.flags(old_flags);
      out.precision(old_precision);
    }


    template <typename Number>
    std::size_t
    Vector<Number>::memory_consumption() const
    {
      std::size_t memory = sizeof(*this);
      memory += sizeof(Number) * static_cast<std::size_t>(n_elements);

      return memory;
    }


    // Explicit instantiations
    template class Vector<float>;
    template class Vector<double>;
  } // namespace CUDAWrappers
} // namespace LinearAlgebra

DEAL_II_NAMESPACE_CLOSE

#endif
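
A minimal usage sketch (not part of the file above), assuming deal.II was configured with DEAL_II_WITH_CUDA and the translation unit is compiled with nvcc; the vector size and fill pattern are purely illustrative:

#include <deal.II/lac/cuda_vector.h>
#include <deal.II/lac/read_write_vector.h>

using namespace dealii;

int main()
{
  const unsigned int size = 100;

  // Fill a host-side vector element by element.
  LinearAlgebra::ReadWriteVector<double> host_vector(size);
  for (unsigned int i = 0; i < size; ++i)
    host_vector(i) = static_cast<double>(i);

  // Allocate a device vector of the same size and copy the host values over.
  LinearAlgebra::CUDAWrappers::Vector<double> device_vector(size);
  device_vector.import_elements(host_vector, VectorOperation::insert);

  // Run a few of the operations defined in this file on the device.
  device_vector *= 2.;
  const double l2 = device_vector.l2_norm();
  (void)l2;

  return 0;
}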