#ifdef DEAL_II_WITH_CUDA

DEAL_II_NAMESPACE_OPEN

namespace LinearAlgebra
{
  namespace CUDAWrappers
  {
    using ::dealii::CUDAWrappers::block_size;
    using ::dealii::CUDAWrappers::chunk_size;

    template <typename Number>
    Vector<Number>::Vector()
      : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
      , n_elements(0)
    {}

    template <typename Number>
    Vector<Number>::Vector(const Vector<Number> &V)
      : val(Utilities::CUDA::allocate_device_data<Number>(V.n_elements),
            Utilities::CUDA::delete_device_data<Number>)
      , n_elements(V.n_elements)
    {
      // Copy the values of V, device to device.
      const cudaError_t error_code = cudaMemcpy(val.get(),
                                                V.val.get(),
                                                n_elements * sizeof(Number),
                                                cudaMemcpyDeviceToDevice);
      AssertCuda(error_code);
    }

    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator=(const Vector<Number> &V)
    {
      if (n_elements < V.n_elements)
        reinit(V.n_elements, true);
      else
        n_elements = V.n_elements;

      // Copy the values of V, device to device.
      const cudaError_t error_code = cudaMemcpy(val.get(),
                                                V.val.get(),
                                                n_elements * sizeof(Number),
                                                cudaMemcpyDeviceToDevice);
      AssertCuda(error_code);

      return *this;
    }

    template <typename Number>
    Vector<Number>::Vector(const size_type n)
      : val(nullptr, Utilities::CUDA::delete_device_data<Number>)
      , n_elements(0)
    {
      reinit(n, false);
    }

    template <typename Number>
    void
    Vector<Number>::reinit(const size_type n, const bool omit_zeroing_entries)
    {
      if (n == 0)
        val.reset();
      else if (n != n_elements)
        val.reset(Utilities::CUDA::allocate_device_data<Number>(n));

      // Zero the (possibly newly allocated) entries unless asked not to.
      if (omit_zeroing_entries == false)
        {
          const cudaError_t error_code =
            cudaMemset(val.get(), 0, n * sizeof(Number));
          AssertCuda(error_code);
        }
      n_elements = n;
    }
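
    // Usage sketch (illustrative comment, not part of this file): the size
    // constructor and reinit() allocate device memory and, unless
    // omit_zeroing_entries is set, zero it with cudaMemset:
    //
    //   LinearAlgebra::CUDAWrappers::Vector<double> v(100); // 100 zeros
    //   v.reinit(200);       // reallocate and zero 200 entries
    //   v.reinit(300, true); // reallocate, leave entries uninitialized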

    template <typename Number>
    void
    Vector<Number>::reinit(const Vector<Number> &V,
                           const bool            omit_zeroing_entries)
    {
      reinit(V.size(), omit_zeroing_entries);
    }

    template <typename Number>
    void
    Vector<Number>::import_elements(
      const ReadWriteVector<Number> &V,
      const VectorOperation::values  operation,
      const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase> &)
    {
      if (operation == VectorOperation::insert)
        {
          const cudaError_t error_code = cudaMemcpy(val.get(),
                                                    V.begin(),
                                                    n_elements * sizeof(Number),
                                                    cudaMemcpyHostToDevice);
          AssertCuda(error_code);
        }
      else if (operation == VectorOperation::add)
        {
          // Copy the host values to a temporary buffer on the device ...
          Number     *tmp;
          cudaError_t error_code =
            cudaMalloc(&tmp, n_elements * sizeof(Number));
          AssertCuda(error_code);
          error_code = cudaMemcpy(tmp,
                                  V.begin(),
                                  n_elements * sizeof(Number),
                                  cudaMemcpyHostToDevice);
          AssertCuda(error_code);

          // ... add the two vectors on the device ...
          const int n_blocks = 1 + n_elements / (chunk_size * block_size);
          kernel::vector_bin_op<Number, kernel::Binop_Addition>
            <<<n_blocks, block_size>>>(val.get(), tmp, n_elements);
          AssertCudaKernel();

          // ... and release the temporary buffer.
          Utilities::CUDA::free(tmp);
        }
      else
        AssertThrow(false, ExcNotImplemented());
    }
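
    // Usage sketch (illustrative comment, not part of this file): host data
    // reaches the device through a ReadWriteVector. VectorOperation::insert
    // overwrites the device values with a single cudaMemcpy, while
    // VectorOperation::add stages the host data in a temporary device buffer
    // and launches the addition kernel:
    //
    //   LinearAlgebra::ReadWriteVector<double> rw(v.size());
    //   for (unsigned int i = 0; i < rw.size(); ++i)
    //     rw(i) = static_cast<double>(i);
    //   v.import_elements(rw, VectorOperation::insert); // v = rw
    //   v.import_elements(rw, VectorOperation::add);    // v += rw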

    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator=(const Number s)
    {
      Assert(s == Number(), ExcMessage("Only 0 can be assigned."));
      (void)s;

      const cudaError_t error_code =
        cudaMemset(val.get(), 0, n_elements * sizeof(Number));
      AssertCuda(error_code);

      return *this;
    }

    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator*=(const Number factor)
    {
      AssertIsFinite(factor);
      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::vec_scale<Number>
        <<<n_blocks, block_size>>>(val.get(), factor, n_elements);
      AssertCudaKernel();

      return *this;
    }

    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator/=(const Number factor)
    {
      AssertIsFinite(factor);
      Assert(factor != Number(0.), ExcZero());
      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::vec_scale<Number>
        <<<n_blocks, block_size>>>(val.get(), 1. / factor, n_elements);
      AssertCudaKernel();

      return *this;
    }
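
    // Semantics sketch (illustrative comment): both operators reuse the
    // vec_scale kernel, operator/= simply passing the reciprocal factor:
    //
    //   v *= 2.0; // every element doubled on the device
    //   v /= 4.0; // same kernel, launched with factor 0.25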

    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator+=(const Vector<Number> &V)
    {
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::vector_bin_op<Number, kernel::Binop_Addition>
        <<<n_blocks, block_size>>>(val.get(), V.val.get(), n_elements);
      AssertCudaKernel();

      return *this;
    }

    template <typename Number>
    Vector<Number> &
    Vector<Number>::operator-=(const Vector<Number> &V)
    {
      Assert(V.size() == this->size(),
             ExcMessage("Cannot subtract two vectors with different numbers "
                        "of elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::vector_bin_op<Number, kernel::Binop_Subtraction>
        <<<n_blocks, block_size>>>(val.get(), V.val.get(), n_elements);
      AssertCudaKernel();

      return *this;
    }

    template <typename Number>
    Number
    Vector<Number>::operator*(const Vector<Number> &V) const
    {
      Assert(V.size() == this->size(),
             ExcMessage("Cannot compute the scalar product of two vectors "
                        "with different numbers of elements."));

      Number     *result_device;
      cudaError_t error_code =
        cudaMalloc(&result_device, n_elements * sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::double_vector_reduction<Number, kernel::DotProduct<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  V.val.get(),
                                                  static_cast<unsigned int>(
                                                    n_elements));

      // Copy the result back to the host and free the device buffer.
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      error_code = cudaFree(result_device);
      AssertCuda(error_code);

      return result;
    }
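
    // Usage sketch (illustrative comment): the scalar product is a device
    // tree reduction followed by a single device-to-host copy of the result:
    //
    //   const double vw = v * w; // sum_i v_i * w_i
    //   const double n2 = v * v; // squared l2 norm, cf. norm_sqr() below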

    template <typename Number>
    void
    Vector<Number>::add(const Number a)
    {
      AssertIsFinite(a);
      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::vec_add<Number>
        <<<n_blocks, block_size>>>(val.get(), a, n_elements);
      AssertCudaKernel();
    }

    template <typename Number>
    void
    Vector<Number>::add(const Number a, const Vector<Number> &V)
    {
      AssertIsFinite(a);
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::add_aV<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, V.val.get(), n_elements);
      AssertCudaKernel();
    }

    template <typename Number>
    void
    Vector<Number>::add(const Number          a,
                        const Vector<Number> &V,
                        const Number          b,
                        const Vector<Number> &W)
    {
      AssertIsFinite(a);
      AssertIsFinite(b);
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));
      Assert(W.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::add_aVbW<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, V.val.get(), b, W.val.get(), n_elements);
      AssertCudaKernel();
    }

    template <typename Number>
    void
    Vector<Number>::sadd(const Number          s,
                         const Number          a,
                         const Vector<Number> &V)
    {
      AssertIsFinite(s);
      AssertIsFinite(a);
      Assert(V.size() == this->size(),
             ExcMessage(
               "Cannot add two vectors with different numbers of elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::sadd<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        s, val.get(), a, V.val.get(), n_elements);
      AssertCudaKernel();
    }
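
    // Semantics sketch (illustrative comment) for the add()/sadd() family,
    // each a single kernel launch:
    //
    //   v.add(c);          // v_i += c
    //   v.add(a, x);       // v_i += a * x_i
    //   v.add(a, x, b, y); // v_i += a * x_i + b * y_i
    //   v.sadd(s, a, x);   // v_i  = s * v_i + a * x_i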

    template <typename Number>
    void
    Vector<Number>::scale(const Vector<Number> &scaling_factors)
    {
      Assert(scaling_factors.size() == this->size(),
             ExcMessage("Cannot scale two vectors with different numbers of "
                        "elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::scale<Number>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(val.get(),
                                                  scaling_factors.val.get(),
                                                  n_elements);
      AssertCudaKernel();
    }

    template <typename Number>
    void
    Vector<Number>::equ(const Number a, const Vector<Number> &V)
    {
      AssertIsFinite(a);
      Assert(V.size() == this->size(),
             ExcMessage("Cannot assign two vectors with different numbers of "
                        "elements."));

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::equ<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        val.get(), a, V.val.get(), n_elements);
      AssertCudaKernel();
    }

    template <typename Number>
    bool
    Vector<Number>::all_zero() const
    {
      return (linfty_norm() == 0) ? true : false;
    }

    template <typename Number>
    typename Vector<Number>::value_type
    Vector<Number>::mean_value() const
    {
      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::reduction<Number, kernel::ElemSum<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host and free the device buffer.
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      error_code = cudaFree(result_device);
      AssertCuda(error_code);

      return result /
             static_cast<typename Vector<Number>::value_type>(n_elements);
    }
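
    // Semantics sketch (illustrative comment): the ElemSum reduction yields
    // the plain sum; the division by the length happens on the host:
    //
    //   const double m = v.mean_value(); // (sum_i v_i) / v.size()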

    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::l1_norm() const
    {
      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::reduction<Number, kernel::L1Norm<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host and free the device buffer.
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      error_code = cudaFree(result_device);
      AssertCuda(error_code);

      return result;
    }

    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::l2_norm() const
    {
      return std::sqrt(norm_sqr());
    }

    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::norm_sqr() const
    {
      return (*this) * (*this);
    }

    template <typename Number>
    typename Vector<Number>::real_type
    Vector<Number>::linfty_norm() const
    {
      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::reduction<Number, kernel::LInfty<Number>>
        <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
                                                  val.get(),
                                                  n_elements);

      // Copy the result back to the host and free the device buffer.
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      error_code = cudaFree(result_device);
      AssertCuda(error_code);

      return result;
    }
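
    // Usage sketch (illustrative comment): every norm follows the same
    // pattern of a one-pass device reduction plus one cudaMemcpy:
    //
    //   const double l1  = v.l1_norm();     // sum_i |v_i|
    //   const double l2  = v.l2_norm();     // sqrt(v * v)
    //   const double max = v.linfty_norm(); // max_i |v_i|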

    template <typename Number>
    Number
    Vector<Number>::add_and_dot(const Number          a,
                                const Vector<Number> &V,
                                const Vector<Number> &W)
    {
      AssertIsFinite(a);
      Assert(V.size() == this->size(),
             ExcMessage("Vector V has the wrong size."));
      Assert(W.size() == this->size(),
             ExcMessage("Vector W has the wrong size."));

      Number     *result_device;
      cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
      AssertCuda(error_code);
      error_code = cudaMemset(result_device, 0, sizeof(Number));
      AssertCuda(error_code);

      const int n_blocks = 1 + n_elements / (chunk_size * block_size);
      kernel::add_and_dot<Number><<<dim3(n_blocks, 1), dim3(block_size)>>>(
        result_device, val.get(), V.val.get(), W.val.get(), a, n_elements);

      // Copy the result back to the host and free the device buffer.
      Number result;
      error_code = cudaMemcpy(&result,
                              result_device,
                              sizeof(Number),
                              cudaMemcpyDeviceToHost);
      AssertCuda(error_code);
      error_code = cudaFree(result_device);
      AssertCuda(error_code);

      return result;
    }
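
    // Semantics sketch (illustrative comment): add_and_dot() fuses an update
    // and a dot product into one kernel, saving a pass over the data in
    // iterative solvers such as conjugate gradient:
    //
    //   const double r = v.add_and_dot(a, x, w);
    //   // equivalent to, but cheaper than:
    //   //   v.add(a, x);
    //   //   const double r = v * w;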

    template <typename Number>
    void
    Vector<Number>::print(std::ostream      &out,
                          const unsigned int precision,
                          const bool         scientific,
                          const bool) const
    {
      AssertThrow(out.fail() == false, ExcIO());
      std::ios::fmtflags old_flags     = out.flags();
      unsigned int       old_precision = out.precision(precision);

      out.precision(precision);
      if (scientific)
        out.setf(std::ios::scientific, std::ios::floatfield);
      else
        out.setf(std::ios::fixed, std::ios::floatfield);

      out << "IndexSet: ";
      complete_index_set(n_elements).print(out);
      out << std::endl;

      // Copy the vector to the host and print the elements.
      std::vector<Number> cpu_val(n_elements);
      Utilities::CUDA::copy_to_host(val.get(), cpu_val);
      for (unsigned int i = 0; i < n_elements; ++i)
        out << cpu_val[i] << std::endl;
      out << std::flush;

      AssertThrow(out.fail() == false, ExcIO());

      // Restore the original stream state.
      out.flags(old_flags);
      out.precision(old_precision);
    }
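
    // End-to-end sketch (illustrative comment; assumes deal.II was built with
    // DEAL_II_WITH_CUDA and that <deal.II/lac/cuda_vector.h> and
    // <deal.II/lac/read_write_vector.h> are included):
    //
    //   LinearAlgebra::ReadWriteVector<double> rw(3);
    //   rw(0) = 1.;
    //   rw(1) = 2.;
    //   rw(2) = 3.;
    //
    //   LinearAlgebra::CUDAWrappers::Vector<double> v(3);
    //   v.import_elements(rw, VectorOperation::insert); // host -> device
    //   v *= 2.;                                        // v = (2, 4, 6)
    //   std::cout << v.l1_norm() << std::endl;          // prints 12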

    template <typename Number>
    std::size_t
    Vector<Number>::memory_consumption() const
    {
      std::size_t memory = sizeof(*this);
      memory += sizeof(Number) * static_cast<std::size_t>(n_elements);

      return memory;
    }

    // Explicit template instantiations.
    template class Vector<float>;
    template class Vector<double>;
  } // namespace CUDAWrappers
} // namespace LinearAlgebra

DEAL_II_NAMESPACE_CLOSE

#endif