#ifndef IFPACK2_DETAILS_SCALEDDAMPEDRESIDUAL_DEF_HPP
#define IFPACK2_DETAILS_SCALEDDAMPEDRESIDUAL_DEF_HPP

#include "Tpetra_CrsMatrix.hpp"
#include "Tpetra_MultiVector.hpp"
#include "Tpetra_Operator.hpp"
#include "Tpetra_Vector.hpp"
#include "Tpetra_Export_decl.hpp"
#include "Tpetra_Import_decl.hpp"
#include "Kokkos_ArithTraits.hpp"
#include "Teuchos_Assert.hpp"
#include "KokkosSparse_spmv_impl.hpp"
namespace Ifpack2 {
namespace Details {
namespace Impl {

/// \brief Functor for computing W := alpha * D * (B - A*X) + beta * W.
template<class WVector,
         class DVector,
         class BVector,
         class AMatrix,
         class XVector,
         class Scalar,
         bool use_beta>
struct ScaledDampedResidualVectorFunctor {
  static_assert (static_cast<int> (WVector::Rank) == 1,
                 "WVector must be a rank 1 View.");
  static_assert (static_cast<int> (DVector::Rank) == 1,
                 "DVector must be a rank 1 View.");
  static_assert (static_cast<int> (BVector::Rank) == 1,
                 "BVector must be a rank 1 View.");
  static_assert (static_cast<int> (XVector::Rank) == 1,
                 "XVector must be a rank 1 View.");
  using execution_space = typename AMatrix::execution_space;
  using LO = typename AMatrix::non_const_ordinal_type;
  using value_type = typename AMatrix::non_const_value_type;
  using team_policy = typename Kokkos::TeamPolicy<execution_space>;
  using team_member = typename team_policy::member_type;
  using ATV = Kokkos::ArithTraits<value_type>;
  const Scalar alpha;
  WVector m_w;
  DVector m_d;
  BVector m_b;
  AMatrix m_A;
  XVector m_x;
  const Scalar beta;
  const LO rows_per_team;

  ScaledDampedResidualVectorFunctor (const Scalar& alpha_,
                                     const WVector& m_w_,
                                     const DVector& m_d_,
                                     const BVector& m_b_,
                                     const AMatrix& m_A_,
                                     const XVector& m_x_,
                                     const Scalar& beta_,
                                     const int rows_per_team_) :
    alpha (alpha_),
    m_w (m_w_),
    m_d (m_d_),
    m_b (m_b_),
    m_A (m_A_),
    m_x (m_x_),
    beta (beta_),
    rows_per_team (rows_per_team_)
  {
    const size_t numRows = m_A.numRows ();
    const size_t numCols = m_A.numCols ();

    TEUCHOS_ASSERT( m_w.extent (0) == m_d.extent (0) );
    TEUCHOS_ASSERT( m_w.extent (0) == m_b.extent (0) );
    TEUCHOS_ASSERT( numRows == size_t (m_w.extent (0)) );
    TEUCHOS_ASSERT( numCols <= size_t (m_x.extent (0)) );
  }
  KOKKOS_INLINE_FUNCTION
  void operator() (const team_member& dev) const
  {
    using residual_value_type = typename BVector::non_const_value_type;
    using KAT = Kokkos::ArithTraits<residual_value_type>;

    Kokkos::parallel_for
      (Kokkos::TeamThreadRange (dev, 0, rows_per_team),
       [&] (const LO& loop) {
        const LO lclRow =
          static_cast<LO> (dev.league_rank ()) * rows_per_team + loop;
        if (lclRow >= m_A.numRows ()) {
          return;
        }
        const auto A_row = m_A.rowConst(lclRow);
        const LO row_length = static_cast<LO> (A_row.length);
        residual_value_type A_x = KAT::zero ();

        Kokkos::parallel_reduce
          (Kokkos::ThreadVectorRange (dev, row_length),
           [&] (const LO iEntry, residual_value_type& lsum) {
            const auto A_val = A_row.value(iEntry);
            lsum += A_val * m_x(A_row.colidx(iEntry));
          }, A_x);

        Kokkos::single
          (Kokkos::PerThread(dev),
           [&] () {
            const auto alpha_D_res =
              alpha * m_d(lclRow) * (m_b(lclRow) - A_x);
            if (use_beta) {
              m_w(lclRow) = beta * m_w(lclRow) + alpha_D_res;
            }
            else {
              m_w(lclRow) = alpha_D_res;
            }
          });
      });
  }
};
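// How the functor parallelizes: each Kokkos team handles a contiguous
// block of rows_per_team rows, each thread in the team handles one row,
// and the vector lanes of a thread reduce over that row's nonzeros.
// Per owned row i, the update is simply
//
//   w(i) = beta * w(i) + alpha * d(i) * (b(i) - sum_j A(i,j) * x(j)),
//
// with the use_beta == false instantiation omitting the read of w(i).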
// W := alpha * D * (B - A*X) + beta * W.
template<class WVector, class DVector, class BVector,
         class AMatrix, class XVector, class Scalar>
static void
scaled_damped_residual_vector (const Scalar& alpha, const WVector& w,
                               const DVector& d, const BVector& b,
                               const AMatrix& A, const XVector& x,
                               const Scalar& beta)
{
  using execution_space = typename AMatrix::execution_space;

  if (A.numRows () == 0) {
    return;
  }

  int team_size = -1;
  int vector_length = -1;
  int64_t rows_per_thread = -1;

  const int64_t rows_per_team = KokkosSparse::Impl::spmv_launch_parameters<execution_space>(A.numRows(), A.nnz(), rows_per_thread, team_size, vector_length);
  int64_t worksets = (b.extent (0) + rows_per_team - 1) / rows_per_team;

  using Kokkos::Dynamic;
  using Kokkos::Static;
  using Kokkos::Schedule;
  using Kokkos::TeamPolicy;
  using policy_type_dynamic = TeamPolicy<execution_space, Schedule<Dynamic> >;
  using policy_type_static = TeamPolicy<execution_space, Schedule<Static> >;
  const char kernel_label[] = "scaled_damped_residual_vector";
  policy_type_dynamic policyDynamic (1, 1);
  policy_type_static policyStatic (1, 1);
  if (team_size < 0) {
    policyDynamic = policy_type_dynamic (worksets, Kokkos::AUTO, vector_length);
    policyStatic = policy_type_static (worksets, Kokkos::AUTO, vector_length);
  }
  else {
    policyDynamic = policy_type_dynamic (worksets, team_size, vector_length);
    policyStatic = policy_type_static (worksets, team_size, vector_length);
  }
  // Canonicalize template arguments to avoid redundant instantiations.
  using w_vec_type = typename WVector::non_const_type;
  using d_vec_type = typename DVector::const_type;
  using b_vec_type = typename BVector::const_type;
  using matrix_type = AMatrix;
  using x_vec_type = typename XVector::const_type;
  using scalar_type = typename Kokkos::ArithTraits<Scalar>::val_type;
  if (beta == Kokkos::ArithTraits<Scalar>::zero ()) {
    constexpr bool use_beta = false;
    using functor_type =
      ScaledDampedResidualVectorFunctor<w_vec_type, d_vec_type,
                                        b_vec_type, matrix_type,
                                        x_vec_type, scalar_type,
                                        use_beta>;
    functor_type func (alpha, w, d, b, A, x, beta, rows_per_team);
    // Heuristic: prefer dynamic scheduling for very large matrices,
    // where load imbalance between teams is more likely.
    if (A.nnz () > 10000000) {
      Kokkos::parallel_for (kernel_label, policyDynamic, func);
    }
    else {
      Kokkos::parallel_for (kernel_label, policyStatic, func);
    }
  }
  else {
    constexpr bool use_beta = true;
    using functor_type =
      ScaledDampedResidualVectorFunctor<w_vec_type, d_vec_type,
                                        b_vec_type, matrix_type,
                                        x_vec_type, scalar_type,
                                        use_beta>;
    functor_type func (alpha, w, d, b, A, x, beta, rows_per_team);
    if (A.nnz () > 10000000) {
      Kokkos::parallel_for (kernel_label, policyDynamic, func);
    }
    else {
      Kokkos::parallel_for (kernel_label, policyStatic, func);
    }
  }
}

} // namespace Impl
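// A minimal usage sketch of the kernel above (not part of the build; the
// view and matrix names are hypothetical). Given a local sparse matrix
// A_lcl and rank-1 device Views w, d, b, and x of matching extents, a
// damped Jacobi step with damping 2/3 and no accumulation would be:
//
//   const double alpha = 2.0 / 3.0;
//   const double beta = 0.0;  // picks the use_beta == false functor
//   Impl::scaled_damped_residual_vector (alpha, w, d, b, A_lcl, x, beta);
//
// fusedCase (defined below) makes exactly this call, using local views
// extracted from the Tpetra objects.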
template<class TpetraOperatorType>
ScaledDampedResidual<TpetraOperatorType>::
ScaledDampedResidual (const Teuchos::RCP<const operator_type>& A)
{
  setMatrix (A);
}
template<class TpetraOperatorType>
void
ScaledDampedResidual<TpetraOperatorType>::
setMatrix (const Teuchos::RCP<const operator_type>& A)
{
  if (A_op_.get () != A.get ()) {
    A_op_ = A;

    // We'll (re)allocate these on demand.
    X_colMap_ = std::unique_ptr<vector_type> (nullptr);
    V1_ = std::unique_ptr<multivector_type> (nullptr);

    using Teuchos::rcp_dynamic_cast;
    Teuchos::RCP<const crs_matrix_type> A_crs =
      rcp_dynamic_cast<const crs_matrix_type> (A);
    if (A_crs.is_null ()) {
      A_crs_ = Teuchos::null;
      imp_ = Teuchos::null;
      exp_ = Teuchos::null;
    }
    else {
      TEUCHOS_ASSERT( A_crs->isFillComplete () );
      A_crs_ = A_crs;
      auto G = A_crs->getCrsGraph ();
      imp_ = G->getImporter ();
      exp_ = G->getExporter ();
    }
  }
}
template<class TpetraOperatorType>
void
ScaledDampedResidual<TpetraOperatorType>::
compute (multivector_type& W,
         const SC& alpha,
         vector_type& D_inv,
         multivector_type& B,
         multivector_type& X,
         const SC& beta)
{
  if (canFuse (B)) {
    W_vec_ = W.getVectorNonConst (0);
    B_vec_ = B.getVectorNonConst (0);
    X_vec_ = X.getVectorNonConst (0);
    TEUCHOS_ASSERT( ! A_crs_.is_null () );
    fusedCase (*W_vec_, alpha, D_inv, *B_vec_, *A_crs_, *X_vec_, beta);
  }
  else {
    TEUCHOS_ASSERT( ! A_op_.is_null () );
    unfusedCase (W, alpha, D_inv, B, *A_op_, X, beta);
  }
}
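// A minimal usage sketch (hypothetical names, not part of the build):
// given a fill-complete Tpetra::CrsMatrix wrapped as an operator A_op,
// a damped-residual update W := alpha * D_inv * (B - A*X) + beta * W is
//
//   Ifpack2::Details::ScaledDampedResidual<op_type> sdr (A_op);
//   sdr.compute (W, alpha, D_inv, B, X, beta);
//
// compute dispatches to the fused single-kernel path when canFuse (B)
// holds, and otherwise to unfusedCase (Operator::apply followed by
// elementWiseMultiply).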
template<class TpetraOperatorType>
typename ScaledDampedResidual<TpetraOperatorType>::vector_type&
ScaledDampedResidual<TpetraOperatorType>::
importVector (vector_type& X_domMap)
{
  if (imp_.is_null ()) {
    return X_domMap;
  }
  else {
    if (X_colMap_.get () == nullptr) {
      using V = vector_type;
      X_colMap_ = std::unique_ptr<V> (new V (imp_->getTargetMap ()));
    }
    X_colMap_->doImport (X_domMap, *imp_, Tpetra::REPLACE);
    return *X_colMap_;
  }
}
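// importVector is the halo exchange for the fused path: X lives on the
// domain Map, but the row-local sparse mat-vec indexes X by the column
// Map, so off-process entries must be brought over with the matrix's
// Import before the kernel runs. If the Import is trivial (imp_ is null),
// the domain-Map vector can be used directly.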
template<class TpetraOperatorType>
bool
ScaledDampedResidual<TpetraOperatorType>::
canFuse (const multivector_type& B) const
{
  return B.getNumVectors () == size_t (1) &&
    ! A_crs_.is_null () &&
    exp_.is_null ();
}
template<class TpetraOperatorType>
void
ScaledDampedResidual<TpetraOperatorType>::
unfusedCase (multivector_type& W,
             const SC& alpha,
             vector_type& D_inv,
             multivector_type& B,
             const operator_type& A,
             multivector_type& X,
             const SC& beta)
{
  if (V1_.get () == nullptr) {
    using MV = multivector_type;
    const size_t numVecs = B.getNumVectors ();
    V1_ = std::unique_ptr<MV> (new MV (B.getMap (), numVecs));
  }
  const SC one = Teuchos::ScalarTraits<SC>::one ();

  // V1 = B - A*X.
  Tpetra::deep_copy (*V1_, B);
  A.apply (X, *V1_, Teuchos::NO_TRANS, -one, one);

  // W := alpha * D_inv * V1 + beta * W.
  W.elementWiseMultiply (alpha, D_inv, *V1_, beta);
}
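// Note on the unfused path: after V1 is seeded with a copy of B, the call
// A.apply (X, *V1_, NO_TRANS, -one, one) computes V1 := one*V1 - one*A*X,
// i.e. the residual B - A*X, without any extra temporary. The subsequent
// elementWiseMultiply applies the diagonal scaling and damping in a second
// sweep over memory; the fused path merges both sweeps into one kernel.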
template<class TpetraOperatorType>
void
ScaledDampedResidual<TpetraOperatorType>::
fusedCase (vector_type& W,
           const SC& alpha,
           vector_type& D_inv,
           vector_type& B,
           const crs_matrix_type& A,
           vector_type& X,
           const SC& beta)
{
  vector_type& X_colMap = importVector (X);

  using Impl::scaled_damped_residual_vector;
  using STS = Teuchos::ScalarTraits<SC>;

  auto A_lcl = A.getLocalMatrixDevice ();
  auto Dinv_lcl = Kokkos::subview(D_inv.getLocalViewDevice(Tpetra::Access::ReadOnly), Kokkos::ALL(), 0);
  auto B_lcl = Kokkos::subview(B.getLocalViewDevice(Tpetra::Access::ReadOnly), Kokkos::ALL(), 0);
  auto X_lcl = Kokkos::subview(X_colMap.getLocalViewDevice(Tpetra::Access::ReadOnly), Kokkos::ALL(), 0);
  if (beta == STS::zero ()) {
    auto W_lcl = Kokkos::subview(W.getLocalViewDevice(Tpetra::Access::OverwriteAll), Kokkos::ALL(), 0);
    scaled_damped_residual_vector (alpha, W_lcl, Dinv_lcl,
                                   B_lcl, A_lcl, X_lcl, beta);
  }
  else {
    auto W_lcl = Kokkos::subview(W.getLocalViewDevice(Tpetra::Access::ReadWrite), Kokkos::ALL(), 0);
    scaled_damped_residual_vector (alpha, W_lcl, Dinv_lcl,
                                   B_lcl, A_lcl, X_lcl, beta);
  }
}
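// Design note: when beta == 0, W is requested with Access::OverwriteAll,
// which tells Tpetra that W's current device contents need not be valid
// (the use_beta == false functor writes every owned entry without reading
// it), potentially avoiding a host-to-device sync. When beta != 0 the
// functor reads m_w(lclRow), so ReadWrite access is required.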
} // namespace Details
} // namespace Ifpack2

// Explicit template instantiation macro for ScaledDampedResidual.
#define IFPACK2_DETAILS_SCALEDDAMPEDRESIDUAL_INSTANT(SC,LO,GO,NT) \
  template class Ifpack2::Details::ScaledDampedResidual<Tpetra::Operator<SC, LO, GO, NT> >;

#endif // IFPACK2_DETAILS_SCALEDDAMPEDRESIDUAL_DEF_HPP