void
LocalSparseTriangularSolver<MatrixType>::
localApply (const MV& X,
            MV& Y,
            const Teuchos::ETransp mode,
            const scalar_type& alpha,
            const scalar_type& beta) const
{
  typedef scalar_type ST;
  typedef Teuchos::ScalarTraits<ST> STS;

  // Compute Y := beta*Y + alpha*(M^{-1} X), where M^{-1} X denotes the
  // result of the local (per-process) triangular solve with A_crs_.
  // The branches below follow Sparse BLAS update rules: when a scaling
  // coefficient is exactly zero, the corresponding term is skipped
  // entirely, so NaN or Inf entries in the skipped operand cannot
  // propagate (e.g., 0*NaN must not appear in the output).
  if (beta == STS::zero ()) {
    if (alpha == STS::zero ()) {
      // Y := 0.  Ignore the current contents of Y, per BLAS rules.
      Y.putScalar (STS::zero ());
    }
    else { // alpha != 0
      // localSolve overwrites Y with M^{-1} X; scale afterwards if needed.
      A_crs_->template localSolve<ST, ST> (X, Y, mode);
      if (alpha != STS::one ()) {
        Y.scale (alpha);
      }
    }
  }
  else { // beta != 0
    if (alpha == STS::zero ()) {
      Y.scale (beta); // Y := beta * Y
    }
    else { // alpha != 0
      // localSolve overwrites its output argument, so solve into a
      // deep copy and fold the result into Y with the update formula.
      MV Y_tmp (Y, Teuchos::Copy);
      A_crs_->template localSolve<ST, ST> (X, Y_tmp, mode); // Y_tmp := M^{-1} X
      Y.update (alpha, Y_tmp, beta); // Y := beta * Y + alpha * Y_tmp
    }
  }
}
void
Chebyshev<MatrixType>::
applyImpl (const MV& X,
           MV& Y,
           Teuchos::ETransp mode,
           scalar_type alpha,
           scalar_type beta) const
{
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcp_const_cast;
  using Teuchos::rcpFromRef;

  const scalar_type zero = STS::zero ();
  const scalar_type one = STS::one ();

  // Compute Y = beta*Y + alpha*M*X, where M*X is the result of applying
  // the Chebyshev iteration (impl_) to X.
  //
  // NOTE(review): 'mode' is never referenced below; the transpose mode is
  // presumably handled (or unsupported) inside impl_.apply — confirm.

  // If alpha == 0, then we don't need to do Chebyshev at all.
  if (alpha == zero) {
    if (beta == zero) { // Obey Sparse BLAS rules; avoid 0*NaN.
      Y.putScalar (zero);
    }
    else {
      Y.scale (beta);
    }
    return;
  }

  // If beta != 0, then we need to keep a copy of the initial value of
  // Y, so that we can add beta*it to the Chebyshev result at the end.
  // Usually this method is called with beta == 0, so we don't have to
  // worry about caching Y_orig.
  RCP<MV> Y_orig;
  if (beta != zero) {
    Y_orig = rcp (new MV (Y));
  }

  // If X and Y point to the same memory location, we need to use a
  // copy of X (X_copy) as the input MV.  Otherwise, just let X_copy
  // point to X.
  //
  // This is hopefully an uncommon use case, so we don't bother to
  // optimize for it by caching X_copy.
  RCP<const MV> X_copy;
  bool copiedInput = false;
  if (X.getLocalMV ().getValues () == Y.getLocalMV ().getValues ()) {
    X_copy = rcp (new MV (X));
    copiedInput = true;
  }
  else {
    X_copy = rcpFromRef (X);
  }

  // If alpha != 1, fold alpha into (a copy of) X.
  //
  // This is an uncommon use case, so we don't bother to optimize for
  // it by caching X_copy.  However, we do check whether we've already
  // copied X above, to avoid a second copy.
  if (alpha != one) {
    RCP<MV> X_copy_nonConst = rcp_const_cast<MV> (X_copy);
    if (! copiedInput) {
      // X_copy still aliases the caller's X; make a private copy
      // before scaling so we don't modify the input argument.
      X_copy_nonConst = rcp (new MV (X));
    }
    X_copy_nonConst->scale (alpha);
    X_copy = rcp_const_cast<const MV> (X_copy_nonConst);
  }

  impl_.apply (*X_copy, Y); // Y := (Chebyshev approximation of) M * (alpha*X)

  if (beta != zero) {
    Y.update (beta, *Y_orig, one); // Y = beta * Y_orig + 1 * Y
  }
}