Example #1
// ============================================================================
void
BorderingHelpers::
dissect(const Tpetra::MultiVector<double,int,int> & x,
        Tpetra::MultiVector<double,int,int> & xSmall,
        double * lambda
       )
{
#ifndef NDEBUG
  TEUCHOS_ASSERT_EQUALITY(x.getNumVectors(), xSmall.getNumVectors());
  // Make sure the maps match.
  std::shared_ptr<const Tpetra::Map<int,int>> extendedMap =
    nosh::BorderingHelpers::extendMapBy1(xSmall.getMap());
  TEUCHOS_ASSERT(x.getMap()->isSameAs(*extendedMap));
#endif

  // Import from the extended vector into the smaller one.
  Tpetra::Import<int,int> importer(x.getMap(), xSmall.getMap());

  // Strip off the phase constraint variable.
  xSmall.doImport(x, importer, Tpetra::INSERT);

  // TODO Check if we need lambda on all procs.
  if (x.getMap()->getComm()->getRank() == 0) {
    const size_t n = x.getLocalLength();
    for (size_t k = 0; k < x.getNumVectors(); k++)
      lambda[k] = x.getData(k)[n - 1];
  }
  }

  return;
}
void SingletonFilter<MatrixType>::CreateReducedRHSTempl(const Tpetra::MultiVector<DomainScalar,LocalOrdinal,GlobalOrdinal,Node>& LHS,
                                                        const Tpetra::MultiVector<RangeScalar,LocalOrdinal,GlobalOrdinal,Node>& RHS,
                                                        Tpetra::MultiVector<RangeScalar,LocalOrdinal,GlobalOrdinal,Node>& ReducedRHS)
{
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const RangeScalar > > RHS_ptr = RHS.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const DomainScalar> > LHS_ptr = LHS.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<RangeScalar> >        ReducedRHS_ptr = ReducedRHS.get2dViewNonConst();

  size_t NumVectors = LHS.getNumVectors();

  for (size_t i = 0 ; i < NumRows_ ; ++i)
    for (size_t k = 0 ; k < NumVectors ; ++k)
      ReducedRHS_ptr[k][i] = RHS_ptr[k][InvReorder_[i]];

  for (size_t i = 0 ; i < NumRows_ ; ++i) {
    LocalOrdinal ii = InvReorder_[i];
    size_t Nnz;
    A_->getLocalRowCopy(ii,Indices_(),Values_(),Nnz);

    for (size_t j = 0 ; j < Nnz ; ++j) {
      if (Reorder_[Indices_[j]] == -1) {
        for (size_t k = 0 ; k < NumVectors ; ++k)
          ReducedRHS_ptr[k][i] -= (RangeScalar)Values_[j] * (RangeScalar)LHS_ptr[k][Indices_[j]];
      }
    }
  }
}
void TpetraOperatorWrapper::apply(const Tpetra::MultiVector<ST,LO,GO,NT>& X, Tpetra::MultiVector<ST,LO,GO,NT>& Y,Teuchos::ETransp mode,ST alpha, ST beta) const
{
   if (!useTranspose_)
   {
       // allocate space for each vector
       RCP<Thyra::MultiVectorBase<ST> > tX;
       RCP<Thyra::MultiVectorBase<ST> > tY; 

       tX = Thyra::createMembers(thyraOp_->domain(),X.getNumVectors()); 
       tY = Thyra::createMembers(thyraOp_->range(),X.getNumVectors());

       Thyra::assign(tX.ptr(),0.0);
       Thyra::assign(tY.ptr(),0.0);

       // copy Tpetra X into Thyra X
       mapStrategy_->copyTpetraIntoThyra(X, tX.ptr());
       mapStrategy_->copyTpetraIntoThyra(Y, tY.ptr()); // if this matrix isn't block square, this probably won't work!

       // perform matrix vector multiplication
       thyraOp_->apply(Thyra::NOTRANS,*tX,tY.ptr(),alpha,beta);

       // copy Thyra Y into Tpetra Y
       mapStrategy_->copyThyraIntoTpetra(tY, Y);
   }
   else
   {
       TEUCHOS_ASSERT(false);
   }
}
Example #4
void 
Chebyshev<MatrixType>::
applyMat (const Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& X,
	  Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& Y,
	  Teuchos::ETransp mode) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
   "Ifpack2::Chebyshev::applyMat(): X.getNumVectors() != Y.getNumVectors().");
  impl_.getMatrix ()->apply (X, Y, mode);
}
Example #5
void TomBlockRelaxation<MatrixType,ContainerType>::apply(
          const Tpetra::MultiVector<typename MatrixType::scalar_type,
                                    typename MatrixType::local_ordinal_type,
                                    typename MatrixType::global_ordinal_type,
                                    typename MatrixType::node_type>& X,
                Tpetra::MultiVector<typename MatrixType::scalar_type,
                                    typename MatrixType::local_ordinal_type,
                                    typename MatrixType::global_ordinal_type,
                                    typename MatrixType::node_type>& Y,
                Teuchos::ETransp mode,
                 Scalar alpha,
                 Scalar beta) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(isComputed() == false, std::runtime_error,
     "Ifpack2::TomBlockRelaxation::apply ERROR: isComputed() must be true prior to calling apply.");

  TEUCHOS_TEST_FOR_EXCEPTION(X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
     "Ifpack2::TomBlockRelaxation::apply ERROR: X.getNumVectors() != Y.getNumVectors().");

  TEUCHOS_TEST_FOR_EXCEPTION(mode != Teuchos::NO_TRANS, std::runtime_error,
			     "Ifpack2::TomBlockRelaxation::apply ERORR: transpose modes not supported.");

  Time_->start(true);

  // If X and Y are pointing to the same memory location,
  // we need to create an auxiliary vector, Xcopy
  Teuchos::RCP< const Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> > Xcopy;
  if (X.getLocalMV().getValues() == Y.getLocalMV().getValues())
    Xcopy = Teuchos::rcp( new Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>(X) );
  else
    Xcopy = Teuchos::rcp( &X, false );

  if (ZeroStartingSolution_)
    Y.putScalar(0.0);

  // Flops are updated in each of the following.
  switch (PrecType_) {
  case Ifpack2::JACOBI:
    ApplyInverseJacobi(*Xcopy,Y);
    break;
  case Ifpack2::GS:
    ApplyInverseGS(*Xcopy,Y);
    break;
  case Ifpack2::SGS:
    ApplyInverseSGS(*Xcopy,Y);
    break;
  default:
    throw std::runtime_error("Ifpack2::TomBlockRelaxation::apply internal logic error.");
  }

  ++NumApply_;
  Time_->stop();
  ApplyTime_ += Time_->totalElapsedTime();
}
void SingletonFilter<MatrixType>::UpdateLHSTempl(const Tpetra::MultiVector<DomainScalar,LocalOrdinal,GlobalOrdinal,Node>& ReducedLHS,
                                                 Tpetra::MultiVector<RangeScalar,LocalOrdinal,GlobalOrdinal,Node>& LHS)
{

  Teuchos::ArrayRCP<Teuchos::ArrayRCP<RangeScalar> >        LHS_ptr = LHS.get2dViewNonConst();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const DomainScalar> >  ReducedLHS_ptr = ReducedLHS.get2dView();

  for (size_t i = 0 ; i < NumRows_ ; ++i)
    for (size_t k = 0 ; k < LHS.getNumVectors() ; ++k)
      LHS_ptr[k][InvReorder_[i]] = (RangeScalar)ReducedLHS_ptr[k][i];
}
void
Redistributor<Node>::redistribute_reverse(const  ::Tpetra::MultiVector<double,int,int,Node> & input_vector,  ::Tpetra::MultiVector<double,int,int,Node> & output_vector)
{
  if (!created_importer_) {
    create_importer(input_vector.getMap());
  }

  // Export using the importer
  output_vector.doExport(input_vector, *importer_, ::Tpetra::INSERT);

}
Example #8
void RILUK<MatrixType>::apply(
       const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& X,
             Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Y,
             Teuchos::ETransp mode, scalar_type alpha, scalar_type beta) const
{
  typedef Teuchos::ScalarTraits<scalar_type> STS;

  TEUCHOS_TEST_FOR_EXCEPTION(!isComputed(), std::runtime_error,
    "Ifpack2::RILUK::apply() ERROR, compute() hasn't been called yet.");

  TEUCHOS_TEST_FOR_EXCEPTION(
    alpha != STS::one (), 
    std::logic_error,
    "Ifpack2::RILUK::apply() does not currently allow alpha != 1.");
  TEUCHOS_TEST_FOR_EXCEPTION(
    beta != STS::zero (), 
    std::logic_error,
    "Ifpack2::RILUK::apply() does not currently allow zero != 0.");

//
// This function finds Y such that
// LDU Y = X, or
// U(trans) D L(trans) Y = X
// for multiple RHS
//

  // First generate X and Y as needed for this function
  Teuchos::RCP<const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> > X1;
  Teuchos::RCP<Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> > Y1;
  generateXY(mode, X, Y, X1, Y1);

  scalar_type one = Teuchos::ScalarTraits<scalar_type>::one();
  scalar_type zero = Teuchos::ScalarTraits<scalar_type>::zero();

  if (mode == Teuchos::NO_TRANS) {

    L_->localSolve(*X1, *Y1,mode);
    Y1->elementWiseMultiply(one, *D_, *Y1, zero); // y = D*y (D_ has inverse of diagonal)
    U_->localSolve(*Y1, *Y1,mode); // Solve Uy = y
    if (isOverlapped_) {
      // Export computed Y values if needed
      Y.doExport(*Y1,*L_->getGraph()->getExporter(), OverlapMode_);
    }
  }
  else {
    U_->localSolve(*X1, *Y1,mode); // Solve Uy = y
    Y1->elementWiseMultiply(one, *D_, *Y1, zero); // y = D*y (D_ has inverse of diagonal)
    L_->localSolve(*Y1, *Y1,mode);
    if (isOverlapped_) {Y.doExport(*Y1,*U_->getGraph()->getImporter(), OverlapMode_);} // Export computed Y values if needed
  }

  ++numApply_;
}
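The apply() above assumes the usual Ifpack2 setup sequence has already run. The following driver is a minimal sketch, not part of the original listing; it assumes A is a Teuchos::RCP<const Tpetra::RowMatrix<double,int,int> > and that the standard Ifpack2 option name for the ILU(k) fill level applies.

#include <Ifpack2_RILUK.hpp>
#include <Teuchos_ParameterList.hpp>
#include <Tpetra_MultiVector.hpp>
#include <Tpetra_RowMatrix.hpp>

// Hypothetical driver: factor A once, then apply the preconditioner.
void applyRILUKOnce (const Teuchos::RCP<const Tpetra::RowMatrix<double,int,int> >& A,
                     const Tpetra::MultiVector<double,int,int>& X,
                     Tpetra::MultiVector<double,int,int>& Y)
{
  Ifpack2::RILUK<Tpetra::RowMatrix<double,int,int> > prec (A);

  Teuchos::ParameterList params;
  params.set ("fact: iluk level-of-fill", 1);  // assumed option name
  prec.setParameters (params);

  prec.initialize ();  // symbolic factorization
  prec.compute ();     // numeric factorization of L, D, U
  prec.apply (X, Y);   // the member shown above: Y = inv(LDU) * X
}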
void
Redistributor<Node>::redistribute(const  ::Tpetra::MultiVector<double,int,int,Node> & inputVector,  ::Tpetra::MultiVector<double,int,int,Node>  * &outputVector)
{
  if (!created_importer_) {
    create_importer(inputVector.getMap());
  }

  // target_map_ is assumed to be an RCP to the redistributed (target) map.
  outputVector = new ::Tpetra::MultiVector<double,int,int,Node>(target_map_, inputVector.getNumVectors());

  outputVector->doImport(inputVector, *importer_, ::Tpetra::INSERT);

  return;
}
Example #10
// ============================================================================
void
BorderingHelpers::
merge(const Tpetra::MultiVector<double,int,int> & x,
      const double * lambda,
      Tpetra::MultiVector<double,int,int> & out
    )
{
#ifndef NDEBUG
  // Check if the maps are matching.
  std::shared_ptr<const Tpetra::Map<int,int>> extendedMap =
    nosh::BorderingHelpers::extendMapBy1(x.getMap());
  TEUCHOS_ASSERT(out.getMap()->isSameAs(*extendedMap));
#endif

  Tpetra::Import<int,int> importer(x.getMap(), out.getMap());

  out.doImport(x, importer, Tpetra::INSERT);

  // Set last entry on proc 0.
  if (x.getMap()->getComm()->getRank() == 0) {
    const size_t numMyElems = x.getMap()->getNodeNumElements();
    for (size_t k = 0; k < x.getNumVectors(); k++)
      out.getDataNonConst(k)[numMyElems] = lambda[k];
  }
  }

  return;
}
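dissect() (Example #1) and merge() (Example #10) convert between a bordered vector on the extended map and its two components (xSmall, lambda). The round trip below is a hypothetical sketch, not from the original source; it assumes the nosh::BorderingHelpers declarations shown above and that xExt lives on extendMapBy1(xSmall.getMap()).

#include <vector>
#include <Tpetra_MultiVector.hpp>

// Hypothetical round trip: split a bordered vector and reassemble it.
void roundTrip (const Tpetra::MultiVector<double,int,int>& xExt,
                Tpetra::MultiVector<double,int,int>& xSmall,
                Tpetra::MultiVector<double,int,int>& xExt2)
{
  std::vector<double> lambda (xExt.getNumVectors ());

  // Strip the phase-constraint entries off the end ...
  nosh::BorderingHelpers::dissect (xExt, xSmall, lambda.data ());

  // ... and put them back; xExt2 should now equal xExt entry for entry.
  nosh::BorderingHelpers::merge (xSmall, lambda.data (), xExt2);
}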
void BorderedOperator<Scalar, LocalOrdinal, GlobalOrdinal, Node >::apply(
     const Tpetra::MultiVector<Scalar, LocalOrdinal, GlobalOrdinal, Node >& X,
           Tpetra::MultiVector<Scalar, LocalOrdinal, GlobalOrdinal, Node >& Y,
     Teuchos::ETransp mode, 
     Scalar coefAx, 
     Scalar coefY ) const 
{
  //bool opHasTrans = A_->hasTransposeApply();
  //TEUCHOS_TEST_FOR_EXCEPTION( mode  &&  !opHasTrans, std::runtime_error,
  //"Ifpack2::BorderedOperator::apply() ERROR: The operator does not implement transpose.");
  TEUCHOS_TEST_FOR_EXCEPTION(X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
     "Ifpack2::BorderedOperator::apply() ERROR: X.getNumVectors() != Y.getNumVectors().");
  A_->apply(X, Y, mode, coefAx, coefY );
}
Example #12
void Hiptmair<MatrixType>::
applyHiptmairSmoother(const Tpetra::MultiVector<typename MatrixType::scalar_type,
                      typename MatrixType::local_ordinal_type,
                      typename MatrixType::global_ordinal_type,
                      typename MatrixType::node_type>& X,
                      Tpetra::MultiVector<typename MatrixType::scalar_type,
                      typename MatrixType::local_ordinal_type,
                      typename MatrixType::global_ordinal_type,
                      typename MatrixType::node_type>& Y) const
{
  using Teuchos::RCP;
  using Teuchos::rcp;
  using Teuchos::rcpFromRef;
  typedef Tpetra::MultiVector<scalar_type, local_ordinal_type,
    global_ordinal_type, node_type> MV;
  const scalar_type ZERO = STS::zero ();
  const scalar_type ONE = STS::one ();

  RCP<MV> res1 = rcp (new MV (A_->getRowMap (), X.getNumVectors ()));
  RCP<MV> vec1 = rcp (new MV (A_->getRowMap (), X.getNumVectors ()));
  RCP<MV> res2 = rcp (new MV (PtAP_->getRowMap (), X.getNumVectors ()));
  RCP<MV> vec2 = rcp (new MV (PtAP_->getRowMap (), X.getNumVectors ()));

  if (preOrPost_ == "pre" || preOrPost_ == "both") {
    // apply initial relaxation to primary space
    A_->apply (Y, *res1);
    res1->update (ONE, X, -ONE);
    vec1->putScalar (ZERO);
    ifpack2_prec1_->apply (*res1, *vec1);
    Y.update (ONE, *vec1, ONE);
  }

  // project to auxiliary space and smooth
  A_->apply (Y, *res1);
  res1->update (ONE, X, -ONE);
  P_->apply (*res1, *res2, Teuchos::TRANS);
  vec2->putScalar (ZERO);
  ifpack2_prec2_->apply (*res2, *vec2);
  P_->apply (*vec2, *vec1, Teuchos::NO_TRANS);
  Y.update (ONE,*vec1,ONE);

  if (preOrPost_ == "post" || preOrPost_ == "both") {
    // smooth again on primary space
    A_->apply (Y, *res1);
    res1->update (ONE, X, -ONE);
    vec1->putScalar (ZERO);
    ifpack2_prec1_->apply (*res1, *vec1);
    Y.update (ONE, *vec1, ONE);
  }
}
Example #13
void TomBlockRelaxation<MatrixType,ContainerType>::DoJacobi(const Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>& X, Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>& Y) const
{
  size_t NumVectors = X.getNumVectors();
  Scalar one=Teuchos::ScalarTraits<Scalar>::one();
  // Note: Flop counts copied naively from Ifpack.

  if (OverlapLevel_ == 0) {
    // Non-overlapping Jacobi
    for (LocalOrdinal i = 0 ; i < NumLocalBlocks_ ; i++) {     
      // may happen that a partition is empty
      if (Containers_[i]->getNumRows() == 0) continue;
      Containers_[i]->apply(X,Y,Teuchos::NO_TRANS,DampingFactor_,one);     
      ApplyFlops_ += NumVectors * 2 * NumGlobalRows_;
    }
  }
  else {
    // Overlapping Jacobi
    for (LocalOrdinal i = 0 ; i < NumLocalBlocks_ ; i++) {
      // may happen that a partition is empty
      if (Containers_[i]->getNumRows() == 0) continue;
      Containers_[i]->weightedApply(X,Y,*W_,Teuchos::NO_TRANS,DampingFactor_,one);
      // NOTE: do not count (for simplicity) the flops due to overlapping rows
      ApplyFlops_ += NumVectors * 4 * NumGlobalRows_;
    }
  }
}
void
OverlappingRowMatrix<MatrixType>::
exportMultiVector (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> &OvX,
                   Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> &X,
                   Tpetra::CombineMode CM)
{
  X.doExport (OvX, *Importer_, CM);
}
Example #15
void TomBlockRelaxation<MatrixType,ContainerType>::applyMat(
          const Tpetra::MultiVector<typename MatrixType::scalar_type,
                                    typename MatrixType::local_ordinal_type,
                                    typename MatrixType::global_ordinal_type,
                                    typename MatrixType::node_type>& X,
                Tpetra::MultiVector<typename MatrixType::scalar_type,
                                    typename MatrixType::local_ordinal_type,
                                    typename MatrixType::global_ordinal_type,
                                    typename MatrixType::node_type>& Y,
             Teuchos::ETransp mode) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(isComputed() == false, std::runtime_error,
     "Ifpack2::TomBlockRelaxation::applyMat() ERROR: isComputed() must be true prior to calling applyMat().");
  TEUCHOS_TEST_FOR_EXCEPTION(X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
     "Ifpack2::TomBlockRelaxation::applyMat() ERROR: X.getNumVectors() != Y.getNumVectors().");
  A_->apply(X, Y, mode);
}
void
Chebyshev<MatrixType>::
applyMat (const Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& X,
          Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& Y,
          Teuchos::ETransp mode) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    X.getNumVectors () != Y.getNumVectors (), std::invalid_argument,
    "Ifpack2::Chebyshev::applyMat: X.getNumVectors() != Y.getNumVectors().");

  Teuchos::RCP<const row_matrix_type> A = impl_.getMatrix ();
  TEUCHOS_TEST_FOR_EXCEPTION(
    A.is_null (), std::runtime_error, "Ifpack2::Chebyshev::applyMat: The input "
    "matrix A is null.  Please call setMatrix() with a nonnull input matrix "
    "before calling this method.");

  A->apply (X, Y, mode);
}
Example #17
 void
 apply(
     const Tpetra::MultiVector<double,int,int> & X,
     Tpetra::MultiVector<double,int,int> & Y,
     Teuchos::ETransp mode = Teuchos::NO_TRANS,
     double alpha = Teuchos::ScalarTraits<double>::one(),
     double beta = Teuchos::ScalarTraits<double>::zero()
     ) const
 {
   for (size_t k = 0; k < Y.getNumVectors(); k++) {
     const auto x_data = X.getData(k);
     const auto x0_data = x0_.getData();
     auto y_data = Y.getDataNonConst(k);
     for (size_t i = 0; i < y_data.size(); i++) {
       y_data[i] = 2 * x0_data[i] * x_data[i];
     }
   }
   return;
 }
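The apply() in Example #17 ignores mode, alpha, and beta and simply scales each column of X entrywise by 2*x0, i.e. the Jacobian of x -> x.*x evaluated at x0. Assuming x0_ is a Tpetra::Vector<double,int,int> on the same map as X and Y (an assumption, since only getData() is visible above), the hand-written loop is equivalent to Tpetra's built-in elementwise multiply; a minimal sketch:

#include <Tpetra_MultiVector.hpp>
#include <Tpetra_Vector.hpp>

// Sketch only (assumes x0 lives on the same map as X and Y):
// Y_i = 2 * x0_i * X_i for every local row i and every column of X.
void applyTwiceX0 (const Tpetra::Vector<double,int,int>& x0,
                   const Tpetra::MultiVector<double,int,int>& X,
                   Tpetra::MultiVector<double,int,int>& Y)
{
  Y.elementWiseMultiply (2.0, x0, X, 0.0);  // Y = 0*Y + 2 * diag(x0) * X
}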
void SingletonFilter<MatrixType>::SolveSingletonsTempl(const Tpetra::MultiVector<DomainScalar,LocalOrdinal,GlobalOrdinal,Node>& RHS,
                                                       Tpetra::MultiVector<RangeScalar,LocalOrdinal,GlobalOrdinal,Node>& LHS)
{
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const DomainScalar> > RHS_ptr = RHS.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<RangeScalar> >        LHS_ptr = LHS.get2dViewNonConst();

  for (size_t i = 0 ; i < NumSingletons_ ; ++i) {
    LocalOrdinal ii = SingletonIndex_[i];
    // get the diagonal value for the singleton
    size_t Nnz;
    A_->getLocalRowCopy(ii,Indices_(),Values_(),Nnz);
    for (size_t j = 0 ; j < Nnz ; ++j) {
      if (Indices_[j] == ii) {
        for (size_t k = 0 ; k < LHS.getNumVectors() ; ++k)
          LHS_ptr[k][ii] = (RangeScalar)RHS_ptr[k][ii] / (RangeScalar)Values_[j];
      }
    }
  }
}
void DiagonalFilter<MatrixType>::apply(const Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> &X, 
				       Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> &Y, 
				       Teuchos::ETransp mode, 
				       Scalar alpha,
				       Scalar beta) const
{
  Scalar one = Teuchos::ScalarTraits<Scalar>::one();
  A_->apply(X,Y,mode,alpha,beta);
  Y.elementWiseMultiply(one,*val_,X,one);
}
Example #20
int RILUK<MatrixType>::Multiply(const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& X,
                              Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Y,
            Teuchos::ETransp mode) const {
//
// This function finds Y such that LDU Y = X or U(trans) D L(trans) Y = X for multiple RHS
//

  // First generate X and Y as needed for this function
  Teuchos::RCP<const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> > X1;
  Teuchos::RCP<Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> > Y1;
  generateXY(mode, X, Y, X1, Y1);

//  Epetra_Flops * counter = this->GetFlopCounter();
//  if (counter!=0) {
//    L_->SetFlopCounter(*counter);
//    Y1->SetFlopCounter(*counter);
//    U_->SetFlopCounter(*counter);
//  }

  if (mode != Teuchos::NO_TRANS) {
    U_->apply(*X1, *Y1,mode); //
    Y1->update(1.0, *X1, 1.0); // Y1 = Y1 + X1 (account for implicit unit diagonal)
    Y1->elementWiseMultiply(1.0, *D_, *Y1, 0.0); // y = D*y (D_ has inverse of diagonal)
    Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> Y1temp(*Y1); // Need a temp copy of Y1
    L_->apply(Y1temp, *Y1,mode);
    Y1->update(1.0, Y1temp, 1.0); // (account for implicit unit diagonal)
    if (isOverlapped_) {Y.doExport(*Y1,*L_->getGraph()->getExporter(), OverlapMode_);} // Export computed Y values if needed
  }
  else {

    L_->apply(*X1, *Y1,mode);
    Y1->update(1.0, *X1, 1.0); // Y1 = Y1 + X1 (account for implicit unit diagonal)
    Y1->elementWiseMultiply(1.0, *D_, *Y1, 0.0); // y = D*y (D_ has inverse of diagonal)
    Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> Y1temp(*Y1); // Need a temp copy of Y1
    U_->apply(Y1temp, *Y1,mode);
    Y1->update(1.0, Y1temp, 1.0); // (account for implicit unit diagonal)
    if (isOverlapped_) {Y.doExport(*Y1,*L_->getGraph()->getExporter(), OverlapMode_);}
  }
  return(0);
}
Example #21
void RILUK<MatrixType>::generateXY(Teuchos::ETransp mode,
    const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Xin,
    const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Yin,
    Teuchos::RCP<const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> >& Xout,
    Teuchos::RCP<Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> >& Yout) const {

  // Generate an X and Y suitable for performing Solve() and Multiply() methods

  TEUCHOS_TEST_FOR_EXCEPTION(Xin.getNumVectors()!=Yin.getNumVectors(), std::runtime_error,
       "Ifpack2::RILUK::GenerateXY ERROR: X and Y not the same size");

  //cout << "Xin = " << Xin << endl;
  Xout = Teuchos::rcp( (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> *) &Xin, false );
  Yout = Teuchos::rcp( (Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> *) &Yin, false );
  if (!isOverlapped_) return; // Nothing more to do

  if (isOverlapped_) {
    // Make sure the number of vectors in the multivector is the same as before.
    if (OverlapX_!=Teuchos::null) {
      if (OverlapX_->getNumVectors()!=Xin.getNumVectors()) {
        OverlapX_ = Teuchos::null;
        OverlapY_ = Teuchos::null;
      }
    }
    if (OverlapX_==Teuchos::null) { // Need to allocate space for overlap X and Y
      OverlapX_ = Teuchos::rcp( new Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>(U_->getColMap(), Xout->getNumVectors()) );
      OverlapY_ = Teuchos::rcp( new Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>(L_->getRowMap(), Yout->getNumVectors()) );
    }
    if (mode == Teuchos::NO_TRANS) {
      OverlapX_->doImport(*Xout,*U_->getGraph()->getImporter(), Tpetra::INSERT); // Import X values for solve
    }
    else {
      OverlapX_->doImport(*Xout,*L_->getGraph()->getExporter(), Tpetra::INSERT); // Import X values for solve
    }
    Xout = OverlapX_;
    Yout = OverlapY_; // Set pointers for Xout and Yout to point to overlap space
    //cout << "OverlapX_ = " << *OverlapX_ << endl;
  }
}
void IdentitySolver<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& X,
       Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Y,
       Teuchos::ETransp /*mode*/,
       scalar_type alpha,
       scalar_type beta) const
{
  using Teuchos::RCP;
  typedef Teuchos::ScalarTraits<scalar_type> STS;
  typedef Tpetra::MultiVector<scalar_type, local_ordinal_type,
                              global_ordinal_type, node_type> MV;

  TEUCHOS_TEST_FOR_EXCEPTION(
    ! isComputed (), std::runtime_error,
    "Ifpack2::IdentitySolver::apply: If compute() has not yet been called, "
    "or if you have changed the matrix via setMatrix(), "
    "you must call compute() before you may call this method.");

  // "Identity solver" does what it says: it's the identity operator.
  // We have to Export if the domain and range Maps are not the same.
  // Otherwise, this operator would be a permutation, not the identity.
  if (export_.is_null ()) {
    Y.update (alpha, X, beta);
  }
  else {
    if (alpha == STS::one () && beta == STS::zero ()) { // the common case
      Y.doExport (X, *export_, Tpetra::REPLACE);
    }
    else {
      // We know that the domain and range Maps are compatible.  First
      // bring X into the range Map via Export.  Then compute in place
      // in Y.
      MV X_tmp (Y.getMap (), Y.getNumVectors ());
      X_tmp.doExport (X, *export_, Tpetra::REPLACE);
      Y.update (alpha, X_tmp, beta);
    }
  }
  ++numApply_;
}
Example #23
void OverlappingRowMatrix<MatrixType>::applyTempl(const Tpetra::MultiVector<DomainScalar,LocalOrdinal,GlobalOrdinal,Node> &X, 
						  Tpetra::MultiVector<RangeScalar,LocalOrdinal,GlobalOrdinal,Node> &Y, 
						  Teuchos::ETransp mode, 
						  RangeScalar alpha,
						  RangeScalar beta) const
{
  // Note: This isn't AztecOO compliant.  But neither was Ifpack's version.
  TEUCHOS_TEST_FOR_EXCEPTION(X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
			     "Ifpack2::OverlappingRowMatrix::apply ERROR: X.getNumVectors() != Y.getNumVectors().");

  RangeScalar zero = Teuchos::ScalarTraits<RangeScalar>::zero();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const DomainScalar> > x_ptr = X.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<RangeScalar> >        y_ptr = Y.get2dViewNonConst();
  Y.putScalar(zero);
  size_t NumVectors = Y.getNumVectors();

  for (size_t i = 0 ; i < NumMyRowsA_ ; ++i) {
    size_t Nnz;
    // Use this class's getrow to make the below code simpler
    A_->getLocalRowCopy(i,Indices_(),Values_(),Nnz);
    if (mode==Teuchos::NO_TRANS){
      for (size_t j = 0 ; j < Nnz ; ++j) 
	for (size_t k = 0 ; k < NumVectors ; ++k)
	  y_ptr[k][i] += (RangeScalar)Values_[j] * (RangeScalar)x_ptr[k][Indices_[j]];      
    }
    else if (mode==Teuchos::TRANS){
      for (size_t j = 0 ; j < Nnz ; ++j) 
	for (size_t k = 0 ; k < NumVectors ; ++k)
	  y_ptr[k][Indices_[j]] += (RangeScalar)Values_[j] * (RangeScalar)x_ptr[k][i];
    }
    else { //mode==Teuchos::CONJ_TRANS
      for (size_t j = 0 ; j < Nnz ; ++j) 
	for (size_t k = 0 ; k < NumVectors ; ++k)
	  y_ptr[k][Indices_[j]] += Teuchos::ScalarTraits<RangeScalar>::conjugate((RangeScalar)Values_[j]) * (RangeScalar)x_ptr[k][i];
    }
  }

  for (size_t i = 0 ; i < NumMyRowsB_ ; ++i) {
    size_t Nnz;
    // Use this class's getrow to make the below code simpler
    ExtMatrix_->getLocalRowCopy(i,Indices_(),Values_(),Nnz);
    if (mode==Teuchos::NO_TRANS){
      for (size_t j = 0 ; j < Nnz ; ++j) 
	for (size_t k = 0 ; k < NumVectors ; ++k)
	  y_ptr[k][NumMyRowsA_+i] += (RangeScalar)Values_[j] * (RangeScalar)x_ptr[k][Indices_[j]];      
    }
    else if (mode==Teuchos::TRANS){
      for (size_t j = 0 ; j < Nnz ; ++j) 
	for (size_t k = 0 ; k < NumVectors ; ++k)
	  y_ptr[k][NumMyRowsA_+Indices_[j]] += (RangeScalar)Values_[j] * (RangeScalar)x_ptr[k][i];
    }
    else { //mode==Teuchos::CONJ_TRANS
      for (size_t j = 0 ; j < Nnz ; ++j) 
	for (size_t k = 0 ; k < NumVectors ; ++k)
	  y_ptr[k][NumMyRowsA_+Indices_[j]] += Teuchos::ScalarTraits<RangeScalar>::conjugate((RangeScalar)Values_[j]) * (RangeScalar)x_ptr[k][i];
    }
  }
}
void
Chebyshev<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& X,
       Tpetra::MultiVector<scalar_type, local_ordinal_type, global_ordinal_type, node_type>& Y,
       Teuchos::ETransp mode,
       scalar_type alpha,
       scalar_type beta) const
{
  const std::string timerName ("Ifpack2::Chebyshev::apply");
  Teuchos::RCP<Teuchos::Time> timer = Teuchos::TimeMonitor::lookupCounter (timerName);
  if (timer.is_null ()) {
    timer = Teuchos::TimeMonitor::getNewCounter (timerName);
  }

  // Start timing here.
  {
    Teuchos::TimeMonitor timeMon (*timer);

    // compute() calls initialize() if it hasn't already been called.
    // Thus, we only need to check isComputed().
    TEUCHOS_TEST_FOR_EXCEPTION(
      ! isComputed (), std::runtime_error,
      "Ifpack2::Chebyshev::apply(): You must call the compute() method before "
      "you may call apply().");
    TEUCHOS_TEST_FOR_EXCEPTION(
      X.getNumVectors () != Y.getNumVectors (), std::runtime_error,
      "Ifpack2::Chebyshev::apply(): X and Y must have the same number of "
      "columns.  X.getNumVectors() = " << X.getNumVectors() << " != "
      << "Y.getNumVectors() = " << Y.getNumVectors() << ".");
    applyImpl (X, Y, mode, alpha, beta);
  }
  ++NumApply_;

  // timer->totalElapsedTime() returns the total time over all timer
  // calls.  Thus, we use = instead of +=.
  ApplyTime_ = timer->totalElapsedTime ();
}
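The Chebyshev apply() above expects initialize() and compute() to have run first. The driver below is a minimal sketch, not from the original listing; it assumes A is a Teuchos::RCP<const Tpetra::RowMatrix<double,int,int> > and uses what are believed to be the standard Ifpack2 Chebyshev option names.

#include <Ifpack2_Chebyshev.hpp>
#include <Teuchos_ParameterList.hpp>
#include <Tpetra_MultiVector.hpp>
#include <Tpetra_RowMatrix.hpp>

// Hypothetical driver: set up the smoother for A and run one application.
void applyChebyshevOnce (const Teuchos::RCP<const Tpetra::RowMatrix<double,int,int> >& A,
                         const Tpetra::MultiVector<double,int,int>& X,
                         Tpetra::MultiVector<double,int,int>& Y)
{
  Ifpack2::Chebyshev<Tpetra::RowMatrix<double,int,int> > cheby (A);

  Teuchos::ParameterList params;
  params.set ("chebyshev: degree", 3);  // polynomial degree; assumed option name
  cheby.setParameters (params);

  cheby.initialize ();
  cheby.compute ();    // estimates eigenvalue bounds unless they are supplied
  cheby.apply (X, Y);  // the timed, error-checked apply() shown above
}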
Example #25
void TomBlockRelaxation<MatrixType,ContainerType>::ApplyInverseJacobi(
        const Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>& X, 
              Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node>& Y) const
{
  size_t NumVectors = X.getNumVectors();
  Tpetra::MultiVector<Scalar,LocalOrdinal,GlobalOrdinal,Node> AY( Y.getMap(),NumVectors );
  
  // Initial matvec not needed
  int starting_iteration=0;
  if(ZeroStartingSolution_) {
    DoJacobi(X,Y);
    starting_iteration=1;
  }

  for (int j = starting_iteration; j < NumSweeps_ ; j++) {       
    applyMat(Y,AY);
    AY.update(1.0,X,-1.0);
    DoJacobi(AY,Y);

    // Flops for matrix apply & update
    ApplyFlops_ += NumVectors * (2 * NumGlobalNonzeros_ + 2 * NumGlobalRows_);
  }

}
void SparseContainer<MatrixType,InverseType>::
applyImpl (const Tpetra::MultiVector<InverseScalar,InverseLocalOrdinal,InverseGlobalOrdinal,InverseNode>& X,
           Tpetra::MultiVector<InverseScalar,InverseLocalOrdinal,InverseGlobalOrdinal,InverseNode>& Y,
           Teuchos::ETransp mode,
           InverseScalar alpha,
           InverseScalar beta) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    Inverse_->getDomainMap ()->getNodeNumElements () != X.getLocalLength (),
    std::logic_error, "Ifpack2::SparseContainer::apply: Inverse_ "
    "operator and X have incompatible dimensions (" <<
    Inverse_->getDomainMap ()->getNodeNumElements () << " resp. "
    << X.getLocalLength () << ").  Please report this bug to "
    "the Ifpack2 developers.");
  TEUCHOS_TEST_FOR_EXCEPTION(
    Inverse_->getRangeMap ()->getNodeNumElements () != Y.getLocalLength (),
    std::logic_error, "Ifpack2::SparseContainer::apply: Inverse_ "
    "operator and Y have incompatible dimensions (" <<
    Inverse_->getRangeMap ()->getNodeNumElements () << " resp. "
    << Y.getLocalLength () << ").  Please report this bug to "
    "the Ifpack2 developers.");

  Inverse_->apply (X, Y, mode, alpha, beta);
}
Example #27
void Diagonal<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& X,
       Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type>& Y,
       Teuchos::ETransp /*mode*/,
       scalar_type alpha,
       scalar_type beta) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    ! isComputed (), std::runtime_error, "Ifpack2::Diagonal::apply: You "
    "must first call compute() before you may call apply().  Once you have "
    "called compute(), you need not call it again unless the values in the "
    "matrix have changed, or unless you have called setMatrix().");

  Y.elementWiseMultiply (alpha, *inverseDiag_, X, beta);
  ++numApply_;
}
Example #28
void ReorderFilter<MatrixType>::permuteReorderedToOriginalTempl(const Tpetra::MultiVector<DomainScalar,LocalOrdinal,GlobalOrdinal,Node> &reorderedX, 
								Tpetra::MultiVector<RangeScalar,LocalOrdinal,GlobalOrdinal,Node> &originalY) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(reorderedX.getNumVectors() != originalY.getNumVectors(), std::runtime_error,
			     "Ifpack2::ReorderFilter::permuteReorderedToOriginal ERROR: X.getNumVectors() != Y.getNumVectors().");

  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const DomainScalar> > x_ptr = reorderedX.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<RangeScalar> >        y_ptr = originalY.get2dViewNonConst();

  for(size_t k=0; k < reorderedX.getNumVectors(); k++)
    for(LocalOrdinal i=0; (size_t)i< reorderedX.getLocalLength(); i++)
      y_ptr[k][reverseperm_[i]] = (RangeScalar)x_ptr[k][i];
}
Example #29
void ReorderFilter<MatrixType>::permuteOriginalToReorderedTempl(const Tpetra::MultiVector<DomainScalar,local_ordinal_type,global_ordinal_type,node_type> &originalX,
                                                                Tpetra::MultiVector<RangeScalar,local_ordinal_type,global_ordinal_type,node_type> &reorderedY) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(originalX.getNumVectors() != reorderedY.getNumVectors(), std::runtime_error,
                             "Ifpack2::ReorderFilter::permuteOriginalToReordered ERROR: X.getNumVectors() != Y.getNumVectors().");

  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const DomainScalar> > x_ptr = originalX.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<RangeScalar> >        y_ptr = reorderedY.get2dViewNonConst();

  for(size_t k=0; k < originalX.getNumVectors(); k++)
    for(local_ordinal_type i=0; (size_t)i< originalX.getLocalLength(); i++)
      y_ptr[k][perm_[i]] = (RangeScalar)x_ptr[k][i];
}
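Examples #28 and #29 are exact inverses of each other: one scatters through perm_, the other through reverseperm_. The stand-alone sketch below illustrates the invariant they rely on; the permutation data here is hypothetical and not taken from the class.

#include <cassert>
#include <cstddef>
#include <vector>

int main ()
{
  // Hypothetical local reordering: reordered position perm[i] holds original row i.
  const std::vector<int> perm = {2, 0, 3, 1};

  // Build the inverse permutation, the analogue of reverseperm_.
  std::vector<int> reverseperm (perm.size ());
  for (std::size_t i = 0; i < perm.size (); ++i)
    reverseperm[perm[i]] = static_cast<int> (i);

  // Scatter forward (original -> reordered), then back again.
  const std::vector<double> x = {10.0, 11.0, 12.0, 13.0};
  std::vector<double> y (x.size ()), z (x.size ());
  for (std::size_t i = 0; i < x.size (); ++i)
    y[perm[i]] = x[i];          // as in permuteOriginalToReorderedTempl
  for (std::size_t i = 0; i < y.size (); ++i)
    z[reverseperm[i]] = y[i];   // as in permuteReorderedToOriginalTempl
  for (std::size_t i = 0; i < x.size (); ++i)
    assert (z[i] == x[i]);      // the round trip recovers the original vector
  return 0;
}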
Example #30
void ReorderFilter<MatrixType>::
apply (const Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> &X,
       Tpetra::MultiVector<scalar_type,local_ordinal_type,global_ordinal_type,node_type> &Y,
       Teuchos::ETransp mode,
       scalar_type alpha,
       scalar_type beta) const
{
  typedef Teuchos::ScalarTraits<scalar_type> STS;

  // Note: This isn't AztecOO compliant.  But neither was Ifpack's version.
  // Note: The localized maps mean the matvec is trivial (and has no import)
  TEUCHOS_TEST_FOR_EXCEPTION(
    X.getNumVectors() != Y.getNumVectors(), std::runtime_error,
    "Ifpack2::ReorderFilter::apply: X.getNumVectors() != Y.getNumVectors().");

  const scalar_type zero = STS::zero ();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<const scalar_type> > x_ptr = X.get2dView();
  Teuchos::ArrayRCP<Teuchos::ArrayRCP<scalar_type> > y_ptr = Y.get2dViewNonConst();

  Y.putScalar (zero);
  const size_t NumVectors = Y.getNumVectors ();

  for (size_t i = 0; i < A_->getNodeNumRows (); ++i) {
    size_t Nnz;
    // Use this class's getrow to make the below code simpler
    getLocalRowCopy (i, Indices_ (), Values_ (), Nnz);
    if (mode == Teuchos::NO_TRANS) {
      for (size_t j = 0; j < Nnz; ++j) {
        for (size_t k = 0; k < NumVectors; ++k) {
          y_ptr[k][i] += Values_[j] * x_ptr[k][Indices_[j]];
        }
      }
    }
    else if (mode == Teuchos::TRANS) {
      for (size_t j = 0; j < Nnz; ++j) {
        for (size_t k = 0; k < NumVectors; ++k) {
          y_ptr[k][Indices_[j]] += Values_[j] * x_ptr[k][i];
        }
      }
    }
    else { //mode==Teuchos::CONJ_TRANS
      for (size_t j = 0; j < Nnz; ++j) {
        for (size_t k = 0; k < NumVectors; ++k) {
          y_ptr[k][Indices_[j]] += STS::conjugate(Values_[j]) * x_ptr[k][i];
        }
      }
    }
  }
}