Example #1
// ======================================================================
void Krylov(const Operator& A, const MultiVector& LHS,
            const MultiVector& RHS, const BaseOperator& Prec,
            Teuchos::ParameterList& List)
{
#ifndef HAVE_ML_AZTECOO
      std::cerr << "Please configure ML with --enable-aztecoo to use" << std::endl;
      std::cerr << "MLAPI Krylov solvers" << std::endl;
      exit(EXIT_FAILURE);
#else
  if (LHS.GetNumVectors() != 1)
    ML_THROW("FIXME: only one vector is currently supported", -1);

  Epetra_LinearProblem Problem;

  const Epetra_RowMatrix& A_Epetra = *(A.GetRowMatrix());

  Epetra_Vector LHS_Epetra(View,A_Epetra.OperatorDomainMap(),
                           (double*)&(LHS(0)));
  Epetra_Vector RHS_Epetra(View,A_Epetra.OperatorRangeMap(),
                           (double*)&(RHS(0)));

  // FIXME: this works only for Epetra-based operators
  Problem.SetOperator((const_cast<Epetra_RowMatrix*>(&A_Epetra)));
  Problem.SetLHS(&LHS_Epetra);
  Problem.SetRHS(&RHS_Epetra);

  AztecOO solver(Problem);

  EpetraBaseOperator Prec_Epetra(A_Epetra.OperatorDomainMap(),Prec);
  solver.SetPrecOperator(&Prec_Epetra);

  // get options from List
  int    NumIters = List.get("krylov: max iterations", 1550);
  double Tol      = List.get("krylov: tolerance", 1e-9);
  std::string type     = List.get("krylov: type", "gmres");
  int    output   = List.get("krylov: output level", GetPrintLevel());

  // set options in `solver'
  if (type == "cg")
    solver.SetAztecOption(AZ_solver, AZ_cg);
  else if (type == "cg_condnum")
    solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  else if (type == "gmres")
    solver.SetAztecOption(AZ_solver, AZ_gmres);
  else if (type == "gmres_condnum")
    solver.SetAztecOption(AZ_solver, AZ_gmres_condnum);
  else if (type == "fixed point")
    solver.SetAztecOption(AZ_solver, AZ_fixed_pt);
  else
    ML_THROW("krylov: type has incorrect value (" +
             type + ")", -1);

  solver.SetAztecOption(AZ_output, output);
  solver.Iterate(NumIters, Tol);
#endif

}
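// ======================================================================
// Usage sketch (illustrative only, not part of the original source): how
// the Krylov() wrapper above is typically driven through a ParameterList.
// The objects A, Prec, LHS and RHS are assumed to have been built
// elsewhere; the parameter values are arbitrary examples.
static void ExampleKrylovUsage(const Operator& A, const BaseOperator& Prec,
                               MultiVector& LHS, const MultiVector& RHS)
{
  Teuchos::ParameterList List;
  List.set("krylov: type",           "cg");  // or "gmres", "fixed point", ...
  List.set("krylov: max iterations", 500);
  List.set("krylov: tolerance",      1e-8);
  List.set("krylov: output level",   16);    // AztecOO-style output frequency
  Krylov(A, LHS, RHS, Prec, List);           // solves A * LHS = RHS
}
// ======================================================================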
int 
InverseOperator::Apply(const MultiVector& x, MultiVector& y) const
{
  ResetTimer();
  StackPush();

  if (GetDomainSpace() != x.GetVectorSpace())
    ML_THROW("DomainSpace and x.GetVectorSpace() differ", -1);

  if (GetRangeSpace() != y.GetVectorSpace())
    ML_THROW("RangeSpace and y.GetVectorSpace() differ", -1);

  int x_nv = x.GetNumVectors();
  int y_nv = y.GetNumVectors();
  double FL = 0.0;
  if (RCPData_ != Teuchos::null)
    FL = RCPData_->ComputeFlops();

  if (x_nv != y_nv)
    ML_THROW("Number of vectors of x and y differ (" +
             GetString(x_nv) + " vs. " + GetString(x_nv), -1);

  for (int v = 0 ; v < x_nv ; ++v) {

    Epetra_Vector x_Epetra(View,RowMatrix()->OperatorDomainMap(),
                           (double*)&(x(0,v)));
    Epetra_Vector y_Epetra(View,RowMatrix()->OperatorRangeMap(),
                           (double*)&(y(0,v)));

    if (RCPData_ != Teuchos::null)
      RCPData_->ApplyInverse(x_Epetra,y_Epetra);
    else if (RCPMLPrec_ != Teuchos::null)
      RCPMLPrec_->ApplyInverse(x_Epetra,y_Epetra);
    else
      ML_THROW("Neither Ifpack nor ML smoother is properly set up", -1);
  }

  StackPop();
  if (RCPData_ != Teuchos::null)
    UpdateFlops(RCPData_->ComputeFlops() - FL);
  UpdateTime();

  return(0);
}
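// ======================================================================
// Illustrative sketch (not part of the original source): building an
// InverseOperator as a one-level smoother for a square MLAPI Operator A
// and applying it through the Apply() method above.  The smoother type
// and the constant input vector are arbitrary choices for the example.
static void ExampleSmootherApply(const Operator& A)
{
  Teuchos::ParameterList List;                   // default smoother options
  InverseOperator S;
  S.Reshape(A, "symmetric Gauss-Seidel", List);  // same call pattern as in Compute() below
  MultiVector x(A.GetDomainSpace(), 1);          // A is assumed square here,
  MultiVector y(A.GetDomainSpace(), 1);          // so domain and range coincide
  x = 1.0;                                       // constant input vector
  S.Apply(x, y);                                 // y approximates A^{-1} x
}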
// ======================================================================
MultiVector Extract(const MultiVector& y, const int v)
{
  if ((v < 0) || v >= y.GetNumVectors())
    ML_THROW("Wrong input parameter v (" +
             GetString(v) + ")", -1);

  MultiVector x(y.GetVectorSpace(), y.GetRCPValues(v));

  return(x);
}
// ======================================================================
MultiVector Duplicate(const MultiVector& y, const int v)
{
  if ((v < 0) || v >= y.GetNumVectors())
    ML_THROW("Wrong input parameter v (" +
             GetString(v) + ")", -1);

  // FIXME: use Extract
  MultiVector x(y.GetVectorSpace(), 1);
  for (int i = 0 ; i < x.GetMyLength() ; ++i)
    x(i) = y(i,v);

  return(x);
}
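// ======================================================================
// Illustrative note (not part of the original source): Extract() wraps
// the v-th column of y without copying it (the values are shared through
// GetRCPValues()), whereas Duplicate() allocates and fills a new vector.
// The sketch below uses the deep copy, which is safe to modify.
static void ExampleDuplicateColumn(const MultiVector& y)
{
  MultiVector copy = Duplicate(y, 0);  // independent copy of column 0 of y
  copy(0) = 123.0;                     // modifies the copy only, not y
}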
// ====================================================================== 
Operator GetPtent1D(const MultiVector& D, const int offset = 0)
{
  if (D.GetNumVectors() != 1)
    ML_THROW("D.GetNumVectors() != 1", -1);

  int size = D.GetMyLength();
  if (size == 0)
    ML_THROW("empty diagonal vector in input", -1);

  double* diag = new double[size];
  for (int i = 0 ; i < size ; ++i)
    diag[i] = D(i);

  // creates the ML operator and stores the diag pointer,
  // as well as the function pointers
  ML_Operator* MLDiag = ML_Operator_Create(GetML_Comm());

  int invec_leng = size / 3 + size % 3;
  int outvec_leng = size;

  MLDiag->invec_leng = invec_leng;
  MLDiag->outvec_leng = outvec_leng;
  MLDiag->data = (void*)diag;
  MLDiag->data_destroy = Ptent1D_destroy;
  MLDiag->matvec->func_ptr = Ptent1D_matvec;

  MLDiag->matvec->ML_id = ML_NONEMPTY;
  MLDiag->matvec->Nrows = outvec_leng;
  MLDiag->from_an_ml_operator = 0;

  MLDiag->getrow->func_ptr = Ptent1D_getrows;

  MLDiag->getrow->ML_id = ML_NONEMPTY;
  MLDiag->getrow->Nrows = outvec_leng;

  // creates the domain space
  std::vector<int> MyGlobalElements(invec_leng);
  for (int i = 0 ; i < invec_leng ; ++i) 
    MyGlobalElements[i] = D.GetVectorSpace()(i * 3) / 3;
  Space DomainSpace(invec_leng, -1, &MyGlobalElements[0]);
  Space RangeSpace = D.GetVectorSpace();

  // creates the MLAPI wrapper
  Operator Diag(DomainSpace,RangeSpace,MLDiag,true);
  return(Diag);
}
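// ======================================================================
// Illustrative sketch (not part of the original source): GetPtent1D()
// above builds a 1-D tentative prolongator that coarsens by a factor of
// roughly 3, using the entries of D as its nonzero values.  FineSpace is
// a placeholder for the fine-level vector space; its local length is
// assumed to be a multiple of 3 here.
static Operator ExamplePtent1D(const Space& FineSpace)
{
  MultiVector D(FineSpace, 1);
  D = 1.0;                      // unit weight for every fine-level node
  return GetPtent1D(D);         // maps the coarse space (~1/3 of the nodes)
                                // back to FineSpace
}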
// ======================================================================
MultiVector Redistribute(const MultiVector& y, const int NumEquations)
{
  StackPush();

  if (y.GetMyLength() % NumEquations)
    ML_THROW("NumEquations does not divide MyLength()", -1);

  if (y.GetNumVectors() != 1)
    ML_THROW("Redistribute() works with single vectors only", -1);

  Space NewSpace(y.GetMyLength() / NumEquations);

  MultiVector y2(NewSpace, NumEquations);

  for (int i = 0 ; i < y2.GetMyLength() ; ++i)
    for (int j = 0 ; j < NumEquations ; ++j)
      y2(i, j) = y(j + NumEquations * i);

  StackPop();

  return(y2);
}
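// ======================================================================
// Illustrative note (not part of the original source): Redistribute()
// reinterprets a single interleaved vector of length N * NumEquations as
// a multi-vector with NumEquations columns of length N, so that entry
// y(NumEquations * i + j) ends up in y2(i, j).  A minimal sketch, assuming
// three interleaved unknowns per node:
static MultiVector ExampleRedistribute(const MultiVector& y)
{
  return Redistribute(y, 3);    // columns 0, 1, 2 hold the three unknowns
}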
// ====================================================================== 
Operator GetDiagonal(const MultiVector& D)
{
  if (D.GetNumVectors() != 1)
    ML_THROW("D.GetNumVectors() != 1", -1);

  int size = D.GetMyLength();
  if (size == 0)
    ML_THROW("empty diagonal vector in input", -1);

  double* diag = new double[size];
  for (int i = 0 ; i < size ; ++i) 
    diag[i] = D(i);

  // creates the ML operator and stores the diag pointer,
  // as well as the function pointers
  ML_Operator* MLDiag = ML_Operator_Create(GetML_Comm());

  MLDiag->invec_leng = size;
  MLDiag->outvec_leng = size;
  MLDiag->data = (void*)diag;
  MLDiag->matvec->func_ptr = diag_matvec;

  MLDiag->matvec->ML_id = ML_NONEMPTY;
  MLDiag->matvec->Nrows = size;
  MLDiag->from_an_ml_operator = 0;
  MLDiag->data_destroy = diag_destroy;

  MLDiag->getrow->func_ptr = diag_getrows;

  MLDiag->getrow->ML_id = ML_NONEMPTY;
  MLDiag->getrow->Nrows = size;

  // creates the MLAPI wrapper
  Operator Diag(D.GetVectorSpace(),D.GetVectorSpace(),MLDiag,true);
  return(Diag);
}
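// ======================================================================
// Illustrative sketch (not part of the original source): wrapping a
// vector D as a diagonal Operator with GetDiagonal() above and applying
// it to a multi-vector.  The A * x expression below assumes the usual
// MLAPI operator/multi-vector overloads are available.
static MultiVector ExampleScaleByDiagonal(const MultiVector& D,
                                          const MultiVector& x)
{
  Operator Diag = GetDiagonal(D);   // Diag(i, i) = D(i)
  return Diag * x;                  // component-wise scaling of x by D
}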
// ======================================================================
MultiVector Duplicate(const MultiVector& y)
{
  MultiVector x(y.GetVectorSpace(), y.GetNumVectors());
  x.Update(y);
  return(x);
}
Example #9
// ======================================================================
void GetPtent(const Operator& A, Teuchos::ParameterList& List,
              const MultiVector& ThisNS,
              Operator& Ptent, MultiVector& NextNS)
{
  std::string CoarsenType     = List.get("aggregation: type", "Uncoupled");
  /* old version
  int    NodesPerAggr    = List.get("aggregation: per aggregate", 64);
  */
  double Threshold       = List.get("aggregation: threshold", 0.0);
  int    NumPDEEquations = List.get("PDE equations", 1);

  ML_Aggregate* agg_object;
  ML_Aggregate_Create(&agg_object);
  ML_Aggregate_Set_MaxLevels(agg_object,2);
  ML_Aggregate_Set_StartLevel(agg_object,0);
  ML_Aggregate_Set_Threshold(agg_object,Threshold);
  //agg_object->curr_threshold = 0.0;

  ML_Operator* ML_Ptent = 0;
  ML_Ptent = ML_Operator_Create(GetML_Comm());

  if (ThisNS.GetNumVectors() == 0)
    ML_THROW("zero-dimension null space", -1);

  int size = ThisNS.GetMyLength();

  double* null_vect = 0;
  ML_memory_alloc((void **)(&null_vect), sizeof(double) * size * ThisNS.GetNumVectors(), "ns");

  int incr = 1;
  for (int v = 0 ; v < ThisNS.GetNumVectors() ; ++v)
    DCOPY_F77(&size, (double*)ThisNS.GetValues(v), &incr,
              null_vect + v * ThisNS.GetMyLength(), &incr);


  ML_Aggregate_Set_NullSpace(agg_object, NumPDEEquations,
                             ThisNS.GetNumVectors(), null_vect,
                             ThisNS.GetMyLength());

  if (CoarsenType == "Uncoupled")
    agg_object->coarsen_scheme = ML_AGGR_UNCOUPLED;
  else if (CoarsenType == "Uncoupled-MIS")
    agg_object->coarsen_scheme = ML_AGGR_HYBRIDUM;
  else if (CoarsenType == "MIS") {
   /* needed for MIS, otherwise it sets the number of equations to
    * the null space dimension */
    agg_object->max_levels  = -7;
    agg_object->coarsen_scheme = ML_AGGR_MIS;
  }
  else if (CoarsenType == "METIS")
    agg_object->coarsen_scheme = ML_AGGR_METIS;
  else {
    ML_THROW("Requested aggregation scheme (" + CoarsenType +
             ") not recognized", -1);
  }

  int NextSize = ML_Aggregate_Coarsen(agg_object, A.GetML_Operator(),
                                      &ML_Ptent, GetML_Comm());

  /* This is the old version
  int NextSize;

  if (CoarsenType == "Uncoupled") {
    NextSize = ML_Aggregate_CoarsenUncoupled(agg_object, A.GetML_Operator(),
                                             &ML_Ptent, GetML_Comm());
  }
  else if (CoarsenType == "MIS") {
    NextSize = ML_Aggregate_CoarsenMIS(agg_object, A.GetML_Operator(),
                                       &ML_Ptent, GetML_Comm());
  }
  else if (CoarsenType == "METIS") {
    ML ml_object;
    ml_object.ML_num_levels = 1; // dummy value needed by the call below
    ML_Aggregate_Set_NodesPerAggr(&ml_object,agg_object,0,NodesPerAggr);
    NextSize = ML_Aggregate_CoarsenMETIS(agg_object, A.GetML_Operator(),
                                         &ML_Ptent, GetML_Comm());
  }
  else {
    ML_THROW("Requested aggregation scheme (" + CoarsenType +
             ") not recognized", -1);
  }
  */

  ML_Operator_ChangeToSinglePrecision(ML_Ptent);

  int NumMyElements = NextSize;
  Space CoarseSpace(-1,NumMyElements);
  Ptent.Reshape(CoarseSpace,A.GetRangeSpace(),ML_Ptent,true);

  assert (NextSize * ThisNS.GetNumVectors() != 0);

  NextNS.Reshape(CoarseSpace, ThisNS.GetNumVectors());

  size = NextNS.GetMyLength();
  for (int v = 0 ; v < NextNS.GetNumVectors() ; ++v)
    DCOPY_F77(&size, agg_object->nullspace_vect + v * size, &incr,
              NextNS.GetValues(v), &incr);

  ML_Aggregate_Destroy(&agg_object);
  ML_memory_free((void**)(&null_vect));
}
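// ======================================================================
// Illustrative sketch (not part of the original source): a typical call
// to GetPtent() above, mirroring how it is used in
// Mortar_ML_Preconditioner::Compute() below.  A is the fine-level
// operator and ThisNS its (near-)null space; Ptent and NextNS are filled
// on output.
static void ExampleGetPtent(const Operator& A, const MultiVector& ThisNS)
{
  Teuchos::ParameterList List;
  List.set("aggregation: type", "Uncoupled");  // or "MIS", "METIS", ...
  List.set("PDE equations", 1);

  Operator    Ptent;
  MultiVector NextNS;
  GetPtent(A, List, ThisNS, Ptent, NextNS);    // tentative prolongator and
                                               // coarse-level null space
}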
/*----------------------------------------------------------------------*
 |  compute the preconditioner (public)                      m.gee 03/06|
 *----------------------------------------------------------------------*/
bool MOERTEL::Mortar_ML_Preconditioner::Compute()
{

    iscomputed_ = false;

    MLAPI::Init();

    // get parameters
    int     maxlevels     = mlparams_.get("max levels",10);
    int     maxcoarsesize = mlparams_.get("coarse: max size",10);
    double* nullspace     = mlparams_.get("null space: vectors",(double*)NULL);
    int     nsdim         = mlparams_.get("null space: dimension",1);
    int     numpde        = mlparams_.get("PDE equations",1);
    double  damping       = mlparams_.get("aggregation: damping factor",1.33);
    std::string  eigenanalysis = mlparams_.get("eigen-analysis: type", "Anorm");
    std::string  smoothertype  = mlparams_.get("smoother: type","symmetric Gauss-Seidel");
    std::string  coarsetype    = mlparams_.get("coarse: type","Amesos-KLU");
    std::string  ptype         = mlparams_.get("prolongator: type","mod_full");

    // create the missing rowmap Arrmap_
    Arrmap_ = Teuchos::rcp(MOERTEL::SplitMap(A_->RowMap(),*Annmap_));
    Teuchos::RCP<Epetra_Map> map1 = Arrmap_;
    Teuchos::RCP<Epetra_Map> map2 = Annmap_;

    // split Atilde
    //
    //  Atilde11 Atilde12
    //  Atilde21 Atilde22
    //
    Teuchos::RCP<Epetra_CrsMatrix> Atilde11;
    Teuchos::RCP<Epetra_CrsMatrix> Atilde12;
    Teuchos::RCP<Epetra_CrsMatrix> Atilde21;
    Teuchos::RCP<Epetra_CrsMatrix> Atilde22;
    MOERTEL::SplitMatrix2x2(Atilde_,map1,map2,Atilde11,Atilde12,Atilde21,Atilde22);

    // build Atildesplit
    //
    //  Atilde11  0
    //  0         I
    //
    Atildesplit_ = Teuchos::rcp(new Epetra_CrsMatrix(Copy,A_->RowMap(),50,false));
    MOERTEL::MatrixMatrixAdd(*Atilde11,false,1.0,*Atildesplit_,0.0);
    Teuchos::RCP<Epetra_CrsMatrix> tmp = Teuchos::rcp(MOERTEL::PaddedMatrix(*map2,1.0,1));
    tmp->FillComplete();
    MOERTEL::MatrixMatrixAdd(*tmp,false,1.0,*Atildesplit_,1.0);
    Atildesplit_->FillComplete();
    Atildesplit_->OptimizeStorage();

    // split A
    //
    //  A11 A12
    //  A21 A22
    //
    Teuchos::RCP<Epetra_CrsMatrix> A11;
    Teuchos::RCP<Epetra_CrsMatrix> A12;
    Teuchos::RCP<Epetra_CrsMatrix> A21;
    Teuchos::RCP<Epetra_CrsMatrix> A22;
    MOERTEL::SplitMatrix2x2(A_,map1,map2,A11,A12,A21,A22);

    // build Asplit_
    //
    //  A11  0
    //  0    A22
    //
    Asplit_ = Teuchos::rcp(new Epetra_CrsMatrix(Copy,A_->RowMap(),50,false));
    MOERTEL::MatrixMatrixAdd(*A11,false,1.0,*Asplit_,0.0);
    MOERTEL::MatrixMatrixAdd(*A22,false,1.0,*Asplit_,1.0);
    Asplit_->FillComplete();
    Asplit_->OptimizeStorage();

    // build BWT (padded to full size)
    //
    //  0   Mr Dinv
    //  0    I
    //
    Teuchos::RCP<Epetra_CrsMatrix> BWT = Teuchos::rcp(MOERTEL::MatMatMult(*B_,false,*WT_,false,10));
    tmp = Teuchos::rcp(MOERTEL::PaddedMatrix(BWT->RowMap(),0.0,25));
    MOERTEL::MatrixMatrixAdd(*BWT,false,1.0,*tmp,0.0);
    tmp->FillComplete(BWT->DomainMap(),BWT->RangeMap());
    BWT = tmp;
    tmp = Teuchos::null;

    // split BWT to obtain M = Mr Dinv
    Teuchos::RCP<Epetra_CrsMatrix> Zero11;
    Teuchos::RCP<Epetra_CrsMatrix> M;
    Teuchos::RCP<Epetra_CrsMatrix> Zero21;
    Teuchos::RCP<Epetra_CrsMatrix> I22;
    MOERTEL::SplitMatrix2x2(BWT,map1,map2,Zero11,M,Zero21,I22);


    // build matrix Ahat11 = Atilde11 - M Atilde22 M^T
    Teuchos::RCP<Epetra_CrsMatrix> Ahat11 = Teuchos::rcp(new Epetra_CrsMatrix(Copy,*map1,50,false));
    MOERTEL::MatrixMatrixAdd(*Atilde11,false,1.0,*Ahat11,0.0);
    Teuchos::RCP<Epetra_CrsMatrix> tmp1 = Teuchos::rcp(MOERTEL::MatMatMult(*Atilde22,false,*M,true,10));
    Teuchos::RCP<Epetra_CrsMatrix> tmp2 = Teuchos::rcp(MOERTEL::MatMatMult(*M,false,*tmp1,false,10));
    MOERTEL::MatrixMatrixAdd(*tmp2,false,-1.0,*Ahat11,1.0);
    Ahat11->FillComplete();
    Ahat11->OptimizeStorage();

    // build matrix Ahat
    //
    //  Ahat11   0   =   Atilde11 - M Atilde22 M^T   0
    //  0        0       0                           0
    //
    Ahat_ = Teuchos::rcp(MOERTEL::PaddedMatrix(A_->RowMap(),0.0,25));
    MOERTEL::MatrixMatrixAdd(*Ahat11,false,1.0,*Ahat_,0.0);
    Ahat_->FillComplete();
    Ahat_->OptimizeStorage();


    // build mlapi objects
    Space space(A_->RowMatrixRowMap());
    Operator mlapiAsplit(space,space,Asplit_.get(),false);
    Operator mlapiAtildesplit(space,space,Atildesplit_.get(),false);
    Operator mlapiAhat(space,space,Ahat_.get(),false);
    Operator mlapiBWT(space,space,BWT.get(),false);
    Operator mlapiBWTcoarse;
    Operator ImBWTfine = GetIdentity(space,space) - mlapiBWT;
    Operator ImBWTcoarse;
    Operator Ptent;
    Operator P;
    Operator Pmod;
    Operator Rtent;
    Operator R;
    Operator Rmod;
    Operator IminusA;
    Operator C;
    InverseOperator S;

    mlapiAtildesplit_.resize(maxlevels);
    mlapiAhat_.resize(maxlevels);
    mlapiImBWT_.resize(maxlevels);
    mlapiImWBT_.resize(maxlevels);
    mlapiRmod_.resize(maxlevels);
    mlapiPmod_.resize(maxlevels);
    mlapiS_.resize(maxlevels);

    mlapiAtildesplit_[0] = mlapiAtildesplit;
    mlapiAhat_[0]        = mlapiAhat;
    mlapiImBWT_[0]       = ImBWTfine;
    mlapiImWBT_[0]       = GetTranspose(ImBWTfine);


    // build nullspace
    MultiVector NS;
    MultiVector NextNS;
    NS.Reshape(mlapiAsplit.GetRangeSpace(),nsdim);
    if (nullspace)
    {
        for (int i=0; i<nsdim; ++i)
            for (int j=0; j<NS.GetMyLength(); ++j)
                NS(j,i) = nullspace[i*NS.GetMyLength()+j];
    }
    else
    {
        if (numpde==1) NS = 1.0;
        else
        {
            NS = 0.0;
            for (int i=0; i<NS.GetMyLength(); ++i)
                for (int j=0; j<numpde; ++j)
                    if ( i % numpde == j)
                        NS(i,j) = 1.0;
        }
    }

    double lambdamax;

    // construct the hierarchy
    int level=0;
    for (level=0; level<maxlevels-1; ++level)
    {
        // this level's smoothing operator
        mlapiAtildesplit = mlapiAtildesplit_[level];

        // build smoother
        if (Comm().MyPID()==0)
        {
            ML_print_line("-", 78);
            std::cout << "MOERTEL/ML : creating smoother level " << level << std::endl;
            fflush(stdout);
        }
        S.Reshape(mlapiAtildesplit,smoothertype,mlparams_);

        if (level) mlparams_.set("PDE equations", NS.GetNumVectors());

        if (Comm().MyPID()==0)
        {
            ML_print_line("-", 80);
            std::cout << "MOERTEL/ML : creating level " << level+1 << std::endl;
            ML_print_line("-", 80);
            fflush(stdout);
        }
        mlparams_.set("workspace: current level",level);

        // get tentative prolongator based on decoupled original system
        GetPtent(mlapiAsplit,mlparams_,NS,Ptent,NextNS);
        NS = NextNS;

        // do prolongator smoothing
        if (damping)
        {
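            // smoothed-aggregation prolongator: the damped Jacobi iteration
            // operator I - (damping / lambda_max) * D^{-1} * Asplit is applied
            // to the tentative prolongator Ptent, with lambda_max estimated
            // below by the method selected through "eigen-analysis: type"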
            if (eigenanalysis == "Anorm")
                lambdamax = MaxEigAnorm(mlapiAsplit,true);
            else if (eigenanalysis == "cg")
                lambdamax = MaxEigCG(mlapiAsplit,true);
            else if (eigenanalysis == "power-method")
                lambdamax = MaxEigPowerMethod(mlapiAsplit,true);
            else ML_THROW("incorrect parameter (" + eigenanalysis + ")", -1);

            IminusA = GetJacobiIterationOperator(mlapiAsplit,damping/lambdamax);
            P       = IminusA * Ptent;
            R       = GetTranspose(P);
            Rtent   = GetTranspose(Ptent);
        }
        else
        {
            P     = Ptent;
            Rtent = GetTranspose(Ptent);
            R     = Rtent;
            lambdamax = -1.0;
        }

        // do variational coarse grid of split original matrix Asplit
        C = GetRAP(R,mlapiAsplit,P);

        // compute the mortar projections on coarse grid
        mlapiBWTcoarse = GetRAP(Rtent,mlapiBWT,Ptent);
        ImBWTcoarse    = GetIdentity(C.GetDomainSpace(),C.GetRangeSpace());
        ImBWTcoarse    = ImBWTcoarse - mlapiBWTcoarse;

        // do modified prolongation and restriction
        if (ptype=="mod_full")
            Rmod = ImBWTcoarse * ( R * ImBWTfine ) + mlapiBWTcoarse * ( R * mlapiBWT );
        else if (ptype=="mod_middle")
            Rmod = ImBWTcoarse * ( R * ImBWTfine );
        else if (ptype=="mod_simple")
            Rmod = R * ImBWTfine;
        else if (ptype=="original")
            Rmod = R;
        else
            ML_THROW("incorrect parameter ( " + ptype + " )", -1);
        Pmod = GetTranspose(Rmod);

        // store matrix for construction of next level
        mlapiAsplit = C;

        // make coarse smoothing operator
        // make coarse residual operator
        mlapiAtildesplit_[level+1] = GetRAP(Rmod,mlapiAtildesplit,Pmod);
        mlapiAhat_[level+1]        = GetRAP(Rmod,mlapiAhat_[level],Pmod);
        mlapiImBWT_[level]         = ImBWTfine;
        mlapiImBWT_[level+1]       = ImBWTcoarse;
        mlapiImWBT_[level]         = GetTranspose(ImBWTfine);
        mlapiImWBT_[level+1]       = GetTranspose(ImBWTcoarse);
        mlapiRmod_[level]          = Rmod;
        mlapiPmod_[level]          = Pmod;
        mlapiS_[level]             = S;

        // prepare for next level
        mlapiBWT  = mlapiBWTcoarse;
        ImBWTfine = ImBWTcoarse;

        // break if coarsest level is below specified size
        if (mlapiAsplit.GetNumGlobalRows() <= maxcoarsesize)
        {
            ++level;
            break;
        }

    } // for (level=0; level<maxlevels-1; ++level)

    // do coarse smoother
    S.Reshape(mlapiAtildesplit_[level],coarsetype,mlparams_);
    mlapiS_[level] = S;

    // store max number of levels
    maxlevels_ = level+1;

    iscomputed_ = true;
    return true;
}