Example #1
NOX::Epetra::Vector::
Vector(const Teuchos::RCP<Epetra_Vector>& source,
       NOX::Epetra::Vector::MemoryType memoryType,
       NOX::CopyType type,
       Teuchos::RCP<NOX::Epetra::VectorSpace> vs)
{
  if (Teuchos::is_null(vs))
    vectorSpace = Teuchos::rcp(new NOX::Epetra::VectorSpaceL2);
  else
    vectorSpace = vs;

  if (memoryType == NOX::Epetra::Vector::CreateView)
    epetraVec = source;
  else {

    switch (type) {

    case DeepCopy:        // default behavior

      epetraVec = Teuchos::rcp(new Epetra_Vector(*source));
      break;

    case ShapeCopy:

      epetraVec = Teuchos::rcp(new Epetra_Vector(source->Map()));
      break;
    }

  }
}
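A hedged usage sketch of this constructor: CreateView wraps the supplied vector without copying it (Example #24 below uses the same mode), while the copying path duplicates values (DeepCopy) or only clones the map (ShapeCopy). Here `map` is an assumed Epetra_Map, and CreateCopy is assumed to be the other MemoryType enumerator.

Teuchos::RCP<Epetra_Vector> ev = Teuchos::rcp(new Epetra_Vector(map));
// view: the NOX vector shares storage with *ev
NOX::Epetra::Vector asView(ev, NOX::Epetra::Vector::CreateView);
// copy: DeepCopy duplicates the values instead of sharing them
NOX::Epetra::Vector asCopy(ev, NOX::Epetra::Vector::CreateCopy, NOX::DeepCopy);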
Example #2
/// building the stencil
void PoissonSolver::StencilGeometry(Teuchos::RCP<Epetra_Vector> & RHS,
                                    Teuchos::RCP<Epetra_CrsMatrix> & A)
{
    const Epetra_BlockMap & MyMap = RHS->Map();
    int NumMyElements = MyMap.NumMyElements();
    int* MyGlobalElements = MyMap.MyGlobalElements();
    double * rhsvalues = RHS->Values();

    std::vector<double> Values(5);
    std::vector<int> Indices(5);

    const auto & inside = _bend.getInsideMask();

    for (int lid = 0; lid < NumMyElements; ++ lid) {
        size_t NumEntries = 0;

        const size_t & gid = MyGlobalElements[lid];

        cutoffStencil(Indices,
                      Values,
                      rhsvalues[lid],
                      NumEntries,
                      inside,
                      gid);
        A->InsertGlobalValues(gid, NumEntries, &Values[0], &Indices[0]);
    }

    A->FillComplete();
    A->OptimizeStorage();
}
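The sequence above (InsertGlobalValues by global row, then FillComplete, then OptimizeStorage) is the standard Epetra assembly pattern. A minimal self-contained sketch of the same pattern for a 1-D Laplacian; `comm` is an assumed Epetra_Comm and the sizes are illustrative:

Epetra_Map map(100, 0, comm);        // 100 global rows
Epetra_CrsMatrix A(Copy, map, 3);    // at most 3 entries per row
for (int lid = 0; lid < map.NumMyElements(); ++lid) {
    int gid = map.GID(lid);
    double vals[3] = {-1.0, 2.0, -1.0};
    int cols[3] = {gid - 1, gid, gid + 1};
    if (gid == 0)                    // first row: drop the left neighbor
        A.InsertGlobalValues(gid, 2, &vals[1], &cols[1]);
    else if (gid == 99)              // last row: drop the right neighbor
        A.InsertGlobalValues(gid, 2, vals, cols);
    else
        A.InsertGlobalValues(gid, 3, vals, cols);
}
A.FillComplete();                    // builds the distributed structure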
Example #3
void PoissonSolver::plotPotential(BinaryVtkFile & vtkFile,
                                  const Teuchos::RCP<Epetra_Vector> & LHS,
                                  const VField_Edge_t & EFD,
                                  const int & component)
{
    const Epetra_BlockMap & Map = LHS->Map();
    const int * MyGlobalElements = Map.MyGlobalElements();
    const int NumMyElements = Map.NumMyElements();

    boost::shared_ptr<SField_Vert_t> scalarField = Utils::getScalarVertField(EFD);
    NDIndex<DIM> elem;
    char fieldName[50];
    ST * values = LHS->Values();

    sprintf(fieldName, "potential_%d", component);

    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const size_t idx = MyGlobalElements[lid] % _Nx;
        const size_t idy = MyGlobalElements[lid] / _Nx;
        elem[0] = Index(idx, idx);
        elem[1] = Index(idy, idy);

        scalarField->localElement(elem) = values[lid];
    }

    vtkFile.addScalarField(*scalarField, fieldName);
}
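The decoding above treats each global ID as a row-major 2-D index, gid = idy * _Nx + idx; the same pattern recurs in the PoissonSolver examples below (#12 and #14 through #17). As a tiny worked case with a hypothetical _Nx = 4: gid 6 yields idx = 6 % 4 = 2 and idy = 6 / 4 = 1.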
Example #4
 void ISVDUDV::Initialize( const Teuchos::RCP< Teuchos::ParameterList >& params,
                           const Teuchos::RCP< const Epetra_MultiVector >& ss,
                           const Teuchos::RCP< RBGen::FileIOHandler< Epetra_Operator > >& fileio) 
 {
   workU_ = Teuchos::rcp( new Epetra_MultiVector(ss->Map(),maxBasisSize_,false) );
   Epetra_LocalMap lclmap(ss->NumVectors(),0,ss->Comm());
   workV_ = Teuchos::rcp( new Epetra_MultiVector(lclmap,maxBasisSize_,false) );
 }
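The Epetra_LocalMap here replicates all of its indices on every process, so workV_ is a small, fully local factor, while workU_ inherits the distributed map of the snapshot set ss. A two-line sketch of the distinction (`comm` is an assumed Epetra_Comm):

Epetra_Map dist(1000, 0, comm);     // 1000 rows split across processes
Epetra_LocalMap local(10, 0, comm); // every process owns all 10 indices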
Example #5
void panzer::ScatterDirichletResidual_Epetra<panzer::Traits::Residual, Traits,LO,GO>::
evaluateFields(typename Traits::EvalData workset)
{ 
   std::vector<GO> GIDs;
   std::vector<int> LIDs;
 
   // for convenience pull out some objects from workset
   std::string blockId = workset.block_id;
   const std::vector<std::size_t> & localCellIds = workset.cell_local_ids;

   Teuchos::RCP<const EpetraLinearObjContainer> epetraContainer = epetraContainer_;
   Teuchos::RCP<Epetra_Vector> r = epetraContainer->get_f(); 

   // NOTE: A reordering of these loops will likely improve performance.
   //       The "getGIDFieldOffsets" call may be expensive; however,
   //       "getElementGIDs" can be cheaper. The lookup for LIDs
   //       may be more expensive!

   // scatter operation for each cell in workset
   for(std::size_t worksetCellIndex=0;worksetCellIndex<localCellIds.size();++worksetCellIndex) {
      std::size_t cellLocalId = localCellIds[worksetCellIndex];

      globalIndexer_->getElementGIDs(cellLocalId,GIDs); 

      // calculate the local IDs for this element
      LIDs.resize(GIDs.size());
      for(std::size_t i=0;i<GIDs.size();i++)
         LIDs[i] = r->Map().LID(GIDs[i]);

      // loop over each field to be scattered
      for(std::size_t fieldIndex = 0; fieldIndex < scatterFields_.size(); fieldIndex++) {
         int fieldNum = fieldIds_[fieldIndex];
   
         // this call "should" get the right ordering according to the Intrepid basis
         const std::pair<std::vector<int>,std::vector<int> > & indicePair 
               = globalIndexer_->getGIDFieldOffsets_closure(blockId,fieldNum, side_subcell_dim_, local_side_id_);
         const std::vector<int> & elmtOffset = indicePair.first;
         const std::vector<int> & basisIdMap = indicePair.second;
   
         // loop over basis functions
         for(std::size_t basis=0;basis<elmtOffset.size();basis++) {
            int offset = elmtOffset[basis];
            int lid = LIDs[offset];
            if(lid<0) // not on this processor!
               continue;

            int basisId = basisIdMap[basis];
            (*r)[lid] = (scatterFields_[fieldIndex])(worksetCellIndex,basisId);

            // record that you set a dirichlet condition
            if(dirichletCounter_!=Teuchos::null)
              (*dirichletCounter_)[lid] = 1.0;
         }
      }
   }
}
Example #6
void PeridigmNS::BoundaryCondition::evaluateParser(const int & localNodeID, double & currentValue, double & previousValue, const double & timeCurrent, const double & timePrevious){
  Teuchos::RCP<Epetra_Vector> x = peridigm->getX();
  const Epetra_BlockMap& threeDimensionalMap = x->Map();
  TEUCHOS_TEST_FOR_EXCEPT_MSG(threeDimensionalMap.ElementSize() != 3, "**** setVectorValues() must be called with map having element size = 3.\n");

  bool success(true);
  // set the coordinates and set the return value to 0.0
  if(success)
    success = rtcFunction->varValueFill(0, (*x)[localNodeID*3]);
  if(success)
    success = rtcFunction->varValueFill(1, (*x)[localNodeID*3 + 1]);
  if(success)
    success = rtcFunction->varValueFill(2, (*x)[localNodeID*3 + 2]);
  if(success)
    success = rtcFunction->varValueFill(4, 0.0);
  // evaluate at previous time
  if(success)
    success = rtcFunction->varValueFill(3, timePrevious);
  if(success)
    success = rtcFunction->execute();
  if(success)
    previousValue = rtcFunction->getValueOfVar("value");
  // evaluate at current time
  if(success)
    success = rtcFunction->varValueFill(3, timeCurrent);
  if(success)
    success = rtcFunction->execute();
  if(success)
    currentValue = rtcFunction->getValueOfVar("value");
  if(!success){
    string msg = "\n**** Error in BoundaryCondition::evaluateParser().\n";
    msg += "**** " + rtcFunction->getErrors() + "\n";
    TEUCHOS_TEST_FOR_EXCEPT_MSG(!success, msg);
  }

  // if this is any other boundary condition besides prescribed displacement,
  // get the previous value from evaluating the string function as above.
  // If it is a prescribed displacement bc, check to see if the increment is
  // zero. This could happen if the user specifies a constant prescribed displacement.
  // If so, the increment should be the current prescribed value
  // minus the existing field value instead of the parser evaluation.
  if(bcType==PRESCRIBED_DISPLACEMENT && currentValue - previousValue == 0.0)
  {
    Teuchos::RCP<Epetra_Vector> previousDisplacement = peridigm->getU();
    previousValue = (*previousDisplacement)[localNodeID*3 + coord];
  }

  // TODO: we should revisit how prescribed boundary conditions interact with initial conditions
  // in the case that the prescribed boundary condition at time zero doesn't match the initial condition:
  // which one wins?
}
Example #7
Library::
Library(Teuchos::RCP<const Epetra_MultiVector> input_coords, int itype)
  : input_type_(itype),
    numPartSizes(0),
    partGIDs(NULL),
    partSizes(NULL),
    input_graph_(0),
    input_matrix_(0),
    input_coords_(input_coords),
    costs_(0),
    weights_(0)
{
  input_map_ = Teuchos::rcp(&(input_coords->Map()), false);
}
Example #8
Teuchos::RCP<const Epetra_Vector>
EpetraExt::createInverseModelScalingVector(
  Teuchos::RCP<const Epetra_Vector> const& scalingVector
  )
{
  Teuchos::RCP<Epetra_Vector> invScalingVector =
    Teuchos::rcpWithEmbeddedObj(
      new Epetra_Vector(scalingVector->Map()),
      scalingVector
      );
  invScalingVector->Reciprocal(*scalingVector);
  return invScalingVector;
  // Above, we embed the forward scaling vector.  This is done in order to
  // achieve the exact same numerics as before this refactoring and to improve
  // runtime speed and accuracy.
}
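A hedged usage sketch (`map` is an assumed Epetra_Map; Reciprocal sets each entry of the result to 1/s_i):

Teuchos::RCP<Epetra_Vector> s = Teuchos::rcp(new Epetra_Vector(map));
s->PutScalar(2.0);
Teuchos::RCP<const Epetra_Vector> invS =
    EpetraExt::createInverseModelScalingVector(s);
// every entry of *invS is now 0.5, and the embedded RCP keeps s alive
// for as long as invS lives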
Example #9
void panzer::ScatterInitialCondition_Epetra<panzer::Traits::Tangent, Traits,LO,GO>::
evaluateFields(typename Traits::EvalData workset)
{ 
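   // NOTE: the assert below fires unconditionally, so this Tangent
   //       specialization aborts if ever called and the code after it is
   //       effectively unreachable.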
   TEUCHOS_ASSERT(false);

   std::vector<GO> GIDs;
   std::vector<int> LIDs;
 
   // for convenience pull out some objects from workset
   std::string blockId = workset.block_id;
   const std::vector<std::size_t> & localCellIds = workset.cell_local_ids;

   Teuchos::RCP<Epetra_Vector> x = epetraContainer_->get_x(); 

   // NOTE: A reordering of these loops will likely improve performance.
   //       The "getGIDFieldOffsets" call may be expensive; however,
   //       "getElementGIDs" can be cheaper. The lookup for LIDs
   //       may be more expensive!

   // scatter operation for each cell in workset
   for(std::size_t worksetCellIndex=0;worksetCellIndex<localCellIds.size();++worksetCellIndex) {
      std::size_t cellLocalId = localCellIds[worksetCellIndex];

      globalIndexer_->getElementGIDs(cellLocalId,GIDs); 

      // calculate the local IDs for this element
      LIDs.resize(GIDs.size());
      for(std::size_t i=0;i<GIDs.size();i++)
         LIDs[i] = x->Map().LID(GIDs[i]);

      // loop over each field to be scattered
      for (std::size_t fieldIndex = 0; fieldIndex < scatterFields_.size(); fieldIndex++) {
         int fieldNum = fieldIds_[fieldIndex];
         const std::vector<int> & elmtOffset = globalIndexer_->getGIDFieldOffsets(blockId,fieldNum);
   
         // loop over basis functions
         for(std::size_t basis=0;basis<elmtOffset.size();basis++) {

            int offset = elmtOffset[basis];
            int lid = LIDs[offset];
            if (lid != -1)
              (*x)[lid] = (scatterFields_[fieldIndex])(worksetCellIndex,basis).val();
         }
      }
   }
}
Example #10
static int run_test(Teuchos::RCP<const Epetra_MultiVector> &coords,
		    const Teuchos::ParameterList &params)
{
  Teuchos::RCP<Isorropia::Epetra::Partitioner> part =
    Teuchos::rcp(new Isorropia::Epetra::Partitioner(coords, params));

  // both Zoltan and Isorropia partitioners will use unit weights,
  // so we create unit weights to compute balances

  Epetra_MultiVector tmp(coords->Map(), 1);
  tmp.PutScalar(1.0);

  Teuchos::RCP<const Epetra_MultiVector> unitweights =
    Teuchos::rcp(new const Epetra_MultiVector(tmp));

  return check_test(coords, unitweights, part);
}
Example #11
void SeparableScatterScalarResponse<PHAL::AlbanyTraits::DistParamDeriv, Traits>::
postEvaluate(typename Traits::PostEvalData workset)
{
#if defined(ALBANY_EPETRA)
  // Here we scatter the *global* response and its derivatives
  Teuchos::RCP<Epetra_Vector> g = workset.g;
  Teuchos::RCP<Epetra_MultiVector> dgdp = workset.dgdp;
  Teuchos::RCP<Epetra_MultiVector> overlapped_dgdp = workset.overlapped_dgdp;
  if (g != Teuchos::null)
     for (std::size_t res = 0; res < this->global_response.size(); res++) {
       (*g)[res] = this->global_response[res].val();
   }
  if (dgdp != Teuchos::null) {
    Epetra_Export exporter(overlapped_dgdp->Map(), dgdp->Map());
    dgdp->Export(*overlapped_dgdp, exporter, Add);
  }
#endif
}
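The Export above is the usual overlapped-to-owned reduction: derivative contributions accumulated on ghosted rows are summed into the uniquely owned distribution. A minimal sketch of that pattern; the maps and vectors here are assumed to exist:

Epetra_Export exporter(overlappedMap, ownedMap); // source map, target map
ownedVec.PutScalar(0.0);
ownedVec.Export(overlappedVec, exporter, Add);   // sum duplicated entries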
Example #12
void PoissonSolver::initializeLHS(Teuchos::RCP<Epetra_Vector> & LHS) const
{
    const Epetra_BlockMap & Map = LHS->Map();
    const int * MyGlobalElements = Map.MyGlobalElements();
    const int NumMyElements = Map.NumMyElements();

    ST * values = LHS->Values();

    // size_t length = _bend.getLengthStraightSection() + 1;
    // size_t width = _bend.getWidthStraightSection() + 1;
    Vector_t position;
    _bunch.get_rmean(position);
    _bend.getPositionInCells(position);

    int lastX = (_bend.getGlobalDomain())[0].last();
    int lastY = (_bend.getGlobalDomain())[1].last();

    // NDIndex<DIM> initDom(Index(0,length), Index(lastY - width, lastY));

    NDIndex<DIM> elem;
    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const size_t idx = MyGlobalElements[lid] % _Nx;
        const size_t idy = MyGlobalElements[lid] / _Nx;
        elem[0] = Index(idx, idx);
        elem[1] = Index(idy, idy);

        Vector_t contr;
        if (idx > position(0)) {
            contr(0) = (idx - position(0)) / (lastX - position(0));
        } else {
            contr(0) = (position(0) - idx) / position(0);
        }
        if (idy > position(1)) {
            contr(1) = (idy - position(1)) / (lastY - position(1));
        } else {
            contr(1) = (position(1) - idy) / position(1);
        }

        const double val = sqrt(dot(contr, contr));
        dbg << std::min(10.0, -log(val)) << std::endl;
        values[lid] = -std::max(0.0, std::min(10.0,-log(val)));
    }
}
Example #13
Teuchos::RCP<const Epetra_MultiVector>
ReducedSpaceFactory::getProjector(const Teuchos::RCP<Teuchos::ParameterList> &params)
{
  const Teuchos::RCP<const Epetra_MultiVector> basis = this->getBasis(params);

  const Teuchos::RCP<const Epetra_Map> basisMap = mapDowncast(basis->Map());
  TEUCHOS_TEST_FOR_EXCEPT(Teuchos::is_null(basisMap));

  const Teuchos::RCP<const Epetra_Operator> collocationOperator =
    this->getSamplingOperator(params, *basisMap);

  Teuchos::RCP<const Epetra_MultiVector> result = basis;
  if (Teuchos::nonnull(collocationOperator)) {
    const Teuchos::RCP<Epetra_MultiVector> dualBasis(
        new Epetra_MultiVector(collocationOperator->OperatorRangeMap(), basis->NumVectors(), false));
    dualize(*basis, *collocationOperator, *dualBasis);
    result = dualBasis;
  }
  return result;
}
Example #14
void PoissonSolver::shiftLHS(SField_t & rho,
                             Teuchos::RCP<Epetra_Vector> & LHS,
                             double tau) const
{
    const Epetra_BlockMap & Map = LHS->Map();
    const int * MyGlobalElements = Map.MyGlobalElements();
    const int NumMyElements = Map.NumMyElements();

    NDIndex<DIM> elem;
    NDIndex<DIM> ldom = rho.getLayout().getLocalNDIndex();
    Index II = ldom[0], JJ = ldom[1];
    ST * values = LHS->Values();

    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const size_t idx = MyGlobalElements[lid] % _Nx;
        const size_t idy = MyGlobalElements[lid] / _Nx;
        elem[0] = Index(idx, idx);
        elem[1] = Index(idy, idy);

        rho.localElement(elem) = values[lid];
    }
    rho.fillGuardCells();

    if (tau > 0) {
        if (tau > 1) tau = 1;
        rho[II][JJ] += tau * rho[II+1][JJ] - tau * rho[II][JJ];
    } else {
        tau = - tau;
        if (tau > 1) tau = 1;
        rho[II][JJ] += tau * rho[II-1][JJ] - tau * rho[II][JJ];
    }

    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const int idx = MyGlobalElements[lid] % _Nx;
        const int idy = MyGlobalElements[lid] / _Nx;
        elem[0] = Index(idx, idx);
        elem[1] = Index(idy, idy);

        values[lid] = rho.localElement(elem);
    }
}
Example #15
void PoissonSolver::add(const SField_t & rho,
                        Teuchos::RCP<Epetra_Vector> & LHS) const
{
    const Epetra_BlockMap & Map = LHS->Map();
    const int * MyGlobalElements = Map.MyGlobalElements();
    const int NumMyElements = Map.NumMyElements();

    NDIndex<DIM> elem;
    NDIndex<DIM> ldom = rho.getLayout().getLocalNDIndex();
    Index II = ldom[0], JJ = ldom[1];
    ST * values = LHS->Values();

    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const size_t idx = MyGlobalElements[lid] % _Nx;
        const size_t idy = MyGlobalElements[lid] / _Nx;
        elem[0] = Index(idx, idx);
        elem[1] = Index(idy, idy);

        values[lid] += rho.localElement(elem);
    }
}
Example #16
void PoissonSolver::initialGuessLHS(Teuchos::RCP<Epetra_Vector> & LHS) const
{
    const Epetra_BlockMap & Map = LHS->Map();
    const int * MyGlobalElements = Map.MyGlobalElements();
    const int NumMyElements = Map.NumMyElements();
    Vector_t spos;
    NDIndex<DIM> elem;
    Vector_t origin = _mesh.get_origin();
    double totalQ = -_bunch.get_qtotal() / (2 * Physics::pi * Physics::epsilon_0);
    totalQ /= 97.63;
    double *values = LHS->Values();

    _bunch.get_rmean(spos);
    spos[0] = (spos[0] - origin(0)) / _mesh.get_meshSpacing(0);
    spos[1] = (spos[1] - origin(1)) / _mesh.get_meshSpacing(1);

    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const double dx = _hr[0] * (spos[0] - MyGlobalElements[lid] % _Nx);
        const double dy = _hr[1] * (spos[1] - MyGlobalElements[lid] / _Nx);
        values[lid] = totalQ / sqrt(dx*dx + dy*dy + _hr[0]*_hr[1]);
    }
}
Example #17
void PoissonSolver::fillRHS(const SField_t & rho,
                            Teuchos::RCP<Epetra_Vector> & RHS)
{
    const ST couplingConstant = 1 / (Physics::epsilon_0 * _gamma);
    const Epetra_BlockMap & Map = RHS->Map();
    const int * MyGlobalElements = Map.MyGlobalElements();
    const int NumMyElements = Map.NumMyElements();

    NDIndex<DIM> elem;
    ST * values = RHS->Values();
    // double minv = 0.0;
    // double maxv = 0.0;
    for (int lid = 0; lid < NumMyElements; ++ lid) {
        const size_t idx = MyGlobalElements[lid] % _Nx;
        const size_t idy = MyGlobalElements[lid] / _Nx;
        elem[0] = Index(idx, idx);
        elem[1] = Index(idy, idy);

        values[lid] = rho.localElement(elem) * couplingConstant;
        // if (minv > values[lid]) minv = values[lid];
        // if (maxv < values[lid]) maxv = values[lid];
    }
//     dbg << "rhs_{min}: " << minv << " - rhs_{max}: " << maxv << endl;
}
Example #18
int main(int argc, char *argv[]) {
#if defined(HAVE_MUELU_EPETRA)
#include <MueLu_UseShortNames.hpp>

  using Teuchos::RCP; // reference count pointers
  using Teuchos::rcp;
  using Teuchos::TimeMonitor;

  // =========================================================================
  // MPI initialization using Teuchos
  // =========================================================================
  Teuchos::GlobalMPISession mpiSession(&argc, &argv, NULL);

  bool success = false;
  try {
    RCP< const Teuchos::Comm<int> > comm = Teuchos::DefaultComm<int>::getComm();
    int MyPID   = comm->getRank();
    int NumProc = comm->getSize();

    const Teuchos::RCP<Epetra_Comm> epComm = Teuchos::rcp_const_cast<Epetra_Comm>(Xpetra::toEpetra(comm));

    // =========================================================================
    // Convenient definitions
    // =========================================================================
    //SC zero = Teuchos::ScalarTraits<SC>::zero(), one = Teuchos::ScalarTraits<SC>::one();

    // Instead of checking each time for rank, create a rank 0 stream
    RCP<Teuchos::FancyOStream> fancy = Teuchos::fancyOStream(Teuchos::rcpFromRef(std::cout));
    Teuchos::FancyOStream& fancyout = *fancy;
    fancyout.setOutputToRootOnly(0);

    // =========================================================================
    // Parameters initialization
    // =========================================================================
    Teuchos::CommandLineProcessor clp(false);
    GO nx = 100, ny = 100;
    GO maxCoarseSize = 10;
    LO maxLevels = 4;
    clp.setOption("nx",                   &nx,              "mesh size in x direction");
    clp.setOption("ny",                   &ny,              "mesh size in y direction");
    clp.setOption("maxCoarseSize",        &maxCoarseSize,   "maximum coarse size");
    clp.setOption("maxLevels",            &maxLevels,       "maximum number of multigrid levels");
    int mgridSweeps = 1; clp.setOption("mgridSweeps", &mgridSweeps, "number of multigrid sweeps within Multigrid solver.");
    std::string printTimings = "no";   clp.setOption("timings", &printTimings,     "print timings to screen [yes/no]");
    double tol               = 1e-12;  clp.setOption("tol",                   &tol,              "solver convergence tolerance");

    switch (clp.parse(argc,argv)) {
      case Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED:        return EXIT_SUCCESS; break;
      case Teuchos::CommandLineProcessor::PARSE_ERROR:
      case Teuchos::CommandLineProcessor::PARSE_UNRECOGNIZED_OPTION: return EXIT_FAILURE; break;
      case Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL:                               break;
    }

    // =========================================================================
    // Problem construction
    // =========================================================================
    RCP<TimeMonitor> globalTimeMonitor = rcp(new TimeMonitor(*TimeMonitor::getNewTimer("ScalingTest: S - Global Time"))), tm;

    comm->barrier();
    tm = rcp(new TimeMonitor(*TimeMonitor::getNewTimer("ScalingTest: 1 - Matrix Build")));

    Teuchos::ParameterList GaleriList;
    GaleriList.set("nx", nx);
    GaleriList.set("ny", ny);
    GaleriList.set("mx", epComm->NumProc());
    GaleriList.set("my", 1);
    GaleriList.set("lx", 1.0); // length of x-axis
    GaleriList.set("ly", 1.0); // length of y-axis
    GaleriList.set("diff", 1e-5);
    GaleriList.set("conv", 1.0);

    // create map
    Teuchos::RCP<Epetra_Map> epMap = Teuchos::rcp(Galeri::CreateMap("Cartesian2D", *epComm, GaleriList));

    // create coordinates
    Teuchos::RCP<Epetra_MultiVector> epCoord = Teuchos::rcp(Galeri::CreateCartesianCoordinates("2D", epMap.get(), GaleriList));

    // create matrix
    Teuchos::RCP<Epetra_CrsMatrix> epA = Teuchos::rcp(Galeri::CreateCrsMatrix("Recirc2D", epMap.get(), GaleriList));

    // Epetra -> Xpetra
    Teuchos::RCP<CrsMatrix> exA = Teuchos::rcp(new Xpetra::EpetraCrsMatrixT<int,Node>(epA));
    Teuchos::RCP<CrsMatrixWrap> exAWrap = Teuchos::rcp(new CrsMatrixWrap(exA));

    RCP<Matrix> A = Teuchos::rcp_dynamic_cast<Matrix>(exAWrap);
    int numPDEs = 1;
    A->SetFixedBlockSize(numPDEs);

    // set rhs and solution vector
    RCP<Epetra_Vector> B = Teuchos::rcp(new Epetra_Vector(*epMap));
    RCP<Epetra_Vector> X = Teuchos::rcp(new Epetra_Vector(*epMap));
    B->PutScalar(1.0);
    X->PutScalar(0.0);

    // Epetra -> Xpetra
    RCP<Vector> xB = Teuchos::rcp(new Xpetra::EpetraVectorT<int,Node>(B));
    RCP<Vector> xX = Teuchos::rcp(new Xpetra::EpetraVectorT<int,Node>(X));

    xX->setSeed(100);
    xX->randomize();

    // build null space vector
    RCP<const Map> map = A->getRowMap();
    RCP<MultiVector> nullspace = MultiVectorFactory::Build(map, numPDEs);

    for (int i=0; i<numPDEs; ++i) {
      Teuchos::ArrayRCP<Scalar> nsValues = nullspace->getDataNonConst(i);
      int numBlocks = nsValues.size() / numPDEs;
      for (int j=0; j< numBlocks; ++j) {
        nsValues[j*numPDEs + i] = 1.0;
      }
    }

    comm->barrier();
    tm = Teuchos::null;

    fancyout << "========================================================\nGaleri complete.\n========================================================" << std::endl;

    // =========================================================================
    // Preconditioner construction
    // =========================================================================
    comm->barrier();
    tm = rcp(new TimeMonitor(*TimeMonitor::getNewTimer("ScalingTest: 1.5 - MueLu read XML")));

    RCP<Hierarchy> H = rcp ( new Hierarchy() );
    H->setDefaultVerbLevel(Teuchos::VERB_HIGH);
    H->SetMaxCoarseSize(maxCoarseSize);

    // build finest Level
    RCP<MueLu::Level> Finest = H->GetLevel();
    Finest->setDefaultVerbLevel(Teuchos::VERB_HIGH);
    Finest->Set("A",A);
    Finest->Set("Nullspace",nullspace);

    // create factories for transfer operators
    RCP<TentativePFactory> PFact = Teuchos::rcp(new TentativePFactory());
    RCP<TransPFactory>     RFact = Teuchos::rcp(new TransPFactory());
    RFact->SetFactory("P", PFact);

    // build level smoothers
    // use symmetric Gauss-Seidel both for fine and coarse level smoother
    RCP<SmootherPrototype> smooProto;
    std::string ifpackType;
    Teuchos::ParameterList ifpackList;
    ifpackList.set("relaxation: sweeps", (LO) 1);
    ifpackList.set("relaxation: damping factor", (SC) 1.0);
    ifpackType = "RELAXATION";
    ifpackList.set("relaxation: type", "Symmetric Gauss-Seidel");

    smooProto = Teuchos::rcp( new TrilinosSmoother(ifpackType, ifpackList) );
    RCP<SmootherFactory> SmooFact;
    if (maxLevels > 1)
      SmooFact = rcp( new SmootherFactory(smooProto) );

    // design multigrid hierarchy
    FactoryManager M;
    M.SetFactory("P", PFact);
    M.SetFactory("R", RFact);
    M.SetFactory("Nullspace", PFact);
    M.SetFactory("Smoother", SmooFact);
    M.SetFactory("CoarseSolver", SmooFact);

    H->Setup(M, 0, maxLevels);

    comm->barrier();
    tm = Teuchos::null;

    // =========================================================================
    // System solution (Ax = b)
    // =========================================================================

    //
    // generate exact solution using a direct solver
    //
    RCP<Epetra_Vector> exactLsgVec = rcp(new Epetra_Vector(X->Map()));
    {
      fancyout << "========================================================\nCalculate exact solution." << std::endl;
      tm = rcp(new TimeMonitor(*TimeMonitor::getNewTimer("ScalingTest: 3 - direct solve")));
      exactLsgVec->PutScalar(0.0);
      exactLsgVec->Update(1.0,*X,1.0);
      Epetra_LinearProblem epetraProblem(epA.get(), exactLsgVec.get(), B.get());

      Amesos amesosFactory;
      RCP<Amesos_BaseSolver> rcp_directSolver = Teuchos::rcp(amesosFactory.Create("Amesos_Klu", epetraProblem));
      rcp_directSolver->SymbolicFactorization();
      rcp_directSolver->NumericFactorization();
      rcp_directSolver->Solve();

      comm->barrier();
      tm = Teuchos::null;
    }

    //
    // Solve Ax = b using AMG as a preconditioner in AztecOO
    //

    RCP<Epetra_Vector> precLsgVec = rcp(new Epetra_Vector(X->Map()));
    {
      fancyout << "========================================================\nUse multigrid hierarchy as preconditioner within CG." << std::endl;
      tm = rcp(new TimeMonitor(*TimeMonitor::getNewTimer("ScalingTest: 4 - AMG as preconditioner")));

      precLsgVec->PutScalar(0.0);
      precLsgVec->Update(1.0,*X,1.0);
      Epetra_LinearProblem epetraProblem(epA.get(), precLsgVec.get(), B.get());

      AztecOO aztecSolver(epetraProblem);
      aztecSolver.SetAztecOption(AZ_solver, AZ_gmres);

      MueLu::EpetraOperator aztecPrec(H);
      aztecSolver.SetPrecOperator(&aztecPrec);

      int maxIts = 100;
      //double tol2 = 1e-8;

      aztecSolver.Iterate(maxIts, tol);

      comm->barrier();
      tm = Teuchos::null;
    }

    //////////////////

    // use multigrid hierarchy as solver
    RCP<Vector> mgridLsgVec = VectorFactory::Build(map);
    mgridLsgVec->putScalar(0.0);
    {
      fancyout << "========================================================\nUse multigrid hierarchy as solver." << std::endl;
      tm = rcp (new TimeMonitor(*TimeMonitor::getNewTimer("ScalingTest: 5 - Multigrid Solve")));
      mgridLsgVec->update(1.0,*xX,1.0);
      H->IsPreconditioner(false);
      H->Iterate(*xB, *mgridLsgVec, mgridSweeps);
      comm->barrier();
      tm = Teuchos::null;
    }

    //////////////////

    fancyout << "========================================================\nExport results.\n========================================================" << std::endl;
    std::ofstream myfile;
    std::stringstream ss; ss << "example" << MyPID << ".txt";
    myfile.open (ss.str().c_str());

    //////////////////

    // loop over all procs
    for (int iproc=0; iproc < NumProc; iproc++) {
      if (MyPID==iproc) {
        int NumVectors1 = 2;
        int NumMyElements1 = epCoord->Map().NumMyElements();
        int MaxElementSize1 = epCoord->Map().MaxElementSize();
        int * FirstPointInElementList1 = NULL;
        if (MaxElementSize1!=1) FirstPointInElementList1 = epCoord->Map().FirstPointInElementList();
        double ** A_Pointers = epCoord->Pointers();

        if (MyPID==0) {
          myfile.width(8);
          myfile <<  "#     MyPID"; myfile << "    ";
          myfile.width(12);
          if (MaxElementSize1==1)
            myfile <<  "GID  ";
          else
            myfile <<  "     GID/Point";
          for (int j = 0; j < NumVectors1 ; j++)
          {
            myfile.width(20);
            myfile <<  "Value  ";
          }
          myfile << std::endl;
        }
        for (int i=0; i < NumMyElements1; i++) {
          for (int ii=0; ii< epCoord->Map().ElementSize(i); ii++) {
            int iii;
            myfile.width(10);
            myfile <<  MyPID; myfile << "    ";
            myfile.width(10);
            if (MaxElementSize1==1) {
              if(epCoord->Map().GlobalIndicesInt())
              {
                int * MyGlobalElements1 = epCoord->Map().MyGlobalElements();
                myfile << MyGlobalElements1[i] << "    ";
              }

              iii = i;
            }
            else {
              if(epCoord->Map().GlobalIndicesInt())
              {

                int * MyGlobalElements1 = epCoord->Map().MyGlobalElements();
                myfile <<  MyGlobalElements1[i]<< "/" << ii << "    ";
              }

              iii = FirstPointInElementList1[i]+ii;
            }
            for (int j = 0; j < NumVectors1 ; j++)
            {
              myfile.width(20);
              myfile <<  A_Pointers[j][iii];
            }

            myfile.precision(18); // set high precision for output

            // add solution vector entry
            myfile.width(25); myfile << (*exactLsgVec)[iii];

            // add preconditioned solution vector entry
            myfile.width(25); myfile << (*precLsgVec)[iii];

            Teuchos::ArrayRCP<SC> mgridLsgVecData = mgridLsgVec->getDataNonConst(0);
            myfile.width(25); myfile << mgridLsgVecData[iii];


            myfile.precision(6); // set default precision
            myfile << std::endl;
          }
        } // end loop over all lines on current proc
        myfile << std::flush;

        // synchronize procs
        comm->barrier();
        comm->barrier();
        comm->barrier();

      } // end myProc
    }

    ////////////
    myfile.close();

    comm->barrier();
    tm = Teuchos::null;
    globalTimeMonitor = Teuchos::null;

    if (printTimings == "yes") {
      TimeMonitor::summarize(A->getRowMap()->getComm().ptr(), std::cout, false, true, false, Teuchos::Union, "", true);
    }

    success = true;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
#else
  return EXIT_SUCCESS;
#endif // #if defined(HAVE_MUELU_EPETRA)
} //main
Example #19
void InitialConditions(const Teuchos::RCP<Epetra_Vector>& soln,
                       const Teuchos::ArrayRCP<Teuchos::ArrayRCP<Teuchos::ArrayRCP<Teuchos::ArrayRCP<int> > > >& wsElNodeEqID,
                       const Teuchos::ArrayRCP<std::string>& wsEBNames,
                       const Teuchos::ArrayRCP<Teuchos::ArrayRCP<Teuchos::ArrayRCP<double*> > > coords,
                       const int neq, const int numDim,
                       Teuchos::ParameterList& icParams, const bool hasRestartSolution) {
  // Called twice, with x and xdot. Different param lists are sent in.
  icParams.validateParameters(*AAdapt::getValidInitialConditionParameters(wsEBNames), 0);

  // Default function is Constant, unless a Restart solution vector
  // was used, in which case the Init Cond defaults to Restart.
  std::string name;

  if(!hasRestartSolution) name = icParams.get("Function", "Constant");
  else                    name = icParams.get("Function", "Restart");

  if(name == "Restart") return;

  // Handle element block specific constant data

  if(name == "EBPerturb" || name == "EBPerturbGaussian" || name == "EBConstant") {

    bool perturb_values = false;

    Teuchos::Array<double> defaultData(neq);
    Teuchos::Array<double> perturb_mag;

    // Only perturb if the user has told us by how much to perturb
    if(name != "EBConstant" && icParams.isParameter("Perturb IC")) {

      perturb_values = true;

      perturb_mag = icParams.get("Perturb IC", defaultData);

    }

    /* The element block-based IC specification here is currently a hack. It assumes the initial value is constant
     * within each element across the element block (or optionally perturbed somewhat element by element). The
     * proper way to do this would be to project the element integration point values to the nodes using the basis
     * functions and a consistent mass matrix.
     *
     * The current implementation uses a single integration point per element - this integration point value for this
     * element within the element block is specified in the input file (and optionally perturbed). An approximation
     * of the load vector is obtained by accumulating the resulting (possibly perturbed) value into the nodes. Then,
     * a lumped version of the mass matrix is inverted and used to solve for the approximate nodal point initial
     * conditions.
     */
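    /* Sketch of the accumulation below in symbols: for each solution slot i,
     *   soln[i] = ( sum over elements touching i of the element value x )
     *             / lumpedMM[i],
     * where lumpedMM[i] simply counts the contributions, i.e. a lumped-mass
     * solve for the nodal values. */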

    // Use an Epetra_Vector to hold the lumped mass matrix (has entries only on the diagonal). Zero-ed out.

    Epetra_Vector lumpedMM(soln->Map(), true);

    // Make sure soln is zeroed - we are accumulating into it

    for(int i = 0; i < soln->MyLength(); i++)
      (*soln)[i] = 0;

    // Loop over all worksets, elements, all local nodes: compute soln as a function of coord and wsEBName


    Teuchos::RCP<AAdapt::AnalyticFunction> initFunc;

    for(int ws = 0; ws < wsElNodeEqID.size(); ws++) { // loop over worksets

      Teuchos::Array<double> data = icParams.get(wsEBNames[ws], defaultData);
      // Call factory method from library of initial condition functions

      if(perturb_values) {

        if(name == "EBPerturb")
          initFunc = Teuchos::rcp(new AAdapt::ConstantFunctionPerturbed(neq, numDim, ws, data, perturb_mag));
        else // name == "EBPerturbGaussian"
          initFunc = Teuchos::rcp(new
                                  AAdapt::ConstantFunctionGaussianPerturbed(neq, numDim, ws, data, perturb_mag));
      }
      else
        initFunc = Teuchos::rcp(new AAdapt::ConstantFunction(neq, numDim, data));

      std::vector<double> X(neq);
      std::vector<double> x(neq);

      for(int el = 0; el < wsElNodeEqID[ws].size(); el++) { // loop over elements in workset

        for(int i = 0; i < neq; i++)
          X[i] = 0;

        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) // loop over node local to the element
          for(int i = 0; i < neq; i++)
            X[i] += coords[ws][el][ln][i]; // nodal coords

        for(int i = 0; i < neq; i++)
          X[i] /= (double)neq;

        initFunc->compute(&x[0], &X[0]);

        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) { // loop over node local to the element
          Teuchos::ArrayRCP<int> lid = wsElNodeEqID[ws][el][ln]; // local node ids

          for(int i = 0; i < neq; i++) {

            (*soln)[lid[i]] += x[i];
            //             (*soln)[lid[i]] += X[i]; // Test with coord values
            lumpedMM[lid[i]] += 1.0;

          }

        }
      }
    }

    //  Apply the inverted lumped mass matrix to get the final nodal projection

    for(int i = 0; i < soln->MyLength(); i++)
      (*soln)[i] /= lumpedMM[i];

    return;

  }

  if(name == "Coordinates") {

    // Place the coordinate locations of the nodes into the solution vector for an initial guess

    int numDOFsPerDim = neq / numDim;

    for(int ws = 0; ws < wsElNodeEqID.size(); ws++) {
      for(int el = 0; el < wsElNodeEqID[ws].size(); el++) {
        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) {

          const double* X = coords[ws][el][ln];
          Teuchos::ArrayRCP<int> lid = wsElNodeEqID[ws][el][ln];

/*
numDim = 3; numDOFsPerDim = 2 (coord soln, tgt soln)
X[0] = x;
X[1] = y;
X[2] = z;
lid[0] = DOF[0],eq[0] (x eqn)
lid[1] = DOF[0],eq[1] (y eqn)
lid[2] = DOF[0],eq[2] (z eqn)
lid[3] = DOF[1],eq[0] (x eqn)
lid[4] = DOF[1],eq[1] (y eqn)
lid[5] = DOF[1],eq[2] (z eqn)
*/

          for(int j = 0; j < numDOFsPerDim; j++)
            for(int i = 0; i < numDim; i++)
              (*soln)[lid[j * numDim + i]] = X[i];

        }
      }
    }

  }

  else {

    Teuchos::Array<double> defaultData(neq);
    Teuchos::Array<double> data = icParams.get("Function Data", defaultData);

    // Call factory method from library of initial condition functions
    Teuchos::RCP<AAdapt::AnalyticFunction> initFunc
      = createAnalyticFunction(name, neq, numDim, data);

    // Loop over all worksets, elements, all local nodes: compute soln as a function of coord
    std::vector<double> x(neq);

    for(int ws = 0; ws < wsElNodeEqID.size(); ws++) {
      for(int el = 0; el < wsElNodeEqID[ws].size(); el++) {
        for(int ln = 0; ln < wsElNodeEqID[ws][el].size(); ln++) {
          const double* X = coords[ws][el][ln];
          Teuchos::ArrayRCP<int> lid = wsElNodeEqID[ws][el][ln];

          for(int i = 0; i < neq; i++) x[i] = (*soln)[lid[i]];

          initFunc->compute(&x[0], X);

          for(int i = 0; i < neq; i++)(*soln)[lid[i]] = x[i];
        }
      }
    }

  }

}
Example #20
  Teuchos::RCP<Vector<Real> > basis( const int i ) const {
    Teuchos::RCP<EpetraMultiVector> e = Teuchos::rcp(new EpetraMultiVector(
        Teuchos::rcp(new Epetra_MultiVector(epetra_vec_->Map(), epetra_vec_->NumVectors(), true))));
    const Epetra_BlockMap & domainMap = e->getVector()->Map();

    Epetra_Map linearMap(domainMap.NumGlobalElements(), domainMap.NumMyElements(), 0, domainMap.Comm());
    int lid = linearMap.LID(i);
    if(lid >=0)
      (*e->getVector())[0][lid]= 1.0;

    return e;

    /*


    // Build IntVector of GIDs on all processors.
    const Epetra_Comm & comm = domainMap.Comm();
    int numMyElements = domainMap.NumMyElements();
    Epetra_BlockMap allGidsMap(-1, numMyElements, 1, 0, comm);
    Epetra_IntVector allGids(allGidsMap);
    for (int j=0; j<numMyElements; j++) {allGids[j] = domainMap.GID(j);}

    // Import my GIDs into an all-inclusive map. 
    int numGlobalElements = domainMap.NumGlobalElements();
    Epetra_LocalMap allGidsOnRootMap(numGlobalElements, 0, comm);
    Epetra_Import importer(allGidsOnRootMap, allGidsMap);
    Epetra_IntVector allGidsOnRoot(allGidsOnRootMap);
    allGidsOnRoot.Import(allGids, importer, Insert);
    Epetra_Map rootDomainMap(-1, allGidsOnRoot.MyLength(), allGidsOnRoot.Values(), domainMap.IndexBase(), comm);

    for (int j = 0; j < this->dimension(); j++) {
      // Put 1's in slots
      int curGlobalCol = rootDomainMap.GID(i); // Should return same value on all processors
      if (domainMap.MyGID(curGlobalCol)){
        int curCol = domainMap.LID(curGlobalCol);
        (*e->getVector())[0][curCol]= 1.0;
      }
    }

    return e;

    */
  }
Example #21
 /** \brief Clone to make a new (uninitialized) vector.
 */
 Teuchos::RCP<Vector<Real> > clone() const{
   return Teuchos::rcp(new EpetraMultiVector( 
 	     Teuchos::rcp(new Epetra_MultiVector(epetra_vec_->Map(),epetra_vec_->NumVectors(),false)) ));
 }
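Together, basis(i) and clone() supply the canonical-basis and allocation hooks this vector wrapper needs; a caller sketch (the surrounding Vector<Real> interface is assumed):

Teuchos::RCP<Vector<Real> > ei = v.basis(3); // unit vector along global index 3
Teuchos::RCP<Vector<Real> > w  = v.clone();  // same layout, uninitialized entries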
Example #22
/*----------------------------------------------------------------------*
 | solve problem (public)                                    mwgee 12/05|
 *----------------------------------------------------------------------*/
bool MOERTEL::Manager::Solve(Epetra_Vector& sol, const Epetra_Vector& rhs)
{
  // test for solver parameters
  if (solverparams_==Teuchos::null)
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** No solver parameters set, use SetSolverParameters(ParameterList& params)\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }
  
  // test for problemmap_
  if (problemmap_==Teuchos::null)
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** No problem map set, use SetProblemMap(Epetra_Map* map)\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }
  
  // test for inputmatrix
  if (inputmatrix_==Teuchos::null)
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** No inputmatrix set, use SetInputMatrix(Epetra_CrsMatrix* inputmatrix, bool DeepCopy = false)\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }
  
  // test whether problemmap_ matches RangeMap() of inputmatrix
  if (!problemmap_->PointSameAs(inputmatrix_->RangeMap()))
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** problem map does not match range map of input matrix\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }
  
  // test whether maps of rhs and sol are ok
  if (!problemmap_->PointSameAs(rhs.Map()) || 
      !problemmap_->PointSameAs(sol.Map()) )
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** problem map does not match map of rhs and/or sol\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }  
  
  // test whether interfaces are complete and have been integrated
  std::map<int,Teuchos::RCP<MOERTEL::Interface> >::iterator curr;
  for (curr=interface_.begin(); curr != interface_.end(); ++curr)
  {
    if (curr->second->IsComplete() == false)
    {
      cout << "***ERR*** MOERTEL::Manager::Solve:\n"
           << "***ERR*** interface " << curr->second->Id() << " is not IsComplete()\n"
           << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
      return false;
    }
    if (curr->second->IsIntegrated() == false)
    {
      cout << "***ERR*** MOERTEL::Manager::Solve:\n"
           << "***ERR*** interface " << curr->second->Id() << " is not integrated yet\n"
           << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
      return false;
    }
  }
  
  // test whether we have M and D matrix
  if (D_==Teuchos::null || M_==Teuchos::null)
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** Matrix M or D is NULL\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }
  
  //---------------------------------------------------------------------------
  // make solution and rhs vector matching the system
  Teuchos::RCP<Epetra_Vector> b = Teuchos::rcp(const_cast<Epetra_Vector*>(&rhs)); 
  b.release();
  Teuchos::RCP<Epetra_Vector> x = Teuchos::rcp(&sol); 
  x.release();
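  // (release() drops ownership, so b and x start out as non-owning views
  //  of the caller's rhs and sol)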

  //---------------------------------------------------------------------------
  // get type of system to be used/generated
  Teuchos::RCP<Epetra_CrsMatrix> matrix = Teuchos::null;
  string system = solverparams_->get("System","None");
  if (system=="None")
  {
    cout << "***WRN*** MOERTEL::Manager::Solve:\n"
         << "***WRN*** parameter 'System' was not chosen, using default\n"
         << "***WRN*** which is 'SaddleSystem'\n"
         << "***WRN*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    solverparams_->set("System","SaddleSystem");
    system = "SaddleSystem";
  }
  
  //---------------------------------------------------------------------------
  // build a saddle point system
  if (system=="SaddleSystem"  || system=="saddlesystem"  || system=="SADDLESYSTEM" || 
      system=="Saddle_System" || system=="saddle_system" || system=="SADDLE_SYSTEM")
  {
    if (saddlematrix_==Teuchos::null)
    {
      Epetra_CrsMatrix* tmp = MakeSaddleProblem();
      if (!tmp)
      {
        cout << "***ERR*** MOERTEL::Manager::Solve:\n"
             << "***ERR*** MakeSaddleProblem() returned NULL\n"
             << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
        return false;
      }
    }
    matrix = saddlematrix_;
    b = Teuchos::rcp(new Epetra_Vector(*saddlemap_,true)); 
    b.set_has_ownership();
    x = Teuchos::rcp(new Epetra_Vector(*saddlemap_,false)); 
    x.set_has_ownership();
    for (int i=0; i<rhs.MyLength(); ++i)
    {
      (*b)[i] = rhs[i];
      (*x)[i] = sol[i];
    }
  }

  //---------------------------------------------------------------------------
  // build a spd system
  else if (system=="SPDSystem"  || system=="spdsystem" || system=="spd_system" || 
           system=="SPD_System" || system=="SPDSYSTEM" || system=="SPD_SYSTEM")
  {
    if (spdmatrix_==Teuchos::null)
    {
      Epetra_CrsMatrix* tmp = MakeSPDProblem();
      if (!tmp)
      {
        cout << "***ERR*** MOERTEL::Manager::Solve:\n"
             << "***ERR*** MakeSPDProblem() returned NULL\n"
             << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
        return false;
      }
    }
    matrix = spdmatrix_;
    // we have to multiply the rhs vector b with spdrhs_ to fit with spdmatrix_
    if (spdrhs_==Teuchos::null)
    {
      cout << "***ERR*** MOERTEL::Manager::Solve:\n"
           << "***ERR*** Cannot build righthandside for spd problem\n"
           << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
      return false;
    }
    Epetra_Vector *tmp = new Epetra_Vector(b->Map(),false);
    int err = spdrhs_->Multiply(false,*b,*tmp);
    if (err)
    {
      cout << "***ERR*** MOERTEL::Manager::Solve:\n"
           << "***ERR*** spdrhs_->Multiply returned err = " << err << endl
           << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
      return false;
    }
    b = Teuchos::rcp(tmp);
    b.set_has_ownership();
  }

  //---------------------------------------------------------------------------
  // unknown parameter "System"
  else
  {
    cout << "***ERR*** MOERTEL::Manager::Solve:\n"
         << "***ERR*** Unknown type of parameter 'System': " << system << "\n"
         << "***ERR*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
    return false;
  }
  
  //---------------------------------------------------------------------------
  // create a mortar solver class instance
  if (solver_==Teuchos::null)
    solver_ = Teuchos::rcp(new MOERTEL::Solver(Comm(),OutLevel()));
  
  //---------------------------------------------------------------------------
  // solve
  bool ok = solver_->Solve(solverparams_,matrix,x,b,*this);
  if (!ok)
  {
    if (Comm().MyPID()==0)
    cout << "***WRN*** MOERTEL::Manager::Solve:\n"
         << "***WRN*** MOERTEL::Solver::Solve returned an error\n"
         << "***WRN*** file/line: " << __FILE__ << "/" << __LINE__ << "\n";
  }
  
  //---------------------------------------------------------------------------
  // copy solution back to sol if necessary
  if (x.has_ownership())
  {
    for (int i=0; i<sol.MyLength(); ++i)
      sol[i] = (*x)[i];
  }

  return ok;
}
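A hedged sketch of driving this method, using only the parameter and method names that appear in the checks above (matrix, map, and interface setup omitted):

Teuchos::ParameterList params;
params.set("System", "SPDSystem");    // or "SaddleSystem"
manager.SetSolverParameters(params);  // named in the error text above
bool ok = manager.Solve(sol, rhs);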
Example #23
bool ContinuationManager::
BuildLOCAPeriodicStepper(const Teuchos::RCP<EpetraExt::MultiComm> globalComm)
{

  if (comm->MyPID()==0) std::cout << std::endl << "Building the LOCA stepper..." << std::endl;

  // Make sure the problem has been set
  TEUCHOS_TEST_FOR_EXCEPTION( problem == Teuchos::null, 
      std::logic_error,
      "ContinuationManager has not been given a valid ProblemLOCAPrototype");

  // Create the Epetra_RowMatrix for the Jacobian/Preconditioner
  Teuchos::RCP <Epetra_RowMatrix> jacobian = problem->GetJacF();

  // Get the initial guess vector
  Teuchos::RCP <Epetra_Vector> initialGuess = 
    problem->GetInitialGuess();

  // NOX and LOCA sublist
  Teuchos::RCP <Teuchos::ParameterList> noxAndLocaList =
    Teuchos::rcp(new Teuchos::ParameterList(taskList->sublist("NOX and LOCA")));

  // Create the lists needed for linear systems
  Teuchos::ParameterList & noxPrintingList = 
    noxAndLocaList->
      sublist("NOX").
        sublist("Printing");
  Teuchos::ParameterList & linearSystemList = 
    noxAndLocaList->
      sublist("NOX").
        sublist("Direction").
          sublist("Newton").
            sublist("Linear Solver");

  // Create the interface between the test problem and the nonlinear solver
  // This is created by the user using inheritance of the abstract base 
  // class
  Teuchos::RCP <LOCAInterface> interface = 
    Teuchos::rcp(new LOCAInterface(problem,Teuchos::rcp(&*this,false)));

  Epetra_MultiVector guessMV(initialGuess->Map(), globalComm->NumTimeStepsOnDomain());
  for (int i=0; i<globalComm->NumTimeStepsOnDomain(); i++) *(guessMV(i)) = *initialGuess;


std::cout << "XXX  num time steps on domain = " <<  globalComm->NumTimeStepsOnDomain() << std::endl;

  double dt = 1.0;
  Teuchos::RCP <LOCA::Epetra::Interface::xyzt> xyzt_interface = 
   Teuchos::rcp(new LOCA::Epetra::Interface::xyzt(interface, guessMV, jacobian, globalComm, *initialGuess, dt ));


  Teuchos::RCP <LOCA::Epetra::Interface::Required> interfaceRequired = xyzt_interface;
  Teuchos::RCP <NOX::Epetra::Interface::Jacobian> interfaceJacobian = xyzt_interface;

  Teuchos::RCP<Epetra_RowMatrix> Axyzt =
     Teuchos::rcp(&(xyzt_interface->getJacobian()),false);
  Epetra_Vector& solnxyzt = xyzt_interface->getSolution();


  // Create the linear system
  Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linearSystem = 
    Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(noxPrintingList, 
          linearSystemList,
          interfaceRequired,
          interfaceJacobian,
          Axyzt,
          solnxyzt));

  // Get the parameter vector
  LOCA::ParameterVector continuableParams = problem->GetContinuableParams();

  // Create Epetra factory
  Teuchos::RCP <LOCA::Abstract::Factory> epetraFactory =
    Teuchos::rcp(new LOCA::Epetra::Factory);

  // Create the loca vector
  Teuchos::RCP <NOX::Epetra::Vector> locaInitialGuess = 
    Teuchos::rcp (new NOX::Epetra::Vector(solnxyzt));

  // Instantiate the constraint objects
  //if (isConstrainedProblem) 
  if (interfaceConstraint != Teuchos::null) {

//    // Instantiate the constraint
//    Teuchos::RCP <PhaseConstraint> phaseConstraint = 
//      Teuchos::rcp(new PhaseConstraint(problem,*locaInitialGuess));
//
//    // Instantiate the interface
//    Teuchos::RCP <LOCA::MultiContinuation::ConstraintInterface> 
//      interfaceConstraint = phaseConstraint;

    // Instantiate the constraint parameters names
    Teuchos::RCP< std::vector<std::string> > constraintParamsNames = 
      Teuchos::rcp(new std::vector<std::string>());

    // The user-defined constrained parameters
    Teuchos::ParameterList & constraintParams = 
      taskList->sublist("Continuation Manager").
                   sublist("Constraint").
		    sublist("Constraint Parameters");

    // Getting the parameter names from the user-defined list
    Teuchos::ParameterList::ConstIterator i;
    for (i = constraintParams.begin(); i !=constraintParams.end(); ++i) 
      constraintParamsNames->push_back(
	  constraintParams.get<std::string>(constraintParams.name(i)));

    // Instantiating the constraint list
    Teuchos::ParameterList & constraintsList =
      noxAndLocaList->sublist("LOCA").sublist("Constraints");
    constraintsList.set("Constraint Object", interfaceConstraint);
    constraintsList.set("Constraint Parameter Names", 
	constraintParamsNames);
    constraintsList.set("Bordered Solver Method", "Householder");

  }

  // Create the global data object
  locaGlobalData = LOCA::createGlobalData(noxAndLocaList, epetraFactory);

  // Create the Group
  Teuchos::RCP<LOCA::Epetra::Group> group = 
    Teuchos::rcp(new LOCA::Epetra::Group(locaGlobalData, noxPrintingList, 
      interfaceRequired, *locaInitialGuess, linearSystem, continuableParams));

  // Create the Solver convergence test
  Teuchos::RCP<NOX::StatusTest::NormF> normTolerance = 
    Teuchos::rcp(new NOX::StatusTest::NormF(
	  taskList->
	    sublist("Continuation Manager").
	      sublist("Continuation").
		get<double>("Nonlinear Step Tolerance"),
	  NOX::StatusTest::NormF::Scaled));
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxIterations = 
    Teuchos::rcp(new NOX::StatusTest::MaxIters(
	  noxAndLocaList->
	    sublist("LOCA").
	      sublist("Stepper").
	        get<int>("Max Nonlinear Iterations")));
  Teuchos::RCP<NOX::StatusTest::Combo> combo = 
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(normTolerance);
  combo->addStatusTest(maxIterations);

  // Create the stepper
  locaStepper = Teuchos::rcp (new LOCA::Stepper(locaGlobalData, group, combo, noxAndLocaList));

  // Performing some checks
  ValidateLOCAStepper();

  return true;
}
Example #24
int main(int argc, char *argv[])
{

  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  bool success = false;
  try {
    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements
        << " cannot be < number of processors = " << NumProc << std::endl;
      std::cout << "Test failed!" << std::endl;
      throw "NOX Error";
    }

    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<Interface> interface =
      Teuchos::rcp(new Interface(NumGlobalElements, Comm));

    // Get the vector from the Problem
    Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
    Teuchos::RCP<NOX::Epetra::Vector> noxSoln =
      Teuchos::rcp(new NOX::Epetra::Vector(soln,
            NOX::Epetra::Vector::CreateView));

    // Set the PDE factor (for nonlinear forcing term).  This could be specified
    // via user input.
    interface->setPDEfactor(1000.0);

    // Set the initial guess
    soln->PutScalar(1.0);

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);
    Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

    // Set the nonlinear solver method
    nlParams.set("Nonlinear Solver", "Line Search Based");

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::TestDetails +
          NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error +
          NOX::Utils::TestDetails);

    // Create a print class for controlling output below
    NOX::Utils printing(printParams);

    // Sublist for line search
    Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
    searchParams.set("Method", "Full Step");

    // Sublist for direction
    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    dirParams.set("Method", "Newton");
    Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
    newtonParams.set("Forcing Term Method", "Constant");
    //newtonParams.set("Forcing Term Method", "Type 1");

    // Sublist for linear solver for the Newton method
    Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", 800);
    lsParams.set("Tolerance", 1e-4);

    // Various Preconditioner options
    lsParams.set("Preconditioner", "None");
    //lsParams.set("Preconditioner", "AztecOO");
    //lsParams.set("Preconditioner", "New Ifpack");
    lsParams.set("Preconditioner Reuse Policy", "Reuse");
    //lsParams.set("Preconditioner Reuse Policy", "Recompute");
    //lsParams.set("Preconditioner Reuse Policy", "Rebuild");
    lsParams.set("Max Age Of Prec", 5);

    // Add a user defined pre/post operator object
    Teuchos::RCP<NOX::Abstract::PrePostOperator> ppo =
      Teuchos::rcp(new UserPrePostOperator(printing));
    nlParams.sublist("Solver Options").set("User Defined Pre/Post Operator",
        ppo);

    // Let's force all status tests to do a full check
    nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete");

    // Create all possible Epetra_Operators.
    // 1. User supplied (Epetra_RowMatrix)
    Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();
    // 2. Matrix-Free (Epetra_Operator)
    Teuchos::RCP<NOX::Epetra::MatrixFree> MF =
      Teuchos::rcp(new NOX::Epetra::MatrixFree(printParams, interface,
            *noxSoln));
    // 3. Finite Difference (Epetra_RowMatrix)
    Teuchos::RCP<NOX::Epetra::FiniteDifference> FD =
      Teuchos::rcp(new NOX::Epetra::FiniteDifference(printParams, interface,
            *soln));

    // Create the linear system
    Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = MF;
    Teuchos::RCP<NOX::Epetra::Interface::Preconditioner> iPrec = FD;
    Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys =
      Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
            //interface,
            iJac, MF,
            iPrec, FD,
            *soln));

    // Create the Group
    NOX::Epetra::Vector initialGuess(soln, NOX::Epetra::Vector::CreateView);
    Teuchos::RCP<NOX::Epetra::Group> grpPtr =
      Teuchos::rcp(new NOX::Epetra::Group(printParams,
            iReq,
            initialGuess,
            linSys));
    NOX::Epetra::Group& grp = *grpPtr;

    // For LeanMatrixFree, disable linear resid checking
    grp.disableLinearResidualComputation(true);

    // uncomment the following for loca supergroups
    //MF->setGroupForComputeF(*grpPtr);
    //FD->setGroupForComputeF(*grpPtr);

    // Create the convergence tests
    Teuchos::RCP<NOX::StatusTest::NormF> absresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
    Teuchos::RCP<NOX::StatusTest::NormF> relresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
    Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
      Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
    Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
      Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
    Teuchos::RCP<NOX::StatusTest::Combo> converged =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
    converged->addStatusTest(absresid);
    converged->addStatusTest(relresid);
    converged->addStatusTest(wrms);
    converged->addStatusTest(update);
    Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
      Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
    Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
      Teuchos::rcp(new NOX::StatusTest::FiniteValue);
    Teuchos::RCP<NOX::StatusTest::Combo> combo =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
    combo->addStatusTest(fv);
    combo->addStatusTest(converged);
    combo->addStatusTest(maxiters);

    // Create the solver
    Teuchos::RCP<NOX::Solver::Generic> solver =
      NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);

    // For LeanMatrixFree, get unperturbed F from solver
    MF->setSolverForComputeJacobian(solver);

    NOX::StatusTest::StatusType solvStatus = solver->solve();

    // End Nonlinear Solver **************************************

    // Get the Epetra_Vector with the final solution from the solver
    const NOX::Epetra::Group& finalGroup =
      dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
    const Epetra_Vector& finalSolution =
      (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).
      getEpetraVector();

    // Output the parameter list
    if (verbose) {
      if (printing.isPrintType(NOX::Utils::Parameters)) {
        printing.out() << std::endl << "Final Parameters" << std::endl
          << "****************" << std::endl;
        solver->getList().print(printing.out());
        printing.out() << std::endl;
      }
    }

    // Print solution
    char file_name[25];
    FILE *ifp;
    int NumMyElements = soln->Map().NumMyElements();
    (void) sprintf(file_name, "output.%d",MyPID);
    ifp = fopen(file_name, "w");
    for (int i=0; i<NumMyElements; i++)
      fprintf(ifp, "%d  %E\n", soln->Map().MinMyGID()+i, finalSolution[i]);
    fclose(ifp);


    // Tests
    int status = 0; // Converged

    // 1. Convergence
    if (solvStatus != NOX::StatusTest::Converged) {
      status = 1;
      if (printing.isPrintType(NOX::Utils::Error))
        printing.out() << "Nonlinear solver failed to converge!" << std::endl;
    }
#ifndef HAVE_MPI
    // 2. Linear solve iterations (659) - SERIAL TEST ONLY!
    //    The number of linear iterations changes with # of procs.
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 659) {
      status = 2;
    }
#endif
    // 3. Nonlinear solve iterations (10)
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 10)
      status = 3;
    // 4. Test the pre/post iterate options
    {
      UserPrePostOperator & ppo2 =
        dynamic_cast<UserPrePostOperator&>(*ppo.get());
      if (ppo2.getNumRunPreIterate() != 10)
        status = 4;
      if (ppo2.getNumRunPostIterate() != 10)
        status = 4;
      if (ppo2.getNumRunPreSolve() != 1)
        status = 4;
      if (ppo2.getNumRunPostSolve() != 1)
        status = 4;
    }

    success = status==0;
    // Summarize test results
    if (success)
      printing.out() << "Test passed!" << std::endl;
    else
      printing.out() << "Test failed!" << std::endl;

    printing.out() << "Status = " << status << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
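The test block above queries a UserPrePostOperator for the number of times each solver hook fired. That class is not shown in this excerpt; the following is a minimal sketch of what such an observer could look like, written against the real NOX::Abstract::PrePostOperator interface (the class name and counter accessors mirror the test but are otherwise hypothetical):

#include "NOX_Abstract_PrePostOperator.H"
#include "NOX_Utils.H"

// Minimal sketch (not the test's actual implementation): count how often
// the solver fires each pre/post hook, matching the counters checked above.
class CountingPrePostOperator : public NOX::Abstract::PrePostOperator {
public:
  CountingPrePostOperator(const NOX::Utils& u) :
    utils(u), numPreIterate(0), numPostIterate(0),
    numPreSolve(0), numPostSolve(0) {}

  void runPreIterate(const NOX::Solver::Generic& solver)
  { ++numPreIterate; }   // before each nonlinear iteration

  void runPostIterate(const NOX::Solver::Generic& solver)
  { ++numPostIterate; }  // after each nonlinear iteration

  void runPreSolve(const NOX::Solver::Generic& solver)
  { ++numPreSolve; }     // once, before solver->solve()

  void runPostSolve(const NOX::Solver::Generic& solver)
  { ++numPostSolve; }    // once, after solver->solve()

  int getNumRunPreIterate() const  { return numPreIterate; }
  int getNumRunPostIterate() const { return numPostIterate; }
  int getNumRunPreSolve() const    { return numPreSolve; }
  int getNumRunPostSolve() const   { return numPostSolve; }

private:
  const NOX::Utils& utils;
  int numPreIterate, numPostIterate, numPreSolve, numPostSolve;
};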
Exemplo n.º 25
0
int main(int argc, char *argv[])
{
 
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else 
    NumGlobalElements = 101;

  // The number of unknowns must be at least equal to the 
  // number of processors.
  if (NumGlobalElements < NumProc) {
    std::cout << "numGlobalBlocks = " << NumGlobalElements 
	 << " cannot be < number of processors = " << NumProc << std::endl;
    std::cout << "Test failed!" << std::endl;
    throw "NOX Error";
  }

  // Create the interface between NOX and the application
  // This object is derived from NOX::Epetra::Interface
  Teuchos::RCP<Interface> interface = 
    Teuchos::rcp(new Interface(NumGlobalElements, Comm));

  // Set the PDE factor (for nonlinear forcing term).  This could be specified
  // via user input.
  interface->setPDEfactor(100000.0);

  // Use a scaled vector space.  The scaling must also be registered
  // with the linear solver so the linear system is consistent!
  Teuchos::RCP<Epetra_Vector> scaleVec = 
    Teuchos::rcp(new Epetra_Vector( *(interface->getSolution())));
  scaleVec->PutScalar(2.0);
  Teuchos::RCP<NOX::Epetra::Scaling> scaling = 
    Teuchos::rcp(new NOX::Epetra::Scaling);
  scaling->addUserScaling(NOX::Epetra::Scaling::Left, scaleVec);

  // Use a weighted vector space for scaling all norms
  Teuchos::RCP<NOX::Epetra::VectorSpace> weightedVectorSpace = 
    Teuchos::rcp(new NOX::Epetra::VectorSpaceScaledL2(scaling));

  // Get the vector from the Problem
  Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
  Teuchos::RCP<NOX::Epetra::Vector> noxSoln = 
    Teuchos::rcp(new NOX::Epetra::Vector(soln, 
					 NOX::Epetra::Vector::CreateCopy,
					 NOX::DeepCopy,
					 weightedVectorSpace));

  // Initial Guess 
  noxSoln->init(2.0);

  // Begin Nonlinear Solver ************************************

  // Create the top level parameter list
  Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
    Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

  // Set the nonlinear solver method
  nlParams.set("Nonlinear Solver", "Inexact Trust Region Based");
  nlParams.sublist("Trust Region").
    set("Inner Iteration Method", "Inexact Trust Region");

  // Set the printing parameters in the "Printing" sublist
  Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
  
  // RPP: Commenting this line out.  There is now a default for MPI
  // specific builds.  We are testing that it works here.
  //printParams.set("MyPID", MyPID);

  printParams.set("Output Precision", 3);
  printParams.set("Output Processor", 0);
  if (verbose)
    printParams.set("Output Information", 
			     NOX::Utils::OuterIteration + 
			     NOX::Utils::OuterIterationStatusTest + 
			     NOX::Utils::InnerIteration +
			     NOX::Utils::LinearSolverDetails +
			     NOX::Utils::Parameters + 
			     NOX::Utils::Details + 
			     NOX::Utils::Warning +
                             NOX::Utils::Debug +
			     NOX::Utils::TestDetails +
			     NOX::Utils::Error);
  else
    printParams.set("Output Information", NOX::Utils::Error +
			     NOX::Utils::TestDetails);

  // Create a print class for controlling output below
  NOX::Utils printing(printParams);

  // Sublist for direction
  Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
  dirParams.set("Method", "Newton");
  Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
  newtonParams.set("Forcing Term Method", "Type 1");

  // Sublist for linear solver for the Newton method
  Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
  lsParams.set("Aztec Solver", "GMRES");  
  lsParams.set("Max Iterations", 800);  
  lsParams.set("Tolerance", 1e-4);

  // Various Preconditioner options
  //lsParams.set("Preconditioner", "AztecOO");
  lsParams.set("Preconditioner", "Ifpack");
  lsParams.set("Preconditioner Reuse Policy", "Rebuild");

  // Sublist for Cauchy direction
  Teuchos::ParameterList& cauchyDirParams = nlParams.sublist("Cauchy Direction");
  cauchyDirParams.set("Method", "Steepest Descent");
  Teuchos::ParameterList& sdParams = cauchyDirParams.sublist("Steepest Descent");
  sdParams.set("Scaling Type", "Quadratic Model Min");

  // Add a user defined pre/post operator object
  Teuchos::RCP<NOX::Abstract::PrePostOperator> ppo =
    Teuchos::rcp(new UserPrePostOperator(printing));
  nlParams.sublist("Solver Options").set("User Defined Pre/Post Operator", 
					 ppo);

  // Let's force all status tests to do a full check
  nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete");

  // User supplied (Epetra_RowMatrix)
  Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();

  // Create the linear system
  Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
  Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
  Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys = 
    Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
						      iReq,
						      iJac, Analytic, 
						      *noxSoln,
						      scaling));
  
  // Create the Group
  Teuchos::RCP<NOX::Epetra::Group> grpPtr = 
    Teuchos::rcp(new NOX::Epetra::Group(printParams, 
					iReq, 
					*noxSoln, 
					linSys));  
  NOX::Epetra::Group& grp = *grpPtr;

  // uncomment the following for loca supergroups
  //MF->setGroupForComputeF(*grpPtr);
  //FD->setGroupForComputeF(*grpPtr);

  // Create the convergence tests
  Teuchos::RCP<NOX::StatusTest::NormF> absresid = 
    Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
  Teuchos::RCP<NOX::StatusTest::NormF> relresid = 
    Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
  Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
    Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
  Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
    Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
  Teuchos::RCP<NOX::StatusTest::Combo> converged =
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
  converged->addStatusTest(absresid);
  converged->addStatusTest(relresid);
  converged->addStatusTest(wrms);
  converged->addStatusTest(update);
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = 
    Teuchos::rcp(new NOX::StatusTest::MaxIters(200));
  Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
    Teuchos::rcp(new NOX::StatusTest::FiniteValue);
  Teuchos::RCP<NOX::StatusTest::Combo> combo = 
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(fv);
  combo->addStatusTest(converged);
  combo->addStatusTest(maxiters);

  // Create the solver
  Teuchos::RCP<NOX::Solver::Generic> solver = 
    NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);
  NOX::StatusTest::StatusType solvStatus = solver->solve();

  // End Nonlinear Solver **************************************

  // Get the Epetra_Vector with the final solution from the solver
  const NOX::Epetra::Group& finalGroup = 
    dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
  const Epetra_Vector& finalSolution = 
    (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).
    getEpetraVector();

  // Output the parameter list
  if (verbose) {
    if (printing.isPrintType(NOX::Utils::Parameters)) {
      printing.out() << std::endl << "Final Parameters" << std::endl
	   << "****************" << std::endl;
      solver->getList().print(printing.out());
      printing.out() << std::endl;
    }
  }

  // Print solution
  char file_name[25];
  FILE *ifp;
  int NumMyElements = soln->Map().NumMyElements();
  (void) sprintf(file_name, "output.%d",MyPID);
  ifp = fopen(file_name, "w");
  for (int i=0; i<NumMyElements; i++)
    fprintf(ifp, "%d  %E\n", soln->Map().MinMyGID()+i, finalSolution[i]);
  fclose(ifp);


  // Tests
  int status = 0; // Converged
  
  // 1. Convergence
  if (solvStatus != NOX::StatusTest::Converged) {
      status = 1;
      if (printing.isPrintType(NOX::Utils::Error))
	printing.out() << "Nonlinear solver failed to converge!" << std::endl;
  }
#ifndef HAVE_MPI 
  // 2. Linear solve iterations (14) - SERIAL TEST ONLY!
  //    The number of linear iterations changes with # of procs.
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 14) {
    status = 2;
  }
#endif
  // 3. Nonlinear solve iterations (17)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 17)
    status = 3;
  // 4. Test the pre/post iterate options
  {
  UserPrePostOperator* ppoPtr = dynamic_cast<UserPrePostOperator*>(ppo.get());
  if (ppoPtr->getNumRunPreIterate() != 17)
    status = 4;
  if (ppoPtr->getNumRunPostIterate() != 17)
    status = 4;
  if (ppoPtr->getNumRunPreSolve() != 1)
    status = 4;
  if (ppoPtr->getNumRunPostSolve() != 1)
    status = 4;
  }
  // 5. Number of Cauchy steps (3)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Trust Region").sublist("Output").get("Number of Cauchy Steps", 0) != 3)
    status = 5;
  // 6. Number of Newton steps (14)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Trust Region").sublist("Output").get("Number of Newton Steps", 0) != 14)
    status = 6;

  // Summarize test results 
  if (status == 0)
    printing.out() << "Test passed!" << std::endl;
  else 
    printing.out() << "Test failed!" << std::endl;
  
#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  // Final return value (0 = successful, non-zero = failure)
  return status;
}
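A note on the weighted vector space used above: with a left-scaling vector d, a scaled L2 norm weights each entry by d_i before summing, so all solver norms see the scaled problem. A hand-rolled check of that idea (a sketch only; the exact internals of NOX::Epetra::VectorSpaceScaledL2 are assumed here):

// Sketch: compute ||x||_d = sqrt( sum_i (d_i * x_i)^2 ) with Epetra.
double scaledTwoNorm(const Epetra_Vector& d, const Epetra_Vector& x)
{
  Epetra_Vector tmp(x.Map());
  tmp.Multiply(1.0, d, x, 0.0);  // tmp_i = d_i * x_i (element-wise)
  double nrm = 0.0;
  tmp.Norm2(&nrm);
  return nrm;
}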
Exemplo n.º 26
0
  void IncSVDPOD::Initialize( const Teuchos::RCP< Teuchos::ParameterList >& params,
                              const Teuchos::RCP< const Epetra_MultiVector >& ss,
                              const Teuchos::RCP< RBGen::FileIOHandler< Epetra_Operator > >& fileio ) {

    using Teuchos::rcp;

    // Get the "Reduced Basis Method" sublist.
    Teuchos::ParameterList rbmethod_params = params->sublist( "Reduced Basis Method" );

    // Get the maximum basis size
    maxBasisSize_ = rbmethod_params.get<int>("Max Basis Size");
    TEUCHOS_TEST_FOR_EXCEPTION(maxBasisSize_ < 2,std::invalid_argument,"\"Max Basis Size\" must be at least 2.");

    // Get a filter
    filter_ = rbmethod_params.get<Teuchos::RCP<Filter<double> > >("Filter",Teuchos::null);
    if (filter_ == Teuchos::null) {
      int k = rbmethod_params.get("Rank",(int)5);
      filter_ = rcp( new RangeFilter<double>(LARGEST,k,k) );
    }

    // Get convergence tolerance
    tol_ = rbmethod_params.get<double>("Convergence Tolerance",tol_);

    // Get debugging flag
    debug_ = rbmethod_params.get<bool>("IncSVD Debug",debug_);

    // Get verbosity level
    verbLevel_ = rbmethod_params.get<int>("IncSVD Verbosity Level",verbLevel_);

    // Get an Anasazi orthomanager
    if (rbmethod_params.isType<
          Teuchos::RCP< Anasazi::OrthoManager<double,Epetra_MultiVector> > 
        >("Ortho Manager")
       ) 
    {
      ortho_ = rbmethod_params.get< 
                Teuchos::RCP<Anasazi::OrthoManager<double,Epetra_MultiVector> >
               >("Ortho Manager");
      TEUCHOS_TEST_FOR_EXCEPTION(ortho_ == Teuchos::null,std::invalid_argument,"User specified null ortho manager.");
    }
    else {
      std::string omstr = rbmethod_params.get("Ortho Manager","DGKS");
      if (omstr == "SVQB") {
        ortho_ = rcp( new Anasazi::SVQBOrthoManager<double,Epetra_MultiVector,Epetra_Operator>() );
      }
      else { // if omstr == "DGKS"
        ortho_ = rcp( new Anasazi::BasicOrthoManager<double,Epetra_MultiVector,Epetra_Operator>() );
      }
    }

    // Lmin,Lmax,Kstart
    lmin_ = rbmethod_params.get("Min Update Size",1);
    TEUCHOS_TEST_FOR_EXCEPTION(lmin_ < 1 || lmin_ >= maxBasisSize_,std::invalid_argument,
                       "Method requires 1 <= min update size < max basis size.");
    lmax_ = rbmethod_params.get("Max Update Size",maxBasisSize_);
    TEUCHOS_TEST_FOR_EXCEPTION(lmin_ > lmax_,std::invalid_argument,"Max update size must be >= min update size.");

    startRank_ = rbmethod_params.get("Start Rank",lmin_);
    TEUCHOS_TEST_FOR_EXCEPTION(startRank_ < 1 || startRank_ > maxBasisSize_,std::invalid_argument,
                       "Starting rank must be in [1,maxBasisSize_].");
    // MaxNumPasses
    maxNumPasses_ = rbmethod_params.get("Maximum Number Passes",maxNumPasses_);
    TEUCHOS_TEST_FOR_EXCEPTION(maxNumPasses_ != -1 && maxNumPasses_ <= 0, std::invalid_argument,
                       "Maximum number of passes must be -1 or > 0.");
    // Save the pointer to the snapshot matrix
    TEUCHOS_TEST_FOR_EXCEPTION(ss == Teuchos::null,std::invalid_argument,"Input snapshot matrix cannot be null.");
    A_ = ss;

    // MaxNumCols
    maxNumCols_ = A_->NumVectors();
    maxNumCols_ = rbmethod_params.get("Maximum Number Columns",maxNumCols_);
    TEUCHOS_TEST_FOR_EXCEPTION(maxNumCols_ < A_->NumVectors(), std::invalid_argument,
                       "Maximum number of columns must be at least as many as in the initializing data set.");

    // V locally replicated or globally distributed
    // this must be true for now
    // Vlocal_ = rbmethod_params.get("V Local",Vlocal_);

    // Allocate space for the factorization
    sigma_.reserve( maxBasisSize_ );
    U_ = Teuchos::null;
    V_ = Teuchos::null;
    U_ = rcp( new Epetra_MultiVector(ss->Map(),maxBasisSize_,false) );
    if (Vlocal_) {
      Epetra_LocalMap lclmap(maxNumCols_,0,ss->Comm());
      V_ = rcp( new Epetra_MultiVector(lclmap,maxBasisSize_,false) );
    }
    else {
      Epetra_Map gblmap(maxNumCols_,0,ss->Comm());
      V_ = rcp( new Epetra_MultiVector(gblmap,maxBasisSize_,false) );
    }
    B_ = rcp( new Epetra_SerialDenseMatrix(maxBasisSize_,maxBasisSize_) );
    resNorms_.reserve(maxBasisSize_);

    // clear counters
    numProc_ = 0;
    curNumPasses_ = 0;

    // we are now initialized, albeit with null rank
    isInitialized_ = true;
  }
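All of the keys read by Initialize above live in the "Reduced Basis Method" sublist. A minimal caller-side sketch of that list (key names come from the code above; the values are illustrative, not defaults from the source):

// Sketch: build the parameter list that IncSVDPOD::Initialize consumes.
Teuchos::RCP<Teuchos::ParameterList> params =
  Teuchos::rcp(new Teuchos::ParameterList);
Teuchos::ParameterList& rbm = params->sublist("Reduced Basis Method");
rbm.set("Max Basis Size", 20);            // required, must be >= 2
rbm.set("Rank", 5);                       // used only if no "Filter" is set
rbm.set("Convergence Tolerance", 1.0e-8);
rbm.set("Min Update Size", 1);            // 1 <= lmin < Max Basis Size
rbm.set("Max Update Size", 20);           // must be >= Min Update Size
rbm.set("Start Rank", 1);                 // in [1, Max Basis Size]
rbm.set("Maximum Number Passes", -1);     // -1 (unlimited) or > 0
rbm.set("Ortho Manager", "DGKS");         // or "SVQB"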
Exemplo n.º 27
0
// ------------------------------------------------------------------------
// ---------------------------   Main Program -----------------------------
// ------------------------------------------------------------------------
int main(int argc, char *argv[])
{
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  bool verbose = false;
  // Check for verbose output
  if (argc>1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else
    NumGlobalElements = 101;

  bool success = false;
  try {
    // The number of unknowns must be at least equal to the
    // number of processors.
    if (NumGlobalElements < NumProc) {
      std::cout << "numGlobalBlocks = " << NumGlobalElements
        << " cannot be < number of processors = " << NumProc << std::endl;
      throw "NOX Error";
    }

    // Create the interface between NOX and the application
    // This object is derived from NOX::Epetra::Interface
    Teuchos::RCP<TransientInterface> interface =
      Teuchos::rcp(new TransientInterface(NumGlobalElements, Comm, -20.0, 20.0));
    double dt = 0.10;
    interface->setdt(dt);

    // Set the PDE nonlinear coefficient for this problem
    interface->setPDEfactor(1.0);

    // Get the vector from the Problem
    Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
    NOX::Epetra::Vector noxSoln(soln, NOX::Epetra::Vector::CreateView);

    // Begin Nonlinear Solver ************************************

    // Create the top level parameter list
    Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
      Teuchos::rcp(new Teuchos::ParameterList);
    Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

    // Set the nonlinear solver method
    nlParams.set("Nonlinear Solver", "Line Search Based");

    // Set the printing parameters in the "Printing" sublist
    Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
    printParams.set("MyPID", MyPID);
    printParams.set("Output Precision", 3);
    printParams.set("Output Processor", 0);
    if (verbose)
      printParams.set("Output Information",
          NOX::Utils::OuterIteration +
          NOX::Utils::OuterIterationStatusTest +
          NOX::Utils::InnerIteration +
          NOX::Utils::LinearSolverDetails +
          NOX::Utils::Parameters +
          NOX::Utils::Details +
          NOX::Utils::Warning +
          NOX::Utils::Debug +
          NOX::Utils::Error);
    else
      printParams.set("Output Information", NOX::Utils::Error);

    // Create a print class for controlling output below
    NOX::Utils utils(printParams);

    // Sublist for line search
    Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
    searchParams.set("Method", "Full Step");

    // Sublist for direction
    Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
    dirParams.set("Method", "Newton");
    Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
    newtonParams.set("Forcing Term Method", "Constant");

    // Sublist for linear solver for the Newton method
    Teuchos::ParameterList& lsParams = newtonParams.sublist("Linear Solver");
    lsParams.set("Aztec Solver", "GMRES");
    lsParams.set("Max Iterations", 800);
    lsParams.set("Tolerance", 1e-4);
    lsParams.set("Preconditioner", "AztecOO");

    // Create all possible Epetra_Operators.
    // 1. User supplied (Epetra_RowMatrix)
    Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();
    // 2. Matrix-Free (Epetra_Operator)

    // Four constructors to create the Linear System
    Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
    Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
    Teuchos::RCP<NOX::Epetra::LinearSystemAztecOO> linSys =
      Teuchos::rcp(new NOX::Epetra::LinearSystemAztecOO(printParams, lsParams,
            iReq, iJac, Analytic,
            noxSoln));

    // Create the Group
    Teuchos::RCP<NOX::Epetra::Group> grpPtr =
      Teuchos::rcp(new NOX::Epetra::Group(printParams,
            iReq,
            noxSoln,
            linSys));
    NOX::Epetra::Group& grp = *(grpPtr.get());

    // Create the convergence tests
    Teuchos::RCP<NOX::StatusTest::NormF> absresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
    Teuchos::RCP<NOX::StatusTest::NormF> relresid =
      Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
    Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
      Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
    Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
      Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
    Teuchos::RCP<NOX::StatusTest::Combo> converged =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
    converged->addStatusTest(absresid);
    converged->addStatusTest(relresid);
    converged->addStatusTest(wrms);
    converged->addStatusTest(update);
    Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters =
      Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
    Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
      Teuchos::rcp(new NOX::StatusTest::FiniteValue);
    Teuchos::RCP<NOX::StatusTest::Combo> combo =
      Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
    combo->addStatusTest(fv);
    combo->addStatusTest(converged);
    combo->addStatusTest(maxiters);

    // Initialize time integration parameters
    int maxTimeSteps = 10;
    int timeStep = 0;
    double time = 0.0;

#ifdef PRINT_RESULTS_TO_FILES
    // Print initial solution
    char file_name[25];
    FILE *ifp;
    int NumMyElements = soln->Map().NumMyElements();
    (void) sprintf(file_name, "output.%d_%d",MyPID,timeStep);
    ifp = fopen(file_name, "w");
    for (int i=0; i<NumMyElements; i++)
      fprintf(ifp, "%d  %E  %E\n", soln->Map().MinMyGID()+i,
          interface->getMesh()[i], (*soln)[i]);
    fclose(ifp);
#endif

    // Create the solver
    Teuchos::RCP<NOX::Solver::Generic> solver =
      NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);

    // Overall status flag
    int ierr = 0;

    // Time integration loop
    while(timeStep < maxTimeSteps) {
      timeStep++;
      time += dt;

      utils.out() << "Time Step: " << timeStep << ",\tTime: " << time << std::endl;

      NOX::StatusTest::StatusType status = solver->solve();

      // Check for convergence
      if (status != NOX::StatusTest::Converged) {
        ierr++;
        if (utils.isPrintType(NOX::Utils::Error))
          utils.out() << "Nonlinear solver failed to converge!" << std::endl;
      }


      // Get the Epetra_Vector with the final solution from the solver
      const NOX::Epetra::Group& finalGroup = dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
      const Epetra_Vector& finalSolution = (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).getEpetraVector();
      // Exact solution at the current time, used when writing results to files
#ifdef PRINT_RESULTS_TO_FILES
      Epetra_Vector& exactSolution = interface->getExactSoln(time);
#endif


      // End Nonlinear Solver **************************************

#ifdef PRINT_RESULTS_TO_FILES
      // Print solution
      (void) sprintf(file_name, "output.%d_%d",MyPID,timeStep);
      ifp = fopen(file_name, "w");
      for (int i=0; i<NumMyElements; i++)
        fprintf(ifp, "%d  %E  %E  %E\n", soln->Map().MinMyGID()+i,
            interface->getMesh()[i], finalSolution[i], exactSolution[i]);
      fclose(ifp);
#endif

      // Advance in time: the converged solution becomes the previous-step
      // state in the transient residual; reset the group and solver to
      // start the next step from it, and recompute F.
      interface->reset(finalSolution);
      grp.setX(finalSolution);
      solver->reset(grp.getX(), combo);
      grp.computeF();

    } // end time step while loop

    // Output the parameter list
    if (utils.isPrintType(NOX::Utils::Parameters)) {
      utils.out() << std::endl << "Final Parameters" << std::endl
        << "****************" << std::endl;
      solver->getList().print(utils.out());
      utils.out() << std::endl;
    }

    // Test for convergence

#ifndef HAVE_MPI
    // 1. Linear solve iterations on final time step (30)- SERIAL TEST ONLY!
    //    The number of linear iterations changes with # of procs.
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Direction").sublist("Newton").sublist("Linear Solver").sublist("Output").get("Total Number of Linear Iterations",0) != 30) {
      ierr = 1;
    }
#endif
    // 2. Nonlinear solve iterations on final time step (3)
    if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 3)
      ierr = 2;

    success = ierr==0;
    // Summarize test results
    if (success)
      utils.out() << "Test passed!" << std::endl;
    else
      utils.out() << "Test failed!" << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(verbose, std::cerr, success);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return ( success ? EXIT_SUCCESS : EXIT_FAILURE );
}
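A note on the pattern above: a transient interface of this kind typically folds the time discretization into the nonlinear residual, so each call to solver->solve() advances one implicit step. A backward-Euler sketch of that idea (assumed; the actual TransientInterface implementation is not shown in this excerpt, and computeSpatialResidual, uOld, and dt are hypothetical members):

// Hypothetical sketch of a backward-Euler transient residual:
//   F(u) = (u - uOld)/dt - f(u)
// where f is the spatial PDE residual and uOld the previous-step solution.
bool computeF(const Epetra_Vector& u, Epetra_Vector& F)
{
  computeSpatialResidual(u, F);              // F = f(u)   (hypothetical)
  // F <- (1/dt)*u + (-1/dt)*uOld + (-1)*F  ==  (u - uOld)/dt - f(u)
  F.Update(1.0/dt, u, -1.0/dt, uOld, -1.0);
  return true;
}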
Exemplo n.º 28
0
QUICKGRID::Data PeridigmNS::TextFileDiscretization::getDecomp(const string& textFileName,
                                                              const Teuchos::RCP<Teuchos::ParameterList>& params) {

  // Read data from the text file
  vector<double> coordinates;
  vector<double> volumes;
  vector<int> blockIds;

  // Read the text file on the root processor
  if(myPID == 0){
    ifstream inFile(textFileName.c_str());
    TEUCHOS_TEST_FOR_EXCEPT_MSG(!inFile.is_open(), "**** Error opening discretization text file.\n");
    while(inFile.good()){
      string str;
      getline(inFile, str);
      boost::trim(str);
      // Ignore empty lines and comment lines, otherwise parse
      if( !(str.size() == 0 || str[0] == '#' || str[0] == '/' || str[0] == '*') ){
        istringstream iss(str);
        vector<double> data;
        copy(istream_iterator<double>(iss),
             istream_iterator<double>(),
             back_inserter<vector<double> >(data));
        // Check for obvious problems with the data
        if(data.size() != 5){
          string msg = "\n**** Error parsing text file, invalid line: " + str + "\n";
          TEUCHOS_TEST_FOR_EXCEPT_MSG(data.size() != 5, msg);
        }
        // Store the coordinates, block id, and volumes
        coordinates.push_back(data[0]);
        coordinates.push_back(data[1]);
        coordinates.push_back(data[2]);
        blockIds.push_back(static_cast<int>(data[3]));
        volumes.push_back(data[4]);
      }
    }
    inFile.close();
  }

  int numElements = static_cast<int>(blockIds.size());
  TEUCHOS_TEST_FOR_EXCEPT_MSG(myPID == 0 && numElements < 1, "**** Error reading discretization text file, no data found.\n");

  // Record the block ids on the root processor
  set<int> uniqueBlockIds;
  if(myPID == 0){
    for(unsigned int i=0 ; i<blockIds.size() ; ++i)
      uniqueBlockIds.insert(blockIds[i]);
  }

  // Broadcast necessary data from root processor
  Teuchos::RCP<const Teuchos::Comm<int> > teuchosComm = Teuchos::createMpiComm<int>(Teuchos::opaqueWrapper<MPI_Comm>(MPI_COMM_WORLD));
  int numGlobalElements;
  reduceAll(*teuchosComm, Teuchos::REDUCE_SUM, 1, &numElements, &numGlobalElements);

  // Broadcast the unique block ids so that all processors are aware of the full block list
  // This is necessary because if a processor does not have any elements for a given block, it will be unaware the
  // given block exists, which causes problems downstream
  int numLocalUniqueBlockIds = static_cast<int>( uniqueBlockIds.size() );
  int numGlobalUniqueBlockIds;
  reduceAll(*teuchosComm, Teuchos::REDUCE_SUM, 1, &numLocalUniqueBlockIds, &numGlobalUniqueBlockIds);
  vector<int> uniqueLocalBlockIds(numGlobalUniqueBlockIds, 0);
  int index = 0;
  for(set<int>::const_iterator it = uniqueBlockIds.begin() ; it != uniqueBlockIds.end() ; it++)
    uniqueLocalBlockIds[index++] = *it;
  vector<int> uniqueGlobalBlockIds(numGlobalUniqueBlockIds);  
  reduceAll(*teuchosComm, Teuchos::REDUCE_SUM, numGlobalUniqueBlockIds, &uniqueLocalBlockIds[0], &uniqueGlobalBlockIds[0]);

  // Create list of global ids
  vector<int> globalIds(numElements);
  for(unsigned int i=0 ; i<globalIds.size() ; ++i)
    globalIds[i] = i;

  // Copy data into a decomp object
  int dimension = 3;
  QUICKGRID::Data decomp = QUICKGRID::allocatePdGridData(numElements, dimension);
  decomp.globalNumPoints = numGlobalElements;
  memcpy(decomp.myGlobalIDs.get(), &globalIds[0], numElements*sizeof(int)); 
  memcpy(decomp.cellVolume.get(), &volumes[0], numElements*sizeof(double)); 
  memcpy(decomp.myX.get(), &coordinates[0], 3*numElements*sizeof(double));

  // Create a blockID vector in the current configuration
  // That is, the configuration prior to load balancing
  Epetra_BlockMap tempOneDimensionalMap(decomp.globalNumPoints,
                                        decomp.numPoints,
                                        decomp.myGlobalIDs.get(),
                                        1,
                                        0,
                                        *comm);
  Epetra_Vector tempBlockID(tempOneDimensionalMap);
  double* tempBlockIDPtr;
  tempBlockID.ExtractView(&tempBlockIDPtr);
  for(unsigned int i=0 ; i<blockIds.size() ; ++i)
    tempBlockIDPtr[i] = blockIds[i];

  // call the rebalance function on the current-configuration decomp
  decomp = PDNEIGH::getLoadBalancedDiscretization(decomp);

  // create a (throw-away) one-dimensional owned map in the rebalanced configuration
  Epetra_BlockMap rebalancedMap(decomp.globalNumPoints, decomp.numPoints, decomp.myGlobalIDs.get(), 1, 0, *comm);

  // Create a (throw-away) blockID vector corresponding to the load balanced decomposition
  Epetra_Vector rebalancedBlockID(rebalancedMap);
  Epetra_Import rebalancedImporter(rebalancedBlockID.Map(), tempBlockID.Map());
  rebalancedBlockID.Import(tempBlockID, rebalancedImporter, Insert);

  // Initialize the element list for each block
  // Force blocks with no on-processor elements to have an entry in the elementBlocks map
  for(unsigned int i=0 ; i<uniqueGlobalBlockIds.size() ; i++){
    stringstream blockName;
    blockName << "block_" << uniqueGlobalBlockIds[i];
    (*elementBlocks)[blockName.str()] = std::vector<int>();
  }

  // Create the element list for each block
  for(int i=0 ; i<rebalancedBlockID.MyLength() ; ++i){
    stringstream blockName;
    blockName << "block_" << rebalancedBlockID[i];
    TEUCHOS_TEST_FOR_EXCEPT_MSG(elementBlocks->find(blockName.str()) == elementBlocks->end(),
                                "\n**** Error in TextFileDiscretization::getDecomp(), invalid block id.\n");
    int globalID = rebalancedBlockID.Map().GID(i);
    (*elementBlocks)[blockName.str()].push_back(globalID);
  }

  // Record the horizon for each point
  PeridigmNS::HorizonManager& horizonManager = PeridigmNS::HorizonManager::self();
  Teuchos::RCP<Epetra_Vector> rebalancedHorizonForEachPoint = Teuchos::rcp(new Epetra_Vector(rebalancedMap));
  double* rebalancedX = decomp.myX.get();
  for(map<string, vector<int> >::const_iterator it = elementBlocks->begin() ; it != elementBlocks->end() ; it++){
    const string& blockName = it->first;
    const vector<int>& globalIds = it->second;

    bool hasConstantHorizon = horizonManager.blockHasConstantHorizon(blockName);
    double constantHorizonValue(0.0);
    if(hasConstantHorizon)
      constantHorizonValue = horizonManager.getBlockConstantHorizonValue(blockName);

    for(unsigned int i=0 ; i<globalIds.size() ; ++i){
      int localId = rebalancedMap.LID(globalIds[i]);
      if(hasConstantHorizon){
        (*rebalancedHorizonForEachPoint)[localId] = constantHorizonValue;
      }
      else{
        double x = rebalancedX[localId*3];
        double y = rebalancedX[localId*3 + 1];
        double z = rebalancedX[localId*3 + 2];
        double horizon = horizonManager.evaluateHorizon(blockName, x, y, z);
        (*rebalancedHorizonForEachPoint)[localId] = horizon;
      }
    }
  }

  // execute neighbor search and update the decomp to include resulting ghosts
  std::tr1::shared_ptr<const Epetra_Comm> commSp(comm.getRawPtr(), NonDeleter<const Epetra_Comm>());
  Teuchos::RCP<PDNEIGH::NeighborhoodList> list;
  TEUCHOS_TEST_FOR_EXCEPT_MSG(bondFilters.size() > 1, "\n****Error:  Multiple bond filters currently unsupported.\n");

  if(bondFilters.size() == 0)
    list = Teuchos::rcp(new PDNEIGH::NeighborhoodList(commSp,decomp.zoltanPtr.get(),decomp.numPoints,decomp.myGlobalIDs,decomp.myX,rebalancedHorizonForEachPoint));
  else if(bondFilters.size() == 1)
    list = Teuchos::rcp(new PDNEIGH::NeighborhoodList(commSp,decomp.zoltanPtr.get(),decomp.numPoints,decomp.myGlobalIDs,decomp.myX,rebalancedHorizonForEachPoint,bondFilters[0]));
  decomp.neighborhood=list->get_neighborhood();
  decomp.sizeNeighborhoodList=list->get_size_neighborhood_list();
  decomp.neighborhoodPtr=list->get_neighborhood_ptr();

  // Create all the maps.
  createMaps(decomp);

  // Create the blockID vector corresponding to the load balanced decomposition
  blockID = Teuchos::rcp(new Epetra_Vector(*oneDimensionalMap));
  Epetra_Import tempImporter(blockID->Map(), tempBlockID.Map());
  blockID->Import(tempBlockID, tempImporter, Insert);

  // Create the horizonForEachPoint vector corresponding to the load balanced decomposition
  horizonForEachPoint = Teuchos::rcp(new Epetra_Vector(*oneDimensionalMap));
  Epetra_Import horizonImporter(horizonForEachPoint->Map(), rebalancedHorizonForEachPoint->Map());
  horizonForEachPoint->Import(*rebalancedHorizonForEachPoint, horizonImporter, Insert);

  return decomp;
}
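For reference, the parser above expects exactly five whitespace-separated values per line (x, y, z, block id, volume) and skips lines beginning with '#', '/', or '*'. An illustrative input file (the values are made up):

# x      y      z      block_id  volume
0.0    0.0    0.0    1         1.0e-3
0.5    0.0    0.0    1         1.0e-3
0.0    0.5    0.0    2         1.0e-3
0.5    0.5    0.0    2         1.0e-3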
Exemplo n.º 29
0
int main(int argc, char** argv) {

  int fail = 0, dim=0;

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &localProc);
  MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
  const Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  const Epetra_SerialComm Comm;
#endif

  // =============================================================
  // get command line options
  // =============================================================

  Teuchos::CommandLineProcessor clp(false,true);

  std::string *inputFile = new std::string("simple.coords");
  bool verbose = false;

  clp.setOption( "f", inputFile, "Name of coordinate input file");

  clp.setOption( "v", "q", &verbose,
		"Display coordinates and weights before and after partitioning.");

  Teuchos::CommandLineProcessor::EParseCommandLineReturn parse_return =
    clp.parse(argc,argv);

  if( parse_return == Teuchos::CommandLineProcessor::PARSE_HELP_PRINTED){
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
  }
  if( parse_return != Teuchos::CommandLineProcessor::PARSE_SUCCESSFUL ) {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 1;
  }

  // =============================================================
  // Open file of coordinates and distribute them across processes
  // so they are unbalanced.
  // =============================================================

  Epetra_MultiVector *mv = ispatest::file2multivector(Comm, *inputFile);

  if (!mv || ((dim = mv->NumVectors()) < 1)){
    if (localProc == 0)
      std::cerr << "Invalid input file " << *inputFile << std::endl;
    exit(1);
  }

  if (localProc == 0){
    std::cerr << "Found input file " << *inputFile << ", " ;
    std::cerr << dim << " dimensional coordinates" << std::endl;
  }

  delete inputFile;

  int base = mv->Map().IndexBase();
  int globalSize = mv->GlobalLength();
  int myShare = 0;
  int n = numProcs - 1;

  if (n){
    if (localProc < n){
      int oneShare = globalSize / n;
      int leftOver = globalSize - (n * oneShare);
      myShare = oneShare + ((localProc < leftOver) ? 1 : 0);
    }
  }
  else{
    myShare = globalSize;
  }

  Epetra_BlockMap unbalancedMap(globalSize, myShare, 1, base, mv->Map().Comm());
  Epetra_Import importer(unbalancedMap, mv->Map());
  Epetra_MultiVector umv(unbalancedMap, dim);
  umv.Import(*mv, importer, Insert);

  delete mv;

  Teuchos::RCP<const Epetra_MultiVector> coords =
    Teuchos::rcp(new const Epetra_MultiVector(umv));

  // =============================================================
  // Create some different coordinate weight vectors
  // =============================================================

  Epetra_MultiVector *unitWgts = ispatest::makeWeights(coords->Map(), &ispatest::unitWeights);
  Epetra_MultiVector *veeWgts = ispatest::makeWeights(coords->Map(), &ispatest::veeWeights);
  Epetra_MultiVector *altWgts = ispatest::makeWeights(coords->Map(), &ispatest::alternateWeights);

  Teuchos::RCP<const Epetra_MultiVector> unit_weights_rcp = Teuchos::rcp(unitWgts);
  Teuchos::RCP<const Epetra_MultiVector> vee_weights_rcp = Teuchos::rcp(veeWgts);
  Teuchos::RCP<const Epetra_MultiVector> alt_weights_rcp = Teuchos::rcp(altWgts);

  if (localProc == 0){
    std::cerr << "Unit weights: Each object has weight 1.0" << std::endl;
    std::cerr << "V weights: Low and high GIDs have high weights, center GIDs have low weights" << std::endl;
    std::cerr << "Alternate weights: Objects on even rank processes have one weight, on odd another weight" << std::endl;
    std::cerr << std::endl;
  }

  // ======================================================================
  //  Create a parameter list for Zoltan, and one for internal partitioning
  // ======================================================================

  Teuchos::ParameterList internalParams;

  internalParams.set("PARTITIONING_METHOD", "SIMPLE_LINEAR");


  Teuchos::ParameterList zoltanParams;

  // Take the sublist by reference; a by-value copy would not propagate
  // any settings back into zoltanParams.
  Teuchos::ParameterList& sublist = zoltanParams.sublist("ZOLTAN");

  //sublist.set("DEBUG_LEVEL", "1"); // Zoltan will print out parameters
  //sublist.set("DEBUG_LEVEL", "5");   // proc 0 will trace Zoltan calls
  //sublist.set("DEBUG_MEMORY", "2");  // Zoltan will trace alloc & free

  // =============================================================
  // Run some tests
  // =============================================================
  zoltanParams.set("PARTITIONING METHOD", "RCB");

  if (localProc == 0){
    std::cerr << "RCB - unit weights" << std::endl;
  }

  fail = run_test(coords, unit_weights_rcp, zoltanParams);

  if (fail) goto failure;

  if (localProc == 0){
    std::cerr << "PASS" << std::endl << std::endl;
  }
  // *************************************************************

  if (localProc == 0){
    std::cerr << "HSFC - V weights" << std::endl;
  }


  zoltanParams.set("PARTITIONING METHOD", "HSFC");
  fail = run_test(coords, vee_weights_rcp, zoltanParams);

  if (fail) goto failure;

  if (localProc == 0){
    std::cerr << "PASS" << std::endl << std::endl;
  }

  // *************************************************************

  if (localProc == 0){
    std::cerr << "RIB - alternate weights" << std::endl;
  }
  zoltanParams.set("PARTITIONING METHOD", "RIB");

  fail = run_test(coords, alt_weights_rcp, zoltanParams);

  if (fail) goto failure;

  if (localProc == 0){
    std::cerr << "PASS" << std::endl << std::endl;
  }

  // *************************************************************

  if (localProc == 0){
    std::cerr << "RIB - no weights supplied" << std::endl;
  }
  zoltanParams.set("PARTITIONING METHOD", "RIB");

  fail = run_test(coords, zoltanParams);

  if (fail) goto failure;

  if (localProc == 0){
    std::cerr << "PASS" << std::endl << std::endl;
  }

  // *************************************************************

  goto done;


failure:

  if (localProc == 0){
    std::cerr << "FAIL: test failed" << std::endl;
  }

done:

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return fail;
}
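The run_test overloads called above are not shown in this excerpt. A hypothetical sketch of what the weighted variant might do, assuming Isorropia's documented geometric-partitioning classes (the exact constructor and redistribute signatures should be checked against the installed Isorropia version):

// Hypothetical sketch only; run_test's real body is not in this excerpt.
// Assumed headers: Isorropia_EpetraPartitioner.hpp, Isorropia_EpetraRedistributor.hpp
int run_test(Teuchos::RCP<const Epetra_MultiVector> coords,
             Teuchos::RCP<const Epetra_MultiVector> weights,
             const Teuchos::ParameterList& params)
{
  try {
    Teuchos::RCP<Isorropia::Epetra::Partitioner> part =
      Teuchos::rcp(new Isorropia::Epetra::Partitioner(coords, weights, params));
    Isorropia::Epetra::Redistributor redist(part);
    Teuchos::RCP<Epetra_MultiVector> balanced =
      redist.redistribute(*coords);   // coordinates in the new distribution
    // ... evaluate the balance of the new distribution here ...
  }
  catch (std::exception& e) {
    std::cerr << e.what() << std::endl;
    return 1;
  }
  return 0;
}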
Exemplo n.º 30
0
int main(int argc, char *argv[])
{
 
  // Initialize MPI
  Teuchos::GlobalMPISession mpiSession(&argc,&argv);

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  // Check verbosity level
  bool verbose = false;
  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the number of elements from the command line
  int NumGlobalElements = 0;
  if ((argc > 2) && (verbose))
    NumGlobalElements = atoi(argv[2]) + 1;
  else if ((argc > 1) && (!verbose))
    NumGlobalElements = atoi(argv[1]) + 1;
  else 
    NumGlobalElements = 101;

  // The number of unknowns must be at least equal to the 
  // number of processors.
  if (NumGlobalElements < NumProc) {
    cout << "numGlobalBlocks = " << NumGlobalElements 
	 << " cannot be < number of processors = " << NumProc << endl;
    cout << "Test failed!" << endl;
    throw "NOX Error";
  }

  // Create the interface between NOX and the application
  // This object is derived from NOX::Epetra::Interface
  Teuchos::RCP<Interface> interface = 
    Teuchos::rcp(new Interface(NumGlobalElements, Comm));

  // Get the vector from the Problem
  Teuchos::RCP<Epetra_Vector> soln = interface->getSolution();
  Teuchos::RCP<NOX::Epetra::Vector> noxSoln = 
    Teuchos::rcp(new NOX::Epetra::Vector(soln, 
					 NOX::Epetra::Vector::CreateView));

  // Set the PDE factor (for nonlinear forcing term).  This could be specified
  // via user input.
  interface->setPDEfactor(1000.0);

  // Set the initial guess 
  soln->PutScalar(1.0);

  // Begin Nonlinear Solver ************************************

  // Create the top level parameter list
  Teuchos::RCP<Teuchos::ParameterList> nlParamsPtr =
    Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList& nlParams = *(nlParamsPtr.get());

  // Set the nonlinear solver method
  nlParams.set("Nonlinear Solver", "Line Search Based");

  // Set the printing parameters in the "Printing" sublist
  Teuchos::ParameterList& printParams = nlParams.sublist("Printing");
  printParams.set("MyPID", MyPID); 
  printParams.set("Output Precision", 3);
  printParams.set("Output Processor", 0);
  if (verbose)
    printParams.set("Output Information", 
			     NOX::Utils::OuterIteration + 
			     NOX::Utils::OuterIterationStatusTest + 
			     NOX::Utils::InnerIteration +
			     NOX::Utils::LinearSolverDetails +
			     NOX::Utils::Parameters + 
			     NOX::Utils::Details + 
			     NOX::Utils::Warning +
                             NOX::Utils::Debug +
			     NOX::Utils::TestDetails +
			     NOX::Utils::Error);
  else
    printParams.set("Output Information", NOX::Utils::Error +
			     NOX::Utils::TestDetails);

  // Create a print class for controlling output below
  NOX::Utils printing(printParams);

  // Sublist for line search 
  Teuchos::ParameterList& searchParams = nlParams.sublist("Line Search");
  searchParams.set("Method", "Full Step");

  // Sublist for direction
  Teuchos::ParameterList& dirParams = nlParams.sublist("Direction");
  dirParams.set("Method", "Newton");
  Teuchos::ParameterList& newtonParams = dirParams.sublist("Newton");
  newtonParams.set("Forcing Term Method", "Constant");

  // Alternative linear solver list for Stratimikos
  Teuchos::ParameterList& stratLinSolParams = newtonParams.sublist("Stratimikos Linear Solver");
  Teuchos::ParameterList& noxStratParams = stratLinSolParams.sublist("NOX Stratimikos Options");
  Teuchos::ParameterList& stratParams = stratLinSolParams.sublist("Stratimikos");

  noxStratParams.set("Preconditioner Reuse Policy","Rebuild");

  stratParams.set("Linear Solver Type", "AztecOO");
  stratParams.set("Preconditioner Type", "ML");
  if (verbose) stratParams.sublist("Linear Solver Types")
                 .sublist("AztecOO").sublist("Forward Solve")
                 .sublist("AztecOO Settings").set("Output Frequency", 1);

  // Let's force all status tests to do a full check
  nlParams.sublist("Solver Options").set("Status Test Check Type", "Complete");

  // Create all possible Epetra_Operators.
  // 1. User supplied (Epetra_RowMatrix)
  Teuchos::RCP<Epetra_RowMatrix> Analytic = interface->getJacobian();
  // 2. Matrix-Free (Epetra_Operator)
  Teuchos::RCP<NOX::Epetra::MatrixFree> MF = 
    Teuchos::rcp(new NOX::Epetra::MatrixFree(printParams, interface, 
					     *noxSoln));
  // 3. Finite Difference (Epetra_RowMatrix)
  Teuchos::RCP<NOX::Epetra::FiniteDifference> FD = 
    Teuchos::rcp(new NOX::Epetra::FiniteDifference(printParams, interface, 
						   *soln));

  // Create the linear system
  Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;
  Teuchos::RCP<NOX::Epetra::Interface::Jacobian> iJac = interface;
  Teuchos::RCP<NOX::Epetra::Interface::Preconditioner> iPrec = FD;
  Teuchos::RCP<NOX::Epetra::LinearSystem> linSys = 
    Teuchos::rcp(new NOX::Epetra::LinearSystemStratimikos(printParams, stratLinSolParams,
                                                      iJac, Analytic, 
                                                      iPrec, FD, 
						      *soln, false));
  
  // Create the Group
  NOX::Epetra::Vector initialGuess(soln, NOX::Epetra::Vector::CreateView);
  Teuchos::RCP<NOX::Epetra::Group> grpPtr = 
    Teuchos::rcp(new NOX::Epetra::Group(printParams, 
					iReq, 
					initialGuess, 
					linSys));  
  NOX::Epetra::Group& grp = *grpPtr;

  // uncomment the following for loca supergroups
  //MF->setGroupForComputeF(*grpPtr);
  //FD->setGroupForComputeF(*grpPtr);

  // Create the convergence tests
  Teuchos::RCP<NOX::StatusTest::NormF> absresid = 
    Teuchos::rcp(new NOX::StatusTest::NormF(1.0e-8));
  Teuchos::RCP<NOX::StatusTest::NormF> relresid = 
    Teuchos::rcp(new NOX::StatusTest::NormF(grp, 1.0e-2));
  Teuchos::RCP<NOX::StatusTest::NormUpdate> update =
    Teuchos::rcp(new NOX::StatusTest::NormUpdate(1.0e-5));
  Teuchos::RCP<NOX::StatusTest::NormWRMS> wrms =
    Teuchos::rcp(new NOX::StatusTest::NormWRMS(1.0e-2, 1.0e-8));
  Teuchos::RCP<NOX::StatusTest::Combo> converged =
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::AND));
  converged->addStatusTest(absresid);
  converged->addStatusTest(relresid);
  converged->addStatusTest(wrms);
  converged->addStatusTest(update);
  Teuchos::RCP<NOX::StatusTest::MaxIters> maxiters = 
    Teuchos::rcp(new NOX::StatusTest::MaxIters(20));
  Teuchos::RCP<NOX::StatusTest::FiniteValue> fv =
    Teuchos::rcp(new NOX::StatusTest::FiniteValue);
  Teuchos::RCP<NOX::StatusTest::Combo> combo = 
    Teuchos::rcp(new NOX::StatusTest::Combo(NOX::StatusTest::Combo::OR));
  combo->addStatusTest(fv);
  combo->addStatusTest(converged);
  combo->addStatusTest(maxiters);

  // Create the solver
  Teuchos::RCP<NOX::Solver::Generic> solver = 
    NOX::Solver::buildSolver(grpPtr, combo, nlParamsPtr);
  NOX::StatusTest::StatusType solvStatus = solver->solve();

  // End Nonlinear Solver **************************************

  // Get the Epetra_Vector with the final solution from the solver
  const NOX::Epetra::Group& finalGroup = 
    dynamic_cast<const NOX::Epetra::Group&>(solver->getSolutionGroup());
  const Epetra_Vector& finalSolution = 
    (dynamic_cast<const NOX::Epetra::Vector&>(finalGroup.getX())).
    getEpetraVector();

  // Output the parameter list
  if (verbose) {
    if (printing.isPrintType(NOX::Utils::Parameters)) {
      printing.out() << endl << "Final Parameters" << endl
	   << "****************" << endl;
      solver->getList().print(printing.out());
      printing.out() << endl;
    }
  }

  // Print solution
  char file_name[25];
  FILE *ifp;
  int NumMyElements = soln->Map().NumMyElements();
  (void) sprintf(file_name, "output.%d",MyPID);
  ifp = fopen(file_name, "w");
  for (int i=0; i<NumMyElements; i++)
    fprintf(ifp, "%d  %E\n", soln->Map().MinMyGID()+i, finalSolution[i]);
  fclose(ifp);


  // Tests
  int status = 0; // Converged
  
  // 1. Convergence
  if (solvStatus != NOX::StatusTest::Converged) {
      status = 1;
      if (printing.isPrintType(NOX::Utils::Error))
	printing.out() << "Nonlinear solver failed to converge!" << endl;
  }

  // 3. Nonlinear solve iterations (10)
  if (const_cast<Teuchos::ParameterList&>(solver->getList()).sublist("Output").get("Nonlinear Iterations", 0) != 10)
    status = 3;

  // Summarize test results 
  if (status == 0)
    printing.out() << "Test passed!" << endl;
  else 
    printing.out() << "Test failed!" << endl;
  
  printing.out() << "Status = " << status << endl;

  // Final return value (0 = successful, non-zero = failure)
  return status;
}
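As a usage note, the hard-coded Stratimikos settings above could instead be read from an XML file with the standard Teuchos helper (an optional workflow, not something this test does; "stratimikos.xml" is a hypothetical file name):

// Optional alternative (assumed workflow): populate the "Stratimikos"
// sublist from an XML file instead of hard-coding it.
#include "Teuchos_XMLParameterListHelpers.hpp"

Teuchos::updateParametersFromXmlFile("stratimikos.xml",
                                     Teuchos::ptrFromRef(stratParams));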