Example #1
0
Teuchos::RCP<const Thyra::VectorSpaceBase<double> >
Thyra::create_VectorSpace(
  const RCP<const Epetra_Map> &epetra_map
  )
{
#ifdef TEUCHOS_DEBUG
  TEUCHOS_TEST_FOR_EXCEPTION(
    !epetra_map.get(), std::invalid_argument,
    "create_VectorSpace::initialize(...): Error!" );
#endif // TEUCHOS_DEBUG
  RCP<const Teuchos::Comm<Ordinal> >
    comm = create_Comm(Teuchos::rcp(&epetra_map->Comm(),false)).assert_not_null();
  Teuchos::set_extra_data( epetra_map, "epetra_map", Teuchos::inOutArg(comm) );
  const Ordinal localSubDim = epetra_map->NumMyElements();
  RCP<DefaultSpmdVectorSpace<double> > vs =
    defaultSpmdVectorSpace<double>(
      comm, localSubDim, epetra_map->NumGlobalElements());
#ifdef TEUCHOS_DEBUG
  TEUCHOS_TEST_FOR_EXCEPTION(
    vs->dim() != epetra_map->NumGlobalElements(), std::logic_error
    ,"create_VectorSpace(...): Error, vs->dim() = "<<vs->dim()<<" != "
    "epetra_map->NumGlobalElements() = "<<epetra_map->NumGlobalElements()<<"!"
    );
#endif // TEUCHOS_DEBUG
  Teuchos::set_extra_data( epetra_map, "epetra_map", Teuchos::inOutArg(vs) );
  return vs;
}
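A minimal usage sketch for the function above (added for illustration; the function name exampleCreateVectorSpace is hypothetical, and the header names assume a standard Trilinos MPI build):
#include "Epetra_MpiComm.h"
#include "Epetra_Map.h"
#include "Teuchos_RCP.hpp"
#include "Thyra_EpetraThyraWrappers.hpp"

void exampleCreateVectorSpace()
{
  Epetra_MpiComm epetraComm(MPI_COMM_WORLD);
  // 100 global entries, index base 0, distributed uniformly across processes.
  Teuchos::RCP<const Epetra_Map> map =
    Teuchos::rcp(new Epetra_Map(100, 0, epetraComm));
  Teuchos::RCP<const Thyra::VectorSpaceBase<double> > vs =
    Thyra::create_VectorSpace(map);
  // vs->dim() == 100 on every process; the map is attached as extra data,
  // so it stays alive as long as the vector space does.
}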
Example #2
0
Teuchos::RCP<const Teuchos::Comm<Thyra::Ordinal> >
Thyra::create_Comm( const RCP<const Epetra_Comm> &epetraComm )
{
  using Teuchos::rcp;
  using Teuchos::rcp_dynamic_cast;
  using Teuchos::set_extra_data;

  RCP<const Epetra_SerialComm>
    serialEpetraComm = rcp_dynamic_cast<const Epetra_SerialComm>(epetraComm);
  if( serialEpetraComm.get() ) {
    RCP<const Teuchos::SerialComm<Ordinal> >
      serialComm = rcp(new Teuchos::SerialComm<Ordinal>());
    set_extra_data( serialEpetraComm, "serialEpetraComm", Teuchos::inOutArg(serialComm) );
    return serialComm;
  }

#ifdef HAVE_MPI
  
  RCP<const Epetra_MpiComm>
    mpiEpetraComm = rcp_dynamic_cast<const Epetra_MpiComm>(epetraComm);
  if( mpiEpetraComm.get() ) {
    RCP<const Teuchos::OpaqueWrapper<MPI_Comm> >
      rawMpiComm = Teuchos::opaqueWrapper(mpiEpetraComm->Comm());
    set_extra_data( mpiEpetraComm, "mpiEpetraComm", Teuchos::inOutArg(rawMpiComm) );
    RCP<const Teuchos::MpiComm<Ordinal> >
      mpiComm = rcp(new Teuchos::MpiComm<Ordinal>(rawMpiComm));
    return mpiComm;
  }

#endif // HAVE_MPI
  
  // If we get here then the conversion failed!
  return Teuchos::null;

}
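A hedged usage sketch for create_Comm (the function name exampleCreateComm is an assumption added for illustration):
#include "Epetra_SerialComm.h"
#include "Teuchos_RCP.hpp"
#include "Thyra_EpetraThyraWrappers.hpp"

void exampleCreateComm()
{
  Teuchos::RCP<const Epetra_Comm> epetraComm =
    Teuchos::rcp(new Epetra_SerialComm());
  Teuchos::RCP<const Teuchos::Comm<Thyra::Ordinal> > comm =
    Thyra::create_Comm(epetraComm);
  // An Epetra_SerialComm maps to a Teuchos::SerialComm, so getSize() == 1;
  // an unrecognized communicator type would yield Teuchos::null instead.
  if (!comm.is_null()) {
    const int numProcs = comm->getSize();
    (void) numProcs;
  }
}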
Example #3
0
const Epetra_Comm& EpetraOperator::Comm() const {
    RCP<Matrix> A = Hierarchy_->GetLevel(0)->Get<RCP<Matrix> >("A");

    //TODO: This code is not pretty
    RCP<Xpetra::BlockedCrsMatrix<double, int, int> > epbA = Teuchos::rcp_dynamic_cast<Xpetra::BlockedCrsMatrix<double, int, int> >(A);
    if (epbA != Teuchos::null) {
        RCP<const Xpetra::EpetraCrsMatrix> tmp_ECrsMtx = rcp_dynamic_cast<const Xpetra::EpetraCrsMatrix>(epbA->getMatrix(0,0));
        if (tmp_ECrsMtx == Teuchos::null)
            throw Exceptions::BadCast("Cast from Xpetra::CrsMatrix to Xpetra::EpetraCrsMatrix failed");

        RCP<Epetra_CrsMatrix> epA = tmp_ECrsMtx->getEpetra_CrsMatrixNonConst();
        return epA->Comm();
    }

    RCP<Epetra_CrsMatrix> epA = Utils::Op2NonConstEpetraCrs(A);
    return epA->Comm();
}
Example #4
0
EpetraVectorSpace::EpetraVectorSpace(const RCP<const Epetra_Map>& m)
  : ScalarProdVectorSpaceBase<double>(),
    SpmdVectorSpaceBase<double>(),
    smallVecSpcFactory_(rcp(new DefaultSpmdVectorSpaceFactory<double>())),
    epetraMap_(m),
    comm_(epetraCommToTeuchosComm(m->Comm())),
    localSubDim_(epetraMap_->NumMyElements()),
    localOffset_(epetraMap_->MinMyGID())
{}
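A small construction sketch (added for illustration; it assumes the EpetraVectorSpace class above is concrete, i.e. implements the remaining pure virtual methods of its Thyra bases, and that epetraCommToTeuchosComm is in scope):
#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"

void exampleEpetraVectorSpace()
{
  Epetra_SerialComm comm;
  Teuchos::RCP<const Epetra_Map> map =
    Teuchos::rcp(new Epetra_Map(10, 0, comm));
  // The constructor caches the Teuchos comm, the local dimension
  // (NumMyElements), and the local offset (MinMyGID) from the map.
  EpetraVectorSpace vs(map);
}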
Example #5
0
    Teuchos::RCP<const Teuchos::Comm<int> >
    extractTeuchosComm (const Teuchos::RCP<const Epetra_Comm>& epetraComm)
    {
      using Teuchos::RCP;
      using Teuchos::rcp;
      using Teuchos::rcp_dynamic_cast;
      using Teuchos::rcp_implicit_cast;

#ifdef EPETRA_MPI
      RCP<const Epetra_MpiComm> epetraMpiComm =
        rcp_dynamic_cast<const Epetra_MpiComm> (epetraComm, false);
      if (! epetraMpiComm.is_null ()) {
        // It's OK to extract and use the raw MPI_Comm object, since
        // an Epetra_MpiComm doesn't own (is not responsible for
        // calling MPI_Comm_free on) its underlying MPI_Comm object.
        MPI_Comm rawMpiComm = epetraMpiComm->Comm ();
        RCP<const Teuchos::MpiComm<int> > teuchosMpiComm =
          rcp (new Teuchos::MpiComm<int> (rawMpiComm));
        return rcp_implicit_cast<const Teuchos::Comm<int> > (teuchosMpiComm);
      }
#endif // EPETRA_MPI

      RCP<const Epetra_SerialComm> epetraSerialComm =
        rcp_dynamic_cast<const Epetra_SerialComm> (epetraComm, false);
      if (! epetraSerialComm.is_null ()) {
        RCP<const Teuchos::SerialComm<int> > teuchosSerialComm =
          rcp (new Teuchos::SerialComm<int> ());
        return rcp_implicit_cast<const Teuchos::Comm<int> > (teuchosSerialComm);
      }

      TEUCHOS_TEST_FOR_EXCEPTION(
        true, std::invalid_argument, "The input Epetra_Comm object is neither "
        "an Epetra_MpiComm nor an Epetra_SerialComm.  This means that we don't"
        " know how to convert it into a Teuchos::Comm<int> instance.  You are "
        "probably seeing this error message as a result of trying to invoke "
        "TSQR (the Tall Skinny QR factorization) on an Epetra_MultiVector, "
        "probably as a block orthogonalization method in either Anasazi or "
        "Belos.  TSQR currently only works on an Epetra_MultiVector if the "
        "latter's communicator is either an Epetra_MpiComm (wraps MPI) or an "
        "Epetra_SerialComm (\"stub\" communicator for a single process without "
        "MPI).");
    }
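A hedged usage sketch for extractTeuchosComm (the caller name is hypothetical; Teuchos::reduceAll is the standard Teuchos collective used here to exercise the returned communicator):
#include "Epetra_SerialComm.h"
#include "Teuchos_CommHelpers.hpp"

void exampleExtractTeuchosComm()
{
  Teuchos::RCP<const Epetra_Comm> epetraComm =
    Teuchos::rcp(new Epetra_SerialComm());
  // Throws std::invalid_argument for communicator types other than
  // Epetra_MpiComm / Epetra_SerialComm.
  Teuchos::RCP<const Teuchos::Comm<int> > comm =
    extractTeuchosComm(epetraComm);
  int localValue = 1, globalSum = 0;
  Teuchos::reduceAll(*comm, Teuchos::REDUCE_SUM, localValue,
                     Teuchos::outArg(globalSum));
}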
Example #6
0
    /// If the input Epetra_Comm is really an Epetra_MpiComm, return its
    /// raw MPI_Comm MPI communicator object.  Otherwise, return
    /// MPI_COMM_NULL.  (The Epetra_Comm interface doesn't define sends
    /// and receives, which TSQR needs.  That's why TSQR wants the raw
    /// MPI_COMM object.)
    ///
    /// \return (The MPI_Comm object, and whether it's valid)
    static std::pair< MPI_Comm, bool >
    extractRawMpiComm (const Teuchos::RCP< const Epetra_Comm >& pComm)
    {
      using Teuchos::RCP;
      using Teuchos::rcp_dynamic_cast;

      MPI_Comm rawMpiComm = MPI_COMM_NULL;
      bool haveMpiComm = false;
      
      RCP<const Epetra_MpiComm> pMpiComm =
        rcp_dynamic_cast<const Epetra_MpiComm> (pComm, false);
      if (! pMpiComm.is_null ()) {
        rawMpiComm = pMpiComm->Comm ();
        haveMpiComm = true;
      }
      return std::make_pair (rawMpiComm, haveMpiComm);
    }
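A hypothetical caller for the pair-returning helper above (added for illustration; it assumes extractRawMpiComm is visible in the same translation unit, as its static declaration implies):
#include <utility>
#include "Epetra_MpiComm.h"

void exampleExtractRawMpiComm(const Teuchos::RCP<const Epetra_Comm>& comm)
{
  const std::pair<MPI_Comm, bool> result = extractRawMpiComm(comm);
  if (result.second) {
    int rank = 0;
    MPI_Comm_rank(result.first, &rank); // valid: comm wrapped a real MPI_Comm
  } else {
    // result.first is MPI_COMM_NULL; fall back to a serial code path.
  }
}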
Example #7
0
int main(int argc, char *argv[]) {

  int status=0; // 0 = pass, failures are incremented
  bool success = true;

#ifdef ALBANY_DEBUG
  Teuchos::GlobalMPISession mpiSession(&argc, &argv);
#else // bypass printing process startup info
  Teuchos::GlobalMPISession mpiSession(&argc, &argv, NULL);
#endif

#ifdef ENABLE_CHECK_FPE
  // Catch FPEs
  _mm_setcsr(_MM_MASK_MASK &
             ~(_MM_MASK_OVERFLOW | _MM_MASK_INVALID | _MM_MASK_DIV_ZERO));
#endif

  using Teuchos::RCP;
  using Teuchos::rcp;

  RCP<Teuchos::FancyOStream> out(Teuchos::VerboseObjectBase::getDefaultOStream());

  // Command-line argument for input file
  std::string xmlfilename;
  if(argc > 1){

    if(!strcmp(argv[1],"--help")){
      printf("albany [inputfile.xml]\n");
      exit(1);
    }
    else
      xmlfilename = argv[1];

  }
  else
    xmlfilename = "input.xml";

  try {
    RCP<Teuchos::Time> totalTime =
      Teuchos::TimeMonitor::getNewTimer("Albany: ***Total Time***");

    RCP<Teuchos::Time> setupTime =
      Teuchos::TimeMonitor::getNewTimer("Albany: Setup Time");
    Teuchos::TimeMonitor totalTimer(*totalTime); //start timer
    // Hold the setup timer via RCP so it can be stopped explicitly below
    // without calling the destructor by hand (which would run it twice).
    Teuchos::RCP<Teuchos::TimeMonitor> setupTimer =
      Teuchos::rcp(new Teuchos::TimeMonitor(*setupTime)); //start timer

    Albany::SolverFactory slvrfctry(xmlfilename, Albany_MPI_COMM_WORLD);
    RCP<Epetra_Comm> appComm = Albany::createEpetraCommFromMpiComm(Albany_MPI_COMM_WORLD);
    RCP<Albany::Application> app;
    const RCP<Thyra::ModelEvaluator<double> > solver =
      slvrfctry.createThyraSolverAndGetAlbanyApp(app, appComm, appComm);

    setupTimer = Teuchos::null; // stop the setup timer

    Teuchos::ParameterList &solveParams =
      slvrfctry.getAnalysisParameters().sublist("Solve", /*mustAlreadyExist =*/ false);
    // By default, request the sensitivities if not explicitly disabled
    solveParams.get("Compute Sensitivities", true);

    Teuchos::Array<Teuchos::RCP<const Thyra::VectorBase<double> > > thyraResponses;
    Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Thyra::MultiVectorBase<double> > > > thyraSensitivities;

    // The PoissonSchrodinger_SchroPo and PoissonSchroMosCap1D tests segfault
    // because albanyApp is null; for now, do not resize the response vectors.
    // FIXME: sort out this issue.
    if(Teuchos::nonnull(app))
      Piro::PerformSolveBase(*solver, solveParams, thyraResponses, thyraSensitivities, app->getAdaptSolMgr()->getSolObserver());
    else
      Piro::PerformSolveBase(*solver, solveParams, thyraResponses, thyraSensitivities);

    Teuchos::Array<Teuchos::RCP<const Epetra_Vector> > responses;
    Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Epetra_MultiVector> > > sensitivities;
    epetraFromThyra(appComm, thyraResponses, thyraSensitivities, responses, sensitivities);

    const int num_p = solver->Np(); // Number of *vectors* of parameters
    const int num_g = solver->Ng(); // Number of *vectors* of responses

    *out << "Finished eval of first model: Params, Responses "
      << std::setprecision(12) << std::endl;

    const Thyra::ModelEvaluatorBase::InArgs<double> nominal = solver->getNominalValues();
    for (int i=0; i<num_p; i++) {
      const Teuchos::RCP<const Epetra_Vector> p_init = epetraVectorFromThyra(appComm, nominal.get_p(i));
      p_init->Print(*out << "\nParameter vector " << i << ":\n");
    }

    for (int i=0; i<num_g-1; i++) {
      const RCP<const Epetra_Vector> g = responses[i];
      bool is_scalar = true;

      if (app != Teuchos::null)
        is_scalar = app->getResponse(i)->isScalarResponse();

      if (is_scalar) {
        g->Print(*out << "\nResponse vector " << i << ":\n");

        if (num_p == 0) {
          // Just calculate regression data
          status += slvrfctry.checkSolveTestResults(i, 0, g.get(), NULL);
        } else {
          for (int j=0; j<num_p; j++) {
            const RCP<const Epetra_MultiVector> dgdp = sensitivities[i][j];
            if (Teuchos::nonnull(dgdp)) {
              dgdp->Print(*out << "\nSensitivities (" << i << "," << j << "):!\n");
            }
            status += slvrfctry.checkSolveTestResults(i, j, g.get(), dgdp.get());
          }
        }
      }
    }

    const RCP<const Epetra_Vector> xfinal = responses.back();
    double mnv;
    xfinal->MeanValue(&mnv);

    // Create debug output object
    Teuchos::ParameterList &debugParams =
      slvrfctry.getParameters().sublist("Debug Output", true);
    bool writeToMatrixMarketSoln = debugParams.get("Write Solution to MatrixMarket", false);
    bool writeToCoutSoln = debugParams.get("Write Solution to Standard Output", false);
    if (writeToMatrixMarketSoln) {

      //create serial map that puts the whole solution on processor 0
      int numMyElements = (xfinal->Comm().MyPID() == 0) ? app->getDiscretization()->getMap()->NumGlobalElements() : 0;
      const Epetra_Map serial_map(-1, numMyElements, 0, xfinal->Comm());

      //create importer from parallel map to serial map and populate serial solution xfinal_serial
      Epetra_Import importOperator(serial_map, *app->getDiscretization()->getMap());
      Epetra_Vector xfinal_serial(serial_map);
      xfinal_serial.Import(*app->getDiscretization()->getSolutionField(), importOperator, Insert);

      //writing to MatrixMarket file
      EpetraExt::MultiVectorToMatrixMarketFile("xfinal.mm", xfinal_serial);
    }
    if (writeToCoutSoln)
      std::cout << "xfinal: " << *xfinal << std::endl;

    *out << "Main_Solve: MeanValue of final solution " << mnv << std::endl;
    *out << "\nNumber of Failed Comparisons: " << status << std::endl;
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
  if (!success) status+=10000;

  Teuchos::TimeMonitor::summarize(*out, false, true, false /*zero timers*/);
  return status;
}
Example #8
0
int main(int argc, char *argv[]) {

  int status=0; // 0 = pass, failures are incremented
  bool success = true;

#ifdef ALBANY_DEBUG
  Teuchos::GlobalMPISession mpiSession(&argc, &argv);
#else // bypass printing process startup info
  Teuchos::GlobalMPISession mpiSession(&argc, &argv, NULL);
#endif

  Kokkos::initialize(argc, argv);

#ifdef ALBANY_FLUSH_DENORMALS
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
#endif

#ifdef ALBANY_CHECK_FPE
   // Catch FPEs. Follow Main_SolveT.cpp's approach to checking for floating
   // point exceptions.
   //_mm_setcsr(_MM_MASK_MASK &~ (_MM_MASK_OVERFLOW | _MM_MASK_INVALID | _MM_MASK_DIV_ZERO) );
   _MM_SET_EXCEPTION_MASK(_MM_GET_EXCEPTION_MASK() & ~_MM_MASK_INVALID);
#endif

  using Teuchos::RCP;
  using Teuchos::rcp;

  RCP<Teuchos::FancyOStream> out(Teuchos::VerboseObjectBase::getDefaultOStream());

  // Command-line argument for input file
  Albany::CmdLineArgs cmd;
  cmd.parse_cmdline(argc, argv, *out);

  try {

    RCP<Teuchos::Time> totalTime =
      Teuchos::TimeMonitor::getNewTimer("Albany: ***Total Time***");

    RCP<Teuchos::Time> setupTime =
      Teuchos::TimeMonitor::getNewTimer("Albany: Setup Time");
    Teuchos::TimeMonitor totalTimer(*totalTime); //start timer
    // Hold the setup timer via RCP so it can be stopped explicitly below
    // without calling the destructor by hand (which would run it twice).
    Teuchos::RCP<Teuchos::TimeMonitor> setupTimer =
      Teuchos::rcp(new Teuchos::TimeMonitor(*setupTime)); //start timer

    RCP<const Teuchos_Comm> comm =
      Tpetra::DefaultPlatform::getDefaultPlatform().getComm();

    // Connect vtune for performance profiling
    if (cmd.vtune) {
      Albany::connect_vtune(comm->getRank());
    }

    Albany::SolverFactory slvrfctry(cmd.xml_filename, comm);
    RCP<Epetra_Comm> appComm = Albany::createEpetraCommFromTeuchosComm(comm);
    RCP<Albany::Application> app;
    const RCP<Thyra::ModelEvaluator<double> > solver =
      slvrfctry.createThyraSolverAndGetAlbanyApp(app, appComm, appComm);

    setupTimer = Teuchos::null; // stop the setup timer

//    PHX::InitializeKokkosDevice();
   
    Teuchos::ParameterList &solveParams =
      slvrfctry.getAnalysisParameters().sublist("Solve", /*mustAlreadyExist =*/ false);
    // By default, request the sensitivities if not explicitly disabled
    solveParams.get("Compute Sensitivities", true);

    Teuchos::Array<Teuchos::RCP<const Thyra::VectorBase<double> > > thyraResponses;
    Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Thyra::MultiVectorBase<double> > > > thyraSensitivities;

    // The PoissonSchrodinger_SchroPo and PoissonSchroMosCap1D tests segfault
    // because albanyApp is null; for now, do not resize the response vectors.
    // FIXME: sort out this issue.
    if(Teuchos::nonnull(app))
      Piro::PerformSolveBase(*solver, solveParams, thyraResponses, thyraSensitivities, app->getAdaptSolMgr()->getSolObserver());
    else
      Piro::PerformSolveBase(*solver, solveParams, thyraResponses, thyraSensitivities);

    Teuchos::Array<Teuchos::RCP<const Epetra_Vector> > responses;
    Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Epetra_MultiVector> > > sensitivities;
    epetraFromThyra(appComm, thyraResponses, thyraSensitivities, responses, sensitivities);

    const int num_p = solver->Np(); // Number of *vectors* of parameters
    const int num_g = solver->Ng(); // Number of *vectors* of responses

    *out << "Finished eval of first model: Params, Responses "
      << std::setprecision(12) << std::endl;

    Teuchos::ParameterList& parameterParams =
      slvrfctry.getParameters().sublist("Problem").sublist("Parameters");
    // If the deck gives a scalar "Number", there is one parameter vector when
    // it is positive; otherwise read the count from "Number of Parameter Vectors".
    int num_param_vecs = (parameterParams.isType<int>("Number")) ?
        int(parameterParams.get("Number", 0) > 0) :
        parameterParams.get("Number of Parameter Vectors", 0);

    const Thyra::ModelEvaluatorBase::InArgs<double> nominal = solver->getNominalValues();
    double norm2;
    for (int i=0; i<num_p; i++) {
      const Teuchos::RCP<const Epetra_Vector> p_init = epetraVectorFromThyra(appComm, nominal.get_p(i));
      if(i < num_param_vecs)
        p_init->Print(*out << "\nParameter vector " << i << ":\n");
      else { //distributed parameters, we print only 2-norm
        p_init->Norm2(&norm2);
        *out << "\nDistributed Parameter " << i << ":  " << norm2 << " (two-norm)\n" << std::endl;
      }
    }

    for (int i=0; i<num_g-1; i++) {
      const RCP<const Epetra_Vector> g = responses[i];
      bool is_scalar = true;

      if (app != Teuchos::null)
        is_scalar = app->getResponse(i)->isScalarResponse();

      if (is_scalar) {
        g->Print(*out << "\nResponse vector " << i << ":\n");

        if (num_p == 0) {
          // Just calculate regression data
          status += slvrfctry.checkSolveTestResults(i, 0, g.get(), NULL);
        } else {
          for (int j=0; j<num_p; j++) {
            const RCP<const Epetra_MultiVector> dgdp = sensitivities[i][j];
            if (Teuchos::nonnull(dgdp)) {
              if(j < num_param_vecs) {
                dgdp->Print(*out << "\nSensitivities (" << i << "," << j << "): \n");
                status += slvrfctry.checkSolveTestResults(i, j, g.get(), dgdp.get());
              }
              else {
                const Epetra_Map serial_map(-1, 1, 0, dgdp->Comm());
                Epetra_MultiVector norms(serial_map, dgdp->NumVectors());
                // RCP<Albany::ScalarResponseFunction> response =
                //   rcp_dynamic_cast<Albany::ScalarResponseFunction>(app->getResponse(i));
                // int numResponses = response->numResponses();
                *out << "\nSensitivities (" << i << "," << j  << ") for Distributed Parameters:  (two-norm)\n";
                *out << "    ";
                for(int ir=0; ir<dgdp->NumVectors(); ++ir) {
                  (*dgdp)(ir)->Norm2(&norm2);
                  (*norms(ir))[0] = norm2;
                  *out << "    " << norm2;
                }
                *out << "\n" << std::endl;
                status += slvrfctry.checkSolveTestResults(i, j, g.get(), &norms);
              }
            }
          }
        }
      }
    }

    // Create debug output object
    Teuchos::ParameterList &debugParams =
      slvrfctry.getParameters().sublist("Debug Output", true);
    bool writeToMatrixMarketSoln = debugParams.get("Write Solution to MatrixMarket", false);
    bool writeToMatrixMarketDistrSolnMap = debugParams.get("Write Distributed Solution and Map to MatrixMarket", false);
    bool writeToCoutSoln = debugParams.get("Write Solution to Standard Output", false);


    const RCP<const Epetra_Vector> xfinal = responses.back();
    double mnv;
    xfinal->MeanValue(&mnv);
    *out << "Main_Solve: MeanValue of final solution " << mnv << std::endl;
    *out << "\nNumber of Failed Comparisons: " << status << std::endl;
    if (writeToCoutSoln)
      std::cout << "xfinal: " << *xfinal << std::endl;

#ifdef ALBANY_PERIDIGM
#if defined(ALBANY_EPETRA)
    if (Teuchos::nonnull(LCM::PeridigmManager::self())) {
      *out << std::setprecision(12) << "\nPERIDIGM-ALBANY OPTIMIZATION-BASED COUPLING FINAL FUNCTIONAL VALUE = "
           << LCM::PeridigmManager::self()->obcEvaluateFunctional()  << "\n" << std::endl;
    }
#endif
#endif

    if (debugParams.get<bool>("Analyze Memory", false))
      Albany::printMemoryAnalysis(std::cout, comm);

    if (writeToMatrixMarketSoln) {

      //create serial map that puts the whole solution on processor 0
      int numMyElements = (xfinal->Comm().MyPID() == 0) ? app->getDiscretization()->getMap()->NumGlobalElements() : 0;
      const Epetra_Map serial_map(-1, numMyElements, 0, xfinal->Comm());

      //create importer from parallel map to serial map and populate serial solution xfinal_serial
      Epetra_Import importOperator(serial_map, *app->getDiscretization()->getMap());
      Epetra_Vector xfinal_serial(serial_map);
      xfinal_serial.Import(*app->getDiscretization()->getSolutionField(), importOperator, Insert);

      //writing to MatrixMarket file
      EpetraExt::MultiVectorToMatrixMarketFile("xfinal.mm", xfinal_serial);
    }
    if (writeToMatrixMarketDistrSolnMap) {
      //writing to MatrixMarket file
      EpetraExt::MultiVectorToMatrixMarketFile("xfinal_distributed.mm", *xfinal);
      EpetraExt::BlockMapToMatrixMarketFile("xfinal_distributed_map.mm", *app->getDiscretization()->getMap());
    }
  }
  TEUCHOS_STANDARD_CATCH_STATEMENTS(true, std::cerr, success);
  if (!success) status+=10000;
  

  Teuchos::TimeMonitor::summarize(*out, false, true, false /*zero timers*/);

  Kokkos::finalize_all();
 
  return status;
}