// Build a 1D Cartesian map: nx nodes split into mx contiguous subdomains,
// one subdomain per process rank. Throws on non-positive sizes or when
// there are more subdomains than nodes.
Teuchos::RCP<Map> Cartesian1D(const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
                              const GlobalOrdinal nx, const GlobalOrdinal mx)
{
  if (nx <= 0 || mx <= 0 || (mx > nx))
    throw Exception(__FILE__, __LINE__,
                    "Incorrect input parameter to Maps::Cartesian1D()",
                    "nx = " + toString(nx) + ", mx = " + toString(mx));

  typedef GlobalOrdinal GO;

  const int myPID = comm->getRank();

  // Contiguous [startx, endx) index range owned by this rank.
  GO startx, endx;
  Utils::getSubdomainData<GO>(nx, mx, myPID, startx, endx);

  std::vector<GO> myGlobalElements(static_cast<size_t>(endx - startx));
  size_t pos = 0;
  for (GO gid = startx; gid < endx; ++gid)
    myGlobalElements[pos++] = gid;

  const Teuchos::ArrayView<const GO> elementList(myGlobalElements);
  const global_size_t numGlobalElements = nx;

  return MapTraits<GO,Map>::Build(numGlobalElements, elementList, 0/*indexBase*/, comm /*TODO:node*/);
}
// Build the maps for a (small) globally replicated object of the given size:
// the repeated map lists the whole range [0, size) on every processor, while
// the unique map keeps all of it on processor 0 and none elsewhere.
MapEpetra::MapEpetra ( const Int size, const commPtr_Type& commPtr ) :
    M_exporter (new std::shared_ptr<Epetra_Export>()),
    M_importer (new std::shared_ptr<Epetra_Import>()),
    M_commPtr ( commPtr )
{
    ASSERT (M_commPtr.get()!=0, "Error! The communicator pointer is not valid.\n");

    // Identity list of global indices 0..size-1, shared by both maps below.
    std::vector<Int> indexList ( size );
    for ( Int k = 0; k < size; ++k )
    {
        indexList[k] = k;
    }

    // Repeated map: every processor owns the full range.
    M_repeatedMapEpetra.reset ( new Epetra_Map ( size, size, &indexList[0], 0, *commPtr ) );

    // Unique map: only processor 0 owns elements, the others own none.
    const Int myUniqueCount = ( commPtr->MyPID() == 0 ) ? size : 0;
    M_uniqueMapEpetra.reset ( new Epetra_Map ( size, myUniqueCount, &indexList[0], 0, *commPtr ) );
}
// Build a 2D Cartesian map: nx*ny nodes partitioned over an mx-by-my grid of
// processes (rank r handles grid cell (r % mx, r / mx)). Throws on
// non-positive sizes or when a grid dimension exceeds the node count.
Teuchos::RCP<Map> Cartesian2D(const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
                              const GlobalOrdinal nx, const GlobalOrdinal ny,
                              const GlobalOrdinal mx, const GlobalOrdinal my)
{
  if (nx <= 0 || ny <= 0 || mx <= 0 || my <= 0 || (mx > nx) || (my > ny))
    throw(Exception(__FILE__, __LINE__,
                    "Incorrect input parameter to Maps::Cartesian2D()",
                    "nx = " + toString(nx) + ", ny = " + toString(ny) +
                    ", mx = " + toString(mx) + ", my = " + toString(my)));

  typedef GlobalOrdinal GO;

  const int myPID = comm->getRank();

  // Index ranges owned by this rank in each direction of the processor grid.
  GO startx, starty, endx, endy;
  Utils::getSubdomainData(nx, mx, myPID % mx, startx, endx);
  Utils::getSubdomainData(ny, my, myPID / mx, starty, endy);

  std::vector<GO> myGlobalElements(static_cast<size_t>((endx - startx) * (endy - starty)));
  size_t pos = 0;
  for (GO ix = startx; ix < endx; ++ix)
    for (GO iy = starty; iy < endy; ++iy)
      myGlobalElements[pos++] = iy * nx + ix;   // row-major global numbering

  const Teuchos::ArrayView<const GO> elementList(myGlobalElements);
  const global_size_t numGlobalElements = nx * ny;

  return MapTraits<GO,Map>::Build(numGlobalElements, elementList, 0/*indexBase*/, comm /*TODO:node*/);
}
// ============================================================================= Teuchos::RCP<Epetra_Map> VIO::EpetraMesh::Reader:: createComplexValuesMap_ ( const Epetra_Map & nodesMap ) const { // get view for the global indices of the global elements int numMyElements = nodesMap.NumMyElements(); Teuchos::ArrayRCP<int> myGlobalElements( numMyElements ); nodesMap.MyGlobalElements( myGlobalElements.getRawPtr() ); // Construct the map in such a way that all complex entries on processor K // are split up into real and imaginary part, which will both reside on // processor K again. int numMyComplexElements = 2*numMyElements; Teuchos::ArrayRCP<int> myComplexGlobalElements ( numMyComplexElements ); for ( int k = 0; k < numMyElements; k++ ) { myComplexGlobalElements[2*k ] = 2 * myGlobalElements[k]; myComplexGlobalElements[2*k+1] = 2 * myGlobalElements[k] + 1; } return Teuchos::rcp ( new Epetra_Map ( -1, myComplexGlobalElements.size(), myComplexGlobalElements.getRawPtr(), nodesMap.IndexBase(), nodesMap.Comm() ) ); }
// Set up the 1D FSI model: finite-element space, solution vectors, boundary
// conditions, post-processing exporters and, when couplings are present and
// finite-difference Jacobians are enabled, the auxiliary linear model.
// NOTE(review): call order matters here — the default BC is set only after
// the other BC data, as the inline comment below states.
void MultiscaleModelFSI1D::setupModel()
{
#ifdef HAVE_LIFEV_DEBUG
    debugStream ( 8130 ) << "MultiscaleModelFSI1D::setupModel() \n";
#endif

    //FEspace
    setupFESpace();

    //Setup solution (current and previous time step)
    M_solver->setupSolution ( *M_solution );
    M_solver->setupSolution ( *M_solution_tn );

    //Set default BC (has to be called after setting other BC)
    M_bc->handler()->setDefaultBC();
    M_bc->setPhysicalSolver ( M_solver );
    M_bc->setSolution ( M_solution );
    M_bc->setFluxSource ( M_flux, M_source );

    //Post-processing
#ifdef HAVE_HDF5
    M_exporter->setMeshProcId ( M_exporterMesh, M_comm->MyPID() );

    // Build a map over the exporter-mesh DOFs and lay out the exported
    // solution on it.
    DOF tmpDof ( *M_exporterMesh, M_feSpace->refFE() );
    std::vector<Int> myGlobalElements ( tmpDof.globalElements ( *M_exporterMesh ) );
    MapEpetra map ( -1, myGlobalElements.size(), &myGlobalElements[0], M_comm );
    M_solver->setupSolution ( *M_exporterSolution, map, true );

    // Register the exported scalar fields.
    M_exporter->addVariable ( IOData_Type::ScalarField, "Area ratio (fluid)", M_feSpace, (*M_exporterSolution) ["AoverA0minus1"], static_cast <UInt> ( 0 ) );
    M_exporter->addVariable ( IOData_Type::ScalarField, "Flow rate (fluid)", M_feSpace, (*M_exporterSolution) ["Q"], static_cast <UInt> ( 0 ) );
    //M_exporter->addVariable( IOData_Type::ScalarField, "W1", M_feSpace, (*M_exporterSolution)["W1"], static_cast <UInt> ( 0 ), M_feSpace->dof().numTotalDof() );
    //M_exporter->addVariable( IOData_Type::ScalarField, "W2", M_feSpace, (*M_exporterSolution)["W2"], static_cast <UInt> ( 0 ), M_feSpace->dof().numTotalDof() );
    M_exporter->addVariable ( IOData_Type::ScalarField, "Pressure (fluid)", M_feSpace, (*M_exporterSolution) ["P"], static_cast <UInt> ( 0 ) );
#endif

#ifdef HAVE_MATLAB_POSTPROCESSING
    M_solver->resetOutput ( *M_exporterSolution );
#endif

    //Setup solution
    initializeSolution();

#ifdef JACOBIAN_WITH_FINITEDIFFERENCE
    if ( M_couplings.size() > 0 )
    {
        createLinearBC();
        updateLinearBC ( *M_solution );
        setupLinearModel();

        // Initialize the linear solution
        copySolution ( *M_solution, *M_linearSolution );
    }
#endif
}
// =================================================== // Protected Methods // =================================================== void MultiscaleCoupling::createLocalVectors() { // Build a repeated list of GlobalElements std::vector<Int> myGlobalElements ( M_couplingVariablesNumber ); for ( UInt i = 0 ; i < myGlobalElements.size() ; ++i ) { myGlobalElements[i] = i; } // Build a repeated map for the couplings MapEpetra map ( -1, static_cast< Int > ( myGlobalElements.size() ), &myGlobalElements[0], M_comm ); // Create local repeated vectors M_localCouplingVariables.push_back ( multiscaleVectorPtr_Type ( new VectorEpetra ( map, Repeated ) ) ); M_localCouplingResiduals.reset ( new VectorEpetra ( map, Repeated ) ); }
// Build the maps for the range [0, numGlobalElements): the repeated map lists
// the whole range on every processor, while the unique map lets Epetra
// distribute the range evenly across the communicator.
MapEpetra::MapEpetra ( const Int numGlobalElements,
                       const Int /*notUsed*/,
                       const commPtr_Type& commPtr ) :
    M_exporter (new boost::shared_ptr<Epetra_Export>()),
    M_importer (new boost::shared_ptr<Epetra_Import>()),
    M_commPtr ( commPtr )
{
    ASSERT (M_commPtr.get()!=0, "Error! The communicator pointer is not valid.\n");

    // Identity list of global indices 0..numGlobalElements-1.
    std::vector<Int> gidList ( numGlobalElements );
    Int gid = 0;
    for ( std::vector<Int>::iterator it = gidList.begin(); it != gidList.end(); ++it )
    {
        *it = gid++;
    }

    // Repeated map: every processor owns the full range.
    M_repeatedMapEpetra.reset ( new Epetra_Map ( -1, numGlobalElements, &gidList[0], 0, *commPtr ) );

    // Unique map: Epetra's linear (uniform) distribution over the communicator.
    M_uniqueMapEpetra.reset ( new Epetra_Map ( numGlobalElements, 0, *commPtr ) );
}