Teuchos::RCP<PeridigmNS::NeighborhoodData> PeridigmNS::Block::createNeighborhoodDataFromGlobalNeighborhoodData(Teuchos::RCP<const Epetra_BlockMap> globalOverlapScalarPointMap,
                                                                                                               Teuchos::RCP<const PeridigmNS::NeighborhoodData> globalNeighborhoodData)
{
  int numOwnedPoints = ownedScalarPointMap->NumMyElements();
  int* ownedPointGlobalIDs = ownedScalarPointMap->MyGlobalElements();

  vector<int> ownedIDs(numOwnedPoints);
  vector<int> neighborhoodList;
  vector<int> neighborhoodPtr(numOwnedPoints);

  int* const globalNeighborhoodList = globalNeighborhoodData->NeighborhoodList();
  int* const globalNeighborhoodPtr = globalNeighborhoodData->NeighborhoodPtr();

  // Create the neighborhoodList and neighborhoodPtr for this block.
  // All the IDs in the neighborhoodList and neighborhoodPtr are local IDs into
  // the block-specific overlap map.
  for(int i=0 ; i<numOwnedPoints ; ++i){
    neighborhoodPtr[i] = (int)(neighborhoodList.size());
    int globalID = ownedPointGlobalIDs[i];
    ownedIDs[i] = overlapScalarPointMap->LID(globalID);
    int globalNeighborhoodListIndex = globalNeighborhoodPtr[globalOverlapScalarPointMap->LID(globalID)];
    int numNeighbors = globalNeighborhoodList[globalNeighborhoodListIndex++];
    neighborhoodList.push_back(numNeighbors);
    for(int j=0 ; j<numNeighbors ; ++j){
      int globalNeighborID = globalOverlapScalarPointMap->GID(globalNeighborhoodList[globalNeighborhoodListIndex++]);
      neighborhoodList.push_back( overlapScalarPointMap->LID(globalNeighborID) );
    }
  }

  // Create the NeighborhoodData for this block.
  Teuchos::RCP<PeridigmNS::NeighborhoodData> blockNeighborhoodData = Teuchos::rcp(new PeridigmNS::NeighborhoodData);
  blockNeighborhoodData->SetNumOwned(ownedIDs.size());
  if(ownedIDs.size() > 0){
    memcpy(blockNeighborhoodData->OwnedIDs(),
           &ownedIDs.at(0),
           ownedIDs.size()*sizeof(int));
  }
  if(neighborhoodPtr.size() > 0){
    memcpy(blockNeighborhoodData->NeighborhoodPtr(),
           &neighborhoodPtr.at(0),
           neighborhoodPtr.size()*sizeof(int));
  }
  blockNeighborhoodData->SetNeighborhoodListSize(neighborhoodList.size());
  if(neighborhoodList.size() > 0){
    memcpy(blockNeighborhoodData->NeighborhoodList(),
           &neighborhoodList.at(0),
           neighborhoodList.size()*sizeof(int));
  }

  return blockNeighborhoodData;
}
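// Illustration (added for exposition, not part of the original Peridigm source):
// the flattened neighborhood list built above stores, for each owned point, a
// neighbor count followed by that many point IDs. A minimal sketch of walking
// a list with that layout:
static void walkNeighborhoodList(const int* neighborhoodList, int numOwnedPoints)
{
  int index = 0;
  for(int i=0 ; i<numOwnedPoints ; ++i){
    int numNeighbors = neighborhoodList[index++]; // entry i begins with its neighbor count...
    for(int j=0 ; j<numNeighbors ; ++j){
      int neighborID = neighborhoodList[index++]; // ...followed by numNeighbors local IDs
      (void)neighborID; // a real traversal would use the ID here
    }
  }
}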
// =============================================================================
Teuchos::RCP<Epetra_Vector>
VIO::EpetraMesh::Reader::
extractStateData_ ( const vtkSmartPointer<vtkDataSet>      & vtkData,
                    const Teuchos::RCP<const Epetra_Comm>  & comm
                  ) const
{
  vtkIdType numArrays = vtkData->GetPointData()->GetNumberOfArrays();
  TEUCHOS_ASSERT_EQUALITY ( numArrays, 1 );

  const vtkSmartPointer<vtkDataArray> & array = vtkData->GetPointData()->GetArray(0);

  vtkIdType numComponents = array->GetNumberOfComponents();
  TEUCHOS_ASSERT_EQUALITY ( numComponents, 2 ); // for *complex* values

  // this is the total number of grid points
  vtkIdType numPoints = array->GetNumberOfTuples();

  // Create maps.
  // TODO They are created at another spot already. Avoid the work.
  Teuchos::RCP<Epetra_Map> nodesMap = Teuchos::rcp( new Epetra_Map( numPoints, 0, *comm ) );
  Teuchos::RCP<Epetra_Map> complexValuesMap = createComplexValuesMap_ ( *nodesMap );

  Teuchos::RCP<Epetra_Vector> z = Teuchos::rcp ( new Epetra_Vector ( *complexValuesMap ) );

  // fill z
  double val[2];
  for ( int k = 0; k < nodesMap->NumMyElements(); k++ )
  {
    array->GetTuple( nodesMap->GID(k), val );
    z->ReplaceMyValue( 2*k  , 0, val[0] );
    z->ReplaceMyValue( 2*k+1, 0, val[1] );
  }

  return z;
}
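// Illustration (not part of the original reader): the vector z produced above
// stores complex values interleaved as [Re(z_0), Im(z_0), Re(z_1), Im(z_1), ...].
// A minimal sketch of recovering std::complex values from such a vector,
// assuming <complex> and <vector> are available in this translation unit:
static std::vector<std::complex<double> > toComplex( const Epetra_Vector & z )
{
  std::vector<std::complex<double> > w( z.MyLength() / 2 );
  for ( unsigned int k = 0; k < w.size(); k++ )
    w[k] = std::complex<double>( z[2*k], z[2*k+1] ); // real part, then imaginary part
  return w;
}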
void
PeridigmNS::ElasticPlasticMaterial::computeAutomaticDifferentiationJacobian(const double dt,
                                                                            const int numOwnedPoints,
                                                                            const int* ownedIDs,
                                                                            const int* neighborhoodList,
                                                                            PeridigmNS::DataManager& dataManager,
                                                                            PeridigmNS::SerialMatrix& jacobian,
                                                                            PeridigmNS::Material::JacobianType jacobianType) const
{
  // Compute contributions to the tangent matrix on an element-by-element basis.
  // To reduce memory re-allocation, use a static variable to store the Fad types for
  // the current coordinates (the independent variables).
  static vector<Sacado::Fad::DFad<double> > y_AD;

  // Loop over all points.
  int neighborhoodListIndex = 0;
  for(int iID=0 ; iID<numOwnedPoints ; ++iID){

    // Create a temporary neighborhood consisting of a single point and its neighbors.
    int numNeighbors = neighborhoodList[neighborhoodListIndex++];
    int numEntries = numNeighbors+1;
    int numDof = 3*numEntries;
    vector<int> tempMyGlobalIDs(numEntries);
    // Put the node at the center of the neighborhood at the beginning of the list.
    tempMyGlobalIDs[0] = dataManager.getOwnedScalarPointMap()->GID(iID);
    vector<int> tempNeighborhoodList(numEntries);
    tempNeighborhoodList[0] = numNeighbors;
    for(int iNID=0 ; iNID<numNeighbors ; ++iNID){
      int neighborID = neighborhoodList[neighborhoodListIndex++];
      tempMyGlobalIDs[iNID+1] = dataManager.getOverlapScalarPointMap()->GID(neighborID);
      tempNeighborhoodList[iNID+1] = iNID+1;
    }

    Epetra_SerialComm serialComm;
    Teuchos::RCP<Epetra_BlockMap> tempOneDimensionalMap = Teuchos::rcp(new Epetra_BlockMap(numEntries, numEntries, &tempMyGlobalIDs[0], 1, 0, serialComm));
    Teuchos::RCP<Epetra_BlockMap> tempThreeDimensionalMap = Teuchos::rcp(new Epetra_BlockMap(numEntries, numEntries, &tempMyGlobalIDs[0], 3, 0, serialComm));
    Teuchos::RCP<Epetra_BlockMap> tempBondMap = Teuchos::rcp(new Epetra_BlockMap(1, 1, &tempMyGlobalIDs[0], numNeighbors, 0, serialComm));

    // Create a temporary DataManager containing data for this point and its neighborhood.
    PeridigmNS::DataManager tempDataManager;
    tempDataManager.setMaps(Teuchos::RCP<const Epetra_BlockMap>(),
                            tempOneDimensionalMap,
                            Teuchos::RCP<const Epetra_BlockMap>(),
                            tempThreeDimensionalMap,
                            tempBondMap);

    // The temporary data manager will have the same fields and data as the real data manager.
    vector<int> fieldIds = dataManager.getFieldIds();
    tempDataManager.allocateData(fieldIds);
    tempDataManager.copyLocallyOwnedDataFromDataManager(dataManager);

    // Set up numOwnedPoints and ownedIDs.
    // There is only one owned ID, and it has local ID zero in the tempDataManager.
    int tempNumOwnedPoints = 1;
    vector<int> tempOwnedIDs(tempNumOwnedPoints);
    tempOwnedIDs[0] = 0;

    // Use the scratchMatrix as a sub-matrix for storing tangent values prior to loading them into the global tangent matrix.
    // Resize scratchMatrix if necessary.
    if(scratchMatrix.Dimension() < numDof)
      scratchMatrix.Resize(numDof);

    // Create a list of global indices for the rows/columns in the scratch matrix.
    vector<int> globalIndices(numDof);
    for(int i=0 ; i<numEntries ; ++i){
      int globalID = tempOneDimensionalMap->GID(i);
      for(int j=0 ; j<3 ; ++j)
        globalIndices[3*i+j] = 3*globalID+j;
    }

    // Extract pointers to the underlying data in the constitutiveData array.
    double *x, *y, *cellVolume, *weightedVolume, *damage, *bondDamage, *edpN, *lambdaN;
    tempDataManager.getData(m_modelCoordinatesFieldId, PeridigmField::STEP_NONE)->ExtractView(&x);
    tempDataManager.getData(m_coordinatesFieldId, PeridigmField::STEP_NP1)->ExtractView(&y);
    tempDataManager.getData(m_volumeFieldId, PeridigmField::STEP_NONE)->ExtractView(&cellVolume);
    tempDataManager.getData(m_weightedVolumeFieldId, PeridigmField::STEP_NONE)->ExtractView(&weightedVolume);
    tempDataManager.getData(m_damageFieldId, PeridigmField::STEP_NP1)->ExtractView(&damage);
    tempDataManager.getData(m_bondDamageFieldId, PeridigmField::STEP_NP1)->ExtractView(&bondDamage);
    tempDataManager.getData(m_deviatoricPlasticExtensionFieldId, PeridigmField::STEP_N)->ExtractView(&edpN);
    tempDataManager.getData(m_lambdaFieldId, PeridigmField::STEP_N)->ExtractView(&lambdaN);

    // Create arrays of Fad objects for the current coordinates, dilatation, and force density.
    // Modify the existing vector of Fad objects for the current coordinates.
    if((int)y_AD.size() < numDof)
      y_AD.resize(numDof);
    for(int i=0 ; i<numDof ; ++i){
      y_AD[i].diff(i, numDof);
      y_AD[i].val() = y[i];
    }

    // Create vectors of empty AD types for the dependent variables.
    vector<Sacado::Fad::DFad<double> > dilatation_AD(numEntries);
    vector<Sacado::Fad::DFad<double> > lambdaNP1_AD(numEntries);
    int numBonds = tempDataManager.getData(m_deviatoricPlasticExtensionFieldId, PeridigmField::STEP_N)->MyLength();
    vector<Sacado::Fad::DFad<double> > edpNP1(numBonds);
    vector<Sacado::Fad::DFad<double> > force_AD(numDof);

    // Evaluate the constitutive model using the AD types.
    MATERIAL_EVALUATION::computeDilatation(x,&y_AD[0],weightedVolume,cellVolume,bondDamage,&dilatation_AD[0],&tempNeighborhoodList[0],tempNumOwnedPoints,m_horizon);
    MATERIAL_EVALUATION::computeInternalForceIsotropicElasticPlastic(x,
                                                                     &y_AD[0],
                                                                     weightedVolume,
                                                                     cellVolume,
                                                                     &dilatation_AD[0],
                                                                     bondDamage,
                                                                     edpN,
                                                                     &edpNP1[0],
                                                                     lambdaN,
                                                                     &lambdaNP1_AD[0],
                                                                     &force_AD[0],
                                                                     &tempNeighborhoodList[0],
                                                                     tempNumOwnedPoints,
                                                                     m_bulkModulus,
                                                                     m_shearModulus,
                                                                     m_horizon,
                                                                     m_yieldStress,
                                                                     m_isPlanarProblem,
                                                                     m_thickness);

    // Load the derivative values into the scratch matrix.
    // Multiply by volume along the way to convert force density to force.
    for(int row=0 ; row<numDof ; ++row){
      for(int col=0 ; col<numDof ; ++col){
        scratchMatrix(row, col) = force_AD[row].dx(col) * cellVolume[row/3];
      }
    }

    // Sum the values into the global tangent matrix (this is expensive).
    jacobian.addValues((int)globalIndices.size(), &globalIndices[0], scratchMatrix.Data());
  }
}
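// Illustration (added, not part of the original material model): the AD pattern
// used above -- seed the independent variables with diff(), evaluate, then read
// the derivatives with dx() -- in its smallest form. Assumes Sacado.hpp is
// already included in this translation unit:
static void dfadExample()
{
  Sacado::Fad::DFad<double> x, y;
  x.diff(0, 2);  x.val() = 3.0;            // x is independent variable 0 of 2
  y.diff(1, 2);  y.val() = 4.0;            // y is independent variable 1 of 2
  Sacado::Fad::DFad<double> f = x*x + x*y; // f.val() = 21
  double dfdx = f.dx(0);                   // df/dx = 2x + y = 10
  double dfdy = f.dx(1);                   // df/dy = x = 3
  (void)dfdx; (void)dfdy;
}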
// The solve is done in the felix_driver_run function, and the solution is passed back to Glimmer-CISM.
// IK, 12/3/13: time_inc_yr and cur_time_yr are not used here...
void felix_driver_run(FelixToGlimmer * ftg_ptr, double& cur_time_yr, double time_inc_yr)
{
  //IK, 12/9/13: how come FancyOStream prints on all processors??
  Teuchos::RCP<Teuchos::FancyOStream> out(Teuchos::VerboseObjectBase::getDefaultOStream());

  if (debug_output_verbosity != 0 && mpiCommT->getRank() == 0) {
    std::cout << "In felix_driver_run, cur_time, time_inc = " << cur_time_yr << " " << time_inc_yr << std::endl;
  }

  // ---------------------------------------------
  // Get the u and v velocity solution from Glimmer-CISM.
  // IK, 11/26/13: need to concatenate these into a single vector to form the initial condition for the Albany/FELIX solve.
  // IK, 3/14/14: moved this step to felix_driver_run from felix_driver_init, since we still want to grab the u and v velocities from CISM
  // if the mesh hasn't changed, in which case only felix_driver_run will be called, not felix_driver_init.
  // ---------------------------------------------
  if (debug_output_verbosity != 0 && mpiCommT->getRank() == 0)
    std::cout << "In felix_driver_run: grabbing pointers to u and v velocities in CISM..." << std::endl;
  uVel_ptr = ftg_ptr->getDoubleVar("uvel", "velocity");
  vVel_ptr = ftg_ptr->getDoubleVar("vvel", "velocity");

  // ---------------------------------------------
  // Set the restart solution to the one passed from CISM.
  // IK, 3/14/14: moved this from felix_driver_init to felix_driver_run.
  // ---------------------------------------------
  if (debug_output_verbosity != 0 && mpiCommT->getRank() == 0)
    std::cout << "In felix_driver_run: setting initial condition from CISM..." << std::endl;

  // Check what kind of ordering you have in the solution & create the solutionField object.
  interleavedOrdering = meshStruct->getInterleavedOrdering();
  Albany::AbstractSTKFieldContainer::VectorFieldType* solutionField;
  if(interleavedOrdering)
    solutionField = Teuchos::rcp_dynamic_cast<Albany::OrdinarySTKFieldContainer<true> >(meshStruct->getFieldContainer())->getSolutionField();
  else
    solutionField = Teuchos::rcp_dynamic_cast<Albany::OrdinarySTKFieldContainer<false> >(meshStruct->getFieldContainer())->getSolutionField();

  // Create a vector used to renumber nodes on each processor from the Albany convention
  // (horizontal levels first) to the CISM convention (vertical layers first).
  nNodes2D = (global_ewn + 1)*(global_nsn+1);       // number of global nodes in the domain in 2D
  nNodesProc2D = (nsn-2*nhalo+1)*(ewn-2*nhalo+1);   // number of nodes on each processor in 2D
  cismToAlbanyNodeNumberMap.resize(upn*nNodesProc2D);
  for (int j=0; j<nsn-2*nhalo+1; j++) {
    for (int i=0; i<ewn-2*nhalo+1; i++) {
      for (int k=0; k<upn; k++) {
        int index = k+upn*i + j*(ewn-2*nhalo+1)*upn;
        cismToAlbanyNodeNumberMap[index] = k*nNodes2D + global_node_id_owned_map_Ptr[i+j*(ewn-2*nhalo+1)];
      }
    }
  }

  // The way it worked out, uVel_ptr and vVel_ptr have more nodes than the mesh passed to Albany/CISM for the solve.
  // In particular, there is 1 row of halo elements in uVel_ptr and vVel_ptr. To account for this, we copy uVel_ptr
  // and vVel_ptr into std::vectors, which do not have the halo elements.
  std::vector<double> uvel_vec(upn*nNodesProc2D);
  std::vector<double> vvel_vec(upn*nNodesProc2D);
  int counter1 = 0;
  int counter2 = 0;
  int local_nodeID;
  for (int j=0; j<nsn-1; j++) {
    for (int i=0; i<ewn-1; i++) {
      for (int k=0; k<upn; k++) {
        if (j >= nhalo-1 && j < nsn-nhalo) {
          if (i >= nhalo-1 && i < ewn-nhalo) {
#ifdef CISM_USE_EPETRA
            local_nodeID = node_map->LID(cismToAlbanyNodeNumberMap[counter1]);
#else
            local_nodeID = node_map->getLocalElement(cismToAlbanyNodeNumberMap[counter1]);
#endif
            uvel_vec[counter1] = uVel_ptr[counter2];
            vvel_vec[counter1] = vVel_ptr[counter2];
            counter1++;
          }
        }
        counter2++;
      }
    }
  }

  // Loop over all the elements to find which nodes are active. For the active nodes, copy uvel and vvel from CISM
  // into the Albany solution array to use as the initial condition.
  // NOTE: there is some inefficiency here in looping over all the elements. TO DO? pass only active nodes from Albany-CISM to improve this?
  double velScale = seconds_per_year*vel_scaling_param;
  for (int i=0; i<nElementsActive; i++) {
    for (int j=0; j<8; j++) {
      int node_GID = global_element_conn_active_Ptr[i + nElementsActive*j]; // node_GID is 1-based
#ifdef CISM_USE_EPETRA
      int node_LID = node_map->LID(node_GID);              // node_LID is 0-based
#else
      int node_LID = node_map->getLocalElement(node_GID);  // node_LID is 0-based
#endif
      stk::mesh::Entity node = meshStruct->bulkData->get_entity(stk::topology::NODE_RANK, node_GID);
      double* sol = stk::mesh::field_data(*solutionField, node);
      // IK, 3/18/14: added division by velScale to convert uvel and vvel from dimensionless to units of m/year (the Albany units).
      sol[0] = uvel_vec[node_LID]/velScale;
      sol[1] = vvel_vec[node_LID]/velScale;
    }
  }

  // ---------------------------------------------------------------------------------------------------
  // Solve
  // ---------------------------------------------------------------------------------------------------
  if (debug_output_verbosity != 0 && mpiCommT->getRank() == 0)
    std::cout << "In felix_driver_run: starting the solve... " << std::endl;
  // Need to set HasRestart solution such that uvel_Ptr and vvel_Ptr (u and v from Glimmer/CISM) are always set as the initial condition?
  meshStruct->setHasRestartSolution(!first_time_step);

  // Turn off homotopy if we're not in the first time step.
  // NOTE - IMPORTANT: Glen's Law Homotopy parameter should be set to 1.0 in the parameter list for this logic to work!!!
  if (!first_time_step) {
    meshStruct->setRestartDataTime(parameterList->sublist("Problem").get("Homotopy Restart Step", 1.));
    double homotopy = parameterList->sublist("Problem").sublist("FELIX Viscosity").get("Glen's Law Homotopy Parameter", 1.0);
    if(meshStruct->restartDataTime() == homotopy) {
      parameterList->sublist("Problem").set("Solution Method", "Steady");
      parameterList->sublist("Piro").set("Solver Type", "NOX");
    }
  }

  albanyApp->createDiscretization();

  // IK, 10/30/14: Check that the # of elements from the previous time step hasn't changed.
  // If it has not, use the previous solution as the initial guess for the current time step.
  // Otherwise do not set the initial solution. It's possible this can be improved so that some part of the previous
  // solution defined on the current mesh is used (if the ice sheet receded, which it likely will in dynamic ice sheet simulations...).
  if (nElementsActivePrevious != nElementsActive)
    previousSolution = Teuchos::null;
  albanyApp->finalSetUp(parameterList, previousSolution);
  //if (!first_time_step)
  //  std::cout << "previousSolution: " << *previousSolution << std::endl;

#ifdef CISM_USE_EPETRA
  solver = slvrfctry->createThyraSolverAndGetAlbanyApp(albanyApp, mpiComm, mpiComm, Teuchos::null, false);
#else
  solver = slvrfctry->createAndGetAlbanyAppT(albanyApp, mpiCommT, mpiCommT, Teuchos::null, false);
#endif

  Teuchos::ParameterList solveParams;
  solveParams.set("Compute Sensitivities", true);
  Teuchos::Array<Teuchos::RCP<const Thyra::VectorBase<double> > > thyraResponses;
  Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Thyra::MultiVectorBase<double> > > > thyraSensitivities;
  Piro::PerformSolveBase(*solver, solveParams, thyraResponses, thyraSensitivities);

#ifdef CISM_USE_EPETRA
  const Epetra_Map& ownedMap(*albanyApp->getDiscretization()->getMap());          // owned map
  const Epetra_Map& overlapMap(*albanyApp->getDiscretization()->getOverlapMap()); // overlap map
  Epetra_Import import(overlapMap, ownedMap);                                     // importer from ownedMap to overlapMap
  Epetra_Vector solutionOverlap(overlapMap);                                      // overlapped solution
  solutionOverlap.Import(*albanyApp->getDiscretization()->getSolutionField(), import, Insert);
#else
  Teuchos::RCP<const Tpetra_Map> ownedMap = albanyApp->getDiscretization()->getMapT();          // owned map
  Teuchos::RCP<const Tpetra_Map> overlapMap = albanyApp->getDiscretization()->getOverlapMapT(); // overlap map
  Teuchos::RCP<Tpetra_Import> import = Teuchos::rcp(new Tpetra_Import(ownedMap, overlapMap));
  Teuchos::RCP<Tpetra_Vector> solutionOverlap = Teuchos::rcp(new Tpetra_Vector(overlapMap));
  solutionOverlap->doImport(*albanyApp->getDiscretization()->getSolutionFieldT(), *import, Tpetra::INSERT);
  Teuchos::ArrayRCP<const ST> solutionOverlap_constView = solutionOverlap->get1dView();
#endif

#ifdef WRITE_TO_MATRIX_MARKET
  // For debugging: write the solution and maps to Matrix Market files.
#ifdef CISM_USE_EPETRA
  EpetraExt::BlockMapToMatrixMarketFile("node_map.mm", *node_map);
  EpetraExt::BlockMapToMatrixMarketFile("map.mm", ownedMap);
  EpetraExt::BlockMapToMatrixMarketFile("overlap_map.mm", overlapMap);
  EpetraExt::MultiVectorToMatrixMarketFile("solution.mm", *albanyApp->getDiscretization()->getSolutionField());
#else
  Tpetra_MatrixMarket_Writer::writeMapFile("node_map.mm", *node_map);
  Tpetra_MatrixMarket_Writer::writeMapFile("map.mm", *ownedMap);
  Tpetra_MatrixMarket_Writer::writeMapFile("overlap_map.mm", *overlapMap);
  Tpetra_MatrixMarket_Writer::writeDenseFile("solution.mm", albanyApp->getDiscretization()->getSolutionFieldT());
#endif
#endif

  // Set previousSolution (used as the initial guess for the next time step) to the final Albany solution.
  previousSolution = Teuchos::rcp(new Tpetra_Vector(*albanyApp->getDiscretization()->getSolutionFieldT()));
  nElementsActivePrevious = nElementsActive;

  // ---------------------------------------------------------------------------------------------------
  // Compute sensitivities / responses and perform regression tests
  // IK, 12/9/13: how come this is turned off in the mpas branch?
  // ---------------------------------------------------------------------------------------------------
  if (debug_output_verbosity != 0 && mpiCommT->getRank() == 0)
    std::cout << "Computing responses and sensitivities..." << std::endl;

  int status = 0; // 0 = pass, failures are incremented

#ifdef CISM_USE_EPETRA
  Teuchos::Array<Teuchos::RCP<const Epetra_Vector> > responses;
  Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Epetra_MultiVector> > > sensitivities;
  epetraFromThyra(mpiComm, thyraResponses, thyraSensitivities, responses, sensitivities);
#else
  Teuchos::Array<Teuchos::RCP<const Tpetra_Vector> > responses;
  Teuchos::Array<Teuchos::Array<Teuchos::RCP<const Tpetra_MultiVector> > > sensitivities;
  tpetraFromThyra(thyraResponses, thyraSensitivities, responses, sensitivities);
#endif

  const int num_p = solver->Np(); // Number of *vectors* of parameters
  const int num_g = solver->Ng(); // Number of *vectors* of responses

  if (debug_output_verbosity != 0) {
    *out << "Finished eval of first model: Params, Responses " << std::setprecision(12) << std::endl;
  }

  const Thyra::ModelEvaluatorBase::InArgs<double> nominal = solver->getNominalValues();

  if (debug_output_verbosity != 0) {
    for (int i=0; i<num_p; i++) {
#ifdef CISM_USE_EPETRA
      const Teuchos::RCP<const Epetra_Vector> p_init = epetraVectorFromThyra(mpiComm, nominal.get_p(i));
      p_init->Print(*out << "\nParameter vector " << i << ":\n");
#else
      Albany::printTpetraVector(*out << "\nParameter vector " << i << ":\n",
                                ConverterT::getConstTpetraVector(nominal.get_p(i)));
#endif
    }
  }

  for (int i=0; i<num_g-1; i++) {
#ifdef CISM_USE_EPETRA
    const Teuchos::RCP<const Epetra_Vector> g = responses[i];
#else
    const Teuchos::RCP<const Tpetra_Vector> g = responses[i];
#endif
    bool is_scalar = true;

    if (albanyApp != Teuchos::null)
      is_scalar = albanyApp->getResponse(i)->isScalarResponse();

    if (is_scalar) {
      if (debug_output_verbosity != 0) {
#ifdef CISM_USE_EPETRA
        g->Print(*out << "\nResponse vector " << i << ":\n");
#else
        Albany::printTpetraVector(*out << "\nResponse vector " << i << ":\n", g);
#endif
      }

      if (num_p == 0 && cur_time_yr == final_time) {
        // Just calculate the regression data -- only in the final time step.
#ifdef CISM_USE_EPETRA
        status += slvrfctry->checkSolveTestResults(i, 0, g.get(), NULL);
#else
        status += slvrfctry->checkSolveTestResultsT(i, 0, g.get(), NULL);
#endif
      }
      else {
        for (int j=0; j<num_p; j++) {
#ifdef CISM_USE_EPETRA
          const Teuchos::RCP<const Epetra_MultiVector> dgdp = sensitivities[i][j];
#else
          const Teuchos::RCP<const Tpetra_MultiVector> dgdp = sensitivities[i][j];
#endif
          if (debug_output_verbosity != 0) {
            if (Teuchos::nonnull(dgdp)) {
#ifdef CISM_USE_EPETRA
              dgdp->Print(*out << "\nSensitivities (" << i << "," << j << "):\n");
#else
              Albany::printTpetraVector(*out << "\nSensitivities (" << i << "," << j << "):\n", dgdp);
#endif
            }
          }
          if (cur_time_yr == final_time) {
#ifdef CISM_USE_EPETRA
            status += slvrfctry->checkSolveTestResults(i, j, g.get(), dgdp.get());
#else
            status += slvrfctry->checkSolveTestResultsT(i, j, g.get(), dgdp.get());
#endif
          }
        }
      }
    }
  }

  // Only print the regression test result in the final time step.
  if (debug_output_verbosity != 0 && cur_time_yr == final_time)
    *out << "\nNumber of Failed Comparisons: " << status << std::endl;
  // IK, 10/30/14: added the following line so that when you run ctest from CISM the test fails if there are some failed comparisons.
  if (status > 0)
    TEUCHOS_TEST_FOR_EXCEPTION(true, std::logic_error, "All regression comparisons did not pass!" << std::endl);

  // ---------------------------------------------------------------------------------------------------
  // Copy the solution back to the Glimmer uvel and vvel arrays to be passed back
  // ---------------------------------------------------------------------------------------------------
  if (debug_output_verbosity != 0 && mpiCommT->getRank() == 0)
    std::cout << "In felix_driver_run: copying Albany solution to uvel and vvel to send back to CISM... " << std::endl;

#ifdef CISM_USE_EPETRA
  // Epetra_Vectors to hold uvel and vvel to be passed to Glimmer/CISM
  Epetra_Vector uvel(*node_map, true);
  Epetra_Vector vvel(*node_map, true);
#else
  // Tpetra_Vectors to hold uvel and vvel to be passed to Glimmer/CISM
  Teuchos::RCP<Tpetra_Vector> uvel = Teuchos::rcp(new Tpetra_Vector(node_map, true));
  Teuchos::RCP<Tpetra_Vector> vvel = Teuchos::rcp(new Tpetra_Vector(node_map, true));
#endif

#ifdef CISM_USE_EPETRA
  if (interleavedOrdering == true) {
    for (int i=0; i<overlapMap.NumMyElements(); i++) {
      int global_dof = overlapMap.GID(i);
      double sol_value = solutionOverlap[i];
      int modulo = (global_dof % 2); // check if the dof is for u or for v
      int vel_global_dof, vel_local_dof;
      if (modulo == 0) { // u dof
        vel_global_dof = global_dof/2+1;               // add 1 because node_map is 1-based
        vel_local_dof = node_map->LID(vel_global_dof); // look up the local id corresponding to the global id in node_map
        uvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
      }
      else { // v dof
        vel_global_dof = (global_dof-1)/2+1;           // add 1 because node_map is 1-based
        vel_local_dof = node_map->LID(vel_global_dof);
        vvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
      }
    }
  }
  else { // note: the case with non-interleaved ordering has not been tested...
    int numDofs = overlapMap.NumGlobalElements();
    for (int i=0; i<overlapMap.NumMyElements(); i++) {
      int global_dof = overlapMap.GID(i);
      double sol_value = solutionOverlap[i];
      int vel_global_dof, vel_local_dof;
      if (global_dof < numDofs/2) { // u dof
        vel_global_dof = global_dof+1;                 // add 1 because node_map is 1-based
        vel_local_dof = node_map->LID(vel_global_dof);
        uvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
      }
      else { // v dof
        vel_global_dof = global_dof-numDofs/2+1;       // add 1 because node_map is 1-based
        vel_local_dof = node_map->LID(vel_global_dof);
        vvel.ReplaceMyValues(1, &sol_value, &vel_local_dof);
      }
    }
  }
#else
  if (interleavedOrdering == true) {
    for (int i=0; i<overlapMap->getNodeNumElements(); i++) {
      int global_dof = overlapMap->getGlobalElement(i);
      double sol_value = solutionOverlap_constView[i];
      int modulo = (global_dof % 2); // check if the dof is for u or for v
      int vel_global_dof, vel_local_dof;
      if (modulo == 0) { // u dof
        vel_global_dof = global_dof/2+1;                            // add 1 because node_map is 1-based
        vel_local_dof = node_map->getLocalElement(vel_global_dof);  // look up the local id corresponding to the global id in node_map
        uvel->replaceLocalValue(vel_local_dof, sol_value);
      }
      else { // v dof
        vel_global_dof = (global_dof-1)/2+1;                        // add 1 because node_map is 1-based
        vel_local_dof = node_map->getLocalElement(vel_global_dof);
        vvel->replaceLocalValue(vel_local_dof, sol_value);
      }
    }
  }
  else { // note: the case with non-interleaved ordering has not been tested...
    int numDofs = overlapMap->getGlobalNumElements();
    for (int i=0; i<overlapMap->getNodeNumElements(); i++) {
      int global_dof = overlapMap->getGlobalElement(i);
      double sol_value = solutionOverlap_constView[i];
      int vel_global_dof, vel_local_dof;
      if (global_dof < numDofs/2) { // u dof
        vel_global_dof = global_dof+1;                              // add 1 because node_map is 1-based
        vel_local_dof = node_map->getLocalElement(vel_global_dof);
        uvel->replaceLocalValue(vel_local_dof, sol_value);
      }
      else { // v dof
        vel_global_dof = global_dof-numDofs/2+1;                    // add 1 because node_map is 1-based
        vel_local_dof = node_map->getLocalElement(vel_global_dof);
        vvel->replaceLocalValue(vel_local_dof, sol_value);
      }
    }
  }
#endif

#ifdef WRITE_TO_MATRIX_MARKET
  // For debugging: write uvel and vvel to Matrix Market files.
#ifdef CISM_USE_EPETRA
  EpetraExt::MultiVectorToMatrixMarketFile("uvel.mm", uvel);
  EpetraExt::MultiVectorToMatrixMarketFile("vvel.mm", vvel);
#else
  Tpetra_MatrixMarket_Writer::writeDenseFile("uvel.mm", uvel);
  Tpetra_MatrixMarket_Writer::writeDenseFile("vvel.mm", vvel);
#endif
#endif

  // Copy uvel and vvel into uVel_ptr and vVel_ptr respectively (the arrays passed back to CISM),
  // using the node numbering convention of CISM.
  counter1 = 0;
  counter2 = 0;
#ifndef CISM_USE_EPETRA
  Teuchos::ArrayRCP<const ST> uvel_constView = uvel->get1dView();
  Teuchos::ArrayRCP<const ST> vvel_constView = vvel->get1dView();
#endif
  local_nodeID = 0;
  for (int j=0; j<nsn-1; j++) {
    for (int i=0; i<ewn-1; i++) {
      for (int k=0; k<upn; k++) {
        if (j >= nhalo-1 && j < nsn-nhalo) {
          if (i >= nhalo-1 && i < ewn-nhalo) {
#ifdef CISM_USE_EPETRA
            local_nodeID = node_map->LID(cismToAlbanyNodeNumberMap[counter1]);
            uVel_ptr[counter2] = uvel[local_nodeID];
            vVel_ptr[counter2] = vvel[local_nodeID];
#else
            local_nodeID = node_map->getLocalElement(cismToAlbanyNodeNumberMap[counter1]);
            uVel_ptr[counter2] = uvel_constView[local_nodeID];
            vVel_ptr[counter2] = vvel_constView[local_nodeID];
#endif
            counter1++;
          }
        }
        else {
          uVel_ptr[counter2] = 0.0;
          vVel_ptr[counter2] = 0.0;
        }
        counter2++;
      }
    }
  }

  first_time_step = false;
}
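// Illustration (added, not part of the original driver): with interleaved
// ordering, a 0-based solution dof maps to a 1-based CISM node ID with the
// same arithmetic used in the copy loops above. A minimal sketch:
static int velNodeID(int global_dof, bool & isU)
{
  isU = (global_dof % 2 == 0);     // even dofs hold u, odd dofs hold v
  return isU ? global_dof/2 + 1    // +1 because node_map is 1-based
             : (global_dof - 1)/2 + 1;
}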
TEUCHOS_UNIT_TEST(interlaced_op, test)
{
#ifdef HAVE_MPI
  Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_MpiComm(MPI_COMM_WORLD));
#else
  Teuchos::RCP<const Epetra_Comm> comm = Teuchos::rcp(new Epetra_SerialComm);
#endif

  //int rank = comm->MyPID();
  int numProc = comm->NumProc();

  int num_KL = 1;
  int porder = 5;
  bool full_expansion = false;

  Teuchos::RCP<const Stokhos::CompletePolynomialBasis<int,double> > basis = buildBasis(num_KL,porder);
  Teuchos::RCP<Stokhos::Sparse3Tensor<int,double> > Cijk;
  Teuchos::RCP<Stokhos::ParallelData> sg_parallel_data;
  Teuchos::RCP<Stokhos::OrthogPolyExpansion<int,double> > expansion;
  {
    if(full_expansion)
      Cijk = basis->computeTripleProductTensor();
    else
      Cijk = basis->computeLinearTripleProductTensor();

    Teuchos::ParameterList parallelParams;
    parallelParams.set("Number of Spatial Processors", numProc);
    sg_parallel_data = Teuchos::rcp(new Stokhos::ParallelData(basis, Cijk, comm, parallelParams));

    expansion = Teuchos::rcp(new Stokhos::AlgebraicOrthogPolyExpansion<int,double>(basis, Cijk));
  }
  Teuchos::RCP<const EpetraExt::MultiComm> sg_comm = sg_parallel_data->getMultiComm();

  // deterministic PDE graph
  Teuchos::RCP<Epetra_Map> determRowMap = Teuchos::rcp(new Epetra_Map(-1,10,0,*comm));
  Teuchos::RCP<Epetra_CrsGraph> determGraph = Teuchos::rcp(new Epetra_CrsGraph(Copy,*determRowMap,1));
  for(int row=0;row<determRowMap->NumMyElements();row++) {
    int gid = determRowMap->GID(row);
    determGraph->InsertGlobalIndices(gid,1,&gid);
  }
  for(int row=1;row<determRowMap->NumMyElements()-1;row++) {
    int gid = determRowMap->GID(row);
    int indices[2] = {gid-1,gid+1};
    determGraph->InsertGlobalIndices(gid,2,indices);
  }
  determGraph->FillComplete();

  Teuchos::RCP<Teuchos::ParameterList> params = Teuchos::rcp(new Teuchos::ParameterList);
  params->set("Scale Operator by Inverse Basis Norms", false);
  params->set("Include Mean", true);
  params->set("Only Use Linear Terms", false);

  Teuchos::RCP<Stokhos::EpetraSparse3Tensor> epetraCijk =
    Teuchos::rcp(new Stokhos::EpetraSparse3Tensor(basis,Cijk,sg_comm));
  Teuchos::RCP<Stokhos::EpetraOperatorOrthogPoly> W_sg_blocks =
    Teuchos::rcp(new Stokhos::EpetraOperatorOrthogPoly(basis, epetraCijk->getStochasticRowMap(), determRowMap, determRowMap, sg_comm));
  for(int i=0; i<W_sg_blocks->size(); i++) {
    Teuchos::RCP<Epetra_CrsMatrix> crsMat = Teuchos::rcp(new Epetra_CrsMatrix(Copy,*determGraph));
    crsMat->PutScalar(1.0 + i);
    W_sg_blocks->setCoeffPtr(i,crsMat); // allocate a bunch of matrices
  }

  Teuchos::RCP<const Epetra_Map> sg_map =
    Teuchos::rcp(EpetraExt::BlockUtility::GenerateBlockMap(*determRowMap,
                                                           *(epetraCijk->getStochasticRowMap()),
                                                           *(epetraCijk->getMultiComm())));

  // build an interlaced operator (the object under test) and a benchmark
  // fully assembled operator
  ///////////////////////////////////////////////////////////////////////

  Stokhos::InterlacedOperator op(sg_comm,basis,epetraCijk,determGraph,params);
  op.PutScalar(0.0);
  op.setupOperator(W_sg_blocks);

  Stokhos::FullyAssembledOperator full_op(sg_comm,basis,epetraCijk,determGraph,sg_map,sg_map,params);
  full_op.PutScalar(0.0);
  full_op.setupOperator(W_sg_blocks);

  // here we test the interlaced operator against the fully assembled operator
  ///////////////////////////////////////////////////////////////////////

  bool result = true;
  for(int i=0;i<100;i++) {
    // build vectors for the fully assembled operator (blockwise)
    Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> x_vec_blocks =
      Teuchos::rcp(new Stokhos::EpetraVectorOrthogPoly(basis,epetraCijk->getStochasticRowMap(),determRowMap,epetraCijk->getMultiComm()));
    Teuchos::RCP<Stokhos::EpetraVectorOrthogPoly> f_vec_blocks =
      Teuchos::rcp(new Stokhos::EpetraVectorOrthogPoly(basis,epetraCijk->getStochasticRowMap(),determRowMap,epetraCijk->getMultiComm()));
    Teuchos::RCP<Epetra_Vector> x_vec_blocked = x_vec_blocks->getBlockVector();
    Teuchos::RCP<Epetra_Vector> f_vec_blocked = f_vec_blocks->getBlockVector();
    x_vec_blocked->Random();        // build a random initial vector
    f_vec_blocked->PutScalar(0.0);

    // build interlaced vectors
    Teuchos::RCP<Epetra_Vector> x_vec_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorDomainMap()));
    Teuchos::RCP<Epetra_Vector> f_vec_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorRangeMap()));
    Teuchos::RCP<Epetra_Vector> f_vec_blk_inter = Teuchos::rcp(new Epetra_Vector(op.OperatorRangeMap()));
    Stokhos::SGModelEvaluator_Interlaced::copyToInterlacedVector(*x_vec_blocks,*x_vec_inter); // copy the random x over
    f_vec_inter->PutScalar(0.0);

    full_op.Apply(*x_vec_blocked,*f_vec_blocked);
    op.Apply(*x_vec_inter,*f_vec_inter);

    // copy the blocked action to interlaced form for comparison
    Stokhos::SGModelEvaluator_Interlaced::copyToInterlacedVector(*f_vec_blocks,*f_vec_blk_inter);

    // compute the relative error in the infinity norm
    double error = 0.0;
    double true_norm = 0.0;
    f_vec_blk_inter->NormInf(&true_norm);
    f_vec_blk_inter->Update(-1.0,*f_vec_inter,1.0);
    f_vec_blk_inter->NormInf(&error);

    out << "rel error = " << error/true_norm << " ( " << true_norm << " ), ";
    result &= (error/true_norm < 1e-14);
  }
  out << std::endl;

  TEST_ASSERT(result);
}
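// Illustration (added, an assumption about the layouts being compared rather
// than code from the test): for a deterministic problem of size N and P
// stochastic blocks, a coefficient for deterministic dof i and stochastic
// block j plausibly lives at the following positions, which is the mapping
// copyToInterlacedVector mediates between:
static int blockedIndex(int i, int j, int N)    { return j*N + i; } // all of block 0, then block 1, ...
static int interlacedIndex(int i, int j, int P) { return i*P + j; } // all stochastic coeffs of dof 0, then dof 1, ...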
void PeridigmNS::Block::createMapsFromGlobalMaps(Teuchos::RCP<const Epetra_BlockMap> globalOwnedScalarPointMap,
                                                 Teuchos::RCP<const Epetra_BlockMap> globalOverlapScalarPointMap,
                                                 Teuchos::RCP<const Epetra_BlockMap> globalOwnedVectorPointMap,
                                                 Teuchos::RCP<const Epetra_BlockMap> globalOverlapVectorPointMap,
                                                 Teuchos::RCP<const Epetra_BlockMap> globalOwnedScalarBondMap,
                                                 Teuchos::RCP<const Epetra_Vector> globalBlockIds,
                                                 Teuchos::RCP<const PeridigmNS::NeighborhoodData> globalNeighborhoodData,
                                                 Teuchos::RCP<const PeridigmNS::NeighborhoodData> globalContactNeighborhoodData)
{
  double* globalBlockIdsPtr;
  globalBlockIds->ExtractView(&globalBlockIdsPtr);

  // Create a list of all the on-processor elements that are part of this block.
  vector<int> IDs;
  IDs.reserve(globalOverlapScalarPointMap->NumMyElements()); // upper bound
  vector<int> bondIDs;
  bondIDs.reserve(globalOverlapScalarPointMap->NumMyElements());
  vector<int> bondElementSize;
  bondElementSize.reserve(globalOwnedScalarPointMap->NumMyElements());

  for(int iLID=0 ; iLID<globalOwnedScalarPointMap->NumMyElements() ; ++iLID){
    if(globalBlockIdsPtr[iLID] == blockID) {
      int globalID = globalOwnedScalarPointMap->GID(iLID);
      IDs.push_back(globalID);
    }
  }

  // Record the size of these elements in the bond map.
  // Note that if an element has no bonds, it has no entry in the bondMap.
  // So the bond map and the scalar map can have a different number of entries (different local IDs).
  for(int iLID=0 ; iLID<globalOwnedScalarBondMap->NumMyElements() ; ++iLID){
    int globalID = globalOwnedScalarBondMap->GID(iLID);
    int localID = globalOwnedScalarPointMap->LID(globalID);
    if(globalBlockIdsPtr[localID] == blockID){
      bondIDs.push_back(globalID);
      bondElementSize.push_back(globalOwnedScalarBondMap->ElementSize(iLID));
    }
  }

  // Create the owned scalar point map, the owned vector point map, and the owned scalar bond map.
  int numGlobalElements = -1;
  int numMyElements = IDs.size();
  int* myGlobalElements = 0;
  if(numMyElements > 0)
    myGlobalElements = &IDs.at(0);
  int elementSize = 1;
  int indexBase = 0;
  ownedScalarPointMap =
    Teuchos::rcp(new Epetra_BlockMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, globalOwnedScalarPointMap->Comm()));

  elementSize = 3;
  ownedVectorPointMap =
    Teuchos::rcp(new Epetra_BlockMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, globalOwnedScalarPointMap->Comm()));

  numMyElements = bondElementSize.size();
  myGlobalElements = 0;
  int* elementSizeList = 0;
  if(numMyElements > 0){
    myGlobalElements = &bondIDs.at(0);
    elementSizeList = &bondElementSize.at(0);
  }
  ownedScalarBondMap =
    Teuchos::rcp(new Epetra_BlockMap(numGlobalElements, numMyElements, myGlobalElements, elementSizeList, indexBase, globalOwnedScalarPointMap->Comm()));

  // Create a list of nodes that need to be ghosted (both across material boundaries and across processor boundaries).
  set<int> ghosts;

  // Check the neighborhood list for things that need to be ghosted.
  int* const globalNeighborhoodList = globalNeighborhoodData->NeighborhoodList();
  int globalNeighborhoodListIndex = 0;
  for(int iLID=0 ; iLID<globalNeighborhoodData->NumOwnedPoints() ; ++iLID){
    int numNeighbors = globalNeighborhoodList[globalNeighborhoodListIndex++];
    if(globalBlockIdsPtr[iLID] == blockID) {
      for(int i=0 ; i<numNeighbors ; ++i){
        int neighborGlobalID = globalOverlapScalarPointMap->GID( globalNeighborhoodList[globalNeighborhoodListIndex + i] );
        ghosts.insert(neighborGlobalID);
      }
    }
    globalNeighborhoodListIndex += numNeighbors;
  }

  // Check the contact neighborhood list for things that need to be ghosted.
  if(!globalContactNeighborhoodData.is_null()){
    int* const globalContactNeighborhoodList = globalContactNeighborhoodData->NeighborhoodList();
    int globalContactNeighborhoodListIndex = 0;
    for(int iLID=0 ; iLID<globalContactNeighborhoodData->NumOwnedPoints() ; ++iLID){
      int numNeighbors = globalContactNeighborhoodList[globalContactNeighborhoodListIndex++];
      if(globalBlockIdsPtr[iLID] == blockID) {
        for(int i=0 ; i<numNeighbors ; ++i){
          int neighborGlobalID = globalOverlapScalarPointMap->GID( globalContactNeighborhoodList[globalContactNeighborhoodListIndex + i] );
          ghosts.insert(neighborGlobalID);
        }
      }
      globalContactNeighborhoodListIndex += numNeighbors;
    }
  }

  // Remove entries from ghosts that are already in IDs.
  for(unsigned int i=0 ; i<IDs.size() ; ++i)
    ghosts.erase(IDs[i]);

  // Copy IDs; this is the owned global ID list.
  vector<int> ownedIDs(IDs.begin(), IDs.end());

  // Append the ghosts to IDs; this creates the overlap global ID list.
  for(set<int>::iterator it=ghosts.begin() ; it!=ghosts.end() ; ++it)
    IDs.push_back(*it);

  // Create the overlap scalar point map and the overlap vector point map.
  numMyElements = IDs.size();
  myGlobalElements = 0;
  if(numMyElements > 0)
    myGlobalElements = &IDs.at(0);
  elementSize = 1;
  overlapScalarPointMap =
    Teuchos::rcp(new Epetra_BlockMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, globalOwnedScalarPointMap->Comm()));
  elementSize = 3;
  overlapVectorPointMap =
    Teuchos::rcp(new Epetra_BlockMap(numGlobalElements, numMyElements, myGlobalElements, elementSize, indexBase, globalOwnedScalarPointMap->Comm()));

  // Invalidate the importers.
  oneDimensionalImporter = Teuchos::RCP<Epetra_Import>();
  threeDimensionalImporter = Teuchos::RCP<Epetra_Import>();
}
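// Illustration (an assumption, not from the original Block class): the
// importers invalidated above would presumably be rebuilt on demand from the
// maps created in this function, along the lines of
//
//   oneDimensionalImporter =
//     Teuchos::rcp(new Epetra_Import(*overlapScalarPointMap, *ownedScalarPointMap));
//
// where an Epetra_Import takes the target (overlap) map first and the source
// (owned) map second.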
// =============================================================================
Teuchos::RCP<VIO::EpetraMesh::Mesh>
VIO::EpetraMesh::Reader::
extractMeshData_( const vtkSmartPointer<vtkUnstructuredGrid> & vtkMesh,
                  const Teuchos::RCP<const Epetra_Comm>      & comm
                ) const
{
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // create the maps based on the number of points
  int numPoints = vtkMesh->GetNumberOfPoints();
  Teuchos::RCP<Epetra_Map> nodesMap = Teuchos::rcp( new Epetra_Map( numPoints, 0, *comm ) );
  Teuchos::RCP<Epetra_Map> complexValuesMap = createComplexValuesMap_ ( *nodesMap );
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  Teuchos::RCP<Mesh> mesh = Teuchos::rcp( new Mesh( nodesMap, complexValuesMap ) );
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // get points
  Teuchos::ArrayRCP<Point> points( numPoints );
  for ( int k=0; k<numPoints; k++ )
    vtkMesh->GetPoint( k, points[k].getRawPtr() );

  mesh->setNodes( points );

  // determine the boundary points:
  // transform vtkMesh into vtkPolyData
  vtkSmartPointer<vtkDataSetSurfaceFilter> surfaceFilter =
    vtkSmartPointer<vtkDataSetSurfaceFilter>::New();
  surfaceFilter->SetInput(vtkMesh);
  surfaceFilter->Update();

  // filter out the boundary edges
  vtkFeatureEdges * pEdges = vtkFeatureEdges::New();
  pEdges->SetInput( surfaceFilter->GetOutput() );
  pEdges->BoundaryEdgesOn();
  pEdges->FeatureEdgesOff();
  pEdges->NonManifoldEdgesOff();
  pEdges->ManifoldEdgesOff();
  pEdges->Update();

  vtkPolyData * poly = pEdges->GetOutput();
  TEUCHOS_ASSERT_EQUALITY( 0, pEdges->GetErrorCode() );

  // Mark the nodes that sit on the boundary; initialize all flags to false
  // so that interior nodes are correctly reported as non-boundary.
  double x[3];
  Teuchos::ArrayRCP<bool> isBoundaryNodes( numPoints, false );
  for ( int k=0; k<poly->GetNumberOfPoints(); k++ )
  {
    poly->GetPoint( k, x );
    vtkIdType ptId = vtkMesh->FindPoint( x );
    isBoundaryNodes[ ptId ] = true;
  }
  mesh->setBoundaryNodes( isBoundaryNodes );

  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  // get cells
  int globalNumElems = vtkMesh->GetNumberOfCells();

  // create an appropriate map
  Teuchos::RCP<const Epetra_Map> elemsMap =
    Teuchos::rcp ( new Epetra_Map ( globalNumElems, 0, *comm ) );

  int localNumElems = elemsMap->NumMyElements();

  Teuchos::ArrayRCP<Teuchos::ArrayRCP<int> > elems( localNumElems );
  Teuchos::ArrayRCP<Mesh::ElementType> elemTypes( localNumElems );
  for ( int k=0; k<localNumElems; k++ )
  {
    // set the connectivity table
    vtkCell * cell = vtkMesh->GetCell( elemsMap->GID(k) );
    int numCellPoints = cell->GetNumberOfPoints();
    elems[k] = Teuchos::ArrayRCP<int>( numCellPoints );
    for ( int l=0; l<numCellPoints; l++ )
      elems[k][l] = cell->GetPointId( l );

    // set the element type
    switch( cell->GetCellType() )
    {
      case VTK_LINE:                 elemTypes[k] = Mesh::EDGE2;    break;
      case VTK_QUADRATIC_EDGE:       elemTypes[k] = Mesh::EDGE3;    break;
      case VTK_TRIANGLE:             elemTypes[k] = Mesh::TRI3;     break;
      case VTK_QUADRATIC_TRIANGLE:   elemTypes[k] = Mesh::TRI6;     break;
      case VTK_QUAD:                 elemTypes[k] = Mesh::QUAD4;    break;
      case VTK_QUADRATIC_QUAD:       elemTypes[k] = Mesh::QUAD8;    break;
      case VTK_BIQUADRATIC_QUAD:     elemTypes[k] = Mesh::QUAD9;    break;
      case VTK_TETRA:                elemTypes[k] = Mesh::TET4;     break;
      case VTK_QUADRATIC_TETRA:      elemTypes[k] = Mesh::TET10;    break;
      case VTK_HEXAHEDRON:           elemTypes[k] = Mesh::HEX8;     break;
      case VTK_QUADRATIC_HEXAHEDRON: elemTypes[k] = Mesh::HEX20;    break;
      case VTK_WEDGE:                elemTypes[k] = Mesh::PRISM6;   break;
      case VTK_HIGHER_ORDER_WEDGE:   elemTypes[k] = Mesh::PRISM15;  break;
      case VTK_PYRAMID:              elemTypes[k] = Mesh::PYRAMID5; break;
      default:
        TEST_FOR_EXCEPTION( true,
                            std::logic_error,
                            "Unknown cell type \"" << cell->GetCellType() << "\"." );
    }
  }
  mesh->setElems( elems );
  mesh->setElemTypes( elemTypes );
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  return mesh;
}
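// Illustration (a hypothetical reconstruction; createComplexValuesMap_ is
// defined elsewhere in this reader and its actual body may differ): a
// complex-values map plausibly assigns two consecutive global indices per
// node, one for the real part and one for the imaginary part:
static Teuchos::RCP<Epetra_Map> sketchComplexValuesMap( const Epetra_Map & nodesMap )
{
  std::vector<int> gids( 2 * nodesMap.NumMyElements() );
  for ( int k = 0; k < nodesMap.NumMyElements(); k++ )
  {
    gids[2*k]   = 2 * nodesMap.GID(k);     // real part dof
    gids[2*k+1] = 2 * nodesMap.GID(k) + 1; // imaginary part dof
  }
  return Teuchos::rcp( new Epetra_Map( -1,
                                       (int)gids.size(),
                                       gids.empty() ? NULL : &gids[0],
                                       0,
                                       nodesMap.Comm() ) );
}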