void dynamic_matrix::assemble_mass() { M.resize(mesh.active_dofs(), mesh.active_dofs()); std::vector<doublet<int>> doublets; doublets.reserve(mesh.active_dofs()); for (auto const& submesh : mesh.meshes()) { for (std::int64_t element = 0; element < submesh.elements(); element++) { auto const dof_view = submesh.local_dof_view(element); for (std::int64_t p{0}; p < dof_view.size(); ++p) { for (std::int64_t q{0}; q < dof_view.size(); ++q) { doublets.emplace_back(dof_view(p), dof_view(q)); } } } } M.setFromTriplets(doublets.begin(), doublets.end()); doublets.clear(); auto const start = std::chrono::steady_clock::now(); for (auto const& submesh : mesh.meshes()) { for (std::int64_t element = 0; element < submesh.elements(); ++element) { auto const& [dofs, m] = submesh.consistent_mass(element); for (std::int64_t a{0}; a < dofs.size(); a++) { for (std::int64_t b{0}; b < dofs.size(); b++) { M.add_to(dofs(a), dofs(b), m(a, b)); } } } } auto const end = std::chrono::steady_clock::now(); std::chrono::duration<double> const elapsed_seconds = end - start; std::cout << std::string(6, ' ') << "Mass assembly took " << elapsed_seconds.count() << "s\n"; }
/**
 * Loads a wave field checkpoint via MPI-IO.
 *
 * Rank 0 reads the header (time + wave-field timestep) and broadcasts it;
 * all ranks then collectively read their portion of the dofs.
 *
 * @param time               [out] simulation time restored from the header
 * @param timestepWaveField  [out] wave-field timestep restored from the header
 */
void seissol::checkpoint::mpio::Wavefield::load(double &time, int &timestepWaveField)
{
	// BUGFIX: the reference parameter had been mangled to "int ×tepWaveField"
	// (HTML-entity corruption of "&timestep..."); restored to match the body.
	logInfo(rank()) << "Loading wave field checkpoint";

	seissol::checkpoint::CheckPoint::load();

	MPI_File file = open();
	if (file == MPI_FILE_NULL)
		logError() << "Could not open checkpoint file";

	// Read and broadcast header
	checkMPIErr(setHeaderView(file));

	Header header;
	if (rank() == 0)
		checkMPIErr(MPI_File_read(file, &header, 1, headerType(), MPI_STATUS_IGNORE));
	// Check the broadcast result too, for consistency with the other MPI calls.
	checkMPIErr(MPI_Bcast(&header, 1, headerType(), 0, comm()));

	time = header.time;
	timestepWaveField = header.timestepWavefield;

	// Read dofs (collective read at the offset established by setDataView)
	checkMPIErr(setDataView(file));
	checkMPIErr(MPI_File_read_all(file, dofs(), numDofs(), MPI_DOUBLE, MPI_STATUS_IGNORE));

	// Close the file
	checkMPIErr(MPI_File_close(&file));
}
// Writes a wave-field checkpoint with MPI-IO: header (time + timestep) first,
// then a collective write of this rank's degrees of freedom at the offset
// established by setDataView(). Instrumented for EPIK/Score-P profiling.
void seissol::checkpoint::mpio::Wavefield::write(double time, int timestepWaveField)
{
	EPIK_TRACER("CheckPoint_write");
	SCOREP_USER_REGION("CheckPoint_write", SCOREP_USER_REGION_TYPE_FUNCTION);

	logInfo(rank()) << "Writing check point.";

	// Write the header
	writeHeader(time, timestepWaveField);

	// Save data (profiled as its own region)
	EPIK_USER_REG(r_write_wavefield, "checkpoint_write_wavefield");
	SCOREP_USER_REGION_DEFINE(r_write_wavefield);
	EPIK_USER_START(r_write_wavefield);
	SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_wavefield", SCOREP_USER_REGION_TYPE_COMMON);

	// Position the file view past the header, then write all dofs collectively.
	checkMPIErr(setDataView(file()));
	checkMPIErr(MPI_File_write_all(file(), dofs(), numDofs(), MPI_DOUBLE, MPI_STATUS_IGNORE));

	EPIK_USER_END(r_write_wavefield);
	SCOREP_USER_REGION_END(r_write_wavefield);

	// Finalize the checkpoint
	finalizeCheckpoint();

	logInfo(rank()) << "Writing check point. Done.";
}
// Writes a wave-field checkpoint through the SIONlib backend: opens the data
// file for the current (odd/even) slot, writes the caller-supplied header,
// then the dofs, using collective SIONlib writes.
// @param header     opaque header blob to prepend to the checkpoint
// @param headerSize size of the header blob in bytes
void seissol::checkpoint::sionlib::Wavefield::write(const void* header, size_t headerSize)
{
	SCOREP_USER_REGION("CheckPoint_write", SCOREP_USER_REGION_TYPE_FUNCTION);

	logInfo(rank()) << "Checkpoint backend: Writing.";

	// odd() selects the double-buffered checkpoint slot to overwrite.
	int file = open(dataFile(odd()), writeMode());
	checkErr(file);

	// Write the header; second checkErr argument is the expected item count.
	SCOREP_USER_REGION_DEFINE(r_write_header);
	SCOREP_USER_REGION_BEGIN(r_write_header, "checkpoint_write_header", SCOREP_USER_REGION_TYPE_COMMON);

	checkErr(sion_coll_fwrite(header, headerSize, 1, file), 1);

	SCOREP_USER_REGION_END(r_write_header);

	// Save data: numDofs() items of sizeof(real) bytes each.
	SCOREP_USER_REGION_DEFINE(r_write_wavefield);
	SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_wavefield", SCOREP_USER_REGION_TYPE_COMMON);

	checkErr(sion_coll_fwrite(dofs(), sizeof(real), numDofs(), file), numDofs());

	SCOREP_USER_REGION_END(r_write_wavefield);

	// Finalize the checkpoint
	finalizeCheckpoint(file);

	logInfo(rank()) << "Checkpoint backend: Writing. Done.";
}
void seissol::checkpoint::posix::Wavefield::load(double &time, int ×tepWaveField) { logInfo(rank()) << "Loading wave field checkpoint"; seissol::checkpoint::CheckPoint::load(); int file = open(); checkErr(file); // Skip identifier checkErr(lseek64(file, sizeof(unsigned long), SEEK_SET)); // Read header checkErr(read(file, &time, sizeof(time)), sizeof(time)); checkErr(read(file, ×tepWaveField, sizeof(timestepWaveField)), sizeof(timestepWaveField)); // Convert to char* to do pointer arithmetic char* buffer = reinterpret_cast<char*>(dofs()); unsigned long left = numDofs()*sizeof(real); // Read dofs while (left > 0) { unsigned long readSize = read(file, buffer, left); if (readSize <= 0) checkErr(readSize, left); buffer += readSize; left -= readSize; } // Close the file checkErr(::close(file)); }
// Starts an asynchronous wave-field checkpoint write. The dofs are copied
// into m_dofsCopy so the solver may keep mutating its own buffer while the
// split-collective MPI write is in flight; the matching
// MPI_File_write_all_end is issued elsewhere (m_started marks the pending
// operation).
// @param time               simulation time stored in the header
// @param timestepWaveField  wave-field timestep stored in the header
void seissol::checkpoint::mpio::WavefieldAsync::writePrepare(double time, int timestepWaveField)
{
	EPIK_TRACER("CheckPoint_writePrepare");
	SCOREP_USER_REGION("CheckPoint_writePrepare", SCOREP_USER_REGION_TYPE_FUNCTION);

	// Write the header
	writeHeader(time, timestepWaveField);

	// Create copy of the dofs so the async write reads from a stable buffer
	memcpy(m_dofsCopy, dofs(), numDofs()*sizeof(real));

	// Save data (begin half of the split-collective write)
	EPIK_USER_REG(r_write_wavefield, "checkpoint_write_begin_wavefield");
	SCOREP_USER_REGION_DEFINE(r_write_wavefield);
	EPIK_USER_START(r_write_wavefield);
	SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_begin_wavefield", SCOREP_USER_REGION_TYPE_COMMON);

	checkMPIErr(setDataView(file()));

	checkMPIErr(MPI_File_write_all_begin(file(), m_dofsCopy, numDofs(), MPI_DOUBLE));

	EPIK_USER_END(r_write_wavefield);
	SCOREP_USER_REGION_END(r_write_wavefield);

	// Remember that a split-collective write is pending.
	m_started = true;

	logInfo(rank()) << "Checkpoint backend: Writing. Done.";
}
Eigen::VectorXd Skeleton::getConfig(std::vector<int> _id)
{
    // Collect the current values of the requested degrees of freedom,
    // in the order the ids were given.
    const std::size_t count = _id.size();
    Eigen::VectorXd dofs(count);
    for (std::size_t k = 0; k < count; ++k) {
        dofs[k] = mDofs[_id[k]]->getValue();
    }
    return dofs;
}
// Enforces the bound constraint "bounded variable >= lower variable" nodewise:
// wherever the bounded variable has dropped below the lower variable, it is
// reset to the lower value. Returns true (on every rank) if any rank changed
// the solution; the ghosted copy is refreshed in that case.
bool RichardsMultiphaseProblem::updateSolution(NumericVector<Number>& vec_solution, NumericVector<Number>& ghosted_solution)
{
  bool updatedSolution = false; // this gets set to true if we needed to enforce the bound at any node

  unsigned int sys_num = getNonlinearSystem().number();

  // For parallel procs i believe that i have to use local_nodes_begin, rather than just nodes_begin
  // _mesh comes from SystemBase (_mesh = getNonlinearSystem().subproblem().mesh(), and subproblem is this object)
  MeshBase::node_iterator nit = _mesh.getMesh().local_nodes_begin();
  const MeshBase::node_iterator nend = _mesh.getMesh().local_nodes_end();

  for ( ; nit != nend; ++nit)
  {
    const Node & node = *(*nit);

    // dofs[0] is the dof number of the bounded variable at this node
    // dofs[1] is the dof number of the lower variable at this node
    std::vector<unsigned int> dofs(2);
    dofs[0] = node.dof_number(sys_num, _bounded_var_num, 0);
    dofs[1] = node.dof_number(sys_num, _lower_var_num, 0);

    // soln[0] is the value of the bounded variable at this node
    // soln[1] is the value of the lower variable at this node
    std::vector<Number> soln(2);
    vec_solution.get(dofs, soln);

    // do the bounding
    if (soln[0] < soln[1])
    {
      /*
      dof_id_type nd = node.id();
      Moose::out << "nd = " << nd << " dof_bounded = " << dofs[0] << " dof_lower = " << dofs[1] << "\n";
      Moose::out << " bounded_value = " << soln[0] << " lower_value = " << soln[1] << "\n";
      */
      vec_solution.set(dofs[0], soln[1]); // set the bounded variable equal to the lower value
      updatedSolution = true;
    }
  }

  // The above vec_solution.set calls potentially added "set" commands to a queue
  // The following actions the queue (doing MPI commands if necessary), so
  // vec_solution will actually be modified by this following command
  vec_solution.close();

  // if any proc updated the solution, all procs will know about it
  _communicator.max(updatedSolution);

  if (updatedSolution)
  {
    // Keep the ghosted (read-only, with off-process entries) copy in sync.
    ghosted_solution = vec_solution;
    ghosted_solution.close();
  }

  return updatedSolution;
}
Eigen::VectorXd Skeleton::getConfig(std::vector<int> _id)
{
    // Build a vector of the generalized coordinates selected by _id,
    // preserving the requested ordering.
    const std::size_t count = _id.size();
    Eigen::VectorXd dofs(count);
    for (std::size_t k = 0; k < count; ++k) {
        dofs[k] = mGenCoords[_id[k]]->get_q();
    }
    return dofs;
}
void seissol::checkpoint::posix::Wavefield::write(double time, int timestepWaveField) { EPIK_TRACER("CheckPoint_write"); SCOREP_USER_REGION("CheckPoint_write", SCOREP_USER_REGION_TYPE_FUNCTION); logInfo(rank()) << "Checkpoint backend: Writing."; // Start at the beginning checkErr(lseek64(file(), 0, SEEK_SET)); // Write the header EPIK_USER_REG(r_write_header, "checkpoint_write_header"); SCOREP_USER_REGION_DEFINE(r_write_header); EPIK_USER_START(r_write_header); SCOREP_USER_REGION_BEGIN(r_write_header, "checkpoint_write_header", SCOREP_USER_REGION_TYPE_COMMON); WavefieldHeader header; header.time = time; header.timestepWaveField = timestepWaveField; writeHeader(file(), header); EPIK_USER_END(r_write_header); SCOREP_USER_REGION_END(r_write_header); // Save data EPIK_USER_REG(r_write_wavefield, "checkpoint_write_wavefield"); SCOREP_USER_REGION_DEFINE(r_write_wavefield); EPIK_USER_START(r_write_wavefield); SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_wavefield", SCOREP_USER_REGION_TYPE_COMMON); // Convert to char* to do pointer arithmetic const char* buffer = reinterpret_cast<const char*>(dofs()); unsigned long left = numDofs()*sizeof(real); if (alignment()) { left = (left + alignment() - 1) / alignment(); left *= alignment(); } while (left > 0) { unsigned long written = ::write(file(), buffer, left); if (written <= 0) checkErr(written, left); buffer += written; left -= written; } EPIK_USER_END(r_write_wavefield); SCOREP_USER_REGION_END(r_write_wavefield); // Finalize the checkpoint finalizeCheckpoint(); logInfo(rank()) << "Checkpoint backend: Writing. Done."; }
void seissol::checkpoint::posix::Wavefield::write(double time, int timestepWaveField) { EPIK_TRACER("CheckPoint_write"); SCOREP_USER_REGION("CheckPoint_write", SCOREP_USER_REGION_TYPE_FUNCTION); logInfo(rank()) << "Writing check point."; // Skip identifier checkErr(lseek64(file(), sizeof(unsigned long), SEEK_SET)); // Write the header EPIK_USER_REG(r_write_header, "checkpoint_write_header"); SCOREP_USER_REGION_DEFINE(r_write_header); EPIK_USER_START(r_write_header); SCOREP_USER_REGION_BEGIN(r_write_header, "checkpoint_write_header", SCOREP_USER_REGION_TYPE_COMMON); checkErr(::write(file(), &time, sizeof(time)), sizeof(time)); checkErr(::write(file(), ×tepWaveField, sizeof(timestepWaveField)), sizeof(timestepWaveField)); EPIK_USER_END(r_write_header); SCOREP_USER_REGION_END(r_write_header); // Save data EPIK_USER_REG(r_write_wavefield, "checkpoint_write_wavefield"); SCOREP_USER_REGION_DEFINE(r_write_wavefield); EPIK_USER_START(r_write_wavefield); SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_wavefield", SCOREP_USER_REGION_TYPE_COMMON); // Convert to char* to do pointer arithmetic const char* buffer = reinterpret_cast<const char*>(dofs()); unsigned long left = numDofs()*sizeof(real); while (left > 0) { unsigned long written = ::write(file(), buffer, left); if (written <= 0) checkErr(written, left); buffer += written; left -= written; } EPIK_USER_END(r_write_wavefield); SCOREP_USER_REGION_END(r_write_wavefield); // Finalize the checkpoint finalizeCheckpoint(); logInfo(rank()) << "Writing check point. Done."; }
// support deprecated syntax, later take std::string arg directly (more efficient) void State::blockedDOFs_vec_set(const python::object& dofs0){ python::extract<std::string> dofStr(dofs0); python::extract<std::vector<std::string> > dofLst(dofs0); blockedDOFs=0; if(dofStr.check()){ string dofs(dofStr()); #else void State::blockedDOFs_vec_set(const std::string& dofs){ blockedDOFs=0; #endif FOREACH(char c, dofs){ #define _GET_DOF(DOF_ANY,ch) if(c==ch) { blockedDOFs|=State::DOF_ANY; continue; } _GET_DOF(DOF_X,'x'); _GET_DOF(DOF_Y,'y'); _GET_DOF(DOF_Z,'z'); _GET_DOF(DOF_RX,'X'); _GET_DOF(DOF_RY,'Y'); _GET_DOF(DOF_RZ,'Z'); #undef _GET_DOF throw std::invalid_argument("Invalid DOF specification `"+lexical_cast<string>(c)+"' in '"+dofs+"', characters must be ∈{x,y,z,X,Y,Z}."); }
// Writes a wave-field checkpoint in fixed-size chunks using collective
// MPI-IO. Because MPI_File_write_all is collective, every rank must call it
// the same number of times (totalIterations()); ranks that run out of data
// early rewrite their last chunk and seek back so the byte stream is
// unchanged.
// @param header     opaque header blob written before the dofs
// @param headerSize size of the header blob in bytes
void seissol::checkpoint::mpio::Wavefield::write(const void* header, size_t headerSize)
{
	SCOREP_USER_REGION("CheckPoint_write", SCOREP_USER_REGION_TYPE_FUNCTION);

	logInfo(rank()) << "Checkpoint backend: Writing.";

	// Write the header
	writeHeader(header, headerSize);

	// Save data
	SCOREP_USER_REGION_DEFINE(r_write_wavefield);
	SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_wavefield", SCOREP_USER_REGION_TYPE_COMMON);

	checkMPIErr(setDataView(file()));

	unsigned int totalIter = totalIterations();
	unsigned int iter = iterations();
	unsigned int count = dofsPerIteration();
	if (m_useLargeBuffer) {
		// With the large buffer, chunking is expressed in sizeof(real)-sized
		// groups: fewer (rounded-up) iterations of proportionally larger chunks.
		totalIter = (totalIter + sizeof(real) - 1) / sizeof(real);
		iter = (iter + sizeof(real) - 1) / sizeof(real);
		count *= sizeof(real);
	}

	unsigned long offset = 0;
	for (unsigned int i = 0; i < totalIter; i++) {
		if (i == iter-1)
			// Last iteration
			count = numDofs() - (iter-1) * count;

		checkMPIErr(MPI_File_write_all(file(), const_cast<real*>(&dofs()[offset]), count, MPI_DOUBLE, MPI_STATUS_IGNORE));

		if (i < iter-1)
			offset += count;
		// otherwise we just continue writing the last chunk over and over
		else if (i != totalIter-1)
			// Undo the file-pointer advance so the repeated write lands on the
			// same bytes.
			checkMPIErr(MPI_File_seek(file(), -count * sizeof(real), MPI_SEEK_CUR));
	}

	SCOREP_USER_REGION_END(r_write_wavefield);

	// Finalize the checkpoint
	finalizeCheckpoint();

	logInfo(rank()) << "Checkpoint backend: Writing. Done.";
}
// Computes the homogenized tangent of the RVE: assembles the load matrix F
// (one column per spatial dimension, built from the integrated N-matrices),
// solves Kff * H = F with the tangent stiffness at the converged state, and
// returns answer = H^T * F scaled by 1/area of the RVE.
// @param answer [out] homogenized tangent (nsd x nsd)
// @param tStep  time step at which the tangent is evaluated
void StokesFlowVelocityHomogenization :: computeTangent(FloatMatrix &answer, TimeStep *tStep)
{
    IntArray loc, col;

    Domain *domain = this->giveDomain(1);
    int nsd = domain->giveNumberOfSpatialDimensions();
    int ndof = this->giveNumberOfDomainEquations( 1, EModelDefaultEquationNumbering() );

    // Build F matrix
    IntArray dofs(nsd);
    for ( int i = 0; i < nsd; ++i ) {
        dofs[i] = V_u + i; ///@todo This is a hack. We should have these as user input instead.
    }
    FloatMatrix F(ndof, nsd), Fe, N;
    col.enumerate(nsd);

    // Each element contributes the transpose of its integrated N-matrix,
    // scattered to the global equation numbers.
    for ( auto &elem : domain->giveElements() ) {
        this->integrateNMatrix(N, *elem, tStep);
        elem->giveLocationArray( loc, dofs, EModelDefaultEquationNumbering() );
        Fe.beTranspositionOf(N);
        F.assemble(Fe, loc, col);
    }

    FloatMatrix H;

    std :: unique_ptr< SparseLinearSystemNM > solver( classFactory.createSparseLinSolver(solverType, this->giveDomain(1), this) );

    H.resize( F.giveNumberOfRows(), F.giveNumberOfColumns() );
    H.zero();

    // For correct homogenization, the tangent at the converged values should be used
    // (the one from the Newton iterations from solveYourselfAt is not updated to contain the latest values).
    SparseMtrxType stype = solver->giveRecommendedMatrix(true);
    std :: unique_ptr< SparseMtrx > Kff( classFactory.createSparseMtrx( stype ) );
    Kff->buildInternalStructure(this, domain->giveNumber(), EModelDefaultEquationNumbering() );
    this->assemble(*Kff, tStep, TangentStiffnessMatrix, EModelDefaultEquationNumbering(), domain);

    // Multi-RHS solve: one column of H per spatial dimension.
    solver->solve(*Kff, F, H);

    answer.beTProductOf(H, F);
    answer.times( 1. / this->giveAreaOfRVE() );
}
/** Attempts to read the index file datafilename.obindx successively from the following directories: - the current directory - that in the environment variable BABEL_DATADIR or in the macro BABEL_DATADIR if the environment variable is not set - in a subdirectory of the BABEL_DATADIR directory with the version of OpenBabel as its name An index of type NameIndexType is then constructed. NameIndexType is defined in obmolecformat.h and may be a std::tr1::unordered_map (a hash_map) or std::map. In any case it is searched by @code NameIndexType::iterator itr = index.find(molecule_name); if(itr!=index.end()) unsigned pos_in_datafile = itr->second; @endcode pos_in_datafile is used as a parameter in seekg() to read from the datafile If no index is found, it is constructed from the datafile by reading all of it using the format pInFormat, and written to the directory containing the datafile. This means that this function can be used without worrying whether there is an index. It will be slow to execute the first time, but subsequent uses get the speed benefit of indexed access to the datafile. The serialization and de-serialization of the NameIndexType is entirely in this routine and could possibly be improved. Currently re-hashing is done every time the index is read. 
**/
// Loads (or, on first use, builds and saves) the molecule-name -> file-offset
// index for datafilename. Binary index layout: a fixed header {filename,
// entry count}, then per entry: 1-byte name length, name bytes, 4-byte offset.
bool OBMoleculeFormat::ReadNameIndex(NameIndexType& index, const string& datafilename, OBFormat* pInFormat)
{
  struct headertype
  {
    char filename[256];
    unsigned size;
  } header;

  NameIndexType::iterator itr;

  ifstream indexstream;
  OpenDatafile(indexstream, datafilename + ".obindx");
  if(!indexstream)
    {
      //Need to prepare the index
      ifstream datastream;
      string datafilepath = OpenDatafile(datastream, datafilename);
      if(!datastream)
        {
          obErrorLog.ThrowError(__FUNCTION__, datafilename + " was not found or could not be opened", obError);
          return false;
        }

      OBConversion Conv(&datastream,NULL);
      Conv.SetInFormat(pInFormat);
      OBMol mol;
      streampos pos;
      // Read every molecule once, recording the stream position at which each
      // titled molecule starts. pos lags one read behind on purpose: it is the
      // offset *before* the molecule was parsed.
      while(Conv.Read(&mol))
        {
          string name = mol.GetTitle();
          if(!name.empty())
            index.insert(make_pair(name, pos));
          mol.Clear();
          pos = datastream.tellg();
        }
      obErrorLog.ThrowError(__FUNCTION__, "Prepared an index for " + datafilepath, obAuditMsg);

      //Save index to file
      ofstream dofs((datafilepath + ".obindx").c_str(), ios_base::out|ios_base::binary);
      if(!dofs) return false;
      strncpy(header.filename,datafilename.c_str(), sizeof(header.filename));
      // Ensure the stored filename is NUL-terminated even when truncated.
      header.filename[sizeof(header.filename) - 1] = '\0';
      header.size = index.size();
      dofs.write((const char*)&header, sizeof(headertype));
      for(itr=index.begin();itr!=index.end();++itr)
        {
          //#chars; chars; ofset(4bytes).
          // NOTE(review): the length is stored in a single (signed) char, so
          // names longer than 127 characters would be truncated/corrupted in
          // the index; the format is also not endianness-portable — confirm
          // whether either case can occur in practice.
          const char n = itr->first.size();
          dofs.put(n);
          dofs.write(itr->first.c_str(),n);
          dofs.write((const char*)&itr->second,sizeof(unsigned));
        }
    }
  else
    {
      //Read index data from file and put into hash_map
      indexstream.read((char*)&header,sizeof(headertype));
      itr=index.begin(); // for hint
      for(unsigned int i=0;i<header.size;++i)
        {
          char len;
          indexstream.get(len);
          string title(len, 0);
          unsigned pos;
          indexstream.read(&title[0],len);
          indexstream.read((char*)&pos,sizeof(unsigned));
          index.insert(itr, make_pair(title,pos));
        }
    }
  return true;
}
// Writes a wave-field checkpoint to HDF5: time/timestep as attributes, then
// the dofs in dofsPerIteration()-sized hyperslabs. Because the dataset writes
// are (potentially) collective, every rank performs totalIterations()-1 loop
// writes; ranks with less data simply rewrite their current chunk without
// advancing, and the remainder is written in a final, smaller hyperslab.
void seissol::checkpoint::h5::Wavefield::write(double time, int waveFieldTimeStep)
{
	EPIK_TRACER("CheckPoint_write");
	SCOREP_USER_REGION("CheckPoint_write", SCOREP_USER_REGION_TYPE_FUNCTION);

	logInfo(rank()) << "Writing check point.";

	EPIK_USER_REG(r_header, "checkpoint_write_header");
	SCOREP_USER_REGION_DEFINE(r_header);
	EPIK_USER_START(r_header);
	SCOREP_USER_REGION_BEGIN(r_header, "checkpoint_write_header", SCOREP_USER_REGION_TYPE_COMMON);

	// Time
	checkH5Err(H5Awrite(m_h5time[odd()], H5T_NATIVE_DOUBLE, &time));

	// Wavefield writer
	checkH5Err(H5Awrite(m_h5timestepWavefield[odd()], H5T_NATIVE_INT, &waveFieldTimeStep));

	EPIK_USER_END(r_header);
	SCOREP_USER_REGION_END(r_header);

	// Save data
	EPIK_USER_REG(r_write_wavefield, "checkpoint_write_wavefield");
	SCOREP_USER_REGION_DEFINE(r_write_wavefield);
	EPIK_USER_START(r_write_wavefield);
	SCOREP_USER_REGION_BEGIN(r_write_wavefield, "checkpoint_write_wavefield", SCOREP_USER_REGION_TYPE_COMMON);

	// Write the wave field
	unsigned int offset = 0;          // element offset into dofs()
	hsize_t fStart = fileOffset();    // this rank's start position in the file dataspace
	hsize_t count = dofsPerIteration();
	hid_t h5memSpace = H5Screate_simple(1, &count, 0L);
	checkH5Err(h5memSpace);
	checkH5Err(H5Sselect_all(h5memSpace));
	for (unsigned int i = 0; i < totalIterations()-1; i++) {
		checkH5Err(H5Sselect_hyperslab(m_h5fSpaceData, H5S_SELECT_SET, &fStart, 0L, &count, 0L));

		checkH5Err(H5Dwrite(m_h5data[odd()], H5T_NATIVE_DOUBLE, h5memSpace, m_h5fSpaceData, h5XferList(), &const_cast<real*>(dofs())[offset]));

		// This rank may finish in fewer iterations than totalIterations();
		// in that case it writes the same data again (without advancing) so
		// that all ranks issue the same number of dataset writes.
		if (i < iterations()-1) {
			fStart += count;
			offset += count;
		}
	}
	checkH5Err(H5Sclose(h5memSpace));

	// Save remaining data in the last iteration
	count = numDofs() - (iterations() - 1) * count;
	h5memSpace = H5Screate_simple(1, &count, 0L);
	checkH5Err(h5memSpace);
	checkH5Err(H5Sselect_all(h5memSpace));
	checkH5Err(H5Sselect_hyperslab(m_h5fSpaceData, H5S_SELECT_SET, &fStart, 0L, &count, 0L));

	checkH5Err(H5Dwrite(m_h5data[odd()], H5T_NATIVE_DOUBLE, h5memSpace, m_h5fSpaceData, h5XferList(), &dofs()[offset]));
	checkH5Err(H5Sclose(h5memSpace));

	EPIK_USER_END(r_write_wavefield);
	SCOREP_USER_REGION_END(r_write_wavefield);

	// Finalize the checkpoint
	finalizeCheckpoint();

	logInfo(rank()) << "Writing check point. Done.";
}
// Distributes the locally-computed Riesz representation dofs and norms so that
// every rank holds a global copy (_rieszRepDofsGlobal / _rieszRepNormSquaredGlobal).
// Cell-by-cell, the owning rank scatters its dofs into an Epetra_Vector and all
// ranks import them; the squared norms are gathered once at the end.
void RieszRep::distributeDofs(){
  int myRank = Teuchos::GlobalMPISession::getRank();
  int numRanks = Teuchos::GlobalMPISession::getNProc();
#ifdef HAVE_MPI
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
  //cout << "rank: " << rank << " of " << numProcs << endl;
#else
  Epetra_SerialComm Comm;
#endif

  // the code below could stand to be reworked; I'm pretty sure this is not the best way to distribute the data, and it would also be best to get rid of the iteration over the global set of active elements.  But a similar point could be made about this method as a whole: do we really need to distribute all the dofs to every rank?  It may be best to eliminate this method altogether.

  // Global cell ordering: all cells of rank 0, then rank 1, etc.
  vector<GlobalIndexType> cellIDsByPartitionOrdering;
  for (int rank=0; rank<numRanks; rank++) {
    set<GlobalIndexType> cellIDsForRank = _mesh->globalDofAssignment()->cellsInPartition(rank);
    cellIDsByPartitionOrdering.insert(cellIDsByPartitionOrdering.end(), cellIDsForRank.begin(), cellIDsForRank.end());
  }
  // determine inverse map:
  map<GlobalIndexType,int> ordinalForCellID;
  for (int ordinal=0; ordinal<cellIDsByPartitionOrdering.size(); ordinal++) {
    GlobalIndexType cellID = cellIDsByPartitionOrdering[ordinal];
    ordinalForCellID[cellID] = ordinal;
    // cout << "ordinalForCellID[" << cellID << "] = " << ordinal << endl;
  }

  // Every rank walks every cell; only the owner contributes values, the rest
  // receive them through the Epetra import.
  for (int cellOrdinal=0; cellOrdinal<cellIDsByPartitionOrdering.size(); cellOrdinal++) {
    GlobalIndexType cellID = cellIDsByPartitionOrdering[cellOrdinal];
    ElementTypePtr elemTypePtr = _mesh->getElementType(cellID);
    DofOrderingPtr testOrderingPtr = elemTypePtr->testOrderPtr;
    int numDofs = testOrderingPtr->totalDofs();

    int cellIDPartition = _mesh->partitionForCellID(cellID);
    bool isInPartition = (cellIDPartition == myRank);

    int numMyDofs;
    FieldContainer<double> dofs(numDofs);
    if (isInPartition){  // if in partition
      numMyDofs = numDofs;
      dofs = _rieszRepDofs[cellID];
    }
    else{
      numMyDofs = 0;
    }

    // dofMap: the owner holds all numDofs entries, everyone else holds none.
    Epetra_Map dofMap(numDofs,numMyDofs,0,Comm);
    Epetra_Vector distributedRieszDofs(dofMap);
    if (isInPartition) {
      for (int i = 0;i<numMyDofs;i++) { // shouldn't activate on off-proc partitions
        distributedRieszDofs.ReplaceGlobalValues(1,&dofs(i),&i);
      }
    }
    Epetra_Map importMap(numDofs,numDofs,0,Comm); // every proc should own their own copy of the dofs
    Epetra_Import testDofImporter(importMap, dofMap);
    Epetra_Vector globalRieszDofs(importMap);
    globalRieszDofs.Import(distributedRieszDofs, testDofImporter, Insert);
    if (!isInPartition){
      // Non-owners copy the imported values into their local container.
      for (int i = 0;i<numDofs;i++){
        dofs(i) = globalRieszDofs[i];
      }
    }
    _rieszRepDofsGlobal[cellID] = dofs;

    // { // debugging
    //   ostringstream cellIDlabel;
    //   cellIDlabel << "cell " << cellID << " _rieszRepDofsGlobal, after global import";
    //   TestSuite::serializeOutput(cellIDlabel.str(), _rieszRepDofsGlobal[cellID]);
    // }
  }

  // distribute norms as well
  GlobalIndexType numElems = _mesh->numActiveElements();
  set<GlobalIndexType> rankLocalCellIDs = _mesh->cellIDsInPartition();
  IndexType numMyElems = rankLocalCellIDs.size();
  // NOTE(review): variable-length arrays are a compiler extension, not
  // standard C++ — consider std::vector here.
  GlobalIndexType myElems[numMyElems];
  // build cell index
  GlobalIndexType myCellOrdinal = 0;
  double rankLocalRieszNorms[numMyElems];
  for (set<GlobalIndexType>::iterator cellIDIt = rankLocalCellIDs.begin(); cellIDIt != rankLocalCellIDs.end(); cellIDIt++) {
    GlobalIndexType cellID = *cellIDIt;
    myElems[myCellOrdinal] = ordinalForCellID[cellID];
    rankLocalRieszNorms[myCellOrdinal] = _rieszRepNormSquared[cellID];
    myCellOrdinal++;
  }
  Epetra_Map normMap((GlobalIndexTypeToCast)numElems,(int)numMyElems,(GlobalIndexTypeToCast *)myElems,(GlobalIndexTypeToCast)0,Comm);

  Epetra_Vector distributedRieszNorms(normMap);
  int err = distributedRieszNorms.ReplaceGlobalValues(numMyElems,rankLocalRieszNorms,(GlobalIndexTypeToCast *)myElems);
  if (err != 0) {
    cout << "RieszRep::distributeDofs(): on rank" << myRank << ", ReplaceGlobalValues returned error code " << err << endl;
  }

  Epetra_Map normImportMap((GlobalIndexTypeToCast)numElems,(GlobalIndexTypeToCast)numElems,0,Comm);
  Epetra_Import normImporter(normImportMap,normMap);
  Epetra_Vector globalNorms(normImportMap);
  globalNorms.Import(distributedRieszNorms, normImporter, Add);  // add should be OK (everything should be zeros)

  for (int cellOrdinal=0; cellOrdinal<cellIDsByPartitionOrdering.size(); cellOrdinal++) {
    GlobalIndexType cellID = cellIDsByPartitionOrdering[cellOrdinal];
    _rieszRepNormSquaredGlobal[cellID] = globalNorms[cellOrdinal];
    // if (myRank==0) cout << "_rieszRepNormSquaredGlobal[" << cellID << "] = " << globalNorms[cellOrdinal] << endl;
  }
}
//! @brief Handle the constraints. //! //! Determines the number of FE\_Elements and DOF\_Groups needed from the //! Domain (a one to one mapping between Elements and FE\_Elements and //! Nodes and DOF\_Groups) Creates two arrays of pointers to store the //! FE\_elements and DOF\_Groups, returning a warning message and a \f$-2\f$ //! or \f$-3\f$ if not enough memory is available for these arrays. Then the //! object will iterate through the Nodes of the Domain, creating a //! DOF\_Group for each node and setting the initial id for each dof to //! \f$-2\f$ if no SFreedom\_Constraint exists for the dof, or \f$-1\f$ if a //! SFreedom\_Constraint exists or \f$-3\f$ if the node identifier is in {\em //! nodesToBeNumberedLast}. The object then iterates through the Elements //! of the Domain creating a FE\_Element for each Element, if the Element //! is a Subdomain setFE\_ElementPtr() is invoked on the Subdomain //! with the new FE\_Element as the argument. If not enough memory is //! available for any DOF\_Group or FE\_element a warning message is //! printed and a \f$-4\f$ or \f$-5\f$ is returned. If any MFreedom\_Constraint //! objects exist in the Domain a warning message is printed and \f$-6\f$ is //! returned. If all is successful, the method returns the number of //! degrees-of-freedom associated with the DOF\_Groups in {\em //! nodesToBeNumberedLast}. int XC::PlainHandler::handle(const ID *nodesLast) { // first check links exist to a Domain and an AnalysisModel object Domain *theDomain = this->getDomainPtr(); AnalysisModel *theModel = this->getAnalysisModelPtr(); Integrator *theIntegrator = this->getIntegratorPtr(); if((!theDomain) || (!theModel) || (!theIntegrator)) { std::cerr << getClassName() << "::" << __FUNCTION__ << "; domain, model or integrator was not set.\n"; return -1; } // initialse the DOF_Groups and add them to the AnalysisModel. 
// : must of course set the initial IDs NodeIter &theNod= theDomain->getNodes(); Node *nodPtr= nullptr; SFreedom_Constraint *spPtr= nullptr; DOF_Group *dofPtr= nullptr; int numDOF = 0; int count3 = 0; int countDOF =0; while((nodPtr = theNod()) != nullptr) { dofPtr= theModel->createDOF_Group(numDOF++, nodPtr); // initially set all the ID value to -2 countDOF+= dofPtr->inicID(-2); // loop through the SFreedom_Constraints to see if any of the // DOFs are constrained, if so set initial XC::ID value to -1 int nodeID = nodPtr->getTag(); SFreedom_ConstraintIter &theSPs = theDomain->getConstraints().getDomainAndLoadPatternSPs(); while((spPtr = theSPs()) != 0) if(spPtr->getNodeTag() == nodeID) { if(spPtr->isHomogeneous() == false) std::cerr << getClassName() << "::" << __FUNCTION__ << "; non-homogeneos constraint" << " for node " << spPtr->getNodeTag() << " h**o assumed\n"; const ID &id = dofPtr->getID(); int dof = spPtr->getDOF_Number(); if(id(dof) == -2) { dofPtr->setID(spPtr->getDOF_Number(),-1); countDOF--; } else std::cerr << getClassName() << "::" << __FUNCTION__ << "; multiple single pointconstraints at DOF " << dof << " for node " << spPtr->getNodeTag() << std::endl; } // loop through the MFreedom_Constraints to see if any of the // DOFs are constrained, note constraint matrix must be diagonal // with 1's on the diagonal MFreedom_ConstraintIter &theMPs = theDomain->getConstraints().getMPs(); MFreedom_Constraint *mpPtr; while((mpPtr = theMPs()) != 0) { if(mpPtr->getNodeConstrained() == nodeID) { if(mpPtr->isTimeVarying() == true) std::cerr << getClassName() << "::" << __FUNCTION__ << "; time-varying constraint" << " for node " << nodeID << " non-varying assumed\n"; const Matrix &C = mpPtr->getConstraint(); int numRows = C.noRows(); int numCols = C.noCols(); if(numRows != numCols) std::cerr << getClassName() << "::" << __FUNCTION__ << " constraint matrix not diagonal," << " ignoring constraint for node " << nodeID << std::endl; else { int ok = 0; for(int i=0; 
i<numRows; i++) { if(C(i,i) != 1.0) ok = 1; for(int j=0; j<numRows; j++) if(i != j) if(C(i,j) != 0.0) ok = 1; } if(ok != 0) std::cerr << getClassName() << "::" << __FUNCTION__ << "; constraint matrix not identity," << " ignoring constraint for node " << nodeID << std::endl; else { const ID &dofs = mpPtr->getConstrainedDOFs(); const ID &id = dofPtr->getID(); for(int i=0; i<dofs.Size(); i++) { int dof = dofs(i); if(id(dof) == -2) { dofPtr->setID(dof,-4); countDOF--; } else std::cerr << getClassName() << "::" << __FUNCTION__ << "; constraint at dof " << dof << " already specified for constrained node" << " in MFreedom_Constraint at node " << nodeID << std::endl; } } } } } // loop through the MFreedom_Constraints to see if any of the // DOFs are constrained, note constraint matrix must be diagonal // with 1's on the diagonal MRMFreedom_ConstraintIter &theMRMPs = theDomain->getConstraints().getMRMPs(); MRMFreedom_Constraint *mrmpPtr; while((mrmpPtr = theMRMPs()) != 0) { std::cerr << getClassName() << "::" << __FUNCTION__ << "; loop through the MRMFreedom_Constraints." << std::endl; } } // set the number of eqn in the model theModel->setNumEqn(countDOF); // now see if we have to set any of the dof's to -3 // int numLast = 0; if(nodesLast != 0) for(int i=0; i<nodesLast->Size(); i++) { int nodeID = (*nodesLast)(i); Node *nodPtr = theDomain->getNode(nodeID); if(nodPtr != 0) { dofPtr = nodPtr->getDOF_GroupPtr(); const ID &id = dofPtr->getID(); // set all the dof values to -3 for (int j=0; j < id.Size(); j++) if(id(j) == -2) { dofPtr->setID(j,-3); count3++; } else std::cerr << getClassName() << "::" << __FUNCTION__ << "; boundary sp constraint in subdomain" << " this should not be - results suspect \n"; } } // initialise the FE_Elements and add to the XC::AnalysisModel. ElementIter &theEle = theDomain->getElements(); Element *elePtr; int numFe = 0; FE_Element *fePtr; while((elePtr = theEle()) != 0) { fePtr= theModel->createFE_Element(numFe++, elePtr); } return count3; }
// Computes, for every rank-local cell, the Riesz representation of _rhs with
// respect to the inner product _ip: solves (Gram matrix) * dofs = rhsValues
// per cell, stores the dofs and the squared norm (rhs . dofs), then
// distributes the results to all ranks.
// @param cubatureEnrichment extra cubature degree used when forming the
//                           per-cell basis cache
void RieszRep::computeRieszRep(int cubatureEnrichment){
#ifdef HAVE_MPI
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
  //cout << "rank: " << rank << " of " << numProcs << endl;
#else
  Epetra_SerialComm Comm;
#endif

  set<GlobalIndexType> cellIDs = _mesh->cellIDsInPartition();
  for (set<GlobalIndexType>::iterator cellIDIt=cellIDs.begin(); cellIDIt !=cellIDs.end(); cellIDIt++){
    GlobalIndexType cellID = *cellIDIt;

    ElementTypePtr elemTypePtr = _mesh->getElementType(cellID);
    DofOrderingPtr testOrderingPtr = elemTypePtr->testOrderPtr;
    int numTestDofs = testOrderingPtr->totalDofs();

    BasisCachePtr basisCache = BasisCache::basisCacheForCell(_mesh,cellID,true,cubatureEnrichment);

    // Right-hand side: the linear functional evaluated on the test basis.
    FieldContainer<double> rhsValues(1,numTestDofs);
    _rhs->integrate(rhsValues, testOrderingPtr, basisCache);
    if (_printAll){
      cout << "RieszRep: LinearTerm values for cell " << cellID << ":\n " << rhsValues << endl;
    }
    // Gram matrix of the inner product on the test space.
    FieldContainer<double> ipMatrix(1,numTestDofs,numTestDofs);
    _ip->computeInnerProductMatrix(ipMatrix,testOrderingPtr, basisCache);

    bool printOutRiesz = false;
    if (printOutRiesz){
      cout << " ============================ In RIESZ ==========================" << endl;
      cout << "matrix: \n" << ipMatrix;
    }

    // Drop the leading cell dimension and solve ipMatrix * rieszRepDofs = rhsValues.
    FieldContainer<double> rieszRepDofs(numTestDofs,1);
    ipMatrix.resize(numTestDofs,numTestDofs);
    rhsValues.resize(numTestDofs,1);
    int success = SerialDenseWrapper::solveSystemUsingQR(rieszRepDofs, ipMatrix, rhsValues);
    if (success != 0) {
      cout << "RieszRep::computeRieszRep: Solve FAILED with error: " << success << endl;
    }

    //    rieszRepDofs.Multiply(true,rhsVectorCopy, normSq); // equivalent to e^T * R_V * e
    // Squared norm of the representation: rhs . dofs.
    double normSquared = SerialDenseWrapper::dot(rieszRepDofs, rhsValues);
    _rieszRepNormSquared[cellID] = normSquared;

    //    cout << "normSquared for cell " << cellID << ": " << _rieszRepNormSquared[cellID] << endl;

    if (printOutRiesz){
      cout << "rhs: \n" << rhsValues;
      cout << "dofs: \n" << rieszRepDofs;
      cout << " ================================================================" << endl;
    }

    // Flatten the (numTestDofs x 1) solution into a 1-D container for storage.
    FieldContainer<double> dofs(numTestDofs);
    for (int i = 0;i<numTestDofs;i++){
      dofs(i) = rieszRepDofs(i,0);
    }
    _rieszRepDofs[cellID] = dofs;
  }
  // Make the per-cell dofs and norms available on every rank.
  distributeDofs();
  _repsNotComputed = false;
}