// Insert a row and its associated columns into the matrix. Complain if the
// row is already present and the stored entries don't match.
void WMat::InsertRow(const Entry &row, const std::vector<Entry> &cols) {

  std::pair<WeightMap::iterator, bool> wi =
    weights.insert(std::make_pair(row, cols));

  if (wi.second == false) {
    // Row already existed: just verify that the entries are the same.
    std::vector<Entry> &tcol = wi.first->second;

    ThrowRequire(tcol.size() == cols.size());

    for (UInt i = 0; i < cols.size(); i++) {
      ThrowRequire(tcol[i].id == cols[i].id);
      ThrowRequire(tcol[i].idx == cols[i].idx);
      ThrowRequire(std::abs(tcol[i].value - cols[i].value) < 1e-5);
    }
  } else {
    // Sort the column entries (invariant used elsewhere).
    //std::sort(wi.first->second.begin(), wi.first->second.end());

    // Compress storage (shrink-to-fit via the copy-and-swap idiom).
    std::vector<Entry>(wi.first->second).swap(wi.first->second);
  }
}
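// Hypothetical usage sketch (not from the source tree): exercises InsertRow's
// duplicate-row verification. Assumes Entry exposes id/idx/value and has the
// constructors shown; a second insert with matching columns passes the checks,
// while a value differing by more than 1e-5 would trip the ThrowRequire.
void example_wmat_insert(WMat &W) {
  Entry row(10);                      // assumed Entry(id) constructor
  std::vector<Entry> cols;
  cols.push_back(Entry(1, 0, 0.25));  // assumed Entry(id, idx, value) constructor
  cols.push_back(Entry(2, 0, 0.75));

  W.InsertRow(row, cols);  // first insert: stores and compresses the columns
  W.InsertRow(row, cols);  // duplicate insert: verifies entries are identical
}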
void EpetraLinearSolver::setSystemObjects(
  Epetra_FECrsMatrix * matrix,
  Epetra_FEVector * rhs)
{
  ThrowRequire(matrix);
  ThrowRequire(rhs);
  ThrowRequire(solver_);

  solver_->SetUserMatrix(matrix);
  solver_->SetRHS(rhs);
}
void TpetraLinearSolver::setSystemObjects(
  Teuchos::RCP<LinSys::Matrix> matrix,
  Teuchos::RCP<LinSys::Vector> rhs)
{
  ThrowRequire(!matrix.is_null());
  ThrowRequire(!rhs.is_null());
  //ThrowRequire(solver_);

  matrix_ = matrix;
  rhs_ = rhs;
}
int EpetraLinearSolver::solve(
  Epetra_Vector * sln,
  int & iteration_count,
  double & scaledResidual)
{
  ThrowRequire(solver_);
  ThrowRequire(sln);

  Epetra_RowMatrix * matrix = solver_->GetUserMatrix();
  ThrowRequire(matrix);
  ThrowRequire(solver_->GetRHS());
  solver_->SetLHS(sln);

  if (activateML_) {
    if (mlPreconditioner_ == 0)
      mlPreconditioner_ = new ML_Epetra::MultiLevelPreconditioner(*matrix, *mlParams_);
    solver_->SetPrecOperator(mlPreconditioner_);
  }

  if (activateMueLu_) {
    Teuchos::RCP<Teuchos::Time> tm =
      Teuchos::TimeMonitor::getNewTimer("nalu MueLu preconditioner setup");
    Teuchos::TimeMonitor timeMon(*tm);

    if (recomputePreconditioner_ || mueLuPreconditioner_ == Teuchos::null) {
      std::string xmlFileName = config_->muelu_xml_file();
      mueLuPreconditioner_ =
        MueLu::CreateEpetraPreconditioner(mueLuMat_, xmlFileName, mueLuCoordinates_);
    }
    else if (reusePreconditioner_) {
      MueLu::ReuseEpetraPreconditioner(mueLuMat_, *mueLuPreconditioner_);
    }

    if (config_->getSummarizeMueluTimer())
      Teuchos::TimeMonitor::summarize(std::cout, false, true, false, Teuchos::Union);

    solver_->SetPrecOperator(mueLuPreconditioner_.getRawPtr());
  }

  const int max_iterations = solver_->GetAztecOption(AZ_max_iter);
  const double tol = solver_->GetAllAztecParams()[AZ_tol];

  const int status = solver_->Iterate(max_iterations, tol);

  iteration_count = solver_->NumIters();
  scaledResidual = solver_->ScaledResidual();

  if (activateML_ && recomputePreconditioner_) {
    delete mlPreconditioner_;
    mlPreconditioner_ = 0;
  }

  return status;
}
void MEField<SField>::set_diff(UInt offset, UInt total_dofs) {

  ThrowRequire(cur_elems.size() == 1);

  UInt fdim = f.dim();

  MeshObj &elem = *cur_elems[0];

  MasterElementBase &meb = GetME(f, elem);

  UInt nfunc = meb.num_functions();

  // Loop dofs
  for (UInt df = 0; df < meb.num_functions(); df++) {
    const int *dd = meb.GetDofDescription(df);

    // Get the object.
    MeshObj *dofobj = NULL;
    if (dof2mtype(dd[0]) != MeshObj::ELEMENT) {
      MeshObjRelationList::const_iterator ri =
        MeshObjConn::find_relation(elem, dof2mtype(dd[0]), dd[1], MeshObj::USES);
      ThrowRequire(ri != elem.Relations.end());
      dofobj = ri->obj;
    } else dofobj = &elem;

    SField &llf = *Getfield();

    fad_type *fad = llf.data(*dofobj);

    // Mark each component as an independent variable for differentiation.
    for (UInt d = 0; d < fdim; ++d) {
      fad[dd[2]*fdim+d].diff(offset + d*nfunc + df, total_dofs);
    }
  } // num_func
}
void SparseMsg::setPattern(UInt num, const UInt *proc) {
  UInt csize = Par::Size();

  // Set dest procs.
  nsend = num;
  std::vector<int> sendto(nproc, 0);
  std::vector<int> counts(nproc, 1);
  for (UInt i = 0; i < num; i++) {
    ThrowRequire(proc[i] < csize);
    sendto[proc[i]] = 1;
    if (proc[i] == (UInt) rank) {
      sendself = true;
      self_idx = i;
    }
  }

  // Each rank learns how many messages it will receive: the reduce-scatter
  // sums the 0/1 sendto flags across ranks and delivers each rank its own slot.
  if (!Par::Serial())
    MPI_Reduce_scatter(&sendto[0], &num_incoming, &counts[0], MPI_INT, MPI_SUM, comm);
  else
    num_incoming = sendto[0];

  // Set up send buffers (so we don't have to save the proc ids).
  if (nsend > 0)
    outBuffers.resize(nsend);
  else
    outBuffers.clear();

  for (UInt i = 0; i < nsend; i++) {
    outBuffers[i].proc = proc[i];
  }
}
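// Minimal standalone sketch of the counting idiom above (illustration only,
// not library code): every rank marks the destinations it will send to, and
// MPI_Reduce_scatter with one element per rank sums those flags column-wise,
// handing each rank the number of peers that will message it.
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Each rank sends only to (rank+1) % size.
  std::vector<int> sendto(size, 0), counts(size, 1);
  sendto[(rank + 1) % size] = 1;

  int num_incoming = 0;
  MPI_Reduce_scatter(&sendto[0], &num_incoming, &counts[0],
                     MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  // Every rank has exactly one sender here, so num_incoming == 1.
  std::printf("rank %d expects %d incoming message(s)\n", rank, num_incoming);
  MPI_Finalize();
  return 0;
}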
int TpetraLinearSolver::residual_norm(int whichNorm, Teuchos::RCP<LinSys::Vector> sln, double& norm)
{
  // Check for null handles before dereferencing rhs_ below.
  ThrowRequire(!(sln.is_null() || rhs_.is_null()));

  LinSys::Vector resid(rhs_->getMap());

  if (matrix_->isFillActive()) {
    // FIXME
    //!matrix_->fillComplete(map_, map_);
    throw std::runtime_error("residual_norm");
  }

  matrix_->apply(*sln, resid);

  LinSys::OneDVector rhs = rhs_->get1dViewNonConst();
  LinSys::OneDVector res = resid.get1dViewNonConst();
  for (int i = 0; i < rhs.size(); ++i)
    res[i] -= rhs[i];

  if (whichNorm == 0)
    norm = resid.normInf();
  else if (whichNorm == 1)
    norm = resid.norm1();
  else if (whichNorm == 2)
    norm = resid.norm2();
  else
    return 1;

  return 0;
}
void FieldReg::MatchFields(UInt nfields, MEField<> **fds, std::vector<MEField<>*> &res) const
{
  for (UInt i = 0; i < nfields; i++) {
    MEField<> *mf = GetField(fds[i]->name());
    ThrowRequire(mf);
    res.push_back(mf);
  }
}
void CommReg::SendFields(UInt nfields, MEField<> *const *sfields, MEField<> *const *rfields)
{
  // Get all the subfields and send out over the specs.
  std::vector<_field*> sf;
  std::vector<_field*> rf;
  UInt obj_type = 0;
  for (UInt i = 0; i < nfields; i++) {
    if (&sfields[i]->GetMEFamily() != &rfields[i]->GetMEFamily())
      throw Ex() << "Send fields, me for " << sfields[i]->name()
                 << " does not match rfield:" << rfields[i]->name();
    sfields[i]->Getfields(sf);
    rfields[i]->Getfields(rf);
  }

  for (UInt j = 0; j < sf.size(); j++)
    obj_type |= sf[j]->GetAttr().GetType();

  ThrowRequire(sf.size() == rf.size());

  // Now send via the spec(s).
  // TODO: be smarter: select only the relevant spec to send each field.
  if ((obj_type & MeshObj::NODE)) node_rel.send_fields(sf.size(), &sf[0], &rf[0]);
  if ((obj_type & MeshObj::EDGE)) edge_rel.send_fields(sf.size(), &sf[0], &rf[0]);
  if ((obj_type & MeshObj::FACE)) face_rel.send_fields(sf.size(), &sf[0], &rf[0]);
  if ((obj_type & MeshObj::ELEMENT)) elem_rel.send_fields(sf.size(), &sf[0], &rf[0]);
}
int TpetraLinearSolver::solve(
  Teuchos::RCP<LinSys::Vector> sln,
  int & iters,
  double & finalResidNrm)
{
  ThrowRequire(!sln.is_null());

  const int status = 0;
  int whichNorm = 2;
  finalResidNrm = 0.0;

  if (activateMueLu_)
    setMueLu();
  else
    preconditioner_->compute();

  problem_->setProblem();
  solver_->solve();

  iters = solver_->getNumIters();
  residual_norm(whichNorm, sln, finalResidNrm);

  return status;
}
//
// Set or get the Gemini version: if the passed value is not UNKNOWN, set the
// version; either way, return the current version.
//
GeminiSCIVersion GetGeminiVersion(GeminiSCIVersion ver)
{
  // Default Gemini version.
  static GeminiSCIVersion GeminiSCIVersionValue = GEMINI_SCI_1;
  if (ver != GEMINI_SCI_UNKNOWN) {
    GeminiSCIVersionValue = ver;
  }
  ThrowRequire(GeminiSCIVersionValue != GEMINI_SCI_UNKNOWN);
  return GeminiSCIVersionValue;
}
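// Usage sketch: GetGeminiVersion doubles as setter and getter. Passing a
// concrete version records it; passing GEMINI_SCI_UNKNOWN only reads the
// current value. GEMINI_SCI_2 below is an assumed enumerator, for illustration.
void example_version_usage()
{
  GetGeminiVersion(GEMINI_SCI_2);                             // set
  GeminiSCIVersion v = GetGeminiVersion(GEMINI_SCI_UNKNOWN);  // get
  ThrowRequire(v == GEMINI_SCI_2);
}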
Partition *BucketRepository::get_or_create_partition(
  const EntityRank arg_entity_rank,
  const OrdinalVector &parts)
{
  enum { KEY_TMP_BUFFER_SIZE = 64 };

  ThrowRequireMsg(MetaData::get(m_mesh).check_rank(arg_entity_rank),
                  "Entity rank " << arg_entity_rank << " is invalid");

  if (m_buckets.empty()) {
    size_t entity_rank_count = m_mesh.mesh_meta_data().entity_rank_count();
    ThrowRequireMsg(entity_rank_count > 0,
                    "MetaData doesn't have any entity-ranks! Did you forget to "
                    "initialize MetaData before creating BulkData?");
    m_buckets.resize(entity_rank_count);
    m_partitions.resize(entity_rank_count);
    m_need_sync_from_partitions.resize(entity_rank_count, false);
  }

  std::vector<Partition *> & partitions = m_partitions[ arg_entity_rank ];

  const size_t part_count = parts.size();
  std::vector<unsigned> key(2 + part_count);

  //----------------------------------
  // Key layout:
  // { part_count + 1 , { part_ordinals } , partition_count }
  // Thus partition_count = key[ key[0] ]
  //
  // For the upper-bound search, use the maximum key for a bucket in the partition.
  const unsigned max = static_cast<unsigned>(-1);
  key[0] = part_count + 1;
  key[ key[0] ] = max;

  for (unsigned i = 0; i < part_count; ++i) {
    key[i+1] = parts[i];
  }

  // If the partition is found, the iterator will be right after it, thanks to
  // the trickiness above.
  const std::vector<Partition *>::iterator ik = lower_bound(partitions, &key[0]);

  const bool partition_exists =
    (ik != partitions.begin()) && raw_part_equal(ik[-1]->key(), &key[0]);

  if (partition_exists) {
    return ik[-1];
  }

  key[key[0]] = 0;

  Partition *partition = new Partition(m_mesh, this, arg_entity_rank, key);
  ThrowRequire(partition != NULL);

  m_need_sync_from_partitions[arg_entity_rank] = true;
  partitions.insert(ik, partition);

  return partition;
}
Partition *BucketRepository::get_or_create_partition(
  const EntityRank arg_entity_rank,
  const OrdinalVector &parts)
{
  enum { KEY_TMP_BUFFER_SIZE = 64 };

  TraceIf("stk::mesh::impl::BucketRepository::get_or_create_partition", LOG_BUCKET);

  ThrowRequireMsg(MetaData::get(m_mesh).check_rank(arg_entity_rank),
                  "Entity rank " << arg_entity_rank << " is invalid");

  // Somehow, this can happen.
  ThrowRequireMsg(!m_buckets.empty(),
                  "m_buckets is empty! Did you forget to initialize MetaData "
                  "before creating BulkData?");

  std::vector<Partition *> & partitions = m_partitions[ arg_entity_rank ];

  const size_t part_count = parts.size();
  std::vector<unsigned> key(2 + part_count);

  //----------------------------------
  // Key layout:
  // { part_count + 1 , { part_ordinals } , partition_count }
  // Thus partition_count = key[ key[0] ]
  //
  // For the upper-bound search, use the maximum key for a bucket in the partition.
  const unsigned max = static_cast<unsigned>(-1);
  key[0] = part_count + 1;
  key[ key[0] ] = max;

  for (unsigned i = 0; i < part_count; ++i) {
    key[i+1] = parts[i];
  }

  // If the partition is found, the iterator will be right after it, thanks to
  // the trickiness above.
  const std::vector<Partition *>::iterator ik = lower_bound(partitions, &key[0]);

  const bool partition_exists =
    (ik != partitions.begin()) && raw_part_equal(ik[-1]->key(), &key[0]);

  if (partition_exists) {
    return ik[-1];
  }

  key[key[0]] = 0;

  typedef tracking_allocator<Partition, PartitionTag> partition_allocator;
  Partition *partition = partition_allocator().allocate(1);
  ThrowRequire(partition != NULL);
  partition = new (partition) Partition(m_mesh, this, arg_entity_rank, key);

  m_need_sync_from_partitions[arg_entity_rank] = true;
  partitions.insert(ik, partition);

  return partition;
}
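// Standalone sketch (illustration only, not library code) of the sentinel-key
// lower_bound trick used in get_or_create_partition. A stored key is
// { part_count+1, ordinals..., partition_count }; probing with the count slot
// set to the maximum unsigned makes lower_bound land just *past* any stored
// key with identical ordinals, so the candidate match is always ik[-1].
#include <algorithm>
#include <cassert>
#include <climits>
#include <vector>

int main()
{
  typedef std::vector<unsigned> Key;
  std::vector<Key> keys;

  Key stored;
  stored.push_back(3);  // part_count + 1
  stored.push_back(3);  // part ordinal
  stored.push_back(7);  // part ordinal
  stored.push_back(0);  // partition_count slot
  keys.push_back(stored);

  Key probe(stored);
  probe[probe[0]] = UINT_MAX;  // sentinel in the count slot

  // Vectors compare lexicographically, so probe sorts after stored.
  std::vector<Key>::iterator ik = std::lower_bound(keys.begin(), keys.end(), probe);

  // The match, if any, sits immediately before the returned iterator;
  // compare everything except the count slot.
  const bool exists = ik != keys.begin() &&
                      std::equal(probe.begin(), probe.end() - 1, ik[-1].begin());
  assert(exists);
  return 0;
}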
void init(int nx, int ny, int nz, int in_polyOrder)
{
  auto aura = stk::mesh::BulkData::NO_AUTO_AURA;
  fixture = sierra::nalu::make_unique<stk::mesh::fixtures::HexFixture>(comm, nx, ny, nz, aura);

  meta = &fixture->m_meta;
  bulk = &fixture->m_bulk_data;
  surfSupPart = nullptr;
  surfSubPart = nullptr;

  topo = stk::topology::HEX_8;
  hexPart = fixture->m_elem_parts[0];
  ThrowRequire(hexPart != nullptr);

  dnvField = &meta->declare_field<ScalarFieldType>(stk::topology::NODE_RANK, "dual_nodal_volume");
  qField = &meta->declare_field<ScalarFieldType>(stk::topology::NODE_RANK, "scalar");
  dqdxField = &meta->declare_field<VectorFieldType>(stk::topology::NODE_RANK, "dqdx");
  coordField = &meta->declare_field<VectorFieldType>(stk::topology::NODE_RANK, "coords");
  intField = &meta->declare_field<ScalarIntFieldType>(stk::topology::NODE_RANK, "integer field");

  poly_order = in_polyOrder;

  surfSupPart = &meta->declare_part("surface_1", stk::topology::FACE_RANK);
  surfSubPart = &meta->declare_part_with_topology("surface_1_hex8_quad4", stk::topology::QUAD_4);
  meta->declare_part_subset(*surfSupPart, *surfSubPart);

  edgePart = &meta->declare_part("edge_part", stk::topology::EDGE_RANK);
  facePart = &meta->declare_part("face_part", stk::topology::FACE_RANK);

  baseParts = {hexPart, surfSupPart};

  setup_promotion();

  const double zeroDouble = 0.0;
  stk::mesh::put_field(*dnvField, meta->universal_part(), 1, &zeroDouble);
  stk::mesh::put_field(*qField, meta->universal_part(), 1, &zeroDouble);
  stk::mesh::put_field(*dqdxField, meta->universal_part(), nDim, &zeroDouble);
  stk::mesh::put_field(*coordField, meta->universal_part(), nDim, &zeroDouble);

  int zeroInt = 0;
  stk::mesh::put_field(*intField, meta->universal_part(), 1, &zeroInt);

  fixture->m_meta.commit();
  fixture->generate_mesh(stk::mesh::fixtures::FixedCartesianCoordinateMapping(nx, ny, nz, nx, ny, nz));

  stk::mesh::PartVector surfParts = {surfSubPart};
  stk::mesh::skin_mesh(*bulk, surfParts);

  const auto& meshCoordField = *static_cast<const VectorFieldType*>(meta->coordinate_field());

  // Skew the mesh by applying the linear map Q to the Cartesian coordinates.
  double Q[9] = {
    1, 2, -4,
    0, 1,  1,
    .5, 0, 1
  };

  sierra::nalu::bucket_loop(
    bulk->get_buckets(stk::topology::NODE_RANK, meta->universal_part()),
    [&](stk::mesh::Entity node) {
      double* coords = stk::mesh::field_data(*coordField, node);
      const double* model_coords = stk::mesh::field_data(meshCoordField, node);
      // Apply the 3x3 map Q once per node (no per-dimension loop is needed:
      // all three components are computed together).
      coords[0] = Q[0]*model_coords[0] + Q[1]*model_coords[1] + Q[2]*model_coords[2];
      coords[1] = Q[3]*model_coords[0] + Q[4]*model_coords[1] + Q[5]*model_coords[2];
      coords[2] = Q[6]*model_coords[0] + Q[7]*model_coords[1] + Q[8]*model_coords[2];
    });
}
inline void setupKeyholeMesh3D_case2(stk::mesh::BulkData& bulk)
{
  ThrowRequire(bulk.parallel_size() == 3);
  stk::io::fill_mesh("generated:3x1x3", bulk);
  stk::mesh::EntityProcVec elementProcChanges;
  if (bulk.parallel_rank() == 1) {
    elementProcChanges.push_back(stk::mesh::EntityProc(bulk.get_entity(stk::topology::ELEM_RANK,4),2));
    elementProcChanges.push_back(stk::mesh::EntityProc(bulk.get_entity(stk::topology::ELEM_RANK,6),2));
  }
  bulk.change_entity_owner(elementProcChanges);
  bulk.modification_begin();
  if (bulk.parallel_rank() == 1) {
    stk::mesh::Entity local_element5 = bulk.get_entity(stk::topology::ELEM_RANK,5);
    const bool delete_success = bulk.destroy_entity(local_element5);
    ThrowRequire(delete_success);
  }
  bulk.modification_end();
}
relation_stencil_ptr get_element_node_stencil(size_t spatial_dimension)
{
  ThrowRequire(spatial_dimension == 2 || spatial_dimension == 3);

  if (spatial_dimension == 3)
    return & element_node_stencil_3d;
  else // spatial_dimension == 2
    return & element_node_stencil_2d;
}
void Entity::erase_and_clear_if_empty(RelationIterator rel_itr)
{
  ThrowRequire(!internal_is_handled_generically(rel_itr->getRelationType()));

  RelationVector& aux_relations = m_fmwk_attrs->aux_relations;
  // Need to convert to a non-const iterator before erasing.
  aux_relations.erase(aux_relations.begin() + (rel_itr - aux_relations.begin()));

  if (aux_relations.empty()) {
    reserve_relation(0);
  }
}
//protected
void GameofLife::update_this_element(stk::mesh::Entity elem)
{
  if (stk::topology::QUAD_4 == m_elemType)
    update_quad(elem);
  else if (stk::topology::HEX_8 == m_elemType)
    update_hex(elem);
  else if (stk::topology::TRIANGLE_3 == m_elemType)
    update_tri(elem);
  else
    ThrowRequire(false); // fail on unsupported element types (ThrowRequire(true) would never fire)
}
UInt MEField<SField>::do_assign_elements(std::vector<MeshObj*> &elems)
{
  primaryfield->Reset();
  field_objs.clear();

  // Count the unique degrees of freedom. Set aside the objects for each sfield.
  for (UInt e = 0; e < elems.size(); ++e) {
    MeshObj &elem = *elems[e]; // index the elems argument, matching the loop bound
    MasterElementBase &meb = GetME(f, elem);

    // Loop dofs
    for (UInt df = 0; df < meb.num_functions(); df++) {
      const int *dd = meb.GetDofDescription(df);

      // Get the object.
      MeshObj *dofobj = NULL;
      if (dof2mtype(dd[0]) != MeshObj::ELEMENT) {
        MeshObjRelationList::const_iterator ri =
          MeshObjConn::find_relation(elem, dof2mtype(dd[0]), dd[1], MeshObj::USES);
        ThrowRequire(ri != elem.Relations.end());
        dofobj = ri->obj;
      } else dofobj = &elem;

      field_objs.insert(dofobj);
    } // num_func
  } // for e

  // Count of dofs is:
  UInt count = 0;
  std::set<MeshObj*>::iterator fsi = field_objs.begin(), fse = field_objs.end();
  for (; fsi != fse; ++fsi)
    count += primaryfield->dim(**fsi);

  return count;
}
//GoL!!!
void MeshBuilder::create_life_and_neighbor_fields(ScalarIntField*& lifeField,
                                                  ScalarIntField*& neighborField)
{
  ThrowRequire(!m_metaData.is_commit());
  lifeField = &m_metaData.declare_field<ScalarIntField>(
      stk::topology::ELEM_RANK, "lifeField");
  neighborField = &m_metaData.declare_field<ScalarIntField>(
      stk::topology::ELEM_RANK, "neighborField");

  int val = 0;
  stk::mesh::put_field(*lifeField, m_metaData.universal_part(), &val);
  stk::mesh::put_field(*neighborField, m_metaData.universal_part(), &val);
}
// element ids / proc_id:
// |-------|-------|-------|
// |       |       |       |
// |  1/0  |  4/2  |  7/2  |
// |       |       |       |
// |-------|-------|-------|
// |       |       |       |
// |  2/0  |  5/1  |  8/2  |
// |       |       |       |
// |-------|-------|-------|
// |       |       |       |
// |  3/0  |  6/2  |  9/2  |
// |       |       |       |
// |-------|-------|-------|

inline void setupKeyholeMesh3D_case1(stk::mesh::BulkData& bulk)
{
  ThrowRequire(bulk.parallel_size() == 3);
  stk::io::fill_mesh("generated:3x1x3", bulk);
  stk::mesh::EntityProcVec elementProcChanges;
  if (bulk.parallel_rank() == 1) {
    elementProcChanges.push_back(stk::mesh::EntityProc(bulk.get_entity(stk::topology::ELEM_RANK,4u),2));
    elementProcChanges.push_back(stk::mesh::EntityProc(bulk.get_entity(stk::topology::ELEM_RANK,6u),2));
  }
  bulk.change_entity_owner(elementProcChanges);
}
template <typename T>
void parallel_data_exchange_t(std::vector< std::vector<T> > &send_lists,
                              std::vector< std::vector<T> > &recv_lists,
                              MPI_Comm &mpi_communicator)
{
  //
  // Determine the number of processors involved in this communication.
  //
  const int msg_tag = 10242;
  int num_procs;
  MPI_Comm_size(mpi_communicator, &num_procs);
  int my_proc;
  MPI_Comm_rank(mpi_communicator, &my_proc);
  ThrowRequire((unsigned int) num_procs == send_lists.size() &&
               (unsigned int) num_procs == recv_lists.size());
  int class_size = sizeof(T);

  //
  // Determine the number of items each other processor will send to the current processor.
  //
  std::vector<int> global_number_to_send(num_procs);
  for (int iproc = 0; iproc < num_procs; ++iproc) {
    global_number_to_send[iproc] = send_lists[iproc].size();
  }
  std::vector<int> numToRecvFrom = ComputeReceiveList(global_number_to_send, mpi_communicator);

  //
  // Send the actual messages as raw byte streams.
  //
  std::vector<MPI_Request> recv_handles(num_procs);
  for (int iproc = 0; iproc < num_procs; ++iproc) {
    recv_lists[iproc].resize(numToRecvFrom[iproc]);
    if (recv_lists[iproc].size() > 0) {
      char* recv_buffer = (char*)&recv_lists[iproc][0];
      int recv_size = recv_lists[iproc].size() * class_size;
      MPI_Irecv(recv_buffer, recv_size, MPI_CHAR, iproc, msg_tag,
                mpi_communicator, &recv_handles[iproc]);
    }
  }

  // Make sure all receives are posted before any sends begin.
  MPI_Barrier(mpi_communicator);

  for (int iproc = 0; iproc < num_procs; ++iproc) {
    if (send_lists[iproc].size() > 0) {
      char* send_buffer = (char*)&send_lists[iproc][0];
      int send_size = send_lists[iproc].size() * class_size;
      MPI_Send(send_buffer, send_size, MPI_CHAR, iproc, msg_tag, mpi_communicator);
    }
  }

  for (int iproc = 0; iproc < num_procs; ++iproc) {
    if (recv_lists[iproc].size() > 0) {
      MPI_Status status;
      MPI_Wait(&recv_handles[iproc], &status);
    }
  }
}
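// Usage sketch for parallel_data_exchange_t (illustration only): each rank
// fills one vector of T per peer (empty vectors are fine), and on return
// recv_lists[p] holds what rank p sent here. T must be trivially copyable,
// since the exchange moves raw bytes. Assumes an initialized communicator and
// the ComputeReceiveList helper referenced above.
void example_exchange(MPI_Comm comm)
{
  int num_procs = 0, my_proc = 0;
  MPI_Comm_size(comm, &num_procs);
  MPI_Comm_rank(comm, &my_proc);

  std::vector< std::vector<double> > send_lists(num_procs), recv_lists(num_procs);

  // Send this rank's id (as a double) to every peer, including itself.
  for (int p = 0; p < num_procs; ++p)
    send_lists[p].push_back(static_cast<double>(my_proc));

  parallel_data_exchange_t(send_lists, recv_lists, comm);

  // On every rank, recv_lists[p] now holds exactly one entry equal to p.
}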
UInt _fieldStore::insert()
{
  ThrowRequire(is_committed);
  if (next_free == TABLE_FULL)
    Throw() << "Inserting into full freeStore!!";

  UInt idx = next_free;

  // Follow the free list to the next available slot.
  next_free = nfree[next_free*free_stride];

  num_objs++;

  return idx;
}
stk::mesh::Entity MeshBuilder::create_element(stk::mesh::EntityId elemId,
                                              const stk::mesh::EntityIdVector& nodeIds,
                                              int chosenProc)
{
  ThrowRequire(chosenProc < num_procs());
  stk::mesh::Entity elem = stk::mesh::Entity();
  if (m_procRank == chosenProc)
    elem = generate_element(elemId, nodeIds);
  else
    share_shared_nodes(nodeIds, chosenProc);
  m_usedElemIds.insert(elemId);
  return elem;
}
Bucket *BucketRepository::allocate_bucket(EntityRank arg_entity_rank,
                                          const std::vector<unsigned> & arg_key,
                                          size_t arg_capacity)
{
  BucketVector &bucket_vec = m_buckets[arg_entity_rank];
  const unsigned bucket_id = bucket_vec.size();

  Bucket * new_bucket = new Bucket(m_mesh, arg_entity_rank, arg_key, arg_capacity,
                                   m_connectivity_map, bucket_id);
  ThrowRequire(new_bucket != NULL);

  bucket_vec.push_back(new_bucket);
  m_need_sync_from_partitions[arg_entity_rank] = true;

  return new_bucket;
}
void Entity::set_relation_orientation(RelationIterator rel, unsigned orientation)
{
  const Relation::RelationType backRelType = back_relation_type(rel->getRelationType());

  Entity & meshObj = *rel->getMeshObj();
  Relation backRel_obj(const_cast<Entity*>(this), backRelType, rel->getOrdinal(), rel->getOrientation());
  RelationIterator backRel_itr = meshObj.find_relation(backRel_obj);

  const bool exists = backRel_itr != meshObj.internal_end_relation(backRelType) &&
                      *backRel_itr == backRel_obj;
  ThrowRequire(exists);

  // Allow clients to make changes to the orientation.
  // Orientations do not affect Relation ordering, so this is safe.
  const_cast<Relation*>(&*rel)->setOrientation(orientation);
  const_cast<Relation*>(&*backRel_itr)->setOrientation(orientation);
}
void Entity::internal_swap_in_real_entity(const int globalId)
{
  ThrowRequire(globalId > 0);
  m_fmwk_attrs->global_id = globalId;

  BulkData::get(*this).change_entity_id(globalId, *this);

  internal_verify_initialization_invariant();

  // Issue: Fmwk-managed relations (also called auxiliary relations) are not
  // being resorted here, so we have to use a different < operator when
  // looking for relations.
#ifndef NDEBUG
  internal_verify_meshobj_invariant();
#endif
}
void CommReg::HaloFields(UInt nfields, MEField<> **sfields)
{
  Trace __trace("CommReg::HaloFields(UInt nfields, MEField<> **sfields)");

  ThrowRequire(dom == ran);

  // Get all the subfields and send out over the specs.
  std::vector<_field*> sf;
  UInt obj_type = 0;
  for (UInt i = 0; i < nfields; i++) {
    sfields[i]->Getfields(sf);
  }

  for (UInt j = 0; j < sf.size(); j++)
    obj_type |= sf[j]->GetAttr().GetType();

  // Now send via the spec(s).
  // TODO: be smarter: select only the relevant spec to send each field.
  if (obj_type & MeshObj::NODE) node_rel.halo_fields(sf.size(), &sf[0]);
  if (obj_type & MeshObj::EDGE) edge_rel.halo_fields(sf.size(), &sf[0]);
  if (obj_type & MeshObj::FACE) face_rel.halo_fields(sf.size(), &sf[0]);
  // Doesn't really make sense for a halo, but send elements anyway.
  if (obj_type & MeshObj::ELEMENT) elem_rel.halo_fields(sf.size(), &sf[0]);
}
void CommReg::SwapOp(UInt nfields, MEField<> **sfields, int op)
{
  ThrowRequire(dom == ran);

  // Get all the subfields and send out over the specs.
  std::vector<_field*> sf;
  UInt obj_type = 0;
  for (UInt i = 0; i < nfields; i++) {
    sfields[i]->Getfields(sf);
  }

  for (UInt j = 0; j < sf.size(); j++)
    obj_type |= sf[j]->GetAttr().GetType();

  // Now send via the spec(s).
  // TODO: be smarter: select only the relevant spec to send each field.
  // The template keyword below is to make older versions of g++ happy.
  if (obj_type & MeshObj::NODE) node_rel.template swap_op<VTYPE,_field>(sf.size(), &sf[0], op);
  if (obj_type & MeshObj::EDGE) edge_rel.template swap_op<VTYPE,_field>(sf.size(), &sf[0], op);
  if (obj_type & MeshObj::FACE) face_rel.template swap_op<VTYPE,_field>(sf.size(), &sf[0], op);
  // Swap elements for consistency with the other ranks. (The original snippet
  // called elem_rel.halo_fields here, which looks like a copy-paste slip from
  // HaloFields; if the halo behavior was intended, restore that call.)
  if (obj_type & MeshObj::ELEMENT) elem_rel.template swap_op<VTYPE,_field>(sf.size(), &sf[0], op);
}
int check_connectivity(stk::mesh::EntityId elem1_id, stk::mesh::EntityId elem2_id)
{
  int side = -1;
  stk::mesh::Entity elem1 = m_bulk_data.get_entity(stk::topology::ELEM_RANK, elem1_id);
  stk::mesh::Entity elem2 = m_bulk_data.get_entity(stk::topology::ELEM_RANK, elem2_id);
  bool isElem1Local = m_bulk_data.is_valid(elem1) && m_bulk_data.bucket(elem1).owned();
  bool isElem2Local = m_bulk_data.is_valid(elem2) && m_bulk_data.bucket(elem2).owned();
  ThrowRequire(isElem1Local);

  if (isElem2Local) {
    side = check_local_connectivity(elem1, elem2);
  }
  else {
    side = check_remote_connectivity(elem1, elem2_id);
  }

  return side;
}