const EdgeSet Face::edgesWithVertex(const int& vertex) {
  EdgeSet eSet;
  if (vertex == vIndex[0]) {
    eSet.insert(Edge(vertex, vIndex[1]));
    eSet.insert(Edge(vertex, vIndex[2]));
  } else if (vertex == vIndex[1]) {
    eSet.insert(Edge(vertex, vIndex[0]));
    eSet.insert(Edge(vertex, vIndex[2]));
  } else if (vertex == vIndex[2]) {
    eSet.insert(Edge(vertex, vIndex[0]));
    eSet.insert(Edge(vertex, vIndex[1]));
  }
  return eSet;
}
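// The helper above assumes Edge and EdgeSet types that are not shown in the
// source. A minimal sketch of what they might look like (an assumption, not
// the original definitions): an undirected edge that normalizes its endpoint
// order so that std::set treats Edge(a, b) and Edge(b, a) as the same key.
#include <algorithm>
#include <set>

struct Edge {
  int v0, v1;
  Edge(int a, int b) : v0(std::min(a, b)), v1(std::max(a, b)) {}
  bool operator<(const Edge& other) const {
    return v0 < other.v0 || (v0 == other.v0 && v1 < other.v1);
  }
};
typedef std::set<Edge> EdgeSet;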
bool MSConnectivityScore::check_assignment(NNGraph &G, unsigned int node_handle,
                                           Assignment const &assignment,
                                           EdgeSet &picked) const {
  MSConnectivityRestraint::ExperimentalTree::Node const *node =
      tree_.get_node(node_handle);
  MSConnectivityRestraint::ExperimentalTree::Node::Label const &lb =
      node->get_label();
  Vector<Tuples> new_tuples;
  Ints empty_vector;
  for (unsigned int i = 0; i < lb.size(); ++i) {
    int prot_count = lb[i].second;
    unsigned int id = lb[i].first;
    while (new_tuples.size() < id)
      new_tuples.push_back(Tuples(empty_vector, 0));
    if (prot_count > 0) {
      if (!assignment[id].empty()) {
        Ints const &configuration = assignment[id].get_tuple();
        if (prot_count > int(configuration.size())) {
          IMP_THROW("Experimental tree is inconsistent", IMP::ValueException);
        }
        new_tuples.push_back(Tuples(configuration, prot_count));
      } else {
        IMP_THROW("Experimental tree is inconsistent", IMP::ValueException);
      }
    } else
      new_tuples.push_back(Tuples(empty_vector, 0));
  }
  while (new_tuples.size() <
         restraint_.particle_matrix_.get_number_of_classes())
    new_tuples.push_back(Tuples(empty_vector, 0));
  Assignment new_assignment(new_tuples);
  if (new_assignment.empty()) return false;
  do {
    NNGraph ng = build_subgraph_from_assignment(G, new_assignment);
    if (is_connected(ng)) {
      EdgeSet n_picked;
      bool good = true;
      for (unsigned int i = 0; i < node->get_number_of_children(); ++i) {
        unsigned int child_handle = node->get_child(i);
        if (!check_assignment(ng, child_handle, new_assignment, n_picked)) {
          good = false;
          break;
        }
      }
      if (good) {
        add_edges_to_set(ng, n_picked);
        picked.insert(n_picked.begin(), n_picked.end());
        return true;
      }
    }
  } while (new_assignment.next());
  return false;
}
EdgeSet MSConnectivityScore::get_all_edges(NNGraph &G) const {
  boost::property_map<NNGraph, boost::vertex_name_t>::type vertex_id =
      boost::get(boost::vertex_name, G);
  EdgeSet result;
  NNGraph::edge_iterator e, end;
  for (boost::tie(e, end) = edges(G); e != end; ++e) {
    unsigned int src = boost::get(vertex_id, source(*e, G));
    unsigned int dst = boost::get(vertex_id, target(*e, G));
    if (src > dst) std::swap(src, dst);
    result.insert(std::make_pair(src, dst));
  }
  return result;
}
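// Minimal self-contained illustration (types assumed here, not taken from the
// source above): swapping src/dst into (min, max) order before inserting into
// a std::set, as get_all_edges() does, makes both orientations of an
// undirected edge map to a single entry.
#include <algorithm>
#include <cassert>
#include <set>
#include <utility>

int main() {
  std::set<std::pair<unsigned int, unsigned int> > edge_set;
  unsigned int src = 7, dst = 3;
  if (src > dst) std::swap(src, dst);
  edge_set.insert(std::make_pair(src, dst));  // stored as (3, 7)
  edge_set.insert(std::make_pair(3u, 7u));    // duplicate, not stored again
  assert(edge_set.size() == 1);
  return 0;
}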
void AlgorithmMAXIMALMATCHING::FindMaximalMatching(Graph& G, EdgeSet& Matching,
                                                   VertexSet& MatchedVertices) {
  for (EdgeIterator e = G.BeginEdges(); e != G.EndEdges(); e++) {
    VertexSet Incident = (*e)->CollectIncidentVertices(1, 1, 1);
    if (SetHelper::IntersectionEmpty(MatchedVertices, Incident)) {
      Matching.insert(*e);
      SetHelper::DestructiveUnion(MatchedVertices, Incident);
    }
  }
}
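// Illustrative sketch only: the same greedy maximal-matching idea as above,
// written against a plain edge list instead of the Graph/EdgeIterator/SetHelper
// classes, which are not shown in the source. An edge is accepted exactly when
// neither endpoint has been matched yet, so no two accepted edges share a
// vertex and no further edge can be added afterwards.
#include <cstddef>
#include <set>
#include <utility>
#include <vector>

std::vector<std::pair<int, int> > greedyMaximalMatching(
    const std::vector<std::pair<int, int> >& edges) {
  std::vector<std::pair<int, int> > matching;
  std::set<int> matched;  // vertices already covered by the matching
  for (std::size_t i = 0; i < edges.size(); ++i) {
    const int u = edges[i].first;
    const int v = edges[i].second;
    if (matched.count(u) == 0 && matched.count(v) == 0) {
      matching.push_back(edges[i]);
      matched.insert(u);
      matched.insert(v);
    }
  }
  return matching;
}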
void MSConnectivityScore::add_edges_to_set(NNGraph &G,
                                           EdgeSet &edge_set) const {
  boost::property_map<NNGraph, boost::vertex_name_t>::type vertex_id =
      boost::get(boost::vertex_name, G);
  NNGraph ng(num_vertices(G));
  Ints vertex_id_to_n(restraint_.particle_matrix_.size(), -1);
  for (unsigned int i = 0; i < num_vertices(ng); ++i) {
    unsigned int id = boost::get(vertex_id, i);
    vertex_id_to_n[id] = i;
  }
  for (EdgeSet::iterator p = edge_set.begin(); p != edge_set.end(); ++p) {
    unsigned int i_from = vertex_id_to_n[(*p).first];
    unsigned int i_to = vertex_id_to_n[(*p).second];
    add_edge(i_from, i_to, ng);
  }
  Ints components(num_vertices(ng));
  int ncomp = boost::connected_components(ng, &components[0]);
  if (ncomp == 1) return;
  Vector<std::pair<unsigned int, unsigned int> > candidates;
  NNGraph::edge_iterator e, end;
  for (boost::tie(e, end) = edges(G); e != end; ++e) {
    unsigned int src = boost::get(vertex_id, source(*e, G));
    unsigned int dst = boost::get(vertex_id, target(*e, G));
    if (src > dst) std::swap(src, dst);
    std::pair<unsigned int, unsigned int> candidate = std::make_pair(src, dst);
    if (edge_set.find(candidate) == edge_set.end())
      candidates.push_back(candidate);
  }
  std::sort(candidates.begin(), candidates.end(),
            EdgeScoreComparator(restraint_));
  unsigned int idx = 0;
  while (ncomp > 1 && idx < candidates.size()) {
    unsigned int i_from = vertex_id_to_n[candidates[idx].first];
    unsigned int i_to = vertex_id_to_n[candidates[idx].second];
    if (components[i_from] != components[i_to]) {
      int old_comp = components[i_to];
      for (unsigned int i = 0; i < components.size(); ++i)
        if (components[i] == old_comp) components[i] = components[i_from];
      --ncomp;
      edge_set.insert(candidates[idx]);
    }
    ++idx;
  }
  BOOST_ASSERT(ncomp == 1);
}
void Delaunay::HandleEdge(const Vertex* p0, const Vertex* p1, EdgeSet& edges) {
  const Vertex* pv0(nullptr);
  const Vertex* pv1(nullptr);
  if (*p0 < *p1) {
    pv0 = p0;
    pv1 = p1;
  } else {
    pv0 = p1;
    pv1 = p0;
  }
  edges.insert(Edge(pv0, pv1));
}
void Network::clearEdges() {
  GraphMap::iterator neit = _node_to_edges.begin();
  EdgeSet edges;
  for (neit = _node_to_edges.begin(); neit != _node_to_edges.end(); neit++) {
    edges.insert(neit->second.begin(), neit->second.end());
    // Clear this set of edges:
    neit->second.clear();
  }
  // Now actually remove the edges:
  EdgeSet::iterator eit;
  for (eit = edges.begin(); eit != edges.end(); eit++) {
    decrementRefCount(*eit);
  }
}
bool MSConnectivityScore::perform_search(NNGraph &G, EdgeSet &picked) const {
  unsigned int root_handle = tree_.get_root();
  MSConnectivityRestraint::ExperimentalTree::Node const *node =
      tree_.get_node(root_handle);
  MSConnectivityRestraint::ExperimentalTree::Node::Label const &lb =
      node->get_label();
  Vector<Tuples> tuples;
  Ints empty_vector;
  for (unsigned int i = 0; i < lb.size(); ++i) {
    int prot_count = lb[i].second;
    unsigned int id = lb[i].first;
    while (tuples.size() < id) tuples.push_back(Tuples(empty_vector, 0));
    if (prot_count > 0) {
      tuples.push_back(
          Tuples(restraint_.particle_matrix_.get_all_proteins_in_class(id),
                 prot_count));
    } else
      tuples.push_back(Tuples(empty_vector, 0));
  }
  while (tuples.size() < restraint_.particle_matrix_.get_number_of_classes())
    tuples.push_back(Tuples(empty_vector, 0));
  Assignment assignment(tuples);
  if (assignment.empty()) return false;
  do {
    NNGraph ng = build_subgraph_from_assignment(G, assignment);
    if (is_connected(ng)) {
      EdgeSet n_picked;
      bool good = true;
      for (unsigned int i = 0; i < node->get_number_of_children(); ++i) {
        unsigned int child_handle = node->get_child(i);
        if (!check_assignment(ng, child_handle, assignment, n_picked)) {
          good = false;
          break;
        }
      }
      if (good) {
        add_edges_to_set(ng, n_picked);
        picked.insert(n_picked.begin(), n_picked.end());
        return true;
      }
    }
  } while (assignment.next());
  return false;
}
void NIXMLEdgesHandler::addRoundabout(const SUMOSAXAttributes& attrs) {
    if (attrs.hasAttribute(SUMO_ATTR_EDGES)) {
        std::vector<std::string> edgeIDs = attrs.getStringVector(SUMO_ATTR_EDGES);
        EdgeSet roundabout;
        for (std::vector<std::string>::iterator it = edgeIDs.begin(); it != edgeIDs.end(); ++it) {
            NBEdge* edge = myEdgeCont.retrieve(*it);
            if (edge == 0) {
                if (!myEdgeCont.wasIgnored(*it)) {
                    WRITE_ERROR("Unknown edge '" + (*it) + "' in roundabout");
                }
            } else {
                roundabout.insert(edge);
            }
        }
        myEdgeCont.addRoundabout(roundabout);
    } else {
        WRITE_ERROR("Empty edges in roundabout.");
    }
}
SEXP attribute_hidden do_provenance_graph(SEXP call, SEXP op, SEXP args,
                                          SEXP rho) {
#ifndef PROVENANCE_TRACKING
    Rf_error(_("provenance tracking not implemented in this build"));
    return 0;
#else
    int nargs = length(args);
    if (nargs != 1)
        Rf_error(_("%d arguments passed to 'provenance.graph' which requires 1"),
                 nargs);
    SEXP arg1 = CAR(args);
    if (!arg1 || arg1->sexptype() != STRSXP)
        Rf_error(_("invalid 'names' argument"));
    Environment* env = static_cast<Environment*>(rho);
    Provenance::Set provs;
    StringVector* sv = static_cast<StringVector*>(arg1);
    for (size_t i = 0; i < sv->size(); i++) {
        const char* name = (*sv)[i]->c_str();
        Symbol* sym = Symbol::obtain(name);
        Frame::Binding* bdg = env->findBinding(sym);
        if (!bdg)
            Rf_error(_("symbol '%s' not found"), name);
        else {
            Provenance* prov = const_cast<Provenance*>(bdg->provenance());
            if (!prov)
                Rf_warning(_("'%s' does not have provenance information"), name);
            else provs.insert(prov);
        }
    }
    Provenance::Set* ancestors = Provenance::ancestors(provs);

    GCStackRoot<ListVector> ans(CXXR_NEW(ListVector(7)));
    std::map<const Provenance*, unsigned int> ancestor_index;
    std::vector<std::pair<unsigned int, const RObject*> > xenogenous_bdgs;

    // Assemble information on graph nodes:
    {
        size_t n = ancestors->size();
        GCStackRoot<ListVector> symbols(CXXR_NEW(ListVector(n)));
        GCStackRoot<ListVector> commands(CXXR_NEW(ListVector(n)));
        GCStackRoot<RealVector> timestamps(CXXR_NEW(RealVector(n)));
        size_t i = 0;
        for (Provenance::Set::iterator it = ancestors->begin();
             it != ancestors->end(); ++it) {
            const Provenance* p = *it;
            (*symbols)[i] = const_cast<Symbol*>(p->symbol());
            (*commands)[i] = const_cast<RObject*>(p->command());
            (*timestamps)[i] = p->timestamp();
            ++i;
            ancestor_index[p] = i;
            if (p->isXenogenous())
                xenogenous_bdgs.push_back(std::make_pair(i, p->value()));
        }
        (*ans)[0] = symbols;
        (*ans)[1] = commands;
        (*ans)[2] = timestamps;
    }

    // Record information on xenogenous bindings:
    {
        size_t xn = xenogenous_bdgs.size();
        GCStackRoot<IntVector> xenogenous(CXXR_NEW(IntVector(xn)));
        GCStackRoot<ListVector> values(CXXR_NEW(ListVector(xn)));
        for (unsigned int i = 0; i < xn; ++i) {
            std::pair<unsigned int, const RObject*>& pr = xenogenous_bdgs[i];
            (*xenogenous)[i] = pr.first;
            (*values)[i] = const_cast<RObject*>(pr.second);
        }
        (*ans)[3] = xenogenous;
        (*ans)[4] = values;
    }

    // Assemble information on graph edges:
    {
        typedef std::set<std::pair<unsigned int, unsigned int> > EdgeSet;
        EdgeSet edges;
        for (Provenance::Set::iterator it = ancestors->begin();
             it != ancestors->end(); ++it) {
            const Provenance* child = *it;
            unsigned int child_idx = ancestor_index[child];
            std::pair<CommandChronicle::ParentVector::const_iterator,
                      CommandChronicle::ParentVector::const_iterator>
                pr = child->parents();
            for (CommandChronicle::ParentVector::const_iterator it = pr.first;
                 it != pr.second; ++it) {
                const Provenance* parent = *it;
                unsigned int parent_idx = ancestor_index[parent];
                edges.insert(std::make_pair(parent_idx, child_idx));
            }
        }
        size_t en = edges.size();
        GCStackRoot<IntVector> parents(CXXR_NEW(IntVector(en)));
        GCStackRoot<IntVector> children(CXXR_NEW(IntVector(en)));
        unsigned int i = 0;
        for (EdgeSet::const_iterator it = edges.begin(); it != edges.end();
             ++it) {
            const std::pair<unsigned int, unsigned int>& edge = *it;
            (*parents)[i] = edge.first;
            (*children)[i] = edge.second;
            ++i;
        }
        (*ans)[5] = parents;
        (*ans)[6] = children;
    }
    delete ancestors;
    return ans;
#endif  // PROVENANCE_TRACKING
}
bool NVMeshMender::MungeD3DX( const NVMeshMender::VAVector& input,
                              NVMeshMender::VAVector& output,
                              const float bSmoothCreaseAngleRadians,
                              const float* pTextureMatrix,
                              const Option _FixTangents,
                              const Option _FixCylindricalTexGen,
                              const Option _WeightNormalsByFaceSize )
{
    typedef std::map< std::string, unsigned int > Mapping;
    typedef std::set< Edge > EdgeSet;
    typedef std::vector< std::set< unsigned int > > IdenticalVertices;

    IdenticalVertices IdenticalVertices_;
    Mapping inmap;
    Mapping outmap;

    for ( unsigned int a = 0; a < input.size(); ++a )
    {
        inmap[ input[ a ].Name_ ] = a;
    }

    for ( unsigned int b = 0; b < output.size(); ++b )
    {
        output[ b ].intVector_.clear();
        output[ b ].floatVector_.clear();
        outmap[ output[ b ].Name_ ] = b;
    }

    for ( unsigned int c = 0; c < output.size(); ++c )
    {
        // for every output that has a match in the input, just copy it over
        Mapping::iterator in = inmap.find( output[ c ].Name_ );
        if ( in != inmap.end() )
        {
            // copy over existing indices, position, or whatever
            output[ c ] = input[ (*in).second ];
        }
    }

    Mapping::iterator want = outmap.find( "indices" );
    Mapping::iterator have = inmap.find( "indices" );
    if ( have == inmap.end() )
    {
        SetLastError( "Missing indices from input" );
        return false;
    }
    if ( want == outmap.end() )
    {
        SetLastError( "Missing indices from output" );
        return false;
    }

    // Go through all required outputs & generate as necessary
    want = outmap.find( "position" );
    have = inmap.find( "position" );
    if ( have == inmap.end() )
    {
        SetLastError( "Missing position from input" );
        return false;
    }
    if ( want == outmap.end() )
    {
        SetLastError( "Missing position from output" );
        return false;
    }

    Mapping::iterator pos = outmap.find( "position" );
    VertexAttribute::FloatVector& positions = output[ (*pos).second ].floatVector_;
    D3DXVECTOR3* pPositions = (D3DXVECTOR3*)( &( positions[ 0 ] ) );

    std::set< unsigned int > EmptySet;
    for ( unsigned int i = 0; i < positions.size(); i += 3 )
    {
        IdenticalVertices_.push_back( EmptySet );
    }

    // initialize all attributes
    for ( unsigned int att = 0; att < output.size(); ++att )
    {
        if ( output[ att ].Name_ != "indices" )
        {
            if ( output[ att ].floatVector_.size() == 0 )
            {
                output[ att ].floatVector_ = positions;
            }
        }
    }

    Mapping::iterator ind = outmap.find( "indices" );
    VertexAttribute::IntVector& indices = output[ (*ind).second ].intVector_;
    int* pIndices = (int*)( &( indices[ 0 ] ) );

    D3DXVECTOR3* pNormals = 0;
    D3DXVECTOR3* pBiNormals = 0;
    D3DXVECTOR3* pTangents = 0;
    D3DXVECTOR3* pTex0 = 0;

    bool bNeedNormals = false;
    bool bNeedTexCoords = false;
    bool bComputeTangentSpace = false;

    // see if texture coords are needed
    if ( outmap.find( "tex0" ) != outmap.end() )
    {
        bNeedTexCoords = true;
    }

    // see if tangent or binormal are needed
    if ( ( outmap.find( "binormal" ) != outmap.end() ) ||
         ( outmap.find( "tangent" ) != outmap.end() ) )
    {
        bComputeTangentSpace = true;
    }

    // see if normals are needed
    if ( outmap.find( "normal" ) != outmap.end() )
    {
        bNeedNormals = true;
    }

    // Compute normals.
    want = outmap.find( "normal" );
    have = inmap.find( "normal" );
    bool have_normals = ( inmap.find( "normal" ) != inmap.end() ) ? true : false;

    if ( bNeedNormals || bComputeTangentSpace )
    {
        // see if normals are provided
        if ( !have_normals )
        {
            // create normals
            if ( want == outmap.end() )
            {
                VertexAttribute norAtt;
                norAtt.Name_ = "normal";
                output.push_back( norAtt );
                outmap[ "normal" ] = output.size() - 1;
                want = outmap.find( "normal" );
            }

            // just initialize array so it's the correct size
            output[ (*want).second ].floatVector_ = positions;
            VertexAttribute::FloatVector& normals = output[ (*want).second ].floatVector_;

            // zero out normals
            for ( unsigned n = 0; n < positions.size(); ++n )
            {
                output[ (*want).second ].floatVector_[ n ] = 0.0f;
            }
            pNormals = (D3DXVECTOR3*)( &( output[ (*want).second ].floatVector_[0] ) );

            // calculate face normals for each face
            // & add its normal to vertex normal total
            for ( unsigned int t = 0; t < indices.size(); t += 3 )
            {
                D3DXVECTOR3 edge0, nedge0;
                D3DXVECTOR3 edge1, nedge1;
                edge0 = pPositions[ indices[ t + 1 ] ] - pPositions[ indices[ t + 0 ] ];
                edge1 = pPositions[ indices[ t + 2 ] ] - pPositions[ indices[ t + 0 ] ];
                D3DXVec3Normalize(&nedge0, &edge0);
                D3DXVec3Normalize(&nedge1, &edge1);

                D3DXVECTOR3 faceNormal;
                D3DXVec3Cross( &faceNormal, &nedge0, &nedge1 );
                if ( _WeightNormalsByFaceSize == DontWeightNormalsByFaceSize )
                {
                    // Renormalize face normal, so it's not weighted by face size
                    D3DXVec3Normalize( &faceNormal, &faceNormal );
                }
                else
                {
                    // Leave it as-is, to weight by face size naturally by the cross product result
                }
                pNormals[ indices[ t + 0 ] ] += faceNormal;
                pNormals[ indices[ t + 1 ] ] += faceNormal;
                pNormals[ indices[ t + 2 ] ] += faceNormal;
            }

            // Renormalize each vertex normal
            for ( unsigned int v = 0; v < output[ (*want).second ].floatVector_.size() / 3; ++v )
            {
                D3DXVec3Normalize( &( pNormals[ v ] ), &( pNormals[ v ] ) );
            }
        }
    }

    // Compute texture coordinates.
    if ( bNeedTexCoords || bComputeTangentSpace )
    {
        have = inmap.find( "tex0" );
        want = outmap.find("tex0");
        bool have_texcoords = (inmap.find( "tex0" ) != inmap.end()) ? true : false;

        // see if texcoords are provided
        if ( !have_texcoords )
        {
            // compute texcoords.
            if ( want == outmap.end() )
            {
                VertexAttribute texCoordAtt;
                texCoordAtt.Name_ = "tex0";
                output.push_back( texCoordAtt );
                outmap[ "tex0" ] = output.size() - 1;
                want = outmap.find( "tex0" );
            }

            // just initialize array so it's the correct size
            output[ (*want).second ].floatVector_ = positions;
            pTex0 = (D3DXVECTOR3*)( &(output[ (*want).second ].floatVector_[ 0 ]) );

            // Generate cylindrical coordinates
            // Find min and max positions for object bounding box
            D3DXVECTOR3 maxPosition( -FLT_MAX, -FLT_MAX, -FLT_MAX );
            D3DXVECTOR3 minPosition( FLT_MAX, FLT_MAX, FLT_MAX );

            // there are 1/3 as many vectors as floats
            const unsigned int theCount = static_cast<unsigned int>(positions.size() / 3.0f);
            for ( unsigned int i = 0; i < theCount; ++i )
            {
#ifndef __GNUC__
                maxPosition.x = max( maxPosition.x, pPositions[ i ].x );
                maxPosition.y = max( maxPosition.y, pPositions[ i ].y );
                maxPosition.z = max( maxPosition.z, pPositions[ i ].z );
                minPosition.x = min( minPosition.x, pPositions[ i ].x );
                minPosition.y = min( minPosition.y, pPositions[ i ].y );
                minPosition.z = min( minPosition.z, pPositions[ i ].z );
#endif
            }

            // Find major, minor and other axis for cylindrical texgen
            D3DXVECTOR3 delta = maxPosition - minPosition;
            delta.x = (float)fabs( delta.x );
            delta.y = (float)fabs( delta.y );
            delta.z = (float)fabs( delta.z );

            bool maxx,maxy,maxz;
            maxx = maxy = maxz = false;
            bool minz,miny,minx;
            minx = miny = minz = false;
            float deltaMajor;

            if ( ( delta.x >= delta.y ) && ( delta.x >= delta.z ) )
            {
                maxx = true;
                deltaMajor = delta.x;
                if ( delta.y > delta.z )
                {
                    minz = true;
                }
                else
                {
                    miny = true;
                }
            }
            else if ( ( delta.z >= delta.y ) && ( delta.z >= delta.x ) )
            {
                maxz = true;
                deltaMajor = delta.z;
                if ( delta.y > delta.x )
                {
                    minx = true;
                }
                else
                {
                    miny = true;
                }
            }
            else if ( ( delta.y >= delta.z ) && ( delta.y >= delta.x ) )
            {
                maxy = true;
                deltaMajor = delta.y;
                if ( delta.x > delta.z )
                {
                    minz = true;
                }
                else
                {
                    minx = true;
                }
            }

            for ( unsigned int p = 0; p < theCount; ++p )
            {
                // Find position relative to center of bounding box
                D3DXVECTOR3 texCoords = ( ( maxPosition + minPosition ) / 2.0f ) - pPositions[ p ];

                float Major, Minor, Other = 0.0f;
                if ( maxx )
                {
                    Major = texCoords.x;
                    if ( miny )
                    {
                        Minor = texCoords.y;
                        Other = texCoords.z;
                    }
                    else
                    {
                        Minor = texCoords.z;
                        Other = texCoords.y;
                    }
                }
                else if ( maxy )
                {
                    Major = texCoords.y;
                    if ( minx )
                    {
                        Minor = texCoords.x;
                        Other = texCoords.z;
                    }
                    else
                    {
                        Minor = texCoords.z;
                        Other = texCoords.x;
                    }
                }
                else if ( maxz )
                {
                    Major = texCoords.z;
                    if ( miny )
                    {
                        Minor = texCoords.y;
                        Other = texCoords.x;
                    }
                    else
                    {
                        Minor = texCoords.x;
                        Other = texCoords.y;
                    }
                }

                float longitude = 0.0f;

                // Prevent zero or near-zero from being passed into atan2
                if ( fabs( Other ) < 0.0001f )
                {
                    if ( Other >= 0.0f )
                    {
                        Other = 0.0001f;
                    }
                    else
                    {
                        Other = -0.0001f;
                    }
                }

                // perform cylindrical mapping onto object, and remap from -pi,pi to -1,1
                longitude = (float)(( atan2( Minor, Other ) ) / 3.141592654);
                texCoords.x = 0.5f * longitude + 0.5f;
                texCoords.y = (Major/deltaMajor) + 0.5f;
#ifndef __GNUC__
                texCoords.x = max( texCoords.x, 0.0f );
                texCoords.y = max( texCoords.y, 0.0f );
                texCoords.x = min( texCoords.x, 1.0f );
                texCoords.y = min( texCoords.y, 1.0f );
#endif
                pTex0[ p ].x = texCoords.x-0.25f;
                if ( pTex0[ p ].x < 0.0f ) pTex0[ p ].x += 1.0;
                pTex0[ p ].y = 1.0f-texCoords.y;
                pTex0[ p ].z = 1.0f;
            }
        }

        if ( _FixCylindricalTexGen == FixCylindricalTexGen )
        {
            Mapping::iterator texIter = outmap.find( "tex0" );
            VertexAttribute::FloatVector& texcoords = ( output[ (*texIter).second ].floatVector_ );
            const unsigned int theSize = indices.size();
            for ( unsigned int f = 0; f < theSize; f += 3 )
            {
                for ( int v = 0; v < 3; ++v )
                {
                    int start = f + v;
                    int end = start + 1;
                    if ( v == 2 )
                    {
                        end = f;
                    }

                    float dS = texcoords[ indices[ end ] * 3 + 0 ] - texcoords[ indices[ start ] * 3 + 0 ];
                    float newS = 0.0f;
                    bool bDoS = false;
                    unsigned int theOneToChange = start;
                    if ( fabs( dS ) >= 0.5f )
                    {
                        bDoS = true;
                        if ( texcoords[ indices[ start ] * 3 + 0 ] < texcoords[ indices[ end ] * 3 + 0 ] )
                        {
                            newS = texcoords[ indices[ start ]* 3 + 0 ] + 1.0f;
                        }
                        else
                        {
                            theOneToChange = end;
                            newS = texcoords[ indices[ end ] * 3 + 0 ] + 1.0f;
                        }
                    }

                    if ( bDoS == true )
                    {
                        unsigned int theNewIndex = texcoords.size() / 3;
                        // Duplicate every part of the vertex
                        for ( unsigned int att = 0; att < output.size(); ++att )
                        {
                            // No new indices are created, just vertex attributes
                            if ( output[ att ].Name_ != "indices" )
                            {
                                if ( output[ att ].Name_ == "tex0" )
                                {
                                    output[ att ].floatVector_.push_back( newS ); // y
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 1 ] ); // x
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 2 ] ); // z
                                }
                                else
                                {
                                    // *3 b/c we are looking up 3vectors in an array of floats
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 0 ] ); // x
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 1 ] ); // y
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 2 ] ); // z
                                }
                            }
                        }
                        IdenticalVertices_.push_back( EmptySet );
                        IdenticalVertices_[ indices[ theOneToChange ] ].insert( theNewIndex );
                        IdenticalVertices_[ theNewIndex ].insert( indices[ theOneToChange ] );

                        // point to where the new vertices will go
                        indices[ theOneToChange ] = theNewIndex;
                    }
                } // for v

                {
                    for ( int v = 0; v < 3; ++v )
                    {
                        int start = f + v;
                        int end = start + 1;
                        if ( v == 2 )
                        {
                            end = f;
                        }

                        float dT = texcoords[ indices[ end ] * 3 + 1 ] - texcoords[ indices[ start ] * 3 + 1 ];
                        float newT = 0.0f;
                        bool bDoT = false;
                        unsigned int theOneToChange = start;
                        if ( fabs( dT ) >= 0.5f )
                        {
                            bDoT = true;
                            if ( texcoords[ indices[ start ] * 3 + 1 ] < texcoords[ indices[ end ] * 3 + 1 ] )
                            {
                                newT = texcoords[ indices[ start ] * 3 + 1 ] + 1.0f;
                            }
                            else
                            {
                                theOneToChange = end;
                                newT = texcoords[ indices[ end ] * 3 + 1 ] + 1.0f;
                            }
                        }

                        if ( bDoT == true )
                        {
                            unsigned int theNewIndex = texcoords.size() / 3;
                            // Duplicate every part of the vertex
                            for ( unsigned int att = 0; att < output.size(); ++att )
                            {
                                // No new indices are created, just vertex attributes
                                if ( output[ att ].Name_ != "indices" )
                                {
                                    if ( output[ att ].Name_ == "tex0" )
                                    {
                                        output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 0 ] ); // x
                                        output[ att ].floatVector_.push_back( newT ); // y
                                        output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 2 ] ); // z
                                    }
                                    else
                                    {
                                        // *3 b/c we are looking up 3vectors in an array of floats
                                        output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 0 ] ); // x
                                        output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 1 ] ); // y
                                        output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ theOneToChange ] * 3 + 2 ] ); // z
                                    }
                                }
                            }
                            IdenticalVertices_.push_back( EmptySet );
                            IdenticalVertices_[ theNewIndex ].insert( indices[ theOneToChange ] );
                            IdenticalVertices_[ indices[ theOneToChange ] ].insert( theNewIndex );

                            // point to where the new vertices will go
                            indices[ theOneToChange ] = theNewIndex;
                        }
                    }
                } // for v
            } // for f
        } // if fix texgen

        D3DXMATRIX theMatrix( 1,0,0,0,
                              1,0,0,0,
                              1,0,0,0,
                              1,0,0,2);
        D3DXVECTOR3 v(1, 2, 3);
        D3DXVec3TransformCoord( &v, &v, &theMatrix);

        if ( pTextureMatrix )
        {
            Mapping::iterator texIter = outmap.find( "tex0" );
            VertexAttribute::FloatVector& texcoords = ( output[ (*texIter).second ].floatVector_ );

            // now apply matrix
            for ( unsigned int v = 0; v < texcoords.size(); v += 3 )
            {
                D3DXVECTOR3* pVector = (D3DXVECTOR3*)( &( texcoords[ v ] ) );
                D3DXMATRIX theMatrix( pTextureMatrix[ 0 ],  pTextureMatrix[ 1 ],  pTextureMatrix[ 2 ],  pTextureMatrix[ 3 ],
                                      pTextureMatrix[ 4 ],  pTextureMatrix[ 5 ],  pTextureMatrix[ 6 ],  pTextureMatrix[ 7 ],
                                      pTextureMatrix[ 8 ],  pTextureMatrix[ 9 ],  pTextureMatrix[ 10], pTextureMatrix[ 11],
                                      pTextureMatrix[ 12], pTextureMatrix[ 13], pTextureMatrix[ 14], pTextureMatrix[ 15] );
                D3DXVec3TransformCoord( pVector, pVector, (D3DXMATRIX*)(pTextureMatrix));
            }
        }
    }

    if ( bComputeTangentSpace )
    {
        Mapping::iterator texIter = outmap.find( "tex0" );
        D3DXVECTOR3* tex = (D3DXVECTOR3*)&( output[ (*texIter).second ].floatVector_[ 0 ] );
        typedef std::vector< D3DXVECTOR3 > VecVector;

        // create tangents
        want = outmap.find( "tangent" );
        if ( want == outmap.end() )
        {
            VertexAttribute tanAtt;
            tanAtt.Name_ = "tangent";
            output.push_back( tanAtt );
            outmap[ "tangent" ] = output.size() - 1;
            want = outmap.find( "tangent" );
        }

        // just initialize array so it's the correct size
        output[ (*want).second ].floatVector_ = positions;

        // create binormals
        want = outmap.find( "binormal" );
        if ( want == outmap.end() )
        {
            VertexAttribute binAtt;
            binAtt.Name_ = "binormal";
            output.push_back( binAtt );
            outmap[ "binormal" ] = output.size() - 1;
            want = outmap.find( "binormal" );
        }

        // just initialize array so it's the correct size
        output[ (*want).second ].floatVector_ = positions;

        // Create a vector of s,t and sxt for each face of the model
        VecVector sVector;
        VecVector tVector;
        VecVector sxtVector;

        EdgeSet Edges;

        const unsigned int theSize = indices.size();

        // for each face, calculate its S,T & SxT vector, & store its edges
        for ( unsigned int f = 0; f < theSize; f += 3 )
        {
            D3DXVECTOR3 edge0;
            D3DXVECTOR3 edge1;
            D3DXVECTOR3 s;
            D3DXVECTOR3 t;

            // grap position & tex coords again in case they were reallocated
            pPositions = (D3DXVECTOR3*)( &( positions[ 0 ] ) );
            tex = (D3DXVECTOR3*)&( output[ (*texIter).second ].floatVector_[ 0 ] );

            // create an edge out of x, s and t
            edge0.x = pPositions[ indices[ f + 1 ] ].x - pPositions[ indices[ f ] ].x;
            edge0.y = tex[ indices[ f + 1 ] ].x - tex[ indices[ f ] ].x;
            edge0.z = tex[ indices[ f + 1 ] ].y - tex[ indices[ f ] ].y;

            // create an edge out of x, s and t
            edge1.x = pPositions[ indices[ f + 2 ] ].x - pPositions[ indices[ f ] ].x;
            edge1.y = tex[ indices[ f + 2 ] ].x - tex[ indices[ f ] ].x;
            edge1.z = tex[ indices[ f + 2 ] ].y - tex[ indices[ f ] ].y;

            D3DXVECTOR3 sxt;
            D3DXVec3Cross( &sxt, &edge0, &edge1 );

            float a = sxt.x;
            float b = sxt.y;
            float c = sxt.z;

            float ds_dx = 0.0f;
            if ( fabs( a ) > 0.000001f )
            {
                ds_dx = - b / a;
            }

            float dt_dx = 0.0f;
            if ( fabs( a ) > 0.000001f )
            {
                dt_dx = - c / a;
            }

            // create an edge out of y, s and t
            edge0.x = pPositions[ indices[ f + 1 ] ].y - pPositions[ indices[ f ] ].y;
            edge0.y = tex[ indices[ f + 1 ] ].x - tex[ indices[ f ] ].x;
            edge0.z = tex[ indices[ f + 1 ] ].y - tex[ indices[ f ] ].y;

            // create an edge out of y, s and t
            edge1.x = pPositions[ indices[ f + 2 ] ].y - pPositions[ indices[ f ] ].y;
            edge1.y = tex[ indices[ f + 2 ] ].x - tex[ indices[ f ] ].x;
            edge1.z = tex[ indices[ f + 2 ] ].y - tex[ indices[ f ] ].y;

            D3DXVec3Cross( &sxt, &edge0, &edge1 );
            a = sxt.x;
            b = sxt.y;
            c = sxt.z;

            float ds_dy = 0.0f;
            if ( fabs( a ) > 0.000001f )
            {
                ds_dy = -b / a;
            }

            float dt_dy = 0.0f;
            if ( fabs( a ) > 0.000001f )
            {
                dt_dy = -c / a;
            }

            // create an edge out of z, s and t
            edge0.x = pPositions[ indices[ f + 1 ] ].z - pPositions[ indices[ f ] ].z;
            edge0.y = tex[ indices[ f + 1 ] ].x - tex[ indices[ f ] ].x;
            edge0.z = tex[ indices[ f + 1 ] ].y - tex[ indices[ f ] ].y;

            // create an edge out of z, s and t
            edge1.x = pPositions[ indices[ f + 2 ] ].z - pPositions[ indices[ f ] ].z;
            edge1.y = tex[ indices[ f + 2 ] ].x - tex[ indices[ f ] ].x;
            edge1.z = tex[ indices[ f + 2 ] ].y - tex[ indices[ f ] ].y;

            D3DXVec3Cross( &sxt, &edge0, &edge1 );
            a = sxt.x;
            b = sxt.y;
            c = sxt.z;

            float ds_dz = 0.0f;
            if ( fabs( a ) > 0.000001f )
            {
                ds_dz = -b / a;
            }

            float dt_dz = 0.0f;
            if ( fabs( a ) > 0.000001f )
            {
                dt_dz = -c / a;
            }

            // generate coordinate frame from the gradients
            s = D3DXVECTOR3( ds_dx, ds_dy, ds_dz );
            t = D3DXVECTOR3( dt_dx, dt_dy, dt_dz );

            D3DXVec3Normalize(&s, &s);
            D3DXVec3Normalize(&t, &t);

            D3DXVec3Cross( &sxt, &s, &t );
            D3DXVec3Normalize( &sxt, &sxt );

            // save vectors for this face
            sVector.push_back( s );
            tVector.push_back( t );
            sxtVector.push_back( sxt );

            if ( _FixTangents == FixTangents )
            {
                // Look for each edge of the triangle in the edge map, in order to find
                // a neighboring face along the edge
                for ( int e = 0; e < 3; ++e )
                {
                    Edge edge;
                    int start = f + e;
                    int end = start + 1;
                    if ( e == 2 )
                    {
                        end = f;
                    }
#ifndef __GNUC__
                    // order vertex indices ( low, high )
                    edge.v0 = min( indices[ start ], indices[ end ] );
                    edge.v1 = max( indices[ start ], indices[ end ] );
#endif
                    EdgeSet::iterator iter = Edges.find( edge );

                    // if we are the only triangle with this edge...
                    if ( iter == Edges.end() )
                    {
                        // ...then add us to the set of edges
                        edge.face = f / 3;
                        Edges.insert( edge );
                    }
                    else
                    {
                        // otherwise, check our neighbor's s,t & sxt vectors vs our own
                        const float sAgreement = D3DXVec3Dot( &s, &(sVector[ (*iter).face ]) );
                        const float tAgreement = D3DXVec3Dot( &t, &(tVector[ (*iter).face ]) );
                        const float sxtAgreement = D3DXVec3Dot( &sxt, &(sxtVector[ (*iter).face ]) );

                        // Check Radian angle split limit
                        const float epsilon = (float)cos( bSmoothCreaseAngleRadians );

                        // if the discontinuity in s, t, or sxt is greater than some epsilon,
                        // duplicate the vertex so it won't smooth with its neighbor anymore
                        if ( ( fabs( sAgreement ) < epsilon ) ||
                             ( fabs( tAgreement ) < epsilon ) ||
                             ( fabs( sxtAgreement ) < epsilon ) )
                        {
                            // Duplicate two vertices of this edge for this triangle only.
                            // This way the faces won't smooth with each other, thus
                            // preventing the tangent basis from becoming degenerate

                            // divide by 3 b/c vector is of floats and not vectors
                            const unsigned int theNewIndex = positions.size() / 3;

                            // Duplicate every part of the vertex
                            for ( unsigned int att = 0; att < output.size(); ++att )
                            {
                                // No new indices are created, just vertex attributes
                                if ( output[ att ].Name_ != "indices" )
                                {
                                    // *3 b/c we are looking up 3vectors in an array of floats
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ start ] * 3 + 0 ] ); // x
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ start ] * 3 + 1 ] ); // y
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ start ] * 3 + 2 ] ); // z

                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ end ] * 3 + 0 ] ); // x
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ end ] * 3 + 1 ] ); // y
                                    output[ att ].floatVector_.push_back( output[ att ].floatVector_[ indices[ end ] * 3 + 2 ] ); // z
                                }
                            }
                            IdenticalVertices_.push_back( EmptySet );
                            IdenticalVertices_.push_back( EmptySet );

                            // point to where the new vertices will go
                            indices[ start ] = theNewIndex;
                            indices[ end ] = theNewIndex + 1;
                        }

                        // Now that the vertices are duplicated, smoothing won't occur over this edge,
                        // because the two faces will sum their tangent basis vectors into separate indices
                    }
                }
            } // if fixtangents
        }

        // Allocate std::vector & Zero out average basis for tangent space smoothing
        VecVector avgS;
        VecVector avgT;
        for ( unsigned int p = 0; p < positions.size(); p += 3 )
        {
            avgS.push_back( D3DXVECTOR3( 0.0f, 0.0f, 0.0f ) ); // do S
            avgT.push_back( D3DXVECTOR3( 0.0f, 0.0f, 0.0f ) ); // now t
        }

        // go through faces and add up the bases for each vertex
        const int theFaceCount = indices.size() / 3;
        for ( unsigned int face = 0; face < (unsigned int)theFaceCount; ++face )
        {
            // sum bases, so we smooth the tangent space across edges
            avgS[ pIndices[ face * 3 ] ] += sVector[ face ];
            avgT[ pIndices[ face * 3 ] ] += tVector[ face ];
            avgS[ pIndices[ face * 3 + 1 ] ] += sVector[ face ];
            avgT[ pIndices[ face * 3 + 1 ] ] += tVector[ face ];
            avgS[ pIndices[ face * 3 + 2 ] ] += sVector[ face ];
            avgT[ pIndices[ face * 3 + 2 ] ] += tVector[ face ];
        }

        if ( _FixCylindricalTexGen == FixCylindricalTexGen )
        {
            for ( unsigned int v = 0; v < IdenticalVertices_.size(); ++v )
            {
                // go through each vertex & sum up it's true neighbors
                for ( std::set< unsigned int >::iterator iter = IdenticalVertices_[ v ].begin();
                      iter != IdenticalVertices_[ v ].end(); ++iter )
                {
                    avgS[ v ] += avgS[ *iter ];
                    avgT[ v ] += avgT[ *iter ];
                }
            }
        }

        Mapping::iterator tangent = outmap.find( "tangent" );
        Mapping::iterator binormal = outmap.find( "binormal" );

        // now renormalize
        for ( unsigned int b = 0; b < positions.size(); b += 3 )
        {
            D3DXVECTOR3* vecTangent = (D3DXVECTOR3*)&output[ (*tangent).second ].floatVector_[ b ];
            D3DXVECTOR3* vecBinormals = (D3DXVECTOR3*)&output[ (*binormal).second ].floatVector_[ b ];

            D3DXVec3Normalize( vecTangent, &avgS[ b / 3 ] );   // s
            D3DXVec3Normalize( vecBinormals, &avgT[ b / 3 ] ); // T
        }
    }

    // At this point, tex coords, normals, binormals and tangents should be generated if necessary,
    // and other attributes are simply copied as available
    return true;
}
PoseGraph::EdgeSet PoseGraph::allEdges () const {
  EdgeSet edges;
  BOOST_FOREACH (const EdgeMap::value_type& e, _edgeMap) {
    edges.insert(e.first);
  }
  return edges;
}
void NIImporter_SUMO::_loadNetwork(OptionsCont& oc) {
    // check whether the option is set (properly)
    if (!oc.isUsableFileList("sumo-net-file")) {
        return;
    }
    // parse file(s)
    std::vector<std::string> files = oc.getStringVector("sumo-net-file");
    for (std::vector<std::string>::const_iterator file = files.begin(); file != files.end(); ++file) {
        if (!FileHelpers::isReadable(*file)) {
            WRITE_ERROR("Could not open sumo-net-file '" + *file + "'.");
            return;
        }
        setFileName(*file);
        PROGRESS_BEGIN_MESSAGE("Parsing sumo-net from '" + *file + "'");
        XMLSubSys::runParser(*this, *file, true);
        PROGRESS_DONE_MESSAGE();
    }
    // build edges
    for (std::map<std::string, EdgeAttrs*>::const_iterator i = myEdges.begin(); i != myEdges.end(); ++i) {
        EdgeAttrs* ed = (*i).second;
        // skip internal edges
        if (ed->func == EDGEFUNC_INTERNAL || ed->func == EDGEFUNC_CROSSING || ed->func == EDGEFUNC_WALKINGAREA) {
            continue;
        }
        // get and check the nodes
        NBNode* from = myNodeCont.retrieve(ed->fromNode);
        NBNode* to = myNodeCont.retrieve(ed->toNode);
        if (from == 0) {
            WRITE_ERROR("Edge's '" + ed->id + "' from-node '" + ed->fromNode + "' is not known.");
            continue;
        }
        if (to == 0) {
            WRITE_ERROR("Edge's '" + ed->id + "' to-node '" + ed->toNode + "' is not known.");
            continue;
        }
        // edge shape
        PositionVector geom;
        if (ed->shape.size() > 0) {
            geom = ed->shape;
        } else {
            // either the edge has default shape consisting only of the two node
            // positions or we have a legacy network
            geom = reconstructEdgeShape(ed, from->getPosition(), to->getPosition());
        }
        // build and insert the edge
        NBEdge* e = new NBEdge(ed->id, from, to,
                               ed->type, ed->maxSpeed,
                               (unsigned int) ed->lanes.size(),
                               ed->priority, NBEdge::UNSPECIFIED_WIDTH, NBEdge::UNSPECIFIED_OFFSET,
                               geom, ed->streetName, "", ed->lsf, true); // always use tryIgnoreNodePositions to keep original shape
        e->setLoadedLength(ed->length);
        if (!myNetBuilder.getEdgeCont().insert(e)) {
            WRITE_ERROR("Could not insert edge '" + ed->id + "'.");
            delete e;
            continue;
        }
        ed->builtEdge = myNetBuilder.getEdgeCont().retrieve(ed->id);
    }
    // assign further lane attributes (edges are built)
    for (std::map<std::string, EdgeAttrs*>::const_iterator i = myEdges.begin(); i != myEdges.end(); ++i) {
        EdgeAttrs* ed = (*i).second;
        NBEdge* nbe = ed->builtEdge;
        if (nbe == 0) { // inner edge or removed by explicit list, vclass, ...
            continue;
        }
        for (unsigned int fromLaneIndex = 0; fromLaneIndex < (unsigned int) ed->lanes.size(); ++fromLaneIndex) {
            LaneAttrs* lane = ed->lanes[fromLaneIndex];
            // connections
            const std::vector<Connection>& connections = lane->connections;
            for (std::vector<Connection>::const_iterator c_it = connections.begin(); c_it != connections.end(); c_it++) {
                const Connection& c = *c_it;
                if (myEdges.count(c.toEdgeID) == 0) {
                    WRITE_ERROR("Unknown edge '" + c.toEdgeID + "' given in connection.");
                    continue;
                }
                NBEdge* toEdge = myEdges[c.toEdgeID]->builtEdge;
                if (toEdge == 0) { // removed by explicit list, vclass, ...
                    continue;
                }
                if (nbe->hasConnectionTo(toEdge, c.toLaneIdx)) {
                    WRITE_WARNING("Target lane '" + toEdge->getLaneID(c.toLaneIdx) + "' has multiple connections from '" + nbe->getID() + "'.");
                }
                nbe->addLane2LaneConnection(
                    fromLaneIndex, toEdge, c.toLaneIdx, NBEdge::L2L_VALIDATED,
                    true, c.mayDefinitelyPass, c.keepClear, c.contPos);

                // maybe we have a tls-controlled connection
                if (c.tlID != "" && myRailSignals.count(c.tlID) == 0) {
                    const std::map<std::string, NBTrafficLightDefinition*>& programs = myTLLCont.getPrograms(c.tlID);
                    if (programs.size() > 0) {
                        std::map<std::string, NBTrafficLightDefinition*>::const_iterator it;
                        for (it = programs.begin(); it != programs.end(); it++) {
                            NBLoadedSUMOTLDef* tlDef = dynamic_cast<NBLoadedSUMOTLDef*>(it->second);
                            if (tlDef) {
                                tlDef->addConnection(nbe, toEdge, fromLaneIndex, c.toLaneIdx, c.tlLinkNo);
                            } else {
                                throw ProcessError("Corrupt traffic light definition '" + c.tlID + "' (program '" + it->first + "')");
                            }
                        }
                    } else {
                        WRITE_ERROR("The traffic light '" + c.tlID + "' is not known.");
                    }
                }
            }
            // allow/disallow XXX preferred
            nbe->setPermissions(parseVehicleClasses(lane->allow, lane->disallow), fromLaneIndex);
            // width, offset
            nbe->setLaneWidth(fromLaneIndex, lane->width);
            nbe->setEndOffset(fromLaneIndex, lane->endOffset);
            nbe->setSpeed(fromLaneIndex, lane->maxSpeed);
        }
        nbe->declareConnectionsAsLoaded();
        if (!nbe->hasLaneSpecificWidth() && nbe->getLanes()[0].width != NBEdge::UNSPECIFIED_WIDTH) {
            nbe->setLaneWidth(-1, nbe->getLaneWidth(0));
        }
        if (!nbe->hasLaneSpecificEndOffset() && nbe->getEndOffset(0) != NBEdge::UNSPECIFIED_OFFSET) {
            nbe->setEndOffset(-1, nbe->getEndOffset(0));
        }
    }
    // insert loaded prohibitions
    for (std::vector<Prohibition>::const_iterator it = myProhibitions.begin(); it != myProhibitions.end(); it++) {
        NBEdge* prohibitedFrom = myEdges[it->prohibitedFrom]->builtEdge;
        NBEdge* prohibitedTo = myEdges[it->prohibitedTo]->builtEdge;
        NBEdge* prohibitorFrom = myEdges[it->prohibitorFrom]->builtEdge;
        NBEdge* prohibitorTo = myEdges[it->prohibitorTo]->builtEdge;
        if (prohibitedFrom == 0) {
            WRITE_WARNING("Edge '" + it->prohibitedFrom + "' in prohibition was not built");
        } else if (prohibitedTo == 0) {
            WRITE_WARNING("Edge '" + it->prohibitedTo + "' in prohibition was not built");
        } else if (prohibitorFrom == 0) {
            WRITE_WARNING("Edge '" + it->prohibitorFrom + "' in prohibition was not built");
        } else if (prohibitorTo == 0) {
            WRITE_WARNING("Edge '" + it->prohibitorTo + "' in prohibition was not built");
        } else {
            NBNode* n = prohibitedFrom->getToNode();
            n->addSortedLinkFoes(
                NBConnection(prohibitorFrom, prohibitorTo),
                NBConnection(prohibitedFrom, prohibitedTo));
        }
    }
    if (!myHaveSeenInternalEdge) {
        myNetBuilder.haveLoadedNetworkWithoutInternalEdges();
    }
    if (oc.isDefault("lefthand")) {
        oc.set("lefthand", toString(myAmLefthand));
    }
    if (oc.isDefault("junctions.corner-detail")) {
        oc.set("junctions.corner-detail", toString(myCornerDetail));
    }
    if (oc.isDefault("junctions.internal-link-detail") && myLinkDetail > 0) {
        oc.set("junctions.internal-link-detail", toString(myLinkDetail));
    }
    if (!deprecatedVehicleClassesSeen.empty()) {
        WRITE_WARNING("Deprecated vehicle class(es) '" + toString(deprecatedVehicleClassesSeen) + "' in input network.");
        deprecatedVehicleClassesSeen.clear();
    }
    // add loaded crossings
    if (!oc.getBool("no-internal-links")) {
        for (std::map<std::string, std::vector<Crossing> >::const_iterator it = myPedestrianCrossings.begin(); it != myPedestrianCrossings.end(); ++it) {
            NBNode* node = myNodeCont.retrieve((*it).first);
            for (std::vector<Crossing>::const_iterator it_c = (*it).second.begin(); it_c != (*it).second.end(); ++it_c) {
                const Crossing& crossing = (*it_c);
                EdgeVector edges;
                for (std::vector<std::string>::const_iterator it_e = crossing.crossingEdges.begin(); it_e != crossing.crossingEdges.end(); ++it_e) {
                    NBEdge* edge = myNetBuilder.getEdgeCont().retrieve(*it_e);
                    // edge might have been removed due to options
                    if (edge != 0) {
                        edges.push_back(edge);
                    }
                }
                if (edges.size() > 0) {
                    node->addCrossing(edges, crossing.width, crossing.priority, true);
                }
            }
        }
    }
    // add roundabouts
    for (std::vector<std::vector<std::string> >::const_iterator it = myRoundabouts.begin(); it != myRoundabouts.end(); ++it) {
        EdgeSet roundabout;
        for (std::vector<std::string>::const_iterator it_r = it->begin(); it_r != it->end(); ++it_r) {
            NBEdge* edge = myNetBuilder.getEdgeCont().retrieve(*it_r);
            if (edge == 0) {
                if (!myNetBuilder.getEdgeCont().wasIgnored(*it_r)) {
                    WRITE_ERROR("Unknown edge '" + (*it_r) + "' in roundabout");
                }
            } else {
                roundabout.insert(edge);
            }
        }
        myNetBuilder.getEdgeCont().addRoundabout(roundabout);
    }
}