// Return the set of leaf types reachable downward from t in the type
// hierarchy. Results are memoised in leafNodes; an unknown type yields a
// reference to a static empty set.
const set<const TypeRef *> & TypeHierarchy::leaves(PTypeRef & t)
{
    static Nodes empty;

    // Fast path: answer already computed and cached.
    GI gi = leafNodes.find(&t);
    if (gi != leafNodes.end())
        return gi->second;

    gi = downGraph.find(&t);
    if (gi == downGraph.end())
        return empty;

    // Canonical key stored in downGraph — saved now because gi is reused
    // below (the original re-ran downGraph.find(&t) twice at the end).
    const TypeRef * key = gi->first;

    // All types reachable downward from t.
    Nodes ns;
    PTypeRef pt(0);
    closure(downGraph, gi, ns, gi, &pt);

    // A leaf is a reachable type whose own downward closure is empty.
    Nodes ms;
    for (Nodes::const_iterator i = ns.begin(); i != ns.end(); ++i) {
        Nodes xs;
        gi = downGraph.find(*i);
        closure(downGraph, gi, xs, gi, &pt);
        if (xs.empty())
            ms.insert(*i);
    }

    // Cache and return with a single map lookup.
    Nodes & cached = leafNodes[key];
    cached.swap(ms);
    return cached;
}
// Grow the depth-search space until at least one acceptable node is found
// (or the searcher is exhausted), and return those nodes. Returned nodes
// are recorded in rejectedNodes so subsequent calls yield fresh candidates.
Nodes NetworkManager::getNodeCandidates()
{
    Nodes result;
    while ( !depthSearcher->isExhausted() ) {
        Elements elements = depthSearcher->getElementCandidates();
        cerr << "[NM]\tDepth search returned " << elements.size() << endl;
        for ( Elements::iterator i = elements.begin(); i != elements.end(); ++i ) {
            Element * element = *i;
            // Only node elements are candidates.
            if ( ! element->isNode() )
                continue;
            // Named cast instead of the original C-style cast.
            Node * node = static_cast<Node *>( element );
            // Skip nodes already handed out by a previous call.
            if ( rejectedNodes.find(node) != rejectedNodes.end() )
                continue;
            result.insert(node);
        }
        depthSearcher->increaseSearchSpace();
        if ( !result.empty() )
            break;
    }
    // Remember what we returned so the next call produces new nodes.
    rejectedNodes.insert(result.begin(), result.end());
    cerr << "[NM]\tPrepared " << result.size() << " candidates" << endl;
    return result;
}
void ObjectCM::push( const uint128_t& groupID, const uint128_t& typeID, const Nodes& nodes ) { LBASSERT( _object ); LBASSERT( !nodes.empty( )); if( nodes.empty( )) return; ObjectInstanceDataOStream os( this ); os.enablePush( getVersion(), nodes ); _object->getInstanceData( os ); // Send push notification to remote cmd thread while connections are valid OCommand( os.getConnections(), CMD_NODE_OBJECT_PUSH ) << _object->getID() << groupID << typeID; os.disable(); // handled by remote recv thread }
// Read a scene graph node from an archive. If the archive names a master
// file, that file is read; otherwise every file in the archive is read and
// the resulting nodes are returned directly (one node) or under a new
// osg::Group (several nodes).
osgDB::ReaderWriter::ReadResult readNodeFromArchive(osgDB::Archive& archive, const osgDB::ReaderWriter::Options* options) const
{
    typedef std::list< osg::ref_ptr<osg::Node> > NodeList;

    osgDB::ReaderWriter::ReadResult result(osgDB::ReaderWriter::ReadResult::FILE_NOT_FOUND);

    // Prefer the archive's designated master file when one is present.
    if (!archive.getMasterFileName().empty())
    {
        result = archive.readNode(archive.getMasterFileName(), options);
        return result;
    }

    // No master file: enumerate and read every file in the archive.
    osgDB::Archive::FileNameList fileNames;
    if (!archive.getFileNames(fileNames))
        return result;

    NodeList loaded;
    for(osgDB::Archive::FileNameList::iterator itr = fileNames.begin();
        itr != fileNames.end();
        ++itr)
    {
        result = archive.readNode(*itr, options);
        if (result.validNode())
            loaded.push_back(result.getNode());
    }

    // Nothing readable: fall through with the last read result.
    if (loaded.empty())
        return result;

    if (loaded.size()==1)
    {
        // A single node is returned directly.
        result = osgDB::ReaderWriter::ReadResult(loaded.front().get());
    }
    else
    {
        // Several nodes are gathered under a common group.
        osg::ref_ptr<osg::Group> group = new osg::Group;
        for(NodeList::iterator itr = loaded.begin(); itr != loaded.end(); ++itr)
        {
            group->addChild(itr->get());
        }
        result = osgDB::ReaderWriter::ReadResult(group.get());
    }
    return result;
}
void Object::notifyDetach() { if( !isMaster( )) return; // unmap slaves const Nodes slaves = impl_->cm->getSlaveNodes(); if( slaves.empty( )) return; LBWARN << slaves.size() << " slaves subscribed during deregisterObject of " << lunchbox::className( this ) << " id " << impl_->id << std::endl; for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i ) { NodePtr node = *i; node->send( CMD_NODE_UNMAP_OBJECT ) << impl_->id; } }
void Object::notifyDetach() { if( !isMaster( )) return; // unmap slaves const Nodes slaves = _cm->getSlaveNodes(); if( slaves.empty( )) return; EQWARN << slaves.size() << " slaves subscribed during deregisterObject of " << base::className( this ) << " id " << _id << std::endl; NodeUnmapObjectPacket packet; packet.objectID = _id; for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i ) { NodePtr node = *i; node->send( packet ); } }
// Perform the orthogonal routing, re-routing as necessary so that no node
// ends up as a "pseudoleaf" (a node all of whose edges depart from a single
// side). Optionally writes an SVG snapshot of each routing attempt via the
// given logger.
void LeaflessOrthoRouter::route(Logger *logger) {
    // Set up for logging.
    unsigned ln = logger != nullptr ? logger->nextLoggingIndex : 0;
    std::function<void(unsigned)> log = [ln, this, logger](unsigned n)->void{
        if (logger!=nullptr) {
            std::string fn = string_format("%02d_%02d_routing_attempt", ln, n);
            std::string path = logger->writeFullPathForFilename(fn);
            this->m_ra.router.outputInstanceToSVG(path);
        }
    };
    /*
     * We may need to route multiple times to ensure that at least two sides of each node are being used,
     * but in theory we should never have to route more than 4n+1 times.
     *
     * Proof: We always begin with an initial routing. We want to show it could be necessary to re-route
     * at most 4n times.
     *
     * In order to see this, we first argue that the worst-case-scenario for any single node is that it
     * require four re-routings. Consider then some node u all of whose edges have been routed to one side, s0. We
     * then pick some edge e0 incident to u, say that it may not connect to side s0, and we re-route for the first time.
     *
     * While unlikely, it could be that, for whatever reason, now all edges incident to node u are routed to some other side,
     * s1. We then pick some edge e1 (could be the same or different from e0), forbid it from connecting to
     * side s1, and re-route for a second time.
     *
     * Again, for whatever reason, all edges could now connect to one
     * of the two remaining sides, s2. Continuing in this way, we could be led to re-route a third and a fourth time. But
     * prior to the fourth re-routing it would be the case that for each side si of node u, there was
     * some edge ei incident to u that had been forbidden from connecting on side si. Therefore on the fourth
     * re-routing it would be impossible for all edges to connect on any single side of u.
     *
     * So much for the case of a single node. However, in again a highly unlikely worst-case-scenario, it could be
     * that during the first five routings no other node besides u was a pseudoleaf (had all edges routed to one side),
     * but after the fifth some other node became a pseudoleaf. In this way we could be led to do four re-routings
     * for each node in the graph. QED
     *
     * In practice, it would probably be very rare for more than two routings to ever be necessary. For this
     * requires the odd circumstance, considered in the proof, that forbidding one edge from connecting on a
     * given side somehow results in /all/ edges incident at that node migrating to some other, single side.
     *
     * In order that our theory be tested, we use an infinite loop with counter and assertion, instead
     * of a mere for-loop which would fail silently.
     */
    size_t numRoutings = 0;
    size_t maxRoutings = 4*m_n + 1;
    while (true) {
        m_ra.router.processTransaction();
        log(++numRoutings);
        // As proved in the comments above, at most 4n+1 routings should ever
        // be needed; exceeding maxRoutings indicates a bug.
        COLA_ASSERT(numRoutings <= maxRoutings);
        // For testing purposes, we may want to record the results of
        // each routing attempt.
        if (recordEachAttempt) {
            m_ra.recordRoutes(true);
            routingAttemptTglf.push_back(m_graph->writeTglf());
        }
        // Are there any nodes having all of their edges routed
        // out of just one side? This is what we want to prevent.
        // Such nodes would become leaves in a planarisation, so we
        // call them "pseudoleaves".
        Nodes pseudoLeaves;
        // For each such Node (if any), there is a sole direction in which
        // all connectors depart. We keep track of those directions as we work.
        vector<CardinalDir> soleDepartureDirecs;
        // Check each Node in the Graph:
        for (auto p : m_graph->getNodeLookup()) {
            Node_SP &u = p.second;
            const EdgesById edgeLookup = u->getEdgeLookup();
            // Sanity check, that Node u is not an actual leaf:
            COLA_ASSERT(edgeLookup.size() > 1);
            // Determine the departure direction from Node u for its first Edge.
            auto edge_it = edgeLookup.cbegin();
            CardinalDir d0 = departureDir((*edge_it).second, u);
            // If two or more directions have been used, some edge must depart
            // in a different direction than this one. (For if all the rest equal
            // this first one, then all are the same.)
            bool isPseudoLeaf = true;
            for (auto jt = ++edge_it; jt != edgeLookup.cend(); ++jt) {
                CardinalDir d1 = departureDir((*jt).second, u);
                if (d1 != d0) {
                    isPseudoLeaf = false;
                    break;
                }
            }
            if (isPseudoLeaf) {
                pseudoLeaves.push_back(u);
                soleDepartureDirecs.push_back(d0);
            }
        }
        // Are there any pseudoleaves?
        if (pseudoLeaves.empty()) {
            // If there are none, then we're done routing, and can break out of the outer while loop.
            break;
        } else {
            // But if there are still pseudoleaves, then we need to work on them.
            for (size_t i = 0; i < pseudoLeaves.size(); ++i) {
                // Get the Node and the direction in which all connectors currently depart from it.
                Node_SP u = pseudoLeaves[i];
                CardinalDir d0 = soleDepartureDirecs[i];
                // Now among all Edges incident at this Node we must select one that is still
                // allowed to depart in at least two directions (hence at least one different
                // from d0), and remove direction d0 from its list of allowed directions.
                //
                // If possible, we would like to choose such an Edge e such that if v is the Node
                // at the other end, then the predominant cardinal direction from Node u to Node v
                // be different than d0; for such would seem a suitable Edge to depart in a different
                // direction. However, such an Edge may not exist. In that case, we will just take
                // any one.
                Edge_SP candidate;
                for (auto p : u->getEdgeLookup()) {
                    Edge_SP &e = p.second;
                    // If this Edge is only allowed the one direction, then skip it.
                    if (isSoleDirec(m_allowedConnDirs.at(e->id()).at(u->id()))) continue;
                    // Otherwise mark it as the candidate.
                    candidate = e;
                    // Determine the predominant cardinal direction from Node u to the Node v at
                    // the opposite end of Edge e.
                    Node_SP v = e->getOtherEnd(*u);
                    CardinalDir d1 = Compass::cardinalDirection(u, v);
                    // If this is different from direction d0, then we're happy to accept this candidate.
                    if (d1 != d0) break;
                }
                // NOTE(review): if every edge at u were down to a single allowed
                // direction, 'candidate' would remain null and the .at() calls
                // below would dereference a null Edge_SP. Presumably the 4n+1
                // bound argued above prevents this state — confirm.
                //
                // Start with the directions allowed last time:
                ConnDirFlags available = m_allowedConnDirs.at(candidate->id()).at(u->id());
                // XOR with the connection flag corresponding to cardinal direction d0,
                // so that this direction is no longer allowed.
                available ^= Compass::libavoidConnDirs.at(d0);
                // Record the new value.
                m_allowedConnDirs[candidate->id()][u->id()] = available;
                // Set a new ConnEnd.
                Point p = u->getCentre();
                ConnEnd end(p, available);
                ConnRef *cr = m_ra.edgeIdToConnRef.at(candidate->id());
                if (u->id() == candidate->getSourceEnd()->id()) {
                    cr->setSourceEndpoint(end);
                } else {
                    cr->setDestEndpoint(end);
                }
            }
        }
    }
    // Finally, the routing is done and we can set the connector routes in the Edge objects.
    m_ra.recordRoutes(true);
}
// Expand macro uses among root's children until a complete pass performs no
// substitution. Each pass rebuilds root->children. Guarded by MAX_SUBST to
// catch runaway (possibly recursive) macro expansion.
void Evaluator::subst_macros() {
    int cntr = 0;
    // Last macro-use token seen; used to point error messages at the
    // offending macro use.
    Token gtok(0, 0);
    while (1) {
        if (++cntr > MAX_SUBST) throw Err("Too many macro substitutions: " + bug::to_string(MAX_SUBST) + ", possible recursion", gtok);
        bool weresubs = false;  // did this pass substitute anything?
        // Take the current child list and rebuild root->children from it.
        Nodes old = root->children;
        root->children.clear();
        // Nodes harvested from "@end" uses, pending attachment to the next
        // instruction or macro use encountered.
        Nodes leftovers;
        for (auto i : old) {
            Instruction * pin = get<Instruction>(NOTHR, i);
            if (pin) {
                // Plain instruction: attach any pending leftovers beneath it
                // and keep it unchanged.
                for (auto j : leftovers) i->children[0]->children[1]->children.push_back(j);
                leftovers.clear();
                root->addChild(i);
                continue;
            }
            // Not an instruction — must be a macro use (throws via LNFUN otherwise).
            Macuse * u = get<Macuse>(LNFUN, i);
            gtok = u->tok();
            if (u->name() == "@end") {
                // "@end" contributes its children to the pending leftovers and
                // is itself dropped from the rebuilt list.
                for (auto j : u->children[0]->children) leftovers.push_back(j);
                continue;
            }
            // Expand the macro use into zero or more replacement nodes.
            Nodes inject = Macros(root).process_macuse(*u);
            if (!inject.empty()) {
                // Attach pending leftovers to the first injected node; the
                // attachment point differs for instructions vs. macro uses.
                Pnode pn = inject.front();
                Instruction * pin = get<Instruction>(NOTHR, pn);  // NOTE: intentionally shadows the outer 'pin'
                if (pin) {
                    for (auto j : leftovers) pn->children[0]->children[1]->children.push_back(j);
                } else {
                    Macuse * pu = get<Macuse>(LNFUN, pn);
                    for (auto j : leftovers) pu->children[0]->children.push_back(j);
                }
                leftovers.clear();
            }
            for (auto j : inject) root->addChild(j);
            weresubs = true;
        }
        if (!weresubs) {
            // Fixed point reached: nothing substituted in this pass.
            if (leftovers.empty()) break;
            // Leftover labels with no following node to attach to are errors.
            if (root->children.empty()) throw Err("Labels used in empty program");
            throw Err("Program finishes with label (see macro definition)", root->children.back()->tok());
        }
        // this can be improved later (one extra loop)
        // when expanding macro we can detect that no new macro introduced
    } // while
}