bool IsMirror(Node *root) {
  typedef list<Node *> NodeList;
  NodeList leftList;
  NodeList rightList;
  if (!root) {
    // an empty tree is mirrored
    return true;
  }
  leftList.push_back(root->left);
  rightList.push_back(root->right);
  // BFS traversal.
  // Pushing children to the lists and then comparing their values.
  while (!leftList.empty() && !rightList.empty()) {
    Node *left = leftList.front();
    leftList.pop_front();
    Node *right = rightList.front();
    rightList.pop_front();
    if (!left && !right) {
      continue;
    } else if (!left || !right) {
      return false;
    }
    if (left->value != right->value) {
      return false;
    }
    leftList.push_back(left->left);
    leftList.push_back(left->right);
    // the insert order is reversed in right sub-tree
    rightList.push_back(right->right);
    rightList.push_back(right->left);
  }
  // Both lists should be empty, otherwise this is not a mirrored binary tree.
  return leftList.empty() && rightList.empty();
}
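// A small usage sketch for IsMirror (not part of the original source). The
// snippet above uses an unqualified `list`, so it assumes <list> plus a
// `using` directive; the Node definition below is likewise an assumption, the
// snippet only shows that Node has `value`, `left`, and `right` members.
#include <list>
using std::list;

struct Node {
  int value;
  Node *left;
  Node *right;
};

void isMirrorExample() {
  Node leftChild  = {2, NULL, NULL};
  Node rightChild = {2, NULL, NULL};
  Node root       = {1, &leftChild, &rightChild};
  bool mirrored = IsMirror(&root);  // true: the two subtrees mirror each other
  bool emptyOk  = IsMirror(NULL);   // true: an empty tree counts as mirrored
  (void)mirrored;
  (void)emptyOk;
}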
bool MPTreeMgr::buildRegionTree( NodeList & node )
{
   if ( node.empty() ) return false;

   Node * pNode , * pPrev , * pBottom;
   int    bound , curX , i , n;

   pBottom = pPrev = node[0];
   bound   = _chipWidth / 2;
   curX    = node[0]->width();

   for ( i = 1 , n = node.size() ; i < n ; ++i ) {
      pNode = node[i];
      if ( curX >= bound ) {
         pBottom->_curPtr._right = pNode;
         pNode->_curPtr._p       = pBottom;
         pBottom                 = pNode;
         curX                    = pNode->width();
      }
      else {
         pPrev->_curPtr._left = pNode;
         pNode->_curPtr._p    = pPrev;
         curX                += pNode->width();
      }
      pPrev = pNode;
   }
   return true;
}
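// Worked illustration of the row-packing above, with invented numbers: suppose
// _chipWidth is 10 (so bound = 5) and the nodes have widths 3, 3, 2, 4 in
// order. node[0] starts the bottom row (curX = 3). node[1] still fits
// (curX = 3 < 5), so it is chained as the left link of node[0] and curX becomes
// 6. For node[2] the row is full (curX = 6 >= 5), so it becomes the right link
// of the current bottom node and starts a new row with curX = 2. node[3] then
// chains to the left of node[2]. The result is two rows, {node0, node1} and
// {node2, node3}, connected through _curPtr.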
double maxFlow(int ss, int tt) {
  s = ss;
  t = tt;
  initFlow();
  // Repeatedly discharge active vertices from the work list until none remain.
  while (!lst.empty()) {
    int v = lst.remove();
    discharge(v);
  }
  // The excess accumulated at the sink is the value of the maximum flow.
  return e[t];
}
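// The loop above relies on a work list `lst` with empty() and remove()
// operations, plus initFlow(), discharge(), and the excess array e[], none of
// which are shown. A minimal stand-in for the work list, assuming FIFO order
// (the original container may well be different), could look like this:
#include <queue>

struct ActiveList {
  std::queue<int> q;
  bool empty() const { return q.empty(); }
  void add(int v) { q.push(v); }
  int remove() {  // take the next active vertex to discharge
    int v = q.front();
    q.pop();
    return v;
  }
};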
void XmlRenderer::displayNodeList(zstring tagName, const NodeList& nodelist)
{
  if (!nodelist.empty())
  {
    displayOpenTag(tagName);
    for (auto& nd : nodelist)
    {
      if (nd) render(*nd);
    }
    displayCloseTag(tagName);
  }
}
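// Illustration of the intended output (assumed from the open/close-tag calls
// above): displayNodeList("items", nodes) for a two-element list would emit
//
//   <items>
//     ...rendered form of the first node...
//     ...rendered form of the second node...
//   </items>
//
// while an empty list emits nothing at all, because the surrounding tags are
// only written when the list is non-empty.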
Node* osgDB::readNodeFiles(std::vector<std::string>& commandLine,const ReaderWriter::Options* options)
{
    typedef std::vector<osg::Node*> NodeList;
    NodeList nodeList;

    // note currently doesn't delete the loaded file entries from the command line yet...
    for(std::vector<std::string>::iterator itr=commandLine.begin();
        itr!=commandLine.end();
        ++itr)
    {
        if ((*itr)[0]!='-')
        {
            // not an option so assume string is a filename.
            osg::Node *node = osgDB::readNodeFile( *itr , options );
            if( node != (osg::Node *)0L )
            {
                if (node->getName().empty()) node->setName( *itr );
                nodeList.push_back(node);
            }
        }
    }

    if (nodeList.empty())
    {
        return NULL;
    }

    if (nodeList.size()==1)
    {
        return nodeList.front();
    }
    else // size >1
    {
        osg::Group* group = new osg::Group;
        for(NodeList::iterator itr=nodeList.begin();
            itr!=nodeList.end();
            ++itr)
        {
            group->addChild(*itr);
        }
        return group;
    }
}
std::pair<NodeSet,bool> Liveness::getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode*> RefA, NodeSet &Visited, const NodeSet &Defs, unsigned Nest, unsigned MaxNest) { if (Nest > MaxNest) return { NodeSet(), false }; // Collect all defined registers. Do not consider phis to be defining // anything, only collect "real" definitions. RegisterAggr DefRRs(PRI); for (NodeId D : Defs) { const auto DA = DFG.addr<const DefNode*>(D); if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef)) DefRRs.insert(DA.Addr->getRegRef(DFG)); } NodeList RDs = getAllReachingDefs(RefRR, RefA, false, true, DefRRs); if (RDs.empty()) return { Defs, true }; // Make a copy of the preexisting definitions and add the newly found ones. NodeSet TmpDefs = Defs; for (NodeAddr<NodeBase*> R : RDs) TmpDefs.insert(R.Id); NodeSet Result = Defs; for (NodeAddr<DefNode*> DA : RDs) { Result.insert(DA.Id); if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef)) continue; NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG); if (Visited.count(PA.Id)) continue; Visited.insert(PA.Id); // Go over all phi uses and get the reaching defs for each use. for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) { const auto &T = getAllReachingDefsRecImpl(RefRR, U, Visited, TmpDefs, Nest+1, MaxNest); if (!T.second) return { T.first, false }; Result.insert(T.first.begin(), T.first.end()); } } return { Result, true }; }
static bool Graph_divide(Graph& graph, size_t loops, PositionList* position_tbl) { typedef std::set< size_t > NodeList; NodeList nodes; // nodes for (Graph::const_iterator i = graph.begin(); i != graph.end(); ++i) { nodes.insert(i->first); } while (!nodes.empty()) { // BFS Graph component; std::deque< size_t > Q = boost::assign::list_of(*nodes.begin()); while (!Q.empty()) { size_t xi = Q.front(); Q.pop_front(); if (nodes.find(xi) == nodes.end()) { continue; } nodes.erase(xi); Graph::const_iterator i = graph.find(xi); if (i != graph.end()) { for (Children::const_iterator j = i->second.children.begin(); j != i->second.children.end(); ++j) { Graph_addEdge(component, xi, j->first, j->second); Q.push_back(j->first); } for (Parents::const_iterator j = i->second.parents.begin(); j != i->second.parents.end(); ++j) { Q.push_back(*j); } } } LOG4CXX_TRACE(logger, boost::format("component:%d/%d") % component.size() % graph.size()); if (!Graph_solve(component, loops, position_tbl)) { LOG4CXX_ERROR(logger, "solve component failed"); return false; } } return true; }
virtual ReadResult readNode(std::istream& fin, const Options* options) const
{
    loadWrappers();

    fin.imbue(std::locale::classic());

    Input fr;
    fr.attach(&fin);
    fr.setOptions(options);

    typedef std::vector<osg::Node*> NodeList;
    NodeList nodeList;

    // load all nodes in file, placing them in a group.
    while(!fr.eof())
    {
        Node *node = fr.readNode();
        if (node) nodeList.push_back(node);
        else fr.advanceOverCurrentFieldOrBlock();
    }

    if (nodeList.empty())
    {
        return ReadResult("No data loaded");
    }
    else if (nodeList.size()==1)
    {
        return nodeList.front();
    }
    else
    {
        Group* group = new Group;
        group->setName("import group");
        for(NodeList::iterator itr=nodeList.begin();
            itr!=nodeList.end();
            ++itr)
        {
            group->addChild(*itr);
        }
        return group;
    }
}
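// Sketch of how a plugin readNode(std::istream&, ...) like the one above is
// typically reached from application code, assuming the ".osg" plugin is the
// one implementing it; error handling is kept minimal and the function name is
// made up for illustration.
#include <fstream>
#include <string>
#include <osgDB/Registry>

osg::Node* loadNodeFromOsgStream(const std::string& path)
{
    osgDB::ReaderWriter* rw =
        osgDB::Registry::instance()->getReaderWriterForExtension("osg");
    if (!rw) return 0;                       // plugin not available

    std::ifstream in(path.c_str());
    if (!in) return 0;                       // file could not be opened

    osgDB::ReaderWriter::ReadResult rr = rw->readNode(in, 0);
    return rr.validNode() ? rr.takeNode() : 0;
}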
Node* osgDB::readNodeFiles(std::vector<std::string>& fileList,const Options* options)
{
    typedef std::vector<osg::Node*> NodeList;
    NodeList nodeList;

    for(std::vector<std::string>::iterator itr=fileList.begin();
        itr!=fileList.end();
        ++itr)
    {
        osg::Node *node = osgDB::readNodeFile( *itr , options );
        if( node != (osg::Node *)0L )
        {
            if (node->getName().empty()) node->setName( *itr );
            nodeList.push_back(node);
        }
    }

    if (nodeList.empty())
    {
        return NULL;
    }

    if (nodeList.size()==1)
    {
        return nodeList.front();
    }
    else // size >1
    {
        osg::Group* group = new osg::Group;
        for(NodeList::iterator itr=nodeList.begin();
            itr!=nodeList.end();
            ++itr)
        {
            group->addChild(*itr);
        }
        return group;
    }
}
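// A short usage sketch (not from the original source): readNodeFiles returns
// the single node when exactly one file loads and wraps several loads in an
// osg::Group, so the caller can treat the result uniformly. The viewer setup
// below is illustrative only.
#include <string>
#include <vector>
#include <osgDB/ReadFile>
#include <osgDB/Registry>
#include <osgViewer/Viewer>

int loadAndView(int argc, char** argv)
{
    std::vector<std::string> files(argv + 1, argv + argc);
    osg::ref_ptr<osg::Node> scene =
        osgDB::readNodeFiles(files, osgDB::Registry::instance()->getOptions());
    if (!scene) return 1;                    // nothing could be loaded

    osgViewer::Viewer viewer;
    viewer.setSceneData(scene.get());
    return viewer.run();
}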
Node * extractBestNode()
{
    // TODO use a better datastructure
    if(nodes.empty())
    {
        return NULL;
    }

    NodeList::iterator best = nodes.begin();
    float cost = std::numeric_limits<float>::max();
    for(NodeList::iterator i = nodes.begin(); i != nodes.end(); ++i)
    {
        if((*i)->getCost() < cost)
        {
            cost = (*i)->getCost();
            best = i;
        }
    }
    Node * node = *best;
    nodes.erase(best);
    return node;
}
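// The TODO above asks for a better data structure. One option, sketched here
// with a minimal stand-in node type (the real Node and its getCost() accessor
// live elsewhere in that code base): keep the open nodes in a
// std::priority_queue ordered by cost, so extracting the cheapest node is
// O(log n) instead of a linear scan. Handling nodes whose cost changes after
// insertion is deliberately omitted from this sketch.
#include <cstddef>
#include <queue>
#include <vector>

struct CostedNode {
  float cost;
  float getCost() const { return cost; }
};

struct CostGreater {
  bool operator()(const CostedNode* a, const CostedNode* b) const {
    return a->getCost() > b->getCost();  // invert so the smallest cost is on top
  }
};

typedef std::priority_queue<CostedNode*, std::vector<CostedNode*>, CostGreater> OpenSet;

CostedNode* extractBestNodeHeap(OpenSet& open) {
  if (open.empty()) return NULL;
  CostedNode* best = open.top();
  open.pop();
  return best;
}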
int findPath()
{
    init();
    // Seed the search with the start position `sta` and a sentinel parent of (-1,-1).
    Node s=Node(sta,0,0,make_pair(-1,-1)),tmp;
    pri_List.push(s);
    mazer.pushQue(s.pos);
    while (!pri_List.empty())
    {
        // Take the highest-priority open node and mark it visited.
        tmp=pri_List.top();
        printf("%d %d %d %d %d\n",tmp.pos.first,tmp.pos.second,tmp.parent.first,tmp.parent.second,tmp.f);
        mazer.pushVis(tmp.pos);
        pri_List.pop();
        if (tmp.pos == end)
        {
            // Goal reached: rebuild the path through the recorded parents.
            backtrace(end.first,end.second);
            return 1;
        }
        expandSuccessors(tmp);
    }
    return 0;   // open list exhausted without reaching the goal
}
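// pri_List above is a priority queue of Node ordered by the f value that gets
// printed, but the Node type itself is not shown. One plausible shape, given
// purely as an assumption (the original may differ), with the comparison
// inverted so std::priority_queue yields the smallest f first:
#include <utility>

struct AStarNode {
  std::pair<int,int> pos;     // current cell
  std::pair<int,int> parent;  // cell we came from, (-1,-1) for the start
  int g;                      // cost accumulated from the start
  int f;                      // g plus a heuristic estimate to the goal
  AStarNode() : g(0), f(0) {}
  AStarNode(std::pair<int,int> p, int g_, int h_, std::pair<int,int> par)
      : pos(p), parent(par), g(g_), f(g_ + h_) {}
  bool operator<(const AStarNode& o) const { return f > o.f; }  // min-heap order
};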
NodeSet Liveness::getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA, NodeSet &Visited, const NodeSet &Defs) { // Collect all defined registers. Do not consider phis to be defining // anything, only collect "real" definitions. RegisterAggr DefRRs(DFG.getLMI(), TRI); for (NodeId D : Defs) { const auto DA = DFG.addr<const DefNode*>(D); if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef)) DefRRs.insert(DA.Addr->getRegRef()); } NodeList RDs = getAllReachingDefs(RefRR, RefA, true, DefRRs); if (RDs.empty()) return Defs; // Make a copy of the preexisting definitions and add the newly found ones. NodeSet TmpDefs = Defs; for (NodeAddr<NodeBase*> R : RDs) TmpDefs.insert(R.Id); NodeSet Result = Defs; for (NodeAddr<DefNode*> DA : RDs) { Result.insert(DA.Id); if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef)) continue; NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG); if (Visited.count(PA.Id)) continue; Visited.insert(PA.Id); // Go over all phi uses and get the reaching defs for each use. for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) { const auto &T = getAllReachingDefsRec(RefRR, U, Visited, TmpDefs); Result.insert(T.begin(), T.end()); } } return Result; }
Edges *MaxAcyclicSubgraph::find_subgraph() { Edges *Ea = new Edges; stack< NodeList::iterator > toRemove; NodeList *copy = copy_graph(); Node *node = NULL; COLA_ASSERT(!copy->empty()); COLA_ASSERT(!edges->empty()); #ifdef COPY_ADJ_DEBUG cout << "COPY OF MATRIX: " << endl; printNodes(copy); #endif // while the graph is not empty while (!copy->empty()) { COLA_ASSERT(toRemove.empty()); // do we have any sinks for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { // is is a sink if there are no outgoing edges node = *ni; if (node->outgoing.empty()) { #ifdef RUN_DEBUG cout << "vertex(" << node->id << ") is a SINK" << endl; #endif // append it's incoming edges to Ea for (unsigned j = 0; j < node->incoming.size(); j++) { #ifdef RUN_DEBUG cout << "Appending to Ea: Edge(" << node->incoming[j].first << ", " << node->incoming[j].second << ")" << endl; #endif Ea->push_back(node->incoming[j]); // find the edge from a vertex where the edge is outgoing Node *out = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->incoming[j].first) { out = (*copy)[q]; } } COLA_ASSERT(out != NULL); #ifdef RUN_DEBUG cout << "Searching through OUTGOING list for vertex(" << out->id << ")" << endl; #endif Edges::iterator oi; for (oi = out->outgoing.begin(); oi != out->outgoing.end(); oi++) { cola::Edge e = *oi; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->incoming[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*oi).first << ", " << (*oi).second << ") from OUTGOING list of vertex(" << out->id << ")" << endl; #endif out->outgoing.erase(oi); } // say that we want to remove this vertex from the graph. toRemove.push(ni); } } // remove all necessary vertices while (!toRemove.empty()) { copy->erase(toRemove.top()); toRemove.pop(); } COLA_ASSERT(toRemove.empty()); #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after SINKS removed): " << endl; printNodes(copy); #endif // do we have any isolated vertices for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { // is is an isolated vertice if there are no incoming or outgoing edges node = *ni; if (node->incoming.empty() && node->outgoing.empty()) { #ifdef RUN_DEBUG cout << "vertex(" << node->id << ") is ISOLATED" << endl; #endif // say that we want to remove this vertex from the graph. 
toRemove.push(ni); } } // remove all necessary vertices while (!toRemove.empty()) { copy->erase(toRemove.top()); toRemove.pop(); } COLA_ASSERT(toRemove.empty()); #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after isolated vertices removed): " << endl; printNodes(copy); #endif // do we have any sources for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { // is is a sink if there are no incoming edges node = *ni; if (node->incoming.empty()) { #ifdef RUN_DEBUG cout << "vertex(" << node->id << ") is a SOURCE" << endl; #endif // append it's outgoing edges to Ea for (unsigned j = 0; j < node->outgoing.size(); j++) { #ifdef RUN_DEBUG cout << "Appending to Ea: Edge(" << node->outgoing[j].first << ", " << node->outgoing[j].second << ")" << endl; #endif Ea->push_back(node->outgoing[j]); // find the edge from a vertex where the edge is incoming Node *in = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->outgoing[j].second) { in = (*copy)[q]; } } COLA_ASSERT(in != NULL); #ifdef RUN_DEBUG cout << "Searching through INCOMING list for vertex(" << in->id << ")" << endl; #endif Edges::iterator ii; for (ii = in->incoming.begin(); ii != in->incoming.end(); ii++) { cola::Edge e = *ii; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->outgoing[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*ii).first << ", " << (*ii).second << ") from INCOMING list of vertex(" << in->id << ")" << endl; #endif in->incoming.erase(ii); } // say that we want to remove this vertex from the graph. toRemove.push(ni); } } // remove all necessary vertices while (!toRemove.empty()) { copy->erase(toRemove.top()); toRemove.pop(); } COLA_ASSERT(toRemove.empty()); #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after SOURCES removed): " << endl; printNodes(copy); #endif // if the graph is not empty if (!copy->empty()) { // find the vertex with the highest degree of "source" int degree = -1000; NodeList::iterator theNode; for (NodeList::iterator ni = copy->begin(); ni != copy->end(); ni++) { node = *ni; int t = node->outgoing.size() - node->incoming.size(); if (t > degree) { #ifdef RUN_DEBUG cout << "Sourceiest node: " << node->id << "(d:" << degree << ", t:" << t << ")" << endl; #endif degree = t; theNode = ni; } } // add this node's outgoing edges to Ea node = *theNode; for (unsigned j = 0; j < node->outgoing.size(); j++) { #ifdef RUN_DEBUG cout << "Appending to Ea: Edge(" << node->outgoing[j].first << ", " << node->outgoing[j].second << ")" << endl; #endif Ea->push_back(node->outgoing[j]); // find the edge from a vertex where the edge is incoming Node *in = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->outgoing[j].second) { in = (*copy)[q]; } } COLA_ASSERT(in != NULL); #ifdef RUN_DEBUG cout << "Searching through INCOMING list for vertex(" << in->id << ")" << endl; #endif Edges::iterator ii; for (ii = in->incoming.begin(); ii != in->incoming.end(); ii++) { cola::Edge e = *ii; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->outgoing[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*ii).first 
<< ", " << (*ii).second << ") from INCOMING list of vertex(" << in->id << ")" << endl; #endif in->incoming.erase(ii); } // for all of the incoming edges this node possesses, delete then from other node's outgoing edge list for (unsigned j = 0; j < node->incoming.size(); j++) { // find the edge from a vertex where the edge is outgoing Node *out = NULL; for (unsigned q = 0; q < copy->size(); q++) { if ((*copy)[q]->id == node->incoming[j].first) { out = (*copy)[q]; } } COLA_ASSERT(out != NULL); #ifdef RUN_DEBUG cout << "Searching through OUTGOING list for vertex(" << out->id << ")" << endl; #endif Edges::iterator oi; for (oi = out->outgoing.begin(); oi != out->outgoing.end(); oi++) { cola::Edge e = *oi; #ifdef RUN_DEBUG cout << "Looking at Edge(" << e.first << ", " << e.second << ")" << endl; #endif if (e == node->incoming[j]) { break; } } #ifdef RUN_DEBUG cout << "Erasing Edge(" << (*oi).first << ", " << (*oi).second << ") from OUTGOING list of vertex(" << out->id << ")" << endl; #endif out->outgoing.erase(oi); } // delete this vertex copy->erase(theNode); } #ifdef EA_DEBUG cout << "EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif #ifdef RUN_DEBUG cout << "COPY OF MATRIX (after SOURCIEST node removed): " << endl; printNodes(copy); #endif } // delete the copy if (copy != NULL) { for (unsigned i = 0; i < copy->size(); i++) { if ((*copy)[i] != NULL) { delete (*copy)[i]; } } delete copy; } #ifdef EA_DEBUG cout << "Returning EA: "; for (unsigned i = 0; i < Ea->size(); i++) { cout << "(" << (*Ea)[i].first << ", " << (*Ea)[i].second << ") "; } cout << endl; #endif return Ea; }
Node* osgDB::readNodeFiles(osg::ArgumentParser& arguments,const Options* options) { typedef std::vector< osg::ref_ptr<osg::Node> > NodeList; NodeList nodeList; std::string filename; while (arguments.read("--file-cache",filename)) { osgDB::Registry::instance()->setFileCache(new osgDB::FileCache(filename)); } while (arguments.read("--image",filename)) { osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), options); if (image.valid()) { osg::Geode* geode = osg::createGeodeForImage(image.get()); if (image->isImageTranslucent()) { OSG_INFO<<"Image "<<image->getFileName()<<" is translucent; setting up blending."<<std::endl; geode->getOrCreateStateSet()->setMode(GL_BLEND, osg::StateAttribute::ON); geode->getOrCreateStateSet()->setRenderingHint(osg::StateSet::TRANSPARENT_BIN); } nodeList.push_back(geode); } } while (arguments.read("--movie",filename)) { osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), options); osg::ref_ptr<osg::ImageStream> imageStream = dynamic_cast<osg::ImageStream*>(image.get()); if (imageStream.valid()) { bool flip = image->getOrigin()==osg::Image::TOP_LEFT; // start the stream playing. imageStream->play(); osg::ref_ptr<osg::Geometry> pictureQuad = 0; bool useTextureRectangle = true; if (useTextureRectangle) { pictureQuad = osg::createTexturedQuadGeometry(osg::Vec3(0.0f,0.0f,0.0f), osg::Vec3(image->s(),0.0f,0.0f), osg::Vec3(0.0f,0.0f,image->t()), 0.0f, flip ? image->t() : 0.0, image->s(), flip ? 0.0 : image->t()); pictureQuad->getOrCreateStateSet()->setTextureAttributeAndModes(0, new osg::TextureRectangle(image.get()), osg::StateAttribute::ON); } else { pictureQuad = osg::createTexturedQuadGeometry(osg::Vec3(0.0f,0.0f,0.0f), osg::Vec3(image->s(),0.0f,0.0f), osg::Vec3(0.0f,0.0f,image->t()), 0.0f, flip ? 1.0f : 0.0f , 1.0f, flip ? 0.0f : 1.0f); pictureQuad->getOrCreateStateSet()->setTextureAttributeAndModes(0, new osg::Texture2D(image.get()), osg::StateAttribute::ON); } if (pictureQuad.valid()) { osg::ref_ptr<osg::Geode> geode = new osg::Geode; geode->addDrawable(pictureQuad.get()); nodeList.push_back(geode.get()); } } else if (image.valid()) { nodeList.push_back(osg::createGeodeForImage(image.get())); } } while (arguments.read("--dem",filename)) { osg::HeightField* hf = readHeightFieldFile(filename.c_str(), options); if (hf) { osg::Geode* geode = new osg::Geode; geode->addDrawable(new osg::ShapeDrawable(hf)); nodeList.push_back(geode); } } // note currently doesn't delete the loaded file entries from the command line yet... for(int pos=1; pos<arguments.argc(); ++pos) { if (!arguments.isOption(pos)) { // not an option so assume string is a filename. osg::Node *node = osgDB::readNodeFile( arguments[pos], options); if(node) { if (node->getName().empty()) node->setName( arguments[pos] ); nodeList.push_back(node); } } } if (nodeList.empty()) { return NULL; } if (nodeList.size()==1) { return nodeList.front().release(); } else // size >1 { osg::Group* group = new osg::Group; for(NodeList::iterator itr=nodeList.begin(); itr!=nodeList.end(); ++itr) { group->addChild((*itr).get()); } return group; } }
osg::ref_ptr<osg::Node> p3d::readShowFiles(osg::ArgumentParser& arguments,const osgDB::ReaderWriter::Options* options)
{
    osg::ref_ptr<osgDB::Options> local_options = createOptions(options);
    local_options->setOptionString("main");

    typedef std::vector< osg::ref_ptr<osg::Node> > NodeList;
    NodeList nodeList;

    std::string filename;

    while (arguments.read("--image",filename))
    {
        osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), local_options.get());
        if (image.valid()) nodeList.push_back(osg::createGeodeForImage(image.get()));
    }

    while (arguments.read("--movie",filename))
    {
        osg::ref_ptr<osg::Image> image = readImageFile(filename.c_str(), local_options.get());
        osg::ref_ptr<osg::ImageStream> imageStream = dynamic_cast<osg::ImageStream*>(image.get());
        // guard on the stream itself: a valid image that is not an ImageStream
        // would otherwise be dereferenced through a null imageStream below.
        if (imageStream.valid())
        {
            imageStream->play();
            nodeList.push_back(osg::createGeodeForImage(imageStream.get()));
        }
    }

    while (arguments.read("--dem",filename))
    {
        osg::HeightField* hf = readHeightFieldFile(filename.c_str(), local_options.get());
        if (hf)
        {
            osg::Geode* geode = new osg::Geode;
            geode->addDrawable(new osg::ShapeDrawable(hf));
            nodeList.push_back(geode);
        }
    }

    // note currently doesn't delete the loaded file entries from the command line yet...
    for(int pos=1;pos<arguments.argc();++pos)
    {
        if (!arguments.isOption(pos))
        {
            // not an option so assume string is a filename.
            osg::Node *node = osgDB::readNodeFile( arguments[pos], local_options.get());
            if(node)
            {
                if (node->getName().empty()) node->setName( arguments[pos] );
                nodeList.push_back(node);
            }
        }
    }

    if (nodeList.empty())
    {
        return NULL;
    }

    osg::ref_ptr<osg::Node> root;
    if (nodeList.size()==1)
    {
        root = nodeList.front().get();
    }
    else // size >1
    {
        osg::Switch* sw = new osg::Switch;
        for(NodeList::iterator itr=nodeList.begin();
            itr!=nodeList.end();
            ++itr)
        {
            sw->addChild((*itr).get());
        }
        sw->setSingleChildOn(0);
        sw->setEventCallback(new p3d::ShowEventHandler());
        root = sw;
    }

    if (root.valid())
    {
        osg::notify(osg::INFO)<<"Got node now adding callback"<<std::endl;
        AddVolumeEditingCallbackVisitor avecv;
        root->accept(avecv);
    }

    return root;
}
void Liveness::computePhiInfo() { RealUseMap.clear(); NodeList Phis; NodeAddr<FuncNode*> FA = DFG.getFunc(); auto Blocks = FA.Addr->members(DFG); for (NodeAddr<BlockNode*> BA : Blocks) { auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG); Phis.insert(Phis.end(), Ps.begin(), Ps.end()); } // phi use -> (map: reaching phi -> set of registers defined in between) std::map<NodeId,std::map<NodeId,RegisterSet>> PhiUp; std::vector<NodeId> PhiUQ; // Work list of phis for upward propagation. // Go over all phis. for (NodeAddr<PhiNode*> PhiA : Phis) { // Go over all defs and collect the reached uses that are non-phi uses // (i.e. the "real uses"). auto &RealUses = RealUseMap[PhiA.Id]; auto PhiRefs = PhiA.Addr->members(DFG); // Have a work queue of defs whose reached uses need to be found. // For each def, add to the queue all reached (non-phi) defs. SetVector<NodeId> DefQ; NodeSet PhiDefs; for (auto R : PhiRefs) { if (!DFG.IsRef<NodeAttrs::Def>(R)) continue; DefQ.insert(R.Id); PhiDefs.insert(R.Id); } for (unsigned i = 0; i < DefQ.size(); ++i) { NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]); NodeId UN = DA.Addr->getReachedUse(); while (UN != 0) { NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN); if (!(A.Addr->getFlags() & NodeAttrs::PhiRef)) RealUses[getRestrictedRegRef(A)].insert(A.Id); UN = A.Addr->getSibling(); } NodeId DN = DA.Addr->getReachedDef(); while (DN != 0) { NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN); for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) { uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags(); // Must traverse the reached-def chain. Consider: // def(D0) -> def(R0) -> def(R0) -> use(D0) // The reachable use of D0 passes through a def of R0. if (!(Flags & NodeAttrs::PhiRef)) DefQ.insert(T.Id); } DN = A.Addr->getSibling(); } } // Filter out these uses that appear to be reachable, but really // are not. For example: // // R1:0 = d1 // = R1:0 u2 Reached by d1. // R0 = d3 // = R1:0 u4 Still reached by d1: indirectly through // the def d3. // R1 = d5 // = R1:0 u6 Not reached by d1 (covered collectively // by d3 and d5), but following reached // defs and uses from d1 will lead here. auto HasDef = [&PhiDefs] (NodeAddr<DefNode*> DA) -> bool { return PhiDefs.count(DA.Id); }; for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) { // For each reached register UI->first, there is a set UI->second, of // uses of it. For each such use, check if it is reached by this phi, // i.e. check if the set of its reaching uses intersects the set of // this phi's defs. auto &Uses = UI->second; for (auto I = Uses.begin(), E = Uses.end(); I != E; ) { auto UA = DFG.addr<UseNode*>(*I); NodeList RDs = getAllReachingDefs(UI->first, UA); if (std::any_of(RDs.begin(), RDs.end(), HasDef)) ++I; else I = Uses.erase(I); } if (Uses.empty()) UI = RealUses.erase(UI); else ++UI; } // If this phi reaches some "real" uses, add it to the queue for upward // propagation. if (!RealUses.empty()) PhiUQ.push_back(PhiA.Id); // Go over all phi uses and check if the reaching def is another phi. // Collect the phis that are among the reaching defs of these uses. // While traversing the list of reaching defs for each phi use, collect // the set of registers defined between this phi (Phi) and the owner phi // of the reaching def. 
for (auto I : PhiRefs) { if (!DFG.IsRef<NodeAttrs::Use>(I)) continue; NodeAddr<UseNode*> UA = I; auto &UpMap = PhiUp[UA.Id]; RegisterSet DefRRs; for (NodeAddr<DefNode*> DA : getAllReachingDefs(UA)) { if (DA.Addr->getFlags() & NodeAttrs::PhiRef) UpMap[DA.Addr->getOwner(DFG).Id] = DefRRs; else DefRRs.insert(DA.Addr->getRegRef()); } } } if (Trace) { dbgs() << "Phi-up-to-phi map:\n"; for (auto I : PhiUp) { dbgs() << "phi " << Print<NodeId>(I.first, DFG) << " -> {"; for (auto R : I.second) dbgs() << ' ' << Print<NodeId>(R.first, DFG) << Print<RegisterSet>(R.second, DFG); dbgs() << " }\n"; } } // Propagate the reached registers up in the phi chain. // // The following type of situation needs careful handling: // // phi d1<R1:0> (1) // | // ... d2<R1> // | // phi u3<R1:0> (2) // | // ... u4<R1> // // The phi node (2) defines a register pair R1:0, and reaches a "real" // use u4 of just R1. The same phi node is also known to reach (upwards) // the phi node (1). However, the use u4 is not reached by phi (1), // because of the intervening definition d2 of R1. The data flow between // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0. // // When propagating uses up the phi chains, get the all reaching defs // for a given phi use, and traverse the list until the propagated ref // is covered, or until or until reaching the final phi. Only assume // that the reference reaches the phi in the latter case. for (unsigned i = 0; i < PhiUQ.size(); ++i) { auto PA = DFG.addr<PhiNode*>(PhiUQ[i]); auto &RealUses = RealUseMap[PA.Id]; for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) { NodeAddr<UseNode*> UA = U; auto &UpPhis = PhiUp[UA.Id]; for (auto UP : UpPhis) { bool Changed = false; auto &MidDefs = UP.second; // Collect the set UpReached of uses that are reached by the current // phi PA, and are not covered by any intervening def between PA and // the upward phi UP. RegisterSet UpReached; for (auto T : RealUses) { if (!isRestricted(PA, UA, T.first)) continue; if (!RAI.covers(MidDefs, T.first)) UpReached.insert(T.first); } if (UpReached.empty()) continue; // Update the set PRUs of real uses reached by the upward phi UP with // the actual set of uses (UpReached) that the UP phi reaches. auto &PRUs = RealUseMap[UP.first]; for (auto R : UpReached) { unsigned Z = PRUs[R].size(); PRUs[R].insert(RealUses[R].begin(), RealUses[R].end()); Changed |= (PRUs[R].size() != Z); } if (Changed) PhiUQ.push_back(UP.first); } } } if (Trace) { dbgs() << "Real use map:\n"; for (auto I : RealUseMap) { dbgs() << "phi " << Print<NodeId>(I.first, DFG); NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first); NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG); if (!Ds.empty()) { RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(); dbgs() << '<' << Print<RegisterRef>(RR, DFG) << '>'; } else { dbgs() << "<noreg>"; } dbgs() << " -> " << Print<RefMap>(I.second, DFG) << '\n'; } } }
// // FindLoops // // Find loops and build loop forest using Havlak's algorithm, which // is derived from Tarjan. Variable names and step numbering has // been chosen to be identical to the nomenclature in Havlak's // paper (which is similar to the one used by Tarjan). // void FindLoops() { if (!CFG_->GetStartBasicBlock()) return; int size = CFG_->GetNumNodes(); IntSetVector non_back_preds(size); IntListVector back_preds(size); IntVector header(size); CharVector type(size); IntVector last(size); NodeVector nodes(size); BasicBlockMap number; // Step a: // - initialize all nodes as unvisited. // - depth-first traversal and numbering. // - unreached BB's are marked as dead. // for (MaoCFG::NodeMap::iterator bb_iter = CFG_->GetBasicBlocks()->begin(); bb_iter != CFG_->GetBasicBlocks()->end(); ++bb_iter) { number[(*bb_iter).second] = kUnvisited; } DFS(CFG_->GetStartBasicBlock(), &nodes, &number, &last, 0); // Step b: // - iterate over all nodes. // // A backedge comes from a descendant in the DFS tree, and non-backedges // from non-descendants (following Tarjan). // // - check incoming edges 'v' and add them to either // - the list of backedges (back_preds) or // - the list of non-backedges (non_back_preds) // for (int w = 0; w < size; w++) { header[w] = 0; type[w] = BB_NONHEADER; BasicBlock *node_w = nodes[w].bb(); if (!node_w) { type[w] = BB_DEAD; continue; // dead BB } if (node_w->GetNumPred()) { for (BasicBlockIter inedges = node_w->in_edges()->begin(); inedges != node_w->in_edges()->end(); ++inedges) { BasicBlock *node_v = *inedges; int v = number[ node_v ]; if (v == kUnvisited) continue; // dead node if (IsAncestor(w, v, &last)) back_preds[w].push_back(v); else non_back_preds[w].insert(v); } } } // Start node is root of all other loops. header[0] = 0; // Step c: // // The outer loop, unchanged from Tarjan. It does nothing except // for those nodes which are the destinations of backedges. // For a header node w, we chase backward from the sources of the // backedges adding nodes to the set P, representing the body of // the loop headed by w. // // By running through the nodes in reverse of the DFST preorder, // we ensure that inner loop headers will be processed before the // headers for surrounding loops. // for (int w = size-1; w >= 0; w--) { NodeList node_pool; // this is 'P' in Havlak's paper BasicBlock *node_w = nodes[w].bb(); if (!node_w) continue; // dead BB // Step d: IntList::iterator back_pred_iter = back_preds[w].begin(); IntList::iterator back_pred_end = back_preds[w].end(); for (; back_pred_iter != back_pred_end; back_pred_iter++) { int v = *back_pred_iter; if (v != w) node_pool.push_back(nodes[v].FindSet()); else type[w] = BB_SELF; } // Copy node_pool to worklist. // NodeList worklist; NodeList::iterator niter = node_pool.begin(); NodeList::iterator nend = node_pool.end(); for (; niter != nend; ++niter) worklist.push_back(*niter); if (!node_pool.empty()) type[w] = BB_REDUCIBLE; // work the list... // while (!worklist.empty()) { UnionFindNode x = *worklist.front(); worklist.pop_front(); // Step e: // // Step e represents the main difference from Tarjan's method. // Chasing upwards from the sources of a node w's backedges. If // there is a node y' that is not a descendant of w, w is marked // the header of an irreducible loop, there is another entry // into this loop that avoids w. // // The algorithm has degenerated. Break and // return in this case. 
// size_t non_back_size = non_back_preds[x.dfs_number()].size(); if (non_back_size > kMaxNonBackPreds) { lsg_->KillAll(); return; } IntSet::iterator non_back_pred_iter = non_back_preds[x.dfs_number()].begin(); IntSet::iterator non_back_pred_end = non_back_preds[x.dfs_number()].end(); for (; non_back_pred_iter != non_back_pred_end; non_back_pred_iter++) { UnionFindNode y = nodes[*non_back_pred_iter]; UnionFindNode *ydash = y.FindSet(); if (!IsAncestor(w, ydash->dfs_number(), &last)) { type[w] = BB_IRREDUCIBLE; non_back_preds[w].insert(ydash->dfs_number()); } else { if (ydash->dfs_number() != w) { NodeList::iterator nfind = find(node_pool.begin(), node_pool.end(), ydash); if (nfind == node_pool.end()) { worklist.push_back(ydash); node_pool.push_back(ydash); } } } } } // Collapse/Unionize nodes in a SCC to a single node // For every SCC found, create a loop descriptor and link it in. // if (!node_pool.empty() || (type[w] == BB_SELF)) { SimpleLoop* loop = lsg_->CreateNewLoop(); // At this point, one can set attributes to the loop, such as: // // the bottom node: // IntList::iterator iter = back_preds[w].begin(); // loop bottom is: nodes[*backp_iter].node); // // the number of backedges: // back_preds[w].size() // // whether this loop is reducible: // type[w] != BB_IRREDUCIBLE // // TODO(rhundt): Define those interfaces in the Loop Forest. // nodes[w].set_loop(loop); for (niter = node_pool.begin(); niter != node_pool.end(); niter++) { UnionFindNode *node = (*niter); // Add nodes to loop descriptor. header[node->dfs_number()] = w; node->Union(&nodes[w]); // Nested loops are not added, but linked together. if (node->loop()) node->loop()->set_parent(loop); else loop->AddNode(node->bb()); } lsg_->AddLoop(loop); } // node_pool.size } // Step c } // FindLoops
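// FindLoops above classifies an edge as a backedge with IsAncestor(w, v, &last).
// Given the DFS preorder numbering built in step (a), where last[w] holds the
// highest preorder number inside w's DFS subtree, the ancestor test reduces to
// a range check. The helper below is a sketch of that convention, not
// necessarily the exact original definition:
#include <vector>

bool IsAncestor(int w, int v, const std::vector<int>* last) {
  // w is an ancestor of v in the DFS tree iff v falls inside w's preorder range.
  return (w <= v) && (v <= (*last)[w]);
}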
void DataFlow::mergeFlow(Graph<NodeDesc>* f,const Graph<NodeDesc>* g, std::map<Node*,Node*>& mapping) { // mapping is g -> f NodeList queue = g->rootNodes(); while (!queue.empty()) { Node* n = queue.back(); queue.pop_back(); // check predecessors bool allSourcesMapped = true; for (LinkListCIt sIt=n->sources().begin();sIt!=n->sources().end();sIt++) { Link* sLink = *sIt; map<Node*,Node*>::iterator findIt=mapping.find(sLink->source); if (findIt==mapping.end()) { allSourcesMapped = false; break; } } if (!allSourcesMapped) { // some sources miss. Ignore current node for now continue; } if (mapping.find(n)==mapping.end()) { // merge node // all sources are merged, merge current node Node* mergedNode(NULL); // first check if identical node already exists in f NodeList candidates; if (n->sources().size()>0) { candidates = mapping.find(n->sources()[0]->source)->second->targetNodes(); } else { candidates = f->rootNodes(); } for (NodeListIt cIt=candidates.begin();cIt!=candidates.end();cIt++) { if ((**cIt==*n) && sources_match(n->sources(),(*cIt)->sources(),mapping)) { // found identical node mergedNode = *cIt; break; } } if (mergedNode==NULL) { // no identical node found. Create node mergedNode = f->createNode(n->v); for (LinkListCIt sIt=n->sources().begin();sIt!=n->sources().end();sIt++) { f->link(mapping.find((*sIt)->source)->second,(*sIt)->sourceOutputPort, mergedNode, (*sIt)->targetInputPort); } } // register mapping mapping[n] = mergedNode; } // add target nodes to queue NodeList targets = n->targetNodes(); queue.insert(queue.end(),targets.begin(),targets.end()); } // merge names for (NameMapCIt gnIt=g->getNames().begin();gnIt!=g->getNames().end();gnIt++) { NameMapCIt fnIt = f->getNames().find(gnIt->first); if (fnIt!=f->getNames().end()) { // check nodes are merged if (mapping[gnIt->second]!=fnIt->second) { cerr << "ERROR: '" << gnIt->first << "' node exists in the two graphs and cannot be merged !" << endl; } } else { // add named node to f f->setNodeName(mapping[gnIt->second], gnIt->first); } } }
void Liveness::computePhiInfo() { RealUseMap.clear(); NodeList Phis; NodeAddr<FuncNode*> FA = DFG.getFunc(); NodeList Blocks = FA.Addr->members(DFG); for (NodeAddr<BlockNode*> BA : Blocks) { auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG); Phis.insert(Phis.end(), Ps.begin(), Ps.end()); } // phi use -> (map: reaching phi -> set of registers defined in between) std::map<NodeId,std::map<NodeId,RegisterAggr>> PhiUp; std::vector<NodeId> PhiUQ; // Work list of phis for upward propagation. // Go over all phis. for (NodeAddr<PhiNode*> PhiA : Phis) { // Go over all defs and collect the reached uses that are non-phi uses // (i.e. the "real uses"). RefMap &RealUses = RealUseMap[PhiA.Id]; NodeList PhiRefs = PhiA.Addr->members(DFG); // Have a work queue of defs whose reached uses need to be found. // For each def, add to the queue all reached (non-phi) defs. SetVector<NodeId> DefQ; NodeSet PhiDefs; for (NodeAddr<RefNode*> R : PhiRefs) { if (!DFG.IsRef<NodeAttrs::Def>(R)) continue; DefQ.insert(R.Id); PhiDefs.insert(R.Id); } // Collect the super-set of all possible reached uses. This set will // contain all uses reached from this phi, either directly from the // phi defs, or (recursively) via non-phi defs reached by the phi defs. // This set of uses will later be trimmed to only contain these uses that // are actually reached by the phi defs. for (unsigned i = 0; i < DefQ.size(); ++i) { NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]); // Visit all reached uses. Phi defs should not really have the "dead" // flag set, but check it anyway for consistency. bool IsDead = DA.Addr->getFlags() & NodeAttrs::Dead; NodeId UN = !IsDead ? DA.Addr->getReachedUse() : 0; while (UN != 0) { NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN); uint16_t F = A.Addr->getFlags(); if ((F & (NodeAttrs::Undef | NodeAttrs::PhiRef)) == 0) { RegisterRef R = PRI.normalize(A.Addr->getRegRef(DFG)); RealUses[R.Reg].insert({A.Id,R.Mask}); } UN = A.Addr->getSibling(); } // Visit all reached defs, and add them to the queue. These defs may // override some of the uses collected here, but that will be handled // later. NodeId DN = DA.Addr->getReachedDef(); while (DN != 0) { NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN); for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) { uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags(); // Must traverse the reached-def chain. Consider: // def(D0) -> def(R0) -> def(R0) -> use(D0) // The reachable use of D0 passes through a def of R0. if (!(Flags & NodeAttrs::PhiRef)) DefQ.insert(T.Id); } DN = A.Addr->getSibling(); } } // Filter out these uses that appear to be reachable, but really // are not. For example: // // R1:0 = d1 // = R1:0 u2 Reached by d1. // R0 = d3 // = R1:0 u4 Still reached by d1: indirectly through // the def d3. // R1 = d5 // = R1:0 u6 Not reached by d1 (covered collectively // by d3 and d5), but following reached // defs and uses from d1 will lead here. auto InPhiDefs = [&PhiDefs] (NodeAddr<DefNode*> DA) -> bool { return PhiDefs.count(DA.Id); }; for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) { // For each reached register UI->first, there is a set UI->second, of // uses of it. For each such use, check if it is reached by this phi, // i.e. check if the set of its reaching uses intersects the set of // this phi's defs. NodeRefSet &Uses = UI->second; for (auto I = Uses.begin(), E = Uses.end(); I != E; ) { auto UA = DFG.addr<UseNode*>(I->first); // Undef flag is checked above. 
assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0); RegisterRef R(UI->first, I->second); NodeList RDs = getAllReachingDefs(R, UA); // If none of the reaching defs of R are from this phi, remove this // use of R. I = any_of(RDs, InPhiDefs) ? std::next(I) : Uses.erase(I); } UI = Uses.empty() ? RealUses.erase(UI) : std::next(UI); } // If this phi reaches some "real" uses, add it to the queue for upward // propagation. if (!RealUses.empty()) PhiUQ.push_back(PhiA.Id); // Go over all phi uses and check if the reaching def is another phi. // Collect the phis that are among the reaching defs of these uses. // While traversing the list of reaching defs for each phi use, accumulate // the set of registers defined between this phi (PhiA) and the owner phi // of the reaching def. NodeSet SeenUses; for (auto I : PhiRefs) { if (!DFG.IsRef<NodeAttrs::Use>(I) || SeenUses.count(I.Id)) continue; NodeAddr<PhiUseNode*> PUA = I; if (PUA.Addr->getReachingDef() == 0) continue; RegisterRef UR = PUA.Addr->getRegRef(DFG); NodeList Ds = getAllReachingDefs(UR, PUA, true, false, NoRegs); RegisterAggr DefRRs(PRI); for (NodeAddr<DefNode*> D : Ds) { if (D.Addr->getFlags() & NodeAttrs::PhiRef) { NodeId RP = D.Addr->getOwner(DFG).Id; std::map<NodeId,RegisterAggr> &M = PhiUp[PUA.Id]; auto F = M.find(RP); if (F == M.end()) M.insert(std::make_pair(RP, DefRRs)); else F->second.insert(DefRRs); } DefRRs.insert(D.Addr->getRegRef(DFG)); } for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PhiA, PUA)) SeenUses.insert(T.Id); } } if (Trace) { dbgs() << "Phi-up-to-phi map with intervening defs:\n"; for (auto I : PhiUp) { dbgs() << "phi " << Print<NodeId>(I.first, DFG) << " -> {"; for (auto R : I.second) dbgs() << ' ' << Print<NodeId>(R.first, DFG) << Print<RegisterAggr>(R.second, DFG); dbgs() << " }\n"; } } // Propagate the reached registers up in the phi chain. // // The following type of situation needs careful handling: // // phi d1<R1:0> (1) // | // ... d2<R1> // | // phi u3<R1:0> (2) // | // ... u4<R1> // // The phi node (2) defines a register pair R1:0, and reaches a "real" // use u4 of just R1. The same phi node is also known to reach (upwards) // the phi node (1). However, the use u4 is not reached by phi (1), // because of the intervening definition d2 of R1. The data flow between // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0. // // When propagating uses up the phi chains, get the all reaching defs // for a given phi use, and traverse the list until the propagated ref // is covered, or until reaching the final phi. Only assume that the // reference reaches the phi in the latter case. for (unsigned i = 0; i < PhiUQ.size(); ++i) { auto PA = DFG.addr<PhiNode*>(PhiUQ[i]); NodeList PUs = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG); RefMap &RUM = RealUseMap[PA.Id]; for (NodeAddr<UseNode*> UA : PUs) { std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id]; RegisterRef UR = PRI.normalize(UA.Addr->getRegRef(DFG)); for (const std::pair<NodeId,RegisterAggr> &P : PUM) { bool Changed = false; const RegisterAggr &MidDefs = P.second; // Collect the set PropUp of uses that are reached by the current // phi PA, and are not covered by any intervening def between the // currently visited use UA and the the upward phi P. 
if (MidDefs.hasCoverOf(UR)) continue; // General algorithm: // for each (R,U) : U is use node of R, U is reached by PA // if MidDefs does not cover (R,U) // then add (R-MidDefs,U) to RealUseMap[P] // for (const std::pair<RegisterId,NodeRefSet> &T : RUM) { RegisterRef R = DFG.restrictRef(RegisterRef(T.first), UR); if (!R) continue; for (std::pair<NodeId,LaneBitmask> V : T.second) { RegisterRef S = DFG.restrictRef(RegisterRef(R.Reg, V.second), R); if (!S) continue; if (RegisterRef SS = MidDefs.clearIn(S)) { NodeRefSet &RS = RealUseMap[P.first][SS.Reg]; Changed |= RS.insert({V.first,SS.Mask}).second; } } } if (Changed) PhiUQ.push_back(P.first); } } } if (Trace) { dbgs() << "Real use map:\n"; for (auto I : RealUseMap) { dbgs() << "phi " << Print<NodeId>(I.first, DFG); NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first); NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG); if (!Ds.empty()) { RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG); dbgs() << '<' << Print<RegisterRef>(RR, DFG) << '>'; } else { dbgs() << "<noreg>"; } dbgs() << " -> " << Print<RefMap>(I.second, DFG) << '\n'; } } }
void DIETypeListItem::CreateChildren( wxTreeCtrl *tree, const wxTreeItemId& id)
{
    NodeList classTypes;
    NodeList enumTypes;
    NodeList structTypes;
    NodeList otherTypes;

    for (NodeList::const_iterator it = nodeList_.begin(); it != nodeList_.end(); ++it)
    {
        DIType diType(*it);
        switch (diType.getTag())
        {
        case dwarf::DW_TAG_class_type:
            classTypes.push_back(*it);
            break;
        case dwarf::DW_TAG_enumeration_type:
            enumTypes.push_back(*it);
            break;
        case dwarf::DW_TAG_structure_type:
            structTypes.push_back(*it);
            break;
        case dwarf::DW_TAG_base_type:
        case dwarf::DW_TAG_inheritance:
        case dwarf::DW_TAG_member:
        case dwarf::DW_TAG_subroutine_type:
        case dwarf::DW_TAG_union_type:
        case dwarf::DW_TAG_array_type:
        case dwarf::DW_TAG_pointer_type:
            break;
        default:
            otherTypes.push_back(*it);
            break;
        }
    }

    if (!classTypes.empty())
    {
        CreateChild(tree, id, new DIEListItem(module_, _("Class Types"), classTypes.begin(), classTypes.end()));
    }
    if (!enumTypes.empty())
    {
        CreateChild(tree, id, new DIEListItem(module_, _("Enumeration Types"), enumTypes.begin(), enumTypes.end()));
    }
    if (!structTypes.empty())
    {
        CreateChild(tree, id, new DIEListItem(module_, _("Structure Types"), structTypes.begin(), structTypes.end()));
    }
    if (!otherTypes.empty())
    {
        CreateChild(tree, id, new DIEListItem(module_, _("Other Types"), otherTypes.begin(), otherTypes.end()));
    }

    tree->SortChildren(id);
}
void DemandCalculator::CalcDemand(LinkGraphJob &job, Tscaler scaler) { NodeList supplies; NodeList demands; uint num_supplies = 0; uint num_demands = 0; for (NodeID node = 0; node < job.Size(); node++) { scaler.AddNode(job[node]); if (job[node].Supply() > 0) { supplies.push_back(node); num_supplies++; } if (job[node].Demand() > 0) { demands.push_back(node); num_demands++; } } if (num_supplies == 0 || num_demands == 0) return; /* Mean acceptance attributed to each node. If the distribution is * symmetric this is relative to remote supply, otherwise it is * relative to remote demand. */ scaler.SetDemandPerNode(num_demands); uint chance = 0; while (!supplies.empty() && !demands.empty()) { NodeID from_id = supplies.front(); supplies.pop_front(); for (uint i = 0; i < num_demands; ++i) { assert(!demands.empty()); NodeID to_id = demands.front(); demands.pop_front(); if (from_id == to_id) { /* Only one node with supply and demand left */ if (demands.empty() && supplies.empty()) return; demands.push_back(to_id); continue; } int32 supply = scaler.EffectiveSupply(job[from_id], job[to_id]); assert(supply > 0); /* Scale the distance by mod_dist around max_distance */ int32 distance = this->max_distance - (this->max_distance - (int32)job[from_id][to_id].Distance()) * this->mod_dist / 100; /* Scale the accuracy by distance around accuracy / 2 */ int32 divisor = this->accuracy * (this->mod_dist - 50) / 100 + this->accuracy * distance / this->max_distance + 1; assert(divisor > 0); uint demand_forw = 0; if (divisor <= supply) { /* At first only distribute demand if * effective supply / accuracy divisor >= 1 * Others are too small or too far away to be considered. */ demand_forw = supply / divisor; } else if (++chance > this->accuracy * num_demands * num_supplies) { /* After some trying, if there is still supply left, distribute * demand also to other nodes. */ demand_forw = 1; } demand_forw = min(demand_forw, job[from_id].UndeliveredSupply()); scaler.SetDemands(job, from_id, to_id, demand_forw); if (scaler.HasDemandLeft(job[to_id])) { demands.push_back(to_id); } else { num_demands--; } if (job[from_id].UndeliveredSupply() == 0) break; } if (job[from_id].UndeliveredSupply() != 0) { supplies.push_back(from_id); } else { num_supplies--; } } }
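// Worked example of the distance and divisor scaling above, with invented
// numbers (the real values come from the game settings): take accuracy = 16,
// mod_dist = 100, max_distance = 1000, and a link whose Distance() is 200.
//
//   distance = 1000 - (1000 - 200) * 100 / 100 = 200
//   divisor  = 16 * (100 - 50) / 100 + 16 * 200 / 1000 + 1
//            = 8 + 3 + 1 = 12            (integer division throughout)
//
// So a pair this close only receives demand once its effective supply reaches
// 12, and then gets supply / 12 of it per pass (until the ++chance fallback
// eventually hands out single units); more distant pairs produce a larger
// divisor and therefore smaller shares.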