void Object::notifyDetach()
{
    if( !isMaster( ))
        return;

    // unmap slaves
    const Nodes slaves = impl_->cm->getSlaveNodes();
    if( slaves.empty( ))
        return;

    LBWARN << slaves.size()
           << " slaves subscribed during deregisterObject of "
           << lunchbox::className( this ) << " id " << impl_->id << std::endl;

    for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i )
    {
        NodePtr node = *i;
        node->send( CMD_NODE_UNMAP_OBJECT ) << impl_->id;
    }
}
TEST(TestScriptFetcher, HandleParentArgument) {
  TemporaryDir tmp_dir;
  TemporaryFile cmdFile(tmp_dir.createFile());
  cmdFile.writeString(
    "#!/bin/sh\n"
    "echo $1:foo\n"
  );
  PCHECK(chmod(cmdFile.getFilename().c_str(), 0700) == 0);
  Config config(dynamic::object
    ("resources", dynamic::object)
    ("nodes", dynamic::object
      ("levels", {"level1", "level2"})
      ("node_sources", {
        dynamic::object
          ("source", "manual")
          ("prefs", dynamic::object
            ("node1", {})
            ("node2", {})
          ),
        dynamic::object
          ("source", "script")
          ("prefs", dynamic::object
            ("parent_level", "level1")
            ("script", cmdFile.getFilename().native())
          )
      })
    )
  );
  Nodes nodes;
  NodesLoader::_fetchNodesImpl(config, &nodes);
  ASSERT_EQ(5, nodes.size());
  auto n1 = nodes.getNodeVerySlow("node1:foo");
  ASSERT_EQ(2, n1->level());
  ASSERT_EQ("node1", n1->parent()->name());
  auto n2 = nodes.getNodeVerySlow("node2:foo");
  ASSERT_EQ(2, n2->level());
  ASSERT_EQ("node2", n2->parent()->name());
}
void Object::notifyDetach()
{
    if( !isMaster( ))
        return;

    // unmap slaves
    const Nodes slaves = _cm->getSlaveNodes();
    if( slaves.empty( ))
        return;

    EQWARN << slaves.size()
           << " slaves subscribed during deregisterObject of "
           << base::className( this ) << " id " << _id << std::endl;

    NodeUnmapObjectPacket packet;
    packet.objectID = _id;

    for( NodesCIter i = slaves.begin(); i != slaves.end(); ++i )
    {
        NodePtr node = *i;
        node->send( packet );
    }
}
GuiDomain::GuiDomain(Domain *pDomain, OverviewNetworkSimulationWidget* pNetSimWidget):QGraphicsPolygonItem()
{
    mNetSimWidget = pNetSimWidget;
    mDomain = pDomain;

    Nodes tNodes = pDomain->GetNodes();
    if (tNodes.size() > 0)
    {
        Nodes::iterator tIt;
        for(tIt = tNodes.begin(); tIt != tNodes.end(); tIt++)
        {
            GuiNode* tGuiNode = pNetSimWidget->GetGuiNode(*tIt);
            tGuiNode->AddDomain(this);
        }
    }

    setZValue(-2000.0);
    setPen(QPen(QColor(0xdd, 0xdd, 0xdd), 1, Qt::SolidLine, Qt::FlatCap, Qt::RoundJoin));
    setBrush(QColor(0xdd, 0xdd, 0xdd));
    UpdatePosition();

#ifdef DEBUG_GUI_SIMULATION_TOPOLOGY_CREATION
    LOG(LOG_WARN, "Created GUI domain %s", mDomain->GetDomainAddress().c_str());
#endif
}
void LineMerger::buildEdgeStringsForNonDegree2Nodes()
{
#if GEOS_DEBUG
    cerr<<__FUNCTION__<<endl;
#endif
    typedef std::vector<Node*> Nodes;

    Nodes nodes;
    graph.getNodes(nodes);
    for (Nodes::size_type i=0, in=nodes.size(); i<in; ++i) {
        Node *node=nodes[i];
#if GEOS_DEBUG
        cerr<<"Node "<<i<<": "<<*node<<endl;
#endif
        if (node->getDegree()!=2) {
            buildEdgeStringsStartingAt(node);
            node->setMarked(true);
#if GEOS_DEBUG
            cerr<<" setMarked(true) : "<<*node<<endl;
#endif
        }
    }
}
/**
 * Takes a subgraph of the given graph (all nodes in the graph with the given label),
 * partitions this subgraph into even smaller subgraphs (using something similar to k-means),
 * and gives all small subgraphs a unique label (using the given min_label).
 *
 * @param graph
 * @param label_of_connected_component
 * @param partition_size
 * @param min_label_for_partition_labeling
 * @return the number of generated partitions
 */
std::size_t
partition_connected_component(UniGraph * graph, std::size_t label_of_connected_component,
    std::size_t partition_size, std::size_t min_label_for_partition_labeling)
{
    typedef std::size_t Node;
    typedef std::size_t Label;
    typedef std::vector<Node> Nodes;

    Nodes nodes;
    for (Node node = 0; node < graph->num_nodes(); ++node)
        if (graph->get_label(node) == label_of_connected_component)
            nodes.push_back(node);

    const std::size_t num_partitions = (nodes.size() + partition_size - 1) / partition_size; // division and rounding up

    /********* k-means clustering *******/
    const std::size_t num_kmeans_iterations = 100;

    Nodes centroids;
    /* Draw centroids randomly. */
    std::default_random_engine generator;
    std::uniform_int_distribution<std::size_t> distribution(0, nodes.size() - 1);
    for (std::size_t partition = 0; partition < num_partitions; ++partition) {
        Node centroid = std::numeric_limits<Node>::max();
        while (std::find(centroids.begin(), centroids.end(), centroid) != centroids.end())
            centroid = nodes.at(distribution(generator));
        centroids.push_back(centroid);
    }

    for (std::size_t kmeans_iteration = 0; kmeans_iteration < num_kmeans_iterations; ++kmeans_iteration) {
        const Label unvisited = std::numeric_limits<Label>::max();
        for (Node const & node : nodes)
            graph->set_label(node, unvisited);

        /* Put centroids into queues. */
        std::vector<Nodes> queues(num_partitions);
        for (std::size_t i = 0; i < num_partitions; ++i)
            queues.at(i).push_back(centroids.at(i));

        /* Grow regions starting from centroids. */
        while (std::any_of(queues.begin(), queues.end(),
            [](Nodes const & queue) { return !queue.empty(); })) {
            #pragma omp parallel for
            for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
                Nodes & old_queue = queues.at(queue_id);
                std::unordered_set<Node> new_queue;

                for (Node node : old_queue)
                    graph->set_label(node, min_label_for_partition_labeling + queue_id); // there is a race condition for partition boundary nodes but we don't care

                for (Node node : old_queue) {
                    /* Copy all unvisited (and not yet inserted) neighbors into new queue. */
                    for (Node neighbor : graph->get_adj_nodes(node))
                        if (graph->get_label(neighbor) == unvisited)
                            new_queue.insert(neighbor);
                }

                old_queue.clear();
                old_queue.insert(old_queue.begin(), new_queue.begin(), new_queue.end());
            }
        }

        /* If we are in the final iteration we stop here to keep the graph labels
         * (they would be removed in the following region shrinking step). */
        if (kmeans_iteration == num_kmeans_iterations - 1)
            break;

        /* Put partition boundary nodes into queues. */
        for (Node const node : nodes) {
            Label const cur_label = graph->get_label(node);
            std::size_t const cur_queue = cur_label - min_label_for_partition_labeling;

            Nodes const & neighbors = graph->get_adj_nodes(node);
            /* Each node, where any of its neighbors has a different label, is a boundary node. */
            if (std::any_of(neighbors.begin(), neighbors.end(),
                [graph, cur_label] (Node const neighbor) {
                    return graph->get_label(neighbor) != cur_label;
                }))
                queues.at(cur_queue).push_back(node);
        }

        /* Shrink regions starting from boundaries to obtain new centroids. */
        #pragma omp parallel for
        for (std::size_t queue_id = 0; queue_id < queues.size(); ++queue_id) {
            Nodes & old_queue = queues.at(queue_id);
            while (!old_queue.empty()) {
                std::unordered_set<Node> new_queue;

                for (Node node : old_queue)
                    graph->set_label(node, unvisited);

                for (Node node : old_queue) {
                    /* Copy all neighbors that have not yet been marked
                     * (and have not yet been inserted) into new queue. */
                    for (Node neighbor : graph->get_adj_nodes(node))
                        if (graph->get_label(neighbor) == min_label_for_partition_labeling + queue_id)
                            new_queue.insert(neighbor);
                }

                /* If the new queue is empty we are (almost) finished and use a
                 * random node from the old queue as new centroid. */
                if (new_queue.empty()) {
                    std::uniform_int_distribution<std::size_t> distribution(0, old_queue.size() - 1);
                    centroids.at(queue_id) = old_queue.at(distribution(generator));
                }

                /* Replace old queue with new one. */
                old_queue.clear();
                old_queue.insert(old_queue.begin(), new_queue.begin(), new_queue.end());
            }
        }
    }

    return num_partitions;
}
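A minimal driver sketch for the function above, not taken from the original source: it assumes component labels are contiguous starting at 0 and that the caller knows how many there are; the names partition_all_components, num_components and max_partition_size are hypothetical, and only the UniGraph accessors already used above are relied on.

/* Illustrative driver sketch (assumed, not from the original source). */
std::size_t
partition_all_components(UniGraph * graph, std::size_t num_components,
    std::size_t max_partition_size)
{
    /* Hand out partition labels above the existing component labels. */
    std::size_t next_free_label = num_components;
    for (std::size_t label = 0; label < num_components; ++label)
        next_free_label += partition_connected_component(graph, label,
            max_partition_size, next_free_label);
    /* Return the first label that is still unused after partitioning. */
    return next_free_label;
}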
void LeaflessOrthoRouter::route(Logger *logger) {
    // Set up for logging.
    unsigned ln = logger != nullptr ? logger->nextLoggingIndex : 0;
    std::function<void(unsigned)> log = [ln, this, logger](unsigned n)->void {
        if (logger != nullptr) {
            std::string fn = string_format("%02d_%02d_routing_attempt", ln, n);
            std::string path = logger->writeFullPathForFilename(fn);
            this->m_ra.router.outputInstanceToSVG(path);
        }
    };
    /*
     * We may need to route multiple times to ensure that at least two sides of each node are being used,
     * but in theory we should never have to route more than 4n+1 times.
     *
     * Proof: We always begin with an initial routing. We want to show it could be necessary to re-route
     * at most 4n times.
     *
     * In order to see this, we first argue that the worst-case scenario for any single node is that it
     * require four routings. Consider then some node u all of whose edges have been routed to one side, s0. We
     * then pick some edge e0 incident to u, say that it may not connect to side s0, and we re-route for the first time.
     *
     * While unlikely, it could be that, for whatever reason, now all edges incident to node u are routed to some other side,
     * s1. We then pick some edge e1 (could be the same or different from e0), forbid it from connecting to
     * side s1, and re-route for a second time.
     *
     * Again, for whatever reason, all edges could now connect to one
     * of the two remaining sides, s2. Continuing in this way, we could be led to re-route a third and a fourth time. But
     * prior to the fourth re-routing it would be the case that for each side si of node u, there was
     * some edge ei incident to u that had been forbidden from connecting on side si. Therefore on the fourth
     * re-routing it would be impossible for all edges to connect on any single side of u.
     *
     * So much for the case of a single node. However, in again a highly unlikely worst-case scenario, it could be
     * that during the first five routings no other node besides u was a pseudoleaf (had all edges routed to one side),
     * but after the fifth some other node became a pseudoleaf. In this way we could be led to do four re-routings
     * for each node in the graph. QED
     *
     * In practice, it would probably be very rare for more than two routings to ever be necessary. For this
     * requires the odd circumstance, considered in the proof, that forbidding one edge from connecting on a
     * given side somehow results in /all/ edges incident at that node migrating to some other, single side.
     *
     * In order that our theory be tested, we use an infinite loop with counter and assertion, instead
     * of a mere for-loop which would fail silently.
     */
    size_t numRoutings = 0;
    size_t maxRoutings = 4*m_n + 1;
    while (true) {
        m_ra.router.processTransaction();
        log(++numRoutings);
        // As explained in the comments above, at most 4n+1 routings should ever be needed.
        COLA_ASSERT(numRoutings <= maxRoutings);
        // For testing purposes, we may want to record the results of
        // each routing attempt.
        if (recordEachAttempt) {
            m_ra.recordRoutes(true);
            routingAttemptTglf.push_back(m_graph->writeTglf());
        }
        // Are there any nodes having all of their edges routed
        // out of just one side? This is what we want to prevent.
        // Such nodes would become leaves in a planarisation, so we
        // call them "pseudoleaves".
        Nodes pseudoLeaves;
        // For each such Node (if any), there is a sole direction in which
        // all connectors depart. We keep track of those directions as we work.
        vector<CardinalDir> soleDepartureDirecs;
        // Check each Node in the Graph:
        for (auto p : m_graph->getNodeLookup()) {
            Node_SP &u = p.second;
            const EdgesById edgeLookup = u->getEdgeLookup();
            // Sanity check, that Node u is not an actual leaf:
            COLA_ASSERT(edgeLookup.size() > 1);
            // Determine the departure direction from Node u for its first Edge.
            auto edge_it = edgeLookup.cbegin();
            CardinalDir d0 = departureDir((*edge_it).second, u);
            // If two or more directions have been used, some edge must depart
            // in a different direction than this one. (For if all the rest equal
            // this first one, then all are the same.)
            bool isPseudoLeaf = true;
            for (auto jt = ++edge_it; jt != edgeLookup.cend(); ++jt) {
                CardinalDir d1 = departureDir((*jt).second, u);
                if (d1 != d0) {
                    isPseudoLeaf = false;
                    break;
                }
            }
            if (isPseudoLeaf) {
                pseudoLeaves.push_back(u);
                soleDepartureDirecs.push_back(d0);
            }
        }
        // Are there any pseudoleaves?
        if (pseudoLeaves.empty()) {
            // If there are none, then we're done routing, and can break out of the outer while loop.
            break;
        } else {
            // But if there are still pseudoleaves, then we need to work on them.
            for (size_t i = 0; i < pseudoLeaves.size(); ++i) {
                // Get the Node and the direction in which all connectors currently depart from it.
                Node_SP u = pseudoLeaves[i];
                CardinalDir d0 = soleDepartureDirecs[i];
                // Now among all Edges incident at this Node we must select one that is still
                // allowed to depart in at least two directions (hence at least one different
                // from d0), and remove direction d0 from its list of allowed directions.
                //
                // If possible, we would like to choose such an Edge e such that if v is the Node
                // at the other end, then the predominant cardinal direction from Node u to Node v
                // be different than d0; for such would seem a suitable Edge to depart in a different
                // direction. However, such an Edge may not exist. In that case, we will just take
                // any one.
                Edge_SP candidate;
                for (auto p : u->getEdgeLookup()) {
                    Edge_SP &e = p.second;
                    // If this Edge is only allowed the one direction, then skip it.
                    if (isSoleDirec(m_allowedConnDirs.at(e->id()).at(u->id()))) continue;
                    // Otherwise mark it as the candidate.
                    candidate = e;
                    // Determine the predominant cardinal direction from Node u to the Node v at
                    // the opposite end of Edge e.
                    Node_SP v = e->getOtherEnd(*u);
                    CardinalDir d1 = Compass::cardinalDirection(u, v);
                    // If this is different from direction d0, then we're happy to accept this candidate.
                    if (d1 != d0) break;
                }
                // Start with the directions allowed last time:
                ConnDirFlags available = m_allowedConnDirs.at(candidate->id()).at(u->id());
                // XOR with the connection flag corresponding to cardinal direction d0,
                // so that this direction is no longer allowed.
                available ^= Compass::libavoidConnDirs.at(d0);
                // Record the new value.
                m_allowedConnDirs[candidate->id()][u->id()] = available;
                // Set a new ConnEnd.
                Point p = u->getCentre();
                ConnEnd end(p, available);
                ConnRef *cr = m_ra.edgeIdToConnRef.at(candidate->id());
                if (u->id() == candidate->getSourceEnd()->id()) {
                    cr->setSourceEndpoint(end);
                } else {
                    cr->setDestEndpoint(end);
                }
            }
        }
    }
    // Finally, the routing is done and we can set the connector routes in the Edge objects.
    m_ra.recordRoutes(true);
}
static SV *node_to_sv(pTHX_ Node *node)
{
    SV *ret = NULL;
    if (!node) return ret;
    if (TYPE_match(node, BranchNode)) {
        BranchNode *branch = dynamic_cast<BranchNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, branch->tk);
        add_key(hash, "left", branch->left);
        add_key(hash, "right", branch->right);
        add_key(hash, "next", branch->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Branch");
    } else if (TYPE_match(node, FunctionCallNode)) {
        FunctionCallNode *call = dynamic_cast<FunctionCallNode *>(node);
        Nodes *args = call->args;
        size_t argsize = args->size();
        AV *array = new_Array();
        for (size_t i = 0; i < argsize; i++) {
            SV *arg = node_to_sv(aTHX_ args->at(i));
            if (!arg) continue;
            av_push(array, set(arg));
        }
        HV *hash = (HV*)new_Hash();
        add_key(hash, "next", call->next);
        add_token(hash, call->tk);
        (void)hv_stores(hash, "args", set(new_Ref(array)));
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::FunctionCall");
    } else if (TYPE_match(node, ArrayNode)) {
        ArrayNode *array = dynamic_cast<ArrayNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, array->tk);
        add_key(hash, "next", array->next);
        add_key(hash, "idx", array->idx);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Array");
    } else if (TYPE_match(node, HashNode)) {
        HashNode *h = dynamic_cast<HashNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, h->tk);
        add_key(hash, "next", h->next);
        add_key(hash, "key", h->key);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Hash");
    } else if (TYPE_match(node, DereferenceNode)) {
        DereferenceNode *dref = dynamic_cast<DereferenceNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, dref->tk);
        add_key(hash, "next", dref->next);
        add_key(hash, "expr", dref->expr);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Dereference");
    } else if (TYPE_match(node, FunctionNode)) {
        FunctionNode *f = dynamic_cast<FunctionNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, f->tk);
        add_key(hash, "next", f->next);
        add_key(hash, "body", f->body);
        add_key(hash, "prototype", f->prototype);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Function");
    } else if (TYPE_match(node, BlockNode)) {
        BlockNode *b = dynamic_cast<BlockNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, b->tk);
        add_key(hash, "next", b->next);
        add_key(hash, "body", b->body);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Block");
    } else if (TYPE_match(node, ReturnNode)) {
        ReturnNode *r = dynamic_cast<ReturnNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, r->tk);
        add_key(hash, "next", r->next);
        add_key(hash, "body", r->body);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Return");
    } else if (TYPE_match(node, SingleTermOperatorNode)) {
        SingleTermOperatorNode *s = dynamic_cast<SingleTermOperatorNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, s->tk);
        add_key(hash, "next", s->next);
        add_key(hash, "expr", s->expr);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::SingleTermOperator");
    } else if (TYPE_match(node, DoubleTermOperatorNode)) {
    } else if (TYPE_match(node, LeafNode)) {
        LeafNode *leaf = dynamic_cast<LeafNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, leaf->tk);
        add_key(hash, "next", leaf->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Leaf");
    } else if (TYPE_match(node, ListNode)) {
        ListNode *list = dynamic_cast<ListNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, list->tk);
        add_key(hash, "data", list->data);
        add_key(hash, "next", list->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::List");
    } else if (TYPE_match(node, ArrayRefNode)) {
        ArrayRefNode *ref = dynamic_cast<ArrayRefNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, ref->tk);
        add_key(hash, "data", ref->data);
        add_key(hash, "next", ref->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::ArrayRef");
    } else if (TYPE_match(node, HashRefNode)) {
        HashRefNode *ref = dynamic_cast<HashRefNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, ref->tk);
        add_key(hash, "data", ref->data);
        add_key(hash, "next", ref->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::HashRef");
    } else if (TYPE_match(node, IfStmtNode)) {
        IfStmtNode *stmt = dynamic_cast<IfStmtNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, stmt->tk);
        add_key(hash, "next", stmt->next);
        add_key(hash, "expr", stmt->expr);
        add_key(hash, "true_stmt", stmt->true_stmt);
        add_key(hash, "false_stmt", stmt->false_stmt);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::IfStmt");
    } else if (TYPE_match(node, ElseStmtNode)) {
        ElseStmtNode *stmt = dynamic_cast<ElseStmtNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, stmt->tk);
        add_key(hash, "next", stmt->next);
        add_key(hash, "stmt", stmt->stmt);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::ElseStmt");
    } else if (TYPE_match(node, DoStmtNode)) {
        DoStmtNode *stmt = dynamic_cast<DoStmtNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, stmt->tk);
        add_key(hash, "next", stmt->next);
        add_key(hash, "stmt", stmt->stmt);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::DoStmt");
    } else if (TYPE_match(node, ForStmtNode)) {
        ForStmtNode *stmt = dynamic_cast<ForStmtNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, stmt->tk);
        add_key(hash, "next", stmt->next);
        add_key(hash, "init", stmt->init);
        add_key(hash, "cond", stmt->cond);
        add_key(hash, "progress", stmt->progress);
        add_key(hash, "true_stmt", stmt->true_stmt);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::ForStmt");
    } else if (TYPE_match(node, ForeachStmtNode)) {
        ForeachStmtNode *stmt = dynamic_cast<ForeachStmtNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, stmt->tk);
        add_key(hash, "next", stmt->next);
        add_key(hash, "itr", stmt->itr);
        add_key(hash, "cond", stmt->cond);
        add_key(hash, "true_stmt", stmt->true_stmt);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::ForeachStmt");
    } else if (TYPE_match(node, WhileStmtNode)) {
        WhileStmtNode *stmt = dynamic_cast<WhileStmtNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, stmt->tk);
        add_key(hash, "next", stmt->next);
        add_key(hash, "true_stmt", stmt->true_stmt);
        add_key(hash, "expr", stmt->expr);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::WhileStmt");
    } else if (TYPE_match(node, ModuleNode)) {
        ModuleNode *mod = dynamic_cast<ModuleNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, mod->tk);
        add_key(hash, "next", mod->next);
        add_key(hash, "args", mod->args);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Module");
    } else if (TYPE_match(node, PackageNode)) {
        PackageNode *pkg = dynamic_cast<PackageNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, pkg->tk);
        add_key(hash, "next", pkg->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Package");
    } else if (TYPE_match(node, RegPrefixNode)) {
        RegPrefixNode *reg = dynamic_cast<RegPrefixNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, reg->tk);
        add_key(hash, "next", reg->next);
        add_key(hash, "option", reg->option);
        add_key(hash, "expr", reg->exp);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::RegPrefix");
    } else if (TYPE_match(node, RegReplaceNode)) {
        RegReplaceNode *reg = dynamic_cast<RegReplaceNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, reg->tk);
        add_key(hash, "next", reg->next);
        add_key(hash, "from", reg->from);
        add_key(hash, "to", reg->to);
        add_key(hash, "option", reg->option);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::RegReplace");
    } else if (TYPE_match(node, RegexpNode)) {
        RegexpNode *reg = dynamic_cast<RegexpNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, reg->tk);
        add_key(hash, "next", reg->next);
        add_key(hash, "option", reg->option);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Regexp");
    } else if (TYPE_match(node, LabelNode)) {
        LabelNode *label = dynamic_cast<LabelNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, label->tk);
        add_key(hash, "next", label->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Label");
    } else if (TYPE_match(node, HandleNode)) {
        HandleNode *fh = dynamic_cast<HandleNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, fh->tk);
        add_key(hash, "expr", fh->expr);
        add_key(hash, "next", fh->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::Handle");
    } else if (TYPE_match(node, HandleReadNode)) {
        HandleReadNode *fh = dynamic_cast<HandleReadNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, fh->tk);
        add_key(hash, "next", fh->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::HandleRead");
    } else if (TYPE_match(node, ThreeTermOperatorNode)) {
        ThreeTermOperatorNode *term = dynamic_cast<ThreeTermOperatorNode *>(node);
        HV *hash = (HV*)new_Hash();
        add_token(hash, term->tk);
        add_key(hash, "cond", term->cond);
        add_key(hash, "true_expr", term->true_expr);
        add_key(hash, "false_expr", term->false_expr);
        add_key(hash, "next", term->next);
        ret = bless(aTHX_ hash, "Compiler::Parser::Node::ThreeTermOperator");
    } else {
        assert(0 && "node type is not found");
    }
    return ret;
}
int main(int argc, char **argv)
{
    char *file_input = NULL;
    int c;
    Node node;
    Nodes nodes;
    struct Summary summary;
    memset(&summary, 0, sizeof(struct Summary));

    // options
    while ((c = getopt(argc, argv, "i:")) != -1) {
        switch (c) {
        case 'i':
            file_input = optarg;
            break;
        default:
            break;
        }
    }
    if (!file_input) {
        printf("Usage: ./build_tree -i inputs.txt\n");
        exit(EXIT_SUCCESS);
    }

    // read input file
    std::ifstream fin(file_input);
    if (!fin.is_open()) {
        std::cerr << "open file failure: " << file_input << std::endl;
        exit(EXIT_FAILURE);
    }
    while (!fin.eof()) {
        std::string uid;
        std::string balance;
        if (!std::getline(fin, uid, '\t') || !std::getline(fin, balance, '\n')) {
            break;
        }
        make_user_node(uid.c_str(), atoll(balance.c_str()), &node);
        nodes.push_back(node);
        summary.sum += node.sum;
    }
    fin.close();
    summary.user_count = nodes.size();

    // nodes at level 0 should be sorted
    std::sort(nodes.begin(), nodes.end());

    int idx = 0;
    Nodes parents;
    parents.reserve(nodes.size()%2 + 1);
    while (nodes.size() > 1) {
        if (nodes.size() % 2 == 1) {
            summary.padding_sum += nodes[nodes.size()-1].sum;
            nodes.push_back(nodes[nodes.size()-1]);
        }
        for (Nodes::iterator it = nodes.begin(); it != nodes.end(); it++) {
            std::cout << idx++ << "\t" << summary.level << "\t" << it->sum << "\t";
            dump_hex(it->hash, 8);
            std::cout << std::endl;
        }
        parents.resize(0);
        build_parent_nodes(&nodes, &parents);
        nodes = parents;
        summary.level++;
    }
    std::cout << idx++ << "\t" << summary.level << "\t" << nodes[0].sum << "\t";
    dump_hex(nodes[0].hash, 8);
    std::cout << std::endl;
    std::cout << "summary:\t" << summary.user_count << "\t" << summary.sum << "\t"
              << summary.padding_sum << "\t" << summary.level << std::endl;

    return 0;
}
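The helper build_parent_nodes() is called above but not shown. A minimal sketch of the shape it presumably has, pairing adjacent level-N nodes into level-N+1 parents; combine_nodes() is a hypothetical stand-in for however the real code sums the children's balances and combines their hashes, which is not visible here.

/* Illustrative sketch only; combine_nodes() is a hypothetical helper. */
static void build_parent_nodes(const Nodes *children, Nodes *parents)
{
    // The caller guarantees an even count by duplicating the last node beforehand.
    for (size_t i = 0; i + 1 < children->size(); i += 2) {
        Node parent;
        combine_nodes((*children)[i], (*children)[i + 1], &parent); // hypothetical helper
        parents->push_back(parent);
    }
}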
Net_NodeID getUnusedNodeId() {
    if(nodes.size() == 0)
        return 2;
    return nodes.rbegin()->first + 1;
}
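The container behind getUnusedNodeId() is not shown above. A small self-contained sketch of the assumed context: an ordered std::map keyed by node ID, so that rbegin()->first is the highest ID currently in use, with IDs below 2 apparently reserved; the typedef and value type here are placeholders, not the original declarations.

#include <map>

/* Assumed context, not from the source above. */
typedef unsigned int Net_NodeID;            // hypothetical underlying type

struct NodeTableSketch {
    std::map<Net_NodeID, int> nodes;        // placeholder value type; ordered by ID

    Net_NodeID getUnusedNodeId() const {
        if (nodes.empty())
            return 2;                       // IDs 0/1 appear to be reserved
        return nodes.rbegin()->first + 1;   // highest ID in use, plus one
    }
};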