void SubgraphPlanarizer::CrossingStructure::restore(PlanRep &PG, int cc)
{
	//PG.initCC(cc);

	Array<node> id2Node(0,m_numCrossings-1,0);

	SListPure<edge> edges;
	PG.allEdges(edges);

	for(SListConstIterator<edge> itE = edges.begin(); itE.valid(); ++itE)
	{
		edge ePG = *itE;
		edge e = PG.original(ePG);

		SListConstIterator<int> it;
		for(it = m_crossings[e].begin(); it.valid(); ++it)
		{
			node x = id2Node[*it];

			edge ePGOld = ePG;
			ePG = PG.split(ePG);
			node y = ePG->source();

			if(x == 0) {
				id2Node[*it] = y;
			} else {
				PG.moveTarget(ePGOld, x);
				PG.moveSource(ePG, x);
				PG.delNode(y);
			}
		}
	}
}
//
// D e s t r u c t o r
//
DinoUmlToGraphConverter::~DinoUmlToGraphConverter()
{
	// Delete diagram graphs in UMLGraph format
	SListConstIterator<UMLGraph*> umlgIt;
	for (umlgIt = m_diagramGraphsInUMLGraphFormat.begin(); umlgIt.valid(); ++umlgIt){
		const Graph & associatedGraph = (const Graph &)(**umlgIt);
		delete *umlgIt;
		delete &associatedGraph;
	}
	m_diagramGraphsInUMLGraphFormat.clear();

	// Delete diagram graphs
	SListConstIterator<DinoUmlDiagramGraph*> dgIt;
	for (dgIt = m_diagramGraphs.begin(); dgIt.valid(); ++dgIt){
		delete *dgIt;
	}
	m_diagramGraphs.clear();

	// Destroy model graph
	delete m_modelGraph;

	// Destroy parser
	delete m_xmlParser;
}
void SpringEmbedderFRExact::ArrayGraph::initCC(int i)
{
	System::alignedMemoryFree(m_orig);
	System::alignedMemoryFree(m_src);
	System::alignedMemoryFree(m_tgt);
	System::alignedMemoryFree(m_x);
	System::alignedMemoryFree(m_y);
	System::alignedMemoryFree(m_nodeWeight);

	m_numNodes = m_nodesInCC[i].size();
	m_numEdges = 0;

	m_orig = (node *) System::alignedMemoryAlloc16(m_numNodes*sizeof(node));
	m_x = (double *) System::alignedMemoryAlloc16(m_numNodes*sizeof(double));
	m_y = (double *) System::alignedMemoryAlloc16(m_numNodes*sizeof(double));
	m_nodeWeight = (double *) System::alignedMemoryAlloc16(m_numNodes*sizeof(double));

	int j = 0;
	SListConstIterator<node> it;
	for(it = m_nodesInCC[i].begin(); it.valid(); ++it, ++j)
	{
		node v = *it;
		m_orig[j] = v;
		m_mapNode[v] = j;

		m_x[j] = m_ga->x(v);
		m_y[j] = m_ga->y(v);

		if (m_useNodeWeight)
			m_nodeWeight[j] = (m_ga->attributes() & GraphAttributes::nodeWeight) ? m_ga->weight(v) : 1.0;
		else
			m_nodeWeight[j] = 1.0;

		adjEntry adj;
		forall_adj(adj,v)
			if(v->index() < adj->twinNode()->index())
				++m_numEdges;
	}

	m_src = (int *) System::alignedMemoryAlloc16(m_numEdges*sizeof(int));
	m_tgt = (int *) System::alignedMemoryAlloc16(m_numEdges*sizeof(int));

	j = 0;
	int srcId;
	for(it = m_nodesInCC[i].begin(), srcId = 0; it.valid(); ++it, ++srcId)
	{
		node v = *it;
		adjEntry adj;
		forall_adj(adj,v) {
			node w = adj->twinNode();
			if(v->index() < w->index()) {
				m_src[j] = srcId;
				m_tgt[j] = m_mapNode[w];
				++j;
			}
		}
	}
}
void FaceSinkGraph::doInit()
{
	const ConstCombinatorialEmbedding &E = *m_pE;

	NodeArray<node> sinkSwitch(E,nullptr); // corresponding node in F (if any)
	NodeArray<bool> isSinkSwitch(E,true);

	NodeArray<int> visited(E,-1);
	int faceNo = -1;
	for(face f : E.faces)
	{
		faceNo++;
		node faceNode = newNode();
		m_originalFace[faceNode] = f;

		SListPure<node> nodesInF;

		adjEntry adj1 = f->firstAdj(), adj = adj1;
		do {
			node v = adj->theNode();
			// if the graph is not biconnected, then node v can be visited more than once
			if (visited[v] != faceNo) {
				nodesInF.pushBack(v);
				visited[v] = faceNo;
			}

			if (v == m_source)
				m_containsSource[faceNode] = true;

			isSinkSwitch[adj->theEdge()->source()] = false;

			adj = adj->twin()->cyclicPred();
		} while (adj != adj1);

		SListConstIterator<node> it;
		for(it = nodesInF.begin(); it.valid(); ++it)
		{
			node v = *it;
			if(isSinkSwitch[v]) {
				if (sinkSwitch[v] == nullptr) {
					node vF = newNode();
					m_originalNode[vF] = v;
					sinkSwitch[v] = vF;
				}

				newEdge(faceNode,sinkSwitch[v]);
			}
		}

		for(it = nodesInF.begin(); it.valid(); ++it)
			isSinkSwitch[*it] = true;
	}
}
//
// o u t p u t O p e r a t o r  for DinoUmlDiagramGraph
//
ostream &operator<<(ostream &os, const DinoUmlDiagramGraph &diagramGraph)
{
	// Header with diagram name and type
	os << "\n--- " << diagramGraph.getDiagramTypeString()
	   << " \"" << diagramGraph.m_diagramName << "\" ---\n" << endl;

	// Nodes

	// Initialize iterators
	SListConstIterator<NodeElement*> nodeIt = diagramGraph.m_containedNodes.begin();
	SListConstIterator<double> xIt = diagramGraph.m_x.begin();
	SListConstIterator<double> yIt = diagramGraph.m_y.begin();
	SListConstIterator<double> wIt = diagramGraph.m_w.begin();
	SListConstIterator<double> hIt = diagramGraph.m_h.begin();

	// Traverse lists
	while (nodeIt.valid()){
		os << "Node " << diagramGraph.m_modelGraph.getNodeLabel(*nodeIt)
		   << " with geometry ("
		   << *xIt << ", " << *yIt << ", " << *wIt << ", " << *hIt << ")." << endl;

		++nodeIt;
		++xIt;
		++yIt;
		++wIt;
		++hIt;
	} // while

	// Edges

	// Traverse lists
	SListConstIterator<EdgeElement*> edgeIt;
	for (edgeIt = diagramGraph.m_containedEdges.begin(); edgeIt.valid(); ++edgeIt)
	{
		os << "Edge between "
		   << diagramGraph.m_modelGraph.getNodeLabel((*edgeIt)->source())
		   << " and "
		   << diagramGraph.m_modelGraph.getNodeLabel((*edgeIt)->target()) << endl;
	}

	return os;
} // <<
void UMLGraph::undoGenMergers()
{
	SListConstIterator<edge> it;
	for(it = m_mergeEdges.begin(); it.valid(); ++it)
	{
		edge eMerge = *it;
		node u = eMerge->source();
		const DPolyline &common = bends(eMerge);

		adjEntry adj, adjSucc;
		for(adj = u->firstAdj(); adj != nullptr; adj = adjSucc)
		{
			adjSucc = adj->succ();

			edge e = adj->theEdge();
			if(e->target() != u) continue;

			DPolyline &dpl = bends(e);
			dpl.pushBack(DPoint(x(u),y(u)));

			ListConstIterator<DPoint> itDp;
			for(itDp = common.begin(); itDp.valid(); ++itDp)
				dpl.pushBack(*itDp);

			m_pG->moveTarget(e,eMerge->target());
		}

		m_pG->delNode(u);
	}

	m_mergeEdges.clear();
}
// computes coordinates pos of horizontal (resp. vertical) segments by
// computing longest paths in the constraint graph D
void LongestPathCompaction::computeCoords(
	const CompactionConstraintGraph<int> &D,
	NodeArray<int> &pos)
{
	const Graph &Gd = D.getGraph();

	// compute a first ranking with usual longest paths
	applyLongestPaths(D,pos);

	if (m_tighten == true)
	{
		// improve cost of ranking by moving pseudo-components
		moveComponents(D,pos);

		// find node with minimal position
		SListConstIterator<node> it = m_pseudoSources.begin();
		int min = pos[*it];
		for(++it; it.valid(); ++it) {
			if (pos[*it] < min)
				min = pos[*it];
		}

		// move all nodes such that the node with minimum position has position 0
		for(node v : Gd.nodes)
			pos[v] -= min;
	}

	// free resources
	m_pseudoSources.clear();
	m_component.init();
}
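// The comment above refers to ranking by longest paths. As an illustration only
// (this is not OGDF code and not the implementation of applyLongestPaths), the
// following minimal sketch computes longest-path ranks for a DAG given as
// adjacency lists; the representation and the name longestPathRanks are assumptions.
#include <vector>
#include <algorithm>

// ranks[v] = length of a longest path from any source to v (all edge lengths 1).
// 'adj' must describe a DAG; vertices are 0..n-1.
std::vector<int> longestPathRanks(const std::vector<std::vector<int>> &adj)
{
	int n = (int)adj.size();
	std::vector<int> indeg(n, 0), rank(n, 0);
	for (int u = 0; u < n; ++u)
		for (int v : adj[u]) ++indeg[v];

	std::vector<int> queue;
	for (int u = 0; u < n; ++u)
		if (indeg[u] == 0) queue.push_back(u);   // sources get rank 0

	// process vertices in topological order, relaxing outgoing edges
	for (size_t i = 0; i < queue.size(); ++i) {
		int u = queue[i];
		for (int v : adj[u]) {
			rank[v] = std::max(rank[v], rank[u] + 1);
			if (--indeg[v] == 0) queue.push_back(v);
		}
	}
	return rank;
}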
void FaceSinkGraph::doInit()
{
	const ConstCombinatorialEmbedding &E = *m_pE;

	NodeArray<node> sinkSwitch(E,0); // corresponding node in F (if any)
	NodeArray<bool> isSinkSwitch(E,true);

	face f;
	forall_faces(f,E)
	{
		node faceNode = newNode();
		m_originalFace[faceNode] = f;

		SListPure<node> nodesInF;

		adjEntry adj1 = f->firstAdj(), adj = adj1;
		do {
			node v = adj->theNode();
			nodesInF.pushBack(v);

			if (v == m_source)
				m_containsSource[faceNode] = true;

			isSinkSwitch[adj->theEdge()->source()] = false;

			adj = adj->twin()->cyclicPred();
		} while (adj != adj1);

		SListConstIterator<node> it;
		for(it = nodesInF.begin(); it.valid(); ++it)
		{
			node v = *it;
			if(isSinkSwitch[v]) {
				if (sinkSwitch[v] == 0) {
					node vF = newNode();
					m_originalNode[vF] = v;
					sinkSwitch[v] = vF;
				}

				newEdge(faceNode,sinkSwitch[v]);
			}
		}

		for(it = nodesInF.begin(); it.valid(); ++it)
			isSinkSwitch[*it] = true;
	}
}
void BCandSPQRtrees::insertEdgePath (edge eOrig, const SList<adjEntry>& crossedEdges)
{
	SList<edge> ti;
	SList<node> tj;
	SListConstIterator<adjEntry> kt;
	for (kt=crossedEdges.begin(); kt.valid(); ++kt) {
		ti.pushBack((*kt)->theEdge());
		tj.pushBack((*kt)->theEdge()->target());
	}

	m_pPG->insertEdgePath(eOrig,crossedEdges);

	Graph::EdgeType typeOfEOrig = m_forbidCrossingGens ? m_pPG->typeOrig(eOrig) : Graph::association;
	int costOfEOrig = m_costOrig ? eOrig ? (*m_costOrig)[eOrig] : 0 : 1;

	node v = m_pPG->copy(eOrig->source());
	SListConstIterator<edge> it = ti.begin();
	SListConstIterator<node> jt = tj.begin();
	for (kt=crossedEdges.begin(); it.valid(); ++it, ++jt, ++kt)
	{
		edge e = *it;
		node u = e->target();

		adjEntry a;
		for (a=u->firstAdj(); a->theEdge()->target()!=*jt; a=a->succ()) ;
		edge f = a->theEdge();

		m_dynamicSPQRForest.updateInsertedNode(e,f);
		e = m_dynamicSPQRForest.rep(e);
		f = m_dynamicSPQRForest.rep(f);
		m_typeOf[f] = m_typeOf[e];
		m_cost[f] = m_cost[e];

		for (a=u->firstAdj(); a->theEdge()->source()!=v; a=a->succ()) ;
		f = a->theEdge();

		m_dynamicSPQRForest.updateInsertedEdge(f);
		f = m_dynamicSPQRForest.rep(f);
		m_typeOf[f] = typeOfEOrig;
		m_cost[f] = costOfEOrig;

		v = u;
	}

	node u = m_pPG->copy(eOrig->target());
	adjEntry a;
	for (a=v->firstAdj(); a->theEdge()->target()!=u; a=a->succ()) ;
	edge f = a->theEdge();

	m_dynamicSPQRForest.updateInsertedEdge(f);
	f = m_dynamicSPQRForest.rep(f);
	m_typeOf[f] = typeOfEOrig;
	m_cost[f] = costOfEOrig;
}
KuratowskiConstraint::KuratowskiConstraint(ABA_MASTER *master, int nEdges, SListPure<nodePair> &ks)
	: ABA_CONSTRAINT(master, 0, ABA_CSENSE::Less, nEdges-1, true, false, true)
{
	SListConstIterator<nodePair> it;
	for (it = ks.begin(); it.valid(); ++it) {
		m_subdivision.pushBack(*it);
	}
}
void UpwardPlanarSubgraphSimple::call(const Graph &G, List<edge> &delEdges)
{
	delEdges.clear();

	// We construct an auxiliary graph H which represents the current upward
	// planar subgraph.
	Graph H;

	NodeArray<node> mapToH(G);
	for(node v : G.nodes)
		mapToH[v] = H.newNode();

	// We currently support only single-source acyclic digraphs ...
	node s;
	hasSingleSource(G,s);

	OGDF_ASSERT(s != 0);
	OGDF_ASSERT(isAcyclic(G));

	// We start with a spanning tree of G rooted at the single source.
	NodeArray<bool> visitedNode(G,false);
	SListPure<edge> treeEdges;
	dfsBuildSpanningTree(s,treeEdges,visitedNode);

	// Mark all edges in the spanning tree so they can be skipped in the
	// loop below and add (copies of) them to H.
	EdgeArray<bool> visitedEdge(G,false);
	SListConstIterator<edge> it;
	for(it = treeEdges.begin(); it.valid(); ++it) {
		edge eG = *it;
		visitedEdge[eG] = true;
		H.newEdge(mapToH[eG->source()],mapToH[eG->target()]);
	}

	// Subsequently add the remaining edges to H and test if the resulting
	// graph is still upward planar. If not, remove the edge again from H
	// and add it to delEdges.
	for(edge eG : G.edges)
	{
		if(visitedEdge[eG] == true)
			continue;

		edge eH = H.newEdge(mapToH[eG->source()],mapToH[eG->target()]);

		if (UpwardPlanarity::isUpwardPlanar_singleSource(H) == false) {
			H.delEdge(eH);
			delEdges.pushBack(eG);
		}
	}
}
// builds expansion graph of i-th biconnected component of the original graph
void ExpansionGraph::init(int i)
{
	OGDF_ASSERT(0 <= i);
	OGDF_ASSERT(i <= m_component.high());

	// remove previous component
	for(node v : nodes) {
		node vOrig = m_vOrig[v];
		if (vOrig)
			m_vCopy[vOrig] = nullptr;
	}
	clear();

	// create new component
	SListConstIterator<edge> it;
	for(it = m_component[i].begin(); it.valid(); ++it) {
		edge e = *it;

		edge eCopy = newEdge(getCopy(e->source()),getCopy(e->target()));
		m_eOrig[eCopy] = e;
	}

	// expand vertices
	for(node v : nodes)
	{
		if (original(v) && v->indeg() >= 1 && v->outdeg() >= 1) {
			node vPrime = newNode();
			m_vRep[vPrime] = m_vOrig[v];

			SListPure<edge> edges;
			v->outEdges(edges);

			SListConstIterator<edge> it;
			for(it = edges.begin(); it.valid(); ++it)
				moveSource(*it,vPrime);

			newEdge(v,vPrime);
		}
	}
}
// test if graphAcyclicTest plus edges in tmpAugmented is acyclic
// removes added edges again
bool UpwardPlanarSubgraphSimple::checkAcyclic(
	GraphCopySimple &graphAcyclicTest,
	SList<Tuple2<node,node> > &tmpAugmented)
{
	SListPure<edge> added;

	SListConstIterator<Tuple2<node,node> > it;
	for(it = tmpAugmented.begin(); it.valid(); ++it)
		added.pushBack(graphAcyclicTest.newEdge(
			graphAcyclicTest.copy((*it).x1()),
			graphAcyclicTest.copy((*it).x2())));

	bool acyclic = isAcyclic(graphAcyclicTest);

	SListConstIterator<edge> itE;
	for(itE = added.begin(); itE.valid(); ++itE)
		graphAcyclicTest.delEdge(*itE);

	return acyclic;
}
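// The pattern above (tentatively insert edges, run a global acyclicity test, then
// undo the insertions) can be illustrated without OGDF. This is a standalone sketch,
// not the library routine: the adjacency-list graph and the names dfsHasCycle and
// acyclicWithExtraEdges are assumptions.
#include <vector>
#include <utility>

// Detects a directed cycle via DFS coloring: 0 = white, 1 = on stack, 2 = done.
static bool dfsHasCycle(int u, const std::vector<std::vector<int>> &adj, std::vector<int> &color)
{
	color[u] = 1;
	for (int v : adj[u]) {
		if (color[v] == 1) return true;                               // back edge -> cycle
		if (color[v] == 0 && dfsHasCycle(v, adj, color)) return true;
	}
	color[u] = 2;
	return false;
}

// Returns true iff the graph stays acyclic after temporarily adding 'extra' edges.
bool acyclicWithExtraEdges(std::vector<std::vector<int>> adj,         // copied on purpose
                           const std::vector<std::pair<int,int>> &extra)
{
	for (auto &e : extra) adj[e.first].push_back(e.second);           // tentative insertion
	std::vector<int> color(adj.size(), 0);
	for (int u = 0; u < (int)adj.size(); ++u)
		if (color[u] == 0 && dfsHasCycle(u, adj, color)) return false;
	return true;                                                      // the copy discards the extras
}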
// Transforms KuratowskiWrapper into KuratowskiSubdivision
void BoyerMyrvold::transform(
	const KuratowskiWrapper& source,
	KuratowskiSubdivision& target,
	NodeArray<int>& count,
	EdgeArray<int>& countEdge)
{
	// init linear counting structure
	node kn[6];
	int p = 0;
	SListConstIterator<edge> itE;
	for (itE = source.edgeList.begin(); itE.valid(); ++itE) {
		const edge& e(*itE);
		OGDF_ASSERT(!countEdge[e]);
		countEdge[e] = 1;
		if (++count[e->source()] == 3) kn[p++] = e->source();
		if (++count[e->target()] == 3) kn[p++] = e->target();
	}

	// transform edgelist of KuratowskiWrapper to KuratowskiSubdivision
	OGDF_ASSERT(p==5 || p==6);
	node n;
	edge e,f,h;
	List<edge> L;

	if (p==5) { // K5
		kn[5] = 0;
		target.init(10);

		for (int k = 0; k<5; k++) {
			forall_adj_edges(e,kn[k]) {
				if (!countEdge[e]) continue;
				n = kn[k];
				f = e;

				// traverse degree-2-path
				while (count[n = f->opposite(n)] == 2) {
					L.pushBack(f);
					forall_adj_edges(h,n) {
						if (countEdge[h] && h != f) {
							f = h;
							break;
						}
					}
				}
				L.pushBack(f);

				int i = 0;
				while (kn[i] != n) i++;
				if (i > k) {
					if (k==0) i--;
					else if (k==1) i+=2;
					else i += k+2;
					target[i].conc(L);
				} else L.clear();
			}
		}
	} else { // k33
void OptimalRanking::call (const Graph& G, NodeArray<int> &rank)
{
	List<edge> R;

	m_subgraph.get().call(G,R);

	EdgeArray<bool> reversed(G,false);
	for (edge e : R)
		reversed[e] = true;
	R.clear();

	EdgeArray<int> length(G,1);

	if(m_separateMultiEdges) {
		SListPure<edge> edges;
		EdgeArray<int> minIndex(G), maxIndex(G);
		parallelFreeSortUndirected(G, edges, minIndex, maxIndex);

		SListConstIterator<edge> it = edges.begin();
		if(it.valid())
		{
			int prevSrc = minIndex[*it];
			int prevTgt = maxIndex[*it];

			for(it = it.succ(); it.valid(); ++it) {
				edge e = *it;
				if (minIndex[e] == prevSrc && maxIndex[e] == prevTgt)
					length[e] = 2;
				else {
					prevSrc = minIndex[e];
					prevTgt = maxIndex[e];
				}
			}
		}
	}

	EdgeArray<int> cost(G,1);
	doCall(G, rank, reversed, length, cost);
}
bool isParallelFree(const Graph &G)
{
	if (G.numberOfEdges() <= 1) return true;

	SListPure<edge> edges;
	parallelFreeSort(G,edges);

	SListConstIterator<edge> it = edges.begin();
	edge ePrev = *it, e;
	for(it = ++it; it.valid(); ++it, ePrev = e) {
		e = *it;
		if (ePrev->source() == e->source() && ePrev->target() == e->target())
			return false;
	}

	return true;
}
bool isParallelFreeUndirected(const Graph &G)
{
	if (G.numberOfEdges() <= 1) return true;

	SListPure<edge> edges;
	EdgeArray<int> minIndex(G), maxIndex(G);
	parallelFreeSortUndirected(G,edges,minIndex,maxIndex);

	SListConstIterator<edge> it = edges.begin();
	edge ePrev = *it, e;
	for(it = ++it; it.valid(); ++it, ePrev = e) {
		e = *it;
		if (minIndex[ePrev] == minIndex[e] && maxIndex[ePrev] == maxIndex[e])
			return false;
	}

	return true;
}
int numParallelEdges(const Graph &G)
{
	if (G.numberOfEdges() <= 1) return 0;

	SListPure<edge> edges;
	parallelFreeSort(G,edges);

	int num = 0;
	SListConstIterator<edge> it = edges.begin();
	edge ePrev = *it, e;
	for(it = ++it; it.valid(); ++it, ePrev = e) {
		e = *it;
		if (ePrev->source() == e->source() && ePrev->target() == e->target())
			++num;
	}

	return num;
}
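// The three routines above all follow the same sort-then-scan pattern: bucket-sort
// the edges so that parallel edges become adjacent, then compare each edge with its
// predecessor. A standalone illustration of that idea (not OGDF code; the pair-based
// edge list and the name countParallel are assumptions):
#include <vector>
#include <algorithm>
#include <utility>

// Counts edges that duplicate their predecessor after sorting by (source, target).
int countParallel(std::vector<std::pair<int,int>> edges)
{
	if (edges.size() <= 1) return 0;
	std::sort(edges.begin(), edges.end());     // parallel edges become neighbors
	int num = 0;
	for (size_t i = 1; i < edges.size(); ++i)
		if (edges[i] == edges[i-1]) ++num;     // same (source, target) as predecessor
	return num;
}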
//
// p r i n t D i a g r a m s I n U M L G r a p h F o r m a t
//
void DinoUmlToGraphConverter::printDiagramsInUMLGraphFormat(ofstream &os)
{
	// Traverse diagrams
	SListConstIterator<UMLGraph*> diagramIt;
	for (diagramIt = m_diagramGraphsInUMLGraphFormat.begin(); diagramIt.valid(); ++diagramIt)
	{
		// Get underlying graphs
		const Graph &G = (const Graph &)**diagramIt;
		const GraphAttributes &AG = **diagramIt;

		// Nodes
		os << "Classes:" << endl;
		NodeElement *v;
		forall_nodes(v,G) {
			os << "\t" << AG.labelNode(v);
			os << " with geometry ("
			   << AG.x(v) << ", " << AG.y(v) << ", " << AG.width(v) << ", " << AG.height(v) << ")";
			os << endl;
		}

		// Edges
		EdgeElement *e;
		os << "Relations:" << endl;
		forall_edges(e,G) {
			os << "\t";
			if (AG.type(e) == Graph::association)
				os << "Association between ";
			if (AG.type(e) == Graph::generalization)
				os << "Generalization between ";
			os << AG.labelNode(e->source()) << " and " << AG.labelNode(e->target()) << endl;
		}
	}
}
// original variant of st-augmentation
// Also inserts new nodes representing faces into G.
void FaceSinkGraph::stAugmentation(
	node h,                      // node corresponding to external face
	Graph &G,                    // original graph (not const)
	SList<node> &augmentedNodes, // list of augmented nodes
	SList<edge> &augmentedEdges) // list of augmented edges
{
	SListPure<node> roots;
	for(node v : nodes) {
		node vOrig = m_originalNode[v];
		if (vOrig != nullptr && vOrig->indeg() > 0 && vOrig->outdeg() > 0)
			roots.pushBack(v);
	}

	node vh = dfsStAugmentation(h,nullptr,G,augmentedNodes,augmentedEdges);

	SListConstIterator<node> it;
	for(it = roots.begin(); it.valid(); ++it)
		dfsStAugmentation(*it,nullptr,G,augmentedNodes,augmentedEdges);

	augmentedEdges.pushBack(G.newEdge(m_source,vh));
}
// builds expansion graph of graph G
// for debugging purposes only
void ExpansionGraph::init(const Graph &G)
{
	// remove previous component
	for(node v : nodes) {
		node vOrig = m_vOrig[v];
		if (vOrig)
			m_vCopy[vOrig] = nullptr;
	}
	clear();

	// create new component
	for(node v : G.nodes)
		getCopy(v);

	for(edge e : G.edges) {
		edge eCopy = newEdge(getCopy(e->source()),getCopy(e->target()));
		m_eOrig[eCopy] = e;
	}

	// expand vertices
	for(node v : nodes)
	{
		if (original(v) && v->indeg() >= 1 && v->outdeg() >= 1) {
			node vPrime = newNode();

			SListPure<edge> edges;
			v->outEdges(edges);

			SListConstIterator<edge> it;
			for(it = edges.begin(); it.valid(); ++it)
				moveSource(*it,vPrime);

			newEdge(v,vPrime);
		}
	}
}
// compute grouping for sons of nodes on level i
void RadialTreeLayout::ComputeGrouping(int i)
{
	SListConstIterator<node> it;
	for(it = m_nodes[i].begin(); it.valid(); ++it)
	{
		node v = *it;
		node p = m_parent[v];

		Grouping &grouping = m_grouping[v];
		ListIterator<Group> currentGroup;

		adjEntry adj = v->firstAdj();
		adjEntry adjStop;

		if(p != nullptr) {
			while(adj->twinNode() != p)
				adj = adj->cyclicSucc();

			adjStop = adj;
			adj = adj->cyclicSucc();
		} else {
			adjStop = adj;
		}

		do {
			node u = adj->twinNode();

			if(!currentGroup.valid() || (*currentGroup).isSameType(u) == false) {
				currentGroup = grouping.pushBack(Group(this,u));
			} else {
				(*currentGroup).append(u);
			}

			adj = adj->cyclicSucc();
		} while(adj != adjStop);
	}
}
// separate pertinent nodes in the lists of possible different minor-types
void FindKuratowskis::splitInMinorTypes(
	const SListPure<adjEntry>& externalFacePath,
	int marker)
{
	// mark nodes which are before stopX or behind stopY in CCW-traversal and add
	// all extern nodes strictly between stopX and stopY to the list
	// externE for minor E (pertinent nodes are considered because of the
	// position of z left or right of w)
	SListConstIterator<adjEntry> itExtern;
	SListIterator<WInfo> it = k.wNodes.begin();
	node x;
	bool between = false;
	SListPure<WInfo*> infoList;
	SListIterator<WInfo*> itList;
	ExternE externEdummy;

	// compute list of externE nodes
	for (itExtern=externalFacePath.begin(); itExtern.valid(); ++itExtern) {
		x = (*itExtern)->theNode();
		if (x==k.stopX || x==k.stopY) {
			between = (between==false) ? true : false;
		} else {
			if (!between) {
				m_wasHere[x]=marker;
			} else {
				if (pBM->externallyActive(x,k.V_DFI)) {
					externEdummy.theNode = x;

					// check minor type B and save extern linkage
					if (it.valid() && (*it).w==x &&
							!m_pertinentRoots[x].empty() &&
							m_lowPoint[m_nodeFromDFI[-m_dfi[m_pertinentRoots[x].back()]]] < k.V_DFI) {
						WInfo& info(*it);

						// checking minor type B
						info.minorType |= WInfo::B;

						// mark extern node for later extraction
						externEdummy.startnodes.pushBack(0);

						// create externE-list
						k.externE.pushBack(externEdummy);

						// save extern linkage
						info.externEStart = k.externE.rbegin();
						info.externEEnd = k.externE.rbegin();
					} else {
						// create externE-list
						externEdummy.startnodes.clear();
						k.externE.pushBack(externEdummy);
					}

					// save for each wNode the first externally active successor
					// on the external face
					for (itList = infoList.begin(); itList.valid(); ++itList)
						(*itList)->firstExternEAfterW = x;
					infoList.clear();
				}

				// get appropriate WInfo
				if (it.valid() && (*it).w==x) {
					infoList.pushBack(&(*it));
					++it;
				}
			}
		}
	}

	// divide wNodes in different minor types
	// avoids multiple computation of the externE range
	itExtern = externalFacePath.begin();
	SListIterator<ExternE> itExternE = k.externE.begin();
	WInfo* oldInfo = NULL;
	for (it=k.wNodes.begin(); it.valid(); ++it) {
		WInfo& info(*it);

		// checking minor type A
		if (k.RReal!=k.V) info.minorType |= WInfo::A;

		// if a XYPath exists
		if (info.highestXYPath!=NULL) {
			if (m_wasHere[info.highestXYPath->front()->theNode()]==marker)
				info.pxAboveStopX = true;
			if (m_wasHere[info.highestXYPath->back()->theNode()]==marker)
				info.pyAboveStopY = true;

			// checking minor type C
			if (info.pxAboveStopX || info.pyAboveStopY)
				info.minorType |= WInfo::C;

			// checking minor type D
			if (info.zPath!=NULL) info.minorType |= WInfo::D;

			// checking minor type E
			if (!k.externE.empty()) {
				node t;

				// compute valid range of externE-nodes in linear time
				if (oldInfo!=NULL && info.highestXYPath==oldInfo->highestXYPath) {
					// found the same highestXYPath as before
					info.externEStart = oldInfo->externEStart;
					info.externEEnd = oldInfo->externEEnd;
					if (oldInfo->minorType & WInfo::E) info.minorType |= WInfo::E;
				} else {
					// compute range of a new highestXYPath
					node px;
					if (info.pxAboveStopX) px = k.stopX;
					else px = info.highestXYPath->front()->theNode();
					node py;
					if (info.pyAboveStopY) py = k.stopY;
					else py = info.highestXYPath->back()->theNode();

					while ((*itExtern)->theNode() != px) ++itExtern;
					t = (*(++itExtern))->theNode();
					node start = NULL;
					node end = NULL;
					while (t != py) {
						if (pBM->externallyActive(t,k.V_DFI)) {
							if (start==NULL) start = t;
							end = t;
						}
						t = (*(++itExtern))->theNode();
					}
					if (start != NULL) {
						while ((*itExternE).theNode != start) ++itExternE;
						info.externEStart = itExternE;
						// mark node to extract external subgraph later
						(*itExternE).startnodes.pushBack(0);
						node temp = start;
						while (temp != end) {
							temp = (*++itExternE).theNode;
							// mark node to extract external subgraph later
							(*itExternE).startnodes.pushBack(0);
						}
						info.externEEnd = itExternE;

						info.minorType |= WInfo::E;
					}
					oldInfo = &info;
				}
			}
		}

		/*
		// use this to find special kuratowski-structures
		if ((info.minorType & (WInfo::A|WInfo::B|WInfo::C|WInfo::D|WInfo::E)) ==
				(WInfo::A|WInfo::B|WInfo::C|WInfo::D|WInfo::E)) {
			char t; cin >> t;
		}
		*/
	}

	// extract the externalSubgraph of all saved externally active nodes
	// exclude the already extracted minor b-types
#ifdef OGDF_DEBUG
	int visited = m_nodeMarker+1;
#endif
	for (itExternE=k.externE.begin(); itExternE.valid(); ++itExternE) {
		if ((*itExternE).startnodes.empty()) continue;

		ExternE& externE(*itExternE);
		externE.startnodes.clear();
		if (m_bundles) {
			OGDF_ASSERT(m_wasHere[externE.theNode] < visited);
			extractExternalSubgraphBundles(externE.theNode,k.V_DFI,
					k.externalSubgraph,++m_nodeMarker);
		} else {
			extractExternalSubgraph(externE.theNode,k.V_DFI,externE.startnodes,
					externE.endnodes);
			SListIterator<int> itInt;
			SListPure<edge> dummy;
			for (itInt = externE.startnodes.begin(); itInt.valid(); ++itInt)
				externE.externalPaths.pushBack(dummy);
		}
	}
}
// remove "arcs" from visibArcs which we already have in the constraint graph // (as basic arcs) void CompactionConstraintGraphBase::removeRedundantVisibArcs( SListPure<Tuple2<node,node> > &visibArcs) { // bucket sort list of all edges SListPure<edge> all; allEdges(all); parallelFreeSort(*this,all); // bucket sort visibArcs BucketFirstIndex bucketSrc; visibArcs.bucketSort(0,maxNodeIndex(),bucketSrc); BucketSecondIndex bucketTgt; visibArcs.bucketSort(0,maxNodeIndex(),bucketTgt); // now, in both lists, arcs are sorted by increasing target index, // and arcs with the same target index by increasing source index. SListConstIterator<edge> itAll = all.begin(); SListIterator<Tuple2<node,node> > it, itNext, itPrev; // for each arc in visibArcs, we check if it is also contained in list all for(it = visibArcs.begin(); it.valid(); it = itNext) { // required since we delete from the list we traverse itNext = it.succ(); int i = (*it).x1()->index(); int j = (*it).x2()->index(); // skip all arcs with smaller target index while(itAll.valid() && (*itAll)->target()->index() < j) ++itAll; // no more arcs => no more duplicates, so return if (!itAll.valid()) break; // if target index is j, we also skip all arcs with target index i // and source index smaller than i while(itAll.valid() && (*itAll)->target()->index() == j && (*itAll)->source()->index() < i) ++itAll; // no more arcs => no more duplicates, so return if (!itAll.valid()) break; // if (i,j) is already present, we delete it from visibArcs if ((*itAll)->source()->index() == i && (*itAll)->target()->index() == j) { //visibArcs.del(it); if (itPrev.valid()) visibArcs.delSucc(itPrev); else visibArcs.popFront(); } else itPrev = it; }//for visibArcs //****************************CHECK for //special treatment for cage visibility //two cases: input node cage: just compare arbitrary node // merger cage: check first if there are mergers itPrev = nullptr; for(it = visibArcs.begin(); it.valid(); it = itNext) { itNext = it.succ(); OGDF_ASSERT(!m_path[(*it).x1()].empty()); OGDF_ASSERT(!m_path[(*it).x1()].empty()); node boundRepresentant1 = m_path[(*it).x1()].front(); node boundRepresentant2 = m_path[(*it).x2()].front(); node en1 = m_pPR->expandedNode(boundRepresentant1); node en2 = m_pPR->expandedNode(boundRepresentant2); //do not allow visibility constraints in fixed cages //due to non-planarity with middle position constraints if ( ( en1 && en2 ) && ( en1 == en2) ) { if (itPrev.valid()) visibArcs.delSucc(itPrev); else visibArcs.popFront(); } else { //check if its a genmergerspanning vis arc, merge cases later node firstn = nullptr, secondn = nullptr; for (node n : m_path[(*it).x1()]) { node en = m_pPR->expandedNode(n); if (!en) continue; if (!(m_pPR->typeOf(n) == Graph::generalizationExpander)) continue; else { firstn = en; break; } }//for for (node n : m_path[(*it).x2()]) { node en = m_pPR->expandedNode(n); if (!en) continue; if (!(m_pPR->typeOf(n) == Graph::generalizationExpander)) continue; else { secondn = en; break; } }//for if ((firstn && secondn) && (firstn == secondn)) { if (itPrev.valid()) visibArcs.delSucc(itPrev); else visibArcs.popFront(); } else itPrev = it; } }//for visibArcs }
void DynamicSPQRForest::createSPQR (node vB) const
{
	Graph GC;
	NodeArray<node> origNode(GC,0);
	EdgeArray<edge> origEdge(GC,0);

	SListConstIterator<edge> iH;
	for (iH=m_bNode_hEdges[vB].begin(); iH.valid(); ++iH)
		m_htogc[(*iH)->source()] = m_htogc[(*iH)->target()] = 0;

	for (iH=m_bNode_hEdges[vB].begin(); iH.valid(); ++iH) {
		edge eH = *iH;
		node sH = eH->source();
		node tH = eH->target();
		node& sGC = m_htogc[sH];
		node& tGC = m_htogc[tH];
		if (!sGC) { sGC = GC.newNode(); origNode[sGC] = sH; }
		if (!tGC) { tGC = GC.newNode(); origNode[tGC] = tH; }
		origEdge[GC.newEdge(sGC,tGC)] = eH;
	}

	TricComp tricComp(GC);

	const GraphCopySimple& GCC = *tricComp.m_pGC;
	EdgeArray<node> partnerNode(GCC,0);
	EdgeArray<edge> partnerEdge(GCC,0);

	for (int i=0; i<tricComp.m_numComp; ++i) {
		const TricComp::CompStruct &C = tricComp.m_component[i];

		if (C.m_edges.empty()) continue;

		node vT = m_T.newNode();
		m_tNode_owner[vT] = vT;

		switch(C.m_type) {
			case TricComp::bond:
				m_tNode_type[vT] = PComp;
				m_bNode_numP[vB]++;
				break;
			case TricComp::polygon:
				m_tNode_type[vT] = SComp;
				m_bNode_numS[vB]++;
				break;
			case TricComp::triconnected:
				m_tNode_type[vT] = RComp;
				m_bNode_numR[vB]++;
				break;
		}

		for (ListConstIterator<edge> iGCC=C.m_edges.begin(); iGCC.valid(); ++iGCC) {
			edge eGCC = *iGCC;
			edge eH = GCC.original(eGCC);
			if (eH) eH = origEdge[eH];
			else {
				node uH = origNode[GCC.original(eGCC->source())];
				node vH = origNode[GCC.original(eGCC->target())];
				eH = m_H.newEdge(uH,vH);

				if (!partnerNode[eGCC]) {
					partnerNode[eGCC] = vT;
					partnerEdge[eGCC] = eH;
				} else {
					m_T.newEdge(partnerNode[eGCC],vT);
					m_hEdge_twinEdge[eH] = partnerEdge[eGCC];
					m_hEdge_twinEdge[partnerEdge[eGCC]] = eH;
				}
			}
			m_hEdge_position[eH] = m_tNode_hEdges[vT].pushBack(eH);
			m_hEdge_tNode[eH] = vT;
		}
	}

	m_bNode_SPQR[vB] = m_hEdge_tNode[origEdge[GC.firstEdge()]];
	m_tNode_hRefEdge[m_bNode_SPQR[vB]] = 0;

	SList<node> lT;
	lT.pushBack(m_bNode_SPQR[vB]);
	lT.pushBack(0);
	while (!lT.empty()) {
		node vT = lT.popFrontRet();
		node wT = lT.popFrontRet();
		for (ListConstIterator<edge> iH=m_tNode_hEdges[vT].begin(); iH.valid(); ++iH) {
			edge eH = *iH;
			edge fH = m_hEdge_twinEdge[eH];
			if (!fH) continue;
			node uT = m_hEdge_tNode[fH];
			if (uT==wT) m_tNode_hRefEdge[vT] = eH;
			else {
				lT.pushBack(uT);
				lT.pushBack(vT);
			}
		}
	}
}
void RadialTreeLayout::ComputeAngles(const Graph &G)
{
	m_angle.init(G);
	m_wedge.init(G);
	m_radius.init(m_numLevels);
	m_grouping.init(G);

	Queue<node> Q;
	NodeArray<double> restWeight(G);

	Q.append(m_root);
	m_angle[m_root] = 0;
	m_wedge[m_root] = 2*Math::pi;
	m_radius[0] = 0;

	//Grouping grouping;
	//double D, W;
	NodeArray<double> D(G), W(G);

	int iProcessed = 0;

	while(!Q.empty())
	{
		node v = Q.pop();
		node p = m_parent[v];

		// nothing to do if v is a leaf
		if(p != nullptr && v->degree() == 1)
			continue;

		int i = m_level[v];
		if(i+1 > iProcessed)
		{
			m_radius[i+1] = m_radius[i] + 0.5*(m_width[i+1]+m_width[i]) + m_levelDistance;

			ComputeGrouping(i);

			SListConstIterator<node> it;
			for(it = m_nodes[i].begin(); it.valid(); ++it)
			{
				node w = *it;

				m_grouping[w].computeAdd(D[w],W[w]);

				double deltaL = 0.0;
				ListConstIterator<Group> itG;
				for(itG = m_grouping[w].begin(); itG.valid(); ++itG)
				{
					const Group &g = *itG;
					if(g.m_leafGroup) continue;

					double deltaLG;
					double weightedAdd = W[w] / g.m_sumW * g.add();

					deltaLG = 2 * W[w] / m_leaves[g.leftVertex()] * g.m_leftAdd - weightedAdd;
					if(deltaLG > deltaL) deltaL = deltaLG;

					deltaLG = 2 * W[w] / m_leaves[g.rightVertex()] * g.m_rightAdd - weightedAdd;
					if(deltaLG > deltaL) deltaL = deltaLG;
				}

				double r = (deltaL + D[w]) / m_wedge[w];
				if(r > m_radius[i+1])
					m_radius[i+1] = r;
			}

			// ********
			/*deltaL = (m_radius[i+1] * 2*Math::pi) - D;

			double offset = 0;
			for(itG = grouping.begin(); itG.valid(); ++itG)
			{
				const Group &g = *itG;

				SListConstIterator<node> itV;
				for(itV = g.m_nodes.begin(); itV.valid(); ++itV)
				{
					node v = *itV;

					double s = m_diameter[v] + m_levelDistance;
					if(g.m_leafGroup == false)
						s += m_leaves[v] / g.m_sumW * g.add() + m_leaves[v] / W * deltaL;

					double desiredWedge = s / m_radius[i+1];

					double allowedWedge = 2 * acos(m_radius[i] / m_radius[i+1]);
					m_wedge[v] = min(desiredWedge,allowedWedge);

					m_angle[v] = offset + 0.5*desiredWedge;
					offset += desiredWedge;

					Q.append(v);
				}
			}
			*/
			//*************************
			/*
			SListConstIterator<node> it;
			for(it = m_nodes[i].begin(); it.valid(); ++it)
			{
				node w = *it;

				// compute weight of all non-leaves
				double weight = 0.0;
				for(adjEntry adjSon : w->adjEdges)
				{
					node u = adjSon->twinNode();
					if(u == m_parent[w])
						continue;

					if(u->degree() > 1)
						weight += m_leaves[u];
				}

				restWeight[w] = weight;

				double D = (w->degree() - 1) * m_levelDistance;

				for(adjEntry adjSon : w->adjEdges)
				{
					node u = adjSon->twinNode();
					if(u == m_parent[w])
						continue;

					D += m_diameter[u];
				}

				double r = D / m_wedge[w];
				if(r > m_radius[i+1])
					m_radius[i+1] = r;
			}*/

			iProcessed = i+1;
		}

		double deltaL = (m_radius[i+1] * m_wedge[v]) - D[v];
		double offset = m_angle[v] - 0.5*m_wedge[v];

		ListConstIterator<Group> itG;
		for(itG = m_grouping[v].begin(); itG.valid(); ++itG)
		{
			const Group &g = *itG;

			SListConstIterator<node> it;
			for(it = g.m_nodes.begin(); it.valid(); ++it)
			{
				node u = *it;

				double s = m_diameter[u] + m_levelDistance;
				if(g.m_leafGroup == false)
					s += m_leaves[u] / g.m_sumW * g.add() + m_leaves[u] / W[v] * deltaL;

				double desiredWedge = s / m_radius[i+1];

				double allowedWedge = 2 * acos(m_radius[i] / m_radius[i+1]);
				m_wedge[u] = min(desiredWedge,allowedWedge);

				m_angle[u] = offset + 0.5*desiredWedge;
				offset += desiredWedge;

				Q.append(u);
			}
		}

		/*
		double restWedge = m_wedge[v];
		for(adjEntry adj : v->adjEdges)
		{
			node u = adj->twinNode();
			if(u == m_parent[v])
				continue;

			m_wedge[u] = (m_diameter[u] + m_levelDistance) / m_radius[i+1];
			restWedge -= m_wedge[u];
		}

		double offset = m_angle[v] - 0.5*m_wedge[v];

		adjEntry adj = v->firstAdj();
		adjEntry adjStop;
		if(p != 0) {
			while(adj->twinNode() != p)
				adj = adj->cyclicSucc();
			adjStop = adj;
			adj = adj->cyclicSucc();
		} else {
			adjStop = adj;
		}

		do {
			node u = adj->twinNode();

			double desiredWedge;

			if(u->degree() == 1) {
				desiredWedge = m_wedge[u];
			} else {
				desiredWedge = m_wedge[u] + m_leaves[u] / restWeight[v] * restWedge;

				double allowedWedge = 2 * acos(m_radius[i] / m_radius[i+1]);
				m_wedge[u] = min(desiredWedge,allowedWedge);
			}

			m_angle[u] = offset + 0.5*desiredWedge;
			offset += desiredWedge;

			Q.append(u);

			adj = adj->cyclicSucc();
		} while(adj != adjStop);*/
	}

	m_outerRadius = m_radius[m_numLevels-1] + 0.5*m_width[m_numLevels-1];
}
void LongestPathCompaction::moveComponents(
	const CompactionConstraintGraph<int> &D,
	NodeArray<int> &pos)
{
	const Graph &Gd = D.getGraph();

	// compute for each component the list of nodes contained
	Array<SListPure<node> > nodesInComp(1,m_pseudoSources.size());

	for(node v : Gd.nodes) {
		if (m_component[v] > 0)
			nodesInComp[m_component[v]].pushBack(v);
	}

	// iterate over all pseudo-sources in reverse topological order
	for(node v : m_pseudoSources)
	{
		int c = m_component[v];

		// list of outgoing/incoming edges of pseudo-component C(v)
		SListPure<edge> outCompV, inCompV;

		//cout << "component " << c << endl;
		for(node w : nodesInComp[c]) {
			//cout << " " << w;
			edge e;
			forall_adj_edges(e,w) {
				if(m_component[e->target()] != c) {
					outCompV.pushBack(e);
				} else if (m_component[e->source()] != c)
					inCompV.pushBack(e);
			}
		}
		//cout << endl;

		if(outCompV.empty())
			continue;

		SListConstIterator<edge> itE = outCompV.begin();
		int costOut = D.cost(*itE);
		int delta = (pos[(*itE)->target()] - pos[(*itE)->source()]) - D.length(*itE);

		for(++itE; itE.valid(); ++itE) {
			costOut += D.cost(*itE);
			int d = (pos[(*itE)->target()] - pos[(*itE)->source()]) - D.length(*itE);
			if (d < delta)
				delta = d;
		}
		//cout << " delta = " << delta << ", costOut = " << costOut << endl;

		// if all outgoing edges have cost 0, we wouldn't save any cost!
		if (costOut == 0)
			continue;

		// move component up by delta; this shortens all outgoing edges and
		// enlarges all incoming edges (which have cost 0)
		for(node w : nodesInComp[c])
			pos[w] += delta;
	}
}
void UpwardPlanRep::insertEdgePathEmbedded(
	edge eOrig,
	SList<adjEntry> crossedEdges,
	EdgeArray<int> &costOrig)
{
	removeSinkArcs(crossedEdges);

	// In case the copy v of eOrig->source() is a sink switch, we must remove the
	// sink arcs incident to v, since after inserting eOrig, v is not a sink switch.
	node v = crossedEdges.front()->theNode();
	List<edge> outEdges;
	if (v->outdeg() == 1)
		v->outEdges(outEdges); // we delete these edges later

	m_eCopy[eOrig].clear();

	adjEntry adjSrc, adjTgt;
	SListConstIterator<adjEntry> it = crossedEdges.begin();

	// iterate over all adjacency entries in crossedEdges except for first
	// and last
	adjSrc = *it;
	List<adjEntry> dirtyList; // left and right face of the elements of this list are modified
	for(++it; it.valid() && it.succ().valid(); ++it)
	{
		adjEntry adj = *it;

		bool isASourceArc = false, isASinkArc = false;
		if (m_isSinkArc[adj->theEdge()])
			isASinkArc = true;
		if (m_isSourceArc[adj->theEdge()])
			isASourceArc = true;

		int c = 0;
		if (original(adj->theEdge()) != nullptr)
			c = costOrig[original(adj->theEdge())];

		// split edge
		node u = m_Gamma.split(adj->theEdge())->source();
		if (!m_isSinkArc[adj->theEdge()] && !m_isSourceArc[adj->theEdge()])
			crossings = crossings + c; // crossing sink/source arcs cost nothing

		// determine target adjacency entry and source adjacency entry
		// in the next iteration step
		adjTgt = u->firstAdj();
		adjEntry adjSrcNext = adjTgt->succ();

		if (adjTgt != adj->twin())
			std::swap(adjTgt, adjSrcNext);

		edge e_split = adjTgt->theEdge(); // the new split edge
		if (e_split->source() != u)
			e_split = adjSrcNext->theEdge();

		if (isASinkArc)
			m_isSinkArc[e_split] = true;
		if (isASourceArc)
			m_isSourceArc[e_split] = true;

		// insert a new edge into the face
		edge eNew = m_Gamma.splitFace(adjSrc,adjTgt);
		m_eIterator[eNew] = GraphCopy::m_eCopy[eOrig].pushBack(eNew);
		m_eOrig[eNew] = eOrig;
		dirtyList.pushBack(eNew->adjSource());

		adjSrc = adjSrcNext;
	}

	// insert last edge
	edge eNew = m_Gamma.splitFace(adjSrc,*it);
	m_eIterator[eNew] = m_eCopy[eOrig].pushBack(eNew);
	m_eOrig[eNew] = eOrig;
	dirtyList.pushBack(eNew->adjSource());

	// remove the sink arc incident to v
	if(!outEdges.empty()) {
		edge e = outEdges.popFrontRet();
		if (m_isSinkArc[e])
			m_Gamma.joinFaces(e);
	}

	m_Gamma.setExternalFace(m_Gamma.rightFace(extFaceHandle));

	//computeSinkSwitches();
	FaceSinkGraph fsg(m_Gamma, s_hat);
	List<adjEntry> dummyList;
	FaceArray< List<adjEntry> > sinkSwitches(m_Gamma, dummyList);
	fsg.sinkSwitches(sinkSwitches);

	// construct sink arcs for the dirty faces
	for(adjEntry adj : dirtyList) {
		face fLeft = m_Gamma.leftFace(adj);
		face fRight = m_Gamma.rightFace(adj);
		List<adjEntry> switches = sinkSwitches[fLeft];

		OGDF_ASSERT(!switches.empty());

		constructSinkArcs(fLeft, switches.front()->theNode());

		switches = sinkSwitches[fRight];

		OGDF_ASSERT(!switches.empty());

		constructSinkArcs(fRight, switches.front()->theNode());
	}

	m_Gamma.setExternalFace(m_Gamma.rightFace(extFaceHandle));
	computeSinkSwitches();
}
void UpwardPlanarSubgraphSimple::call(GraphCopy &GC, List<edge> &delEdges)
{
	const Graph &G = GC.original();
	delEdges.clear();

	// We construct an auxiliary graph H which represents the current upward
	// planar subgraph.
	Graph H;

	NodeArray<node> mapToH(G,nullptr);
	NodeArray<node> mapToG(H,nullptr);

	for(node v : G.nodes)
		mapToG[ mapToH[v] = H.newNode() ] = v;

	// We currently support only single-source acyclic digraphs ...
	node s;
	hasSingleSource(G,s);

	OGDF_ASSERT(s != 0);
	OGDF_ASSERT(isAcyclic(G));

	// We start with a spanning tree of G rooted at the single source.
	NodeArray<bool> visitedNode(G,false);
	SListPure<edge> treeEdges;
	dfsBuildSpanningTree(s,treeEdges,visitedNode);

	// Mark all edges in the spanning tree so they can be skipped in the
	// loop below and add (copies of) them to H.
	EdgeArray<bool> visitedEdge(G,false);
	SListConstIterator<edge> it;
	for(it = treeEdges.begin(); it.valid(); ++it) {
		edge eG = *it;
		visitedEdge[eG] = true;
		H.newEdge(mapToH[eG->source()],mapToH[eG->target()]);
	}

	// Subsequently add the remaining edges to H and test if the resulting
	// graph is still upward planar. If not, remove the edge again from H
	// and add it to delEdges.
	SList<Tuple2<node,node> > augmented;
	GraphCopySimple graphAcyclicTest(G);

	for(edge eG : G.edges)
	{
		// already treated ?
		if(visitedEdge[eG] == true)
			continue;

		// insert edge into H
		edge eH = H.newEdge(mapToH[eG->source()],mapToH[eG->target()]);

		node superSink;
		SList<edge> augmentedEdges;
		if (UpwardPlanarity::upwardPlanarAugment_singleSource(H,superSink,augmentedEdges) == false) {
			// if H is no longer upward planar, remove eG from subgraph
			H.delEdge(eH);
			delEdges.pushBack(eG);
		} else {
			// add augmented edges as node-pairs to tmpAugmented and remove
			// all augmented edges from H again
			SList<Tuple2<node,node> > tmpAugmented;
			SListConstIterator<edge> it;
			for(it = augmentedEdges.begin(); it.valid(); ++it) {
				node v = mapToG[(*it)->source()];
				node w = mapToG[(*it)->target()];

				if (v && w)
					tmpAugmented.pushBack(Tuple2<node,node>(v,w));

				H.delEdge(*it);
			}

			if (mapToG[superSink] == nullptr)
				H.delNode(superSink);

			//****************************************************************
			// The following is a simple workaround to assure the following
			// property of the upward planar subgraph:
			//   The st-augmented upward planar subgraph plus the edges not
			//   in the subgraph must be acyclic. (This is a special property
			//   of the embedding, not the augmentation.)
			// The upward-planar embedding function gives us ANY upward-planar
			// embedding. We check if the property above holds with this
			// embedding. If it doesn't, we have actually no idea if another
			// embedding would do.
			// The better solution would be to incorporate the acyclicity
			// property into the upward-planarity test, but this is complicated.
			//****************************************************************

			// test if original graph plus augmented edges is still acyclic
			if(checkAcyclic(graphAcyclicTest,tmpAugmented) == true) {
				augmented = tmpAugmented;
			} else {
				// if not, remove eG from subgraph
				H.delEdge(eH);
				delEdges.pushBack(eG);
			}
		}
	}

	// remove edges not in the subgraph from GC
	ListConstIterator<edge> itE;
	for(itE = delEdges.begin(); itE.valid(); ++itE)
		GC.delEdge(GC.copy(*itE));

	// add augmented edges to GC
	SListConstIterator<Tuple2<node,node> > itP;
	for(itP = augmented.begin(); itP.valid(); ++itP) {
		node v = (*itP).x1();
		node w = (*itP).x2();
		GC.newEdge(GC.copy(v),GC.copy(w));
	}

	// add super sink to GC
	node sGC = nullptr;
	SList<node> sinks;
	for(node v : GC.nodes) {
		if(v->indeg() == 0)
			sGC = v;
		if(v->outdeg() == 0)
			sinks.pushBack(v);
	}

	node superSinkGC = GC.newNode();
	SListConstIterator<node> itV;
	for(itV = sinks.begin(); itV.valid(); ++itV)
		GC.newEdge(*itV,superSinkGC);

	// add st-edge to GC, so that we now have a planar st-digraph
	GC.newEdge(sGC,superSinkGC);

	OGDF_ASSERT(isAcyclic(GC));
	OGDF_ASSERT(isPlanar(GC));
}
//---------------------------------------------------------
// actual call (called by all variations of call)
//   crossing of generalizations is forbidden if forbidCrossingGens = true
//   edge costs are obeyed if costOrig != 0
//
Module::ReturnType FixedEmbeddingInserter::doCall(
	PlanRep &PG,
	const List<edge> &origEdges,
	bool forbidCrossingGens,
	const EdgeArray<int> *costOrig,
	const EdgeArray<bool> *forbiddenEdgeOrig,
	const EdgeArray<unsigned int> *edgeSubGraph)
{
	double T;
	usedTime(T);

	ReturnType retValue = retFeasible;
	m_runsPostprocessing = 0;

	PG.embed();
	OGDF_ASSERT(PG.representsCombEmbedding() == true);

	if (origEdges.size() == 0)
		return retOptimal;  // nothing to do

	// initialization
	CombinatorialEmbedding E(PG);  // embedding of PG

	m_dual.clear();
	m_primalAdj.init(m_dual);
	m_nodeOf.init(E);

	// construct dual graph
	m_primalIsGen.init(m_dual,false);

	OGDF_ASSERT(forbidCrossingGens == false || forbiddenEdgeOrig == 0);

	if(forbidCrossingGens)
		constructDualForbidCrossingGens((const PlanRepUML&)PG,E);
	else
		constructDual(PG,E,forbiddenEdgeOrig);

	// m_delFaces and m_newFaces are used by removeEdge()
	// if we can't allocate memory for them, we throw an exception
	if (removeReinsert() != rrNone) {
		m_delFaces = new FaceSetSimple(E);
		if (m_delFaces == 0)
			OGDF_THROW(InsufficientMemoryException);

		m_newFaces = new FaceSetPure(E);
		if (m_newFaces == 0) {
			delete m_delFaces;
			OGDF_THROW(InsufficientMemoryException);
		}

	// no postprocessing -> no removeEdge()
	} else {
		m_delFaces = 0;
		m_newFaces = 0;
	}

	SListPure<edge> currentOrigEdges;
	if(removeReinsert() == rrIncremental) {
		edge e;
		forall_edges(e,PG)
			currentOrigEdges.pushBack(PG.original(e));
	}

	// insertion of edges
	ListConstIterator<edge> it;
	for(it = origEdges.begin(); it.valid(); ++it)
	{
		edge eOrig = *it;

		int eSubGraph = 0;  // edgeSubGraph-data of eOrig
		if(edgeSubGraph!=0) eSubGraph = (*edgeSubGraph)[eOrig];

		SList<adjEntry> crossed;
		if(costOrig != 0) {
			findShortestPath(PG, E, *costOrig,
				PG.copy(eOrig->source()),PG.copy(eOrig->target()),
				forbidCrossingGens ? ((const PlanRepUML&)PG).typeOrig(eOrig) : Graph::association,
				crossed, edgeSubGraph, eSubGraph);
		} else {
			findShortestPath(E,
				PG.copy(eOrig->source()),PG.copy(eOrig->target()),
				forbidCrossingGens ? ((const PlanRepUML&)PG).typeOrig(eOrig) : Graph::association,
				crossed);
		}

		insertEdge(PG,E,eOrig,crossed,forbidCrossingGens,forbiddenEdgeOrig);

		if(removeReinsert() == rrIncremental) {
			currentOrigEdges.pushBack(eOrig);

			bool improved;
			do {
				++m_runsPostprocessing;
				improved = false;

				SListConstIterator<edge> itRR;
				for(itRR = currentOrigEdges.begin(); itRR.valid(); ++itRR)
				{
					edge eOrigRR = *itRR;

					int pathLength;
					if(costOrig != 0)
						pathLength = costCrossed(eOrigRR,PG,*costOrig,edgeSubGraph);
					else
						pathLength = PG.chain(eOrigRR).size() - 1;
					if (pathLength == 0) continue;  // cannot improve

					removeEdge(PG,E,eOrigRR,forbidCrossingGens,forbiddenEdgeOrig);

					// try to find a better insertion path
					SList<adjEntry> crossed;
					if(costOrig != 0) {
						int eSubGraph = 0;  // edgeSubGraph-data of eOrig
						if(edgeSubGraph!=0) eSubGraph = (*edgeSubGraph)[eOrigRR];

						findShortestPath(PG, E, *costOrig,
							PG.copy(eOrigRR->source()),PG.copy(eOrigRR->target()),
							forbidCrossingGens ? ((const PlanRepUML&)PG).typeOrig(eOrigRR) : Graph::association,
							crossed, edgeSubGraph, eSubGraph);
					} else {
						findShortestPath(E,
							PG.copy(eOrigRR->source()),PG.copy(eOrigRR->target()),
							forbidCrossingGens ? ((const PlanRepUML&)PG).typeOrig(eOrigRR) : Graph::association,
							crossed);
					}

					// re-insert edge (insertion path cannot be longer)
					insertEdge(PG,E,eOrigRR,crossed,forbidCrossingGens,forbiddenEdgeOrig);

					int newPathLength = (costOrig != 0) ? costCrossed(eOrigRR,PG,*costOrig,edgeSubGraph) : (PG.chain(eOrigRR).size() - 1);
					OGDF_ASSERT(newPathLength <= pathLength);

					if(newPathLength < pathLength)
						improved = true;
				}
			} while (improved);
		}
	}

	const Graph &G = PG.original();
	if(removeReinsert() != rrIncremental) {
		// postprocessing (remove-reinsert heuristic)
		SListPure<edge> rrEdges;

		switch(removeReinsert())
		{
		case rrAll:
		case rrMostCrossed: {
				const List<node> &origInCC = PG.nodesInCC();
				ListConstIterator<node> itV;
				for(itV = origInCC.begin(); itV.valid(); ++itV) {
					node vG = *itV;
					adjEntry adj;
					forall_adj(adj,vG) {
						if ((adj->index() & 1) == 0) continue;
						edge eG = adj->theEdge();
						rrEdges.pushBack(eG);
					}
				}
			}
			break;

		case rrInserted:
			for(ListConstIterator<edge> it = origEdges.begin(); it.valid(); ++it)
				rrEdges.pushBack(*it);
			break;

		case rrNone:
		case rrIncremental:
			break;
		}

		// marks the end of the interval of rrEdges over which we iterate
		// initially set to invalid iterator which means all edges
		SListConstIterator<edge> itStop;

		bool improved;
		do {
			// abort postprocessing if time limit reached
			if (m_timeLimit >= 0 && m_timeLimit <= usedTime(T)) {
				retValue = retTimeoutFeasible;
				break;
			}

			++m_runsPostprocessing;
			improved = false;

			if(removeReinsert() == rrMostCrossed)
			{
				FEICrossingsBucket bucket(&PG);
				rrEdges.bucketSort(bucket);

				const int num = int(0.01 * percentMostCrossed() * G.numberOfEdges());
				itStop = rrEdges.get(num);
			}

			SListConstIterator<edge> it;
			for(it = rrEdges.begin(); it != itStop; ++it)
			{
				edge eOrig = *it;

				// remove only if there are crossings on the edge;
				// in particular, forbidden edges are never handled by postprocessing
				// since there are no crossings on such edges
				int pathLength;
				if(costOrig != 0)
					pathLength = costCrossed(eOrig,PG,*costOrig,edgeSubGraph);
				else
					pathLength = PG.chain(eOrig).size() - 1;
				if (pathLength == 0) continue;  // cannot improve

				removeEdge(PG,E,eOrig,forbidCrossingGens,forbiddenEdgeOrig);

				// try to find a better insertion path
				SList<adjEntry> crossed;
				if(costOrig != 0) {
					int eSubGraph = 0;  // edgeSubGraph-data of eOrig
					if(edgeSubGraph!=0) eSubGraph = (*edgeSubGraph)[eOrig];

					findShortestPath(PG, E, *costOrig,
						PG.copy(eOrig->source()),PG.copy(eOrig->target()),
						forbidCrossingGens ? ((const PlanRepUML&)PG).typeOrig(eOrig) : Graph::association,
						crossed, edgeSubGraph, eSubGraph);
				} else {
					findShortestPath(E,
						PG.copy(eOrig->source()),PG.copy(eOrig->target()),
						forbidCrossingGens ? ((const PlanRepUML&)PG).typeOrig(eOrig) : Graph::association,
						crossed);
				}

				// re-insert edge (insertion path cannot be longer)
				insertEdge(PG,E,eOrig,crossed,forbidCrossingGens,forbiddenEdgeOrig);

				int newPathLength = (costOrig != 0) ? costCrossed(eOrig,PG,*costOrig,edgeSubGraph) : (PG.chain(eOrig).size() - 1);
				OGDF_ASSERT(newPathLength <= pathLength);

				if(newPathLength < pathLength)
					improved = true;
			}
		} while(improved);  // iterate as long as we improve
	}