bool check_for_edges(int64_t old_node_id, bool old_node_is_reverse, int64_t new_node_id, bool new_node_is_reverse, xg::XG& index) { // What edge are we following Edge edge_taken = make_edge(old_node_id, old_node_is_reverse, new_node_id, new_node_is_reverse); // Make sure we find it bool edge_found = false; vector<Edge> edges = new_node_is_reverse ? index.edges_on_end(new_node_id) : index.edges_on_start(new_node_id); for(auto& edge : edges) { // Look at every edge in order. if(edges_equivalent(edge, edge_taken)) { // If we found the edge we're taking, break. edge_found = true; break; } } if(edge_found == false) { cerr << "did not find edge between" << old_node_id << " and " << new_node_id << endl;} return edge_found; }
// Fills in the I (interval-size) values for every rectangle in every column of
// the decomposition, extending rectangles column-to-column and using the
// "seeded" strategy: rectangles whose previous I was at least big_cutoff are
// extended eagerly, and binaryI fills in the (presumed mostly-empty) runs of
// small rectangles between them.
// Side effects: mutates cs[b].S (rectangle J/I/prev/next links) for all b >= 1.
void haplo_d::seeded_log_calculate_Is(xg::XG& graph) {
  // Things which were calculated in the constructor:
  // -- A's
  // -- J for the top continuing and any new rectangle
  // -- I for any new rectangle
  vector<Edge> edges_out;
  vector<Edge> edges_in;
  for(int b = 1; b < cs.size(); b++) {
    vector<rectangle>& prevAs = cs[b-1].S;
    vector<rectangle>& currAs = cs[b].S;
    // Edges leaving the side of the last node of the previous column, and
    // edges entering the side of this column's node that threads arrive through.
    edges_out = cs[b-1].get_last_node().is_reverse ? graph.edges_on_start(cs[b-1].get_last_node().node_id) : graph.edges_on_end(cs[b-1].get_last_node().node_id);
    edges_in = cs[b].get_node().is_reverse ? graph.edges_on_end(cs[b].get_node().node_id) : graph.edges_on_start(cs[b].get_node().node_id);
    // BUGFIX: removed "bool new_threads = (prevAs[0].next == 1);" which read
    // prevAs[0] BEFORE the prevAs.size() == 0 check below (undefined behavior
    // on an empty column) and was never used afterwards.
    // make sure that there is at least one rectangle here
    if(prevAs.size() == 0) {
      cerr << "[vg haplo error] no consistent haplotypes at node " << cs[b-1].get_node().node_id << endl;
    } else if(prevAs.size() == 1) {
      // Only one previous rectangle: the newest rectangle keeps its whole J.
      currAs.back().I = currAs.back().J;
      // currAs has size at most 2
      if(currAs.size() == 2) {
        currAs[0].I = currAs[0].J - currAs[1].J;
      }
    } else if(prevAs.size() >= 2) {
      // We're going to have to extend, so let's grab the next node
      XG::ThreadMapping next_node = cs[b].get_node();
      // Let's also grab the nodes which we'll skip over between this and the last node
      thread_t extension = cs[b-1].bridge;
      extension.push_back(next_node);
      // if J = 0 for a rectangle, then J must be 0 for all older rectangles
      if(currAs.back().J == 0) {
        currAs.pop_back();
      } else {
        int deltaJ = prevAs[0].J - currAs[prevAs[0].next].J;
        if(deltaJ == 0) {
          // No threads were lost from the top rectangle, so every older
          // rectangle survives unchanged; extend them all cheaply.
          currAs[prevAs[0].next].I = prevAs[0].I;
          int delta_start = prevAs[0].state.range_start - currAs[prevAs[0].next].state.range_start;
          int delta_end = prevAs[0].state.range_end - currAs[prevAs[0].next].state.range_end;
          for(int a = 1; a < prevAs.size(); a++) {
            rectangle new_rect = prevAs[a];
            new_rect.simple_extend(extension, graph, delta_start, delta_end);
            new_rect.prev = a;
            currAs.push_back(new_rect);
            prevAs[a].next = currAs.size()-1;
          }
        } else {
          // Some threads diverged. Identify "big" rectangles (I >= big_cutoff)
          // and extend them individually; binaryI handles everything between.
          vector<int> previously_big;
          int big_cutoff = 400;  // minimum previous I to be treated as "big"
          for(int i = 1; i < prevAs.size(); i++) {
            if(prevAs[i].I >= big_cutoff) {
              previously_big.push_back(i);
            }
          }
          vector<rectangle> big_rectangles;
          vector<int> big_deltas;
          vector<int> big_Js;
          for(int i = 0; i < previously_big.size(); i++) {
            big_rectangles.push_back(prevAs[previously_big[i]]);
            int Jbig = big_rectangles.back().get_next_J(extension,graph, edges_in, edges_out);
            big_Js.push_back(Jbig);
            big_deltas.push_back(prevAs[previously_big[i]].J - Jbig);
            // Once a big rectangle goes empty, all older ones are empty too.
            if(Jbig == 0) {
              break;
            }
          }
          if(big_Js.size() > 0) {
            // Walk the big rectangles top-down, filling gaps with binaryI.
            int Aabove = 0;
            int Jabove = currAs[prevAs[0].next].J;
            int dJabove = prevAs[0].J - currAs[prevAs[0].next].J;
            for(int i = 0; i < previously_big.size(); i++) {
              if(big_Js[i] == Jabove) {
                // all rectangles between are actually empty
                prevAs[currAs.back().prev].next = -1;
                currAs.pop_back();
              } else {
                binaryI(graph, extension, b, Aabove, previously_big[i], dJabove, big_deltas[i], Jabove, big_Js[i], 0, edges_in, edges_out);
              }
              Aabove = previously_big[i];
              Jabove = big_Js[i];
              dJabove = big_deltas[i];
              if(big_Js[i] == 0) {
                // Don't build smaller rectangles
                // Don't add this rectangle
                break;
              } else {
                big_rectangles[i].prev = previously_big[i];
                currAs.push_back(big_rectangles[i]);
                prevAs[previously_big[i]].next = currAs.size()-1;
              }
            }
            if(big_Js.back() != 0) {
              // Fill in everything below the lowest surviving big rectangle.
              binaryI(graph, extension, b, previously_big.back(), prevAs.size(), big_deltas.back(), 0, big_Js.back(), 0, 0, edges_in, edges_out);
            }
          } else {
            // No big rectangles at all: binary-search the whole column.
            binaryI(graph, extension, b, 0, prevAs.size(), deltaJ, 0, currAs[prevAs[0].next].J, 0, 0, edges_in, edges_out);
          }
          // I of each rectangle is the difference between consecutive J's;
          // the oldest rectangle keeps its whole J.
          for(int a = 0; a < currAs.size() - 1; a++) {
            currAs[a].I = currAs[a].J - currAs[a+1].J;
          }
          currAs.back().I = currAs.back().J;
        }
      }
    }
  }
}
// Fills in the I (interval-size) values for every rectangle in every column of
// the decomposition. Unlike seeded_log_calculate_Is, this version always
// delegates the divergent case directly to binaryI without pre-extending any
// "big" rectangles.
// Side effects: mutates cs[b].S (rectangle J/I/prev/next links) for all b >= 1.
void haplo_d::log_calculate_Is(xg::XG& graph) {
  // Things which were calculated in the constructor:
  // -- A's
  // -- J for the top continuing and any new rectangle
  // -- I for any new rectangle
  vector<Edge> edges_out;
  vector<Edge> edges_in;
  for(int b = 1; b < cs.size(); b++) {
    // Edges leaving the side of the last node of the previous column, and
    // edges entering the side of this column's node that threads arrive through.
    edges_out = cs[b-1].get_last_node().is_reverse ? graph.edges_on_start(cs[b-1].get_last_node().node_id) : graph.edges_on_end(cs[b-1].get_last_node().node_id);
    edges_in = cs[b].get_node().is_reverse ? graph.edges_on_end(cs[b].get_node().node_id) : graph.edges_on_start(cs[b].get_node().node_id);
    vector<rectangle>& prevAs = cs[b-1].S;
    vector<rectangle>& currAs = cs[b].S;
    // BUGFIX: removed "bool new_threads = (prevAs[0].next == 1);" which read
    // prevAs[0] BEFORE the prevAs.size() == 0 check below (undefined behavior
    // on an empty column) and was never used afterwards.
    // make sure that there is at least one rectangle here
    if(prevAs.size() == 0) {
      cerr << "[vg haplo error] no consistent haplotypes at node " << cs[b-1].get_node().node_id << endl;
    } else if(prevAs.size() == 1) {
      // Only one previous rectangle: the newest rectangle keeps its whole J.
      currAs.back().I = currAs.back().J;
      // currAs has size at most 2
      if(currAs.size() == 2) {
        currAs[0].I = currAs[0].J - currAs[1].J;
      }
    } else if(prevAs.size() >= 2) {
      // We're going to have to extend, so let's grab the next node
      XG::ThreadMapping next_node = cs[b].get_node();
      // Let's also grab the nodes which we'll skip over between this and the last node
      thread_t extension = cs[b-1].bridge;
      extension.push_back(next_node);
      // if J = 0 for a rectangle, then J must be 0 for all older rectangles
      if(currAs.back().J == 0) {
        currAs.pop_back();
      } else {
        int deltaJ = prevAs[0].J - currAs[prevAs[0].next].J;
        if(deltaJ == 0) {
          // No threads were lost from the top rectangle, so every older
          // rectangle survives unchanged; extend them all cheaply.
          currAs[prevAs[0].next].I = prevAs[0].I;
          int delta_start = prevAs[0].state.range_start - currAs[prevAs[0].next].state.range_start;
          int delta_end = prevAs[0].state.range_end - currAs[prevAs[0].next].state.range_end;
          for(int a = 1; a < prevAs.size(); a++) {
            rectangle new_rect = prevAs[a];
            new_rect.simple_extend(extension, graph, delta_start, delta_end);
            new_rect.prev = a;
            currAs.push_back(new_rect);
            prevAs[a].next = currAs.size()-1;
          }
        } else {
          // binaryI(XG&, thread_t, b, atop, abott, dJtop, dJbott, Jtop, Jbott, indent level)
          binaryI(graph, extension, b, 0, prevAs.size(), deltaJ, 0, currAs[prevAs[0].next].J, 0, 0, edges_in, edges_out);
          // I of each rectangle is the difference between consecutive J's;
          // the oldest rectangle keeps its whole J.
          for(int a = 0; a < currAs.size() - 1; a++) {
            currAs[a].I = currAs[a].J - currAs[a+1].J;
          }
          currAs.back().I = currAs.back().J;
        }
      }
    }
  }
}
// Builds a new haplo_d by recombining the left decomposition (up to column
// left_cut) with the right decomposition (from column right_join onward),
// following the top rectangle of the left cut column across the right arm.
// Returns an empty haplo_d if right has no joining node at right_join.
// NOTE(review): the "build first column" loop is an unimplemented stub from
// the original — left as a TODO rather than guessed at.
haplo_d recombine_arms(haplo_d& left, haplo_d& right, int left_cut, int right_join, xg::XG& graph) {
  haplo_d to_return;
  if(!right.has_joining_node(right_join)) {
    return to_return;
  } else {
    vector<rectangle*> boundary = right.trace_strip(right_join, 0, -1);
    // The top rectangle of the left arm at the cut point, which we carry
    // across the right arm node-by-node.
    rectangle rect = left.cs[left_cut].S[0];
    thread_t extension = left.cs[left_cut].bridge;
    int lastJ = rect.J;
    vector<int> boundaryDeltas;
    vector<int> boundaryJs;
    for(int i = 0; i < boundary.size(); i++) {
      to_return.cs.push_back(right.cs[right_join + i].cs_shell());
      extension.push_back(to_return.cs[i].get_node());
      // presumably a 2-argument overload of get_next_J (the 4-argument form
      // with edge lists is used elsewhere) — verify against rectangle's API
      int new_J = rect.get_next_J(extension,graph);
      boundaryDeltas.push_back(new_J - lastJ);
      if(new_J > 0) {
        boundaryJs.push_back(new_J);
      } else {
        // The carried rectangle died; stop extending.
        break;
      }
      if(boundary[i]->J - new_J > 0) {
        // Threads from the right arm that are not shared with the carried
        // rectangle form a "joiners" rectangle on top.
        rectangle joiners;
        if(i > 0) {
          joiners.prev = 0;
          to_return.cs[i-1].S[0].next = 0;
        } else {
          joiners.prev = -1;
        }
        joiners.J = boundary[i]->J;
        joiners.I = boundary[i]->J - new_J;
        to_return.cs[i].S.push_back(joiners);
      }
      if(new_J > 0) {
        // The carried rectangle continues into this column.
        rectangle continuing;
        continuing.J = new_J;
        if(i > 0) {
          continuing.prev = to_return.cs[i-1].S.size() - 1;
          to_return.cs[i-1].S.back().next = to_return.cs[i].S.size();
        }
        to_return.cs[i].S.push_back(continuing);
      }
      lastJ = new_J;
      extension = left.cs[left_cut].bridge;
    }
    // TODO(review): unimplemented in the original — the first column of the
    // recombined haplo_d is never populated from left.cs[left_cut].S.
    for(int i = 0; i < left.cs[left_cut].S.size(); i++) {
      //build first column
    }
    vector<Edge> edges_out;
    vector<Edge> edges_in;
    for(int i = 1; i < boundaryJs.size(); i++) {
      edges_out = to_return.cs[i-1].get_last_node().is_reverse ? graph.edges_on_start(to_return.cs[i-1].get_last_node().node_id) : graph.edges_on_end(to_return.cs[i-1].get_last_node().node_id);
      edges_in = to_return.cs[i].get_node().is_reverse ? graph.edges_on_end(to_return.cs[i].get_node().node_id) : graph.edges_on_start(to_return.cs[i].get_node().node_id);
      to_return.binaryI(graph, extension, i, to_return.cs[i].S.back().prev, to_return.cs[i-1].S.size(), boundaryDeltas[i], 0, boundaryJs[i], 0, 0, edges_in, edges_out);
      // I of each rectangle is the difference between consecutive J's;
      // the oldest rectangle keeps its whole J.
      for(int a = 0; a < to_return.cs[i].S.size() - 1; a++) {
        to_return.cs[i].S[a].I = to_return.cs[i].S[a].J - to_return.cs[i].S[a+1].J;
      }
      to_return.cs[i].S.back().I = to_return.cs[i].S.back().J;
    }
    // BUGFIX: the original fell off the end of this value-returning function
    // in the else branch — undefined behavior. Return the built haplo_d.
    return to_return;
  }
}