// Integration test for GraphCrawler: build a 3-colour graph from three 76bp
// sequences that share an 11-kmer prefix, then crawl forward from that shared
// start node and check each recovered path matches one of the input sequences.
void test_graph_crawler()
{
  test_status("Testing graph crawler...");

  // Construct a 3 colour graph with kmer-size=11
  dBGraph graph;
  const size_t kmer_size = 11, ncols = 3;

  db_graph_alloc(&graph, kmer_size, ncols, 1, 2048,
                 DBG_ALLOC_EDGES | DBG_ALLOC_NODE_IN_COL | DBG_ALLOC_BKTLOCKS);

  // Three 76bp sequences, one per colour. Colours 0 and 1 share a long prefix
  // and differ at the marked positions; colour 2 diverges early.
  char graphseq[3][77] =
//           <     X                 X                X...............
 {"GTTCCAGAGCGGAGGTCTCCCAACAACATGGTATAAGTTGTCTAGCCCCGGTTCGCGCGGGTACTTCTTACAGCGC",
  "GTTCCAGAGCGGAGGTCTCCCAACAACTTGGTATAAGTTGTCTAGTCCCGGTTCGCGCGGCATTTCAGCATTGTTA",
  "GTTCCAGAGCGCGACAGAGTGCATATCACGCTAAGCACAGCCCTCTTCTATCTGCTTTTAAATGGATCAATAATCG"};

  // Load each sequence into its own colour
  build_graph_from_str_mt(&graph, 0, graphseq[0], strlen(graphseq[0]));
  build_graph_from_str_mt(&graph, 1, graphseq[1], strlen(graphseq[1]));
  build_graph_from_str_mt(&graph, 2, graphseq[2], strlen(graphseq[2]));

  // Crawl graph
  GraphCrawler crawler;
  graph_crawler_alloc(&crawler, &graph);

  // First kmer of seq 0 and the kmer one base along; both must be in the graph
  dBNode node = db_graph_find_str(&graph, graphseq[0]);
  dBNode next_node = db_graph_find_str(&graph, graphseq[0]+1);
  TASSERT(node.key != HASH_NOT_FOUND);
  TASSERT(next_node.key != HASH_NOT_FOUND);

  BinaryKmer bkey = db_node_get_bkmer(&graph, node.key);
  Edges edges = db_node_get_edges(&graph, node.key, 0);

  dBNode next_nodes[4];
  Nucleotide next_nucs[4];
  size_t i, p, num_next, next_idx;

  num_next = db_graph_next_nodes(&graph, bkey, node.orient, edges,
                                 next_nodes, next_nucs);

  // Locate next_node amongst the successors of node
  next_idx = 0;
  while(next_idx < num_next &&
        !db_nodes_are_equal(next_nodes[next_idx],next_node)) next_idx++;

  TASSERT(next_idx < num_next &&
          db_nodes_are_equal(next_nodes[next_idx],next_node));

  // Crawl in all colours from node through next_nodes[next_idx]
  graph_crawler_fetch(&crawler, node, next_nodes, next_idx, num_next,
                      NULL, graph.num_of_cols, NULL, NULL, NULL);
  // Expect two paths: colours 0+1 follow the shared prefix (one path each
  // after they split), colour 2 took a different first junction
  TASSERT2(crawler.num_paths == 2, "crawler.num_paths: %u", crawler.num_paths);

  // Fetch paths and compare each against the input sequences (offset by one
  // base, since the crawl starts AFTER the first kmer's node)
  dBNodeBuffer nbuf;
  db_node_buf_alloc(&nbuf, 16);
  StrBuf sbuf;
  strbuf_alloc(&sbuf, 128);

  for(p = 0; p < crawler.num_paths; p++)
  {
    db_node_buf_reset(&nbuf);
    graph_crawler_get_path_nodes(&crawler, p, &nbuf);
    strbuf_ensure_capacity(&sbuf, nbuf.len+graph.kmer_size);
    sbuf.end = db_nodes_to_str(nbuf.b, nbuf.len, &graph, sbuf.b);
    // Path string must equal one of the inputs minus its first base
    for(i = 0; i < 3 && strcmp(graphseq[i]+1,sbuf.b) != 0; i++) {}
    TASSERT2(i < 3, "seq: %s", sbuf.b);
    // 76bp seq minus 1 base = 75 chars; 75 - (11-1) overlap = 65 nodes
    TASSERT2(sbuf.end == 75, "sbuf.end: %zu", sbuf.end);
    TASSERT2(nbuf.len == 65, "nbuf.len: %zu", nbuf.len);
  }

  strbuf_dealloc(&sbuf);
  db_node_buf_dealloc(&nbuf);
  graph_crawler_dealloc(&crawler);
  db_graph_dealloc(&graph);
}
/**
 * Allocate `num_callers` BreakpointCaller workers sharing one output stream.
 *
 * All callers share: the gzipped output handle `gzout`, one mutex guarding it,
 * and a single call-id counter. Each caller additionally owns per-thread node
 * buffers, kmer-run buffers and two GraphCrawlers. Ownership of everything is
 * transferred to the returned array; release with brkpt_callers_destroy().
 */
static BreakpointCaller* brkpt_callers_new(size_t num_callers,
                                           gzFile gzout,
                                           size_t min_ref_flank,
                                           size_t max_ref_flank,
                                           const KOGraph kograph,
                                           const dBGraph *db_graph)
{
  ctx_assert(num_callers > 0);

  const size_t ncols = db_graph->num_of_cols;

  BreakpointCaller *callers = ctx_malloc(num_callers * sizeof(BreakpointCaller));

  // One mutex shared by all threads to serialise writes to gzout
  pthread_mutex_t *out_lock = ctx_malloc(sizeof(pthread_mutex_t));
  if(pthread_mutex_init(out_lock, NULL) != 0) die("mutex init failed");

  // Shared, zero-initialised call counter
  size_t *callid = ctx_calloc(1, sizeof(size_t));

  // Each colour in each caller can have a GraphCache path at once
  PathRefRun *path_ref_runs = ctx_calloc(num_callers*MAX_REFRUNS_PER_CALLER(ncols),
                                         sizeof(PathRefRun));

  size_t idx;
  for(idx = 0; idx < num_callers; idx++)
  {
    BreakpointCaller *caller = &callers[idx];

    // Initialise via a temporary + memcpy: BreakpointCaller may hold const
    // members, which direct struct assignment would reject
    BreakpointCaller init = {.threadid = idx,
                             .nthreads = num_callers,
                             .kograph = kograph,
                             .db_graph = db_graph,
                             .gzout = gzout,
                             .out_lock = out_lock,
                             .callid = callid,
                             .allele_refs = path_ref_runs,
                             .flank5p_refs = path_ref_runs+MAX_REFRUNS_PER_ORIENT(ncols),
                             .min_ref_nkmers = min_ref_flank,
                             .max_ref_nkmers = max_ref_flank};

    memcpy(caller, &init, sizeof(BreakpointCaller));

    // Advance to this caller's slice of the shared ref-run allocation
    path_ref_runs += MAX_REFRUNS_PER_CALLER(ncols);

    db_node_buf_alloc(&caller->allelebuf, 1024);
    db_node_buf_alloc(&caller->flank5pbuf, 1024);
    kmer_run_buf_alloc(&caller->koruns_5p, 128);
    kmer_run_buf_alloc(&caller->koruns_5p_ended, 128);
    kmer_run_buf_alloc(&caller->koruns_3p, 128);
    kmer_run_buf_alloc(&caller->koruns_3p_ended, 128);
    kmer_run_buf_alloc(&caller->allele_run_buf, 128);
    kmer_run_buf_alloc(&caller->flank5p_run_buf, 128);
    graph_crawler_alloc(&caller->crawlers[0], db_graph);
    graph_crawler_alloc(&caller->crawlers[1], db_graph);
  }

  return callers;
}

/**
 * Tear down an array created by brkpt_callers_new().
 *
 * Frees each caller's private buffers/crawlers, then the shared resources
 * (mutex, call counter, ref-run block) via caller 0, which holds the base
 * pointers, and finally the array itself.
 */
static void brkpt_callers_destroy(BreakpointCaller *callers, size_t num_callers)
{
  size_t idx;
  for(idx = 0; idx < num_callers; idx++)
  {
    BreakpointCaller *caller = &callers[idx];
    db_node_buf_dealloc(&caller->allelebuf);
    db_node_buf_dealloc(&caller->flank5pbuf);
    kmer_run_buf_dealloc(&caller->koruns_5p);
    kmer_run_buf_dealloc(&caller->koruns_5p_ended);
    kmer_run_buf_dealloc(&caller->koruns_3p);
    kmer_run_buf_dealloc(&caller->koruns_3p_ended);
    kmer_run_buf_dealloc(&caller->allele_run_buf);
    kmer_run_buf_dealloc(&caller->flank5p_run_buf);
    graph_crawler_dealloc(&caller->crawlers[0]);
    graph_crawler_dealloc(&caller->crawlers[1]);
  }

  // Shared resources: caller 0 holds the original (base) pointers
  pthread_mutex_destroy(callers[0].out_lock);
  ctx_free(callers[0].out_lock);
  ctx_free(callers[0].callid);
  ctx_free(callers[0].allele_refs);
  ctx_free(callers);
}