pool *pet_cvg(const char *pet_fn, const ass_opt *opt) {
    bwa_seq_t *pets, *query, *p, *p2;
    int i = 0, j = 0, k = 0;
    index64 mate_i = 0;
    pool *good_pets = new_pool(), *repeat_pets = new_pool();
    alignarray *align, *align_2;
    alg *a;
    hash_table *ht;

    ht = pe_load_hash(pet_fn);
    pets = ht->seqs;
    fprintf(stderr, "[pe_cvg] Converging RNA-PETs... \n");
    // for (i = n_pets - 1; i >= 0; i -= 2) {
    for (i = 0; i < ht->n_seqs; i += 2) {
        p = &pets[i];
        p2 = &pets[i + 1];
        if (binary_exists(repeat_pets->reads, p)
                || binary_exists(good_pets->reads, p))
            continue;
        for (k = p->len - opt->ol; k >= 0; k--) {
            query = new_seq(p, opt->ol, k);
            // p_query(query);
            pe_aln_query(query, query->seq, ht, opt->nm + 2, opt->ol, 0, align);
            pool_sort_ins(good_pets, p);
            // p_align(align);
            query = new_seq(p2, opt->ol, k);
            // p_query(query);
            pe_aln_query(query, query->seq, ht, opt->nm + 2, opt->ol, 0,
                    align_2);
            pool_sort_ins(good_pets, p2);
            // p_align(align_2);
            for (j = 0; j < align->len; j++) {
                a = g_ptr_array_index(align, j);
                // The aligned seq is the query itself
                if (a->r_id == atoll(p->name))
                    continue;
                mate_i = get_mate_index(a->r_id);
                // If the right mate is also aligned
                if (!aligned(align_2, mate_i))
                    continue;
                pool_sort_ins(repeat_pets, &pets[a->r_id]);
                pool_sort_ins(repeat_pets, &pets[mate_i]);
            }
        }
        // p_pool("Good Pets: ", good_pets);
        // p_pool("Repeat Pets: ", repeat_pets);
    }
    fprintf(stderr, "[pet_cvg] Converged to %zd RNA-PETs... \n",
            (good_pets->n));
    fprintf(stderr, "[pet_cvg] ------------------------------ \n");
    // p_pool("Good Pets: ", good_pets);
    return good_pets;
}
void *getmem(size_t size) {
    void *ret;

    size = (size + sizeof(void *) - 1) / sizeof(void *);
    size *= sizeof(void *);
    if (!freespace)
        new_pool(size > 256000 ? size : 256000);
    if (freespace->pool_size - freespace->free_index < size)
        new_pool(size > 256000 ? size : 256000);
    ret = freespace->mem + freespace->free_index;
    freespace->free_index += size;
    return ret;
}
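/*
 * Worked example of the rounding in getmem() above (illustrative only,
 * assuming 8-byte pointers): a request for 13 bytes becomes
 *     (13 + 8 - 1) / 8 = 2 pointer-sized units, 2 * 8 = 16 bytes,
 * so free_index always stays aligned to a multiple of sizeof(void *).
 */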
/*-------------------------------------------------------------------------*/
Mempool new_lifopool (size_t iSize)

/* Create a new Lifopool for a typical allocation size of <iSize>
 * bytes per memory block and prepare it for use.
 * Result is the pointer to the mempool structure, or NULL if an error
 * occurs.
 */

{
    Mempool pPool;

    iSize += SIZEOF_LIFO_T;  /* Include space for the sentinel block */
    pPool = new_pool(iSize, LIFOPOOL);
    if (pPool)
    {
        /* Add a sentinel (pseudo-used block) at the end of the arena. */
        struct memblock_s * pBlock = pPool->pBlocks;
        lifo_t *p = (lifo_t *)(pBlock->pMark - SIZEOF_LIFO_T);

        p->length = 1;
        p->pBlock = pBlock;

        /* Update the pMark pointer */
        pBlock->pMark = (char *)p;
    }

    return pPool;
} /* new_lifopool() */
void *pool_alloc(pool_alloc_t *p) {
    pool_t *pool;
    void *ret;

    /* Look on free list */
    if (NULL != p->free) {
        ret = p->free;
        p->free = *((void **)p->free);
        return ret;
    }

    /* Look for space in the last pool */
    if (p->npools) {
        pool = &p->pools[p->npools - 1];
        if (pool->used + p->dsize < PSIZE) {
            ret = ((char *) pool->pool) + pool->used;
            pool->used += p->dsize;
            return ret;
        }
    }

    /* Need a new pool */
    pool = new_pool(p);
    if (NULL == pool)
        return NULL;

    pool->used = p->dsize;

    return pool->pool;
}
/**
 * From current edge, get all mates of the used reads.
 */
pool *get_mate_pool_from_edge(edge *eg, const hash_table *ht, const int ori) {
    int i = 0;
    bwa_seq_t *s = NULL, *mate = NULL, *seqs = NULL;
    pool *mate_pool = NULL;

    mate_pool = new_pool();
    seqs = ht->seqs;
    for (i = 0; i < eg->reads->len; i++) {
        s = g_ptr_array_index(eg->reads, i);
        mate = get_mate(s, seqs);
        //p_query("READ", s);
        //p_query("MATE", mate);
        // If the mate should have been used.
        if (is_paired(s, ori))
            continue;
        // If the insert size is not in the range
        if (abs(eg->len - s->shift) > (insert_size + sd_insert_size
                * SD_TIMES)) {
            continue;
        }
        // If the mate is already in use, either by current or another thread
        if (mate->tid != -1 || mate->status == USED || mate->status == DEAD)
            continue;
        // The read should be used by current edge already
        // and the mate has not been used by this template before.
        if (!(s->status == TRIED && s->contig_id == eg->id)
                || (mate->status == TRIED && mate->contig_id == eg->id))
            continue;
        mate->rev_com = s->rev_com;
        mate_pool_add(mate_pool, mate, eg->tid);
    }
    return mate_pool;
}
/// Test that copying the unique_pool works as expected.
///
/// For a type to be regular, the following should hold:
///
///     T a = b; assert(a == b);
///     T a; a = b;  <->  T a = b;
///     T a = c; T b = c; a = d; assert(b == c);
///     T a = c; T b = c; zap(a); assert(b == c && a != b);
///
TEST(test_unique_pool, copy_constructor)
{
    recycle::unique_pool<dummy_one> pool;

    auto o1 = pool.allocate();
    auto o2 = pool.allocate();
    o1.reset();

    recycle::unique_pool<dummy_one> new_pool(pool);
    EXPECT_EQ(pool.unused_resources(), 1U);
    EXPECT_EQ(new_pool.unused_resources(), 1U);

    o2.reset();
    EXPECT_EQ(pool.unused_resources(), 2U);
    EXPECT_EQ(new_pool.unused_resources(), 1U);

    EXPECT_EQ(dummy_one::m_count, 3);

    pool.free_unused();
    new_pool.free_unused();

    EXPECT_EQ(dummy_one::m_count, 0);
}
void correct_bases(bwa_seq_t *seqs, bwa_seq_t *ori_read, alignarray *aligns,
        const int tid) {
    pool *p = NULL;
    int j = 0, cursor = 0, i = 0, index = 0, has_hit = 0;
    alg *a = NULL;
    bwa_seq_t *s = NULL;
    int *counter = NULL;

    //p_query("ORI", ori_read);
    p = new_pool();
    ori_read->cursor = 0;
    for (i = 0; i < aligns->len; i++) {
        a = g_ptr_array_index(aligns, i);
        index = a->r_id;
        s = &seqs[index];
        if (s->is_in_c_pool > 0 && s->is_in_c_pool != tid)
            continue;
        if (s->status == USED || strcmp(s->name, ori_read->name) == 0)
            continue;
        s->rev_com = a->rev_comp;
        if (s->rev_com)
            s->cursor = s->len - ori_read->len - a->pos;
        else
            s->cursor = a->pos;
        if (s->is_in_c_pool == 0)
            pool_add(p, s, tid);
    }
    if (p->n >= 4) {
        //p_pool(__func__, p, NULL);
        counter = (int*) calloc(5, sizeof(int));
        for (j = 0; j < ori_read->len; j++) {
            reset_c(counter, NULL);
            has_hit = 0;
            for (i = 0; i < p->n; i++) {
                s = g_ptr_array_index(p->reads, i);
                cursor = s->cursor + j;
                /* Only count bases that fall inside the aligned read. */
                if (cursor >= 0 && cursor < s->len) {
                    has_hit = 1;
                    if (s->rev_com) {
                        counter[s->rseq[cursor]]++;
                    } else {
                        counter[s->seq[cursor]]++;
                    }
                }
            }
            //show_debug_msg(__func__, "Correcting %d: %d:%d:%d:%d\n", j, counter[0],
            //        counter[1], counter[2], counter[3]);
            if (has_hit) {
                ori_read->seq[j] = get_pure_most(counter);
                ori_read->rseq[ori_read->len - 1 - j] = 3 - ori_read->seq[j];
            }
        }
        free(counter);
    }
    free_pool(p);
    //p_query("AFT", ori_read);
}
pool_type* get_pool(size_type obj_size)
{
    pool_type* pool = get_pool(obj_size, std::nothrow);
    if (pool != nullptr) {
        return pool;
    }

    pool_type new_pool(obj_size);
    m_pools.emplace_back(std::move(new_pool), 1);
    return &m_pools.back().first;
}
void deallocate_impl_unsafe (T * n)
{
    void * node = n;
    tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);
    freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

    tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
    new_pool->next.set_ptr(old_pool.get_ptr());
    pool_.store(new_pool, memory_order_relaxed);
}
static void setup(void) { setup_fixture_path(); read_document("fixtures/complete_tag.atom"); system("rm -Rf /tmp/valid-copy && cp -R fixtures/valid /tmp/valid-copy && chmod -R 755 /tmp/valid-copy"); item_cache_create(&item_cache, "/tmp/valid-copy", &item_cache_options); tagger = build_tagger(document, item_cache); train_tagger(tagger, item_cache); tagger->probability_function = &probability_function; assert_equal(TAGGER_TRAINED, tagger->state); random_background = new_pool(); }
/*-------------------------------------------------------------------------*/
Mempool new_mempool (size_t iSize)

/* Create a new Mempool for a typical allocation size of <iSize>
 * bytes per memory block and prepare it for use.
 * Result is the pointer to the mempool structure, or NULL if an error
 * occurs.
 */

{
    return new_pool(iSize, MEMPOOL);
} /* new_mempool() */
void deallocate_impl (T * n)
{
    void * node = n;
    tagged_node_ptr old_pool = pool_.load(memory_order_consume);
    freelist_node * new_pool_ptr = reinterpret_cast<freelist_node*>(node);

    for(;;) {
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_tag());
        new_pool->next.set_ptr(old_pool.get_ptr());

        if (pool_.compare_exchange_weak(old_pool, new_pool))
            return;
    }
}
/// Test move constructor
TEST(test_unique_pool, move_constructor)
{
    recycle::unique_pool<dummy_one> pool;

    auto o1 = pool.allocate();
    auto o2 = pool.allocate();
    o1.reset();

    recycle::unique_pool<dummy_one> new_pool(std::move(pool));
    o2.reset();

    EXPECT_EQ(new_pool.unused_resources(), 2U);
}
int init_connections(size_t n)
{
    size_t i;
    struct connection *cn;

    connection_array = malloc(n * sizeof *connection_array);
    if (connection_array == 0) {
        log_d("init_connections: out of memory");
        return -1;
    }
    for (i = 0; i < n; i++) {
        cn = connection_array + i;
        if ((cn->r = malloc(sizeof *cn->r)) == 0) {
            log_d("init_connections: out of memory");
            return -1;
        }
        if (tuning.num_headers == 0)
            cn->r->headers = 0;
        else if ((cn->r->headers = malloc(tuning.num_headers * sizeof *cn->r->headers)) == 0) {
            log_d("init_connections: out of memory");
            return -1;
        }
        if (new_pool(&cn->header_input, tuning.input_buf_size) == -1)
            return -1;
        if (new_pool(&cn->output, tuning.buf_size) == -1)
            return -1;
        if (new_pool(&cn->client_input, tuning.script_buf_size) == -1)
            return -1;
        if (new_pool(&cn->script_input, tuning.script_buf_size) == -1)
            return -1;
        cn->r->cn = cn;
        cn->connection_state = HC_UNATTACHED;
        set_connection_state(cn, HC_FREE);
    }
    return 0;
}
T * allocate_impl_unsafe (void)
{
    tagged_node_ptr old_pool = pool_.load(memory_order_relaxed);

    if (!old_pool.get_ptr()) {
        if (!Bounded)
            return Alloc::allocate(1);
        else
            return 0;
    }

    freelist_node * new_pool_ptr = old_pool->next.get_ptr();
    tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());
    pool_.store(new_pool, memory_order_relaxed);

    void * ptr = old_pool.get_ptr();
    return reinterpret_cast<T*>(ptr);
}
static void setup(void) { setup_fixture_path(); read_document("fixtures/complete_tag.atom"); random_background = new_pool(); system("rm -Rf /tmp/valid-copy && cp -R fixtures/valid /tmp/valid-copy && chmod -R 755 /tmp/valid-copy"); item_cache_create(&item_cache, "/tmp/valid-copy", &item_cache_options); tagger = build_tagger(document, item_cache); train_tagger(tagger, item_cache); tagger->probability_function = &naive_bayes_probability; tagger->classification_function = &mock_classify; precompute_tagger(tagger, random_background); assert_equal(TAGGER_PRECOMPUTED, tagger->state); classified_item = NULL; int freeit; item = item_cache_fetch_item(item_cache, (unsigned char*) "urn:peerworks.org:entry#709254", &freeit); }
T * allocate_impl (void)
{
    tagged_node_ptr old_pool = pool_.load(memory_order_consume);

    for(;;) {
        if (!old_pool.get_ptr()) {
            if (!Bounded)
                return Alloc::allocate(1);
            else
                return 0;
        }

        freelist_node * new_pool_ptr = old_pool->next.get_ptr();
        tagged_node_ptr new_pool (new_pool_ptr, old_pool.get_next_tag());

        if (pool_.compare_exchange_weak(old_pool, new_pool)) {
            void * ptr = old_pool.get_ptr();
            return reinterpret_cast<T*>(ptr);
        }
    }
}
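/*
 * Minimal standalone sketch (not the freelist implementation above; Node,
 * TaggedPtr, head_, push_node and pop_node are illustrative names only) of
 * the tag-counter idea used by allocate_impl/deallocate_impl: the pop side
 * bumps the tag on every successful CAS while the push side keeps it, so a
 * node that is popped and pushed back between another thread's load and its
 * compare_exchange no longer compares equal, and that thread retries instead
 * of corrupting the list (the ABA problem).
 */
#include <atomic>
#include <cstdint>

struct Node {
    Node *next;
};

struct TaggedPtr {
    Node *ptr;
    std::uint64_t tag;
};

static std::atomic<TaggedPtr> head_{TaggedPtr{nullptr, 0}};

void push_node(Node *n)
{
    TaggedPtr old_head = head_.load(std::memory_order_acquire);
    for (;;) {
        n->next = old_head.ptr;
        TaggedPtr new_head{n, old_head.tag};    /* tag kept, as in deallocate_impl */
        /* on failure, old_head is reloaded and the loop retries */
        if (head_.compare_exchange_weak(old_head, new_head))
            return;
    }
}

Node *pop_node()
{
    TaggedPtr old_head = head_.load(std::memory_order_acquire);
    for (;;) {
        if (!old_head.ptr)
            return nullptr;                     /* list empty */
        /* tag bumped, as get_next_tag() does in allocate_impl */
        TaggedPtr new_head{old_head.ptr->next, old_head.tag + 1};
        if (head_.compare_exchange_weak(old_head, new_head))
            return old_head.ptr;
    }
}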
void mem_requirements_hint(size_t size) {
    if (freespace)
        return;
    new_pool(size);
}