Example #1
/*
 * heap_reclaim_garbage -- (internal) creates volatile state of unused runs
 */
static int
heap_reclaim_garbage(struct palloc_heap *heap)
{
	struct memory_block m;
	for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
		while (recycler_get(heap->rt->recyclers[i], &m) == 0) {
			m.m_ops->claim_revoke(&m);
		}
	}

	int ret = ENOMEM;
	for (unsigned i = 0; i < heap->rt->zones_exhausted; ++i) {
		if (heap_reclaim_zone_garbage(heap, i, 0 /* not init */) == 0)
			ret = 0;
	}

	return ret;
}
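
For context, the reclaim pass above first drains every recycler by revoking the claims on the cached runs, then sweeps each zone that has been used so far, and reports success if at least one sweep produced garbage. Below is a minimal, standalone sketch of that succeed-if-any aggregation; sweep_zone() is a hypothetical stand-in for heap_reclaim_zone_garbage() and is not part of libpmemobj.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for heap_reclaim_zone_garbage() */
static int
sweep_zone(size_t zone_id)
{
	/* pretend only even-numbered zones currently hold reclaimable runs */
	return (zone_id % 2 == 0) ? 0 : ENOMEM;
}

/*
 * sweep_all -- succeeds if at least one zone produced garbage, otherwise
 * falls through to the ENOMEM default, mirroring heap_reclaim_garbage()
 */
static int
sweep_all(size_t zones_exhausted)
{
	int ret = ENOMEM;
	for (size_t i = 0; i < zones_exhausted; ++i) {
		if (sweep_zone(i) == 0)
			ret = 0;
	}
	return ret;
}

int
main(void)
{
	printf("%d\n", sweep_all(4) == 0);      /* 1: zone 0 had garbage */
	printf("%d\n", sweep_all(0) == ENOMEM); /* 1: nothing to sweep */
	return 0;
}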
Example #2
static void
test_recycler(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	/* trigger heap bucket populate */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = 1;
	struct bucket *b = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);
	UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
	heap_bucket_release(heap, b);

	int ret;

	struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */);
	UT_ASSERTne(r, NULL);

	init_run_with_score(pop->heap.layout, 0, 64);
	init_run_with_score(pop->heap.layout, 1, 128);

	init_run_with_score(pop->heap.layout, 15, 0);

	struct memory_block mrun = {0, 0, 1, 0};
	struct memory_block mrun2 = {1, 0, 1, 0};

	memblock_rebuild_state(&pop->heap, &mrun);
	memblock_rebuild_state(&pop->heap, &mrun2);

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
	mrun_ret.size_idx = 1;
	struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
	mrun2_ret.size_idx = 1;

	/* the lower-score run (chunk 0) is expected to come back first */
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);

	init_run_with_score(pop->heap.layout, 7, 256);
	init_run_with_score(pop->heap.layout, 2, 64);
	init_run_with_score(pop->heap.layout, 5, 512);
	init_run_with_score(pop->heap.layout, 10, 128);

	mrun.chunk_id = 7;
	mrun2.chunk_id = 2;
	struct memory_block mrun3 = {5, 0, 1, 0};
	struct memory_block mrun4 = {10, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun3);
	memblock_rebuild_state(&pop->heap, &mrun4);

	mrun_ret.size_idx = 1;
	mrun2_ret.size_idx = 1;
	struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
	mrun3_ret.size_idx = 1;
	struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
	mrun4_ret.size_idx = 1;

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun3,
		recycler_calc_score(&pop->heap, &mrun3, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun4,
		recycler_calc_score(&pop->heap, &mrun4, NULL));
	UT_ASSERTeq(ret, 0);

	/* gets should return the runs in ascending score order: 64, 128, 256, 512 */
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun4_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun3_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
	UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);

	/* a run from which an 11-unit request must fail but a 10-unit one succeeds */
	init_run_with_max_block(pop->heap.layout, 1);
	struct memory_block mrun5 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun5);

	ret = recycler_put(r, &mrun5,
		recycler_calc_score(&pop->heap, &mrun5, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 11;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, ENOMEM);

	mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 10;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, 0);

	recycler_delete(r);

	stats_delete(pop, s);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
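
The assertions in this test treat the recycler as a score-ordered container: runs put in with lower scores come back out first, and a get whose size_idx exceeds what any stored run can hand out fails with ENOMEM. The toy model below reproduces only those observable semantics; it is not the real recycler implementation, and all names (toy_put, toy_get, free_units) are made up for illustration.

#include <errno.h>
#include <stdint.h>

#define TOY_CAPACITY 16

struct toy_entry {
	uint64_t score;      /* lower score is handed out first */
	uint32_t chunk_id;
	uint32_t free_units; /* largest request this run can satisfy */
};

struct toy_recycler {
	struct toy_entry e[TOY_CAPACITY];
	int count;
};

/* insert while keeping the array sorted by ascending score */
static int
toy_put(struct toy_recycler *r, struct toy_entry entry)
{
	if (r->count == TOY_CAPACITY)
		return ENOMEM;

	int i = r->count++;
	for (; i > 0 && r->e[i - 1].score > entry.score; --i)
		r->e[i] = r->e[i - 1];
	r->e[i] = entry;

	return 0;
}

/* hand out the lowest-score run able to satisfy size_idx units */
static int
toy_get(struct toy_recycler *r, uint32_t size_idx, uint32_t *chunk_id)
{
	for (int i = 0; i < r->count; ++i) {
		if (r->e[i].free_units < size_idx)
			continue;
		*chunk_id = r->e[i].chunk_id;
		/* shift the tail left to keep the array compact */
		for (int j = i + 1; j < r->count; ++j)
			r->e[j - 1] = r->e[j];
		r->count--;
		return 0;
	}
	return ENOMEM;
}

int
main(void)
{
	struct toy_recycler r = { .count = 0 };
	uint32_t id;

	toy_put(&r, (struct toy_entry){ .score = 128, .chunk_id = 1,
		.free_units = 10 });
	toy_put(&r, (struct toy_entry){ .score = 64, .chunk_id = 0,
		.free_units = 10 });

	toy_get(&r, 1, &id);  /* id == 0: the lower-score run comes first */
	toy_get(&r, 11, &id); /* ENOMEM: no remaining run can give 11 units */
	return 0;
}

Putting entries with scores 128 and 64 and getting twice returns the score-64 chunk first, and a later request for 11 units fails once only a 10-unit run remains, which is exactly the ordering and the ENOMEM case the UT_ASSERT lines above encode.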
Example #3
/*
 * heap_ensure_run_bucket_filled -- (internal) refills the bucket if needed
 */
static int
heap_ensure_run_bucket_filled(struct palloc_heap *heap, struct bucket *b,
	uint32_t units)
{
	ASSERTeq(b->aclass->type, CLASS_RUN);

	if (b->is_active) {
		b->c_ops->rm_all(b->container);
		b->active_memory_block.m_ops
			->claim_revoke(&b->active_memory_block);

		b->is_active = 0;
	}

	struct heap_rt *h = heap->rt;
	struct memory_block m = MEMORY_BLOCK_NONE;

	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);

		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	m.size_idx = b->aclass->run.size_idx;

	/* cannot reuse an existing run; create a new one */
	struct bucket *defb = heap_get_default_bucket(heap);
	util_mutex_lock(&defb->lock);
	if (heap_get_bestfit_block(heap, defb, &m) == 0) {
		ASSERTeq(m.block_off, 0);

		heap_create_run(heap, b, &m);

		b->active_memory_block = m;
		b->is_active = 1;

		util_mutex_unlock(&defb->lock);
		return 0;
	}
	util_mutex_unlock(&defb->lock);

	/*
	 * Try the recycler again; the previous best-fit search for huge
	 * chunks might have reclaimed some unused runs.
	 */
	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);
		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		/*
		 * To verify whether the run taken from the recycler can
		 * satisfy our request, we attempt to retrieve a block from
		 * it. This is not ideal and should be replaced by a
		 * different heuristic once proper memory block scoring is
		 * implemented.
		 */
		struct memory_block tmp = MEMORY_BLOCK_NONE;
		tmp.size_idx = units;
		if (b->c_ops->get_rm_bestfit(b->container, &tmp) != 0) {
			b->c_ops->rm_all(b->container);
			m.m_ops->claim_revoke(&m);
			return ENOMEM;
		} else {
			bucket_insert_block(b, &tmp);
		}

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	return ENOMEM;
}
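
The refill above is a three-stage fallback: reuse a run from the recycler, otherwise carve a new run out of a huge chunk via the default bucket, and as a last resort retry the recycler, since the huge-chunk search may itself have reclaimed runs. The compressed sketch below shows only that control flow; the three predicates are hypothetical placeholders (the real code uses recycler_get(), heap_get_bestfit_block() and the container's get_rm_bestfit callback, plus the locking and claim_revoke() cleanup elided here).

#include <errno.h>
#include <stdbool.h>

/* placeholder predicates; real implementations would consult heap state */
static bool try_recycler(void) { return false; }
static bool try_new_run_from_huge_chunk(void) { return false; }
static bool run_satisfies_request(void) { return false; }

static int
refill_run_bucket(void)
{
	/* 1. reuse an existing, partially free run */
	if (try_recycler())
		return 0;

	/* 2. carve a brand new run out of a huge chunk */
	if (try_new_run_from_huge_chunk())
		return 0;

	/* 3. the huge-chunk search may have reclaimed runs; retry, but
	 *    keep the run only if it can satisfy the current request */
	if (try_recycler() && run_satisfies_request())
		return 0;

	return ENOMEM;
}

int
main(void)
{
	return refill_run_bucket() == ENOMEM ? 0 : 1;
}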