Example #1
File: obj_heap.c Project: wojtuss/nvml
static void
test_recycler(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	/* trigger heap bucket populate */
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.size_idx = 1;
	struct bucket *b = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);
	UT_ASSERT(heap_get_bestfit_block(heap, b, &m) == 0);
	heap_bucket_release(heap, b);

	int ret;

	struct recycler *r = recycler_new(&pop->heap, 10000 /* never recalc */);
	UT_ASSERTne(r, NULL);

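	/* seed chunks 0, 1 and 15 with runs of differing scores */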
	init_run_with_score(pop->heap.layout, 0, 64);
	init_run_with_score(pop->heap.layout, 1, 128);

	init_run_with_score(pop->heap.layout, 15, 0);

	struct memory_block mrun = {0, 0, 1, 0};
	struct memory_block mrun2 = {1, 0, 1, 0};

	memblock_rebuild_state(&pop->heap, &mrun);
	memblock_rebuild_state(&pop->heap, &mrun2);

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);

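	/* runs put into the recycler should come back with matching chunk ids */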
	struct memory_block mrun_ret = MEMORY_BLOCK_NONE;
	mrun_ret.size_idx = 1;
	struct memory_block mrun2_ret = MEMORY_BLOCK_NONE;
	mrun2_ret.size_idx = 1;

	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);

	init_run_with_score(pop->heap.layout, 7, 256);
	init_run_with_score(pop->heap.layout, 2, 64);
	init_run_with_score(pop->heap.layout, 5, 512);
	init_run_with_score(pop->heap.layout, 10, 128);

	mrun.chunk_id = 7;
	mrun2.chunk_id = 2;
	struct memory_block mrun3 = {5, 0, 1, 0};
	struct memory_block mrun4 = {10, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun3);
	memblock_rebuild_state(&pop->heap, &mrun4);

	mrun_ret.size_idx = 1;
	mrun2_ret.size_idx = 1;
	struct memory_block mrun3_ret = MEMORY_BLOCK_NONE;
	mrun3_ret.size_idx = 1;
	struct memory_block mrun4_ret = MEMORY_BLOCK_NONE;
	mrun4_ret.size_idx = 1;

	ret = recycler_put(r, &mrun,
		recycler_calc_score(&pop->heap, &mrun, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun2,
		recycler_calc_score(&pop->heap, &mrun2, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun3,
		recycler_calc_score(&pop->heap, &mrun3, NULL));
	UT_ASSERTeq(ret, 0);
	ret = recycler_put(r, &mrun4,
		recycler_calc_score(&pop->heap, &mrun4, NULL));
	UT_ASSERTeq(ret, 0);

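	/* runs should be returned in ascending score order */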
	ret = recycler_get(r, &mrun2_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun4_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun_ret);
	UT_ASSERTeq(ret, 0);
	ret = recycler_get(r, &mrun3_ret);
	UT_ASSERTeq(ret, 0);
	UT_ASSERTeq(mrun.chunk_id, mrun_ret.chunk_id);
	UT_ASSERTeq(mrun2.chunk_id, mrun2_ret.chunk_id);
	UT_ASSERTeq(mrun3.chunk_id, mrun3_ret.chunk_id);
	UT_ASSERTeq(mrun4.chunk_id, mrun4_ret.chunk_id);

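	/* the run's largest free block fits a 10-unit request but not 11 */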
	init_run_with_max_block(pop->heap.layout, 1);
	struct memory_block mrun5 = {1, 0, 1, 0};
	memblock_rebuild_state(&pop->heap, &mrun5);

	ret = recycler_put(r, &mrun5,
		recycler_calc_score(&pop->heap, &mrun5, NULL));
	UT_ASSERTeq(ret, 0);

	struct memory_block mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 11;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, ENOMEM);

	mrun5_ret = MEMORY_BLOCK_NONE;
	mrun5_ret.size_idx = 10;
	ret = recycler_get(r, &mrun5_ret);
	UT_ASSERTeq(ret, 0);

	recycler_delete(r);

	stats_delete(pop, s);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
Example #2
File: obj_heap.c Project: wojtuss/nvml
static void
test_heap(void)
{
	struct mock_pop *mpop = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE,
		Ut_mmap_align);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->set = MALLOC(sizeof(*(pop->set)));
	pop->set->options = 0;
	pop->set->directory_based = 0;

	struct stats *s = stats_new(pop);
	UT_ASSERTne(s, NULL);

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size,
		&pop->heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size,
		&pop->heap_size,
		pop, p_ops, s, pop->set) == 0);
	UT_ASSERT(heap_buckets_init(heap) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	test_alloc_class_bitmap_correctness();

	test_container((struct block_container *)container_new_ravl(heap),
		heap);

	test_container((struct block_container *)container_new_seglists(heap),
		heap);

	struct alloc_class *c_small = heap_get_best_class(heap, 1);
	struct alloc_class *c_big = heap_get_best_class(heap, 2048);

	UT_ASSERT(c_small->unit_size < c_big->unit_size);

	/* the class chosen for 2048-byte allocations should be run-based */
	UT_ASSERT(c_big->type == CLASS_RUN);

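	/* each default-class allocation should start at block offset 0 */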
	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	struct bucket *b_def = heap_bucket_acquire_by_id(heap,
		DEFAULT_ALLOC_CLASS_ID);

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}
	heap_bucket_release(heap, b_def);

	struct memory_block old_run = {0, 0, 1, 0};
	struct memory_block new_run = {0, 0, 0, 0};
	struct alloc_class *c_run = heap_get_best_class(heap, 1024);
	struct bucket *b_run = heap_bucket_acquire(heap, c_run);

	/*
	 * Allocate blocks from a run until one run is exhausted.
	 */
	UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &old_run), ENOMEM);
	int *nresv = bucket_current_resvp(b_run);

	do {
		new_run.chunk_id = 0;
		new_run.block_off = 0;
		new_run.size_idx = 1;
		UT_ASSERTne(heap_get_bestfit_block(heap, b_run, &new_run),
			ENOMEM);
		UT_ASSERTne(new_run.size_idx, 0);
		*nresv = 0;
	} while (old_run.block_off != new_run.block_off);
	*nresv = 0;

	heap_bucket_release(heap, b_run);

	stats_delete(pop, s);
	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	FREE(pop->set);
	MUNMAP_ANON_ALIGNED(mpop, MOCK_POOL_SIZE);
}
Example #3
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero; otherwise an error number
 * is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	struct memory_block m = {0, 0, 0, 0};

	m.size_idx = bucket_calc_units(b, sizeh);

	err = heap_get_bestfit_block(pop, b, &m);

	if (err == ENOMEM && !bucket_is_small(b))
		goto out; /* there's only one huge bucket */

	if (err == ENOMEM) {
		/*
		 * There's no more available memory in the common heap or in
		 * this lane's cache; fall back to the auxiliary (shared) bucket.
		 */
		b = heap_get_auxiliary_bucket(pop, sizeh);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/*
		 * The auxiliary bucket cannot satisfy our request; borrow
		 * memory from other caches.
		 */
		heap_drain_to_auxiliary(pop, b, m.size_idx);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/* we are completely out of memory */
		goto out;
	}

	/*
	 * Now that the memory is reserved we can go ahead with making the
	 * allocation persistent.
	 */
	uint64_t real_size = bucket_unit_size(b) * m.size_idx;
	err = persist_alloc(pop, lane, m, real_size, off,
		constructor, arg, data_off);

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
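A minimal usage sketch for pmalloc_construct, under stated assumptions: struct my_item, my_item_construct and alloc_my_item below are hypothetical names invented for illustration; only the pmalloc_construct signature comes from the code above.

/* hypothetical object layout, used only to illustrate the call */
struct my_item {
	uint64_t len;
	char data[64];
};

/* constructor matching the callback signature expected by pmalloc_construct */
static void
my_item_construct(PMEMobjpool *pop, void *ptr, size_t usable_size, void *arg)
{
	struct my_item *item = ptr;

	item->len = *(uint64_t *)arg;
	item->data[0] = '\0';
	/* persisting the initialized fields is elided in this sketch */
}

/* hypothetical wrapper: allocates the object, stores its offset in *dest_off */
static int
alloc_my_item(PMEMobjpool *pop, uint64_t *dest_off, uint64_t len)
{
	/*
	 * *dest_off is written persistently only after my_item_construct
	 * has run; a nonzero return value is an errno-style error code.
	 */
	return pmalloc_construct(pop, dest_off, sizeof(struct my_item),
		my_item_construct, &len, 0 /* data_off */);
}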
Example #4
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_ensure_run_bucket_filled -- (internal) refills the bucket if needed
 */
static int
heap_ensure_run_bucket_filled(struct palloc_heap *heap, struct bucket *b,
	uint32_t units)
{
	ASSERTeq(b->aclass->type, CLASS_RUN);

	if (b->is_active) {
		b->c_ops->rm_all(b->container);
		b->active_memory_block.m_ops
			->claim_revoke(&b->active_memory_block);

		b->is_active = 0;
	}

	struct heap_rt *h = heap->rt;
	struct memory_block m = MEMORY_BLOCK_NONE;

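	/* first attempt: reuse a run kept by the recycler for this class */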
	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);

		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	m.size_idx = b->aclass->run.size_idx;

	/* cannot reuse an existing run; create a new one */
	struct bucket *defb = heap_get_default_bucket(heap);
	util_mutex_lock(&defb->lock);
	if (heap_get_bestfit_block(heap, defb, &m) == 0) {
		ASSERTeq(m.block_off, 0);

		heap_create_run(heap, b, &m);

		b->active_memory_block = m;
		b->is_active = 1;

		util_mutex_unlock(&defb->lock);
		return 0;
	}
	util_mutex_unlock(&defb->lock);

	/*
	 * Try the recycler again; the previous call to bestfit_block for
	 * huge chunks might have reclaimed some unused runs.
	 */
	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);
		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		/*
		 * To check whether the recycled run is able to satisfy our
		 * request, we attempt to retrieve a block. This is not ideal,
		 * and should be replaced by a different heuristic once proper
		 * memory block scoring is implemented.
		 */
		struct memory_block tmp = MEMORY_BLOCK_NONE;
		tmp.size_idx = units;
		if (b->c_ops->get_rm_bestfit(b->container, &tmp) != 0) {
			b->c_ops->rm_all(b->container);
			m.m_ops->claim_revoke(&m);
			return ENOMEM;
		} else {
			bucket_insert_block(b, &tmp);
		}

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	return ENOMEM;
}
Example #5
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->p_ops.pool_size = pop->size;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = pop->heap_size;
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, pop, p_ops) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	struct bucket *b_small = heap_get_best_bucket(heap, 1);
	struct bucket *b_big = heap_get_best_bucket(heap, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(heap, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

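	/* verify that the allocated chunks are adjacent via prev/next lookups */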
	struct memory_block prev;
	heap_get_adjacent_free_block(heap, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	struct memory_block cnt;
	heap_get_adjacent_free_block(heap, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);

	struct memory_block next;
	heap_get_adjacent_free_block(heap, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);

	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	Free(mpop);
}
Example #6
File: obj_heap.c Project: AmesianX/nvml
static void
test_heap()
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->persist = obj_heap_persist;

	UT_ASSERT(heap_check(pop) != 0);
	UT_ASSERT(heap_init(pop) == 0);
	UT_ASSERT(heap_boot(pop) == 0);
	UT_ASSERT(pop->heap != NULL);

	struct bucket *b_small = heap_get_best_bucket(pop, 1);
	struct bucket *b_big = heap_get_best_bucket(pop, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(pop, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(pop, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block *blocksp[MAX_BLOCKS] = {NULL};

	struct memory_block prev;
	heap_get_adjacent_free_block(pop, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	blocksp[0] = &prev;

	struct memory_block cnt;
	heap_get_adjacent_free_block(pop, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);
	blocksp[1] = &cnt;

	struct memory_block next;
	heap_get_adjacent_free_block(pop, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);
	blocksp[2] = &next;

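	/* coalescing the three adjacent blocks should yield one 3-unit block */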
	struct operation_context *ctx = operation_init(pop, NULL);
	struct memory_block result =
		heap_coalesce(pop, blocksp, MAX_BLOCKS, HEAP_OP_FREE, ctx);
	operation_process(ctx);
	operation_delete(ctx);

	UT_ASSERT(result.size_idx == 3);
	UT_ASSERT(result.chunk_id == prev.chunk_id);

	UT_ASSERT(heap_check(pop) == 0);
	heap_cleanup(pop);
	UT_ASSERT(pop->heap == NULL);

	Free(mpop);
}