Example #1
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	int err;

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	struct memory_block m = {0, 0, 0, 0};

	m.size_idx = b->calc_units(b, sizeh);

	err = heap_get_bestfit_block(pop, b, &m);

	if (err == ENOMEM && b->type == BUCKET_HUGE)
		goto out; /* there's only one huge bucket */

	if (err == ENOMEM) {
		/*
		 * There's no more memory available in the common heap or in
		 * this lane cache; fall back to the auxiliary (shared) bucket.
		 */
		b = heap_get_auxiliary_bucket(pop, sizeh);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/*
		 * The auxiliary bucket cannot satisfy our request; borrow
		 * memory from other caches.
		 */
		heap_drain_to_auxiliary(pop, b, m.size_idx);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/* we are completely out of memory */
		goto out;
	}

	/*
	 * Now that the memory is reserved, we can go ahead with making the
	 * allocation persistent.
	 */
	uint64_t real_size = b->unit_size * m.size_idx;
	persist_alloc(pop, lane, m, real_size, off, constructor, arg, data_off);
	err = 0;
out:
	lane_release(pop);

	return err;
}
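
A caller-side sketch of the interface above may help; only the pmalloc_construct signature comes from the example, while the node type, its constructor, and the wrapper function are hypothetical.

struct node {
	uint64_t next;	/* hypothetical payload */
};

/* hypothetical constructor: runs before the offset is persistently published */
static void
node_construct(PMEMobjpool *pop, void *ptr, size_t usable_size, void *arg)
{
	struct node *n = ptr;
	n->next = 0;
	(void) pop; (void) usable_size; (void) arg;
}

/* hypothetical wrapper; offp must point into the persistent pool */
static int
node_alloc(PMEMobjpool *pop, uint64_t *offp)
{
	return pmalloc_construct(pop, offp, sizeof (struct node),
		node_construct, NULL, 0);
}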
Example #2
/*
 * alloc_reserve_block -- (internal) reserves a memory block in volatile state
 *
 * The first step in the allocation of a new block is reserving it in the
 * transient heap - which is represented by the bucket abstraction.
 *
 * To provide optimal scaling for multi-threaded applications and to reduce
 * fragmentation, the appropriate bucket is chosen depending on the current
 * thread context and on the allocation class the requested size falls into.
 *
 * Once the bucket is selected, just enough memory is reserved for the requested
 * size. The underlying block allocation algorithm (best-fit, next-fit, ...)
 * varies depending on the bucket container.
 *
 * Because the heap generally tries to avoid lock contention on buckets,
 * threads might, in near-OOM cases, be unable to allocate the requested memory
 * from their assigned buckets. To combat this, there's one common collection
 * of buckets that threads can fall back on. The auxiliary bucket will 'steal'
 * memory from other caches if that's required to satisfy the current caller's
 * needs.
 *
 * Once this method completes, no further locking is required on the transient
 * part of the heap during the allocation process.
 */
static int
alloc_reserve_block(struct palloc_heap *heap, struct memory_block *m,
		size_t sizeh)
{
	struct bucket *b = heap_get_best_bucket(heap, sizeh);

	/*
	 * The caller provided the size in bytes, but buckets operate on
	 * 'size indexes', which are multiples of the bucket's block size.
	 *
	 * For example, to allocate 500 bytes from a bucket that provides
	 * 256-byte blocks, two memory 'units' are required.
	 */
	m->size_idx = b->calc_units(b, sizeh);

	int err = heap_get_bestfit_block(heap, b, m);

	if (err == ENOMEM && b->type == BUCKET_HUGE)
		return ENOMEM; /* there's only one huge bucket */

	if (err == ENOMEM) {
		/*
		 * There's no more memory available in the common heap or in
		 * this lane cache; fall back to the auxiliary (shared) bucket.
		 */
		b = heap_get_auxiliary_bucket(heap, sizeh);
		err = heap_get_bestfit_block(heap, b, m);
	}

	if (err == ENOMEM) {
		/*
		 * The auxiliary bucket cannot satisfy our request; borrow
		 * memory from other caches.
		 */
		heap_drain_to_auxiliary(heap, b, m->size_idx);
		err = heap_get_bestfit_block(heap, b, m);
	}

	if (err == ENOMEM) {
		/* we are completely out of memory */
		return ENOMEM;
	}

	return 0;
}
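
The byte-to-unit conversion described in the comment above can be pictured as plain ceiling division; the helper below is a sketch of the idea only, since the real calc_units is a per-bucket callback and this exact rounding is an assumption.

/* sketch only: round a byte count up to whole bucket units (assumed rounding) */
static uint32_t
units_for_size(size_t unit_size, size_t sizeh)
{
	return (uint32_t)((sizeh + unit_size - 1) / unit_size);
}

/* e.g. units_for_size(256, 500) == 2, matching the comment's example */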
Example #3
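/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */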
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	int err = 0;

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

#ifdef _EAP_ALLOC_OPTIMIZE
	/* skip the actual free when the optimization applies to this size */
	if (is_alloc_free_opt_enable(alloc->size))
		goto error_lane_hold;
#endif

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	return 0;

error_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Example #4
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg,
	uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err = 0;

	struct allocation_header *alloc = alloc_get_header(pop, *off);
	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = bucket_calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = bucket_calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * bucket_unit_size(b);

	struct memory_block cnt = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, cnt)) != 0)
		return err;

	struct memory_block next = {0};
	if ((err = heap_get_adjacent_free_block(pop, &next, cnt, 0)) != 0)
		goto error;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto error;
	}

	if ((err = heap_get_exact_block(pop, b, &next,
		add_size_idx)) != 0)
		goto error;

	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

error:
	if (heap_unlock_if_run(pop, cnt) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
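
A minimal caller sketch for the resize path above; the wrapper name is hypothetical, and only the prealloc_construct signature and its tolerance of a NULL constructor come from the example.

/* hypothetical wrapper: grow the block at *offp in place, no constructor */
static int
record_grow(PMEMobjpool *pop, uint64_t *offp, size_t new_size)
{
	/* returns 0 immediately when the existing block is already big enough */
	return prealloc_construct(pop, offp, new_size, NULL, NULL, 0);
}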
Example #5
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, (char *)datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
Example #6
/*
 * prealloc_construct -- resizes an existing memory block with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */
int
prealloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	if (size <= pmalloc_usable_size(pop, *off))
		return 0;

	size_t sizeh = size + sizeof (struct allocation_header);

	int err;

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct lane_section *lane;
	lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	uint32_t add_size_idx = b->calc_units(b, sizeh - alloc->size);
	uint32_t new_size_idx = b->calc_units(b, sizeh);
	uint64_t real_size = new_size_idx * b->unit_size;

	struct memory_block cnt = get_mblock_from_alloc(pop, alloc);

	heap_lock_if_run(pop, cnt);

	struct memory_block next = {0, 0, 0, 0};
	if ((err = heap_get_adjacent_free_block(pop, b, &next, cnt, 0)) != 0)
		goto out;

	if (next.size_idx < add_size_idx) {
		err = ENOMEM;
		goto out;
	}

	if ((err = heap_get_exact_block(pop, b, &next, add_size_idx)) != 0)
		goto out;

	struct memory_block *blocks[2] = {&cnt, &next};
	uint64_t op_result;
	void *hdr;
	struct memory_block m =
		heap_coalesce(pop, blocks, 2, HEAP_OP_ALLOC, &hdr, &op_result);

	void *block_data = heap_get_block_data(pop, m);
	void *datap = (char *)block_data + sizeof (struct allocation_header);
	void *userdatap = (char *)datap + data_off;

	/* mark new part as accessible and undefined */
	VALGRIND_DO_MAKE_MEM_UNDEFINED(pop, (char *)block_data + alloc->size,
			real_size - alloc->size);
	/* resize allocated space */
	VALGRIND_DO_MEMPOOL_CHANGE(pop, userdatap, userdatap,
		real_size - sizeof (struct allocation_header) - data_off);

	if (constructor != NULL)
		constructor(pop, userdatap,
			real_size - sizeof (struct allocation_header) -
			data_off, arg);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, &alloc->size), real_size);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

out:
	heap_unlock_if_run(pop, cnt);
	lane_release(pop);

	return err;
}
Example #7
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful, the function returns zero. Otherwise, an error number is
 * returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(pop, b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
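
For completeness, a caller sketch of pfree as documented above; the wrapper and its error handling are hypothetical.

/* hypothetical wrapper: free the block at *offp and persistently zero it */
static void
record_free(PMEMobjpool *pop, uint64_t *offp)
{
	if (pfree(pop, offp, 0) != 0)
		ERR("pfree failed");	/* illustrative handling only */
}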
Example #8
static void
test_heap(void)
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->p_ops.persist = obj_heap_persist;
	pop->p_ops.memset_persist = obj_heap_memset_persist;
	pop->p_ops.base = pop;
	pop->p_ops.pool_size = pop->size;

	void *heap_start = (char *)pop + pop->heap_offset;
	uint64_t heap_size = pop->heap_size;
	struct palloc_heap *heap = &pop->heap;
	struct pmem_ops *p_ops = &pop->p_ops;

	UT_ASSERT(heap_check(heap_start, heap_size) != 0);
	UT_ASSERT(heap_init(heap_start, heap_size, p_ops) == 0);
	UT_ASSERT(heap_boot(heap, heap_start, heap_size, pop, p_ops) == 0);
	UT_ASSERT(pop->heap.rt != NULL);

	struct bucket *b_small = heap_get_best_bucket(heap, 1);
	struct bucket *b_big = heap_get_best_bucket(heap, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(heap, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(heap, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block prev;
	heap_get_adjacent_free_block(heap, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	struct memory_block cnt;
	heap_get_adjacent_free_block(heap, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);

	struct memory_block next;
	heap_get_adjacent_free_block(heap, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);

	UT_ASSERT(heap_check(heap_start, heap_size) == 0);
	heap_cleanup(heap);
	UT_ASSERT(heap->rt == NULL);

	Free(mpop);
}
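
The test above assumes a mock_pop layout roughly like the sketch below; the real struct lives in the test harness and may carry extra fields, so treat this as an assumption rather than the actual definition.

/* sketch of the assumed mock pool layout: the heap area directly follows
 * the pool header, matching the heap_offset/heap_size math in the test */
struct mock_pop {
	PMEMobjpool p;
	char heap[MOCK_POOL_SIZE - sizeof(PMEMobjpool)];
};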
Example #9
static void
test_heap(void)
{
	struct mock_pop *mpop = Malloc(MOCK_POOL_SIZE);
	PMEMobjpool *pop = &mpop->p;
	memset(pop, 0, MOCK_POOL_SIZE);
	pop->size = MOCK_POOL_SIZE;
	pop->heap_size = MOCK_POOL_SIZE - sizeof(PMEMobjpool);
	pop->heap_offset = (uint64_t)((uint64_t)&mpop->heap - (uint64_t)mpop);
	pop->persist = obj_heap_persist;

	UT_ASSERT(heap_check(pop) != 0);
	UT_ASSERT(heap_init(pop) == 0);
	UT_ASSERT(heap_boot(pop) == 0);
	UT_ASSERT(pop->heap != NULL);

	struct bucket *b_small = heap_get_best_bucket(pop, 1);
	struct bucket *b_big = heap_get_best_bucket(pop, 2048);

	UT_ASSERT(b_small->unit_size < b_big->unit_size);

	struct bucket *b_def = heap_get_best_bucket(pop, CHUNKSIZE);
	UT_ASSERT(b_def->unit_size == CHUNKSIZE);

	/* new small buckets should be empty */
	UT_ASSERT(b_small->type == BUCKET_RUN);
	UT_ASSERT(b_big->type == BUCKET_RUN);

	struct memory_block blocks[MAX_BLOCKS] = {
		{0, 0, 1, 0},
		{0, 0, 1, 0},
		{0, 0, 1, 0}
	};

	for (int i = 0; i < MAX_BLOCKS; ++i) {
		heap_get_bestfit_block(pop, b_def, &blocks[i]);
		UT_ASSERT(blocks[i].block_off == 0);
	}

	struct memory_block *blocksp[MAX_BLOCKS] = {NULL};

	struct memory_block prev;
	heap_get_adjacent_free_block(pop, b_def, &prev, blocks[1], 1);
	UT_ASSERT(prev.chunk_id == blocks[0].chunk_id);
	blocksp[0] = &prev;

	struct memory_block cnt;
	heap_get_adjacent_free_block(pop, b_def, &cnt, blocks[0], 0);
	UT_ASSERT(cnt.chunk_id == blocks[1].chunk_id);
	blocksp[1] = &cnt;

	struct memory_block next;
	heap_get_adjacent_free_block(pop, b_def, &next, blocks[1], 0);
	UT_ASSERT(next.chunk_id == blocks[2].chunk_id);
	blocksp[2] = &next;

	struct operation_context *ctx = operation_init(pop, NULL);
	struct memory_block result =
		heap_coalesce(pop, blocksp, MAX_BLOCKS, HEAP_OP_FREE, ctx);
	operation_process(ctx);
	operation_delete(ctx);

	UT_ASSERT(result.size_idx == 3);
	UT_ASSERT(result.chunk_id == prev.chunk_id);

	UT_ASSERT(heap_check(pop) == 0);
	heap_cleanup(pop);
	UT_ASSERT(pop->heap == NULL);

	Free(mpop);
}