Example #1
0
/*
 * bucket_new -- allocates and initializes bucket instance
 */
struct bucket *
bucket_new(size_t unit_size, int unit_max)
{
	ASSERT(unit_size > 0);

	struct bucket *b = Malloc(sizeof (*b));
	if (b == NULL)
		goto error_bucket_malloc;

	b->tree = ctree_new();
	if (b->tree == NULL)
		goto error_tree_new;

	if ((errno = pthread_mutex_init(&b->lock, NULL)) != 0) {
		ERR("!pthread_mutex_init");
		goto error_mutex_init;
	}

	b->unit_size = unit_size;
	b->unit_max = unit_max;

	if (bucket_is_small(b)) {
		b->bitmap_nallocs = RUNSIZE / unit_size;

		ASSERT(b->bitmap_nallocs <= RUN_BITMAP_SIZE);

		int unused_bits = RUN_BITMAP_SIZE - b->bitmap_nallocs;
		int unused_values = unused_bits / BITS_PER_VALUE;
		b->bitmap_nval = MAX_BITMAP_VALUES - unused_values;
		unused_bits -= (unused_values * BITS_PER_VALUE);

		ASSERT(unused_bits >= 0);

		b->bitmap_lastval = unused_bits ?
			(((1ULL << unused_bits) - 1ULL) <<
				(BITS_PER_VALUE - unused_bits)) : 0;
	} else {
		b->bitmap_nval = 0;
		b->bitmap_lastval = 0;
		b->bitmap_nallocs = 0;
	}

	return b;

error_mutex_init:
	ctree_delete(b->tree);
error_tree_new:
	Free(b);
error_bucket_malloc:
	return NULL;
}
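
The bitmap setup in bucket_new is the least obvious part of the function, so here is a small self-contained sketch of the same arithmetic with hypothetical constants (BITS_PER_VALUE = 64, MAX_BITMAP_VALUES = 16, a 256 KiB RUNSIZE and a 1280-byte unit size are assumptions for illustration, not values taken from the original headers).

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void)
{
	/* assumed constants -- the real values live in the heap layout headers */
	const int bits_per_value = 64;
	const int max_bitmap_values = 16;
	const int run_bitmap_size = bits_per_value * max_bitmap_values; /* 1024 */
	const size_t runsize = 256 * 1024;
	const size_t unit_size = 1280;

	int nallocs = (int)(runsize / unit_size);	/* 204 units fit in a run */
	int unused_bits = run_bitmap_size - nallocs;	/* 820 bits never used */
	int unused_values = unused_bits / bits_per_value; /* 12 whole 64-bit words */
	int nval = max_bitmap_values - unused_values;	/* 4 words stay in use */
	unused_bits -= unused_values * bits_per_value;	/* 52 stray bits remain */

	/* the stray bits are pre-set so the allocator never hands them out */
	uint64_t lastval = unused_bits ?
		(((1ULL << unused_bits) - 1ULL) << (bits_per_value - unused_bits)) : 0;

	assert(nallocs == 204 && nval == 4 && unused_bits == 52);
	assert(lastval == (((1ULL << 52) - 1ULL) << 12));
	return 0;
}
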
Example #2
0
/*
 * calc_block_offset -- (internal) calculates the block offset of allocation
 */
static uint16_t
calc_block_offset(PMEMobjpool *pop, struct bucket *b,
	struct allocation_header *alloc)
{
	uint16_t block_off = 0;
	if (bucket_is_small(b)) {
		struct memory_block m = {alloc->chunk_id, alloc->zone_id, 0, 0};
		void *data = heap_get_block_data(pop, m);
		uintptr_t diff = (uintptr_t)alloc - (uintptr_t)data;
		block_off = diff / bucket_unit_size(b);
		ASSERT(diff % bucket_unit_size(b) == 0);
	}

	return block_off;
}
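
For reference, the offset math above worked through with made-up numbers; the addresses and unit size below are purely hypothetical.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t data = 0x1000;	/* hypothetical start of the run data */
	uintptr_t alloc = 0x1300;	/* hypothetical allocation header address */
	size_t unit_size = 0x100;	/* hypothetical bucket unit size */

	uintptr_t diff = alloc - data;
	uint16_t block_off = (uint16_t)(diff / unit_size);

	/* the header must sit exactly on a unit boundary */
	assert(diff % unit_size == 0);
	assert(block_off == 3);
	return 0;
}
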
Example #3
0
/*
 * heap_recycle_block -- (internal) recycles unused part of the memory block
 */
static void
heap_recycle_block(PMEMobjpool *pop, struct bucket *b, struct memory_block *m,
	uint32_t units)
{
	if (bucket_is_small(b)) {
		struct memory_block r = {m->chunk_id, m->zone_id,
			m->size_idx - units, m->block_off + units};
		bucket_insert_block(b, r);
	} else {
		heap_resize_chunk(pop, m->chunk_id, m->zone_id, units);
	}

	m->size_idx = units;
}
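
The split performed by heap_recycle_block is easier to see with concrete numbers. The sketch below re-creates it with a hypothetical memory_block definition whose field order is inferred from the designated initializers in examples #2 and #3; the field types and the real structure definition may differ.

#include <assert.h>
#include <stdint.h>

/* field order assumed from the initializers used in these examples */
struct memory_block {
	uint32_t chunk_id;	/* chunk within the zone */
	uint32_t zone_id;	/* zone within the pool */
	uint32_t size_idx;	/* size in allocation units */
	uint16_t block_off;	/* first unit inside a run, 0 for huge blocks */
};

int
main(void)
{
	/* hypothetical run block: 8 units starting at unit 16 of chunk 2 */
	struct memory_block m = {2, 0, 8, 16};
	uint32_t units = 3;	/* only 3 units were actually needed */

	/* the tail becomes a new free block, exactly as in heap_recycle_block */
	struct memory_block r = {m.chunk_id, m.zone_id,
		m.size_idx - units, m.block_off + units};
	m.size_idx = units;

	assert(r.size_idx == 5 && r.block_off == 19);
	assert(m.size_idx == 3);
	return 0;
}
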
Example #4
0
/*
 * heap_recycle_block -- (internal) recycles unused part of the memory block
 */
static void
heap_recycle_block(PMEMobjpool *pop, struct bucket *b, struct memory_block *m,
	uint32_t units)
{
#ifdef _EAP_ALLOC_OPTIMIZE
	/* disabled EAP shortcut: would return without recycling the tail */
	//m->size_idx = units;
	//return;
#endif

	if (bucket_is_small(b)) {
		struct memory_block r = {m->chunk_id, m->zone_id,
			m->size_idx - units, m->block_off + units};
		bucket_insert_block(b, r);
	} else {
		heap_resize_chunk(pop, m->chunk_id, m->zone_id, units);
	}

	m->size_idx = units;
}
Example #5
0
/*
 * heap_ensure_bucket_filled -- (internal) refills the bucket if needed
 */
static void
heap_ensure_bucket_filled(PMEMobjpool *pop, struct bucket *b, int force)
{
	if (!force && !bucket_is_empty(b))
		return;

	if (!bucket_is_small(b)) {
		/* not much to do here apart from using the next zone */
		heap_populate_buckets(pop);
		return;
	}

	struct bucket *def_bucket = heap_get_default_bucket(pop);

	struct memory_block m = {0, 0, 1, 0};
	if (heap_get_bestfit_block(pop, def_bucket, &m) != 0)
		return; /* OOM */

	ASSERT(m.block_off == 0);

	heap_populate_run_bucket(pop, b, m.chunk_id, m.zone_id);
}
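
A hypothetical caller sketch: examples #5 and #8 suggest the usual pattern of refilling a small bucket and then taking a best-fit block from it. Only the two calls made below come from the examples; the wrapper itself, its name and its error handling are assumptions, and the allocator's internal headers are assumed to be included.

static int
take_block(PMEMobjpool *pop, struct bucket *b, struct memory_block *m)
{
	/* refill only when the bucket is empty (force == 0) */
	heap_ensure_bucket_filled(pop, b, 0);

	/* propagate ENOMEM (or another error) from the best-fit lookup */
	return heap_get_bestfit_block(pop, b, m);
}
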
Example #6
0
int
pfree(PMEMobjpool *pop, uint64_t *off)
{

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	int err = 0;

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

#ifdef _EAP_ALLOC_OPTIMIZE
	/* sizes covered by the EAP optimization skip the rest of the free */
	if (is_alloc_free_opt_enable(alloc->size))
		goto error_lane_hold;
#endif

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	return 0;

error_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Example #7
0
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	bucket_insert_block(pop, b, res);

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
Example #8
0
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg), void *arg, uint64_t data_off)
{
	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	struct memory_block m = {0, 0, 0, 0};

	m.size_idx = bucket_calc_units(b, sizeh);

	err = heap_get_bestfit_block(pop, b, &m);

	if (err == ENOMEM && !bucket_is_small(b))
		goto out; /* there's only one huge bucket */

	if (err == ENOMEM) {
		/*
		 * There's no more available memory in the common heap and in
		 * this lane cache, fallback to the auxiliary (shared) bucket.
		 */
		b = heap_get_auxiliary_bucket(pop, sizeh);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/*
		 * The auxiliary bucket cannot satisfy our request, borrow
		 * memory from other caches.
		 */
		heap_drain_to_auxiliary(pop, b, m.size_idx);
		err = heap_get_bestfit_block(pop, b, &m);
	}

	if (err == ENOMEM) {
		/* we are completely out of memory */
		goto out;
	}

	/*
	 * Now that the memory is reserved we can go ahead with making the
	 * allocation persistent.
	 */
	uint64_t real_size = bucket_unit_size(b) * m.size_idx;
	err = persist_alloc(pop, lane, m, real_size, off,
		constructor, arg, data_off);

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
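
A usage sketch for pmalloc_construct, paired with the three-argument pfree from example #7. Everything except those two calls is hypothetical: the constructor, the 64-byte request and the zero data_off are illustrative choices, and the allocator's internal headers are assumed to be included.

#include <string.h>

/* hypothetical constructor: zero the block before its offset is published
 * (a real constructor would also persist its writes) */
static void
obj_ctor(PMEMobjpool *pop, void *ptr, size_t usable_size, void *arg)
{
	(void)pop;
	(void)arg;
	memset(ptr, 0, usable_size);
}

/* hypothetical round trip: allocate a constructed block, then free it */
static int
alloc_free_roundtrip(PMEMobjpool *pop, uint64_t *offp)
{
	int err = pmalloc_construct(pop, offp, 64, obj_ctor, NULL, 0);
	if (err != 0)
		return err;

	return pfree(pop, offp, 0);
}
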
Example #9
0
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}