Code example #1
File: heap.c Project: paul-von-behren/nvml
/*
 * heap_recycle_block -- (internal) recycles unused part of the memory block
 */
static void
heap_recycle_block(PMEMobjpool *pop, struct bucket *b, struct memory_block *m,
	uint32_t units)
{
	if (bucket_is_small(b)) {
		struct memory_block r = {m->chunk_id, m->zone_id,
			m->size_idx - units, m->block_off + units};
		bucket_insert_block(b, r);
	} else {
		heap_resize_chunk(pop, m->chunk_id, m->zone_id, units);
	}

	m->size_idx = units;
}
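To make the split above concrete: the block keeps the first `units` allocation units and the tail starting at `block_off + units` goes back into the bucket. A minimal standalone sketch of that arithmetic, with made-up values and plain integers standing in for the memory_block fields (not code from the nvml tree):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* block handed back by the bucket (illustrative values) */
	uint32_t block_off = 16;	/* offset within the run */
	uint32_t size_idx = 8;		/* length in allocation units */
	uint32_t units = 3;		/* what the caller actually needs */

	/* the remainder heap_recycle_block would reinsert */
	uint32_t r_off = block_off + units;
	uint32_t r_size_idx = size_idx - units;

	printf("kept:     off=%u size_idx=%u\n", block_off, units);
	printf("recycled: off=%u size_idx=%u\n", r_off, r_size_idx);
	return 0;
}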
Code example #2
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_recycle_block -- (internal) recycles unused part of the memory block
 */
static void
heap_recycle_block(struct palloc_heap *heap, struct bucket *b,
		struct memory_block *m, uint32_t units)
{
	if (b->aclass->type == CLASS_RUN) {
		ASSERT(units <= UINT16_MAX);
		ASSERT(m->block_off + units <= UINT16_MAX);
		struct memory_block r = {m->chunk_id, m->zone_id,
			m->size_idx - units, (uint16_t)(m->block_off + units),
			0, 0, NULL, NULL};
		memblock_rebuild_state(heap, &r);
		bucket_insert_block(b, &r);
	} else {
		heap_resize_chunk(heap, m->chunk_id, m->zone_id, units);
	}

	m->size_idx = units;
}
Code example #3
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_resize_chunk -- (internal) splits the chunk into two smaller ones
 */
static void
heap_resize_chunk(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t new_size_idx)
{
	uint32_t new_chunk_id = chunk_id + new_size_idx;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	struct chunk_header *old_hdr = &z->chunk_headers[chunk_id];
	struct chunk_header *new_hdr = &z->chunk_headers[new_chunk_id];

	uint32_t rem_size_idx = old_hdr->size_idx - new_size_idx;
	heap_chunk_init(heap, new_hdr, CHUNK_TYPE_FREE, rem_size_idx);
	heap_chunk_init(heap, old_hdr, CHUNK_TYPE_FREE, new_size_idx);

	struct bucket *def_bucket = heap->rt->default_bucket;
	struct memory_block m = {new_chunk_id, zone_id, rem_size_idx, 0,
		0, 0, NULL, NULL};
	memblock_rebuild_state(heap, &m);
	bucket_insert_block(def_bucket, &m);
}
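The chunk-level split follows the same pattern one level up: the resized chunk keeps new_size_idx chunks, and the remainder becomes a separate free chunk that starts immediately after it and is handed to the default bucket. A minimal sketch of the index arithmetic, with illustrative numbers only (not from the nvml tree):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* illustrative values */
	uint32_t chunk_id = 10;		/* first chunk of the block being resized */
	uint32_t old_size_idx = 7;	/* current length in chunks */
	uint32_t new_size_idx = 2;	/* requested length in chunks */

	uint32_t new_chunk_id = chunk_id + new_size_idx;	/* 12 */
	uint32_t rem_size_idx = old_size_idx - new_size_idx;	/* 5 */

	printf("resized chunk:  id=%u size_idx=%u\n", chunk_id, new_size_idx);
	printf("new free chunk: id=%u size_idx=%u\n", new_chunk_id, rem_size_idx);
	return 0;
}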
Code example #4
File: heap.c Project: sudkannan/nvml
/*
 * heap_resize_chunk -- (internal) splits the chunk into two smaller ones
 */
static void
heap_resize_chunk(PMEMobjpool *pop,
	uint32_t chunk_id, uint32_t zone_id, uint32_t new_size_idx)
{
	uint32_t new_chunk_id = chunk_id + new_size_idx;

	struct zone *z = &pop->heap->layout->zones[zone_id];
	struct chunk_header *old_hdr = &z->chunk_headers[chunk_id];
	struct chunk_header *new_hdr = &z->chunk_headers[new_chunk_id];

	uint32_t rem_size_idx = old_hdr->size_idx - new_size_idx;
	heap_chunk_init(pop, new_hdr, CHUNK_TYPE_FREE, rem_size_idx);
	heap_chunk_init(pop, old_hdr, CHUNK_TYPE_FREE, new_size_idx);

	struct bucket *def_bucket = pop->heap->buckets[DEFAULT_BUCKET];
	struct memory_block m = {new_chunk_id, zone_id, rem_size_idx, 0};
	if (bucket_insert_block(def_bucket, m) != 0) {
		ERR("bucket_insert_block failed during resize");
	}
}
Code example #5
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_init_free_chunk -- initializes free chunk transient state
 */
static void
heap_init_free_chunk(struct palloc_heap *heap,
	struct chunk_header *hdr,
	struct memory_block *m)
{
	struct operation_context ctx;
	operation_init(&ctx, heap->base, NULL, NULL);
	ctx.p_ops = &heap->p_ops;
	heap_chunk_write_footer(hdr, hdr->size_idx);
	/*
	 * Perform coalescing just in case there
	 * are any neighbouring free chunks.
	 */
	struct memory_block nm = heap_coalesce_huge(heap, m);
	if (nm.chunk_id != m->chunk_id) {
		m->m_ops->prep_hdr(&nm, MEMBLOCK_FREE, &ctx);
		operation_process(&ctx);
	}
	*m = nm;
	bucket_insert_block(heap->rt->default_bucket, m);
}
Code example #6
File: heap.c Project: sudkannan/nvml
/*
 * heap_run_insert -- (internal) inserts and splits a block of memory into a run
 */
static void
heap_run_insert(struct bucket *b, uint32_t chunk_id, uint32_t zone_id,
		uint32_t size_idx, uint16_t block_off)
{
	ASSERT(size_idx <= BITS_PER_VALUE);
	ASSERT(block_off + size_idx <= bucket_bitmap_nallocs(b));

	size_t unit_max = bucket_unit_max(b);
	struct memory_block m = {chunk_id, zone_id,
		unit_max - (block_off % 4), block_off};

	if (m.size_idx > size_idx)
		m.size_idx = size_idx;

	do {
		bucket_insert_block(b, m);
		m.block_off += m.size_idx;
		size_idx -= m.size_idx;
		m.size_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}
Code example #7
File: heap.c Project: sudkannan/nvml
/*
 * heap_recycle_block -- (internal) recycles unused part of the memory block
 */
static void
heap_recycle_block(PMEMobjpool *pop, struct bucket *b, struct memory_block *m,
	uint32_t units)
{

#ifdef _EAP_ALLOC_OPTIMIZE
	//m->size_idx = units;
	//return;
#endif

	//printf("bucket_insert_block heap_recycle_block");

	if (bucket_is_small(b)) {
		struct memory_block r = {m->chunk_id, m->zone_id,
			m->size_idx - units, m->block_off + units};
		bucket_insert_block(b, r);
	} else {
		heap_resize_chunk(pop, m->chunk_id, m->zone_id, units);
	}

	m->size_idx = units;
}
Code example #8
File: heap.c Project: sudkannan/nvml
/*
 * heap_populate_buckets -- (internal) creates volatile state of memory blocks
 */
static void
heap_populate_buckets(PMEMobjpool *pop)
{
	struct pmalloc_heap *h = pop->heap;

	if (h->zones_exhausted == h->max_zone)
		return;

	uint32_t zone_id = h->zones_exhausted++;
	struct zone *z = &h->layout->zones[zone_id];

	/* ignore zone and chunk headers */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(z, sizeof (z->header) +
		sizeof (z->chunk_headers));

	if (z->header.magic != ZONE_HEADER_MAGIC)
		heap_zone_init(pop, zone_id);

	struct bucket *def_bucket = h->buckets[DEFAULT_BUCKET];

	for (uint32_t i = 0; i < z->header.size_idx; ) {
		struct chunk_header *hdr = &z->chunk_headers[i];
		heap_chunk_write_footer(hdr, hdr->size_idx);

		if (hdr->type == CHUNK_TYPE_RUN) {
			struct chunk_run *run =
				(struct chunk_run *)&z->chunks[i];
			heap_populate_run_bucket(pop,
				h->bucket_map[run->block_size], i, zone_id);
		} else if (hdr->type == CHUNK_TYPE_FREE) {
			struct memory_block m = {i, zone_id, hdr->size_idx, 0};
			bucket_insert_block(def_bucket, m);
		}

		i += hdr->size_idx;
	}
}
Code example #9
File: pmalloc.c Project: sudkannan/nvml
/*
 * pmalloc_construct -- allocates a new block of memory with a constructor
 *
 * The block offset is written persistently into the off variable, but only
 * after the constructor function has been called.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
	void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
	void *arg, uint64_t data_off)
{
	size_t sizeh = size + sizeof (struct allocation_header);

	struct bucket *b = heap_get_best_bucket(pop, sizeh);

	int err = 0;
	uint32_t units = bucket_calc_units(b, sizeh);

	struct memory_block m = {0, 0, units, 0};

	if ((err = heap_get_bestfit_block(pop, b, &m)) != 0)
		return err;

	uint64_t op_result = 0;

	void *block_data = heap_get_block_data(pop, m);
	void *datap = block_data + sizeof (struct allocation_header);

	ASSERT((uint64_t)block_data % _POBJ_CL_ALIGNMENT == 0);

	uint64_t real_size = bucket_unit_size(b) * units;

	alloc_write_header(pop, block_data, m.chunk_id, m.zone_id, real_size);

	if (constructor != NULL)
		constructor(pop, datap + data_off, arg);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

	void *hdr = heap_get_block_header(pop, m, HEAP_OP_ALLOC, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto err_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), pop_offset(pop, datap));
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return 0;

err_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_insert_block(b, m) != 0) {
		ERR("Failed to recover heap volatile state");
		ASSERT(0);
	}

	return err;
}
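As the header comment says, pmalloc_construct runs the constructor on the user-visible part of the block (datap + data_off) before the offset is published through the redo log. A minimal sketch of a callback with the expected signature; the opaque PMEMobjpool typedef, the zeroing behavior, and the main() driver are assumptions for illustration only, not part of the listing above:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct PMEMobjpool PMEMobjpool;	/* opaque stand-in for this sketch */

/*
 * Hypothetical constructor matching the signature pmalloc_construct expects:
 * it is handed a pointer into the freshly reserved block and runs before the
 * block offset is stored persistently.
 */
static void
zeroing_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
	(void)pop;
	size_t usable = *(size_t *)arg;
	memset(ptr, 0, usable);
}

int
main(void)
{
	char block[64];
	size_t usable = sizeof(block);

	/* stand-in for the internal call: constructor(pop, datap + data_off, arg) */
	zeroing_constructor(NULL, block, &usable);

	printf("first byte after construction: %d\n", block[0]);
	return 0;
}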
Code example #10
File: pmalloc.c Project: andreas-bluemle/nvml
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off, uint64_t data_off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_chunk_bucket(pop,
		alloc->chunk_id, alloc->zone_id);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	VALGRIND_DO_MEMPOOL_FREE(pop,
			(char *)alloc + sizeof (*alloc) + data_off);

	bucket_insert_block(pop, b, res);

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
Code example #11
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_ensure_run_bucket_filled -- (internal) refills the bucket if needed
 */
static int
heap_ensure_run_bucket_filled(struct palloc_heap *heap, struct bucket *b,
	uint32_t units)
{
	ASSERTeq(b->aclass->type, CLASS_RUN);

	if (b->is_active) {
		b->c_ops->rm_all(b->container);
		b->active_memory_block.m_ops
			->claim_revoke(&b->active_memory_block);

		b->is_active = 0;
	}

	struct heap_rt *h = heap->rt;
	struct memory_block m = MEMORY_BLOCK_NONE;

	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);

		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	m.size_idx = b->aclass->run.size_idx;

	/* cannot reuse an existing run, create a new one */
	struct bucket *defb = heap_get_default_bucket(heap);
	util_mutex_lock(&defb->lock);
	if (heap_get_bestfit_block(heap, defb, &m) == 0) {
		ASSERTeq(m.block_off, 0);

		heap_create_run(heap, b, &m);

		b->active_memory_block = m;
		b->is_active = 1;

		util_mutex_unlock(&defb->lock);
		return 0;
	}
	util_mutex_unlock(&defb->lock);

	/*
	 * Try the recycler again, the previous call to the bestfit_block for
	 * huge chunks might have reclaimed some unused runs.
	 */
	if (recycler_get(h->recyclers[b->aclass->id], &m) == 0) {
		pthread_mutex_t *lock = m.m_ops->get_lock(&m);
		util_mutex_lock(lock);
		heap_reuse_run(heap, b, &m);
		util_mutex_unlock(lock);

		/*
		 * To verify that the recycler run is not able to satisfy our
		 * request we attempt to retrieve a block. This is not ideal,
		 * and should be replaced by a different heuristic once proper
		 * memory block scoring is implemented.
		 */
		struct memory_block tmp = MEMORY_BLOCK_NONE;
		tmp.size_idx = units;
		if (b->c_ops->get_rm_bestfit(b->container, &tmp) != 0) {
			b->c_ops->rm_all(b->container);
			m.m_ops->claim_revoke(&m);
			return ENOMEM;
		} else {
			bucket_insert_block(b, &tmp);
		}

		b->active_memory_block = m;
		b->is_active = 1;

		return 0;
	}

	return ENOMEM;
}
Code example #12
File: pmalloc.c Project: perone/nvml
/*
 * pfree -- deallocates a memory block previously allocated by pmalloc
 *
 * A zero value is written persistently into the off variable.
 *
 * If successful function returns zero. Otherwise an error number is returned.
 */
int
pfree(PMEMobjpool *pop, uint64_t *off)
{
	struct allocation_header *alloc = alloc_get_header(pop, *off);

	int err = 0;

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		return err;

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

#ifdef DEBUG
	if (!heap_block_is_allocated(pop, m)) {
		ERR("Double free or heap corruption");
		ASSERT(0);
	}
#endif /* DEBUG */

	if ((err = heap_lock_if_run(pop, m)) != 0)
		goto out;

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

out:
	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

	return err;
}
Code example #13
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(struct palloc_heap *heap, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	uint32_t size_idx = get_zone_size_idx(zone_id, heap->rt->max_zone,
			heap->size);

	heap_chunk_init(heap, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr;  /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}

/*
 * heap_run_init -- (internal) creates a run based on a chunk
 */
static void
heap_run_init(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];
	ASSERTne(m->size_idx, 0);
	size_t runsize = SIZEOF_RUN(run, m->size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->block_size = c->unit_size;
	pmemops_persist(&heap->p_ops, &run->block_size,
			sizeof(run->block_size));

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof(run->bitmap));

	unsigned nval = c->run.bitmap_nval;
	ASSERT(nval > 0);
	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0, sizeof(uint64_t) * (nval - 1));
	run->bitmap[nval - 1] = c->run.bitmap_lastval;

	run->incarnation_claim = heap->run_id;
	VALGRIND_SET_CLEAN(&run->incarnation_claim,
		sizeof(run->incarnation_claim));

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_persist(&heap->p_ops, run->bitmap, sizeof(run->bitmap));

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < m->size_idx; ++i) {
		data_hdr = &z->chunk_headers[m->chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ADD_TO_TX(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
		VALGRIND_REMOVE_FROM_TX(data_hdr, sizeof(*data_hdr));
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[m->chunk_id + 1],
		sizeof(struct chunk_header) * (m->size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = header_type_to_flag[c->header_type];
	*hdr = run_hdr;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
}

/*
 * heap_run_insert -- (internal) inserts and splits a block of memory into a run
 */
static void
heap_run_insert(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m, uint32_t size_idx, uint16_t block_off)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	ASSERT(size_idx <= BITS_PER_VALUE);
	ASSERT(block_off + size_idx <= c->run.bitmap_nallocs);

	uint32_t unit_max = c->run.unit_max;
	struct memory_block nm = *m;
	nm.size_idx = unit_max - (block_off % unit_max);
	nm.block_off = block_off;
	if (nm.size_idx > size_idx)
		nm.size_idx = size_idx;

	do {
		bucket_insert_block(b, &nm);
		ASSERT(nm.size_idx <= UINT16_MAX);
		ASSERT(nm.block_off + nm.size_idx <= UINT16_MAX);
		nm.block_off = (uint16_t)(nm.block_off + (uint16_t)nm.size_idx);
		size_idx -= nm.size_idx;
		nm.size_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}

/*
 * heap_process_run_metadata -- (internal) parses the run bitmap
 */
static uint32_t
heap_process_run_metadata(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;
	uint32_t inserted_blocks = 0;

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	for (unsigned i = 0; i < c->run.bitmap_nval; ++i) {
		ASSERT(i < MAX_BITMAP_VALUES);
		uint64_t v = run->bitmap[i];
		ASSERT(BITS_PER_VALUE * i <= UINT16_MAX);
		block_off = (uint16_t)(BITS_PER_VALUE * i);
		if (v == 0) {
			heap_run_insert(heap, b, m, BITS_PER_VALUE, block_off);
			inserted_blocks += BITS_PER_VALUE;
			continue;
		} else if (v == UINT64_MAX) {
			continue;
		}

		for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				ASSERT(block_off >= block_size_idx);

				heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
				inserted_blocks += block_size_idx;
				block_size_idx = 0;
			}

			if ((block_off++) == c->run.bitmap_nallocs) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			ASSERT(block_off >= block_size_idx);

			heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
			inserted_blocks += block_size_idx;
			block_size_idx = 0;
		}
	}

	return inserted_blocks;
}

/*
 * heap_create_run -- (internal) initializes a new run on an existing free chunk
 */
static void
heap_create_run(struct palloc_heap *heap, struct bucket *b,
	struct memory_block *m)
{
	heap_run_init(heap, b, m);
	memblock_rebuild_state(heap, m);
	heap_process_run_metadata(heap, b, m);
}
Code example #14
File: heap.c Project: sudkannan/nvml
/*
 * heap_degrade_run_if_empty -- makes a chunk out of an empty run
 */
int
heap_degrade_run_if_empty(PMEMobjpool *pop, struct bucket *b,
	struct memory_block m)
{
	struct zone *z = &pop->heap->layout->zones[m.zone_id];
	struct chunk_header *hdr = &z->chunk_headers[m.chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_RUN);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m.chunk_id];

	int err = 0;
	if ((err = pthread_mutex_lock(heap_get_run_lock(pop, m))) != 0)
		return err;

	int i;
	for (i = 0; i < bucket_bitmap_nval(b) - 1; ++i)
		if (run->bitmap[i] != 0)
			goto out;

	if (run->bitmap[i] != bucket_bitmap_lastval(b))
		goto out;

	m.block_off = 0;
	m.size_idx = RUN_UNIT_MAX;
	uint32_t size_idx_sum = 0;
	while (size_idx_sum != bucket_bitmap_nallocs(b)) {
		if (bucket_get_rm_block_exact(b, m) != 0) {
			ERR("persistent and volatile state mismatched");
			ASSERT(0);
		}

		size_idx_sum += m.size_idx;

		m.block_off += RUN_UNIT_MAX;
		if (m.block_off + RUN_UNIT_MAX > bucket_bitmap_nallocs(b))
			m.size_idx = bucket_bitmap_nallocs(b) - m.block_off;
		else
			m.size_idx = RUN_UNIT_MAX;
	}

	struct bucket *defb = pop->heap->buckets[DEFAULT_BUCKET];
	if ((err = bucket_lock(defb)) != 0) {
		ERR("Failed to lock default bucket");
		ASSERT(0);
	}

	m.block_off = 0;
	m.size_idx = 1;
	heap_chunk_init(pop, hdr, CHUNK_TYPE_FREE, m.size_idx);

	uint64_t *mhdr;
	uint64_t op_result;
	struct memory_block fm =
			heap_free_block(pop, defb, m, &mhdr, &op_result);
	VALGRIND_ADD_TO_TX(mhdr, sizeof (*mhdr));
	*mhdr = op_result;
	VALGRIND_REMOVE_FROM_TX(mhdr, sizeof (*mhdr));
	pop->persist(mhdr, sizeof (*mhdr));

	if ((err = bucket_insert_block(defb, fm)) != 0) {
		ERR("Failed to update heap volatile state");
	}

	bucket_unlock(defb);

out:
	if (pthread_mutex_unlock(heap_get_run_lock(pop, m)) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Code example #15
File: pmalloc.c Project: sudkannan/nvml
int
pfree(PMEMobjpool *pop, uint64_t *off)
{

	struct allocation_header *alloc = alloc_get_header(pop, *off);

	struct bucket *b = heap_get_best_bucket(pop, alloc->size);

	int err = 0;

	struct memory_block m = get_mblock_from_alloc(pop, b, alloc);

	if ((err = heap_lock_if_run(pop, m)) != 0)
		return err;

#ifdef _EAP_ALLOC_OPTIMIZE
	//fprintf(stderr, "_EAP_ALLOC_OPTIMIZE\n");
	if (is_alloc_free_opt_enable(alloc->size)) {
		goto error_lane_hold;
		//goto temphere;
	} else {
		//printf("Relaxing allocs %zu\n", alloc->size);
	}
#endif

	uint64_t op_result;
	void *hdr;
	struct memory_block res = heap_free_block(pop, b, m, &hdr, &op_result);

	struct lane_section *lane;
	if ((err = lane_hold(pop, &lane, LANE_SECTION_ALLOCATOR)) != 0)
		goto error_lane_hold;

	struct allocator_lane_section *sec =
		(struct allocator_lane_section *)lane->layout;

	redo_log_store(pop, sec->redo, ALLOC_OP_REDO_PTR_OFFSET,
		pop_offset(pop, off), 0);
	redo_log_store_last(pop, sec->redo, ALLOC_OP_REDO_HEADER,
		pop_offset(pop, hdr), op_result);

	redo_log_process(pop, sec->redo, MAX_ALLOC_OP_REDO);

	if (lane_release(pop) != 0) {
		ERR("Failed to release the lane");
		ASSERT(0);
	}

#ifdef _EAP_ALLOC_OPTIMIZE
	goto temphere;
temphere:
	//if (is_alloc_free_opt_enable(alloc->size))
	//	goto error_lane_hold;
#endif

	/*
	 * There's no point in rolling back redo log changes because the
	 * volatile errors don't break the persistent state.
	 */
	if (bucket_insert_block(b, res) != 0) {
		ERR("Failed to update the heap volatile state");
		ASSERT(0);
	}

	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	if (bucket_is_small(b) && heap_degrade_run_if_empty(pop, b, res) != 0) {
		ERR("Failed to degrade run");
		ASSERT(0);
	}

	return 0;

error_lane_hold:
	if (heap_unlock_if_run(pop, m) != 0) {
		ERR("Failed to release run lock");
		ASSERT(0);
	}

	return err;
}
Code example #16
File: palloc.c Project: mramotowski/nvml
/*
 * palloc_reservation_create -- creates a volatile reservation of a
 *	memory block.
 *
 * The first step in the allocation of a new block is reserving it in
 * the transient heap - which is represented by the bucket abstraction.
 *
 * To provide optimal scaling for multi-threaded applications and reduce
 * fragmentation the appropriate bucket is chosen depending on the
 * current thread context and to which allocation class the requested
 * size falls into.
 *
 * Once the bucket is selected, just enough memory is reserved for the
 * requested size. The underlying block allocation algorithm
 * (best-fit, next-fit, ...) varies depending on the bucket container.
 */
static int
palloc_reservation_create(struct palloc_heap *heap, size_t size,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
	struct pobj_action_internal *out)
{
	int err = 0;

	struct memory_block *new_block = &out->m;

	ASSERT(class_id < UINT8_MAX);
	struct alloc_class *c = class_id == 0 ?
		heap_get_best_class(heap, size) :
		alloc_class_by_id(heap_alloc_classes(heap),
			(uint8_t)class_id);

	if (c == NULL) {
		ERR("no allocation class for size %lu bytes", size);
		errno = EINVAL;
		return -1;
	}

	/*
	 * The caller provided size in bytes, but buckets operate in
	 * 'size indexes' which are multiples of the block size in the
	 * bucket.
	 *
	 * For example, to allocate 500 bytes from a bucket that
	 * provides 256 byte blocks two memory 'units' are required.
	 */
	ssize_t size_idx = alloc_class_calc_size_idx(c, size);
	if (size_idx < 0) {
		ERR("allocation class not suitable for size %lu bytes",
			size);
		errno = EINVAL;
		return -1;
	}
	ASSERT(size_idx <= UINT32_MAX);
	new_block->size_idx = (uint32_t)size_idx;

	struct bucket *b = heap_bucket_acquire(heap, c);

	err = heap_get_bestfit_block(heap, b, new_block);
	if (err != 0)
		goto out;

	if (alloc_prep_block(heap, new_block, constructor, arg,
		extra_field, object_flags, &out->offset) != 0) {
		/*
		 * Constructor returned non-zero value which means
		 * the memory block reservation has to be rolled back.
		 */
		if (new_block->type == MEMORY_BLOCK_HUGE) {
			bucket_insert_block(b, new_block);
		}
		err = ECANCELED;
		goto out;
	}

	/*
	 * Each as of yet unfulfilled reservation needs to be tracked in the
	 * runtime state.
	 * The memory block cannot be put back into the global state unless
	 * there are no active reservations.
	 */
	if ((out->resvp = bucket_current_resvp(b)) != NULL)
		util_fetch_and_add64(out->resvp, 1);

	out->lock = new_block->m_ops->get_lock(new_block);
	out->new_state = MEMBLOCK_ALLOCATED;

out:
	heap_bucket_release(heap, b);

	if (err == 0)
		return 0;

	errno = err;
	return -1;
}
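The comment inside palloc_reservation_create already gives the worked example: 500 bytes from a class with 256-byte units needs two units. At its core that conversion is a ceiling division by the class unit size; the real alloc_class_calc_size_idx also validates the result against class limits (and can return a negative value, as the error path above shows), which this simplified model ignores:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* simplified model: size index = ceil(size / unit_size) */
static uint32_t
calc_size_idx(size_t unit_size, size_t size)
{
	return (uint32_t)((size + unit_size - 1) / unit_size);
}

int
main(void)
{
	/* the example from the comment above: 500 bytes, 256-byte units */
	printf("500 B in 256 B units -> %u units\n",
		calc_size_idx(256, 500));	/* prints 2 */
	return 0;
}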
Code example #17
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_reclaim_run -- checks the run for available memory if unclaimed.
 *
 * Returns 1 if reclaimed chunk, 0 otherwise.
 */
static int
heap_reclaim_run(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (m->m_ops->claim(m) != 0)
		return 0; /* this run already has an owner */

	struct alloc_class *c = alloc_class_get_create_by_unit_size(
		heap->rt->alloc_classes, run->block_size);
	if (c == NULL)
		return 0;

	ASSERTeq(c->type, CLASS_RUN);

	pthread_mutex_t *lock = m->m_ops->get_lock(m);
	util_mutex_lock(lock);

	unsigned i;
	unsigned nval = c->run.bitmap_nval;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (run->bitmap[i] != 0)
			break;

	int empty = (i == (nval - 1)) &&
		(run->bitmap[i] == c->run.bitmap_lastval);
	if (empty) {
		struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
		struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
		struct bucket *defb = heap_get_default_bucket(heap);

		/*
		 * The redo log ptr can be NULL if we are sure that there's only
		 * one persistent value modification in the entire operation
		 * context.
		 */
		struct operation_context ctx;
		operation_init(&ctx, heap->base, NULL, NULL);
		ctx.p_ops = &heap->p_ops;

		struct memory_block nb = MEMORY_BLOCK_NONE;
		nb.chunk_id = m->chunk_id;
		nb.zone_id = m->zone_id;
		nb.block_off = 0;
		nb.size_idx = m->size_idx;

		heap_chunk_init(heap, hdr, CHUNK_TYPE_FREE, nb.size_idx);
		memblock_rebuild_state(heap, &nb);

		nb = heap_coalesce_huge(heap, &nb);
		nb.m_ops->prep_hdr(&nb, MEMBLOCK_FREE, &ctx);

		operation_process(&ctx);

		bucket_insert_block(defb, &nb);

		*m = nb;
	} else {
		recycler_put(heap->rt->recyclers[c->id], m);
	}

	util_mutex_unlock(lock);

	return empty;
}
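Both heap_reclaim_run above and heap_degrade_run_if_empty in example #14 decide that a run is empty the same way: every bitmap word except the last must be zero, and the last must equal the class's bitmap_lastval, because heap_run_init (example #13) leaves the bits outside the usable units permanently set. A standalone model of that check, with an illustrative 4-word bitmap and a hypothetical run_is_empty helper:

#include <stdio.h>
#include <stdint.h>

/* returns 1 if the run bitmap describes a fully free run */
static int
run_is_empty(const uint64_t *bitmap, unsigned nval, uint64_t lastval)
{
	for (unsigned i = 0; i < nval - 1; ++i)
		if (bitmap[i] != 0)
			return 0;

	return bitmap[nval - 1] == lastval;
}

int
main(void)
{
	/* illustrative: only 20 usable units in the last word, padding bits set */
	uint64_t lastval = ~((1ULL << 20) - 1);
	uint64_t bitmap[4] = {0, 0, 0, lastval};

	printf("empty run reclaimed:  %d\n", run_is_empty(bitmap, 4, lastval));

	bitmap[1] = 0x10;	/* one unit still allocated */
	printf("after one allocation: %d\n", run_is_empty(bitmap, 4, lastval));
	return 0;
}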