Example #1
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_init -- initializes the heap
 *
 * If successful, the function returns zero. Otherwise, an error number is returned.
 */
int
heap_init(void *heap_start, uint64_t heap_size, struct pmem_ops *p_ops)
{
	if (heap_size < HEAP_MIN_SIZE)
		return EINVAL;

	VALGRIND_DO_MAKE_MEM_UNDEFINED(heap_start, heap_size);

	struct heap_layout *layout = heap_start;
	heap_write_header(&layout->header, heap_size);
	pmemops_persist(p_ops, &layout->header, sizeof(struct heap_header));

	unsigned zones = heap_max_zone(heap_size);
	for (unsigned i = 0; i < zones; ++i) {
		pmemops_memset_persist(p_ops,
				&ZID_TO_ZONE(layout, i)->header,
				0, sizeof(struct zone_header));
		pmemops_memset_persist(p_ops,
				&ZID_TO_ZONE(layout, i)->chunk_headers,
				0, sizeof(struct chunk_header));

		/* only explicitly allocated chunks should be accessible */
		VALGRIND_DO_MAKE_MEM_NOACCESS(
			&ZID_TO_ZONE(layout, i)->chunk_headers,
			sizeof(struct chunk_header));
	}

	return 0;
}
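
The examples above and below all address zones through ZID_TO_ZONE, whose definition is not shown here. Below is a minimal sketch of the fixed-stride addressing such a macro presumably performs, using toy types and a deliberately tiny zone size; every name and value in it is illustrative, not the project's real definition.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins: the real ZONE_MAX_SIZE, heap_header and zone types live in
 * the nvml/pmdk headers. Only the fixed-stride addressing idea is shown. */
#define TOY_ZONE_MAX_SIZE	(64u * 1024)

struct toy_heap_header { uint64_t size; };
struct toy_zone { uint64_t magic; };
struct toy_layout { struct toy_heap_header header; char zones[]; };

/* in the spirit of ZID_TO_ZONE(layout, zone_id): zone i sits at a fixed
 * stride from the first zone, right after the heap header */
static struct toy_zone *
toy_zid_to_zone(struct toy_layout *layout, unsigned zone_id)
{
	return (struct toy_zone *)
		(layout->zones + (size_t)TOY_ZONE_MAX_SIZE * zone_id);
}

/* how many whole zones fit into a heap of the given size; heap_max_zone
 * presumably computes something along these lines */
static unsigned
toy_heap_max_zone(uint64_t heap_size)
{
	return (unsigned)((heap_size - sizeof(struct toy_heap_header)) /
		TOY_ZONE_MAX_SIZE);
}

int
main(void)
{
	static char buf[sizeof(struct toy_heap_header) + 4 * TOY_ZONE_MAX_SIZE];
	struct toy_layout *layout = (struct toy_layout *)buf;

	printf("zones in this toy heap: %u\n", toy_heap_max_zone(sizeof(buf)));
	printf("zone 2 offset: %zu bytes\n",
		(size_t)((char *)toy_zid_to_zone(layout, 2) - buf));
	return 0;
}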
Example #2
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_check -- verifies if the heap is consistent and can be opened properly
 *
 * If successful, the function returns zero. Otherwise, -1 is returned.
 */
int
heap_check(void *heap_start, uint64_t heap_size)
{
	if (heap_size < HEAP_MIN_SIZE) {
		ERR("heap: invalid heap size");
		return -1;
	}

	struct heap_layout *layout = heap_start;

	if (heap_size != layout->header.size) {
		ERR("heap: heap size missmatch");
		return -1;
	}

	if (heap_verify_header(&layout->header))
		return -1;

	for (unsigned i = 0; i < heap_max_zone(layout->header.size); ++i) {
		if (heap_verify_zone(ZID_TO_ZONE(layout, i)))
			return -1;
	}

	return 0;
}
Example #3
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_chunk_foreach_object -- (internal) iterates through objects in a chunk
 */
static int
heap_chunk_foreach_object(struct palloc_heap *heap, object_callback cb,
	void *arg, struct memory_block *m)
{
	struct zone *zone = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_header *hdr = &zone->chunk_headers[m->chunk_id];
	memblock_rebuild_state(heap, m);

	switch (hdr->type) {
		case CHUNK_TYPE_FREE:
			return 0;
		case CHUNK_TYPE_USED:
			m->size_idx = hdr->size_idx;
			return cb(m, arg);
		case CHUNK_TYPE_RUN:
			return heap_run_foreach_object(heap, cb, arg, m,
				alloc_class_get_create_by_unit_size(
					heap->rt->alloc_classes,
					m->m_ops->block_size(m)));
		default:
			ASSERT(0);
	}

	return 0;
}
Example #4
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_get_adjacent_free_block -- locates adjacent free memory block in heap
 */
static int
heap_get_adjacent_free_block(struct palloc_heap *heap,
	const struct memory_block *in, struct memory_block *out, int prev)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, in->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[in->chunk_id];
	out->zone_id = in->zone_id;

	if (prev) {
		if (in->chunk_id == 0)
			return ENOENT;

		struct chunk_header *prev_hdr =
			&z->chunk_headers[in->chunk_id - 1];
		out->chunk_id = in->chunk_id - prev_hdr->size_idx;

		if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
			return ENOENT;

		out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
	} else { /* next */
		if (in->chunk_id + hdr->size_idx == z->header.size_idx)
			return ENOENT;

		out->chunk_id = in->chunk_id + hdr->size_idx;

		if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
			return ENOENT;

		out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
	}
	memblock_rebuild_state(heap, out);

	return 0;
}
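
The prev branch above works because the header slot just before a chunk holds either the previous chunk's footer (for multi-unit chunks) or its header (for single-unit chunks); in both cases that slot's size_idx says how far back the previous chunk begins. Here is a small self-contained sketch of that lookup on a toy header table; the types and values are illustrative only.

#include <stdint.h>
#include <stdio.h>

enum toy_chunk_type { TOY_FREE, TOY_USED, TOY_RUN_DATA, TOY_FOOTER };
struct toy_chunk_header { enum toy_chunk_type type; uint32_t size_idx; };

int
main(void)
{
	/*
	 * Toy zone: chunk 0 is free and spans 3 units, so its footer sits in
	 * slot 2 (as heap_chunk_write_footer would leave it); chunk 3 is a
	 * used single-unit chunk.
	 */
	struct toy_chunk_header headers[4] = {
		[0] = { TOY_FREE, 3 },
		[2] = { TOY_FOOTER, 3 },
		[3] = { TOY_USED, 1 },
	};

	uint32_t chunk_id = 3;	/* look left from the used chunk */

	/* the slot right before a chunk describes the previous chunk's span */
	struct toy_chunk_header *prev = &headers[chunk_id - 1];
	uint32_t prev_chunk_id = chunk_id - prev->size_idx;

	printf("previous chunk starts at %u and is %s\n", prev_chunk_id,
		headers[prev_chunk_id].type == TOY_FREE ? "free" : "not free");
	return 0;
}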
Example #5
File: memblock.c Project: krzycz/nvml
/*
 * huge_vg_init -- initializes chunk metadata in memcheck state
 */
static void
huge_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk *chunk = heap_get_chunk(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/*
	 * Mark unused chunk headers as not accessible.
	 */
	VALGRIND_DO_MAKE_MEM_NOACCESS(
		&z->chunk_headers[m->chunk_id + 1],
		(m->size_idx - 1) *
		sizeof(struct chunk_header));

	size_t size = block_get_real_size(m);
	VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);

	if (objects && huge_get_state(m) == MEMBLOCK_ALLOCATED) {
		if (cb(m, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
Example #6
File: memblock.c Project: wojtuss/nvml
/*
 * run_block_size -- looks for the right chunk and returns the block size
 *	information that is attached to the run block metadata.
 */
static size_t
run_block_size(const struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	return run->block_size;
}
Example #7
File: memblock.c Project: wojtuss/nvml
/*
 * huge_get_real_data -- returns a pointer to the beginning of the data of a huge block
 */
static void *
huge_get_real_data(const struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	void *data = &z->chunks[m->chunk_id].data;

	return (char *)data;
}
Example #8
File: memblock.c Project: ChandKV/nvml
/*
 * run_block_size -- looks for the right chunk and returns the block size
 *	information that is attached to the run block metadata.
 */
static size_t
run_block_size(struct memory_block *m, struct heap_layout *h)
{
	struct zone *z = ZID_TO_ZONE(h, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	return run->block_size;
}
Example #9
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_reclaim_zone_garbage -- (internal) creates volatile state of unused runs
 */
static int
heap_reclaim_zone_garbage(struct palloc_heap *heap, uint32_t zone_id, int init)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	struct chunk_run *run = NULL;
	int rchunks = 0;

	/*
	 * If this is the first time this zone is processed, recreate all
	 * footers BEFORE any other operation takes place. For example, the
	 * heap_init_free_chunk call expects the footers to be created.
	 */
	if (init) {
		for (uint32_t i = 0; i < z->header.size_idx; ) {
			struct chunk_header *hdr = &z->chunk_headers[i];
			switch (hdr->type) {
				case CHUNK_TYPE_USED:
					heap_chunk_write_footer(hdr,
						hdr->size_idx);
					break;
			}

			i += hdr->size_idx;
		}
	}

	for (uint32_t i = 0; i < z->header.size_idx; ) {
		struct chunk_header *hdr = &z->chunk_headers[i];
		ASSERT(hdr->size_idx != 0);

		struct memory_block m = MEMORY_BLOCK_NONE;
		m.zone_id = zone_id;
		m.chunk_id = i;
		m.size_idx = hdr->size_idx;

		memblock_rebuild_state(heap, &m);

		switch (hdr->type) {
			case CHUNK_TYPE_RUN:
				run = (struct chunk_run *)&z->chunks[i];
				rchunks += heap_reclaim_run(heap, run, &m);
				break;
			case CHUNK_TYPE_FREE:
				if (init)
					heap_init_free_chunk(heap, hdr, &m);
				break;
			case CHUNK_TYPE_USED:
				break;
			default:
				ASSERT(0);
		}

		i = m.chunk_id + m.size_idx; /* hdr might have changed */
	}

	return rchunks == 0 ? ENOMEM : 0;
}
Example #10
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_end -- returns first address after heap
 */
void *
heap_end(struct palloc_heap *h)
{
	ASSERT(h->rt->max_zone > 0);

	struct zone *last_zone = ZID_TO_ZONE(h->layout, h->rt->max_zone - 1);

	return &last_zone->chunks[last_zone->header.size_idx];
}
Example #11
File: memblock.c Project: wojtuss/nvml
/*
 * run_get_real_data -- returns a pointer to the beginning of the data of a run block
 */
static void *
run_get_real_data(const struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);

	struct chunk_run *run =
		(struct chunk_run *)&z->chunks[m->chunk_id].data;
	ASSERT(run->block_size != 0);

	return (char *)&run->data + (run->block_size * m->block_off);
}
Example #12
File: memblock.c Project: wojtuss/nvml
/*
 * run_ensure_header_type -- runs must be created with appropriate header type.
 */
static void
run_ensure_header_type(const struct memory_block *m,
	enum header_type t)
{
#ifdef DEBUG
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERTeq(hdr->type, CHUNK_TYPE_RUN);
	ASSERT((hdr->flags & header_type_to_flag[t]) == header_type_to_flag[t]);
#endif
}
Example #13
File: memblock.c Project: wojtuss/nvml
/*
 * memblock_header_type -- determines the memory block's header type
 */
static enum header_type
memblock_header_type(const struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];

	if (hdr->flags & CHUNK_FLAG_COMPACT_HEADER)
		return HEADER_COMPACT;

	if (hdr->flags & CHUNK_FLAG_HEADER_NONE)
		return HEADER_NONE;

	return HEADER_LEGACY;
}
Example #14
File: memblock.c Project: ChandKV/nvml
/*
 * huge_get_state -- returns whether a huge block is allocated or not
 */
static enum memblock_state
huge_get_state(struct memory_block *m, struct palloc_heap *heap)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];

	if (hdr->type == CHUNK_TYPE_USED)
		return MEMBLOCK_ALLOCATED;

	if (hdr->type == CHUNK_TYPE_FREE)
		return MEMBLOCK_FREE;

	return MEMBLOCK_STATE_UNKNOWN;
}
Example #15
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_run_foreach_object -- (internal) iterates through objects in a run
 */
int
heap_run_foreach_object(struct palloc_heap *heap, object_callback cb,
		void *arg, struct memory_block *m, struct alloc_class *c)
{
	if (c == NULL)
		return -1;

	uint16_t i = m->block_off / BITS_PER_VALUE;
	uint16_t block_start = m->block_off % BITS_PER_VALUE;
	uint16_t block_off;

	struct chunk_run *run = (struct chunk_run *)
		&ZID_TO_ZONE(heap->layout, m->zone_id)->chunks[m->chunk_id];

	for (; i < c->run.bitmap_nval; ++i) {
		uint64_t v = run->bitmap[i];
		block_off = (uint16_t)(BITS_PER_VALUE * i);

		for (uint16_t j = block_start; j < BITS_PER_VALUE; ) {
			if (block_off + j >= (uint16_t)c->run.bitmap_nallocs)
				break;

			if (!BIT_IS_CLR(v, j)) {
				m->block_off = (uint16_t)(block_off + j);

				/*
				 * The size index of this memory block cannot be
				 * retrieved at this time because the header
				 * might not be initialized in valgrind yet.
				 */
				m->size_idx = 0;

				if (cb(m, arg) != 0)
					return 1;

				m->size_idx = CALC_SIZE_IDX(c->unit_size,
					m->m_ops->get_real_size(m));
				j = (uint16_t)(j + m->size_idx);
			} else {
				++j;
			}
		}
		block_start = 0;
	}

	return 0;
}
Example #16
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_check_remote -- verifies if the heap of a remote pool is consistent
 *                      and can be opened properly
 *
 * If successful, the function returns zero. Otherwise, -1 is returned.
 */
int
heap_check_remote(void *heap_start, uint64_t heap_size, struct remote_ops *ops)
{
	if (heap_size < HEAP_MIN_SIZE) {
		ERR("heap: invalid heap size");
		return -1;
	}

	struct heap_layout *layout = heap_start;

	struct heap_header header;
	if (ops->read(ops->ctx, ops->base, &header, &layout->header,
						sizeof(struct heap_header))) {
		ERR("heap: obj_read_remote error");
		return -1;
	}

	if (heap_size != header.size) {
		ERR("heap: heap size mismatch");
		return -1;
	}

	if (heap_verify_header(&header))
		return -1;

	struct zone *zone_buff = (struct zone *)Malloc(sizeof(struct zone));
	if (zone_buff == NULL) {
		ERR("heap: zone_buff malloc error");
		return -1;
	}
	for (unsigned i = 0; i < heap_max_zone(header.size); ++i) {
		if (ops->read(ops->ctx, ops->base, zone_buff,
				ZID_TO_ZONE(layout, i), sizeof(struct zone))) {
			ERR("heap: obj_read_remote error");
			goto out;
		}

		if (heap_verify_zone(zone_buff)) {
			goto out;
		}
	}
	Free(zone_buff);
	return 0;

out:
	Free(zone_buff);
	return -1;
}
Example #17
File: memblock.c Project: wojtuss/nvml
/*
 * huge_ensure_header_type -- checks the header type of a chunk and modifies
 *	it if necessary. This is fail-safe atomic.
 */
static void
huge_ensure_header_type(const struct memory_block *m,
	enum header_type t)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERTeq(hdr->type, CHUNK_TYPE_FREE);

	if ((hdr->flags & header_type_to_flag[t]) == 0) {
		VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
		uint16_t f = ((uint16_t)header_type_to_flag[t]);
		hdr->flags |= f;
		pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
		VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));
	}
}
Example #18
File: memblock.c Project: wojtuss/nvml
/*
 * memblock_from_offset_opt -- resolves memory block data from an offset that
 *	originates from the heap
 */
struct memory_block
memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.heap = heap;

	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0);
	m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);

	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone);
	m.chunk_id = (uint32_t)(off / CHUNKSIZE);

	struct chunk_header *hdr = &ZID_TO_ZONE(heap->layout, m.zone_id)
						->chunk_headers[m.chunk_id];

	if (hdr->type == CHUNK_TYPE_RUN_DATA)
		m.chunk_id -= hdr->size_idx;

	off -= CHUNKSIZE * m.chunk_id;

	m.header_type = memblock_header_type(&m);

	off -= header_type_to_size[m.header_type];

	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
#ifdef DEBUG
	enum memory_block_type t = memblock_detect_type(&m, heap->layout);
	ASSERTeq(t, m.type);
#endif
	m.m_ops = &mb_ops[m.type];

	uint64_t unit_size = m.m_ops->block_size(&m);

	if (off != 0) { /* run */
		off -= RUN_METASIZE;
		m.block_off = (uint16_t)(off / unit_size);
		off -= m.block_off * unit_size;
	}

	m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size,
		memblock_header_ops[m.header_type].get_size(&m));

	ASSERTeq(off, 0);

	return m;
}
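
memblock_from_offset_opt peels the zone index, the chunk index, and finally the in-chunk offset off a raw heap offset. The following sketch shows the forward mapping that this arithmetic inverts, with made-up constants standing in for the real ZONE_MAX_SIZE, CHUNKSIZE and zone metadata size.

#include <stdint.h>
#include <stdio.h>

/* Made-up constants; the real values come from the project's headers. */
#define TOY_ZONE0_OFF		4096u		/* offset of zone0 in the pool */
#define TOY_ZONE_MAX_SIZE	(1u << 20)	/* stride between zones */
#define TOY_ZONE_METADATA	8192u		/* sizeof(struct zone) stand-in */
#define TOY_CHUNKSIZE		(1u << 12)

/* forward mapping: compose a heap offset from (zone, chunk, byte-in-chunk) */
static uint64_t
toy_compose_offset(uint32_t zone_id, uint32_t chunk_id, uint64_t in_chunk)
{
	return TOY_ZONE0_OFF +
		(uint64_t)TOY_ZONE_MAX_SIZE * zone_id +
		TOY_ZONE_METADATA +
		(uint64_t)TOY_CHUNKSIZE * chunk_id +
		in_chunk;
}

int
main(void)
{
	uint64_t off = toy_compose_offset(2, 5, 100);

	/* inverse, mirroring the subtractions in memblock_from_offset_opt */
	off -= TOY_ZONE0_OFF;
	uint32_t zone_id = (uint32_t)(off / TOY_ZONE_MAX_SIZE);
	off -= (uint64_t)TOY_ZONE_MAX_SIZE * zone_id + TOY_ZONE_METADATA;
	uint32_t chunk_id = (uint32_t)(off / TOY_CHUNKSIZE);
	off -= (uint64_t)TOY_CHUNKSIZE * chunk_id;

	printf("zone %u, chunk %u, %llu bytes into the chunk\n",
		zone_id, chunk_id, (unsigned long long)off);
	return 0;
}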
Example #19
File: memblock.c Project: wojtuss/nvml
/*
 * huge_prep_operation_hdr -- prepares the new value of a chunk header that will
 *	be set after the operation concludes.
 */
static void
huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];

	/*
	 * Depending on the operation that needs to be performed a new chunk
	 * header needs to be prepared with the new chunk state.
	 */
	uint64_t val = chunk_get_chunk_hdr_value(
		op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
		hdr->flags,
		m->size_idx);

	operation_add_entry(ctx, hdr, val, OPERATION_SET);

	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1,
		(hdr->size_idx - 1) * sizeof(struct chunk_header));

	/*
	 * In the case of chunks larger than one unit the footer must be
	 * created immediately AFTER the persistent state is safely updated.
	 */
	if (m->size_idx == 1)
		return;

	struct chunk_header *footer = hdr + m->size_idx - 1;
	VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer));

	val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx);

	/*
	 * It's only safe to write the footer AFTER the persistent part of
	 * the operation has been successfully processed because the footer
	 * pointer might point to a currently valid persistent state
	 * of a different chunk.
	 * The footer entry change is updated as transient because it will
	 * be recreated at heap boot regardless - it's just needed for runtime
	 * operations.
	 */
	operation_add_typed_entry(ctx,
		footer, val, OPERATION_SET, ENTRY_TRANSIENT);
}
Example #20
File: memblock.c Project: wojtuss/nvml
/*
 * run_prep_operation_hdr -- prepares the new value for a select few bytes of
 *	a run bitmap that will be set after the operation concludes.
 *
 * It's VERY important to keep in mind that the particular value of the
 * bitmap this method is modifying must not be changed after this function
 * is called and before the operation is processed.
 */
static void
run_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);

	struct chunk_run *r = (struct chunk_run *)&z->chunks[m->chunk_id];

	ASSERT(m->size_idx <= BITS_PER_VALUE);

	/*
	 * Free blocks are represented by clear bits and used blocks by set
	 * bits - which is the reverse of the commonly used scheme.
	 *
	 * Here a bit mask is prepared that flips the bits that represent the
	 * memory block provided by the caller - because both the size index and
	 * the block offset are tied 1:1 to the bitmap this operation is
	 * relatively simple.
	 */
	uint64_t bmask;
	if (m->size_idx == BITS_PER_VALUE) {
		ASSERTeq(m->block_off % BITS_PER_VALUE, 0);
		bmask = UINT64_MAX;
	} else {
		bmask = ((1ULL << m->size_idx) - 1ULL) <<
				(m->block_off % BITS_PER_VALUE);
	}

	/*
	 * The run bitmap is composed of several 8 byte values, so a proper
	 * element of the bitmap array must be selected.
	 */
	int bpos = m->block_off / BITS_PER_VALUE;

	/* the bit mask is applied immediately by the add entry operations */
	if (op == MEMBLOCK_ALLOCATED) {
		operation_add_entry(ctx, &r->bitmap[bpos],
			bmask, OPERATION_OR);
	} else if (op == MEMBLOCK_FREE) {
		operation_add_entry(ctx, &r->bitmap[bpos],
			~bmask, OPERATION_AND);
	} else {
		ASSERT(0);
	}
}
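
To make the mask construction above concrete, here is a standalone sketch with a hypothetical block of 3 units starting at bit 68 of the run bitmap; it lands in bitmap word 1 with bits 4 through 6 set. The inputs are made up; the mask arithmetic mirrors run_prep_operation_hdr.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_VALUE 64u	/* each bitmap word is 8 bytes, as above */

int
main(void)
{
	uint32_t size_idx = 3;		/* hypothetical block: 3 units... */
	uint16_t block_off = 68;	/* ...starting at bitmap bit 68 */

	assert(size_idx <= BITS_PER_VALUE);

	/* same mask construction as run_prep_operation_hdr */
	uint64_t bmask;
	if (size_idx == BITS_PER_VALUE) {
		assert(block_off % BITS_PER_VALUE == 0);
		bmask = UINT64_MAX;
	} else {
		bmask = ((1ULL << size_idx) - 1ULL) <<
				(block_off % BITS_PER_VALUE);
	}
	unsigned bpos = block_off / BITS_PER_VALUE;

	/* allocation ORs the mask into the word, free ANDs its complement */
	printf("bitmap word %u, mask 0x%016llx\n",
		bpos, (unsigned long long)bmask);
	return 0;
}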
Example #21
static int
pfree(uint64_t *off)
{
	uint64_t offset = *off;
	if (offset == 0)
		return 0;

	PMEMoid oid;
	oid.off = offset;

	struct allocation_header *hdr = &D_RW_OBJ(oid)->alloch;

	struct zone *z = ZID_TO_ZONE(heap, hdr->zone_id);
	struct chunk_header *chdr = &z->chunk_headers[hdr->chunk_id];
	if (chdr->type == CHUNK_TYPE_USED) {
		chdr->type = CHUNK_TYPE_FREE;
		pmempool_convert_persist(poolset, &chdr->type,
			sizeof(chdr->type));
		*off = 0;
		pmempool_convert_persist(poolset, off, sizeof(*off));
		return 0;
	} else if (chdr->type != CHUNK_TYPE_RUN) {
		assert(0);
	}

	struct chunk_run *run =
		(struct chunk_run *)&z->chunks[hdr->chunk_id].data;
	uintptr_t diff = (uintptr_t)hdr - (uintptr_t)&run->data;
	uint64_t block_off = (uint16_t)((size_t)diff / run->block_size);
	uint64_t size_idx = CALC_SIZE_IDX(run->block_size, hdr->size);

	uint64_t bmask = ((1ULL << size_idx) - 1ULL) <<
			(block_off % BITS_PER_VALUE);

	uint64_t bpos = block_off / BITS_PER_VALUE;

	run->bitmap[bpos] &= ~bmask;
	pmempool_convert_persist(poolset, &run->bitmap[bpos],
		sizeof(run->bitmap[bpos]));
	*off = 0;
	pmempool_convert_persist(poolset, off, sizeof(*off));

	return 0;
}
Example #22
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_resize_chunk -- (internal) splits the chunk into two smaller ones
 */
static void
heap_resize_chunk(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t new_size_idx)
{
	uint32_t new_chunk_id = chunk_id + new_size_idx;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	struct chunk_header *old_hdr = &z->chunk_headers[chunk_id];
	struct chunk_header *new_hdr = &z->chunk_headers[new_chunk_id];

	uint32_t rem_size_idx = old_hdr->size_idx - new_size_idx;
	heap_chunk_init(heap, new_hdr, CHUNK_TYPE_FREE, rem_size_idx);
	heap_chunk_init(heap, old_hdr, CHUNK_TYPE_FREE, new_size_idx);

	struct bucket *def_bucket = heap->rt->default_bucket;
	struct memory_block m = {new_chunk_id, zone_id, rem_size_idx, 0,
		0, 0, NULL, NULL};
	memblock_rebuild_state(heap, &m);
	bucket_insert_block(def_bucket, &m);
}
Example #23
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_populate_buckets -- (internal) creates volatile state of memory blocks
 */
static int
heap_populate_buckets(struct palloc_heap *heap)
{
	struct heap_rt *h = heap->rt;

	if (h->zones_exhausted == h->max_zone)
		return ENOMEM;

	uint32_t zone_id = h->zones_exhausted++;
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	/* ignore zone and chunk headers */
	VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(z, sizeof(z->header) +
		sizeof(z->chunk_headers));

	if (z->header.magic != ZONE_HEADER_MAGIC)
		heap_zone_init(heap, zone_id);

	return heap_reclaim_zone_garbage(heap, zone_id, 1 /* init */);
}
Example #24
File: memblock.c Project: ChandKV/nvml
/*
 * memblock_autodetect_type -- looks for the corresponding chunk header and
 *	depending on the chunk's type returns the right memory block type.
 */
enum memory_block_type
memblock_autodetect_type(struct memory_block *m, struct heap_layout *h)
{
	enum memory_block_type ret;

	switch (ZID_TO_ZONE(h, m->zone_id)->chunk_headers[m->chunk_id].type) {
		case CHUNK_TYPE_RUN:
			ret = MEMORY_BLOCK_RUN;
			break;
		case CHUNK_TYPE_FREE:
		case CHUNK_TYPE_USED:
		case CHUNK_TYPE_FOOTER:
			ret = MEMORY_BLOCK_HUGE;
			break;
		default:
			/* unreachable */
			FATAL("possible zone chunks metadata corruption");
	}
	return ret;
}
Example #25
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_zone_foreach_object -- (internal) iterates through objects in a zone
 */
static int
heap_zone_foreach_object(struct palloc_heap *heap, object_callback cb,
	void *arg, struct memory_block *m)
{
	struct zone *zone = ZID_TO_ZONE(heap->layout, m->zone_id);
	if (zone->header.magic == 0)
		return 0;

	for (; m->chunk_id < zone->header.size_idx; ) {
		if (heap_chunk_foreach_object(heap, cb, arg, m) != 0)
			return 1;

		m->chunk_id += zone->chunk_headers[m->chunk_id].size_idx;

		/* reset the starting position of memblock */
		m->block_off = 0;
		m->size_idx = 0;
	}

	return 0;
}
Example #26
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_vg_open_chunk -- (internal) notifies Valgrind about chunk layout
 */
static void
heap_vg_open_chunk(struct palloc_heap *heap,
	object_callback cb, void *arg, int objects,
	struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	void *chunk = &z->chunks[m->chunk_id];
	memblock_rebuild_state(heap, m);

	if (m->type == MEMORY_BLOCK_RUN) {
		struct chunk_run *run = chunk;

		ASSERTne(m->size_idx, 0);
		VALGRIND_DO_MAKE_MEM_NOACCESS(run,
			SIZEOF_RUN(run, m->size_idx));

		/* set the run metadata as defined */
		VALGRIND_DO_MAKE_MEM_DEFINED(run,
			sizeof(*run) - sizeof(run->data));

		if (objects) {
			int ret = heap_run_foreach_object(heap, cb, arg, m,
				alloc_class_get_create_by_unit_size(
					heap->rt->alloc_classes,
					m->m_ops->block_size(m)));
			ASSERTeq(ret, 0);
		}
	} else {
		size_t size = m->m_ops->get_real_size(m);
		VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);

		if (objects && m->m_ops->get_state(m) == MEMBLOCK_ALLOCATED) {
			int ret = cb(m, arg);
			ASSERTeq(ret, 0);
		}
	}
}
Example #27
File: memblock.c Project: krzycz/nvml
/*
 * run_vg_init -- initializes run metadata in memcheck state
 */
static void
run_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/* set the run metadata as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, RUN_BASE_METADATA_SIZE);

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	/*
	 * Mark run data headers as defined.
	 */
	for (unsigned j = 1; j < m->size_idx; ++j) {
		struct chunk_header *data_hdr =
			&z->chunk_headers[m->chunk_id + j];
		VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
			sizeof(struct chunk_header));
		ASSERTeq(data_hdr->type, CHUNK_TYPE_RUN_DATA);
	}

	VALGRIND_DO_MAKE_MEM_NOACCESS(run, SIZEOF_RUN(run, m->size_idx));

	/* set the run bitmap as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, b.size + RUN_BASE_METADATA_SIZE);

	if (objects) {
		if (run_iterate_used(m, cb, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
Example #28
File: memblock.c Project: ChandKV/nvml
/*
 * run_get_state -- returns whether a block from a run is allocated or not
 */
static enum memblock_state
run_get_state(struct memory_block *m, struct palloc_heap *heap)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERTeq(hdr->type, CHUNK_TYPE_RUN);

	struct chunk_run *r = (struct chunk_run *)&z->chunks[m->chunk_id];

	unsigned v = m->block_off / BITS_PER_VALUE;
	uint64_t bitmap = r->bitmap[v];
	unsigned b = m->block_off % BITS_PER_VALUE;

	unsigned b_last = b + m->size_idx;
	ASSERT(b_last <= BITS_PER_VALUE);

	for (unsigned i = b; i < b_last; ++i) {
		if (!BIT_IS_CLR(bitmap, i)) {
			return MEMBLOCK_ALLOCATED;
		}
	}

	return MEMBLOCK_FREE;
}
Example #29
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_reclaim_run -- checks the run for available memory if unclaimed.
 *
 * Returns 1 if the chunk was reclaimed, 0 otherwise.
 */
static int
heap_reclaim_run(struct palloc_heap *heap, struct chunk_run *run,
	struct memory_block *m)
{
	if (m->m_ops->claim(m) != 0)
		return 0; /* this run already has an owner */

	struct alloc_class *c = alloc_class_get_create_by_unit_size(
		heap->rt->alloc_classes, run->block_size);
	if (c == NULL)
		return 0;

	ASSERTeq(c->type, CLASS_RUN);

	pthread_mutex_t *lock = m->m_ops->get_lock(m);
	util_mutex_lock(lock);

	unsigned i;
	unsigned nval = c->run.bitmap_nval;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (run->bitmap[i] != 0)
			break;

	int empty = (i == (nval - 1)) &&
		(run->bitmap[i] == c->run.bitmap_lastval);
	if (empty) {
		struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
		struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
		struct bucket *defb = heap_get_default_bucket(heap);

		/*
		 * The redo log ptr can be NULL if we are sure that there's only
		 * one persistent value modification in the entire operation
		 * context.
		 */
		struct operation_context ctx;
		operation_init(&ctx, heap->base, NULL, NULL);
		ctx.p_ops = &heap->p_ops;

		struct memory_block nb = MEMORY_BLOCK_NONE;
		nb.chunk_id = m->chunk_id;
		nb.zone_id = m->zone_id;
		nb.block_off = 0;
		nb.size_idx = m->size_idx;

		heap_chunk_init(heap, hdr, CHUNK_TYPE_FREE, nb.size_idx);
		memblock_rebuild_state(heap, &nb);

		nb = heap_coalesce_huge(heap, &nb);
		nb.m_ops->prep_hdr(&nb, MEMBLOCK_FREE, &ctx);

		operation_process(&ctx);

		bucket_insert_block(defb, &nb);

		*m = nb;
	} else {
		recycler_put(heap->rt->recyclers[c->id], m);
	}

	util_mutex_unlock(lock);

	return empty;
}
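
The emptiness test above relies on the bitmap convention visible later in these snippets (see heap_run_init in Example #30): bits beyond bitmap_nallocs in the last word stay permanently set, so a fully free run has all-zero words except the last, which equals bitmap_lastval. A toy check under that assumption, with made-up block and word counts:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_VALUE 64u

int
main(void)
{
	/* hypothetical run class: 100 allocatable blocks in two bitmap words */
	unsigned nallocs = 100;
	unsigned nval = 2;

	/* bits 36..63 of the last word never map to blocks and stay set */
	uint64_t lastval = UINT64_MAX << (nallocs % BITS_PER_VALUE);

	/* a completely free run under this convention */
	uint64_t bitmap[2] = { 0, lastval };

	/* same shape as the scan in heap_reclaim_run */
	unsigned i;
	for (i = 0; nval > 0 && i < nval - 1; ++i)
		if (bitmap[i] != 0)
			break;

	int empty = (i == nval - 1) && (bitmap[i] == lastval);
	printf("run is %s\n", empty ? "empty (reclaimable)" : "in use");
	return 0;
}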
Example #30
File: heap.c Project: tomaszkapela/nvml
/*
 * heap_chunk_init -- (internal) writes chunk header
 */
static void
heap_chunk_init(struct palloc_heap *heap, struct chunk_header *hdr,
	uint16_t type, uint32_t size_idx)
{
	struct chunk_header nhdr = {
		.type = type,
		.flags = 0,
		.size_idx = size_idx
	};
	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	heap_chunk_write_footer(hdr, size_idx);
}

/*
 * heap_zone_init -- (internal) writes zone's first chunk and header
 */
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
	uint32_t size_idx = get_zone_size_idx(zone_id, heap->rt->max_zone,
			heap->size);

	heap_chunk_init(heap, &z->chunk_headers[0], CHUNK_TYPE_FREE, size_idx);

	struct zone_header nhdr = {
		.size_idx = size_idx,
		.magic = ZONE_HEADER_MAGIC,
	};
	z->header = nhdr;  /* write the entire header (8 bytes) at once */
	pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}

/*
 * heap_run_init -- (internal) creates a run based on a chunk
 */
static void
heap_run_init(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);

	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];
	ASSERTne(m->size_idx, 0);
	size_t runsize = SIZEOF_RUN(run, m->size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->block_size = c->unit_size;
	pmemops_persist(&heap->p_ops, &run->block_size,
			sizeof(run->block_size));

	/* set all the bits */
	memset(run->bitmap, 0xFF, sizeof(run->bitmap));

	unsigned nval = c->run.bitmap_nval;
	ASSERT(nval > 0);
	/* clear only the bits available for allocations from this bucket */
	memset(run->bitmap, 0, sizeof(uint64_t) * (nval - 1));
	run->bitmap[nval - 1] = c->run.bitmap_lastval;

	run->incarnation_claim = heap->run_id;
	VALGRIND_SET_CLEAN(&run->incarnation_claim,
		sizeof(run->incarnation_claim));

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_persist(&heap->p_ops, run->bitmap, sizeof(run->bitmap));

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < m->size_idx; ++i) {
		data_hdr = &z->chunk_headers[m->chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ADD_TO_TX(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
		VALGRIND_REMOVE_FROM_TX(data_hdr, sizeof(*data_hdr));
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[m->chunk_id + 1],
		sizeof(struct chunk_header) * (m->size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[m->chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = header_type_to_flag[c->header_type];
	*hdr = run_hdr;
	VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));
}

/*
 * heap_run_insert -- (internal) inserts and splits a block of memory into a run
 */
static void
heap_run_insert(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m, uint32_t size_idx, uint16_t block_off)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	ASSERT(size_idx <= BITS_PER_VALUE);
	ASSERT(block_off + size_idx <= c->run.bitmap_nallocs);

	uint32_t unit_max = c->run.unit_max;
	struct memory_block nm = *m;
	nm.size_idx = unit_max - (block_off % unit_max);
	nm.block_off = block_off;
	if (nm.size_idx > size_idx)
		nm.size_idx = size_idx;

	do {
		bucket_insert_block(b, &nm);
		ASSERT(nm.size_idx <= UINT16_MAX);
		ASSERT(nm.block_off + nm.size_idx <= UINT16_MAX);
		nm.block_off = (uint16_t)(nm.block_off + (uint16_t)nm.size_idx);
		size_idx -= nm.size_idx;
		nm.size_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);
}
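
The do-while above splits a span into pieces of at most unit_max units, trimming the first piece so that every later piece starts on a unit_max boundary. A standalone trace of that loop with made-up inputs (bucket_insert_block replaced by a printf):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical inputs: values are illustrative only */
	uint32_t unit_max = 8;		/* largest piece the bucket accepts */
	uint32_t size_idx = 21;		/* span to insert, in run units */
	uint16_t block_off = 5;		/* starting bit in the run bitmap */

	/* same splitting loop as heap_run_insert */
	uint32_t cur_idx = unit_max - (block_off % unit_max);
	uint16_t cur_off = block_off;
	if (cur_idx > size_idx)
		cur_idx = size_idx;

	do {
		printf("insert block_off=%u size_idx=%u\n", cur_off, cur_idx);
		cur_off = (uint16_t)(cur_off + cur_idx);
		size_idx -= cur_idx;
		cur_idx = size_idx > unit_max ? unit_max : size_idx;
	} while (size_idx != 0);

	return 0;
}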

/*
 * heap_process_run_metadata -- (internal) parses the run bitmap
 */
static uint32_t
heap_process_run_metadata(struct palloc_heap *heap, struct bucket *b,
	const struct memory_block *m)
{
	struct alloc_class *c = b->aclass;
	ASSERTeq(c->type, CLASS_RUN);

	uint16_t block_off = 0;
	uint16_t block_size_idx = 0;
	uint32_t inserted_blocks = 0;

	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	struct chunk_run *run = (struct chunk_run *)&z->chunks[m->chunk_id];

	for (unsigned i = 0; i < c->run.bitmap_nval; ++i) {
		ASSERT(i < MAX_BITMAP_VALUES);
		uint64_t v = run->bitmap[i];
		ASSERT(BITS_PER_VALUE * i <= UINT16_MAX);
		block_off = (uint16_t)(BITS_PER_VALUE * i);
		if (v == 0) {
			heap_run_insert(heap, b, m, BITS_PER_VALUE, block_off);
			inserted_blocks += BITS_PER_VALUE;
			continue;
		} else if (v == UINT64_MAX) {
			continue;
		}

		for (unsigned j = 0; j < BITS_PER_VALUE; ++j) {
			if (BIT_IS_CLR(v, j)) {
				block_size_idx++;
			} else if (block_size_idx != 0) {
				ASSERT(block_off >= block_size_idx);

				heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
				inserted_blocks += block_size_idx;
				block_size_idx = 0;
			}

			if ((block_off++) == c->run.bitmap_nallocs) {
				i = MAX_BITMAP_VALUES;
				break;
			}
		}

		if (block_size_idx != 0) {
			ASSERT(block_off >= block_size_idx);

			heap_run_insert(heap, b, m,
					block_size_idx,
					(uint16_t)(block_off - block_size_idx));
			inserted_blocks += block_size_idx;
			block_size_idx = 0;
		}
	}

	return inserted_blocks;
}

/*
 * heap_create_run -- (internal) initializes a new run on an existing free chunk
 */
static void
heap_create_run(struct palloc_heap *heap, struct bucket *b,
	struct memory_block *m)
{
	heap_run_init(heap, b, m);
	memblock_rebuild_state(heap, m);
	heap_process_run_metadata(heap, b, m);
}