Example #1
/*
 * huge_reinit_chunk -- chunk reinitialization on first zone traversal
 */
static void
huge_reinit_chunk(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	if (hdr->type == CHUNK_TYPE_USED)
		huge_write_footer(hdr, hdr->size_idx);
}
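huge_write_footer is referenced here but not included in these listings. A minimal sketch of what it and the 8-byte chunk header might look like, inferred from how the examples below use type, flags and size_idx (the layout and footer logic are assumptions, not taken from this code):

struct chunk_header {
	uint16_t type;		/* CHUNK_TYPE_* */
	uint16_t flags;		/* CHUNK_FLAG_* */
	uint32_t size_idx;	/* length of the chunk group, in chunks */
};

/*
 * huge_write_footer -- (sketch) writes a footer header at the end of a chunk
 *	group so the block can be located when traversing the zone backwards;
 *	footers are transient and rebuilt at heap boot.
 */
static void
huge_write_footer(struct chunk_header *hdr, uint32_t size_idx)
{
	if (size_idx == 1) /* single chunks need no footer */
		return;

	struct chunk_header f = *hdr;
	f.type = CHUNK_TYPE_FOOTER;
	f.size_idx = size_idx;
	*(hdr + size_idx - 1) = f; /* not persisted, recreated at boot */
}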
Example #2
/*
 * huge_vg_init -- initializes chunk metadata in memcheck state
 */
static void
huge_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk *chunk = heap_get_chunk(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/*
	 * Mark unused chunk headers as not accessible.
	 */
	VALGRIND_DO_MAKE_MEM_NOACCESS(
		&z->chunk_headers[m->chunk_id + 1],
		(m->size_idx - 1) *
		sizeof(struct chunk_header));

	size_t size = block_get_real_size(m);
	VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);

	if (objects && huge_get_state(m) == MEMBLOCK_ALLOCATED) {
		if (cb(m, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
Example #3
/*
 * huge_iterate_used -- calls cb on memory block if it's used
 */
static int
huge_iterate_used(const struct memory_block *m, object_callback cb, void *arg)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	return hdr->type == CHUNK_TYPE_USED ? cb(m, arg) : 0;
}
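The object_callback type used by the iteration and memcheck-init examples is not defined in these listings; presumably it is a per-object callback along these lines (a sketch):

/* hypothetical: invoked once per live object, non-zero return stops iteration */
typedef int (*object_callback)(const struct memory_block *m, void *arg);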
Example #4
/*
 * huge_prep_operation_hdr -- prepares the new value of a chunk header that will
 *	be set after the operation concludes.
 */
static void
huge_prep_operation_hdr(const struct memory_block *m, enum memblock_state op,
	struct operation_context *ctx)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	/*
	 * Depending on the operation that needs to be performed, a new chunk
	 * header needs to be prepared with the new chunk state.
	 */
	uint64_t val = chunk_get_chunk_hdr_value(
		op == MEMBLOCK_ALLOCATED ? CHUNK_TYPE_USED : CHUNK_TYPE_FREE,
		hdr->flags,
		m->size_idx);

	if (ctx == NULL) {
		util_atomic_store_explicit64((uint64_t *)hdr, val,
			memory_order_relaxed);
		pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
	} else {
		operation_add_entry(ctx, hdr, val, ULOG_OPERATION_SET);
	}

	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr + 1,
		(hdr->size_idx - 1) * sizeof(struct chunk_header));

	/*
	 * In the case of chunks larger than one unit the footer must be
	 * created immediately AFTER the persistent state is safely updated.
	 */
	if (m->size_idx == 1)
		return;

	struct chunk_header *footer = hdr + m->size_idx - 1;
	VALGRIND_DO_MAKE_MEM_UNDEFINED(footer, sizeof(*footer));

	val = chunk_get_chunk_hdr_value(CHUNK_TYPE_FOOTER, 0, m->size_idx);

	/*
	 * It's only safe to write the footer AFTER the persistent part of
	 * the operation has been successfully processed, because the footer
	 * pointer might point to a currently valid persistent state
	 * of a different chunk.
	 * The footer entry change is transient because the footer will
	 * be recreated at heap boot regardless - it's only needed for runtime
	 * operations.
	 */
	if (ctx == NULL) {
		util_atomic_store_explicit64((uint64_t *)footer, val,
			memory_order_relaxed);
		VALGRIND_SET_CLEAN(footer, sizeof(*footer));
	} else {
		operation_add_typed_entry(ctx,
			footer, val, ULOG_OPERATION_SET, LOG_TRANSIENT);
	}
}
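The single relaxed 8-byte store above works only because chunk_get_chunk_hdr_value packs an entire chunk_header into one uint64_t. A plausible sketch of that helper, assuming the three-field header layout used throughout these examples:

static uint64_t
chunk_get_chunk_hdr_value(uint16_t type, uint16_t flags, uint32_t size_idx)
{
	uint64_t val;
	struct chunk_header hdr;

	/* the header must be exactly 8 bytes for the atomic store to be valid */
	hdr.type = type;
	hdr.flags = flags;
	hdr.size_idx = size_idx;
	memcpy(&val, &hdr, sizeof(val)); /* requires <string.h> */

	return val;
}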
Example #5
/*
 * run_ensure_header_type -- runs must be created with appropriate header type.
 */
static void
run_ensure_header_type(const struct memory_block *m,
	enum header_type t)
{
#ifdef DEBUG
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	ASSERTeq(hdr->type, CHUNK_TYPE_RUN);
	ASSERT((hdr->flags & header_type_to_flag[t]) == header_type_to_flag[t]);
#endif
}
Example #6
/*
 * run_get_real_data -- returns a pointer to the beginning of a run block's data
 */
static void *
run_get_real_data(const struct memory_block *m)
{
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	ASSERT(run->hdr.block_size != 0);

	return run_get_data_start(m) +
		(run->hdr.block_size * m->block_off);
}
Example #7
/*
 * run_get_bitmap -- initializes run bitmap information
 */
static void
run_get_bitmap(const struct memory_block *m, struct run_bitmap *b)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	uint32_t size_idx = hdr->size_idx;
	memblock_run_bitmap(&size_idx, hdr->flags, run->hdr.block_size,
		run->hdr.alignment, run->content, b);
	ASSERTeq(size_idx, hdr->size_idx);
}
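struct run_bitmap itself is not shown; its fields, as implied by how b.size, b.nbits, b.nvalues and b.values are used in the examples in this section, are roughly:

struct run_bitmap {
	unsigned nvalues;	/* number of 8-byte values in the bitmap */
	unsigned nbits;		/* number of usable (valid) bits */
	size_t size;		/* total size of the bitmap in bytes */
	uint64_t *values;	/* pointer to the bitmap values array */
};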
Example #8
/*
 * huge_get_state -- returns whether a huge block is allocated or not
 */
static enum memblock_state
huge_get_state(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	if (hdr->type == CHUNK_TYPE_USED)
		return MEMBLOCK_ALLOCATED;

	if (hdr->type == CHUNK_TYPE_FREE)
		return MEMBLOCK_FREE;

	return MEMBLOCK_STATE_UNKNOWN;
}
Example #9
/*
 * memblock_header_type -- determines the memory block's header type
 */
static enum header_type
memblock_header_type(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);

	if (hdr->flags & CHUNK_FLAG_COMPACT_HEADER)
		return HEADER_COMPACT;

	if (hdr->flags & CHUNK_FLAG_HEADER_NONE)
		return HEADER_NONE;

	return HEADER_LEGACY;
}
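memblock_header_type and the *_ensure_header_type functions rely on per-header-type lookup tables that are not part of these listings. A sketch consistent with that usage (the concrete sizes are assumptions):

/* hypothetical lookup tables, indexed by enum header_type */
static const uint16_t header_type_to_flag[] = {
	[HEADER_LEGACY] = 0,
	[HEADER_COMPACT] = CHUNK_FLAG_COMPACT_HEADER,
	[HEADER_NONE] = CHUNK_FLAG_HEADER_NONE,
};

static const size_t header_type_to_size[] = {
	[HEADER_LEGACY] = 64,	/* full legacy allocation header */
	[HEADER_COMPACT] = 16,	/* compact allocation header */
	[HEADER_NONE] = 0,	/* no per-object header */
};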
Example #10
/*
 * huge_ensure_header_type -- checks the header type of a chunk and modifies
 *	it if necessary. This is fail-safe atomic.
 */
static void
huge_ensure_header_type(const struct memory_block *m,
	enum header_type t)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	ASSERTeq(hdr->type, CHUNK_TYPE_FREE);

	if ((hdr->flags & header_type_to_flag[t]) == 0) {
		VALGRIND_ADD_TO_TX(hdr, sizeof(*hdr));
		uint16_t f = ((uint16_t)header_type_to_flag[t]);
		hdr->flags |= f;
		pmemops_persist(&m->heap->p_ops, hdr, sizeof(*hdr));
		VALGRIND_REMOVE_FROM_TX(hdr, sizeof(*hdr));
	}
}
Example #11
/*
 * memblock_from_offset_opt -- resolves a memory block from an offset that
 *	originates from the heap
 */
struct memory_block
memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.heap = heap;

	off -= HEAP_PTR_TO_OFF(heap, &heap->layout->zone0);
	m.zone_id = (uint32_t)(off / ZONE_MAX_SIZE);

	off -= (ZONE_MAX_SIZE * m.zone_id) + sizeof(struct zone);
	m.chunk_id = (uint32_t)(off / CHUNKSIZE);

	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);

	if (hdr->type == CHUNK_TYPE_RUN_DATA)
		m.chunk_id -= hdr->size_idx;

	off -= CHUNKSIZE * m.chunk_id;

	m.header_type = memblock_header_type(&m);

	off -= header_type_to_size[m.header_type];

	m.type = off != 0 ? MEMORY_BLOCK_RUN : MEMORY_BLOCK_HUGE;
	ASSERTeq(memblock_detect_type(heap, &m), m.type);

	m.m_ops = &mb_ops[m.type];

	uint64_t unit_size = m.m_ops->block_size(&m);

	if (off != 0) { /* run */
		struct chunk_run *run = heap_get_chunk_run(heap, &m);

		off -= run_get_alignment_padding(hdr, run, m.header_type);
		off -= RUN_METASIZE;
		m.block_off = (uint16_t)(off / unit_size);
		off -= m.block_off * unit_size;
	}

	m.size_idx = !size ? 0 : CALC_SIZE_IDX(unit_size,
		memblock_header_ops[m.header_type].get_size(&m));

	ASSERTeq(off, 0);

	return m;
}
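Callers that want the size index resolved would presumably go through a thin wrapper rather than passing the size flag themselves; a sketch (the wrapper name is taken from the comment above and is an assumption):

struct memory_block
memblock_from_offset(struct palloc_heap *heap, uint64_t off)
{
	return memblock_from_offset_opt(heap, off, 1);
}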
Example #12
/*
 * memblock_detect_type -- looks for the corresponding chunk header and
 *	depending on the chunk's type returns the right memory block type
 */
static enum memory_block_type
memblock_detect_type(struct palloc_heap *heap, const struct memory_block *m)
{
	enum memory_block_type ret;

	switch (heap_get_chunk_hdr(heap, m)->type) {
		case CHUNK_TYPE_RUN:
		case CHUNK_TYPE_RUN_DATA:
			ret = MEMORY_BLOCK_RUN;
			break;
		case CHUNK_TYPE_FREE:
		case CHUNK_TYPE_USED:
		case CHUNK_TYPE_FOOTER:
			ret = MEMORY_BLOCK_HUGE;
			break;
		default:
			/* unreachable */
			FATAL("possible zone chunks metadata corruption");
	}
	return ret;
}
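The chunk types switched over here come from the on-media layout; a plausible definition, consistent with the distinctions drawn in these examples (the exact ordering and the UNKNOWN sentinel are assumptions):

enum chunk_type {
	CHUNK_TYPE_UNKNOWN,
	CHUNK_TYPE_FOOTER,	/* runtime-only marker, rebuilt at boot */
	CHUNK_TYPE_FREE,
	CHUNK_TYPE_USED,
	CHUNK_TYPE_RUN,
	CHUNK_TYPE_RUN_DATA,

	MAX_CHUNK_TYPE
};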
Example #13
/*
 * run_get_data_start -- (internal) returns the pointer to the beginning of
 *	allocations in a run
 */
static char *
run_get_data_start(const struct memory_block *m)
{
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	if (hdr->flags & CHUNK_FLAG_ALIGNED) {
		/*
		 * Alignment is a property of the user data in allocations. And
		 * since objects have headers, we need to take them into
		 * account when calculating the address.
		 */
		uintptr_t hsize = header_type_to_size[m->header_type];
		uintptr_t base = (uintptr_t)run->content +
			b.size + hsize;
		return (char *)(ALIGN_UP(base, run->hdr.alignment) - hsize);
	} else {
		return (char *)&run->content + b.size;
	}
}
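ALIGN_UP is not defined in these listings; for the power-of-two alignments used here it is presumably the usual rounding macro:

/* round 'size' up to the nearest multiple of 'align' (a power of two) */
#define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1))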
Example #14
/*
 * run_vg_init -- initializes run metadata in memcheck state
 */
static void
run_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/* set the run metadata as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, RUN_BASE_METADATA_SIZE);

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	/*
	 * Mark run data headers as defined.
	 */
	for (unsigned j = 1; j < m->size_idx; ++j) {
		struct chunk_header *data_hdr =
			&z->chunk_headers[m->chunk_id + j];
		VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
			sizeof(struct chunk_header));
		ASSERTeq(data_hdr->type, CHUNK_TYPE_RUN_DATA);
	}

	VALGRIND_DO_MAKE_MEM_NOACCESS(run, SIZEOF_RUN(run, m->size_idx));

	/* set the run bitmap as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, b.size + RUN_BASE_METADATA_SIZE);

	if (objects) {
		if (run_iterate_used(m, cb, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
Example #15
/*
 * memblock_huge_init -- initializes a new huge memory block
 */
struct memory_block
memblock_huge_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct chunk_header nhdr = {
		.type = CHUNK_TYPE_FREE,
		.flags = 0,
		.size_idx = size_idx
	};

	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));
	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	huge_write_footer(hdr, size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}

/*
 * memblock_run_init -- initializes a new run memory block
 */
struct memory_block
memblock_run_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment)
{
	ASSERTne(size_idx, 0);

	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	struct chunk_run *run = heap_get_chunk_run(heap, &m);
	size_t runsize = SIZEOF_RUN(run, size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/* add/remove chunk_run and chunk_header to valgrind transaction */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->hdr.block_size = unit_size;
	run->hdr.alignment = alignment;

	struct run_bitmap b;
	memblock_run_bitmap(&size_idx, flags, unit_size, alignment,
		run->content, &b);

	size_t bitmap_size = b.size;

	/* set all the bits */
	memset(b.values, 0xFF, bitmap_size);

	/* clear only the bits available for allocations from this bucket */
	memset(b.values, 0, sizeof(*b.values) * (b.nvalues - 1));

	unsigned trailing_bits = b.nbits % RUN_BITS_PER_VALUE;
	uint64_t last_value = UINT64_MAX << trailing_bits;
	b.values[b.nvalues - 1] = last_value;

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_flush(&heap->p_ops, run,
		sizeof(struct chunk_run_header) +
		bitmap_size);

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	VALGRIND_ADD_TO_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < size_idx; ++i) {
		data_hdr = &z->chunk_headers[chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ANNOTATE_NEW_MEMORY(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[chunk_id + 1],
		sizeof(struct chunk_header) * (size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = flags;
	*hdr = run_hdr;
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	VALGRIND_REMOVE_FROM_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}
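Both constructors finish with memblock_rebuild_state, which is not shown here. Given the volatile fields that memblock_from_offset_opt fills in, a plausible sketch is:

/*
 * memblock_rebuild_state -- (sketch) fills in the runtime (volatile) fields
 *	of a memory block based on its persistent chunk header
 */
void
memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m)
{
	m->header_type = memblock_header_type(m);
	m->type = memblock_detect_type(heap, m);
	m->m_ops = &mb_ops[m->type];
}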