Example #1
/*
 * pvector_reinit -- reinitializes the pvector runtime data
 */
void
pvector_reinit(struct pvector_context *ctx)
{
	VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx));
	for (size_t n = 1; n < PVECTOR_MAX_ARRAYS; ++n) {
		if (ctx->vec->arrays[n] == 0)
			break;
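		/* array n holds 2^(n + PVECTOR_INIT_SHIFT) entries */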
		size_t arr_size = 1ULL << (n + PVECTOR_INIT_SHIFT);
		uint64_t *arrp = OBJ_OFF_TO_PTR(ctx->pop, ctx->vec->arrays[n]);
		VALGRIND_ANNOTATE_NEW_MEMORY(arrp, sizeof(*arrp) * arr_size);
	}
}
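
All three examples revolve around VALGRIND_ANNOTATE_NEW_MEMORY, which tells
valgrind's drd/helgrind tools to treat a region as freshly allocated and to
discard any stale happens-before history recorded for those addresses. In
PMDK the macro is a wrapper (defined in valgrind_internal.h) that compiles
away when instrumentation is disabled. A minimal sketch of the pattern,
assuming it forwards to ANNOTATE_NEW_MEMORY from valgrind's drd.h (the
VG_DRD_ENABLED guard name is illustrative, not necessarily PMDK's exact
flag):

/* compile-away wrapper sketch; ANNOTATE_NEW_MEMORY is the real client
 * request from <valgrind/drd.h>, the guard macro is an assumption */
#ifdef VG_DRD_ENABLED
#include <valgrind/drd.h>
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size)\
	ANNOTATE_NEW_MEMORY((addr), (size))
#else
#define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size)\
	do { (void) (addr); (void) (size); } while (0)
#endif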
Example #2
/*
 * alloc_prep_block -- (internal) prepares a memory block for allocation
 *
 * Once the block is fully reserved and it's guaranteed that no one else will
 * be able to write to this memory region, it is safe to write the allocation
 * header and call the object construction function.
 *
 * Because at this stage the memory block is only reserved in a transient
 * state, this method does not need to be fail-safe: after a crash the memory
 * simply returns to the collection of free blocks.
 */
static int
alloc_prep_block(struct palloc_heap *heap, const struct memory_block *m,
	palloc_constr constructor, void *arg,
	uint64_t extra_field, uint16_t object_flags,
	uint64_t *offset_value)
{
	void *uptr = m->m_ops->get_user_data(m);
	size_t usize = m->m_ops->get_user_size(m);

	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, uptr, usize);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(uptr, usize);
	VALGRIND_ANNOTATE_NEW_MEMORY(uptr, usize);

	int ret;
	if (constructor != NULL &&
		(ret = constructor(heap->base, uptr, usize, arg)) != 0) {

		/*
		 * If the constructor canceled the allocation, revert the
		 * block back to the free state in the valgrind machinery.
		 */
		VALGRIND_DO_MEMPOOL_FREE(heap->layout, uptr);

		return ret;
	}

	m->m_ops->write_header(m, extra_field, object_flags);

	/*
	 * To avoid determining the user data pointer twice, this method is
	 * also responsible for calculating the offset of the object in the
	 * pool, which is then stored through the offset pointer provided by
	 * the caller.
	 */
	*offset_value = HEAP_PTR_TO_OFF(heap, uptr);

	return 0;
}
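
The VALGRIND_DO_MEMPOOL_ALLOC / VALGRIND_DO_MEMPOOL_FREE pair used above maps
onto memcheck's custom-allocator client requests, which let a pool allocator
report individual blocks so that leaks and invalid accesses are tracked per
block rather than per pool. Below is a minimal, self-contained sketch of that
lifecycle on a toy arena; the arena itself is hypothetical, but the VALGRIND_*
requests are the real <valgrind/memcheck.h> API:

#include <stdlib.h>
#include <valgrind/memcheck.h>

#define ARENA_SIZE 4096

int
main(void)
{
	char *arena = malloc(ARENA_SIZE);

	/* register the arena as a custom pool: no redzones, not zeroed */
	VALGRIND_CREATE_MEMPOOL(arena, 0, 0);

	/* hand out a block; memcheck now tracks it like a malloc'd chunk */
	char *obj = arena;
	VALGRIND_MEMPOOL_ALLOC(arena, obj, 64);
	/* mirror the explicit "contents are undefined" marking used above */
	VALGRIND_MAKE_MEM_UNDEFINED(obj, 64);

	obj[0] = 'x'; /* a write into a tracked, addressable chunk */

	/* a failed constructor would cancel the reservation like this */
	VALGRIND_MEMPOOL_FREE(arena, obj);

	VALGRIND_DESTROY_MEMPOOL(arena);
	free(arena);
	return 0;
}

Run under valgrind --tool=memcheck the reverted block is not reported as
leaked; outside valgrind every one of these requests is a no-op.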
Example #3
/*
 * memblock_huge_init -- initializes a new huge memory block
 */
struct memory_block
memblock_huge_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx)
{
	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct chunk_header nhdr = {
		.type = CHUNK_TYPE_FREE,
		.flags = 0,
		.size_idx = size_idx
	};

	struct chunk_header *hdr = heap_get_chunk_hdr(heap, &m);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(hdr, sizeof(*hdr));
	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	*hdr = nhdr; /* write the entire header (8 bytes) at once */

	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	huge_write_footer(hdr, size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}
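
The inline comment at the header store is the key invariant here: struct
chunk_header occupies exactly 8 bytes, so the whole-struct assignment is
intended to compile to a single aligned 64-bit store, and PMDK relies on such
stores being failure-atomic on persistent memory; a crash leaves either the
old header or the new one, never a torn mix, and pmemops_persist only has to
make that one store durable. A sketch of the idea with an illustrative field
layout (the real definition lives in PMDK's heap_layout.h):

#include <assert.h>
#include <stdint.h>

/* illustrative layout; exactly one machine word, no padding */
struct chunk_header_sketch {
	uint16_t type;
	uint16_t flags;
	uint32_t size_idx;
};

static_assert(sizeof(struct chunk_header_sketch) == 8,
	"header must fit in one failure-atomic 8-byte store");

void
header_store(struct chunk_header_sketch *dst, uint32_t size_idx)
{
	struct chunk_header_sketch nhdr = {
		.type = 1,	/* stand-in for CHUNK_TYPE_FREE */
		.flags = 0,
		.size_idx = size_idx
	};
	/* intended as a single aligned 8-byte store: readers (and
	 * crash recovery) see the old or the new header, never a mix */
	*dst = nhdr;
}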

/*
 * memblock_run_init -- initializes a new run memory block
 */
struct memory_block
memblock_run_init(struct palloc_heap *heap,
	uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx, uint16_t flags,
	uint64_t unit_size, uint64_t alignment)
{
	ASSERTne(size_idx, 0);

	struct memory_block m = MEMORY_BLOCK_NONE;
	m.chunk_id = chunk_id;
	m.zone_id = zone_id;
	m.size_idx = size_idx;
	m.heap = heap;

	struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);

	struct chunk_run *run = heap_get_chunk_run(heap, &m);
	size_t runsize = SIZEOF_RUN(run, size_idx);

	VALGRIND_DO_MAKE_MEM_UNDEFINED(run, runsize);

	/*
	 * The chunk_run and the chunk headers are added to, and later removed
	 * from, the valgrind transaction around the stores below.
	 */
	VALGRIND_ADD_TO_TX(run, runsize);
	run->hdr.block_size = unit_size;
	run->hdr.alignment = alignment;

	struct run_bitmap b;
	memblock_run_bitmap(&size_idx, flags, unit_size, alignment,
		run->content, &b);

	size_t bitmap_size = b.size;

	/* set all the bits */
	memset(b.values, 0xFF, bitmap_size);

	/* clear only the bits available for allocations from this bucket */
	memset(b.values, 0, sizeof(*b.values) * (b.nvalues - 1));

	/* keep only the lowest trailing_bits bits of the last word clear */
	unsigned trailing_bits = b.nbits % RUN_BITS_PER_VALUE;
	uint64_t last_value = UINT64_MAX << trailing_bits;
	b.values[b.nvalues - 1] = last_value;

	VALGRIND_REMOVE_FROM_TX(run, runsize);

	pmemops_flush(&heap->p_ops, run,
		sizeof(struct chunk_run_header) +
		bitmap_size);

	struct chunk_header run_data_hdr;
	run_data_hdr.type = CHUNK_TYPE_RUN_DATA;
	run_data_hdr.flags = 0;

	VALGRIND_ADD_TO_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	struct chunk_header *data_hdr;
	for (unsigned i = 1; i < size_idx; ++i) {
		data_hdr = &z->chunk_headers[chunk_id + i];
		VALGRIND_DO_MAKE_MEM_UNDEFINED(data_hdr, sizeof(*data_hdr));
		VALGRIND_ANNOTATE_NEW_MEMORY(data_hdr, sizeof(*data_hdr));
		run_data_hdr.size_idx = i;
		*data_hdr = run_data_hdr;
	}
	pmemops_persist(&heap->p_ops,
		&z->chunk_headers[chunk_id + 1],
		sizeof(struct chunk_header) * (size_idx - 1));

	struct chunk_header *hdr = &z->chunk_headers[chunk_id];
	ASSERT(hdr->type == CHUNK_TYPE_FREE);

	VALGRIND_ANNOTATE_NEW_MEMORY(hdr, sizeof(*hdr));

	struct chunk_header run_hdr;
	run_hdr.size_idx = hdr->size_idx;
	run_hdr.type = CHUNK_TYPE_RUN;
	run_hdr.flags = flags;
	*hdr = run_hdr;
	pmemops_persist(&heap->p_ops, hdr, sizeof(*hdr));

	VALGRIND_REMOVE_FROM_TX(&z->chunk_headers[chunk_id],
		sizeof(struct chunk_header) * size_idx);

	memblock_rebuild_state(heap, &m);

	return m;
}
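
The bitmap tail handling above deserves a worked example: with
RUN_BITS_PER_VALUE == 64, a run whose bitmap tracks nbits allocation units
only uses nbits % 64 bits of its final word, and UINT64_MAX << trailing_bits
pre-sets the leftover high bits so they can never be handed out as free
units. A self-contained sketch (all names are local to the example):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RUN_BITS_PER_VALUE 64

int
main(void)
{
	/* e.g. a run with 200 allocation units */
	unsigned nbits = 200;

	unsigned trailing_bits = nbits % RUN_BITS_PER_VALUE; /* 200 % 64 = 8 */
	uint64_t last_value = UINT64_MAX << trailing_bits;

	/* bits 0..7 stay clear (allocatable), bits 8..63 are pre-set so
	 * the unused tail of the word is never considered free */
	printf("trailing_bits = %u\n", trailing_bits);
	printf("last_value    = 0x%016" PRIx64 "\n", last_value);
	return 0;
}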