Example #1
/*
 * huge_vg_init -- initializes chunk metadata in memcheck state
 */
static void
huge_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk *chunk = heap_get_chunk(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/*
	 * Mark unused chunk headers as not accessible.
	 */
	VALGRIND_DO_MAKE_MEM_NOACCESS(
		&z->chunk_headers[m->chunk_id + 1],
		(m->size_idx - 1) *
		sizeof(struct chunk_header));

	size_t size = block_get_real_size(m);
	VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);

	if (objects && huge_get_state(m) == MEMBLOCK_ALLOCATED) {
		if (cb(m, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
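Note: the VALGRIND_DO_MAKE_MEM_* calls above are NVML's internal wrappers (valgrind_internal.h) around the raw memcheck client requests; they compile to no-ops when instrumentation is disabled. Below is a minimal standalone sketch of the underlying requests, with an illustrative buffer and sizes not taken from the example:

#include <stdlib.h>
#include <valgrind/memcheck.h>

int
main(void)
{
	char *buf = malloc(64);
	if (buf == NULL)
		return 1;

	/* declare the first half initialized, as if its contents were
	 * recovered metadata */
	VALGRIND_MAKE_MEM_DEFINED(buf, 32);

	/* declare the second half off-limits, like the unused chunk
	 * headers above */
	VALGRIND_MAKE_MEM_NOACCESS(buf + 32, 32);

	buf[40] = 1;	/* under valgrind, memcheck reports this write */

	/* lift the ban before the buffer is legitimately reused */
	VALGRIND_MAKE_MEM_UNDEFINED(buf + 32, 32);
	free(buf);
	return 0;
}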
Example #2
File: obj.c Project: jxy859/nvml
/*
 * pmemobj_vg_register_object -- (internal) notify Valgrind about object
 */
static void
pmemobj_vg_register_object(struct pmemobjpool *pop, PMEMoid oid, int is_root)
{
	LOG(4, "pop %p oid.off 0x%016jx is_root %d", pop, oid.off, is_root);
	void *addr = pmemobj_direct(oid);

	size_t sz;
	if (is_root)
		sz = pmemobj_root_size(pop);
	else
		sz = pmemobj_alloc_usable_size(oid);

	size_t headers = sizeof (struct allocation_header) + OBJ_OOB_SIZE;

	VALGRIND_DO_MEMPOOL_ALLOC(pop, addr, sz);
	VALGRIND_DO_MAKE_MEM_DEFINED(pop, (char *)addr - headers, sz + headers);

	struct oob_header *oob = OOB_HEADER_FROM_PTR(addr);

	if (!is_root)
		/* no one should touch it */
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oob->size,
				sizeof (oob->size));

	/* no one should touch it */
	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &oob->data.padding,
			sizeof (oob->data.padding));
}
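VALGRIND_DO_MEMPOOL_ALLOC wraps Valgrind's memory-pool client requests, which let a custom allocator get malloc-style tracking (bounds and leak checking) for chunks it carves out itself. A sketch of the raw requests on a hypothetical static arena, with illustrative names and sizes:

#include <stddef.h>
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

static char arena[4096];
static size_t arena_off;

static void
arena_init(void)
{
	/* the arena's address names the pool; no red zones, not zeroed */
	VALGRIND_CREATE_MEMPOOL(arena, 0, 0);
	VALGRIND_MAKE_MEM_NOACCESS(arena, sizeof(arena));
}

static void *
arena_alloc(size_t size)
{
	if (arena_off + size > sizeof(arena))
		return NULL;

	void *p = &arena[arena_off];
	arena_off += size;

	/* register the chunk so memcheck tracks its bounds and leaks;
	 * it becomes addressable but its contents stay undefined */
	VALGRIND_MEMPOOL_ALLOC(arena, p, size);
	return p;
}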
Example #3
/*
 * memblock_header_compact_reinit --
 *	(internal) reinitializes a compact header after a heap restart
 */
static void
memblock_header_compact_reinit(const struct memory_block *m)
{
	struct allocation_header_compact *hdr = m->m_ops->get_real_data(m);

	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
}
Example #4
/*
 * rpmem_fip_read -- perform read operation
 */
int
rpmem_fip_read(struct rpmem_fip *fip, void *buff, size_t len, size_t off)
{
	RPMEM_ASSERT(!rpmem_fip_lane_busy(&fip->rd_lane.lane));

	int ret;
	size_t rd = 0;
	uint8_t *cbuff = buff;
	while (rd < len) {
		rpmem_fip_lane_begin(&fip->rd_lane.lane, FI_READ);

		size_t rd_len = len - rd < RPMEM_RD_BUFF_SIZE ?
				len - rd : RPMEM_RD_BUFF_SIZE;
		size_t rd_off = off + rd;
		uint64_t raddr = fip->raddr + rd_off;

		ret = rpmem_fip_readmsg(fip->ep, &fip->rd_lane.read,
				fip->rd_buff, rd_len, raddr);
		if (ret) {
			ERR("error when posting read request");
			errno = ret;
			return -1;
		}

		/* the buffer is written by the NIC, invisibly to memcheck */
		VALGRIND_DO_MAKE_MEM_DEFINED(fip->rd_buff, rd_len);

		ret = rpmem_fip_lane_wait(&fip->rd_lane.lane, FI_READ);
		if (ret) {
			ERR("error when processing read request");
			errno = ret;
			return -1;
		}

		memcpy(&cbuff[rd], fip->rd_buff, rd_len);

		rd += rd_len;
	}

	return 0;
}
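The VALGRIND_DO_MAKE_MEM_DEFINED call in the loop above is needed because the read buffer is filled by the NIC rather than by CPU stores that memcheck can observe. The same pattern applies to any hardware-written buffer; a sketch with a hypothetical device API (hw_read is assumed, not part of any real library):

#include <stddef.h>
#include <valgrind/memcheck.h>

/* hypothetical: asks a device to DMA `len` bytes into `buf` */
extern int hw_read(void *buf, size_t len);

static int
dma_read(void *buf, size_t len)
{
	int ret = hw_read(buf, len);

	/* the store was performed by hardware, so without this memcheck
	 * would treat `buf` as uninitialized on the next read */
	if (ret == 0)
		VALGRIND_MAKE_MEM_DEFINED(buf, len);

	return ret;
}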
Example #5
/*
 * palloc_vg_register_object -- registers object in Valgrind
 */
void
palloc_vg_register_object(struct palloc_heap *heap, void *addr, size_t size)
{
	size_t headers = sizeof(struct allocation_header) + PALLOC_DATA_OFF;

	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, addr, size);
	VALGRIND_DO_MAKE_MEM_DEFINED((char *)addr - headers, size + headers);
}
Example #6
/*
 * memblock_header_legacy_reinit --
 *	(internal) reinitializes a legacy header after a heap restart
 */
static void
memblock_header_legacy_reinit(const struct memory_block *m)
{
	struct allocation_header_legacy *hdr = m->m_ops->get_real_data(m);

	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/* unused fields of the legacy headers are used as a red zone */
	VALGRIND_DO_MAKE_MEM_NOACCESS(hdr->unused, sizeof(hdr->unused));
}
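Marking a field NOACCESS turns it into a red zone: the bytes stay in place, but any later load or store trips a memcheck error, which catches header overwrites early. A standalone sketch with an illustrative header layout:

#include <valgrind/memcheck.h>

struct legacy_hdr {
	unsigned long size;
	char unused[48];	/* illustrative layout, not the real header */
};

static void
hdr_protect(struct legacy_hdr *h)
{
	/* from here on, any access to `unused` is reported by memcheck
	 * as an invalid read or write, i.e. a header overwrite */
	VALGRIND_MAKE_MEM_NOACCESS(h->unused, sizeof(h->unused));
}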
Example #7
/*
 * palloc_vg_register_alloc -- (internal) registers allocation header
 * in Valgrind
 */
static int
palloc_vg_register_alloc(const struct memory_block *m, void *arg)
{
	struct palloc_heap *heap = arg;

	m->m_ops->reinit_header(m);

	void *uptr = m->m_ops->get_user_data(m);
	size_t usize = m->m_ops->get_user_size(m);
	VALGRIND_DO_MEMPOOL_ALLOC(heap->layout, uptr, usize);
	VALGRIND_DO_MAKE_MEM_DEFINED(uptr, usize);

	return 0;
}
Example #8
/*
 * run_vg_init -- initializes run metadata in memcheck state
 */
static void
run_vg_init(const struct memory_block *m, int objects,
	object_callback cb, void *arg)
{
	struct zone *z = ZID_TO_ZONE(m->heap->layout, m->zone_id);
	struct chunk_header *hdr = heap_get_chunk_hdr(m->heap, m);
	struct chunk_run *run = heap_get_chunk_run(m->heap, m);
	VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

	/* set the run metadata as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, RUN_BASE_METADATA_SIZE);

	struct run_bitmap b;
	run_get_bitmap(m, &b);

	/*
	 * Mark run data headers as defined.
	 */
	for (unsigned j = 1; j < m->size_idx; ++j) {
		struct chunk_header *data_hdr =
			&z->chunk_headers[m->chunk_id + j];
		VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
			sizeof(struct chunk_header));
		ASSERTeq(data_hdr->type, CHUNK_TYPE_RUN_DATA);
	}

	VALGRIND_DO_MAKE_MEM_NOACCESS(run, SIZEOF_RUN(run, m->size_idx));

	/* set the run bitmap as defined */
	VALGRIND_DO_MAKE_MEM_DEFINED(run, b.size + RUN_BASE_METADATA_SIZE);

	if (objects) {
		if (run_iterate_used(m, cb, arg) != 0)
			FATAL("failed to initialize valgrind state");
	}
}
Example #9
/*
 * container_ravl_insert_block -- (internal) inserts a new memory block
 *	into the container
 */
static int
container_ravl_insert_block(struct block_container *bc,
	const struct memory_block *m)
{
	struct block_container_ravl *c =
		(struct block_container_ravl *)bc;

	struct memory_block *e = m->m_ops->get_user_data(m);
	VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e));
	VALGRIND_ADD_TO_TX(e, sizeof(*e));
	*e = *m;
	VALGRIND_SET_CLEAN(e, sizeof(*e));
	VALGRIND_REMOVE_FROM_TX(e, sizeof(*e));

	return ravl_insert(c->tree, e);
}
Example #10
/*
 * heap_vg_open_chunk -- (internal) notifies Valgrind about chunk layout
 */
static void
heap_vg_open_chunk(struct palloc_heap *heap,
	object_callback cb, void *arg, int objects,
	struct memory_block *m)
{
	struct zone *z = ZID_TO_ZONE(heap->layout, m->zone_id);
	void *chunk = &z->chunks[m->chunk_id];
	memblock_rebuild_state(heap, m);

	if (m->type == MEMORY_BLOCK_RUN) {
		struct chunk_run *run = chunk;

		ASSERTne(m->size_idx, 0);
		VALGRIND_DO_MAKE_MEM_NOACCESS(run,
			SIZEOF_RUN(run, m->size_idx));

		/* set the run metadata as defined */
		VALGRIND_DO_MAKE_MEM_DEFINED(run,
			sizeof(*run) - sizeof(run->data));

		if (objects) {
			int ret = heap_run_foreach_object(heap, cb, arg, m,
				alloc_class_get_create_by_unit_size(
					heap->rt->alloc_classes,
					m->m_ops->block_size(m)));
			ASSERTeq(ret, 0);
		}
	} else {
		size_t size = m->m_ops->get_real_size(m);
		VALGRIND_DO_MAKE_MEM_NOACCESS(chunk, size);

		if (objects && m->m_ops->get_state(m) == MEMBLOCK_ALLOCATED) {
			int ret = cb(m, arg);
			ASSERTeq(ret, 0);
		}
	}
}
Example #11
/*
 * rpmem_fip_process_gpspm -- (internal) process completion queue entry for
 * GPSPM
 */
static int
rpmem_fip_process_gpspm(struct rpmem_fip *fip, void *context, uint64_t flags)
{
	if (flags & FI_RECV) {
		/* RECV completion */
		struct rpmem_fip_msg *resp = context;
		struct rpmem_msg_persist_resp *msg_resp =
			rpmem_fip_msg_get_pres(resp);
		VALGRIND_DO_MAKE_MEM_DEFINED(msg_resp, sizeof(*msg_resp));

		if (unlikely(msg_resp->lane >= fip->nlanes)) {
			RPMEM_LOG(ERR, "lane number received (%lu) is greater "
					"than maximum lane number (%u)",
					msg_resp->lane, fip->nlanes - 1);
			return -1;
		}

		struct rpmem_fip_lane *lanep =
			&fip->lanes.gpspm[msg_resp->lane].lane;

		/* post RECV buffer immediately */
		int ret = rpmem_fip_gpspm_post_resp(fip, resp);
		if (unlikely(ret))
			RPMEM_FI_ERR((int)ret, "MSG send");

		rpmem_fip_lane_sigret(lanep, flags, ret);

		return ret;
	}

	struct rpmem_fip_lane *lanep = context;

	/* SEND completion */
	rpmem_fip_lane_signal(lanep, flags);

	return 0;
}
Example #12
/*
 * heap_vg_open -- notifies Valgrind about heap layout
 */
void
heap_vg_open(struct palloc_heap *heap, object_callback cb,
	void *arg, int objects)
{
	ASSERTne(cb, NULL);
	VALGRIND_DO_MAKE_MEM_UNDEFINED(heap->layout, heap->size);

	struct heap_layout *layout = heap->layout;

	VALGRIND_DO_MAKE_MEM_DEFINED(&layout->header, sizeof(layout->header));

	unsigned zones = heap_max_zone(heap->size);

	struct memory_block m = MEMORY_BLOCK_NONE;
	for (unsigned i = 0; i < zones; ++i) {
		struct zone *z = ZID_TO_ZONE(layout, i);
		uint32_t chunks;
		m.zone_id = i;
		m.chunk_id = 0;

		VALGRIND_DO_MAKE_MEM_DEFINED(&z->header, sizeof(z->header));

		if (z->header.magic != ZONE_HEADER_MAGIC)
			continue;

		chunks = z->header.size_idx;

		for (uint32_t c = 0; c < chunks; ) {
			struct chunk_header *hdr = &z->chunk_headers[c];
			m.chunk_id = c;

			VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));

			m.size_idx = hdr->size_idx;
			heap_vg_open_chunk(heap, cb, arg, objects, &m);
			m.block_off = 0;

			ASSERT(hdr->size_idx > 0);

			if (hdr->type == CHUNK_TYPE_RUN) {
				/*
				 * Mark run data headers as defined.
				 */
				for (unsigned j = 1; j < hdr->size_idx; ++j) {
					struct chunk_header *data_hdr =
						&z->chunk_headers[c + j];
					VALGRIND_DO_MAKE_MEM_DEFINED(data_hdr,
						sizeof(struct chunk_header));
					ASSERTeq(data_hdr->type,
						CHUNK_TYPE_RUN_DATA);
				}
			} else {
				/*
				 * Mark unused chunk headers as not accessible.
				 */
				VALGRIND_DO_MAKE_MEM_NOACCESS(
					&z->chunk_headers[c + 1],
					(hdr->size_idx - 1) *
					sizeof(struct chunk_header));
			}

			c += hdr->size_idx;
		}

		/* mark all unused chunk headers after last as not accessible */
		VALGRIND_DO_MAKE_MEM_NOACCESS(&z->chunk_headers[chunks],
			(MAX_CHUNK - chunks) * sizeof(struct chunk_header));
	}
}
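The overall shape of heap_vg_open is narrow-down validation: declare the whole mapped pool undefined, then walk the on-media metadata and re-declare exactly the regions it proves valid. The same shape on a toy layout (the header fields and magic value are illustrative, not from the heap above):

#include <stddef.h>
#include <valgrind/memcheck.h>

struct toy_hdr {
	unsigned magic;
	unsigned len;	/* valid payload bytes after the header */
};

static void
toy_vg_open(void *base, size_t size)
{
	if (size < sizeof(struct toy_hdr))
		return;

	/* start from "nothing is trustworthy" ... */
	VALGRIND_MAKE_MEM_UNDEFINED(base, size);

	/* ... then re-admit only what the metadata vouches for */
	struct toy_hdr *h = base;
	VALGRIND_MAKE_MEM_DEFINED(h, sizeof(*h));

	if (h->magic != 0xc0ffee || h->len > size - sizeof(*h))
		return;

	VALGRIND_MAKE_MEM_DEFINED(h + 1, h->len);
	VALGRIND_MAKE_MEM_NOACCESS((char *)(h + 1) + h->len,
		size - sizeof(*h) - h->len);
}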
Example #13
/*
 * obj_realloc_common -- (internal) common routine for resizing
 *                          existing objects
 */
static int
obj_realloc_common(PMEMobjpool *pop, struct object_store *store,
	PMEMoid *oidp, size_t size, type_num_t type_num, int zero_init)
{

	/* if OID is NULL just allocate memory */
	if (OBJ_OID_IS_NULL(*oidp)) {
		/* if size is 0 - do nothing */
		if (size == 0)
			return 0;

		return obj_alloc_construct(pop, oidp, size, type_num,
				zero_init, NULL, NULL);
	}

	if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
		ERR("requested size too large");
		errno = ENOMEM;
		return -1;
	}

	/* if size is 0 just free */
	if (size == 0) {
		obj_free(pop, oidp);
		return 0;
	}

	struct carg_realloc carg;
	carg.ptr = OBJ_OFF_TO_PTR(pop, oidp->off);
	carg.new_size = size;
	carg.old_size = pmemobj_alloc_usable_size(*oidp);
	carg.user_type = type_num;
	carg.constructor = NULL;
	carg.arg = NULL;
	carg.zero_init = zero_init;

	struct oob_header *pobj = OOB_HEADER_FROM_OID(pop, *oidp);
	type_num_t user_type_old = pobj->data.user_type;

	/* callers should have checked this */
	ASSERT(type_num < PMEMOBJ_NUM_OID_TYPES);
	ASSERT(user_type_old < PMEMOBJ_NUM_OID_TYPES);

	struct list_head *lhead_old = &store->bytype[user_type_old].head;
	if (type_num == user_type_old) {
		int ret = list_realloc(pop, lhead_old, 0, NULL, size,
				constructor_realloc, &carg, 0, 0, oidp);
		if (ret)
			LOG(2, "list_realloc failed");

		/* oidp could be different, so we need to get the ptr again */
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop,
			&OOB_HEADER_FROM_OID(pop, *oidp)->data.padding,
			sizeof (OOB_HEADER_FROM_OID(pop, *oidp)->data.padding));

		return ret;
	} else {
		struct list_head *lhead_new = &store->bytype[type_num].head;

		/*
		 * Header padding doubles as a red zone to check for header
		 * overwrites. Disable it temporarily so we can modify the type
		 * number.
		 */
		VALGRIND_DO_MAKE_MEM_DEFINED(pop,
			&OOB_HEADER_FROM_OID(pop, *oidp)->data.padding,
			sizeof (OOB_HEADER_FROM_OID(pop, *oidp)->data.padding));

		/*
		 * Redo log updates 8 byte entries, so we have to prepare
		 * full 8-byte value even if we want to update smaller field
		 * (here: user_type).
		 */
		struct oob_header_data d = pobj->data;
		d.user_type = type_num;

		uint64_t data_offset = OOB_OFFSET_OF(*oidp, data);

		int ret = list_realloc_move(pop, lhead_old, lhead_new, 0, NULL,
				size, constructor_realloc, &carg, data_offset,
				*((uint64_t *)&d), oidp);
		if (ret)
			LOG(2, "list_realloc_move failed");

		/* oidp could be different, so we need to get the ptr again */
		VALGRIND_DO_MAKE_MEM_NOACCESS(pop,
			&OOB_HEADER_FROM_OID(pop, *oidp)->data.padding,
			sizeof (OOB_HEADER_FROM_OID(pop, *oidp)->data.padding));

		return ret;
	}
}
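The DEFINED ... NOACCESS bracket around the type-number update is the red-zone toggle in miniature: the redo log can only store aligned 8-byte words, so the write legitimately overlaps the padding, and the red zone has to be lifted for its duration. A standalone sketch of the pattern (the header layout and full-word update are illustrative):

#include <stdint.h>
#include <string.h>
#include <valgrind/memcheck.h>

struct obj_hdr {
	uint32_t user_type;
	char padding[4];	/* red zone right after the field */
};

static void
hdr_set_type(struct obj_hdr *h, uint32_t type)
{
	/* the full-word store below overlaps the red zone; lift it */
	VALGRIND_MAKE_MEM_DEFINED(h->padding, sizeof(h->padding));

	uint64_t word = type;	/* low half: new type, high half: zeros */
	memcpy(h, &word, sizeof(word));

	/* re-arm the red zone so later stray writes are reported */
	VALGRIND_MAKE_MEM_NOACCESS(h->padding, sizeof(h->padding));
}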