Example 1
File: set.c Project: tgockel/nvml
/*
 * util_parse_add_part -- (internal) add a new part file to the replica info
 */
static int
util_parse_add_part(struct pool_set *set, const char *path, size_t filesize)
{
	LOG(3, "set %p path %s filesize %zu", set, path, filesize);

	ASSERTne(set, NULL);

	struct pool_replica *rep = set->replica[set->nreplicas - 1];
	ASSERTne(rep, NULL);

	/* XXX - pre-allocate space for X parts, and reallocate every X parts */
	rep = Realloc(rep, sizeof (struct pool_replica) +
			(rep->nparts + 1) * sizeof (struct pool_set_part));
	if (rep == NULL) {
		ERR("!Realloc");
		return -1;
	}
	set->replica[set->nreplicas - 1] = rep;

	unsigned p = rep->nparts++;

	rep->part[p].path = path;
	rep->part[p].filesize = filesize;
	rep->part[p].fd = -1;
	rep->part[p].addr = NULL;
	rep->part[p].created = 0;

	return 0;
}
Example 2
File: set.c Project: tgockel/nvml
/*
 * util_parse_add_replica -- (internal) add a new replica to the pool set info
 */
static int
util_parse_add_replica(struct pool_set **setp)
{
	LOG(3, "setp %p", setp);

	ASSERTne(setp, NULL);

	struct pool_set *set = *setp;
	ASSERTne(set, NULL);

	set = Realloc(set, sizeof (struct pool_set) +
			(set->nreplicas + 1) * sizeof (struct pool_replica *));
	if (set == NULL) {
		ERR("!Realloc");
		return -1;
	}
	*setp = set;

	struct pool_replica *rep;
	rep = Malloc(sizeof (struct pool_replica));
	if (rep == NULL) {
		ERR("!Malloc");
		return -1;
	}

	unsigned r = set->nreplicas++;

	set->replica[r] = rep;

	rep->nparts = 0;
	rep->repsize = 0;

	return 0;
}
Example 3
File: set.c Project: bgbhpe/nvml
/*
 * util_parse_add_remote_replica -- (internal) add a new remote replica
 *                                  to the pool set info
 */
static int
util_parse_add_remote_replica(struct pool_set **setp, char *node_addr,
				char *pool_desc)
{
	LOG(3, "setp %p node_addr %s pool_desc %s", setp, node_addr, pool_desc);

	ASSERTne(setp, NULL);
	ASSERTne(node_addr, NULL);
	ASSERTne(pool_desc, NULL);

	int ret = util_parse_add_replica(setp);
	if (ret != 0)
		return ret;

	/* a remote replica has one 'fake' part */
	ret = util_parse_add_part(*setp, NULL, 0);
	if (ret != 0)
		return ret;

	struct pool_set *set = *setp;
	struct pool_replica *rep = set->replica[set->nreplicas - 1];
	ASSERTne(rep, NULL);

	rep->remote = Zalloc(sizeof(struct remote_replica));
	if (rep->remote == NULL) {
		ERR("!Malloc");
		return -1;
	}
	rep->remote->node_addr = node_addr;
	rep->remote->pool_desc = pool_desc;
	set->remote = 1;

	return 0;
}
Example 4
File: obj.c Project: jxy859/nvml
/*
 * constructor_zrealloc -- (internal) constructor for pmemobj_zrealloc
 */
static void
constructor_zrealloc(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_realloc *carg = arg;
	struct oob_header *pobj = OOB_HEADER_FROM_PTR(ptr);

	if (ptr != carg->ptr) {
		size_t cpy_size = carg->new_size > carg->old_size ?
			carg->old_size : carg->new_size;

		pop->memcpy_persist(pop, ptr, carg->ptr, cpy_size);

		pobj->data.internal_type = TYPE_ALLOCATED;
		pobj->data.user_type = carg->user_type;
		pop->persist(pop, &pobj->data.internal_type,
		/* there's no padding between these, so we can add sizes */
			sizeof (pobj->data.internal_type) +
			sizeof (pobj->data.user_type));

		VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &pobj->data.padding,
				sizeof (pobj->data.padding));
	}

	if (carg->new_size > carg->old_size) {
		size_t grow_len = carg->new_size - carg->old_size;
		void *new_data_ptr = (void *)((uintptr_t)ptr + carg->old_size);

		pop->memset_persist(pop, new_data_ptr, 0, grow_len);
	}
}
Example 5
/*
 * constructor_zrealloc_root -- (internal) constructor for pmemobj_root
 */
static void
constructor_zrealloc_root(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_realloc *carg = arg;

	VALGRIND_ADD_TO_TX(OOB_HEADER_FROM_PTR(ptr),
		usable_size + OBJ_OOB_SIZE);

	constructor_realloc(pop, ptr, usable_size, arg);

	/* activate the padding redzone */
	VALGRIND_DO_MAKE_MEM_NOACCESS(pop,
		&OOB_HEADER_FROM_PTR(ptr)->data.padding,
		sizeof (OOB_HEADER_FROM_PTR(ptr)->data.padding));

	if (carg->constructor)
		carg->constructor(pop, ptr, carg->arg);

	VALGRIND_REMOVE_FROM_TX(OOB_HEADER_FROM_PTR(ptr),
		carg->new_size + OBJ_OOB_SIZE);
}
Example 6
static void *
do_test(void *arg)
{
	int **bufs = malloc(NBUFS * sizeof (void *));
	ASSERTne(bufs, NULL);

	size_t *sizes = malloc(NBUFS * sizeof (size_t));
	ASSERTne(sizes, NULL);

	for (int j = 0; j < NBUFS; j++) {
		sizes[j] = sizeof (int) + 64 * (rand() % 100);
		bufs[j] = malloc(sizes[j]);
		ASSERTne(bufs[j], NULL);
	}

	for (int j = 0; j < NBUFS; j++) {
		ASSERT(malloc_usable_size(bufs[j]) >= sizes[j]);
		free(bufs[j]);
	}

	free(sizes);
	free(bufs);

	return NULL;
}
Example 7
/*
 * constructor_alloc_bytype -- (internal) constructor for obj_alloc_construct
 */
static void
constructor_alloc_bytype(PMEMobjpool *pop, void *ptr,
	size_t usable_size, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct oob_header *pobj = OOB_HEADER_FROM_PTR(ptr);
	struct carg_bytype *carg = arg;

	pobj->data.internal_type = TYPE_ALLOCATED;
	pobj->data.user_type = carg->user_type;
	pop->flush(pop, &pobj->data.internal_type,
		/* there's no padding between these, so we can add sizes */
		sizeof (pobj->data.internal_type) +
		sizeof (pobj->data.user_type));

	if (carg->zero_init)
		pop->memset_persist(pop, ptr, 0, usable_size);
	else
		pop->drain(pop);

	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &pobj->data.padding,
			sizeof (pobj->data.padding));

	if (carg->constructor)
		carg->constructor(pop, ptr, carg->arg);
}
Example 8
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_check");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP(NULL, VMEM_MIN_POOL*2, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	ASSERTeq(1, vmem_check(vmp));

	/* create pool in this same memory region */
	if (dir == NULL) {
		unsigned long Pagesize = (unsigned long) sysconf(_SC_PAGESIZE);
		void *mem_pool2 = (void *)(((uintptr_t)mem_pool +
			VMEM_MIN_POOL/2) & ~(Pagesize-1));

		VMEM *vmp2 = vmem_create_in_region(mem_pool2,
			VMEM_MIN_POOL);

		if (vmp2 == NULL)
			FATAL("!vmem_create_in_region");

		/* detect memory range collision */
		ASSERTne(1, vmem_check(vmp));
		ASSERTne(1, vmem_check(vmp2));

		vmem_delete(vmp2);

		ASSERTne(1, vmem_check(vmp2));
	}

	vmem_delete(vmp);

	/* for vmem_create(), the memory is unmapped after the pool is deleted */
	if (!dir)
		ASSERTne(1, vmem_check(vmp));

	DONE(NULL);
}
Example 9
File: lane.c Project: Neuvenen/nvml
/*
 * lane_init -- (internal) initializes a single lane runtime variables
 */
static int
lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout,
		pthread_mutex_t *mtx, pthread_mutexattr_t *attr)
{
	ASSERTne(lane, NULL);
	ASSERTne(mtx, NULL);
	ASSERTne(attr, NULL);

	int err;

	util_mutex_init(mtx, attr);

	lane->lock = mtx;

	int i;
	for (i = 0; i < MAX_LANE_SECTION; ++i) {
		lane->sections[i].runtime = NULL;
		lane->sections[i].layout = &layout->sections[i];
		err = Section_ops[i]->construct(pop, &lane->sections[i]);
		if (err != 0) {
			ERR("!lane_construct_ops %d", i);
			goto error_section_construct;
		}
	}

	return 0;

error_section_construct:
	for (i = i - 1; i >= 0; --i)
		Section_ops[i]->destruct(pop, &lane->sections[i]);

	util_mutex_destroy(lane->lock);
	return err;
}
Example 10
File: obj.c Project: jxy859/nvml
/*
 * constructor_alloc_root -- (internal) constructor for obj_alloc_root
 */
static void
constructor_alloc_root(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct oob_header *ro = OOB_HEADER_FROM_PTR(ptr);
	struct carg_root *carg = arg;

	/* temporarily add atomic root allocation to pmemcheck transaction */
	VALGRIND_ADD_TO_TX(ro, OBJ_OOB_SIZE + carg->size);

	if (carg->constructor)
		carg->constructor(pop, ptr, carg->arg);
	else
		pop->memset_persist(pop, ptr, 0, carg->size);

	ro->data.internal_type = TYPE_ALLOCATED;
	ro->data.user_type = POBJ_ROOT_TYPE_NUM;
	ro->size = carg->size;

	VALGRIND_REMOVE_FROM_TX(ro, OBJ_OOB_SIZE + carg->size);

	pop->persist(pop, &ro->size,
		/* there's no padding between these, so we can add sizes */
		sizeof (ro->size) + sizeof (ro->data.internal_type) +
		sizeof (ro->data.user_type));

	VALGRIND_DO_MAKE_MEM_NOACCESS(pop, &ro->data.padding,
			sizeof (ro->data.padding));
}
Example 11
int
main(int argc, char *argv[])
{
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_check");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP_ANON_ALIGNED(VMEM_MIN_POOL * 2, 4 << 20);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	ASSERTeq(1, vmem_check(vmp));

	/* create pool in this same memory region */
	if (dir == NULL) {
		void *mem_pool2 = (void *)(((uintptr_t)mem_pool +
			VMEM_MIN_POOL / 2) & ~(Ut_pagesize - 1));

		VMEM *vmp2 = vmem_create_in_region(mem_pool2,
			VMEM_MIN_POOL);

		if (vmp2 == NULL)
			FATAL("!vmem_create_in_region");

		/* detect memory range collision */
		ASSERTne(1, vmem_check(vmp));
		ASSERTne(1, vmem_check(vmp2));

		vmem_delete(vmp2);

		ASSERTne(1, vmem_check(vmp2));
	}

	vmem_delete(vmp);

	/* for vmem_create(), the memory is unmapped after the pool is deleted */
	if (!dir)
		ASSERTne(1, vmem_check(vmp));

	DONE(NULL);
}
Example 12
int
main(int argc, char *argv[])
{
	const int test_value = 123456;
	char *dir = NULL;
	void *mem_pool = NULL;
	VMEM *vmp;

	START(argc, argv, "vmem_realloc");

	if (argc == 2) {
		dir = argv[1];
	} else if (argc > 2) {
		FATAL("usage: %s [directory]", argv[0]);
	}

	if (dir == NULL) {
		/* allocate memory for function vmem_create_in_region() */
		mem_pool = MMAP(NULL, VMEM_MIN_POOL, PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

		vmp = vmem_create_in_region(mem_pool, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create_in_region");
	} else {
		vmp = vmem_create(dir, VMEM_MIN_POOL);
		if (vmp == NULL)
			FATAL("!vmem_create");
	}

	int *test = vmem_realloc(vmp, NULL, sizeof (int));
	ASSERTne(test, NULL);

	test[0] = test_value;
	ASSERTeq(test[0], test_value);

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	test = vmem_realloc(vmp, test, sizeof (int) * 10);
	ASSERTne(test, NULL);
	ASSERTeq(test[0], test_value);
	test[1] = test_value;
	test[9] = test_value;

	/* check that pointer came from mem_pool */
	if (dir == NULL) {
		ASSERTrange(test, mem_pool, VMEM_MIN_POOL);
	}

	vmem_free(vmp, test);

	vmem_delete(vmp);

	DONE(NULL);
}
Example 13
/*
 * alloc_class_find_min_frag -- searches for an existing allocation
 * class that will provide the smallest internal fragmentation for the given
 * size.
 */
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
	LOG(10, NULL);

	struct alloc_class *best_c = NULL;
	size_t lowest_waste = SIZE_MAX;

	ASSERTne(n, 0);

	/*
	 * Start from the largest buckets in order to minimize unit size of
	 * allocated memory blocks.
	 */
	for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
		struct alloc_class *c = ac->aclasses[i];

		/* can't use alloc classes with no headers by default */
		if (c == NULL || c->header_type == HEADER_NONE)
			continue;

		size_t real_size = n + header_type_to_size[c->header_type];

		size_t units = CALC_SIZE_IDX(c->unit_size, real_size);

		/* can't exceed the maximum units allowed for a single run allocation */
		if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC)
			continue;

		if (c->unit_size * units == real_size)
			return c;

		size_t waste = (c->unit_size * units) - real_size;

		/*
		 * If we assume that the allocation class is only ever going to
		 * be used with exactly one size, the effective internal
		 * fragmentation would be increased by the leftover
		 * memory at the end of the run.
		 */
		if (c->type == CLASS_RUN) {
			size_t wasted_units = c->run.nallocs % units;
			size_t wasted_bytes = wasted_units * c->unit_size;
			size_t waste_avg_per_unit = wasted_bytes /
				c->run.nallocs;

			waste += waste_avg_per_unit;
		}

		if (best_c == NULL || lowest_waste > waste) {
			best_c = c;
			lowest_waste = waste;
		}
	}

	ASSERTne(best_c, NULL);
	return best_c;
}
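To make the waste arithmetic above concrete, here is a small stand-alone sketch with made-up numbers; the unit size, run capacity and request size are purely illustrative, and the ceiling rounding is only assumed to match what CALC_SIZE_IDX does:

#include <stdio.h>

int
main(void)
{
	/* hypothetical class: 256-byte units, runs holding 50 allocations */
	size_t unit_size = 256;
	size_t run_nallocs = 50;
	size_t real_size = 600;	/* requested size plus header */

	/* units needed to hold real_size (assumed ceiling division) */
	size_t units = (real_size + unit_size - 1) / unit_size;	/* 3 */

	/* waste inside the units actually handed out */
	size_t waste = unit_size * units - real_size;	/* 168 */

	/* leftover units at the end of a run, averaged per allocation */
	size_t wasted_units = run_nallocs % units;	/* 2 */
	size_t waste_avg = wasted_units * unit_size / run_nallocs;	/* 10 */

	printf("waste per allocation: %zu bytes\n", waste + waste_avg);
	return 0;
}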
Example 14
int
main(int argc, char *argv[])
{
	void *handle = NULL;
	void *ptr;

	START(argc, argv, "vmmalloc_init");

	if (argc > 2)
		FATAL("usage: %s [d|l]", argv[0]);

	if (argc == 2) {
		switch (argv[1][0]) {
		case 'd':
			OUT("deep binding");
			handle = dlopen("./libtest.so",
				RTLD_NOW | RTLD_LOCAL | RTLD_DEEPBIND);
			break;
		case 'l':
			OUT("lazy binding");
			handle = dlopen("./libtest.so", RTLD_LAZY);
			break;
		default:
			FATAL("usage: %s [d|l]", argv[0]);
		}

		if (handle == NULL)
			OUT("dlopen: %s", dlerror());
		ASSERTne(handle, NULL);

		Falloc = dlsym(handle, "falloc");
		ASSERTne(Falloc, NULL);
	}

	ptr = malloc(4321);
	free(ptr);

	if (argc == 2) {
		/*
		 * NOTE: falloc calls malloc internally.
		 * If libtest is loaded with RTLD_DEEPBIND flag, then it will
		 * use its own lookup scope in preference to global symbols
		 * from already loaded (LD_PRELOAD) libvmmalloc.  So, falloc
		 * will call the stock libc's malloc.
		 * However, since we override the malloc hooks, a call to libc's
		 * malloc will be redirected to libvmmalloc anyway, and the
		 * memory can be safely reclaimed using libvmmalloc's free.
		 */
		ptr = Falloc(4321, 0xaa);
		free(ptr);
	}

	DONE(NULL);
}
Example 15
File: obj.c Project: jxy859/nvml
/*
 * constructor_zalloc -- (internal) constructor for pmemobj_zalloc
 */
static void
constructor_zalloc(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_alloc *carg = arg;

	pop->memset_persist(pop, ptr, 0, carg->size);
}
Example 16
File: obj.c Project: jxy859/nvml
/*
 * constructor_strdup -- (internal) constructor of pmemobj_strdup
 */
static void
constructor_strdup(PMEMobjpool *pop, void *ptr, void *arg)
{
	LOG(3, "pop %p ptr %p arg %p", pop, ptr, arg);

	ASSERTne(ptr, NULL);
	ASSERTne(arg, NULL);

	struct carg_strdup *carg = arg;

	/* copy string */
	pop->memcpy_persist(pop, ptr, carg->s, carg->size);
}
Example 17
/*
 * alloc_class_find_min_frag -- searches for an existing allocation
 * class that will provide the smallest internal fragmentation for the given
 * size.
 */
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
	LOG(10, NULL);

	struct alloc_class *best_c = NULL;
	size_t best_frag_d = SIZE_MAX;
	size_t best_frag_r = SIZE_MAX;

	ASSERTne(n, 0);

	/*
	 * Start from the largest buckets in order to minimize unit size of
	 * allocated memory blocks.
	 */
	for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
		struct alloc_class *c = ac->aclasses[i];

		/* can't use alloc classes with no headers by default */
		if (c == NULL || c->header_type == HEADER_NONE)
			continue;

		size_t real_size = n + header_type_to_size[c->header_type];

		size_t units = CALC_SIZE_IDX(c->unit_size, real_size);

		/* can't exceed the maximum units allowed for a single run allocation */
		if (units > RUN_UNIT_MAX_ALLOC)
			continue;

		if (c->unit_size * units == real_size)
			return c;

		ASSERT(c->unit_size * units > real_size);

		size_t frag_d = (c->unit_size * units) / real_size;
		size_t frag_r = (c->unit_size * units) % real_size;
		if (best_c == NULL || frag_d < best_frag_d ||
			(frag_d == best_frag_d && frag_r < best_frag_r)) {
			best_c = c;
			best_frag_d = frag_d;
			best_frag_r = frag_r;
		}
	}

	ASSERTne(best_c, NULL);
	return best_c;
}
Example 18
/*
 * pool_test -- test pool
 *
 * This function creates a memory pool in a file (if dir is not NULL),
 * or in RAM (if dir is NULL) and allocates memory for the test.
 */
void
pool_test(const char *dir)
{
	VMEM *vmp = NULL;

	if (dir != NULL) {
		vmp = vmem_pool_create(dir, VMEM_MIN_POOL);
	} else {
		vmp = vmem_pool_create_in_region(mem_pool, VMEM_MIN_POOL);
	}

	if (expect_create_pool == 0) {
		ASSERTeq(vmp, NULL);
		DONE(NULL);
	} else {
		if (vmp == NULL) {
			if (dir == NULL) {
				FATAL("!vmem_pool_create_in_region");
			} else {
				FATAL("!vmem_pool_create");
			}
		}
	}

	char *test = vmem_malloc(vmp, strlen(TEST_STRING_VALUE) + 1);
	ASSERTne(test, NULL);

	strcpy(test, TEST_STRING_VALUE);
	ASSERTeq(strcmp(test, TEST_STRING_VALUE), 0);

	vmem_free(vmp, test);

	vmem_pool_delete(vmp);
}
Example 19
/*
 * alloc_class_assign_by_size -- (internal) chooses the allocation class that
 *	best approximates the provided size
 */
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
	size_t size)
{
	LOG(10, NULL);

	size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
		ac->granularity);

	struct alloc_class *c = alloc_class_find_min_frag(ac,
		class_map_index * ac->granularity);
	ASSERTne(c, NULL);

	/*
	 * We don't lock this array because locking this section here and then
	 * bailing out if someone else was faster would still be slower than
	 * just calculating the class and failing to assign the variable.
	 * We are using a compare and swap so that helgrind/drd don't complain.
	 */
	util_bool_compare_and_swap64(
		&ac->class_map_by_alloc_size[class_map_index],
		MAX_ALLOCATION_CLASSES, c->id);

	return c;
}
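The util_bool_compare_and_swap64() call above only publishes the chosen class id if the slot still holds its initial sentinel (MAX_ALLOCATION_CLASSES), so a concurrent writer that got there first simply wins. A minimal stand-alone sketch of such a 64-bit boolean compare-and-swap, written here with a GCC/Clang builtin and not taken from the library:

#include <stdint.h>

/* returns nonzero iff *p still held expected and was switched to desired */
static inline int
bool_cas64(uint64_t *p, uint64_t expected, uint64_t desired)
{
	return __atomic_compare_exchange_n(p, &expected, desired, 0,
			__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}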
Example 20
File: file.c Project: ChandKV/nvml
/*
 * util_file_create -- create a new memory pool file
 */
int
util_file_create(const char *path, size_t size, size_t minsize)
{
	LOG(3, "path %s size %zu minsize %zu", path, size, minsize);

	ASSERTne(size, 0);

	if (size < minsize) {
		ERR("size %zu smaller than %zu", size, minsize);
		errno = EINVAL;
		return -1;
	}

	if (((off_t)size) < 0) {
		ERR("invalid size (%zu) for off_t", size);
		errno = EFBIG;
		return -1;
	}

	int fd;
	int mode;
	int flags = O_RDWR | O_CREAT | O_EXCL;
#ifndef _WIN32
	mode = 0;
#else
	mode = S_IWRITE | S_IREAD;
	flags |= O_BINARY;
#endif

	/*
	 * Create file without any permission. It will be granted once
	 * initialization completes.
	 */
	if ((fd = open(path, flags, mode)) < 0) {
		ERR("!open %s", path);
		return -1;
	}

	if ((errno = posix_fallocate(fd, 0, (off_t)size)) != 0) {
		ERR("!posix_fallocate");
		goto err;
	}

	/* for windows we can't flock until after we fallocate */
	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
		ERR("!flock");
		goto err;
	}

	return fd;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	if (fd != -1)
		(void) close(fd);
	unlink(path);
	errno = oerrno;
	return -1;
}
Example 21
int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_heap_state");

	if (argc != 2)
		FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop = NULL;

	if ((pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
		FATAL("!pmemobj_create: %s", path);

	pmemobj_root(pop, ROOT_SIZE); /* just to trigger allocation */

	pmemobj_close(pop);

	pop = pmemobj_open(path, LAYOUT_NAME);
	ASSERTne(pop, NULL);

	for (int i = 0; i < ALLOCS; ++i) {
		PMEMoid oid;
		pmemobj_alloc(pop, &oid, ALLOC_SIZE, 0, NULL, NULL);
		OUT("%d %lu", i, oid.off);
	}

	pmemobj_close(pop);

	DONE(NULL);
}
Example 22
/*
 * container_seglists_insert_block -- (internal) inserts a new memory block
 *	into the container
 */
static int
container_seglists_insert_block(struct block_container *bc,
	const struct memory_block *m)
{
	ASSERT(m->chunk_id < MAX_CHUNK);
	ASSERT(m->zone_id < UINT16_MAX);
	ASSERTne(m->size_idx, 0);

	struct block_container_seglists *c =
		(struct block_container_seglists *)bc;

	if (c->nonempty_lists == 0)
		c->m = *m;

	ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS);
	ASSERT(m->chunk_id == c->m.chunk_id);
	ASSERT(m->zone_id == c->m.zone_id);

	if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0)
		return -1;

	/* marks the list as nonempty */
	c->nonempty_lists |= 1ULL << (m->size_idx - 1);

	return 0;
}
Example 23
File: pool.c Project: mslusarz/nvml
/*
 * pool_hdr_default -- return default pool header values
 */
void
pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp)
{
	memset(hdrp, 0, sizeof(*hdrp));
	const char *sig = pool_get_signature(type);
	ASSERTne(sig, NULL);

	memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN);

	switch (type) {
	case POOL_TYPE_LOG:
		hdrp->major = LOG_FORMAT_MAJOR;
		hdrp->compat_features = LOG_FORMAT_COMPAT;
		hdrp->incompat_features = LOG_FORMAT_INCOMPAT;
		hdrp->ro_compat_features = LOG_FORMAT_RO_COMPAT;
		break;
	case POOL_TYPE_BLK:
		hdrp->major = BLK_FORMAT_MAJOR;
		hdrp->compat_features = BLK_FORMAT_COMPAT;
		hdrp->incompat_features = BLK_FORMAT_INCOMPAT;
		hdrp->ro_compat_features = BLK_FORMAT_RO_COMPAT;
		break;
	case POOL_TYPE_OBJ:
		hdrp->major = OBJ_FORMAT_MAJOR;
		hdrp->compat_features = OBJ_FORMAT_COMPAT;
		hdrp->incompat_features = OBJ_FORMAT_INCOMPAT;
		hdrp->ro_compat_features = OBJ_FORMAT_RO_COMPAT;
		break;
	default:
		break;
	}
}
Example 24
/*
 * list_mutexes_lock -- (internal) grab one or two locks in ascending
 * address order
 */
static inline int
list_mutexes_lock(PMEMobjpool *pop,
	struct list_head *head1, struct list_head *head2)
{
	ASSERTne(head1, NULL);

	if (!head2 || head1 == head2)
		return pmemobj_mutex_lock(pop, &head1->lock);

	PMEMmutex *lock1;
	PMEMmutex *lock2;
	if ((uintptr_t)&head1->lock < (uintptr_t)&head2->lock) {
		lock1 = &head1->lock;
		lock2 = &head2->lock;
	} else {
		lock1 = &head2->lock;
		lock2 = &head1->lock;
	}

	int ret;
	if ((ret = pmemobj_mutex_lock(pop, lock1)))
		goto err;
	if ((ret = pmemobj_mutex_lock(pop, lock2)))
		goto err_unlock;

	return 0;

err_unlock:
	pmemobj_mutex_unlock(pop, lock1);
err:
	return ret;
}
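Taking the two locks in ascending address order is a generic deadlock-avoidance idiom, not something specific to pmemobj; a minimal sketch of the same idea with plain pthread mutexes:

#include <stdint.h>
#include <pthread.h>

/* lock two mutexes in ascending address order; equal pointers lock once */
void
lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}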
Example 25
File: set.c Project: tgockel/nvml
/*
 * util_map_hdr -- (internal) map a header of a pool set
 */
static int
util_map_hdr(struct pool_set_part *part, size_t size,
		off_t offset, int flags)
{
	LOG(3, "part %p size %zu offset %ju flags %d",
		part, size, offset, flags);

	ASSERTne(size, 0);
	ASSERTeq(size % Pagesize, 0);
	ASSERTeq(offset % Pagesize, 0);

	part->hdrsize = size;
	part->hdr = mmap(NULL, part->hdrsize,
		PROT_READ|PROT_WRITE, flags, part->fd, offset);

	if (part->hdr == MAP_FAILED) {
		ERR("!mmap: %s", part->path);
		return -1;
	}

	VALGRIND_REGISTER_PMEM_MAPPING(part->hdr, part->hdrsize);
	VALGRIND_REGISTER_PMEM_FILE(part->fd, part->hdr, part->hdrsize, offset);

	return 0;
}
Example 26
File: pool.c Project: krzycz/nvml
/*
 * pool_hdr_default -- return default pool header values
 */
void
pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp)
{
	memset(hdrp, 0, sizeof(*hdrp));
	const char *sig = pool_get_signature(type);
	ASSERTne(sig, NULL);

	memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN);

	switch (type) {
	case POOL_TYPE_LOG:
		hdrp->major = LOG_FORMAT_MAJOR;
		hdrp->features = log_format_feat_default;
		break;
	case POOL_TYPE_BLK:
		hdrp->major = BLK_FORMAT_MAJOR;
		hdrp->features = blk_format_feat_default;
		break;
	case POOL_TYPE_OBJ:
		hdrp->major = OBJ_FORMAT_MAJOR;
		hdrp->features = obj_format_feat_default;
		break;
	default:
		break;
	}
}
Example 27
/*
 * pool_hdr_nondefault_fix -- (internal) fix custom value fields
 */
static int
pool_hdr_nondefault_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *context)
{
	LOG(3, NULL);

	ASSERTne(loc, NULL);
	uint64_t *flags = NULL;

	switch (question) {
	case Q_CRTIME:
		CHECK_INFO(ppc, "%ssetting pool_hdr.crtime to file's modtime: "
			"%s", loc->prefix,
			check_get_time_str(ppc->pool->set_file->mtime));
		util_convert2h_hdr_nocheck(&loc->hdr);
		loc->hdr.crtime = (uint64_t)ppc->pool->set_file->mtime;
		util_convert2le_hdr(&loc->hdr);
		break;
	case Q_ARCH_FLAGS:
		flags = (uint64_t *)&loc->valid_part_hdrp->arch_flags;
		CHECK_INFO(ppc, "%ssetting pool_hdr.arch_flags to 0x%08" PRIx64
				"%08" PRIx64, loc->prefix, flags[0], flags[1]);
		util_convert2h_hdr_nocheck(&loc->hdr);
		memcpy(&loc->hdr.arch_flags, &loc->valid_part_hdrp->arch_flags,
			sizeof(struct arch_flags));
		util_convert2le_hdr(&loc->hdr);
		break;
	default:
		ERR("not implemented question id: %u", question);
	}

	return 0;
}
Example 28
/*
 * pool_hdr_uuid_fix -- (internal) fix UUID value
 */
static int
pool_hdr_uuid_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
	void *context)
{
	LOG(3, NULL);

	ASSERTne(loc, NULL);

	switch (question) {
	case Q_UUID_SET:
		CHECK_INFO(ppc, "%ssetting pool_hdr.uuid to %s", loc->prefix,
			check_get_uuid_str(*loc->valid_uuid));
		memcpy(loc->hdr.uuid, loc->valid_uuid, POOL_HDR_UUID_LEN);
		break;
	case Q_UUID_REGENERATE:
		if (util_uuid_generate(loc->hdr.uuid) != 0) {
			ppc->result = CHECK_RESULT_INTERNAL_ERROR;
			return CHECK_ERR(ppc, "%suuid generation failed",
				loc->prefix);
		}
		CHECK_INFO(ppc, "%ssetting pool_hdr.uuid to %s", loc->prefix,
			check_get_uuid_str(loc->hdr.uuid));
		break;
	default:
		ERR("not implemented question id: %u", question);
	}

	return 0;
}
Example 29
/*
 * bucket_tree_insert_block -- (internal) inserts a new memory block
 *	into the container
 */
static int
bucket_tree_insert_block(struct block_container *bc, struct palloc_heap *heap,
	struct memory_block m)
{
	/*
	 * Even though the memory block representation of an object uses
	 * relatively large types, in practice the entire memory block structure
	 * needs to fit in a single 64 bit value - the type of the key in the
	 * container tree.
	 * Given those limitations a reasonable idea might be to make the
	 * memory_block structure be the size of a single uint64_t, which would
	 * work for now, but if someday someone decides there's a need for
	 * larger objects the current implementation would allow them to simply
	 * replace this container instead of making little changes all over
	 * the heap code.
	 */
	ASSERT(m.chunk_id < MAX_CHUNK);
	ASSERT(m.zone_id < UINT16_MAX);
	ASSERTne(m.size_idx, 0);

	struct block_container_ctree *c = (struct block_container_ctree *)bc;

#ifdef USE_VG_MEMCHECK
	bucket_vg_mark_noaccess(heap, bc, m);
#endif

	uint64_t key = CHUNK_KEY_PACK(m.zone_id, m.chunk_id, m.block_off,
				m.size_idx);

	return ctree_insert(c->tree, key, 0);
}
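The comment above relies on the whole block location fitting into one 64-bit tree key. The sketch below shows the general packing idea only; the field widths and the helper name are hypothetical and do not reflect the real CHUNK_KEY_PACK macro:

#include <stdint.h>

/* pack four small fields into a single 64-bit key (illustrative widths) */
static inline uint64_t
key_pack(uint64_t zone_id, uint64_t chunk_id, uint64_t block_off,
	uint64_t size_idx)
{
	return ((zone_id & 0xFFFF) << 48) |	/* bits 63..48 */
		((chunk_id & 0xFFFFF) << 28) |	/* bits 47..28 */
		((block_off & 0xFFFF) << 12) |	/* bits 27..12 */
		(size_idx & 0xFFF);		/* bits 11..0  */
}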
int
main(int argc, char *argv[])
{
	VMEM *vmp;
	size_t i;

	START(argc, argv, "vmem_pool_create_in_region");

	if (argc > 1)
		FATAL("usage: %s", argv[0]);

	vmp = vmem_pool_create_in_region(mem_pool, VMEM_MIN_POOL);

	if (vmp == NULL)
		FATAL("!vmem_pool_create_in_region");

	for (i = 0; i < TEST_ALLOCATIONS; ++i) {
		allocs[i] = vmem_malloc(vmp, sizeof (int));

		ASSERTne(allocs[i], NULL);

		/* check that pointer came from mem_pool */
		ASSERTrange(allocs[i], mem_pool, VMEM_MIN_POOL);
	}

	for (i = 0; i < TEST_ALLOCATIONS; ++i) {
		vmem_free(vmp, allocs[i]);
	}

	vmem_pool_delete(vmp);

	DONE(NULL);
}