Example 1
File: pool.c Project: mslusarz/nvml
/*
 * pool_set_file_unmap_headers -- unmap headers of each pool set part file
 */
void
pool_set_file_unmap_headers(struct pool_set_file *file)
{
	if (!file->poolset)
		return;
	for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
		struct pool_replica *rep = file->poolset->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			struct pool_set_part *part = &rep->part[p];
			util_unmap_hdr(part);
		}
	}
}
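A minimal caller sketch (editorial, not from the project), assuming the internal pool.h/set.h declarations are in scope: the per-part headers must be mapped first, here with the same util_map_hdr() call Example 6 uses, before this helper has anything to unmap. Cleanup of a partially mapped set is elided.

static int
with_mapped_headers(struct pool_set_file *file)
{
	/* map every part header, mirroring the loop in Example 6 */
	for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
		struct pool_replica *rep = file->poolset->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			if (util_map_hdr(&rep->part[p], MAP_SHARED, 0) != 0)
				return -1;	/* partial cleanup elided */
		}
	}

	/* ... inspect the mapped pool_hdr structures here ... */

	/* release all of the header mappings in one call */
	pool_set_file_unmap_headers(file);
	return 0;
}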
Example 2
File: set.c Project: tgockel/nvml
/*
 * util_replica_close -- (internal) close a memory pool replica
 *
 * This function unmaps all mapped memory regions.
 */
static int
util_replica_close(struct pool_set *set, unsigned repidx)
{
	LOG(3, "set %p repidx %u\n", set, repidx);
	struct pool_replica *rep = set->replica[repidx];

	for (unsigned p = 0; p < rep->nparts; p++)
		util_unmap_hdr(&rep->part[p]);

	util_unmap_part(&rep->part[0]);

	return 0;
}
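The err: paths of Examples 3 and 4 below show how this is meant to be driven from the caller; a condensed, editorial sketch of that teardown pattern as it would read inside set.c:

static void
poolset_teardown(struct pool_set *set)
{
	/* close every replica, unmapping its headers and data mapping */
	for (unsigned r = 0; r < set->nreplicas; r++)
		util_replica_close(set, r);

	/*
	 * free the pool set descriptor; the 0 matches Examples 3, 4 and 7
	 * (presumably: do not delete the part files)
	 */
	util_poolset_close(set, 0);
}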
Example 3
File: set.c Project: tgockel/nvml
/*
 * util_pool_open_nocheck -- open a memory pool (set or a single file)
 *
 * This function opens a pool set without checking the header values.
 */
int
util_pool_open_nocheck(struct pool_set **setp, const char *path, int rdonly,
		size_t hdrsize)
{
	LOG(3, "setp %p path %s", setp, path);

	int flags = rdonly ? MAP_PRIVATE|MAP_NORESERVE : MAP_SHARED;

	int ret = util_poolset_open(setp, path, 0);
	if (ret < 0) {
		LOG(2, "cannot open pool set");
		return -1;
	}

	struct pool_set *set = *setp;

	ASSERT(set->nreplicas > 0);

	set->rdonly = 0;
	set->poolsize = SIZE_MAX;

	for (unsigned r = 0; r < set->nreplicas; r++) {
		if (util_replica_open(set, r, flags, hdrsize) != 0) {
			LOG(2, "replica open failed");
			goto err;
		}
	}

	/* unmap all headers */
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++)
			util_unmap_hdr(&rep->part[p]);
	}

	return 0;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	for (unsigned r = 0; r < set->nreplicas; r++)
		util_replica_close(set, r);

	util_poolset_close(set, 0);
	errno = oerrno;
	return -1;
}
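A hypothetical caller sketch for the no-check variant, useful for inspection or repair tools that must open pools with damaged headers. POOL_HDR_SIZE is the header-size constant that appears in Example 8, and the function name is illustrative; full teardown of the data mappings is elided.

static int
inspect_pool(const char *path)
{
	struct pool_set *set;

	/* open read-only (MAP_PRIVATE) and skip header validation entirely */
	if (util_pool_open_nocheck(&set, path, 1, POOL_HDR_SIZE) != 0)
		return -1;

	/* base of the first replica's mapping (pool header included) */
	void *base = set->replica[0]->part[0].addr;
	size_t len = set->poolsize;

	/* ... examine the contents at base[0 .. len) ... */
	(void) base;
	(void) len;

	/* release the set descriptor, keeping the part files, as the
	 * error paths above do; unmapping is elided in this sketch */
	util_poolset_close(set, 0);
	return 0;
}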
Example 4
File: set.c Project: tgockel/nvml
/*
 * util_pool_open -- open a memory pool (set or a single file)
 *
 * This routine does all the work, but takes a rdonly flag so internal
 * calls can map a read-only pool if required.
 */
int
util_pool_open(struct pool_set **setp, const char *path, int rdonly,
	size_t minsize, size_t hdrsize, const char *sig,
	uint32_t major, uint32_t compat, uint32_t incompat, uint32_t ro_compat)
{
	LOG(3, "setp %p path %s rdonly %d minsize %zu "
		"hdrsize %zu sig %s major %u "
		"compat %#x incompat %#x ro_comapt %#x",
		setp, path, rdonly, minsize, hdrsize,
		sig, major, compat, incompat, ro_compat);

	int flags = rdonly ? MAP_PRIVATE|MAP_NORESERVE : MAP_SHARED;

	int ret = util_poolset_open(setp, path, minsize);
	if (ret < 0) {
		LOG(2, "cannot open pool set");
		return -1;
	}

	struct pool_set *set = *setp;

	ASSERT(set->nreplicas > 0);

	set->rdonly = 0;
	set->poolsize = SIZE_MAX;

	for (unsigned r = 0; r < set->nreplicas; r++) {
		if (util_replica_open(set, r, flags, hdrsize) != 0) {
			LOG(2, "replica open failed");
			goto err;
		}
	}

	/* check headers, check UUID's, check replicas linkage */
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++) {
			if (util_header_check(set, r, p, sig, major,
					compat, incompat, ro_compat) != 0) {
				LOG(2, "header check failed - part #%d", p);
				goto err;
			}

			set->rdonly |= rep->part[p].rdonly;
		}

		if (memcmp(HDR(REP(set, r - 1), 0)->uuid,
					HDR(REP(set, r), 0)->prev_repl_uuid,
					POOL_HDR_UUID_LEN) ||
		    memcmp(HDR(REP(set, r + 1), 0)->uuid,
					HDR(REP(set, r), 0)->next_repl_uuid,
					POOL_HDR_UUID_LEN)) {
			ERR("wrong replica UUID");
			errno = EINVAL;
			goto err;
		}
	}

	/* unmap all headers */
	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		for (unsigned p = 0; p < rep->nparts; p++)
			util_unmap_hdr(&rep->part[p]);
	}

	return 0;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	for (unsigned r = 0; r < set->nreplicas; r++)
		util_replica_close(set, r);

	util_poolset_close(set, 0);
	errno = oerrno;
	return -1;
}
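The UUID linkage check above indexes the neighbours of replica r as r - 1 and r + 1 even at the ends of the array, so REP presumably wraps around and the replicas form a ring. A hedged reconstruction of the two helpers; the real definitions live in set.h and may differ in detail:

/* editorial reconstruction - wrap the replica index modulo nreplicas */
#define REP(set, r)\
	((set)->replica[((set)->nreplicas + (r)) % (set)->nreplicas])

/* editorial reconstruction - the mapped header of part p of a replica */
#define HDR(rep, p)\
	((struct pool_hdr *)((rep)->part[p].hdr))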
Example 5
File: set.c Project: tgockel/nvml
/*
 * util_replica_open -- (internal) open a memory pool replica
 */
static int
util_replica_open(struct pool_set *set, unsigned repidx, int flags,
	size_t hdrsize)
{
	LOG(3, "set %p repidx %u flags %d hdrsize %zu\n",
		set, repidx, flags, hdrsize);

	struct pool_replica *rep = set->replica[repidx];

	rep->repsize -= (rep->nparts - 1) * hdrsize;

	/* determine a hint address for mmap() */
	void *addr = util_map_hint(rep->repsize); /* XXX - randomize */
	if (addr == NULL) {
		ERR("cannot find a contiguous region of given size");
		return -1;
	}

	/* map the first part and reserve space for remaining parts */
	if (util_map_part(&rep->part[0], addr, rep->repsize, 0, flags) != 0) {
		LOG(2, "pool mapping failed - part #0");
		return -1;
	}

	VALGRIND_REGISTER_PMEM_MAPPING(rep->part[0].addr, rep->part[0].size);
	VALGRIND_REGISTER_PMEM_FILE(rep->part[0].fd,
				rep->part[0].addr, rep->part[0].size, 0);

	/* map all headers - don't care about the address */
	for (unsigned p = 0; p < rep->nparts; p++) {
		if (util_map_hdr(&rep->part[p],
				hdrsize, 0, flags) != 0) {
			LOG(2, "header mapping failed - part #%d", p);
			goto err;
		}
	}

	size_t mapsize = rep->part[0].filesize & ~(Pagesize - 1);
	addr = (char *)rep->part[0].addr + mapsize;

	/*
	 * map the remaining parts of the usable pool space
	 * (4K-aligned)
	 */
	for (unsigned p = 1; p < rep->nparts; p++) {
		/* map data part */
		if (util_map_part(&rep->part[p], addr, 0, hdrsize,
				flags | MAP_FIXED) != 0) {
			LOG(2, "usable space mapping failed - part #%d", p);
			goto err;
		}

		VALGRIND_REGISTER_PMEM_FILE(rep->part[p].fd,
			rep->part[p].addr, rep->part[p].size, hdrsize);

		mapsize += rep->part[p].size;
		addr = (char *)addr + rep->part[p].size;
	}

	rep->is_pmem = pmem_is_pmem(rep->part[0].addr, rep->part[0].size);

	ASSERTeq(mapsize, rep->repsize);

	/* calculate pool size - choose the smallest replica size */
	if (rep->repsize < set->poolsize)
		set->poolsize = rep->repsize;

	LOG(3, "replica addr %p", rep->part[0].addr);

	return 0;
err:
	LOG(4, "error clean up");
	int oerrno = errno;
	for (unsigned p = 0; p < rep->nparts; p++)
		util_unmap_hdr(&rep->part[p]);
	util_unmap_part(&rep->part[0]);
	errno = oerrno;
	return -1;
}
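To make the size bookkeeping concrete, a worked example with illustrative numbers; it assumes repsize starts out as the sum of the page-aligned part file sizes (set up by util_poolset_open, not shown here) and that mapping a part at offset hdrsize yields a mapping of filesize - hdrsize bytes:

/*
 * Example: one replica, 3 parts of 64 MiB each, hdrsize = 4 KiB.
 *
 *   repsize = 3 * 64 MiB - (3 - 1) * 4 KiB       = 192 MiB - 8 KiB
 *   mapsize = 64 MiB                              (part 0, header included)
 *           + (64 MiB - 4 KiB)                    (part 1, mapped past header)
 *           + (64 MiB - 4 KiB)                    (part 2, mapped past header)
 *           = 192 MiB - 8 KiB                     = repsize
 *
 * which is exactly what ASSERTeq(mapsize, rep->repsize) verifies.
 */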
Example 6
/*
 * pmempool_convert_func -- main function for convert command
 */
int
pmempool_convert_func(char *appname, int argc, char *argv[])
{
	if (argc != 2) {
		print_usage(appname);

		return -1;
	}

	int ret = 0;
	const char *f = argv[1];

	struct pmem_pool_params params;
	if (pmem_pool_parse_params(f, &params, 1)) {
		fprintf(stderr, "Cannot determine type of pool.\n");
		return -1;
	}

	if (params.is_part) {
		fprintf(stderr, "Conversion cannot be performed on "
			"a poolset part.\n");
		return -1;
	}

	if (params.type != PMEM_POOL_TYPE_OBJ) {
		fprintf(stderr, "Conversion is currently supported only for "
				"pmemobj pools.\n");
		return -1;
	}

	struct pool_set_file *psf = pool_set_file_open(f, 0, 1);
	if (psf == NULL) {
		perror(f);
		return -1;
	}

	if (psf->poolset->remote) {
		fprintf(stderr, "Conversion of remotely replicated  pools is "
			"currently not supported. Remove the replica first\n");
		pool_set_file_close(psf);
		return -1;
	}

	void *addr = pool_set_file_map(psf, 0);
	if (addr == NULL) {
		perror(f);
		ret = -1;
		goto out;
	}

	struct pool_hdr *phdr = addr;
	uint32_t m = le32toh(phdr->major);
	if (m >= COUNT_OF(version_convert) || !version_convert[m]) {
		fprintf(stderr, "There's no conversion method for the pool.\n"
				"Please make sure the pmempool utility "
				"is up-to-date.\n");
		ret = -1;
		goto out;
	}

	printf("This tool will update the pool to the latest available "
		"layout version.\nThis process is NOT fail-safe.\n"
		"Proceed only if the pool has been backed up or\n"
		"the risks are fully understood and acceptable.\n");
	if (ask_Yn('?', "convert the pool '%s' ?", f) != 'y') {
		ret = 0;
		goto out;
	}

	PMEMobjpool *pop = addr;

	for (unsigned r = 0; r < psf->poolset->nreplicas; ++r) {
		struct pool_replica *rep = psf->poolset->replica[r];
		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct pool_set_part *part = &rep->part[p];
			if (util_map_hdr(part, MAP_SHARED, 0) != 0) {
				fprintf(stderr, "Failed to map headers.\n"
						"Conversion did not start.\n");
				ret = -1;
				goto out;
			}
		}
	}

	uint32_t i;
	for (i = m; i < COUNT_OF(version_convert); ++i) {
		if (version_convert[i](psf, pop) != 0) {
			fprintf(stderr, "Failed to convert the pool\n");
			break;
		} else {
			/* need to update every header of every part */
			uint32_t target_m = i + 1;
			for (unsigned r = 0; r < psf->poolset->nreplicas; ++r) {
				struct pool_replica *rep =
					psf->poolset->replica[r];
				for (unsigned p = 0; p < rep->nparts; ++p) {
					struct pool_set_part *part =
						&rep->part[p];

					struct pool_hdr *hdr = part->hdr;
					hdr->major = htole32(target_m);
					util_checksum(hdr, sizeof(*hdr),
						&hdr->checksum, 1);
					PERSIST_GENERIC_AUTO(hdr,
						sizeof(struct pool_hdr));
				}
			}
		}
	}

	if (i != m) /* at least one step has been performed */
		printf("The pool has been converted to version %d\n.", i);

	PERSIST_GENERIC_AUTO(pop, psf->size);

out:
	for (unsigned r = 0; r < psf->poolset->nreplicas; ++r) {
		struct pool_replica *rep = psf->poolset->replica[r];
		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct pool_set_part *part = &rep->part[p];
			if (part->hdr != NULL)
				util_unmap_hdr(part);
		}
	}
	pool_set_file_close(psf);

	return ret;
}
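The conversion loop dispatches through version_convert, indexed by the pool's on-media major version. A hedged sketch of what such a table could look like; convert_fn, convert_v1_v2 and convert_v2_v3 are illustrative names, not taken from the tool:

/* index i converts a pool at layout version i to version i + 1 */
typedef int (*convert_fn)(struct pool_set_file *psf, PMEMobjpool *pop);

static const convert_fn version_convert[] = {
	NULL,		/* no layout version 0 - rejected by the check above */
	convert_v1_v2,	/* illustrative: layout 1 -> 2 */
	convert_v2_v3,	/* illustrative: layout 2 -> 3 */
};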
Example 7
File: set.c Project: bgbhpe/nvml
/*
 * util_pool_open_remote -- open a remote pool set file
 *
 * This routine does all the work, but takes a rdonly flag so internal
 * calls can map a read-only pool if required.
 */
int
util_pool_open_remote(struct pool_set **setp, const char *path, int rdonly,
	size_t minsize, char *sig, uint32_t *major,
	uint32_t *compat, uint32_t *incompat, uint32_t *ro_compat,
	unsigned char *poolset_uuid, unsigned char *first_part_uuid,
	unsigned char *prev_repl_uuid, unsigned char *next_repl_uuid,
	unsigned char *arch_flags)
{
	LOG(3, "setp %p path %s rdonly %d minsize %zu "
		"sig %p major %p compat %p incompat %p ro_comapt %p"
		"poolset_uuid %p first_part_uuid %p"
		"prev_repl_uuid %p next_repl_uuid %p arch_flags %p",
		setp, path, rdonly, minsize,
		sig, major, compat, incompat, ro_compat,
		poolset_uuid, first_part_uuid, prev_repl_uuid, next_repl_uuid,
		arch_flags);

	int flags = rdonly ? MAP_PRIVATE|MAP_NORESERVE : MAP_SHARED;

	int ret = util_poolset_open(setp, path, minsize);
	if (ret < 0) {
		LOG(2, "cannot open pool set");
		return -1;
	}

	struct pool_set *set = *setp;

	if (set->nreplicas > 1) {
		LOG(2, "remote pool set cannot have replicas");
		goto err;
	}

	if (util_replica_open(set, 0, flags) != 0) {
		LOG(2, "replica open failed");
		goto err;
	}

	struct pool_replica *rep = set->replica[0];
	struct pool_hdr *hdr = rep->part[0].hdr;

	set->rdonly |= rep->part[0].rdonly;

	/* check headers, check UUID's, check replicas linkage */
	for (unsigned p = 0; p < rep->nparts; p++) {
		if (util_header_check_remote(rep, p) != 0) {
			LOG(2, "header check failed - part #%d", p);
			goto err;
		}
		set->rdonly |= rep->part[p].rdonly;
	}

	memcpy(sig, hdr->signature, POOL_HDR_SIG_LEN);
	*major = hdr->major;
	*compat = hdr->compat_features;
	*incompat = hdr->incompat_features;
	*ro_compat = hdr->ro_compat_features;
	memcpy(poolset_uuid, hdr->poolset_uuid, POOL_HDR_UUID_LEN);
	memcpy(first_part_uuid, hdr->uuid, POOL_HDR_UUID_LEN);
	memcpy(prev_repl_uuid, hdr->prev_repl_uuid, POOL_HDR_UUID_LEN);
	memcpy(next_repl_uuid, hdr->next_repl_uuid, POOL_HDR_UUID_LEN);
	memcpy(arch_flags, &hdr->arch_flags, sizeof(struct arch_flags));

	/* unmap all headers */
	for (unsigned p = 0; p < rep->nparts; p++)
		util_unmap_hdr(&rep->part[p]);

	return 0;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	util_replica_close(set, 0);
	util_poolset_close(set, 0);
	errno = oerrno;
	return -1;
}
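A hypothetical caller sketch showing the out-parameters this function fills in; the buffer sizes follow the memcpy() calls above, and the function name is illustrative:

static int
query_remote_pool(struct pool_set **setp, const char *path, size_t minsize)
{
	char sig[POOL_HDR_SIG_LEN];
	uint32_t major, compat, incompat, ro_compat;
	unsigned char poolset_uuid[POOL_HDR_UUID_LEN];
	unsigned char first_part_uuid[POOL_HDR_UUID_LEN];
	unsigned char prev_repl_uuid[POOL_HDR_UUID_LEN];
	unsigned char next_repl_uuid[POOL_HDR_UUID_LEN];
	struct arch_flags af;

	/* open read-write (rdonly = 0) and copy out the header attributes */
	if (util_pool_open_remote(setp, path, 0, minsize, sig, &major,
			&compat, &incompat, &ro_compat,
			poolset_uuid, first_part_uuid,
			prev_repl_uuid, next_repl_uuid,
			(unsigned char *)&af) != 0)
		return -1;

	/* ... hand the attributes to the remote replication layer ... */
	return 0;
}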
Example 8
File: set.c Project: bgbhpe/nvml
/*
 * util_replica_create -- (internal) create a new memory pool replica
 */
static int
util_replica_create(struct pool_set *set, unsigned repidx, int flags,
	const char *sig, uint32_t major, uint32_t compat, uint32_t incompat,
	uint32_t ro_compat, const unsigned char *prev_repl_uuid,
	const unsigned char *next_repl_uuid, const unsigned char *arch_flags)
{
	LOG(3, "set %p repidx %u flags %d sig %.8s major %u "
		"compat %#x incompat %#x ro_comapt %#x"
		"prev_repl_uuid %p next_repl_uuid %p arch_flags %p",
		set, repidx, flags, sig, major,
		compat, incompat, ro_compat,
		prev_repl_uuid, next_repl_uuid, arch_flags);

	struct pool_replica *rep = set->replica[repidx];

	/* determine a hint address for mmap() */
	void *addr = util_map_hint(rep->repsize, 0);
	if (addr == MAP_FAILED) {
		ERR("cannot find a contiguous region of given size");
		return -1;
	}

	/* map the first part and reserve space for remaining parts */
	/* XXX investigate this idea of reserving space on Windows */
	if (util_map_part(&rep->part[0], addr, rep->repsize, 0, flags) != 0) {
		LOG(2, "pool mapping failed - part #0");
		return -1;
	}

	VALGRIND_REGISTER_PMEM_MAPPING(rep->part[0].addr, rep->part[0].size);
	VALGRIND_REGISTER_PMEM_FILE(rep->part[0].fd,
				rep->part[0].addr, rep->part[0].size, 0);

	/* map all headers - don't care about the address */
	for (unsigned p = 0; p < rep->nparts; p++) {
		if (util_map_hdr(&rep->part[p], flags) != 0) {
			LOG(2, "header mapping failed - part #%d", p);
			goto err;
		}
	}

	/* create headers, set UUID's */
	for (unsigned p = 0; p < rep->nparts; p++) {
		if (util_header_create(set, repidx, p, sig, major,
				compat, incompat, ro_compat,
				prev_repl_uuid, next_repl_uuid,
				arch_flags) != 0) {
			LOG(2, "header creation failed - part #%d", p);
			goto err;
		}
	}

	/* unmap all headers */
	for (unsigned p = 0; p < rep->nparts; p++)
		util_unmap_hdr(&rep->part[p]);

	set->zeroed &= rep->part[0].created;

	size_t mapsize = rep->part[0].filesize & ~(Pagesize - 1);
	addr = (char *)rep->part[0].addr + mapsize;

	/*
	 * map the remaining parts of the usable pool space (4K-aligned)
	 */
	for (unsigned p = 1; p < rep->nparts; p++) {
		/* map data part */
		if (util_map_part(&rep->part[p], addr, 0, POOL_HDR_SIZE,
				flags | MAP_FIXED) != 0) {
			LOG(2, "usable space mapping failed - part #%d", p);
			goto err;
		}

		VALGRIND_REGISTER_PMEM_FILE(rep->part[p].fd,
			rep->part[p].addr, rep->part[p].size, POOL_HDR_SIZE);

		mapsize += rep->part[p].size;
		set->zeroed &= rep->part[p].created;
		addr = (char *)addr + rep->part[p].size;
	}

	rep->is_pmem = pmem_is_pmem(rep->part[0].addr, rep->part[0].size);

	ASSERTeq(mapsize, rep->repsize);

	LOG(3, "replica addr %p", rep->part[0].addr);

	return 0;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	for (unsigned p = 0; p < rep->nparts; p++)
		util_unmap_hdr(&rep->part[p]);
	util_unmap_part(&rep->part[0]);
	errno = oerrno;
	return -1;
}
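Examples 1, 5, 6 and 8 all bracket header access the same way: map every part header, use it, then unmap all of them, with the err: paths unmapping unconditionally. An editorial sketch of that pattern as a reusable helper, using the two-argument util_map_hdr() flavor from this example:

static int
with_replica_headers(struct pool_replica *rep, int flags,
	int (*fn)(struct pool_set_part *part, void *arg), void *arg)
{
	int ret = 0;

	/* map every part header of the replica */
	for (unsigned p = 0; p < rep->nparts; p++) {
		if (util_map_hdr(&rep->part[p], flags) != 0) {
			ret = -1;
			goto out;
		}
	}

	/* run the callback on each mapped header */
	for (unsigned p = 0; p < rep->nparts && ret == 0; p++)
		ret = fn(&rep->part[p], arg);

out:
	/* unmap unconditionally, as the err: path above does */
	for (unsigned p = 0; p < rep->nparts; p++)
		util_unmap_hdr(&rep->part[p]);

	return ret;
}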