Example #1
nvlist_t *
fnvlist_unpack(char *buf, size_t buflen)
{
	nvlist_t *rv;
	VERIFY0(nvlist_unpack(buf, buflen, &rv, KM_SLEEP));
	return (rv);
}
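Example #1 wraps nvlist_unpack() in the fnvlist_* style: rather than returning an errno, it VERIFY0()s that unpacking succeeded, so a malformed buffer is treated as a programming error. A minimal userland round-trip sketch (illustrative only, assuming libnvpair and the ZFS VERIFY0() macro are available; the kmflag argument is ignored outside the kernel):

	nvlist_t *nvl, *copy;
	char *packed = NULL;
	size_t packed_len = 0;

	VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0));
	VERIFY0(nvlist_add_uint64(nvl, "txg", 1234ULL));

	/* serialize into a contiguous buffer (native encoding) */
	VERIFY0(nvlist_pack(nvl, &packed, &packed_len, NV_ENCODE_NATIVE, 0));

	/* fnvlist_unpack() asserts success instead of returning an errno */
	copy = fnvlist_unpack(packed, packed_len);

	free(packed);
	nvlist_free(copy);
	nvlist_free(nvl);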
Example #2
File: zfs_sa.c Project: Alyseo/zfs
int
zfs_sa_get_xattr(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	char *obj;
	int size;
	int error;

	ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
	ASSERT(!zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = sa_size(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), &size);
	if (error) {
		if (error == ENOENT)
			return (nvlist_alloc(&zp->z_xattr_cached,
			    NV_UNIQUE_NAME, KM_SLEEP));
		else
			return (error);
	}

	obj = zio_buf_alloc(size);

	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb), obj, size);
	if (error == 0)
		error = nvlist_unpack(obj, size, &zp->z_xattr_cached, KM_SLEEP);

	zio_buf_free(obj, size);

	return (error);
}
Example #3
int
fstyp_zfs(FILE *fp, char *label, size_t labelsize)
{
	vdev_label_t *vdev_label = NULL;
	vdev_phys_t *vdev_phys;
	char *zpool_name = NULL;
	nvlist_t *config = NULL;
	int err = 0;

	/*
	 * Read in the first ZFS vdev label ("L0"), located at the beginning
	 * of the vdev and extract the pool name from it.
	 *
	 * TODO: the label checksum should be validated.
	 */
	vdev_label = (vdev_label_t *)read_buf(fp, 0, sizeof(*vdev_label));
	if (vdev_label == NULL)
		return (1);

	vdev_phys = &(vdev_label->vl_vdev_phys);

	if ((nvlist_unpack(vdev_phys->vp_nvlist, sizeof(vdev_phys->vp_nvlist),
	    &config, 0)) == 0 &&
	    (nvlist_lookup_string(config, "name", &zpool_name) == 0)) {
		strlcpy(label, zpool_name, labelsize);
	} else
		err = 1;

	nvlist_free(config);
	free(vdev_label);

	return (err);
}
Example #4
int __osd_xattr_load(struct osd_device *osd, uint64_t dnode, nvlist_t **sa)
{
	sa_handle_t *sa_hdl;
	char	    *buf;
	int	     rc, size;

	if (unlikely(dnode == ZFS_NO_OBJECT))
		return -ENOENT;

	rc = -sa_handle_get(osd->od_os, dnode, NULL, SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		return rc;

	rc = -sa_size(sa_hdl, SA_ZPL_DXATTR(osd), &size);
	if (rc) {
		if (rc == -ENOENT)
			rc = -nvlist_alloc(sa, NV_UNIQUE_NAME, KM_SLEEP);
		goto out_sa;
	}

	buf = osd_zio_buf_alloc(size);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out_sa;
	}
	rc = -sa_lookup(sa_hdl, SA_ZPL_DXATTR(osd), buf, size);
	if (rc == 0)
		rc = -nvlist_unpack(buf, size, sa, KM_SLEEP);
	osd_zio_buf_free(buf, size);
out_sa:
	sa_handle_destroy(sa_hdl);

	return rc;
}
Example #5
/*ARGSUSED*/
static void
event_handler(void *cookie, char *argp, size_t asize,
    door_desc_t *dp, uint_t n_desc)
{
	door_cred_t		cred;
	nvlist_t		*nvlp;
	char			*dtype;

	if (piclevent_debug)
		syslog(LOG_INFO,
		    "piclevent: got SLM event cookie:%p evarg:%p size:0x%x\n",
		    cookie, argp, asize);
	if ((door_id < 0) || (argp == NULL) || (door_cred(&cred) < 0) ||
	    (cred.dc_euid != 0))
		(void) door_return(argp, 0, NULL, 0);

	if (nvlist_unpack(argp, asize, &nvlp, NULL))
		(void) door_return(argp, 0, NULL, 0);

	if (nvlist_lookup_string(nvlp, PICLEVENTARG_DATA_TYPE, &dtype)) {
		nvlist_free(nvlp);
		(void) door_return(argp, 0, NULL, 0);
	}

	if (strcmp(dtype, PICLEVENTARG_PICLEVENT_DATA) == 0)
		parse_piclevent(nvlp);
	/*
	 * ignore other event data types
	 */
	nvlist_free(nvlp);
	(void) door_return(argp, 0, NULL, 0);
}
Example #6
/* On error, returns NULL but does not set a Python exception. */
static PyObject *
ioctl_with_dstnv(int ioc, zfs_cmd_t *zc)
{
	int nvsz = 2048;
	void *nvbuf;
	PyObject *pynv = NULL;

again:
	nvbuf = malloc(nvsz);
	zc->zc_nvlist_dst_size = nvsz;
	zc->zc_nvlist_dst = (uintptr_t)nvbuf;

	if (ioctl(zfsdevfd, ioc, zc) == 0) {
		nvlist_t *nvl;

		errno = nvlist_unpack(nvbuf, zc->zc_nvlist_dst_size, &nvl, 0);
		if (errno == 0) {
			pynv = nvl2py(nvl);
			nvlist_free(nvl);
		}
	} else if (errno == ENOMEM) {
		free(nvbuf);
		nvsz = zc->zc_nvlist_dst_size;
		goto again;
	}
	free(nvbuf);
	return (pynv);
}
Example #7
/*
 * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of bytes that weren't
 * processed as there wasn't a complete record.
 */
static int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
    nvlist_t ***records, uint_t *numrecords)
{
	uint64_t reclen;
	nvlist_t *nv;
	int i;

	while (bytes_read > sizeof (reclen)) {

		/* get length of packed record (stored as little endian) */
		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);

		if (bytes_read < sizeof (reclen) + reclen)
			break;

		/* unpack record */
		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
			return (ENOMEM);
		bytes_read -= sizeof (reclen) + reclen;
		buf += sizeof (reclen) + reclen;

		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			*records = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
		}
		(*records)[*numrecords - 1] = nv;
	}

	*leftover = bytes_read;
	return (0);
}
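The records consumed above are length-prefixed: an 8-byte little-endian length followed by the packed nvlist. A hedged sketch of the matching producer side (illustrative only; history_record_append() is a made-up helper name, and the real in-kernel writer lives in spa_history.c):

/*
 * Append one nvlist to 'dst'/'dstlen' in the record format that
 * zpool_history_unpack() expects: an 8-byte little-endian length
 * followed by the packed bytes.
 */
static int
history_record_append(nvlist_t *nvl, char **dst, size_t *dstlen)
{
	char *packed = NULL;
	size_t packed_len = 0;
	char *tmp;
	uchar_t *p;
	int err;
	uint_t i;

	if ((err = nvlist_pack(nvl, &packed, &packed_len,
	    NV_ENCODE_NATIVE, 0)) != 0)
		return (err);

	tmp = realloc(*dst, *dstlen + sizeof (uint64_t) + packed_len);
	if (tmp == NULL) {
		free(packed);
		return (ENOMEM);
	}
	*dst = tmp;

	/* store the record length little endian, one byte at a time */
	p = (uchar_t *)(*dst + *dstlen);
	for (i = 0; i < sizeof (uint64_t); i++)
		p[i] = ((uint64_t)packed_len >> (8 * i)) & 0xff;
	(void) memcpy(p + sizeof (uint64_t), packed, packed_len);
	*dstlen += sizeof (uint64_t) + packed_len;

	free(packed);
	return (0);
}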
Example #8
static int
copyin_nvlist(char *packed_usr, size_t packed_sz, nvlist_t **nvlp)
{
	int err = 0;
	char *packed;
	nvlist_t *profile = NULL;

	/* simple sanity check */
	if (packed_usr == NULL || packed_sz == 0)
		return (EINVAL);

	/* copyin packed profile nvlist */
	packed = kmem_alloc(packed_sz, KM_NOSLEEP);
	if (packed == NULL)
		return (ENOMEM);
	err = copyin(packed_usr, packed, packed_sz);

	/* unpack packed profile nvlist */
	if (err)
		cmn_err(CE_WARN, "copyin_nvlist: copyin failed with "
		    "err %d\n", err);
	else if (err = nvlist_unpack(packed, packed_sz, &profile, KM_NOSLEEP))
		cmn_err(CE_WARN, "copyin_nvlist: nvlist_unpack "
		    "failed with err %d\n", err);

	kmem_free(packed, packed_sz);
	if (err == 0)
		*nvlp = profile;
	return (err);
}
Example #9
/*
 * Fetch the config schema from the kernel via ioctl.  This function has to
 * call the ioctl twice: the first returns the amount of memory that we need
 * to allocate for the schema, and the second actually fetches the schema.
 */
static nvlist_t *
get_schema(int fd)
{
	struct pci_iov_schema arg;
	nvlist_t *schema;
	int error;

	/* Do the ioctl() once to fetch the size of the schema. */
	arg.schema = NULL;
	arg.len = 0;
	arg.error = 0;
	error = ioctl(fd, IOV_GET_SCHEMA, &arg);
	if (error != 0)
		err(1, "Could not fetch size of config schema");

	arg.schema = malloc(arg.len);
	if (arg.schema == NULL)
		err(1, "Could not allocate %zu bytes for schema",
		    arg.len);

	/* Now do the ioctl() for real to get the schema. */
	error = ioctl(fd, IOV_GET_SCHEMA, &arg);
	if (error != 0 || arg.error != 0) {
		if (arg.error != 0)
			errno = arg.error;
		err(1, "Could not fetch config schema");
	}

	schema = nvlist_unpack(arg.schema, arg.len, NV_FLAG_IGNORE_CASE);
	if (schema == NULL)
		err(1, "Could not unpack schema");

	free(arg.schema);
	return (schema);
}
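Note that this example uses FreeBSD's libnv, where nvlist_unpack() takes a flags argument and returns the new nvlist_t * directly; the Solaris/illumos libnvpair calls elsewhere on this page instead return an errno and hand the list back through an nvlist_t ** argument. A small hedged sketch of walking the nvlist returned by get_schema() with libnv (assuming the top-level pairs are per-subsystem sub-schemas such as "PF" and "VF"):

	const char *name;
	void *cookie = NULL;
	int type;

	/* iterate the top-level pairs of the unpacked schema */
	while ((name = nvlist_next(schema, &type, &cookie)) != NULL) {
		if (type != NV_TYPE_NVLIST)
			continue;
		printf("schema subsystem: %s\n", name);
	}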
Example #10
/*
 * Load the extended attribute SA (DXATTR) for the given dnode and unpack it
 * into an nvlist.  If the object has no DXATTR yet, an empty nvlist is
 * allocated instead.
 *
 * Returns 0 on success or a negative error number on failure.
 *
 * No locking is done here.
 */
int __osd_xattr_load(udmu_objset_t *uos, uint64_t dnode, nvlist_t **sa_xattr)
{
	sa_handle_t *sa_hdl;
	char	    *buf;
	int	     rc, size;

	if (unlikely(dnode == ZFS_NO_OBJECT))
		return -ENOENT;

	rc = -sa_handle_get(uos->os, dnode, NULL, SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		return rc;

	rc = -sa_size(sa_hdl, SA_ZPL_DXATTR(uos), &size);
	if (rc) {
		if (rc == -ENOENT)
			rc = -nvlist_alloc(sa_xattr, NV_UNIQUE_NAME, KM_SLEEP);
		goto out_sa;
	}

	buf = sa_spill_alloc(KM_SLEEP);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out_sa;
	}
	rc = -sa_lookup(sa_hdl, SA_ZPL_DXATTR(uos), buf, size);
	if (rc == 0)
		rc = -nvlist_unpack(buf, size, sa_xattr, KM_SLEEP);
	sa_spill_free(buf);
out_sa:
	sa_handle_destroy(sa_hdl);

	return rc;
}
Example #11
int
main(int argc, char **argv)
{
	nvlist_t *nvl;
	char *buf;
	int ret;
	size_t size;
	FILE *pfile;
	bool print;

	ret = 1;
	nvl = NULL;
	buf = NULL;
	pfile = NULL;
	print = false;
	if (argc != 2 && argc != 3) {
		fprintf(stderr, "Usage: %s [filename] [print]\n", argv[0]);
		return (1);
	}
	if (argc == 3 && strcmp(argv[2], "print") == 0)
		print = true;

	pfile = fopen(argv[1], "r");
	if (pfile == NULL) {
		fprintf(stderr, "Unable to open file %s.\n", argv[1]);
		return (1);
	}

	fseek(pfile, 0, SEEK_END);
	size = ftell(pfile);
	fseek(pfile, 0, SEEK_SET);

	buf = malloc(size);
	if (buf == NULL) {
		fprintf(stderr, "Unable to read file %s.\n", argv[1]);
		goto out;
	}

	if (fread(buf, 1, size, pfile) != size) {
		fprintf(stderr, "Unable to read full file %s.\n", argv[1]);
		goto out;
	}

	nvl = nvlist_unpack(buf, size, 0);
	if (nvl == NULL || nvlist_error(nvl) != 0)
		printf("Failed to unpack.\n");
	else if (print)
		nvlist_fdump(nvl, stdout);

	ret = 0;
out:
	if (pfile != NULL)
		fclose(pfile);
	if (nvl != NULL)
		nvlist_destroy(nvl);
	free(buf);

	return (ret);
}
Example #12
/*
 * DR event handler
 * respond to the picl events:
 *      PICLEVENT_DR_AP_STATE_CHANGE
 */
static void
dr_handler(const char *ename, const void *earg, size_t size, void *cookie)
{
	nvlist_t	*nvlp = NULL;
	char		*dtype;
	char		*ap_id;
	char		*hint;


	if (strcmp(ename, PICLEVENT_DR_AP_STATE_CHANGE) != 0) {
		return;
	}

	if (nvlist_unpack((char *)earg, size, &nvlp, NULL)) {
		return;
	}

	if (nvlist_lookup_string(nvlp, PICLEVENTARG_DATA_TYPE, &dtype)) {
		nvlist_free(nvlp);
		return;
	}

	if (strcmp(dtype, PICLEVENTARG_PICLEVENT_DATA) != 0) {
		nvlist_free(nvlp);
		return;
	}

	if (nvlist_lookup_string(nvlp, PICLEVENTARG_AP_ID, &ap_id)) {
		nvlist_free(nvlp);
		return;
	}

	if (nvlist_lookup_string(nvlp, PICLEVENTARG_HINT, &hint)) {
		nvlist_free(nvlp);
		return;
	}

	mdp = mdesc_devinit();
	if (mdp == NULL) {
		nvlist_free(nvlp);
		return;
	}

	rootnode = md_root_node(mdp);

	if (strcmp(hint, DR_HINT_INSERT) == 0)
		(void) update_devices(ap_id, DEV_ADD);
	else if (strcmp(hint, DR_HINT_REMOVE) == 0)
		(void) update_devices(ap_id, DEV_REMOVE);

	mdesc_devfini(mdp);
	nvlist_free(nvlp);

	/*
	 * Signal the devtree plugin to add more cpu properties.
	 */
	signal_devtree();
}
Example #13
static nvlist_t *
amd_lookup_by_mcid(topo_mod_t *mod, topo_instance_t id)
{
	mc_snapshot_info_t mcs;
	void *buf = NULL;
	uint8_t ver;

	nvlist_t *nvl = NULL;
	char path[64];
	int fd, err;

	(void) snprintf(path, sizeof (path), "/dev/mc/mc%d", id);
	fd = open(path, O_RDONLY);

	if (fd == -1) {
		/*
		 * Some v20z and v40z systems may have had the 3rd-party
		 * NWSnps package installed, which installs a /dev/mc
		 * link.  So try again via /devices.
		 */
		(void) snprintf(path, sizeof (path),
		    "/devices/pci@0,0/pci1022,1102@%x,2:mc-amd",
		    MC_AMD_DEV_OFFSET + id);
		fd = open(path, O_RDONLY);
	}

	if (fd == -1)
		return (NULL);	/* do not whinge */

	if (ioctl(fd, MC_IOC_SNAPSHOT_INFO, &mcs) == -1 ||
	    (buf = topo_mod_alloc(mod, mcs.mcs_size)) == NULL ||
	    ioctl(fd, MC_IOC_SNAPSHOT, buf) == -1) {

		whinge(mod, NULL, "mc failed to snapshot %s: %s\n",
		    path, strerror(errno));

		free(buf);
		(void) close(fd);
		return (NULL);
	}

	(void) close(fd);
	err = nvlist_unpack(buf, mcs.mcs_size, &nvl, 0);
	topo_mod_free(mod, buf, mcs.mcs_size);

	if (nvlist_lookup_uint8(nvl, MC_NVLIST_VERSTR, &ver) != 0) {
		whinge(mod, NULL, "mc nvlist is not versioned\n");
		nvlist_free(nvl);
		return (NULL);
	} else if (ver != MC_NVLIST_VERS1) {
		whinge(mod, NULL, "mc nvlist version mismatch\n");
		nvlist_free(nvl);
		return (NULL);
	}

	return (err ? NULL : nvl);
}
Example #14
/*
 * Unpacks an nvlist from the ZFS ioctl command structure.
 */
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
	if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
	    zc->zc_nvlist_dst_size, nvlp, 0) != 0)
		return (no_memory(hdl));

	return (0);
}
Example #15
/*
 * Given the root disk device name, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devname, nvlist_t **config)
{
	vdev_label_t *label;
	struct device *dev;
	uint64_t size;
	int l;
	int error = -1;
	char *minor_name;

	error = device_open(devname + 5, DO_RDWR, &dev);
	if (error)
		return error;

	size = P2ALIGN_TYPED(dev->size, sizeof (vdev_label_t), uint64_t);
	label = kmem_alloc(sizeof (vdev_label_t), KM_SLEEP);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, l, 0);
		if (vdev_disk_physio(dev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, 0) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, sizeof (vdev_label_t));
	device_close(dev);
	if (*config == NULL)
		error = EIDRM;

	return (error);
}
Example #16
void
zpool_read_cachefile(void)
{
	int fd;
	struct stat stbf;
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	uint64_t guid;
	int importrc = 0;

	printf("reading cachefile\n");

	fd = open(ZPOOL_CACHE, O_RDONLY);
	if (fd < 0)
		return;

	if (fstat(fd, &stbf) || !stbf.st_size)
		goto out;

	buf = kmem_alloc(stbf.st_size, 0);
	if (!buf)
		goto out;

	if (read(fd, buf, stbf.st_size) != stbf.st_size)
		goto out;

	if (nvlist_unpack(buf, stbf.st_size, &nvlist, KM_PUSHPAGE) != 0)
		goto out;

	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		printf("Cachefile has pool '%s'\n", nvpair_name(nvpair));

		if (nvlist_lookup_uint64(child, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0) {
			printf("Cachefile has pool '%s' guid %llu\n",
			    nvpair_name(nvpair), guid);

			importrc = zpool_import_by_guid(guid);
			printf("zpool import error %d\n", importrc);
		}

	}
	nvlist_free(nvlist);

out:
	close(fd);
	if (buf)
		kmem_free(buf, stbf.st_size);

}
Example #17
/*
 * Top level function for event service
 */
void
event_service(void **data, size_t *datalen)
{
	int cmd;
	int lerrno;
	int seq_num;
	nvlist_t *nvl;
	nvlist_t *ret;

	rcm_log_message(RCM_TRACE1, "received door operation\n");

	/* Decode the data from the door into an unpacked nvlist */
	if (data == NULL || datalen == NULL) {
		rcm_log_message(RCM_ERROR, "received null door argument\n");
		return;
	}
	if (lerrno = nvlist_unpack(*data, *datalen, &nvl, 0)) {
		rcm_log_message(RCM_ERROR, "received bad door argument, %s\n",
		    strerror(lerrno));
		return;
	}

	/* Do nothing if the door is just being knocked on */
	if (errno = nvlist_lookup_int32(nvl, RCM_CMD, &cmd)) {
		rcm_log_message(RCM_ERROR,
		    "bad door argument (nvlist_lookup=%s)\n", strerror(errno));
		nvlist_free(nvl);
		return;
	}
	if (cmd == CMD_KNOCK) {
		rcm_log_message(RCM_TRACE1, "door event was just a knock\n");
		nvlist_free(nvl);
		*data = NULL;
		*datalen = 0;
		return;
	}

	/*
	 * Go increment thread count. Before daemon is fully initialized,
	 * the event processing blocks inside this function.
	 */
	seq_num = rcmd_thr_incr(cmd);

	process_event(cmd, seq_num, nvl, &ret);
	nvlist_free(nvl);
	assert(ret != NULL);

	/*
	 * Decrement thread count
	 */
	rcmd_thr_decr();

out:
	*data = ret;
	*datalen = 0;
}
Example #18
/*
 * load initial fuid domain and idx trees.  This function is used by
 * both the kernel and zdb.
 */
uint64_t
zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
    avl_tree_t *domain_tree)
{
	dmu_buf_t *db;
	uint64_t fuid_size;

	avl_create(idx_tree, idx_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_idxnode));
	avl_create(domain_tree, domain_compare,
	    sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode));

	VERIFY(0 == dmu_bonus_hold(os, fuid_obj, FTAG, &db));
	fuid_size = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	if (fuid_size)  {
		nvlist_t **fuidnvp;
		nvlist_t *nvp = NULL;
		uint_t count;
		char *packed;
		int i;

		packed = kmem_alloc(fuid_size, KM_SLEEP);
		VERIFY(dmu_read(os, fuid_obj, 0, fuid_size, packed) == 0);
		VERIFY(nvlist_unpack(packed, fuid_size,
		    &nvp, 0) == 0);
		VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
		    &fuidnvp, &count) == 0);

		for (i = 0; i != count; i++) {
			fuid_domain_t *domnode;
			char *domain;
			uint64_t idx;

			VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
			    &domain) == 0);
			VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
			    &idx) == 0);

			domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);

			domnode->f_idx = idx;
			domnode->f_ksid = ksid_lookupdomain(domain);
			avl_add(idx_tree, domnode);
			avl_add(domain_tree, domnode);
		}
		nvlist_free(nvp);
		kmem_free(packed, fuid_size);
	}
	return (fuid_size);
}
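The packed FUID table unpacked above is an nvlist with a single FUID_NVP_ARRAY entry: an array of nvlists, each holding a FUID_DOMAIN string and a FUID_IDX index. A hedged sketch of building a one-entry table of that shape (illustrative only; the real producer is the FUID sync path in zfs_fuid.c, and the domain string here is just a placeholder):

	nvlist_t *nvp, *entry;
	nvlist_t *array[1];
	char *packed = NULL;
	size_t nvsize = 0;

	VERIFY0(nvlist_alloc(&entry, NV_UNIQUE_NAME, KM_SLEEP));
	VERIFY0(nvlist_add_string(entry, FUID_DOMAIN, "S-1-0-0"));
	VERIFY0(nvlist_add_uint64(entry, FUID_IDX, 1));
	array[0] = entry;

	VERIFY0(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP));
	VERIFY0(nvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY, array, 1));

	/* nvlist_unpack() detects the encoding from the packed header itself */
	VERIFY0(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP));

	/* 'packed'/'nvsize' is what zfs_fuid_table_load() would unpack */
	kmem_free(packed, nvsize);
	nvlist_free(entry);
	nvlist_free(nvp);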
Example #19
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
Example #20
/*
 * This function is invoked when a share is disabled to disconnect trees
 * and close files.  Cleaning up may involve VOP and/or VFS calls, which
 * may conflict/deadlock with stuck threads if something is amiss with the
 * file system.  Queueing the request for asynchronous processing allows the
 * call to return immediately so that, if the unshare is being done in the
 * context of a forced unmount, the forced unmount will always be able to
 * proceed (unblocking stuck I/O and eventually allowing all blocked unshare
 * processes to complete).
 *
 * The path lookup to find the root vnode of the VFS in question and the
 * release of this vnode are done synchronously prior to any associated
 * unmount.  Doing these asynchronous to an associated unmount could run
 * the risk of a spurious EBUSY for a standard unmount or an EIO during
 * the path lookup due to a forced unmount finishing first.
 */
int
smb_kshare_unexport_list(smb_ioc_share_t *ioc)
{
	smb_server_t	*sv = NULL;
	smb_unshare_t	*ux;
	nvlist_t	*shrlist = NULL;
	nvpair_t	*nvp;
	boolean_t	unexport = B_FALSE;
	char		*shrname;
	int		rc;

	if ((rc = smb_server_lookup(&sv)) != 0)
		return (rc);

	if ((rc = nvlist_unpack(ioc->shr, ioc->shrlen, &shrlist, 0)) != 0)
		goto out;

	for (nvp = nvlist_next_nvpair(shrlist, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(shrlist, nvp)) {
		if (nvpair_type(nvp) != DATA_TYPE_NVLIST)
			continue;

		shrname = nvpair_name(nvp);
		ASSERT(shrname);

		if ((rc = smb_kshare_unexport(sv, shrname)) != 0)
			continue;

		ux = kmem_cache_alloc(smb_kshare_cache_unexport, KM_SLEEP);
		(void) strlcpy(ux->us_sharename, shrname, MAXNAMELEN);

		smb_slist_insert_tail(&sv->sv_export.e_unexport_list, ux);
		unexport = B_TRUE;
	}

	if (unexport)
		smb_thread_signal(&sv->sv_export.e_unexport_thread);
	rc = 0;

out:
	if (shrlist != NULL)
		nvlist_free(shrlist);
	smb_server_release(sv);
	return (rc);
}
Example #21
static int
pci_iov_parse_config(struct pcicfg_iov *iov, struct pci_iov_arg *arg,
    nvlist_t **ret)
{
	void *packed_config;
	nvlist_t *config;
	int error;

	config = NULL;
	packed_config = NULL;

	if (arg->len > pci_iov_max_config) {
		error = EMSGSIZE;
		goto out;
	}

	packed_config = malloc(arg->len, M_SRIOV, M_WAITOK);

	error = copyin(arg->config, packed_config, arg->len);
	if (error != 0)
		goto out;

	config = nvlist_unpack(packed_config, arg->len, NV_FLAG_IGNORE_CASE);
	if (config == NULL) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_schema_validate_config(iov->iov_schema, config);
	if (error != 0)
		goto out;

	error = nvlist_error(config);
	if (error != 0)
		goto out;

	*ret = config;
	config = NULL;

out:
	nvlist_destroy(config);
	free(packed_config, M_SRIOV);
	return (error);
}
Example #22
/*
 * Load the extended attribute SA (DXATTR) for the object and cache the
 * unpacked nvlist in obj->oo_sa_xattr.  If the object has no DXATTR yet,
 * an empty nvlist is allocated instead.
 *
 * Returns 0 on success or a negative error number on failure.
 *
 * No locking is done here.
 */
int __osd_xattr_cache(const struct lu_env *env, struct osd_object *obj)
{
	struct osd_device *osd = osd_obj2dev(obj);
	udmu_objset_t     *uos = &osd->od_objset;
	sa_handle_t       *sa_hdl;
	char              *buf;
	int                size;
	int                rc;

	LASSERT(obj->oo_sa_xattr == NULL);
	LASSERT(obj->oo_db != NULL);

	rc = -sa_handle_get(uos->os, obj->oo_db->db_object, NULL,
			SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		return rc;

	rc = -sa_size(sa_hdl, SA_ZPL_DXATTR(uos), &size);
	if (rc) {
		if (rc == -ENOENT)
			rc = -nvlist_alloc(&obj->oo_sa_xattr,
					NV_UNIQUE_NAME, KM_SLEEP);
		goto out_sa;
	}

	buf = sa_spill_alloc(KM_SLEEP);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out_sa;
	}
	rc = -sa_lookup(sa_hdl, SA_ZPL_DXATTR(uos), buf, size);
	if (rc == 0)
		rc = -nvlist_unpack(buf, size, &obj->oo_sa_xattr, KM_SLEEP);
	sa_spill_free(buf);
out_sa:
	sa_handle_destroy(sa_hdl);

	return rc;
}
Example #23
/*
 * Discovery event handler
 * respond to the picl events:
 *      PICLEVENT_SYSEVENT_DEVICE_ADDED
 *      PICLEVENT_SYSEVENT_DEVICE_REMOVED
 */
static void
dsc_handler(const char *ename, const void *earg, size_t size, void *cookie)
{
	nvlist_t	*nvlp = NULL;
	char		*path;
	disk_lookup_t	lookup;
	int		status;

	/*
	 * retrieve the device's physical path from the event arg
	 * and determine which disk (if any) we are working with
	 */
	if (nvlist_unpack((char *)earg, size, &nvlp, NULL))
		return;
	if (nvlist_lookup_string(nvlp, "devfs-path", &path)) {
		nvlist_free(nvlp);
		return;
	}

	lookup.path = strdup(path);
	lookup.disk = NULL;
	lookup.result = DISK_NOT_FOUND;

	status = ptree_walk_tree_by_class(root_node, "disk", (void *)&lookup,
	    find_disk);
	if (status != PICL_SUCCESS) {
		nvlist_free(nvlp);
		return;
	}

	if (lookup.result == DISK_FOUND) {
		if (strcmp(ename, PICLEVENT_SYSEVENT_DEVICE_ADDED) == 0)
			ptree_update_propval_by_name(lookup.disk, "State",
			    (void *)strdup(CONFIGURED), PICL_PROPNAMELEN_MAX);
		else if (strcmp(ename, PICLEVENT_SYSEVENT_DEVICE_REMOVED) == 0)
			ptree_update_propval_by_name(lookup.disk, "State",
			    (void *)strdup(UNCONFIGURED), PICL_PROPNAMELEN_MAX);
	}

	nvlist_free(nvlp);
}
Example #24
static int
zfs_ioctl_compat_get_nvlist(uint64_t nvl, size_t size, int iflag,
    nvlist_t **nvp)
{
	char *packed;
	int error;
	nvlist_t *list = NULL;

	/*
	 * Read in and unpack the user-supplied nvlist.
	 */
	if (size == 0)
		return (EINVAL);

#ifdef _KERNEL
	packed = kmem_alloc(size, KM_SLEEP);
	if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size,
	    iflag)) != 0) {
		kmem_free(packed, size);
		return (error);
	}
#else
	packed = (void *)(uintptr_t)nvl;
#endif

	error = nvlist_unpack(packed, size, &list, 0);

#ifdef _KERNEL
	kmem_free(packed, size);
#endif

	if (error != 0)
		return (error);

	*nvp = list;
	return (0);
}
Example #25
/*
 * Handles the door command IPMGMT_CMD_SETADDR. It adds a new address object
 * node to the list `aobjmap' and then persists the address information in the
 * DB.
 */
static void
ipmgmt_setaddr_handler(void *argp)
{
	ipmgmt_setaddr_arg_t	*sargp = argp;
	ipmgmt_retval_t		rval;
	ipmgmt_aobjmap_t	node;
	nvlist_t		*nvl = NULL;
	char			*nvlbuf;
	size_t			nvlsize = sargp->ia_nvlsize;
	uint32_t		flags = sargp->ia_flags;
	int			err = 0;

	nvlbuf = (char *)argp + sizeof (ipmgmt_setaddr_arg_t);
	if ((err = nvlist_unpack(nvlbuf, nvlsize, &nvl, NV_ENCODE_NATIVE)) != 0)
		goto ret;
	if (flags & (IPMGMT_ACTIVE|IPMGMT_INIT)) {
		if ((err = i_ipmgmt_nvl2aobjnode(nvl, &node)) != 0)
			goto ret;
		if (flags & IPMGMT_INIT)
			node.am_flags = (IPMGMT_ACTIVE|IPMGMT_PERSIST);
		else
			node.am_flags = flags;
		if ((err = ipmgmt_aobjmap_op(&node, ADDROBJ_ADD)) != 0)
			goto ret;
	}
	if (flags & IPMGMT_PERSIST) {
		ipadm_dbwrite_cbarg_t	cb;

		cb.dbw_nvl = nvl;
		cb.dbw_flags = 0;
		err = ipmgmt_db_walk(ipmgmt_db_add, &cb, IPADM_DB_WRITE);
	}
ret:
	nvlist_free(nvl);
	rval.ir_err = err;
	(void) door_return((char *)&rval, sizeof (rval), NULL, 0);
}
Example #26
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace.  It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	spa_t *spa;
	char pathname[128];
	struct _buf *file;
	struct bootstat bst;

	/*
	 * Open the configuration file.
	 */
	(void) snprintf(pathname, sizeof (pathname), "%s%s/%s",
	    (rootdir != NULL) ? "./" : "", spa_config_dir, ZPOOL_CACHE_FILE);

	file = kobj_open_file(pathname);
	if (file == (struct _buf *)-1)
		return;

	if (kobj_fstat(file->_fd, &bst) != 0)
		goto out;

	buf = kmem_alloc(bst.st_size, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, bst.st_size, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, bst.st_size, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {

		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		spa = spa_add(nvpair_name(nvpair), NULL);

		/*
		 * We blindly duplicate the configuration here.  If it's
		 * invalid, we will catch it when the pool is first opened.
		 */
		VERIFY(nvlist_dup(child, &spa->spa_config, 0) == 0);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, bst.st_size);

	kobj_close_file(file);
}
Example #27
/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 * Return 0 on success, or -1 on failure
 */
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;

	*config = NULL;

	if (fstat(fd, &statbuf) == -1)
		return (-1);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}
Example #28
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
		    cachefile) != 0) {
			(void) no_memory(hdl);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
Example #29
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace.  It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	char *pathname;
	struct _buf *file;
	uint64_t fsize;

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);

	file = kobj_open_file(pathname);

	kmem_free(pathname, MAXPATHLEN);

	if (file == (struct _buf *)-1)
		return;

	if (kobj_get_filesize(file, &fsize) != 0)
		goto out;

	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, fsize, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		(void) spa_add(nvpair_name(nvpair), child, NULL);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, fsize);

	kobj_close_file(file);
}
Example #30
/*
 * Our approach to cases is the same as for resources: we first obtain a
 * list of UUIDs, sort them, then obtain the case information for each.
 */
int
fmd_adm_case_iter(fmd_adm_t *ap, const char *url_token, fmd_adm_case_f *func,
    void *arg)
{
	struct fmd_rpc_caselist rcl;
	struct fmd_rpc_caseinfo rci;
	fmd_adm_caseinfo_t aci;
	char **uuids, *p;
	int i, rv;
	enum clnt_stat cs;
	uint_t retries = 0;

	bzero(&rcl, sizeof (rcl)); /* tell xdr to allocate memory for us */

	do {
		cs = fmd_adm_caselist_1(&rcl, ap->adm_clnt);
	} while (fmd_adm_retry(ap, cs, &retries));

	if (cs != RPC_SUCCESS)
		return (fmd_adm_set_errno(ap, EPROTO));

	if (rcl.rcl_err != 0) {
		xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
		return (fmd_adm_set_svcerr(ap, rcl.rcl_err));
	}

	if ((uuids = malloc(sizeof (char *) * rcl.rcl_cnt)) == NULL) {
		xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
		return (fmd_adm_set_errno(ap, EAGAIN));
	}

	p = rcl.rcl_buf.rcl_buf_val;

	for (i = 0; i < rcl.rcl_cnt; i++, p += strlen(p) + 1)
		uuids[i] = p;

	qsort(uuids, rcl.rcl_cnt, sizeof (char *), fmd_adm_case_cmp);

	for (i = 0; i < rcl.rcl_cnt; i++) {
		bzero(&rci, sizeof (rci));

		retries = 0;
		do {
			cs = fmd_adm_caseinfo_1(uuids[i], &rci, ap->adm_clnt);
		} while (fmd_adm_retry(ap, cs, &retries));

		if (cs != RPC_SUCCESS) {
			free(uuids);
			xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
			return (fmd_adm_set_errno(ap, EPROTO));
		}

		if (rci.rci_err != 0 && rci.rci_err != FMD_ADM_ERR_CASESRCH) {
			xdr_free(xdr_fmd_rpc_caseinfo, (char *)&rci);
			free(uuids);
			xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
			return (fmd_adm_set_svcerr(ap, rci.rci_err));
		}

		if (rci.rci_err == FMD_ADM_ERR_CASESRCH) {
			xdr_free(xdr_fmd_rpc_caseinfo, (char *)&rci);
			continue;
		}

		bzero(&aci, sizeof (aci));

		if ((rv = nvlist_unpack(rci.rci_evbuf.rci_evbuf_val,
		    rci.rci_evbuf.rci_evbuf_len, &aci.aci_event, 0)) != 0) {
			xdr_free(xdr_fmd_rpc_caseinfo, (char *)&rci);
			free(uuids);
			xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
			return (fmd_adm_set_errno(ap, rv));
		}

		if ((rv = nvlist_lookup_string(aci.aci_event, FM_SUSPECT_UUID,
		    (char **)&aci.aci_uuid)) != 0) {
			xdr_free(xdr_fmd_rpc_caseinfo, (char *)&rci);
			free(uuids);
			xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
			nvlist_free(aci.aci_event);
			return (fmd_adm_set_errno(ap, rv));
		}
		if ((rv = nvlist_lookup_string(aci.aci_event,
		    FM_SUSPECT_DIAG_CODE, (char **)&aci.aci_code)) != 0) {
			xdr_free(xdr_fmd_rpc_caseinfo, (char *)&rci);
			free(uuids);
			xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
			nvlist_free(aci.aci_event);
			return (fmd_adm_set_errno(ap, rv));
		}

		rv = fmd_adm_case_one(&aci, url_token, func, arg);

		xdr_free(xdr_fmd_rpc_caseinfo, (char *)&rci);
		nvlist_free(aci.aci_event);

		if (rv != 0)
			break;
	}

	free(uuids);
	xdr_free(xdr_fmd_rpc_caselist, (char *)&rcl);
	return (0);
}