Example No. 1
/*
 * Sets the share properties on a ZFS share. For now, this method sets only
 * the "sharesmb" property.
 *
 * This method builds a comma-separated name-value string to be set on the
 * "sharesmb" property of a ZFS share. The name-value string is built in
 * two steps:
 *    - New property values, given as name-value pairs, are set first.
 *    - Existing optionset properties that are not part of the new properties
 *	passed in step 1 are appended to the newly set properties.
 */
int
sa_zfs_setprop(sa_handle_t handle, char *path, nvlist_t *nvl)
{
	zfs_handle_t *z_fs;
	libzfs_handle_t *z_lib;
	char sharesmb_val[MAXPATHLEN];
	char *dataset, *lastcomma;

	if (nvlist_empty(nvl))
		return (0);

	if ((handle == NULL) || (path == NULL))
		return (-1);

	if ((dataset = get_zfs_dataset(handle, path, B_FALSE)) == NULL)
		return (-1);

	if ((z_lib = libzfs_init()) == NULL) {
		free(dataset);
		return (-1);
	}

	z_fs = zfs_open(z_lib, dataset, ZFS_TYPE_DATASET);
	if (z_fs == NULL) {
		free(dataset);
		libzfs_fini(z_lib);
		return (-1);
	}

	bzero(sharesmb_val, MAXPATHLEN);
	if (sa_zfs_sprintf_new_prop(nvl, sharesmb_val) != 0) {
		free(dataset);
		zfs_close(z_fs);
		libzfs_fini(z_lib);
		return (-1);
	}

	if (sa_zfs_sprintf_existing_prop(z_fs, sharesmb_val) != 0) {
		free(dataset);
		zfs_close(z_fs);
		libzfs_fini(z_lib);
		return (-1);
	}

	lastcomma = strrchr(sharesmb_val, ',');
	if ((lastcomma != NULL) && (lastcomma[1] == '\0'))
		*lastcomma = '\0';

	(void) zfs_prop_set(z_fs, zfs_prop_to_name(ZFS_PROP_SHARESMB),
	    sharesmb_val);
	free(dataset);
	zfs_close(z_fs);
	libzfs_fini(z_lib);

	return (0);
}
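
A minimal caller sketch for the helper above (not part of the original source): it builds the nvlist of new SMB share options with libnvpair and hands it to sa_zfs_setprop(). The property name "guestok" and the function name set_smb_guest_ok() are illustrative only; the sa_handle_t is assumed to come from the surrounding libshare code, and <libnvpair.h> plus the libshare headers are assumed to be included.

static int
set_smb_guest_ok(sa_handle_t handle, char *mountpoint)
{
	nvlist_t *nvl;
	int ret;

	/* nvlist of name-value pairs that end up in the "sharesmb" string */
	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (-1);

	if (nvlist_add_string(nvl, "guestok", "true") != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = sa_zfs_setprop(handle, mountpoint, nvl);
	nvlist_free(nvl);
	return (ret);
}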
Example No. 2
/*
 * Write a string-valued zfs property.  Properties with a NULL or
 * zero-length value are not written, and 0 is returned.
 */
static int zfs_set_prop_str(zfs_handle_t *zhp, char *prop, char *val)
{
	int ret = 0;

	if (val && strlen(val) > 0) {
		vprint("  %s=%s\n", prop, val);
		ret = zfs_prop_set(zhp, prop, val);
	}

	return ret;
}
Example No. 3
/*
 * Write a numeric zfs property by formatting the value as a decimal string.
 */
static int zfs_set_prop_int(zfs_handle_t *zhp, char *prop, __u32 val)
{
	char str[64];
	int ret;

	(void) snprintf(str, sizeof (str), "%lu", (unsigned long)val);
	vprint("  %s=%s\n", prop, str);
	ret = zfs_prop_set(zhp, prop, str);

	return ret;
}
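
A hedged usage sketch covering the two helpers from Examples 2 and 3: it opens a dataset and writes one string and one numeric property. The dataset name, the user-property names, and example_set_props() itself are illustrative; the libzfs handle is assumed to have been created with libzfs_init() elsewhere.

static int example_set_props(libzfs_handle_t *lzh, char *dsname)
{
	zfs_handle_t *zhp;
	int ret;

	if ((zhp = zfs_open(lzh, dsname, ZFS_TYPE_FILESYSTEM)) == NULL)
		return -1;

	/* string property: skipped automatically if the value is empty */
	ret = zfs_set_prop_str(zhp, "com.example:role", "backup");

	/* numeric property: formatted as a decimal string before being set */
	if (ret == 0)
		ret = zfs_set_prop_int(zhp, "com.example:index", 3);

	zfs_close(zhp);
	return ret;
}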
Example No. 4
/**
 * Create the zpool
 * @param p_libzfshd: libzfs handle
 * @param psz_zpool: zpool name
 * @param pnv_root: the root tree of vdev
 * @param pnv_props: the tree of properties (can be NULL)
 * @param pnv_fsprops: the tree of the file system properties (can be NULL)
 * @param ppsz_error: the error message if any
 * @return 0 on success, an error code otherwise
 */
int libzfs_zpool_create(libzfs_handle_t *p_libzfshd, const char* psz_zpool,
                        nvlist_t *pnv_root, nvlist_t *pnv_props,
                        nvlist_t *pnv_fsprops, const char **ppsz_error)
{
        int i_error;
        char *psz_altroot;

        /* Check the zpool name */
        if(libzfs_zpool_name_valid(psz_zpool, ppsz_error))
                return EINVAL;

        /** Check the properties
            TODO: zpool_valid_proplist and zfs_valid_proplist */

        if((i_error = spa_create(psz_zpool, pnv_root, pnv_props, "libzfswrap_zpool_create", pnv_fsprops)))
        {
                switch(i_error)
                {
                case EBUSY:
                        *ppsz_error = "one or more vdevs refer to the same device";
                        break;
                case EOVERFLOW:
                        *ppsz_error = "one or more devices is less than the minimum size (64Mo)";
                        break;
                case ENOSPC:
                        *ppsz_error = "one or more devices is out of space";
                        break;
                case ENOTBLK:
                        *ppsz_error = "cache device must be a disk or disk slice";
                        break;
                case EEXIST:
                        *ppsz_error = "the pool already exist";
                        break;
                default:
                        *ppsz_error = "unable to create the spa";
                }
                return i_error;
        }

        /* If this is an alternate root pool, then automatically set the
           mountpoint to be '/' */
        if(nvlist_lookup_string(pnv_props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &psz_altroot) == 0)
        {
                zfs_handle_t *p_zhd;

                /* keep the side effects out of assert() so NDEBUG builds still run them */
                p_zhd = zfs_open(p_libzfshd, psz_zpool, ZFS_TYPE_DATASET);
                assert(p_zhd != NULL);
                (void) zfs_prop_set(p_zhd, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), "/");
                zfs_close(p_zhd);
        }

        return 0;
}
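
A hypothetical caller for the wrapper above: it assumes the vdev tree (pnv_root) has already been built by the caller, passes NULL for both property lists, and reports the returned error string. The pool name "tank" and the function name create_pool() are illustrative; <stdio.h> is assumed for fprintf().

static int create_pool(libzfs_handle_t *p_libzfshd, nvlist_t *pnv_root)
{
        const char *psz_error = NULL;
        int i_error;

        /* No extra pool or filesystem properties: both nvlists may be NULL */
        i_error = libzfs_zpool_create(p_libzfshd, "tank", pnv_root,
                                      NULL, NULL, &psz_error);
        if(i_error != 0)
                fprintf(stderr, "cannot create 'tank': %s\n", psz_error);

        return i_error;
}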
Example No. 5
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    const char *altroot)
{
	zfs_cmd_t zc = { 0 };
	char *packed;
	size_t len;
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (altroot != NULL && altroot[0] != '/')
		return (zfs_error(hdl, EZFS_BADPATH,
		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));

	if (nvlist_size(nvroot, &len, NV_ENCODE_NATIVE) != 0)
		return (no_memory(hdl));

	if ((packed = zfs_alloc(hdl, len)) == NULL)
		return (-1);

	if (nvlist_pack(nvroot, &packed, &len,
	    NV_ENCODE_NATIVE, 0) != 0) {
		free(packed);
		return (no_memory(hdl));
	}

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_config_src = (uint64_t)(uintptr_t)packed;
	zc.zc_config_src_size = len;

	if (altroot != NULL)
		(void) strlcpy(zc.zc_root, altroot, sizeof (zc.zc_root));

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
		free(packed);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	free(packed);

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (altroot != NULL) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
		verify(zfs_prop_set(zhp, ZFS_PROP_MOUNTPOINT, "/") == 0);

		zfs_close(zhp);
	}

	return (0);
}
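
A hedged sketch of driving zpool_create() directly: it hand-builds the smallest possible vdev tree (a single file vdev) with libnvpair, using the ZPOOL_CONFIG_*/VDEV_TYPE_* constants from <sys/fs/zfs.h>. In the real zpool(1M) command the tree comes from make_root_vdev(), which fills in additional fields, so treat this as illustrative rather than a complete replacement; create_file_pool() is a hypothetical name.

static int
create_file_pool(libzfs_handle_t *hdl, const char *pool, const char *vdevpath)
{
	nvlist_t *root = NULL, *file = NULL;
	int ret = -1;

	/* leaf vdev: a plain file backing store */
	if (nvlist_alloc(&file, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) != 0 ||
	    nvlist_add_string(file, ZPOOL_CONFIG_PATH, vdevpath) != 0)
		goto out;

	/* root of the vdev tree, with the file vdev as its only child */
	if (nvlist_alloc(&root, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN, &file, 1) != 0)
		goto out;

	ret = zpool_create(hdl, pool, root, NULL);	/* no alternate root */
out:
	nvlist_free(file);
	nvlist_free(root);
	return (ret);
}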
Example No. 6
/*
 * Function:	set_canmount
 * Description:	Sets the canmount property on the datasets of the
 *		activated BE.
 * Parameters:
 *		be_nodes - The be_node_t returned from be_list
 *		value - The value of canmount we are setting: on|off|noauto.
 * Return:
 *		BE_SUCCESS - Success
 *		be_errno_t - Failure
 * Scope:
 *		Private
 */
static int
set_canmount(be_node_list_t *be_nodes, char *value)
{
	char		ds_path[MAXPATHLEN];
	zfs_handle_t	*zhp = NULL;
	be_node_list_t	*list = be_nodes;
	int		err = BE_SUCCESS;

	while (list != NULL) {
		be_dataset_list_t *datasets = list->be_node_datasets;

		be_make_root_ds(list->be_rpool, list->be_node_name, ds_path,
		    sizeof (ds_path));

		if ((zhp = zfs_open(g_zfs, ds_path, ZFS_TYPE_DATASET)) ==
		    NULL) {
			be_print_err(gettext("set_canmount: failed to open "
			    "dataset (%s): %s\n"), ds_path,
			    libzfs_error_description(g_zfs));
			err = zfs_err_to_be_err(g_zfs);
			return (err);
		}
		if (zfs_prop_get_int(zhp, ZFS_PROP_MOUNTED)) {
			/*
			 * it's already mounted so we can't change the
			 * canmount property anyway.
			 */
			err = BE_SUCCESS;
		} else {
			err = zfs_prop_set(zhp,
			    zfs_prop_to_name(ZFS_PROP_CANMOUNT), value);
			if (err) {
				ZFS_CLOSE(zhp);
				be_print_err(gettext("set_canmount: failed to "
				    "set dataset property (%s): %s\n"),
				    ds_path, libzfs_error_description(g_zfs));
				err = zfs_err_to_be_err(g_zfs);
				return (err);
			}
		}
		ZFS_CLOSE(zhp);

		while (datasets != NULL) {
			be_make_root_ds(list->be_rpool,
			    datasets->be_dataset_name, ds_path,
			    sizeof (ds_path));

			if ((zhp = zfs_open(g_zfs, ds_path, ZFS_TYPE_DATASET))
			    == NULL) {
				be_print_err(gettext("set_canmount: failed to "
				    "open dataset %s: %s\n"), ds_path,
				    libzfs_error_description(g_zfs));
				err = zfs_err_to_be_err(g_zfs);
				return (err);
			}
			if (zfs_prop_get_int(zhp, ZFS_PROP_MOUNTED)) {
				/*
				 * it's already mounted so we can't change the
				 * canmount property anyway.
				 */
				err = BE_SUCCESS;
				ZFS_CLOSE(zhp);
				break;
			}
			err = zfs_prop_set(zhp,
			    zfs_prop_to_name(ZFS_PROP_CANMOUNT), value);
			if (err) {
				ZFS_CLOSE(zhp);
				be_print_err(gettext("set_canmount: "
				    "Failed to set property value %s "
				    "for dataset %s: %s\n"), value, ds_path,
				    libzfs_error_description(g_zfs));
				err = zfs_err_to_be_err(g_zfs);
				return (err);
			}
			ZFS_CLOSE(zhp);
			datasets = datasets->be_next_dataset;
		}
		list = list->be_next_node;
	}
	return (err);
}
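
A hypothetical caller along the lines of how be_activate() uses this helper: list the boot environments and set canmount=noauto on all of their datasets. The two-argument be_list()/be_free_list() signatures are assumed from the same libbe vintage, demote_all_bes() is an illustrative name, and error handling is abbreviated.

static int
demote_all_bes(void)
{
	be_node_list_t	*be_nodes = NULL;
	int		err;

	/* a NULL name asks be_list for every boot environment on the system */
	if ((err = be_list(NULL, &be_nodes)) != BE_SUCCESS)
		return (err);

	err = set_canmount(be_nodes, "noauto");

	be_free_list(be_nodes);
	return (err);
}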