Example #1
File: dsl_userhold.c Project: AB17/zfs
static void
dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_release_arg_t *ddura = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));
		dsl_dataset_user_release_sync_one(ds,
		    fnvpair_value_nvlist(pair), tx);
		if (nvlist_exists(ddura->ddura_todelete,
		    nvpair_name(pair))) {
			ASSERT(ds->ds_userrefs == 0 &&
			    ds->ds_phys->ds_num_children == 1 &&
			    DS_IS_DEFER_DESTROY(ds));
			dsl_destroy_snapshot_sync_impl(ds, B_FALSE, tx);
		}
		dsl_dataset_rele(ds, FTAG);
	}
}
Example #2
File: dsl_userhold.c Project: AB17/zfs
static void
dsl_dataset_user_release_sync_one(dsl_dataset_t *ds, nvlist_t *holds,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;
	int error;
	nvpair_t *pair;

	for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		ds->ds_userrefs--;
		error = dsl_pool_user_release(dp, ds->ds_object,
		    nvpair_name(pair), tx);
		VERIFY(error == 0 || error == ENOENT);
		zapobj = ds->ds_phys->ds_userrefs_obj;
		VERIFY0(zap_remove(mos, zapobj, nvpair_name(pair), tx));

		spa_history_log_internal_ds(ds, "release", tx,
		    "tag=%s refs=%lld", nvpair_name(pair),
		    (longlong_t)ds->ds_userrefs);
	}
}
Example #3
static void
dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_hold_arg_t *dduha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvlist_t *tmpholds;
	uint64_t now = gethrestime_sec();

	if (dduha->dduha_minor != 0)
		tmpholds = fnvlist_alloc();
	else
		tmpholds = NULL;
	for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_chkholds, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dduha->dduha_chkholds, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));
		dsl_dataset_user_hold_sync_one_impl(tmpholds, ds,
		    fnvpair_value_string(pair), dduha->dduha_minor, now, tx);
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_onexit_hold_cleanup(dp->dp_spa, tmpholds, dduha->dduha_minor);
}
Example #4
/*
 * Release "user holds" on snapshots.  If the snapshot has been marked for
 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
 * any clones, and all the user holds are removed, then the snapshot will be
 * destroyed.
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is a nvlist whose keys are the holds to remove.
 *
 * Holds which failed to release because they didn't exist will have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if the nvl of holds was empty or if all holds
 * that existed were successfully removed.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed to release, and no holds will be released.
 *
 * In all cases the errlist will have an entry for each hold that failed to
 * release.
 */
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
	char pool[MAXNAMELEN];
	nvpair_t *elem;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}
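
For reference, a user-space caller builds the nested holds nvlist along these lines. This is a minimal sketch with hypothetical snapshot and hold names, assuming libzfs_core_init() has already been called:

#include <libnvpair.h>
#include <libzfs_core.h>

/*
 * Minimal sketch: release one hold on one snapshot.  The outer key is the
 * snapshot name; its value is an nvlist whose keys are the holds to remove.
 */
static int
release_one_hold(const char *snapname, const char *holdname)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *snap_holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int error;

	fnvlist_add_boolean(snap_holds, holdname);
	fnvlist_add_nvlist(holds, snapname, snap_holds);

	error = lzc_release(holds, &errlist);

	/* On failure, errlist has an entry for each hold that failed to release. */
	nvlist_free(errlist);
	fnvlist_free(snap_holds);
	fnvlist_free(holds);
	return (error);
}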
Example #5
static void
dsl_dataset_user_release_sync_one(dsl_dataset_t *ds, nvlist_t *holds,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		int error;
		const char *holdname = nvpair_name(pair);

		/* Remove temporary hold if one exists. */
		error = dsl_pool_user_release(dp, ds->ds_object, holdname, tx);
		VERIFY(error == 0 || error == ENOENT);

		VERIFY0(zap_remove(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    holdname, tx));
		ds->ds_userrefs--;

		spa_history_log_internal_ds(ds, "release", tx,
		    "tag=%s refs=%lld", holdname, (longlong_t)ds->ds_userrefs);
	}
}
Example #6
/*
 * The bookmarks must all be in the same pool.
 */
int
dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
{
	nvpair_t *pair;
	dsl_bookmark_create_arg_t dbca;

	pair = nvlist_next_nvpair(bmarks, NULL);
	if (pair == NULL)
		return (0);

	dbca.dbca_bmarks = bmarks;
	dbca.dbca_errors = errors;

	return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
	    dsl_bookmark_create_sync, &dbca, fnvlist_num_pairs(bmarks)));
}
Example #7
/*
 * This is invoked after a new filesystem is mounted to define the
 * name space. It is also invoked during normal system operation
 * to update the name space.
 *
 * Applications call di_prof_commit() in libdevinfo, which invokes
 * modctl(). modctl calls this function. The input is a packed nvlist.
 */
int
devname_profile_update(char *packed, size_t packed_sz)
{
	char *mntpt;
	nvlist_t *nvl;
	nvpair_t *nvp;
	struct sdev_data *mntinfo;
	int err;
	int rv;

	nvl = NULL;
	if ((err = copyin_nvlist(packed, packed_sz, &nvl)) != 0)
		return (err);
	ASSERT(nvl);

	/* The first nvpair must be the mount point */
	nvp = nvlist_next_nvpair(nvl, NULL);
	if (strcmp(nvpair_name(nvp), SDEV_NVNAME_MOUNTPT) != 0) {
		cmn_err(CE_NOTE,
		    "devname_profile_update: mount point not specified");
		nvlist_free(nvl);
		return (EINVAL);
	}

	/* find the matching filesystem instance */
	rv = nvpair_value_string(nvp, &mntpt);
	if (rv != 0) {
		cmn_err(CE_WARN, sdev_nvp_val_err,
		    rv, nvpair_name(nvp));
	} else {
		mntinfo = sdev_find_mntinfo(mntpt);
		if (mntinfo == NULL) {
			cmn_err(CE_NOTE, "devname_profile_update: "
			    " mount point %s not found", mntpt);
			nvlist_free(nvl);
			return (EINVAL);
		}

		/* now do the hardwork to process the profile */
		sdev_process_profile(mntinfo, nvl);

		sdev_mntinfo_rele(mntinfo);
	}

	nvlist_free(nvl);
	return (0);
}
Example #8
File: dsl_userhold.c Project: AB17/zfs
/*
 * holds is nvl of snapname -> holdname
 * errlist will be filled in with snapname -> error
 * if cleanup_minor is not 0, the holds will be temporary, cleaned up
 * when the process exits.
 *
 * if any fails, all will fail.
 */
int
dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
{
	dsl_dataset_user_hold_arg_t dduha;
	nvpair_t *pair;

	pair = nvlist_next_nvpair(holds, NULL);
	if (pair == NULL)
		return (0);

	dduha.dduha_holds = holds;
	dduha.dduha_errlist = errlist;
	dduha.dduha_minor = cleanup_minor;

	return (dsl_sync_task(nvpair_name(pair), dsl_dataset_user_hold_check,
	    dsl_dataset_user_hold_sync, &dduha, fnvlist_num_pairs(holds)));
}
Example #9
/*
 * Destroys bookmarks.
 *
 * The keys in the bmarks nvlist are the bookmarks to be destroyed.
 * They must all be in the same pool.  Bookmarks are specified as
 * <fs>#<bmark>.
 *
 * Bookmarks that do not exist will be silently ignored.
 *
 * The return value will be 0 if all bookmarks that existed were destroyed.
 *
 * Otherwise the return value will be the errno of an (undetermined) bookmark
 * that failed, no bookmarks will be destroyed, and the errlist will have an
 * entry for each bookmark that failed.  The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[MAXNAMELEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);

	return (error);
}
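
For reference, a caller-side sketch (hypothetical bookmark name, libzfs_core_init() assumed): the keys of the bmarks nvlist name the bookmarks to destroy, so valueless boolean entries are enough.

#include <libnvpair.h>
#include <libzfs_core.h>

/* Minimal sketch: destroy a single bookmark, e.g. "pool/fs#bmark". */
static int
destroy_one_bookmark(const char *bmark)
{
	nvlist_t *bmarks = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int error;

	fnvlist_add_boolean(bmarks, bmark);
	error = lzc_destroy_bookmarks(bmarks, &errlist);

	nvlist_free(errlist);
	fnvlist_free(bmarks);
	return (error);
}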
Example #10
/*
 * The bookmarks must all be in the same pool.
 */
int
dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
{
	int rv;
	dsl_bookmark_destroy_arg_t dbda;
	nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
	if (pair == NULL)
		return (0);

	dbda.dbda_bmarks = bmarks;
	dbda.dbda_errors = errors;
	dbda.dbda_success = fnvlist_alloc();

	rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
	    dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks));
	fnvlist_free(dbda.dbda_success);
	return (rv);
}
Example #11
File: libzfs_core.c Project: LLNL/zfs
/*
 * Creates bookmarks.
 *
 * The bookmarks nvlist maps from name of the bookmark (e.g. "pool/fs#bmark") to
 * the name of the snapshot (e.g. "pool/fs@snap").  All the bookmarks and
 * snapshots must be in the same pool.
 *
 * The returned results nvlist will have an entry for each bookmark that failed.
 * The value will be the (int32) error code.
 *
 * The return value will be 0 if all bookmarks were created, otherwise it will
 * be the errno of an (undetermined) bookmark that failed.
 */
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bookmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);

	return (error);
}
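
For reference, a caller-side sketch (hypothetical names, libzfs_core_init() assumed) that maps each new bookmark to its source snapshot, as the comment above describes:

#include <libnvpair.h>
#include <libzfs_core.h>

/* Minimal sketch: create one bookmark of an existing snapshot. */
static int
bookmark_one_snapshot(const char *bmark, const char *snap)
{
	nvlist_t *bookmarks = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int error;

	/* e.g. bmark = "pool/fs#bm", snap = "pool/fs@snap" */
	fnvlist_add_string(bookmarks, bmark, snap);
	error = lzc_bookmark(bookmarks, &errlist);

	nvlist_free(errlist);
	fnvlist_free(bookmarks);
	return (error);
}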
Example #12
/*
 * The function obtains size of the nvlist after nvlist_pack().
 */
size_t
nvlist_size(const nvlist_t *nvl)
{
	const nvlist_t *tmpnvl;
	const nvpair_t *nvp, *tmpnvp;
	void *cookie;
	size_t size;

	NVLIST_ASSERT(nvl);
	PJDLOG_ASSERT(nvl->nvl_error == 0);

	size = sizeof(struct nvlist_header);
	nvp = nvlist_first_nvpair(nvl);
	while (nvp != NULL) {
		size += nvpair_header_size();
		size += strlen(nvpair_name(nvp)) + 1;
		if (nvpair_type(nvp) == NV_TYPE_NVLIST) {
			size += sizeof(struct nvlist_header);
			size += nvpair_header_size() + 1;
			tmpnvl = nvpair_get_nvlist(nvp);
			PJDLOG_ASSERT(tmpnvl->nvl_error == 0);
			tmpnvp = nvlist_first_nvpair(tmpnvl);
			if (tmpnvp != NULL) {
				nvl = tmpnvl;
				nvp = tmpnvp;
				continue;
			}
		} else {
			size += nvpair_size(nvp);
		}

		while ((nvp = nvlist_next_nvpair(nvl, nvp)) == NULL) {
			cookie = NULL;
			nvl = nvlist_get_parent(nvl, &cookie);
			if (nvl == NULL)
				goto out;
			nvp = cookie;
		}
	}

out:
	return (size);
}
Example #13
const char *
nvlist_next(const nvlist_t *nvl, int *typep, void **cookiep)
{
	nvpair_t *nvp;

	NVLIST_ASSERT(nvl);
	PJDLOG_ASSERT(cookiep != NULL);

	if (*cookiep == NULL)
		nvp = nvlist_first_nvpair(nvl);
	else
		nvp = nvlist_next_nvpair(nvl, *cookiep);
	if (nvp == NULL)
		return (NULL);
	if (typep != NULL)
		*typep = nvpair_type(nvp);
	*cookiep = nvp;
	return (nvpair_name(nvp));
}
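
For reference, callers drive this cookie-based iterator roughly as follows; a minimal sketch assuming FreeBSD's userland libnv header <sys/nv.h>:

#include <sys/nv.h>
#include <stdio.h>

/* Minimal sketch: print the name and type of every pair in an nvlist. */
static void
dump_names(const nvlist_t *nvl)
{
	void *cookie = NULL;
	const char *name;
	int type;

	while ((name = nvlist_next(nvl, &type, &cookie)) != NULL)
		printf("%s (type %d)\n", name, type);
}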
Example #14
File: dsl_userhold.c Project: AB17/zfs
/*
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * if any fails, all will fail.
 */
int
dsl_dataset_user_release(nvlist_t *holds, nvlist_t *errlist)
{
	dsl_dataset_user_release_arg_t ddura;
	nvpair_t *pair;
	int error;

	pair = nvlist_next_nvpair(holds, NULL);
	if (pair == NULL)
		return (0);

	ddura.ddura_holds = holds;
	ddura.ddura_errlist = errlist;
	ddura.ddura_todelete = fnvlist_alloc();

	error = dsl_sync_task(nvpair_name(pair), dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, &ddura, fnvlist_num_pairs(holds));
	fnvlist_free(ddura.ddura_todelete);
	return (error);
}
Example #15
/*
 * Get a handle to the next nvpair with the specified name and data
 * type in the list following the given nvpair.
 *
 * Some variation of this function will likely appear in the libnvpair
 * library per 4981923.
 *
 * @param       nvl
 *              the nvlist_t to search
 *
 * @param       name
 *              the string key for the pair to find in the list, or
 *              NULL to match any name
 *
 * @param       type
 *              the data type for the pair to find in the list, or
 *              DATA_TYPE_UNKNOWN to match any type
 *
 * @param       nvp
 *              the pair to search from in the list, or NULL to search
 *              from the beginning of the list
 *
 * @return      the next nvpair in the list matching the given
 *              criteria, or NULL if no matching nvpair is found
 */
static nvpair_t *
nvlist_walk_nvpair(
    nvlist_t *nvl,
    const char *name,
    data_type_t type,
    nvpair_t *nvp)
{
    /* For each nvpair in the list following nvp... */
    while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {

        /* Does this pair's name match the given name? */
        if ((name == NULL || strcmp(nvpair_name(nvp), name) == 0) &&

                /* Does this pair's type match the given type? */
                (type == DATA_TYPE_UNKNOWN || type == nvpair_type(nvp))) {
            return (nvp);
        }
    }

    return (NULL);
}
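
A minimal sketch of how such a helper might be called; the pair name "device" is purely illustrative:

/* Count the string-typed pairs named "device" in the list. */
static int
count_device_strings(nvlist_t *nvl)
{
    nvpair_t *nvp = NULL;
    int count = 0;

    while ((nvp = nvlist_walk_nvpair(nvl, "device",
            DATA_TYPE_STRING, nvp)) != NULL)
        count++;

    return (count);
}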
Example #16
void
zpool_get_load_policy(nvlist_t *nvl, zpool_load_policy_t *zlpp)
{
	nvlist_t *policy;
	nvpair_t *elem;
	char *nm;

	/* Defaults */
	zlpp->zlp_rewind = ZPOOL_NO_REWIND;
	zlpp->zlp_maxmeta = 0;
	zlpp->zlp_maxdata = UINT64_MAX;
	zlpp->zlp_txg = UINT64_MAX;

	if (nvl == NULL)
		return;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
		nm = nvpair_name(elem);
		if (strcmp(nm, ZPOOL_LOAD_POLICY) == 0) {
			if (nvpair_value_nvlist(elem, &policy) == 0)
				zpool_get_load_policy(policy, zlpp);
			return;
		} else if (strcmp(nm, ZPOOL_LOAD_REWIND_POLICY) == 0) {
			if (nvpair_value_uint32(elem, &zlpp->zlp_rewind) == 0)
				if (zlpp->zlp_rewind & ~ZPOOL_REWIND_POLICIES)
					zlpp->zlp_rewind = ZPOOL_NO_REWIND;
		} else if (strcmp(nm, ZPOOL_LOAD_REQUEST_TXG) == 0) {
			(void) nvpair_value_uint64(elem, &zlpp->zlp_txg);
		} else if (strcmp(nm, ZPOOL_LOAD_META_THRESH) == 0) {
			(void) nvpair_value_uint64(elem, &zlpp->zlp_maxmeta);
		} else if (strcmp(nm, ZPOOL_LOAD_DATA_THRESH) == 0) {
			(void) nvpair_value_uint64(elem, &zlpp->zlp_maxdata);
		}
	}
	if (zlpp->zlp_rewind == 0)
		zlpp->zlp_rewind = ZPOOL_NO_REWIND;
}
Example #17
/*
 * The function obtains size of the nvlist after nvlist_pack().
 * The additional argument 'level' lets us track how deep we are as we obtain
 * the size of NV_TYPE_NVLIST elements using recursion. We allow at most
 * three levels of recursion.
 */
static size_t
nvlist_xsize(const nvlist_t *nvl, int level)
{
	const nvpair_t *nvp;
	size_t size;

	NVLIST_ASSERT(nvl);
	PJDLOG_ASSERT(nvl->nvl_error == 0);
	PJDLOG_ASSERT(level < 3);

	size = sizeof(struct nvlist_header);
	for (nvp = nvlist_first_nvpair(nvl); nvp != NULL;
	    nvp = nvlist_next_nvpair(nvl, nvp)) {
		size += nvpair_header_size();
		size += strlen(nvpair_name(nvp)) + 1;
		if (nvpair_type(nvp) == NV_TYPE_NVLIST)
			size += nvlist_xsize(nvpair_get_nvlist(nvp), level + 1);
		else
			size += nvpair_size(nvp);
	}

	return (size);
}
Example #18
/*
 * The full semantics of this function are described in the comment above
 * lzc_hold().
 *
 * To summarize:
 * holds is nvl of snapname -> holdname
 * errlist will be filled in with snapname -> error
 *
 * The snapshots must all be in the same pool.
 *
 * Holds for snapshots that don't exist will be skipped.
 *
 * If none of the snapshots for requested holds exist then ENOENT will be
 * returned.
 *
 * If cleanup_minor is not 0, the holds will be temporary, which will be cleaned
 * up when the process exits.
 *
 * On success all the holds, for snapshots that existed, will be created and 0
 * will be returned.
 *
 * On failure no holds will be created, the errlist will be filled in,
 * and an errno will be returned.
 *
 * In all cases the errlist will contain entries for holds where the snapshot
 * didn't exist.
 */
int
dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
{
	dsl_dataset_user_hold_arg_t dduha;
	nvpair_t *pair;
	int ret;

	pair = nvlist_next_nvpair(holds, NULL);
	if (pair == NULL)
		return (0);

	dduha.dduha_holds = holds;
	dduha.dduha_chkholds = fnvlist_alloc();
	dduha.dduha_errlist = errlist;
	dduha.dduha_minor = cleanup_minor;

	ret = dsl_sync_task(nvpair_name(pair), dsl_dataset_user_hold_check,
	    dsl_dataset_user_hold_sync, &dduha,
	    fnvlist_num_pairs(holds), ZFS_SPACE_CHECK_RESERVED);
	fnvlist_free(dduha.dduha_chkholds);

	return (ret);
}
Example #19
static void
prof_make_names(struct sdev_node *dir)
{
	char *name;
	nvpair_t *nvp = NULL;
	nvlist_t *nvl = dir->sdev_prof.dev_name;
	int rv;

	ASSERT(RW_WRITE_HELD(&dir->sdev_contents));

	if ((dir->sdev_flags & SDEV_ZONED) != 0)
		prof_make_names_walk(dir, prof_make_name_zone);

	if (nvl == NULL)
		return;

	if (dir->sdev_prof.has_glob) {
		prof_make_names_walk(dir, prof_make_name_glob);
		return;
	}

	/* Walk nvlist and lookup corresponding device in global inst */
	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		int type;
		rv = nvpair_value_int32(nvp, &type);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		if (type == PROFILE_TYPE_EXCLUDE)
			continue;
		name = nvpair_name(nvp);
		(void) prof_lookup_globaldev(dir, dir->sdev_origin,
		    name, name);
	}
}
Example #20
void *
nvlist_xpack(const nvlist_t *nvl, int64_t *fdidxp, size_t *sizep)
{
	unsigned char *buf, *ptr;
	size_t left, size;
	nvpair_t *nvp;

	NVLIST_ASSERT(nvl);

	if (nvl->nvl_error != 0) {
		errno = nvl->nvl_error;
		return (NULL);
	}

	size = nvlist_size(nvl);
	buf = malloc(size);
	if (buf == NULL)
		return (NULL);

	ptr = buf;
	left = size;

	ptr = nvlist_pack_header(nvl, ptr, &left);

	for (nvp = nvlist_first_nvpair(nvl); nvp != NULL;
	    nvp = nvlist_next_nvpair(nvl, nvp)) {
		ptr = nvpair_pack(nvp, ptr, fdidxp, &left);
		if (ptr == NULL) {
			free(buf);
			return (NULL);
		}
	}

	if (sizep != NULL)
		*sizep = size;
	return (buf);
}
Example #21
/*
 * Create "user holds" on snapshots.  If there is a hold on a snapshot,
 * the snapshot can not be destroyed.  (However, it can be marked for deletion
 * by lzc_destroy_snaps(defer=B_TRUE).)
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is the name of the hold (string type).
 *
 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
 * In this case, when the cleanup_fd is closed (including on process
 * termination), the holds will be released.  If the system is shut down
 * uncleanly, the holds will be released when the pool is next opened
 * or imported.
 *
 * Holds for snapshots which don't exist will be skipped and have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if all holds, for snapshots that existed,
 * were successfully created.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed and no holds will be created.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * (name = snapshot), with its value being the error code (int32).
 */
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
	char pool[MAXNAMELEN];
	nvlist_t *args;
	nvpair_t *elem;
	int error;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "holds", holds);
	if (cleanup_fd != -1)
		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);

	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
	nvlist_free(args);
	return (error);
}
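
For reference, a caller-side sketch (hypothetical names, libzfs_core_init() assumed): the key is the snapshot name and the value is the hold tag, exactly as described above. Passing cleanup_fd == -1 makes the hold persistent until explicitly released.

#include <libnvpair.h>
#include <libzfs_core.h>

/* Minimal sketch: place one hold on one snapshot. */
static int
hold_one_snapshot(const char *snapname, const char *holdname)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int error;

	fnvlist_add_string(holds, snapname, holdname);
	error = lzc_hold(holds, -1, &errlist);

	nvlist_free(errlist);
	fnvlist_free(holds);
	return (error);
}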
Example #22
/*
 * Create symlinks in the current directory based on profile
 */
static void
prof_make_symlinks(struct sdev_node *dir)
{
	char *tgt, *lnm;
	nvpair_t *nvp = NULL;
	nvlist_t *nvl = dir->sdev_prof.dev_symlink;
	int rv;

	ASSERT(RW_WRITE_HELD(&dir->sdev_contents));

	if (nvl == NULL)
		return;

	while (nvp = nvlist_next_nvpair(nvl, nvp)) {
		lnm = nvpair_name(nvp);
		rv = nvpair_value_string(nvp, &tgt);
		if (rv != 0) {
			cmn_err(CE_WARN, sdev_nvp_val_err,
			    rv, nvpair_name(nvp));
			break;
		}
		prof_make_sym(dir, lnm, tgt);
	}
}
Example #23
/*
 * Push a Lua table representing nvl onto the stack.  If it can't be
 * converted, return EINVAL, fill in errbuf, and push nothing. errbuf may
 * be specified as NULL, in which case no error string will be output.
 *
 * Most nvlists are converted as simple key->value Lua tables, but we make
 * an exception for the case where all nvlist entries are BOOLEANs (a string
 * key without a value). In Lua, a table key pointing to a value of Nil
 * (no value) is equivalent to the key not existing, so a BOOLEAN nvlist
 * entry can't be directly converted to a Lua table entry. Nvlists of entirely
 * BOOLEAN entries are frequently used to pass around lists of datasets, so for
 * convenience we check for this case, and convert it to a simple Lua array of
 * strings.
 */
int
zcp_nvlist_to_lua(lua_State *state, nvlist_t *nvl,
    char *errbuf, int errbuf_len)
{
	nvpair_t *pair;
	lua_newtable(state);
	boolean_t has_values = B_FALSE;
	/*
	 * If the list doesn't have any values, just convert it to a string
	 * array.
	 */
	for (pair = nvlist_next_nvpair(nvl, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
		if (nvpair_type(pair) != DATA_TYPE_BOOLEAN) {
			has_values = B_TRUE;
			break;
		}
	}
	if (!has_values) {
		int i = 1;
		for (pair = nvlist_next_nvpair(nvl, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
			(void) lua_pushinteger(state, i);
			(void) lua_pushstring(state, nvpair_name(pair));
			(void) lua_settable(state, -3);
			i++;
		}
	} else {
		for (pair = nvlist_next_nvpair(nvl, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
			int err = zcp_nvpair_value_to_lua(state, pair,
			    errbuf, errbuf_len);
			if (err != 0) {
				lua_pop(state, 1);
				return (err);
			}
			(void) lua_setfield(state, -2, nvpair_name(pair));
		}
	}
	return (0);
}
Example #24
/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace.  It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	spa_t *spa;
	char pathname[128];
	struct _buf *file;
	struct bootstat bst;

	/*
	 * Open the configuration file.
	 */
	(void) snprintf(pathname, sizeof (pathname), "%s%s/%s",
	    (rootdir != NULL) ? "./" : "", spa_config_dir, ZPOOL_CACHE_FILE);

	file = kobj_open_file(pathname);
	if (file == (struct _buf *)-1)
		return;

	if (kobj_fstat(file->_fd, &bst) != 0)
		goto out;

	buf = kmem_alloc(bst.st_size, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, bst.st_size, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, bst.st_size, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {

		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		spa = spa_add(nvpair_name(nvpair), NULL);

		/*
		 * We blindly duplicate the configuration here.  If it's
		 * invalid, we will catch it when the pool is first opened.
		 */
		VERIFY(nvlist_dup(child, &spa->spa_config, 0) == 0);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, bst.st_size);

	kobj_close_file(file);
}
Example #25
/*
 * The full semantics of this function are described in the comment above
 * lzc_release().
 *
 * To summarize:
 * Releases holds specified in the nvl holds.
 *
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL the names for holds should be the dsobjs of snapshots,
 * otherwise they should be the names of snapshots.
 *
 * As a release may cause snapshots to be destroyed, this tries to ensure they
 * aren't mounted.
 *
 * Releases of non-existent holds are skipped.
 *
 * At least one hold must have been released for this function to succeed
 * and return 0.
 */
static int
dsl_dataset_user_release_impl(nvlist_t *holds, nvlist_t *errlist,
    dsl_pool_t *tmpdp)
{
	dsl_dataset_user_release_arg_t ddura;
	nvpair_t *pair;
	char *pool;
	int error;

	pair = nvlist_next_nvpair(holds, NULL);
	if (pair == NULL)
		return (0);

	/*
	 * The release may cause snapshots to be destroyed; make sure they
	 * are not mounted.
	 */
	if (tmpdp != NULL) {
		/* Temporary holds are specified by dsobj string. */
		ddura.ddura_holdfunc = dsl_dataset_hold_obj_string;
		pool = spa_name(tmpdp->dp_spa);
#ifdef _KERNEL
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			dsl_dataset_t *ds;

			dsl_pool_config_enter(tmpdp, FTAG);
			error = dsl_dataset_hold_obj_string(tmpdp,
			    nvpair_name(pair), FTAG, &ds);
			if (error == 0) {
				char name[MAXNAMELEN];
				dsl_dataset_name(ds, name);
				dsl_pool_config_exit(tmpdp, FTAG);
				dsl_dataset_rele(ds, FTAG);
				(void) zfs_unmount_snap(name);
			} else {
				dsl_pool_config_exit(tmpdp, FTAG);
			}
		}
#endif
	} else {
		/* Non-temporary holds are specified by name. */
		ddura.ddura_holdfunc = dsl_dataset_hold;
		pool = nvpair_name(pair);
#ifdef _KERNEL
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			(void) zfs_unmount_snap(nvpair_name(pair));
		}
#endif
	}

	ddura.ddura_holds = holds;
	ddura.ddura_errlist = errlist;
	ddura.ddura_todelete = fnvlist_alloc();
	ddura.ddura_chkholds = fnvlist_alloc();

	error = dsl_sync_task(pool, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, &ddura, 0);
	fnvlist_free(ddura.ddura_todelete);
	fnvlist_free(ddura.ddura_chkholds);

	return (error);
}
Example #26
static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
	uint64_t zapobj;
	nvlist_t *holds_found;
	objset_t *mos;
	int numholds;

	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (nvlist_empty(holds))
		return (0);

	numholds = 0;
	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zapobj = ds->ds_phys->ds_userrefs_obj;
	holds_found = fnvlist_alloc();

	for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		uint64_t tmp;
		int error;
		const char *holdname = nvpair_name(pair);

		if (zapobj != 0)
			error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
		else
			error = SET_ERROR(ENOENT);

		/*
		 * Non-existent holds are put on the errlist, but don't
		 * cause an overall failure.
		 */
		if (error == ENOENT) {
			if (ddura->ddura_errlist != NULL) {
				char *errtag = kmem_asprintf("%s#%s",
				    snapname, holdname);
				fnvlist_add_int32(ddura->ddura_errlist, errtag,
				    ENOENT);
				strfree(errtag);
			}
			continue;
		}

		if (error != 0) {
			fnvlist_free(holds_found);
			return (error);
		}

		fnvlist_add_boolean(holds_found, holdname);
		numholds++;
	}

	if (DS_IS_DEFER_DESTROY(ds) && ds->ds_phys->ds_num_children == 1 &&
	    ds->ds_userrefs == numholds) {
		/* we need to destroy the snapshot as well */
		if (dsl_dataset_long_held(ds)) {
			fnvlist_free(holds_found);
			return (SET_ERROR(EBUSY));
		}
		fnvlist_add_boolean(ddura->ddura_todelete, snapname);
	}

	if (numholds != 0) {
		fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
		    holds_found);
	}
	fnvlist_free(holds_found);

	return (0);
}
Example #27
/*
 * nvlist_print - Prints elements in an event buffer
 */
static
void
nvlist_print_with_indent(FILE *fp, nvlist_t *nvl, int depth)
{
	int i;
	char *name;
	uint_t nelem;
	nvpair_t *nvp;

	if (nvl == NULL)
		return;

	indent(fp, depth);
	(void) fprintf(fp, "nvlist version: %d\n", NVL_VERSION(nvl));

	nvp = nvlist_next_nvpair(nvl, NULL);

	while (nvp) {
		data_type_t type = nvpair_type(nvp);

		indent(fp, depth);
		name = nvpair_name(nvp);
		(void) fprintf(fp, "\t%s =", name);
		nelem = 0;
		switch (type) {
		case DATA_TYPE_BOOLEAN: {
			(void) fprintf(fp, " 1");
			break;
		}
		case DATA_TYPE_BOOLEAN_VALUE: {
			boolean_t val;
			(void) nvpair_value_boolean_value(nvp, &val);
			(void) fprintf(fp, " %d", val);
			break;
		}
		case DATA_TYPE_BYTE: {
			uchar_t val;
			(void) nvpair_value_byte(nvp, &val);
			(void) fprintf(fp, " 0x%2.2x", val);
			break;
		}
		case DATA_TYPE_INT8: {
			int8_t val;
			(void) nvpair_value_int8(nvp, &val);
			(void) fprintf(fp, " %d", val);
			break;
		}
		case DATA_TYPE_UINT8: {
			uint8_t val;
			(void) nvpair_value_uint8(nvp, &val);
			(void) fprintf(fp, " 0x%x", val);
			break;
		}
		case DATA_TYPE_INT16: {
			int16_t val;
			(void) nvpair_value_int16(nvp, &val);
			(void) fprintf(fp, " %d", val);
			break;
		}
		case DATA_TYPE_UINT16: {
			uint16_t val;
			(void) nvpair_value_uint16(nvp, &val);
			(void) fprintf(fp, " 0x%x", val);
			break;
		}
		case DATA_TYPE_INT32: {
			int32_t val;
			(void) nvpair_value_int32(nvp, &val);
			(void) fprintf(fp, " %d", val);
			break;
		}
		case DATA_TYPE_UINT32: {
			uint32_t val;
			(void) nvpair_value_uint32(nvp, &val);
			(void) fprintf(fp, " 0x%x", val);
			break;
		}
		case DATA_TYPE_INT64: {
			int64_t val;
			(void) nvpair_value_int64(nvp, &val);
			(void) fprintf(fp, " %lld", (longlong_t)val);
			break;
		}
		case DATA_TYPE_UINT64: {
			uint64_t val;
			(void) nvpair_value_uint64(nvp, &val);
			(void) fprintf(fp, " 0x%llx", (u_longlong_t)val);
			break;
		}
		case DATA_TYPE_DOUBLE: {
			double val;
			(void) nvpair_value_double(nvp, &val);
			(void) fprintf(fp, " 0x%llf", val);
			break;
		}
		case DATA_TYPE_STRING: {
			char *val;
			(void) nvpair_value_string(nvp, &val);
			(void) fprintf(fp, " %s", val);
			break;
		}
		case DATA_TYPE_BOOLEAN_ARRAY: {
			boolean_t *val;
			(void) nvpair_value_boolean_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " %d", val[i]);
			break;
		}
		case DATA_TYPE_BYTE_ARRAY: {
			uchar_t *val;
			(void) nvpair_value_byte_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " 0x%2.2x", val[i]);
			break;
		}
		case DATA_TYPE_INT8_ARRAY: {
			int8_t *val;
			(void) nvpair_value_int8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " %d", val[i]);
			break;
		}
		case DATA_TYPE_UINT8_ARRAY: {
			uint8_t *val;
			(void) nvpair_value_uint8_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " 0x%x", val[i]);
			break;
		}
		case DATA_TYPE_INT16_ARRAY: {
			int16_t *val;
			(void) nvpair_value_int16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " %d", val[i]);
			break;
		}
		case DATA_TYPE_UINT16_ARRAY: {
			uint16_t *val;
			(void) nvpair_value_uint16_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " 0x%x", val[i]);
			break;
		}
		case DATA_TYPE_INT32_ARRAY: {
			int32_t *val;
			(void) nvpair_value_int32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " %d", val[i]);
			break;
		}
		case DATA_TYPE_UINT32_ARRAY: {
			uint32_t *val;
			(void) nvpair_value_uint32_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " 0x%x", val[i]);
			break;
		}
		case DATA_TYPE_INT64_ARRAY: {
			int64_t *val;
			(void) nvpair_value_int64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " %lld", (longlong_t)val[i]);
			break;
		}
		case DATA_TYPE_UINT64_ARRAY: {
			uint64_t *val;
			(void) nvpair_value_uint64_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " 0x%llx",
				    (u_longlong_t)val[i]);
			break;
		}
		case DATA_TYPE_STRING_ARRAY: {
			char **val;
			(void) nvpair_value_string_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++)
				(void) fprintf(fp, " %s", val[i]);
			break;
		}
		case DATA_TYPE_HRTIME: {
			hrtime_t val;
			(void) nvpair_value_hrtime(nvp, &val);
			(void) fprintf(fp, " 0x%llx", val);
			break;
		}
		case DATA_TYPE_NVLIST: {
			nvlist_t *val;
			(void) nvpair_value_nvlist(nvp, &val);
			(void) fprintf(fp, " (embedded nvlist)\n");
			nvlist_print_with_indent(fp, val, depth + 1);
			indent(fp, depth + 1);
			(void) fprintf(fp, "(end %s)\n", name);
			break;
		}
		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			(void) fprintf(fp, " (array of embedded nvlists)\n");
			for (i = 0; i < nelem; i++) {
				indent(fp, depth + 1);
				(void) fprintf(fp,
				    "(start %s[%d])\n", name, i);
				nvlist_print_with_indent(fp, val[i], depth + 1);
				indent(fp, depth + 1);
				(void) fprintf(fp, "(end %s[%d])\n", name, i);
			}
			break;
		}
		default:
			(void) fprintf(fp, " unknown data type (%d)", type);
			break;
		}
		(void) fprintf(fp, "\n");
		nvp = nvlist_next_nvpair(nvl, nvp);
	}
}
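
For context, this recursive helper is normally reached through a thin wrapper that starts the walk at depth 0 (in libnvpair that role is played by the public nvlist_print()); a minimal sketch of such a wrapper:

/* Hypothetical wrapper: print a whole event nvlist starting at depth 0. */
static void
print_event_nvlist(FILE *fp, nvlist_t *nvl)
{
	nvlist_print_with_indent(fp, nvl, 0);
}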
Example #28
/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
		    cachefile) != 0) {
			(void) no_memory(hdl);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
Example #29
/*
 * Process profile passed down from libdevinfo. There are four types
 * of matching rules:
 *  include: export a name or names matching a pattern
 *  exclude: exclude a name or names matching a pattern
 *  symlink: create a local symlink
 *  map:     export a device with a name different from the global zone
 * Note: We may consider supporting VOP_SYMLINK in non-global instances,
 *	because it does not present any security risk. For now, the fs
 *	instance is read only.
 */
static void
sdev_process_profile(struct sdev_data *sdev_data, nvlist_t *profile)
{
	nvpair_t *nvpair;
	char *nvname, *dname;
	struct sdev_node *dir, *gdir;
	char **pair;				/* for symlinks and maps */
	uint_t nelem;
	int rv;

	gdir = sdev_origins->sdev_root;	/* root of global /dev */
	dir = sdev_data->sdev_root;	/* root of current instance */

	ASSERT(profile);

	/* process nvpairs in the list */
	nvpair = NULL;
	while (nvpair = nvlist_next_nvpair(profile, nvpair)) {
		nvname = nvpair_name(nvpair);
		ASSERT(nvname != NULL);

		if (strcmp(nvname, SDEV_NVNAME_INCLUDE) == 0) {
			rv = nvpair_value_string(nvpair, &dname);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			process_rule(dir, gdir, dname, NULL,
			    PROFILE_TYPE_INCLUDE);
		} else if (strcmp(nvname, SDEV_NVNAME_EXCLUDE) == 0) {
			rv = nvpair_value_string(nvpair, &dname);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			process_rule(dir, gdir, dname, NULL,
			    PROFILE_TYPE_EXCLUDE);
		} else if (strcmp(nvname, SDEV_NVNAME_SYMLINK) == 0) {
			rv = nvpair_value_string_array(nvpair, &pair, &nelem);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			ASSERT(nelem == 2);
			process_rule(dir, gdir, pair[0], pair[1],
			    PROFILE_TYPE_SYMLINK);
		} else if (strcmp(nvname, SDEV_NVNAME_MAP) == 0) {
			rv = nvpair_value_string_array(nvpair, &pair, &nelem);
			if (rv != 0) {
				cmn_err(CE_WARN, sdev_nvp_val_err,
				    rv, nvpair_name(nvpair));
				break;
			}
			process_rule(dir, gdir, pair[1], pair[0],
			    PROFILE_TYPE_MAP);
		} else if (strcmp(nvname, SDEV_NVNAME_MOUNTPT) != 0) {
			cmn_err(CE_WARN, "sdev_process_profile: invalid "
			    "nvpair %s\n", nvname);
		}
	}
}
Example #30
static int
osd_sa_xattr_list(const struct lu_env *env, struct osd_object *obj,
		struct lu_buf *lb)
{
	nvpair_t *nvp = NULL;
	int       len, counted = 0, remain = lb->lb_len;
	int       rc = 0;

	if (obj->oo_sa_xattr == NULL) {
		rc = __osd_xattr_cache(env, obj);
		if (rc)
			return rc;
	}

	LASSERT(obj->oo_sa_xattr);

	while ((nvp = nvlist_next_nvpair(obj->oo_sa_xattr, nvp)) != NULL) {
		const char *name = nvpair_name(nvp);

		if (!osd_obj2dev(obj)->od_posix_acl &&
		    (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0 ||
		     strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0))
			continue;

		len = strlen(nvpair_name(nvp));
		if (lb->lb_buf != NULL) {
			if (len + 1 > remain)
				return -ERANGE;

			memcpy(lb->lb_buf, name, len);
			lb->lb_buf += len;
			*((char *)lb->lb_buf) = '\0';
			lb->lb_buf++;
			remain -= len + 1;
		}
		counted += len + 1;
	}
	return counted;
}