Example 1
/*
 * Create "user holds" on snapshots.  If there is a hold on a snapshot,
 * the snapshot can not be destroyed.  (However, it can be marked for deletion
 * by lzc_destroy_snaps(defer=B_TRUE).)
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is the name of the hold (string type).
 *
 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
 * In this case, when the cleanup_fd is closed (including on process
 * termination), the holds will be released.  If the system is shut down
 * uncleanly, the holds will be released when the pool is next opened
 * or imported.
 *
 * Holds for snapshots which don't exist will be skipped and have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if all holds, for snapshots that existed,
 * were successfully created.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed and no holds will be created.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * (name = snapshot), with its value being the error code (int32).
 */
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *args;
	nvpair_t *elem;
	int error;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "holds", holds);
	if (cleanup_fd != -1)
		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);

	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
	nvlist_free(args);
	return (error);
}
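A minimal caller-side sketch of the interface documented above, assuming libzfs_core_init() has already been called; the pool, snapshot, and hold names are hypothetical and error handling is reduced to printing the errlist.

/*
 * Hedged usage sketch for lzc_hold().  Holds are keyed by snapshot name;
 * the value is the hold name.  Passing -1 for cleanup_fd makes the holds
 * permanent until explicitly released.
 */
#include <libzfs_core.h>
#include <libnvpair.h>
#include <stdio.h>

static int
hold_example(void)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int error;

	fnvlist_add_string(holds, "tank/fs@snap1", "my-hold");
	fnvlist_add_string(holds, "tank/fs@snap2", "my-hold");

	error = lzc_hold(holds, -1, &errlist);
	if (error != 0 && errlist != NULL) {
		/* each failed hold appears as snapshot -> error code (int32) */
		for (nvpair_t *p = nvlist_next_nvpair(errlist, NULL);
		    p != NULL; p = nvlist_next_nvpair(errlist, p)) {
			(void) fprintf(stderr, "%s: error %d\n",
			    nvpair_name(p), fnvpair_value_int32(p));
		}
	}
	nvlist_free(errlist);
	nvlist_free(holds);
	return (error);
}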
Example 2
static void
dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_hold_arg_t *dduha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	nvlist_t *tmpholds;
	uint64_t now = gethrestime_sec();

	if (dduha->dduha_minor != 0)
		tmpholds = fnvlist_alloc();
	else
		tmpholds = NULL;
	for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_chkholds, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(dduha->dduha_chkholds, pair)) {
		dsl_dataset_t *ds;

		VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));
		dsl_dataset_user_hold_sync_one_impl(tmpholds, ds,
		    fnvpair_value_string(pair), dduha->dduha_minor, now, tx);
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_onexit_hold_cleanup(dp->dp_spa, tmpholds, dduha->dduha_minor);
}
Example 3
/*
 * "from" can be NULL, a snapshot, or a bookmark.
 *
 * If from is NULL, a full (non-incremental) stream will be estimated.  This
 * is calculated very efficiently.
 *
 * If from is a snapshot, lzc_send_space uses the deadlists attached to
 * each snapshot to efficiently estimate the stream size.
 *
 * If from is a bookmark, the indirect blocks in the destination snapshot
 * are traversed, looking for blocks with a birth time since the creation TXG of
 * the snapshot this bookmark was created from.  This will result in
 * significantly more I/O and be less efficient than a send space estimation on
 * an equivalent snapshot.
 */
int
lzc_send_space(const char *snapname, const char *from,
    enum lzc_send_flags flags, uint64_t *spacep)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	if (from != NULL)
		fnvlist_add_string(args, "from", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (flags & LZC_SEND_FLAG_COMPRESS)
		fnvlist_add_boolean(args, "compressok");
	err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
	nvlist_free(args);
	if (err == 0)
		*spacep = fnvlist_lookup_uint64(result, "space");
	nvlist_free(result);
	return (err);
}
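A minimal usage sketch for lzc_send_space(), assuming libzfs_core_init() has been called; the snapshot names are hypothetical.

#include <libzfs_core.h>
#include <stdio.h>

static void
send_space_example(void)
{
	uint64_t space = 0;
	int err;

	/* incremental estimate: everything @snap2 has that @snap1 lacks */
	err = lzc_send_space("tank/fs@snap2", "tank/fs@snap1", 0, &space);
	if (err == 0)
		(void) printf("estimated stream size: %llu bytes\n",
		    (unsigned long long)space);
	else
		(void) fprintf(stderr, "lzc_send_space failed: %d\n", err);
}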
Example 4
/*
 * The full semantics of this function are described in the comment above
 * lzc_release().
 *
 * To summarize:
 * Release the holds specified in the 'holds' nvlist.
 *
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL the names for holds should be the dsobj's of snapshots,
 * otherwise they should be the names of snapshots.
 *
 * As a release may cause snapshots to be destroyed, this tries to ensure they
 * aren't mounted.
 *
 * Releases of non-existent holds are skipped.
 *
 * At least one hold must have been released for this function to succeed
 * and return 0.
 */
static int
dsl_dataset_user_release_impl(nvlist_t *holds, nvlist_t *errlist,
    dsl_pool_t *tmpdp)
{
	dsl_dataset_user_release_arg_t ddura;
	nvpair_t *pair;
	char *pool;
	int error;

	pair = nvlist_next_nvpair(holds, NULL);
	if (pair == NULL)
		return (0);

	/*
	 * The release may cause snapshots to be destroyed; make sure they
	 * are not mounted.
	 */
	if (tmpdp != NULL) {
		/* Temporary holds are specified by dsobj string. */
		ddura.ddura_holdfunc = dsl_dataset_hold_obj_string;
		pool = spa_name(tmpdp->dp_spa);
#ifdef _KERNEL
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			dsl_dataset_t *ds;

			dsl_pool_config_enter(tmpdp, FTAG);
			error = dsl_dataset_hold_obj_string(tmpdp,
			    nvpair_name(pair), FTAG, &ds);
			if (error == 0) {
				char name[MAXNAMELEN];
				dsl_dataset_name(ds, name);
				dsl_pool_config_exit(tmpdp, FTAG);
				dsl_dataset_rele(ds, FTAG);
				(void) zfs_unmount_snap(name);
			} else {
				dsl_pool_config_exit(tmpdp, FTAG);
			}
		}
#endif
	} else {
		/* Non-temporary holds are specified by name. */
		ddura.ddura_holdfunc = dsl_dataset_hold;
		pool = nvpair_name(pair);
#ifdef _KERNEL
		for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(holds, pair)) {
			(void) zfs_unmount_snap(nvpair_name(pair));
		}
#endif
	}

	ddura.ddura_holds = holds;
	ddura.ddura_errlist = errlist;
	ddura.ddura_todelete = fnvlist_alloc();
	ddura.ddura_chkholds = fnvlist_alloc();

	error = dsl_sync_task(pool, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, &ddura, 0);
	fnvlist_free(ddura.ddura_todelete);
	fnvlist_free(ddura.ddura_chkholds);

	return (error);
}
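A hedged sketch of how a libzfs_core consumer builds the snapname -> { holdname, ... } nvlist described above and hands it to lzc_release(); the snapshot and hold names are hypothetical and libzfs_core_init() is assumed.

#include <libzfs_core.h>
#include <libnvpair.h>

static int
release_example(void)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *snap_holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int error;

	/* each hold name is a boolean nvpair under its snapshot's nvlist */
	fnvlist_add_boolean(snap_holds, "my-hold");
	fnvlist_add_nvlist(holds, "tank/fs@snap1", snap_holds);
	fnvlist_free(snap_holds);

	error = lzc_release(holds, &errlist);
	nvlist_free(errlist);
	nvlist_free(holds);
	return (error);
}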
Example 5
static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
	uint64_t zapobj;
	nvlist_t *holds_found;
	objset_t *mos;
	int numholds;

	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (nvlist_empty(holds))
		return (0);

	numholds = 0;
	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zapobj = ds->ds_phys->ds_userrefs_obj;
	holds_found = fnvlist_alloc();

	for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		uint64_t tmp;
		int error;
		const char *holdname = nvpair_name(pair);

		if (zapobj != 0)
			error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
		else
			error = SET_ERROR(ENOENT);

		/*
		 * Non-existent holds are put on the errlist, but don't
		 * cause an overall failure.
		 */
		if (error == ENOENT) {
			if (ddura->ddura_errlist != NULL) {
				char *errtag = kmem_asprintf("%s#%s",
				    snapname, holdname);
				fnvlist_add_int32(ddura->ddura_errlist, errtag,
				    ENOENT);
				strfree(errtag);
			}
			continue;
		}

		if (error != 0) {
			fnvlist_free(holds_found);
			return (error);
		}

		fnvlist_add_boolean(holds_found, holdname);
		numholds++;
	}

	if (DS_IS_DEFER_DESTROY(ds) && ds->ds_phys->ds_num_children == 1 &&
	    ds->ds_userrefs == numholds) {
		/* we need to destroy the snapshot as well */
		if (dsl_dataset_long_held(ds)) {
			fnvlist_free(holds_found);
			return (SET_ERROR(EBUSY));
		}
		fnvlist_add_boolean(ddura->ddura_todelete, snapname);
	}

	if (numholds != 0) {
		fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
		    holds_found);
	}
	fnvlist_free(holds_found);

	return (0);
}
Example 6
static void
zfs_ioc_input_tests(const char *pool)
{
	char filepath[] = "/tmp/ioc_test_file_XXXXXX";
	char dataset[ZFS_MAX_DATASET_NAME_LEN];
	char snapbase[ZFS_MAX_DATASET_NAME_LEN + 32];
	char snapshot[ZFS_MAX_DATASET_NAME_LEN + 32];
	char bookmark[ZFS_MAX_DATASET_NAME_LEN + 32];
	char backup[ZFS_MAX_DATASET_NAME_LEN];
	char clone[ZFS_MAX_DATASET_NAME_LEN];
	int tmpfd, err;

	/*
	 * Set up names and create a working dataset
	 */
	(void) snprintf(dataset, sizeof (dataset), "%s/test-fs", pool);
	(void) snprintf(snapbase, sizeof (snapbase), "%s@snapbase", dataset);
	(void) snprintf(snapshot, sizeof (snapshot), "%s@snapshot", dataset);
	(void) snprintf(bookmark, sizeof (bookmark), "%s#bookmark", dataset);
	(void) snprintf(clone, sizeof (clone), "%s/test-fs-clone", pool);
	(void) snprintf(backup, sizeof (backup), "%s/backup", pool);

	err = lzc_create(dataset, DMU_OST_ZFS, NULL, NULL, 0);
	if (err) {
		(void) fprintf(stderr, "could not create '%s': %s\n",
		    dataset, strerror(errno));
		exit(2);
	}

	tmpfd = mkstemp(filepath);
	if (tmpfd < 0) {
		(void) fprintf(stderr, "could not create '%s': %s\n",
		    filepath, strerror(errno));
		exit(2);
	}

	/*
	 * Run a test for each ioctl.
	 * Note that some tests build on previous test operations.
	 */
	test_pool_sync(pool);
	test_pool_reopen(pool);
	test_pool_checkpoint(pool);
	test_pool_discard_checkpoint(pool);
	test_log_history(pool);

	test_create(dataset);
	test_snapshot(pool, snapbase);
	test_snapshot(pool, snapshot);

	test_space_snaps(snapshot);
	test_send_space(snapbase, snapshot);
	test_send_new(snapshot, tmpfd);
	test_recv_new(backup, tmpfd);

	test_bookmark(pool, snapshot, bookmark);
	test_get_bookmarks(dataset);
	test_destroy_bookmarks(pool, bookmark);

	test_hold(pool, snapshot);
	test_get_holds(snapshot);
	test_release(pool, snapshot);

	test_clone(snapshot, clone);
	zfs_destroy(clone);

	test_rollback(dataset, snapshot);
	test_destroy_snaps(pool, snapshot);
	test_destroy_snaps(pool, snapbase);

	test_remap(dataset);
	test_channel_program(pool);

	test_load_key(dataset);
	test_change_key(dataset);
	test_unload_key(dataset);

	/*
	 * cleanup
	 */
	zfs_cmd_t zc = {"\0"};

	nvlist_t *snaps = fnvlist_alloc();
	fnvlist_add_boolean(snaps, snapshot);
	(void) lzc_destroy_snaps(snaps, B_FALSE, NULL);
	nvlist_free(snaps);

	(void) zfs_destroy(dataset);
	(void) zfs_destroy(backup);

	(void) close(tmpfd);
	(void) unlink(filepath);

	/*
	 * All the unused slots should yield ZFS_ERR_IOC_CMD_UNAVAIL
	 */
	for (int i = 0; i < ARRAY_SIZE(ioc_skip); i++) {
		if (ioc_tested[ioc_skip[i] - ZFS_IOC_FIRST])
			(void) fprintf(stderr, "cmd %d tested, not skipped!\n",
			    (int)(ioc_skip[i] - ZFS_IOC_FIRST));

		ioc_tested[ioc_skip[i] - ZFS_IOC_FIRST] = B_TRUE;
	}

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
	zc.zc_name[sizeof (zc.zc_name) - 1] = '\0';

	for (unsigned ioc = ZFS_IOC_FIRST; ioc < ZFS_IOC_LAST; ioc++) {
		unsigned cmd = ioc - ZFS_IOC_FIRST;

		if (ioc_tested[cmd])
			continue;

		if (ioctl(zfs_fd, ioc, &zc) != 0 &&
		    errno != ZFS_ERR_IOC_CMD_UNAVAIL) {
			(void) fprintf(stderr, "cmd %d is missing a test case "
			    "(%d)\n", cmd, errno);
		}
	}
}
Example 7
/*
 * Test each ioc for the following ioctl input errors:
 *   ZFS_ERR_IOC_ARG_UNAVAIL	an input argument is not supported by kernel
 *   ZFS_ERR_IOC_ARG_REQUIRED	a required input argument is missing
 *   ZFS_ERR_IOC_ARG_BADTYPE	an input argument has an invalid type
 */
static int
lzc_ioctl_test(zfs_ioc_t ioc, const char *name, nvlist_t *required,
    nvlist_t *optional, int expected_error, boolean_t wildcard)
{
	nvlist_t *input = fnvlist_alloc();
	nvlist_t *future = fnvlist_alloc();
	int error = 0;

	if (required != NULL) {
		for (nvpair_t *pair = nvlist_next_nvpair(required, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(required, pair)) {
			fnvlist_add_nvpair(input, pair);
		}
	}
	if (optional != NULL) {
		for (nvpair_t *pair = nvlist_next_nvpair(optional, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(optional, pair)) {
			fnvlist_add_nvpair(input, pair);
		}
	}

	/*
	 * Generic input run with 'optional' nvlist pair
	 */
	if (!wildcard)
		fnvlist_add_nvlist(input, "optional", future);
	lzc_ioctl_run(ioc, name, input, expected_error);
	if (!wildcard)
		fnvlist_remove(input, "optional");

	/*
	 * Bogus input value
	 */
	if (!wildcard) {
		fnvlist_add_string(input, "bogus_input", "bogus");
		lzc_ioctl_run(ioc, name, input, ZFS_ERR_IOC_ARG_UNAVAIL);
		fnvlist_remove(input, "bogus_input");
	}

	/*
	 * Missing required inputs
	 */
	if (required != NULL) {
		nvlist_t *empty = fnvlist_alloc();
		lzc_ioctl_run(ioc, name, empty, ZFS_ERR_IOC_ARG_REQUIRED);
		nvlist_free(empty);
	}

	/*
	 * Wrong nvpair type
	 */
	if (required != NULL || optional != NULL) {
		/*
		 * switch the type of one of the input pairs
		 */
		for (nvpair_t *pair = nvlist_next_nvpair(input, NULL);
		    pair != NULL; pair = nvlist_next_nvpair(input, pair)) {
			char pname[MAXNAMELEN];
			data_type_t ptype;

			strlcpy(pname, nvpair_name(pair), sizeof (pname));
			pname[sizeof (pname) - 1] = '\0';
			ptype = nvpair_type(pair);
			fnvlist_remove_nvpair(input, pair);

			switch (ptype) {
			case DATA_TYPE_STRING:
				fnvlist_add_uint64(input, pname, 42);
				break;
			default:
				fnvlist_add_string(input, pname, "bogus");
				break;
			}
		}
		lzc_ioctl_run(ioc, name, input, ZFS_ERR_IOC_ARG_BADTYPE);
	}

	nvlist_free(future);
	nvlist_free(input);

	return (error);
}
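A hedged sketch of a per-ioctl case built on lzc_ioctl_test() above; the pool and snapshot names are hypothetical, and the zfs_fd global from the surrounding test program is assumed.

static void
test_hold_example(const char *pool, const char *snapshot)
{
	nvlist_t *required = fnvlist_alloc();
	nvlist_t *optional = fnvlist_alloc();
	nvlist_t *snaps = fnvlist_alloc();

	/* ZFS_IOC_HOLD requires a "holds" nvlist; "cleanup_fd" is optional */
	fnvlist_add_string(snaps, snapshot, "ioc-test-hold");
	fnvlist_add_nvlist(required, "holds", snaps);
	fnvlist_add_int32(optional, "cleanup_fd", zfs_fd);

	(void) lzc_ioctl_test(ZFS_IOC_HOLD, pool, required, optional,
	    0, B_FALSE);

	nvlist_free(snaps);
	nvlist_free(optional);
	nvlist_free(required);
}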
Example 8
/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	char *pool_name;
	int config_gen_flags = 0;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name. In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable. This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary. When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		VERIFY0(nvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name));
	} else
		pool_name = spa_name(spa);

	config = fnvlist_alloc();

	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, pool_name);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, spa_state(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
	if (spa->spa_comment != NULL)
		fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
		    spa->spa_comment);

#ifdef	_KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
	fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);

	if (vd != rvd) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid);
		fnvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid);
		if (vd->vdev_isspare)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL);
		if (vd->vdev_islog)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			fnvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting);

		fnvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS);

		config_gen_flags |= VDEV_CONFIG_MOS;
	}

	/*
	 * Add the top-level config.  We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID, split_guid);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, config_gen_flags);
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t));
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t));
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t));
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}
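A short kernel-context sketch of consuming the nvlist returned above; it assumes the caller already has a valid spa_t and reads back only keys that spa_config_generate() adds.

static void
config_inspect_example(spa_t *spa)
{
	nvlist_t *config;
	char *name;
	uint64_t guid, state;

	/* full config at the current config txg, without dedup stats */
	config = spa_config_generate(spa, NULL, -1ULL, B_FALSE);

	name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
	guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);

	zfs_dbgmsg("pool %s guid %llu state %llu", name,
	    (u_longlong_t)guid, (u_longlong_t)state);

	nvlist_free(config);
}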
Example 9
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	char *pool_name;
	boolean_t ccw_failure;
	int error = 0;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				nvl = fnvlist_alloc();

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME)
				pool_name = fnvlist_lookup_string(
				    spa->spa_config, ZPOOL_CONFIG_POOL_NAME);
			else
				pool_name = spa_name(spa);

			fnvlist_add_nvlist(nvl, pool_name, spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, 0, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC);
}
Example 10
static void
run_tests(void)
{
	const char *key = "key";

	/* Note: maximum nvlist key length is 32KB */
	int len = 1024 * 31;
	char *bigstring = malloc(len);
	for (int i = 0; i < len; i++)
		bigstring[i] = 'a' + i % 26;
	bigstring[len - 1] = '\0';

	nvl = fnvlist_alloc();

	fnvlist_add_boolean(nvl, key);
	test("boolean", B_TRUE, B_FALSE);

	fnvlist_add_boolean_value(nvl, key, B_TRUE);
	test("boolean_value", B_FALSE, B_FALSE);

	fnvlist_add_byte(nvl, key, 1);
	test("byte", B_FALSE, B_FALSE);

	fnvlist_add_int8(nvl, key, 1);
	test("int8", B_FALSE, B_FALSE);

	fnvlist_add_uint8(nvl, key, 1);
	test("uint8", B_FALSE, B_FALSE);

	fnvlist_add_int16(nvl, key, 1);
	test("int16", B_FALSE, B_FALSE);

	fnvlist_add_uint16(nvl, key, 1);
	test("uint16", B_FALSE, B_FALSE);

	fnvlist_add_int32(nvl, key, 1);
	test("int32", B_FALSE, B_FALSE);

	fnvlist_add_uint32(nvl, key, 1);
	test("uint32", B_FALSE, B_FALSE);

	fnvlist_add_int64(nvl, key, 1);
	test("int64", B_TRUE, B_TRUE);

	fnvlist_add_uint64(nvl, key, 1);
	test("uint64", B_FALSE, B_FALSE);

	fnvlist_add_string(nvl, key, "1");
	test("string", B_TRUE, B_TRUE);


	{
		nvlist_t *val = fnvlist_alloc();
		fnvlist_add_string(val, "subkey", "subvalue");
		fnvlist_add_nvlist(nvl, key, val);
		fnvlist_free(val);
		test("nvlist", B_TRUE, B_TRUE);
	}
	{
		boolean_t val[2] = { B_FALSE, B_TRUE };
		fnvlist_add_boolean_array(nvl, key, val, 2);
		test("boolean_array", B_FALSE, B_FALSE);
	}
	{
		uchar_t val[2] = { 0, 1 };
		fnvlist_add_byte_array(nvl, key, val, 2);
		test("byte_array", B_FALSE, B_FALSE);
	}
	{
		int8_t val[2] = { 0, 1 };
		fnvlist_add_int8_array(nvl, key, val, 2);
		test("int8_array", B_FALSE, B_FALSE);
	}
	{
		uint8_t val[2] = { 0, 1 };
		fnvlist_add_uint8_array(nvl, key, val, 2);
		test("uint8_array", B_FALSE, B_FALSE);
	}
	{
		int16_t val[2] = { 0, 1 };
		fnvlist_add_int16_array(nvl, key, val, 2);
		test("int16_array", B_FALSE, B_FALSE);
	}
	{
		uint16_t val[2] = { 0, 1 };
		fnvlist_add_uint16_array(nvl, key, val, 2);
		test("uint16_array", B_FALSE, B_FALSE);
	}
	{
		int32_t val[2] = { 0, 1 };
		fnvlist_add_int32_array(nvl, key, val, 2);
		test("int32_array", B_FALSE, B_FALSE);
	}
	{
		uint32_t val[2] = { 0, 1 };
		fnvlist_add_uint32_array(nvl, key, val, 2);
		test("uint32_array", B_FALSE, B_FALSE);
	}
	{
		int64_t val[2] = { 0, 1 };
		fnvlist_add_int64_array(nvl, key, val, 2);
		test("int64_array", B_TRUE, B_FALSE);
	}
	{
		uint64_t val[2] = { 0, 1 };
		fnvlist_add_uint64_array(nvl, key, val, 2);
		test("uint64_array", B_FALSE, B_FALSE);
	}
	{
		char *const val[2] = { "0", "1" };
		fnvlist_add_string_array(nvl, key, val, 2);
		test("string_array", B_TRUE, B_FALSE);
	}
	{
		nvlist_t *val[2];
		val[0] = fnvlist_alloc();
		fnvlist_add_string(val[0], "subkey", "subvalue");
		val[1] = fnvlist_alloc();
		fnvlist_add_string(val[1], "subkey2", "subvalue2");
		fnvlist_add_nvlist_array(nvl, key, val, 2);
		fnvlist_free(val[0]);
		fnvlist_free(val[1]);
		test("nvlist_array", B_FALSE, B_FALSE);
	}
	{
		fnvlist_add_string(nvl, bigstring, "1");
		test("large_key", B_TRUE, B_TRUE);
	}
	{
		fnvlist_add_string(nvl, key, bigstring);
		test("large_value", B_TRUE, B_TRUE);
	}
	{
		for (int i = 0; i < 1024; i++) {
			char buf[32];
			(void) snprintf(buf, sizeof (buf), "key-%u", i);
			fnvlist_add_int64(nvl, buf, i);
		}
		test("many_keys", B_TRUE, B_TRUE);
	}
#ifndef __sparc__
	{
		for (int i = 0; i < 10; i++) {
			nvlist_t *newval = fnvlist_alloc();
			fnvlist_add_nvlist(newval, "key", nvl);
			fnvlist_free(nvl);
			nvl = newval;
		}
		test("deeply_nested_pos", B_TRUE, B_TRUE);
	}
	{
		for (int i = 0; i < 90; i++) {
			nvlist_t *newval = fnvlist_alloc();
			fnvlist_add_nvlist(newval, "key", nvl);
			fnvlist_free(nvl);
			nvl = newval;
		}
		test("deeply_nested_neg", B_FALSE, B_FALSE);
	}
#endif
	free(bigstring);
	fnvlist_free(nvl);
}
Example 11
/*
 * Linux adds ZFS_IOC_RECV_NEW for resumable and raw streams and preserves the
 * legacy ZFS_IOC_RECV user/kernel interface.  The new interface supports all
 * stream options but is currently only used for resumable streams.  This way
 * updated user space utilities will interoperate with older kernel modules.
 *
 * Non-Linux OpenZFS platforms have opted to modify the legacy interface.
 */
static int
recv_impl(const char *snapname, nvlist_t *recvdprops, nvlist_t *localprops,
    const char *origin, boolean_t force, boolean_t resumable, boolean_t raw,
    int input_fd, const dmu_replay_record_t *begin_record, int cleanup_fd,
    uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle,
    nvlist_t **errors)
{
	dmu_replay_record_t drr;
	char fsname[MAXPATHLEN];
	char *atp;
	int error;

	ASSERT3S(g_refcount, >, 0);
	VERIFY3S(g_fd, !=, -1);

	/* Set 'fsname' to the name of containing filesystem */
	(void) strlcpy(fsname, snapname, sizeof (fsname));
	atp = strchr(fsname, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* If the fs does not exist, try its parent. */
	if (!lzc_exists(fsname)) {
		char *slashp = strrchr(fsname, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';
	}

	/*
	 * The begin_record is normally a non-byteswapped BEGIN record.
	 * For resumable streams it may be set to any non-byteswapped
	 * dmu_replay_record_t.
	 */
	if (begin_record == NULL) {
		error = recv_read(input_fd, &drr, sizeof (drr));
		if (error != 0)
			return (error);
	} else {
		drr = *begin_record;
	}

	if (resumable || raw) {
		nvlist_t *outnvl = NULL;
		nvlist_t *innvl = fnvlist_alloc();

		fnvlist_add_string(innvl, "snapname", snapname);

		if (recvdprops != NULL)
			fnvlist_add_nvlist(innvl, "props", recvdprops);

		if (localprops != NULL)
			fnvlist_add_nvlist(innvl, "localprops", localprops);

		if (origin != NULL && strlen(origin))
			fnvlist_add_string(innvl, "origin", origin);

		fnvlist_add_byte_array(innvl, "begin_record",
		    (uchar_t *)&drr, sizeof (drr));

		fnvlist_add_int32(innvl, "input_fd", input_fd);

		if (force)
			fnvlist_add_boolean(innvl, "force");

		if (resumable)
			fnvlist_add_boolean(innvl, "resumable");

		if (cleanup_fd >= 0)
			fnvlist_add_int32(innvl, "cleanup_fd", cleanup_fd);

		if (action_handle != NULL)
			fnvlist_add_uint64(innvl, "action_handle",
			    *action_handle);

		error = lzc_ioctl(ZFS_IOC_RECV_NEW, fsname, innvl, &outnvl);

		if (error == 0 && read_bytes != NULL)
			error = nvlist_lookup_uint64(outnvl, "read_bytes",
			    read_bytes);

		if (error == 0 && errflags != NULL)
			error = nvlist_lookup_uint64(outnvl, "error_flags",
			    errflags);

		if (error == 0 && action_handle != NULL)
			error = nvlist_lookup_uint64(outnvl, "action_handle",
			    action_handle);

		if (error == 0 && errors != NULL) {
			nvlist_t *nvl;
			error = nvlist_lookup_nvlist(outnvl, "errors", &nvl);
			if (error == 0)
				*errors = fnvlist_dup(nvl);
		}

		fnvlist_free(innvl);
		fnvlist_free(outnvl);
	} else {
		zfs_cmd_t zc = {"\0"};
		char *packed = NULL;
		size_t size;

		ASSERT3S(g_refcount, >, 0);

		(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
		(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

		if (recvdprops != NULL) {
			packed = fnvlist_pack(recvdprops, &size);
			zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
			zc.zc_nvlist_src_size = size;
		}

		if (localprops != NULL) {
			packed = fnvlist_pack(localprops, &size);
			zc.zc_nvlist_conf = (uint64_t)(uintptr_t)packed;
			zc.zc_nvlist_conf_size = size;
		}

		if (origin != NULL)
			(void) strlcpy(zc.zc_string, origin,
			    sizeof (zc.zc_string));

		ASSERT3S(drr.drr_type, ==, DRR_BEGIN);
		zc.zc_begin_record = drr.drr_u.drr_begin;
		zc.zc_guid = force;
		zc.zc_cookie = input_fd;
		zc.zc_cleanup_fd = -1;
		zc.zc_action_handle = 0;

		if (cleanup_fd >= 0)
			zc.zc_cleanup_fd = cleanup_fd;

		if (action_handle != NULL)
			zc.zc_action_handle = *action_handle;

		zc.zc_nvlist_dst_size = 128 * 1024;
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);

		error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
		if (error != 0) {
			error = errno;
		} else {
			if (read_bytes != NULL)
				*read_bytes = zc.zc_cookie;

			if (errflags != NULL)
				*errflags = zc.zc_obj;

			if (action_handle != NULL)
				*action_handle = zc.zc_action_handle;

			if (errors != NULL)
				VERIFY0(nvlist_unpack(
				    (void *)(uintptr_t)zc.zc_nvlist_dst,
				    zc.zc_nvlist_dst_size, errors, KM_SLEEP));
		}

		if (packed != NULL)
			fnvlist_pack_free(packed, size);
		free((void *)(uintptr_t)zc.zc_nvlist_dst);
	}

	return (error);
}
Example 12
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = { 0 };
	int error = 0;
	char *packed;
#ifdef __FreeBSD__
	nvlist_t *oldsource;
#endif
	size_t size;

	ASSERT3S(g_refcount, >, 0);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

#ifdef __FreeBSD__
	if (zfs_ioctl_version == ZFS_IOCVER_UNDEF)
		zfs_ioctl_version = get_zfs_ioctl_version();

	if (zfs_ioctl_version < ZFS_IOCVER_LZC) {
		oldsource = source;
		error = lzc_compat_pre(&zc, &ioc, &source);
		if (error)
			return (error);
	}
#endif

	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
#ifdef illumos
		if (zc.zc_nvlist_dst == NULL) {
#else
		if (zc.zc_nvlist_dst == 0) {
#endif
			error = ENOMEM;
			goto out;
		}
	}

	while (ioctl(g_fd, ioc, &zc) != 0) {
		if (errno == ENOMEM && resultp != NULL) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
#ifdef illumos
			if (zc.zc_nvlist_dst == NULL) {
#else
			if (zc.zc_nvlist_dst == 0) {
#endif
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}

#ifdef __FreeBSD__
	if (zfs_ioctl_version < ZFS_IOCVER_LZC)
		lzc_compat_post(&zc, ioc);
#endif
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}
#ifdef __FreeBSD__
	if (zfs_ioctl_version < ZFS_IOCVER_LZC)
		lzc_compat_outnvl(&zc, ioc, resultp);
#endif
out:
#ifdef __FreeBSD__
	if (zfs_ioctl_version < ZFS_IOCVER_LZC) {
		if (source != oldsource)
			nvlist_free(source);
		source = oldsource;
	}
#endif
	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

int
lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}
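A minimal usage sketch for the lzc_create() variant above, assuming the LZC_DATSET_TYPE_ZFS enumerator from libzfs_core.h and a prior call to libzfs_core_init(); the dataset name and property are hypothetical.

#include <libzfs_core.h>
#include <libnvpair.h>

static int
create_example(void)
{
	nvlist_t *props = fnvlist_alloc();
	int error;

	/* creation-time properties are passed as name -> value nvpairs */
	fnvlist_add_uint64(props, "recordsize", 128 * 1024);

	error = lzc_create("tank/newfs", LZC_DATSET_TYPE_ZFS, props);
	nvlist_free(props);
	return (error);
}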
Example 13
/*
 * Find all 'allow' permissions from a given point and then continue
 * traversing up to the root.
 *
 * This function constructs an nvlist of nvlists.
 * Each setpoint is an nvlist composed of an nvlist of an nvlist
 * of the individual users/groups/everyone/create permissions.
 *
 * The nvlist will look like this.
 *
 * { source fsname -> { whokeys { permissions,...}, ...}}
 *
 * The fsname nvpairs will be arranged in a bottom up order.  For example,
 * if we have the following structure a/b/c then the nvpairs for the fsnames
 * will be ordered a/b/c, a/b, a.
 */
int
dsl_deleg_get(const char *ddname, nvlist_t **nvp)
{
    dsl_dir_t *dd, *startdd;
    dsl_pool_t *dp;
    int error;
    objset_t *mos;
    zap_cursor_t *basezc, *zc;
    zap_attribute_t *baseza, *za;
    char *source;

    error = dsl_pool_hold(ddname, FTAG, &dp);
    if (error != 0)
        return (error);

    error = dsl_dir_hold(dp, ddname, FTAG, &startdd, NULL);
    if (error != 0) {
        dsl_pool_rele(dp, FTAG);
        return (error);
    }

    dp = startdd->dd_pool;
    mos = dp->dp_meta_objset;

    zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
    za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
    basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
    baseza = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
    source = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, KM_SLEEP);
    VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

    for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
        nvlist_t *sp_nvp;
        uint64_t n;

        if (dd->dd_phys->dd_deleg_zapobj == 0 ||
                zap_count(mos, dd->dd_phys->dd_deleg_zapobj, &n) != 0 ||
                n == 0)
            continue;

        sp_nvp = fnvlist_alloc();
        for (zap_cursor_init(basezc, mos,
                             dd->dd_phys->dd_deleg_zapobj);
                zap_cursor_retrieve(basezc, baseza) == 0;
                zap_cursor_advance(basezc)) {
            nvlist_t *perms_nvp;

            ASSERT(baseza->za_integer_length == 8);
            ASSERT(baseza->za_num_integers == 1);

            perms_nvp = fnvlist_alloc();
            for (zap_cursor_init(zc, mos, baseza->za_first_integer);
                    zap_cursor_retrieve(zc, za) == 0;
                    zap_cursor_advance(zc)) {
                fnvlist_add_boolean(perms_nvp, za->za_name);
            }
            zap_cursor_fini(zc);
            fnvlist_add_nvlist(sp_nvp, baseza->za_name, perms_nvp);
            fnvlist_free(perms_nvp);
        }

        zap_cursor_fini(basezc);

        dsl_dir_name(dd, source);
        fnvlist_add_nvlist(*nvp, source, sp_nvp);
        nvlist_free(sp_nvp);
    }

    kmem_free(source, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);
    kmem_free(baseza, sizeof (zap_attribute_t));
    kmem_free(basezc, sizeof (zap_cursor_t));
    kmem_free(za, sizeof (zap_attribute_t));
    kmem_free(zc, sizeof (zap_cursor_t));

    dsl_dir_rele(startdd, FTAG);
    dsl_pool_rele(dp, FTAG);
    return (0);
}
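A hedged, kernel-context sketch of walking the nvlist-of-nvlists returned above (fsname -> whokey -> permission); the dataset name comes from the caller.

static void
deleg_walk_example(const char *ddname)
{
	nvlist_t *nvp;

	if (dsl_deleg_get(ddname, &nvp) != 0)
		return;

	for (nvpair_t *fs = nvlist_next_nvpair(nvp, NULL); fs != NULL;
	    fs = nvlist_next_nvpair(nvp, fs)) {
		nvlist_t *who_nvp = fnvpair_value_nvlist(fs);

		for (nvpair_t *who = nvlist_next_nvpair(who_nvp, NULL);
		    who != NULL; who = nvlist_next_nvpair(who_nvp, who)) {
			nvlist_t *perms = fnvpair_value_nvlist(who);

			for (nvpair_t *p = nvlist_next_nvpair(perms, NULL);
			    p != NULL; p = nvlist_next_nvpair(perms, p)) {
				zfs_dbgmsg("%s: %s may %s", nvpair_name(fs),
				    nvpair_name(who), nvpair_name(p));
			}
		}
	}
	nvlist_free(nvp);
}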
Example 14
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_write_cachefile(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	char *pool_name;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (/*rootdir == NULL ||*/ !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				nvl = fnvlist_alloc();

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
				VERIFY0(nvlist_lookup_string(spa->spa_config,
					ZPOOL_CONFIG_POOL_NAME, &pool_name));
			} else
				pool_name = spa_name(spa);

			fnvlist_add_nvlist(nvl, pool_name, spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}

		spa_config_write(dp, nvl);

		nvlist_free(nvl);
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent) {
		/*
		 * We don't have spa in spa_config_write, so handle the
		 * events here.
		 */
		if (nvl == NULL) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_REMOVE,
			    target, NULL, NULL, NULL,
			    (uint64_t)dp->scd_path, 0);
		} else {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_RENAME,
			    target, NULL, NULL, NULL,
			    (uint64_t)dp->scd_path, 0);
		}
		}
	}
}
Example 15
/*
 * Find all 'allow' permissions from a given point and then continue
 * traversing up to the root.
 *
 * This function constructs an nvlist of nvlists.
 * Each setpoint is an nvlist composed of an nvlist of an nvlist
 * of the individual users/groups/everyone/create permissions.
 *
 * The nvlist will look like this.
 *
 * { source fsname -> { whokeys { permissions,...}, ...}}
 *
 * The fsname nvpairs will be arranged in a bottom up order.  For example,
 * if we have the following structure a/b/c then the nvpairs for the fsnames
 * will be ordered a/b/c, a/b, a.
 */
int
dsl_deleg_get(const char *ddname, nvlist_t **nvp)
{
	dsl_dir_t *dd, *startdd;
	dsl_pool_t *dp;
	int error;
	objset_t *mos;

	error = dsl_pool_hold(ddname, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dir_hold(dp, ddname, FTAG, &startdd, NULL);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	dp = startdd->dd_pool;
	mos = dp->dp_meta_objset;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
		zap_cursor_t basezc;
		zap_attribute_t baseza;
		nvlist_t *sp_nvp;
		uint64_t n;
		char source[ZFS_MAX_DATASET_NAME_LEN];

		if (dsl_dir_phys(dd)->dd_deleg_zapobj == 0 ||
		    zap_count(mos,
		    dsl_dir_phys(dd)->dd_deleg_zapobj, &n) != 0 || n == 0)
			continue;

		sp_nvp = fnvlist_alloc();
		for (zap_cursor_init(&basezc, mos,
		    dsl_dir_phys(dd)->dd_deleg_zapobj);
		    zap_cursor_retrieve(&basezc, &baseza) == 0;
		    zap_cursor_advance(&basezc)) {
			zap_cursor_t zc;
			zap_attribute_t za;
			nvlist_t *perms_nvp;

			ASSERT(baseza.za_integer_length == 8);
			ASSERT(baseza.za_num_integers == 1);

			perms_nvp = fnvlist_alloc();
			for (zap_cursor_init(&zc, mos, baseza.za_first_integer);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				fnvlist_add_boolean(perms_nvp, za.za_name);
			}
			zap_cursor_fini(&zc);
			fnvlist_add_nvlist(sp_nvp, baseza.za_name, perms_nvp);
			fnvlist_free(perms_nvp);
		}

		zap_cursor_fini(&basezc);

		dsl_dir_name(dd, source);
		fnvlist_add_nvlist(*nvp, source, sp_nvp);
		nvlist_free(sp_nvp);
	}

	dsl_dir_rele(startdd, FTAG);
	dsl_pool_rele(dp, FTAG);
	return (0);
}