Example #1
static void
dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj,
    const char *propname, uint64_t value, int first)
{
	dsl_dir_t *dd;
	dsl_prop_record_t *pr;
	dsl_prop_cb_record_t *cbr;
	objset_t *mos = dp->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t *za;
	int err;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
	if (err)
		return;

	if (!first) {
		/*
		 * If the prop is set here, then this change is not
		 * being inherited here or below; stop the recursion.
		 */
		err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
		    propname);
		if (err == 0) {
			dsl_dir_rele(dd, FTAG);
			return;
		}
		ASSERT3U(err, ==, ENOENT);
	}
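
The early return above leans on zap_contains(), which checks only for the presence of a ZAP entry: it returns 0 when the named property is set at this level and ENOENT when it is not. A minimal sketch of the idiom, assuming mos, zapobj, and propname are already in scope (names illustrative, not from this function):

	/*
	 * Hedged sketch: zap_contains() reads no value, it only tests
	 * presence.  0 means the property is set locally; ENOENT means
	 * the change keeps inheriting downward.
	 */
	err = zap_contains(mos, zapobj, propname);
	if (err == 0) {
		/* set at this level: stop the recursion */
	} else {
		ASSERT3U(err, ==, ENOENT);
		/* not set here: keep notifying descendants */
	}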
Example #2
static void
dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_release_arg_t *ddura = arg;
	dsl_holdfunc_t *holdfunc = ddura->ddura_holdfunc;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	for (nvpair_t *pair = nvlist_next_nvpair(ddura->ddura_chkholds, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_chkholds,
	    pair)) {
		dsl_dataset_t *ds;
		const char *name = nvpair_name(pair);

		VERIFY0(holdfunc(dp, name, FTAG, &ds));

		dsl_dataset_user_release_sync_one(ds,
		    fnvpair_value_nvlist(pair), tx);
		if (nvlist_exists(ddura->ddura_todelete, name)) {
			ASSERT(ds->ds_userrefs == 0 &&
			    ds->ds_phys->ds_num_children == 1 &&
			    DS_IS_DEFER_DESTROY(ds));
			dsl_destroy_snapshot_sync_impl(ds, B_FALSE, tx);
		}
		dsl_dataset_rele(ds, FTAG);
	}
}
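
For context, a hedged userland counterpart: the nvlist that eventually reaches dsl_dataset_user_release_sync() maps each snapshot name to a nested nvlist whose entry names are the hold tags. A minimal libzfs_core sketch, assuming libzfs_core_init() has already been called; the helper name is illustrative:

#include <libzfs_core.h>
#include <libnvpair.h>

static int
release_hold(const char *snap, const char *tag)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *tags = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	fnvlist_add_boolean(tags, tag);		/* tags are boolean entries */
	fnvlist_add_nvlist(holds, snap, tags);
	err = lzc_release(holds, &errlist);
	fnvlist_free(tags);
	fnvlist_free(holds);
	if (errlist != NULL)
		fnvlist_free(errlist);
	return (err);
}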
Example #3
/*
 * Update all property values for ddobj and its descendants.  This is
 * used when renaming the dir.
 */
void
dsl_prop_notify_all(dsl_dir_t *dd)
{
	dsl_pool_t *dp = dd->dd_pool;
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	(void) dmu_objset_find_dp(dp, dd->dd_object, dsl_prop_notify_all_cb,
	    NULL, DS_FIND_CHILDREN);
}
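
dmu_objset_find_dp() visits dd_object and, with DS_FIND_CHILDREN, every descendant, calling the supplied function for each dataset while the config lock is held. A hedged skeleton of the callback shape (dsl_prop_notify_all_cb itself is not shown here; this is illustrative only):

/* Illustrative dmu_objset_find_dp() callback skeleton. */
static int
my_notify_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	/* per-dataset work; ds is held for the duration of the call */
	return (0);	/* return 0 to continue the traversal */
}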
Example #4
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
#ifdef ZFS_DEBUG
	int err;
#endif
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj, old_unique, used = 0, comp = 0, uncomp = 0;
	dsl_dataset_t *ds_next, *ds_head, *hds;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}
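
For context, a hedged sketch of how a deferred destroy is requested from userland through libzfs_core, eventually reaching dsl_destroy_snapshot_sync_impl() above. The helper name and snapshot name are illustrative, and error handling is abbreviated:

#include <libzfs_core.h>
#include <libnvpair.h>

static int
defer_destroy(const char *snap)
{
	nvlist_t *snaps = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	fnvlist_add_boolean(snaps, snap);	/* names-only nvlist */
	err = lzc_destroy_snaps(snaps, B_TRUE, &errlist);	/* B_TRUE: defer */
	fnvlist_free(snaps);
	if (errlist != NULL)
		fnvlist_free(errlist);
	return (err);
}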
Example #5
static int
dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
{
	dsl_dataset_user_release_arg_t *ddura;
	dsl_holdfunc_t *holdfunc;
	dsl_pool_t *dp;
	nvpair_t *pair;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	dp = dmu_tx_pool(tx);

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	ddura = arg;
	holdfunc = ddura->ddura_holdfunc;

	for (pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
		int error;
		dsl_dataset_t *ds;
		nvlist_t *holds;
		const char *snapname = nvpair_name(pair);

		error = nvpair_value_nvlist(pair, &holds);
		if (error != 0)
			error = SET_ERROR(EINVAL);
		else
			error = holdfunc(dp, snapname, FTAG, &ds);
		if (error == 0) {
			error = dsl_dataset_user_release_check_one(ddura, ds,
			    holds, snapname);
			dsl_dataset_rele(ds, FTAG);
		}
		if (error != 0) {
			if (ddura->ddura_errlist != NULL) {
				fnvlist_add_int32(ddura->ddura_errlist,
				    snapname, error);
			}
			/*
			 * Non-existent snapshots are put on the errlist,
			 * but don't cause an overall failure.
			 */
			if (error != ENOENT)
				return (error);
		}
	}

	return (0);
}
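
This check function follows the standard two-phase sync-task protocol: it returns 0 immediately in open context (!dmu_tx_is_syncing) and does real validation only in syncing context, after which the sync function applies the change. A hedged sketch of how such a pair is dispatched; the dsl_sync_task() argument list differs across ZFS versions (the space-check parameter is newer), so treat this as illustrative:

	/*
	 * Hedged sketch: dispatch the check/sync pair as a sync task.
	 * The trailing arguments vary by version; this follows the
	 * newer form.
	 */
	error = dsl_sync_task(pool, dsl_dataset_user_release_check,
	    dsl_dataset_user_release_sync, &ddura, 0, ZFS_SPACE_CHECK_NONE);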
Example #6
static void
dsl_dataset_user_hold_sync_one_impl(nvlist_t *tmpholds, dsl_dataset_t *ds,
    const char *htag, minor_t minor, uint64_t now, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	if (ds->ds_phys->ds_userrefs_obj == 0) {
		/*
		 * This is the first user hold for this dataset.  Create
		 * the userrefs zap object.
		 */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		zapobj = ds->ds_phys->ds_userrefs_obj =
		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
	} else {
		zapobj = ds->ds_phys->ds_userrefs_obj;
	}
	ds->ds_userrefs++;

	VERIFY0(zap_add(mos, zapobj, htag, 8, 1, &now, tx));

	if (minor != 0) {
		char name[MAXNAMELEN];
		nvlist_t *tags;

		VERIFY0(dsl_pool_user_hold(dp, ds->ds_object,
		    htag, now, tx));
		(void) snprintf(name, sizeof (name), "%llx",
		    (u_longlong_t)ds->ds_object);

		if (nvlist_lookup_nvlist(tmpholds, name, &tags) != 0) {
			VERIFY0(nvlist_alloc(&tags, NV_UNIQUE_NAME,
			    KM_PUSHPAGE));
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(tmpholds, name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}

	spa_history_log_internal_ds(ds, "hold", tx,
	    "tag=%s temp=%d refs=%llu",
	    htag, minor != 0, ds->ds_userrefs);
}
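
A hedged userland counterpart: lzc_hold() takes an nvlist mapping snapshot names to hold tags, plus a cleanup file descriptor that makes the hold temporary (which is what yields a nonzero minor on the kernel side). A minimal sketch, helper name illustrative:

#include <libzfs_core.h>
#include <libnvpair.h>

static int
take_hold(const char *snap, const char *tag)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	fnvlist_add_string(holds, snap, tag);
	err = lzc_hold(holds, -1, &errlist);	/* -1: no cleanup fd */
	fnvlist_free(holds);
	if (errlist != NULL)
		fnvlist_free(errlist);
	return (err);
}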
Example #7
/*
 * Reopen zfsvfs_t::z_os and release VOPs.
 */
int
zfs_resume_fs(zfsvfs_t *zfsvfs, const char *osname, int mode)
{
	int err;

	ASSERT(RRW_WRITE_HELD(&zfsvfs->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));

	err = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (err) {
		zfsvfs->z_os = NULL;
	} else {
		znode_t *zp;

		VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);

		/*
		 * Attempt to re-establish all the active znodes with
		 * their dbufs.  If a zfs_rezget() fails, then we'll let
		 * any potential callers discover that via ZFS_ENTER_VERIFY_VP
		 * when they try to use their znode.
		 */
		mutex_enter(&zfsvfs->z_znodes_lock);
		for (zp = list_head(&zfsvfs->z_all_znodes); zp;
		    zp = list_next(&zfsvfs->z_all_znodes, zp)) {
			(void) zfs_rezget(zp);
		}
		mutex_exit(&zfsvfs->z_znodes_lock);
	}

	/* release the VOPs */
	rw_exit(&zfsvfs->z_teardown_inactive_lock);
	rrw_exit(&zfsvfs->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't reopen zfsvfs::z_os, force
		 * unmount this file system.
		 */
		if (vn_vfswlock(zfsvfs->z_vfs->vfs_vnodecovered) == 0)
			(void) dounmount(zfsvfs->z_vfs, MS_FORCE, curthread);
	}
	return (err);
}
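
Typical pairing, as a hedged sketch: a caller such as the rollback ioctl suspends the filesystem, modifies the dataset underneath it, and then resumes, relying on zfs_resume_fs() to force an unmount when the objset cannot be reopened. On this Solaris-era interface the open mode is saved by zfs_suspend_fs() and handed back on resume; variable names are illustrative:

	int mode;

	if (zfs_suspend_fs(zfsvfs, NULL, &mode) == 0) {
		/* ... roll back or otherwise replace the dataset ... */
		(void) zfs_resume_fs(zfsvfs, osname, mode);
	}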
Example #8
/*
 * dsl_crypto_key_change
 *
 * The old key must already be present in memory, since the user
 * interface doesn't provide a way to prompt for or retrieve the old key.
 */
static void
dsl_crypto_key_change_sync(void *arg1, dmu_tx_t *tx)
{
    struct wkey_change_arg *ca = arg1;
    dsl_dataset_t *ds = ca->ca_ds;
    size_t wkeylen;
    char *wkeybuf = NULL;
    zcrypt_key_t *txgkey;
    zap_cursor_t zc;
    zap_attribute_t za;
    objset_t *mos;
    uint64_t keychain_zapobj;
    spa_t *spa;
    zcrypt_keystore_node_t *zkn;

    ASSERT(RRW_WRITE_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock));

    mos = ds->ds_dir->dd_pool->dp_meta_objset;
    keychain_zapobj = dsl_dir_phys(ds->ds_dir)->dd_keychain_obj;

    /*
     * To allow for the case where the keychains of child datasets
     * are not loaded (i.e., an explicit 'zfs key -u tank/fs/sub' was
     * done some time before running 'zfs key -c tank/fs'), we iterate
     * over the ZAP objects on disk rather than copying from the
     * in-memory keystore node.
     */
    for (zap_cursor_init(&zc, mos, keychain_zapobj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
        wkeylen = za.za_num_integers;
        wkeybuf = kmem_alloc(wkeylen, KM_SLEEP);
        VERIFY(zap_lookup_uint64(mos, keychain_zapobj,
                                 (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf) == 0);
        VERIFY(zcrypt_unwrap_key(ca->ca_old_key,
                                 ds->ds_objset->os_crypt, wkeybuf, wkeylen, &txgkey) == 0);
        kmem_free(wkeybuf, wkeylen);
        VERIFY(zcrypt_wrap_key(ca->ca_new_key, txgkey,
                               &wkeybuf, &wkeylen,
                               zio_crypt_select_wrap(ds->ds_objset->os_crypt)) == 0);
        zcrypt_key_free(txgkey);
        VERIFY(zap_update_uint64(mos, keychain_zapobj,
                                 (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf, tx) == 0);
        kmem_free(wkeybuf, wkeylen);
    }

    zap_cursor_fini(&zc);

    spa = dsl_dataset_get_spa(ds);

    /*
     * If the wrapping key is loaded, switch the in-memory copy now.
     */
    zkn = zcrypt_keystore_find_node(spa, ds->ds_object, B_FALSE);
    if (zkn != NULL) {
        zcrypt_key_free(zkn->skn_wrapkey);
        zkn->skn_wrapkey = zcrypt_key_copy(ca->ca_new_key);
    }

    spa_history_log_internal(spa, "key change", tx,
                             "succeeded dataset = %llu", ds->ds_object);
}
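
The loop above is the standard ZAP cursor idiom. A stripped-down sketch, assuming mos and zapobj are in scope; zap_cursor_fini() must run even if the loop exits early:

	zap_cursor_t zc;
	zap_attribute_t za;

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		/* za.za_name: entry name; za.za_num_integers: value size */
	}
	zap_cursor_fini(&zc);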
Example #9
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;

	ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	err = dmu_objset_own(osname, DMU_OST_ZFS, B_FALSE, zsb, &zsb->z_os);
	if (err) {
		zsb->z_os = NULL;
	} else {
		znode_t *zp;
		uint64_t sa_obj = 0;

		err2 = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj);

		if ((err || err2) && zsb->z_version >= ZPL_VERSION_SA)
			goto bail;

		if ((err = sa_setup(zsb->z_os, sa_obj,
		    zfs_attr_table,  ZPL_END, &zsb->z_attr_table)) != 0)
			goto bail;

		VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);
		zsb->z_rollback_time = jiffies;

		/*
		 * Attempt to re-establish all the active inodes with their
		 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
		 * and mark it stale.  This prevents a collision if a new
		 * inode/object is created which must use the same inode
		 * number.  The stale inode will be released when the
		 * VFS prunes the dentry holding the remaining references
		 * on the stale inode.
		 */
		mutex_enter(&zsb->z_znodes_lock);
		for (zp = list_head(&zsb->z_all_znodes); zp;
		    zp = list_next(&zsb->z_all_znodes, zp)) {
			err2 = zfs_rezget(zp);
			if (err2) {
				remove_inode_hash(ZTOI(zp));
				zp->z_is_stale = B_TRUE;
			}
		}
		mutex_exit(&zsb->z_znodes_lock);
	}

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrw_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't reopen zfs_sb_t or set up the
		 * SA framework, force unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}
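
The Linux port keeps the same suspend/resume shape but drops the mode argument: zfs_suspend_fs() takes only the zfs_sb_t. A hedged sketch of the pairing, variable names illustrative:

	if (zfs_suspend_fs(zsb) == 0) {
		/* ... e.g. rollback or receive into the dataset ... */
		(void) zfs_resume_fs(zsb, osname);
	}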
Example #10
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	boolean_t book_exists = dsl_bookmark_ds_destroyed(ds, tx);

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (dsl_dataset_feature_is_active(ds, f))
			dsl_dataset_deactivate_feature(ds, f, tx);
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}
Example #11
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;
	uint64_t sa_obj = 0;

	ASSERT(RRW_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	/*
	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	 */
	VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
	VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
	VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
	dmu_objset_rele(zsb->z_os, zsb);

	/*
	 * Make sure version hasn't changed
	 */

	err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
	    &zsb->z_version);

	if (err)
		goto bail;

	err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SA_ATTRS, 8, 1, &sa_obj);

	if (err && zsb->z_version >= ZPL_VERSION_SA)
		goto bail;

	if ((err = sa_setup(zsb->z_os, sa_obj,
	    zfs_attr_table,  ZPL_END, &zsb->z_attr_table)) != 0)
		goto bail;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(zsb->z_os,
		    zfs_sa_upgrade);

	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

	zfs_set_fuid_feature(zsb);
	zsb->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrw_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't set up the SA framework, try to force
		 * unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}