Example #1
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}
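
For context, a check like this typically runs as the check half of a dsl_sync_task. The wrapper below is a hedged sketch, not the actual OpenZFS caller: the argument format and the example_ name are assumptions, while dsl_dataset_hold()/dsl_dataset_rele() and dmu_tx_pool() are used as in standard DSL code.

/*
 * Hypothetical sketch of a dsl_sync_task check callback driving the
 * _impl check above; the argument format and this wrapper are
 * illustrative only.
 */
static int
example_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	const char *snapname = arg;		/* assumed argument format */
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, snapname, FTAG, &ds);
	if (error != 0)
		return (error);

	/* B_FALSE requests an immediate (non-deferred) destroy */
	error = dsl_destroy_snapshot_check_impl(ds, B_FALSE);

	dsl_dataset_rele(ds, FTAG);
	return (error);
}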
Example #2
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We shouldn't be doing anything with dsl_dataset_t's unless the
	 * pool_config lock is held, or the dataset is long-held.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
	    dsl_dataset_long_held(ds));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
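
A hedged sketch of a typical caller, loosely modeled on the hold-then-fetch pattern the assertion above demands. The helper name is made up, and on success the caller is assumed to keep the pool and dataset holds.

/*
 * Illustrative sketch (not part of the source above): hold the pool and
 * the dataset, then fetch its objset_t.  On success the caller keeps both
 * holds and must later dsl_dataset_rele() and dsl_pool_rele() them.
 */
static int
example_hold_objset(const char *name, void *tag, dsl_dataset_t **dsp,
    objset_t **osp)
{
	dsl_pool_t *dp;
	int error;

	/* takes the pool config lock, satisfying the ASSERT above */
	error = dsl_pool_hold(name, tag, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold(dp, name, tag, dsp);
	if (error == 0) {
		error = dmu_objset_from_ds(*dsp, osp);
		if (error != 0)
			dsl_dataset_rele(*dsp, tag);
	}
	if (error != 0)
		dsl_pool_rele(dp, tag);
	return (error);
}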
Example #3
/*
 * When we are called, os MUST refer to an objset associated with a dataset
 * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
 * == tag.  We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or
 * ownership changes can occur.
 *
 * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
 * release the hold on its dataset and acquire a new one on the dataset of the
 * same name so that it can be partially torn down and reconstructed.
 */
void
dmu_objset_refresh_ownership(objset_t *os, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds, *newds;
	char name[MAXNAMELEN];

	ds = os->os_dsl_dataset;
	VERIFY3P(ds, !=, NULL);
	VERIFY3P(ds->ds_owner, ==, tag);
	VERIFY(dsl_dataset_long_held(ds));

	dsl_dataset_name(ds, name);
	dp = dmu_objset_pool(os);
	dsl_pool_config_enter(dp, FTAG);
	dmu_objset_disown(os, tag);
	VERIFY0(dsl_dataset_own(dp, name, tag, &newds));
	VERIFY3P(newds, ==, os->os_dsl_dataset);
	dsl_pool_config_exit(dp, FTAG);
}
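
A hedged sketch of a hypothetical caller: the helper name is invented, and the pre-encryption dmu_objset_own()/dmu_objset_disown() signatures used in this example are assumed. It owns the dataset by name, refreshes ownership so the objset can be partially torn down and rebuilt, then disowns it.

/*
 * Hypothetical caller sketch: own the dataset by name, refresh the
 * ownership (the objset_t pointer itself stays valid), then disown.
 */
static int
example_refresh_ownership(const char *name, void *tag)
{
	objset_t *os;
	int error;

	error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, tag, &os);
	if (error != 0)
		return (error);

	/* drops and reacquires ownership under the pool config lock */
	dmu_objset_refresh_ownership(os, tag);

	dmu_objset_disown(os, tag);
	return (0);
}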
Example #4
static int
dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
    nvlist_t *holds, boolean_t *todelete)
{
	uint64_t zapobj;
	nvpair_t *pair;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	int error;
	int numholds = 0;

	*todelete = B_FALSE;

	if (!dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	zapobj = ds->ds_phys->ds_userrefs_obj;
	if (zapobj == 0)
		return (ESRCH);

	for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		/* Make sure the hold exists */
		uint64_t tmp;
		error = zap_lookup(mos, zapobj, nvpair_name(pair), 8, 1, &tmp);
		if (error == ENOENT)
			error = ESRCH;
		if (error != 0)
			return (error);
		numholds++;
	}

	if (DS_IS_DEFER_DESTROY(ds) && ds->ds_phys->ds_num_children == 1 &&
	    ds->ds_userrefs == numholds) {
		/* we need to destroy the snapshot as well */

		if (dsl_dataset_long_held(ds))
			return (EBUSY);
		*todelete = B_TRUE;
	}
	return (0);
}
Example #5
static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
	uint64_t zapobj;
	nvlist_t *holds_found;
	objset_t *mos;
	int numholds;

	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (nvlist_empty(holds))
		return (0);

	numholds = 0;
	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zapobj = ds->ds_phys->ds_userrefs_obj;
	holds_found = fnvlist_alloc();

	for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		uint64_t tmp;
		int error;
		const char *holdname = nvpair_name(pair);

		if (zapobj != 0)
			error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
		else
			error = SET_ERROR(ENOENT);

		/*
		 * Non-existent holds are put on the errlist, but don't
		 * cause an overall failure.
		 */
		if (error == ENOENT) {
			if (ddura->ddura_errlist != NULL) {
				char *errtag = kmem_asprintf("%s#%s",
				    snapname, holdname);
				fnvlist_add_int32(ddura->ddura_errlist, errtag,
				    ENOENT);
				strfree(errtag);
			}
			continue;
		}

		if (error != 0) {
			fnvlist_free(holds_found);
			return (error);
		}

		fnvlist_add_boolean(holds_found, holdname);
		numholds++;
	}

	if (DS_IS_DEFER_DESTROY(ds) && ds->ds_phys->ds_num_children == 1 &&
	    ds->ds_userrefs == numholds) {
		/* we need to destroy the snapshot as well */
		if (dsl_dataset_long_held(ds)) {
			fnvlist_free(holds_found);
			return (SET_ERROR(EBUSY));
		}
		fnvlist_add_boolean(ddura->ddura_todelete, snapname);
	}

	if (numholds != 0) {
		fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
		    holds_found);
	}
	fnvlist_free(holds_found);

	return (0);
}
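
For reference, the holds nvlist walked above is keyed by hold-tag name; the pair values are not consulted by the check. A minimal sketch of building one, with made-up tag names:

/*
 * Illustrative only: build a holds nvlist whose nvpair names are the
 * hold tags to release for one snapshot.
 */
static nvlist_t *
example_build_holds(void)
{
	nvlist_t *holds = fnvlist_alloc();

	/* boolean markers are enough; only the nvpair names matter */
	fnvlist_add_boolean(holds, "my-hold-tag");
	fnvlist_add_boolean(holds, "backup-job");
	return (holds);
}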
Example #6
File: zfs_vfsops.c Project: nwf/zfs
/*
 * Reopen zfs_sb_t and release VFS ops.
 */
int
zfs_resume_fs(zfs_sb_t *zsb, const char *osname)
{
	int err, err2;
	znode_t *zp;
	uint64_t sa_obj = 0;

	ASSERT(RRM_WRITE_HELD(&zsb->z_teardown_lock));
	ASSERT(RW_WRITE_HELD(&zsb->z_teardown_inactive_lock));

	/*
	 * We already own this, so just hold and rele it to update the
	 * objset_t, as the one we had before may have been evicted.
	 */
	VERIFY0(dmu_objset_hold(osname, zsb, &zsb->z_os));
	VERIFY3P(zsb->z_os->os_dsl_dataset->ds_owner, ==, zsb);
	VERIFY(dsl_dataset_long_held(zsb->z_os->os_dsl_dataset));
	dmu_objset_rele(zsb->z_os, zsb);

	/*
	 * Make sure version hasn't changed
	 */

	err = zfs_get_zplprop(zsb->z_os, ZFS_PROP_VERSION,
	    &zsb->z_version);

	if (err)
		goto bail;

	err = zap_lookup(zsb->z_os, MASTER_NODE_OBJ,
	    ZFS_SA_ATTRS, 8, 1, &sa_obj);

	if (err && zsb->z_version >= ZPL_VERSION_SA)
		goto bail;

	if ((err = sa_setup(zsb->z_os, sa_obj,
	    zfs_attr_table,  ZPL_END, &zsb->z_attr_table)) != 0)
		goto bail;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(zsb->z_os,
		    zfs_sa_upgrade);

	VERIFY(zfs_sb_setup(zsb, B_FALSE) == 0);

	zfs_set_fuid_feature(zsb);
	zsb->z_rollback_time = jiffies;

	/*
	 * Attempt to re-establish all the active inodes with their
	 * dbufs.  If a zfs_rezget() fails, then we unhash the inode
	 * and mark it stale.  This prevents a collision if a new
	 * inode/object is created which must use the same inode
	 * number.  The stale inode will be released when the
	 * VFS prunes the dentry holding the remaining references
	 * on the stale inode.
	 */
	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		err2 = zfs_rezget(zp);
		if (err2) {
			remove_inode_hash(ZTOI(zp));
			zp->z_is_stale = B_TRUE;
		}
	}
	mutex_exit(&zsb->z_znodes_lock);

bail:
	/* release the VFS ops */
	rw_exit(&zsb->z_teardown_inactive_lock);
	rrm_exit(&zsb->z_teardown_lock, FTAG);

	if (err) {
		/*
		 * Since we couldn't setup the sa framework, try to force
		 * unmount this file system.
		 */
		if (zsb->z_os)
			(void) zfs_umount(zsb->z_sb);
	}
	return (err);
}