Ejemplo n.º 1
0
Archivo: zfs_vfsops.c Proyecto: nwf/zfs
/*
 * Set or clear the user/group quota for (domain, rid) in the ZAP object
 * backing the given quota property.
 *
 * zsb    - filesystem being updated
 * type   - ZFS_PROP_USERQUOTA or ZFS_PROP_GROUPQUOTA; anything else is EINVAL
 * domain - FUID domain string for the id
 * rid    - rid within the domain
 * quota  - new quota in bytes; 0 removes the entry entirely
 *
 * Returns 0 on success or an errno-style error.
 */
int
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (SET_ERROR(EINVAL));

	/* Per-user/group quotas require userspace accounting support. */
	if (zsb->z_version < ZPL_VERSION_USERSPACE)
		return (SET_ERROR(ENOTSUP));

	objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
	    &zsb->z_groupquota_obj;

	err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zsb->z_fuid_dirty;

	tx = dmu_tx_create(zsb->z_os);
	/* Hold the quota ZAP; it may not exist yet (DMU_NEW_OBJECT). */
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		/* Creating the quota ZAP also links it into the master node. */
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* z_lock serializes lazy creation of the quota ZAP object. */
	mutex_enter(&zsb->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zsb->z_lock);

	if (quota == 0) {
		/* A quota of 0 means "no quota": remove any existing entry. */
		err = zap_remove(zsb->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	ASSERT(err == 0);
	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);
	dmu_tx_commit(tx);
	return (err);
}
Ejemplo n.º 2
0
Archivo: zvol.c Proyecto: alek-p/zfs
/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	uint64_t volsize, volblocksize;
	int err;

	/* volsize is mandatory; volblocksize falls back to its default. */
	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * Strip both properties out of the list so the generic property
	 * setting step won't try to apply them again.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	/* Claim the zvol data object and its property ZAP. */
	err = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(err == 0);

	err = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(err == 0);

	/* Record the initial volume size. */
	err = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(err == 0);
}
Ejemplo n.º 3
0
/*
 * Sync task: record any not-yet-registered SA attributes in the on-disk
 * attribute-registration ZAP, creating that ZAP (and linking it under
 * SA_REGISTRY in the master object) on first use.  Caller's tx must
 * already cover these writes.
 *
 * Fix: sa_master_obj and sa_reg_attr_obj are uint64_t object numbers,
 * so compare them against 0, not the pointer constant NULL.
 */
static void
sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
{
	uint64_t attr_value = 0;
	sa_os_t *sa = hdl->sa_os->os_sa;
	sa_attr_table_t *tb = sa->sa_attr_table;
	int i;

	mutex_enter(&sa->sa_lock);

	/* Nothing pending, or no master object to link the registry into. */
	if (!sa->sa_need_attr_registration || sa->sa_master_obj == 0) {
		mutex_exit(&sa->sa_lock);
		return;
	}

	if (sa->sa_reg_attr_obj == 0) {
		/* Lazily create the registration ZAP on first use. */
		sa->sa_reg_attr_obj = zap_create(hdl->sa_os,
		    DMU_OT_SA_ATTR_REGISTRATION, DMU_OT_NONE, 0, tx);
		VERIFY(zap_add(hdl->sa_os, sa->sa_master_obj,
		    SA_REGISTRY, 8, 1, &sa->sa_reg_attr_obj, tx) == 0);
	}
	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (sa->sa_attr_table[i].sa_registered)
			continue;
		/* Pack attr number, length and byteswap id into one word. */
		ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
		    tb[i].sa_byteswap);
		VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
		    tb[i].sa_name, 8, 1, &attr_value, tx));
		tb[i].sa_registered = B_TRUE;
	}
	sa->sa_need_attr_registration = B_FALSE;
	mutex_exit(&sa->sa_lock);
}
Ejemplo n.º 4
0
Archivo: zvol.c Proyecto: alek-p/zfs
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 *
 * Writes the new "size" entry into the zvol's property ZAP, waits for
 * that txg to sync, then frees any blocks beyond the new end of volume.
 * Returns 0 or an error from tx assignment / the free.
 */
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
	dmu_tx_t *tx;
	int error;
	uint64_t txg;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	/* Mark the tx net-free so it can proceed on a nearly-full pool. */
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (SET_ERROR(error));
	}
	txg = dmu_tx_get_txg(tx);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	/* Make sure the new size is on disk before freeing past it. */
	txg_wait_synced(dmu_objset_pool(os), txg);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);

	return (error);
}
Ejemplo n.º 5
0
/*
 * This function is non-static for zhack; it should otherwise not be used
 * outside this file.
 */
void
feature_sync(spa_t *spa, zfeature_info_t *feature, uint64_t refcount,
    dmu_tx_t *tx)
{
	uint64_t zapobj;

	ASSERT(VALID_FEATURE_OR_NONE(feature->fi_feature));
	zapobj = (feature->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
	    spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj;
	VERIFY0(zap_update(spa->spa_meta_objset, zapobj, feature->fi_guid,
	    sizeof (uint64_t), 1, &refcount, tx));

	/*
	 * feature_sync is called directly from zhack, allowing the
	 * creation of arbitrary features whose fi_feature field may
	 * be greater than SPA_FEATURES. When called from zhack, the
	 * zfeature_info_t object's fi_feature field will be set to
	 * SPA_FEATURE_NONE.
	 */
	if (feature->fi_feature != SPA_FEATURE_NONE) {
		uint64_t *refcount_cache =
		    &spa->spa_feat_refcount_cache[feature->fi_feature];
		VERIFY3U(*refcount_cache, ==,
		    atomic_swap_64(refcount_cache, refcount));
	}
Ejemplo n.º 6
0
/*
 * Persist the in-core scan state (scn_phys) to the pool directory ZAP.
 */
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	objset_t *mos = scn->scn_dp->dp_meta_objset;

	VERIFY0(zap_update(mos, DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
	    sizeof (uint64_t), SCAN_PHYS_NUMINTS, &scn->scn_phys, tx));
}
Ejemplo n.º 7
0
/*
 * Allocate and register a new SA layout table entry.
 *
 * attrs/attr_count describe the layout's attribute list (copied into the
 * entry); lot_num is the layout number and hash its attribute hash.  If
 * zapadd is set, the layout is also persisted in the on-disk SA layout
 * ZAP, which is created (and linked under SA_LAYOUTS in the master
 * object) lazily on first use.
 *
 * Returns the new entry, linked into both the by-number and by-hash AVL
 * trees.  Caller must hold sa_lock.
 */
static sa_lot_t *
sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
    uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *tb, *findtb;
	int i;
	avl_index_t loc;

	ASSERT(MUTEX_HELD(&sa->sa_lock));
	tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
	tb->lot_attr_count = attr_count;
	tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
	    KM_SLEEP);
	bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
	tb->lot_num = lot_num;
	tb->lot_hash = hash;
	tb->lot_instance = 0;

	if (zapadd) {
		char attr_name[8];

		if (sa->sa_layout_attr_obj == 0) {
			/* Lazily create the on-disk layout ZAP. */
			sa->sa_layout_attr_obj = zap_create(os,
			    DMU_OT_SA_ATTR_LAYOUTS, DMU_OT_NONE, 0, tx);
			VERIFY(zap_add(os, sa->sa_master_obj, SA_LAYOUTS, 8, 1,
			    &sa->sa_layout_attr_obj, tx) == 0);
		}

		/* ZAP key is the decimal layout number. */
		(void) snprintf(attr_name, sizeof (attr_name),
		    "%d", (int)lot_num);
		VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
		    attr_name, 2, attr_count, attrs, tx));
	}

	list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
	    offsetof(sa_idx_tab_t, sa_next));

	/* Count the variable-length attributes in this layout. */
	for (i = 0; i != attr_count; i++) {
		if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
			tb->lot_var_sizes++;
	}

	avl_add(&sa->sa_layout_num_tree, tb);

	/* verify we don't have a hash collision */
	if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
		/* Bump lot_instance until unique among same-hash entries. */
		for (; findtb && findtb->lot_hash == hash;
		    findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
			if (findtb->lot_instance != tb->lot_instance)
				break;
			tb->lot_instance++;
		}
	}
	avl_add(&sa->sa_layout_hash_tree, tb);
	return (tb);
}
Ejemplo n.º 8
0
/*
 * Sync task: persist the vdev's initialization progress (last offset,
 * action time, and state) into the vdev's leaf ZAP.
 */
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	/* Consume (and clear) the per-txg offset recorded by the thread. */
	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}
	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	/* State is always written, even when no progress was made. */
	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}
Ejemplo n.º 9
0
/*
 * Sync task: apply the delegated-permission changes in nvp to dd's
 * delegation ZAP, creating ZAP objects lazily as needed.  Each "who"
 * entry maps to a jump object whose keys are the permission names.
 *
 * Fix: parenthesize the assignments used as loop conditions; the bare
 * `while (x = f())` form is ambiguous and triggers -Wparentheses.
 */
static void
dsl_deleg_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	nvlist_t *nvp = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	nvpair_t *whopair = NULL;
	uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj;

	if (zapobj == 0) {
		/* Lazily create the delegation ZAP on first use. */
		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos,
		    DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
	}

	while ((whopair = nvlist_next_nvpair(nvp, whopair)) != NULL) {
		const char *whokey = nvpair_name(whopair);
		nvlist_t *perms;
		nvpair_t *permpair = NULL;
		uint64_t jumpobj;

		VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);

		if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0) {
			/* No jump object for this "who" yet; create one. */
			jumpobj = zap_create(mos, DMU_OT_DSL_PERMS,
			    DMU_OT_NONE, 0, tx);
			VERIFY(zap_update(mos, zapobj,
			    whokey, 8, 1, &jumpobj, tx) == 0);
		}

		while ((permpair = nvlist_next_nvpair(perms, permpair)) !=
		    NULL) {
			const char *perm = nvpair_name(permpair);
			uint64_t n = 0;

			VERIFY(zap_update(mos, jumpobj,
			    perm, 8, 1, &n, tx) == 0);
			spa_history_internal_log(LOG_DS_PERM_UPDATE,
			    dd->dd_pool->dp_spa, tx, cr,
			    "%s %s dataset = %llu", whokey, perm,
			    dd->dd_phys->dd_head_dataset_obj);
		}
	}
}
Ejemplo n.º 10
0
/*
 * Handle ZFS_IOC_SET_VOLSIZE: resize the zvol named in zc to
 * zc->zc_volsize.  Validates the new size, rejects read-only zvols,
 * persists the new "size" ZAP entry, frees blocks past the new end,
 * then notifies the OS of the capacity change.  Returns 0 or an errno.
 */
int
zvol_set_volsize(zfs_cmd_t *zc)
{
	zvol_state_t *zv;
	dev_t dev = zc->zc_dev;
	dmu_tx_t *tx;
	int error;
	dmu_object_info_t doi;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(zc->zc_name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	/* Validate the requested size against the volume's block size. */
	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(zc, doi.doi_data_block_size)) != 0) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_free(tx, ZVOL_OBJ, zc->zc_volsize, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	/* Record the new size, then free everything beyond it. */
	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &zc->zc_volsize, tx);
	if (error == 0) {
		error = dmu_free_range(zv->zv_objset, ZVOL_OBJ, zc->zc_volsize,
		    DMU_OBJECT_END, tx);
	}

	dmu_tx_commit(tx);

	if (error == 0) {
		/* Update in-core state and notify the OS of the new size. */
		zv->zv_volsize = zc->zc_volsize;
		zvol_size_changed(zv, dev);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}
Ejemplo n.º 11
0
Archivo: zfs_vfsops.c Proyecto: nwf/zfs
int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
	int error;
	objset_t *os = zsb->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zsb->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zsb->z_os)))
		return (SET_ERROR(ENOTSUP));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);

	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}
Ejemplo n.º 12
0
/*
 * Upgrade the ZPL version of the filesystem 'name' to newvers and log
 * the upgrade to pool history.  Downgrades are rejected with EINVAL.
 * Returns 0 or an errno.
 */
int
zfs_set_version(const char *name, uint64_t newvers)
{
	int error;
	objset_t *os;
	dmu_tx_t *tx;
	uint64_t curvers;

	/*
	 * XXX for now, require that the filesystem be unmounted.  Would
	 * be nice to find the zfsvfs_t and just update that if
	 * possible.
	 */

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (EINVAL);

	error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_PRIMARY, &os);
	if (error)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &curvers);
	if (error)
		goto out;
	/* Only upgrades are allowed. */
	if (newvers < curvers) {
		error = EINVAL;
		goto out;
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}
	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1,
	    &newvers, tx);

	/*
	 * NOTE(review): the upgrade is logged to pool history even when
	 * the zap_update above failed -- confirm that is intended.
	 */
	spa_history_internal_log(LOG_DS_UPGRADE,
	    dmu_objset_spa(os), tx, CRED(),
	    "oldver=%llu newver=%llu dataset = %llu", curvers, newvers,
	    dmu_objset_id(os));
	dmu_tx_commit(tx);

out:
	dmu_objset_close(os);
	return (error);
}
Ejemplo n.º 13
0
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 *
 * Persists the new "size" property, frees blocks beyond the new end of
 * volume, then updates the in-core size and the block-device capacity.
 * Returns 0 or an errno; caller must hold zvol_state_lock.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (error);

	/* Discard any blocks beyond the new end of the volume. */
	error = dmu_free_long_range(os,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (error);

	bdev = bdget_disk(zv->zv_disk, 0);
	if (!bdev)
		return (EIO);
/*
 * 2.6.28 API change
 * Added check_disk_size_change() helper function.
 */
#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	check_disk_size_change(zv->zv_disk, bdev);
#else
	zv->zv_volsize = volsize;
	zv->zv_changed = 1;
	(void) check_disk_change(bdev);
#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */

	bdput(bdev);

	return (0);
}
Ejemplo n.º 14
0
/*
 * Store a new wrapped dataset key in dd's on-disk keychain, keyed by
 * the current txg, and record rekey_date in the dataset's REKEYDATE
 * property.
 *
 * wkeybuf/wkeylen: the already-wrapped key blob to store.
 * The keychain ZAP object must already exist (asserted below).
 */
static void
dsl_keychain_set_key(dsl_dir_t *dd, dmu_tx_t *tx,
                     caddr_t wkeybuf, size_t wkeylen, uint64_t rekey_date)
{
    objset_t *mos = dd->dd_pool->dp_meta_objset;
    uint64_t keychain_zapobj = dsl_dir_phys(dd)->dd_keychain_obj;
    uint64_t props_zapobj = dsl_dir_phys(dd)->dd_props_zapobj;

    ASSERT(keychain_zapobj != 0);
    /* The keychain entry is indexed by the txg of this transaction. */
    VERIFY(zap_update_uint64(mos, keychain_zapobj,
                             (uint64_t *)&tx->tx_txg, 1, 1, wkeylen, wkeybuf, tx) == 0);
    VERIFY(zap_update(mos, props_zapobj,
                      zfs_prop_to_name(ZFS_PROP_REKEYDATE),
                      8, 1, (void *)&rekey_date, tx) == 0);
}
Ejemplo n.º 15
0
/*
 * ZFS_IOC_CREATE callback: claim the zvol data object and its property
 * ZAP, then record the initial volume size.
 */
void
zvol_create_cb(objset_t *os, void *arg, dmu_tx_t *tx)
{
	zfs_cmd_t *zc = arg;
	int err;

	err = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, zc->zc_volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(err == 0);

	err = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(err == 0);

	err = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &zc->zc_volsize, tx);
	ASSERT(err == 0);
}
Ejemplo n.º 16
0
/*
 * Sync task: apply the delegation changes described by dda->dda_nvlist
 * to the directory named dda->dda_name.  Each "who" entry gets a jump
 * ZAP (created and linked lazily) whose keys are the individual
 * permission names; each update is logged to pool history.
 */
static void
dsl_deleg_set_sync(void *arg, dmu_tx_t *tx)
{
    dsl_deleg_arg_t *dda = arg;
    dsl_dir_t *dd;
    dsl_pool_t *dp = dmu_tx_pool(tx);
    objset_t *mos = dp->dp_meta_objset;
    nvpair_t *whopair = NULL;
    uint64_t zapobj;

    VERIFY0(dsl_dir_hold(dp, dda->dda_name, FTAG, &dd, NULL));

    zapobj = dd->dd_phys->dd_deleg_zapobj;
    if (zapobj == 0) {
        /* Lazily create the delegation ZAP on first use. */
        dmu_buf_will_dirty(dd->dd_dbuf, tx);
        zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos,
                                                DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
    }

    while ((whopair = nvlist_next_nvpair(dda->dda_nvlist, whopair))) {
        const char *whokey = nvpair_name(whopair);
        nvlist_t *perms;
        nvpair_t *permpair = NULL;
        uint64_t jumpobj;

        perms = fnvpair_value_nvlist(whopair);

        if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0) {
            /* Create the jump object and link it in one step. */
            jumpobj = zap_create_link(mos, DMU_OT_DSL_PERMS,
                                      zapobj, whokey, tx);
        }

        while ((permpair = nvlist_next_nvpair(perms, permpair))) {
            const char *perm = nvpair_name(permpair);
            uint64_t n = 0;

            VERIFY(zap_update(mos, jumpobj,
                              perm, 8, 1, &n, tx) == 0);
            spa_history_log_internal_dd(dd, "permission update", tx,
                                        "%s %s", whokey, perm);
        }
    }
    dsl_dir_rele(dd, FTAG);
}
Ejemplo n.º 17
0
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 *
 * Persists the new "size" property, frees blocks beyond the new end of
 * volume, then updates the in-core size and pokes the block device so
 * the kernel rereads the capacity.  Returns 0 or an errno; caller must
 * hold zvol_state_lock.
 *
 * Fix: parenthesize the EIO return value for consistency with the
 * `return (x);` convention used everywhere else in this code.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/* Persist the new size before freeing anything past it. */
	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (error);

	/* Discard any blocks beyond the new end of the volume. */
	error = dmu_free_long_range(zv->zv_objset,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (error);

	zv->zv_volsize = volsize;
	zv->zv_changed = 1;

	bdev = bdget_disk(zv->zv_disk, 0);
	if (!bdev)
		return (EIO);

	/*
	 * zv_changed was just set, so check_disk_change() is expected to
	 * report a change; the assertion below relies on that.
	 */
	error = check_disk_change(bdev);
	ASSERT3U(error, !=, 0);
	bdput(bdev);

	return (0);
}
Ejemplo n.º 18
0
/*
 * Copy the parent's "create"-time delegated permissions (the entry built
 * from ZFS_DELEG_CREATE / ZFS_DELEG_CREATE_SETS in pzapobj) into dd's
 * own delegation ZAP, granting each listed permission locally to user
 * 'uid'.  dosets selects the permission-set variants.  If the parent has
 * no matching create entry this is a no-op.
 */
static void
copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
    boolean_t dosets, uint64_t uid, dmu_tx_t *tx)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t jumpobj, pjumpobj;
	uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj;
	zap_cursor_t zc;
	zap_attribute_t za;
	char whokey[ZFS_MAX_DELEG_NAME];

	zfs_deleg_whokey(whokey,
	    dosets ? ZFS_DELEG_CREATE_SETS : ZFS_DELEG_CREATE,
	    ZFS_DELEG_LOCAL, NULL);
	if (zap_lookup(mos, pzapobj, whokey, 8, 1, &pjumpobj) != 0)
		return;

	if (zapobj == 0) {
		/* Lazily create dd's delegation ZAP. */
		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos,
		    DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
	}

	zfs_deleg_whokey(whokey,
	    dosets ? ZFS_DELEG_USER_SETS : ZFS_DELEG_USER,
	    ZFS_DELEG_LOCAL, &uid);
	if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) {
		jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
		VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0);
	}

	/* Copy each permission name from the parent's create entry. */
	for (zap_cursor_init(&zc, mos, pjumpobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t zero = 0;
		ASSERT(za.za_integer_length == 8 && za.za_num_integers == 1);

		VERIFY(zap_update(mos, jumpobj, za.za_name,
		    8, 1, &zero, tx) == 0);
	}
	zap_cursor_fini(&zc);
}
Ejemplo n.º 19
0
/*
 * dsl_crypto_key_clone
 *
 * Set up encryption state for a newly cloned dataset: create the
 * clone's keychain object, clone the origin's keychain entries, store
 * an optional salt property, optionally generate a brand new key valid
 * from this txg, and load the keychain for use.  A no-op for
 * unencrypted origins.
 *
 * Our caller (dmu_objset_create_sync) must have a lock on the dataset.
 */
int
dsl_crypto_key_clone(dsl_dir_t *dd, dsl_dataset_phys_t *dsphys,
                     uint64_t dsobj, dsl_dataset_t *clone_origin,
                     dsl_crypto_ctx_t *ctx, dmu_tx_t *tx)
{
    zcrypt_key_t *txgkey;
    zcrypt_key_t *wrappingkey = NULL;
    dsl_dataset_t *origin;
    uint64_t crypt;
    spa_t *spa = dd->dd_pool->dp_spa;
    int error = 0;

    ASSERT(ctx != NULL);

    /* A hole BP means the context supplies the origin dataset. */
    if (BP_IS_HOLE(&dsphys->ds_bp)) {
        origin = ctx->dcc_origin_ds;
    } else {
        origin = clone_origin;
    }
    ASSERT(origin != NULL);

    /*
     * Need to look at the value of crypt on the origin snapshot
     * since it is sticky for encryption.
     */
    VERIFY(dsl_prop_get_ds(origin, zfs_prop_to_name(ZFS_PROP_ENCRYPTION),
                           8, 1, &crypt,/* DSL_PROP_GET_EFFECTIVE,*/ NULL) == 0);

    if (crypt == ZIO_CRYPT_OFF) {
        return (0);
    }

    wrappingkey = ctx->dcc_wrap_key;

    dsl_keychain_create_obj(dd, tx);
    dsl_keychain_clone_phys(origin, dd, tx, wrappingkey);

    if (ctx->dcc_salt != 0) {
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        uint64_t props_zapobj = dsl_dir_phys(dd)->dd_props_zapobj;

        /*
         * NOTE(review): a failure here may be silently overwritten by
         * dsl_keychain_load_dd() below -- confirm that is intended.
         */
        error = zap_update(mos, props_zapobj,
                           zfs_prop_to_name(ZFS_PROP_SALT), 8, 1,
                           (void *)&ctx->dcc_salt, tx);
    }

    if (ctx->dcc_clone_newkey) {
        caddr_t wkeybuf;
        size_t wkeylen;
        /*
         * Generate a new key and add it to the keychain
         * to be valid from this txg onwards.
         */
        zcrypt_key_hold(wrappingkey, FTAG);
        txgkey = zcrypt_key_gen(crypt);
        VERIFY(zcrypt_wrap_key(wrappingkey, txgkey,
                               &wkeybuf, &wkeylen, zio_crypt_select_wrap(crypt)) == 0);
        zcrypt_key_release(wrappingkey, FTAG);
        dsl_keychain_set_key(dd, tx, wkeybuf, wkeylen,
                             dsphys->ds_creation_time);
        kmem_free(wkeybuf, wkeylen);
        zcrypt_key_free(txgkey);
        spa_history_log_internal(spa, "key create", tx,
                                 "rekey succeeded dataset = %llu from dataset = %llu",
                                 dsobj, clone_origin->ds_object);
    } else {
        spa_history_log_internal(spa, "key create", tx,
                                 "succeeded dataset = %llu from dataset = %llu",
                                 dsobj, clone_origin->ds_object);
    }

    /* Load the freshly cloned keychain into the in-memory keystore. */
    if (wrappingkey != NULL) {
        error = dsl_keychain_load_dd(dd, dsobj, crypt, wrappingkey);
    }

    return (error);
}
Ejemplo n.º 20
0
/*
 * dsl_crypto_key_gen - Generate dataset key
 *
 * Generate a new key for this dataset based on its encryption property type.
 * Store the key as a usable zcrypt_key_t in the in memory keystore and
 * put the wrapped version of it in the on disk keychain.
 *
 * returns 0 on success
 */
int
dsl_crypto_key_create(dsl_dir_t *dd, dsl_dataset_phys_t *dsphys,
                      uint64_t dsobj, dsl_crypto_ctx_t *ctx, dmu_tx_t *tx)
{
    int error = -1;
#ifdef DEBUG
    zcrypt_key_t *debugkey;
#endif /* DEBUG */
    zcrypt_key_t *wrappingkey, *dslkey;
    caddr_t wkeybuf = NULL;
    size_t wkeylen = 0;
    uint64_t crypt;
    spa_t *spa = dd->dd_pool->dp_spa;
    zcrypt_keystore_node_t *skn;

    /* No crypto context at all means nothing to do. */
    if (ctx == NULL) {
        return (0);
    }

    crypt = ctx->dcc_crypt;
    if (crypt == ZIO_CRYPT_OFF || crypt == ZIO_CRYPT_INHERIT) {
        return (0);
    }

    crypt = zio_crypt_select(crypt, ZIO_CRYPT_ON_VALUE);
    wrappingkey = ctx->dcc_wrap_key;
    skn = zcrypt_keystore_insert(spa, dsobj, wrappingkey);
    if (skn == NULL) {
        /*
         * NOTE(review): this goto reaches the zcrypt_key_release() at
         * out: although zcrypt_key_hold() has not been called yet on
         * this path -- verify the hold/release pairing.
         */
        error = ENOKEY;
        goto out;
    }

    dslkey = zcrypt_key_gen(crypt);
    zcrypt_key_hold(wrappingkey, FTAG);
    error = zcrypt_wrap_key(wrappingkey, dslkey,
                            &wkeybuf, &wkeylen, zio_crypt_select_wrap(crypt));
    if (error != 0) {
        zcrypt_key_free(dslkey);
        /*
         * NOTE(review): the format string has one %llu but two extra
         * args (dsobj, error); 'error' is never printed -- confirm the
         * intended message.
         */
        spa_history_log_internal(spa, "key create", tx,
                                 "failed dataset = %llu unable to wrap key", dsobj, error);
        goto out;
    }
    dsl_keychain_create_obj(dd, tx);
    dsl_keychain_set_key(dd, tx, wkeybuf, wkeylen,
                         dsphys->ds_creation_time);
    /* Make the unwrapped key usable from the dataset's creation txg. */
    zcrypt_keychain_insert(&skn->skn_keychain,
                           dsphys->ds_creation_txg, dslkey);
    if (ctx->dcc_salt != 0) {
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        uint64_t props_zapobj = dsl_dir_phys(dd)->dd_props_zapobj;

        error = zap_update(mos, props_zapobj,
                           zfs_prop_to_name(ZFS_PROP_SALT), 8, 1,
                           (void *)&ctx->dcc_salt, tx);
    }
    if (error == 0)
        spa_history_log_internal(spa, "key create", tx,
                                 "succeeded dataset = %llu", dsobj);
#ifdef DEBUG
    /* Sanity: unwrapping what we just wrapped yields the same key. */
    ASSERT3U(zcrypt_unwrap_key(wrappingkey, crypt,
                               wkeybuf, wkeylen, &debugkey), ==, 0);
    zcrypt_key_compare(dslkey, debugkey);
    zcrypt_key_free(debugkey);
#endif /* DEBUG */
out:
    zcrypt_key_release(wrappingkey, FTAG);
    if (wkeylen > 0) {
        kmem_free(wkeybuf, wkeylen);
    }

    return (error);
}
Ejemplo n.º 21
0
/*
 * Create a fresh ZPL filesystem in objset os: master node, version and
 * zpl properties, SA registry (for ZPL_VERSION_SA and later), unlinked
 * set, root znode, and shares directory.  A throwaway zfs_sb_t /
 * super_block pair is built just so zfs_mknode() can run, then torn
 * down again before returning.
 */
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	struct super_block *sb;
	zfs_sb_t	*zsb;
	uint64_t	moid, obj, sa_obj, version;
	uint64_t	sense = ZFS_CASE_SENSITIVE;
	uint64_t	norm = 0;
	nvpair_t	*elem;
	int		error;
	int		i;
	znode_t		*rootzp = NULL;
	vattr_t		vattr;
	znode_t		*zp;
	zfs_acl_ids_t	acl_ids;

	/*
	 * First attempt to create master node.
	 */
	/*
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		char *name;

		ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
		VERIFY(nvpair_value_uint64(elem, &val) == 0);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			/* Never create at a newer version than requested. */
			if (val < version)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT(error == 0);
		/* Remember normalization/case settings for later use. */
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT(version != 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);

	/*
	 * Create zap object used for SA attribute registration
	 */

	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT(error == 0);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT(error == 0);

	/*
	 * Create root znode.  Create minimal znode/inode/zsb/sb
	 * to allow zfs_mknode to work.
	 */
	vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	rootzp = kmem_cache_alloc(znode_cache, KM_PUSHPAGE);
	rootzp->z_moved = 0;
	rootzp->z_unlinked = 0;
	rootzp->z_atime_dirty = 0;
	rootzp->z_is_sa = USE_SA(version, os);

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_PUSHPAGE | KM_NODEBUG);
	zsb->z_os = os;
	zsb->z_parent = zsb;
	zsb->z_version = version;
	zsb->z_use_fuids = USE_FUIDS(version, os);
	zsb->z_use_sa = USE_SA(version, os);
	zsb->z_norm = norm;

	sb = kmem_zalloc(sizeof (struct super_block), KM_PUSHPAGE);
	sb->s_fs_info = zsb;

	ZTOI(rootzp)->i_sb = sb;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);

	ASSERT(error == 0);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT(error == 0);
	zfs_acl_ids_free(&acl_ids);

	/* Release the temporary root znode. */
	atomic_set(&ZTOI(rootzp)->i_count, 0);
	sa_handle_destroy(rootzp->z_sa_hdl);
	kmem_cache_free(znode_cache, rootzp);

	/*
	 * Create shares directory
	 */
	error = zfs_create_share_dir(zsb, tx);
	ASSERT(error == 0);

	/* Tear down the temporary zsb/sb used only for zfs_mknode(). */
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zsb->z_hold_mtx[i]);

	kmem_free(sb, sizeof (struct super_block));
	kmem_free(zsb, sizeof (zfs_sb_t));
}
Ejemplo n.º 22
0
/**
 * update/insert/delete the specified OI mapping (@fid @id) according to the ops
 *
 * \param[in] env	execution environment
 * \param[in] dev	OSD device whose OI tables are updated
 * \param[in] fid	FID whose OI mapping is being refreshed
 * \param[in] oid	dnode number the mapping should reference
 * \param[in] ops	DTO_INDEX_UPDATE / DTO_INDEX_INSERT / DTO_INDEX_DELETE
 * \param[in] force	apply the change even in scrub dry-run mode
 * \param[in] name	object name, used only for debug logging
 *
 * \retval   1, changed nothing
 * \retval   0, changed successfully
 * \retval -ve, on error
 */
static int osd_scrub_refresh_mapping(const struct lu_env *env,
				     struct osd_device *dev,
				     const struct lu_fid *fid,
				     uint64_t oid, int ops,
				     bool force, const char *name)
{
	struct osd_thread_info *info = osd_oti_get(env);
	struct zpl_direntry *zde = &info->oti_zde.lzd_reg;
	char *buf = info->oti_str;
	dmu_tx_t *tx = NULL;
	dnode_t *dn = NULL;
	uint64_t zapid;
	int rc;
	ENTRY;

	/* Dry-run scrubs change nothing unless explicitly forced. */
	if (dev->od_scrub.os_file.sf_param & SP_DRYRUN && !force)
		GOTO(log, rc = 0);

	tx = dmu_tx_create(dev->od_os);
	if (!tx)
		GOTO(log, rc = -ENOMEM);

	zapid = osd_get_name_n_idx(env, dev, fid, buf,
				   sizeof(info->oti_str), &dn);
	osd_tx_hold_zap(tx, zapid, dn,
			ops == DTO_INDEX_INSERT ? TRUE : FALSE, NULL);
	/* DMU/ZAP return positive errnos; negate for Lustre convention. */
	rc = -dmu_tx_assign(tx, TXG_WAIT);
	if (rc) {
		dmu_tx_abort(tx);
		GOTO(log, rc);
	}

	switch (ops) {
	case DTO_INDEX_UPDATE:
		zde->zde_pad = 0;
		zde->zde_dnode = oid;
		zde->zde_type = 0; /* The type in OI mapping is useless. */
		rc = -zap_update(dev->od_os, zapid, buf, 8, sizeof(*zde) / 8,
				 zde, tx);
		if (unlikely(rc == -ENOENT)) {
			/* Some unlink thread may removed the OI mapping. */
			rc = 1;
		}
		break;
	case DTO_INDEX_INSERT:
		zde->zde_pad = 0;
		zde->zde_dnode = oid;
		zde->zde_type = 0; /* The type in OI mapping is useless. */
		rc = osd_zap_add(dev, zapid, dn, buf, 8, sizeof(*zde) / 8,
				 zde, tx);
		if (unlikely(rc == -EEXIST))
			rc = 1;
		break;
	case DTO_INDEX_DELETE:
		rc = osd_zap_remove(dev, zapid, dn, buf, tx);
		if (rc == -ENOENT) {
			/* It is normal that the unlink thread has removed the
			 * OI mapping already. */
			rc = 1;
		}
		break;
	default:
		LASSERTF(0, "Unexpected ops %d\n", ops);
		rc = -EINVAL;
		break;
	}

	dmu_tx_commit(tx);
	GOTO(log, rc);

log:
	CDEBUG(D_LFSCK, "%s: refresh OI map for scrub, op %d, force %s, "
	       DFID" => %llu (%s): rc = %d\n", osd_name(dev), ops,
	       force ? "yes" : "no", PFID(fid), oid, name ? name : "null", rc);

	return rc;
}
Ejemplo n.º 23
0
/*
 * Sync-task callback that starts a new pool scrub: cancels any scrub in
 * progress, initializes the in-core scrub state (txg range, bookmarks,
 * block statistics), creates the scrub work queue object, and persists
 * all of that state into the MOS pool directory so the scrub survives
 * pool export/import.
 */
/* ARGSUSED */
static void
dsl_pool_scrub_setup_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = arg1;
	enum scrub_func *funcp = arg2;
	dmu_object_type_t ot = 0;
	boolean_t complete = B_FALSE;

	/* Tear down any scrub already running (marked as not complete). */
	dsl_pool_scrub_cancel_sync(dp, &complete, cr, tx);

	ASSERT(dp->dp_scrub_func == SCRUB_FUNC_NONE);
	ASSERT(*funcp > SCRUB_FUNC_NONE);
	ASSERT(*funcp < SCRUB_FUNC_NUMFUNCS);

	/* Default: scrub the whole pool, from txg 0 up to the current txg. */
	dp->dp_scrub_min_txg = 0;
	dp->dp_scrub_max_txg = tx->tx_txg;
	dp->dp_scrub_ddt_class_max = zfs_scrub_ddt_class_max;

	if (*funcp == SCRUB_FUNC_CLEAN) {
		vdev_t *rvd = dp->dp_spa->spa_root_vdev;

		/* rewrite all disk labels */
		vdev_config_dirty(rvd);

		/* A needed resilver narrows the txg range to the dirty
		 * window reported by the vdev tree. */
		if (vdev_resilver_needed(rvd,
		    &dp->dp_scrub_min_txg, &dp->dp_scrub_max_txg)) {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_RESILVER_START);
			dp->dp_scrub_max_txg = MIN(dp->dp_scrub_max_txg,
			    tx->tx_txg);
		} else {
			spa_event_notify(dp->dp_spa, NULL,
			    ESC_ZFS_SCRUB_START);
		}

		/* zero out the scrub stats in all vdev_stat_t's */
		vdev_scrub_stat_update(rvd,
		    dp->dp_scrub_min_txg ? POOL_SCRUB_RESILVER :
		    POOL_SCRUB_EVERYTHING, B_FALSE);

		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (dp->dp_scrub_min_txg > TXG_INITIAL)
			dp->dp_scrub_ddt_class_max = DDT_CLASS_DITTO;

		dp->dp_spa->spa_scrub_started = B_TRUE;
	}

	/* back to the generic stuff */

	/* Lazily allocate the per-pool block statistics, then reset them. */
	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	/* Pools older than SPA_VERSION_DSL_SCRUB lack the dedicated scrub
	 * queue object type; fall back to a generic ZAP. */
	if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	dp->dp_scrub_func = *funcp;
	dp->dp_scrub_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCRUB_QUEUE, DMU_OT_NONE, 0, tx);
	bzero(&dp->dp_scrub_bookmark, sizeof (zbookmark_t));
	bzero(&dp->dp_scrub_ddt_bookmark, sizeof (ddt_bookmark_t));
	dp->dp_scrub_restart = B_FALSE;
	dp->dp_spa->spa_scrub_errors = 0;

	/* Persist the scrub state into the MOS pool directory.  The cancel
	 * above removed any prior entries, so zap_add must succeed.
	 * NOTE(review): the DDT bookmark/class entries use zap_update rather
	 * than zap_add — presumably they may survive from an older scrub;
	 * confirm against dsl_pool_scrub_cancel_sync. */
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
	    &dp->dp_scrub_queue_obj, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_min_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
	    &dp->dp_scrub_max_txg, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t),
	    sizeof (dp->dp_scrub_bookmark) / sizeof (uint64_t),
	    &dp->dp_scrub_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_DDT_BOOKMARK, sizeof (uint64_t),
	    sizeof (dp->dp_scrub_ddt_bookmark) / sizeof (uint64_t),
	    &dp->dp_scrub_ddt_bookmark, tx));
	VERIFY(0 == zap_update(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_DDT_CLASS_MAX, sizeof (uint64_t), 1,
	    &dp->dp_scrub_ddt_class_max, tx));
	VERIFY(0 == zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
	    &dp->dp_spa->spa_scrub_errors, tx));

	/* Record the scrub start in the pool history log. */
	spa_history_internal_log(LOG_POOL_SCRUB, dp->dp_spa, tx, cr,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, dp->dp_scrub_min_txg, dp->dp_scrub_max_txg);
}
Ejemplo n.º 24
0
/*
 * Perform a single action on one pool feature: enable it, or adjust its
 * reference count, updating the on-disk feature ZAP objects.
 *
 * The feature's state lives in 'write_obj' (features-for-write) or
 * 'read_obj' (features-for-read) depending on fi_can_readonly; the entry
 * is the refcount keyed by the feature GUID, and its absence means the
 * feature is not enabled.  Enabling recursively enables dependencies and
 * records the feature description in 'desc_obj'.
 *
 * Returns 0 on success; ENOTSUP when incr/decr targets a feature that is
 * not enabled; EOVERFLOW on refcount under/overflow; EINVAL for an
 * unknown action; or the error from the underlying ZAP I/O.
 */
static int
feature_do_action(objset_t *os, uint64_t read_obj, uint64_t write_obj,
                  uint64_t desc_obj, zfeature_info_t *feature, feature_action_t action,
                  dmu_tx_t *tx)
{
    int error;
    uint64_t refcount = 0;
    uint64_t zapobj = feature->fi_can_readonly ? write_obj : read_obj;

    ASSERT(0 != zapobj);
    ASSERT(zfeature_is_valid_guid(feature->fi_guid));

    error = zap_lookup(os, zapobj, feature->fi_guid,
                       sizeof (uint64_t), 1, &refcount);

    /*
     * If we can't ascertain the status of the specified feature, an I/O
     * error occurred.
     */
    if (error != 0 && error != ENOENT)
        return (error);

    switch (action) {
    case FEATURE_ACTION_ENABLE:
        /*
         * If the feature is already enabled, ignore the request.
         */
        if (error == 0)
            return (0);
        refcount = 0;
        break;
    case FEATURE_ACTION_INCR:
        if (error == ENOENT)
            return (ENOTSUP);
        if (refcount == UINT64_MAX)
            return (EOVERFLOW);
        refcount++;
        break;
    case FEATURE_ACTION_DECR:
        if (error == ENOENT)
            return (ENOTSUP);
        if (refcount == 0)
            return (EOVERFLOW);
        refcount--;
        break;
    default:
        ASSERT(0);
        /*
         * ASSERT compiles out in non-debug builds; without this return
         * an unknown action would fall through below and write a stale
         * (or, after ENOENT, uninitialized) refcount into the ZAP.
         */
        return (EINVAL);
    }

    /* Enabling a feature first enables everything it depends on. */
    if (action == FEATURE_ACTION_ENABLE) {
        int i;

        for (i = 0; feature->fi_depends[i] != NULL; i++) {
            zfeature_info_t *dep = feature->fi_depends[i];

            error = feature_do_action(os, read_obj, write_obj,
                                      desc_obj, dep, FEATURE_ACTION_ENABLE, tx);
            if (error != 0)
                return (error);
        }
    }

    /* Persist the new refcount (creates the entry when enabling). */
    error = zap_update(os, zapobj, feature->fi_guid,
                       sizeof (uint64_t), 1, &refcount, tx);
    if (error != 0)
        return (error);

    /* On enable, also record the human-readable feature description. */
    if (action == FEATURE_ACTION_ENABLE) {
        error = zap_update(os, desc_obj,
                           feature->fi_guid, 1, strlen(feature->fi_desc) + 1,
                           feature->fi_desc, tx);
        if (error != 0)
            return (error);
    }

    /* Keep the in-core MOS-feature activation state in sync. */
    if (action == FEATURE_ACTION_INCR && refcount == 1 && feature->fi_mos) {
        spa_activate_mos_feature(dmu_objset_spa(os), feature->fi_guid);
    }

    if (action == FEATURE_ACTION_DECR && refcount == 0) {
        spa_deactivate_mos_feature(dmu_objset_spa(os),
                                   feature->fi_guid);
    }

    return (0);
}