/* ARGSUSED */
int
dmu_objset_prefetch(char *name, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_hold(name, FTAG, &ds))
		return (0);

	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
		mutex_enter(&ds->ds_opening_lock);
		if (!dsl_dataset_get_user_ptr(ds)) {
			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
			zbookmark_t zb;

			zb.zb_objset = ds->ds_object;
			zb.zb_object = 0;
			zb.zb_level = -1;
			zb.zb_blkid = 0;

			(void) arc_read_nolock(NULL, dsl_dataset_get_spa(ds),
			    &ds->ds_phys->ds_bp, NULL, NULL,
			    ZIO_PRIORITY_ASYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
			    &aflags, &zb);
		}
		mutex_exit(&ds->ds_opening_lock);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static int
dmu_objset_open_ds_os(dsl_dataset_t *ds, objset_t *os, dmu_objset_type_t type)
{
	objset_impl_t *osi;

	mutex_enter(&ds->ds_opening_lock);
	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		int err;

		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, &osi);
		if (err) {
			mutex_exit(&ds->ds_opening_lock);
			return (err);
		}
	}
	mutex_exit(&ds->ds_opening_lock);

	os->os = osi;
	os->os_mode = DS_MODE_NOHOLD;

	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type)
		return (EINVAL);
	return (0);
}
static void
dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct oscarg *oa = arg2;
	dsl_dataset_t *ds;
	blkptr_t *bp;
	uint64_t dsobj;

	ASSERT(dmu_tx_is_syncing(tx));

	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
	    oa->clone_parent, oa->flags, cr, tx);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj, FTAG, &ds));
	bp = dsl_dataset_get_blkptr(ds);
	if (BP_IS_HOLE(bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, bp, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, cr, tx);
	}

	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", dsobj);

	dsl_dataset_rele(ds, FTAG);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	/*
	 * We shouldn't be doing anything with dsl_dataset_t's unless the
	 * pool_config lock is held, or the dataset is long-held.
	 */
	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
	    dsl_dataset_long_held(ds));

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
void
dsl_dataset_user_hold_sync_one(dsl_dataset_t *ds, const char *htag,
    minor_t minor, uint64_t now, dmu_tx_t *tx)
{
	nvlist_t *tmpholds;

	if (minor != 0)
		tmpholds = fnvlist_alloc();
	else
		tmpholds = NULL;
	dsl_dataset_user_hold_sync_one_impl(tmpholds, ds, htag, minor, now, tx);
	dsl_onexit_hold_cleanup(dsl_dataset_get_spa(ds), tmpholds, minor);
}
static void
dsl_keychain_clone_phys(dsl_dataset_t *src, dsl_dir_t *dd,
    dmu_tx_t *tx, zcrypt_key_t *dwkey)
{
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t keychain = dsl_dir_phys(dd)->dd_keychain_obj;
	caddr_t wrappedkey = NULL;
	size_t wkeylen = 0;
	zcrypt_keystore_node_t *kn;
	zcrypt_keychain_node_t *n;
	uint64_t newest_txg = dsl_dataset_phys(src)->ds_creation_txg;

	kn = zcrypt_keystore_find_node(dsl_dataset_get_spa(src),
	    src->ds_object, B_FALSE);
	if (kn == NULL) {
		kn = zcrypt_keystore_find_node(dsl_dataset_get_spa(src),
		    dsl_dir_phys(src->ds_dir)->dd_head_dataset_obj, B_FALSE);
	}
	ASSERT(kn != NULL);
	ASSERT(dwkey != NULL);

	/*
	 * Walk the in-memory AVL tree representation of the keychain,
	 * creating new keychain entries using our wrapping key and
	 * stopping when we reach keychain entries created after the
	 * snapshot we are cloning from.
	 */
	mutex_enter(&kn->skn_lock);
	for (n = avl_first(&kn->skn_keychain);
	    n != NULL && n->dkn_txg <= newest_txg;
	    n = AVL_NEXT(&kn->skn_keychain, n)) {
		VERIFY(zcrypt_wrap_key(dwkey, n->dkn_key,
		    &wrappedkey, &wkeylen,
		    zio_crypt_select_wrap(dwkey->zk_crypt)) == 0);
		VERIFY(zap_update_uint64(mos, keychain, &n->dkn_txg, 1,
		    1, wkeylen, wrappedkey, tx) == 0);
		kmem_free(wrappedkey, wkeylen);
	}
	mutex_exit(&kn->skn_lock);
}
void
dsl_dataset_user_hold_sync_one(dsl_dataset_t *ds, const char *htag,
    minor_t minor, uint64_t now, dmu_tx_t *tx)
{
	nvlist_t *tmpholds;

	if (minor != 0)
		VERIFY0(nvlist_alloc(&tmpholds, NV_UNIQUE_NAME, KM_PUSHPAGE));
	else
		tmpholds = NULL;
	dsl_dataset_user_hold_sync_one_impl(tmpholds, ds, htag, minor, now, tx);
	dsl_onexit_hold_cleanup(dsl_dataset_get_spa(ds), tmpholds, minor);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	*osp = ds->ds_objset;
	if (*osp == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), osp);
	}
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
void
dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
    minor_t minor)
{
	zfs_hold_cleanup_arg_t *ca = kmem_alloc(sizeof (*ca), KM_PUSHPAGE);
	spa_t *spa = dsl_dataset_get_spa(ds);

	(void) strlcpy(ca->zhca_spaname, spa_name(spa),
	    sizeof (ca->zhca_spaname));
	ca->zhca_spa_load_guid = spa_load_guid(spa);
	ca->zhca_dsobj = ds->ds_object;
	(void) strlcpy(ca->zhca_htag, htag, sizeof (ca->zhca_htag));
	VERIFY0(zfs_onexit_add_cb(minor,
	    dsl_dataset_user_release_onexit, ca, NULL));
}
int
dmu_objset_from_ds_NEW(dsl_dataset_t *ds, objset_t **osp)
{
	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		int err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), osp);
		if (err) {
			mutex_exit(&ds->ds_opening_lock);
			return (err);
		}
		ASSERT(ds->ds_objset == *osp);
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (0);
}
void
spa_history_log_internal_ds(dsl_dataset_t *ds, const char *operation,
    dmu_tx_t *tx, const char *fmt, ...)
{
	va_list adx;
	char namebuf[MAXNAMELEN];
	nvlist_t *nvl = fnvlist_alloc();

	ASSERT(tx != NULL);

	dsl_dataset_name(ds, namebuf);
	fnvlist_add_string(nvl, ZPOOL_HIST_DSNAME, namebuf);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_DSID, ds->ds_object);

	va_start(adx, fmt);
	log_internal(nvl, operation, dsl_dataset_get_spa(ds), tx, fmt, adx);
	va_end(adx);
}
static void
dsl_crypto_key_new_sync(void *arg1, dmu_tx_t *tx)
{
	struct knarg *kn = arg1;
	dsl_dataset_t *ds = kn->kn_ds;

	/*
	 * Generate a new key and add it to the keychain to be valid from
	 * this txg onwards.
	 */
	dsl_keychain_set_key(ds->ds_dir, tx,
	    kn->kn_wkeybuf, kn->kn_wkeylen, gethrestime_sec());
	zcrypt_keychain_insert(&kn->kn_skn->skn_keychain,
	    tx->tx_txg, kn->kn_txgkey);

	spa_history_log_internal(dsl_dataset_get_spa(ds), "key create", tx,
	    "rekey succeeded dataset = %llu", ds->ds_object);
}
zfs_crypt_key_status_t
dsl_dataset_keystatus(dsl_dataset_t *ds, boolean_t dp_config_rwlock_held)
{
	/*
	 * Sneaky way of determining whether this is an encrypted dataset:
	 * look for a keychain object, so we can avoid calling
	 * dsl_prop_get_ds and all the locking issues that can entail,
	 * given the contexts from which we can be called.
	 */
	if (ds == NULL)
		return (ZFS_CRYPT_KEY_UNAVAILABLE);

	if (ds->ds_dir != NULL && dsl_dir_phys(ds->ds_dir) != NULL &&
	    dsl_dir_phys(ds->ds_dir)->dd_keychain_obj == 0) {
		return (ZFS_CRYPT_KEY_NONE);
	}

	if (zcrypt_keystore_find_node(dsl_dataset_get_spa(ds),
	    ds->ds_object, dp_config_rwlock_held)) {
		return (ZFS_CRYPT_KEY_AVAILABLE);
	}

	return (ZFS_CRYPT_KEY_UNAVAILABLE);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
	int err = 0;

	mutex_enter(&ds->ds_opening_lock);
	if (ds->ds_objset == NULL) {
		objset_t *os;
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, dsl_dataset_get_blkptr(ds), &os);
		if (err == 0) {
			mutex_enter(&ds->ds_lock);
			ASSERT(ds->ds_objset == NULL);
			ds->ds_objset = os;
			mutex_exit(&ds->ds_lock);
		}
	}
	*osp = ds->ds_objset;
	mutex_exit(&ds->ds_opening_lock);
	return (err);
}
/* called from zpl */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	objset_t *os;
	objset_impl_t *osi;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dsl_dataset_open(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &ds->ds_phys->ds_bp, &osi);
		if (err) {
			dsl_dataset_close(ds, mode, os);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	}

	os->os = osi;
	os->os_mode = mode;

	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
		dmu_objset_close(os);
		return (EINVAL);
	}

	*osp = os;
	return (0);
}
/*
 * dsl_crypto_key_change
 *
 * The old key must already be present in memory since the user interface
 * doesn't provide a way to prompt for or retrieve the old key.
 */
static void
dsl_crypto_key_change_sync(void *arg1, dmu_tx_t *tx)
{
	struct wkey_change_arg *ca = arg1;
	dsl_dataset_t *ds = ca->ca_ds;
	size_t wkeylen;
	char *wkeybuf = NULL;
	zcrypt_key_t *txgkey;
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos;
	uint64_t keychain_zapobj;
	spa_t *spa;
	zcrypt_keystore_node_t *zkn;

	ASSERT(RRW_WRITE_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	keychain_zapobj = dsl_dir_phys(ds->ds_dir)->dd_keychain_obj;

	/*
	 * To allow for the case where the keychains of child datasets
	 * are not loaded (i.e. an explicit 'zfs key -u tank/fs/sub' had
	 * been done some time before doing 'zfs key -c tank/fs') we iterate
	 * over the zap objects on disk rather than copying from the
	 * in-memory keystore node.
	 */
	for (zap_cursor_init(&zc, mos, keychain_zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		wkeylen = za.za_num_integers;
		wkeybuf = kmem_alloc(wkeylen, KM_SLEEP);

		/* Read the existing wrapped keychain entry from the ZAP. */
		VERIFY(zap_lookup_uint64(mos, keychain_zapobj,
		    (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf) == 0);

		/* Unwrap with the old key and rewrap with the new key. */
		VERIFY(zcrypt_unwrap_key(ca->ca_old_key,
		    ds->ds_objset->os_crypt, wkeybuf, wkeylen, &txgkey) == 0);
		kmem_free(wkeybuf, wkeylen);
		VERIFY(zcrypt_wrap_key(ca->ca_new_key, txgkey,
		    &wkeybuf, &wkeylen,
		    zio_crypt_select_wrap(ds->ds_objset->os_crypt)) == 0);
		zcrypt_key_free(txgkey);

		VERIFY(zap_update_uint64(mos, keychain_zapobj,
		    (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf, tx) == 0);
		kmem_free(wkeybuf, wkeylen);
	}

	zap_cursor_fini(&zc);

	spa = dsl_dataset_get_spa(ds);

	/*
	 * If the wrapping key is loaded switch the in-memory copy now.
	 */
	zkn = zcrypt_keystore_find_node(spa, ds->ds_object, B_FALSE);
	if (zkn != NULL) {
		zcrypt_key_free(zkn->skn_wrapkey);
		zkn->skn_wrapkey = zcrypt_key_copy(ca->ca_new_key);
	}

	spa_history_log_internal(spa, "key change", tx,
	    "succeeded dataset = %llu", ds->ds_object);
}
int
dsl_crypto_key_new(const char *dsname)
{
	dsl_dataset_t *ds;
	objset_t *os;
	zcrypt_keystore_node_t *skn;
	spa_t *spa;
	struct knarg arg;
	int error;
	dsl_pool_t *dp;
	void *cookie;

	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);

	if ((error = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	if (dsl_dataset_is_snapshot(ds)) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (ENOTSUP);
	}

	if ((error = dmu_objset_from_ds(ds, &os)) != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	if (os->os_crypt == ZIO_CRYPT_OFF) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (ENOTSUP);
	}

	ASSERT(os->os_crypt != ZIO_CRYPT_INHERIT);

	/*
	 * Need the keychain and wrapping key to already be available.
	 */
	spa = dsl_dataset_get_spa(ds);
	skn = zcrypt_keystore_find_node(spa, ds->ds_object, B_FALSE);
	if (skn == NULL) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (ENOENT);
	}

	ASSERT(ds != NULL);
	ASSERT(ds->ds_objset != NULL);

	/* zil_suspend_dmu_sync(dmu_objset_zil(os)); */
	zil_suspend(dsname, &cookie);

	arg.kn_skn = skn;
	arg.kn_txgkey = zcrypt_key_gen(os->os_crypt);
	arg.kn_ds = ds;
	zcrypt_key_hold(skn->skn_wrapkey, FTAG);
	VERIFY(zcrypt_wrap_key(skn->skn_wrapkey, arg.kn_txgkey,
	    &arg.kn_wkeybuf, &arg.kn_wkeylen,
	    zio_crypt_select_wrap(os->os_crypt)) == 0);

	error = dsl_sync_task(spa->spa_name, dsl_crypto_key_new_check,
	    dsl_crypto_key_new_sync, &arg, 1, ZFS_SPACE_CHECK_NONE);

	kmem_free(arg.kn_wkeybuf, arg.kn_wkeylen);
	zcrypt_key_release(skn->skn_wrapkey, FTAG);

	/* zil_resume_dmu_sync(dmu_objset_zil(os)); */
	zil_resume(os);

	dsl_dataset_rele(ds, FTAG);
	dsl_pool_rele(dp, FTAG);

	if (error)
		zcrypt_key_free(arg.kn_txgkey);

	return (error);
}
int
dsl_crypto_key_inherit(const char *dsname)
{
	char keysource[MAXNAMELEN];
	char setpoint[MAXNAMELEN];
	dsl_dataset_t *ids;
	int error;
	zcrypt_key_t *wrappingkey;
	zfs_crypt_key_status_t keystatus;
	spa_t *spa;
	dsl_pool_t *dp;

	/*
	 * Try inheriting the wrapping key from our parent.
	 */
	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_keystatus_byname(dp, dsname, &keystatus);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	if (keystatus == ZFS_CRYPT_KEY_NONE) {
		dsl_pool_rele(dp, FTAG);
		return (0);
	}
	if (keystatus == ZFS_CRYPT_KEY_AVAILABLE) {
		dsl_pool_rele(dp, FTAG);
		return (EEXIST);
	}

	error = dsl_prop_get(dsname, zfs_prop_to_name(ZFS_PROP_KEYSOURCE),
	    1, sizeof (keysource), &keysource, setpoint);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	if (strcmp(setpoint, dsname) == 0) {
		dsl_pool_rele(dp, FTAG);
		return (ENOENT);
	}

	error = dsl_dataset_hold(dp, setpoint, FTAG, &ids);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	spa = dsl_dataset_get_spa(ids);
	wrappingkey = zcrypt_key_copy(zcrypt_keystore_find_wrappingkey(spa,
	    ids->ds_object));
	dsl_dataset_rele(ids, FTAG);
	dsl_pool_rele(dp, FTAG);
	if (wrappingkey == NULL)
		return (ENOENT);

	error = dsl_crypto_key_load(dsname, wrappingkey);

	return (error);
}
int
dsl_crypto_key_load(const char *dsname, zcrypt_key_t *wrappingkey)
{
	dsl_dataset_t *ds;
	uint64_t crypt;
	int error;
	dsl_pool_t *dp;

	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);

	if ((error = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	/*
	 * This is key load, not key change, so if ds->ds_key is already
	 * set we fail.
	 */
	if (zcrypt_keystore_find_node(dsl_dataset_get_spa(ds),
	    ds->ds_object, B_FALSE) != NULL) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (EEXIST);
	}

	/*
	 * Find out what size of key we expect.
	 *
	 * For now the wrapping key size (and type) matches the size
	 * of the dataset key; this may not always be the case
	 * (particularly if we ever support wrapping dataset keys
	 * with asymmetric keys, e.g. RSA).
	 *
	 * When alternate wrapping keys are added this may be done using
	 * an index property.
	 */
	rrw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER, FTAG);
	error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_ENCRYPTION),
	    8, 1, &crypt, NULL /*, DSL_PROP_GET_EFFECTIVE */);
	rrw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock, FTAG);
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	if (crypt == ZIO_CRYPT_OFF) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (ENOTSUP);
	}
	ASSERT(crypt != ZIO_CRYPT_INHERIT);

	error = dsl_keychain_load(ds, crypt, wrappingkey);

	dsl_dataset_rele(ds, FTAG);
	dsl_pool_rele(dp, FTAG);

	return (error);
}
/*
 * dsl_crypto_key_unload
 *
 * Remove the key from the in-memory keystore.
 *
 * First we have to remove the minor node for a ZVOL or unmount
 * the filesystem.  This is so that all pending IO for it is flushed to
 * disk and we won't need to encrypt anything further with this key.
 * Anything in flight should already have a lock on the keys it needs.
 * We can't assume that userland has already successfully unmounted the
 * dataset, though in many cases it will have.
 *
 * If the key can't be removed, return the failure back to our caller.
 */
int
dsl_crypto_key_unload(const char *dsname)
{
	dsl_dataset_t *ds;
	objset_t *os;
	int error;
	spa_t *spa;
	dsl_pool_t *dp;
#ifdef _KERNEL
	dmu_objset_type_t os_type;
	/* vfs_t *vfsp; */
	struct vfsmount *vfsp;
#endif /* _KERNEL */

	error = dsl_pool_hold(dsname, FTAG, &dp);
	if (error != 0)
		return (error);

	/* XXX - should we use own_exclusive() here? */
	if ((error = dsl_dataset_hold(dp, dsname, FTAG, &ds)) != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

	if ((error = dmu_objset_from_ds(ds, &os)) != 0) {
		dsl_dataset_rele(ds, FTAG);
		dsl_pool_rele(dp, FTAG);
		return (error);
	}

#ifdef _KERNEL
	/*
	 * Make sure that the device node has gone for ZVOLs
	 * and that filesystems are unmounted.
	 */
#if 0	/* FIXME */
	os_type = dmu_objset_type(os);
	if (os_type == DMU_OST_ZVOL) {
		error = zvol_remove_minor(dsname);
		if (error == ENXIO)
			error = 0;
	} else if (os_type == DMU_OST_ZFS) {
		vfsp = zfs_get_vfs(dsname);
		if (vfsp != NULL) {
			error = vn_vfswlock(vfsp->vfs_vnodecovered);
			VFS_RELE(vfsp);
			if (error == 0)
				error = dounmount(vfsp, 0, CRED());
		}
	}
	if (error != 0) {
		dsl_dataset_rele(ds, FTAG);
		return (error);
	}
#endif
#endif /* _KERNEL */

	/*
	 * Make sure all dbufs are synced.
	 *
	 * It is essential for encrypted datasets to ensure that
	 * there is no further pending IO before removing the key.
	 */
	if (dmu_objset_is_dirty(os, 0))		/* FIXME, 0? */
		txg_wait_synced(dmu_objset_pool(os), 0);
	dmu_objset_evict_dbufs(os);

	spa = dsl_dataset_get_spa(ds);
	error = zcrypt_keystore_remove(spa, ds->ds_object);

	dsl_dataset_rele(ds, FTAG);
	dsl_pool_rele(dp, FTAG);

	return (error);
}