/*
 * Sync task to remove delegated permissions from a dataset directory.
 *
 * For each who-entry in nvp, either remove the entire who-entry (when the
 * nvpair carries no nested permission list) or remove just the listed
 * permissions, destroying the per-who ZAP object once it becomes empty.
 * Each removal is recorded in the pool history.
 */
static void
dsl_deleg_unset_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	nvlist_t *nvp = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	nvpair_t *whopair = NULL;
	uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj;

	/* No delegation ZAP object means there is nothing to unset. */
	if (zapobj == 0)
		return;

	while ((whopair = nvlist_next_nvpair(nvp, whopair)) != NULL) {
		const char *whokey = nvpair_name(whopair);
		nvlist_t *perms;
		nvpair_t *permpair = NULL;
		uint64_t jumpobj;

		if (nvpair_value_nvlist(whopair, &perms) != 0) {
			/*
			 * No nested permission list: drop every permission
			 * for this who-entry by destroying its jump object.
			 */
			if (zap_lookup(mos, zapobj, whokey, 8, 1,
			    &jumpobj) == 0) {
				(void) zap_remove(mos, zapobj, whokey, tx);
				VERIFY(0 == zap_destroy(mos, jumpobj, tx));
			}
			spa_history_log_internal(LOG_DS_PERM_WHO_REMOVE,
			    dd->dd_pool->dp_spa, tx, "%s dataset = %llu",
			    whokey, dd->dd_phys->dd_head_dataset_obj);
			continue;
		}

		if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0)
			continue;

		while ((permpair = nvlist_next_nvpair(perms, permpair))
		    != NULL) {
			const char *perm = nvpair_name(permpair);
			uint64_t n = 0;

			(void) zap_remove(mos, jumpobj, perm, tx);
			/*
			 * Destroy the who-entry once its last permission
			 * has been removed.
			 */
			if (zap_count(mos, jumpobj, &n) == 0 && n == 0) {
				(void) zap_remove(mos, zapobj, whokey, tx);
				VERIFY(0 == zap_destroy(mos, jumpobj, tx));
			}
			spa_history_log_internal(LOG_DS_PERM_REMOVE,
			    dd->dd_pool->dp_spa, tx,
			    "%s %s dataset = %llu", whokey, perm,
			    dd->dd_phys->dd_head_dataset_obj);
		}
	}
}
/*
 * Change the initializing state of a vdev and arrange for the new state to
 * be persisted by a sync task; the transition is also logged to pool
 * history.  Caller must hold vd->vdev_initialize_lock.
 */
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	/* Nothing to do if the state is unchanged. */
	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * Stamp a fresh action time unless we are resuming from a
	 * suspension, in which case the original start time is preserved.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	/* Persist the state change atomically with this txg. */
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);
}
/*
 * Record the pool's on-disk version and the running software versions
 * (SPA/ZPL) plus uname details in the pool history.
 */
void
spa_history_log_version(spa_t *spa, const char *operation)
{
	uint64_t pool_version = spa_version(spa);

	spa_history_log_internal(spa, operation, NULL,
	    "pool version %llu; software version %llu/%d; uts %s %s %s %s",
	    (u_longlong_t)pool_version, SPA_VERSION, ZPL_VERSION,
	    utsname.nodename, utsname.release, utsname.version,
	    utsname.machine);
}
/*
 * Sync task: decrement the reference count of a pool feature and record
 * the operation in pool history.
 */
static void
feature_decr_sync(void *arg, dmu_tx_t *tx)
{
	zfeature_info_t *feature = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_decr(spa, feature, tx);
	spa_history_log_internal(spa, "zhack feature decr", tx,
	    "name=%s", feature->fi_guid);
}
/*
 * Record the pool's on-disk version, running software versions, and uname
 * details in pool history as part of the given transaction.
 */
void
spa_history_log_version(spa_t *spa, const char *operation, dmu_tx_t *tx)
{
	utsname_t *uts = utsname();

	spa_history_log_internal(spa, operation, tx,
	    "pool version %llu; software version %llu/%llu; uts %s %s %s %s",
	    (u_longlong_t)spa_version(spa), SPA_VERSION, ZPL_VERSION,
	    uts->nodename, uts->release, uts->version, uts->machine);
}
/*
 * Sync task: enable a pool feature and note it in pool history.
 */
static void
feature_enable_sync(void *arg, dmu_tx_t *tx)
{
	zfeature_info_t *feature = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_enable(spa, feature, tx);
	spa_history_log_internal(spa, "zhack enable feature", tx,
	    "name=%s can_readonly=%u",
	    feature->fi_guid, feature->fi_can_readonly);
}
/*
 * Sync task wrapper: enable a pool feature via feature_enable_sync() and
 * log the operation (including the feature flags) in pool history.
 */
static void
zhack_feature_enable_sync(void *arg, dmu_tx_t *tx)
{
	zfeature_info_t *feature = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	feature_enable_sync(spa, feature, tx);
	spa_history_log_internal(spa, "zhack enable feature", tx,
	    "name=%s flags=%u", feature->fi_guid, feature->fi_flags);
}
/*
 * Sync task: decrement a feature's on-disk reference count by one and
 * record the operation in pool history.
 */
static void
feature_decr_sync(void *arg, dmu_tx_t *tx)
{
	zfeature_info_t *feature = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t count;

	/*
	 * Read the current refcount from disk, then sync it back
	 * decremented.  NOTE(review): assumes count > 0 — a zero count
	 * would wrap; verify callers guarantee this.
	 */
	VERIFY0(feature_get_refcount_from_disk(spa, feature, &count));
	feature_sync(spa, feature, count - 1, tx);
	spa_history_log_internal(spa, "zhack feature decr", tx,
	    "name=%s", feature->fi_guid);
}
/*
 * Record the pool and software versions in pool history and echo a
 * version line to the console.  Compiled away outside the kernel.
 */
void
spa_history_log_version(spa_t *spa, const char *operation)
{
#ifdef _KERNEL
	uint64_t pool_version = spa_version(spa);

	spa_history_log_internal(spa, operation, NULL,
	    "pool version %llu; software version %llu/%d; uts %s %s %s %s",
	    (u_longlong_t)pool_version, SPA_VERSION, ZPL_VERSION,
	    utsname.nodename, utsname.release, utsname.version,
	    utsname.machine);
	cmn_err(CE_CONT, "!%s version %llu pool %s using %llu", operation,
	    (u_longlong_t)pool_version, spa_name(spa), SPA_VERSION);
#endif
}
/*
 * Sync task: install a newly generated dataset key, valid from this txg
 * onward, both on disk (keychain ZAP) and in the in-memory keystore, and
 * record the rekey in pool history.
 */
static void
dsl_crypto_key_new_sync(void *arg1, dmu_tx_t *tx)
{
	struct knarg *kn = arg1;
	dsl_dataset_t *ds = kn->kn_ds;

	/*
	 * Generate a new key and add it to the keychain to be valid from
	 * this txg onwards.
	 */
	dsl_keychain_set_key(ds->ds_dir, tx, kn->kn_wkeybuf, kn->kn_wkeylen,
	    gethrestime_sec());
	zcrypt_keychain_insert(&kn->kn_skn->skn_keychain, tx->tx_txg,
	    kn->kn_txgkey);
	spa_history_log_internal(dsl_dataset_get_spa(ds), "key create", tx,
	    "rekey succeeded dataset = %llu", ds->ds_object);
}
/*
 * Record pool/software versions in the pool history (when the pool
 * version supports history) and echo a line to the console describing
 * how the pool was accessed.  Compiled away outside the kernel.
 */
void
spa_history_log_version(spa_t *spa, history_internal_events_t event)
{
#ifdef _KERNEL
	uint64_t pool_version = spa_version(spa);
	const char *verb;

	if (event == LOG_POOL_IMPORT)
		verb = "imported";
	else if (event == LOG_POOL_CREATE)
		verb = "created";
	else
		verb = "accessed";

	if (pool_version >= SPA_VERSION_ZPOOL_HISTORY) {
		spa_history_log_internal(event, spa, NULL,
		    "pool spa %llu; zfs spa %llu; zpl %d; uts %s %s %s %s",
		    (u_longlong_t)pool_version, SPA_VERSION, ZPL_VERSION,
		    utsname.nodename, utsname.release, utsname.version,
		    utsname.machine);
	}
	cmn_err(CE_CONT, "!%s version %llu pool %s using %llu",
	    verb, (u_longlong_t)pool_version, spa_name(spa), SPA_VERSION);
#endif
}
/*
 * Sync task to add delegated permissions to a dataset directory.
 *
 * Lazily creates the directory's delegation ZAP object and a per-who jump
 * object, then records each permission name as a ZAP entry (value 0).
 * Every update is logged to the pool history.
 */
static void
dsl_deleg_set_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	nvlist_t *nvp = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	nvpair_t *whopair = NULL;
	uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj;

	if (zapobj == 0) {
		/* First delegation on this dir: create the ZAP object. */
		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos,
		    DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
	}

	while ((whopair = nvlist_next_nvpair(nvp, whopair)) != NULL) {
		const char *whokey = nvpair_name(whopair);
		nvlist_t *perms;
		nvpair_t *permpair = NULL;
		uint64_t jumpobj;

		VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);

		if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0) {
			/* No jump object for this who-entry yet: make one. */
			jumpobj = zap_create(mos, DMU_OT_DSL_PERMS,
			    DMU_OT_NONE, 0, tx);
			VERIFY(zap_update(mos, zapobj, whokey, 8, 1,
			    &jumpobj, tx) == 0);
		}

		while ((permpair = nvlist_next_nvpair(perms, permpair))
		    != NULL) {
			const char *perm = nvpair_name(permpair);
			uint64_t n = 0;

			VERIFY(zap_update(mos, jumpobj, perm, 8, 1,
			    &n, tx) == 0);
			spa_history_log_internal(LOG_DS_PERM_UPDATE,
			    dd->dd_pool->dp_spa, tx,
			    "%s %s dataset = %llu", whokey, perm,
			    dd->dd_phys->dd_head_dataset_obj);
		}
	}
}
/*
 * dsl_crypto_key_change
 *
 * Sync task that rewraps every entry of a dataset's on-disk keychain with
 * a new wrapping key.  The old key must already be present in memory since
 * the user interface doesn't provide a way to prompt or retrieve the old
 * key.
 */
static void
dsl_crypto_key_change_sync(void *arg1, dmu_tx_t *tx)
{
	struct wkey_change_arg *ca = arg1;
	dsl_dataset_t *ds = ca->ca_ds;
	size_t wkeylen;
	char *wkeybuf = NULL;
	zcrypt_key_t *txgkey;
	zap_cursor_t zc;
	zap_attribute_t za;
	objset_t *mos;
	uint64_t keychain_zapobj;
	spa_t *spa;
	zcrypt_keystore_node_t *zkn;

	ASSERT(RRW_WRITE_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	keychain_zapobj = dsl_dir_phys(ds->ds_dir)->dd_keychain_obj;

	/*
	 * To allow for the case where the keychains of child datasets
	 * are not loaded (ie an explicit 'zfs key -u tank/fs/sub' had
	 * been done some time before doing 'zfs key -c tank/fs') we iterate
	 * over the zap objects on disk rather than copying from the
	 * in memory keystore node.
	 */
	for (zap_cursor_init(&zc, mos, keychain_zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		/* Read and unwrap this entry with the old wrapping key. */
		wkeylen = za.za_num_integers;
		wkeybuf = kmem_alloc(wkeylen, KM_SLEEP);
		VERIFY(zap_lookup_uint64(mos, keychain_zapobj,
		    (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf) == 0);
		VERIFY(zcrypt_unwrap_key(ca->ca_old_key,
		    ds->ds_objset->os_crypt, wkeybuf, wkeylen,
		    &txgkey) == 0);
		kmem_free(wkeybuf, wkeylen);
		/* Rewrap with the new key and write the entry back. */
		VERIFY(zcrypt_wrap_key(ca->ca_new_key, txgkey,
		    &wkeybuf, &wkeylen,
		    zio_crypt_select_wrap(ds->ds_objset->os_crypt)) == 0);
		zcrypt_key_free(txgkey);
		VERIFY(zap_update_uint64(mos, keychain_zapobj,
		    (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf,
		    tx) == 0);
		kmem_free(wkeybuf, wkeylen);
	}

	zap_cursor_fini(&zc);

	spa = dsl_dataset_get_spa(ds);

	/*
	 * If the wrapping key is loaded switch the in memory copy now.
	 */
	zkn = zcrypt_keystore_find_node(spa, ds->ds_object, B_FALSE);
	if (zkn != NULL) {
		zcrypt_key_free(zkn->skn_wrapkey);
		zkn->skn_wrapkey = zcrypt_key_copy(ca->ca_new_key);
	}

	spa_history_log_internal(spa, "key change", tx,
	    "succeeded dataset = %llu", ds->ds_object);
}
/*
 * dsl_crypto_key_clone
 *
 * Set up the keychain for a newly cloned dataset, cloning the origin's
 * keychain entries and optionally generating a fresh key when
 * ctx->dcc_clone_newkey is set.
 *
 * Our caller (dmu_objset_create_sync) must have a lock on the dataset.
 *
 * Returns 0 on success, or an error from zap_update()/
 * dsl_keychain_load_dd().
 */
int
dsl_crypto_key_clone(dsl_dir_t *dd, dsl_dataset_phys_t *dsphys,
    uint64_t dsobj, dsl_dataset_t *clone_origin,
    dsl_crypto_ctx_t *ctx, dmu_tx_t *tx)
{
	zcrypt_key_t *txgkey;
	zcrypt_key_t *wrappingkey = NULL;
	dsl_dataset_t *origin;
	uint64_t crypt;
	spa_t *spa = dd->dd_pool->dp_spa;
	int error = 0;

	ASSERT(ctx != NULL);

	/*
	 * NOTE(review): when BP_IS_HOLE the origin comes from the crypto
	 * context rather than clone_origin, yet the history logging below
	 * still dereferences clone_origin — confirm clone_origin is
	 * non-NULL on that path.
	 */
	if (BP_IS_HOLE(&dsphys->ds_bp)) {
		origin = ctx->dcc_origin_ds;
	} else {
		origin = clone_origin;
	}
	ASSERT(origin != NULL);

	/*
	 * Need to look at the value of crypt on the origin snapshot
	 * since it is sticky for encryption.
	 */
	VERIFY(dsl_prop_get_ds(origin,
	    zfs_prop_to_name(ZFS_PROP_ENCRYPTION), 8, 1,
	    &crypt,/* DSL_PROP_GET_EFFECTIVE,*/ NULL) == 0);

	/* Unencrypted origin: nothing to clone. */
	if (crypt == ZIO_CRYPT_OFF) {
		return (0);
	}

	wrappingkey = ctx->dcc_wrap_key;
	dsl_keychain_create_obj(dd, tx);
	dsl_keychain_clone_phys(origin, dd, tx, wrappingkey);

	if (ctx->dcc_salt != 0) {
		/* Persist the salt as a dataset property. */
		objset_t *mos = dd->dd_pool->dp_meta_objset;
		uint64_t props_zapobj = dsl_dir_phys(dd)->dd_props_zapobj;

		error = zap_update(mos, props_zapobj,
		    zfs_prop_to_name(ZFS_PROP_SALT), 8, 1,
		    (void *)&ctx->dcc_salt, tx);
	}

	if (ctx->dcc_clone_newkey) {
		caddr_t wkeybuf;
		size_t wkeylen;

		/*
		 * Generate a new key and add it to the keychain
		 * to be valid from this txg onwards.
		 */
		zcrypt_key_hold(wrappingkey, FTAG);
		txgkey = zcrypt_key_gen(crypt);
		VERIFY(zcrypt_wrap_key(wrappingkey, txgkey,
		    &wkeybuf, &wkeylen, zio_crypt_select_wrap(crypt)) == 0);
		zcrypt_key_release(wrappingkey, FTAG);
		dsl_keychain_set_key(dd, tx, wkeybuf, wkeylen,
		    dsphys->ds_creation_time);
		kmem_free(wkeybuf, wkeylen);
		zcrypt_key_free(txgkey);
		spa_history_log_internal(spa, "key create", tx,
		    "rekey succeeded dataset = %llu from dataset = %llu",
		    dsobj, clone_origin->ds_object);
	} else {
		spa_history_log_internal(spa, "key create", tx,
		    "succeeded dataset = %llu from dataset = %llu",
		    dsobj, clone_origin->ds_object);
	}

	if (wrappingkey != NULL) {
		error = dsl_keychain_load_dd(dd, dsobj, crypt, wrappingkey);
	}

	return (error);
}
/* * dsl_crypto_key_gen - Generate dataset key * * Generate a new key for this dataset based on its encryption property type. * Store the key as a usable zcrypt_key_t in the in memory keystore and * put the wrapped version of it in the on disk keychain. * * returns 0 on success */ int dsl_crypto_key_create(dsl_dir_t *dd, dsl_dataset_phys_t *dsphys, uint64_t dsobj, dsl_crypto_ctx_t *ctx, dmu_tx_t *tx) { int error = -1; #ifdef DEBUG zcrypt_key_t *debugkey; #endif /* DEBUG */ zcrypt_key_t *wrappingkey, *dslkey; caddr_t wkeybuf = NULL; size_t wkeylen = 0; uint64_t crypt; spa_t *spa = dd->dd_pool->dp_spa; zcrypt_keystore_node_t *skn; if (ctx == NULL) { return (0); } crypt = ctx->dcc_crypt; if (crypt == ZIO_CRYPT_OFF || crypt == ZIO_CRYPT_INHERIT) { return (0); } crypt = zio_crypt_select(crypt, ZIO_CRYPT_ON_VALUE); wrappingkey = ctx->dcc_wrap_key; skn = zcrypt_keystore_insert(spa, dsobj, wrappingkey); if (skn == NULL) { error = ENOKEY; goto out; } dslkey = zcrypt_key_gen(crypt); zcrypt_key_hold(wrappingkey, FTAG); error = zcrypt_wrap_key(wrappingkey, dslkey, &wkeybuf, &wkeylen, zio_crypt_select_wrap(crypt)); if (error != 0) { zcrypt_key_free(dslkey); spa_history_log_internal(spa, "key create", tx, "failed dataset = %llu unable to wrap key", dsobj, error); goto out; } dsl_keychain_create_obj(dd, tx); dsl_keychain_set_key(dd, tx, wkeybuf, wkeylen, dsphys->ds_creation_time); zcrypt_keychain_insert(&skn->skn_keychain, dsphys->ds_creation_txg, dslkey); if (ctx->dcc_salt != 0) { objset_t *mos = dd->dd_pool->dp_meta_objset; uint64_t props_zapobj = dsl_dir_phys(dd)->dd_props_zapobj; error = zap_update(mos, props_zapobj, zfs_prop_to_name(ZFS_PROP_SALT), 8, 1, (void *)&ctx->dcc_salt, tx); } if (error == 0) spa_history_log_internal(spa, "key create", tx, "succeeded dataset = %llu", dsobj); #ifdef DEBUG ASSERT3U(zcrypt_unwrap_key(wrappingkey, crypt, wkeybuf, wkeylen, &debugkey), ==, 0); zcrypt_key_compare(dslkey, debugkey); zcrypt_key_free(debugkey); #endif /* DEBUG */ out: 
zcrypt_key_release(wrappingkey, FTAG); if (wkeylen > 0) { kmem_free(wkeybuf, wkeylen); } return (error); }
/*
 * Finish (or cancel) a scan: clean up old-style scrub state, mark the
 * scan finished/canceled, and — for scrub/resilver — drain in-flight I/O,
 * reassess DTLs, and notify interested parties.
 */
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	spa_history_log_internal(spa, "scan done", tx,
	    "complete=%u", complete);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		/* Wait for all in-flight scrub I/O to drain. */
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL,
			    scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}
/*
 * Sync task that starts a new scan (scrub or resilver): initializes
 * scn_phys, fires the appropriate start event, resets block statistics,
 * and creates the scan queue ZAP object.
 */
static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);

	/* Start from a clean slate: full-pool scan in [0, this txg]. */
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		/* A needed resilver narrows the txg range scanned. */
		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg,
		    &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;

		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats = kmem_alloc(sizeof (zfs_all_blkstats_t),
		    KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	/* Older pools have no DMU_OT_SCAN_QUEUE; fall back to a plain ZAP. */
	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}