/*
 * Iterate over all bookmarks
 */
int
zfs_iter_bookmarks(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
    zfs_handle_t *nzhp;
    nvlist_t *props = NULL;
    nvlist_t *bmarks = NULL;
    int err;
    nvpair_t *pair;

    if ((zfs_get_type(zhp) & (ZFS_TYPE_SNAPSHOT | ZFS_TYPE_BOOKMARK)) != 0)
        return (0);

    /* Set up the requested properties nvlist. */
    props = fnvlist_alloc();
    fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_GUID));
    fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_CREATETXG));
    fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_CREATION));

    /* Allocate an nvlist to hold the bookmarks. */
    bmarks = fnvlist_alloc();

    if ((err = lzc_get_bookmarks(zhp->zfs_name, props, &bmarks)) != 0)
        goto out;

    for (pair = nvlist_next_nvpair(bmarks, NULL); pair != NULL;
        pair = nvlist_next_nvpair(bmarks, pair)) {
        char name[ZFS_MAXNAMELEN];
        char *bmark_name;
        nvlist_t *bmark_props;

        bmark_name = nvpair_name(pair);
        bmark_props = fnvpair_value_nvlist(pair);

        (void) snprintf(name, sizeof (name), "%s#%s", zhp->zfs_name,
            bmark_name);

        nzhp = make_bookmark_handle(zhp, name, bmark_props);
        if (nzhp == NULL)
            continue;

        if ((err = func(nzhp, data)) != 0)
            goto out;
    }

out:
    fnvlist_free(props);
    fnvlist_free(bmarks);

    return (err);
}
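/*
 * Illustrative example (not part of the source above): a minimal libzfs
 * consumer that walks the bookmarks of a filesystem with
 * zfs_iter_bookmarks().  The helper name list_bookmarks(), the use of
 * zfs_open() to obtain the handle, and the callback closing the handle it
 * is given are assumptions for this sketch.
 */
#include <stdio.h>
#include <libzfs.h>

static int
print_bookmark_cb(zfs_handle_t *bmark, void *data)
{
    (void) data;
    (void) printf("bookmark: %s\n", zfs_get_name(bmark));
    zfs_close(bmark);
    return (0);
}

static int
list_bookmarks(libzfs_handle_t *hdl, const char *fsname)
{
    zfs_handle_t *zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM);
    int err;

    if (zhp == NULL)
        return (-1);
    err = zfs_iter_bookmarks(zhp, print_bookmark_cb, NULL);
    zfs_close(zhp);
    return (err);
}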
/*
 * Discard the checkpoint from the specified pool.
 *
 * If this function returns 0 the checkpoint was successfully discarded.
 *
 * This method may also return:
 *
 * ZFS_ERR_NO_CHECKPOINT
 *     The pool does not have a checkpoint.
 *
 * ZFS_ERR_DISCARDING_CHECKPOINT
 *     ZFS is already in the middle of discarding the checkpoint.
 */
int
lzc_pool_checkpoint_discard(const char *pool)
{
    int error;
    nvlist_t *result = NULL;
    nvlist_t *args = fnvlist_alloc();

    error = lzc_ioctl(ZFS_IOC_POOL_DISCARD_CHECKPOINT, pool, args, &result);

    fnvlist_free(args);
    fnvlist_free(result);

    return (error);
}
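/*
 * Illustrative sketch (not from the source): how a userland caller might
 * invoke lzc_pool_checkpoint_discard() and interpret its documented error
 * codes.  The pool name "tank" is a placeholder and libzfs_core_init() is
 * assumed to have been called already.
 */
#include <stdio.h>
#include <libzfs_core.h>
#include <sys/fs/zfs.h>

static int
discard_checkpoint_example(void)
{
    int err = lzc_pool_checkpoint_discard("tank");

    if (err == ZFS_ERR_NO_CHECKPOINT)
        (void) printf("pool has no checkpoint\n");
    else if (err == ZFS_ERR_DISCARDING_CHECKPOINT)
        (void) printf("a discard is already in progress\n");
    else if (err != 0)
        (void) printf("discard failed: %d\n", err);
    return (err);
}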
/*
 * Called at spa_load time to release a stale temporary user hold.
 * Also called by the onexit code.
 */
void
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, const char *htag)
{
    dsl_dataset_user_release_tmp_arg_t ddurta;

#ifdef _KERNEL
    dsl_dataset_t *ds;
    int error;

    /* Make sure it is not mounted. */
    dsl_pool_config_enter(dp, FTAG);
    error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
    if (error == 0) {
        char name[MAXNAMELEN];
        dsl_dataset_name(ds, name);
        dsl_dataset_rele(ds, FTAG);
        dsl_pool_config_exit(dp, FTAG);
        zfs_unmount_snap(name);
    } else {
        dsl_pool_config_exit(dp, FTAG);
    }
#endif

    ddurta.ddurta_dsobj = dsobj;
    ddurta.ddurta_holds = fnvlist_alloc();
    fnvlist_add_boolean(ddurta.ddurta_holds, htag);

    (void) dsl_sync_task(spa_name(dp->dp_spa),
        dsl_dataset_user_release_tmp_check,
        dsl_dataset_user_release_tmp_sync, &ddurta, 1);
    fnvlist_free(ddurta.ddurta_holds);
}
int
lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
    uint64_t *usedp)
{
    nvlist_t *args;
    nvlist_t *result;
    int err;
    char fs[MAXNAMELEN];
    char *atp;

    /* Determine the fs name. */
    (void) strlcpy(fs, firstsnap, sizeof (fs));
    atp = strchr(fs, '@');
    if (atp == NULL)
        return (EINVAL);
    *atp = '\0';

    args = fnvlist_alloc();
    fnvlist_add_string(args, "firstsnap", firstsnap);

    err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
    nvlist_free(args);
    if (err == 0)
        *usedp = fnvlist_lookup_uint64(result, "used");
    fnvlist_free(result);

    return (err);
}
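/*
 * Illustrative sketch (not from the source): querying the space consumed
 * by the snapshot range pool/fs@a .. pool/fs@c with lzc_snaprange_space().
 * The snapshot names are placeholders and libzfs_core_init() is assumed to
 * have been called.
 */
#include <stdio.h>
#include <libzfs_core.h>

static void
snaprange_space_example(void)
{
    uint64_t used;
    int err = lzc_snaprange_space("pool/fs@a", "pool/fs@c", &used);

    if (err == 0)
        (void) printf("range uses %llu bytes\n",
            (unsigned long long)used);
}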
static void
dsl_dataset_user_release_onexit(void *arg)
{
    zfs_hold_cleanup_arg_t *ca = arg;
    spa_t *spa;
    int error;

    error = spa_open(ca->zhca_spaname, &spa, FTAG);
    if (error != 0) {
        zfs_dbgmsg("couldn't release holds on pool=%s "
            "because pool is no longer loaded",
            ca->zhca_spaname);
        return;
    }
    if (spa_load_guid(spa) != ca->zhca_spa_load_guid) {
        zfs_dbgmsg("couldn't release holds on pool=%s "
            "because pool is no longer loaded (guid doesn't match)",
            ca->zhca_spaname);
        spa_close(spa, FTAG);
        return;
    }
    (void) dsl_dataset_user_release_tmp(spa_get_dsl(spa), ca->zhca_holds);
    fnvlist_free(ca->zhca_holds);
    kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
    spa_close(spa, FTAG);
}
static int
zcp_props_iter(lua_State *state)
{
    char *source, *val;
    nvlist_t *nvprop;
    nvlist_t **props = lua_touserdata(state, lua_upvalueindex(1));
    nvpair_t *pair = lua_touserdata(state, lua_upvalueindex(2));

    do {
        pair = nvlist_next_nvpair(*props, pair);
        if (pair == NULL) {
            fnvlist_free(*props);
            *props = NULL;
            return (0);
        }
    } while (!zfs_prop_user(nvpair_name(pair)));

    lua_pushlightuserdata(state, pair);
    lua_replace(state, lua_upvalueindex(2));

    nvprop = fnvpair_value_nvlist(pair);
    val = fnvlist_lookup_string(nvprop, ZPROP_VALUE);
    source = fnvlist_lookup_string(nvprop, ZPROP_SOURCE);

    (void) lua_pushstring(state, nvpair_name(pair));
    (void) lua_pushstring(state, val);
    (void) lua_pushstring(state, source);
    return (3);
}
/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
    char *msg;

    /*
     * If this is part of creating a pool, not everything is
     * initialized yet, so don't bother logging the internal events.
     * Likewise if the pool is not writeable.
     */
    if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
        fnvlist_free(nvl);
        return;
    }

    msg = kmem_vasprintf(fmt, adx);
    fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
    strfree(msg);

    fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
    fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);

    if (dmu_tx_is_syncing(tx)) {
        spa_history_log_sync(nvl, tx);
    } else {
        dsl_sync_task_nowait(spa_get_dsl(spa),
            spa_history_log_sync, nvl, 0, ZFS_SPACE_CHECK_NONE, tx);
    }
    /* spa_history_log_sync() will free nvl */
}
static int
zcp_props_list_gc(lua_State *state)
{
    nvlist_t **props = lua_touserdata(state, 1);

    if (*props != NULL)
        fnvlist_free(*props);
    return (0);
}
static int
zcp_synctask_wrapper(lua_State *state)
{
    int err;
    zcp_cleanup_handler_t *zch;
    int num_ret = 1;
    nvlist_t *err_details = fnvlist_alloc();

    /*
     * Make sure err_details is properly freed, even if a fatal error is
     * thrown during the synctask.
     */
    zch = zcp_register_cleanup(state,
        (zcp_cleanup_t *)&fnvlist_free, err_details);

    zcp_synctask_info_t *info = lua_touserdata(state, lua_upvalueindex(1));
    boolean_t sync = lua_toboolean(state, lua_upvalueindex(2));

    zcp_run_info_t *ri = zcp_run_info(state);
    dsl_pool_t *dp = ri->zri_pool;

    /* MOS space is triple-dittoed, so we multiply by 3. */
    uint64_t funcspace = (info->blocks_modified << DST_AVG_BLKSHIFT) * 3;

    zcp_parse_args(state, info->name, info->pargs, info->kwargs);

    err = 0;
    if (info->space_check != ZFS_SPACE_CHECK_NONE && funcspace > 0) {
        uint64_t quota = dsl_pool_adjustedsize(dp,
            info->space_check == ZFS_SPACE_CHECK_RESERVED) -
            metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
        uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes +
            ri->zri_space_used;

        if (used + funcspace > quota) {
            err = SET_ERROR(ENOSPC);
        }
    }

    if (err == 0) {
        err = info->func(state, sync, err_details);
    }

    if (err == 0) {
        ri->zri_space_used += funcspace;
    }

    lua_pushnumber(state, (lua_Number)err);
    if (fnvlist_num_pairs(err_details) > 0) {
        (void) zcp_nvlist_to_lua(state, err_details, NULL, 0);
        num_ret++;
    }

    zcp_deregister_cleanup(state, zch);
    fnvlist_free(err_details);

    return (num_ret);
}
/*
 * Retrieve the list of user holds on the specified snapshot.
 *
 * On success, *holdsp will be set to an nvlist which the caller must free.
 * The keys are the names of the holds, and the value is the creation time
 * of the hold (uint64) in seconds since the epoch.
 */
int
lzc_get_holds(const char *snapname, nvlist_t **holdsp)
{
    int error;
    nvlist_t *innvl = fnvlist_alloc();

    error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);

    fnvlist_free(innvl);

    return (error);
}
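/*
 * Illustrative sketch (not from the source): consuming the nvlist returned
 * by lzc_get_holds(), where each pair is holdname -> creation time.  The
 * helper name is hypothetical.
 */
#include <stdio.h>
#include <libnvpair.h>
#include <libzfs_core.h>

static int
print_holds_example(const char *snapname)
{
    nvlist_t *holds = NULL;
    int err = lzc_get_holds(snapname, &holds);

    if (err != 0)
        return (err);
    for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
        pair = nvlist_next_nvpair(holds, pair)) {
        (void) printf("hold '%s' created at %llu\n", nvpair_name(pair),
            (unsigned long long)fnvpair_value_uint64(pair));
    }
    fnvlist_free(holds);
    return (0);
}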
/*
 * Create a redaction bookmark named bookname by redacting snapshot with
 * respect to all the snapshots in snapnv.
 */
int
lzc_redact(const char *snapshot, const char *bookname, nvlist_t *snapnv)
{
    nvlist_t *args = fnvlist_alloc();

    fnvlist_add_string(args, "bookname", bookname);
    fnvlist_add_nvlist(args, "snapnv", snapnv);
    int error = lzc_ioctl(ZFS_IOC_REDACT, snapshot, args, NULL);
    fnvlist_free(args);
    return (error);
}
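/*
 * Illustrative sketch (not from the source): building the snapnv argument
 * for lzc_redact().  It is assumed here that snapnv is a list of snapshot
 * names added as boolean entries; all names are placeholders.
 */
#include <libnvpair.h>
#include <libzfs_core.h>

static int
redact_example(void)
{
    nvlist_t *snaps = fnvlist_alloc();
    int err;

    fnvlist_add_boolean(snaps, "pool/fs@send_to_alice");
    fnvlist_add_boolean(snaps, "pool/fs@send_to_bob");
    err = lzc_redact("pool/fs@snap", "redact_book", snaps);
    fnvlist_free(snaps);
    return (err);
}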
/*
 * Write out a history event.
 */
int
spa_history_log(spa_t *spa, const char *msg)
{
    int err;
    nvlist_t *nvl = fnvlist_alloc();

    fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
    err = spa_history_log_nvl(spa, nvl);
    fnvlist_free(nvl);
    return (err);
}
/*
 * Get bookmark properties.
 *
 * Given a bookmark's full name, retrieve all properties for the bookmark.
 *
 * The format of the returned property list is as follows:
 * {
 *     <name of property> -> {
 *         "value" -> uint64
 *     }
 *     ...
 *     "redact_snaps" -> {
 *         "value" -> uint64 array
 *     }
 * }
 */
int
lzc_get_bookmark_props(const char *bookmark, nvlist_t **props)
{
    int error;
    nvlist_t *innvl = fnvlist_alloc();

    error = lzc_ioctl(ZFS_IOC_GET_BOOKMARK_PROPS, bookmark, innvl, props);
    fnvlist_free(innvl);

    return (error);
}
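/*
 * Illustrative sketch (not from the source): reading one property out of
 * the nested nvlist format documented above.  The "guid"/"value" keys
 * follow that documented layout; the helper name is hypothetical.
 */
#include <stdio.h>
#include <libnvpair.h>
#include <libzfs_core.h>

static int
print_bookmark_guid_example(const char *bookmark)
{
    nvlist_t *props = NULL;
    int err = lzc_get_bookmark_props(bookmark, &props);

    if (err != 0)
        return (err);
    if (nvlist_exists(props, "guid")) {
        nvlist_t *prop = fnvlist_lookup_nvlist(props, "guid");
        (void) printf("%s guid=%llu\n", bookmark,
            (unsigned long long)fnvlist_lookup_uint64(prop, "value"));
    }
    fnvlist_free(props);
    return (0);
}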
int
dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
{
    int err = 0;
    zap_cursor_t zc;
    zap_attribute_t attr;
    dsl_pool_t *dp = ds->ds_dir->dd_pool;

    uint64_t bmark_zapobj = ds->ds_bookmarks;
    if (bmark_zapobj == 0)
        return (0);

    for (zap_cursor_init(&zc, dp->dp_meta_objset, bmark_zapobj);
        zap_cursor_retrieve(&zc, &attr) == 0; zap_cursor_advance(&zc)) {
        nvlist_t *out_props;
        char *bmark_name = attr.za_name;
        zfs_bookmark_phys_t bmark_phys = { 0 };

        err = dsl_dataset_bmark_lookup(ds, bmark_name, &bmark_phys);
        ASSERT3U(err, !=, ENOENT);
        if (err != 0)
            break;

        out_props = fnvlist_alloc();
        if (nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_GUID))) {
            dsl_prop_nvlist_add_uint64(out_props,
                ZFS_PROP_GUID, bmark_phys.zbm_guid);
        }
        if (nvlist_exists(props,
            zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
            dsl_prop_nvlist_add_uint64(out_props,
                ZFS_PROP_CREATETXG, bmark_phys.zbm_creation_txg);
        }
        if (nvlist_exists(props,
            zfs_prop_to_name(ZFS_PROP_CREATION))) {
            dsl_prop_nvlist_add_uint64(out_props,
                ZFS_PROP_CREATION, bmark_phys.zbm_creation_time);
        }
        if (nvlist_exists(props,
            zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
            dsl_prop_nvlist_add_uint64(out_props,
                ZFS_PROP_IVSET_GUID, bmark_phys.zbm_ivset_guid);
        }

        fnvlist_add_nvlist(outnvl, bmark_name, out_props);
        fnvlist_free(out_props);
    }
    zap_cursor_fini(&zc);
    return (err);
}
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
    int err;
    char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
    nvlist_t *snaps = fnvlist_alloc();

    fnvlist_add_boolean(snaps, longsnap);
    strfree(longsnap);
    err = dsl_dataset_snapshot(snaps, NULL, NULL);
    fnvlist_free(snaps);
    return (err);
}
/*
 * Write out a history event.
 */
int
spa_history_log(spa_t *spa, const char *msg)
{
    int err;
    nvlist_t *nvl;

    VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE));

    fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
    err = spa_history_log_nvl(spa, nvl);
    fnvlist_free(nvl);
    return (err);
}
static void
dsl_dataset_user_hold_sync_one_impl(nvlist_t *tmpholds, dsl_dataset_t *ds,
    const char *htag, minor_t minor, uint64_t now, dmu_tx_t *tx)
{
    dsl_pool_t *dp = ds->ds_dir->dd_pool;
    objset_t *mos = dp->dp_meta_objset;
    uint64_t zapobj;

    ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

    if (ds->ds_phys->ds_userrefs_obj == 0) {
        /*
         * This is the first user hold for this dataset.  Create
         * the userrefs zap object.
         */
        dmu_buf_will_dirty(ds->ds_dbuf, tx);
        zapobj = ds->ds_phys->ds_userrefs_obj =
            zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
    } else {
        zapobj = ds->ds_phys->ds_userrefs_obj;
    }
    ds->ds_userrefs++;

    VERIFY0(zap_add(mos, zapobj, htag, 8, 1, &now, tx));

    if (minor != 0) {
        char name[MAXNAMELEN];
        nvlist_t *tags;

        VERIFY0(dsl_pool_user_hold(dp, ds->ds_object, htag, now, tx));
        (void) snprintf(name, sizeof (name), "%llx",
            (u_longlong_t)ds->ds_object);

        if (nvlist_lookup_nvlist(tmpholds, name, &tags) != 0) {
            VERIFY0(nvlist_alloc(&tags, NV_UNIQUE_NAME,
                KM_PUSHPAGE));
            fnvlist_add_boolean(tags, htag);
            fnvlist_add_nvlist(tmpholds, name, tags);
            fnvlist_free(tags);
        } else {
            fnvlist_add_boolean(tags, htag);
        }
    }

    spa_history_log_internal_ds(ds, "hold", tx,
        "tag=%s temp=%d refs=%llu",
        htag, minor != 0, ds->ds_userrefs);
}
/*
 * Convert a value from the given index into the lua stack to an nvpair,
 * adding it to an nvlist with the given key.
 *
 * Values are converted as follows:
 *
 *   string -> string
 *   number -> int64
 *   boolean -> boolean
 *   nil -> boolean (no value)
 *
 * Lua tables are converted to nvlists and then inserted.  The table's keys
 * are converted to strings then used as keys in the nvlist to store each
 * table element.  Keys are converted as follows:
 *
 *   string -> no change
 *   number -> "%lld"
 *   boolean -> "true" | "false"
 *   nil -> error
 *
 * In the case of a key collision, an error is thrown.
 *
 * If an error is encountered, a nonzero error code is returned, and an error
 * string will be pushed onto the Lua stack.
 */
static int
zcp_lua_to_nvlist_impl(lua_State *state, int index, nvlist_t *nvl,
    const char *key, int depth)
{
    /*
     * Verify that we have enough remaining space in the lua stack to parse
     * a key-value pair and push an error.
     */
    if (!lua_checkstack(state, 3)) {
        (void) lua_pushstring(state, "Lua stack overflow");
        return (1);
    }

    index = lua_absindex(state, index);

    switch (lua_type(state, index)) {
    case LUA_TNIL:
        fnvlist_add_boolean(nvl, key);
        break;
    case LUA_TBOOLEAN:
        fnvlist_add_boolean_value(nvl, key,
            lua_toboolean(state, index));
        break;
    case LUA_TNUMBER:
        fnvlist_add_int64(nvl, key, lua_tonumber(state, index));
        break;
    case LUA_TSTRING:
        fnvlist_add_string(nvl, key, lua_tostring(state, index));
        break;
    case LUA_TTABLE: {
        nvlist_t *value_nvl = zcp_table_to_nvlist(state, index, depth);
        if (value_nvl == NULL)
            return (EINVAL);

        fnvlist_add_nvlist(nvl, key, value_nvl);
        fnvlist_free(value_nvl);
        break;
    }
    default:
        (void) lua_pushfstring(state,
            "Invalid value type '%s' for key '%s'",
            lua_typename(state, lua_type(state, index)), key);
        return (EINVAL);
    }

    return (0);
}
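/*
 * Illustrative sketch (not from the source): the nvlist that the
 * conversion rules above would produce for the Lua table
 * {ds = "tank/fs", count = 3, ok = true}, built by hand for comparison.
 */
#include <libnvpair.h>

static nvlist_t *
example_converted_table(void)
{
    nvlist_t *nvl = fnvlist_alloc();

    fnvlist_add_string(nvl, "ds", "tank/fs");       /* string -> string */
    fnvlist_add_int64(nvl, "count", 3);             /* number -> int64 */
    fnvlist_add_boolean_value(nvl, "ok", B_TRUE);   /* boolean -> boolean */
    return (nvl);
}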
/*
 * The bookmarks must all be in the same pool.
 */
int
dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
{
    int rv;
    dsl_bookmark_destroy_arg_t dbda;
    nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);

    if (pair == NULL)
        return (0);

    dbda.dbda_bmarks = bmarks;
    dbda.dbda_errors = errors;
    dbda.dbda_success = fnvlist_alloc();

    rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
        dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks));
    fnvlist_free(dbda.dbda_success);
    return (rv);
}
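/*
 * Illustrative sketch (not from the source): the userland path into this
 * code is assumed here to go through lzc_destroy_bookmarks(), which takes
 * full bookmark names as boolean entries.  The names below are placeholders.
 */
#include <libnvpair.h>
#include <libzfs_core.h>

static int
destroy_bookmarks_example(void)
{
    nvlist_t *bmarks = fnvlist_alloc();
    nvlist_t *errors = NULL;
    int err;

    fnvlist_add_boolean(bmarks, "pool/fs#stale1");
    fnvlist_add_boolean(bmarks, "pool/fs#stale2");
    err = lzc_destroy_bookmarks(bmarks, &errors);
    fnvlist_free(bmarks);
    fnvlist_free(errors);
    return (err);
}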
static int
lzc_channel_program_impl(const char *pool, const char *program, boolean_t sync,
    uint64_t instrlimit, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
    int error;
    nvlist_t *args;

    args = fnvlist_alloc();
    fnvlist_add_string(args, ZCP_ARG_PROGRAM, program);
    fnvlist_add_nvlist(args, ZCP_ARG_ARGLIST, argnvl);
    fnvlist_add_boolean_value(args, ZCP_ARG_SYNC, sync);
    fnvlist_add_uint64(args, ZCP_ARG_INSTRLIMIT, instrlimit);
    fnvlist_add_uint64(args, ZCP_ARG_MEMLIMIT, memlimit);
    error = lzc_ioctl(ZFS_IOC_CHANNEL_PROGRAM, pool, args, outnvl);
    fnvlist_free(args);

    return (error);
}
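/*
 * Illustrative sketch (not from the source): invoking a small channel
 * program through the public lzc_channel_program() wrapper.  The pool name,
 * the Lua script, and the way the script reads its argument table are
 * assumptions for the example.
 */
#include <libnvpair.h>
#include <libzfs_core.h>

static int
channel_program_example(void)
{
    const char *prog =
        "args = ...\n"
        "return {greeting = 'hello, ' .. args['name']}";
    nvlist_t *argnvl = fnvlist_alloc();
    nvlist_t *outnvl = NULL;
    int err;

    fnvlist_add_string(argnvl, "name", "world");
    err = lzc_channel_program("tank", prog,
        10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
    if (err == 0)
        dump_nvlist(outnvl, 4);
    fnvlist_free(argnvl);
    fnvlist_free(outnvl);
    return (err);
}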
static void
dsl_onexit_hold_cleanup(spa_t *spa, nvlist_t *holds, minor_t minor)
{
    zfs_hold_cleanup_arg_t *ca;

    if (minor == 0 || nvlist_empty(holds)) {
        fnvlist_free(holds);
        return;
    }

    ASSERT(spa != NULL);
    ca = kmem_alloc(sizeof (*ca), KM_SLEEP);

    (void) strlcpy(ca->zhca_spaname, spa_name(spa),
        sizeof (ca->zhca_spaname));
    ca->zhca_spa_load_guid = spa_load_guid(spa);
    ca->zhca_holds = holds;
    VERIFY0(zfs_onexit_add_cb(minor,
        dsl_dataset_user_release_onexit, ca, NULL));
}
/*
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * If any release fails, all will fail.
 */
int
dsl_dataset_user_release(nvlist_t *holds, nvlist_t *errlist)
{
    dsl_dataset_user_release_arg_t ddura;
    nvpair_t *pair;
    int error;

    pair = nvlist_next_nvpair(holds, NULL);
    if (pair == NULL)
        return (0);

    ddura.ddura_holds = holds;
    ddura.ddura_errlist = errlist;
    ddura.ddura_todelete = fnvlist_alloc();

    error = dsl_sync_task(nvpair_name(pair),
        dsl_dataset_user_release_check,
        dsl_dataset_user_release_sync, &ddura,
        fnvlist_num_pairs(holds));
    fnvlist_free(ddura.ddura_todelete);
    return (error);
}
/*
 * The full semantics of this function are described in the comment above
 * lzc_hold().
 *
 * To summarize:
 * holds is nvl of snapname -> holdname
 * errlist will be filled in with snapname -> error
 *
 * The snapshots must all be in the same pool.
 *
 * Holds for snapshots that don't exist will be skipped.
 *
 * If none of the snapshots for the requested holds exist then ENOENT will
 * be returned.
 *
 * If cleanup_minor is not 0, the holds will be temporary and will be cleaned
 * up when the process exits.
 *
 * On success all the holds, for snapshots that existed, will be created and 0
 * will be returned.
 *
 * On failure no holds will be created, the errlist will be filled in,
 * and an errno will be returned.
 *
 * In all cases the errlist will contain entries for holds where the snapshot
 * didn't exist.
 */
int
dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
{
    dsl_dataset_user_hold_arg_t dduha;
    nvpair_t *pair;
    int ret;

    pair = nvlist_next_nvpair(holds, NULL);
    if (pair == NULL)
        return (0);

    dduha.dduha_holds = holds;
    dduha.dduha_chkholds = fnvlist_alloc();
    dduha.dduha_errlist = errlist;
    dduha.dduha_minor = cleanup_minor;

    ret = dsl_sync_task(nvpair_name(pair), dsl_dataset_user_hold_check,
        dsl_dataset_user_hold_sync, &dduha,
        fnvlist_num_pairs(holds));
    fnvlist_free(dduha.dduha_chkholds);

    return (ret);
}
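/*
 * Illustrative sketch (not from the source): the userland interface that
 * the comment above refers to is lzc_hold(); its holds nvlist maps
 * snapshot name -> hold name.  A cleanup_fd of -1 requests a permanent
 * (non-temporary) hold.  Names are placeholders.
 */
#include <libnvpair.h>
#include <libzfs_core.h>

static int
hold_example(void)
{
    nvlist_t *holds = fnvlist_alloc();
    nvlist_t *errlist = NULL;
    int err;

    fnvlist_add_string(holds, "pool/fs@snap", "my-hold");
    err = lzc_hold(holds, -1, &errlist);
    fnvlist_free(holds);
    fnvlist_free(errlist);
    return (err);
}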
/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
    char *msg;
    va_list adx1;
    int size;

    /*
     * If this is part of creating a pool, not everything is
     * initialized yet, so don't bother logging the internal events.
     * Likewise if the pool is not writeable.
     */
    if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
        fnvlist_free(nvl);
        return;
    }

    va_copy(adx1, adx);
    size = vsnprintf(NULL, 0, fmt, adx1) + 1;
    msg = kmem_alloc(size, KM_PUSHPAGE);
    va_end(adx1);
    va_copy(adx1, adx);
    (void) vsprintf(msg, fmt, adx1);
    va_end(adx1);
    fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
    kmem_free(msg, size);

    fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
    fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);

    if (dmu_tx_is_syncing(tx)) {
        spa_history_log_sync(nvl, tx);
    } else {
        dsl_sync_task_nowait(spa_get_dsl(spa),
            spa_history_log_sync, nvl, 0, tx);
    }
    /* spa_history_log_sync() will free nvl */
}
static void
test(const char *testname, boolean_t expect_success, boolean_t expect_match)
{
    char *progstr = "input = ...; return {output=input}";

    nvlist_t *outnvl;

    (void) printf("\nrunning test '%s'; input:\n", testname);
    dump_nvlist(nvl, 4);

    int err = lzc_channel_program(pool, progstr,
        10 * 1000 * 1000, 10 * 1024 * 1024, nvl, &outnvl);

    (void) printf("lzc_channel_program returned %u\n", err);
    dump_nvlist(outnvl, 5);

    if (err == 0 && expect_match) {
        /*
         * Verify that outnvl is the same as input nvl, if we expect
         * them to be.  The input and output will never match if the
         * input contains an array (since arrays are converted to lua
         * tables), so this is only asserted for some test cases.
         */
        nvlist_t *real_outnvl = fnvlist_lookup_nvlist(outnvl, "return");
        real_outnvl = fnvlist_lookup_nvlist(real_outnvl, "output");
        if (!nvlist_equal(nvl, real_outnvl)) {
            unexpected_failures = B_TRUE;
            (void) printf("unexpected input/output mismatch for "
                "case: %s\n", testname);
        }
    }
    if (err != 0 && expect_success) {
        unexpected_failures = B_TRUE;
        (void) printf("unexpected FAIL of case: %s\n", testname);
    }

    fnvlist_free(nvl);
    nvl = fnvlist_alloc();
}
/* ARGSUSED */
static int
zcp_synctask_snapshot(lua_State *state, boolean_t sync, nvlist_t *err_details)
{
    int err;
    dsl_dataset_snapshot_arg_t ddsa = { 0 };
    const char *dsname = lua_tostring(state, 1);
    zcp_run_info_t *ri = zcp_run_info(state);

    /*
     * On old pools, the ZIL must not be active when a snapshot is created,
     * but we can't suspend the ZIL because we're already in syncing
     * context.
     */
    if (spa_version(ri->zri_pool->dp_spa) < SPA_VERSION_FAST_SNAP) {
        return (ENOTSUP);
    }

    /*
     * We only allow for a single snapshot rather than a list, so the
     * error list output is unnecessary.
     */
    ddsa.ddsa_errors = NULL;
    ddsa.ddsa_props = NULL;
    ddsa.ddsa_cr = ri->zri_cred;
    ddsa.ddsa_snaps = fnvlist_alloc();
    fnvlist_add_boolean(ddsa.ddsa_snaps, dsname);

    zcp_cleanup_handler_t *zch = zcp_register_cleanup(state,
        (zcp_cleanup_t *)&fnvlist_free, ddsa.ddsa_snaps);

    err = zcp_sync_task(state, dsl_dataset_snapshot_check,
        dsl_dataset_snapshot_sync, &ddsa, sync, dsname);

    zcp_deregister_cleanup(state, zch);
    fnvlist_free(ddsa.ddsa_snaps);

    return (err);
}
/*ARGSUSED*/
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
    nvlist_t *nvl = arg;
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;
    objset_t *mos = spa->spa_meta_objset;
    dmu_buf_t *dbp;
    spa_history_phys_t *shpp;
    size_t reclen;
    uint64_t le_len;
    char *record_packed = NULL;
    int ret;

    /*
     * If we have an older pool that doesn't have a command
     * history object, create it now.
     */
    mutex_enter(&spa->spa_history_lock);
    if (!spa->spa_history)
        spa_history_create_obj(spa, tx);
    mutex_exit(&spa->spa_history_lock);

    /*
     * Get the offset of where we need to write via the bonus buffer.
     * Update the offset when the write completes.
     */
    VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
    shpp = dbp->db_data;

    dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
    {
        dmu_object_info_t doi;
        dmu_object_info_from_db(dbp, &doi);
        ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
    }
#endif

    fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());
#ifdef _KERNEL
    fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname.nodename);
#endif
    if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
        zfs_dbgmsg("command: %s",
            fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
    } else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
        if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
            zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
                fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
                fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
                fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
                fnvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID),
                fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
        } else {
            zfs_dbgmsg("txg %lld %s %s",
                fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
                fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
                fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
        }
    } else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
        zfs_dbgmsg("ioctl %s",
            fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
    }

    record_packed = fnvlist_pack(nvl, &reclen);

    mutex_enter(&spa->spa_history_lock);

    /* write out the packed length as little endian */
    le_len = LE_64((uint64_t)reclen);
    ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
    if (!ret)
        ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

    /* The first command is the create, which we keep forever */
    if (ret == 0 && shpp->sh_pool_create_len == 0 &&
        nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
        shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
    }

    mutex_exit(&spa->spa_history_lock);
    fnvlist_pack_free(record_packed, reclen);
    dmu_buf_rele(dbp, FTAG);
    fnvlist_free(nvl);
}
/*
 * The full semantics of this function are described in the comment above
 * lzc_release().
 *
 * To summarize:
 * Releases holds specified in the nvl holds.
 *
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL the names for holds should be the dsobj's of
 * snapshots, otherwise they should be the names of snapshots.
 *
 * As a release may cause snapshots to be destroyed this tries to ensure they
 * aren't mounted.
 *
 * Releases of non-existent holds are skipped.
 *
 * At least one hold must have been released for this function to succeed
 * and return 0.
 */
static int
dsl_dataset_user_release_impl(nvlist_t *holds, nvlist_t *errlist,
    dsl_pool_t *tmpdp)
{
    dsl_dataset_user_release_arg_t ddura;
    nvpair_t *pair;
    char *pool;
    int error;

    pair = nvlist_next_nvpair(holds, NULL);
    if (pair == NULL)
        return (0);

    /*
     * The release may cause snapshots to be destroyed; make sure they
     * are not mounted.
     */
    if (tmpdp != NULL) {
        /* Temporary holds are specified by dsobj string. */
        ddura.ddura_holdfunc = dsl_dataset_hold_obj_string;
        pool = spa_name(tmpdp->dp_spa);
#ifdef _KERNEL
        for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
            pair = nvlist_next_nvpair(holds, pair)) {
            dsl_dataset_t *ds;

            dsl_pool_config_enter(tmpdp, FTAG);
            error = dsl_dataset_hold_obj_string(tmpdp,
                nvpair_name(pair), FTAG, &ds);
            if (error == 0) {
                char name[MAXNAMELEN];
                dsl_dataset_name(ds, name);
                dsl_pool_config_exit(tmpdp, FTAG);
                dsl_dataset_rele(ds, FTAG);
                (void) zfs_unmount_snap(name);
            } else {
                dsl_pool_config_exit(tmpdp, FTAG);
            }
        }
#endif
    } else {
        /* Non-temporary holds are specified by name. */
        ddura.ddura_holdfunc = dsl_dataset_hold;
        pool = nvpair_name(pair);
#ifdef _KERNEL
        for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
            pair = nvlist_next_nvpair(holds, pair)) {
            (void) zfs_unmount_snap(nvpair_name(pair));
        }
#endif
    }

    ddura.ddura_holds = holds;
    ddura.ddura_errlist = errlist;
    ddura.ddura_todelete = fnvlist_alloc();
    ddura.ddura_chkholds = fnvlist_alloc();

    error = dsl_sync_task(pool, dsl_dataset_user_release_check,
        dsl_dataset_user_release_sync, &ddura, 0);
    fnvlist_free(ddura.ddura_todelete);
    fnvlist_free(ddura.ddura_chkholds);

    return (error);
}
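/*
 * Illustrative sketch (not from the source): the userland interface that
 * the comment above refers to is lzc_release(); its holds nvlist maps
 * snapshot name -> { hold name, ... }.  Names are placeholders.
 */
#include <libnvpair.h>
#include <libzfs_core.h>

static int
release_example(void)
{
    nvlist_t *holds = fnvlist_alloc();
    nvlist_t *tags = fnvlist_alloc();
    nvlist_t *errlist = NULL;
    int err;

    fnvlist_add_boolean(tags, "my-hold");
    fnvlist_add_nvlist(holds, "pool/fs@snap", tags);
    fnvlist_free(tags);
    err = lzc_release(holds, &errlist);
    fnvlist_free(holds);
    fnvlist_free(errlist);
    return (err);
}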
static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
    uint64_t zapobj;
    nvlist_t *holds_found;
    objset_t *mos;
    int numholds;

    if (!dsl_dataset_is_snapshot(ds))
        return (SET_ERROR(EINVAL));

    if (nvlist_empty(holds))
        return (0);

    numholds = 0;
    mos = ds->ds_dir->dd_pool->dp_meta_objset;
    zapobj = ds->ds_phys->ds_userrefs_obj;
    holds_found = fnvlist_alloc();

    for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
        pair = nvlist_next_nvpair(holds, pair)) {
        uint64_t tmp;
        int error;
        const char *holdname = nvpair_name(pair);

        if (zapobj != 0)
            error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
        else
            error = SET_ERROR(ENOENT);

        /*
         * Non-existent holds are put on the errlist, but don't
         * cause an overall failure.
         */
        if (error == ENOENT) {
            if (ddura->ddura_errlist != NULL) {
                char *errtag = kmem_asprintf("%s#%s",
                    snapname, holdname);
                fnvlist_add_int32(ddura->ddura_errlist,
                    errtag, ENOENT);
                strfree(errtag);
            }
            continue;
        }

        if (error != 0) {
            fnvlist_free(holds_found);
            return (error);
        }

        fnvlist_add_boolean(holds_found, holdname);
        numholds++;
    }

    if (DS_IS_DEFER_DESTROY(ds) && ds->ds_phys->ds_num_children == 1 &&
        ds->ds_userrefs == numholds) {
        /* we need to destroy the snapshot as well */
        if (dsl_dataset_long_held(ds)) {
            fnvlist_free(holds_found);
            return (SET_ERROR(EBUSY));
        }
        fnvlist_add_boolean(ddura->ddura_todelete, snapname);
    }

    if (numholds != 0) {
        fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
            holds_found);
    }
    fnvlist_free(holds_found);

    return (0);
}
static void
run_tests(void)
{
    const char *key = "key";

    /* Note: maximum nvlist key length is 32KB */
    int len = 1024 * 31;
    char *bigstring = malloc(len);

    for (int i = 0; i < len; i++)
        bigstring[i] = 'a' + i % 26;
    bigstring[len - 1] = '\0';

    nvl = fnvlist_alloc();

    fnvlist_add_boolean(nvl, key);
    test("boolean", B_TRUE, B_FALSE);

    fnvlist_add_boolean_value(nvl, key, B_TRUE);
    test("boolean_value", B_FALSE, B_FALSE);

    fnvlist_add_byte(nvl, key, 1);
    test("byte", B_FALSE, B_FALSE);

    fnvlist_add_int8(nvl, key, 1);
    test("int8", B_FALSE, B_FALSE);

    fnvlist_add_uint8(nvl, key, 1);
    test("uint8", B_FALSE, B_FALSE);

    fnvlist_add_int16(nvl, key, 1);
    test("int16", B_FALSE, B_FALSE);

    fnvlist_add_uint16(nvl, key, 1);
    test("uint16", B_FALSE, B_FALSE);

    fnvlist_add_int32(nvl, key, 1);
    test("int32", B_FALSE, B_FALSE);

    fnvlist_add_uint32(nvl, key, 1);
    test("uint32", B_FALSE, B_FALSE);

    fnvlist_add_int64(nvl, key, 1);
    test("int64", B_TRUE, B_TRUE);

    fnvlist_add_uint64(nvl, key, 1);
    test("uint64", B_FALSE, B_FALSE);

    fnvlist_add_string(nvl, key, "1");
    test("string", B_TRUE, B_TRUE);

    {
        nvlist_t *val = fnvlist_alloc();
        fnvlist_add_string(val, "subkey", "subvalue");
        fnvlist_add_nvlist(nvl, key, val);
        fnvlist_free(val);
        test("nvlist", B_TRUE, B_TRUE);
    }
    {
        boolean_t val[2] = { B_FALSE, B_TRUE };
        fnvlist_add_boolean_array(nvl, key, val, 2);
        test("boolean_array", B_FALSE, B_FALSE);
    }
    {
        uchar_t val[2] = { 0, 1 };
        fnvlist_add_byte_array(nvl, key, val, 2);
        test("byte_array", B_FALSE, B_FALSE);
    }
    {
        int8_t val[2] = { 0, 1 };
        fnvlist_add_int8_array(nvl, key, val, 2);
        test("int8_array", B_FALSE, B_FALSE);
    }
    {
        uint8_t val[2] = { 0, 1 };
        fnvlist_add_uint8_array(nvl, key, val, 2);
        test("uint8_array", B_FALSE, B_FALSE);
    }
    {
        int16_t val[2] = { 0, 1 };
        fnvlist_add_int16_array(nvl, key, val, 2);
        test("int16_array", B_FALSE, B_FALSE);
    }
    {
        uint16_t val[2] = { 0, 1 };
        fnvlist_add_uint16_array(nvl, key, val, 2);
        test("uint16_array", B_FALSE, B_FALSE);
    }
    {
        int32_t val[2] = { 0, 1 };
        fnvlist_add_int32_array(nvl, key, val, 2);
        test("int32_array", B_FALSE, B_FALSE);
    }
    {
        uint32_t val[2] = { 0, 1 };
        fnvlist_add_uint32_array(nvl, key, val, 2);
        test("uint32_array", B_FALSE, B_FALSE);
    }
    {
        int64_t val[2] = { 0, 1 };
        fnvlist_add_int64_array(nvl, key, val, 2);
        test("int64_array", B_TRUE, B_FALSE);
    }
    {
        uint64_t val[2] = { 0, 1 };
        fnvlist_add_uint64_array(nvl, key, val, 2);
        test("uint64_array", B_FALSE, B_FALSE);
    }
    {
        char *const val[2] = { "0", "1" };
        fnvlist_add_string_array(nvl, key, val, 2);
        test("string_array", B_TRUE, B_FALSE);
    }
    {
        nvlist_t *val[2];
        val[0] = fnvlist_alloc();
        fnvlist_add_string(val[0], "subkey", "subvalue");
        val[1] = fnvlist_alloc();
        fnvlist_add_string(val[1], "subkey2", "subvalue2");
        fnvlist_add_nvlist_array(nvl, key, val, 2);
        fnvlist_free(val[0]);
        fnvlist_free(val[1]);
        test("nvlist_array", B_FALSE, B_FALSE);
    }
    {
        fnvlist_add_string(nvl, bigstring, "1");
        test("large_key", B_TRUE, B_TRUE);
    }
    {
        fnvlist_add_string(nvl, key, bigstring);
        test("large_value", B_TRUE, B_TRUE);
    }
    {
        for (int i = 0; i < 1024; i++) {
            char buf[32];
            (void) snprintf(buf, sizeof (buf), "key-%u", i);
            fnvlist_add_int64(nvl, buf, i);
        }
        test("many_keys", B_TRUE, B_TRUE);
    }
#ifndef __sparc__
    {
        for (int i = 0; i < 10; i++) {
            nvlist_t *newval = fnvlist_alloc();
            fnvlist_add_nvlist(newval, "key", nvl);
            fnvlist_free(nvl);
            nvl = newval;
        }
        test("deeply_nested_pos", B_TRUE, B_TRUE);
    }
    {
        for (int i = 0; i < 90; i++) {
            nvlist_t *newval = fnvlist_alloc();
            fnvlist_add_nvlist(newval, "key", nvl);
            fnvlist_free(nvl);
            nvl = newval;
        }
        test("deeply_nested_neg", B_FALSE, B_FALSE);
    }
#endif

    free(bigstring);
    fnvlist_free(nvl);
}