static int
zcp_clones_iter(lua_State *state)
{
        int err;
        char clonename[ZFS_MAX_DATASET_NAME_LEN];
        uint64_t dsobj = lua_tonumber(state, lua_upvalueindex(1));
        uint64_t cursor = lua_tonumber(state, lua_upvalueindex(2));
        dsl_pool_t *dp = zcp_run_info(state)->zri_pool;
        dsl_dataset_t *ds, *clone;
        zap_attribute_t za;
        zap_cursor_t zc;

        err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
        if (err == ENOENT) {
                return (0);
        } else if (err != 0) {
                return (luaL_error(state,
                    "unexpected error %d from dsl_dataset_hold_obj(dsobj)",
                    err));
        }

        if (dsl_dataset_phys(ds)->ds_next_clones_obj == 0) {
                dsl_dataset_rele(ds, FTAG);
                return (0);
        }

        zap_cursor_init_serialized(&zc, dp->dp_meta_objset,
            dsl_dataset_phys(ds)->ds_next_clones_obj, cursor);
        dsl_dataset_rele(ds, FTAG);

        err = zap_cursor_retrieve(&zc, &za);
        if (err != 0) {
                zap_cursor_fini(&zc);
                if (err != ENOENT) {
                        return (luaL_error(state,
                            "unexpected error %d from zap_cursor_retrieve()",
                            err));
                }
                return (0);
        }
        zap_cursor_advance(&zc);
        cursor = zap_cursor_serialize(&zc);
        zap_cursor_fini(&zc);

        err = dsl_dataset_hold_obj(dp, za.za_first_integer, FTAG, &clone);
        if (err != 0) {
                return (luaL_error(state, "unexpected error %d from "
                    "dsl_dataset_hold_obj(za_first_integer)", err));
        }

        dsl_dir_name(clone->ds_dir, clonename);
        dsl_dataset_rele(clone, FTAG);

        lua_pushnumber(state, cursor);
        lua_replace(state, lua_upvalueindex(2));
        (void) lua_pushstring(state, clonename);
        return (1);
}
static int
osd_index_it_load(const struct lu_env *env, const struct dt_it *di,
    __u64 hash)
{
        struct osd_zap_it *it = (struct osd_zap_it *)di;
        struct osd_object *obj = it->ozi_obj;
        struct osd_device *osd = osd_obj2dev(obj);
        zap_attribute_t *za = &osd_oti_get(env)->oti_za;
        int rc;
        ENTRY;

        /* close the current cursor */
        zap_cursor_fini(it->ozi_zc);

        /* create a new one starting at hash */
        memset(it->ozi_zc, 0, sizeof(*it->ozi_zc));
        zap_cursor_init_serialized(it->ozi_zc, osd->od_objset.os,
            obj->oo_db->db_object, hash);
        it->ozi_reset = 0;

        rc = -zap_cursor_retrieve(it->ozi_zc, za);
        if (rc == 0)
                RETURN(+1);
        else if (rc == -ENOENT)
                RETURN(0);

        RETURN(rc);
}
static void
dump_obj(objset_t *os, uint64_t obj, const char *name)
{
        zap_cursor_t zc;
        zap_attribute_t za;

        (void) printf("%s_obj:\n", name);

        for (zap_cursor_init(&zc, os, obj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                if (za.za_integer_length == 8) {
                        ASSERT(za.za_num_integers == 1);
                        (void) printf("\t%s = %llu\n",
                            za.za_name, (u_longlong_t)za.za_first_integer);
                } else {
                        ASSERT(za.za_integer_length == 1);
                        char val[1024];
                        VERIFY(zap_lookup(os, obj, za.za_name,
                            1, sizeof (val), val) == 0);
                        (void) printf("\t%s = %s\n", za.za_name, val);
                }
        }
        zap_cursor_fini(&zc);
}
/*
 * Iterate over the sets in the specified zapobj and load them into
 * the permsets avl tree.
 */
static int
dsl_load_sets(objset_t *mos, uint64_t zapobj,
    char type, char checkflag, void *valp, avl_tree_t *avl)
{
        zap_cursor_t zc;
        zap_attribute_t za;
        perm_set_t *permnode;
        avl_index_t idx;
        uint64_t jumpobj;
        int error;
        char whokey[ZFS_MAX_DELEG_NAME];

        zfs_deleg_whokey(whokey, type, checkflag, valp);

        error = zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj);
        if (error != 0)
                return (error);

        for (zap_cursor_init(&zc, mos, jumpobj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                permnode = kmem_alloc(sizeof (perm_set_t), KM_SLEEP);
                (void) strlcpy(permnode->p_setname, za.za_name,
                    sizeof (permnode->p_setname));
                permnode->p_matched = B_FALSE;

                if (avl_find(avl, permnode, &idx) == NULL) {
                        avl_insert(avl, permnode, idx);
                } else {
                        kmem_free(permnode, sizeof (perm_set_t));
                }
        }
        zap_cursor_fini(&zc);
        return (0);
}
/*
 * Read as many directory entry names as will fit into the provided buffer,
 * or when no buffer is provided calculate the required buffer size.
 */
int
zpl_xattr_readdir(struct inode *dxip, xattr_filldir_t *xf)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        int error;

        zap_cursor_init(&zc, ITOZSB(dxip)->z_os, ITOZ(dxip)->z_id);

        while ((error = -zap_cursor_retrieve(&zc, &zap)) == 0) {
                if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
                        error = -ENXIO;
                        break;
                }

                error = zpl_xattr_filldir(xf, zap.za_name,
                    strlen(zap.za_name));
                if (error)
                        break;

                zap_cursor_advance(&zc);
        }

        zap_cursor_fini(&zc);

        if (error == -ENOENT)
                error = 0;

        return (error);
}
static int
ddt_zap_walk(objset_t *os, uint64_t object, ddt_entry_t *dde, uint64_t *walk)
{
        zap_cursor_t zc;
        zap_attribute_t za;
        int error;

        zap_cursor_init_serialized(&zc, os, object, *walk);
        if ((error = zap_cursor_retrieve(&zc, &za)) == 0) {
                uchar_t cbuf[sizeof (dde->dde_phys) + 1];
                uint64_t csize = za.za_num_integers;

                ASSERT(za.za_integer_length == 1);
                error = zap_lookup_uint64(os, object, (uint64_t *)za.za_name,
                    DDT_KEY_WORDS, 1, csize, cbuf);
                ASSERT(error == 0);
                if (error == 0) {
                        ddt_decompress(cbuf, dde->dde_phys, csize,
                            sizeof (dde->dde_phys));
                        dde->dde_key = *(ddt_key_t *)za.za_name;
                }
                zap_cursor_advance(&zc);
                *walk = zap_cursor_serialize(&zc);
        }
        zap_cursor_fini(&zc);
        return (error);
}
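/*
 * Illustrative sketch, not part of the original source: ddt_zap_walk()
 * above returns one entry per call and hands back an opaque resume point
 * through *walk (a serialized ZAP cursor), so a caller can step through
 * the entire ZAP without keeping a cursor open between calls.  The driver
 * below is hypothetical (the name example_ddt_walk_all and the held
 * objset/object arguments are assumptions); the loop ends when
 * ddt_zap_walk() propagates ENOENT from zap_cursor_retrieve().
 */
static void
example_ddt_walk_all(objset_t *os, uint64_t ddt_object, ddt_entry_t *dde)
{
        uint64_t walk = 0;      /* serialized cursor position; 0 = start */

        while (ddt_zap_walk(os, ddt_object, dde, &walk) == 0) {
                /* one decompressed entry is now in dde->dde_phys */
        }
        /* ddt_zap_walk() returned ENOENT (end of ZAP) or a real error */
}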
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg,
    dmu_tx_t *tx)
{
        objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
        zap_cursor_t zc;
        zap_attribute_t za;

        /*
         * If it is the old version, dd_clones doesn't exist so we can't
         * find the clones, but dsl_deadlist_remove_key() is a no-op so it
         * doesn't matter.
         */
        if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
                return;

        for (zap_cursor_init(&zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                dsl_dataset_t *clone;

                VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
                    za.za_first_integer, FTAG, &clone));
                if (clone->ds_dir->dd_origin_txg > mintxg) {
                        dsl_deadlist_remove_key(&clone->ds_deadlist,
                            mintxg, tx);
                        dsl_dataset_remove_clones_key(clone, mintxg, tx);
                }
                dsl_dataset_rele(clone, FTAG);
        }
        zap_cursor_fini(&zc);
}
/*
 * Clean up any znodes that had no links when we either crashed or
 * (force) umounted the file system.
 */
void
zfs_unlinked_drain(zfs_sb_t *zsb)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        dmu_object_info_t doi;
        znode_t *zp;
        int error;

        /*
         * Iterate over the contents of the unlinked set.
         */
        for (zap_cursor_init(&zc, zsb->z_os, zsb->z_unlinkedobj);
            zap_cursor_retrieve(&zc, &zap) == 0;
            zap_cursor_advance(&zc)) {

                /*
                 * See what kind of object we have in list
                 */
                error = dmu_object_info(zsb->z_os,
                    zap.za_first_integer, &doi);
                if (error != 0)
                        continue;

                ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
                    (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
                /*
                 * We need to re-mark these list entries for deletion,
                 * so we pull them back into core and set zp->z_unlinked.
                 */
                error = zfs_zget(zsb, zap.za_first_integer, &zp);

                /*
                 * We may pick up znodes that are already marked for deletion.
                 * This could happen during the purge of an extended attribute
                 * directory.  All we need to do is skip over them, since they
                 * are already in the system marked z_unlinked.
                 */
                if (error != 0)
                        continue;

                zp->z_unlinked = B_TRUE;

                /*
                 * If this is an attribute directory, purge its contents.
                 */
                if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
                        /*
                         * We don't need to check the return value of
                         * zfs_purgedir here, because zfs_rmnode will just
                         * return this xattr directory to the unlinked set
                         * until all of its xattrs are gone.
                         */
                        (void) zfs_purgedir(zp);
                }

                iput(ZTOI(zp));
        }
        zap_cursor_fini(&zc);
}
int
dsl_dataset_get_holds(const char *dsname, nvlist_t *nvl)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(dsname, FTAG, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }

        if (ds->ds_phys->ds_userrefs_obj != 0) {
                zap_attribute_t *za;
                zap_cursor_t zc;

                za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
                for (zap_cursor_init(&zc,
                    ds->ds_dir->dd_pool->dp_meta_objset,
                    ds->ds_phys->ds_userrefs_obj);
                    zap_cursor_retrieve(&zc, za) == 0;
                    zap_cursor_advance(&zc)) {
                        fnvlist_add_uint64(nvl, za->za_name,
                            za->za_first_integer);
                }
                zap_cursor_fini(&zc);
                kmem_free(za, sizeof (zap_attribute_t));
        }
        dsl_dataset_rele(ds, FTAG);
        dsl_pool_rele(dp, FTAG);
        return (0);
}
/*
 * Delete the entire contents of a directory. Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contents is *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        znode_t *xzp;
        dmu_tx_t *tx;
        zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
        zfs_dirlock_t dl;
        int skipped = 0;
        int error;

        for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
            (error = zap_cursor_retrieve(&zc, &zap)) == 0;
            zap_cursor_advance(&zc)) {
                error = zfs_zget(zfsvfs,
                    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
                if (error) {
                        skipped += 1;
                        continue;
                }

                /*
                 * ASSERT((ZTOV(xzp)->v_type == VREG) ||
                 *     (ZTOV(xzp)->v_type == VLNK));
                 */

                tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
                dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
                /* Is this really needed ? */
                zfs_sa_upgrade_txholds(tx, xzp);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        /* Release the vnode asynchronously. */
                        VN_RELE_ASYNC(ZTOV(xzp),
                            dsl_pool_vnrele_taskq(
                            dmu_objset_pool(zfsvfs->z_os)));
                        skipped += 1;
                        continue;
                }
                bzero(&dl, sizeof (dl));
                dl.dl_dzp = dzp;
                dl.dl_name = zap.za_name;

                error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
                if (error)
                        skipped += 1;
                dmu_tx_commit(tx);

                /* Release the vnode asynchronously. */
                VN_RELE_ASYNC(ZTOV(xzp),
                    dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
        }
        zap_cursor_fini(&zc);
        if (error != ENOENT)
                skipped += 1;
        return (skipped);
}
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
        int error;
        zap_cursor_t zc;
        zap_attribute_t za;
        zfs_useracct_t *buf = vbuf;
        uint64_t obj;
        int offset = 0;

        if (!dmu_objset_userspace_present(zsb->z_os))
                return (SET_ERROR(ENOTSUP));

        if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
            type == ZFS_PROP_USEROBJQUOTA ||
            type == ZFS_PROP_GROUPOBJQUOTA) &&
            !dmu_objset_userobjspace_present(zsb->z_os))
                return (SET_ERROR(ENOTSUP));

        obj = zfs_userquota_prop_to_obj(zsb, type);
        if (obj == ZFS_NO_OBJECT) {
                *bufsizep = 0;
                return (0);
        }

        if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED)
                offset = DMU_OBJACCT_PREFIX_LEN;

        for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
            (error = zap_cursor_retrieve(&zc, &za)) == 0;
            zap_cursor_advance(&zc)) {
                if ((uintptr_t)buf - (uintptr_t)vbuf +
                    sizeof (zfs_useracct_t) > *bufsizep)
                        break;

                /*
                 * skip object quota (with zap name prefix DMU_OBJACCT_PREFIX)
                 * when dealing with block quota and vice versa.
                 */
                if ((offset > 0) != (strncmp(za.za_name, DMU_OBJACCT_PREFIX,
                    DMU_OBJACCT_PREFIX_LEN) == 0))
                        continue;

                fuidstr_to_sid(zsb, za.za_name + offset,
                    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

                buf->zu_space = za.za_first_integer;
                buf++;
        }
        if (error == ENOENT)
                error = 0;

        ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
        *bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
        *cookiep = zap_cursor_serialize(&zc);
        zap_cursor_fini(&zc);
        return (error);
}
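/*
 * Illustrative sketch, not part of the original source: zfs_userspace_many()
 * above is resumable -- *cookiep carries the serialized ZAP cursor between
 * calls and *bufsizep comes back as the number of bytes actually filled in.
 * The hypothetical caller below (example_dump_userused is an assumed name)
 * pages through all user-space accounting entries with a fixed-size buffer;
 * a batch of zero bytes signals that the ZAP has been exhausted.
 */
static int
example_dump_userused(zfs_sb_t *zsb)
{
        zfs_useracct_t buf[64];
        uint64_t cookie = 0;    /* 0 = start from the beginning */
        uint64_t bufsize;
        int error;

        do {
                bufsize = sizeof (buf);
                error = zfs_userspace_many(zsb, ZFS_PROP_USERUSED,
                    &cookie, buf, &bufsize);
                if (error != 0)
                        return (error);
                /* bufsize / sizeof (zfs_useracct_t) entries are valid */
        } while (bufsize > 0);

        return (0);
}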
/*
 * Special function to requeue the znodes for deletion that were
 * in progress when we either crashed or umounted the file system.
 *
 * returns 1 if queue was drained.
 */
static int
zfs_drain_dq(zfsvfs_t *zfsvfs)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        dmu_object_info_t doi;
        znode_t *zp;
        int error;

        /*
         * Iterate over the contents of the delete queue.
         */
        for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_dqueue);
            zap_cursor_retrieve(&zc, &zap) == 0;
            zap_cursor_advance(&zc)) {

                /*
                 * Create more threads if necessary to balance the load.
                 * quit if the delete threads have been shut down.
                 */
                if (zfs_delete_thread_target(zfsvfs, -1) != 0) {
                        /* close the cursor before bailing out */
                        zap_cursor_fini(&zc);
                        return (0);
                }

                /*
                 * See what kind of object we have in queue
                 */
                error = dmu_object_info(zfsvfs->z_os,
                    zap.za_first_integer, &doi);
                if (error != 0)
                        continue;

                ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
                    (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
                /*
                 * We need to re-mark these queue entries for reaping,
                 * so we pull them back into core and set zp->z_reap.
                 */
                error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);

                /*
                 * We may pick up znodes that are already marked for reaping.
                 * This could happen during the purge of an extended attribute
                 * directory.  All we need to do is skip over them, since they
                 * are already in the system to be processed by the delete
                 * thread(s).
                 */
                if (error != 0)
                        continue;

                zp->z_reap = 1;
                VN_RELE(ZTOV(zp));
        }
        zap_cursor_fini(&zc);
        return (1);
}
/*
 * Delete the entire contents of a directory. Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contents is *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        znode_t *xzp;
        dmu_tx_t *tx;
        zfsvfs_t *zfsvfs = ZTOZSB(dzp);
        zfs_dirlock_t dl;
        int skipped = 0;
        int error;

        for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
            (error = zap_cursor_retrieve(&zc, &zap)) == 0;
            zap_cursor_advance(&zc)) {
                error = zfs_zget(zfsvfs,
                    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
                if (error) {
                        skipped += 1;
                        continue;
                }

                ASSERT(S_ISREG(ZTOI(xzp)->i_mode) ||
                    S_ISLNK(ZTOI(xzp)->i_mode));

                tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
                dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
                /* Is this really needed ? */
                zfs_sa_upgrade_txholds(tx, xzp);
                dmu_tx_mark_netfree(tx);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        zfs_iput_async(ZTOI(xzp));
                        skipped += 1;
                        continue;
                }
                bzero(&dl, sizeof (dl));
                dl.dl_dzp = dzp;
                dl.dl_name = zap.za_name;

                error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
                if (error)
                        skipped += 1;
                dmu_tx_commit(tx);

                zfs_iput_async(ZTOI(xzp));
        }
        zap_cursor_fini(&zc);
        if (error != ENOENT)
                skipped += 1;
        return (skipped);
}
/*
 * Delete the entire contents of a directory. Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contents is *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        znode_t *xzp;
        dmu_tx_t *tx;
        zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
        int skipped = 0;
        int error;

        for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
            (error = zap_cursor_retrieve(&zc, &zap)) == 0;
            zap_cursor_advance(&zc)) {
                error = zfs_zget(zfsvfs,
                    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
                if (error) {
                        skipped += 1;
                        continue;
                }

                vn_lock(ZTOV(xzp), LK_EXCLUSIVE | LK_RETRY);
                ASSERT((ZTOV(xzp)->v_type == VREG) ||
                    (ZTOV(xzp)->v_type == VLNK));

                tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
                dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
                dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
                /* Is this really needed ? */
                zfs_sa_upgrade_txholds(tx, xzp);
                dmu_tx_mark_netfree(tx);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        vput(ZTOV(xzp));
                        skipped += 1;
                        continue;
                }

                error = zfs_link_destroy(dzp, zap.za_name, xzp, tx, 0, NULL);
                if (error)
                        skipped += 1;
                dmu_tx_commit(tx);

                vput(ZTOV(xzp));
        }
        zap_cursor_fini(&zc);
        if (error != ENOENT)
                skipped += 1;
        return (skipped);
}
/*
 * Delete the entire contents of a directory. Return a count
 * of the number of entries that could not be deleted. If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contents is *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        znode_t *xzp;
        dmu_tx_t *tx;
        zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
        zfs_dirlock_t dl;
        int skipped = 0;
        int error;

        for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
            (error = zap_cursor_retrieve(&zc, &zap)) == 0;
            zap_cursor_advance(&zc)) {
                error = zfs_zget(zfsvfs,
                    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
                if (error) {
                        skipped += 1;
                        continue;
                }

                ASSERT((ZTOV(xzp)->v_type == VREG) ||
                    (ZTOV(xzp)->v_type == VLNK));

                tx = dmu_tx_create(zfsvfs->z_os);
                dmu_tx_hold_bonus(tx, dzp->z_id);
                dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
                dmu_tx_hold_bonus(tx, xzp->z_id);
                dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        VN_RELE(ZTOV(xzp));
                        skipped += 1;
                        continue;
                }
                bzero(&dl, sizeof (dl));
                dl.dl_dzp = dzp;
                dl.dl_name = zap.za_name;

                error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
                if (error)
                        skipped += 1;
                dmu_tx_commit(tx);

                VN_RELE(ZTOV(xzp));
        }
        zap_cursor_fini(&zc);
        if (error != ENOENT)
                skipped += 1;
        return (skipped);
}
/*
 * Checks that the active features in the pool are supported by
 * this software.  Adds each unsupported feature (name -> description) to
 * the supplied nvlist.
 */
boolean_t
spa_features_check(spa_t *spa, boolean_t for_write,
    nvlist_t *unsup_feat, nvlist_t *enabled_feat)
{
        objset_t *os = spa->spa_meta_objset;
        boolean_t supported;
        zap_cursor_t *zc;
        zap_attribute_t *za;
        uint64_t obj = for_write ?
            spa->spa_feat_for_write_obj : spa->spa_feat_for_read_obj;
        char *buf;

        zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
        za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
        buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);

        supported = B_TRUE;
        for (zap_cursor_init(zc, os, obj);
            zap_cursor_retrieve(zc, za) == 0;
            zap_cursor_advance(zc)) {
                ASSERT(za->za_integer_length == sizeof (uint64_t) &&
                    za->za_num_integers == 1);

                if (NULL != enabled_feat) {
                        fnvlist_add_uint64(enabled_feat, za->za_name,
                            za->za_first_integer);
                }

                if (za->za_first_integer != 0 &&
                    !zfeature_is_supported(za->za_name)) {
                        supported = B_FALSE;

                        if (NULL != unsup_feat) {
                                char *desc = "";

                                if (zap_lookup(os, spa->spa_feat_desc_obj,
                                    za->za_name, 1, MAXPATHLEN, buf) == 0)
                                        desc = buf;

                                VERIFY(nvlist_add_string(unsup_feat,
                                    za->za_name, desc) == 0);
                        }
                }
        }
        zap_cursor_fini(zc);

        kmem_free(buf, MAXPATHLEN);
        kmem_free(za, sizeof (zap_attribute_t));
        kmem_free(zc, sizeof (zap_cursor_t));

        return (supported);
}
int
dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
{
        int err = 0;
        zap_cursor_t zc;
        zap_attribute_t attr;
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        uint64_t bmark_zapobj = ds->ds_bookmarks;

        if (bmark_zapobj == 0)
                return (0);

        for (zap_cursor_init(&zc, dp->dp_meta_objset, bmark_zapobj);
            zap_cursor_retrieve(&zc, &attr) == 0;
            zap_cursor_advance(&zc)) {
                nvlist_t *out_props;
                char *bmark_name = attr.za_name;
                zfs_bookmark_phys_t bmark_phys = { 0 };

                err = dsl_dataset_bmark_lookup(ds, bmark_name, &bmark_phys);
                ASSERT3U(err, !=, ENOENT);
                if (err != 0)
                        break;

                out_props = fnvlist_alloc();
                if (nvlist_exists(props, zfs_prop_to_name(ZFS_PROP_GUID))) {
                        dsl_prop_nvlist_add_uint64(out_props,
                            ZFS_PROP_GUID, bmark_phys.zbm_guid);
                }
                if (nvlist_exists(props,
                    zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
                        dsl_prop_nvlist_add_uint64(out_props,
                            ZFS_PROP_CREATETXG, bmark_phys.zbm_creation_txg);
                }
                if (nvlist_exists(props,
                    zfs_prop_to_name(ZFS_PROP_CREATION))) {
                        dsl_prop_nvlist_add_uint64(out_props,
                            ZFS_PROP_CREATION, bmark_phys.zbm_creation_time);
                }
                if (nvlist_exists(props,
                    zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
                        dsl_prop_nvlist_add_uint64(out_props,
                            ZFS_PROP_IVSET_GUID, bmark_phys.zbm_ivset_guid);
                }

                fnvlist_add_nvlist(outnvl, bmark_name, out_props);
                fnvlist_free(out_props);
        }
        zap_cursor_fini(&zc);
        return (err);
}
/*
 * Clean up any znodes that had no links when we either crashed or
 * (force) umounted the file system.
 */
void
zfs_unlinked_drain(zfsvfs_t *zfsvfs)
{
        zap_cursor_t zc;
        zap_attribute_t zap;
        dmu_object_info_t doi;
        znode_t *zp;
        int error;

        printf("ZFS: unlinked drain\n");

        /*
         * Iterate over the contents of the unlinked set.
         */
        for (zap_cursor_init(&zc, zfsvfs->z_os, zfsvfs->z_unlinkedobj);
            zap_cursor_retrieve(&zc, &zap) == 0;
            zap_cursor_advance(&zc)) {

                /*
                 * See what kind of object we have in list
                 */
                error = dmu_object_info(zfsvfs->z_os,
                    zap.za_first_integer, &doi);
                if (error != 0)
                        continue;

                ASSERT((doi.doi_type == DMU_OT_PLAIN_FILE_CONTENTS) ||
                    (doi.doi_type == DMU_OT_DIRECTORY_CONTENTS));
                /*
                 * We need to re-mark these list entries for deletion,
                 * so we pull them back into core and set zp->z_unlinked.
                 */
                error = zfs_zget(zfsvfs, zap.za_first_integer, &zp);

                /*
                 * We may pick up znodes that are already marked for deletion.
                 * This could happen during the purge of an extended attribute
                 * directory.  All we need to do is skip over them, since they
                 * are already in the system marked z_unlinked.
                 */
                if (error != 0)
                        continue;

                zp->z_unlinked = B_TRUE;
                VN_RELE(ZTOV(zp));
        }
        zap_cursor_fini(&zc);
        printf("ZFS: unlinked drain completed.\n");
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
        dsl_dataset_t *ds = os->os->os_dsl_dataset;
        zap_cursor_t cursor;
        zap_attribute_t attr;

        if (ds->ds_phys->ds_snapnames_zapobj == 0)
                return (ENOENT);

        zap_cursor_init_serialized(&cursor,
            ds->ds_dir->dd_pool->dp_meta_objset,
            ds->ds_phys->ds_snapnames_zapobj, *offp);

        if (zap_cursor_retrieve(&cursor, &attr) != 0) {
                zap_cursor_fini(&cursor);
                return (ENOENT);
        }

        if (strlen(attr.za_name) + 1 > namelen) {
                zap_cursor_fini(&cursor);
                return (ENAMETOOLONG);
        }

        (void) strcpy(name, attr.za_name);
        if (idp)
                *idp = attr.za_first_integer;
        if (case_conflict)
                *case_conflict = attr.za_normalization_conflict;
        zap_cursor_advance(&cursor);
        *offp = zap_cursor_serialize(&cursor);
        zap_cursor_fini(&cursor);

        return (0);
}
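/*
 * Illustrative sketch, not part of the original source: callers of
 * dmu_snapshot_list_next() above typically loop with a persistent *offp
 * cookie until the function returns ENOENT.  The enumeration loop below
 * is hypothetical (example_list_snapshots is an assumed name, and it
 * assumes the caller already holds the dataset's objset).
 */
static void
example_list_snapshots(objset_t *os)
{
        char name[MAXNAMELEN];
        uint64_t id, offset = 0;
        boolean_t case_conflict;

        while (dmu_snapshot_list_next(os, sizeof (name), name,
            &id, &offset, &case_conflict) == 0) {
                /* one snapshot name per iteration; id is its object # */
        }
        /* loop ends on ENOENT (no more snapshots) or ENAMETOOLONG */
}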
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
        dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
        zap_cursor_t cursor;
        zap_attribute_t attr;

        /* there is no next dir on a snapshot! */
        if (os->os->os_dsl_dataset->ds_object !=
            dd->dd_phys->dd_head_dataset_obj)
                return (ENOENT);

        zap_cursor_init_serialized(&cursor,
            dd->dd_pool->dp_meta_objset,
            dd->dd_phys->dd_child_dir_zapobj, *offp);

        if (zap_cursor_retrieve(&cursor, &attr) != 0) {
                zap_cursor_fini(&cursor);
                return (ENOENT);
        }

        if (strlen(attr.za_name) + 1 > namelen) {
                zap_cursor_fini(&cursor);
                return (ENAMETOOLONG);
        }

        (void) strcpy(name, attr.za_name);
        if (idp)
                *idp = attr.za_first_integer;
        zap_cursor_advance(&cursor);
        *offp = zap_cursor_serialize(&cursor);
        zap_cursor_fini(&cursor);

        return (0);
}
int
dsl_deleg_destroy(objset_t *mos, uint64_t zapobj, dmu_tx_t *tx)
{
        zap_cursor_t zc;
        zap_attribute_t za;

        if (zapobj == 0)
                return (0);

        for (zap_cursor_init(&zc, mos, zapobj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                ASSERT(za.za_integer_length == 8 && za.za_num_integers == 1);
                VERIFY(0 == zap_destroy(mos, za.za_first_integer, tx));
        }
        zap_cursor_fini(&zc);
        VERIFY(0 == zap_destroy(mos, zapobj, tx));
        return (0);
}
static void
copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
    boolean_t dosets, uint64_t uid, dmu_tx_t *tx)
{
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        uint64_t jumpobj, pjumpobj;
        uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj;
        zap_cursor_t zc;
        zap_attribute_t za;
        char whokey[ZFS_MAX_DELEG_NAME];

        zfs_deleg_whokey(whokey,
            dosets ? ZFS_DELEG_CREATE_SETS : ZFS_DELEG_CREATE,
            ZFS_DELEG_LOCAL, NULL);
        if (zap_lookup(mos, pzapobj, whokey, 8, 1, &pjumpobj) != 0)
                return;

        if (zapobj == 0) {
                dmu_buf_will_dirty(dd->dd_dbuf, tx);
                zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos,
                    DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
        }

        zfs_deleg_whokey(whokey,
            dosets ? ZFS_DELEG_USER_SETS : ZFS_DELEG_USER,
            ZFS_DELEG_LOCAL, &uid);
        if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) {
                jumpobj = zap_create(mos, DMU_OT_DSL_PERMS,
                    DMU_OT_NONE, 0, tx);
                VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0);
        }

        for (zap_cursor_init(&zc, mos, pjumpobj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                uint64_t zero = 0;
                ASSERT(za.za_integer_length == 8 && za.za_num_integers == 1);

                VERIFY(zap_update(mos, jumpobj, za.za_name,
                    8, 1, &zero, tx) == 0);
        }
        zap_cursor_fini(&zc);
}
/*
 * Checks that the features active in the specified object are supported by
 * this software.  Adds each unsupported feature (name -> description) to
 * the supplied nvlist.
 */
boolean_t
feature_is_supported(objset_t *os, uint64_t obj, uint64_t desc_obj,
    nvlist_t *unsup_feat, nvlist_t *enabled_feat)
{
        boolean_t supported;
        zap_cursor_t zc;
        zap_attribute_t za;

        supported = B_TRUE;
        for (zap_cursor_init(&zc, os, obj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                ASSERT(za.za_integer_length == sizeof (uint64_t) &&
                    za.za_num_integers == 1);

                if (NULL != enabled_feat) {
                        fnvlist_add_uint64(enabled_feat, za.za_name,
                            za.za_first_integer);
                }

                if (za.za_first_integer != 0 &&
                    !zfeature_is_supported(za.za_name)) {
                        supported = B_FALSE;

                        if (NULL != unsup_feat) {
                                char *desc = "";
                                char buf[MAXPATHLEN];

                                if (zap_lookup(os, desc_obj, za.za_name,
                                    1, sizeof (buf), buf) == 0)
                                        desc = buf;

                                VERIFY(nvlist_add_string(unsup_feat,
                                    za.za_name, desc) == 0);
                        }
                }
        }
        zap_cursor_fini(&zc);

        return (supported);
}
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
        int error;
        zap_cursor_t zc;
        zap_attribute_t za;
        zfs_useracct_t *buf = vbuf;
        uint64_t obj;

        if (!dmu_objset_userspace_present(zsb->z_os))
                return (SET_ERROR(ENOTSUP));

        obj = zfs_userquota_prop_to_obj(zsb, type);
        if (obj == 0) {
                *bufsizep = 0;
                return (0);
        }

        for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
            (error = zap_cursor_retrieve(&zc, &za)) == 0;
            zap_cursor_advance(&zc)) {
                if ((uintptr_t)buf - (uintptr_t)vbuf +
                    sizeof (zfs_useracct_t) > *bufsizep)
                        break;

                fuidstr_to_sid(zsb, za.za_name,
                    buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid);

                buf->zu_space = za.za_first_integer;
                buf++;
        }
        if (error == ENOENT)
                error = 0;

        ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
        *bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
        *cookiep = zap_cursor_serialize(&zc);
        zap_cursor_fini(&zc);
        return (error);
}
static int
osd_index_it_get(const struct lu_env *env, struct dt_it *di,
    const struct dt_key *key)
{
        struct osd_zap_it *it = (struct osd_zap_it *)di;
        struct osd_object *obj = it->ozi_obj;
        struct osd_device *osd = osd_obj2dev(obj);
        ENTRY;

        LASSERT(it);
        LASSERT(it->ozi_zc);

        /* XXX: API is broken at the moment */
        LASSERT(*((const __u64 *)key) == 0);

        zap_cursor_fini(it->ozi_zc);
        memset(it->ozi_zc, 0, sizeof(*it->ozi_zc));
        zap_cursor_init(it->ozi_zc, osd->od_objset.os,
            obj->oo_db->db_object);
        it->ozi_reset = 1;

        RETURN(+1);
}
static int
osd_index_it_get(const struct lu_env *env, struct dt_it *di,
    const struct dt_key *key)
{
        struct osd_zap_it *it = (struct osd_zap_it *)di;
        struct osd_object *obj = it->ozi_obj;
        struct osd_device *osd = osd_obj2dev(obj);
        ENTRY;

        LASSERT(it);
        LASSERT(it->ozi_zc);

        /*
         * XXX: we need a binary version of zap_cursor_move_to_key()
         * to implement this API
         */
        if (*((const __u64 *)key) != 0)
                CERROR("NOT IMPLEMENTED YET (move to %Lx)\n",
                    *((__u64 *)key));

        zap_cursor_fini(it->ozi_zc);
        memset(it->ozi_zc, 0, sizeof(*it->ozi_zc));
        zap_cursor_init(it->ozi_zc, osd->od_objset.os,
            obj->oo_db->db_object);
        it->ozi_reset = 1;

        RETURN(+1);
}
uint64_t
dmu_snapname_to_id(objset_t *os, const char *snapname)
{
        dsl_dataset_t *ds = os->os->os_dsl_dataset;
        zap_cursor_t cursor;
        zap_attribute_t attr;
        uint64_t id;

        if (ds->ds_phys->ds_snapnames_zapobj == 0)
                return (ENOENT);

        zap_cursor_init(&cursor,
            ds->ds_dir->dd_pool->dp_meta_objset,
            ds->ds_phys->ds_snapnames_zapobj);

        if (zap_cursor_move_to_key(&cursor, snapname, MT_EXACT) != 0) {
                /* no snapshot by name snapname */
                zap_cursor_fini(&cursor);
                return (0);
        }

        if (zap_cursor_retrieve(&cursor, &attr) != 0) {
                /* not returning ENOENT as that might be the id no */
                zap_cursor_fini(&cursor);
                return (0);
        }

        id = attr.za_first_integer;
        zap_cursor_fini(&cursor);
        return (id);
}
int
dsl_keychain_load_dd(dsl_dir_t *dd, uint64_t dsobj,
    int crypt, zcrypt_key_t *wrappingkey)
{
        zap_cursor_t zc;
        zap_attribute_t za;
        objset_t *mos = dd->dd_pool->dp_meta_objset;
        uint64_t keychain_zapobj = dsl_dir_phys(dd)->dd_keychain_obj;
        zcrypt_key_t *txgkey;
        zcrypt_keystore_node_t *skn;
        caddr_t wrappedkey;
        size_t wkeylen;
        spa_t *spa = dd->dd_pool->dp_spa;
        int unwrapped = 0, entries = 0;

        /*
         * Basic algorithm is to start with the ds_keychain_obj and
         * iterate using zap_cursor_*(), unwrapping the values (the
         * actual encryption keys) into zcrypt_key_t's and calling
         * zcrypt_keychain_insert() to put them into the dsl AVL
         * tree of keys.
         */
        zcrypt_key_hold(wrappingkey, FTAG);
        skn = zcrypt_keystore_insert(spa, dsobj, wrappingkey);
        ASSERT(skn != NULL);
        mutex_enter(&skn->skn_lock);
        for (zap_cursor_init(&zc, mos, keychain_zapobj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                entries++;
                wkeylen = za.za_num_integers;
                wrappedkey = kmem_alloc(wkeylen, KM_SLEEP);
                VERIFY3U(zap_lookup_uint64(mos, keychain_zapobj,
                    (uint64_t *)&za.za_name, 1, 1, za.za_num_integers,
                    wrappedkey), ==, 0);
                if (zcrypt_unwrap_key(wrappingkey, crypt,
                    wrappedkey, wkeylen, &txgkey) != 0) {
                        kmem_free(wrappedkey, wkeylen);
                        continue;
                }
                unwrapped++;
                kmem_free(wrappedkey, wkeylen);
                zcrypt_keychain_insert(&skn->skn_keychain,
                    *(uint64_t *)za.za_name, txgkey);
        }
        zap_cursor_fini(&zc);
        mutex_exit(&skn->skn_lock);
        zcrypt_key_release(wrappingkey, FTAG);

        if (entries > 0 && unwrapped == 0) {
                /* Wrong wrapping key passed */
                (void) zcrypt_keystore_remove(spa, dsobj);
                return (EACCES);
        }

        /*
         * If we didn't unwrap everything we have possible corruption.
         * If an attempt is ever made to decrypt blocks either we won't
         * find the key (ENOKEY) or we will use the wrong key, which
         * will result in the MAC failing to verify, so ECKSUM will be
         * set in zio->io_error, which will result in an ereport being
         * logged because the zio_read() failed.
         * When we are running DEBUG, ASSERT this instead.
         */
        ASSERT3U(entries, ==, unwrapped);

        return (0);
}
/*
 * dsl_crypto_key_change
 *
 * The old key must already be present in memory, since the user interface
 * doesn't provide a way to prompt for or retrieve the old key.
 */
static void
dsl_crypto_key_change_sync(void *arg1, dmu_tx_t *tx)
{
        struct wkey_change_arg *ca = arg1;
        dsl_dataset_t *ds = ca->ca_ds;
        size_t wkeylen;
        char *wkeybuf = NULL;
        zcrypt_key_t *txgkey;
        zap_cursor_t zc;
        zap_attribute_t za;
        objset_t *mos;
        uint64_t keychain_zapobj;
        spa_t *spa;
        zcrypt_keystore_node_t *zkn;

        ASSERT(RRW_WRITE_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock));

        mos = ds->ds_dir->dd_pool->dp_meta_objset;
        keychain_zapobj = dsl_dir_phys(ds->ds_dir)->dd_keychain_obj;

        /*
         * To allow for the case where the keychains of child datasets
         * are not loaded (ie an explicit 'zfs key -u tank/fs/sub' had
         * been done some time before doing 'zfs key -c tank/fs') we
         * iterate over the zap objects on disk rather than copying
         * from the in memory keystore node.
         */
        for (zap_cursor_init(&zc, mos, keychain_zapobj);
            zap_cursor_retrieve(&zc, &za) == 0;
            zap_cursor_advance(&zc)) {
                wkeylen = za.za_num_integers;
                wkeybuf = kmem_alloc(wkeylen, KM_SLEEP);
                VERIFY(zap_lookup_uint64(mos, keychain_zapobj,
                    (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf) == 0);
                VERIFY(zcrypt_unwrap_key(ca->ca_old_key,
                    ds->ds_objset->os_crypt, wkeybuf, wkeylen,
                    &txgkey) == 0);
                kmem_free(wkeybuf, wkeylen);
                VERIFY(zcrypt_wrap_key(ca->ca_new_key, txgkey,
                    &wkeybuf, &wkeylen,
                    zio_crypt_select_wrap(ds->ds_objset->os_crypt)) == 0);
                zcrypt_key_free(txgkey);
                VERIFY(zap_update_uint64(mos, keychain_zapobj,
                    (uint64_t *)za.za_name, 1, 1, wkeylen,
                    wkeybuf, tx) == 0);
                kmem_free(wkeybuf, wkeylen);
        }
        zap_cursor_fini(&zc);

        spa = dsl_dataset_get_spa(ds);

        /*
         * If the wrapping key is loaded switch the in memory copy now.
         */
        zkn = zcrypt_keystore_find_node(spa, ds->ds_object, B_FALSE);
        if (zkn != NULL) {
                zcrypt_key_free(zkn->skn_wrapkey);
                zkn->skn_wrapkey = zcrypt_key_copy(ca->ca_new_key);
        }

        spa_history_log_internal(spa, "key change", tx,
            "succeeded dataset = %llu", ds->ds_object);
}
/*
 * Find all 'allow' permissions from a given point and then continue
 * traversing up to the root.
 *
 * This function constructs an nvlist of nvlists.
 * Each setpoint is an nvlist composed of an nvlist of an nvlist
 * of the individual users/groups/everyone/create permissions.
 *
 * The nvlist will look like this.
 *
 * { source fsname -> { whokeys { permissions,...}, ...}}
 *
 * The fsname nvpairs will be arranged in a bottom up order.  For example,
 * if we have the following structure a/b/c then the nvpairs for the fsnames
 * will be ordered a/b/c, a/b, a.
 */
int
dsl_deleg_get(const char *ddname, nvlist_t **nvp)
{
        dsl_dir_t *dd, *startdd;
        dsl_pool_t *dp;
        int error;
        objset_t *mos;

        error = dsl_dir_open(ddname, FTAG, &startdd, NULL);
        if (error)
                return (error);

        dp = startdd->dd_pool;
        mos = dp->dp_meta_objset;

        VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

        rw_enter(&dp->dp_config_rwlock, RW_READER);
        for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
                zap_cursor_t basezc;
                zap_attribute_t baseza;
                nvlist_t *sp_nvp;
                uint64_t n;
                char source[MAXNAMELEN];

                if (dd->dd_phys->dd_deleg_zapobj &&
                    (zap_count(mos, dd->dd_phys->dd_deleg_zapobj,
                    &n) == 0) && n) {
                        VERIFY(nvlist_alloc(&sp_nvp,
                            NV_UNIQUE_NAME, KM_SLEEP) == 0);
                } else {
                        continue;
                }

                for (zap_cursor_init(&basezc, mos,
                    dd->dd_phys->dd_deleg_zapobj);
                    zap_cursor_retrieve(&basezc, &baseza) == 0;
                    zap_cursor_advance(&basezc)) {
                        zap_cursor_t zc;
                        zap_attribute_t za;
                        nvlist_t *perms_nvp;

                        ASSERT(baseza.za_integer_length == 8);
                        ASSERT(baseza.za_num_integers == 1);

                        VERIFY(nvlist_alloc(&perms_nvp,
                            NV_UNIQUE_NAME, KM_SLEEP) == 0);
                        for (zap_cursor_init(&zc, mos,
                            baseza.za_first_integer);
                            zap_cursor_retrieve(&zc, &za) == 0;
                            zap_cursor_advance(&zc)) {
                                VERIFY(nvlist_add_boolean(perms_nvp,
                                    za.za_name) == 0);
                        }
                        zap_cursor_fini(&zc);
                        VERIFY(nvlist_add_nvlist(sp_nvp, baseza.za_name,
                            perms_nvp) == 0);
                        nvlist_free(perms_nvp);
                }

                zap_cursor_fini(&basezc);

                dsl_dir_name(dd, source);
                VERIFY(nvlist_add_nvlist(*nvp, source, sp_nvp) == 0);
                nvlist_free(sp_nvp);
        }
        rw_exit(&dp->dp_config_rwlock);

        dsl_dir_close(startdd, FTAG);
        return (0);
}