static void copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj, boolean_t dosets, uint64_t uid, dmu_tx_t *tx) { objset_t *mos = dd->dd_pool->dp_meta_objset; uint64_t jumpobj, pjumpobj; uint64_t zapobj = dd->dd_phys->dd_deleg_zapobj; zap_cursor_t zc; zap_attribute_t za; char whokey[ZFS_MAX_DELEG_NAME]; zfs_deleg_whokey(whokey, dosets ? ZFS_DELEG_CREATE_SETS : ZFS_DELEG_CREATE, ZFS_DELEG_LOCAL, NULL); if (zap_lookup(mos, pzapobj, whokey, 8, 1, &pjumpobj) != 0) return; if (zapobj == 0) { dmu_buf_will_dirty(dd->dd_dbuf, tx); zapobj = dd->dd_phys->dd_deleg_zapobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx); } zfs_deleg_whokey(whokey, dosets ? ZFS_DELEG_USER_SETS : ZFS_DELEG_USER, ZFS_DELEG_LOCAL, &uid); if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) { jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx); VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0); } for (zap_cursor_init(&zc, mos, pjumpobj); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { uint64_t zero = 0; ASSERT(za.za_integer_length == 8 && za.za_num_integers == 1); VERIFY(zap_update(mos, jumpobj, za.za_name, 8, 1, &zero, tx) == 0); } zap_cursor_fini(&zc); }
int zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type, uint64_t *cookiep, void *vbuf, uint64_t *bufsizep) { int error; zap_cursor_t zc; zap_attribute_t za; zfs_useracct_t *buf = vbuf; uint64_t obj; if (!dmu_objset_userspace_present(zsb->z_os)) return (SET_ERROR(ENOTSUP)); obj = zfs_userquota_prop_to_obj(zsb, type); if (obj == 0) { *bufsizep = 0; return (0); } for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep); (error = zap_cursor_retrieve(&zc, &za)) == 0; zap_cursor_advance(&zc)) { if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) > *bufsizep) break; fuidstr_to_sid(zsb, za.za_name, buf->zu_domain, sizeof (buf->zu_domain), &buf->zu_rid); buf->zu_space = za.za_first_integer; buf++; } if (error == ENOENT) error = 0; ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep); *bufsizep = (uintptr_t)buf - (uintptr_t)vbuf; *cookiep = zap_cursor_serialize(&zc); zap_cursor_fini(&zc); return (error); }
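The cookie/size contract above lends itself to batched iteration: the caller hands in a buffer, *bufsizep goes in as the buffer capacity and comes back as the bytes actually filled, and *cookiep carries the serialized cursor position between calls. A minimal caller-side sketch follows; ZFS_PROP_USERUSED and the "sum the space" behaviour are assumptions made for illustration, only the zfs_userspace_many() contract comes from the function above.

/*
 * Illustrative only: drain all userused@ entries in fixed-size batches.
 */
static uint64_t
total_userused(zfs_sb_t *zsb)
{
        zfs_useracct_t buf[64];
        uint64_t cookie = 0, bufsize, total = 0;
        uint64_t i;
        int error;

        do {
                bufsize = sizeof (buf);   /* in: capacity, out: bytes filled */
                error = zfs_userspace_many(zsb, ZFS_PROP_USERUSED,
                    &cookie, buf, &bufsize);
                if (error != 0)
                        return (0);
                for (i = 0; i < bufsize / sizeof (zfs_useracct_t); i++)
                        total += buf[i].zu_space;
        } while (bufsize > 0);  /* a zero-byte batch means the ZAP is exhausted */

        return (total);
}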
/* * Checks that the features active in the specified object are supported by * this software. Adds each unsupported feature (name -> description) to * the supplied nvlist. */ boolean_t feature_is_supported(objset_t *os, uint64_t obj, uint64_t desc_obj, nvlist_t *unsup_feat, nvlist_t *enabled_feat) { boolean_t supported; zap_cursor_t zc; zap_attribute_t za; supported = B_TRUE; for (zap_cursor_init(&zc, os, obj); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { ASSERT(za.za_integer_length == sizeof (uint64_t) && za.za_num_integers == 1); if (NULL != enabled_feat) { fnvlist_add_uint64(enabled_feat, za.za_name, za.za_first_integer); } if (za.za_first_integer != 0 && !zfeature_is_supported(za.za_name)) { supported = B_FALSE; if (NULL != unsup_feat) { char *desc = ""; char buf[MAXPATHLEN]; if (zap_lookup(os, desc_obj, za.za_name, 1, sizeof (buf), buf) == 0) desc = buf; VERIFY(nvlist_add_string(unsup_feat, za.za_name, desc) == 0); } } } zap_cursor_fini(&zc); return (supported); }
void osd_declare_xattrs_destroy(const struct lu_env *env, struct osd_object *obj, struct osd_thandle *oh) { struct osd_device *osd = osd_obj2dev(obj); zap_attribute_t *za = &osd_oti_get(env)->oti_za; uint64_t oid = obj->oo_xattr, xid; dmu_tx_t *tx = oh->ot_tx; zap_cursor_t *zc; int rc; if (oid == ZFS_NO_OBJECT) return; /* Nothing to do for SA xattrs */ /* Declare to free the ZAP holding xattrs */ dmu_tx_hold_free(tx, oid, 0, DMU_OBJECT_END); rc = osd_zap_cursor_init(&zc, osd->od_os, oid, 0); if (rc) goto out; while (zap_cursor_retrieve(zc, za) == 0) { LASSERT(za->za_num_integers == 1); LASSERT(za->za_integer_length == sizeof(uint64_t)); rc = -zap_lookup(osd->od_os, oid, za->za_name, sizeof(uint64_t), 1, &xid); if (rc) { CERROR("%s: xattr %s lookup failed: rc = %d\n", osd->od_svname, za->za_name, rc); break; } dmu_tx_hold_free(tx, xid, 0, DMU_OBJECT_END); zap_cursor_advance(zc); } osd_zap_cursor_fini(zc); out: if (rc && tx->tx_err == 0) tx->tx_err = -rc; }
/** * Load a directory entry at a time and store it in the * iterator's in-memory data structure. * * \param di, struct osd_it_ea, iterator's in-memory structure * * \retval +ve, iterator reached the end * \retval 0, iterator has not reached the end * \retval -ve, on error */ static int osd_zap_it_next(const struct lu_env *env, struct dt_it *di) { struct osd_zap_it *it = (struct osd_zap_it *)di; int rc; ENTRY; if (it->ozi_reset == 0) zap_cursor_advance(it->ozi_zc); it->ozi_reset = 0; /* * According to the current API we need to return an error if it's the last entry. * zap_cursor_advance() does not return any value, so we need to call * retrieve to check if there is any record. We should make * changes to the Iterator API to not return a status for this API. */ rc = -udmu_zap_cursor_retrieve_key(env, it->ozi_zc, NULL, NAME_MAX); if (rc == -ENOENT) /* end of dir */ RETURN(+1); RETURN((rc)); }
/* * In Orion, . and .. were stored in the directory, while ZPL * and the current osd-zfs generate them on request; so we * need to ignore previously stored . and .. entries. */ static int osd_index_retrieve_skip_dots(struct osd_zap_it *it, zap_attribute_t *za) { int rc, isdot; do { rc = -zap_cursor_retrieve(it->ozi_zc, za); isdot = 0; if (unlikely(rc == 0 && za->za_name[0] == '.')) { if (za->za_name[1] == 0) { isdot = 1; } else if (za->za_name[1] == '.' && za->za_name[2] == 0) { isdot = 1; } if (unlikely(isdot)) zap_cursor_advance(it->ozi_zc); } } while (unlikely(rc == 0 && isdot)); return rc; }
static int osd_index_it_next(const struct lu_env *env, struct dt_it *di) { struct osd_zap_it *it = (struct osd_zap_it *)di; zap_attribute_t *za = &osd_oti_get(env)->oti_za; int rc; ENTRY; if (it->ozi_reset == 0) zap_cursor_advance(it->ozi_zc); it->ozi_reset = 0; /* * According to current API we need to return error if it's last entry. * zap_cursor_advance() does not return any value. So we need to call * retrieve to check if there is any record. We should make * changes to Iterator API to not return status for this API */ rc = -zap_cursor_retrieve(it->ozi_zc, za); if (rc == -ENOENT) RETURN(+1); RETURN((rc)); }
void dsl_dir_remove_clones_key(dsl_dir_t *dd, uint64_t mintxg, dmu_tx_t *tx) { objset_t *mos = dd->dd_pool->dp_meta_objset; zap_cursor_t zc; zap_attribute_t za; /* * If it is the old version, dd_clones doesn't exist so we can't * find the clones, but dsl_deadlist_remove_key() is a no-op so it * doesn't matter. */ if (dsl_dir_phys(dd)->dd_clones == 0) return; for (zap_cursor_init(&zc, mos, dsl_dir_phys(dd)->dd_clones); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { dsl_dataset_t *clone; VERIFY0(dsl_dataset_hold_obj(dd->dd_pool, za.za_first_integer, FTAG, &clone)); if (clone->ds_dir->dd_origin_txg > mintxg) { dsl_deadlist_remove_key(&clone->ds_deadlist, mintxg, tx); if (dsl_dataset_remap_deadlist_exists(clone)) { dsl_deadlist_remove_key( &clone->ds_remap_deadlist, mintxg, tx); } dsl_dir_remove_clones_key(clone->ds_dir, mintxg, tx); } dsl_dataset_rele(clone, FTAG); } zap_cursor_fini(&zc); }
static void dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx) { objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; zap_cursor_t *zc; zap_attribute_t *za; /* * If it is the old version, dd_clones doesn't exist so we can't * find the clones, but dsl_deadlist_remove_key() is a no-op so it * doesn't matter. */ if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0) return; zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones); zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) { dsl_dataset_t *clone; VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool, za->za_first_integer, FTAG, &clone)); if (clone->ds_dir->dd_origin_txg > mintxg) { dsl_deadlist_remove_key(&clone->ds_deadlist, mintxg, tx); dsl_dataset_remove_clones_key(clone, mintxg, tx); } dsl_dataset_rele(clone, FTAG); } zap_cursor_fini(zc); kmem_free(za, sizeof (zap_attribute_t)); kmem_free(zc, sizeof (zap_cursor_t)); }
int dmu_snapshot_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp, boolean_t *case_conflict) { dsl_dataset_t *ds = os->os->os_dsl_dataset; zap_cursor_t cursor; zap_attribute_t attr; if (ds->ds_phys->ds_snapnames_zapobj == 0) return (ENOENT); zap_cursor_init_serialized(&cursor, ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_phys->ds_snapnames_zapobj, *offp); if (zap_cursor_retrieve(&cursor, &attr) != 0) { zap_cursor_fini(&cursor); return (ENOENT); } if (strlen(attr.za_name) + 1 > namelen) { zap_cursor_fini(&cursor); return (ENAMETOOLONG); } (void) strcpy(name, attr.za_name); if (idp) *idp = attr.za_first_integer; if (case_conflict) *case_conflict = attr.za_normalization_conflict; zap_cursor_advance(&cursor); *offp = zap_cursor_serialize(&cursor); zap_cursor_fini(&cursor); return (0); }
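Callers typically treat *offp as an opaque resume token: start at 0, feed the value written back by each call into the next one, and stop on ENOENT. A hedged sketch of that loop follows; the function name and buffer size are invented for illustration, only the dmu_snapshot_list_next() contract is from the source.

/*
 * Illustrative only: enumerate every snapshot of an objset by looping
 * until dmu_snapshot_list_next() reports ENOENT.
 */
static void
walk_snapshots(objset_t *os)
{
        char name[MAXNAMELEN];
        uint64_t id, offset = 0;
        boolean_t conflict;

        while (dmu_snapshot_list_next(os, sizeof (name), name,
            &id, &offset, &conflict) == 0) {
                /* name, id and conflict describe one snapshot entry */
        }
}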
int dmu_dir_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp) { dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir; zap_cursor_t cursor; zap_attribute_t attr; /* there is no next dir on a snapshot! */ if (os->os->os_dsl_dataset->ds_object != dd->dd_phys->dd_head_dataset_obj) return (ENOENT); zap_cursor_init_serialized(&cursor, dd->dd_pool->dp_meta_objset, dd->dd_phys->dd_child_dir_zapobj, *offp); if (zap_cursor_retrieve(&cursor, &attr) != 0) { zap_cursor_fini(&cursor); return (ENOENT); } if (strlen(attr.za_name) + 1 > namelen) { zap_cursor_fini(&cursor); return (ENAMETOOLONG); } (void) strcpy(name, attr.za_name); if (idp) *idp = attr.za_first_integer; zap_cursor_advance(&cursor); *offp = zap_cursor_serialize(&cursor); zap_cursor_fini(&cursor); return (0); }
static int sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count) { sa_os_t *sa = os->os_sa; uint64_t sa_attr_count = 0; uint64_t sa_reg_count; int error = 0; uint64_t attr_value; sa_attr_table_t *tb; zap_cursor_t zc; zap_attribute_t za; int registered_count = 0; int i; dmu_objset_type_t ostype = dmu_objset_type(os); sa->sa_user_table = kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP); sa->sa_user_table_sz = count * sizeof (sa_attr_type_t); if (sa->sa_reg_attr_obj != 0) { error = zap_count(os, sa->sa_reg_attr_obj, &sa_attr_count); /* * Make sure we retrieved a count and that it isn't zero */ if (error || (error == 0 && sa_attr_count == 0)) { if (error == 0) error = EINVAL; goto bail; } sa_reg_count = sa_attr_count; } if (ostype == DMU_OST_ZFS && sa_attr_count == 0) sa_attr_count += sa_legacy_attr_count; /* Allocate attribute numbers for attributes that aren't registered */ for (i = 0; i != count; i++) { boolean_t found = B_FALSE; int j; if (ostype == DMU_OST_ZFS) { for (j = 0; j != sa_legacy_attr_count; j++) { if (strcmp(reg_attrs[i].sa_name, sa_legacy_attrs[j].sa_name) == 0) { sa->sa_user_table[i] = sa_legacy_attrs[j].sa_attr; found = B_TRUE; } } } if (found) continue; if (sa->sa_reg_attr_obj) error = zap_lookup(os, sa->sa_reg_attr_obj, reg_attrs[i].sa_name, 8, 1, &attr_value); else error = ENOENT; switch (error) { case ENOENT: sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count; sa_attr_count++; break; case 0: sa->sa_user_table[i] = ATTR_NUM(attr_value); break; default: goto bail; } } sa->sa_num_attrs = sa_attr_count; tb = sa->sa_attr_table = kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP); /* * Attribute table is constructed from requested attribute list, * previously foreign registered attributes, and also the legacy * ZPL set of attributes. 
*/ if (sa->sa_reg_attr_obj) { for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj); (error = zap_cursor_retrieve(&zc, &za)) == 0; zap_cursor_advance(&zc)) { uint64_t value; value = za.za_first_integer; registered_count++; tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value); tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value); tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value); tb[ATTR_NUM(value)].sa_registered = B_TRUE; if (tb[ATTR_NUM(value)].sa_name) { continue; } tb[ATTR_NUM(value)].sa_name = kmem_zalloc(strlen(za.za_name) +1, KM_SLEEP); (void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name, strlen(za.za_name) +1); } zap_cursor_fini(&zc); /* * Make sure we processed the correct number of registered * attributes */ if (registered_count != sa_reg_count) { ASSERT(error != 0); goto bail; } } if (ostype == DMU_OST_ZFS) { for (i = 0; i != sa_legacy_attr_count; i++) { if (tb[i].sa_name) continue; tb[i].sa_attr = sa_legacy_attrs[i].sa_attr; tb[i].sa_length = sa_legacy_attrs[i].sa_length; tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap; tb[i].sa_registered = B_FALSE; tb[i].sa_name = kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) +1, KM_SLEEP); (void) strlcpy(tb[i].sa_name, sa_legacy_attrs[i].sa_name, strlen(sa_legacy_attrs[i].sa_name) + 1); } } for (i = 0; i != count; i++) { sa_attr_type_t attr_id; attr_id = sa->sa_user_table[i]; if (tb[attr_id].sa_name) continue; tb[attr_id].sa_length = reg_attrs[i].sa_length; tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap; tb[attr_id].sa_attr = attr_id; tb[attr_id].sa_name = kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP); (void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name, strlen(reg_attrs[i].sa_name) + 1); } sa->sa_need_attr_registration = (sa_attr_count != registered_count); return (0); bail: kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t)); sa->sa_user_table = NULL; sa_free_attr_table(sa); return ((error != 0) ? error : EINVAL); }
/* * Find all 'allow' permissions from a given point and then continue * traversing up to the root. * * This function constructs an nvlist of nvlists. * each setpoint is an nvlist composed of an nvlist of an nvlist * of the individual * users/groups/everyone/create * permissions. * * The nvlist will look like this. * * { source fsname -> { whokeys { permissions,...}, ...}} * * The fsname nvpairs will be arranged in a bottom up order. For example, * if we have the following structure a/b/c then the nvpairs for the fsnames * will be ordered a/b/c, a/b, a. */ int dsl_deleg_get(const char *ddname, nvlist_t **nvp) { dsl_dir_t *dd, *startdd; dsl_pool_t *dp; int error; objset_t *mos; error = dsl_pool_hold(ddname, FTAG, &dp); if (error != 0) return (error); error = dsl_dir_hold(dp, ddname, FTAG, &startdd, NULL); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } dp = startdd->dd_pool; mos = dp->dp_meta_objset; VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); for (dd = startdd; dd != NULL; dd = dd->dd_parent) { zap_cursor_t basezc; zap_attribute_t baseza; nvlist_t *sp_nvp; uint64_t n; char source[ZFS_MAX_DATASET_NAME_LEN]; if (dsl_dir_phys(dd)->dd_deleg_zapobj == 0 || zap_count(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, &n) != 0 || n == 0) continue; sp_nvp = fnvlist_alloc(); for (zap_cursor_init(&basezc, mos, dsl_dir_phys(dd)->dd_deleg_zapobj); zap_cursor_retrieve(&basezc, &baseza) == 0; zap_cursor_advance(&basezc)) { zap_cursor_t zc; zap_attribute_t za; nvlist_t *perms_nvp; ASSERT(baseza.za_integer_length == 8); ASSERT(baseza.za_num_integers == 1); perms_nvp = fnvlist_alloc(); for (zap_cursor_init(&zc, mos, baseza.za_first_integer); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { fnvlist_add_boolean(perms_nvp, za.za_name); } zap_cursor_fini(&zc); fnvlist_add_nvlist(sp_nvp, baseza.za_name, perms_nvp); fnvlist_free(perms_nvp); } zap_cursor_fini(&basezc); dsl_dir_name(dd, source); fnvlist_add_nvlist(*nvp, source, sp_nvp); nvlist_free(sp_nvp); } dsl_dir_rele(startdd, FTAG); dsl_pool_rele(dp, FTAG); return (0); }
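The resulting nvlist nests three levels deep: fsname -> whokey -> permission names stored as boolean markers. Below is a consumer-side sketch of walking that structure with the standard nvpair iteration calls; the function name and the printf stand-in are assumptions for illustration, not part of the source, and a real consumer would live in userland (e.g. behind the 'zfs allow' listing).

/*
 * Illustrative only: walk { fsname -> { whokey -> { perm, ... } } }
 * as returned by dsl_deleg_get().
 */
static void
dump_deleg_perms(nvlist_t *nvp)
{
        nvpair_t *src = NULL;

        while ((src = nvlist_next_nvpair(nvp, src)) != NULL) {
                nvlist_t *who_nvl = fnvpair_value_nvlist(src);
                nvpair_t *who = NULL;

                while ((who = nvlist_next_nvpair(who_nvl, who)) != NULL) {
                        nvlist_t *perms = fnvpair_value_nvlist(who);
                        nvpair_t *perm = NULL;

                        while ((perm = nvlist_next_nvpair(perms, perm)) != NULL)
                                printf("%s\t%s\t%s\n", nvpair_name(src),
                                    nvpair_name(who), nvpair_name(perm));
                }
        }
}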
/* * Find all 'allow' permissions from a given point and then continue * traversing up to the root. * * This function constructs an nvlist of nvlists. * each setpoint is an nvlist composed of an nvlist of an nvlist * of the individual * users/groups/everyone/create * permissions. * * The nvlist will look like this. * * { source fsname -> { whokeys { permissions,...}, ...}} * * The fsname nvpairs will be arranged in a bottom up order. For example, * if we have the following structure a/b/c then the nvpairs for the fsnames * will be ordered a/b/c, a/b, a. */ int dsl_deleg_get(const char *ddname, nvlist_t **nvp) { dsl_dir_t *dd, *startdd; dsl_pool_t *dp; int error; objset_t *mos; error = dsl_dir_open(ddname, FTAG, &startdd, NULL); if (error) return (error); dp = startdd->dd_pool; mos = dp->dp_meta_objset; VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); rw_enter(&dp->dp_config_rwlock, RW_READER); for (dd = startdd; dd != NULL; dd = dd->dd_parent) { zap_cursor_t basezc; zap_attribute_t baseza; nvlist_t *sp_nvp; uint64_t n; char source[MAXNAMELEN]; if (dd->dd_phys->dd_deleg_zapobj && (zap_count(mos, dd->dd_phys->dd_deleg_zapobj, &n) == 0) && n) { VERIFY(nvlist_alloc(&sp_nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); } else { continue; } for (zap_cursor_init(&basezc, mos, dd->dd_phys->dd_deleg_zapobj); zap_cursor_retrieve(&basezc, &baseza) == 0; zap_cursor_advance(&basezc)) { zap_cursor_t zc; zap_attribute_t za; nvlist_t *perms_nvp; ASSERT(baseza.za_integer_length == 8); ASSERT(baseza.za_num_integers == 1); VERIFY(nvlist_alloc(&perms_nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); for (zap_cursor_init(&zc, mos, baseza.za_first_integer); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { VERIFY(nvlist_add_boolean(perms_nvp, za.za_name) == 0); } zap_cursor_fini(&zc); VERIFY(nvlist_add_nvlist(sp_nvp, baseza.za_name, perms_nvp) == 0); nvlist_free(perms_nvp); } zap_cursor_fini(&basezc); dsl_dir_name(dd, source); VERIFY(nvlist_add_nvlist(*nvp, source, sp_nvp) == 0); nvlist_free(sp_nvp); } rw_exit(&dp->dp_config_rwlock); dsl_dir_close(startdd, FTAG); return (0); }
/* * Find all objsets under name, call func on each */ int dmu_objset_find_spa(spa_t *spa, const char *name, int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags) { dsl_dir_t *dd; dsl_pool_t *dp; dsl_dataset_t *ds; zap_cursor_t zc; zap_attribute_t *attr; char *child; uint64_t thisobj; int err; if (name == NULL) name = spa_name(spa); err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL); if (err) return (err); /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ if (dd->dd_myname[0] == '$') { dsl_dir_close(dd, FTAG); return (0); } thisobj = dd->dd_phys->dd_head_dataset_obj; attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); dp = dd->dd_pool; /* * Iterate over all children. */ if (flags & DS_FIND_CHILDREN) { for (zap_cursor_init(&zc, dp->dp_meta_objset, dd->dd_phys->dd_child_dir_zapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT(attr->za_integer_length == sizeof (uint64_t)); ASSERT(attr->za_num_integers == 1); child = kmem_alloc(MAXPATHLEN, KM_SLEEP); (void) strcpy(child, name); (void) strcat(child, "/"); (void) strcat(child, attr->za_name); err = dmu_objset_find_spa(spa, child, func, arg, flags); kmem_free(child, MAXPATHLEN); if (err) break; } zap_cursor_fini(&zc); if (err) { dsl_dir_close(dd, FTAG); kmem_free(attr, sizeof (zap_attribute_t)); return (err); } } /* * Iterate over all snapshots. */ if (flags & DS_FIND_SNAPSHOTS) { if (!dsl_pool_sync_context(dp)) rw_enter(&dp->dp_config_rwlock, RW_READER); err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); if (!dsl_pool_sync_context(dp)) rw_exit(&dp->dp_config_rwlock); if (err == 0) { uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; dsl_dataset_rele(ds, FTAG); for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT(attr->za_integer_length == sizeof (uint64_t)); ASSERT(attr->za_num_integers == 1); child = kmem_alloc(MAXPATHLEN, KM_SLEEP); (void) strcpy(child, name); (void) strcat(child, "@"); (void) strcat(child, attr->za_name); err = func(spa, attr->za_first_integer, child, arg); kmem_free(child, MAXPATHLEN); if (err) break; } zap_cursor_fini(&zc); } } dsl_dir_close(dd, FTAG); kmem_free(attr, sizeof (zap_attribute_t)); if (err) return (err); /* * Apply to self if appropriate. */ err = func(spa, thisobj, name, arg); return (err); }
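Since the traversal applies the callback to every child, every snapshot (when the flags request them), and finally the starting objset itself, a caller only needs to supply a func matching the signature above; a non-zero return aborts the walk. A hedged sketch of counting everything under the pool root (callback name and counting behaviour are illustrative only):

/*
 * Illustrative only: count every objset visited.  The callback signature
 * matches the func argument of dmu_objset_find_spa() above.
 */
static int
count_objsets_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
        uint64_t *countp = arg;

        (*countp)++;
        return (0);             /* non-zero would abort the traversal */
}

static uint64_t
count_all_objsets(spa_t *spa)
{
        uint64_t count = 0;

        /* name == NULL makes the walk start at the pool's root dataset */
        (void) dmu_objset_find_spa(spa, NULL, count_objsets_cb, &count,
            DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
        return (count);
}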
/* * Delete the entire contents of a directory. Return a count * of the number of entries that could not be deleted. * * NOTE: this function assumes that the directory is inactive, * so there is no need to lock its entries before deletion. * Also, it assumes the directory contains *only* regular * files. */ static int zfs_purgedir(znode_t *dzp) { zap_cursor_t zc; zap_attribute_t zap; znode_t *xzp; dmu_tx_t *tx; zfsvfs_t *zfsvfs = dzp->z_zfsvfs; zfs_dirlock_t dl; int skipped = 0; int error; for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id); (error = zap_cursor_retrieve(&zc, &zap)) == 0; zap_cursor_advance(&zc)) { #ifdef __APPLE__ error = zfs_zget_sans_vnode(zfsvfs, ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp); ASSERT3U(error, ==, 0); #else error = zfs_zget(zfsvfs, ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp); ASSERT3U(error, ==, 0); ASSERT((ZTOV(xzp)->v_type == VREG) || (ZTOV(xzp)->v_type == VLNK)); #endif /* __APPLE__ */ tx = dmu_tx_create(zfsvfs->z_os); dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name); dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); /* Is this really needed? */ zfs_sa_upgrade_txholds(tx, xzp); error = dmu_tx_assign(tx, TXG_WAIT); if (error) { dmu_tx_abort(tx); #ifdef __APPLE__ if (ZTOV(xzp) == NULL) { zfs_zinactive(xzp); } else { VN_RELE(ZTOV(xzp)); } #else VN_RELE(ZTOV(xzp)); #endif /* __APPLE__ */ skipped += 1; continue; } bzero(&dl, sizeof (dl)); dl.dl_dzp = dzp; dl.dl_name = zap.za_name; error = zfs_link_destroy(&dl, xzp, tx, 0, NULL); ASSERT3U(error, ==, 0); dmu_tx_commit(tx); #ifdef __APPLE__ if (ZTOV(xzp) == NULL) { zfs_zinactive(xzp); } else { VN_RELE(ZTOV(xzp)); } #else VN_RELE(ZTOV(xzp)); #endif /* __APPLE__ */ } zap_cursor_fini(&zc); ASSERT(error == ENOENT); return (skipped); }
/* * dsl_crypto_key_change * * The old key must already be present in memory since the user interface * doesn't provide a way to prompt for or retrieve the old key. */ static void dsl_crypto_key_change_sync(void *arg1, dmu_tx_t *tx) { struct wkey_change_arg *ca = arg1; dsl_dataset_t *ds = ca->ca_ds; size_t wkeylen; char *wkeybuf = NULL; zcrypt_key_t *txgkey; zap_cursor_t zc; zap_attribute_t za; objset_t *mos; uint64_t keychain_zapobj; spa_t *spa; zcrypt_keystore_node_t *zkn; ASSERT(RRW_WRITE_HELD(&ds->ds_dir->dd_pool->dp_config_rwlock)); mos = ds->ds_dir->dd_pool->dp_meta_objset; keychain_zapobj = dsl_dir_phys(ds->ds_dir)->dd_keychain_obj; /* * To allow for the case where the keychains of child datasets * are not loaded (i.e. an explicit 'zfs key -u tank/fs/sub' had * been done some time before doing 'zfs key -c tank/fs') we iterate * over the zap objects on disk rather than copying from the * in-memory keystore node. */ for (zap_cursor_init(&zc, mos, keychain_zapobj); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { wkeylen = za.za_num_integers; wkeybuf = kmem_alloc(wkeylen, KM_SLEEP); VERIFY(zap_lookup_uint64(mos, keychain_zapobj, (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf) == 0); VERIFY(zcrypt_unwrap_key(ca->ca_old_key, ds->ds_objset->os_crypt, wkeybuf, wkeylen, &txgkey) == 0); kmem_free(wkeybuf, wkeylen); VERIFY(zcrypt_wrap_key(ca->ca_new_key, txgkey, &wkeybuf, &wkeylen, zio_crypt_select_wrap(ds->ds_objset->os_crypt)) == 0); zcrypt_key_free(txgkey); VERIFY(zap_update_uint64(mos, keychain_zapobj, (uint64_t *)za.za_name, 1, 1, wkeylen, wkeybuf, tx) == 0); kmem_free(wkeybuf, wkeylen); } zap_cursor_fini(&zc); spa = dsl_dataset_get_spa(ds); /* * If the wrapping key is loaded, switch the in-memory copy now. */ zkn = zcrypt_keystore_find_node(spa, ds->ds_object, B_FALSE); if (zkn != NULL) { zcrypt_key_free(zkn->skn_wrapkey); zkn->skn_wrapkey = zcrypt_key_copy(ca->ca_new_key); } spa_history_log_internal(spa, "key change", tx, "succeeded dataset = %llu", ds->ds_object); }
int osd_xattr_list(const struct lu_env *env, struct dt_object *dt, const struct lu_buf *lb) { struct osd_object *obj = osd_dt_obj(dt); struct osd_device *osd = osd_obj2dev(obj); zap_attribute_t *za = &osd_oti_get(env)->oti_za; zap_cursor_t *zc; int rc, counted; ENTRY; LASSERT(obj->oo_db != NULL); LASSERT(osd_invariant(obj)); LASSERT(dt_object_exists(dt)); down_read(&obj->oo_guard); rc = osd_sa_xattr_list(env, obj, lb); if (rc < 0) GOTO(out, rc); counted = rc; /* continue with dnode xattr if any */ if (obj->oo_xattr == ZFS_NO_OBJECT) GOTO(out, rc = counted); rc = osd_zap_cursor_init(&zc, osd->od_os, obj->oo_xattr, 0); if (rc) GOTO(out, rc); while ((rc = -zap_cursor_retrieve(zc, za)) == 0) { if (!osd_obj2dev(obj)->od_posix_acl && (strcmp(za->za_name, POSIX_ACL_XATTR_ACCESS) == 0 || strcmp(za->za_name, POSIX_ACL_XATTR_DEFAULT) == 0)) { zap_cursor_advance(zc); continue; } rc = strlen(za->za_name); if (lb->lb_buf != NULL) { if (counted + rc + 1 > lb->lb_len) GOTO(out_fini, rc = -ERANGE); memcpy(lb->lb_buf + counted, za->za_name, rc + 1); } counted += rc + 1; zap_cursor_advance(zc); } if (rc == -ENOENT) /* no more keys in the index */ rc = 0; else if (unlikely(rc < 0)) GOTO(out_fini, rc); rc = counted; out_fini: osd_zap_cursor_fini(zc); out: up_read(&obj->oo_guard); RETURN(rc); }
int sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count, sa_attr_type_t **user_table) { zap_cursor_t zc; zap_attribute_t za; sa_os_t *sa; dmu_objset_type_t ostype = dmu_objset_type(os); sa_attr_type_t *tb; int error; mutex_enter(&os->os_lock); if (os->os_sa) { mutex_enter(&os->os_sa->sa_lock); mutex_exit(&os->os_lock); tb = os->os_sa->sa_user_table; mutex_exit(&os->os_sa->sa_lock); *user_table = tb; return (0); } sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP); mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL); sa->sa_master_obj = sa_obj; os->os_sa = sa; mutex_enter(&sa->sa_lock); mutex_exit(&os->os_lock); avl_create(&sa->sa_layout_num_tree, layout_num_compare, sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node)); avl_create(&sa->sa_layout_hash_tree, layout_hash_compare, sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node)); if (sa_obj) { error = zap_lookup(os, sa_obj, SA_LAYOUTS, 8, 1, &sa->sa_layout_attr_obj); if (error != 0 && error != ENOENT) goto fail; error = zap_lookup(os, sa_obj, SA_REGISTRY, 8, 1, &sa->sa_reg_attr_obj); if (error != 0 && error != ENOENT) goto fail; } if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0) goto fail; if (sa->sa_layout_attr_obj != 0) { uint64_t layout_count; error = zap_count(os, sa->sa_layout_attr_obj, &layout_count); /* * Layout number count should be > 0 */ if (error || (error == 0 && layout_count == 0)) { if (error == 0) error = EINVAL; goto fail; } for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj); (error = zap_cursor_retrieve(&zc, &za)) == 0; zap_cursor_advance(&zc)) { sa_attr_type_t *lot_attrs; uint64_t lot_num; lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) * za.za_num_integers, KM_SLEEP); if ((error = (zap_lookup(os, sa->sa_layout_attr_obj, za.za_name, 2, za.za_num_integers, lot_attrs))) != 0) { kmem_free(lot_attrs, sizeof (sa_attr_type_t) * za.za_num_integers); break; } VERIFY(ddi_strtoull(za.za_name, NULL, 10, (unsigned long long *)&lot_num) == 0); (void) sa_add_layout_entry(os, lot_attrs, za.za_num_integers, lot_num, sa_layout_info_hash(lot_attrs, za.za_num_integers), B_FALSE, NULL); kmem_free(lot_attrs, sizeof (sa_attr_type_t) * za.za_num_integers); } zap_cursor_fini(&zc); /* * Make sure layout count matches number of entries added * to AVL tree */ if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) { ASSERT(error != 0); goto fail; } } /* Add special layout number for old ZNODES */ if (ostype == DMU_OST_ZFS) { (void) sa_add_layout_entry(os, sa_legacy_zpl_layout, sa_legacy_attr_count, 0, sa_layout_info_hash(sa_legacy_zpl_layout, sa_legacy_attr_count), B_FALSE, NULL); (void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1, 0, B_FALSE, NULL); } *user_table = os->os_sa->sa_user_table; mutex_exit(&sa->sa_lock); return (0); fail: os->os_sa = NULL; sa_free_attr_table(sa); if (sa->sa_user_table) kmem_free(sa->sa_user_table, sa->sa_user_table_sz); mutex_exit(&sa->sa_lock); kmem_free(sa, sizeof (sa_os_t)); return ((error == ECKSUM) ? EIO : error); }
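For context, the reg_attrs/count pair is simply the caller's attribute registration table; a filesystem consumer passes its own table when it mounts, and gets back the translation table it uses for all later SA lookups. A hedged sketch of such a call follows; MASTER_NODE_OBJ, ZFS_SA_ATTRS, zfs_attr_table and ZPL_END are the ZPL's names as best I recall, so treat them as assumptions rather than part of the source.

/*
 * Illustrative only: how a filesystem consumer might obtain its
 * attribute translation table at mount time.  Error handling is trimmed.
 */
static int
example_sa_setup(objset_t *os, sa_attr_type_t **tablep)
{
        uint64_t sa_obj = 0;
        int error;

        /* The SA master object, if any, is advertised in the master node ZAP. */
        error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
        if (error != 0 && error != ENOENT)
                return (error);

        return (sa_setup(os, sa_obj, zfs_attr_table, ZPL_END, tablep));
}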
/* * Find all 'allow' permissions from a given point and then continue * traversing up to the root. * * This function constructs an nvlist of nvlists. * each setpoint is an nvlist composed of an nvlist of an nvlist * of the individual * users/groups/everyone/create * permissions. * * The nvlist will look like this. * * { source fsname -> { whokeys { permissions,...}, ...}} * * The fsname nvpairs will be arranged in a bottom up order. For example, * if we have the following structure a/b/c then the nvpairs for the fsnames * will be ordered a/b/c, a/b, a. */ int dsl_deleg_get(const char *ddname, nvlist_t **nvp) { dsl_dir_t *dd, *startdd; dsl_pool_t *dp; int error; objset_t *mos; zap_cursor_t *basezc, *zc; zap_attribute_t *baseza, *za; char *source; error = dsl_pool_hold(ddname, FTAG, &dp); if (error != 0) return (error); error = dsl_dir_hold(dp, ddname, FTAG, &startdd, NULL); if (error != 0) { dsl_pool_rele(dp, FTAG); return (error); } dp = startdd->dd_pool; mos = dp->dp_meta_objset; zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); baseza = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); source = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, KM_SLEEP); VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); for (dd = startdd; dd != NULL; dd = dd->dd_parent) { nvlist_t *sp_nvp; uint64_t n; if (dd->dd_phys->dd_deleg_zapobj == 0 || zap_count(mos, dd->dd_phys->dd_deleg_zapobj, &n) != 0 || n == 0) continue; sp_nvp = fnvlist_alloc(); for (zap_cursor_init(basezc, mos, dd->dd_phys->dd_deleg_zapobj); zap_cursor_retrieve(basezc, baseza) == 0; zap_cursor_advance(basezc)) { nvlist_t *perms_nvp; ASSERT(baseza->za_integer_length == 8); ASSERT(baseza->za_num_integers == 1); perms_nvp = fnvlist_alloc(); for (zap_cursor_init(zc, mos, baseza->za_first_integer); zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) { fnvlist_add_boolean(perms_nvp, za->za_name); } zap_cursor_fini(zc); fnvlist_add_nvlist(sp_nvp, baseza->za_name, perms_nvp); fnvlist_free(perms_nvp); } zap_cursor_fini(basezc); dsl_dir_name(dd, source); fnvlist_add_nvlist(*nvp, source, sp_nvp); nvlist_free(sp_nvp); } kmem_free(source, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); kmem_free(baseza, sizeof (zap_attribute_t)); kmem_free(basezc, sizeof (zap_cursor_t)); kmem_free(za, sizeof (zap_attribute_t)); kmem_free(zc, sizeof (zap_cursor_t)); dsl_dir_rele(startdd, FTAG); dsl_pool_rele(dp, FTAG); return (0); }
int dsl_keychain_load_dd(dsl_dir_t *dd, uint64_t dsobj, int crypt, zcrypt_key_t *wrappingkey) { zap_cursor_t zc; zap_attribute_t za; objset_t *mos = dd->dd_pool->dp_meta_objset; uint64_t keychain_zapobj = dsl_dir_phys(dd)->dd_keychain_obj; zcrypt_key_t *txgkey; zcrypt_keystore_node_t *skn; caddr_t wrappedkey; size_t wkeylen; spa_t *spa = dd->dd_pool->dp_spa; int unwrapped = 0, entries = 0; /* * The basic algorithm is to start with the ds_keychain_obj * and iterate using zap_cursor_*(), unwrapping the * values (the actual encryption keys) into zcrypt_key_t's * and calling zcrypt_keychain_insert() to put them into the dsl AVL * tree of keys. */ zcrypt_key_hold(wrappingkey, FTAG); skn = zcrypt_keystore_insert(spa, dsobj, wrappingkey); ASSERT(skn != NULL); mutex_enter(&skn->skn_lock); for (zap_cursor_init(&zc, mos, keychain_zapobj); zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) { entries++; wkeylen = za.za_num_integers; wrappedkey = kmem_alloc(wkeylen, KM_SLEEP); VERIFY3U(zap_lookup_uint64(mos, keychain_zapobj, (uint64_t *)&za.za_name, 1, 1, za.za_num_integers, wrappedkey), ==, 0); if (zcrypt_unwrap_key(wrappingkey, crypt, wrappedkey, wkeylen, &txgkey) != 0) { kmem_free(wrappedkey, wkeylen); continue; } unwrapped++; kmem_free(wrappedkey, wkeylen); zcrypt_keychain_insert(&skn->skn_keychain, *(uint64_t *)za.za_name, txgkey); } zap_cursor_fini(&zc); mutex_exit(&skn->skn_lock); zcrypt_key_release(wrappingkey, FTAG); if (entries > 0 && unwrapped == 0) { /* Wrong wrapping key passed */ (void) zcrypt_keystore_remove(spa, dsobj); return (EACCES); } /* * If we didn't unwrap everything, we have possible corruption. * If an attempt is ever made to decrypt blocks, either we won't * find the key (ENOKEY) or we will use the wrong key, which * will result in the MAC failing to verify, so ECKSUM will be * set in zio->io_error, which will result in an ereport being * logged because the zio_read() failed. * When we are running DEBUG, let's ASSERT this instead. */ ASSERT3U(entries, ==, unwrapped); return (0); }
int osd_xattr_list(const struct lu_env *env, struct dt_object *dt, struct lu_buf *lb, struct lustre_capa *capa) { struct osd_thread_info *oti = osd_oti_get(env); struct osd_object *obj = osd_dt_obj(dt); struct osd_device *osd = osd_obj2dev(obj); udmu_objset_t *uos = &osd->od_objset; zap_cursor_t *zc; int rc, counted = 0, remain = lb->lb_len; ENTRY; LASSERT(obj->oo_db != NULL); LASSERT(osd_invariant(obj)); LASSERT(dt_object_exists(dt)); down(&obj->oo_guard); rc = osd_sa_xattr_list(env, obj, lb); if (rc < 0) GOTO(out, rc); counted = rc; remain -= counted; /* continue with dnode xattr if any */ if (obj->oo_xattr == ZFS_NO_OBJECT) GOTO(out, rc = counted); rc = -udmu_zap_cursor_init(&zc, uos, obj->oo_xattr, 0); if (rc) GOTO(out, rc); while ((rc = -udmu_zap_cursor_retrieve_key(env, zc, oti->oti_key, MAXNAMELEN)) == 0) { if (!osd_obj2dev(obj)->od_posix_acl && (strcmp(oti->oti_key, POSIX_ACL_XATTR_ACCESS) == 0 || strcmp(oti->oti_key, POSIX_ACL_XATTR_DEFAULT) == 0)) { zap_cursor_advance(zc); continue; } rc = strlen(oti->oti_key); if (lb->lb_buf != NULL) { if (rc + 1 > remain) GOTO(out_fini, rc = -ERANGE); memcpy(lb->lb_buf, oti->oti_key, rc); lb->lb_buf += rc; *((char *)lb->lb_buf) = '\0'; lb->lb_buf++; remain -= rc + 1; } counted += rc + 1; zap_cursor_advance(zc); } if (rc == -ENOENT) /* no more keys in the index */ rc = 0; else if (unlikely(rc < 0)) GOTO(out_fini, rc); rc = counted; out_fini: udmu_zap_cursor_fini(zc); out: up(&obj->oo_guard); RETURN(rc); }