int
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[32];
	int err;
	uint64_t obj;

	*valp = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0)
		return (0);

	err = id_to_fuidstr(zsb, domain, rid, buf, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
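/*
 * Illustrative sketch, not part of the original source: a minimal caller
 * of zfs_userspace_one() fetching the space charged to one POSIX uid.
 * The helper name, and the assumption that an empty domain string denotes
 * a plain POSIX id, are ours.
 */
static int
example_space_for_uid(zfs_sb_t *zsb, uid_t uid, uint64_t *usedp)
{
	/* ZFS_PROP_USERUSED selects the per-user space-used ZAP object. */
	return (zfs_userspace_one(zsb, ZFS_PROP_USERUSED, "",
	    (uint64_t)uid, usedp));
}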
/*
 * Iterate over the {fuid, value} pairs stored in the userquota ZAP object
 * for property "type", filling in as many zfs_useracct_t entries as fit
 * in *bufsizep bytes of vbuf.  *cookiep carries the serialized ZAP cursor
 * position between calls.
 */
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;
	int offset = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) &&
	    !dmu_objset_userobjspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == ZFS_NO_OBJECT) {
		*bufsizep = 0;
		return (0);
	}

	if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED)
		offset = DMU_OBJACCT_PREFIX_LEN;

	for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		/*
		 * skip object quota (with zap name prefix DMU_OBJACCT_PREFIX)
		 * when dealing with block quota and vice versa.
		 */
		if ((offset > 0) != (strncmp(za.za_name, DMU_OBJACCT_PREFIX,
		    DMU_OBJACCT_PREFIX_LEN) == 0))
			continue;

		fuidstr_to_sid(zsb, za.za_name + offset, buf->zu_domain,
		    sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
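/*
 * Illustrative sketch, not part of the original source: the cookie/bufsize
 * contract of zfs_userspace_many().  The caller starts with a cookie of 0
 * and keeps calling with the returned cookie until a call comes back with
 * *bufsizep == 0, meaning the cursor is exhausted.  The helper name and
 * the fixed 16-entry buffer are ours.
 */
static int
example_dump_space_used(zfs_sb_t *zsb, zfs_userquota_prop_t type)
{
	zfs_useracct_t entries[16];
	uint64_t cookie = 0;
	int error;

	for (;;) {
		uint64_t bufsize = sizeof (entries);
		uint64_t i, n;

		error = zfs_userspace_many(zsb, type, &cookie,
		    entries, &bufsize);
		if (error != 0 || bufsize == 0)
			break;

		n = bufsize / sizeof (zfs_useracct_t);
		for (i = 0; i < n; i++) {
			/* entries[i].zu_domain, zu_rid, zu_space are valid */
		}
	}
	return (error);
}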
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
	ASSERT(os->os->os_dsl_dataset ||
	    os->os->os_phys->os_type == DMU_OST_META);

	if (os->os->os_dsl_dataset != NULL)
		dsl_dataset_stats(os->os->os_dsl_dataset, nv);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
	    os->os->os_phys->os_type);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
	    dmu_objset_userspace_present(os));
}
int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os->os))
		return (ENOTSUP);
	if (dmu_objset_is_snapshot(os))
		return (EINVAL);

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and now accounted.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (EINTR);

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
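/*
 * Note (ours, not from the original source): dirtying each object's bonus
 * buffer is enough because per-id accounting is recomputed for every dirty
 * dnode at sync time, and each dnode carries its own accounted flag, so
 * repeating or restarting the walk after EINTR or a crash does not
 * double-count anything.
 */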
int
zfs_userspace_many(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    uint64_t *cookiep, void *vbuf, uint64_t *bufsizep)
{
	int error;
	zap_cursor_t zc;
	zap_attribute_t za;
	zfs_useracct_t *buf = vbuf;
	uint64_t obj;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == 0) {
		*bufsizep = 0;
		return (0);
	}

	for (zap_cursor_init_serialized(&zc, zsb->z_os, obj, *cookiep);
	    (error = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		if ((uintptr_t)buf - (uintptr_t)vbuf + sizeof (zfs_useracct_t) >
		    *bufsizep)
			break;

		fuidstr_to_sid(zsb, za.za_name, buf->zu_domain,
		    sizeof (buf->zu_domain), &buf->zu_rid);

		buf->zu_space = za.za_first_integer;
		buf++;
	}
	if (error == ENOENT)
		error = 0;

	ASSERT3U((uintptr_t)buf - (uintptr_t)vbuf, <=, *bufsizep);
	*bufsizep = (uintptr_t)buf - (uintptr_t)vbuf;
	*cookiep = zap_cursor_serialize(&zc);
	zap_cursor_fini(&zc);
	return (error);
}
/*
 * Look up the value recorded for a single id.  For the object-count
 * properties the ZAP key carries DMU_OBJACCT_PREFIX, so the fuid string
 * is written past the prefix and the full key is looked up.
 */
int
zfs_userspace_one(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t *valp)
{
	char buf[20 + DMU_OBJACCT_PREFIX_LEN];
	int offset = 0;
	int err;
	uint64_t obj;

	*valp = 0;

	if (!dmu_objset_userspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	if ((type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED ||
	    type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA) &&
	    !dmu_objset_userobjspace_present(zsb->z_os))
		return (SET_ERROR(ENOTSUP));

	obj = zfs_userquota_prop_to_obj(zsb, type);
	if (obj == ZFS_NO_OBJECT)
		return (0);

	if (type == ZFS_PROP_USEROBJUSED || type == ZFS_PROP_GROUPOBJUSED) {
		/*
		 * The size passed to strlcpy() must include room for the
		 * terminating NUL; with DMU_OBJACCT_PREFIX_LEN alone the
		 * last prefix character would be dropped.
		 */
		strlcpy(buf, DMU_OBJACCT_PREFIX, DMU_OBJACCT_PREFIX_LEN + 1);
		offset = DMU_OBJACCT_PREFIX_LEN;
	}

	err = id_to_fuidstr(zsb, domain, rid, buf + offset, B_FALSE);
	if (err)
		return (err);

	err = zap_lookup(zsb->z_os, obj, buf, 8, 1, valp);
	if (err == ENOENT)
		err = 0;
	return (err);
}
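/*
 * Illustrative note (ours, not from the original source): the ZAP key
 * assembled above is just the fuid string for the block-space properties,
 * and the same string preceded by DMU_OBJACCT_PREFIX for the object-count
 * properties:
 *
 *	space used by id:	"<fuidstr>"
 *	objects used by id:	DMU_OBJACCT_PREFIX "<fuidstr>"
 *
 * which is why zfs_userspace_many() above skips DMU_OBJACCT_PREFIX_LEN
 * bytes of the name before converting it back to a SID.
 */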