int
zfs_set_userquota(zfs_sb_t *zsb, zfs_userquota_prop_t type,
    const char *domain, uint64_t rid, uint64_t quota)
{
	char buf[32];
	int err;
	dmu_tx_t *tx;
	uint64_t *objp;
	boolean_t fuid_dirtied;

	if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
		return (SET_ERROR(EINVAL));

	if (zsb->z_version < ZPL_VERSION_USERSPACE)
		return (SET_ERROR(ENOTSUP));

	objp = (type == ZFS_PROP_USERQUOTA) ? &zsb->z_userquota_obj :
	    &zsb->z_groupquota_obj;

	err = id_to_fuidstr(zsb, domain, rid, buf, B_TRUE);
	if (err)
		return (err);
	fuid_dirtied = zsb->z_fuid_dirty;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_zap(tx, *objp ? *objp : DMU_NEW_OBJECT, B_TRUE, NULL);
	if (*objp == 0) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    zfs_userquota_prop_prefixes[type]);
	}
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	mutex_enter(&zsb->z_lock);
	if (*objp == 0) {
		*objp = zap_create(zsb->z_os, DMU_OT_USERGROUP_QUOTA,
		    DMU_OT_NONE, 0, tx);
		VERIFY(0 == zap_add(zsb->z_os, MASTER_NODE_OBJ,
		    zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
	}
	mutex_exit(&zsb->z_lock);

	if (quota == 0) {
		err = zap_remove(zsb->z_os, *objp, buf, tx);
		if (err == ENOENT)
			err = 0;
	} else {
		err = zap_update(zsb->z_os, *objp, buf, 8, 1, &quota, tx);
	}
	ASSERT(err == 0);
	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);
	dmu_tx_commit(tx);
	return (err);
}
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted.  If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contains *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t zc;
	zap_attribute_t zap;
	znode_t *xzp;
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t dl;
	int skipped = 0;
	int error;

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs,
		    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
		if (error) {
			skipped += 1;
			continue;
		}

		/*
		 * ASSERT((ZTOV(xzp)->v_type == VREG) ||
		 *     (ZTOV(xzp)->v_type == VLNK));
		 */

		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
		/* Is this really needed? */
		zfs_sa_upgrade_txholds(tx, xzp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			/* VN_RELE() is done asynchronously */
			VN_RELE_ASYNC(ZTOV(xzp),
			    dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
			skipped += 1;
			continue;
		}
		bzero(&dl, sizeof (dl));
		dl.dl_dzp = dzp;
		dl.dl_name = zap.za_name;

		error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
		if (error)
			skipped += 1;
		dmu_tx_commit(tx);

		/* VN_RELE() is done asynchronously */
		VN_RELE_ASYNC(ZTOV(xzp),
		    dsl_pool_vnrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT)
		skipped += 1;
	return (skipped);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_fuid_info_t *fuidp = NULL;

	*xvpp = NULL;

	/*
	 * In FreeBSD, access checking for creating an EA is being done
	 * in zfs_setextattr().
	 */
#ifndef __FreeBSD__
	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr))
		return (error);
#endif

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	if (IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))) {
		if (zfsvfs->z_fuid_obj == 0) {
			dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
			    FUID_SIZE_ESTIMATE(zfsvfs));
			dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
		} else {
			dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
			dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
			    FUID_SIZE_ESTIMATE(zfsvfs));
		}
	}
	error = dmu_tx_assign(tx, zfsvfs->z_assign);
	if (error) {
		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT)
			dmu_tx_wait(tx);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, 0, NULL, &fuidp);
	ASSERT(xzp->z_phys->zp_parent == zp->z_id);
	dmu_buf_will_dirty(zp->z_dbuf, tx);
	zp->z_phys->zp_xattr = xzp->z_id;

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, fuidp, vap);
	if (fuidp)
		zfs_fuid_info_free(fuidp);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted.  If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contains *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t zc;
	zap_attribute_t zap;
	znode_t *xzp;
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = ZTOZSB(dzp);
	zfs_dirlock_t dl;
	int skipped = 0;
	int error;

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs,
		    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
		if (error) {
			skipped += 1;
			continue;
		}

		ASSERT(S_ISREG(ZTOI(xzp)->i_mode) ||
		    S_ISLNK(ZTOI(xzp)->i_mode));

		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
		/* Is this really needed? */
		zfs_sa_upgrade_txholds(tx, xzp);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			zfs_iput_async(ZTOI(xzp));
			skipped += 1;
			continue;
		}
		bzero(&dl, sizeof (dl));
		dl.dl_dzp = dzp;
		dl.dl_name = zap.za_name;

		error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
		if (error)
			skipped += 1;
		dmu_tx_commit(tx);

		zfs_iput_async(ZTOI(xzp));
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT)
		skipped += 1;
	return (skipped);
}
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted.  If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contains *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t zc;
	zap_attribute_t zap;
	znode_t *xzp;
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	int skipped = 0;
	int error;

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs,
		    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
		if (error) {
			skipped += 1;
			continue;
		}

		vn_lock(ZTOV(xzp), LK_EXCLUSIVE | LK_RETRY);
		ASSERT((ZTOV(xzp)->v_type == VREG) ||
		    (ZTOV(xzp)->v_type == VLNK));

		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
		/* Is this really needed? */
		zfs_sa_upgrade_txholds(tx, xzp);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			vput(ZTOV(xzp));
			skipped += 1;
			continue;
		}

		error = zfs_link_destroy(dzp, zap.za_name, xzp, tx, 0, NULL);
		if (error)
			skipped += 1;
		dmu_tx_commit(tx);

		vput(ZTOV(xzp));
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT)
		skipped += 1;
	return (skipped);
}
int
zfs_set_version(zfs_sb_t *zsb, uint64_t newvers)
{
	int error;
	objset_t *os = zsb->z_os;
	dmu_tx_t *tx;

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (SET_ERROR(EINVAL));

	if (newvers < zsb->z_version)
		return (SET_ERROR(EINVAL));

	if (zfs_spa_version_map(newvers) >
	    spa_version(dmu_objset_spa(zsb->z_os)))
		return (SET_ERROR(ENOTSUP));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);
	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_TRUE,
		    ZFS_SA_ATTRS);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	}
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &newvers, tx);

	if (error) {
		dmu_tx_commit(tx);
		return (error);
	}

	if (newvers >= ZPL_VERSION_SA && !zsb->z_use_sa) {
		uint64_t sa_obj;

		ASSERT3U(spa_version(dmu_objset_spa(zsb->z_os)), >=,
		    SPA_VERSION_SA);
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);

		error = zap_add(os, MASTER_NODE_OBJ,
		    ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT0(error);

		VERIFY(0 == sa_set_sa_object(os, sa_obj));
		sa_register_update_callback(os, zfs_sa_upgrade);
	}

	/* Assumed completion: commit the tx and record the new version. */
	dmu_tx_commit(tx);

	zsb->z_version = newvers;

	return (0);
}
static int osd_declare_object_destroy(const struct lu_env *env,
				      struct dt_object *dt,
				      struct thandle *th)
{
	char *buf = osd_oti_get(env)->oti_str;
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	int rc;
	uint64_t zapid;
	ENTRY;

	LASSERT(th != NULL);
	LASSERT(dt_object_exists(dt));

	oh = container_of0(th, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	/* declare that we'll remove object from fid-dnode mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, buf);
	dmu_tx_hold_bonus(oh->ot_tx, zapid);
	dmu_tx_hold_zap(oh->ot_tx, zapid, FALSE, buf);

	osd_declare_xattrs_destroy(env, obj, oh);

	/* declare that we'll remove object from inode accounting ZAPs */
	dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, FALSE, buf);
	dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, FALSE, buf);

	/* one less inode */
	rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
			       obj->oo_attr.la_gid, -1, oh, false, NULL, false);
	if (rc)
		RETURN(rc);

	/* data to be truncated */
	rc = osd_declare_quota(env, osd, obj->oo_attr.la_uid,
			       obj->oo_attr.la_gid, 0, oh, true, NULL, false);
	if (rc)
		RETURN(rc);

	osd_object_set_destroy_type(obj);
	if (obj->oo_destroy == OSD_DESTROY_SYNC)
		dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object,
				 0, DMU_OBJECT_END);
	else
		dmu_tx_hold_zap(oh->ot_tx, osd->od_unlinkedid, TRUE, NULL);

	RETURN(0);
}
/*
 * Delete the entire contents of a directory.  Return a count
 * of the number of entries that could not be deleted.  If we encounter
 * an error, return a count of at least one so that the directory stays
 * in the unlinked set.
 *
 * NOTE: this function assumes that the directory is inactive,
 * so there is no need to lock its entries before deletion.
 * Also, it assumes the directory contains *only* regular
 * files.
 */
static int
zfs_purgedir(znode_t *dzp)
{
	zap_cursor_t zc;
	zap_attribute_t zap;
	znode_t *xzp;
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t dl;
	int skipped = 0;
	int error;

	for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
	    (error = zap_cursor_retrieve(&zc, &zap)) == 0;
	    zap_cursor_advance(&zc)) {
		error = zfs_zget(zfsvfs,
		    ZFS_DIRENT_OBJ(zap.za_first_integer), &xzp);
		if (error) {
			skipped += 1;
			continue;
		}

		ASSERT((ZTOV(xzp)->v_type == VREG) ||
		    (ZTOV(xzp)->v_type == VLNK));

		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_bonus(tx, dzp->z_id);
		dmu_tx_hold_zap(tx, dzp->z_id, FALSE, zap.za_name);
		dmu_tx_hold_bonus(tx, xzp->z_id);
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			VN_RELE(ZTOV(xzp));
			skipped += 1;
			continue;
		}
		bzero(&dl, sizeof (dl));
		dl.dl_dzp = dzp;
		dl.dl_name = zap.za_name;

		error = zfs_link_destroy(&dl, xzp, tx, 0, NULL);
		if (error)
			skipped += 1;
		dmu_tx_commit(tx);

		VN_RELE(ZTOV(xzp));
	}
	zap_cursor_fini(&zc);
	if (error != ENOENT)
		skipped += 1;
	return (skipped);
}
static void __osd_xattr_declare_del(const struct lu_env *env,
				    struct osd_object *obj,
				    const char *name, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_tx_t *tx = oh->ot_tx;
	uint64_t xa_data_obj;
	int rc;

	/* update SA_ZPL_DXATTR if xattr was in SA */
	dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 0);

	if (obj->oo_xattr == ZFS_NO_OBJECT)
		return;

	rc = -zap_lookup(osd->od_os, obj->oo_xattr, name, 8, 1, &xa_data_obj);
	if (rc == 0) {
		/*
		 * Entry exists.
		 * We'll delete the existing object and ZAP entry.
		 */
		dmu_tx_hold_bonus(tx, xa_data_obj);
		dmu_tx_hold_free(tx, xa_data_obj, 0, DMU_OBJECT_END);
		dmu_tx_hold_zap(tx, obj->oo_xattr, FALSE, (char *) name);
		return;
	} else if (rc == -ENOENT) {
		/*
		 * Entry doesn't exist, nothing to be changed.
		 */
		return;
	}

	/* An error happened */
	tx->tx_err = -rc;
}
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
	dmu_tx_t *tx;
	int error;
	uint64_t txg;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (SET_ERROR(error));
	}
	txg = dmu_tx_get_txg(tx);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	txg_wait_synced(dmu_objset_pool(os), txg);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);

	return (error);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_fuid_info_t *fuidp = NULL;

	*xvpp = NULL;

	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr))
		return (error);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    SPA_MAXBLOCKSIZE);
	}
	error = dmu_tx_assign(tx, zfsvfs->z_assign);
	if (error) {
		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT)
			dmu_tx_wait(tx);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, 0, NULL, &fuidp);
	ASSERT(xzp->z_phys->zp_parent == zp->z_id);
	dmu_buf_will_dirty(zp->z_dbuf, tx);
	zp->z_phys->zp_xattr = xzp->z_id;

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, fuidp, vap);
	if (fuidp)
		zfs_fuid_info_free(fuidp);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;

	*xvpp = NULL;

	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr))
		return (error);

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (EDQUOT);
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		if (error == ERESTART)
			dmu_tx_wait(tx);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, 0, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	ASSERT(xzp->z_phys->zp_parent == zp->z_id);
	dmu_buf_will_dirty(zp->z_dbuf, tx);
	zp->z_phys->zp_xattr = xzp->z_id;

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, acl_ids.z_fuidp, vap);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
int
zvol_set_volsize(zfs_cmd_t *zc)
{
	zvol_state_t *zv;
	dev_t dev = zc->zc_dev;
	dmu_tx_t *tx;
	int error;
	dmu_object_info_t doi;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(zc->zc_name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(zc, doi.doi_data_block_size)) != 0) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_free(tx, ZVOL_OBJ, zc->zc_volsize, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &zc->zc_volsize, tx);
	if (error == 0) {
		error = dmu_free_range(zv->zv_objset, ZVOL_OBJ,
		    zc->zc_volsize, DMU_OBJECT_END, tx);
	}

	dmu_tx_commit(tx);

	if (error == 0) {
		zv->zv_volsize = zc->zc_volsize;
		zvol_size_changed(zv, dev);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}
/*
 * zfs_init_fs - Initialize the zfsvfs struct and the file system
 *	incore "master" object.  Verify version compatibility.
 */
int
zfs_init_fs(zfsvfs_t *zfsvfs, znode_t **zpp, cred_t *cr)
{
	extern int zfsfstype;

	objset_t *os = zfsvfs->z_os;
	int i, error;
	dmu_object_info_t doi;
	uint64_t fsid_guid;
	uint64_t zval;

	*zpp = NULL;

	/*
	 * XXX - hack to auto-create the pool root filesystem at
	 * the first attempted mount.
	 */
	if (dmu_object_info(os, MASTER_NODE_OBJ, &doi) == ENOENT) {
		dmu_tx_t *tx = dmu_tx_create(os);
		uint64_t zpl_version;
		nvlist_t *zprops;

		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, NULL); /* master */
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, NULL); /* del queue */
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); /* root node */
		error = dmu_tx_assign(tx, TXG_WAIT);
		ASSERT3U(error, ==, 0);

		if (spa_version(dmu_objset_spa(os)) >= SPA_VERSION_FUID)
			zpl_version = ZPL_VERSION;
		else
			zpl_version = ZPL_VERSION_FUID - 1;

		VERIFY(nvlist_alloc(&zprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(zprops,
		    zfs_prop_to_name(ZFS_PROP_VERSION), zpl_version) == 0);
		zfs_create_fs(os, cr, zprops, tx);
		nvlist_free(zprops);

		dmu_tx_commit(tx);
	}
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (error);

	error = dmu_free_long_range(os,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (error);

	bdev = bdget_disk(zv->zv_disk, 0);
	if (!bdev)
		return (EIO);

	/*
	 * 2.6.28 API change
	 * Added check_disk_size_change() helper function.
	 */
#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	check_disk_size_change(zv->zv_disk, bdev);
#else
	zv->zv_volsize = volsize;
	zv->zv_changed = 1;
	(void) check_disk_change(bdev);
#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */

	bdput(bdev);

	return (0);
}
void
zfs_fuid_txhold(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	if (zfsvfs->z_fuid_obj == 0) {
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
		dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL);
	} else {
		dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj);
		dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0,
		    FUID_SIZE_ESTIMATE(zfsvfs));
	}
}
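The callers above show the intended usage of this helper: zfs_set_userquota() and zfs_make_xattrdir() snapshot z_fuid_dirty before building the transaction, call zfs_fuid_txhold() before dmu_tx_assign(), and call zfs_fuid_sync() only after the transaction has been assigned. A minimal sketch of that hold/assign/sync pattern follows; fuid_tx_pattern() and its caller-specific placeholder are hypothetical and only illustrate the ordering used in the functions above.

/*
 * Sketch of the fuid transaction pattern seen in zfs_set_userquota()
 * and zfs_make_xattrdir(); the "caller-specific updates" placeholder
 * stands in for the real ZAP/SA work.
 */
static int
fuid_tx_pattern(zfsvfs_t *zfsvfs)
{
	dmu_tx_t *tx;
	boolean_t fuid_dirtied = zfsvfs->z_fuid_dirty;
	int error;

	tx = dmu_tx_create(zfsvfs->z_os);
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);	/* declare fuid table holds */

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);	/* an unassigned tx is aborted */
		return (error);
	}

	/* ... caller-specific ZAP/SA updates go here ... */

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);	/* write the dirty fuid table */
	dmu_tx_commit(tx);
	return (0);
}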
int
zfs_set_version(const char *name, uint64_t newvers)
{
	int error;
	objset_t *os;
	dmu_tx_t *tx;
	uint64_t curvers;

	/*
	 * XXX for now, require that the filesystem be unmounted.  Would
	 * be nice to find the zfsvfs_t and just update that if
	 * possible.
	 */

	if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
		return (EINVAL);

	error = dmu_objset_open(name, DMU_OST_ZFS, DS_MODE_PRIMARY, &os);
	if (error)
		return (error);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
	    8, 1, &curvers);
	if (error)
		goto out;
	if (newvers < curvers) {
		error = EINVAL;
		goto out;
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 0, ZPL_VERSION_STR);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}
	error = zap_update(os, MASTER_NODE_OBJ, ZPL_VERSION_STR, 8, 1,
	    &newvers, tx);

	spa_history_internal_log(LOG_DS_UPGRADE,
	    dmu_objset_spa(os), tx, CRED(),
	    "oldver=%llu newver=%llu dataset = %llu",
	    curvers, newvers, dmu_objset_id(os));

	dmu_tx_commit(tx);

out:
	dmu_objset_close(os);
	return (error);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	uint64_t xoid;
	int error;

	*xvpp = NULL;

#ifndef __APPLE__
	/* In Mac OS X access preflighting is done above the file system. */
	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, cr))
		return (error);
#endif /* !__APPLE__ */

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	error = dmu_tx_assign(tx, zfsvfs->z_assign);
	if (error) {
		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT)
			dmu_tx_wait(tx);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, &xoid, tx, cr, IS_XATTR, &xzp, 0);
	ASSERT(xzp->z_id == xoid);
	ASSERT(xzp->z_phys->zp_parent == zp->z_id);
	dmu_buf_will_dirty(zp->z_dbuf, tx);
	zp->z_phys->zp_xattr = xoid;

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "");
	dmu_tx_commit(tx);

#ifdef __APPLE__
	/*
	 * Obtain and attach the vnode after committing the transaction.
	 */
	zfs_attach_vnode(xzp);
#endif

	*xvpp = ZTOV(xzp);

	return (0);
}
static int osd_declare_index_delete(const struct lu_env *env,
				    struct dt_object *dt,
				    const struct dt_key *key,
				    struct thandle *th)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;
	ENTRY;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(th != NULL);
	LASSERT(obj->oo_db);

	oh = container_of0(th, struct osd_thandle, ot_super);
	dmu_tx_hold_zap(oh->ot_tx, obj->oo_db->db_object, TRUE, NULL);

	RETURN(0);
}
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (error);

	error = dmu_free_long_range(zv->zv_objset,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (error);

	zv->zv_volsize = volsize;
	zv->zv_changed = 1;

	bdev = bdget_disk(zv->zv_disk, 0);
	if (!bdev)
		return (EIO);

	error = check_disk_change(bdev);
	ASSERT3U(error, !=, 0);
	bdput(bdev);

	return (0);
}
static int osd_declare_dir_insert(const struct lu_env *env,
				  struct dt_object *dt,
				  const struct dt_rec *rec,
				  const struct dt_key *key,
				  struct thandle *th)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;
	ENTRY;

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	LASSERT(obj->oo_db);
	LASSERT(udmu_object_is_zap(obj->oo_db));

	dmu_tx_hold_bonus(oh->ot_tx, obj->oo_db->db_object);
	dmu_tx_hold_zap(oh->ot_tx, obj->oo_db->db_object, TRUE, (char *)key);

	RETURN(0);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	uint64_t xoid;
	int error;

	*xvpp = NULL;

	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, cr))
		return (error);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_bonus(tx, zp->z_id);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	error = dmu_tx_assign(tx, zfsvfs->z_assign);
	if (error) {
		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT)
			dmu_tx_wait(tx);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, &xoid, tx, cr, IS_XATTR, &xzp, 0);
	ASSERT(xzp->z_id == xoid);
	ASSERT(xzp->z_phys->zp_parent == zp->z_id);
	dmu_buf_will_dirty(zp->z_dbuf, tx);
	zp->z_phys->zp_xattr = xoid;

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "");
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
static int osd_declare_index_insert(const struct lu_env *env,
				    struct dt_object *dt,
				    const struct dt_rec *rec,
				    const struct dt_key *key,
				    struct thandle *th)
{
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_thandle *oh;
	ENTRY;

	LASSERT(th != NULL);
	oh = container_of0(th, struct osd_thandle, ot_super);

	LASSERT(obj->oo_db);

	dmu_tx_hold_bonus(oh->ot_tx, obj->oo_db->db_object);

	/* It is not clear what API should be used for binary keys, so we pass
	 * a null name which has the side effect of over-reserving space,
	 * accounting for the worst case. See zap_count_write() */
	dmu_tx_hold_zap(oh->ot_tx, obj->oo_db->db_object, TRUE, NULL);

	RETURN(0);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	uint64_t parent;

	*xvpp = NULL;

	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
		return (error);

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (EDQUOT);
	}

top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

#ifdef HAVE_ZPL
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);
#endif /* HAVE_ZPL */

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xzp->z_id, sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
void __osd_xattr_declare_set(const struct lu_env *env,
			     struct osd_object *obj, int vallen,
			     const char *name, struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_buf_t *db = obj->oo_db;
	dmu_tx_t *tx = oh->ot_tx;
	uint64_t xa_data_obj;
	int rc = 0;
	int here;

	if (unlikely(obj->oo_destroyed))
		return;

	here = dt_object_exists(&obj->oo_dt);

	/* object may be not yet created */
	if (here) {
		LASSERT(db);
		LASSERT(obj->oo_sa_hdl);
		/* we might just update SA_ZPL_DXATTR */
		dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 1);

		if (obj->oo_xattr == ZFS_NO_OBJECT)
			rc = -ENOENT;
	}

	if (!here || rc == -ENOENT) {
		/* we'll be updating SA_ZPL_XATTR */
		if (here) {
			LASSERT(obj->oo_sa_hdl);
			dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 1);
		}
		/* xattr zap + entry */
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, (char *) name);
		/* xattr value obj */
		dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, vallen);
		return;
	}

	rc = -zap_lookup(osd->od_os, obj->oo_xattr, name, sizeof(uint64_t), 1,
			 &xa_data_obj);
	if (rc == 0) {
		/*
		 * Entry already exists.
		 * We'll truncate the existing object.
		 */
		dmu_tx_hold_bonus(tx, xa_data_obj);
		dmu_tx_hold_free(tx, xa_data_obj, vallen, DMU_OBJECT_END);
		dmu_tx_hold_write(tx, xa_data_obj, 0, vallen);
		return;
	} else if (rc == -ENOENT) {
		/*
		 * Entry doesn't exist, we need to create a new one and a new
		 * object to store the value.
		 */
		dmu_tx_hold_bonus(tx, obj->oo_xattr);
		dmu_tx_hold_zap(tx, obj->oo_xattr, TRUE, (char *) name);
		dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, vallen);
		return;
	}

	/* An error happened */
	tx->tx_err = -rc;
}
void
zfs_rmnode(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	objset_t *os = zfsvfs->z_os;
	znode_t *xzp = NULL;
	dmu_tx_t *tx;
	uint64_t acl_obj;
	uint64_t xattr_obj;
	uint64_t links;
	int error;

	ASSERT(ZTOI(zp)->i_nlink == 0);
	ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);

	/*
	 * If this is an attribute directory, purge its contents.
	 */
	if (S_ISDIR(ZTOI(zp)->i_mode) && (zp->z_pflags & ZFS_XATTR)) {
		if (zfs_purgedir(zp) != 0) {
			/*
			 * Not enough space to delete some xattrs.
			 * Leave it in the unlinked set.
			 */
			zfs_znode_dmu_fini(zp);
			return;
		}
	}

	/*
	 * Free up all the data in the file.  We don't do this for directories
	 * because we need truncate and remove to be in the same tx, like in
	 * zfs_znode_delete(). Otherwise, if we crash here we'll end up with
	 * an inconsistent truncated zap object in the delete queue.  Note a
	 * truncated file is harmless since it only contains user data.
	 */
	if (S_ISREG(ZTOI(zp)->i_mode)) {
		error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
		if (error) {
			/*
			 * Not enough space or we were interrupted by unmount.
			 * Leave the file in the unlinked set.
			 */
			zfs_znode_dmu_fini(zp);
			return;
		}
	}

	/*
	 * If the file has extended attributes, we're going to unlink
	 * the xattr dir.
	 */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT(error == 0);
	}

	acl_obj = zfs_external_acl(zp);

	/*
	 * Set up the final transaction.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	if (xzp) {
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}
	if (acl_obj)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);

	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		/*
		 * Not enough space to delete the file.  Leave it in the
		 * unlinked set, leaking it until the fs is remounted (at
		 * which point we'll call zfs_unlinked_drain() to process it).
		 */
		dmu_tx_abort(tx);
		zfs_znode_dmu_fini(zp);
		goto out;
	}

	if (xzp) {
		ASSERT(error == 0);
		mutex_enter(&xzp->z_lock);
		xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
		clear_nlink(ZTOI(xzp));		/* no more links to it */
		links = 0;
		VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
		    &links, sizeof (links), tx));
		mutex_exit(&xzp->z_lock);
		zfs_unlinked_add(xzp, tx);
	}

	/* Remove this znode from the unlinked set */
	VERIFY3U(0, ==,
	    zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));

	zfs_znode_delete(zp, tx);

	dmu_tx_commit(tx);
out:
	if (xzp)
		zfs_iput_async(ZTOI(xzp));
}
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info *info = osd_oti_get(env);
	char *buf = osd_oti_get(env)->oti_str;
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t bspace;
	uint32_t blksize;
	int rc;
	ENTRY;

	if (!dt_object_exists(dt)) {
		/* XXX: sanity check that object creation is declared */
		RETURN(0);
	}

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of0(handle, struct osd_thandle, ot_super);

	LASSERT(obj->oo_sa_hdl != NULL);
	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

	sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
	bspace = toqb(bspace * blksize);

	if (attr && attr->la_valid & LA_UID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);

		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				RETURN(rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* account for group inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				RETURN(rc);
		}
	}

	RETURN(0);
}
void
zfs_rmnode(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	objset_t *os = zfsvfs->z_os;
	znode_t *xzp = NULL;
	dmu_tx_t *tx;
	uint64_t acl_obj;
	uint64_t xattr_obj;
	int error;

	ASSERT(zp->z_links == 0);
	ASSERT(ZTOV(zp)->v_count == 0);

	/*
	 * If this is an attribute directory, purge its contents.
	 */
	if (ZTOV(zp)->v_type == VDIR && (zp->z_pflags & ZFS_XATTR)) {
		if (zfs_purgedir(zp) != 0) {
			/*
			 * Not enough space to delete some xattrs.
			 * Leave it in the unlinked set.
			 */
			zfs_znode_dmu_fini(zp);
			zfs_znode_free(zp);
			return;
		}
	}

	/*
	 * Free up all the data in the file.
	 */
	error = dmu_free_long_range(os, zp->z_id, 0, DMU_OBJECT_END);
	if (error) {
		/*
		 * Not enough space.  Leave the file in the unlinked set.
		 */
		zfs_znode_dmu_fini(zp);
		zfs_znode_free(zp);
		return;
	}

	/*
	 * If the file has extended attributes, we're going to unlink
	 * the xattr dir.
	 */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT(error == 0);
	}

	acl_obj = zfs_external_acl(zp);

	/*
	 * Set up the final transaction.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	if (xzp) {
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, TRUE, NULL);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}
	if (acl_obj)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);

	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		/*
		 * Not enough space to delete the file.  Leave it in the
		 * unlinked set, leaking it until the fs is remounted (at
		 * which point we'll call zfs_unlinked_drain() to process it).
		 */
		dmu_tx_abort(tx);
		zfs_znode_dmu_fini(zp);
		zfs_znode_free(zp);
		goto out;
	}

	if (xzp) {
		ASSERT(error == 0);
		mutex_enter(&xzp->z_lock);
		xzp->z_unlinked = B_TRUE;	/* mark xzp for deletion */
		xzp->z_links = 0;		/* no more links to it */
		VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
		    &xzp->z_links, sizeof (xzp->z_links), tx));
		mutex_exit(&xzp->z_lock);
		zfs_unlinked_add(xzp, tx);
	}

	/* Remove this znode from the unlinked set */
	VERIFY3U(0, ==,
	    zap_remove_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));

	zfs_znode_delete(zp, tx);

	dmu_tx_commit(tx);
out:
	if (xzp)
		VN_RELE(ZTOV(xzp));
}
static int osd_declare_object_create(const struct lu_env *env,
				     struct dt_object *dt,
				     struct lu_attr *attr,
				     struct dt_allocation_hint *hint,
				     struct dt_object_format *dof,
				     struct thandle *handle)
{
	char *buf = osd_oti_get(env)->oti_str;
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t zapid;
	int rc;
	ENTRY;

	LASSERT(dof);

	switch (dof->dof_type) {
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		if (obj->oo_dt.do_body_ops == NULL)
			obj->oo_dt.do_body_ops = &osd_body_ops;
		break;
	default:
		break;
	}

	LASSERT(handle != NULL);
	oh = container_of0(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	switch (dof->dof_type) {
	case DFT_DIR:
	case DFT_INDEX:
		/* for zap create */
		dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, 1, NULL);
		break;
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		/* first, we'll create new object */
		dmu_tx_hold_bonus(oh->ot_tx, DMU_NEW_OBJECT);
		break;
	default:
		LBUG();
		break;
	}

	/* and we'll add it to some mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, buf);
	dmu_tx_hold_bonus(oh->ot_tx, zapid);
	dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, buf);

	/* we will also update inode accounting ZAPs */
	dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
	dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

	dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);

	__osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
				XATTR_NAME_LMA, oh);

	rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
			       false, NULL, false);
	RETURN(rc);
}
void
zfs_rmnode(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	objset_t *os = zfsvfs->z_os;
	znode_t *xzp = NULL;
	char obj_name[17];
	dmu_tx_t *tx;
	uint64_t acl_obj;
	int error;

	ASSERT(ZTOV(zp)->v_count == 0);
	ASSERT(zp->z_phys->zp_links == 0);

	/*
	 * If this is an attribute directory, purge its contents.
	 */
	if (ZTOV(zp)->v_type == VDIR && (zp->z_phys->zp_flags & ZFS_XATTR))
		if (zfs_purgedir(zp) != 0) {
			zfs_delete_t *delq = &zfsvfs->z_delete_head;
			/*
			 * Add this back to the delete list to be retried
			 * later.
			 *
			 * XXX - this could just busy loop on us...
			 */
			mutex_enter(&delq->z_mutex);
			list_insert_tail(&delq->z_znodes, zp);
			delq->z_znode_count++;
			mutex_exit(&delq->z_mutex);
			return;
		}

	/*
	 * If the file has extended attributes, unlink the xattr dir.
	 */
	if (zp->z_phys->zp_xattr) {
		error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp);
		ASSERT(error == 0);
	}

	acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj;

	/*
	 * Set up the transaction.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
	dmu_tx_hold_zap(tx, zfsvfs->z_dqueue, FALSE, NULL);
	if (xzp) {
		dmu_tx_hold_bonus(tx, xzp->z_id);
		dmu_tx_hold_zap(tx, zfsvfs->z_dqueue, TRUE, NULL);
	}
	if (acl_obj)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_delete_t *delq = &zfsvfs->z_delete_head;

		dmu_tx_abort(tx);
		/*
		 * Add this back to the delete list to be retried later.
		 *
		 * XXX - this could just busy loop on us...
		 */
		mutex_enter(&delq->z_mutex);
		list_insert_tail(&delq->z_znodes, zp);
		delq->z_znode_count++;
		mutex_exit(&delq->z_mutex);
		return;
	}

	if (xzp) {
		dmu_buf_will_dirty(xzp->z_dbuf, tx);
		mutex_enter(&xzp->z_lock);
		xzp->z_reap = 1;		/* mark xzp for deletion */
		xzp->z_phys->zp_links = 0;	/* no more links to it */
		mutex_exit(&xzp->z_lock);
		zfs_dq_add(xzp, tx);		/* add xzp to delete queue */
	}

	/*
	 * Remove this znode from delete queue
	 */
	error = zap_remove(os, zfsvfs->z_dqueue,
	    zfs_dq_hexname(obj_name, zp->z_id), tx);
	ASSERT3U(error, ==, 0);

	zfs_znode_delete(zp, tx);

	dmu_tx_commit(tx);

	if (xzp)
		VN_RELE(ZTOV(xzp));
}