static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				 const struct lu_buf *buf, loff_t pos,
				 struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            oid;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* in some cases declare can race with creation (e.g. llog)
	 * and we need to wait till object is initialized. notice
	 * LOHA_EXISTs is supposed to be the last step in the
	 * initialization */

	/* declare possible size change. notice we can't check
	 * current size here as another thread can change it */

	if (dt_object_exists(dt)) {
		LASSERT(obj->oo_db);
		oid = obj->oo_db->db_object;

		dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	} else {
		oid = DMU_NEW_OBJECT;
		dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
	}

	/* XXX: we still lack append declaration support in ZFS.
	 *	-1 means append, which is used mostly by llog;
	 *	llog can grow up to LLOG_MIN_CHUNK_SIZE*8 records */
	if (pos == -1)
		pos = max_t(loff_t, 256 * 8 * LLOG_MIN_CHUNK_SIZE,
			    obj->oo_attr.la_size + (2 << 20));
	dmu_tx_hold_write(oh->ot_tx, oid, pos, buf->lb_len);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0. */
	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
int
zfs_sa_set_xattr(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_tx_t *tx;
	char *obj;
	size_t size;
	int error;

	ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
	ASSERT(zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = nvlist_size(zp->z_xattr_cached, &size, NV_ENCODE_XDR);
	if (error)
		goto out;

	obj = zio_buf_alloc(size);

	error = nvlist_pack(zp->z_xattr_cached, &obj, &size,
	    NV_ENCODE_XDR, KM_SLEEP);
	if (error)
		goto out_free;

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa_create(tx, size);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zsb),
		    obj, size, tx);
		if (error)
			dmu_tx_abort(tx);
		else
			dmu_tx_commit(tx);
	}
out_free:
	zio_buf_free(obj, size);
out:
	return (error);
}
int
zfs_sa_set_xattr(znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	dmu_tx_t *tx;
	char *obj;
	size_t size;
	int error;

	ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
	ASSERT(zp->z_xattr_cached);
	ASSERT(zp->z_is_sa);

	error = nvlist_size(zp->z_xattr_cached, &size, NV_ENCODE_XDR);
	if ((error == 0) && (size > SA_ATTR_MAX_LEN))
		error = EFBIG;
	if (error)
		goto out;

	obj = vmem_alloc(size, KM_SLEEP);

	error = nvlist_pack(zp->z_xattr_cached, &obj, &size,
	    NV_ENCODE_XDR, KM_SLEEP);
	if (error)
		goto out_free;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, size);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_DXATTR(zfsvfs),
		    obj, size, tx));
		dmu_tx_commit(tx);
	}
out_free:
	vmem_free(obj, size);
out:
	return (error);
}
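/*
 * Editor's sketch, not part of the original sources: both zfs_sa_set_xattr()
 * variants above follow the same DMU transaction lifecycle
 * (create -> hold -> assign -> commit/abort).  This minimal outline assumes
 * only an open objset_t *os and a valid sa_handle_t *hdl; the attr/buf/size
 * parameters stand in for whatever the caller actually updates.
 */
static int
sa_update_pattern_sketch(objset_t *os, sa_handle_t *hdl, sa_attr_type_t attr,
    void *buf, uint32_t size)
{
	dmu_tx_t *tx;
	int error;

	tx = dmu_tx_create(os);
	/* Declare that SA (bonus/spill) space of this size may be needed. */
	dmu_tx_hold_sa_create(tx, size);
	/* B_TRUE: the update may need to grow the SA layout. */
	dmu_tx_hold_sa(tx, hdl, B_TRUE);

	/* TXG_WAIT blocks until the tx can be assigned to an open txg. */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		/* An unassigned tx is aborted, never committed. */
		dmu_tx_abort(tx);
		return (error);
	}

	error = sa_update(hdl, attr, buf, size, tx);

	/*
	 * Mirror the newer variant above: once assigned, the tx is
	 * committed (the newer code VERIFY0s the update instead of
	 * aborting an already-assigned tx).
	 */
	dmu_tx_commit(tx);
	return (error);
}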
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
				 const loff_t size, loff_t pos,
				 struct thandle *th)
{
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            oid;
	ENTRY;

	oh = container_of0(th, struct osd_thandle, ot_super);

	/* in some cases declare can race with creation (e.g. llog)
	 * and we need to wait till object is initialized. notice
	 * LOHA_EXISTs is supposed to be the last step in the
	 * initialization */

	/* declare possible size change. notice we can't check
	 * current size here as another thread can change it */

	if (dt_object_exists(dt)) {
		LASSERT(obj->oo_db);
		oid = obj->oo_db->db_object;

		dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	} else {
		oid = DMU_NEW_OBJECT;
		dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);
	}

	dmu_tx_hold_write(oh->ot_tx, oid, pos, size);

	/* dt_declare_write() is usually called for system objects, such
	 * as llog or last_rcvd files. We needn't enforce quota on those
	 * objects, so always set the lqi_space as 0. */
	RETURN(osd_declare_quota(env, osd, obj->oo_attr.la_uid,
				 obj->oo_attr.la_gid, 0, oh, true, NULL,
				 false));
}
/*
 * Create a new extended-attribute directory for zp and record its object
 * number in the znode's SA_ZPL_XATTR attribute.
 */
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	uint64_t parent;

	*xvpp = NULL;

	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
		return (error);

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (EDQUOT);
	}
top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

#ifdef HAVE_ZPL
	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);
#endif /* HAVE_ZPL */

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "", NULL,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
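/*
 * Editor's sketch, not from the original sources: the variant above assigns
 * with TXG_NOWAIT and retries on ERESTART, whereas the later ports below use
 * TXG_WAIT and let dmu_tx_assign() block.  A hedged outline of the NOWAIT
 * retry idiom, assuming only an open objset_t *os and a caller-supplied
 * callback that re-declares all holds each time around:
 */
static int
tx_assign_retry_sketch(objset_t *os, void (*hold_cb)(dmu_tx_t *))
{
	dmu_tx_t *tx;
	int error;

top:
	tx = dmu_tx_create(os);
	hold_cb(tx);		/* declare every hold before assigning */

	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			/*
			 * The open txg was full: wait for the next txg,
			 * then rebuild the tx from scratch and retry.
			 */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		return (error);
	}

	/* ... perform the declared modifications here ... */

	dmu_tx_commit(tx);
	return (0);
}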
void __osd_xattr_declare_set(const struct lu_env *env, struct osd_object *obj,
			     int vallen, const char *name,
			     struct osd_thandle *oh)
{
	struct osd_device *osd = osd_obj2dev(obj);
	dmu_buf_t         *db = obj->oo_db;
	dmu_tx_t          *tx = oh->ot_tx;
	uint64_t           xa_data_obj;
	int                rc = 0;
	int                here;

	if (unlikely(obj->oo_destroyed))
		return;

	here = dt_object_exists(&obj->oo_dt);

	/* object may be not yet created */
	if (here) {
		LASSERT(db);
		LASSERT(obj->oo_sa_hdl);
		/* we might just update SA_ZPL_DXATTR */
		dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 1);

		if (obj->oo_xattr == ZFS_NO_OBJECT)
			rc = -ENOENT;
	}

	if (!here || rc == -ENOENT) {
		/* we'll be updating SA_ZPL_XATTR */
		if (here) {
			LASSERT(obj->oo_sa_hdl);
			dmu_tx_hold_sa(tx, obj->oo_sa_hdl, 1);
		}
		/* xattr zap + entry */
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, TRUE, (char *) name);
		/* xattr value obj */
		dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, vallen);
		return;
	}

	rc = -zap_lookup(osd->od_os, obj->oo_xattr, name, sizeof(uint64_t), 1,
			 &xa_data_obj);
	if (rc == 0) {
		/*
		 * Entry already exists.
		 * We'll truncate the existing object.
		 */
		dmu_tx_hold_bonus(tx, xa_data_obj);
		dmu_tx_hold_free(tx, xa_data_obj, vallen, DMU_OBJECT_END);
		dmu_tx_hold_write(tx, xa_data_obj, 0, vallen);
		return;
	} else if (rc == -ENOENT) {
		/*
		 * Entry doesn't exist, we need to create a new one and a new
		 * object to store the value.
		 */
		dmu_tx_hold_bonus(tx, obj->oo_xattr);
		dmu_tx_hold_zap(tx, obj->oo_xattr, TRUE, (char *) name);
		dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, vallen);
		return;
	}

	/* An error happened */
	tx->tx_err = -rc;
}
static int osd_declare_object_create(const struct lu_env *env,
				     struct dt_object *dt,
				     struct lu_attr *attr,
				     struct dt_allocation_hint *hint,
				     struct dt_object_format *dof,
				     struct thandle *handle)
{
	char                *buf = osd_oti_get(env)->oti_str;
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object   *obj = osd_dt_obj(dt);
	struct osd_device   *osd = osd_obj2dev(obj);
	struct osd_thandle  *oh;
	uint64_t             zapid;
	int                  rc;
	ENTRY;

	LASSERT(dof);

	switch (dof->dof_type) {
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		if (obj->oo_dt.do_body_ops == NULL)
			obj->oo_dt.do_body_ops = &osd_body_ops;
		break;
	default:
		break;
	}

	LASSERT(handle != NULL);
	oh = container_of0(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	switch (dof->dof_type) {
	case DFT_DIR:
	case DFT_INDEX:
		/* for zap create */
		dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, 1, NULL);
		break;
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		/* first, we'll create new object */
		dmu_tx_hold_bonus(oh->ot_tx, DMU_NEW_OBJECT);
		break;
	default:
		LBUG();
		break;
	}

	/* and we'll add it to some mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, buf);
	dmu_tx_hold_bonus(oh->ot_tx, zapid);
	dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, buf);

	/* we will also update inode accounting ZAPs */
	dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);
	dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

	dmu_tx_hold_sa_create(oh->ot_tx, ZFS_SA_BASE_ATTR_SIZE);

	__osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
				XATTR_NAME_LMA, oh);

	rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
			       false, NULL, false);
	RETURN(rc);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, struct inode **xipp, cred_t *cr)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
#ifdef DEBUG
	uint64_t parent;
#endif

	*xipp = NULL;

	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)))
		return (error);

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (SET_ERROR(EDQUOT));
	}

	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zsb->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zsb, tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zsb, tx);

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zsb),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zsb), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	if (!zp->z_unlinked)
		(void) zfs_log_create(zsb->z_log, tx, TX_MKXATTR, zp, xzp,
		    "", NULL, acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xipp = ZTOI(xzp);

	return (0);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	uint64_t xoid;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;

	*xvpp = NULL;

#ifndef __APPLE__
	/* In Mac OS X access preflighting is done above the file system. */
	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, cr))
		return (error);
#endif /* !__APPLE__ */

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (EDQUOT);
	}
top:
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "", NULL,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	/* Cleanup any znode we consumed during zfs_mknode() */
	printf("ZFS_POSTPROCESS_ZP(xzp);\n");
	printf("zfs_dir attach 2\n");
	zfs_attach_vnode(xzp);

	*xvpp = ZTOV(xzp);

	return (0);
}
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	uint64_t parent;

	*xvpp = NULL;

	/*
	 * In FreeBSD, access checking for creating an EA is being done
	 * in zfs_setextattr(),
	 */
#ifndef __FreeBSD_kernel__
	if (error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr))
		return (error);
#endif

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (SET_ERROR(EDQUOT));
	}

	getnewvnode_reserve(1);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}

	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp, xzp, "", NULL,
	    acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	getnewvnode_drop_reserve();

	*xvpp = ZTOV(xzp);

	return (0);
}
static int osd_declare_object_create(const struct lu_env *env,
				     struct dt_object *dt,
				     struct lu_attr *attr,
				     struct dt_allocation_hint *hint,
				     struct dt_object_format *dof,
				     struct thandle *handle)
{
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object  *obj = osd_dt_obj(dt);
	struct osd_device  *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t            zapid;
	int                 rc, dnode_size;
	ENTRY;

	LASSERT(dof);

	switch (dof->dof_type) {
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		if (obj->oo_dt.do_body_ops == NULL)
			obj->oo_dt.do_body_ops = &osd_body_ops;
		break;
	default:
		break;
	}

	LASSERT(handle != NULL);
	oh = container_of0(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	/* this is the minimum set of EAs on every Lustre object */
	obj->oo_ea_in_bonus = ZFS_SA_BASE_ATTR_SIZE +
				sizeof(__u64) + /* VBR VERSION */
				sizeof(struct lustre_mdt_attrs); /* LMA */
	/* reserve 32 bytes for extra stuff like ACLs */
	dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);

	switch (dof->dof_type) {
	case DFT_DIR:
		dt->do_index_ops = &osd_dir_ops;
		/* fallthrough: a directory is created as a ZAP as well */
	case DFT_INDEX:
		/* for zap create */
		dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
		dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
		break;
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		/* first, we'll create new object */
		dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
		break;
	default:
		LBUG();
		break;
	}

	/* and we'll add it to some mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0);
	dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, NULL);

	/* we will also update inode accounting ZAPs */
	dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, FALSE, NULL);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, FALSE, NULL);

	rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
			       false, NULL, false);
	RETURN(rc);
}
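/*
 * Editor's sketch, not the canonical Lustre helper: size_roundup_power2()
 * above appears to round the estimated bonus/SA size up to the next power of
 * two before it is passed to dmu_tx_hold_sa_create().  An illustrative
 * implementation under that assumption (the name is suffixed to mark it as
 * hypothetical):
 */
static inline int
size_roundup_power2_sketch(int size)
{
	/* Smear the highest set bit downward, then add one: 100 -> 128. */
	size--;
	size |= size >> 1;
	size |= size >> 2;
	size |= size >> 4;
	size |= size >> 8;
	size |= size >> 16;
	return (size + 1);
}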