int __osd_attr_init(const struct lu_env *env, struct osd_device *osd,
		    sa_handle_t *sa_hdl, dmu_tx_t *tx,
		    struct lu_attr *la, uint64_t parent)
{
	sa_bulk_attr_t	*bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	uint64_t	 gen;
	uint64_t	 crtime[2];
	timestruc_t	 now;
	int		 cnt;
	int		 rc;

	LASSERT(sa_hdl);

	gen = dmu_tx_get_txg(tx);
	gethrestime(&now);
	ZFS_TIME_ENCODE(&now, crtime);

	osa->atime[0] = la->la_atime;
	osa->ctime[0] = la->la_ctime;
	osa->mtime[0] = la->la_mtime;
	osa->mode = la->la_mode;
	osa->uid = la->la_uid;
	osa->gid = la->la_gid;
	osa->rdev = la->la_rdev;
	osa->nlink = la->la_nlink;
	osa->flags = attrs_fs2zfs(la->la_flags);
	osa->size = la->la_size;

	/*
	 * we need to create all SA below upon object create.
	 *
	 * XXX The attribute order matters since the accounting callback relies
	 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
	 * look up the UID/GID attributes. Moreover, the callback does not seem
	 * to support the spill block.
	 * We define attributes in the same order as SA_*_OFFSET in order to
	 * work around the problem. See ORI-610.
	 */
	cnt = 0;
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(osd), NULL, &gen, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(osd), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL, &osa->flags, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(osd), NULL, crtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL, &osa->rdev, 8);
	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);

	return rc;
}
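/*
 * Illustrative sketch (not from the original source): the contract implied
 * above is that @tx has already been created, had the SA handle added to
 * it, and been assigned to a transaction group -- otherwise
 * dmu_tx_get_txg() would return 0 for the generation. A hypothetical
 * caller would look roughly like this; example_create_attrs() is made up
 * for illustration.
 */
#if 0	/* example only */
static int example_create_attrs(const struct lu_env *env,
				struct osd_device *osd, sa_handle_t *sa_hdl,
				struct lu_attr *la, uint64_t parent)
{
	dmu_tx_t *tx = dmu_tx_create(osd->od_os);
	int rc;

	dmu_tx_hold_sa(tx, sa_hdl, B_FALSE);	/* reserve space for the SAs */
	rc = -dmu_tx_assign(tx, TXG_WAIT);	/* join a transaction group */
	if (rc) {
		dmu_tx_abort(tx);
		return rc;
	}
	rc = __osd_attr_init(env, osd, sa_hdl, tx, la, parent);
	dmu_tx_commit(tx);
	return rc;
}
#endif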
/*
 * Update the embedded inode given the znode. We should work toward
 * eliminating this function as soon as possible by removing values
 * which are duplicated between the znode and inode. If the generic
 * inode has the correct field it should be used, and the ZFS code
 * updated to access the inode. This can be done incrementally.
 */
void
zfs_inode_update(znode_t *zp)
{
	zfs_sb_t	*zsb;
	struct inode	*ip;
	uint32_t	blksize;
	uint64_t	atime[2], mtime[2], ctime[2];

	ASSERT(zp != NULL);
	zsb = ZTOZSB(zp);
	ip = ZTOI(zp);

	/* Skip .zfs control nodes which do not exist on disk. */
	if (zfsctl_is_node(ip))
		return;

	sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zsb), &atime, 16);
	sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zsb), &mtime, 16);
	sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zsb), &ctime, 16);

	spin_lock(&ip->i_lock);
	ip->i_generation = zp->z_gen;
	ip->i_uid = SUID_TO_KUID(zp->z_uid);
	ip->i_gid = SGID_TO_KGID(zp->z_gid);
	set_nlink(ip, zp->z_links);
	ip->i_mode = zp->z_mode;
	zfs_set_inode_flags(zp, ip);
	ip->i_blkbits = SPA_MINBLOCKSHIFT;
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize,
	    (u_longlong_t *)&ip->i_blocks);

	ZFS_TIME_DECODE(&ip->i_atime, atime);
	ZFS_TIME_DECODE(&ip->i_mtime, mtime);
	ZFS_TIME_DECODE(&ip->i_ctime, ctime);

	i_size_write(ip, zp->z_size);
	spin_unlock(&ip->i_lock);
}
void
zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
{
	dmu_buf_t *db = sa_get_db(hdl);
	znode_t *zp = sa_get_userdata(hdl);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	sa_bulk_attr_t bulk[20];
	int count = 0;
	sa_bulk_attr_t sa_attrs[20] = { { 0 } };
	zfs_acl_locator_cb_t locate = { 0 };
	uint64_t uid, gid, mode, rdev, xattr, parent;
	uint64_t crtime[2], mtime[2], ctime[2];
	zfs_acl_phys_t znode_acl;
	char scanstamp[AV_SCANSTAMP_SZ];
	boolean_t drop_lock = B_FALSE;

	/*
	 * No upgrade if the ACL isn't cached, since we won't know which
	 * locks are held and reading the ACL would require special "locked"
	 * interfaces that would be messy.
	 */
	if (zp->z_acl_cached == NULL || vnode_islnk(ZTOV(zp)))
		return;

	/*
	 * If the z_lock is held and we aren't the owner, then just return,
	 * since we don't want to deadlock trying to update the status of
	 * z_is_sa. This file can then be upgraded at a later time.
	 *
	 * Otherwise, we know we are doing the sa_update() that caused us
	 * to enter this function.
	 */
	if (mutex_owner(&zp->z_lock) != curthread) {
		if (mutex_tryenter(&zp->z_lock) == 0)
			return;
		else
			drop_lock = B_TRUE;
	}

	/* First do a bulk query of the attributes that aren't cached */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_XATTR(zfsvfs), NULL, &xattr, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL, &rdev, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
	    &znode_acl, 88);

	if (sa_bulk_lookup_locked(hdl, bulk, count) != 0)
		goto done;

	/*
	 * While the order here doesn't matter, it's best to try to organize
	 * it in such a way as to pick up an already existing layout number.
	 */
	count = 0;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GEN(zfsvfs), NULL,
	    &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_UID(zfsvfs), NULL, &uid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_GID(zfsvfs), NULL, &gid, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    zp->z_atime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    &mtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    &ctime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_CRTIME(zfsvfs), NULL,
	    &crtime, 16);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	if (vnode_isblk(zp->z_vnode) || vnode_islnk(zp->z_vnode))
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_RDEV(zfsvfs), NULL,
		    &rdev, 8);
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
	    &zp->z_acl_cached->z_acl_count, 8);

	if (zp->z_acl_cached->z_version < ZFS_ACL_VERSION_FUID)
		zfs_acl_xform(zp, zp->z_acl_cached, CRED());

	locate.cb_aclp = zp->z_acl_cached;
	SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_DACL_ACES(zfsvfs),
	    zfs_acl_data_locator, &locate, zp->z_acl_cached->z_acl_bytes);

	if (xattr)
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_XATTR(zfsvfs),
		    NULL, &xattr, 8);

	/* if there is a scanstamp in the bonus buffer, carry it over */
	if (zp->z_pflags & ZFS_BONUS_SCANSTAMP) {
		bcopy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
		    scanstamp, AV_SCANSTAMP_SZ);
		SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SCANSTAMP(zfsvfs),
		    NULL, scanstamp, AV_SCANSTAMP_SZ);
		zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
	}

	VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
	VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
	    count, tx) == 0);
	if (znode_acl.z_acl_extern_obj)
		VERIFY(0 == dmu_object_free(zfsvfs->z_os,
		    znode_acl.z_acl_extern_obj, tx));

	zp->z_is_sa = B_TRUE;
done:
	if (drop_lock)
		mutex_exit(&zp->z_lock);
}
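/*
 * Sketch (assumption, not part of the excerpt above): before the
 * sa_update() that lands in zfs_sa_upgrade(), the transaction must have
 * reserved room for the new SA layout and for freeing any external ACL
 * object. Upstream zfs_sa.c provides a companion helper of roughly this
 * shape:
 */
#if 0	/* example only */
void
zfs_sa_upgrade_txholds(dmu_tx_t *tx, znode_t *zp)
{
	/* nothing to do if SAs are disabled or the upgrade already ran */
	if (!zp->z_zfsvfs->z_use_sa || zp->z_is_sa)
		return;

	/* reserve room for the replacement SA layout */
	dmu_tx_hold_sa_create(tx, DN_MAX_BONUSLEN);

	/* the upgrade frees an external ACL object, so hold that too */
	if (zfs_external_acl(zp))
		dmu_tx_hold_free(tx, zfs_external_acl(zp), 0,
		    DN_MAX_BONUSLEN);
}
#endif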
/*
 * Set the attributes of an object.
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle,
			struct lustre_capa *capa)
{
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	udmu_objset_t		*uos = &osd->od_objset;
	struct osd_thandle	*oh;
	struct osa_attr		*osa = &osd_oti_get(env)->oti_osa;
	sa_bulk_attr_t		*bulk;
	int			 cnt;
	int			 rc = 0;

	ENTRY;
	LASSERT(handle != NULL);
	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of0(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	 * transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	if (la->la_valid == 0)
		RETURN(0);

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 10);
	if (bulk == NULL)
		RETURN(-ENOMEM);

	/* do both accounting updates outside oo_attr_lock below */
	if ((la->la_valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
		/* Update user accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_objset.os, osd->od_iusr_oid,
					la->la_uid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
			       "%d (%d)\n", osd->od_svname, la->la_uid, rc);
		rc = -zap_increment_int(osd->od_objset.os, osd->od_iusr_oid,
					obj->oo_attr.la_uid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
			       "%d (%d)\n", osd->od_svname,
			       obj->oo_attr.la_uid, rc);
	}
	if ((la->la_valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
		/* Update group accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_objset.os, osd->od_igrp_oid,
					la->la_gid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for group "
			       "%d (%d)\n", osd->od_svname, la->la_gid, rc);
		rc = -zap_increment_int(osd->od_objset.os, osd->od_igrp_oid,
					obj->oo_attr.la_gid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for group "
			       "%d (%d)\n", osd->od_svname,
			       obj->oo_attr.la_gid, rc);
	}

	write_lock(&obj->oo_attr_lock);
	cnt = 0;
	if (la->la_valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL,
				 osa->atime, 16);
	}
	if (la->la_valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL,
				 osa->mtime, 16);
	}
	if (la->la_valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL,
				 osa->ctime, 16);
	}
	if (la->la_valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
				       (la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL,
				 &osa->mode, 8);
	}
	if (la->la_valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL,
				 &osa->size, 8);
	}
	if (la->la_valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL,
				 &osa->nlink, 8);
	}
	if (la->la_valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(uos), NULL,
				 &osa->rdev, 8);
	}
	if (la->la_valid & LA_FLAGS) {
		osa->flags = obj->oo_attr.la_flags = la->la_flags;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL,
				 &osa->flags, 8);
	}
	if (la->la_valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL,
				 &osa->uid, 8);
	}
	if (la->la_valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= la->la_valid;
	write_unlock(&obj->oo_attr_lock);

	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 10);
	RETURN(rc);
}
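/*
 * Illustrative sketch (hypothetical, not from the original source): the
 * header comment above requires the transaction to already carry a
 * bonus-buffer hold for the object and to be assigned to a group before
 * osd_attr_set() runs, i.e. the declare phase looked roughly like this;
 * example_declare_attr_set() is an invented name.
 */
#if 0	/* example only */
static void example_declare_attr_set(struct osd_thandle *oh, uint64_t oid)
{
	/* reserve the bonus buffer where the SAs live ... */
	dmu_tx_hold_bonus(oh->ot_tx, oid);
	/* ... and only then assign the tx to a group;
	 * osd_attr_set() LASSERTs that tx_txg != 0 */
}
#endif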
/*
 * Retrieve the attributes of a DMU object
 */
int __osd_object_attr_get(const struct lu_env *env, udmu_objset_t *uos,
			  struct osd_object *obj, struct lu_attr *la)
{
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	sa_handle_t	*sa_hdl;
	sa_bulk_attr_t	*bulk;
	int		 cnt = 0;
	int		 rc;

	ENTRY;
	LASSERT(obj->oo_db != NULL);

	rc = -sa_handle_get(uos->os, obj->oo_db->db_object, NULL,
			    SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		RETURN(rc);

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 9);
	if (bulk == NULL)
		GOTO(out_sa, rc = -ENOMEM);

	la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
			LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL, &osa->flags, 8);

	rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
	if (rc)
		GOTO(out_bulk, rc);

	la->la_atime = osa->atime[0];
	la->la_mtime = osa->mtime[0];
	la->la_ctime = osa->ctime[0];
	la->la_mode = osa->mode;
	la->la_uid = osa->uid;
	la->la_gid = osa->gid;
	la->la_nlink = osa->nlink;
	la->la_flags = osa->flags;
	la->la_size = osa->size;

	if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
		rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(uos), &osa->rdev, 8);
		if (rc)
			GOTO(out_bulk, rc);
		la->la_rdev = osa->rdev;
		la->la_valid |= LA_RDEV;
	}
out_bulk:
	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 9);
out_sa:
	sa_handle_destroy(sa_hdl);

	RETURN(rc);
}
int __osd_attr_init(const struct lu_env *env, udmu_objset_t *uos,
		    uint64_t oid, dmu_tx_t *tx, struct lu_attr *la)
{
	sa_bulk_attr_t	*bulk;
	sa_handle_t	*sa_hdl;
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	uint64_t	 gen;
	uint64_t	 parent;
	uint64_t	 crtime[2];
	timestruc_t	 now;
	int		 cnt;
	int		 rc;

	gethrestime(&now);
	gen = dmu_tx_get_txg(tx);
	ZFS_TIME_ENCODE(&now, crtime);

	/* XXX: this should be the real id of the parent for ZPL access,
	 * but we have no such info in OSD; probably it can be part of
	 * dt_object_format */
	parent = 0;

	osa->atime[0] = la->la_atime;
	osa->ctime[0] = la->la_ctime;
	osa->mtime[0] = la->la_mtime;
	osa->mode = la->la_mode;
	osa->uid = la->la_uid;
	osa->gid = la->la_gid;
	osa->rdev = la->la_rdev;
	osa->nlink = la->la_nlink;
	osa->flags = la->la_flags;
	osa->size = la->la_size;

	/* Now add in all of the "SA" attributes */
	rc = -sa_handle_get(uos->os, oid, NULL, SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		return rc;

	OBD_ALLOC(bulk, sizeof(sa_bulk_attr_t) * 13);
	if (bulk == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	/*
	 * we need to create all SA below upon object create.
	 *
	 * XXX The attribute order matters since the accounting callback relies
	 * on static offsets (i.e. SA_*_OFFSET, see zfs_space_delta_cb()) to
	 * look up the UID/GID attributes. Moreover, the callback does not seem
	 * to support the spill block.
	 * We define attributes in the same order as SA_*_OFFSET in order to
	 * work around the problem. See ORI-610.
	 */
	cnt = 0;
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(uos), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(uos), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GEN(uos), NULL, &gen, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(uos), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(uos), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_PARENT(uos), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(uos), NULL, &osa->flags, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(uos), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(uos), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(uos), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CRTIME(uos), NULL, crtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(uos), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(uos), NULL, &osa->rdev, 8);

	rc = -sa_replace_all_by_template(sa_hdl, bulk, cnt, tx);

	OBD_FREE(bulk, sizeof(sa_bulk_attr_t) * 13);
out:
	sa_handle_destroy(sa_hdl);
	return rc;
}
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp  - File/Dir initial ACL
 *		fuidp	 - Tracks fuid allocation.
 *
 *	OUT:	zpp	- allocated znode
 */
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
    uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
	uint64_t	crtime[2], atime[2], mtime[2], ctime[2];
	uint64_t	mode, size, links, parent, pflags;
	uint64_t	dzp_pflags = 0;
	uint64_t	rdev = 0;
	zfs_sb_t	*zsb = ZTOZSB(dzp);
	dmu_buf_t	*db;
	timestruc_t	now;
	uint64_t	gen, obj;
	int		bonuslen;
	sa_handle_t	*sa_hdl;
	dmu_object_type_t obj_type;
	sa_bulk_attr_t	*sa_attrs;
	int		cnt = 0;
	zfs_acl_locator_cb_t locate = { 0 };

	if (zsb->z_replay) {
		obj = vap->va_nodeid;
		now = vap->va_ctime;		/* see zfs_replay_create() */
		gen = vap->va_nblocks;		/* ditto */
	} else {
		obj = 0;
		gethrestime(&now);
		gen = dmu_tx_get_txg(tx);
	}

	obj_type = zsb->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
	bonuslen = (obj_type == DMU_OT_SA) ?
	    DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE;

	/*
	 * Create a new DMU object.
	 *
	 * There's currently no mechanism for pre-reading the blocks that will
	 * be needed to allocate a new object, so we accept the small chance
	 * that there will be an i/o error and we will fail one of the
	 * assertions below.
	 */
	if (S_ISDIR(vap->va_mode)) {
		if (zsb->z_replay) {
			VERIFY0(zap_create_claim_norm(zsb->z_os, obj,
			    zsb->z_norm, DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx));
		} else {
			obj = zap_create_norm(zsb->z_os, zsb->z_norm,
			    DMU_OT_DIRECTORY_CONTENTS,
			    obj_type, bonuslen, tx);
		}
	} else {
		if (zsb->z_replay) {
			VERIFY0(dmu_object_claim(zsb->z_os, obj,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx));
		} else {
			obj = dmu_object_alloc(zsb->z_os,
			    DMU_OT_PLAIN_FILE_CONTENTS, 0,
			    obj_type, bonuslen, tx);
		}
	}

	ZFS_OBJ_HOLD_ENTER(zsb, obj);
	VERIFY(0 == sa_buf_hold(zsb->z_os, obj, NULL, &db));

	/*
	 * If this is the root, fix up the half-initialized parent pointer
	 * to reference the just-allocated physical data area.
	 */
	if (flag & IS_ROOT_NODE) {
		dzp->z_id = obj;
	} else {
		dzp_pflags = dzp->z_pflags;
	}

	/*
	 * If parent is an xattr, so am I.
	 */
	if (dzp_pflags & ZFS_XATTR) {
		flag |= IS_XATTR;
	}

	if (zsb->z_use_fuids)
		pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
	else
		pflags = 0;

	if (S_ISDIR(vap->va_mode)) {
		size = 2;		/* contents ("." and "..") */
		links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 2 : 1;
	} else {
		size = links = 0;
	}

	if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
		rdev = vap->va_rdev;

	parent = dzp->z_id;
	mode = acl_ids->z_mode;
	if (flag & IS_XATTR)
		pflags |= ZFS_XATTR;

	/*
	 * No execs denied will be determined when zfs_mode_compute() is
	 * called.
	 */
	pflags |= acl_ids->z_aclp->z_hints &
	    (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
	    ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);

	ZFS_TIME_ENCODE(&now, crtime);
	ZFS_TIME_ENCODE(&now, ctime);

	if (vap->va_mask & ATTR_ATIME) {
		ZFS_TIME_ENCODE(&vap->va_atime, atime);
	} else {
		ZFS_TIME_ENCODE(&now, atime);
	}

	if (vap->va_mask & ATTR_MTIME) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
	} else {
		ZFS_TIME_ENCODE(&now, mtime);
	}

	/* Now add in all of the "SA" attributes */
	VERIFY(0 == sa_handle_get_from_db(zsb->z_os, db, NULL, SA_HDL_SHARED,
	    &sa_hdl));

	/*
	 * Setup the array of attributes to be replaced/set on the new file
	 *
	 * order for DMU_OT_ZNODE is critical since it needs to be constructed
	 * in the old znode_phys_t format. Don't change this ordering
	 */
	sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
	} else {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zsb),
		    NULL, &mode, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zsb),
		    NULL, &size, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zsb),
		    NULL, &gen, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb),
		    NULL, &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb),
		    NULL, &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zsb),
		    NULL, &parent, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb),
		    NULL, &atime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zsb),
		    NULL, &mtime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zsb),
		    NULL, &ctime, 16);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zsb),
		    NULL, &crtime, 16);
	}

	SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zsb), NULL, &links, 8);

	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zsb), NULL,
		    &empty_xattr, 8);
	}
	if (obj_type == DMU_OT_ZNODE ||
	    (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zsb),
		    NULL, &rdev, 8);
	}
	if (obj_type == DMU_OT_ZNODE) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zsb),
		    NULL, &pflags, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zsb), NULL,
		    &acl_ids->z_fuid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zsb), NULL,
		    &acl_ids->z_fgid, 8);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zsb), NULL, pad,
		    sizeof (uint64_t) * 4);
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zsb), NULL,
		    &acl_phys, sizeof (zfs_acl_phys_t));
	} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zsb), NULL,
		    &acl_ids->z_aclp->z_acl_count, 8);
		locate.cb_aclp = acl_ids->z_aclp;
		SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zsb),
		    zfs_acl_data_locator, &locate,
		    acl_ids->z_aclp->z_acl_bytes);
		mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
		    acl_ids->z_fuid, acl_ids->z_fgid);
	}

	VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);

	if (!(flag & IS_ROOT_NODE)) {
		*zpp = zfs_znode_alloc(zsb, db, 0, obj_type, obj, sa_hdl,
		    ZTOI(dzp));
		VERIFY(*zpp != NULL);
		VERIFY(dzp != NULL);
	} else {
		/*
		 * If we are creating the root node, the "parent" we
		 * passed in is the znode for the root.
		 */
		*zpp = dzp;

		(*zpp)->z_sa_hdl = sa_hdl;
	}

	(*zpp)->z_pflags = pflags;
	(*zpp)->z_mode = mode;

	if (obj_type == DMU_OT_ZNODE ||
	    acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
		VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
	}
	kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
	ZFS_OBJ_HOLD_EXIT(zsb, obj);
}
/*
 * Construct a znode+inode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is up to the caller
 * to do, in case you don't want to return the znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
    struct inode *dip)
{
	znode_t	*zp;
	struct inode *ip;
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;

	ASSERT(zsb != NULL);

	ip = new_inode(zsb->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_parent, ==, NULL);
	zp->z_moved = 0;
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_is_zvol = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;
	zp->z_is_stale = B_FALSE;

	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		goto error;
	}

	zp->z_mode = mode;

	/*
	 * xattr znodes hold a reference on their unique parent
	 */
	if (dip && zp->z_pflags & ZFS_XATTR) {
		igrab(dip);
		zp->z_xattr_parent = ITOZ(dip);
	}

	ip->i_ino = obj;
	zfs_inode_update(zp);
	zfs_inode_set_ops(zsb, ip);

	/*
	 * The only way insert_inode_locked() can fail is if the ip->i_ino
	 * number is already hashed for this super block.  This can never
	 * happen because the inode numbers map 1:1 with the object numbers.
	 *
	 * The one exception is rolling back a mounted file system, but in
	 * this case all the active inodes are unhashed during the rollback.
	 */
	VERIFY3S(insert_inode_locked(ip), ==, 0);

	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);
	zsb->z_nr_znodes++;
	membar_producer();
	mutex_exit(&zsb->z_znodes_lock);

	unlock_new_inode(ip);
	return (zp);

error:
	unlock_new_inode(ip);
	iput(ip);
	return (NULL);
}
int
zfs_rezget(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_object_info_t doi;
	dmu_buf_t *db;
	uint64_t obj_num = zp->z_id;
	uint64_t mode;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}
	if (zp->z_xattr_parent) {
		iput(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_unlinked = (zp->z_links == 0);
	zp->z_blksz = doi.doi_data_block_size;
	zfs_inode_update(zp);

	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);

	return (0);
}
void
commonattrpack(attrinfo_t *aip, zfsvfs_t *zfsvfs, znode_t *zp,
    const char *name, ino64_t objnum, enum vtype vtype, boolean_t user64)
{
	attrgroup_t commonattr = aip->ai_attrlist->commonattr;
	void *attrbufptr = *aip->ai_attrbufpp;
	void *varbufptr = *aip->ai_varbufpp;
	struct mount *mp = zfsvfs->z_vfs;
	cred_t *cr = (cred_t *)vfs_context_ucred(aip->ai_context);
	finderinfo_t finderinfo;

	/*
	 * We should probably combine all the sa_lookup calls into a single
	 * bulk lookup operation.
	 */
	finderinfo.fi_flags = 0;

	if (ATTR_CMN_NAME & commonattr) {
		nameattrpack(aip, name, strlen(name));
		attrbufptr = *aip->ai_attrbufpp;
		varbufptr = *aip->ai_varbufpp;
	}
	if (ATTR_CMN_DEVID & commonattr) {
		*((dev_t *)attrbufptr) = vfs_statfs(mp)->f_fsid.val[0];
		attrbufptr = ((dev_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_FSID & commonattr) {
		*((fsid_t *)attrbufptr) = vfs_statfs(mp)->f_fsid;
		attrbufptr = ((fsid_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_OBJTYPE & commonattr) {
		*((fsobj_type_t *)attrbufptr) = vtype;
		attrbufptr = ((fsobj_type_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_OBJTAG & commonattr) {
		*((fsobj_tag_t *)attrbufptr) = VT_ZFS;
		attrbufptr = ((fsobj_tag_t *)attrbufptr) + 1;
	}
	/*
	 * Note: ATTR_CMN_OBJID is lossy (only 32 bits).
	 */
	if ((ATTR_CMN_OBJID | ATTR_CMN_OBJPERMANENTID) & commonattr) {
		u_int32_t fileid;
		/*
		 * On Mac OS X we always export the root directory id as 2
		 */
		fileid = (objnum == zfsvfs->z_root) ? 2 : objnum;

		if (ATTR_CMN_OBJID & commonattr) {
			((fsobj_id_t *)attrbufptr)->fid_objno = fileid;
			((fsobj_id_t *)attrbufptr)->fid_generation = 0;
			attrbufptr = ((fsobj_id_t *)attrbufptr) + 1;
		}
		if (ATTR_CMN_OBJPERMANENTID & commonattr) {
			((fsobj_id_t *)attrbufptr)->fid_objno = fileid;
			((fsobj_id_t *)attrbufptr)->fid_generation = 0;
			attrbufptr = ((fsobj_id_t *)attrbufptr) + 1;
		}
	}
	/*
	 * Note: ATTR_CMN_PAROBJID is lossy (only 32 bits).
	 */
	if (ATTR_CMN_PAROBJID & commonattr) {
		uint64_t parentid;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
		    &parentid, sizeof (parentid)) == 0);

		/*
		 * On Mac OS X we always export the root
		 * directory id as 2 and its parent as 1
		 */
		if (zp && zp->z_id == zfsvfs->z_root)
			parentid = 1;
		else if (parentid == zfsvfs->z_root)
			parentid = 2;
		ASSERT(parentid != 0);

		((fsobj_id_t *)attrbufptr)->fid_objno = (uint32_t)parentid;
		((fsobj_id_t *)attrbufptr)->fid_generation = 0;
		attrbufptr = ((fsobj_id_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_SCRIPT & commonattr) {
		*((text_encoding_t *)attrbufptr) = kTextEncodingMacUnicode;
		attrbufptr = ((text_encoding_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_CRTIME & commonattr) {
		uint64_t times[2];

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
		    times, sizeof (times)) == 0);
		if (user64) {
			ZFS_TIME_DECODE((timespec_user64_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user64_t *)attrbufptr) + 1;
		} else {
			ZFS_TIME_DECODE((timespec_user32_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user32_t *)attrbufptr) + 1;
		}
	}
	if (ATTR_CMN_MODTIME & commonattr) {
		uint64_t times[2];

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zfsvfs),
		    times, sizeof (times)) == 0);
		if (user64) {
			ZFS_TIME_DECODE((timespec_user64_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user64_t *)attrbufptr) + 1;
		} else {
			ZFS_TIME_DECODE((timespec_user32_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user32_t *)attrbufptr) + 1;
		}
	}
	if (ATTR_CMN_CHGTIME & commonattr) {
		uint64_t times[2];

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zfsvfs),
		    times, sizeof (times)) == 0);
		if (user64) {
			ZFS_TIME_DECODE((timespec_user64_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user64_t *)attrbufptr) + 1;
		} else {
			ZFS_TIME_DECODE((timespec_user32_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user32_t *)attrbufptr) + 1;
		}
	}
	if (ATTR_CMN_ACCTIME & commonattr) {
		uint64_t times[2];

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
		    times, sizeof (times)) == 0);
		if (user64) {
			ZFS_TIME_DECODE((timespec_user64_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user64_t *)attrbufptr) + 1;
		} else {
			ZFS_TIME_DECODE((timespec_user32_t *)attrbufptr,
			    times);
			attrbufptr = ((timespec_user32_t *)attrbufptr) + 1;
		}
	}
	if (ATTR_CMN_BKUPTIME & commonattr) {
		/* legacy attribute -- just pass zero */
		if (user64) {
			((timespec_user64_t *)attrbufptr)->tv_sec = 0;
			((timespec_user64_t *)attrbufptr)->tv_nsec = 0;
			attrbufptr = ((timespec_user64_t *)attrbufptr) + 1;
		} else {
			((timespec_user32_t *)attrbufptr)->tv_sec = 0;
			((timespec_user32_t *)attrbufptr)->tv_nsec = 0;
			attrbufptr = ((timespec_user32_t *)attrbufptr) + 1;
		}
	}
	if (ATTR_CMN_FNDRINFO & commonattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
		    &val, sizeof (val)) == 0);
		getfinderinfo(zp, cr, &finderinfo);
		/* Shadow ZFS_HIDDEN to Finder Info's invisible bit */
		if (val & ZFS_HIDDEN) {
			finderinfo.fi_flags |=
			    OSSwapHostToBigConstInt16(kIsInvisible);
		}
		bcopy(&finderinfo, attrbufptr, sizeof (finderinfo));
		attrbufptr = (char *)attrbufptr + 32;
	}
	if (ATTR_CMN_OWNERID & commonattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_UID(zfsvfs),
		    &val, sizeof (val)) == 0);
		*((uid_t *)attrbufptr) = val;
		attrbufptr = ((uid_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_GRPID & commonattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GID(zfsvfs),
		    &val, sizeof (val)) == 0);
		*((gid_t *)attrbufptr) = val;
		attrbufptr = ((gid_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_ACCESSMASK & commonattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
		    &val, sizeof (val)) == 0);
		*((u_int32_t *)attrbufptr) = val;
		attrbufptr = ((u_int32_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_FLAGS & commonattr) {
		u_int32_t flags = zfs_getbsdflags(zp);

		/* Shadow Finder Info's invisible bit to UF_HIDDEN */
		if ((ATTR_CMN_FNDRINFO & commonattr) &&
		    (OSSwapBigToHostInt16(finderinfo.fi_flags) &
		    kIsInvisible))
			flags |= UF_HIDDEN;

		*((u_int32_t *)attrbufptr) = flags;
		attrbufptr = ((u_int32_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_USERACCESS & commonattr) {
		u_int32_t user_access = 0;
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
		    &val, sizeof (val)) == 0);

		user_access = getuseraccess(zp, aip->ai_context);

		/* Also consider READ-ONLY file system. */
		if (vfs_flags(mp) & MNT_RDONLY) {
			user_access &= ~W_OK;
		}

		/* Locked objects are not writable either */
		if ((val & ZFS_IMMUTABLE) &&
		    (vfs_context_suser(aip->ai_context) != 0)) {
			user_access &= ~W_OK;
		}

		*((u_int32_t *)attrbufptr) = user_access;
		attrbufptr = ((u_int32_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_FILEID & commonattr) {
		/*
		 * On Mac OS X we always export the root directory id as 2
		 */
		if (objnum == zfsvfs->z_root)
			objnum = 2;
		*((u_int64_t *)attrbufptr) = objnum;
		attrbufptr = ((u_int64_t *)attrbufptr) + 1;
	}
	if (ATTR_CMN_PARENTID & commonattr) {
		uint64_t parentid;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
		    &parentid, sizeof (parentid)) == 0);

		/*
		 * On Mac OS X we always export the root
		 * directory id as 2 and its parent as 1
		 */
		if (zp && zp->z_id == zfsvfs->z_root)
			parentid = 1;
		else if (parentid == zfsvfs->z_root)
			parentid = 2;
		ASSERT(parentid != 0);

		*((u_int64_t *)attrbufptr) = parentid;
		attrbufptr = ((u_int64_t *)attrbufptr) + 1;
	}

	*aip->ai_attrbufpp = attrbufptr;
	*aip->ai_varbufpp = varbufptr;
}
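/*
 * Sketch (assumption, not from the original source): the "bulk lookup"
 * that the comment at the top of commonattrpack() suggests would gather
 * the repeated sa_lookup() calls into a single sa_bulk_lookup(), e.g. for
 * the four timestamps. example_bulk_times() is a made-up name.
 */
#if 0	/* example only */
static int
example_bulk_times(znode_t *zp, zfsvfs_t *zfsvfs, uint64_t crtime[2],
    uint64_t mtime[2], uint64_t ctime[2], uint64_t atime[2])
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL,
	    crtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    atime, 16);

	/* one lookup through the SA layer instead of four */
	return (sa_bulk_lookup(zp->z_sa_hdl, bulk, count));
}
#endif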
/* For part 1 of zfs_getattr() */
int
zfs_getattr_znode_locked(vattr_t *vap, znode_t *zp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	uint64_t times[2];
	uint64_t val;

	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_mode = val & MODEMASK;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_UID(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_uid = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GID(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_gid = val;

	/* On OS X, the root directory id is always 2 */
	vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_id;

	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_nlink = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_data_size = val;
	vap->va_total_size = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_rdev = val;
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
	    &val, sizeof (val)) == 0);
	vap->va_gen = val;

	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_create_time, times);
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_access_time, times);
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_MTIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_modify_time, times);
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CTIME(zfsvfs),
	    times, sizeof (times));
	ZFS_TIME_DECODE(&vap->va_change_time, times);

	if (VATTR_IS_ACTIVE(vap, va_backup_time)) {
		vap->va_backup_time.tv_sec = 0;
		vap->va_backup_time.tv_nsec = 0;
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	vap->va_flags = zfs_getbsdflags(zp);

	/* On OS X, the root directory id is always 2 and its parent is 1 */
	VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &val, sizeof (val)) == 0);
	if (zp->z_id == zfsvfs->z_root)
		vap->va_parentid = 1;
	else if (val == zfsvfs->z_root)
		vap->va_parentid = 2;
	else
		vap->va_parentid = val;

	vap->va_iosize = zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
	VATTR_SET_SUPPORTED(vap, va_iosize);

	vap->va_supported |= ZFS_SUPPORTED_VATTRS;

	if (VATTR_IS_ACTIVE(vap, va_nchildren) && vnode_isdir(ZTOV(zp)))
		VATTR_RETURN(vap, va_nchildren, vap->va_nlink - 2);

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		/* an error here means the object carries no ZNODE_ACL SA */
		if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
		    times, sizeof (times)))) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
		} else {
			error = zfs_getacl(zp, &vap->va_acl, B_TRUE, cr);
			if (error)
				return (error);
			VATTR_SET_SUPPORTED(vap, va_acl);
			/*
			 * va_acl implies that va_uuuid and va_guuid are
			 * also supported.
			 */
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		}
	}

	return (0);
}
/*
 * Set the attributes of an object.
 *
 * The transaction passed to this routine must have
 * dmu_tx_hold_bonus(tx, oid) called and then assigned
 * to a transaction group.
 */
static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
			const struct lu_attr *la, struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	sa_bulk_attr_t		*bulk = osd_oti_get(env)->oti_attr_bulk;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	struct osa_attr		*osa = &info->oti_osa;
	__u64			 valid = la->la_valid;
	int			 cnt;
	int			 rc = 0;

	ENTRY;

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_sa_hdl);

	oh = container_of0(handle, struct osd_thandle, ot_super);
	/* Assert that the transaction has been assigned to a
	 * transaction group. */
	LASSERT(oh->ot_tx->tx_txg != 0);

	/* Only allow set size for regular file */
	if (!S_ISREG(dt->do_lu.lo_header->loh_attr))
		valid &= ~(LA_SIZE | LA_BLOCKS);

	if (valid & LA_CTIME && la->la_ctime == obj->oo_attr.la_ctime)
		valid &= ~LA_CTIME;

	if (valid & LA_MTIME && la->la_mtime == obj->oo_attr.la_mtime)
		valid &= ~LA_MTIME;

	if (valid & LA_ATIME && la->la_atime == obj->oo_attr.la_atime)
		valid &= ~LA_ATIME;

	if (valid == 0)
		GOTO(out, rc = 0);

	if (valid & LA_FLAGS) {
		struct lustre_mdt_attrs *lma;
		struct lu_buf buf;

		if (la->la_flags & LUSTRE_LMA_FL_MASKS) {
			CLASSERT(sizeof(info->oti_buf) >= sizeof(*lma));
			lma = (struct lustre_mdt_attrs *)&info->oti_buf;
			buf.lb_buf = lma;
			buf.lb_len = sizeof(info->oti_buf);
			rc = osd_xattr_get(env, &obj->oo_dt, &buf,
					   XATTR_NAME_LMA);
			if (rc > 0) {
				lma->lma_incompat =
					le32_to_cpu(lma->lma_incompat);
				lma->lma_incompat |=
					lustre_to_lma_flags(la->la_flags);
				lma->lma_incompat =
					cpu_to_le32(lma->lma_incompat);
				buf.lb_buf = lma;
				buf.lb_len = sizeof(*lma);
				rc = osd_xattr_set_internal(env, obj, &buf,
							    XATTR_NAME_LMA,
							    LU_XATTR_REPLACE,
							    oh);
			}
			if (rc < 0) {
				CWARN("%s: failed to set LMA flags: rc = %d\n",
				      osd->od_svname, rc);
				/* must release oo_guard on the error path */
				GOTO(out, rc);
			}
		}
	}

	/* do both accounting updates outside oo_attr_lock below */
	if ((valid & LA_UID) && (la->la_uid != obj->oo_attr.la_uid)) {
		/* Update user accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
					la->la_uid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
			       "%d (%d)\n", osd->od_svname, la->la_uid, rc);
		rc = -zap_increment_int(osd->od_os, osd->od_iusr_oid,
					obj->oo_attr.la_uid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for user "
			       "%d (%d)\n", osd->od_svname,
			       obj->oo_attr.la_uid, rc);
	}
	if ((valid & LA_GID) && (la->la_gid != obj->oo_attr.la_gid)) {
		/* Update group accounting. Failure isn't fatal, but we still
		 * log an error message */
		rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
					la->la_gid, 1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for group "
			       "%d (%d)\n", osd->od_svname, la->la_gid, rc);
		rc = -zap_increment_int(osd->od_os, osd->od_igrp_oid,
					obj->oo_attr.la_gid, -1, oh->ot_tx);
		if (rc)
			CERROR("%s: failed to update accounting ZAP for group "
			       "%d (%d)\n", osd->od_svname,
			       obj->oo_attr.la_gid, rc);
	}

	write_lock(&obj->oo_attr_lock);
	cnt = 0;
	if (valid & LA_ATIME) {
		osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(osd), NULL,
				 osa->atime, 16);
	}
	if (valid & LA_MTIME) {
		osa->mtime[0] = obj->oo_attr.la_mtime = la->la_mtime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(osd), NULL,
				 osa->mtime, 16);
	}
	if (valid & LA_CTIME) {
		osa->ctime[0] = obj->oo_attr.la_ctime = la->la_ctime;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(osd), NULL,
				 osa->ctime, 16);
	}
	if (valid & LA_MODE) {
		/* mode is stored along with type, so read it first */
		obj->oo_attr.la_mode = (obj->oo_attr.la_mode & S_IFMT) |
			(la->la_mode & ~S_IFMT);
		osa->mode = obj->oo_attr.la_mode;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(osd), NULL,
				 &osa->mode, 8);
	}
	if (valid & LA_SIZE) {
		osa->size = obj->oo_attr.la_size = la->la_size;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(osd), NULL,
				 &osa->size, 8);
	}
	if (valid & LA_NLINK) {
		osa->nlink = obj->oo_attr.la_nlink = la->la_nlink;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(osd), NULL,
				 &osa->nlink, 8);
	}
	if (valid & LA_RDEV) {
		osa->rdev = obj->oo_attr.la_rdev = la->la_rdev;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_RDEV(osd), NULL,
				 &osa->rdev, 8);
	}
	if (valid & LA_FLAGS) {
		osa->flags = attrs_fs2zfs(la->la_flags);
		/* many flags are not supported by zfs, so ensure a good cached
		 * copy */
		obj->oo_attr.la_flags = attrs_zfs2fs(osa->flags);
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(osd), NULL,
				 &osa->flags, 8);
	}
	if (valid & LA_UID) {
		osa->uid = obj->oo_attr.la_uid = la->la_uid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(osd), NULL,
				 &osa->uid, 8);
	}
	if (valid & LA_GID) {
		osa->gid = obj->oo_attr.la_gid = la->la_gid;
		SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(osd), NULL,
				 &osa->gid, 8);
	}
	obj->oo_attr.la_valid |= valid;
	write_unlock(&obj->oo_attr_lock);

	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));
	rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
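/*
 * Note on the LA_FLAGS handling above (added observation, not in the
 * original source): the cached copy is taken from
 * attrs_zfs2fs(attrs_fs2zfs(la_flags)) rather than from la_flags directly,
 * so any flag bit that ZFS cannot represent is dropped from the cache as
 * well -- the in-memory attributes then always match what a later SA
 * lookup of SA_ZPL_FLAGS would return.
 */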
/*
 * Retrieve the attributes of a DMU object
 */
int __osd_object_attr_get(const struct lu_env *env, struct osd_device *o,
			  struct osd_object *obj, struct lu_attr *la)
{
	struct osa_attr	*osa = &osd_oti_get(env)->oti_osa;
	sa_bulk_attr_t	*bulk = osd_oti_get(env)->oti_attr_bulk;
	sa_handle_t	*sa_hdl;
	int		 cnt = 0;
	int		 rc;
	ENTRY;

	LASSERT(obj->oo_db != NULL);

	rc = -sa_handle_get(o->od_os, obj->oo_db->db_object, NULL,
			    SA_HDL_PRIVATE, &sa_hdl);
	if (rc)
		RETURN(rc);

	la->la_valid |= LA_ATIME | LA_MTIME | LA_CTIME | LA_MODE | LA_TYPE |
			LA_SIZE | LA_UID | LA_GID | LA_FLAGS | LA_NLINK;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(o), NULL, osa->atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(o), NULL, osa->mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(o), NULL, osa->ctime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(o), NULL, &osa->mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_SIZE(o), NULL, &osa->size, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_LINKS(o), NULL, &osa->nlink, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_UID(o), NULL, &osa->uid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_GID(o), NULL, &osa->gid, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(o), NULL, &osa->flags, 8);
	LASSERT(cnt <= ARRAY_SIZE(osd_oti_get(env)->oti_attr_bulk));

	rc = -sa_bulk_lookup(sa_hdl, bulk, cnt);
	if (rc)
		GOTO(out_sa, rc);

	la->la_atime = osa->atime[0];
	la->la_mtime = osa->mtime[0];
	la->la_ctime = osa->ctime[0];
	la->la_mode = osa->mode;
	la->la_uid = osa->uid;
	la->la_gid = osa->gid;
	la->la_nlink = osa->nlink;
	la->la_flags = attrs_zfs2fs(osa->flags);
	la->la_size = osa->size;

	/* Try to get extra flags from LMA. Right now, only the LMAI_ORPHAN
	 * flag is stored there, and it is only set for orphan directories */
	if (S_ISDIR(la->la_mode) && dt_object_exists(&obj->oo_dt)) {
		struct osd_thread_info *info = osd_oti_get(env);
		struct lustre_mdt_attrs *lma;
		struct lu_buf buf;

		lma = (struct lustre_mdt_attrs *)info->oti_buf;
		buf.lb_buf = lma;
		buf.lb_len = sizeof(info->oti_buf);
		rc = osd_xattr_get(env, &obj->oo_dt, &buf, XATTR_NAME_LMA);
		if (rc > 0) {
			rc = 0;
			lma->lma_incompat = le32_to_cpu(lma->lma_incompat);
			obj->oo_lma_flags =
				lma_to_lustre_flags(lma->lma_incompat);
		} else if (rc == -ENODATA) {
			rc = 0;
		}
	}

	if (S_ISCHR(la->la_mode) || S_ISBLK(la->la_mode)) {
		rc = -sa_lookup(sa_hdl, SA_ZPL_RDEV(o), &osa->rdev, 8);
		if (rc)
			GOTO(out_sa, rc);
		la->la_rdev = osa->rdev;
		la->la_valid |= LA_RDEV;
	}
out_sa:
	sa_handle_destroy(sa_hdl);

	RETURN(rc);
}
/*
 * Construct a znode+inode and initialize.
 *
 * This does not do a call to dmu_set_user(); that is up to the caller
 * to do, in case you don't want to return the znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
    struct dentry *dentry, struct inode *dip)
{
	znode_t	*zp;
	struct inode *ip;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;

	ASSERT(zsb != NULL);

	ip = new_inode(zsb->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	zp->z_moved = 0;
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_is_zvol = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;

	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
		goto error;
	}

	ip->i_ino = obj;
	zfs_inode_update(zp);
	zfs_inode_set_ops(zsb, ip);

	if (insert_inode_locked(ip))
		goto error;

	if (dentry) {
		if (zpl_xattr_security_init(ip, dip, &dentry->d_name))
			goto error;

		d_instantiate(dentry, ip);
	}

	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);
	zsb->z_nr_znodes++;
	membar_producer();
	mutex_exit(&zsb->z_znodes_lock);

	unlock_new_inode(ip);
	return (zp);

error:
	unlock_new_inode(ip);
	iput(ip);
	return (NULL);
}