Example #1
int
zfs_rezget(znode_t *zp)
{
	zfs_sb_t *zsb = ZTOZSB(zp);
	dmu_object_info_t doi;
	dmu_buf_t *db;
	uint64_t obj_num = zp->z_id;
	uint64_t mode;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}
	mutex_exit(&zp->z_acl_lock);

	rw_enter(&zp->z_xattr_lock, RW_WRITER);
	if (zp->z_xattr_cached) {
		nvlist_free(zp->z_xattr_cached);
		zp->z_xattr_cached = NULL;
	}

	if (zp->z_xattr_parent) {
		iput(ZTOI(zp->z_xattr_parent));
		zp->z_xattr_parent = NULL;
	}
	rw_exit(&zp->z_xattr_lock);

	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zsb, zp, db, doi.doi_bonus_type, NULL);

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_unlinked = (zp->z_links == 0);
	zp->z_blksz = doi.doi_data_block_size;
	zfs_inode_update(zp);

	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);

	return (0);
}
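
zfs_rezget() is used when the objset backing mounted znodes changes underneath them (for example after a rollback or a received stream), so every cached znode must be re-read through a fresh SA handle. The sketch below shows one way a caller could walk the cached znodes and re-fetch them; zfs_reload_all_znodes() is a made-up name, and the loop is a simplified stand-in for what the real resume/rollback path does, not a copy of it.

/*
 * Illustrative sketch (zfs_reload_all_znodes() is a hypothetical name):
 * walk every cached znode of a filesystem and re-read its attributes
 * after the underlying objset has changed.  A simplified stand-in for
 * the real resume/rollback walk.
 */
static void
zfs_reload_all_znodes(zfs_sb_t *zsb)
{
	znode_t *zp;

	mutex_enter(&zsb->z_znodes_lock);
	for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
	    zp = list_next(&zsb->z_all_znodes, zp)) {
		/*
		 * A non-zero return means the object vanished or changed
		 * generation; mark the znode stale so later lookups
		 * re-fetch it.
		 */
		if (zfs_rezget(zp) != 0)
			zp->z_is_stale = B_TRUE;
	}
	mutex_exit(&zsb->z_znodes_lock);
}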
Example #2
/*
 * Construct a znode+inode and initialize.
 *
 * This does not call dmu_set_user(); that is up to the
 * caller to do, in case you don't want to return the
 * znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
    struct inode *dip)
{
	znode_t	*zp;
	struct inode *ip;
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;

	ASSERT(zsb != NULL);

	ip = new_inode(zsb->z_sb);
	if (ip == NULL)
		return (NULL);

	zp = ITOZ(ip);
	ASSERT(zp->z_dirlocks == NULL);
	ASSERT3P(zp->z_acl_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_cached, ==, NULL);
	ASSERT3P(zp->z_xattr_parent, ==, NULL);
	zp->z_moved = 0;
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;
	zp->z_is_zvol = B_FALSE;
	zp->z_is_mapped = B_FALSE;
	zp->z_is_ctldir = B_FALSE;
	zp->z_is_stale = B_FALSE;

	zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
	    &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);

		goto error;
	}

	zp->z_mode = mode;

	/*
	 * xattr znodes hold a reference on their unique parent
	 */
	if (dip && zp->z_pflags & ZFS_XATTR) {
		igrab(dip);
		zp->z_xattr_parent = ITOZ(dip);
	}

	ip->i_ino = obj;
	zfs_inode_update(zp);
	zfs_inode_set_ops(zsb, ip);

	/*
	 * The only way insert_inode_locked() can fail is if the ip->i_ino
	 * number is already hashed for this super block.  This can never
	 * happen because the inode numbers map 1:1 with the object numbers.
	 *
	 * The one exception is rolling back a mounted file system, but in
	 * this case all the active inodes are unhashed during the rollback.
	 */
	VERIFY3S(insert_inode_locked(ip), ==, 0);

	mutex_enter(&zsb->z_znodes_lock);
	list_insert_tail(&zsb->z_all_znodes, zp);
	zsb->z_nr_znodes++;
	membar_producer();
	mutex_exit(&zsb->z_znodes_lock);

	unlock_new_inode(ip);
	return (zp);

error:
	unlock_new_inode(ip);
	iput(ip);
	return (NULL);
}
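
The natural consumer of this variant is a lookup-by-object-number path: hold the object's SA buffer, sanity-check its bonus type, then construct the znode+inode pair. The zget_sketch() below is a hypothetical, heavily simplified version of that pattern; the real zfs_zget() additionally handles an already-attached SA handle, unlinked/stale znodes, and fuller error handling.

/*
 * Hypothetical, simplified zfs_zget()-style caller: hold the object's
 * SA buffer, check the bonus type, then construct the znode+inode.
 * Error handling is reduced relative to the real function.
 */
static int
zget_sketch(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t *db;
	znode_t *zp;
	int err;

	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type != DMU_OT_ZNODE) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * No pre-existing SA handle and no xattr parent in this sketch;
	 * zfs_znode_alloc() builds the handle from the bonus buffer.
	 */
	zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, obj_num, NULL, NULL);
	if (zp == NULL)
		err = SET_ERROR(ENOENT);
	else
		*zpp = zp;

	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
	return (err);
}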
Example #3
/*
 * Construct a znode+inode and initialize.
 *
 * This does not call dmu_set_user(); that is up to the
 * caller to do, in case you don't want to return the
 * znode.
 */
static znode_t *
zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
                dmu_object_type_t obj_type, uint64_t obj, sa_handle_t *hdl,
                struct dentry *dentry, struct inode *dip)
{
    znode_t	*zp;
    struct inode *ip;
    uint64_t parent;
    sa_bulk_attr_t bulk[9];
    int count = 0;

    ASSERT(zsb != NULL);

    ip = new_inode(zsb->z_sb);
    if (ip == NULL)
        return (NULL);

    zp = ITOZ(ip);
    ASSERT(zp->z_dirlocks == NULL);
    ASSERT3P(zp->z_acl_cached, ==, NULL);
    ASSERT3P(zp->z_xattr_cached, ==, NULL);
    zp->z_moved = 0;
    zp->z_sa_hdl = NULL;
    zp->z_unlinked = 0;
    zp->z_atime_dirty = 0;
    zp->z_mapcnt = 0;
    zp->z_id = db->db_object;
    zp->z_blksz = blksz;
    zp->z_seq = 0x7A4653;
    zp->z_sync_cnt = 0;
    zp->z_is_zvol = B_FALSE;
    zp->z_is_mapped = B_FALSE;
    zp->z_is_ctldir = B_FALSE;

    zfs_znode_sa_init(zsb, zp, db, obj_type, hdl);

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL, &zp->z_mode, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zsb), NULL, &zp->z_gen, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zsb), NULL, &zp->z_links, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
                     &zp->z_pflags, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zsb), NULL,
                     &parent, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
                     &zp->z_atime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL, &zp->z_uid, 8);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb), NULL, &zp->z_gid, 8);

    if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
        if (hdl == NULL)
            sa_handle_destroy(zp->z_sa_hdl);

        goto error;
    }

    ip->i_ino = obj;
    zfs_inode_update(zp);
    zfs_inode_set_ops(zsb, ip);

    if (insert_inode_locked(ip))
        goto error;

    if (dentry) {
        if (zpl_xattr_security_init(ip, dip, &dentry->d_name))
            goto error;

        d_instantiate(dentry, ip);
    }

    mutex_enter(&zsb->z_znodes_lock);
    list_insert_tail(&zsb->z_all_znodes, zp);
    zsb->z_nr_znodes++;
    membar_producer();
    mutex_exit(&zsb->z_znodes_lock);

    unlock_new_inode(ip);
    return (zp);

error:
    unlock_new_inode(ip);
    iput(ip);
    return (NULL);
}
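
Compared with the previous variant, this one threads the dentry and the security-xattr setup through allocation itself, so a create path can hand the new name's dentry straight to zfs_znode_alloc() instead of instantiating it afterwards. The helper below is purely hypothetical (create_attach_sketch(), the dzp parent-znode argument, and the blksz of 0 are all assumptions for illustration); it only shows how the extra parameters would be supplied.

/*
 * Hypothetical helper: dzp is the parent directory znode, db/obj_type
 * describe the freshly created object, and dentry names the new entry.
 * A NULL sa_handle_t asks zfs_znode_alloc() to build one from the
 * bonus buffer; passing the dentry makes it also run
 * zpl_xattr_security_init() and d_instantiate() before the new inode
 * is unlocked.
 */
static znode_t *
create_attach_sketch(zfs_sb_t *zsb, znode_t *dzp, struct dentry *dentry,
                     dmu_buf_t *db, dmu_object_type_t obj_type)
{
    return (zfs_znode_alloc(zsb, db, 0, obj_type, db->db_object,
                            NULL, dentry, ZTOI(dzp)));
}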