Example no. 1
0
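/* Read the attributes of an OSD object: copy the cached oo_attr under the
 * attribute lock, then fill in the block count and block size reported by
 * the DMU via sa_object_size(). */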
static int osd_attr_get(const struct lu_env *env,
			struct dt_object *dt,
			struct lu_attr *attr,
			struct lustre_capa *capa)
{
	struct osd_object	*obj = osd_dt_obj(dt);
	uint64_t		 blocks;
	uint32_t		 blksize;

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_db);

	read_lock(&obj->oo_attr_lock);
	*attr = obj->oo_attr;
	read_unlock(&obj->oo_attr_lock);

	/* With ZFS_DEBUG, zrl_add_debug() (called by DB_DNODE_ENTER() from
	 * within sa_object_size()) can block on a mutex, so we can't call
	 * sa_object_size() while holding the rwlock. */
	sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
	/* We do not control the size of indices, so always calculate it
	 * from the number of blocks reported by the DMU. */
	if (S_ISDIR(attr->la_mode))
		attr->la_size = 512 * blocks;
	/* Block size may not be set; suggest maximal I/O transfers. */
	if (blksize == 0)
		blksize = 1ULL << SPA_MAXBLOCKSHIFT;

	attr->la_blksize = blksize;
	attr->la_blocks = blocks;
	attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;

	return 0;
}
Example no. 2
0
static int osd_attr_get(const struct lu_env *env,
			struct dt_object *dt,
			struct lu_attr *attr)
{
	struct osd_object	*obj = osd_dt_obj(dt);
	uint64_t		 blocks;
	uint32_t		 blksize;
	int			 rc = 0;

	down_read(&obj->oo_guard);

	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = -ENOENT);

	LASSERT(osd_invariant(obj));
	LASSERT(obj->oo_db);

	read_lock(&obj->oo_attr_lock);
	*attr = obj->oo_attr;
	if (obj->oo_lma_flags & LUSTRE_ORPHAN_FL)
		attr->la_flags |= LUSTRE_ORPHAN_FL;
	read_unlock(&obj->oo_attr_lock);

	/* With ZFS_DEBUG, zrl_add_debug() (called by DB_DNODE_ENTER() from
	 * within sa_object_size()) can block on a mutex, so we can't call
	 * sa_object_size() while holding the rwlock. */
	sa_object_size(obj->oo_sa_hdl, &blksize, &blocks);
	/* We do not control the size of indices, so always calculate it
	 * from the number of blocks reported by the DMU. */
	if (S_ISDIR(attr->la_mode))
		attr->la_size = 512 * blocks;
	/* Block size may not be set; suggest maximal I/O transfers. */
	if (blksize == 0)
		blksize = osd_spa_maxblocksize(
			dmu_objset_spa(osd_obj2dev(obj)->od_os));

	attr->la_blksize = blksize;
	attr->la_blocks = blocks;
	attr->la_valid |= LA_BLOCKS | LA_BLKSIZE;

out:
	up_read(&obj->oo_guard);
	return rc;
}
Example no. 3
0
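/* Declare an attribute update: reserve transaction space for the SA update
 * and the per-user/group inode accounting ZAPs, and reserve quota for any
 * ownership change. */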
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	char			*buf = osd_oti_get(env)->oti_str;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	uint64_t		 bspace;
	uint32_t		 blksize;
	int			 rc;
	ENTRY;

	if (!dt_object_exists(dt)) {
		/* XXX: sanity check that object creation is declared */
		RETURN(0);
	}

	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of0(handle, struct osd_thandle, ot_super);

	LASSERT(obj->oo_sa_hdl != NULL);
	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);

	sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
	bspace = toqb(bspace * blksize);

	if (attr && attr->la_valid & LA_UID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);

		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				RETURN(rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* account for group inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				RETURN(rc);
		}
	}

	RETURN(0);
}
Example no. 4
0
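/* Pack the requested ATTR_FILE_* attributes of znode zp into the attribute
 * buffer described by aip. */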
void fileattrpack(attrinfo_t *aip, zfsvfs_t *zfsvfs, znode_t *zp)
{
	attrgroup_t fileattr = aip->ai_attrlist->fileattr;
	void *attrbufptr = *aip->ai_attrbufpp;
	void *varbufptr = *aip->ai_varbufpp;
	uint64_t allocsize = 0;
	cred_t  *cr = (cred_t *)vfs_context_ucred(aip->ai_context);

	if ((ATTR_FILE_ALLOCSIZE | ATTR_FILE_DATAALLOCSIZE) & fileattr && zp) {
		uint32_t  blksize;
		u_longlong_t  nblks;

		sa_object_size(zp->z_sa_hdl, &blksize, &nblks);
		allocsize = (uint64_t)512LL * (uint64_t)nblks;
	}
	if (ATTR_FILE_LINKCOUNT & fileattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
		    &val, sizeof (val)) == 0);
		*((u_int32_t *)attrbufptr) = val;
		attrbufptr = ((u_int32_t *)attrbufptr) + 1;
	}
	if (ATTR_FILE_TOTALSIZE & fileattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    &val, sizeof (val)) == 0);
		*((off_t *)attrbufptr) = val;
		attrbufptr = ((off_t *)attrbufptr) + 1;
	}
	if (ATTR_FILE_ALLOCSIZE & fileattr) {
		*((off_t *)attrbufptr) = allocsize;
		attrbufptr = ((off_t *)attrbufptr) + 1;
	}
	if (ATTR_FILE_IOBLOCKSIZE & fileattr && zp) {
		*((u_int32_t *)attrbufptr) =
		    zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
		attrbufptr = ((u_int32_t *)attrbufptr) + 1;
	}
	if (ATTR_FILE_DEVTYPE & fileattr) {
		uint64_t mode, val = 0;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
		    &mode, sizeof (mode)) == 0);
		sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
		    &val, sizeof (val));
		if (S_ISBLK(mode) || S_ISCHR(mode))
			*((u_int32_t *)attrbufptr) = (u_int32_t)val;
		else
			*((u_int32_t *)attrbufptr) = 0;
		attrbufptr = ((u_int32_t *)attrbufptr) + 1;
	}
	if (ATTR_FILE_DATALENGTH & fileattr) {
		uint64_t val;

		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
		    &val, sizeof (val)) == 0);
		*((off_t *)attrbufptr) = val;
		attrbufptr = ((off_t *)attrbufptr) + 1;
	}
	if (ATTR_FILE_DATAALLOCSIZE & fileattr) {
		*((off_t *)attrbufptr) = allocsize;
		attrbufptr = ((off_t *)attrbufptr) + 1;
	}
	if ((ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE) & fileattr) {
		uint64_t rsrcsize = 0;
		uint64_t xattr;

		if (!sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr, sizeof (xattr)) && xattr) {
			vnode_t *xdvp = NULLVP;
			vnode_t *xvp = NULLVP;
			struct componentname  cn;

			bzero(&cn, sizeof (cn));
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = ISLASTCN;
			cn.cn_nameptr = XATTR_RESOURCEFORK_NAME;
			cn.cn_namelen = strlen(cn.cn_nameptr);

			/* Grab the hidden attribute directory vnode. */
			if (zfs_get_xattrdir(zp, &xdvp, cr, 0) == 0 &&
			    zfs_dirlook(VTOZ(xdvp), cn.cn_nameptr, &xvp, 0, NULL,
			    &cn) == 0) {
				rsrcsize = VTOZ(xvp)->z_size;
			}
			if (xvp)
				vnode_put(xvp);
			if (xdvp)
				vnode_put(xdvp);
		}
		if (ATTR_FILE_RSRCLENGTH & fileattr) {
			*((off_t *)attrbufptr) = rsrcsize;
			attrbufptr = ((off_t *)attrbufptr) + 1;
		}
		if (ATTR_FILE_RSRCALLOCSIZE & fileattr) {
			*((off_t *)attrbufptr) = roundup(rsrcsize, 512);
			attrbufptr = ((off_t *)attrbufptr) + 1;
		}
	}
	*aip->ai_attrbufpp = attrbufptr;
	*aip->ai_varbufpp = varbufptr;
}
Example no. 5
0
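/*
 * Translate a znode's attributes into the Mac OS X vnode attribute (vattr)
 * structure for getattr.
 */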
int
zfs_getattr_znode_unlocked(struct vnode *vp, vattr_t *vap)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error = 0;
	uint64_t	parent;

    //printf("getattr_osx\n");

	ZFS_ENTER(zfsvfs);
	/*
	 * On Mac OS X we always export the root directory id as 2
	 */
	vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_id;
	//vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_vid;
	vap->va_nlink = zp->z_links;
	vap->va_data_size = zp->z_size;
	vap->va_total_size = zp->z_size;
	vap->va_gen = zp->z_gen;

	/*
	 * For Carbon compatibility, pretend to support this legacy/unused
	 * attribute.
	 */
	if (VATTR_IS_ACTIVE(vap, va_backup_time)) {
		vap->va_backup_time.tv_sec = 0;
		vap->va_backup_time.tv_nsec = 0;
		VATTR_SET_SUPPORTED(vap, va_backup_time);
	}
	vap->va_flags = zfs_getbsdflags(zp);
	/*
	 * On Mac OS X we always export the root directory id as 2
	 * and its parent as 1
	 */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));

	if (!error) {
		if (zp->z_id == zfsvfs->z_root)
			vap->va_parentid = 1;
		else if (parent == zfsvfs->z_root)
			vap->va_parentid = 2;
		else
			vap->va_parentid = parent;
	}

	vap->va_iosize = zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
	//vap->va_iosize = 512;
	VATTR_SET_SUPPORTED(vap, va_iosize);

	/* Don't include '.' and '..' in the number of entries */
	if (VATTR_IS_ACTIVE(vap, va_nchildren) && vnode_isdir(vp)) {
		VATTR_RETURN(vap, va_nchildren, vap->va_nlink - 2);
	}

	/*
	 * va_dirlinkcount is the count of directory hard links. When a file
	 * system does not support ATTR_DIR_LINKCOUNT, xnu will default to 1.
	 * Since we claim to support ATTR_DIR_LINKCOUNT both as valid and as
	 * native, we'll just return 1. We set 1 for this value in dirattrpack
	 * as well. If in the future ZFS actually supports directory hard links,
	 * we can return a real value.
	 */
	if (VATTR_IS_ACTIVE(vap, va_dirlinkcount) && vnode_isdir(vp)) {
		VATTR_RETURN(vap, va_dirlinkcount, 1);
	}

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
        //printf("want acl\n");
#if 0
        zfs_acl_phys_t acl;

        if (sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
                      &acl, sizeof (zfs_acl_phys_t))) {
            //if (zp->z_acl.z_acl_count == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
		} else {
			if ((error = zfs_getacl(zp, &vap->va_acl, B_TRUE, NULL))) {
                dprintf("zfs_getacl returned error %d\n", error);
                error = 0;
				//ZFS_EXIT(zfsvfs);
				//return (error);
			}
		}

#endif
		//VATTR_SET_SUPPORTED(vap, va_acl);
		VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
		VATTR_RETURN(vap, va_guuid, kauth_null_guid);

		dprintf("Calling getacl\n");
		if ((error = zfs_getacl(zp, &vap->va_acl, B_FALSE, NULL))) {
			dprintf("zfs_getacl returned error %d\n", error);
			error = 0;
		} else {
			VATTR_SET_SUPPORTED(vap, va_acl);
			/* va_acl implies that va_uuuid and va_guuid are also
			 * supported. */
			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
		}
	}

	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		uint32_t blksize;
		u_longlong_t nblks;

		sa_object_size(zp->z_sa_hdl, &blksize, &nblks);
		vap->va_data_alloc = (uint64_t)512LL * (uint64_t)nblks;
		vap->va_total_alloc = vap->va_data_alloc;
		vap->va_supported |= VNODE_ATTR_va_data_alloc |
		    VNODE_ATTR_va_total_alloc;
	}

	if (VATTR_IS_ACTIVE(vap, va_name)) {
		vap->va_name[0] = 0;

		if (!vnode_isvroot(vp)) {
			/* Don't supply the name, as zap_cursor can cause a panic. */
#if 0
			if (zap_value_search(zfsvfs->z_os, parent, zp->z_id,
			    ZFS_DIRENT_OBJ(-1ULL), vap->va_name) == 0)
				VATTR_SET_SUPPORTED(vap, va_name);
#endif
		} else {
			/*
			 * The vroot objects must return a unique name for Finder
			 * to be able to distinguish between mounts. For this
			 * reason we simply return the full name from the statfs
			 * mountedfrom field.
			 */
			strlcpy(vap->va_name,
			    vfs_statfs(vnode_mount(vp))->f_mntfromname,
			    MAXPATHLEN);
			VATTR_SET_SUPPORTED(vap, va_name);
		}
	}

	if (VATTR_IS_ACTIVE(vap, va_filerev)) {
		VATTR_RETURN(vap, va_filerev, 0);
	}
	if (VATTR_IS_ACTIVE(vap, va_linkid)) {
		VATTR_RETURN(vap, va_linkid, vap->va_fileid);
	}
	if (VATTR_IS_ACTIVE(vap, va_fsid)) {
		VATTR_RETURN(vap, va_fsid, vfs_statfs(zfsvfs->z_vfs)->f_fsid.val[0]);
	}
	if (VATTR_IS_ACTIVE(vap, va_type)) {
		VATTR_RETURN(vap, va_type, vnode_vtype(ZTOV(zp)));
	}
	if (VATTR_IS_ACTIVE(vap, va_encoding)) {
		VATTR_RETURN(vap, va_encoding, kTextEncodingMacUnicode);
	}
#ifdef VNODE_ATTR_va_addedtime
	if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
		VATTR_RETURN(vap, va_addedtime, vap->va_ctime);
	}
#endif
	if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
		kauth_cred_uid2guid(zp->z_uid, &vap->va_uuuid);
	}
	if (VATTR_IS_ACTIVE(vap, va_guuid)) {
		kauth_cred_uid2guid(zp->z_gid, &vap->va_guuid);
	}

	vap->va_supported |= ZFS_SUPPORTED_VATTRS;

	ZFS_EXIT(zfsvfs);
	return (error);
}
Example no. 6
0
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	dmu_tx_hold_t		*txh;
	struct osd_thandle	*oh;
	uint64_t		 bspace;
	uint32_t		 blksize;
	int			 rc = 0;
	bool			 found;
	ENTRY;


	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of0(handle, struct osd_thandle, ot_super);

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = 0);

	LASSERT(obj->oo_sa_hdl != NULL);
	LASSERT(oh->ot_tx != NULL);
	/* regular attributes are part of the bonus buffer */
	/* let's check whether this object is already part of the
	 * transaction */
	found = false;
	for (txh = list_head(&oh->ot_tx->tx_holds); txh;
	     txh = list_next(&oh->ot_tx->tx_holds, txh)) {
		if (txh->txh_dnode == NULL)
			continue;
		if (txh->txh_dnode->dn_object != obj->oo_db->db_object)
			continue;
		/* this object is already part of the transaction;
		 * we don't need to declare the bonus again */
		found = true;
		break;
	}
	if (!found)
		dmu_tx_hold_bonus(oh->ot_tx, obj->oo_db->db_object);
	if (oh->ot_tx->tx_err != 0)
		GOTO(out, rc = -oh->ot_tx->tx_err);

	if (attr && attr->la_valid & LA_FLAGS) {
		/* LMA is usually part of the bonus buffer, so there is
		 * nothing else to declare */
	}

	if (attr && (attr->la_valid & (LA_UID | LA_GID))) {
		sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
		bspace = toqb(bspace * blksize);
	}

	if (attr && attr->la_valid & LA_UID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, FALSE, NULL);

		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* account for group inode tracking ZAP update */
		dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, FALSE, NULL);

		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}
Example no. 7
0
static int osd_declare_attr_set(const struct lu_env *env,
				struct dt_object *dt,
				const struct lu_attr *attr,
				struct thandle *handle)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	char			*buf = osd_oti_get(env)->oti_str;
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*osd = osd_obj2dev(obj);
	struct osd_thandle	*oh;
	uint64_t		 bspace;
	uint32_t		 blksize;
	int			 rc = 0;
	ENTRY;


	LASSERT(handle != NULL);
	LASSERT(osd_invariant(obj));

	oh = container_of0(handle, struct osd_thandle, ot_super);

	down_read(&obj->oo_guard);
	if (unlikely(!dt_object_exists(dt) || obj->oo_destroyed))
		GOTO(out, rc = 0);

	LASSERT(obj->oo_sa_hdl != NULL);
	LASSERT(oh->ot_tx != NULL);
	dmu_tx_hold_sa(oh->ot_tx, obj->oo_sa_hdl, 0);
	if (oh->ot_tx->tx_err != 0)
		GOTO(out, rc = -oh->ot_tx->tx_err);

	sa_object_size(obj->oo_sa_hdl, &blksize, &bspace);
	bspace = toqb(bspace * blksize);

	__osd_xattr_declare_set(env, obj, sizeof(struct lustre_mdt_attrs),
				XATTR_NAME_LMA, oh);

	if (attr && attr->la_valid & LA_UID) {
		/* account for user inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_iusr_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, TRUE, buf);

		/* quota enforcement for user */
		if (attr->la_uid != obj->oo_attr.la_uid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, USRQUOTA,
					  obj->oo_attr.la_uid, attr->la_uid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}
	if (attr && attr->la_valid & LA_GID) {
		/* account for group inode tracking ZAP update */
		dmu_tx_hold_bonus(oh->ot_tx, osd->od_igrp_oid);
		dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, TRUE, buf);

		/* quota enforcement for group */
		if (attr->la_gid != obj->oo_attr.la_gid) {
			rc = qsd_transfer(env, osd->od_quota_slave,
					  &oh->ot_quota_trans, GRPQUOTA,
					  obj->oo_attr.la_gid, attr->la_gid,
					  bspace, &info->oti_qi);
			if (rc)
				GOTO(out, rc);
		}
	}

out:
	up_read(&obj->oo_guard);
	RETURN(rc);
}