Esempio n. 1
0
/*
 * VOP_OPEN for tmpfs: refuse to open a node whose last name has been
 * removed, enforce append-only semantics, and attach a VM object to
 * the vnode on a successful open.  The vnode must be locked on entry
 * and remains locked on return.
 */
static int
tmpfs_open(struct vop_open_args *v)
{
	struct vnode *vp = v->a_vp;
	int mode = v->a_mode;

	int error;
	struct tmpfs_node *node;

	MPASS(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die.
	 */
	if (node->tn_links < 1)
		return (ENOENT);

	/* If the file is marked append-only, deny write requests. */
	if ((node->tn_flags & APPEND) != 0 &&
	    (mode & (FWRITE | O_APPEND)) == FWRITE) {
		error = EPERM;
	} else {
		error = 0;
		vnode_create_vobject(vp, node->tn_size, v->a_td);
	}

	MPASS(VOP_ISLOCKED(vp));
	return (error);
}
Esempio n. 2
0
/*
 * File handle to vnode.
 *
 * Stale file handles must be detected carefully:
 * - validate that the inode number is in range before any lookup
 * - obtain the locked inode via VFS_VGET()
 * - reject unallocated inodes (i_mode == 0), generation mismatches,
 *   and inodes with no remaining links
 */
static int
ext2_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ufid *ufhp;
	struct vnode *nvp;
	int error;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOEXT2(mp)->um_e2fs;

	/* Inode numbers outside the filesystem's range are stale. */
	if (ufhp->ufid_ino < EXT2_ROOTINO ||
	    ufhp->ufid_ino > fs->e2fs_gcount * fs->e2fs->e2fs_ipg)
		return (ESTALE);

	error = VFS_VGET(mp, ufhp->ufid_ino, LK_EXCLUSIVE, &nvp);
	if (error != 0) {
		*vpp = NULLVP;
		return (error);
	}

	/* An unallocated, regenerated, or unlinked inode is stale too. */
	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen ||
	    ip->i_nlink <= 0) {
		vput(nvp);
		*vpp = NULLVP;
		return (ESTALE);
	}

	*vpp = nvp;
	vnode_create_vobject(*vpp, 0, curthread);
	return (0);
}
Esempio n. 3
0
/*
 * Called for every vnode open.  Merges fuse_open_flags (which may be 0)
 * into the node's flag word.
 *
 * XXXIP: Handle FOPEN_KEEP_CACHE.
 *
 * Ideally direct I/O would be a per-file-descriptor property, but this
 * implementation has no per-fd state, so the flag is kept on the vnode.
 * It is hard to see why two descriptors on the same vnode would want
 * DIRECT_IO set differently; the Linux implementation, which operates
 * on fds rather than inodes, does provide that.
 *
 * XXXIP: Handle fd-based DIRECT_IO.
 */
void
fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
{
	if (fuse_open_flags & FOPEN_DIRECT_IO)
		VTOFUD(vp)->flag |= FN_DIRECTIO;
	else
		VTOFUD(vp)->flag &= ~FN_DIRECTIO;

	if (vnode_vtype(vp) == VREG) {
		/* XXXIP prevent getattr, by using cached node size */
		vnode_create_vobject(vp, 0, td);
	}
}
Esempio n. 4
0
/*
 * VOP_OPEN for UDF: attach a VM object to the vnode, sized from the
 * little-endian information length stored in the file entry.
 */
static int
udf_open(struct vop_open_args *ap)
{
	struct udf_node *np = VTON(ap->a_vp);
	off_t fsize = le64toh(np->fentry->inf_len);

	vnode_create_vobject(ap->a_vp, fsize, ap->a_td);
	return (0);
}
Esempio n. 5
0
/*
 * VOP_OPEN for reiserfs.  An append-only file may only be opened for
 * appending; otherwise attach a VM object sized to the inode.
 */
static int
reiserfs_open(struct vop_open_args *ap)
{
    int mode = ap->a_mode;

    /* Writing to an append-only file requires O_APPEND in the mode. */
    if ((VTOI(ap->a_vp)->i_flags & APPEND) != 0 &&
            (mode & (FWRITE | O_APPEND)) == FWRITE)
        return (EPERM);

    vnode_create_vobject(ap->a_vp, VTOI(ap->a_vp)->i_size, ap->a_td);

    return (0);
}
Esempio n. 6
0
/*
 * Called for every vnode open.  fuse_open_flags (possibly 0) is merged.
 *
 * XXXIP: Handle FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE.
 */
void
fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
{
	if (vnode_vtype(vp) == VREG) {
		/* XXXIP prevent getattr, by using cached node size */
		vnode_create_vobject(vp, 0, td);
	}
}
Esempio n. 7
0
/*
 * File handle to vnode for msdosfs: look up the denode by the directory
 * cluster/offset stored in the handle, then attach a VM object sized to
 * the file.
 */
static int
msdosfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct msdosfsmount *pmp;
	struct defid *defhp;
	struct denode *dep;
	int error;

	pmp = VFSTOMSDOSFS(mp);
	defhp = (struct defid *)fhp;
	error = deget(pmp, defhp->defid_dirclust, defhp->defid_dirofs, &dep);
	if (error != 0) {
		*vpp = NULLVP;
		return (error);
	}

	*vpp = DETOV(dep);
	vnode_create_vobject(*vpp, dep->de_FileSize, curthread);
	return (0);
}
/*
 * VOP_OPEN for xfs: delegate to XVOP_OPEN and, on success, attach a VM
 * object to the vnode.
 */
static int
_xfs_open(
    	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct thread *a_td;
		struct file *a_fp;
	} */ *ap)
{
	int error;

	XVOP_OPEN(VPTOXFSVP(ap->a_vp), ap->a_cred, error);
	if (error != 0)
		return (error);
	vnode_create_vobject(ap->a_vp, 0, ap->a_td);
	return (0);
}
Esempio n. 9
0
static int
ext2_open(struct vop_open_args *ap)
{

	if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	/*
	 * Files marked append-only must be opened for appending.
	 */
	if ((VTOI(ap->a_vp)->i_flags & APPEND) &&
	    (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
		return (EPERM);

	vnode_create_vobject(ap->a_vp, VTOI(ap->a_vp)->i_size, ap->a_td);

	return (0);
}
Esempio n. 10
0
static int
udf_fhtovp(struct mount *mp, struct fid *fhp, int flags, struct vnode **vpp)
{
	struct ifid *ifhp;
	struct vnode *nvp;
	struct udf_node *np;
	off_t fsize;
	int error;

	ifhp = (struct ifid *)fhp;

	if ((error = VFS_VGET(mp, ifhp->ifid_ino, LK_EXCLUSIVE, &nvp)) != 0) {
		*vpp = NULLVP;
		return (error);
	}

	np = VTON(nvp);
	fsize = le64toh(np->fentry->inf_len);

	*vpp = nvp;
	vnode_create_vobject(*vpp, fsize, curthread);
	return (0);
}
Esempio n. 11
0
/*
 * VOP_OPEN for p9fs.  Opens the node's fid on the server (cloning a
 * separate fid first for directories) and counts opens in p9n_opens so
 * repeated opens reuse the already-open fid.  On the first successful
 * open, a VM object is attached sized to the server-reported byte count.
 */
static int
p9fs_open(struct vop_open_args *ap)
{
	int error;
	struct p9fs_node *np = ap->a_vp->v_data;
	struct vattr vattr;
	uint32_t fid = np->p9n_fid;

	/* NOTE(review): debug leftover — logs on every open; consider removing. */
	printf("%s(fid %u)\n", __func__, np->p9n_fid);

	/*
	 * XXX XXX XXX
	 * XXX Each fid is associated with a particular open mode, so this
	 *     isn't good enough.  Need to map the mode to a particular fid.
	 *     Oh, but wait, we can't determine the correct fid for a given
	 *     client I/O call, because the filesystem can't store per file
	 *     descriptor state... sigh...
	 *
	 * diod's docs for 9P2000.L mention that each user gets its own
	 * attach fid on which to perform its operations.  That doesn't help
	 * with the per-fid open mode issue though... a given user can have
	 * multiple open modes.
	 *
	 * So perhaps p9fs would have to create a list of per-user open modes
	 * to fids?  Then read/write calls would lookup the appropriate one
	 * given the implied request mode?
	 *
	 * IO_APPEND is always included for VOP_WRITE() for fd's that were
	 * opened with O_APPEND.  So for each user we'd need at most three
	 * different fids: one each for reads, writes, and appends.  Each fid
	 * would have a refcount based on the number of times an open() call
	 * was issued with its bit set in the mode flag.  That way we could
	 * clunk fids only when they no longer have corresponding users.
	 *
	 * However, R/W modes are quite common, so perhaps we should try to
	 * always open R/W and let VFS do the per-fd restriction?  Ah, but
	 * that won't work because some files will only be openable read-only
	 * or write-only or append-only on the server end.
	 *
	 * Append presents another challenge: a given user can have multiple
	 * append fd's open on the same file at once.  Different appends can
	 * be at different offsets.  And some filesystems implement having
	 * append-only files.  However, looks like in that scenario the
	 * overlapping appends will always just get sent to the file's
	 * current size regardless.  This does mean we need an append fid.
	 *
	 * Finally, a p9fs_node should be indexed in the vfs hash by qid
	 * instead of by fid, since each vnode will be mappable to
	 * potentially many fids.  p9fs_nget() already takes a qid.  The
	 * main challenge is that vfs_hash_insert() only takes an u_int for
	 * the hash value, so we'll need to provide a comparator.
	 *
	 * Although, according to py9p, we can't clone an open fid, so
	 * perhaps we need a normal fid that is used just for cloning and
	 * metadata operations.
	 *
	 * NB: We likely also have to implement Tattach for every user, so
	 *     that the server has correct credentials for each fid and
	 *     tree of fids.  The initial attach would be defined by the
	 *     mount, but followup accesses by other users will require
	 *     their own attach.
	 */
	/* Already open on the server: just bump the local open count. */
	if (np->p9n_opens > 0) {
		np->p9n_opens++;
		return (0);
	}

	/* XXX Can this be cached in some reasonable fashion? */
	error = p9fs_client_stat(np->p9n_session, np->p9n_fid, &vattr);
	if (error != 0)
		return (error);

	/*
	 * XXX VFS calls VOP_OPEN() on a directory it's about to perform
	 *     VOP_READDIR() calls on.  However, 9P2000 Twalk requires that
	 *     the given fid not have been opened.  What should we do?
	 *
	 * For now, this call performs an internal Twalk to obtain a cloned
	 * fid that can be opened separately.  It will be clunk'd at the
	 * same time as the unopened fid.
	 */
	if (ap->a_vp->v_type == VDIR) {
		/* Clone the fid lazily; p9n_ofid == 0 means "not cloned yet". */
		if (np->p9n_ofid == 0) {
			np->p9n_ofid = p9fs_getfid(np->p9n_session);

			error = p9fs_client_walk(np->p9n_session, np->p9n_fid,
			    &np->p9n_ofid, 0, NULL, &np->p9n_qid);
			if (error != 0) {
				/* Reset so a later open retries the walk. */
				np->p9n_ofid = 0;
				return (error);
			}
		}
		fid = np->p9n_ofid;
	}

	error = p9fs_client_open(np->p9n_session, fid, ap->a_mode);
	if (error == 0) {
		np->p9n_opens = 1;
		vnode_create_vobject(ap->a_vp, vattr.va_bytes, ap->a_td);
	}

	return (error);
}
/*
 * File handle to vnode for ZFS.  Decodes the object number and
 * generation from the (short or long) fid, handles the .zfs control
 * directory specially, and otherwise fetches the znode and validates
 * its generation.  Returns the vnode locked with a VM object attached.
 */
static int
zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, vnode_t **vpp)
{
	zfsvfs_t	*zfsvfs = vfsp->vfs_data;
	znode_t		*zp;
	uint64_t	object = 0;
	uint64_t	fid_gen = 0;
	uint64_t	gen_mask;
	uint64_t	zp_gen;
	int		i, err;

	*vpp = NULL;

	ZFS_ENTER(zfsvfs);

	/*
	 * On FreeBSD we can get snapshot's mount point or its parent file
	 * system mount point depending if snapshot is already mounted or not.
	 */
	if (zfsvfs->z_parent == zfsvfs && fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t	*zlfid = (zfid_long_t *)fidp;
		uint64_t	objsetid = 0;
		uint64_t	setgen = 0;

		/* Assemble the objset id and generation byte-by-byte (LE). */
		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		/* Drop the teardown lock before crossing into the snapshot. */
		ZFS_EXIT(zfsvfs);

		/* On success, zfsvfs is replaced by the snapshot's zfsvfs. */
		err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
		if (err)
			return (EINVAL);
		ZFS_ENTER(zfsvfs);
	}

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t	*zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*vpp = zfsvfs->z_ctldir;
		ASSERT(*vpp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*vpp, "snapshot", vpp, NULL,
			    0, NULL, NULL, NULL, NULL, NULL) == 0);
		} else {
			VN_HOLD(*vpp);
		}
		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/*
	 * NOTE(review): relies on `i` holding sizeof(zfid->zf_gen) from the
	 * loop above, i.e. the mask covers exactly the bytes the fid stores.
	 */
	gen_mask = -1ULL >> (64 - 8 * i);

	/* NOTE(review): %u/%llx specifiers vs. uint64_t args — verify widths. */
	dprintf("getting %llu [%u mask %llx]\n", object, fid_gen, gen_mask);
	/* Intentional assignment-in-condition: nonzero err means failure. */
	if (err = zfs_zget(zfsvfs, object, &zp)) {
		ZFS_EXIT(zfsvfs);
		return (err);
	}
	/* A stored generation of 0 is normalized to 1, matching fid encoding. */
	zp_gen = zp->z_phys->zp_gen & gen_mask;
	if (zp_gen == 0)
		zp_gen = 1;
	/* Unlinked znode or generation mismatch means a stale handle. */
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
		VN_RELE(ZTOV(zp));
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	*vpp = ZTOV(zp);
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	vnode_create_vobject(*vpp, zp->z_phys->zp_size, curthread);
	ZFS_EXIT(zfsvfs);
	return (0);
}