Example No. 1
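fuse_write_directbackend() is the direct-I/O write path: it splits the write into chunks no larger than the session's max_write, ships each chunk as a FUSE_WRITE request, and rewinds the uio bookkeeping whenever the daemon accepts fewer bytes than were sent.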
static int
fuse_write_directbackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_write_in *fwi;
	struct fuse_dispatcher fdi;
	size_t chunksize;
	int diff;
	int err = 0;

	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	fdisp_init(&fdi, 0);

	while (uio->uio_resid > 0) {
		chunksize = MIN(uio->uio_resid,
		    fuse_get_mpdata(vp->v_mount)->max_write);

		fdi.iosize = sizeof(*fwi) + chunksize;
		fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);

		fwi = fdi.indata;
		fwi->fh = fufh->fh_id;
		fwi->offset = uio->uio_offset;
		fwi->size = chunksize;

		if ((err = uiomove((char *)fdi.indata + sizeof(*fwi),
		    chunksize, uio)))
			break;

		if ((err = fdisp_wait_answ(&fdi)))
			break;

		diff = chunksize - ((struct fuse_write_out *)fdi.answ)->size;
		if (diff < 0) {
			err = EINVAL;
			break;
		}
		uio->uio_resid += diff;
		uio->uio_offset -= diff;
		if (uio->uio_offset > fvdat->filesize)
			fuse_vnode_setsize(vp, cred, uio->uio_offset);
	}

	fdisp_destroy(&fdi);

	return (err);
}
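The subtle part above is the short-write rewind: uiomove() advances the uio by the whole chunk before the daemon answers, so a short reply has to hand the residue back via uio_resid += diff / uio_offset -= diff. Below is a minimal user-space sketch of the same pattern; backend_write(), MAX_WRITE and the toy backing store are illustrative stand-ins, not part of the FUSE code.

#include <errno.h>
#include <stddef.h>
#include <string.h>

#define MAX_WRITE 4096			/* stand-in for mpdata->max_write */

static char storage[1 << 20];		/* toy backing store */

/* May accept fewer bytes than offered, like the FUSE daemon. */
static size_t
backend_write(const char *buf, size_t len, size_t off)
{
	if (off >= sizeof(storage))
		return (0);
	if (len > sizeof(storage) - off)
		len = sizeof(storage) - off;	/* short write */
	memcpy(storage + off, buf, len);
	return (len);
}

static int
write_chunked(const char *buf, size_t resid, size_t off)
{
	while (resid > 0) {
		size_t chunk = resid < MAX_WRITE ? resid : MAX_WRITE;
		size_t done = backend_write(buf, chunk, off);

		if (done > chunk)		/* like the diff < 0 guard */
			return (EINVAL);
		/*
		 * Advance only by what was accepted; a short write
		 * leaves the residue with the caller, mirroring the
		 * uio rewind above.
		 */
		buf += done;
		off += done;
		resid -= done;
		if (done < chunk)
			break;
	}
	return (0);
}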
Example No. 2
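fuse_write_biobackend() is the buffer-cache write path: it stages writes in getblk() buffers, tracks the dirty span via b_dirtyoff/b_dirtyend, issues a read when a partially covered buffer lacks B_CACHE, and forces an immediate bwrite() when a new write would be discontiguous with the existing dirty range.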
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct buf *bp;
	daddr_t lbn;
	int bcount;
	int n, on, err = 0;

	const int biosize = fuse_iosize(vp);

	KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode"));
	FS_DEBUG("resid=%zx offset=%jx fsize=%jx\n",
	    uio->uio_resid, uio->uio_offset, fvdat->filesize);
	if (vp->v_type != VREG)
		return (EIO);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	/*
         * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
         * would exceed the local maximum per-file write commit size when
         * combined with those, we must decide whether to flush,
         * go synchronous, or return err.  We don't bother checking
         * IO_UNIT -- we just make all writes atomic anyway, as there's
         * no point optimizing for something that really won't ever happen.
         */
	do {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);

		FS_DEBUG2G("lbn %ju, on %d, n %d, uio offset %ju, uio resid %zd\n",
			(uintmax_t)lbn, on, n, 
			(uintmax_t)uio->uio_offset, uio->uio_resid);

again:
		/*
	         * Handle direct append and file extension cases, calculate
	         * unaligned buffer size.
	         */
		if (uio->uio_offset == fvdat->filesize && n) {
			/*
	                 * Get the buffer (in its pre-append state to maintain
	                 * B_CACHE if it was previously set).  Resize the
	                 * nfsnode after we have locked the buffer to prevent
	                 * readers from reading garbage.
	                 */
			bcount = on;
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

			if (bp != NULL) {
				long save;

				err = fuse_vnode_setsize(vp, cred, 
							 uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
	                 * Obtain the locked cache block first, and then
	                 * adjust the file's size as appropriate.
	                 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
				if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
					bcount = biosize;
				else
					bcount = fvdat->filesize -
					    (off_t)lbn * biosize;
			}
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
			if (bp && uio->uio_offset + n > fvdat->filesize) {
				err = fuse_vnode_setsize(vp, cred, 
							 uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
			}
		}

		if (!bp) {
			err = EINTR;
			break;
		}
		/*
	         * Issue a READ if B_CACHE is not set.  In special-append
	         * mode, B_CACHE is based on the buffer prior to the write
	         * op and is typically set, avoiding the read.  If a read
	         * is required in special append mode, the server will
	         * probably send us a short-read since we extended the file
	         * on our end, resulting in b_resid == 0 and, thus,
	         * B_CACHE getting set.
	         *
	         * We can also avoid issuing the read if the write covers
	         * the entire buffer.  We have to make sure the buffer state
	         * is reasonable in this case since we will not be initiating
	         * I/O.  See the comments in kern/vfs_bio.c's getblk() for
	         * more information.
	         *
	         * B_CACHE may also be set due to the buffer being cached
	         * normally.
	         */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			fuse_io_strategy(vp, bp);
			if ((err = bp->b_error)) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);

		/*
	         * If dirtyend exceeds file size, chop it down.  This should
	         * not normally occur but there is an append race where it
	         * might occur XXX, so we log it.
	         *
	         * If the chopping creates a reverse-indexed or degenerate
	         * situation with dirtyoff/end, we 0 both of them.
	         */

		if (bp->b_dirtyend > bcount) {
			FS_DEBUG("FUSE append race @%lx:%d\n",
			    (long)bp->b_blkno * biosize,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
	         * If the new write will leave a contiguous dirty
	         * area, just update the b_dirtyoff and b_dirtyend,
	         * otherwise force a write rpc of the old dirty area.
	         *
	         * While it is possible to merge discontiguous writes due to
	         * our having a B_CACHE buffer ( and thus valid read data
	         * for the hole), we don't because it could lead to
	         * significant cache coherency problems with multiple clients,
	         * especially if locking is implemented later on.
	         *
	         * as an optimization we could theoretically maintain
	         * a linked list of discontinuous areas, but we would still
	         * have to commit them separately so there isn't much
	         * advantage to it except perhaps a bit of asynchronization.
	         */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			/*
	                 * Yes, we mean it. Write out everything to "storage"
	                 * immediately, without hesitation. (Apart from other
	                 * reasons: the only way to know if a write is valid
	                 * is if it's actually written out.)
	                 */
			bwrite(bp);
			if (bp->b_error == EINTR) {
				err = EINTR;
				break;
			}
			goto again;
		}
		err = uiomove((char *)bp->b_data + on, n, uio);

		/*
	         * Since this block is being modified, it must be written
	         * again and not just committed.  Since write clustering does
	         * not work for the stage 1 data write, only the stage 2
	         * commit rpc, we have to clear B_CLUSTEROK as well.
	         */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (err) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = err;
			brelse(bp);
			break;
		}
		/*
	         * Only update dirtyoff/dirtyend if not a degenerate
	         * condition.
	         */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
				bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}
		err = bwrite(bp);
		if (err)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
		fuse_vnode_savesize(vp, cred);

	return (err);
}
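The merge rule buried mid-loop is worth isolating: a new write [on, on + n) may only extend a contiguous dirty range; anything discontiguous forces the old range out to storage first. A standalone sketch, with illustrative names standing in for the struct buf fields:

#include <stdbool.h>

struct dirty {
	int off;	/* plays b_dirtyoff */
	int end;	/* plays b_dirtyend; end == 0 means clean */
};

/* True when the caller must bwrite() before recording the new write. */
static bool
must_flush_first(const struct dirty *d, int on, int n)
{
	return (d->end > 0 && (on > d->end || on + n < d->off));
}

/* Extend (or start) the dirty range to cover [on, on + n). */
static void
merge_dirty(struct dirty *d, int on, int n)
{
	if (d->end > 0) {
		d->off = on < d->off ? on : d->off;
		d->end = on + n > d->end ? on + n : d->end;
	} else {
		d->off = on;
		d->end = on + n;
	}
}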
Example No. 3
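fuse_vnop_getattr() services VOP_GETATTR: it round-trips a FUSE_GETATTR request, caches the returned attributes, revokes the vnode if its type changed behind the kernel's back, and fakes zeroed attributes for the root vnode while the daemon has not finished its init handshake.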
/*
    struct vop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
static int
fuse_vnop_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	int err = 0;
	int dataflags;
	struct fuse_dispatcher fdi;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;

	/* Note that we are not bailing out on a dead file system just yet. */

	if (!(dataflags & FSESS_INITED)) {
		if (!vnode_isvroot(vp)) {
			fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
			err = ENOTCONN;
			debug_printf("fuse_getattr b: returning ENOTCONN\n");
			return err;
		} else {
			goto fake;
		}
	}
	fdisp_init(&fdi, 0);
	if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
		if ((err == ENOTCONN) && vnode_isvroot(vp)) {
			/* see comment at similar place in fuse_statfs() */
			fdisp_destroy(&fdi);
			goto fake;
		}
		if (err == ENOENT) {
			fuse_internal_vnode_disappear(vp);
		}
		goto out;
	}
	cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	if (vap != VTOVA(vp)) {
		memcpy(vap, VTOVA(vp), sizeof(*vap));
	}
	if (vap->va_type != vnode_vtype(vp)) {
		fuse_internal_vnode_disappear(vp);
		err = ENOENT;
		goto out;
	}
	if ((fvdat->flag & FN_SIZECHANGE) != 0)
		vap->va_size = fvdat->filesize;

	if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
		/*
	         * This is for those cases when the file size changed without us
	         * knowing, and we want to catch up.
	         */
		off_t new_filesize = ((struct fuse_attr_out *)
				      fdi.answ)->attr.size;

		if (fvdat->filesize != new_filesize) {
			fuse_vnode_setsize(vp, cred, new_filesize);
		}
	}
	debug_printf("fuse_getattr e: returning 0\n");

out:
	fdisp_destroy(&fdi);
	return err;

fake:
	bzero(vap, sizeof(*vap));
	vap->va_type = vnode_vtype(vp);

	return 0;
}
Example No. 4
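fuse_vnop_setattr() services VOP_SETATTR: it assembles a fuse_setattr_in whose FATTR_* valid bitmask flags only the fields the caller actually supplied, rejects size changes on directories and writes on read-only mounts, and resizes the vnode after the server confirms a truncation.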
/*
    struct vop_setattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
static int
fuse_vnop_setattr(struct vop_setattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;

	struct fuse_dispatcher fdi;
	struct fuse_setattr_in *fsai;
	struct fuse_access_param facp;

	int err = 0;
	enum vtype vtyp;
	int sizechanged = 0;
	uint64_t newsize = 0;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		return ENXIO;
	}
	fdisp_init(&fdi, sizeof(*fsai));
	fdisp_make_vp(&fdi, FUSE_SETATTR, vp, td, cred);
	fsai = fdi.indata;
	fsai->valid = 0;

	bzero(&facp, sizeof(facp));

	facp.xuid = vap->va_uid;
	facp.xgid = vap->va_gid;

	if (vap->va_uid != (uid_t)VNOVAL) {
		facp.facc_flags |= FACCESS_CHOWN;
		fsai->uid = vap->va_uid;
		fsai->valid |= FATTR_UID;
	}
	if (vap->va_gid != (gid_t)VNOVAL) {
		facp.facc_flags |= FACCESS_CHOWN;
		fsai->gid = vap->va_gid;
		fsai->valid |= FATTR_GID;
	}
	if (vap->va_size != VNOVAL) {
		struct fuse_filehandle *fufh = NULL;

		/* Truncate to a new value. */
		fsai->size = vap->va_size;
		sizechanged = 1;
		newsize = vap->va_size;
		fsai->valid |= FATTR_SIZE;

		fuse_filehandle_getrw(vp, FUFH_WRONLY, &fufh);
		if (fufh) {
			fsai->fh = fufh->fh_id;
			fsai->valid |= FATTR_FH;
		}
	}
	if (vap->va_atime.tv_sec != VNOVAL) {
		fsai->atime = vap->va_atime.tv_sec;
		fsai->atimensec = vap->va_atime.tv_nsec;
		fsai->valid |= FATTR_ATIME;
	}
	if (vap->va_mtime.tv_sec != VNOVAL) {
		fsai->mtime = vap->va_mtime.tv_sec;
		fsai->mtimensec = vap->va_mtime.tv_nsec;
		fsai->valid |= FATTR_MTIME;
	}
	if (vap->va_mode != (mode_t)VNOVAL) {
		fsai->mode = vap->va_mode & ALLPERMS;
		fsai->valid |= FATTR_MODE;
	}
	if (!fsai->valid) {
		goto out;
	}
	vtyp = vnode_vtype(vp);

	if (fsai->valid & FATTR_SIZE && vtyp == VDIR) {
		err = EISDIR;
		goto out;
	}
	if (vfs_isrdonly(vnode_mount(vp)) &&
	    (fsai->valid & ~FATTR_SIZE || vtyp == VREG)) {
		err = EROFS;
		goto out;
	}
	if (fsai->valid & ~FATTR_SIZE) {
		/* err = fuse_internal_access(vp, VADMIN, context, &facp); */
		/* XXX */
		err = 0;
	}
	facp.facc_flags &= ~FACCESS_XQUERIES;

	if (err && !(fsai->valid & ~(FATTR_ATIME | FATTR_MTIME)) &&
	    vap->va_vaflags & VA_UTIMES_NULL) {
		err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
	}
	if (err)
		goto out;
	if ((err = fdisp_wait_answ(&fdi)))
		goto out;
	vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);

	if (vnode_vtype(vp) != vtyp) {
		if (vnode_vtype(vp) == VNON && vtyp != VNON) {
			debug_printf("FUSE: Dang! vnode_vtype is VNON and vtype isn't.\n");
		} else {
			/*
	                 * STALE vnode, ditch
	                 *
	                 * The vnode has changed its type "behind our back". There's
	                 * nothing really we can do, so let us just force an internal
	                 * revocation and tell the caller to try again, if interested.
	                 */
			fuse_internal_vnode_disappear(vp);
			err = EAGAIN;
		}
	}
	if (!err && !sizechanged) {
		cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	}
out:
	fdisp_destroy(&fdi);
	if (!err && sizechanged) {
		fuse_vnode_setsize(vp, cred, newsize);
		VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
	}
	return err;
}
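The valid bitmask is the heart of a setattr request: the server applies only the fields flagged in it. A self-contained sketch of the same builder pattern follows; the FATTR_* values match the FUSE wire protocol, while setattr_req and the helper are illustrative.

#include <stdint.h>

#define FATTR_MODE	(1 << 0)
#define FATTR_UID	(1 << 1)
#define FATTR_GID	(1 << 2)
#define FATTR_SIZE	(1 << 3)
#define FATTR_ATIME	(1 << 4)
#define FATTR_MTIME	(1 << 5)
#define FATTR_FH	(1 << 6)

struct setattr_req {
	uint32_t valid;		/* which fields below are meaningful */
	uint64_t size;
	uint32_t mode;
};

/* Request a truncation plus a chmod, and nothing else. */
static void
build_truncate_chmod(struct setattr_req *req, uint64_t size, uint32_t mode)
{
	req->valid = 0;
	req->size = size;
	req->valid |= FATTR_SIZE;
	req->mode = mode & 07777;	/* like the ALLPERMS masking above */
	req->valid |= FATTR_MODE;
}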