Example 1
static int
vnop_strategy_9p(struct vnop_strategy_args *ap)
{
	mount_t mp;
	struct buf *bp;
	node_9p *np;
	caddr_t addr;
	uio_t uio;
	int e, flags;

	TRACE();
	bp = ap->a_bp;
	np = NTO9P(buf_vnode(bp));
	flags = buf_flags(bp);
	uio = NULL;
	addr = NULL;

	mp = vnode_mount(buf_vnode(bp));
	if (mp == NULL)
		return ENXIO;

	if ((e=buf_map(bp, &addr)))
		goto error;

	uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE,
					 ISSET(flags, B_READ)? UIO_READ: UIO_WRITE);
	if (uio == NULL) {
		e = ENOMEM;
		goto error;
	}
	
	uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp));
	if (ISSET(flags, B_READ)) {
		if ((e=nread_9p(np, uio)))
			goto error;
		/* zero the rest of the page if we reached EOF */
		if (uio_resid(uio) > 0) {
			bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio));
			uio_update(uio, uio_resid(uio));
		}
	} else {
		if ((e=nwrite_9p(np, uio)))
			goto error;
	}
	buf_setresid(bp, uio_resid(uio));
error:
	if (uio)
		uio_free(uio);
	if (addr)
		buf_unmap(bp);
	buf_seterror(bp, e);
	buf_biodone(bp);
	return e;
}
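For context, a strategy entry point like the one above is not called directly by filesystem code; it is driven through the buffer cache. The following is a minimal sketch (not taken from the 9P sources; the vnode, block number, and block size are assumed to come from the caller) of how a block read issued with buf_bread() ends up exercising a vnop_strategy implementation:

#include <sys/buf.h>
#include <sys/vnode.h>

static errno_t
read_one_block(vnode_t vp, daddr64_t blkno, int blksize)
{
	buf_t bp = NULL;
	errno_t err;

	/* buf_bread() finds or allocates the buffer and, on a cache miss,
	 * issues the read through the filesystem's VNOP_STRATEGY. */
	err = buf_bread(vp, blkno, blksize, NULL, &bp);
	if (err == 0) {
		/* buf_dataptr() points at the mapped block contents. */
		/* ... consume (void *)buf_dataptr(bp) here ... */
	}
	if (bp != NULL)
		buf_brelse(bp);
	return err;
}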
Example 2
/*
 struct vnop_strategy_args {
 struct vnodeop_desc *a_desc;
 struct buf          *a_bp;
 };
 */
FUSE_VNOP_EXPORT
int
fuse_biglock_vnop_strategy(struct vnop_strategy_args *ap)
{
#if 1
	/* Now trying out a locked version of strategy. We need to hold a lock
	 * as the underlying layers expect it. */
	locked_vnop(buf_vnode(ap->a_bp), fuse_vnop_strategy, ap);
#else
	/* VNOP_STRATEGY in kpi_vfs.c is completely unprotected. This seems very
	 * dangerous, but I don't want to do anything that kpi_vfs.c doesn't do
	 * without being able to motivate why. */
	return fuse_vnop_strategy(ap);
#endif
}
Example 3
/*
 struct vnop_strategy_args {
 struct vnodeop_desc *a_desc;
 struct buf          *a_bp;
 };
 */
FUSE_VNOP_EXPORT
int
fuse_biglock_vnop_strategy(struct vnop_strategy_args *ap)
{
#if 1
	/* Now trying out a locked version of strategy. We need to hold a lock
	 * as the underlying layers expect it. */
	locked_vnop(buf_vnode(ap->a_bp), fuse_vnop_strategy, ap);
#else
	/* WARNING: nodelock and biglock are released and reacquired in
	 *          fuse_internal_strategy. Calling fuse_vnop_strategy directly
	 *          will result in a kernel panic! */

	/* VNOP_STRATEGY in kpi_vfs.c is completely unprotected. This seems very
	 * dangerous, but I don't want to do anything that kpi_vfs.c doesn't do
	 * without being able to motivate why. */
	return fuse_vnop_strategy(ap);
#endif
}
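Both FUSE examples defer to locked_vnop(), whose job is to hold the volume's biglock around the real vnop because the underlying layers expect it. The following is only a rough sketch of that shape; biglock_for_vnode() is an invented helper, and the actual MacFUSE/fuse4x locked_vnop is defined elsewhere and also takes care of returning the vnop's result:

#include <sys/vnode.h>
#include <kern/locks.h>

typedef int (*vnop_fn_t)(void *ap);

/* Assumed helper: returns the per-mount biglock for this vnode. */
extern lck_mtx_t *biglock_for_vnode(vnode_t vp);

static int
locked_vnop_sketch(vnode_t vp, vnop_fn_t vnop, void *ap)
{
	lck_mtx_t *biglock = biglock_for_vnode(vp);
	int err;

	lck_mtx_lock(biglock);      /* underlying layers expect this held */
	err = vnop(ap);
	lck_mtx_unlock(biglock);
	return err;
}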
Example 4
int
spec_strategy(struct vnop_strategy_args *ap)
{
	buf_t	bp;
	int	bflags;
	int	policy;
	dev_t	bdev;
	uthread_t ut;
	size_t	devbsdunit;
	mount_t	mp;

	bp = ap->a_bp;
	bdev = buf_device(bp);
	bflags = buf_flags(bp);
	mp = buf_vnode(bp)->v_mount;

	if (kdebug_enable) {
		int code = 0;

		if (bflags & B_READ)
			code |= DKIO_READ;
		if (bflags & B_ASYNC)
			code |= DKIO_ASYNC;

		if (bflags & B_META)
			code |= DKIO_META;
		else if (bflags & B_PAGEIO)
			code |= DKIO_PAGING;

		KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
		    bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0);
	}
	if (((bflags & (B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
	    mp && (mp->mnt_kern_flag & MNTK_ROOTDEV))
		hard_throttle_on_root = 1;

	if (mp != NULL)
		devbsdunit = mp->mnt_devbsdunit;
	else
		devbsdunit = LOWPRI_MAX_NUM_DEV - 1;

	throttle_info_update(&_throttle_io_info[devbsdunit], bflags);
	if ((policy = throttle_get_io_policy(&ut)) == IOPOL_THROTTLE) {
		bp->b_flags |= B_THROTTLED_IO;
	}

	if ((bflags & B_READ) == 0) {
		microuptime(&_throttle_io_info[devbsdunit].last_IO_timestamp);
		if (mp) {
			INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_write_size);
		}
	} else if (mp) {
		INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_read_size);
	}

	(*bdevsw[major(bdev)].d_strategy)(bp);
	
	return (0);
}
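After the throttling bookkeeping, spec_strategy() hands the buffer to the block driver selected by the device's major number. A sketch of that final dispatch step in isolation (illustrative only; the kdebug and throttle accounting above are deliberately omitted, and bdevsw is the kernel-private block-device switch table):

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/buf.h>

static void
dispatch_to_block_driver(buf_t bp)
{
	dev_t bdev = buf_device(bp);   /* device number carried by the buf */
	int   maj  = major(bdev);      /* driver index; minor(bdev) picks the unit */

	/* The driver's d_strategy routine completes the I/O and
	 * eventually calls buf_biodone() on the buffer. */
	(*bdevsw[maj].d_strategy)(bp);
}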
Example 5
__private_extern__
errno_t
fuse_internal_strategy_buf(struct vnop_strategy_args *ap)
{
    int32_t   bflags;
    upl_t     bupl;
    daddr64_t blkno, lblkno;
    int       bmap_flags;
    buf_t     bp    = ap->a_bp;
    vnode_t   vp    = buf_vnode(bp);
    int       vtype = vnode_vtype(vp);

    struct fuse_data *data;

    if (!vp || vtype == VCHR || vtype == VBLK) {
        panic("MacFUSE: buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n");
    }

    bflags = buf_flags(bp);

    if (bflags & B_READ) {
        bmap_flags = VNODE_READ;
    } else {
        bmap_flags = VNODE_WRITE;
    }

    bupl = buf_upl(bp);
    blkno = buf_blkno(bp);
    lblkno = buf_lblkno(bp);

    if (!(bflags & B_CLUSTER)) {

        if (bupl) {
            return cluster_bp(bp);
        }

        if (blkno == lblkno) {
            off_t  f_offset;
            size_t contig_bytes;

            data = fuse_get_mpdata(vnode_mount(vp));

            // Still think this is a kludge?
            f_offset = lblkno * data->blocksize;
            blkno = f_offset / data->blocksize;

            buf_setblkno(bp, blkno);

            contig_bytes = buf_count(bp);

            if (blkno == -1) {
                buf_clear(bp);
            }
                        
            /*
             * Our "device" is always /all contiguous/. We don't wanna be
             * doing things like:
             *
             * ...
             *     else if ((long)contig_bytes < buf_count(bp)) {
             *         ret = buf_strategy_fragmented(devvp, bp, f_offset,
             *                                       contig_bytes));
             *         return ret;
             *      }
             */
        }

        if (blkno == -1) {
            buf_biodone(bp);
            return 0;
        }
    }

    // Issue the I/O

    return fuse_internal_strategy(vp, bp);
}
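The block-number handling in the non-cluster path above deliberately degenerates: because the FUSE "device" is fully contiguous, mapping the logical block to a byte offset and back yields the same block number, which is what the "kludge" comment is pointing at. Reduced to its arithmetic (sketch only; the blocksize field is borrowed from the example above):

#include <sys/types.h>
#include <stdint.h>

static daddr64_t
map_logical_block(daddr64_t lblkno, uint32_t blocksize)
{
	off_t f_offset = lblkno * blocksize;   /* logical block -> byte offset */

	return f_offset / blocksize;           /* byte offset -> "device" block;
	                                          identical to lblkno here */
}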