Example 1
/*
 * The strategy function is typically only called when memory pressure
 * forces the system to attempt to pageout pages.  It can also be called
 * by [n]vtruncbuf() when a truncation cuts a page in half.  Normal write
 * operations go through the buffer cache and reach this function only when
 * their backing buffers are flushed.
 */
static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;
	vm_page_t m;
	int i;

	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return(0);
	}

	lwkt_gettoken(&vp->v_mount->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Don't bother flushing to swap if there is no swap, just
	 * ensure that the pages are marked as needing a commit (still).
	 */
	if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		nbio = push_bio(bio);
		nbio->bio_done = tmpfs_strategy_done;
		nbio->bio_offset = bio->bio_offset;
		swap_pager_strategy(uobj, nbio);
	}

	lwkt_reltoken(&vp->v_mount->mnt_token);
	return 0;
}
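
The write path above pushes a new BIO and lets a completion callback finish the original request once swap_pager_strategy() is done. The example installs tmpfs_strategy_done but does not show it; the sketch below illustrates what such a callback could look like, inferred from the no-swap case above (a failed swap write is downgraded to "pages still need a commit"). It is an assumption-based sketch, not the verbatim DragonFly implementation; pop_bio() is taken to be the counterpart of the push_bio() call used above.

/*
 * Sketch of a completion callback for the pushed BIO (illustrative, not
 * the verbatim DragonFly tmpfs code).  A failed swap write is not treated
 * as a hard error: the error is cleared and the pages are re-marked as
 * needing a commit, mirroring the no-swap branch of tmpfs_strategy().
 */
static void
tmpfs_strategy_done(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_page_t m;
	int i;

	if (bp->b_cmd == BUF_CMD_WRITE && (bp->b_flags & B_ERROR)) {
		bp->b_flags &= ~B_ERROR;
		bp->b_error = 0;
		bp->b_resid = 0;
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
	}
	bio = pop_bio(bio);	/* back to the original BIO */
	biodone(bio);		/* complete the original request */
}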
Example 2
/*
 *	vnstrategy:
 *
 *	Run strategy routine for VN device.  We use VOP_READ/VOP_WRITE calls
 *	for vnode-backed vn's, and the swap_pager_strategy() call for
 *	vm_object-backed vn's.
 */
static int
vnstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp;
	struct bio *nbio;
	int unit;
	struct vn_softc *vn;
	int error;

	unit = dkunit(dev);
	vn = dev->si_drv1;
	KKASSERT(vn != NULL);

	bp = bio->bio_buf;

	IFOPT(vn, VN_DEBUG)
		kprintf("vnstrategy(%p): unit %d\n", bp, unit);

	if ((vn->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return(0);
	}

	bp->b_resid = bp->b_bcount;

	/*
	 * The vnode device is using disk/slice label support.
	 *
	 * The dscheck() function is called to validate the
	 * slices that exist ON the vnode device itself and to
	 * translate the "slice-relative" block number again.
	 * dscheck() will call biodone() and return NULL if
	 * we are at EOF or beyond the device size.
	 */

	nbio = bio;

	/*
	 * Use the translated nbio from this point on
	 */
	if (vn->sc_vp && bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * Freeblks is not handled for vnode-backed elements yet.
		 */
		bp->b_resid = 0;
		/* operation complete */
	} else if (vn->sc_vp) {
		/*
		 * VNODE I/O
		 *
		 * If an error occurs, we set B_ERROR but we do not set 
		 * B_INVAL because (for a write anyway), the buffer is 
		 * still valid.
		 */
		struct uio auio;
		struct iovec aiov;

		bzero(&auio, sizeof(auio));

		aiov.iov_base = bp->b_data;
		aiov.iov_len = bp->b_bcount;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = nbio->bio_offset;
		auio.uio_segflg = UIO_SYSSPACE;
		if (bp->b_cmd == BUF_CMD_READ)
			auio.uio_rw = UIO_READ;
		else
			auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bp->b_bcount;
		auio.uio_td = curthread;

		/*
		 * Don't use IO_DIRECT here, it really gets in the way
		 * due to typical blocksize differences between the
		 * fs backing the VN device and whatever is running on
		 * the VN device.
		 */
		switch (bp->b_cmd) {
		case (BUF_CMD_READ):
			vn_lock(vn->sc_vp, LK_SHARED | LK_RETRY);
			error = VOP_READ(vn->sc_vp, &auio, IO_RECURSE,
					 vn->sc_cred);
			break;

		case (BUF_CMD_WRITE):
			vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_WRITE(vn->sc_vp, &auio, IO_RECURSE,
					  vn->sc_cred);
			break;

		case (BUF_CMD_FLUSH):
			auio.uio_resid = 0;
			vn_lock(vn->sc_vp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_FSYNC(vn->sc_vp, MNT_WAIT, 0);
			break;
		default:
			auio.uio_resid = 0;
			error = 0;
			break;
		}
		vn_unlock(vn->sc_vp);
		bp->b_resid = auio.uio_resid;
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
		}
		/* operation complete */
	} else if (vn->sc_object) {
		/*
		 * OBJT_SWAP I/O (handles read, write, freeblks)
		 *
		 * We have nothing to do if freeing blocks on a reserved
		 * swap area, otherwise execute the op.
		 */
		if (bp->b_cmd == BUF_CMD_FREEBLKS && TESTOPT(vn, VN_RESERVE)) {
			bp->b_resid = 0;
			/* operation complete */
		} else {
			swap_pager_strategy(vn->sc_object, nbio);
			return(0);
			/* NOT REACHED */
		}
	} else {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		/* operation complete */
	}
	biodone(nbio);
	return(0);
}
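
The vnode-backed branch of Example 2 drives VOP_READ()/VOP_WRITE() by describing the buffer's kernel data with a single-segment struct uio. The helper below isolates that setup pattern; vn_buf_to_uio() is a hypothetical name used for illustration only and is not part of vn.c.

/*
 * Hypothetical helper (not part of vn.c): describe a kernel buffer with a
 * single-segment uio so it can be handed to VOP_READ() or VOP_WRITE(),
 * mirroring the setup performed inline in vnstrategy() above.
 */
static void
vn_buf_to_uio(void *data, size_t len, off_t offset, enum uio_rw rw,
	      struct uio *auio, struct iovec *aiov)
{
	aiov->iov_base = data;
	aiov->iov_len = len;

	bzero(auio, sizeof(*auio));
	auio->uio_iov = aiov;			/* one segment */
	auio->uio_iovcnt = 1;
	auio->uio_offset = offset;		/* byte offset on the backing vnode */
	auio->uio_segflg = UIO_SYSSPACE;	/* data lives in kernel space */
	auio->uio_rw = rw;			/* UIO_READ or UIO_WRITE */
	auio->uio_resid = len;
	auio->uio_td = curthread;
}

After the VOP call returns, uio_resid holds the bytes that were not transferred, which is what vnstrategy() copies back into bp->b_resid before finishing the BIO.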