Example #1
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	caddr_t sa;
	u_int iolen;
	struct buf *bp;

	/* Keep the process UPAGES from being swapped. XXX: why ? */
	PHOLD(curproc);

	bp = getpbuf(NULL);
	sa = bp->b_data;
	error = 0;

	/* XXX: sanity check */
	if(dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			bp->b_flags = 0;
			if (uio->uio_rw == UIO_READ) {
				bp->b_iocmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->b_iocmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->b_iodone = bdone;
			bp->b_data = uio->uio_iov[i].iov_base;
			bp->b_bcount = uio->uio_iov[i].iov_len;
			bp->b_offset = uio->uio_offset;
			bp->b_iooffset = uio->uio_offset;
			bp->b_saveaddr = sa;

			/* Don't exceed drivers iosize limit */
			if (bp->b_bcount > dev->si_iosize_max)
				bp->b_bcount = dev->si_iosize_max;

			/* 
			 * Make sure the pbuf can map the request
			 * XXX: The pbuf has kvasize = MAXPHYS so a request
			 * XXX: larger than MAXPHYS - PAGE_SIZE must be
			 * XXX: page aligned or it will be fragmented.
			 */
			iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
			if ((bp->b_bcount + iolen) > bp->b_kvasize) {
				bp->b_bcount = bp->b_kvasize;
				if (iolen != 0)
					bp->b_bcount -= PAGE_SIZE;
			}
			bp->b_bufsize = bp->b_bcount;

			bp->b_blkno = btodb(bp->b_offset);

			if (uio->uio_segflg == UIO_USERSPACE)
				if (vmapbuf(bp) < 0) {
					error = EFAULT;
					goto doerror;
				}

			dev_strategy(dev, bp);
			if (uio->uio_rw == UIO_READ)
				bwait(bp, PRIBIO, "physrd");
			else
				bwait(bp, PRIBIO, "physwr");

			if (uio->uio_segflg == UIO_USERSPACE)
				vunmapbuf(bp);
			iolen = bp->b_bcount - bp->b_resid;
			if (iolen == 0 && !(bp->b_ioflags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if( bp->b_ioflags & BIO_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	PRELE(curproc);
	return (error);
}
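
The routine above consumes the caller's struct uio and drives the whole transfer itself, so a character-device driver can hand its read and write requests straight to it. A minimal sketch under that assumption; mydev_read and mydev_write are hypothetical names, and only the physio() signature shown in Example #1 is relied on:

/*
 * Hypothetical cdev entry points that forward the entire request to
 * physio() for a raw, uncached transfer.
 */
static int
mydev_read(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (physio(dev, uio, ioflag));
}

static int
mydev_write(struct cdev *dev, struct uio *uio, int ioflag)
{

	return (physio(dev, uio, ioflag));
}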
Example #2
static int
ffs_rawread_readahead(struct vnode *vp, caddr_t udata, off_t loffset,
		      size_t len, struct buf *bp)
{
	int error;
	int iolen;
	int blockoff;
	int bsize;
	struct vnode *dp;
	int bforwards;
	
	bsize = vp->v_mount->mnt_stat.f_iosize;

	/*
	 * Make sure it fits into the pbuf
	 */
	iolen = (int)(intptr_t)udata & PAGE_MASK;
	if (len + iolen > bp->b_kvasize) {
		len = bp->b_kvasize;
		if (iolen != 0)
			len -= PAGE_SIZE;
	}

	/*
	 * Raw disk address is in bio2, but we wait for it to
	 * chain to bio1.
	 */
	bp->b_flags &= ~B_ERROR;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = NOOFFSET;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;

	blockoff = (loffset % bsize) / DEV_BSIZE;

	error = VOP_BMAP(vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 &bforwards, NULL, BUF_CMD_READ);
	if (error != 0)
		return error;
	dp = VTOI(vp)->i_devvp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		/* 
		 * Fill holes with NULs to preserve semantics 
		 */
		if (len + blockoff * DEV_BSIZE > bsize)
			len = bsize - blockoff * DEV_BSIZE;
		
		if (vmapbuf(bp, udata, len) < 0)
			return EFAULT;
		
		lwkt_user_yield();
		bzero(bp->b_data, bp->b_bcount);

		/* Mark operation completed (similar to bufdone()) */

		bp->b_resid = 0;
		return 0;
	}
	
	if (len + blockoff * DEV_BSIZE > bforwards)
		len = bforwards - blockoff * DEV_BSIZE;
	bp->b_bio2.bio_offset += blockoff * DEV_BSIZE;
	
	if (vmapbuf(bp, udata, len) < 0)
		return EFAULT;
	
	/*
	 * Access the block device layer using the device vnode (dp) and
	 * the translated block number (bio2) instead of the logical block
	 * number (bio1).
	 *
	 * Even though we are bypassing the vnode layer, we still
	 * want the vnode state to indicate that an I/O on its behalf
	 * is in progress.
	 */
	bp->b_cmd = BUF_CMD_READ;
	bio_start_transaction(&bp->b_bio1, &vp->v_track_read);
	vn_strategy(dp, &bp->b_bio2);
	return 0;
}
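
Examples #1 and #2 both clamp the transfer so that, once the user address's offset within its first page is counted, the request still fits in the pbuf's kernel virtual window. A standalone sketch of that arithmetic, assuming a 4 KiB page and a 128 KiB window standing in for bp->b_kvasize:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE	4096u
#define PAGE_MASK	(PAGE_SIZE - 1)
#define KVA_WINDOW	(128u * 1024)	/* stand-in for bp->b_kvasize (MAXPHYS) */

/*
 * Mirror of the clamping logic above: an unaligned request that would
 * overflow the mapping window loses one page of capacity so that the
 * mapped range stays page aligned.
 */
static size_t
clamp_to_pbuf(uintptr_t udata, size_t len)
{
	size_t off = udata & PAGE_MASK;		/* offset within the first page */

	if (len + off > KVA_WINDOW) {
		len = KVA_WINDOW;
		if (off != 0)
			len -= PAGE_SIZE;
	}
	return (len);
}

int
main(void)
{
	/* An unaligned 128 KiB request loses one page of capacity. */
	printf("%zu\n", clamp_to_pbuf(0x1000 + 512, 128 * 1024));	/* 126976 */
	/* A page-aligned request maps in full. */
	printf("%zu\n", clamp_to_pbuf(0x2000, 128 * 1024));		/* 131072 */
	return (0);
}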
Example #3
int
physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct buf *bp;
	struct cdevsw *csw;
	caddr_t sa;
	u_int iolen;
	int error, i, mapped;

	/* Keep the process UPAGES from being swapped. XXX: why ? */
	PHOLD(curproc);

	bp = getpbuf(NULL);
	sa = bp->b_data;
	error = 0;

	/* XXX: sanity check */
	if(dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/*
	 * If the driver does not want I/O to be split, that means that we
	 * need to reject any requests that will not fit into one buffer.
	 */
	if (dev->si_flags & SI_NOSPLIT &&
	    (uio->uio_resid > dev->si_iosize_max || uio->uio_resid > MAXPHYS ||
	    uio->uio_iovcnt > 1)) {
		/*
		 * Tell the user why his I/O was rejected.
		 */
		if (uio->uio_resid > dev->si_iosize_max)
			uprintf("%s: request size=%zd > si_iosize_max=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, dev->si_iosize_max);
		if (uio->uio_resid > MAXPHYS)
			uprintf("%s: request size=%zd > MAXPHYS=%d; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_resid, MAXPHYS);
		if (uio->uio_iovcnt > 1)
			uprintf("%s: request vectors=%d > 1; "
			    "cannot split request\n", devtoname(dev),
			    uio->uio_iovcnt);

		error = EFBIG;
		goto doerror;
	}

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			bp->b_flags = 0;
			if (uio->uio_rw == UIO_READ) {
				bp->b_iocmd = BIO_READ;
				curthread->td_ru.ru_inblock++;
			} else {
				bp->b_iocmd = BIO_WRITE;
				curthread->td_ru.ru_oublock++;
			}
			bp->b_iodone = bdone;
			bp->b_data = uio->uio_iov[i].iov_base;
			bp->b_bcount = uio->uio_iov[i].iov_len;
			bp->b_offset = uio->uio_offset;
			bp->b_iooffset = uio->uio_offset;
			bp->b_saveaddr = sa;

			/* Don't exceed drivers iosize limit */
			if (bp->b_bcount > dev->si_iosize_max)
				bp->b_bcount = dev->si_iosize_max;

			/* 
			 * Make sure the pbuf can map the request
			 * XXX: The pbuf has kvasize = MAXPHYS so a request
			 * XXX: larger than MAXPHYS - PAGE_SIZE must be
			 * XXX: page aligned or it will be fragmented.
			 */
			iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
			if ((bp->b_bcount + iolen) > bp->b_kvasize) {
				/*
				 * This device does not want I/O to be split.
				 */
				if (dev->si_flags & SI_NOSPLIT) {
					uprintf("%s: request ptr %p is not "
					    "on a page boundary; cannot split "
					    "request\n", devtoname(dev),
					    bp->b_data);
					error = EFBIG;
					goto doerror;
				}
				bp->b_bcount = bp->b_kvasize;
				if (iolen != 0)
					bp->b_bcount -= PAGE_SIZE;
			}
			bp->b_bufsize = bp->b_bcount;

			bp->b_blkno = btodb(bp->b_offset);

			csw = dev->si_devsw;
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (dev->si_flags & SI_UNMAPPED)
					mapped = 0;
				else
					mapped = 1;
				if (vmapbuf(bp, mapped) < 0) {
					error = EFAULT;
					goto doerror;
				}
			}

			dev_strategy_csw(dev, csw, bp);
			if (uio->uio_rw == UIO_READ)
				bwait(bp, PRIBIO, "physrd");
			else
				bwait(bp, PRIBIO, "physwr");

			if (uio->uio_segflg == UIO_USERSPACE)
				vunmapbuf(bp);
			iolen = bp->b_bcount - bp->b_resid;
			if (iolen == 0 && !(bp->b_ioflags & BIO_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base =
			    (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if( bp->b_ioflags & BIO_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	PRELE(curproc);
	return (error);
}
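
Compared with Example #1, this revision screens requests up front for devices flagged SI_NOSPLIT: anything that cannot be issued as a single transfer is rejected with EFBIG before the loop starts (the pointer-alignment check inside the loop is handled separately). The screen reduces to the predicate below; this is an illustrative restatement, and MAXPHYS_ASSUMED plus the helper name are assumptions rather than kernel API:

#include <stdbool.h>
#include <stddef.h>

#define MAXPHYS_ASSUMED	(128u * 1024)	/* stand-in for the kernel's MAXPHYS */

/*
 * A SI_NOSPLIT request may proceed only if it fits the driver's size
 * limit, fits within MAXPHYS, and arrives as a single iovec.
 */
static bool
nosplit_request_ok(size_t resid, int iovcnt, size_t si_iosize_max)
{

	return (resid <= si_iosize_max &&
	    resid <= MAXPHYS_ASSUMED &&
	    iovcnt == 1);
}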
Example #4
int
physio(dev_t dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	int spl;
	caddr_t sa;
	off_t blockno;
	u_int iolen;
	struct buf *bp;

	/* Keep the process UPAGES from being swapped. XXX: why ? */
	PHOLD(curproc);

	bp = getpbuf(NULL);
	sa = bp->b_data;
	error = bp->b_error = 0;

	/* XXX: sanity check */
	if(dev->si_iosize_max < PAGE_SIZE) {
		printf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			if (uio->uio_rw == UIO_READ)
				bp->b_flags = B_PHYS | B_CALL | B_READ;
			else 
				bp->b_flags = B_PHYS | B_CALL | B_WRITE;
			bp->b_dev = dev;
			bp->b_iodone = physwakeup;
			bp->b_data = uio->uio_iov[i].iov_base;
			bp->b_bcount = uio->uio_iov[i].iov_len;
			bp->b_offset = uio->uio_offset;
			bp->b_saveaddr = sa;

			/* Don't exceed drivers iosize limit */
			if (bp->b_bcount > dev->si_iosize_max)
				bp->b_bcount = dev->si_iosize_max;

			/* 
			 * Make sure the pbuf can map the request
			 * XXX: The pbuf has kvasize = MAXPHYS so a request
			 * XXX: larger than MAXPHYS - PAGE_SIZE must be
			 * XXX: page aligned or it will be fragmented.
			 */
			iolen = ((vm_offset_t) bp->b_data) & PAGE_MASK;
			if ((bp->b_bcount + iolen) > bp->b_kvasize) {
				bp->b_bcount = bp->b_kvasize;
				if (iolen != 0)
					bp->b_bcount -= PAGE_SIZE;
			}
			bp->b_bufsize = bp->b_bcount;

			blockno = bp->b_offset >> DEV_BSHIFT;
			if ((daddr_t)blockno != blockno) {
				error = EINVAL; /* blockno overflow */
				goto doerror;
			}
			bp->b_blkno = blockno;

			if (uio->uio_segflg == UIO_USERSPACE) {
				if (!useracc(bp->b_data, bp->b_bufsize,
				    bp->b_flags & B_READ ?
				    VM_PROT_WRITE : VM_PROT_READ)) {
					error = EFAULT;
					goto doerror;
				}
				vmapbuf(bp);
			}

			BUF_STRATEGY(bp, 0);
			spl = splbio();
			while ((bp->b_flags & B_DONE) == 0)
				tsleep((caddr_t)bp, PRIBIO, "physstr", 0);
			splx(spl);

			if (uio->uio_segflg == UIO_USERSPACE)
				vunmapbuf(bp);
			iolen = bp->b_bcount - bp->b_resid;
			if (iolen == 0 && !(bp->b_flags & B_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base += iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if( bp->b_flags & B_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	PRELE(curproc);
	return (error);
}
Example #5
/*
 * Do "physical I/O" on behalf of a user.  "Physical I/O" is I/O directly
 * from the raw device to user buffers, and bypasses the buffer cache.
 *
 * Comments in brackets are from Leffler, et al.'s pseudo-code implementation.
 */
int
physio(void (*strategy)(struct buf *), struct buf *bp, dev_t dev, int flags,
    void (*minphys)(struct buf *), struct uio *uio)
{
	struct iovec *iovp;
	struct proc *p = curproc;
	int error, done, i, nobuf, s, todo;

	error = 0;
	flags &= B_READ | B_WRITE;

	/* Make sure we have a buffer, creating one if necessary. */
	if ((nobuf = (bp == NULL)) != 0)
		bp = getphysbuf();

	/* [raise the processor priority level to splbio;] */
	s = splbio();

	/* [while the buffer is marked busy] */
	while (bp->b_flags & B_BUSY) {
		/* [mark the buffer wanted] */
		bp->b_flags |= B_WANTED;
		/* [wait until the buffer is available] */
		tsleep(bp, PRIBIO+1, "physbuf", 0);
	}

	/* Mark it busy, so nobody else will use it. */
	bp->b_flags |= B_BUSY;

	/* [lower the priority level] */
	splx(s);

	/* [set up the fixed part of the buffer for a transfer] */
	bp->b_dev = dev;
	bp->b_error = 0;
	bp->b_proc = p;
	LIST_INIT(&bp->b_dep);

	/*
	 * [while there are data to transfer and no I/O error]
	 * Note that I/O errors are handled with a 'goto' at the bottom
	 * of the 'while' loop.
	 */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		iovp = &uio->uio_iov[i];
		while (iovp->iov_len > 0) {
			/*
			 * [mark the buffer busy for physical I/O]
			 * (i.e. set B_PHYS (because it's an I/O to user
			 * memory), and B_RAW, because B_RAW is to be
			 * "Set by physio for raw transfers.", in addition
			 * to the "busy" and read/write flag.)
			 */
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | flags;

			/* [set up the buffer for a maximum-sized transfer] */
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_data = iovp->iov_base;

			/*
			 * Because iov_len is unsigned but b_bcount is signed,
			 * an overflow is possible. Therefore bound to MAXPHYS
			 * before calling minphys.
			 */
			if (iovp->iov_len > MAXPHYS)
				bp->b_bcount = MAXPHYS;
			else
				bp->b_bcount = iovp->iov_len;

			/*
			 * [call minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*minphys)(bp);
			todo = bp->b_bcount;
#ifdef DIAGNOSTIC
			if (todo < 0)
				panic("todo < 0; minphys broken");
			if (todo > MAXPHYS)
				panic("todo > MAXPHYS; minphys broken");
#endif

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 * Beware vmapbuf(); it clobbers b_data and
			 * saves it in b_saveaddr.  However, vunmapbuf()
			 * restores it.
			 */
			error = uvm_vslock(p, bp->b_data, todo,
			    (flags & B_READ) ?
			    VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ);
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = error;
				goto after_unlock;
			}
			vmapbuf(bp, todo);

			/* [call strategy to start the transfer] */
			(*strategy)(bp);

			/*
			 * Note that the raise/wait/lower/get error
			 * steps below would be done by biowait(), but
			 * we want to unlock the address space before
			 * we lower the priority.
			 *
			 * [raise the priority level to splbio]
			 */
			s = splbio();

			/* [wait for the transfer to complete] */
			while ((bp->b_flags & B_DONE) == 0)
				tsleep(bp, PRIBIO + 1, "physio", 0);

			/* Mark it busy again, so nobody else will use it. */
			bp->b_flags |= B_BUSY;

			/* [lower the priority level] */
			splx(s);

			/*
			 * [unlock the part of the address space previously
			 *    locked]
			 */
			vunmapbuf(bp, todo);
			uvm_vsunlock(p, bp->b_data, todo);
after_unlock:

			/* remember error value (save a splbio/splx pair) */
			if (bp->b_flags & B_ERROR)
				error = (bp->b_error ? bp->b_error : EIO);

			/*
			 * [deduct the transfer size from the total number
			 *    of data to transfer]
			 */
			done = bp->b_bcount - bp->b_resid;
#ifdef DIAGNOSTIC
			if (done < 0)
				panic("done < 0; strategy broken");
			if (done > todo)
				panic("done > todo; strategy broken");
#endif
			iovp->iov_len -= done;
			iovp->iov_base = (caddr_t)iovp->iov_base + done;
			uio->uio_offset += done;
			uio->uio_resid -= done;

			/*
			 * Now, check for an error.
			 * Also, handle weird end-of-disk semantics.
			 */
			if (error || done < todo)
				goto done;
		}
	}

done:
	/*
	 * [clean up the state of the buffer]
	 * Remember if somebody wants it, so we can wake them up below.
	 * Also, if we had to steal it, give it back.
	 */
	s = splbio();
	bp->b_flags &= ~(B_BUSY | B_PHYS | B_RAW);
	if (nobuf)
		putphysbuf(bp);
	else {
		/*
		 * [if another process is waiting for the raw I/O buffer,
		 *    wake up processes waiting to do physical I/O]
		 */
		if (bp->b_flags & B_WANTED) {
			bp->b_flags &= ~B_WANTED;
			wakeup(bp);
		}
	}
	splx(s);

	return (error);
}
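
In this interface the caller supplies the strategy routine and a minphys hook that bounds each transfer, and may pass a NULL buffer to have physio() allocate one via getphysbuf(). A minimal sketch of a raw-device read entry point forwarding to it; mystrategy, myminphys, and myrawread are hypothetical names:

/* Driver hooks assumed to exist elsewhere. */
void mystrategy(struct buf *);
void myminphys(struct buf *);

/*
 * Hypothetical raw read routine: pass a NULL buffer so physio()
 * allocates its own, and let it run the transfer through this
 * driver's strategy routine, bounded by myminphys.
 */
int
myrawread(dev_t dev, struct uio *uio, int ioflag)
{

	return (physio(mystrategy, NULL, dev, B_READ, myminphys, uio));
}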
Example #6
/*
 * Do "physical I/O" on behalf of a user.  "Physical I/O" is I/O directly
 * from the raw device to user buffers, and bypasses the buffer cache.
 *
 * Comments in brackets are from Leffler, et al.'s pseudo-code implementation.
 */
int
physio(void (*strategy)(struct buf *), struct buf *obp, dev_t dev, int flags,
    void (*min_phys)(struct buf *), struct uio *uio)
{
	struct iovec *iovp;
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	int i, error;
	struct buf *bp = NULL;
	struct physio_stat *ps;
	int concurrency = PHYSIO_CONCURRENCY - 1;

	error = RUN_ONCE(&physio_initialized, physio_init);
	if (__predict_false(error != 0)) {
		return error;
	}

	DPRINTF(("%s: called: off=%" PRIu64 ", resid=%zu\n",
	    __func__, uio->uio_offset, uio->uio_resid));

	flags &= B_READ | B_WRITE;

	if ((ps = kmem_zalloc(sizeof(*ps), KM_SLEEP)) == NULL)
		return ENOMEM;
	/* ps->ps_running = 0; */
	/* ps->ps_error = 0; */
	/* ps->ps_failed = 0; */
	ps->ps_orig_bp = obp;
	ps->ps_endoffset = -1;
	mutex_init(&ps->ps_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&ps->ps_cv, "physio");

	/* Make sure we have a buffer, creating one if necessary. */
	if (obp != NULL) {
		/* [raise the processor priority level to splbio;] */
		mutex_enter(&bufcache_lock);
		/* Mark it busy, so nobody else will use it. */
		while (bbusy(obp, false, 0, NULL) == EPASSTHROUGH)
			;
		mutex_exit(&bufcache_lock);
		concurrency = 0; /* see "XXXkludge" comment below */
	}

	uvm_lwp_hold(l);

	for (i = 0; i < uio->uio_iovcnt; i++) {
		bool sync = true;

		iovp = &uio->uio_iov[i];
		while (iovp->iov_len > 0) {
			size_t todo;
			vaddr_t endp;

			mutex_enter(&ps->ps_lock);
			if (ps->ps_failed != 0) {
				goto done_locked;
			}
			physio_wait(ps, sync ? 0 : concurrency);
			mutex_exit(&ps->ps_lock);
			if (obp != NULL) {
				/*
				 * XXXkludge
				 * some drivers use "obp" as an identifier.
				 */
				bp = obp;
			} else {
				bp = getiobuf(NULL, true);
				bp->b_cflags = BC_BUSY;
			}
			bp->b_dev = dev;
			bp->b_proc = p;
			bp->b_private = ps;

			/*
			 * [mark the buffer busy for physical I/O]
			 * (i.e. set B_PHYS (because it's an I/O to user
			 * memory, and B_RAW, because B_RAW is to be
			 * "Set by physio for raw transfers.", in addition
			 * to the "busy" and read/write flag.)
			 */
			bp->b_oflags = 0;
			bp->b_cflags = BC_BUSY;
			bp->b_flags = flags | B_PHYS | B_RAW;
			bp->b_iodone = physio_biodone;

			/* [set up the buffer for a maximum-sized transfer] */
			bp->b_blkno = btodb(uio->uio_offset);
			if (dbtob(bp->b_blkno) != uio->uio_offset) {
				error = EINVAL;
				goto done;
			}
			bp->b_bcount = MIN(MAXPHYS, iovp->iov_len);
			bp->b_data = iovp->iov_base;

			/*
			 * [call minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*min_phys)(bp);
			todo = bp->b_bufsize = bp->b_bcount;
#if defined(DIAGNOSTIC)
			if (todo > MAXPHYS)
				panic("todo(%zu) > MAXPHYS; minphys broken",
				    todo);
#endif /* defined(DIAGNOSTIC) */

			sync = false;
			endp = (vaddr_t)bp->b_data + todo;
			if (trunc_page(endp) != endp) {
				/*
				 * following requests can overlap.
				 * note that uvm_vslock does round_page.
				 */
				sync = true;
			}

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 * Beware vmapbuf(); it clobbers b_data and
			 * saves it in b_saveaddr.  However, vunmapbuf()
			 * restores it.
			 */
			error = uvm_vslock(p->p_vmspace, bp->b_data, todo,
			    (flags & B_READ) ?  VM_PROT_WRITE : VM_PROT_READ);
			if (error) {
				goto done;
			}
			vmapbuf(bp, todo);

			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);

			mutex_enter(&ps->ps_lock);
			ps->ps_running++;
			mutex_exit(&ps->ps_lock);

			/* [call strategy to start the transfer] */
			(*strategy)(bp);
			bp = NULL;

			iovp->iov_len -= todo;
			iovp->iov_base = (char *)iovp->iov_base + todo;
			uio->uio_offset += todo;
			uio->uio_resid -= todo;
		}
	}

done:
	mutex_enter(&ps->ps_lock);
done_locked:
	physio_wait(ps, 0);
	mutex_exit(&ps->ps_lock);

	if (ps->ps_failed != 0) {
		off_t delta;

		delta = uio->uio_offset - ps->ps_endoffset;
		KASSERT(delta > 0);
		uio->uio_resid += delta;
		/* uio->uio_offset = ps->ps_endoffset; */
	} else {
		KASSERT(ps->ps_endoffset == -1);
	}
	if (bp != NULL && bp != obp) {
		putiobuf(bp);
	}
	if (error == 0) {
		error = ps->ps_error;
	}
	mutex_destroy(&ps->ps_lock);
	cv_destroy(&ps->ps_cv);
	kmem_free(ps, sizeof(*ps));

	/*
	 * [clean up the state of the buffer]
	 * Remember if somebody wants it, so we can wake them up below.
	 * Also, if we had to steal it, give it back.
	 */
	if (obp != NULL) {
		KASSERT((obp->b_cflags & BC_BUSY) != 0);

		/*
		 * [if another process is waiting for the raw I/O buffer,
		 *    wake up processes waiting to do physical I/O;
		 */
		mutex_enter(&bufcache_lock);
		obp->b_cflags &= ~(BC_BUSY | BC_WANTED);
		obp->b_flags &= ~(B_PHYS | B_RAW);
		obp->b_iodone = NULL;
		cv_broadcast(&obp->b_busy);
		mutex_exit(&bufcache_lock);
	}
	uvm_lwp_rele(l);

	DPRINTF(("%s: done: off=%" PRIu64 ", resid=%zu\n",
	    __func__, uio->uio_offset, uio->uio_resid));

	return error;
}