/* read the requested number of bytes/lines from the scanner */
static int
mustek_read(struct ss_softc *ss, struct buf *bp)
{
	struct mustek_read_cmd cmd;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph = ss->sc_periph;
	u_long lines_to_read;
	int error;

	SC_DEBUG(periph, SCSIPI_DB1, ("mustek_read: start\n"));

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = MUSTEK_READ;

	/* instead of the bytes, the mustek wants the number of lines */
	lines_to_read = bp->b_bcount /
	    ((ss->sio.scan_pixels_per_line * ss->sio.scan_bits_per_pixel) / 8);
	SC_DEBUG(periph, SCSIPI_DB1, ("mustek_read: read %ld lines\n",
	    lines_to_read));
	_lto3b(lines_to_read, cmd.length);

	/* go ask the adapter to do all this for us */
	xs = scsipi_make_xs(periph,
	    (struct scsipi_generic *) &cmd, sizeof(cmd),
	    (u_char *) bp->b_data, bp->b_bcount,
	    MUSTEK_RETRIES, 10000, bp,
	    XS_CTL_NOSLEEP | XS_CTL_ASYNC | XS_CTL_DATA_IN);
	if (xs == NULL) {
		/*
		 * out of memory. Keep this buffer in the queue, and
		 * retry later.
		 */
		callout_reset(&ss->sc_callout, hz / 2, ssrestart,
		    periph);
		return 0;
	}
#ifdef DIAGNOSTIC
	if (bufq_get(ss->buf_queue) != bp)
		panic("ssstart(): dequeued wrong buf");
#else
	bufq_get(ss->buf_queue);
#endif
	error = scsipi_execute_xs(xs);
	/* with a scsipi_xfer preallocated, scsipi_command can't fail */
	KASSERT(error == 0);
	ss->sio.scan_lines -= lines_to_read;
#if 0
	if (ss->sio.scan_lines < 0)
		ss->sio.scan_lines = 0;
#endif
	ss->sio.scan_window_size -= bp->b_bcount;
#if 0
	if (ss->sio.scan_window_size < 0)
		ss->sio.scan_window_size = 0;
#endif
	return 0;
}

void
dk_start(struct dk_intf *di, struct dk_softc *dksc)
{
	struct	buf *bp;

	DPRINTF_FOLLOW(("dk_start(%s, %p)\n", di->di_dkname, dksc));

	/* Process the work queue */
	while ((bp = bufq_get(dksc->sc_bufq)) != NULL) {
		if (di->di_diskstart(dksc, bp) != 0) {
			bufq_put(dksc->sc_bufq, bp);
			break;
		}
	}
}
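/*
 * For orientation, a minimal hypothetical producer counterpart to the
 * drain loop above: a driver's strategy path queues the buffer and then
 * kicks dk_start().  The xx_softc type and its sc_di/sc_dksc members are
 * illustrative assumptions, not part of the examples on this page, and
 * locking around the queue is elided.
 */
static void
xxstrategy(struct xx_softc *sc, struct buf *bp)
{

	/* Enqueue the request, then run the consumer loop above. */
	bufq_put(sc->sc_dksc.sc_bufq, bp);
	dk_start(sc->sc_di, &sc->sc_dksc);
}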
Example #3
void
mcdstart(struct mcd_softc *sc)
{
	struct buf *bp;
	int s;

loop:
	s = splbio();

	if ((bp = bufq_get(sc->buf_queue)) == NULL) {
		/* Nothing to do. */
		sc->active = 0;
		splx(s);
		return;
	}

	/* Block found to process. */
	MCD_TRACE("start: found block bp=0x%p\n", bp);
	splx(s);

	/* Changed media? */
	if ((sc->flags & MCDF_LOADED) == 0) {
		MCD_TRACE("start: drive not valid%s", "\n");
		bp->b_error = EIO;
		biodone(bp);
		goto loop;
	}

	sc->active = 1;

	/* Instrumentation. */
	s = splbio();
	disk_busy(&sc->sc_dk);
	splx(s);

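	/* Set up the transfer mailbox that drives the mcdintr() state machine. */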
	sc->mbx.retry = MCD_RDRETRIES;
	sc->mbx.bp = bp;
	sc->mbx.blkno = bp->b_rawblkno;
	sc->mbx.nblk = bp->b_bcount / sc->blksize;
	sc->mbx.sz = sc->blksize;
	sc->mbx.skip = 0;
	sc->mbx.state = MCD_S_BEGIN;
	sc->mbx.mode = MCD_MD_COOKED;

	s = splbio();
	(void) mcdintr(sc);
	splx(s);
}

/* ARGSUSED */
int
mfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int error;

	/*
	 * Finish any pending I/O requests.
	 */
	mutex_enter(&mfs_lock);
	while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
		mutex_exit(&mfs_lock);
		mfs_doio(bp, mfsp->mfs_baseoff);
		mutex_enter(&mfs_lock);
	}
	mutex_exit(&mfs_lock);
	/*
	 * On the last close of a memory filesystem we must
	 * invalidate any in-core blocks, so that we can
	 * free up its vnode.
	 */
	if ((error = vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 0, 0)) != 0)
		return (error);
	/*
	 * There should be no way for this vnode to still be in use;
	 * if we find any remaining queued requests, it is a bug, so panic.
	 */
	if (bufq_peek(mfsp->mfs_buflist) != NULL)
		panic("mfs_close");
	/*
	 * Send a request to the filesystem server to exit.
	 */
	mutex_enter(&mfs_lock);
	mfsp->mfs_shutdown = 1;
	cv_broadcast(&mfsp->mfs_cv);
	mutex_exit(&mfs_lock);
	return (0);
}
Example #5
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This preserves the order of I/O operations; re-queueing
	 * the failed buffer with bufq_put() would not.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}
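/*
 * The EAGAIN handling above implies a contract for the d_diskstart hook:
 * a driver may refuse a buffer without completing it, and dk_start()
 * will park it in sc_deferred and retry before fetching the next one.
 * A minimal hypothetical callback honouring that contract follows; the
 * xx_softc type, its sc_hwfree counter and the xx_submit() helper are
 * assumed names for illustration only.
 */
static int
xx_diskstart(device_t dev, struct buf *bp)
{
	struct xx_softc *sc = device_private(dev);

	/* Hardware queue full: defer; dk_start() keeps the buffer. */
	if (sc->sc_hwfree == 0)
		return EAGAIN;

	/* Accept the transfer; completion is reported later via dk_done(). */
	sc->sc_hwfree--;
	xx_submit(sc, bp);
	return 0;
}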
Example #6
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags)
{
	struct vnode *vp;
	struct mfsnode *mfsp;
	struct proc *p;
	struct buf *bp;
	void *base;
	int sleepreturn = 0, refcnt, error;
	ksiginfoq_t kq;

	/*
	 * Ensure that file system is still mounted when getting mfsnode.
	 * Add a reference to the mfsnode to prevent it disappearing in
	 * this routine.
	 */
	if ((error = vfs_busy(mp, NULL)) != 0)
		return error;
	vp = VFSTOUFS(mp)->um_devvp;
	mfsp = VTOMFS(vp);
	mutex_enter(&mfs_lock);
	mfsp->mfs_refcnt++;
	mutex_exit(&mfs_lock);
	vfs_unbusy(mp, false, NULL);

	base = mfsp->mfs_baseoff;
	mutex_enter(&mfs_lock);
	while (mfsp->mfs_shutdown != 1) {
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
			mutex_exit(&mfs_lock);
			mfs_doio(bp, base);
			mutex_enter(&mfs_lock);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, or the filesystem is already in the
		 * process of being unmounted, clear the signal (it has been
		 * "processed"), otherwise we will loop here, as tsleep
		 * will always return EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			mutex_exit(&mfs_lock);
			if (dounmount(mp, 0, curlwp) != 0) {
				p = curproc;
				ksiginfo_queue_init(&kq);
				mutex_enter(p->p_lock);
				sigclearall(p, NULL, &kq);
				mutex_exit(p->p_lock);
				ksiginfo_queue_drain(&kq);
			}
			sleepreturn = 0;
			mutex_enter(&mfs_lock);
			continue;
		}

		sleepreturn = cv_wait_sig(&mfsp->mfs_cv, &mfs_lock);
	}
	KASSERT(bufq_peek(mfsp->mfs_buflist) == NULL);
	refcnt = --mfsp->mfs_refcnt;
	mutex_exit(&mfs_lock);
	if (refcnt == 0) {
		bufq_free(mfsp->mfs_buflist);
		cv_destroy(&mfsp->mfs_cv);
		kmem_free(mfsp, sizeof(*mfsp));
	}
	return (sleepreturn);
}
Example #7
/*
 * We got an interrupt. Check type of interrupt and call the specific
 * device interrupt handling routine.
 */
void
mbaintr(void *mba)
{
	struct mba_softc * const sc = mba;
	struct mba_device *md;
	struct buf *bp;
	int itype, attn, anr;

	itype = MBA_RCSR(MBA_SR);
	MBA_WCSR(MBA_SR, itype);

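	/*
	 * Read and acknowledge the drive attention summary; any set
	 * bits are serviced by the loop at the end of this routine.
	 */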
	attn = MBA_RCSR(MUREG(0, MU_AS)) & 0xff;
	MBA_WCSR(MUREG(0, MU_AS), attn);

	if (sc->sc_state == SC_AUTOCONF)
		return;	/* During autoconfig */

	md = STAILQ_FIRST(&sc->sc_xfers);
	bp = bufq_peek(md->md_q);
	/*
	 * A data-transfer interrupt. Current operation is finished,
	 * call that device's finish routine to see what to do next.
	 */
	if (sc->sc_state == SC_ACTIVE) {
		sc->sc_state = SC_IDLE;
		switch ((*md->md_finish)(md, itype, &attn)) {

		case XFER_FINISH:
			/*
			 * Transfer is finished. Take the buffer off the drive
			 * queue, and take the drive off the adapter queue.
			 * If more to transfer, start the adapter again
			 * by calling mbastart().
			 */
			(void)bufq_get(md->md_q);
			STAILQ_REMOVE_HEAD(&sc->sc_xfers, md_link);
			if (bufq_peek(md->md_q) != NULL) {
				STAILQ_INSERT_TAIL(&sc->sc_xfers, md, md_link);
			}
	
			bp->b_resid = 0;
			biodone(bp);
			if (!STAILQ_EMPTY(&sc->sc_xfers))
				mbastart(sc);
			break;

		case XFER_RESTART:
			/*
			 * Something went wrong with the transfer. Try again.
			 */
			mbastart(sc);
			break;
		}
	}

	while (attn) {
		anr = ffs(attn) - 1;
		attn &= ~(1 << anr);
		if (sc->sc_md[anr]->md_attn == 0)
			panic("Should check for new MBA device %d", anr);
		(*sc->sc_md[anr]->md_attn)(sc->sc_md[anr]);
	}
}
Example #8
/*
 * Get the next I/O request started
 */
static void
icapstart(struct icap_softc *sc)
{
	paddr_t phys, phys2;
	vaddr_t virt;
	size_t count;
	uint32_t fl;
	struct buf *bp = sc->sc_bp;

	DEBUG_PRINT(("icapstart %p %p\n",sc,bp), DEBUG_FUNCS);

	/* Were we idle? */
 recheck:
	if (bp == NULL) {
		/* Yes, get the next request if any */
		bp = bufq_get(sc->sc_buflist);
		DEBUG_PRINT(("icapnext: %p\n", bp), DEBUG_XFERS);
		if (bp == NULL)
			return;
	}

	/* Done with this request? */
	if ((bp->b_resid == 0) || bp->b_error) {
		/* Yes, complete it and move to the next, if any */
		sc->sc_bp = NULL;
		biodone(bp);
		DEBUG_PRINT(("icapdone %p\n", bp), DEBUG_XFERS);
		bp = NULL;
		goto recheck;
	}

	/* If this is a new request, init the xfer info */
	if (sc->sc_bp == NULL) {
		sc->sc_bp = bp;
		sc->sc_data = bp->b_data;
		sc->sc_count = bp->b_resid;
	}

	/* Loop filling as many buffers as will fit in the FIFO */
	fl = (bp->b_flags & B_READ) ? ICAPS_F_RECV : ICAPS_F_XMIT;
	for (;;) {
		/* Make sure there's still room in the FIFO, no errors. */
		if (sc->sc_dp->Control & (ICAPC_IF_FULL|ICAPC_ERROR))
			break;

		/* How much data do we transfer, and from where */
		virt = (vaddr_t)sc->sc_data;
		phys = kvtophys(virt);
		count = round_page(virt) - virt;
		if (count == 0)
			count = PAGE_SIZE;	/* could (will) be page-aligned */

		/* How much of it is contiguous */
		while (count < sc->sc_count) {
			phys2 = kvtophys(virt + count);
			if (phys2 != (phys + count)) {
				/* No longer contiguous, ship it */
				break;
			}
			count += PAGE_SIZE;
		}

		/* Trim if we went too far */
		if (count > sc->sc_count)
			count = sc->sc_count;

		/* Ship it */
		DEBUG_PRINT(("icapship %" PRIxPADDR " %zu\n", phys, count),
		    DEBUG_XFERS);
		sc->sc_dp->SizeAndFlags = fl | count;
		sc->sc_dp->BufferAddressHi32 = 0;	/* BUGBUG 64bit */
		sc->sc_dp->BufferAddressLo32 = phys;	/* this pushes the fifo */

		/* Adjust pointers and continue */
		sc->sc_data += count;
		sc->sc_count -= count;

		if (sc->sc_count <= 0)
			break;
	}
}
Example #9
/*
 * The kernel thread (one for every active snapshot).
 *
 * After wakeup it cleans the cache and runs the I/O requests.
 */
static void
fss_bs_thread(void *arg)
{
	bool thread_idle, is_valid;
	int error, i, todo, len, crotor, is_read;
	long off;
	char *addr;
	u_int32_t c, cl, ch, *indirp;
	struct buf *bp, *nbp;
	struct fss_softc *sc;
	struct fss_cache *scp, *scl;

	sc = arg;
	scl = sc->sc_cache+sc->sc_cache_size;
	crotor = 0;
	thread_idle = false;

	mutex_enter(&sc->sc_slock);

	for (;;) {
		if (thread_idle)
			cv_wait(&sc->sc_work_cv, &sc->sc_slock);
		thread_idle = true;
		if ((sc->sc_flags & FSS_BS_THREAD) == 0) {
			mutex_exit(&sc->sc_slock);
			kthread_exit(0);
		}

		/*
		 * Process I/O requests (persistent)
		 */

		if (sc->sc_flags & FSS_PERSISTENT) {
			if ((bp = bufq_get(sc->sc_bufq)) == NULL)
				continue;
			is_valid = FSS_ISVALID(sc);
			is_read = (bp->b_flags & B_READ);
			thread_idle = false;
			mutex_exit(&sc->sc_slock);

			if (is_valid) {
				disk_busy(sc->sc_dkdev);
				error = fss_bs_io(sc, FSS_READ, 0,
				    dbtob(bp->b_blkno), bp->b_bcount,
				    bp->b_data);
				disk_unbusy(sc->sc_dkdev,
				    (error ? 0 : bp->b_bcount), is_read);
			} else
				error = ENXIO;

			bp->b_error = error;
			bp->b_resid = (error ? bp->b_bcount : 0);
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		/*
		 * Clean the cache
		 */
		for (i = 0; i < sc->sc_cache_size; i++) {
			crotor = (crotor + 1) % sc->sc_cache_size;
			scp = sc->sc_cache + crotor;
			if (scp->fc_type != FSS_CACHE_VALID)
				continue;
			mutex_exit(&sc->sc_slock);

			thread_idle = false;
			indirp = fss_bs_indir(sc, scp->fc_cluster);
			if (indirp != NULL) {
				error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
				    0, FSS_CLSIZE(sc), scp->fc_data);
			} else
				error = EIO;

			mutex_enter(&sc->sc_slock);
			if (error == 0) {
				*indirp = sc->sc_clnext++;
				sc->sc_indir_dirty = 1;
			} else
				fss_error(sc, "write error on backing store");

			scp->fc_type = FSS_CACHE_FREE;
			cv_broadcast(&sc->sc_cache_cv);
			break;
		}

		/*
		 * Process I/O requests
		 */
		if ((bp = bufq_get(sc->sc_bufq)) == NULL)
			continue;
		is_valid = FSS_ISVALID(sc);
		is_read = (bp->b_flags & B_READ);
		thread_idle = false;

		if (!is_valid) {
			mutex_exit(&sc->sc_slock);

			bp->b_error = ENXIO;
			bp->b_resid = bp->b_bcount;
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		disk_busy(sc->sc_dkdev);

		/*
		 * First read from the snapshotted block device unless
		 * this request is completely covered by backing store.
		 */

		cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
		off = FSS_CLOFF(sc, dbtob(bp->b_blkno));
		ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
		error = 0;
		bp->b_resid = 0;
		bp->b_error = 0;
		for (c = cl; c <= ch; c++) {
			if (isset(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			/* Not on backing store, read from device. */
			nbp = getiobuf(NULL, true);
			nbp->b_flags = B_READ;
			nbp->b_resid = nbp->b_bcount = bp->b_bcount;
			nbp->b_bufsize = bp->b_bcount;
			nbp->b_data = bp->b_data;
			nbp->b_blkno = bp->b_blkno;
			nbp->b_lblkno = 0;
			nbp->b_dev = sc->sc_bdev;
			SET(nbp->b_cflags, BC_BUSY);	/* mark buffer busy */

			bdev_strategy(nbp);

			error = biowait(nbp);
			if (error != 0) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = nbp->b_error;
				disk_unbusy(sc->sc_dkdev, 0, is_read);
				biodone(bp);
			}
			putiobuf(nbp);

			mutex_enter(&sc->sc_slock);
			break;
		}
		if (error)
			continue;

		/*
		 * Replace those parts that have been saved to backing store.
		 */

		addr = bp->b_data;
		todo = bp->b_bcount;
		for (c = cl; c <= ch; c++, off = 0, todo -= len, addr += len) {
			len = FSS_CLSIZE(sc)-off;
			if (len > todo)
				len = todo;
			if (isclr(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			indirp = fss_bs_indir(sc, c);
			if (indirp == NULL || *indirp == 0) {
				/*
				 * Not on backing store. Either in cache
				 * or hole in the snapshotted block device.
				 */

				mutex_enter(&sc->sc_slock);
				for (scp = sc->sc_cache; scp < scl; scp++)
					if (scp->fc_type == FSS_CACHE_VALID &&
					    scp->fc_cluster == c)
						break;
				if (scp < scl)
					memcpy(addr, (char *)scp->fc_data+off,
					    len);
				else
					memset(addr, 0, len);
				continue;
			}

			/*
			 * Read from backing store.
			 */
			error =
			    fss_bs_io(sc, FSS_READ, *indirp, off, len, addr);

			mutex_enter(&sc->sc_slock);
			if (error) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				break;
			}
		}
		mutex_exit(&sc->sc_slock);

		disk_unbusy(sc->sc_dkdev, (error ? 0 : bp->b_bcount), is_read);
		biodone(bp);

		mutex_enter(&sc->sc_slock);
	}
}

static void
udf_doshedule(struct udf_mount *ump)
{
	struct buf *buf;
	struct timespec now, *last;
	struct strat_private *priv = PRIV(ump);
	void (*b_callback)(struct buf *);
	int new_queue;
	int error;

	buf = bufq_get(priv->queues[priv->cur_queue]);
	if (buf) {
		/* transfer from the current queue to the device queue */
		mutex_exit(&priv->discstrat_mutex);

		/* transform buffer to synchronous; XXX needed? */
		b_callback = buf->b_iodone;
		buf->b_iodone = NULL;
		CLR(buf->b_flags, B_ASYNC);

		/* issue and wait on completion */
		udf_issue_buf(ump, priv->cur_queue, buf);
		biowait(buf);

		mutex_enter(&priv->discstrat_mutex);

		/* if there is an error, repair it here, otherwise propagate the result */
		if (buf->b_error && ((buf->b_flags & B_READ) == 0)) {
			/* check what we need to do */
			panic("UDF write error, can't handle yet!\n");
		}

		/* propagate result to higher layers */
		if (b_callback) {
			buf->b_iodone = b_callback;
			(*buf->b_iodone)(buf);
		}

		return;
	}

	/* Check if we're idling in this state */
	vfs_timestamp(&now);
	last = &priv->last_queued[priv->cur_queue];
	if (ump->discinfo.mmc_class == MMC_CLASS_CD) {
		/* don't switch too fast for CD media; it's expensive in time */
		if (now.tv_sec - last->tv_sec < 3)
			return;
	}

	/* check if we can/should switch */
	new_queue = priv->cur_queue;

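	/* Later checks take precedence: SEQWRITING over WRITING over READING. */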
	if (bufq_peek(priv->queues[UDF_SHED_READING]))
		new_queue = UDF_SHED_READING;
	if (bufq_peek(priv->queues[UDF_SHED_WRITING]))		/* only for unmount */
		new_queue = UDF_SHED_WRITING;
	if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
		new_queue = UDF_SHED_SEQWRITING;
	if (priv->cur_queue == UDF_SHED_READING) {
		if (new_queue == UDF_SHED_SEQWRITING) {
			/* TODO use flag to signal if this is needed */
			mutex_exit(&priv->discstrat_mutex);

			/* update trackinfo for data and metadata */
			error = udf_update_trackinfo(ump,
					&ump->data_track);
			assert(error == 0);
			error = udf_update_trackinfo(ump,
					&ump->metadata_track);
			assert(error == 0);
			mutex_enter(&priv->discstrat_mutex);
			__USE(error);
		}
	}

	if (new_queue != priv->cur_queue) {
		DPRINTF(SHEDULE, ("switching from %d to %d\n",
			priv->cur_queue, new_queue));
	}

	priv->cur_queue = new_queue;
}

static void
vndthread(void *arg)
{
	struct vnd_softc *vnd = arg;
	int s;

	/* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
	 * directly access the backing vnode.  If we can, use these two
	 * operations to avoid messing with the local buffer cache.
	 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
	 * which are guaranteed to work with any file system. */
	if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
	    ! vnode_has_strategy(vnd))
		vnd->sc_flags |= VNF_USE_VN_RDWR;

#ifdef DEBUG
	if (vnddebug & VDB_INIT)
		printf("vndthread: vp %p, %s\n", vnd->sc_vp,
		    (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
		    "using bmap/strategy operations" :
		    "using read/write operations");
#endif

	s = splbio();
	vnd->sc_flags |= VNF_KTHREAD;
	wakeup(&vnd->sc_kthread);

	/*
	 * Dequeue requests and serve them depending on the available
	 * vnode operations.
	 */
	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
		struct vndxfer *vnx;
		int flags;
		struct buf *obp;
		struct buf *bp;

		obp = bufq_get(vnd->sc_tab);
		if (obp == NULL) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
			continue;
		}
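		/*
		 * A queued request was consumed; wake any caller
		 * throttled on sc_pending.
		 */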
		if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
			KASSERT(vnd->sc_pending > 0 &&
			    vnd->sc_pending <= VND_MAXPENDING(vnd));
			if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
				wakeup(&vnd->sc_pending);
		}
		splx(s);
		flags = obp->b_flags;
#ifdef DEBUG
		if (vnddebug & VDB_FOLLOW)
			printf("vndthread(%p)\n", obp);
#endif

		if (vnd->sc_vp->v_mount == NULL) {
			obp->b_error = ENXIO;
			goto done;
		}
#ifdef VND_COMPRESSION
		/* handle a compressed read */
		if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
			off_t bn;
			
			/* Convert to a byte offset within the file. */
			bn = obp->b_rawblkno *
			    vnd->sc_dkdev.dk_label->d_secsize;

			compstrategy(obp, bn);
			goto done;
		}
#endif /* VND_COMPRESSION */
		
		/*
		 * Allocate a header for this transfer and link it to the
		 * buffer
		 */
		s = splbio();
		vnx = VND_GETXFER(vnd);
		splx(s);
		vnx->vx_vnd = vnd;

		s = splbio();
		while (vnd->sc_active >= vnd->sc_maxactive) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
		}
		vnd->sc_active++;
		splx(s);

		/* Instrumentation. */
		disk_busy(&vnd->sc_dkdev);

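		/*
		 * Build a nested buffer mirroring the original request;
		 * vndiodone() completes the original via b_private.
		 */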
		bp = &vnx->vx_buf;
		buf_init(bp);
		bp->b_flags = (obp->b_flags & B_READ);
		bp->b_oflags = obp->b_oflags;
		bp->b_cflags = obp->b_cflags;
		bp->b_iodone = vndiodone;
		bp->b_private = obp;
		bp->b_vp = vnd->sc_vp;
		bp->b_objlock = bp->b_vp->v_interlock;
		bp->b_data = obp->b_data;
		bp->b_bcount = obp->b_bcount;
		BIO_COPYPRIO(bp, obp);

		/* Handle the request using the appropriate operations. */
		if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
			handle_with_strategy(vnd, obp, bp);
		else
			handle_with_rdwr(vnd, obp, bp);

		s = splbio();
		continue;

done:
		biodone(obp);
		s = splbio();
	}

	vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
	wakeup(&vnd->sc_kthread);
	splx(s);
	kthread_exit(0);
}