static void
udf_queuebuf_seq(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct buf *nestbuf = args->nestbuf;
	struct strat_private *priv = PRIV(ump);
	int queue;
	int what;

	KASSERT(ump);
	KASSERT(nestbuf);
	KASSERT(nestbuf->b_iodone == nestiobuf_iodone);

	what = nestbuf->b_udf_c_type;
	queue = UDF_SHED_READING;
	if ((nestbuf->b_flags & B_READ) == 0) {
		/* writing */
		queue = UDF_SHED_SEQWRITING;
		if (what == UDF_C_ABSOLUTE)
			queue = UDF_SHED_WRITING;
	}

	/* use our own scheduler lists for more complex scheduling */
	mutex_enter(&priv->discstrat_mutex);
		bufq_put(priv->queues[queue], nestbuf);
		vfs_timestamp(&priv->last_queued[queue]);
	mutex_exit(&priv->discstrat_mutex);

	/* signal our thread that there might be something to do */
	cv_signal(&priv->discstrat_cv);
}
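This first example is the producer half of a producer/consumer pair: the buffer goes onto one of several scheduler queues, and cv_signal() wakes the strategy thread that drains them. For orientation, here is a minimal sketch of what such a consumer loop looks like, assuming the same mutex/condvar/bufq fields; the function name, the run_thread flag, and the single-queue simplification are illustrative, not the actual UDF code:

static void
strategy_worker(void *arg)
{
	struct strat_private *priv = arg;
	struct buf *bp;

	mutex_enter(&priv->discstrat_mutex);
	while (priv->run_thread) {	/* hypothetical shutdown flag */
		/* Drain one queue; the real thread services all of them. */
		while ((bp = bufq_get(priv->queues[UDF_SHED_READING])) != NULL) {
			mutex_exit(&priv->discstrat_mutex);
			/* ... issue the I/O for bp here ... */
			mutex_enter(&priv->discstrat_mutex);
		}
		/* Sleep until udf_queuebuf_seq() signals new work. */
		cv_wait(&priv->discstrat_cv, &priv->discstrat_mutex);
	}
	mutex_exit(&priv->discstrat_mutex);
}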
Example #2
/*
 * Strategy function for the device.
 */
static void
icapstrategy(struct buf *bp)
{
	struct icap_softc *sc;
	int s;

	DEBUG_PRINT(("icapstrategy\n"), DEBUG_FUNCS);

	/* Assume nothing has been transferred yet. */
	bp->b_resid = bp->b_bcount;

	/* Do we know this device? */
	sc = device_lookup_private(&icap_cd, minor(bp->b_dev));
	if (sc == NULL) {
		DEBUG_PRINT(("icapstrategy: nodev %" PRIx64 "\n",bp->b_dev),
		    DEBUG_ERRORS);
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	/* Add to queue.  If the queue was empty, get it started. */
	s = splbio();
	bufq_put(sc->sc_buflist, bp);
	if (bufq_peek(sc->sc_buflist) == bp) {
		icapstart(sc);
	}
	splx(s);
}
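A note on the locking model: icapstrategy predates the use of mutexes in this path and instead raises the interrupt priority level with splbio()/splx() while it touches the queue. The bufq_peek() test kicks icapstart() only when the buffer just queued sits at the head, i.e. the queue was previously empty and no transfer is already in flight.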
Example #3
/*
 * Actually translate the requested transfer into one the physical
 * driver can understand The transfer is described by a buf and will
 * include only one physical transfer.
 */
static void
ssstrategy(struct buf *bp)
{
	struct ss_softc *ss = device_lookup_private(&ss_cd, SSUNIT(bp->b_dev));
	struct scsipi_periph *periph = ss->sc_periph;
	int s;

	SC_DEBUG(ss->sc_periph, SCSIPI_DB1,
	    ("ssstrategy %d bytes @ blk %" PRId64 "\n", bp->b_bcount,
	    bp->b_blkno));

	/* If the device has been made invalid, error out */
	if (!device_is_active(ss->sc_dev)) {
		if (periph->periph_flags & PERIPH_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto done;
	}

	/* If negative offset, error */
	if (bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	if (bp->b_bcount > ss->sio.scan_window_size)
		bp->b_bcount = ss->sio.scan_window_size;

	/* If it's a null transfer, return immediately */
	if (bp->b_bcount == 0)
		goto done;

	s = splbio();

	/*
	 * Place it at the end of the queue of activities for this
	 * scanner (a bit silly because we only have one user...
	 * (but it could fork()))
	 */
	bufq_put(ss->buf_queue, bp);

	/*
	 * Tell the device to start the transfer if it's not doing
	 * anything already, otherwise just wait for completion
	 * (all a bit silly if we only allow one open, but...)
	 */
	ssstart(ss->sc_periph);

	splx(s);
	return;
done:
	/* Correctly set the buf to indicate a completed xfer */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
Example #4

void
dk_strategy(struct dk_intf *di, struct dk_softc *dksc, struct buf *bp)
{
	int	s;
	int	wlabel;
	daddr_t	blkno;

	DPRINTF_FOLLOW(("dk_strategy(%s, %p, %p)\n",
	    di->di_dkname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("dk_strategy: not inited\n"));
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	/* XXX look for some more errors, cf. ld.c */

	bp->b_resid = bp->b_bcount;

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0) {
		biodone(bp);
		return;
	}

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0) {
		biodone(bp);
		return;
	}

	blkno = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		struct partition *pp;

		pp =
		    &dksc->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno += pp->p_offset;
	}
	bp->b_rawblkno = blkno;

	/*
	 * Start the unit by calling the start routine
	 * provided by the individual driver.
	 */
	s = splbio();
	bufq_put(dksc->sc_bufq, bp);
	dk_start(di, dksc);
	splx(s);
	return;
}
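The pattern here splits cleanly in two: dk_strategy() validates the request, translates the partition-relative block number into an absolute one (e.g. with a partition at offset 1024, b_blkno 10 becomes b_rawblkno 1034), and queues the buffer; the actual draining is delegated to dk_start(), shown next.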
Example #5
/*
 * Only thing to check here is for legal record lengths (writes only).
 */
void
mtstrategy(struct buf *bp)
{
	struct mt_softc *sc;
	int s;

	sc = device_lookup_private(&mt_cd, MTUNIT(bp->b_dev));

	DPRINTF(MDB_ANY, ("%s strategy", device_xname(sc->sc_dev)));

	if ((bp->b_flags & (B_CMD | B_READ)) == 0) {
#define WRITE_BITS_IGNORED	8
#if 0
		if (bp->b_bcount & ((1 << WRITE_BITS_IGNORED) - 1)) {
			tprintf(sc->sc_ttyp,
				"%s: write record must be multiple of %d\n",
				device_xname(sc->sc_dev), 1 << WRITE_BITS_IGNORED);
			goto error;
		}
#endif
		s = 16 * 1024;
		if (sc->sc_stat2 & SR2_LONGREC) {
			switch (sc->sc_density) {
			    case T_1600BPI:
				s = 32 * 1024;
				break;

			    case T_6250BPI:
			    case T_BADBPI:
				s = 60 * 1024;
				break;
			}
		}
		if (bp->b_bcount > s) {
			tprintf(sc->sc_ttyp,
				"%s: write record (%d) too big: limit (%d)\n",
				device_xname(sc->sc_dev), bp->b_bcount, s);
#if 0 /* XXX see above */
	    error:
#endif
			bp->b_error = EIO;
			biodone(bp);
			return;
		}
	}
	s = splbio();
	bufq_put(sc->sc_tab, bp);
	if (sc->sc_active == 0) {
		sc->sc_active = 1;
		mtustart(sc);
	}
	splx(s);
}
Example #6

void
dk_start(struct dk_intf *di, struct dk_softc *dksc)
{
	struct	buf *bp;

	DPRINTF_FOLLOW(("dk_start(%s, %p)\n", di->di_dkname, dksc));

	/* Process the work queue */
	while ((bp = bufq_get(dksc->sc_bufq)) != NULL) {
		if (di->di_diskstart(dksc, bp) != 0) {
			bufq_put(dksc->sc_bufq, bp);
			break;
		}
	}
}
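One caveat with this variant: when di_diskstart() fails, the buffer is pushed back with bufq_put(), which re-sorts it into the queue rather than keeping it at the head, so I/O order is not necessarily preserved. The newer dk_start() in Example #10 keeps a dedicated deferred-buffer slot for exactly this reason.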
Example #7
int
dk_strategy_defer(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	error = dk_strategy1(dksc, bp);
	if (error)
		return error;

	/*
	 * Queue buffer only
	 */
	mutex_enter(&dksc->sc_iolock);
	bufq_put(dksc->sc_bufq, bp);
	mutex_exit(&dksc->sc_iolock);

	return 0;
}
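Unlike dk_strategy(), this deferred variant only queues: dk_strategy1() (not shown here) is assumed to perform the usual validation, and the buffer then waits on sc_bufq under sc_iolock until a later dk_start(dksc, NULL) call (see Example #10, which drains the queue when passed a NULL buffer) picks it up.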
Example #8

/*
 * Pass I/O requests to the memory filesystem process.
 */
int
mfs_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp = ap->a_bp;
	struct mfsnode *mfsp;

	if (vp->v_type != VBLK || vp->v_usecount == 0)
		panic("mfs_strategy: bad dev");
	mfsp = VTOMFS(vp);
	/* check for mini-root access */
	if (mfsp->mfs_proc == NULL) {
		void *base;

		base = (char *)mfsp->mfs_baseoff + (bp->b_blkno << DEV_BSHIFT);
		if (bp->b_flags & B_READ)
			memcpy(bp->b_data, base, bp->b_bcount);
		else
			memcpy(base, bp->b_data, bp->b_bcount);
		bp->b_resid = 0;
		biodone(bp);
	} else if (mfsp->mfs_proc == curproc) {
		mfs_doio(bp, mfsp->mfs_baseoff);
	} else if (doing_shutdown) {
		/*
		 * bitbucket I/O during shutdown.
		 * Note that reads should *not* happen here, but..
		 */
		if (bp->b_flags & B_READ)
			printf("warning: mfs read during shutdown\n");
		bp->b_resid = 0;
		biodone(bp);
	} else {
		mutex_enter(&mfs_lock);
		bufq_put(mfsp->mfs_buflist, bp);
		cv_broadcast(&mfsp->mfs_cv);
		mutex_exit(&mfs_lock);
	}
	return (0);
}
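The interesting branch is the mini-root case: when no mfs server process exists, the "device" is just a region of kernel memory, so the transfer degenerates into a memcpy. The shift by DEV_BSHIFT (9 on NetBSD, since DEV_BSIZE is 512) converts the DEV_BSIZE-unit block number into a byte offset from mfs_baseoff. Only when a server process exists, and we are not running in it, does the request actually get queued for the filesystem thread, which is woken with cv_broadcast().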
Example #9
void
fss_strategy(struct buf *bp)
{
	const bool write = ((bp->b_flags & B_READ) != B_READ);
	struct fss_softc *sc = device_lookup_private(&fss_cd, minor(bp->b_dev));

	mutex_enter(&sc->sc_slock);

	if (write || !FSS_ISVALID(sc)) {

		mutex_exit(&sc->sc_slock);

		bp->b_error = (write ? EROFS : ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	bp->b_rawblkno = bp->b_blkno;
	bufq_put(sc->sc_bufq, bp);
	cv_signal(&sc->sc_work_cv);

	mutex_exit(&sc->sc_slock);
}
Example #10
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success is not atomic.
	 *
	 * So when a diskstart fails, the buffer is saved and tried
	 * again before the next buffer is fetched.  dk_drain()
	 * handles flushing of a saved buffer.
	 *
	 * This preserves the order of I/O operations, which simply
	 * re-queueing with bufq_put would not.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}
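This is the ordering-safe rework referred to in Example #6's note: sc_iolock is dropped around the d_diskstart() call so completion interrupts can re-enter the driver, and a driver that is temporarily out of resources returns EAGAIN, which parks the buffer in sc_deferred to be retried before anything else is fetched from the queue. The sc_busy flag collapses concurrent callers into the single loop already running.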
Example #11
/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
edmcastrategy(struct buf *bp)
{
	struct ed_softc *ed;
	struct disklabel *lp;
	daddr_t blkno;

	ed = device_lookup_private(&ed_cd, DISKUNIT(bp->b_dev));
	lp = ed->sc_dk.dk_label;

	ATADEBUG_PRINT(("edmcastrategy (%s)\n", device_xname(ed->sc_dev)),
	    DEBUG_XFERS);

	/* Valid request? */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % lp->d_secsize) != 0 ||
	    (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If device invalidated (e.g. media change, door open), error. */
	if ((ed->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto done;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust the transfer if needed.  On error,
	 * finish; at the end of the partition, just return.
	 */
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(&ed->sc_dk, bp,
	    (ed->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (DISKPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	/* Queue transfer on drive, activate drive and controller if idle. */
	mutex_enter(&ed->sc_q_lock);
	bufq_put(ed->sc_q, bp);
	mutex_exit(&ed->sc_q_lock);

	/* Ring the worker thread */
	wakeup(ed->edc_softc);

	return;
done:
	/* Toss transfer; we're done early. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
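The conversion in the middle of edmcastrategy works in both directions because a device's logical sector can be larger or smaller than DEV_BSIZE (512 bytes). A standalone sketch of the same arithmetic, with a hypothetical function name; both branches assume power-of-two sizes, so one always divides the other evenly:

#include <sys/param.h>	/* DEV_BSIZE */
#include <sys/types.h>	/* daddr_t, uint32_t */

/* Convert a block number counted in DEV_BSIZE units into device sectors. */
static daddr_t
blkno_to_sectors(daddr_t blkno, uint32_t secsize)
{
	if (secsize >= DEV_BSIZE)
		return blkno / (secsize / DEV_BSIZE);	/* 2048-byte sectors: blkno / 4 */
	return blkno * (DEV_BSIZE / secsize);		/* 256-byte sectors: blkno * 2 */
}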
Example #12
void
mcdstrategy(struct buf *bp)
{
	struct mcd_softc *sc;
	struct disklabel *lp;
	daddr_t blkno;
	int s;

	sc = device_lookup_private(&mcd_cd, MCDUNIT(bp->b_dev));
	lp = sc->sc_dk.dk_label;

	/* Test validity. */
	MCD_TRACE("strategy: buf=0x%p blkno=%d bcount=%d\n", bp,
	    (int) bp->b_blkno, bp->b_bcount);
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % sc->blksize) != 0) {
		printf("%s: strategy: blkno = %" PRId64 " bcount = %d\n",
		    device_xname(sc->sc_dev), bp->b_blkno, bp->b_bcount);
		bp->b_error = EINVAL;
		goto done;
	}

	/* If device invalidated (e.g. media change, door open), error. */
	if ((sc->flags & MCDF_LOADED) == 0) {
		MCD_TRACE("strategy: drive not valid%s", "\n");
		bp->b_error = EIO;
		goto done;
	}

	/* No data to read. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust the transfer if needed.  On error,
	 * finish; at the end of the partition, just return.
	 */
	if (MCDPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(&sc->sc_dk, bp,
	    (sc->flags & (MCDF_WLABEL|MCDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	if (MCDPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[MCDPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	/* Queue it. */
	s = splbio();
	bufq_put(sc->buf_queue, bp);
	splx(s);
	if (!sc->active)
		mcdstart(sc);
	return;

done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
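Compared with edmcastrategy (Example #11), the conversion here handles only the d_secsize >= DEV_BSIZE direction, which is safe for a CD-ROM driver whose sectors are 2048 bytes.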
Example #13
/*
 * Queue the request, and wakeup the kernel thread to handle it.
 */
static void
vndstrategy(struct buf *bp)
{
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd =
	    device_lookup_private(&vnd_cd, unit);
	struct disklabel *lp;
	daddr_t blkno;
	int s = splbio();

	if (vnd == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}
	lp = vnd->sc_dkdev.dk_label;

	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		goto done;
	}

	/*
	 * The transfer must be a whole number of blocks.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/*
	 * check if we're read-only.
	 */
	if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
		bp->b_error = EACCES;
		goto done;
	}

	/* If it's a nil transfer, wake up the top half now. */
	if (bp->b_bcount == 0) {
		goto done;
	}

	/*
	 * Do bounds checking and adjust transfer.  If there's an error,
	 * the bounds check will flag that for us.
	 */
	if (DISKPART(bp->b_dev) == RAW_PART) {
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    vnd->sc_size) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&vnd->sc_dkdev,
		    bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
			goto done;
	}

	/*
	 * Put the block number in terms of the logical blocksize
	 * of the "device".
	 */

	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);

	/*
	 * Translate the partition-relative block number to an absolute.
	 */
	if (DISKPART(bp->b_dev) != RAW_PART) {
		struct partition *pp;

		pp = &vnd->sc_dkdev.dk_label->d_partitions[
		    DISKPART(bp->b_dev)];
		blkno += pp->p_offset;
	}
	bp->b_rawblkno = blkno;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
		KASSERT(vnd->sc_pending >= 0 &&
		    vnd->sc_pending <= VND_MAXPENDING(vnd));
		while (vnd->sc_pending == VND_MAXPENDING(vnd))
			tsleep(&vnd->sc_pending, PRIBIO, "vndpc", 0);
		vnd->sc_pending++;
	}
	bufq_put(vnd->sc_tab, bp);
	wakeup(&vnd->sc_tab);
	splx(s);
	return;

done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	splx(s);
}
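The VNF_USE_VN_RDWR branch adds simple backpressure: sc_pending counts buffers handed to the worker thread, and once it reaches VND_MAXPENDING the strategy routine tsleeps on &vnd->sc_pending until the worker (not shown here) catches up; wakeup(&vnd->sc_tab) then nudges that thread to service the queue. Note also that this function completes errors itself via the done: label, under the same splbio() protection it took on entry.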