Example #1
static void
cfi_disk_strategy(struct bio *bp)
{
	struct cfi_disk_softc *sc = bp->bio_disk->d_drv1;

	if (sc == NULL)
		goto invalid;
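	/* Complete zero-length requests immediately. */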
	if (bp->bio_bcount == 0) {
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return;
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->qlock);
		/* no value in sorting requests? */
		bioq_insert_tail(&sc->bioq, bp);
		mtx_unlock(&sc->qlock);
		taskqueue_enqueue(sc->tq, &sc->iotask);
		return;
	}
	/* fall thru... */
invalid:
	bp->bio_flags |= BIO_ERROR;
	bp->bio_error = EINVAL;
	biodone(bp);
}
Example #2
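/*
 * Queue a request on the adapter and kick the controller.  The lock
 * covers both the tail insert and ida_startio(), so requests are
 * issued in arrival order.
 */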
void
ida_submit_buf(struct ida_softc *ida, struct bio *bp)
{
	mtx_lock(&ida->lock);
	bioq_insert_tail(&ida->bio_queue, bp);
	ida_startio(ida);
	mtx_unlock(&ida->lock);
}
Example #3
int
mfsstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	off_t boff = bio->bio_offset;
	off_t eoff = boff + bp->b_bcount;
	struct mfsnode *mfsp;

	if ((mfsp = dev->si_drv1) == NULL) {
		bp->b_error = ENXIO;
		goto error;
	}
	if (boff < 0)
		goto bad;
	if (eoff > mfsp->mfs_size) {
		if (boff > mfsp->mfs_size || (bp->b_flags & B_BNOCLIP))
			goto bad;
		/*
		 * Return EOF by completing the I/O with 0 bytes transferred.
		 * Set B_INVAL to indicate that any data in the buffer is not
		 * valid.
		 */
		if (boff == mfsp->mfs_size) {
			bp->b_resid = bp->b_bcount;
			bp->b_flags |= B_INVAL;
			goto done;
		}
		bp->b_bcount = mfsp->mfs_size - boff;
	}

	/*
	 * Initiate I/O.  If we are being called from the MFS server
	 * thread itself, perform the I/O directly; otherwise queue the
	 * bio and wake the server loop that drains the queue.
	 */
	if (mfsp->mfs_td == curthread) {
		mfs_doio(bio, mfsp);
	} else {
		bioq_insert_tail(&mfsp->bio_queue, bio);
		wakeup((caddr_t)mfsp);
	}
	return(0);

	/*
	 * Failure conditions on bio
	 */
bad:
	bp->b_error = EINVAL;
error:
	bp->b_flags |= B_ERROR | B_INVAL;
done:
	biodone(bio);
	return(0);
}
Example #4
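/*
 * Record the target drive number in bio_driver1, queue the request
 * under the adapter's queue mutex, and start the controller.
 */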
static void ipsd_strategy(struct bio *iobuf)
{
	ipsdisk_softc_t *dsc;

	dsc = iobuf->bio_disk->d_drv1;	
	DEVICE_PRINTF(8,dsc->dev,"in strategy\n");
	iobuf->bio_driver1 = (void *)(uintptr_t)dsc->sc->drives[dsc->disk_number].drivenum;
	mtx_lock(&dsc->sc->queue_mtx);
	bioq_insert_tail(&dsc->sc->queue, iobuf);
	ips_start_io_request(dsc->sc);
	mtx_unlock(&dsc->sc->queue_mtx);
}
Example #5
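/*
 * Queue the request under the bioq lock and defer the actual NVMe
 * submission to the driver's taskqueue.
 */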
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk;

	ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}
Example #6
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static int
ptstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct cam_periph *periph;
	struct pt_softc *softc;
	u_int  unit;
	
	unit = minor(dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;		
	}
	cam_periph_lock(periph);
	softc = (struct pt_softc *)periph->softc;

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		cam_periph_unlock(periph);
		bp->b_error = ENXIO;
		goto bad;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_insert_tail(&softc->bio_queue, bio);
	
	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);
	cam_periph_unlock(periph);

	return(0);
bad:
	bp->b_flags |= B_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bio);
	return(0);
}
Example #7
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	int    s;
	
	periph = (struct cam_periph *)bp->bio_dev->si_drv1;
	bp->bio_resid = bp->bio_bcount;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct pt_softc *)periph->softc;

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();
	
	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		splx(s);
		biofinish(bp, NULL, ENXIO);
		return;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_insert_tail(&softc->bio_queue, bp);

	splx(s);
	
	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
}
Example #8
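/*
 * Queue a standard access on the chip's bio queue and defer the
 * transfer to the chip's taskqueue; cf. nand_strategy_raw() below.
 */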
static void
nand_strategy(struct bio *bp)
{
	struct nand_chip *chip;

	chip = (struct nand_chip *)bp->bio_disk->d_drv1;

	bp->bio_driver1 = BIO_NAND_STD;

	nand_debug(NDBG_GEOM, "Strategy %s on chip %d [%p]",
	    bp->bio_cmd == BIO_READ ? "READ" :
	    (bp->bio_cmd == BIO_WRITE ? "WRITE" :
	    (bp->bio_cmd == BIO_DELETE ? "DELETE" : "UNKNOWN")),
	    chip->num, chip);

	mtx_lock(&chip->qlock);
	bioq_insert_tail(&chip->bioq, bp);
	mtx_unlock(&chip->qlock);
	taskqueue_enqueue(chip->tq, &chip->iotask);
}
Example #9
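/*
 * As nand_strategy() above, but tags the request as a raw access
 * before queueing it.
 */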
static void
nand_strategy_raw(struct bio *bp)
{
	struct nand_chip *chip;

	chip = (struct nand_chip *)bp->bio_disk->d_drv1;

	/* Inform taskqueue that it's a raw access */
	bp->bio_driver1 = BIO_NAND_RAW;

	nand_debug(NDBG_GEOM, "Strategy %s on chip %d [%p]",
	    (bp->bio_cmd & BIO_READ) == BIO_READ ? "READ" :
	    ((bp->bio_cmd & BIO_WRITE) == BIO_WRITE ? "WRITE" :
	    ((bp->bio_cmd & BIO_DELETE) == BIO_DELETE ? "DELETE" : "UNKNOWN")),
	    chip->num, chip);

	mtx_lock(&chip->qlock);
	bioq_insert_tail(&chip->bioq, bp);
	mtx_unlock(&chip->qlock);
	taskqueue_enqueue(chip->tq, &chip->iotask);
}
Example #10
/*
 * The function is called to read encrypted data.
 *
 * g_eli_start -> G_ELI_CRYPTO_READ -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver
 */
void
g_eli_crypto_read(struct g_eli_softc *sc, struct bio *bp, boolean_t fromworker)
{
	struct g_consumer *cp;
	struct bio *cbp;

	if (!fromworker) {
		/*
		 * We are not called from the worker thread, so check if
		 * device is suspended.
		 */
		mtx_lock(&sc->sc_queue_mtx);
		if (sc->sc_flags & G_ELI_FLAG_SUSPEND) {
			/*
			 * If device is suspended, we place the request onto
			 * the queue, so it can be handled after resume.
			 */
			G_ELI_DEBUG(0, "device suspended, move onto queue");
			bioq_insert_tail(&sc->sc_queue, bp);
			mtx_unlock(&sc->sc_queue_mtx);
			wakeup(sc);
			return;
		}
		atomic_add_int(&sc->sc_inflight, 1);
		mtx_unlock(&sc->sc_queue_mtx);
	}
	bp->bio_pflags = 0;
	bp->bio_driver2 = NULL;
	cbp = bp->bio_driver1;
	cbp->bio_done = g_eli_read_done;
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Read encrypted data from provider.
	 */
	g_io_request(cbp, cp);
}
Example #11
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	
	periph = (struct cam_periph *)bp->bio_dev->si_drv1;
	bp->bio_resid = bp->bio_bcount;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	cam_periph_lock(periph);
	softc = (struct pt_softc *)periph->softc;

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_insert_tail(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);
	cam_periph_unlock(periph);

	return;
}
Example #12
/*
 * Seek sort for disks.
 *
 * Sort all requests in a single queue while keeping
 * track of the current position of the disk with last_offset.
 * See above for details.
 */
void
bioq_disksort(struct bio_queue_head *head, struct bio *bp)
{
	struct bio *cur, *prev;
	uoff_t key;

	if ((bp->bio_flags & BIO_ORDERED) != 0) {
		/*
		 * Ordered transactions can only be dispatched
		 * after any currently queued transactions.  They
		 * also have barrier semantics - no transactions
		 * queued in the future can pass them.
		 */
		bioq_insert_tail(head, bp);
		return;
	}

	prev = NULL;
	key = bioq_bio_key(head, bp);
	cur = TAILQ_FIRST(&head->queue);

	if (head->insert_point) {
		prev = head->insert_point;
		cur = TAILQ_NEXT(head->insert_point, bio_queue);
	}

	while (cur != NULL && key >= bioq_bio_key(head, cur)) {
		prev = cur;
		cur = TAILQ_NEXT(cur, bio_queue);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&head->queue, bp, bio_queue);
	else
		TAILQ_INSERT_AFTER(&head->queue, prev, bp, bio_queue);
}
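Most examples here use a plain tail insert; a driver that instead wants seek ordering can call bioq_disksort() from its strategy routine. A minimal sketch under that assumption (xyz_strategy(), xyz_softc, and its qlock/bioq/tq/iotask fields are hypothetical names; the locking mirrors Examples 1, 5, and 8):

static void
xyz_strategy(struct bio *bp)
{
	/* Hypothetical driver; only bioq_disksort() is the real API here. */
	struct xyz_softc *sc = bp->bio_disk->d_drv1;

	mtx_lock(&sc->qlock);
	bioq_disksort(&sc->bioq, bp);	/* sorted insert instead of tail */
	mtx_unlock(&sc->qlock);
	taskqueue_enqueue(sc->tq, &sc->iotask);
}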
Example #13
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk;

	ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	if (__predict_false(bp->bio_flags & BIO_ORDERED))
		atomic_add_int(&ndisk->ordered_in_flight, 1);

	if (__predict_true(ndisk->ordered_in_flight == 0)) {
		nvd_bio_submit(ndisk, bp);
		return;
	}

	/*
	 * There are ordered bios in flight, so we need to submit
	 *  bios through the task queue to enforce ordering.
	 */
	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}
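The taskqueue-based examples above only enqueue; the transfer itself happens later in the task handler. A minimal sketch of that drain side, assuming a hypothetical xxx_softc with the same qlock/bioq fields used above; bioq_takefirst() is the stock FreeBSD helper that removes and returns the head of a bio queue, or NULL when it is empty:

static void
xxx_iotask(void *arg, int pending)
{
	struct xxx_softc *sc = arg;
	struct bio *bp;

	for (;;) {
		mtx_lock(&sc->qlock);
		bp = bioq_takefirst(&sc->bioq);	/* NULL once drained */
		mtx_unlock(&sc->qlock);
		if (bp == NULL)
			break;
		/* Driver-specific transfer: set bio_resid/bio_error here. */
		biodone(bp);
	}
}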