static void
pststrategy(struct bio *bp)
{
        struct pst_softc *psc = bp->bio_disk->d_drv1;

        mtx_lock(&psc->iop->mtx);
        bioq_disksort(&psc->queue, bp);
        pst_start(psc);
        mtx_unlock(&psc->iop->mtx);
}
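The pst handler above (and the ada and mcd handlers further down) queues the bio and then kicks a "start" routine while still holding the lock. A minimal sketch of what such a routine tends to look like, assuming a hypothetical xx_ driver; struct xx_softc, its fields, and xx_submit_io() are invented for illustration and are not taken from any of these drivers:

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical per-device state; the layout is an assumption. */
struct xx_softc {
        struct mtx              xx_mtx;
        struct bio_queue_head   xx_bioq;
        int                     xx_busy;
};

/* Assumed device-specific submission hook, not a real API. */
static void xx_submit_io(struct xx_softc *sc, struct bio *bp);

/*
 * Sketch of a "start" routine: called with the softc lock held, it pulls
 * the next bio in the order chosen by bioq_disksort() and hands it to
 * the hardware while the device is idle.
 */
static void
xx_start(struct xx_softc *sc)
{
        struct bio *bp;

        mtx_assert(&sc->xx_mtx, MA_OWNED);
        while (!sc->xx_busy && (bp = bioq_takefirst(&sc->xx_bioq)) != NULL) {
                sc->xx_busy = 1;
                xx_submit_io(sc, bp);
        }
}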
static void
opalflash_strategy(struct bio *bp)
{
        struct opalflash_softc *sc;

        sc = (struct opalflash_softc *)bp->bio_disk->d_drv1;
        OPALFLASH_LOCK(sc);
        bioq_disksort(&sc->sc_bio_queue, bp);
        wakeup(sc);
        OPALFLASH_UNLOCK(sc);
}
static void
mambodisk_strategy(struct bio *bp)
{
        struct mambodisk_softc *sc;

        sc = (struct mambodisk_softc *)bp->bio_disk->d_drv1;
        MBODISK_LOCK(sc);
        bioq_disksort(&sc->bio_queue, bp);
        wakeup(sc);
        MBODISK_UNLOCK(sc);
}
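The opalflash, mambodisk, and g_uzip handlers follow the other common pattern: queue the bio and wakeup() a worker thread. A rough sketch of the consumer side of that pattern, reusing the hypothetical xx_softc sketched above plus <sys/systm.h> for msleep()/wakeup(); xx_worker() and xx_do_io() are likewise invented placeholders:

#include <sys/systm.h>

/* Assumed device-specific transfer routine, not a real API. */
static void xx_do_io(struct xx_softc *sc, struct bio *bp);

/*
 * Sketch of the worker thread behind the "disksort, then wakeup(sc)"
 * pattern: sleep on the softc, drain bios in sorted order, and complete
 * each one with biodone().
 */
static void
xx_worker(void *arg)
{
        struct xx_softc *sc = arg;
        struct bio *bp;

        for (;;) {
                mtx_lock(&sc->xx_mtx);
                while ((bp = bioq_takefirst(&sc->xx_bioq)) == NULL)
                        msleep(sc, &sc->xx_mtx, PRIBIO, "xxidle", 0);
                mtx_unlock(&sc->xx_mtx);

                xx_do_io(sc, bp);
                bp->bio_resid = 0;
                biodone(bp);
        }
}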
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
        struct cam_periph *periph;
        struct ada_softc *softc;

        periph = (struct cam_periph *)bp->bio_disk->d_drv1;
        if (periph == NULL) {
                biofinish(bp, NULL, ENXIO);
                return;
        }
        softc = (struct ada_softc *)periph->softc;

        cam_periph_lock(periph);

        /*
         * If the device has been made invalid, error out
         */
        if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
                cam_periph_unlock(periph);
                biofinish(bp, NULL, ENXIO);
                return;
        }

        /*
         * Place it in the queue of disk activities for this disk
         */
        if (bp->bio_cmd == BIO_DELETE &&
            (softc->flags & ADA_FLAG_CAN_TRIM))
                bioq_disksort(&softc->trim_queue, bp);
        else
                bioq_disksort(&softc->bio_queue, bp);

        /*
         * Schedule ourselves for performing the work.
         */
        adaschedule(periph);
        cam_periph_unlock(periph);

        return;
}
static void
mcdstrategy(struct bio *bp)
{
        struct mcd_softc *sc;

        sc = (struct mcd_softc *)bp->bio_dev->si_drv1;

        /* if device invalidated (e.g. media change, door open), error */
        MCD_LOCK(sc);
        if (!(sc->data.flags & MCDVALID)) {
                device_printf(sc->dev, "media changed\n");
                bp->bio_error = EIO;
                goto bad;
        }

        /* read only */
        if (!(bp->bio_cmd == BIO_READ)) {
                bp->bio_error = EROFS;
                goto bad;
        }

        /* no data to read */
        if (bp->bio_bcount == 0)
                goto done;

        if (!(sc->data.flags & MCDTOC)) {
                bp->bio_error = EIO;
                goto bad;
        }

        bp->bio_resid = 0;

        /* queue it */
        bioq_disksort(&sc->data.head, bp);

        /* now check whether we can perform processing */
        mcd_start(sc);
        MCD_UNLOCK(sc);
        return;

bad:
        bp->bio_flags |= BIO_ERROR;
done:
        MCD_UNLOCK(sc);
        bp->bio_resid = bp->bio_bcount;
        biodone(bp);
        return;
}
static void
g_uzip_read_done(struct bio *bp)
{
        struct bio *bp2;
        struct g_geom *gp;
        struct g_uzip_softc *sc;

        bp2 = bp->bio_parent;
        gp = bp2->bio_to->geom;
        sc = gp->softc;

        mtx_lock(&sc->queue_mtx);
        bioq_disksort(&sc->bio_queue, bp);
        mtx_unlock(&sc->queue_mtx);
        wakeup(sc);
}
static void
isf_disk_strategy(struct bio *bp)
{
        struct isf_softc *sc = bp->bio_disk->d_drv1;

        /*
         * We advertise a block size and maximum I/O size up the stack; catch
         * any attempts to not follow the rules.
         */
        KASSERT(bp->bio_bcount == ISF_SECTORSIZE,
            ("%s: I/O size not %d", __func__, ISF_SECTORSIZE));

        ISF_LOCK(sc);
        bioq_disksort(&sc->isf_bioq, bp);
        ISF_WAKEUP(sc);
        ISF_UNLOCK(sc);
}
static void
htif_blk_strategy(struct bio *bp)
{
        struct htif_blk_softc *sc;

        sc = bp->bio_disk->d_drv1;

        HTIF_BLK_LOCK(sc);
        if (sc->running > 0) {
                bioq_disksort(&sc->bio_queue, bp);
                HTIF_BLK_UNLOCK(sc);
                wakeup(sc);
        } else {
                HTIF_BLK_UNLOCK(sc);
                biofinish(bp, NULL, ENXIO);
        }
}
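Every handler above assumes the bio queue was set up beforehand, and htif_blk also shows failing requests with biofinish() once the device is gone. A short sketch of the matching setup and teardown, again with the hypothetical xx_ names; this is a generic pattern under those assumptions, not code from any of these drivers:

/* Prepare the lock and bio queue before the disk is exposed. */
static void
xx_queue_init(struct xx_softc *sc)
{
        mtx_init(&sc->xx_mtx, "xxbioq", NULL, MTX_DEF);
        bioq_init(&sc->xx_bioq);
}

/* On detach, fail anything still queued, as the ENXIO paths above do. */
static void
xx_queue_drain(struct xx_softc *sc)
{
        struct bio *bp;

        mtx_lock(&sc->xx_mtx);
        while ((bp = bioq_takefirst(&sc->xx_bioq)) != NULL)
                biofinish(bp, NULL, ENXIO);
        mtx_unlock(&sc->xx_mtx);
}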