void
ast_start(struct ata_device *atadev)
{
	struct ast_softc *stp = atadev->driver;
	struct buf *bp = bufq_first(&stp->queue);
	u_int32_t blkcount;
	int8_t ccb[16];

	if (!bp)
		return;

	bzero(ccb, sizeof(ccb));

	if (bp->b_flags & B_READ)
		ccb[0] = ATAPI_READ;
	else
		ccb[0] = ATAPI_WRITE;

	bufq_remove(&stp->queue, bp);
	blkcount = bp->b_bcount / stp->blksize;

	ccb[1] = 1;
	ccb[2] = blkcount>>16;
	ccb[3] = blkcount>>8;
	ccb[4] = blkcount;

	devstat_start_transaction(&stp->stats);

	atapi_queue_cmd(stp->device, ccb, bp->b_data, blkcount * stp->blksize,
			(bp->b_flags & B_READ) ? ATPR_F_READ : 0, 120,
			ast_done, bp);
}
static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pt_softc *softc;
	struct buf *bp;
	struct bio *bio;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * See if there is a buf with work for us to do..
	 */
	bio = bioq_first(&softc->bio_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
				("queuing for immediate ccb\n"));
		start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		wakeup(&periph->ccb_list);
	} else if (bio == NULL) {
		xpt_release_ccb(start_ccb);
	} else {
		bioq_remove(&softc->bio_queue, bio);
		bp = bio->bio_buf;

		devstat_start_transaction(&softc->device_stats);

		scsi_send_receive(&start_ccb->csio,
				  /*retries*/4,
				  ptdone,
				  MSG_SIMPLE_Q_TAG,
				  (bp->b_cmd == BUF_CMD_READ),
				  /*byte2*/0,
				  bp->b_bcount,
				  bp->b_data,
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/softc->io_timeout);

		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO_UA;

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
				 periph_links.le);

		start_ccb->ccb_h.ccb_bio = bio;
		bio = bioq_first(&softc->bio_queue);

		xpt_action(start_ccb);

		if (bio != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
	}
}
static int
mfi_disk_strategy(struct dev_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct mfi_disk *sc = ap->a_head.a_dev->si_drv1;
	struct mfi_softc *controller;

	if (sc == NULL) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bio);
		return (0);
	}
	controller = sc->ld_controller;

	if (controller->hw_crit_error) {
		bp->b_error = EBUSY;
		return (0);
	}

	if (controller->issuepend_done == 0) {
		bp->b_error = EBUSY;
		return (0);
	}

	/*
	 * XXX swildner
	 *
	 * If it's a null transfer, do nothing. FreeBSD's original driver
	 * doesn't have this, but that caused hard error messages (even
	 * though everything else continued to work fine). Interestingly,
	 * only when HAMMER was used.
	 *
	 * Several others of our RAID drivers have this check, such as
	 * aac(4) and ida(4), so we insert it here, too.
	 *
	 * The cause of null transfers is yet unknown.
	 */
	if (bp->b_bcount == 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bio);
		return (0);
	}

	bio->bio_driver_info = sc;
	lockmgr(&controller->mfi_io_lock, LK_EXCLUSIVE);
	mfi_enqueue_bio(controller, bio);
	devstat_start_transaction(&sc->ld_devstat);
	mfi_startio(controller);
	lockmgr(&controller->mfi_io_lock, LK_RELEASE);
	return (0);
}
static int
ipsd_strategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	ipsdisk_softc_t *dsc;

	dsc = dev->si_drv1;
	DEVICE_PRINTF(8, dsc->dev, "in strategy\n");

	bio->bio_driver_info = dsc;
	devstat_start_transaction(&dsc->stats);
	lockmgr(&dsc->sc->queue_lock, LK_EXCLUSIVE|LK_RETRY);
	bioqdisksort(&dsc->sc->bio_queue, bio);
	ips_start_io_request(dsc->sc);
	lockmgr(&dsc->sc->queue_lock, LK_RELEASE);
	return(0);
}
/*
 * Read/write routine for a buffer. Finds the proper unit, range checks
 * arguments, and schedules the transfer. Does not wait for the transfer
 * to complete. Multi-page transfers are supported. All I/O requests must
 * be a multiple of a sector in length.
 */
static int
idad_strategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct idad_softc *drv;

	drv = idad_getsoftc(dev);
	if (drv == NULL) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/*
	 * software write protect check
	 */
	if ((drv->flags & DRV_WRITEPROT) && bp->b_cmd != BUF_CMD_READ) {
		bp->b_error = EROFS;
		goto bad;
	}

	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->b_bcount == 0)
		goto done;

	bio->bio_driver_info = drv;
	crit_enter();
	devstat_start_transaction(&drv->stats);
	ida_submit_buf(drv->controller, bio);
	crit_exit();
	return(0);

bad:
	bp->b_flags |= B_ERROR;

done:
	/*
	 * Correctly set the buf to indicate a completed transfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bio);
	return(0);
}
static int
vtblk_strategy(struct dev_strategy_args *ap)
{
	struct vtblk_softc *sc;
	cdev_t dev = ap->a_head.a_dev;
	sc = dev->si_drv1;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;

	if (sc == NULL) {
		vtblk_finish_bio(bio, EINVAL);
		return EINVAL;
	}

	/*
	 * Fail any write if RO. Unfortunately, there does not seem to
	 * be a better way to report our readonly'ness to GEOM above.
	 *
	 * XXX: Is that true in DFly?
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->b_cmd == BUF_CMD_WRITE || bp->b_cmd == BUF_CMD_FLUSH)) {
		vtblk_finish_bio(bio, EROFS);
		return (EINVAL);
	}

	lwkt_serialize_enter(&sc->vtblk_slz);
	if ((sc->vtblk_flags & VTBLK_FLAG_DETACH) == 0) {
		devstat_start_transaction(&sc->stats);
		bioqdisksort(&sc->vtblk_bioq, bio);
		vtblk_startio(sc);
	} else {
		vtblk_finish_bio(bio, ENXIO);
	}
	lwkt_serialize_exit(&sc->vtblk_slz);
	return 0;
}
/*
 * Do all IO operations on dm logical devices.
 */
static int
dmstrategy(struct dev_strategy_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	int bypass;

	dm_dev_t *dmv;
	dm_table_t *tbl;
	dm_table_entry_t *table_en;
	struct buf *nestbuf;

	uint32_t dev_type;

	uint64_t buf_start, buf_len, issued_len;
	uint64_t table_start, table_end;
	uint64_t start, end;

	buf_start = bio->bio_offset;
	buf_len = bp->b_bcount;

	tbl = NULL;

	table_end = 0;
	dev_type = 0;
	issued_len = 0;

	dmv = dev->si_drv1;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
	case BUF_CMD_WRITE:
	case BUF_CMD_FREEBLKS:
		bypass = 0;
		break;
	case BUF_CMD_FLUSH:
		bypass = 1;
		KKASSERT(buf_len == 0);
		break;
	default:
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
		biodone(bio);
		return 0;
	}

	if (bypass == 0 &&
	    bounds_check_with_mediasize(bio, DEV_BSIZE,
	    dm_table_size(&dmv->table_head)) <= 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bio);
		return 0;
	}

	/* Select active table */
	tbl = dm_table_get_entry(&dmv->table_head, DM_TABLE_ACTIVE);

	nestiobuf_init(bio);
	devstat_start_transaction(&dmv->stats);

	/*
	 * Find out what tables I want to select.
	 */
	SLIST_FOREACH(table_en, tbl, next) {
		/*
		 * I need the number of bytes, not blocks.
		 */
		table_start = table_en->start * DEV_BSIZE;
		table_end = table_start + (table_en->length) * DEV_BSIZE;

		/*
		 * Calculate the start and end
		 */
		start = MAX(table_start, buf_start);
		end = MIN(table_end, buf_start + buf_len);

		aprint_debug("----------------------------------------\n");
		aprint_debug("table_start %010" PRIu64", table_end %010"
		    PRIu64 "\n", table_start, table_end);
		aprint_debug("buf_start %010" PRIu64", buf_len %010"
		    PRIu64"\n", buf_start, buf_len);
		aprint_debug("start-buf_start %010"PRIu64", end %010"
		    PRIu64"\n", start - buf_start, end);
		aprint_debug("start %010" PRIu64" , end %010"
		    PRIu64"\n", start, end);
		aprint_debug("\n----------------------------------------\n");

		if (bypass) {
			nestbuf = getpbuf(NULL);
			nestbuf->b_flags |= bio->bio_buf->b_flags & B_HASBOGUS;

			nestiobuf_add(bio, nestbuf, 0, 0, &dmv->stats);
			nestbuf->b_bio1.bio_offset = 0;
			table_en->target->strategy(table_en, nestbuf);
		} else if (start < end) {
			nestbuf = getpbuf(NULL);
			nestbuf->b_flags |= bio->bio_buf->b_flags & B_HASBOGUS;

			nestiobuf_add(bio, nestbuf,
				      start - buf_start, (end - start),
				      &dmv->stats);
			issued_len += end - start;

			nestbuf->b_bio1.bio_offset = (start - table_start);
			table_en->target->strategy(table_en, nestbuf);
		}
	}
static int
ad_strategy(struct dev_strategy_args *ap)
{
	device_t dev = ap->a_head.a_dev->si_drv1;
	struct bio *bp = ap->a_bio;
	struct buf *bbp = bp->bio_buf;
	struct ata_device *atadev = device_get_softc(dev);
	struct ata_request *request;
	struct ad_softc *adp = device_get_ivars(dev);

	if (!(request = ata_alloc_request())) {
		device_printf(dev, "FAILURE - out of memory in strategy\n");
		bbp->b_flags |= B_ERROR;
		bbp->b_error = ENOMEM;
		biodone(bp);
		return(0);
	}

	/* setup request */
	request->dev = dev;
	request->bio = bp;
	request->callback = ad_done;
	request->timeout = ATA_DEFAULT_TIMEOUT;
	request->retries = 2;
	request->data = bbp->b_data;
	request->bytecount = bbp->b_bcount;
	/* lba is block granularity, convert byte granularity bio_offset */
	request->u.ata.lba = (u_int64_t)(bp->bio_offset >> DEV_BSHIFT);
	request->u.ata.count = request->bytecount / DEV_BSIZE;
	request->transfersize = min(bbp->b_bcount, atadev->max_iosize);

	switch (bbp->b_cmd) {
	case BUF_CMD_READ:
		request->flags = ATA_R_READ;
		if (atadev->mode >= ATA_DMA) {
			request->u.ata.command = ATA_READ_DMA;
			request->flags |= ATA_R_DMA;
		} else if (request->transfersize > DEV_BSIZE)
			request->u.ata.command = ATA_READ_MUL;
		else
			request->u.ata.command = ATA_READ;
		break;
	case BUF_CMD_WRITE:
		request->flags = ATA_R_WRITE;
		if (atadev->mode >= ATA_DMA) {
			request->u.ata.command = ATA_WRITE_DMA;
			request->flags |= ATA_R_DMA;
		} else if (request->transfersize > DEV_BSIZE)
			request->u.ata.command = ATA_WRITE_MUL;
		else
			request->u.ata.command = ATA_WRITE;
		break;
	case BUF_CMD_FLUSH:
		request->u.ata.lba = 0;
		request->u.ata.count = 0;
		request->u.ata.feature = 0;
		request->bytecount = 0;
		request->transfersize = 0;
		request->flags = ATA_R_CONTROL;
		request->u.ata.command = ATA_FLUSHCACHE;
		/* ATA FLUSHCACHE requests may take up to 30 sec to timeout */
		request->timeout = 30;
		break;
	default:
		device_printf(dev, "FAILURE - unknown BUF operation\n");
		ata_free_request(request);
		bbp->b_flags |= B_ERROR;
		bbp->b_error = EIO;
		biodone(bp);
		return(0);
	}
	request->flags |= ATA_R_ORDERED;
	devstat_start_transaction(&adp->stats);
	ata_queue_request(request);
	return(0);
}
static int
afd_strategy(struct dev_strategy_args *ap)
{
	device_t dev = ap->a_head.a_dev->si_drv1;
	struct bio *bp = ap->a_bio;
	struct buf *bbp = bp->bio_buf;
	struct ata_device *atadev = device_get_softc(dev);
	struct afd_softc *fdp = device_get_ivars(dev);
	struct ata_request *request;
	u_int32_t lba;
	u_int16_t count;
	int8_t ccb[16];

	/* if it's a null transfer, return immediately. */
	if (bbp->b_bcount == 0) {
		bbp->b_resid = 0;
		biodone(bp);
		return 0;
	}

	/* should reject all queued entries if media have changed. */
	if (atadev->flags & ATA_D_MEDIA_CHANGED) {
		bbp->b_flags |= B_ERROR;
		bbp->b_error = EIO;
		biodone(bp);
		return 0;
	}

	lba = bp->bio_offset / fdp->sectorsize;
	count = bbp->b_bcount / fdp->sectorsize;
	bbp->b_resid = bbp->b_bcount;

	bzero(ccb, sizeof(ccb));

	switch(bbp->b_cmd) {
	case BUF_CMD_READ:
		ccb[0] = ATAPI_READ_BIG;
		break;
	case BUF_CMD_WRITE:
		ccb[0] = ATAPI_WRITE_BIG;
		break;
	default:
		device_printf(dev, "unknown BUF operation\n");
		bbp->b_flags |= B_ERROR;
		bbp->b_error = EIO;
		biodone(bp);
		return 0;
	}

	ccb[2] = lba >> 24;
	ccb[3] = lba >> 16;
	ccb[4] = lba >> 8;
	ccb[5] = lba;
	ccb[7] = count>>8;
	ccb[8] = count;

	if (!(request = ata_alloc_request())) {
		bbp->b_flags |= B_ERROR;
		bbp->b_error = ENOMEM;
		biodone(bp);
		return 0;
	}

	request->dev = dev;
	request->bio = bp;
	bcopy(ccb, request->u.atapi.ccb,
	      (atadev->param.config & ATA_PROTO_MASK) ==
	      ATA_PROTO_ATAPI_12 ? 16 : 12);
	request->data = bbp->b_data;
	request->bytecount = count * fdp->sectorsize;
	request->transfersize = min(request->bytecount, 65534);
	request->timeout = (ccb[0] == ATAPI_WRITE_BIG) ? 60 : 30;
	request->retries = 2;
	request->callback = afd_done;

	switch (bbp->b_cmd) {
	case BUF_CMD_READ:
		request->flags = (ATA_R_ATAPI | ATA_R_READ);
		break;
	case BUF_CMD_WRITE:
		request->flags = (ATA_R_ATAPI | ATA_R_WRITE);
		break;
	default:
		panic("bbp->b_cmd");
	}
	if (atadev->mode >= ATA_DMA)
		request->flags |= ATA_R_DMA;
	request->flags |= ATA_R_ORDERED;
	devstat_start_transaction(&fdp->stats);
	ata_queue_request(request);
	return 0;
}
/* Read/write routine for a buffer. Finds the proper unit, range checks
 * arguments, and schedules the transfer. Does not wait for the transfer
 * to complete. Multi-page transfers are supported. All I/O requests must
 * be a multiple of a sector in length.
 */
void
idstrategy(struct buf *bp)
{
	int unit = dkunit(bp->b_dev);
	struct ida_drv *drv;
	int opri;

	if (unit >= NID) {
		printf("ida: unit out of range\n");
		bp->b_error = EINVAL;
		goto bad;
	}

	if (!(drv = id_drive[unit]) || !(drv->flags & ID_INIT)) {
		printf("id%d: drive not initialised\n", unit);
		bp->b_error = EINVAL;
		goto bad;
	}

	if (bp->b_blkno < 0) {
		printf("id%d: negative block requested\n", unit);
		bp->b_error = EINVAL;
		goto bad;
	}

	if (bp->b_bcount % DEV_BSIZE != 0) {
		/* bounds check */
		printf("id%d: count (%lu) not a multiple of a block\n",
		       unit, bp->b_bcount);
		bp->b_error = EINVAL;
		goto bad;
	}

	idaminphys(bp);		/* adjust the transfer size */

	/* "soft" write protect check */
	if ((drv->flags & ID_WRITEPROT) && (bp->b_flags & B_READ) == 0) {
		bp->b_error = EROFS;
		goto bad;
	}

	/* If it's a null transfer, return immediately */
	if (bp->b_bcount == 0) {
		goto done;
	}

	if (dscheck(bp, drv->slices) <= 0) {
		goto done;
	}

	opri = splbio();
	ida_queue_buf(unit, bp);
	devstat_start_transaction(&drv->dk_stats);
	ida_start(drv->ctl_unit);	/* hit the appropriate controller */
	splx(opri);
	return /*0*/;

bad:
	bp->b_flags |= B_ERROR;

done:
	/* correctly set the buf to indicate a completed xfer */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return /*0*/;
}
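/*
 * For context only: every routine above opens devstat accounting with
 * devstat_start_transaction(), but none of the matching completion
 * callbacks is shown.  The following is a minimal sketch, not taken from
 * any of the drivers listed here, of the usual done-side pairing in these
 * drivers: devstat_end_transaction_buf() followed by biodone().  The
 * names xx_done, xx_softc and the stats field are hypothetical
 * placeholders.
 */
static void
xx_done(struct xx_softc *sc, struct bio *bio, int error)
{
	struct buf *bp = bio->bio_buf;

	if (error) {
		/* flag the error and report the whole request as residual */
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_resid = 0;
	}

	/* close the accounting opened in the strategy/start routine */
	devstat_end_transaction_buf(&sc->stats, bp);
	biodone(bio);
}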