void
ahd_setup_data(struct ahd_softc *ahd, struct scsi_xfer *xs,
    struct scb *scb)
{
	struct hardware_scb *hscb;
	int s;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;
	xs->error = CAM_REQ_INPROG;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > MAX_CDB_LEN) {
		ahd_lock(ahd, &s);
		ahd_free_scb(ahd, scb);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		ahd_unlock(ahd, &s);
		return;
	}

	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);

	/* Only use S/G if there is a transfer. */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahd->parent_dmat,
		    scb->dmamap, xs->data, xs->datalen, NULL,
		    ((xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->flags & SCSI_DATA_IN) ?
		    BUS_DMA_READ : BUS_DMA_WRITE));
		if (error) {
#ifdef AHD_DEBUG
			printf("%s: in ahd_setup_data(): bus_dmamap_load() "
			    "= %d\n", ahd_name(ahd), error);
#endif
			ahd_lock(ahd, &s);
			ahd_free_scb(ahd, scb);
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			ahd_unlock(ahd, &s);
			return;
		}
		ahd_execute_scb(scb, scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahd_execute_scb(scb, NULL, 0);
	}
}
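/*
 * The lock/free-scb/done sequence above appears twice.  A minimal
 * sketch of a helper that could factor it out; ahd_fail_xs() is
 * hypothetical and not part of the driver:
 */
void
ahd_fail_xs(struct ahd_softc *ahd, struct scb *scb, struct scsi_xfer *xs)
{
	int s;

	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);			/* return the scb to the pool */
	xs->error = XS_DRIVER_STUFFUP;		/* generic driver failure */
	scsi_done(xs);				/* complete the transfer */
	ahd_unlock(ahd, &s);
}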
void
vdsk_scsi_done(struct scsi_xfer *xs, int error)
{
	xs->error = error;
	scsi_done(xs);
}
/*
 * Finalise a completed autosense operation.
 */
void
umass_scsi_sense_cb(struct umass_softc *sc, void *priv, int residue,
    int status)
{
	struct scsi_xfer *xs = priv;

	DPRINTF(UDMASS_CMD, ("umass_scsi_sense_cb: xs=%p residue=%d "
	    "status=%d\n", xs, residue, status));

	sc->sc_sense = 0;
	switch (status) {
	case STATUS_CMD_OK:
	case STATUS_CMD_UNKNOWN:
		/*
		 * Getting sense data succeeded.  A residue of 14 likely
		 * means exactly 18 bytes of the 32-byte sense buffer
		 * arrived, i.e. a full fixed-format sense block.
		 */
		if (residue == 0 || residue == 14)	/* XXX */
			xs->error = XS_SENSE;
		else
			xs->error = XS_SHORTSENSE;
		break;
	default:
		DPRINTF(UDMASS_SCSI, ("%s: Autosense failed, status %d\n",
		    sc->sc_dev.dv_xname, status));
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	DPRINTF(UDMASS_CMD, ("umass_scsi_sense_cb: return xs->error=%d, "
	    "xs->status=0x%x xs->resid=%d\n", xs->error, xs->status,
	    xs->resid));

	if ((xs->flags & SCSI_POLL) && (xs->error == XS_NOERROR)) {
		switch (sc->polled_xfer_status) {
		case USBD_NORMAL_COMPLETION:
			xs->error = XS_NOERROR;
			break;
		case USBD_TIMEOUT:
			xs->error = XS_TIMEOUT;
			break;
		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
	}

	scsi_done(xs);
}
void
wdc_atapi_done(struct channel_softc *chp, struct wdc_xfer *xfer,
    int timeout, struct atapi_return_args *ret)
{
	struct scsi_xfer *sc_xfer = xfer->cmd;

	WDCDEBUG_PRINT(("wdc_atapi_done %s:%d:%d: flags 0x%x error 0x%x\n",
	    chp->wdc->sc_dev.dv_xname, chp->channel, xfer->drive,
	    (u_int)xfer->c_flags, sc_xfer->error), DEBUG_XFERS);
	WDC_LOG_ATAPI_DONE(chp, xfer->drive, xfer->c_flags, sc_xfer->error);

	if (xfer->c_flags & C_POLL)
		wdc_enable_intr(chp);

	scsi_done(sc_xfer);

	xfer->next = NULL;
	return;
}
void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct qlw_softc	*sc = link->adapter_softc;
	struct qlw_ccb		*ccb;
	struct qlw_iocb_req0	*iocb;
	struct qlw_ccb_list	list;
	u_int16_t		req, rspin;
	int			offset, error, done;
	bus_dmamap_t		dmap;
	int			bus;
	int			seg;

	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;	/* invalid opcode */
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n",
		    DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0)
			continue;

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}

		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}
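/*
 * The advance-and-wrap pattern on sc_next_req_id occurs three times in
 * qlw_scsi_cmd().  A sketch of a hypothetical helper (qlw_next_req_id()
 * is not in the driver) that would factor it out; the caller must hold
 * sc_queue_mtx:
 */
u_int16_t
qlw_next_req_id(struct qlw_softc *sc)
{
	u_int16_t req;

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;		/* wrap the request ring */
	return (req);
}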
void
qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info)
{
	int i;
	u_int16_t rspin;
	struct qlw_ccb *ccb;

	switch (isr) {
	case QLW_INT_TYPE_ASYNC:
		qlw_async(sc, info);
		qlw_clear_isr(sc, isr);
		break;

	case QLW_INT_TYPE_IO:
		qlw_clear_isr(sc, isr);
		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		if (rspin == sc->sc_last_resp_id) {
			/*
			 * Seems to happen a lot on 2200s when mbox commands
			 * complete but it doesn't want to give us the
			 * register semaphore, or something.
			 *
			 * If we're waiting on a mailbox command, don't
			 * ack the interrupt yet.
			 */
			if (sc->sc_mbox_pending) {
				DPRINTF(QLW_D_MBOX, "%s: ignoring premature"
				    " mbox int\n", DEVNAME(sc));
				return;
			}

			break;
		}

		if (sc->sc_responses == NULL)
			break;

		DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n",
		    DEVNAME(sc), sc->sc_last_resp_id, rspin);

		do {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);
			if (ccb)
				scsi_done(ccb->ccb_xs);

			sc->sc_last_resp_id++;
			sc->sc_last_resp_id %= sc->sc_maxresponses;
		} while (sc->sc_last_resp_id != rspin);

		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
		break;

	case QLW_INT_TYPE_MBOX:
		if (sc->sc_mbox_pending) {
			if (info == QLW_MBOX_COMPLETE) {
				for (i = 1; i < nitems(sc->sc_mbox); i++) {
					sc->sc_mbox[i] = qlw_read_mbox(sc, i);
				}
			} else {
				sc->sc_mbox[0] = info;
			}
			wakeup(sc->sc_mbox);
		} else {
			DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:"
			    " %x\n", DEVNAME(sc), info);
		}
		qlw_clear_isr(sc, isr);
		break;

	default:
		/* maybe log something? */
		break;
	}
}
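/*
 * The wakeup(sc->sc_mbox) in the QLW_INT_TYPE_MBOX case pairs with a
 * submitter sleeping on the same channel.  A rough sketch of that side,
 * assuming tsleep() is used and ignoring the serialization and polled
 * fallback the real qlw_mbox() needs:
 */
int
qlw_mbox_wait(struct qlw_softc *sc)
{
	/* ... mailbox registers written, doorbell rung ... */
	sc->sc_mbox_pending = 1;
	tsleep(sc->sc_mbox, PRIBIO, "qlwmbox", 0);	/* woken by the ISR */
	sc->sc_mbox_pending = 0;

	/* sc_mbox[0] was filled in by qlw_handle_intr() */
	return ((sc->sc_mbox[0] == QLW_MBOX_COMPLETE) ? 0 : EIO);
}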
void
umass_scsi_cb(struct umass_softc *sc, void *priv, int residue, int status)
{
	struct umass_scsi_softc *scbus = (struct umass_scsi_softc *)sc->bus;
	struct scsi_xfer *xs = priv;
	struct scsi_link *link = xs->sc_link;
	int cmdlen;
#ifdef UMASS_DEBUG
	struct timeval tv;
	u_int delta;
	microtime(&tv);
	delta = (tv.tv_sec - sc->tv.tv_sec) * 1000000 +
	    tv.tv_usec - sc->tv.tv_usec;
#endif

	DPRINTF(UDMASS_CMD,
	    ("umass_scsi_cb: at %lld.%06ld, delta=%u: xs=%p residue=%d"
	    " status=%d\n", (long long)tv.tv_sec, tv.tv_usec, delta, xs,
	    residue, status));

	xs->resid = residue;

	switch (status) {
	case STATUS_CMD_OK:
		xs->error = XS_NOERROR;
		break;

	case STATUS_CMD_UNKNOWN:
		DPRINTF(UDMASS_CMD, ("umass_scsi_cb: status cmd unknown\n"));
		/* we can't issue REQUEST SENSE */
		if (xs->sc_link->quirks & ADEV_NOSENSE) {
			/*
			 * If no residue and no other USB error,
			 * command succeeded.
			 */
			if (residue == 0) {
				xs->error = XS_NOERROR;
				break;
			}

			/*
			 * Some devices return a short INQUIRY
			 * response, omitting response data from the
			 * "vendor specific data" on...
			 */
			if (xs->cmd->opcode == INQUIRY &&
			    residue < xs->datalen) {
				xs->error = XS_NOERROR;
				break;
			}

			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */

	case STATUS_CMD_FAILED:
		DPRINTF(UDMASS_CMD, ("umass_scsi_cb: status cmd failed for "
		    "scsi op 0x%02x\n", xs->cmd->opcode));
		/* fetch sense data */
		sc->sc_sense = 1;
		memset(&scbus->sc_sense_cmd, 0, sizeof(scbus->sc_sense_cmd));
		scbus->sc_sense_cmd.opcode = REQUEST_SENSE;
		scbus->sc_sense_cmd.byte2 = link->lun << SCSI_CMD_LUN_SHIFT;
		scbus->sc_sense_cmd.length = sizeof(xs->sense);
		cmdlen = sizeof(scbus->sc_sense_cmd);

		if (xs->flags & SCSI_POLL) {
			usbd_set_polling(sc->sc_udev, 1);
			sc->sc_xfer_flags = USBD_SYNCHRONOUS;
			sc->polled_xfer_status = USBD_INVAL;
		}
		/* umass_scsi_sense_cb() will finish the xfer. */
		sc->sc_methods->wire_xfer(sc, link->lun, &scbus->sc_sense_cmd,
		    cmdlen, &xs->sense, sizeof(xs->sense), DIR_IN,
		    xs->timeout, umass_scsi_sense_cb, xs);
		if (xs->flags & SCSI_POLL) {
			sc->sc_xfer_flags = 0;
			usbd_set_polling(sc->sc_udev, 0);
		}
		return;

	case STATUS_WIRE_FAILED:
		xs->error = XS_RESET;
		break;

	default:
		panic("%s: Unknown status %d in umass_scsi_cb",
		    sc->sc_dev.dv_xname, status);
	}

	DPRINTF(UDMASS_CMD, ("umass_scsi_cb: at %lld.%06ld: return error=%d, "
	    "status=0x%x resid=%d\n", (long long)tv.tv_sec, tv.tv_usec,
	    xs->error, xs->status, xs->resid));

	if ((xs->flags & SCSI_POLL) && (xs->error == XS_NOERROR)) {
		switch (sc->polled_xfer_status) {
		case USBD_NORMAL_COMPLETION:
			xs->error = XS_NOERROR;
			break;
		case USBD_TIMEOUT:
			xs->error = XS_TIMEOUT;
			break;
		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
	}

	scsi_done(xs);
}
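/*
 * The SCSI_POLL status translation above is duplicated verbatim in
 * umass_scsi_sense_cb().  A sketch of a hypothetical shared helper
 * (umass_polled_xfer_error() is not in the driver):
 */
int
umass_polled_xfer_error(struct umass_softc *sc)
{
	switch (sc->polled_xfer_status) {
	case USBD_NORMAL_COMPLETION:
		return (XS_NOERROR);
	case USBD_TIMEOUT:
		return (XS_TIMEOUT);
	default:
		return (XS_DRIVER_STUFFUP);
	}
}
/*
 * Each caller would then end with:
 *	if ((xs->flags & SCSI_POLL) && xs->error == XS_NOERROR)
 *		xs->error = umass_polled_xfer_error(sc);
 */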
void
umass_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *sc_link = xs->sc_link;
	struct umass_softc *sc = sc_link->adapter_softc;
	struct scsi_generic *cmd;
	int cmdlen, dir;

#ifdef UMASS_DEBUG
	microtime(&sc->tv);
#endif

	DIF(UDMASS_UPPER, sc_link->flags |= SCSIDEBUG_LEVEL);
	DPRINTF(UDMASS_CMD, ("%s: umass_scsi_cmd: at %lld.%06ld: %d:%d "
	    "xs=%p cmd=0x%02x datalen=%d (quirks=0x%x, poll=%d)\n",
	    sc->sc_dev.dv_xname, (long long)sc->tv.tv_sec, sc->tv.tv_usec,
	    sc_link->target, sc_link->lun, xs, xs->cmd->opcode,
	    xs->datalen, sc_link->quirks, xs->flags & SCSI_POLL));

	if (usbd_is_dying(sc->sc_udev)) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

#if defined(UMASS_DEBUG)
	if (sc_link->target != UMASS_SCSIID_DEVICE) {
		DPRINTF(UDMASS_SCSI, ("%s: wrong SCSI ID %d\n",
		    sc->sc_dev.dv_xname, sc_link->target));
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}
#endif

	cmd = xs->cmd;
	cmdlen = xs->cmdlen;

	dir = DIR_NONE;
	if (xs->datalen) {
		switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		case SCSI_DATA_IN:
			dir = DIR_IN;
			break;
		case SCSI_DATA_OUT:
			dir = DIR_OUT;
			break;
		}
	}

	if (xs->datalen > UMASS_MAX_TRANSFER_SIZE) {
		printf("umass_cmd: large datalen, %d\n", xs->datalen);
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	if (xs->flags & SCSI_POLL) {
		DPRINTF(UDMASS_SCSI, ("umass_scsi_cmd: sync dir=%d\n", dir));
		usbd_set_polling(sc->sc_udev, 1);
		sc->sc_xfer_flags = USBD_SYNCHRONOUS;
		sc->polled_xfer_status = USBD_INVAL;
		sc->sc_methods->wire_xfer(sc, sc_link->lun, cmd, cmdlen,
		    xs->data, xs->datalen, dir, xs->timeout,
		    umass_scsi_cb, xs);
		sc->sc_xfer_flags = 0;
		DPRINTF(UDMASS_SCSI, ("umass_scsi_cmd: done err=%d\n",
		    sc->polled_xfer_status));
		usbd_set_polling(sc->sc_udev, 0);
		/* scsi_done() has already been called. */
		return;
	} else {
		DPRINTF(UDMASS_SCSI,
		    ("umass_scsi_cmd: async dir=%d, cmdlen=%d datalen=%d\n",
		    dir, cmdlen, xs->datalen));
		sc->sc_methods->wire_xfer(sc, sc_link->lun, cmd, cmdlen,
		    xs->data, xs->datalen, dir, xs->timeout,
		    umass_scsi_cb, xs);
		/* scsi_done() is called from umass_scsi_cb() on completion. */
		return;
	}

	/* Return if command finishes early. */
done:
	scsi_done(xs);
}
void
vdsk_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	u_int64_t lba;
	u_int32_t sector_count;
	uint8_t operation;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
		operation = VD_OP_BREAD;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
		operation = VD_OP_BWRITE;
		break;

	case SYNCHRONIZE_CACHE:
		operation = VD_OP_FLUSH;
		break;

	case INQUIRY:
		vdsk_scsi_inq(xs);
		return;
	case READ_CAPACITY:
		vdsk_scsi_capacity(xs);
		return;
	case READ_CAPACITY_16:
		vdsk_scsi_capacity16(xs);
		return;

	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		vdsk_scsi_done(xs, XS_NOERROR);
		return;

	default:
		printf("%s cmd 0x%02x\n", __func__, xs->cmd->opcode);
	case MODE_SENSE:
	case MODE_SENSE_BIG:
	case REPORT_LUNS:
		vdsk_scsi_done(xs, XS_DRIVER_STUFFUP);
		return;
	}

	/*
	 * READ/WRITE(6) carries a 21-bit LBA and an 8-bit transfer
	 * length where 0 means 256 blocks; the 10-byte forms carry
	 * full 32-bit and 16-bit fields.
	 */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		sector_count = rw->length ? rw->length : 0x100;
	} else {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	}

	{
		struct vdsk_softc *sc = xs->sc_link->adapter_softc;
		struct ldc_map *map = sc->sc_lm;
		struct vio_dring_msg dm;
		vaddr_t va;
		paddr_t pa;
		int len, ncookies;
		int desc, s;
		int timeout;

		s = splbio();
		if (sc->sc_vio_state != VIO_ESTABLISHED ||
		    sc->sc_tx_cnt >= sc->sc_vd->vd_nentries) {
			xs->error = XS_NO_CCB;
			scsi_done(xs);
			splx(s);
			return;
		}

		desc = sc->sc_tx_prod;

		ncookies = 0;
		len = xs->datalen;
		va = (vaddr_t)xs->data;
		while (len > 0) {
			pmap_extract(pmap_kernel(), va, &pa);
			while (map->lm_slot[map->lm_next].entry != 0) {
				map->lm_next++;
				map->lm_next &= (map->lm_nentries - 1);
			}
			map->lm_slot[map->lm_next].entry =
			    (pa & LDC_MTE_RA_MASK);
			map->lm_slot[map->lm_next].entry |=
			    LDC_MTE_CPR | LDC_MTE_CPW;
			map->lm_slot[map->lm_next].entry |=
			    LDC_MTE_IOR | LDC_MTE_IOW;
			map->lm_slot[map->lm_next].entry |=
			    LDC_MTE_R | LDC_MTE_W;
			map->lm_count++;

			sc->sc_vd->vd_desc[desc].cookie[ncookies].addr =
			    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
			sc->sc_vd->vd_desc[desc].cookie[ncookies].size =
			    min(len, PAGE_SIZE);
			sc->sc_vsd[desc].vsd_map_idx[ncookies] = map->lm_next;

			va += PAGE_SIZE;
			len -= PAGE_SIZE;
			ncookies++;
		}

		sc->sc_vd->vd_desc[desc].hdr.ack = 1;
		sc->sc_vd->vd_desc[desc].operation = operation;
		sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
		sc->sc_vd->vd_desc[desc].status = 0xffffffff;
		sc->sc_vd->vd_desc[desc].offset = lba;
		sc->sc_vd->vd_desc[desc].size = xs->datalen;
		sc->sc_vd->vd_desc[desc].ncookies = ncookies;
		membar(Sync);
		sc->sc_vd->vd_desc[desc].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[desc].vsd_xs = xs;

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);
		sc->sc_tx_cnt++;

		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DRING_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.dring_ident = sc->sc_dring_ident;
		dm.start_idx = dm.end_idx = desc;
		vdsk_sendmsg(sc, &dm, sizeof(dm));

		if (!ISSET(xs->flags, SCSI_POLL)) {
			splx(s);
			return;
		}

		timeout = 1000;
		do {
			/*
			 * Wait for the descriptor state, not the status
			 * field, to return to FREE.
			 */
			if (vdsk_rx_intr(sc) &&
			    sc->sc_vd->vd_desc[desc].hdr.dstate ==
			    VIO_DESC_FREE)
				break;

			delay(1000);
		} while (--timeout > 0);
		splx(s);
	}
}
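/*
 * Worked example of the 6-byte decode in vdsk_scsi_cmd(), assuming the
 * usual scsi_rw layout (opcode, addr[3], length, control): for the CDB
 * 08 f2 34 56 80 00, _3btol(rw->addr) yields 0xf23456; the mask
 * (SRW_TOPADDR << 16 | 0xffff) == 0x1fffff strips the historical LUN
 * bits at the top of the address, giving lba 0x123456; the length byte
 * 0x80 gives sector_count 128, and a length byte of 0 would mean 256
 * blocks.
 */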
void
ahd_action(struct scsi_xfer *xs)
{
	struct ahd_softc *ahd;
	struct scb *scb;
	struct hardware_scb *hscb;
	u_int target_id;
	u_int our_id;
	int s;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int col_idx;
	u_int16_t quirks;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("ahd_action\n"));
	ahd = (struct ahd_softc *)xs->sc_link->adapter_softc;

	target_id = xs->sc_link->target;
	our_id = SCSI_SCSI_ID(ahd, xs->sc_link);

	ahd_lock(ahd, &s);
	if ((ahd->flags & AHD_INITIATORROLE) == 0) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		ahd_unlock(ahd, &s);
		return;
	}

	/*
	 * Get an scb to use.
	 */
	tinfo = ahd_fetch_transinfo(ahd, 'A', our_id, target_id, &tstate);
	quirks = xs->sc_link->quirks;

	if ((quirks & SDEV_NOTAGS) != 0 ||
	    (tinfo->curr.ppr_options & MSG_EXT_PPR_PROT_IUS) != 0)
		col_idx = AHD_NEVER_COL_IDX;
	else
		col_idx = AHD_BUILD_COL_IDX(target_id, xs->sc_link->lun);

	if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
		ahd->flags |= AHD_RESOURCE_SHORTAGE;
		xs->error = XS_NO_CCB;
		scsi_done(xs);
		ahd_unlock(ahd, &s);
		return;
	}
	ahd_unlock(ahd, &s);

	hscb = scb->hscb;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("start scb(%p)\n", scb));
	scb->xs = xs;
	timeout_set(&xs->stimeout, ahd_timeout, scb);

	/*
	 * Put all the arguments for the xfer in the scb.
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahd, xs->sc_link, target_id, our_id);
	hscb->lun = xs->sc_link->lun;
	if (xs->flags & SCSI_RESET) {
		hscb->cdb_len = 0;
		scb->flags |= SCB_DEVICE_RESET;
		hscb->control |= MK_MESSAGE;
		hscb->task_management = SIU_TASKMGMT_LUN_RESET;
		ahd_execute_scb(scb, NULL, 0);
	} else {
		hscb->task_management = 0;
		ahd_setup_data(ahd, xs, scb);
	}
}
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	struct scsi_xfer *xs = scb->xs;
	int s;

	/* XXX in ahc there is some bus_dmamap_sync(PREREAD|PREWRITE); */

	LIST_REMOVE(scb, pending_links);

	timeout_del(&xs->stimeout);

	if (xs->datalen) {
		int op;

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
		    scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
	}

	/* Translate the CAM status code to a SCSI error code. */
	switch (xs->error) {
	case CAM_SCSI_STATUS_ERROR:
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
		switch (xs->status) {
		case SCSI_TASKSET_FULL:
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		case SCSI_CHECK:
		case SCSI_TERMINATED:
			if ((scb->flags & SCB_SENSE) == 0) {
				/* CHECK on CHECK? */
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;
			break;
		default:
			xs->error = XS_NOERROR;
			break;
		}
		break;
	case CAM_BUSY:
	case CAM_REQUEUE_REQ:
		xs->error = XS_BUSY;
		break;
	case CAM_CMD_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	case CAM_BDR_SENT:
	case CAM_SCSI_BUS_RESET:
		xs->error = XS_RESET;
		break;
	case CAM_SEL_TIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state. */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&xs->sense, 0, sizeof(struct scsi_sense_data));
		memcpy(&xs->sense, ahd_get_sense_buf(ahd, scb),
		    sizeof(struct scsi_sense_data));
		xs->error = XS_SENSE;
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int32_t len;

		siu = (struct scsi_status_iu_header *)scb->sense_data;
		len = SIU_SENSE_LENGTH(siu);
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, SIU_SENSE_DATA(siu),
		    ulmin(len, sizeof(xs->sense)));
		xs->error = XS_SENSE;
	}

	ahd_lock(ahd, &s);
	ahd_free_scb(ahd, scb);
	scsi_done(xs);
	ahd_unlock(ahd, &s);
}
void
wdc_atapi_send_cmd(struct scsi_xfer *sc_xfer)
{
	struct atapiscsi_softc *as = sc_xfer->sc_link->adapter_softc;
	struct channel_softc *chp = as->chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[as->drive];
	struct wdc_xfer *xfer;
	int s;
	int idx;

	WDCDEBUG_PRINT(("wdc_atapi_send_cmd %s:%d:%d start\n",
	    chp->wdc->sc_dev.dv_xname, chp->channel, as->drive),
	    DEBUG_XFERS);

	if (sc_xfer->sc_link->target != 0) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		scsi_done(sc_xfer);
		return;
	}

	xfer = sc_xfer->io;
	wdc_scrub_xfer(xfer);
	if (sc_xfer->flags & SCSI_POLL)
		xfer->c_flags |= C_POLL;
	xfer->drive = as->drive;
	xfer->c_flags |= C_ATAPI;
	xfer->cmd = sc_xfer;
	xfer->databuf = sc_xfer->data;
	xfer->c_bcount = sc_xfer->datalen;
	xfer->c_start = wdc_atapi_start;
	xfer->c_intr = wdc_atapi_intr;

	timeout_set(&xfer->atapi_poll_to, wdc_atapi_timer_handler, chp);

	WDCDEBUG_PRINT(("wdc_atapi_send_cmd %s:%d:%d ",
	    chp->wdc->sc_dev.dv_xname, chp->channel, as->drive),
	    DEBUG_XFERS | DEBUG_ERRORS);

	for (idx = 0; idx < sc_xfer->cmdlen; idx++) {
		WDCDEBUG_PRINT((" %02x",
		    ((unsigned char *)sc_xfer->cmd)[idx]),
		    DEBUG_XFERS | DEBUG_ERRORS);
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_XFERS | DEBUG_ERRORS);

	s = splbio();

	if (drvp->atapi_cap & ACAP_DSC) {
		WDCDEBUG_PRINT(("about to send cmd 0x%x ",
		    sc_xfer->cmd->opcode), DEBUG_DSC);
		switch (sc_xfer->cmd->opcode) {
		case READ:
		case WRITE:
			xfer->c_flags |= C_MEDIA_ACCESS;

			/*
			 * If we are not in buffer availability mode,
			 * we limit the first request to 0 bytes, which
			 * gets us into buffer availability mode without
			 * holding the bus.
			 */
			if (!(drvp->drive_flags & DRIVE_DSCBA)) {
				xfer->c_bcount = 0;
				xfer->transfer_len =
				    _3btol(((struct scsi_rw_tape *)
				    sc_xfer->cmd)->len);
				_lto3b(0, ((struct scsi_rw_tape *)
				    sc_xfer->cmd)->len);
				xfer->c_done = wdc_atapi_tape_done;
				WDCDEBUG_PRINT(
				    ("R/W in completion mode, do 0 blocks\n"),
				    DEBUG_DSC);
			} else
				WDCDEBUG_PRINT(("R/W %d blocks %d bytes\n",
				    _3btol(((struct scsi_rw_tape *)
				    sc_xfer->cmd)->len),
				    sc_xfer->datalen), DEBUG_DSC);

			/*
			 * DSC will change to buffer availability mode.
			 * We reflect this in wdc_atapi_intr.
			 */
			break;

		case ERASE:		/* Media access commands */
		case LOAD:
		case REWIND:
		case SPACE:
		case WRITE_FILEMARKS:
#if 0
		case LOCATE:
		case READ_POSITION:
#endif
			xfer->c_flags |= C_MEDIA_ACCESS;
			break;

		default:
			WDCDEBUG_PRINT(("no media access\n"), DEBUG_DSC);
		}
	}

	wdc_exec_xfer(chp, xfer);
	splx(s);
}