/*
 * mpt_run_xfer:
 *
 *	Build an MPI SCSI I/O request for the given scsipi_xfer and map its
 *	data buffer for DMA, constructing the scatter/gather list (with chain
 *	elements when the segment count exceeds the space in the request
 *	frame).  On resource shortage or DMA-map failure the xfer is completed
 *	immediately with an error.
 *
 *	NOTE(review): this span is truncated in the visible chunk (the SGL
 *	chain-building loop continues past the end shown here).
 */
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	/* Allocate a request frame; pool access is protected at splbio. */
	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	/* Clamp the sense length to what the IOC-side buffer can hold. */
	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	/*
	 * LUN[1] carries the LUN number (single-level LUN addressing;
	 * byte 0 of the 8-byte LUN field stays zero from the memset).
	 */
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/*
	 * Set the queue behavior.  Tagged queueing is used unless this is
	 * a parallel-SCSI controller with tags disabled for this target.
	 */
	if (__predict_true((!mpt->is_scsi) ||
	    (mpt->mpt_tag_enable & (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			/*
			 * No tag requested: parallel SCSI goes untagged,
			 * other transports still get a simple tag.
			 */
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	/* Honor a disabled-disconnect setting for this target (SCSI only). */
	if (__predict_false(mpt->is_scsi &&
	    (mpt->mpt_disc_enable & (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ?
		     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ?
		     BUS_DMA_READ : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			/* Transient shortage -- the xfer may be retried. */
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			/* Release the request and complete with error. */
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			/*
			 * More segments than fit in the request frame:
			 * fill the in-frame SGEs, then chain to the
			 * request's external SGL space.
			 */
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Fill all but the last in-frame slot (chain goes there). */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					/*
					 * Another chain will follow; reserve
					 * the last slot of this SGL for it.
					 */
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt) *
					    sizeof(SGE_SIMPLE32));
				} else {
/*
 * Callback routine from "bus_dmamap_load" or in simple case called directly.
 *
 * Takes a list of physical segments and builds the SGL for SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 *
 * NOTE(review): this span is truncated in the visible chunk (the SGL
 * chain-building loop continues past the end shown here).
 */
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req;
	union ccb *ccb;
	mpt_softc_t *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	SGE_SIMPLE32 *se;

	/* Recover the softc and request from the CCB private fields. */
	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_req = req->req_vbuf;

	/* Too many segments for any SGL we can build: treat as EFBIG. */
	if (error == 0 && nseg > MPT_SGL_MAX) {
		error = EFBIG;
	}

	if (error != 0) {
		if (error != EFBIG)
			mpt_prt(mpt, "bus_dmamap_load returned %d", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			/* Freeze the device queue and mark the failure. */
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			ccb->ccb_h.status = CAM_DEV_QFRZN;
			if (error == EFBIG)
				ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
			else
				ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
		/* Return the request frame under the MPT lock. */
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	if (nseg > MPT_NSGL_FIRST(mpt)) {
		/*
		 * More segments than fit in the request frame: fill the
		 * in-frame SGEs, then chain to external SGL space.
		 */
		int i, nleft = nseg;
		u_int32_t flags;
		bus_dmasync_op_t op;
		SGE_CHAIN32 *ce;

		mpt_req->DataLength = ccb->csio.dxfer_len;
		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

		/* Fill all but the last in-frame slot (chain goes there). */
		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
		     i++, se++, dm_segs++) {
			u_int32_t tf;

			bzero(se, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (i == MPT_NSGL_FIRST(mpt) - 2) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			nleft -= 1;
		}

		/*
		 * Tell the IOC where to find the first chain element
		 */
		mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;

		/*
		 * Until we're finished with all segments...
		 */
		while (nleft) {
			int ntodo;

			/*
			 * Construct the chain element that points to the
			 * next segment.
			 */
			ce = (SGE_CHAIN32 *) se++;
			if (nleft > MPT_NSGL(mpt)) {
				/*
				 * Another chain will follow; reserve the
				 * last slot of this SGL for it.
				 */
				ntodo = MPT_NSGL(mpt) - 1;
				ce->NextChainOffset = (MPT_RQSL(mpt) -
				    sizeof (SGE_SIMPLE32)) >> 2;
			} else {