/*
 * mpt_cam_attach:
 *
 *	Attach the controller to CAM: allocate a device queue and a SIM,
 *	register our single bus, and build a wildcard path for it.
 *	On any failure everything acquired so far is released and the
 *	function returns without setting mpt->sim.
 */
void
mpt_cam_attach(mpt_softc_t *mpt)
{
	struct cam_devq *simq;
	struct cam_sim *camsim;
	int nslots;

	mpt->bus = 0;

	/* Never expose more openings than the IOC granted us credits for. */
	nslots = MPT_MAX_REQUESTS(mpt);
	if (mpt->mpt_global_credits < nslots)
		nslots = mpt->mpt_global_credits;

	/*
	 * Create the device queue for our SIM(s).
	 */
	simq = cam_simq_alloc(nslots);
	if (simq == NULL)
		return;

	/*
	 * Construct our SIM entry.
	 */
	camsim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, nslots, simq);
	if (camsim == NULL) {
		cam_simq_free(simq);
		return;
	}

	/*
	 * Register exactly the bus.  Note cam_sim_free(sim, TRUE)
	 * also releases the devq, so simq needs no separate free
	 * from here on.
	 */
	if (xpt_bus_register(camsim, 0) != CAM_SUCCESS) {
		cam_sim_free(camsim, TRUE);
		return;
	}

	/* Wildcard path covering every target/lun on the bus. */
	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(camsim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(camsim));
		cam_sim_free(camsim, TRUE);
		return;
	}

	mpt->sim = camsim;
}
/*
 * mpt_scsipi_attach:
 *
 *	Hook the controller into the NetBSD scsipi framework: fill in
 *	the adapter and channel descriptors and attach the child
 *	scsibus via config_found().
 */
void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapter = &mpt->sc_adapter;
	struct scsipi_channel *channel = &mpt->sc_channel;
	const struct scsipi_bustype *bustype;
	int openings;

	mpt->bus = 0;	/* XXX ?? */

	/* Cap our command slots at the credits the IOC granted. */
	openings = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapter, 0, sizeof(*adapter));
	adapter->adapt_dev = mpt->sc_dev;
	adapter->adapt_nchannels = 1;
	adapter->adapt_openings = openings - 2;	/* Reserve 2 for driver use*/
	adapter->adapt_max_periph = openings - 2;
	adapter->adapt_request = mpt_scsipi_request;
	adapter->adapt_minphys = mpt_minphys;
	adapter->adapt_ioctl = mpt_ioctl;

	/* Pick the bus flavour matching the attached transport. */
	if (mpt->is_sas)
		bustype = &scsi_sas_bustype;
	else if (mpt->is_fc)
		bustype = &scsi_fc_bustype;
	else
		bustype = &scsi_bustype;

	/* Fill in the scsipi_channel. */
	memset(channel, 0, sizeof(*channel));
	channel->chan_adapter = adapter;
	channel->chan_bustype = bustype;
	channel->chan_channel = 0;
	channel->chan_flags = 0;
	channel->chan_nluns = 8;
	channel->chan_ntargets = mpt->mpt_max_devices;
	channel->chan_id = mpt->mpt_ini_id;

	/*
	 * Save the output of the config so we can rescan the bus in case of
	 * errors
	 */
	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
	    scsiprint);
}
/*
 * mpt_restart:
 *
 *	Recover from a fatal IOC error: soft-reset the chip (leaving it
 *	stopped so all requests are idle), bounce every outstanding
 *	command back to scsipi for re-queueing, and then re-initialize
 *	the IOC.
 *
 *	req0, if non-NULL, is the request that triggered the restart;
 *	its xfer is completed with whatever error is already set on it
 *	rather than XS_REQUEUE.
 *
 *	Fix: removed the mpt_free_request() call that directly
 *	contradicted the adjacent comment -- mpt_init() below rebuilds
 *	the entire request free list, and freeing the request here
 *	risked it being handed out again before the IOC was back up.
 */
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * Don't need to mpt_free_request() since mpt_init()
			 * below will free all requests anyway.
			 */
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}
/*
 * mpt_done:
 *
 *	Process one completed reply from the IOC.  "reply" is the raw
 *	value read from the reply FIFO: either a context reply (normal
 *	completion, request index encoded in the low bits) or an
 *	address reply (error/extra-status path, an offset into the
 *	reply frame area).  Recovers the originating request_t and
 *	scsipi_xfer, translates the IOCStatus into a scsipi error
 *	code, copies autosense data, and finally frees the request,
 *	returns the reply buffer to the IOC, completes the xfer, and
 *	(if a fatal status was seen) restarts the IOC.
 */
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0; /* nonzero if we need to restart the IOC*/

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) -- index is in the low bits */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) -- map it back to a virtual address */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				/* Dump the first 12 words of the frame. */
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x",
				    pReply[0], pReply[1], pReply[2],
				    pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x",
				    pReply[4], pReply[5], pReply[6],
				    pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x",
				    pReply[8], pReply[9], pReply[10],
				    pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function ==
	    MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		/* Log as much diagnostic state as we have. */
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    __func__, req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself. When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Call mpt_timeout which knows how to get the IOC back
		 * on its feet.
		 */
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			/* The IOC pre-fetched sense data into our buffer. */
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		mpt_prt(mpt, "%s: IOC has error - logging...\n", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}
/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the DMA-able memory shared with the IOC: one page of
 *	reply frames and the request/sense area, plus the driver-private
 *	request_t pool and a per-request data DMA map.
 *
 *	Returns 0 on success or an errno on failure; on failure every
 *	resource acquired so far is released via the goto-unwind chain.
 *
 *	Fix: the fail_6 path unmapped the request area with PAGE_SIZE,
 *	but it was mapped with MPT_REQ_MEM_SIZE(mpt); bus_dmamem_unmap()
 *	must be given the same size that bus_dmamem_map() mapped.
 */
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request pool\n");
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate reply area, error = %d\n", error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg,
	    PAGE_SIZE, (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map reply area, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create reply DMA map, error = %d\n", error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load reply DMA map, error = %d\n", error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to allocate request area, error = %d\n", error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to map request area, error = %d\n", error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to create request DMA map, error = %d\n", error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error_dev(mpt->sc_dev,
		    "unable to load request DMA map, error = %d\n", error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	/*
	 * Carve the request area into per-request slots; the sense
	 * buffer lives at the tail of each MPT_REQUEST_AREA slot.
	 */
	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS, MPT_SGL_MAX,
		    MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error_dev(mpt->sc_dev,
			    "unable to create req %d DMA map, error = %d\n",
			    i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	/* Destroy only the per-request maps created so far. */
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	/* Unmap size must match the bus_dmamem_map() size above. */
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}
/*
 * mpt_dma_mem_alloc:
 *
 *	FreeBSD flavour: allocate the request_t pool, create the parent
 *	and reply DMA tags, and allocate/load two pages of reply frame
 *	memory.  Returns 0 on success, 1 on failure.
 *
 *	NOTE(review): the failure paths below return 1 without releasing
 *	the request pool, tags, or reply memory already acquired --
 *	presumably the caller tears the softc down on failure; verify.
 */
static int
mpt_dma_mem_alloc(struct mpt_softc *mpt)
{
	size_t len;
	struct mpt_map_info mi;

	/* Check if we already have allocated the reply memory */
	if (mpt->reply_phys != 0) {
		return 0;
	}

	len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt);
#ifdef	RELENG_4
	/* Old releases lack M_ZERO; zero the pool by hand. */
	mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK);
	if (mpt->request_pool == NULL) {
		mpt_prt(mpt, "cannot allocate request pool\n");
		return (1);
	}
	memset(mpt->request_pool, 0, len);
#else
	mpt->request_pool = (request_t *)
	    malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mpt->request_pool == NULL) {
		mpt_prt(mpt, "cannot allocate request pool\n");
		return (1);
	}
#endif

	/*
	 * Create a parent dma tag for this device.
	 *
	 * Align at byte boundaries,
	 * Limit to 32-bit addressing for request/reply queues.
	 */
	if (mpt_dma_tag_create(mpt, /*parent*/bus_get_dma_tag(mpt->dev),
	    /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
	    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
	    /*nsegments*/BUS_SPACE_UNRESTRICTED,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    &mpt->parent_dmat) != 0) {
		mpt_prt(mpt, "cannot create parent dma tag\n");
		return (1);
	}

	/* Create a child tag for reply buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->reply_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for replies\n");
		return (1);
	}

	/* Allocate some DMA accessible memory for replies */
	if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply,
	    BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) {
		mpt_prt(mpt, "cannot allocate %lu bytes of reply memory\n",
		    (u_long) (2 * PAGE_SIZE));
		return (1);
	}

	mi.mpt = mpt;
	mi.error = 0;

	/*
	 * Load and lock it into "bus space"; the mpt_map_rquest callback
	 * fills in mi.phys (bus address) and mi.error.
	 */
	bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply,
	    2 * PAGE_SIZE, mpt_map_rquest, &mi, 0);

	if (mi.error) {
		mpt_prt(mpt, "error %d loading dma map for DMA reply queue\n",
		    mi.error);
		return (1);
	}
	mpt->reply_phys = mi.phys;

	return (0);
}