Example No. 1
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/* 
		* Don't try a hard reset since this mangles the PCI 
		* configuration registers.
		*/
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * mpt_free_request() isn't strictly needed here,
			 * since mpt_init() below will free all requests
			 * anyway.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}
Example No. 2
static void
mpt_link_peer(struct mpt_softc *mpt)
{
	struct mpt_softc *mpt2;

	if (mpt->unit == 0) {
		return;
	}
	/*
	 * XXX: depends on probe order
	 */
	mpt2 = (struct mpt_softc *)devclass_get_softc(mpt_devclass,
	    mpt->unit - 1);

	if (mpt2 == NULL) {
		return;
	}
	if (pci_get_vendor(mpt2->dev) != pci_get_vendor(mpt->dev)) {
		return;
	}
	if (pci_get_device(mpt2->dev) != pci_get_device(mpt->dev)) {
		return;
	}
	mpt->mpt2 = mpt2;
	mpt2->mpt2 = mpt;
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_prt(mpt, "linking with peer (mpt%d)\n",
		    device_get_unit(mpt2->dev));
	}
}
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}
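mpt_intr() is the handler that a bus front end registers when the device attaches; the PCI attachment code itself is not part of this excerpt. The fragment below is only a sketch of how a NetBSD PCI front end would typically hook it up. Here "pa" stands for the pci_attach_args given to the attach routine and "sc_ih" is a hypothetical softc member for the interrupt cookie; neither is taken from the actual mpt_pci.c.

	/* Sketch only; illustrative, not the driver's real attach code. */
	pci_intr_handle_t ih;

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error("mpt: couldn't map interrupt\n");
		return;
	}
	mpt->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO, mpt_intr, mpt);
	if (mpt->sc_ih == NULL) {
		aprint_error("mpt: couldn't establish interrupt\n");
		return;
	}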
Example No. 4
void
mpt_dump_request(struct mpt_softc *mpt, request_t *req)
{
    uint32_t *pReq = req->req_vbuf;
    int o;
    mpt_prt(mpt, "Send Request %d (%jx):",
            req->index, (uintmax_t) req->req_pbuf);
    for (o = 0; o < mpt->ioc_facts.RequestFrameSize; o++) {
        if ((o & 0x7) == 0) {
            mpt_prtc(mpt, "\n");
            mpt_prt(mpt, " ");
        }
        mpt_prtc(mpt, " %08x", pReq[o]);
    }
    mpt_prtc(mpt, "\n");
}
Example No. 5
/*
 * This routine is called if the 9x9 does not return completion status
 * for a command after a CAM specified time.
 */
static void
mpttimeout(void *arg)
{
	request_t *req;
	union ccb *ccb = arg;
	u_int32_t oseq;
	mpt_softc_t *mpt;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "bullet missed in timeout");
			MPT_UNLOCK(mpt);
			return;
		}
		mpt_prt(mpt, "bullet U-turned in timeout: got us");
	}
	mpt_prt(mpt,
	    "time out on request index = 0x%02x sequence = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status %08x; Mask %08x; Doorbell %08x",
		mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
		mpt_read(mpt, MPT_OFFSET_INTR_MASK),
		mpt_read(mpt, MPT_OFFSET_DOORBELL));
	printf("request state %s\n", mpt_req_state(req->debug));
	if (ccb != req->ccb) {
		printf("time out: ccb %p != req->ccb %p\n",
			ccb, req->ccb);
	}
	mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
	req->debug = REQ_TIMEOUT;
	req->ccb = NULL;
	req->link.sle_next = (void *) mpt;
	(void) timeout(mpttimeout2, (caddr_t)req, hz / 10);
	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	mpt->outofbeer = 0;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done(ccb);
	CAMLOCK_2_MPTLOCK(mpt);
	MPT_UNLOCK(mpt);
}
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt = DEV_TO_MPT(
	    periph->periph_channel->chan_adapter->adapt_dev);
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}
Example No. 7
void
mpt_dump_request(struct mpt_softc *mpt, request_t *req)
{
        uint32_t *pReq = req->req_vbuf;
	int o;
#if __FreeBSD_version >= 500000
	mpt_prt(mpt, "Send Request %d (%jx):",
	    req->index, (uintmax_t) req->req_pbuf);
#else
	mpt_prt(mpt, "Send Request %d (%llx):",
	    req->index, (unsigned long long) req->req_pbuf);
#endif
	for (o = 0; o < mpt->ioc_facts.RequestFrameSize; o++) {
		if ((o & 0x7) == 0) {
			mpt_prtc(mpt, "\n");
			mpt_prt(mpt, " ");
		}
		mpt_prtc(mpt, " %08x", pReq[o]);
	}
	mpt_prtc(mpt, "\n");
}
Example No. 8
void
mpt_dump_data(struct mpt_softc *mpt, const char *msg, void *addr, int len)
{
	int offset;
	uint8_t *cp = addr;
	mpt_prt(mpt, "%s:", msg);
	for (offset = 0; offset < len; offset++) {
		if ((offset & 0xf) == 0) {
			mpt_prtc(mpt, "\n");
		}
		mpt_prtc(mpt, " %02x", cp[offset]);
	}
	mpt_prtc(mpt, "\n");
}
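The line layout produced above is easy to verify outside the kernel. The following standalone program is an illustration only; it uses printf in place of mpt_prt/mpt_prtc (which also print a per-device prefix) and reproduces the same 16-bytes-per-line hex format on a made-up buffer:

#include <stdio.h>
#include <stdint.h>

/* Standalone illustration of the mpt_dump_data() output layout. */
static void
dump_data(const char *msg, const void *addr, int len)
{
	const uint8_t *cp = addr;
	int offset;

	printf("%s:", msg);
	for (offset = 0; offset < len; offset++) {
		if ((offset & 0xf) == 0)
			printf("\n");
		printf(" %02x", cp[offset]);
	}
	printf("\n");
}

int
main(void)
{
	uint8_t buf[24];
	int i;

	for (i = 0; i < (int)sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	dump_data("sample frame", buf, (int)sizeof(buf));
	return 0;
}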
Example No. 9
static int 
mpt_drain_queue(mpt_softc_t *mpt)
{
	int nrepl = 0;
	uint32_t reply;

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl);
}
Example No. 10
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

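		/*
		 * Too many segments to fit in the request frame's own SGL
		 * area: build the first MPT_NSGL_FIRST(mpt) - 1 simple
		 * elements in place, then chain out to additional SGE
		 * frames for the remaining segments.
		 */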
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32));
				} else {
Example No. 11
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0; /* nonzero if we need to restart the IOC */

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself.  When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Treat it as a timeout and restart the IOC; mpt_restart()
		 * below knows how to get it back on its feet.
		 */
		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		    "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && (le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
		mpt_prt(mpt, "%s: IOC has error - logging...", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}
Example No. 12
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		printf("mpt_timeout: NULL xfer for request index 0x%x, "
		    "sequence 0x%x\n", req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = (void *) periph->periph_channel->chan_adapter->adapt_dev;
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data since it appears it can
	 * sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}
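For completeness: mpt_timeout() only runs if the per-xfer callout armed when the command was submitted actually expires. The arming itself is not part of these excerpts; the fragment below is a sketch of the usual NetBSD form (assumed, not copied from the driver), relying on xs->timeout being in milliseconds and on the request being handed to the IOC from mpt_run_xfer():

	/* Sketch (assumed form): arm the timeout when the command is sent. */
	if ((xs->xs_control & XS_CTL_POLL) == 0)
		callout_reset(&xs->xs_callout, mstohz(xs->timeout),
		    mpt_timeout, req);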
Example No. 13
static int
mpt_dma_mem_alloc(struct mpt_softc *mpt)
{
	size_t len;
	struct mpt_map_info mi;

	/* Check whether we have already allocated the reply memory. */
	if (mpt->reply_phys != 0) {
		return 0;
	}

	len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt);
#ifdef	RELENG_4
	mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK);
	if (mpt->request_pool == NULL) {
		mpt_prt(mpt, "cannot allocate request pool\n");
		return (1);
	}
	memset(mpt->request_pool, 0, len);
#else
	mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mpt->request_pool == NULL) {
		mpt_prt(mpt, "cannot allocate request pool\n");
		return (1);
	}
#endif

	/*
	 * Create a parent dma tag for this device.
	 *
	 * Align at byte boundaries,
	 * Limit to 32-bit addressing for request/reply queues.
	 */
	if (mpt_dma_tag_create(mpt, /*parent*/bus_get_dma_tag(mpt->dev),
	    /*alignment*/1, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR,
	    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
	    /*nsegments*/BUS_SPACE_UNRESTRICTED,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    &mpt->parent_dmat) != 0) {
		mpt_prt(mpt, "cannot create parent dma tag\n");
		return (1);
	}

	/* Create a child tag for reply buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, 2 * PAGE_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->reply_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for replies\n");
		return (1);
	}

	/* Allocate some DMA accessible memory for replies */
	if (bus_dmamem_alloc(mpt->reply_dmat, (void **)&mpt->reply,
	    BUS_DMA_NOWAIT, &mpt->reply_dmap) != 0) {
		mpt_prt(mpt, "cannot allocate %lu bytes of reply memory\n",
		    (u_long) (2 * PAGE_SIZE));
		return (1);
	}

	mi.mpt = mpt;
	mi.error = 0;

	/* Load and lock it into "bus space" */
	bus_dmamap_load(mpt->reply_dmat, mpt->reply_dmap, mpt->reply,
	    2 * PAGE_SIZE, mpt_map_rquest, &mi, 0);

	if (mi.error) {
		mpt_prt(mpt, "error %d loading dma map for DMA reply queue\n",
		    mi.error);
		return (1);
	}
	mpt->reply_phys = mi.phys;

	return (0);
}
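The bus_dmamap_load() call above passes mpt_map_rquest as its callback, but that routine is not included in this excerpt. The sketch below shows what such a load callback typically does, under the assumption that struct mpt_map_info only needs the error code and the single segment's bus address, which is how mi.error and mi.phys are consumed above. The name example_map_rquest is made up to avoid confusing it with the driver's own function.

/* Sketch of a bus_dmamap_load() callback in the style used above. */
static void
example_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *mi = arg;

	/* Record the load status and the physical base of the region. */
	mi->error = error;
	if (error == 0 && nseg > 0)
		mi->phys = segs[0].ds_addr;
}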
Example No. 14
static int
mpt_pci_attach(device_t dev)
{
	struct mpt_softc *mpt;
	int		  iqd;
	uint32_t	  data, cmd;
	int		  mpt_io_bar, mpt_mem_bar;

	mpt  = (struct mpt_softc*)device_get_softc(dev);

	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC909_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
	case MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		mpt->is_fc = 1;
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
	case MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB:
		mpt->is_1078 = 1;
		/* FALLTHROUGH */
	case MPI_MANUFACTPAGE_DEVID_SAS1064:
	case MPI_MANUFACTPAGE_DEVID_SAS1064A:
	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
	case MPI_MANUFACTPAGE_DEVID_SAS1066:
	case MPI_MANUFACTPAGE_DEVID_SAS1066E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
	case MPI_MANUFACTPAGE_DEVID_SAS1068A_FB:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E_FB:
		mpt->is_sas = 1;
		break;
	default:
		mpt->is_spi = 1;
		break;
	}
	mpt->dev = dev;
	mpt->unit = device_get_unit(dev);
	mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT;
	mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT;
	mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT;
	mpt->verbose = MPT_PRT_NONE;
	mpt->role = MPT_ROLE_NONE;
	mpt->mpt_ini_id = MPT_INI_ID_NONE;
#ifdef __sparc64__
	if (mpt->is_spi)
		mpt->mpt_ini_id = OF_getscsinitid(dev);
#endif
	mpt_set_options(mpt);
	if (mpt->verbose == MPT_PRT_NONE) {
		mpt->verbose = MPT_PRT_WARN;
		/* Print INFO level (if any) if bootverbose is set */
		mpt->verbose += (bootverbose != 0)? 1 : 0;
	}
	/* Make sure memory access decoders are enabled */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "Memory accesses disabled");
		return (ENXIO);
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd |=
	    PCIM_CMD_SERRESPEN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_BIOS, 4);
	data &= ~PCIM_BIOS_ENABLE;
	pci_write_config(dev, PCIR_BIOS, data, 4);

	/*
	 * Is this part a dual?
	 * If so, link with our partner (if it's around yet).
	 */
	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
	case MPI_MANUFACTPAGE_DEVID_53C1030:
	case MPI_MANUFACTPAGE_DEVID_53C1030ZC:
		mpt_link_peer(mpt);
		break;
	default:
		break;
	}

	/*
	 * Figure out which are the I/O and MEM Bars
	 */
	data = pci_read_config(dev, PCIR_BAR(0), 4);
	if (PCI_BAR_IO(data)) {
		/* BAR0 is IO, BAR1 is memory */
		mpt_io_bar = 0;
		mpt_mem_bar = 1;
	} else {
		/* BAR0 is memory, BAR1 is IO */
		mpt_mem_bar = 0;
		mpt_io_bar = 1;
	}

	/*
	 * Set up register access.  PIO mode is required for
	 * certain reset operations (but must be disabled for
	 * some cards otherwise).
	 */
	mpt_io_bar = PCIR_BAR(mpt_io_bar);
	mpt->pci_pio_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &mpt_io_bar, RF_ACTIVE);
	if (mpt->pci_pio_reg == NULL) {
		if (bootverbose) {
			device_printf(dev,
			    "unable to map registers in PIO mode\n");
		}
	} else {
		mpt->pci_pio_st = rman_get_bustag(mpt->pci_pio_reg);
		mpt->pci_pio_sh = rman_get_bushandle(mpt->pci_pio_reg);
	}

	mpt_mem_bar = PCIR_BAR(mpt_mem_bar);
	mpt->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &mpt_mem_bar, RF_ACTIVE);
	if (mpt->pci_reg == NULL) {
		if (bootverbose || mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev,
			    "Unable to memory map registers.\n");
		}
		if (mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev, "Giving Up.\n");
			goto bad;
		}
		if (bootverbose) {
			device_printf(dev, "Falling back to PIO mode.\n");
		}
		mpt->pci_st = mpt->pci_pio_st;
		mpt->pci_sh = mpt->pci_pio_sh;
	} else {
		mpt->pci_st = rman_get_bustag(mpt->pci_reg);
		mpt->pci_sh = rman_get_bushandle(mpt->pci_reg);
	}

	/* Get a handle to the interrupt */
	iqd = 0;
	if (mpt->msi_enable) {
		/*
		 * First try to alloc an MSI-X message.  If that
		 * fails, then try to alloc an MSI message instead.
		 */
		if (pci_msix_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msix(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
		if (iqd == 0 && pci_msi_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msi(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
	}
	mpt->pci_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | (mpt->pci_msi_count ? 0 : RF_SHAREABLE));
	if (mpt->pci_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	MPT_LOCK_SETUP(mpt);

	/* Disable interrupts at the part */
	mpt_disable_ints(mpt);

	/* Register the interrupt handler */
	if (mpt_setup_intr(dev, mpt->pci_irq, MPT_IFLAGS, NULL, mpt_pci_intr,
	    mpt, &mpt->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/* Allocate dma memory */
	if (mpt_dma_mem_alloc(mpt)) {
		mpt_prt(mpt, "Could not allocate DMA memory\n");
		goto bad;
	}

#if 0
	/*
	 * Save the PCI config register values
 	 *
	 * Hard resets are known to screw up the BAR for diagnostic
	 * memory accesses (Mem1).
	 *
	 * Using Mem1 is known to make the chip stop responding to 
	 * configuration space transfers, so we need to save it now
	 */

	mpt_read_config_regs(mpt);
#endif

	/*
	 * Disable PIO until we need it
	 */
	if (mpt->is_sas) {
		pci_disable_io(dev, SYS_RES_IOPORT);
	}

	/* Initialize the hardware */
	if (mpt->disabled == 0) {
		if (mpt_attach(mpt) != 0) {
			goto bad;
		}
	} else {
		mpt_prt(mpt, "device disabled at user request\n");
		goto bad;
	}

	mpt->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, mpt_pci_shutdown,
	    dev, SHUTDOWN_PRI_DEFAULT);

	if (mpt->eh == NULL) {
		mpt_prt(mpt, "shutdown event registration failed\n");
		(void) mpt_detach(mpt);
		goto bad;
	}
	return (0);

bad:
	mpt_dma_mem_free(mpt);
	mpt_free_bus_resources(mpt);
	mpt_unlink_peer(mpt);

	MPT_LOCK_DESTROY(mpt);

	/*
	 * but return zero to preserve unit numbering
	 */
	return (0);
}
Example No. 15
/*
 * Callback routine from bus_dmamap_load(), or called directly in the
 * simple case.
 *
 * Takes the list of physical segments, builds the SGL for the SCSI I/O
 * command, and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req;
	union ccb *ccb;
	mpt_softc_t *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	SGE_SIMPLE32 *se;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_req = req->req_vbuf;

	if (error == 0 && nseg > MPT_SGL_MAX) {
		error = EFBIG;
	}

	if (error != 0) {
		if (error != EFBIG)
			mpt_prt(mpt, "bus_dmamap_load returned %d", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			ccb->ccb_h.status = CAM_DEV_QFRZN;
			if (error == EFBIG)
				ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
			else
				ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}
	
	if (nseg > MPT_NSGL_FIRST(mpt)) {
		int i, nleft = nseg;
		u_int32_t flags;
		bus_dmasync_op_t op;
		SGE_CHAIN32 *ce;

		mpt_req->DataLength = ccb->csio.dxfer_len;
		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) {
			u_int32_t tf;

			bzero(se, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (i == MPT_NSGL_FIRST(mpt) - 2) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			nleft -= 1;
		}

		/*
		 * Tell the IOC where to find the first chain element
		 */
		mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;

		/*
		 * Until we're finished with all segments...
		 */
		while (nleft) {
			int ntodo;
			/*
			 * Construct the chain element that point to the
			 * next segment.
			 */
			ce = (SGE_CHAIN32 *) se++;
			if (nleft > MPT_NSGL(mpt)) {
				ntodo = MPT_NSGL(mpt) - 1;
				ce->NextChainOffset = (MPT_RQSL(mpt) -
				    sizeof (SGE_SIMPLE32)) >> 2;
			} else {