/*
 * Print the "Check Condition" header, the CDB that caused it, and the
 * decoded sense data for the given transfer.  Always returns 1.
 */
int
scsipi_print_sense_real(struct scsipi_xfer *xs, int verbosity)
{
	struct scsipi_periph *periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf(" Check Condition on CDB: ");
	scsipi_print_cdb(xs->cmd);
	printf("\n");
	scsipi_print_sense_data(&xs->sense.scsi_sense, verbosity);

	return 1;
}
/* Example #2 (0) */
/*
 * used by specific sci controller
 *
 * it appears that the higher level code does nothing with LUN's
 * so I will too.  I could plug it in, however so could they
 * in scsi_scsipi_cmd().
 */
void
sci_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
                   void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct sci_softc *dev = device_private(chan->chan_adapter->adapt_dev);
	int flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		/* UIO-based data transfer is not supported by this driver. */
		if (flags & XS_CTL_DATA_UIO)
			panic("sci: scsi data uio requested");

		s = splbio();

		/* A polled command cannot wait behind an active one. */
		if (dev->sc_xs && (flags & XS_CTL_POLL))
			panic("sci_scsicmd: busy");

#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (dev->sc_xs) {
			scsipi_printaddr(periph);
			printf("unable to allocate scb\n");
			/* was "sea_scsipi_request" -- wrong driver name */
			panic("sci_scsipi_request");
		}
#endif

		dev->sc_xs = xs;
		splx(s);

		/*
		 * nothing is pending do it now.
		 */
		sci_donextcmd(dev);

		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Resources are fixed at attach time; nothing to grow. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* Transfer-mode negotiation is not implemented. */
		return;
	}
}
/*
 * mpt_timeout:
 *
 *	Callout handler for a SCSI command that has not completed in time.
 *	First polls the interrupt handler once in case the completion was
 *	simply missed; if the request's sequence number changed, the command
 *	is declared recovered.  Otherwise the IOC/request state is dumped,
 *	the request is freed and the transfer fails with XS_TIMEOUT.
 */
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt = DEV_TO_MPT(
	    periph->periph_channel->chan_adapter->adapt_dev);
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	/*
	 * Snapshot the sequence number, then poll: if the reply was
	 * sitting in the queue (missed interrupt), mpt_intr() will have
	 * advanced req->sequence and we can bail out quietly.
	 */
	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	/* Genuine timeout: dump controller and request state. */
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}
/*
 * Debug helper: print the raw CDB bytes of a transfer, comma-separated,
 * prefixed by the periph address.  A reset request carries no CDB, so a
 * "-RESET-" marker is printed instead.
 */
static void
ncr5380_show_scsi_cmd(struct scsipi_xfer *xs)
{
	u_char	*cdb = (u_char *) xs->cmd;
	int	idx;

	scsipi_printaddr(xs->xs_periph);
	if (xs->xs_control & XS_CTL_RESET) {
		printf("-RESET-\n");
		return;
	}
	for (idx = 0; idx < xs->cmdlen; idx++) {
		if (idx)
			printf(",");
		printf("%x", cdb[idx]);
	}
	printf("-\n");
}
/* Pseudo strategy function
 * Called by scsipi_do_ioctl() via physio/physstrat if there is to
 * be data transfered, and directly if there is no data transfer.
 *
 * Should I reorganize this so it returns to physio instead
 * of sleeping in scsiio_scsipi_cmd?  Is there any advantage, other
 * than avoiding the probable duplicate wakeup in iodone? [PD]
 *
 * No, seems ok to me... [JRE]
 * (I don't see any duplicate wakeups)
 *
 * Can't be used with block devices or raw_read/raw_write directly
 * from the cdevsw/bdevsw tables because they couldn't have added
 * the screq structure. [JRE]
 */
/*
 * scsistrategy:
 *
 *	Pseudo strategy function for user SCSI ioctls.  Locates the queued
 *	scsireq_t matching the buf, validates it against what physio gave
 *	us, translates the request flags, and issues the command via
 *	scsipi_command().  All exits report status through biodone().
 */
static void
scsistrategy(struct buf *bp)
{
	struct scsi_ioctl *si;
	scsireq_t *screq;
	struct scsipi_periph *periph;
	int error;
	int flags = 0;

	si = si_find(bp);
	if (si == NULL) {
		printf("scsistrategy: "
		    "No matching ioctl request found in queue\n");
		error = EINVAL;
		goto done;
	}
	screq = &si->si_screq;
	periph = si->si_periph;
	SC_DEBUG(periph, SCSIPI_DB2, ("user_strategy\n"));

	/*
	 * We're in trouble if physio tried to break up the transfer.
	 */
	if (bp->b_bcount != screq->datalen) {
		scsipi_printaddr(periph);
		printf("physio split the request.. cannot proceed\n");
		error = EIO;
		goto done;
	}

	/* A zero timeout would hang the command forever; reject it. */
	if (screq->timeout == 0) {
		error = EINVAL;
		goto done;
	}

	/* The CDB must fit in a scsipi_generic command block. */
	if (screq->cmdlen > sizeof(struct scsipi_generic)) {
		scsipi_printaddr(periph);
		printf("cmdlen too big\n");
		error = EFAULT;
		goto done;
	}

	/* Translate the user's request flags into xfer control flags. */
	if ((screq->flags & SCCMD_READ) && screq->datalen > 0)
		flags |= XS_CTL_DATA_IN;
	if ((screq->flags & SCCMD_WRITE) && screq->datalen > 0)
		flags |= XS_CTL_DATA_OUT;
	if (screq->flags & SCCMD_TARGET)
		flags |= XS_CTL_TARGET;
	if (screq->flags & SCCMD_ESCAPE)
		flags |= XS_CTL_ESCAPE;

	error = scsipi_command(periph, (void *)screq->cmd, screq->cmdlen,
	    (void *)bp->b_data, screq->datalen,
	    0, /* user must do the retries *//* ignored */
	    screq->timeout, bp, flags | XS_CTL_USERCMD);

done:
	if (error)
		bp->b_resid = bp->b_bcount;	/* nothing was transferred */
	bp->b_error = error;
	biodone(bp);
	return;
}
/*
 * We let the user interpret his own sense in the generic scsi world.
 * This routine is called at interrupt time if the XS_CTL_USERCMD bit was set
 * in the flags passed to scsi_scsipi_cmd(). No other completion processing
 * takes place, even if we are running over another device driver.
 * The lower level routines that call us here, will free the xs and restart
 * the device's queue if such exists.
 */
/*
 * scsipi_user_done:
 *
 *	Completion handler for XS_CTL_USERCMD transfers: translate the
 *	xfer's status/error into the scsireq_t that scsistrategy() queued,
 *	copying back sense data where available.  No other completion
 *	processing takes place here (see the comment above).
 */
void
scsipi_user_done(struct scsipi_xfer *xs)
{
	struct buf *bp;
	struct scsi_ioctl *si;
	scsireq_t *screq;
	struct scsipi_periph *periph = xs->xs_periph;
	int s;

	bp = xs->bp;
#ifdef DIAGNOSTIC
	if (bp == NULL) {
		scsipi_printaddr(periph);
		printf("user command with no buf\n");
		panic("scsipi_user_done");
	}
#endif
	si = si_find(bp);
#ifdef DIAGNOSTIC
	if (si == NULL) {
		scsipi_printaddr(periph);
		printf("user command with no ioctl\n");
		panic("scsipi_user_done");
	}
#endif

	screq = &si->si_screq;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("user-done\n"));

	screq->retsts = 0;
	screq->status = xs->status;
	switch (xs->error) {
	case XS_NOERROR:
		SC_DEBUG(periph, SCSIPI_DB3, ("no error\n"));
		screq->datalen_used =
		    xs->datalen - xs->resid;	/* probably rubbish */
		screq->retsts = SCCMD_OK;
		break;
	case XS_SENSE:
		SC_DEBUG(periph, SCSIPI_DB3, ("have sense\n"));
		screq->senselen_used = min(sizeof(xs->sense.scsi_sense),
		    SENSEBUFLEN);
		/*
		 * Copy only the bytes we actually have (senselen_used);
		 * using the caller-supplied senselen could overread the
		 * kernel sense buffer.
		 */
		memcpy(screq->sense, &xs->sense.scsi_sense,
		    screq->senselen_used);
		screq->retsts = SCCMD_SENSE;
		break;
	case XS_SHORTSENSE:
		SC_DEBUG(periph, SCSIPI_DB3, ("have short sense\n"));
		screq->senselen_used = min(sizeof(xs->sense.atapi_sense),
		    SENSEBUFLEN);
		/*
		 * Copy the ATAPI sense data that senselen_used was
		 * computed from (the old code copied scsi_sense here).
		 */
		memcpy(screq->sense, &xs->sense.atapi_sense,
		    screq->senselen_used);
		screq->retsts = SCCMD_UNKNOWN; /* XXX need a shortsense here */
		break;
	case XS_DRIVER_STUFFUP:
		scsipi_printaddr(periph);
		printf("passthrough: adapter inconsistency\n");
		screq->retsts = SCCMD_UNKNOWN;
		break;
	case XS_SELTIMEOUT:
		SC_DEBUG(periph, SCSIPI_DB3, ("seltimeout\n"));
		screq->retsts = SCCMD_TIMEOUT;
		break;
	case XS_TIMEOUT:
		SC_DEBUG(periph, SCSIPI_DB3, ("timeout\n"));
		screq->retsts = SCCMD_TIMEOUT;
		break;
	case XS_BUSY:
		SC_DEBUG(periph, SCSIPI_DB3, ("busy\n"));
		screq->retsts = SCCMD_BUSY;
		break;
	default:
		scsipi_printaddr(periph);
		printf("unknown error category %d from adapter\n",
		    xs->error);
		screq->retsts = SCCMD_UNKNOWN;
		break;
	}

	/* Async transfers own their xfer structure; return it now. */
	if (xs->xs_control & XS_CTL_ASYNC) {
		s = splbio();
		scsipi_put_xs(xs);
		splx(s);
	}
}
/*
 * mpt_done:
 *
 *	Process one completion from the IOC.  "reply" is either a context
 *	reply (request index, the normal success path) or an address reply
 *	(a reply frame with full error/status information).  Dispatches on
 *	the original request's Function code, translates IOCStatus into a
 *	scsipi error for SCSI I/O requests, copies autosense data back,
 *	frees the request/reply resources and completes the xfer.
 */
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0; /* nonzero if we need to restart the IOC*/

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	/* The command completed; cancel its timeout callout. */
	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	/* Address reply: translate IOCStatus into a scsipi error code. */
	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself. When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.
		 * Call mpt_timeout which knows how to get the IOC back
		 * on its feet.
		 */
		 mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		     "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	/* Copy back autosense data if the IOC collected it for us. */
	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
	MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		mpt_prt(mpt, "%s: IOC has error - logging...\n", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}
/*
 * mpt_timeout:
 *
 *	Command timeout handler.  First polls the interrupt handler and
 *	then drains the reply queue, since the IOC can apparently fail
 *	to raise interrupts under heavy load; if the request's sequence
 *	number advanced, the command completed and we bail out.  A
 *	genuinely stuck request gets its state dumped, the transfer is
 *	marked XS_TIMEOUT and the IOC is restarted with that request.
 */
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	mpt_softc_t *mpt;
	uint32_t oseq;
	int s, nrepl = 0;

	if (req->xfer == NULL) {
		/* was misspelled "sequenc" */
		printf("mpt_timeout: NULL xfer for request index 0x%x, sequence 0x%x\n",
		    req->index, req->sequence);
		return;
	}
	xs = req->xfer;
	periph = xs->xs_periph;
	mpt = (void *) periph->periph_channel->chan_adapter->adapt_dev;
	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	/* Poll once: a changed sequence number means the command finished. */
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt->success++;
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}

	/*
	 * Ensure the IOC is really done giving us data since it appears it can
	 * sometimes fail to give us interrupts under heavy load.
	 */
	nrepl = mpt_drain_queue(mpt);
	if (nrepl) {
		mpt_prt(mpt, "mpt_timeout: recovered %d commands", nrepl);
	}

	if (req->sequence != oseq) {
		mpt->success++;
		splx(s);
		return;
	}

	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	xs->error = XS_TIMEOUT;
	splx(s);
	mpt_restart(mpt, req);
}
/*
 * siop_setuptables:
 *
 *	Prepare the script tables before sending a command: build the
 *	IDENTIFY (and optional tag) message-out bytes, kick off PPR/WDTR/
 *	SDTR negotiation for a target still in TARST_ASYNC state, and fill
 *	in the command and data DMA descriptors from the loaded dmamaps.
 */
void
siop_setuptables(struct siop_common_cmd *siop_cmd)
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;

	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (generate false SCSI gross error).
		 * The FreeBSD sym driver has comments about it but their
		 * workaround (disable SCSI gross error reporting) doesn't
		 * work with my adapter. So disable disconnect when not
		 * wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		/* Tagged command requested; the target must support it. */
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
	/*
	 * Still asynchronous: append the appropriate negotiation message
	 * (PPR for DT-capable LVD targets, else WDTR, else SDTR), or just
	 * settle for async if the target supports none of them.
	 */
	if (sc->targets[target]->status == TARST_ASYNC) {
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			(sc->maxoff > 31) ? 31 :  sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */

	/* Command descriptor: one contiguous segment from dmamap_cmd. */
	siop_cmd->siop_tables->cmd.count =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	/* Data descriptors: one table entry per DMA segment. */
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    siop_htoc32(sc,
				siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    siop_htoc32(sc,
				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}
/* Example #10 (0) */
/*
 * vioscsi_scsipi_request:
 *
 *	scsipi request entry point for the virtio SCSI host adapter.
 *	Only ADAPTER_REQ_RUN_XFER is handled: a vioscsi_req is allocated,
 *	the virtio-scsi request header (LUN field + CDB) is filled in, the
 *	data buffer is DMA-loaded, and header/data/response descriptors
 *	are enqueued on the request virtqueue.  Polled (XS_CTL_POLL)
 *	transfers spin on the interrupt handler until completion or a
 *	fixed ~1s timeout.
 */
static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}

	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	/* Slot number doubles as the request's index in sc_reqs. */
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	/*
	 * NOTE(review): scsipi targets appear to be offset by one from
	 * virtio targets here (target - 1 below) -- confirm against the
	 * attach code that numbers the targets.
	 */
	req->lun[1] = periph->periph_target - 1;
	/* 0x40 marks a flat-addressing single-level LUN (14-bit LUN). */
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		/* Transient; the mid-layer will retry later. */
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
	stuffup:
		/* Unrecoverable request error. */
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		// XXX: free req?
		scsipi_done(xs);
		return;
	}

	/* Header + response descriptors, plus one per data DMA segment. */
	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	/* Sync request header (device reads) and response area (device writes). */
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
            sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	/*
	 * Descriptor order: request header, then write data (device-readable),
	 * then response header, then read data (device-writable).
	 */
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
            sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
            sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	/* Poll the interrupt handler; vr_xs is cleared on completion. */
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}