Example #1
void
iscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
					 void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_xfer *xs;
	session_t *session;
	int flags;
	struct scsipi_xfer_mode *xm;

	session = (session_t *) adapt;	/* adapter is first field in session */

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		DEB(9, ("ISCSI: scsipi_request RUN_XFER\n"));
		xs = arg;
		flags = xs->xs_control;

		if ((flags & XS_CTL_POLL) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			DEBOUT(("Run Xfer request with polling\n"));
			scsipi_done(xs);
			return;
		}
		/*
		 * NOTE: It appears that XS_CTL_DATA_UIO is not actually used
		 * anywhere.  Since it really would complicate matters to
		 * handle offsets into scatter-gather lists, and a number of
		 * other drivers don't handle uio-based data either,
		 * XS_CTL_DATA_UIO isn't implemented in this driver (at least
		 * for now).
		 */
		if (flags & XS_CTL_DATA_UIO) {
			xs->error = XS_DRIVER_STUFFUP;
			DEBOUT(("Run Xfer with data in UIO\n"));
			scsipi_done(xs);
			return;
		}

		send_run_xfer(session, xs);
		DEB(9, ("scsipi_req returns\n"));
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		DEBOUT(("ISCSI: scsipi_request GROW_RESOURCES\n"));
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		DEB(5, ("ISCSI: scsipi_request SET_XFER_MODE\n"));
		xm = (struct scsipi_xfer_mode *)arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		return;

	default:
		break;
	}
	DEBOUT(("ISCSI: scsipi_request with invalid REQ code %d\n", req));
}
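
All of the examples in this listing implement the same scsipi(9) adapter contract: the midlayer passes ADAPTER_REQ_RUN_XFER to the driver's request hook, and the driver must complete every xfer exactly once through scsipi_done(), setting xs->error first on failure. A minimal sketch of that contract follows; "mydrv" and mydrv_start() are hypothetical, while the scsipi types, request codes, and calls are the real NetBSD API.

/*
 * Minimal sketch of the scsipi(9) completion contract (hypothetical
 * "mydrv" driver; mydrv_start() is an assumed submit helper).
 */
static void
mydrv_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct mydrv_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	struct scsipi_xfer *xs;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		if (mydrv_start(sc, xs) != 0) {
			/* On failure: set xs->error first... */
			xs->error = XS_RESOURCE_SHORTAGE;
			/* ...then complete the xfer exactly once. */
			scsipi_done(xs);
		}
		return;
	case ADAPTER_REQ_GROW_RESOURCES:
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Optional; several drivers below simply ignore these. */
		return;
	}
}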
Example #2
void
sci_scsidone(struct sci_softc *dev, int stat)
{
	struct scsipi_xfer *xs;

	xs = dev->sc_xs;
#ifdef DIAGNOSTIC
	if (xs == NULL)
		panic("sci_scsidone");
#endif
	xs->status = stat;
	if (stat == 0)
		xs->resid = 0;
	else {
		switch(stat) {
		case SCSI_CHECK:
			xs->resid = 0;
			/* FALLTHROUGH */
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		default:
			xs->error = XS_DRIVER_STUFFUP;
			QPRINTF(("sci_scsicmd() bad %x\n", stat));
			break;
		}
	}

	scsipi_done(xs);
}
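
The status-byte translation in sci_scsidone() recurs in nearly every driver in this listing. Factored out, the conventional mapping looks roughly like the sketch below; the helper itself is hypothetical, the SCSI_* and XS_* constants are the real scsipi(9) values.

/*
 * Sketch of the conventional status-to-error mapping (hypothetical
 * helper, real scsipi(9) constants).
 */
static void
map_scsi_status(struct scsipi_xfer *xs, int stat)
{
	xs->status = stat;
	switch (stat) {
	case SCSI_OK:			/* 0x00 */
		xs->resid = 0;
		break;
	case SCSI_CHECK:		/* 0x02: without autosense, XS_BUSY
					 * makes the midlayer fetch the sense
					 * data itself (cf. Example #18) */
	case SCSI_BUSY:			/* 0x08 */
	case SCSI_QUEUE_FULL:		/* 0x28 */
		xs->error = XS_BUSY;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}
	scsipi_done(xs);
}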
Example #3
static void
wdc_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	int drive = xfer->c_drive;

	ATADEBUG_PRINT(("wdc_atapi_done %s:%d:%d: flags 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags), DEBUG_XFERS);
	callout_stop(&chp->ch_callout);
	/* mark controller inactive and free the command */
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}

	ATADEBUG_PRINT(("wdc_atapi_done: scsipi_done\n"), DEBUG_XFERS);
	scsipi_done(sc_xfer);
	ATADEBUG_PRINT(("atastart from wdc_atapi_done, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	atastart(chp);
}
Example #4
extern inline void finish_req(SC_REQ *reqp)
{
	int			sps;
	struct scsipi_xfer	*xs = reqp->xs;

#ifdef REAL_DMA
	/*
	 * If we bounced, free the bounce buffer
	 */
	if (reqp->dr_flag & DRIVER_BOUNCING) 
		free_bounceb(reqp->bounceb);
#endif /* REAL_DMA */
#ifdef DBG_REQ
	if (dbg_target_mask & (1 << reqp->targ_id))
		show_request(reqp, "DONE");
#endif
#ifdef DBG_ERR_RET
	if (reqp->xs->error != 0)
		show_request(reqp, "ERR_RET");
#endif
	/*
	 * Return request to free-q
	 */
	sps = splbio();
	reqp->next = free_head;
	free_head  = reqp;
	splx(sps);

	xs->xs_status |= XS_STS_DONE;
	if (!(reqp->dr_flag & DRIVER_LINKCHK))
		scsipi_done(xs);
}
Example #5
void
icpsp_intr(struct icp_ccb *ic)
{
	struct scsipi_xfer *xs;
	struct icp_softc *icp;
	int soff;

#ifdef DIAGNOSTIC
	struct icpsp_softc *sc = device_private(ic->ic_dv);
#endif
	xs = ic->ic_context;
	icp = device_private(device_parent(ic->ic_dv));
	soff = ICP_SCRATCH_SENSE + ic->ic_ident *
	    sizeof(struct scsi_sense_data);

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("icpsp_intr\n"));

	bus_dmamap_sync(icp->icp_dmat, icp->icp_scr_dmamap, soff,
	    sizeof(xs->sense.scsi_sense), BUS_DMASYNC_POSTREAD);

	if (ic->ic_status == ICP_S_OK) {
		xs->status = SCSI_OK;
		xs->resid = 0;
	} else if (ic->ic_status != ICP_S_RAW_SCSI || icp->icp_info >= 0x100) {
		xs->error = XS_SELTIMEOUT;
		xs->resid = xs->datalen;
	} else {
		xs->status = icp->icp_info;

		switch (xs->status) {
		case SCSI_OK:
#ifdef DIAGNOSTIC
			printf("%s: error return (%d), but SCSI_OK?\n",
			    device_xname(sc->sc_dv), icp->icp_info);
#endif
			xs->resid = 0;
			break;
		case SCSI_CHECK:
			memcpy(&xs->sense.scsi_sense,
			    (char *)icp->icp_scr + soff,
			    sizeof(xs->sense.scsi_sense));
			xs->error = XS_SENSE;
			/* FALLTHROUGH */
		default:
			/*
			 * XXX Don't know how to get residual count.
			 */
			xs->resid = xs->datalen;
			break;
		}
	}

	if (xs->datalen != 0)
		icp_ccb_unmap(icp, ic);
	icp_ccb_free(icp, ic);
	scsipi_done(xs);
}
Example #6
static void
mpt_restart(mpt_softc_t *mpt, request_t *req0)
{
	int i, s, nreq;
	request_t *req;
	struct scsipi_xfer *xs;

	/* first, reset the IOC, leaving stopped so all requests are idle */
	if (mpt_soft_reset(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed");
		/*
		 * Don't try a hard reset since this mangles the PCI
		 * configuration registers.
		 */
		return;
	}

	/* Freeze the channel so scsipi doesn't queue more commands. */
	scsipi_channel_freeze(&mpt->sc_channel, 1);

	/* Return all pending requests to scsipi and de-allocate them. */
	s = splbio();
	nreq = 0;
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		req = &mpt->request_pool[i];
		xs = req->xfer;
		if (xs != NULL) {
			if (xs->datalen != 0)
				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
			req->xfer = NULL;
			callout_stop(&xs->xs_callout);
			if (req != req0) {
				nreq++;
				xs->error = XS_REQUEUE;
			}
			scsipi_done(xs);
			/*
			 * Don't need to mpt_free_request() since mpt_init()
			 * below will free all requests anyway.
			 */
			mpt_free_request(mpt, req);
		}
	}
	splx(s);
	if (nreq > 0)
		mpt_prt(mpt, "re-queued %d requests", nreq);

	/* Re-initialize the IOC (which restarts it). */
	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
		mpt_prt(mpt, "restart succeeded");
	/* else error message already printed */

	/* Thaw the channel, causing scsipi to re-queue the commands. */
	scsipi_channel_thaw(&mpt->sc_channel, 1);
}
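
The freeze/requeue/thaw sequence above is the standard scsipi recovery idiom: XS_REQUEUE asks the midlayer to re-issue a command instead of failing it, and the freeze keeps new commands from racing the reset. Reduced to a skeleton (the pending-xfer array is a hypothetical stand-in for the driver's own bookkeeping; the scsipi calls and XS_REQUEUE are real):

/*
 * Skeleton of the recover-and-requeue idiom from mpt_restart().
 */
static void
hba_restart(struct scsipi_channel *chan, struct scsipi_xfer **pending, int n)
{
	int i;

	scsipi_channel_freeze(chan, 1);		/* hold off new commands */
	for (i = 0; i < n; i++) {
		if (pending[i] == NULL)
			continue;
		callout_stop(&pending[i]->xs_callout);
		pending[i]->error = XS_REQUEUE;	/* re-issue, don't fail */
		scsipi_done(pending[i]);
		pending[i] = NULL;
	}
	/* ...reset and re-initialize the controller here... */
	scsipi_channel_thaw(chan, 1);		/* re-queues the xfers */
}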
Example #7
File: esc.c Project: ryo/netbsd-src
/*
 * used by specific esc controller
 */
void
esc_scsi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
 								void *arg)
{
	struct scsipi_xfer *xs;
	struct esc_softc	*dev = device_private(chan->chan_adapter->adapt_dev);
	struct scsipi_periph	*periph;
	struct esc_pending	*pendp;
	int			 flags, s, target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;
		target = periph->periph_target;

		if (flags & XS_CTL_DATA_UIO)
			panic("esc: scsi data uio requested");

		if ((flags & XS_CTL_POLL) && (dev->sc_flags & ESC_ACTIVE))
			panic("esc_scsicmd: busy");

/* Get hold of an esc_pending block. */
		s = splbio();
		pendp = dev->sc_xs_free.tqh_first;
		if (pendp == NULL) {
			splx(s);
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		TAILQ_REMOVE(&dev->sc_xs_free, pendp, link);
		pendp->xs = xs;
		splx(s);


/* If the chip is busy OR the unit is busy, we have to wait for our turn. */
		if ((dev->sc_flags & ESC_ACTIVE) ||
		    (dev->sc_nexus[target].flags & ESC_NF_UNIT_BUSY)) {
			s = splbio();
			TAILQ_INSERT_TAIL(&dev->sc_xs_pending, pendp, link);
			splx(s);
		} else
			esc_donextcmd(dev, pendp);

		return;
	case ADAPTER_REQ_GROW_RESOURCES:
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}

}
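
The free and pending queues manipulated above are ordinary TAILQs of esc_pending blocks hung off the softc. Inferred from the usage here and in Example #9 (not taken from the actual esc header), their shape is roughly:

/*
 * Inferred shape of the queues used above (a sketch; requires
 * <sys/queue.h>, not the actual esc driver definitions).
 */
struct esc_pending {
	TAILQ_ENTRY(esc_pending) link;		/* pendp->link.tqe_next */
	struct scsipi_xfer	*xs;
};
/* in struct esc_softc:
 *	TAILQ_HEAD(, esc_pending) sc_xs_free;		free blocks
 *	TAILQ_HEAD(, esc_pending) sc_xs_pending;	awaiting chip/unit
 */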
Example #8
static void
vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
    struct vioscsi_req *vr)
{
	struct scsipi_xfer *xs = vr->vr_xs;
	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
	size_t sense_len;

	DPRINTF(("%s: enter\n", __func__));

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
	    XS2DMAPOST(xs));

	switch (vr->vr_res.response) {
	case VIRTIO_SCSI_S_OK:
		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		DPRINTF(("%s: bad target\n", __func__));
		memset(sense, 0, sizeof(*sense));
		sense->response_code = 0x70;
		sense->flags = SKEY_ILLEGAL_REQUEST;
		xs->error = XS_SENSE;
		xs->status = 0;
		xs->resid = 0;
		break;
	default:
		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
		break;
	}

	xs->status = vr->vr_res.status;
	xs->resid = vr->vr_res.residual;

	DPRINTF(("%s: done %d, %d, %d\n", __func__,
	    xs->error, xs->status, xs->resid));

	vr->vr_xs = NULL;
	vioscsi_req_put(sc, vr);
	scsipi_done(xs);
}
Example #9
File: esc.c Project: ryo/netbsd-src
void
esc_scsidone(struct esc_softc *dev, struct scsipi_xfer *xs, int stat)
{
	struct esc_pending	*pendp;
	int			 s;

	xs->status = stat;

	if (stat == 0)
		xs->resid = 0;
	else {
		switch(stat) {
		case SCSI_CHECK:
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		case -1:
			xs->error = XS_DRIVER_STUFFUP;
			QPRINTF(("esc_scsicmd() bad %x\n", stat));
			break;
		default:
			xs->error = XS_TIMEOUT;
			break;
		}
	}

/* Steal the next command from the queue so that one unit can't hog the bus. */
	s = splbio();
	pendp = dev->sc_xs_pending.tqh_first;
	while(pendp) {
		if (!(dev->sc_nexus[pendp->xs->xs_periph->periph_target].flags &
		      ESC_NF_UNIT_BUSY))
			break;
		pendp = pendp->link.tqe_next;
	}

	if (pendp != NULL) {
		TAILQ_REMOVE(&dev->sc_xs_pending, pendp, link);
	}

	splx(s);
	scsipi_done(xs);

	if (pendp)
		esc_donextcmd(dev, pendp);
}
Example #10
static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt = DEV_TO_MPT(
	    periph->periph_channel->chan_adapter->adapt_dev);
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}
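
mpt_timeout() is the expiry half of the per-xfer callout pattern: the submit path arms xs->xs_callout and normal completion (mpt_done(), Example #18) stops it. The arming side presumably looks like this in the submit routine (a sketch, not the verbatim driver source):

	/*
	 * Submit-side counterpart (sketch): arm the per-xfer callout
	 * unless the command is polled; mpt_done() stops it on normal
	 * completion.
	 */
	if ((xs->xs_control & XS_CTL_POLL) == 0)
		callout_reset(&xs->xs_callout, mstohz(xs->timeout),
		    mpt_timeout, req);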
Example #11
/*
 * iscsi_done:
 *
 * A CCB has completed execution.  Pass the status back to the
 * upper layer.
 */
void
iscsi_done(ccb_t *ccb)
{
	struct scsipi_xfer *xs = ccb->xs;
	/*DEBOUT (("iscsi_done\n")); */

	if (xs != NULL) {
		xs->resid = ccb->residual;

		switch (ccb->status) {
		case ISCSI_STATUS_SUCCESS:
			xs->error = 0;
			break;

		case ISCSI_STATUS_CHECK_CONDITION:
			xs->error = XS_SENSE;
#ifdef ISCSI_DEBUG
			{
				uint8_t *s = (uint8_t *) (&xs->sense);
				DEB(5, ("Scsipi_done, error=XS_SENSE, sense data=%02x "
						"%02x %02x %02x...\n",
						s[0], s[1], s[2], s[3]));
			}
#endif
			break;

		case ISCSI_STATUS_TARGET_BUSY:
			xs->error = XS_BUSY;
			break;

		case ISCSI_STATUS_SOCKET_ERROR:
		case ISCSI_STATUS_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}

		DEB(99, ("Calling scsipi_done (%p), err = %d\n", xs, xs->error));
		scsipi_done(xs);
		DEB(99, ("scsipi_done returned\n"));
	}
}
Example #12
static void
wdc_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
{
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;

	/* remove this command from xfer queue */
	switch (reason) {
	case KILL_GONE:
		sc_xfer->error = XS_DRIVER_STUFFUP;
		break;
	case KILL_RESET:
		sc_xfer->error = XS_RESET;
		break;
	default:
		printf("wdc_ata_bio_kill_xfer: unknown reason %d\n",
		    reason);
		panic("wdc_ata_bio_kill_xfer");
	}
	ata_free_xfer(chp, xfer);
	scsipi_done(sc_xfer);
}
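
wdc_atapi_kill_xfer() is never called directly; it is installed as the xfer's kill hook when the command is queued, so the ATA layer can fail drained or reset commands with the appropriate reason code. From wdc_atapi_scsipi_request() (Example #19):

	/* From wdc_atapi_scsipi_request() (Example #19): */
	xfer->c_start = wdc_atapi_start;
	xfer->c_intr = wdc_atapi_intr;
	xfer->c_kill_xfer = wdc_atapi_kill_xfer;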
Example #13
/*
 * Finalise a completed autosense operation
 */
Static void
umass_scsipi_sense_cb(struct umass_softc *sc, void *priv, int residue,
		      int status)
{
	struct scsipi_xfer *xs = priv;
	int s;

	DPRINTF(UDMASS_CMD,("umass_scsipi_sense_cb: xs=%p residue=%d "
		"status=%d\n", xs, residue, status));

	sc->sc_sense = 0;
	switch (status) {
	case STATUS_CMD_OK:
	case STATUS_CMD_UNKNOWN:
		/* getting sense data succeeded */
		if (residue == 0 || residue == 14)/* XXX */
			xs->error = XS_SENSE;
		else
			xs->error = XS_SHORTSENSE;
		break;
	default:
		DPRINTF(UDMASS_SCSI, ("%s: Autosense failed, status %d\n",
			device_xname(sc->sc_dev), status));
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	DPRINTF(UDMASS_CMD,("umass_scsipi_sense_cb: return xs->error=%d, "
		"xs->xs_status=0x%x xs->resid=%d\n", xs->error, xs->xs_status,
		xs->resid));

	s = splbio();
	KERNEL_LOCK(1, curlwp);
	scsipi_done(xs);
	KERNEL_UNLOCK_ONE(curlwp);
	splx(s);
}
Example #14
void
icpsp_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		     void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct icpsp_softc *sc;
	struct icp_rawcmd *rc;
	struct icp_softc *icp;
	struct icp_ccb *ic;
	int rv, flags, s, soff;

	sc = device_private(chan->chan_adapter->adapt_dev);
	icp = device_private(device_parent(sc->sc_dv));

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("icpsp_scsi_request run_xfer\n"));

		if ((flags & XS_CTL_RESET) != 0) {
			/* XXX Unimplemented. */
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			return;
		}

#if defined(ICP_DEBUG) || defined(SCSIDEBUG)
		if (xs->cmdlen > sizeof(rc->rc_cdb))
			panic("%s: CDB too large", device_xname(sc->sc_dv));
#endif

		/*
		 * Allocate a CCB.
		 */
		if (__predict_false((ic = icp_ccb_alloc(icp)) == NULL)) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		rc = &ic->ic_cmd.cmd_packet.rc;
		ic->ic_sg = rc->rc_sg;
		ic->ic_service = ICP_SCSIRAWSERVICE;
		soff = ICP_SCRATCH_SENSE + ic->ic_ident *
		    sizeof(struct scsi_sense_data);

		/*
		 * Build the command.  We don't need to actively prevent
		 * access to array components, since the controller kindly
		 * takes care of that for us.
		 */
		ic->ic_cmd.cmd_opcode = htole16(ICP_WRITE);
		memcpy(rc->rc_cdb, xs->cmd, xs->cmdlen);

		rc->rc_padding0 = 0;
		rc->rc_direction = htole32((flags & XS_CTL_DATA_IN) != 0 ?
		    ICP_DATA_IN : ICP_DATA_OUT);
		rc->rc_mdisc_time = 0;
		rc->rc_mcon_time = 0;
		rc->rc_clen = htole32(xs->cmdlen);
		rc->rc_target = periph->periph_target;
		rc->rc_lun = periph->periph_lun;
		rc->rc_bus = sc->sc_busno;
		rc->rc_priority = 0;
		rc->rc_sense_len = htole32(sizeof(xs->sense.scsi_sense));
		rc->rc_sense_addr =
		    htole32(soff + icp->icp_scr_seg[0].ds_addr);
		rc->rc_padding1 = 0;

		if (xs->datalen != 0) {
			rv = icp_ccb_map(icp, ic, xs->data, xs->datalen,
			   (flags & XS_CTL_DATA_IN) != 0 ? IC_XFER_IN :
			   IC_XFER_OUT);
			if (rv != 0) {
				icp_ccb_free(icp, ic);
				xs->error = XS_DRIVER_STUFFUP;
				scsipi_done(xs);
				return;
			}

			rc->rc_nsgent = htole32(ic->ic_nsgent);
			rc->rc_sdata = ~0;
			rc->rc_sdlen = htole32(xs->datalen);
		} else {
			rc->rc_nsgent = 0;
			rc->rc_sdata = 0;
			rc->rc_sdlen = 0;
		}

		ic->ic_cmdlen = (u_long)ic->ic_sg - (u_long)&ic->ic_cmd +
		    ic->ic_nsgent * sizeof(*ic->ic_sg);

		bus_dmamap_sync(icp->icp_dmat, icp->icp_scr_dmamap, soff,
		    sizeof(xs->sense.scsi_sense), BUS_DMASYNC_PREREAD);

		/*
		 * Fire it off to the controller.
		 */
		ic->ic_intr = icpsp_intr;
		ic->ic_context = xs;
		ic->ic_dv = sc->sc_dv;

		if ((flags & XS_CTL_POLL) != 0) {
			s = splbio();
			rv = icp_ccb_poll(icp, ic, xs->timeout);
			if (rv != 0) {
				if (xs->datalen != 0)
					icp_ccb_unmap(icp, ic);
				icp_ccb_free(icp, ic);
				xs->error = XS_TIMEOUT;
				scsipi_done(xs);

				/*
				 * XXX We're now in a bad way, because we
				 * don't know how to abort the command.
				 * That shouldn't matter too much, since
				 * polled commands won't be used while the
				 * system is running.
				 */
			}
			splx(s);
		} else
			icp_ccb_enqueue(icp, ic);

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Neither of these cases are supported, and neither of them
		 * is particularly relevant, since we have an abstract view
		 * of the bus; the controller takes care of all the nitty
		 * gritty.
		 */
		break;
	}
}
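
Note how the sense handling above pairs with icpsp_intr() in Example #5: the request path stages a per-CCB sense window in controller scratch memory and syncs it before firing the command; the completion path syncs the same window before copying the sense bytes out.

	/* request side (above), before the command is fired: */
	bus_dmamap_sync(icp->icp_dmat, icp->icp_scr_dmamap, soff,
	    sizeof(xs->sense.scsi_sense), BUS_DMASYNC_PREREAD);
	/* completion side (icpsp_intr, Example #5), before the copy-out: */
	bus_dmamap_sync(icp->icp_dmat, icp->icp_scr_dmamap, soff,
	    sizeof(xs->sense.scsi_sense), BUS_DMASYNC_POSTREAD);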
Example #15
/*
 * This is pretty much a CD target for now
 */
static void
scsitest_request(struct scsipi_channel *chan,
	scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_xfer *xs = arg;
	struct scsipi_generic *cmd = xs->cmd;
#ifdef USE_TOSI_ISO
	int error;
#endif

	if (req != ADAPTER_REQ_RUN_XFER)
		return;

	//show_scsipi_xs(xs);

	switch (cmd->opcode) {
	case SCSI_TEST_UNIT_READY:
		if (isofd == -1)
			sense_notready(xs);

		break;
	case INQUIRY: {
		struct scsipi_inquiry_data *inqbuf = (void *)xs->data;

		memset(inqbuf, 0, sizeof(*inqbuf));
		inqbuf->device = T_CDROM;
		inqbuf->dev_qual2 = SID_REMOVABLE;
		strcpy(inqbuf->vendor, "RUMPHOBO");
		strcpy(inqbuf->product, "It's a LIE");
		strcpy(inqbuf->revision, "0.00");
		break;
	}
	case READ_CD_CAPACITY: {
		struct scsipi_read_cd_cap_data *ret = (void *)xs->data;

		_lto4b(CDBLOCKSIZE, ret->length);
		_lto4b(mycdsize, ret->addr);

		break;
	}
	case READ_DISCINFO: {
		struct scsipi_read_discinfo_data *ret = (void *)xs->data;

		memset(ret, 0, sizeof(*ret));
		break;
	}
	case READ_TRACKINFO: {
		struct scsipi_read_trackinfo_data *ret = (void *)xs->data;

		_lto4b(mycdsize, ret->track_size);
		break;
	}
	case READ_TOC: {
		struct scsipi_toc_header *ret = (void *)xs->data;

		memset(ret, 0, sizeof(*ret));
		break;
	}
	case START_STOP: {
		struct scsipi_start_stop *param = (void *)cmd;

		if (param->how & SSS_LOEJ) {
#ifdef USE_TOSI_ISO
			rumpuser_close(isofd, &error);
#endif
			isofd = -1;
		}
		break;
	}
	case SCSI_SYNCHRONIZE_CACHE_10: {
		if (isofd == -1) {
			if ((xs->xs_control & XS_CTL_SILENT) == 0)
				atomic_inc_uint(&rump_scsitest_err
				    [RUMP_SCSITEST_NOISYSYNC]);
			
			sense_notready(xs);
		}

		break;
	}
	case GET_CONFIGURATION: {
		memset(xs->data, 0, sizeof(struct scsipi_get_conf_data));
		break;
	}
	case SCSI_READ_6_COMMAND: {
#ifdef USE_TOSI_ISO
		struct scsi_rw_6 *param = (void *)cmd;

		printf("reading %d bytes from %d\n",
		    param->length * CDBLOCKSIZE,
		    _3btol(param->addr) * CDBLOCKSIZE);
		rumpuser_pread(isofd, xs->data,
		     param->length * CDBLOCKSIZE,
		     _3btol(param->addr) * CDBLOCKSIZE,
		     &error);
#endif

		break;
	}
	case SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL:
		/* hardcoded for now */
		break;
	default:
		printf("unhandled opcode 0x%x\n", cmd->opcode);
		break;
	}

	scsipi_done(xs);
}
Example #16
Static void
umass_scsipi_cb(struct umass_softc *sc, void *priv, int residue, int status)
{
	struct umass_scsipi_softc *scbus = (struct umass_scsipi_softc *)sc->bus;
	struct scsipi_xfer *xs = priv;
	struct scsipi_periph *periph = xs->xs_periph;
	int cmdlen, senselen;
	int s;
#ifdef UMASS_DEBUG
	struct timeval tv;
	u_int delta;
	microtime(&tv);
	delta = (tv.tv_sec - sc->tv.tv_sec) * 1000000 + tv.tv_usec - sc->tv.tv_usec;
#endif

	DPRINTF(UDMASS_CMD,("umass_scsipi_cb: at %"PRIu64".%06"PRIu64", delta=%u: xs=%p residue=%d"
	    " status=%d\n", tv.tv_sec, (uint64_t)tv.tv_usec, delta, xs, residue, status));

	xs->resid = residue;

	switch (status) {
	case STATUS_CMD_OK:
		xs->error = XS_NOERROR;
		break;

	case STATUS_CMD_UNKNOWN:
		/* FALLTHROUGH */
	case STATUS_CMD_FAILED:
		/* fetch sense data */
		sc->sc_sense = 1;
		memset(&scbus->sc_sense_cmd, 0, sizeof(scbus->sc_sense_cmd));
		scbus->sc_sense_cmd.opcode = SCSI_REQUEST_SENSE;
		scbus->sc_sense_cmd.byte2 = periph->periph_lun <<
		    SCSI_CMD_LUN_SHIFT;

		if (sc->sc_cmd == UMASS_CPROTO_UFI ||
		    sc->sc_cmd == UMASS_CPROTO_ATAPI)
			cmdlen = UFI_COMMAND_LENGTH;	/* XXX */
		else
			cmdlen = sizeof(scbus->sc_sense_cmd);
		if (periph->periph_version < 0x05) /* SPC-3 */
			senselen = 18;
		else
			senselen = sizeof(xs->sense);
		scbus->sc_sense_cmd.length = senselen;
		sc->sc_methods->wire_xfer(sc, periph->periph_lun,
					  &scbus->sc_sense_cmd, cmdlen,
					  &xs->sense, senselen,
					  DIR_IN, xs->timeout, 0,
					  umass_scsipi_sense_cb, xs);
		return;

	case STATUS_WIRE_FAILED:
		xs->error = XS_RESET;
		break;

	default:
		panic("%s: Unknown status %d in umass_scsipi_cb",
			device_xname(sc->sc_dev), status);
	}

	DPRINTF(UDMASS_CMD,("umass_scsipi_cb: at %"PRIu64".%06"PRIu64": return xs->error="
            "%d, xs->xs_status=0x%x xs->resid=%d\n",
	     tv.tv_sec, (uint64_t)tv.tv_usec,
	     xs->error, xs->xs_status, xs->resid));

	s = splbio();
	KERNEL_LOCK(1, curlwp);
	scsipi_done(xs);
	KERNEL_UNLOCK_ONE(curlwp);
	splx(s);
}
Example #17
Static void
umass_scsipi_request(struct scsipi_channel *chan,
		scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct umass_softc *sc = device_private(adapt->adapt_dev);
	struct umass_scsipi_softc *scbus = (struct umass_scsipi_softc *)sc->bus;
	struct scsipi_generic *cmd;
	int cmdlen;
	int dir;
#ifdef UMASS_DEBUG
	microtime(&sc->tv);
#endif
	switch(req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		DIF(UDMASS_UPPER, periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS);

		DPRINTF(UDMASS_CMD, ("%s: umass_scsi_cmd: at %"PRIu64".%06"PRIu64": %d:%d "
		    "xs=%p cmd=0x%02x datalen=%d (quirks=0x%x, poll=%d)\n",
		    device_xname(sc->sc_dev), sc->tv.tv_sec, (uint64_t)sc->tv.tv_usec,
		    periph->periph_target, periph->periph_lun,
		    xs, xs->cmd->opcode, xs->datalen,
		    periph->periph_quirks, xs->xs_control & XS_CTL_POLL));
#if defined(UMASS_DEBUG) && defined(SCSIPI_DEBUG)
		if (umassdebug & UDMASS_SCSI)
			show_scsipi_xs(xs);
		else if (umassdebug & ~UDMASS_CMD)
			show_scsipi_cmd(xs);
#endif

		if (sc->sc_dying) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}

#ifdef UMASS_DEBUG
		if (SCSIPI_BUSTYPE_TYPE(chan->chan_bustype->bustype_type) ==
		    SCSIPI_BUSTYPE_ATAPI ?
		    periph->periph_target != UMASS_ATAPI_DRIVE :
		    periph->periph_target == chan->chan_id) {
			DPRINTF(UDMASS_SCSI, ("%s: wrong SCSI ID %d\n",
			    device_xname(sc->sc_dev),
			    periph->periph_target));
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
#endif

		cmd = xs->cmd;
		cmdlen = xs->cmdlen;

		dir = DIR_NONE;
		if (xs->datalen) {
			switch (xs->xs_control &
			    (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
			case XS_CTL_DATA_IN:
				dir = DIR_IN;
				break;
			case XS_CTL_DATA_OUT:
				dir = DIR_OUT;
				break;
			}
		}

		if (xs->datalen > UMASS_MAX_TRANSFER_SIZE) {
			printf("umass_cmd: large datalen, %d\n", xs->datalen);
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}

		if (xs->xs_control & XS_CTL_POLL) {
			/* Use sync transfer. XXX Broken! */
			DPRINTF(UDMASS_SCSI,
			    ("umass_scsi_cmd: sync dir=%d\n", dir));
			scbus->sc_sync_status = USBD_INVAL;
			sc->sc_methods->wire_xfer(sc, periph->periph_lun, cmd,
						  cmdlen, xs->data,
						  xs->datalen, dir,
						  xs->timeout, USBD_SYNCHRONOUS,
						  0, xs);
			DPRINTF(UDMASS_SCSI, ("umass_scsi_cmd: done err=%d\n",
					      scbus->sc_sync_status));
			switch (scbus->sc_sync_status) {
			case USBD_NORMAL_COMPLETION:
				xs->error = XS_NOERROR;
				break;
			case USBD_TIMEOUT:
				xs->error = XS_TIMEOUT;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			goto done;
		} else {
			DPRINTF(UDMASS_SCSI,
			    ("umass_scsi_cmd: async dir=%d, cmdlen=%d"
				      " datalen=%d\n",
				      dir, cmdlen, xs->datalen));
			sc->sc_methods->wire_xfer(sc, periph->periph_lun, cmd,
						  cmdlen, xs->data,
						  xs->datalen, dir,
						  xs->timeout, 0,
						  umass_scsipi_cb, xs);
			return;
		}

		/* Return if command finishes early. */
 done:
		KERNEL_LOCK(1, curlwp);
		scsipi_done(xs);
		KERNEL_UNLOCK_ONE(curlwp);
		return;
	default:
		/* Not supported, nothing to do. */
		;
	}
}
Example #18
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;
	int restart = 0; /* nonzero if we need to restart the IOC*/

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt_reply != NULL) {
			if (mpt->verbose > 1) {
				uint32_t *pReply = (uint32_t *) mpt_reply;

				mpt_prt(mpt, "Address Reply (index %u):",
				    le32toh(mpt_reply->MsgContext) & 0xffff);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
				    pReply[1], pReply[2], pReply[3]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
				    pReply[5], pReply[6], pReply[7]);
				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
				    pReply[9], pReply[10], pReply[11]);
			}
			index = le32toh(mpt_reply->MsgContext);
		} else
			index = reply & MPT_CONTEXT_MASK;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
			    index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
		    index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
		    index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: TASK MGMT", __func__);
		KASSERT(req == mpt->mngt_req);
		mpt->mngt_req = NULL;
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
			    __func__, mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC overrun!", __func__);
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
		restart = 1;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		/*
		 * FreeBSD and Linux indicate this is a phase error between
		 * the IOC and the drive itself. When this happens, the IOC
		 * becomes unhappy and stops processing all transactions.  
		 * Call mpt_timeout which knows how to get the IOC back
		 * on its feet.
		 */
		 mpt_prt(mpt, "%s: IOC indicates protocol error -- "
		     "recovering...", __func__);
		xs->error = XS_TIMEOUT;
		restart = 1;

		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
		    le16toh(mpt_reply->IOCStatus));
		restart = 1;
		break;
	}

	if (mpt_reply != NULL) {
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
			    sizeof(xs->sense.scsi_sense));
		} else if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
			/*
			 * This will cause the scsipi layer to issue
			 * a REQUEST SENSE.
			 */
			if (xs->status == SCSI_CHECK)
				xs->error = XS_BUSY;
		}
	}

 done:
	if (mpt_reply != NULL && (le16toh(mpt_reply->IOCStatus) &
	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
		mpt_prt(mpt, "%s: IOC has error - logging...\n", __func__);
		mpt_ctlop(mpt, mpt_reply, reply);
	}

	/* If IOC done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);

	if (restart) {
		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
		mpt_restart(mpt, NULL);
	}
}
Example #19
static void
wdc_atapi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *sc_xfer;
	struct wdc_softc *wdc = device_private(adapt->adapt_dev);
	struct atac_softc *atac = &wdc->sc_atac;
	struct ata_xfer *xfer;
	int channel = chan->chan_channel;
	int drive, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		sc_xfer = arg;
		periph = sc_xfer->xs_periph;
		drive = periph->periph_target;

		ATADEBUG_PRINT(("wdc_atapi_scsipi_request %s:%d:%d\n",
		    device_xname(atac->atac_dev), channel, drive),
		    DEBUG_XFERS);
		if (!device_is_active(atac->atac_dev)) {
			sc_xfer->error = XS_DRIVER_STUFFUP;
			scsipi_done(sc_xfer);
			return;
		}

		xfer = ata_get_xfer(ATAXF_NOSLEEP);
		if (xfer == NULL) {
			sc_xfer->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(sc_xfer);
			return;
		}

		if (sc_xfer->xs_control & XS_CTL_POLL)
			xfer->c_flags |= C_POLL;
#if NATA_DMA
		if ((atac->atac_channels[channel]->ch_drive[drive].drive_flags &
		    (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) && sc_xfer->datalen > 0)
			xfer->c_flags |= C_DMA;
#endif
#if NATA_DMA && NATA_PIOBM
		else
#endif
#if NATA_PIOBM
		if ((atac->atac_cap & ATAC_CAP_PIOBM) &&
		    sc_xfer->datalen > 0)
			xfer->c_flags |= C_PIOBM;
#endif
		xfer->c_drive = drive;
		xfer->c_flags |= C_ATAPI;
#if NATA_DMA
		if (sc_xfer->cmd->opcode == GPCMD_REPORT_KEY ||
		    sc_xfer->cmd->opcode == GPCMD_SEND_KEY ||
		    sc_xfer->cmd->opcode == GPCMD_READ_DVD_STRUCTURE) {
			/*
			 * DVD authentication commands must always be done in
			 * PIO mode.
			 */
			xfer->c_flags &= ~C_DMA;
		}

		/*
		 * DMA normally can't deal with transfers which are not a
		 * multiple of its databus width. It's a bug to request odd
		 * length transfers for ATAPI.
		 *
		 * Some devices also can't cope with unaligned DMA xfers
		 * either. Also some devices seem to not handle DMA xfers of
		 * less than 4 bytes.
		 *
		 * By enforcing at least 4 byte aligned offset and length for
		 * DMA, we might use PIO where DMA could be allowed but better
		 * safe than sorry as recent problems proved.
		 *
		 * Offending structures that are thus done by PIO instead of
		 * DMA are normally small structures since all bulkdata is
		 * aligned. But as the request may come from userland, we have
		 * to protect against it anyway.
		 *
		 * XXX check for the 32 bit wide flag?
		 */

		if (((uintptr_t) sc_xfer->data) & 0x03)
			xfer->c_flags &= ~C_DMA;
		if ((sc_xfer->datalen < 4) || (sc_xfer->datalen & 0x03))
			xfer->c_flags &= ~C_DMA;
#endif	/* NATA_DMA */

		xfer->c_cmd = sc_xfer;
		xfer->c_databuf = sc_xfer->data;
		xfer->c_bcount = sc_xfer->datalen;
		xfer->c_start = wdc_atapi_start;
		xfer->c_intr = wdc_atapi_intr;
		xfer->c_kill_xfer = wdc_atapi_kill_xfer;
		xfer->c_dscpoll = 0;
		s = splbio();
		ata_exec_xfer(atac->atac_channels[channel], xfer);
#ifdef DIAGNOSTIC
		if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
		    (sc_xfer->xs_status & XS_STS_DONE) == 0)
			panic("wdc_atapi_scsipi_request: polled command "
			    "not done");
#endif
		splx(s);
		return;

	default:
		/* Not supported, nothing to do. */
		;
	}
}
Example #20
/*
 * Carry out a request from the high level driver.
 */
static void
ncr5380_scsi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct ncr_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	int	sps, flags;
	SC_REQ	*reqp, *link, *tmp;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		flags = xs->xs_control;

		/*
		 * We do not queue RESET commands
		 */
		if (flags & XS_CTL_RESET) {
			scsi_reset_verbose(sc, "Got reset-command");
			scsipi_done(xs);
			return;
		}

		/*
		 * Get a request block
		 */
		sps = splbio();
		if ((reqp = free_head) == 0) {
			xs->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(xs);
			return;
		}
		free_head  = reqp->next;
		reqp->next = NULL;
		splx(sps);

		/*
		 * Initialize our private fields
		 */
		reqp->dr_flag   = (flags & XS_CTL_POLL) ? DRIVER_NOINT : 0;
		reqp->phase     = NR_PHASE;
		reqp->msgout    = MSG_NOOP;
		reqp->status    = SCSGOOD;
		reqp->message   = 0xff;
		reqp->link      = NULL;
		reqp->xs        = xs;
		reqp->targ_id   = xs->xs_periph->periph_target;
		reqp->targ_lun  = xs->xs_periph->periph_lun;
		reqp->xdata_ptr = (u_char*)xs->data;
		reqp->xdata_len = xs->datalen;
		memcpy(&reqp->xcmd, xs->cmd, xs->cmdlen);
		reqp->xcmd_len = xs->cmdlen;
		reqp->xcmd.bytes[0] |= reqp->targ_lun << 5;

#ifdef REAL_DMA
		/*
		 * Check if DMA can be used on this request
		 */
		if (scsi_dmaok(reqp))
			reqp->dr_flag |= DRIVER_DMAOK;
#endif /* REAL_DMA */

		/*
		 * Insert the command into the issue queue. Note that
		 * 'REQUEST SENSE' commands are inserted at the head of the
		 * queue since any command will clear the existing contingent
		 * allegiance condition and the sense data is only valid while
		 * the condition exists.
		 * When possible, link the command to a previous command to
		 * the same target. This is not very sensible when AUTO_SENSE
		 * is not defined!  Interrupts are disabled while we are
		 * fiddling with the issue-queue.
		 */
		sps = splbio();
		link = NULL;
		if ((issue_q == NULL) ||
		    (reqp->xcmd.opcode == SCSI_REQUEST_SENSE)) {
			reqp->next = issue_q;
			issue_q    = reqp;
		} else {
			tmp  = issue_q;
			do {
				if (!link && (tmp->targ_id == reqp->targ_id) &&
				    !tmp->link)
					link = tmp;
			} while (tmp->next && (tmp = tmp->next));
			tmp->next = reqp;
#ifdef AUTO_SENSE
			if (link && (ncr_will_link & (1<<reqp->targ_id))) {
				link->link = reqp;
				link->xcmd.bytes[link->xs->cmdlen-2] |= 1;
			}
#endif
		}
#ifdef AUTO_SENSE
		/*
		 * If we haven't already, check the target for link support.
		 * Do this by prefixing the current command with a dummy
		 * Request_Sense command, link the dummy to the current
		 * command, and insert the dummy command at the head of the
		 * issue queue.  Set the DRIVER_LINKCHK flag so that we'll
		 * ignore the results of the dummy command, since we only
		 * care about whether it was accepted or not.
		 */
		if (!link && !(ncr_test_link & (1<<reqp->targ_id)) &&
		    (tmp = free_head) && !(reqp->dr_flag & DRIVER_NOINT)) {
			free_head = tmp->next;
			tmp->dr_flag =
			    (reqp->dr_flag & ~DRIVER_DMAOK) | DRIVER_LINKCHK;
			tmp->phase = NR_PHASE;
			tmp->msgout = MSG_NOOP;
			tmp->status = SCSGOOD;
			tmp->xs = reqp->xs;
			tmp->targ_id = reqp->targ_id;
			tmp->targ_lun = reqp->targ_lun;
			memcpy(&tmp->xcmd, sense_cmd, sizeof(sense_cmd));
			tmp->xcmd_len = sizeof(sense_cmd);
			tmp->xdata_ptr = (u_char *)&tmp->xs->sense.scsi_sense;
			tmp->xdata_len = sizeof(tmp->xs->sense.scsi_sense);
			ncr_test_link |= 1<<tmp->targ_id;
			tmp->link = reqp;
			tmp->xcmd.bytes[sizeof(sense_cmd)-2] |= 1;
			tmp->next = issue_q;
			issue_q = tmp;
#ifdef DBG_REQ
			if (dbg_target_mask & (1 << tmp->targ_id))
				show_request(tmp, "LINKCHK");
#endif
		}
#endif
		splx(sps);

#ifdef DBG_REQ
		if (dbg_target_mask & (1 << reqp->targ_id))
			show_request(reqp,
			    (reqp->xcmd.opcode == SCSI_REQUEST_SENSE) ?
			    "HEAD":"TAIL");
#endif

		run_main(sc);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}
}
Example #21
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = htole32(req->index);

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	mpt_req->Control = htole32(mpt_req->Control);

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = htole32(xs->datalen);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address =
				    htole32(req->dmap->dm_segs[seg].ds_addr);
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				se->FlagsLength = htole32(se->FlagsLength);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = htole16(MPT_NSGL(mpt)
						* sizeof(SGE_SIMPLE32));
				} else {
				/* ... remainder of the SG-list chain setup truncated in the source listing ... */
Example #22
static void
vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
    request, void *arg)
{
	struct vioscsi_softc *sc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
	struct scsipi_xfer *xs; 
	struct scsipi_periph *periph;
	struct vioscsi_req *vr;
	struct virtio_scsi_req_hdr *req;
	struct virtqueue *vq = &sc->sc_vqs[2];
	int slot, error;

	DPRINTF(("%s: enter\n", __func__));

	if (request != ADAPTER_REQ_RUN_XFER) {
		DPRINTF(("%s: unhandled %d\n", __func__, request));
		return;
	}
	
	xs = arg;
	periph = xs->xs_periph;

	vr = vioscsi_req_get(sc);
#ifdef DIAGNOSTIC
	/*
	 * This should never happen as we track the resources
	 * in the mid-layer.
	 */
	if (vr == NULL) {
		scsipi_printaddr(xs->xs_periph);
		panic("%s: unable to allocate request\n", __func__);
	}
#endif
	req = &vr->vr_req;
	slot = vr - sc->sc_reqs;

	vr->vr_xs = xs;

	/*
	 * "The only supported format for the LUN field is: first byte set to
	 * 1, second byte set to target, third and fourth byte representing a
	 * single level LUN structure, followed by four zero bytes."
	 */
	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
		    periph->periph_target, periph->periph_lun));
		goto stuffup;
	}
	req->lun[0] = 1;
	req->lun[1] = periph->periph_target - 1;
	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
	req->lun[3] = periph->periph_lun;
	memset(req->lun + 4, 0, 4);
	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
	    periph->periph_target - 1, periph->periph_lun, slot));

	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
		    (size_t)xs->cmdlen, sizeof(req->cdb)));
		goto stuffup;
	}

	memset(req->cdb, 0, sizeof(req->cdb));
	memcpy(req->cdb, xs->cmd, xs->cmdlen);

	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
	    xs->data, xs->datalen, NULL, XS2DMA(xs));
	switch (error) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		goto nomore;
	default:
		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
		    error);
	stuffup:
		xs->error = XS_DRIVER_STUFFUP;
nomore:
		// XXX: free req?
		scsipi_done(xs);
		return;
	}

	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		nsegs += vr->vr_data->dm_nsegs;

	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
	if (error) {
		DPRINTF(("%s: error reserving %d\n", __func__, error));
		goto stuffup;
	}

	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr),
	    BUS_DMASYNC_PREREAD);
	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
		    XS2DMAPRE(xs));

	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_req),
	    sizeof(struct virtio_scsi_req_hdr), 1);
	if (xs->xs_control & XS_CTL_DATA_OUT)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
	    offsetof(struct vioscsi_req, vr_res),
	    sizeof(struct virtio_scsi_res_hdr), 0);
	if (xs->xs_control & XS_CTL_DATA_IN)
		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
	virtio_enqueue_commit(vsc, vq, slot, 1);

	if ((xs->xs_control & XS_CTL_POLL) == 0)
		return;

	DPRINTF(("%s: polling...\n", __func__));
	// XXX: do this better.
	int timeout = 1000;
	do {
		(*vsc->sc_intrhand)(vsc);
		if (vr->vr_xs != xs)
			break;
		delay(1000);
	} while (--timeout > 0);

	if (vr->vr_xs == xs) {
		// XXX: Abort!
		xs->error = XS_TIMEOUT;
		xs->resid = xs->datalen;
		DPRINTF(("%s: polling timeout\n", __func__));
		scsipi_done(xs);
	}
	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
}
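
As a worked check of the virtio-scsi LUN encoding in the request path above (target and LUN values assumed purely for illustration):

/*
 * Worked example of the LUN encoding: periph_target = 5, periph_lun = 3
 * yields
 *
 *	lun[0] = 1			fixed by the spec
 *	lun[1] = 5 - 1 = 4		target
 *	lun[2] = 0x40 | (3 >> 8) = 0x40	single-level LUN, high bits
 *	lun[3] = 3 & 0xff = 3		LUN, low bits
 *	lun[4..7] = 0
 */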