Example 1
/*
 * mrsas_scsiio_timeout:	Callback function for timed-out IO
 * input:			mpt command context
 *
 * This function executes when the timer armed with the timeout value from the
 * CCB header expires.  The driver runs this timer for every DCDB and LDIO
 * received from the CAM layer.  This is the IO-timeout callback and it runs
 * in a no-sleep context; it sets do_timedout_reset in the adapter context so
 * that OCR/Kill Adapter is executed from the ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}
	/*
	 * The callout below is a dummy entry so that it can be cancelled
	 * from mrsas_cmd_done().  The controller will then go through
	 * OCR/Kill Adapter, based on the controller's OCR enable/disable
	 * property, from the ocr_thread context.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif
	sc->do_timedout_reset = SCSIIO_TIMEOUT_OCR;
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
}
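
A minimal sketch of the arm-on-submit / cancel-on-completion pairing this handler depends on, using hypothetical names (my_cmd, my_submit, my_cmd_done) rather than the mrsas driver's own structures; <sys/callout.h> and the usual kernel headers are assumed.

struct my_cmd {
	struct callout	timer;		/* one-shot IO timeout timer */
};

static void
my_cmd_timeout(void *arg)
{
	struct my_cmd *cmd = arg;

	/* Runs only if the completion path did not cancel the timer. */
	(void)cmd;
	/* ... flag the adapter for reset/recovery ... */
}

static void
my_submit(struct my_cmd *cmd, sbintime_t timeout)
{
	/* Arm the timeout before handing the command to the hardware. */
	callout_reset_sbt(&cmd->timer, timeout, 0, my_cmd_timeout, cmd, 0);
	/* ... issue the command ... */
}

static void
my_cmd_done(struct my_cmd *cmd)
{
	/* Completion beat the timer: cancel it so the handler never fires. */
	callout_stop(&cmd->timer);
	/* ... normal completion processing ... */
}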
Example 2
static void
pit_timer_start_cntr0(struct vatpit *vatpit)
{
	struct channel *c;
	sbintime_t now, delta, precision;

	c = &vatpit->channel[0];
	if (c->initial != 0) {
		delta = c->initial * vatpit->freq_sbt;
		precision = delta >> tc_precexp;
		c->callout_sbt = c->callout_sbt + delta;

		/*
		 * Reset 'callout_sbt' if the time that the callout
		 * was supposed to fire is more than 'c->initial'
		 * ticks in the past.
		 */
		now = sbinuptime();
		if (c->callout_sbt < now)
			c->callout_sbt = now + delta;

		callout_reset_sbt(&c->callout, c->callout_sbt,
		    precision, vatpit_callout_handler, &c->callout_arg,
		    C_ABSOLUTE);
	}
}
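
A sketch of the drift-free periodic pattern used above, isolated from the vatpit specifics and with hypothetical names: the next firing time is accumulated from the previous one and passed to callout_reset_sbt() with C_ABSOLUTE, so scheduling latency does not accumulate across ticks.

struct my_periodic {
	struct callout	callout;
	sbintime_t	period;		/* interval between firings */
	sbintime_t	next;		/* absolute time of the next firing */
};

static void
my_periodic_tick(void *arg)
{
	struct my_periodic *p = arg;

	/* ... periodic work ... */

	p->next += p->period;
	if (p->next < sbinuptime())	/* fell behind: resynchronize */
		p->next = sbinuptime() + p->period;
	callout_reset_sbt(&p->callout, p->next, p->period >> tc_precexp,
	    my_periodic_tick, p, C_ABSOLUTE);
}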
Example 3
void  os_request_timer(void * osext, HPT_U32 interval)
{
	PVBUS_EXT vbus_ext = osext;

	HPT_ASSERT(vbus_ext->ext_type==EXT_TYPE_VBUS);

	callout_reset_sbt(&vbus_ext->timer, SBT_1US * interval, 0,
	    os_timer_for_ldm, vbus_ext, 0);
}
Example 4
static void
initialize_tempmon(struct imx6_anatop_softc *sc)
{
	uint32_t cal;
	struct sysctl_ctx_list *ctx;

	/*
	 * Fetch calibration data: a sensor count at room temperature (25C),
	 * a sensor count at a high temperature, and that temperature
	 */
	cal = fsl_ocotp_read_4(FSL_OCOTP_ANA1);
	sc->temp_room_cnt = (cal & 0xFFF00000) >> 20;
	sc->temp_high_cnt = (cal & 0x000FFF00) >> 8;
	sc->temp_high_val = (cal & 0x000000FF) * 10;

	/*
	 * Throttle to a lower cpu freq at 10C below the "hot" temperature, and
	 * reset back to max cpu freq at 5C below the trigger.
	 */
	sc->temp_throttle_val = sc->temp_high_val - 100;
	sc->temp_throttle_trigger_cnt =
	    temp_to_count(sc, sc->temp_throttle_val);
	sc->temp_throttle_reset_cnt = 
	    temp_to_count(sc, sc->temp_throttle_val - 50);

	/*
	 * Set the sensor to sample automatically at 16Hz (32.768KHz/0x800), set
	 * the throttle count, and begin making measurements.
	 */
	imx6_anatop_write_4(IMX6_ANALOG_TEMPMON_TEMPSENSE1, 0x0800);
	imx6_anatop_write_4(IMX6_ANALOG_TEMPMON_TEMPSENSE0,
	    (sc->temp_throttle_trigger_cnt << 
	    IMX6_ANALOG_TEMPMON_TEMPSENSE0_ALARM_SHIFT) |
	    IMX6_ANALOG_TEMPMON_TEMPSENSE0_MEASURE);

	/*
	 * XXX Note that the alarm-interrupt feature isn't working yet, so
	 * we'll use a callout handler to check at 10Hz.  Make sure we have an
	 * initial temperature reading before starting up the callouts so we
	 * don't get a bogus reading of zero.
	 */
	while (sc->temp_last_cnt == 0)
		temp_update_count(sc);
	sc->temp_throttle_delay = 100 * SBT_1MS;
	callout_init(&sc->temp_throttle_callout, 0);
	callout_reset_sbt(&sc->temp_throttle_callout, sc->temp_throttle_delay, 
	    0, tempmon_throttle_check, sc, 0);

	ctx = device_get_sysctl_ctx(sc->dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
	    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, sc, 0,
	    temp_sysctl_handler, "IK", "Current die temperature");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
	    OID_AUTO, "throttle_temperature", CTLTYPE_INT | CTLFLAG_RW, sc,
	    0, temp_throttle_sysctl_handler, "IK", 
	    "Throttle CPU when exceeding this temperature");
}
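
The calibration word read above gives two (count, temperature) points, one at room temperature (25C) and one at temp_high_val, so converting between sensor counts and temperature is a linear interpolation. The sketch below is an illustrative reconstruction of that conversion, working in tenths of a degree to match the scaling above; the driver's actual temp_to_count() and its companion routine may differ in rounding details.

/* 'temp' and the return value of sketch_count_to_temp() are in 0.1 C units. */
static uint32_t
sketch_temp_to_count(struct imx6_anatop_softc *sc, uint32_t temp)
{
	/* Counts fall as temperature rises, hence the inverted slope. */
	return (sc->temp_room_cnt - (temp - 250) *
	    (sc->temp_room_cnt - sc->temp_high_cnt) /
	    (sc->temp_high_val - 250));
}

static uint32_t
sketch_count_to_temp(struct imx6_anatop_softc *sc, uint32_t count)
{
	return (250 + (sc->temp_room_cnt - count) *
	    (sc->temp_high_val - 250) /
	    (sc->temp_room_cnt - sc->temp_high_cnt));
}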
Example 5
void  os_request_timer(void * osext, HPT_U32 interval)
{
	PVBUS_EXT vbus_ext = osext;

	HPT_ASSERT(vbus_ext->ext_type==EXT_TYPE_VBUS);

#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&vbus_ext->timer, SBT_1US * interval, 0,
	    os_timer_for_ldm, vbus_ext, 0);
#else 
	untimeout(os_timer_for_ldm, vbus_ext, vbus_ext->timer);
	vbus_ext->timer = timeout(os_timer_for_ldm, vbus_ext, interval * hz / 1000000);
#endif
}
Example 6
static void
tempmon_throttle_check(void *arg)
{
	struct imx6_anatop_softc *sc = arg;

	/* Lower counts are higher temperatures. */
	if (sc->temp_last_cnt < sc->temp_throttle_trigger_cnt)
		tempmon_goslow(sc);
	else if (sc->temp_last_cnt > (sc->temp_throttle_reset_cnt))
		tempmon_gofast(sc);

	callout_reset_sbt(&sc->temp_throttle_callout, sc->temp_throttle_delay,
	    0, tempmon_throttle_check, sc, 0);
}
Example 7
static void
pit_timer_start_cntr0(struct vatpit *vatpit)
{
	struct channel *c;
	sbintime_t delta, precision;

	c = &vatpit->channel[0];
	if (c->initial != 0) {
		delta = c->initial * vatpit->freq_sbt;
		precision = delta >> tc_precexp;
		c->callout_sbt = c->callout_sbt + delta;

		callout_reset_sbt(&c->callout, c->callout_sbt,
		    precision, vatpit_callout_handler, &c->callout_arg,
		    C_ABSOLUTE);
	}
}
Example 8
static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{

	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}
		
	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				cinfo = ccb_h->ccb_cinfo_ptr;
				callout_reset_sbt(&cinfo->timer,
				    SBT_1MS * ccb_h->timeout, 0,
				    adv_timeout, ccb_h, 0);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			device_printf(adv->dev, "No longer in timeout\n");
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
Example 9
static void
logtimeout(void *arg)
{

	if (!log_open)
		return;
	if (msgbuftrigger == 0)
		goto done;
	msgbuftrigger = 0;
	selwakeuppri(&logsoftc.sc_selp, LOG_RDPRI);
	KNOTE_LOCKED(&logsoftc.sc_selp.si_note, 0);
	if ((logsoftc.sc_state & LOG_ASYNC) && logsoftc.sc_sigio != NULL)
		pgsigio(&logsoftc.sc_sigio, SIGIO, 0);
	cv_broadcastpri(&log_wakeup, LOG_RDPRI);
done:
	if (log_wakeups_per_second < 1) {
		printf("syslog wakeup is less than one.  Adjusting to 1.\n");
		log_wakeups_per_second = 1;
	}
	callout_reset_sbt(&logsoftc.sc_callout,
	    SBT_1S / log_wakeups_per_second, 0, logtimeout, NULL, C_PREL(1));
}
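
The clamp on log_wakeups_per_second protects the division that turns the tunable into a callout period. A sketch of that conversion on its own, with a hypothetical helper name:

static sbintime_t
wakeup_rate_to_period(int *wakeups_per_second)
{
	/* The clamp avoids a division by zero and a zero-length period. */
	if (*wakeups_per_second < 1)
		*wakeups_per_second = 1;
	return (SBT_1S / *wakeups_per_second);
}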
Example 10
/*ARGSUSED*/
static	int
logopen(struct cdev *dev, int flags, int mode, struct thread *td)
{

	if (log_wakeups_per_second < 1) {
		printf("syslog wakeup is less than one.  Adjusting to 1.\n");
		log_wakeups_per_second = 1;
	}

	mtx_lock(&msgbuf_lock);
	if (log_open) {
		mtx_unlock(&msgbuf_lock);
		return (EBUSY);
	}
	log_open = 1;
	callout_reset_sbt(&logsoftc.sc_callout,
	    SBT_1S / log_wakeups_per_second, 0, logtimeout, NULL, C_PREL(1));
	mtx_unlock(&msgbuf_lock);

	fsetown(td->td_proc->p_pid, &logsoftc.sc_sigio);	/* signal process only */
	return (0);
}
Example 11
/*
 * mrsas_startio:	SCSI IO entry point
 * input:			Adapter instance soft state
 * 					pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point; it initiates IO processing by
 * copying the IO and, depending on whether the IO is a read/write or an
 * inquiry, calling mrsas_build_ldio() or mrsas_build_dcdb(), respectively.
 * It returns 0 if the command is sent to firmware successfully, otherwise 1.
 */
static int32_t
mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
    union ccb *ccb)
{
	struct mrsas_mpt_cmd *cmd;
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u_int8_t cmd_type;

	if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		return (0);
	}
	ccb_h->status |= CAM_SIM_QUEUED;
	cmd = mrsas_get_mpt_cmd(sc);

	if (!cmd) {
		ccb_h->status |= CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return (0);
	}
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb_h->flags & CAM_DIR_IN)
			cmd->flags |= MRSAS_DIR_IN;
		if (ccb_h->flags & CAM_DIR_OUT)
			cmd->flags |= MRSAS_DIR_OUT;
	} else
		cmd->flags = MRSAS_DIR_NONE;	/* no data */

/* For FreeBSD 9.2 and higher */
#if (__FreeBSD_version >= 902001)
	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	case CAM_DATA_SG:
		device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
		    __func__);
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		goto done;
	case CAM_DATA_VADDR:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	case CAM_DATA_BIO:
		if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_TOO_BIG;
			goto done;
		}
		cmd->length = csio->dxfer_len;
		if (cmd->length)
			cmd->data = csio->data_ptr;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto done;
	}
#else
	if (!(ccb_h->flags & CAM_DATA_PHYS)) {	/* Virtual data address */
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
				mrsas_release_mpt_cmd(cmd);
				ccb_h->status = CAM_REQ_TOO_BIG;
				goto done;
			}
			cmd->length = csio->dxfer_len;
			if (cmd->length)
				cmd->data = csio->data_ptr;
		} else {
			mrsas_release_mpt_cmd(cmd);
			ccb_h->status = CAM_REQ_INVALID;
			goto done;
		}
	} else {			/* Data addresses are physical. */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Check for IO type READ-WRITE targeted for Logical Volume */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume  */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume  */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

	cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cmd->index;

	/*
	 * Start the timer for IO timeout; it is armed for 600 seconds here.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 600, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif
	mrsas_atomic_inc(&sc->fw_outstanding);

	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	return (0);

done:
	xpt_done(ccb);
	return (0);
}
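
For comparison, the aha(4) and adv(4) examples elsewhere in this list derive the timer value from the CCB header (in milliseconds) rather than using a fixed 600-second constant. A sketch of that variant, with a hypothetical wrapper name:

static void
sketch_arm_io_timer(struct mrsas_mpt_cmd *cmd, union ccb *ccb)
{
	/* ccb_h.timeout is expressed in milliseconds. */
	callout_reset_sbt(&cmd->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
	    mrsas_scsiio_timeout, cmd, 0);
}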
Example 12
static void
ahaexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	 aha_ccb *accb;
	union	 ccb *ccb;
	struct	 aha_softc *aha;
	uint32_t paddr;

	accb = (struct aha_ccb *)arg;
	ccb = accb->ccb;
	aha = (struct aha_softc *)ccb->ccb_h.ccb_aha_ptr;

	if (error != 0) {
		if (error != EFBIG)
			device_printf(aha->dev,
			    "Unexepected error 0x%x returned from "
			    "bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		aha_sg_t *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nseg;

		/* Copy the segments into our SG list */
		sg = accb->sg_list;
		while (dm_segs < end_seg) {
			ahautoa24(dm_segs->ds_len, sg->len);
			ahautoa24(dm_segs->ds_addr, sg->addr);
			sg++;
			dm_segs++;
		}

		if (nseg > 1) {
			accb->hccb.opcode = aha->ccb_sg_opcode;
			ahautoa24((sizeof(aha_sg_t) * nseg),
			    accb->hccb.data_len);
			ahautoa24(accb->sg_list_phys, accb->hccb.data_addr);
		} else {
			bcopy(accb->sg_list->len, accb->hccb.data_len, 3);
			bcopy(accb->sg_list->addr, accb->hccb.data_addr, 3);
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);

	} else {
		accb->hccb.opcode = INITIATOR_CCB;
		ahautoa24(0, accb->hccb.data_len);
		ahautoa24(0, accb->hccb.data_addr);
	}

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		return;
	}

	accb->flags = ACCB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&aha->pending_ccbs, &ccb->ccb_h, sim_links.le);

	callout_reset_sbt(&accb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
	    ahatimeout, accb, 0);

	/* Tell the adapter about this command */
	if (aha->cur_outbox->action_code != AMBO_FREE) {
		/*
		 * We should never encounter a busy mailbox.
		 * If we do, warn the user, and treat it as
		 * a resource shortage.  If the controller is
		 * hung, one of the pending transactions will
		 * timeout causing us to start recovery operations.
		 */
		device_printf(aha->dev,
		    "Encountered busy mailbox with %d out of %d "
		    "commands active!!!", aha->active_ccbs, aha->max_ccbs);
		callout_stop(&accb->timer);
		if (nseg != 0)
			bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
		ahafreeccb(aha, accb);
		aha->resource_shortage = TRUE;
		xpt_freeze_simq(aha->sim, /*count*/1);
		ccb->ccb_h.status = CAM_REQUEUE_REQ;
		xpt_done(ccb);
		return;
	}
	paddr = ahaccbvtop(aha, accb);
	ahautoa24(paddr, aha->cur_outbox->ccb_addr);
	aha->cur_outbox->action_code = AMBO_START;
	aha_outb(aha, COMMAND_REG, AOP_START_MBOX);

	ahanextoutbox(aha);
}
Example 13
static void
ahadone(struct aha_softc *aha, struct aha_ccb *accb, aha_mbi_comp_code_t comp_code)
{
	union  ccb	  *ccb;
	struct ccb_scsiio *csio;

	ccb = accb->ccb;
	csio = &accb->ccb->csio;

	if ((accb->flags & ACCB_ACTIVE) == 0) {
		device_printf(aha->dev, 
		    "ahadone - Attempt to free non-active ACCB %p\n",
		    (void *)accb);
		return;
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(aha->buffer_dmat, accb->dmamap, op);
		bus_dmamap_unload(aha->buffer_dmat, accb->dmamap);
	}

	if (accb == aha->recovery_accb) {
		/*
		 * The recovery ACCB does not have a CCB associated
		 * with it, so short circuit the normal error handling.
		 * We now traverse our list of pending CCBs and process
		 * any that were terminated by the recovery CCBs action.
		 * We also reinstate timeouts for all remaining, pending,
		 * CCBs.
		 */
		struct cam_path *path;
		struct ccb_hdr *ccb_h;
		cam_status error;

		/* Notify all clients that a BDR occurred */
		error = xpt_create_path(&path, /*periph*/NULL,
		    cam_sim_path(aha->sim), accb->hccb.target,
		    CAM_LUN_WILDCARD);

		if (error == CAM_REQ_CMP) {
			xpt_async(AC_SENT_BDR, path, NULL);
			xpt_free_path(path);
		}

		ccb_h = LIST_FIRST(&aha->pending_ccbs);
		while (ccb_h != NULL) {
			struct aha_ccb *pending_accb;

			pending_accb = (struct aha_ccb *)ccb_h->ccb_accb_ptr;
			if (pending_accb->hccb.target == accb->hccb.target) {
				pending_accb->hccb.ahastat = AHASTAT_HA_BDR;
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
				ahadone(aha, pending_accb, AMBI_ERROR);
			} else {
				callout_reset_sbt(&pending_accb->timer,
				    SBT_1MS * ccb_h->timeout, 0, ahatimeout,
				    pending_accb, 0);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
		}
		device_printf(aha->dev, "No longer in timeout\n");
		return;
	}

	callout_stop(&accb->timer);

	switch (comp_code) {
	case AMBI_FREE:
		device_printf(aha->dev,
		    "ahadone - CCB completed with free status!\n");
		break;
	case AMBI_NOT_FOUND:
		device_printf(aha->dev,
		    "ahadone - CCB Abort failed to find CCB\n");
		break;
	case AMBI_ABORT:
	case AMBI_ERROR:
		/* An error occurred */
		if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
			csio->resid = 0;
		else
			csio->resid = aha_a24tou(accb->hccb.data_len);
		switch(accb->hccb.ahastat) {
		case AHASTAT_DATARUN_ERROR:
		{
			if (csio->resid <= 0) {
				csio->ccb_h.status = CAM_DATA_RUN_ERR;
				break;
			}
			/* FALLTHROUGH */
		}
		case AHASTAT_NOERROR:
			csio->scsi_status = accb->hccb.sdstat;
			csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			switch(csio->scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				csio->ccb_h.status |= CAM_AUTOSNS_VALID;
				/*
				 * The aha writes the sense data at different
				 * offsets based on the scsi cmd len
				 */
				bcopy((caddr_t) &accb->hccb.scsi_cdb +
				    accb->hccb.cmd_len,
				    (caddr_t) &csio->sense_data,
				    accb->hccb.sense_len);
				break;
			default:
				break;
			case SCSI_STATUS_OK:
				csio->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			break;
		case AHASTAT_SELTIMEOUT:
			csio->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case AHASTAT_UNEXPECTED_BUSFREE:
			csio->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case AHASTAT_INVALID_PHASE:
			csio->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case AHASTAT_INVALID_ACTION_CODE:
			panic("%s: Inavlid Action code", aha_name(aha));
			break;
		case AHASTAT_INVALID_OPCODE:
			if (accb->hccb.opcode < INITIATOR_CCB_WRESID)
				panic("%s: Invalid CCB Opcode %x hccb = %p",
				    aha_name(aha), accb->hccb.opcode,
				    &accb->hccb);
			device_printf(aha->dev,
			    "AHA-1540A compensation failed\n");
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			csio->ccb_h.status = CAM_REQUEUE_REQ;
			break;
		case AHASTAT_LINKED_CCB_LUN_MISMATCH:
			/* We don't even support linked commands... */
			panic("%s: Linked CCB Lun Mismatch", aha_name(aha));
			break;
		case AHASTAT_INVALID_CCB_OR_SG_PARAM:
			panic("%s: Invalid CCB or SG list", aha_name(aha));
			break;
		case AHASTAT_HA_SCSI_BUS_RESET:
			if ((csio->ccb_h.status & CAM_STATUS_MASK)
			    != CAM_CMD_TIMEOUT)
				csio->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case AHASTAT_HA_BDR:
			if ((accb->flags & ACCB_DEVICE_RESET) == 0)
				csio->ccb_h.status = CAM_BDR_SENT;
			else
				csio->ccb_h.status = CAM_CMD_TIMEOUT;
			break;
		}
		if (csio->ccb_h.status != CAM_REQ_CMP) {
			xpt_freeze_devq(csio->ccb_h.path, /*count*/1);
			csio->ccb_h.status |= CAM_DEV_QFRZN;
		}
		if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		break;
	case AMBI_OK:
		/* All completed without incident */
		/* XXX DO WE NEED TO COPY SENSE BYTES HERE???? XXX */
		/* I don't think so since it works???? */
		ccb->ccb_h.status |= CAM_REQ_CMP;
		if ((accb->flags & ACCB_RELEASE_SIMQ) != 0)
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		ahafreeccb(aha, accb);
		xpt_done(ccb);
		break;
	}
}
Example 14
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;
	struct	cam_sim *sim;
	struct	adv_softc *adv;
	struct	adv_ccb_info *cinfo;
	struct	adv_scsi_q scsiq;
	struct	adv_sg_head sghead;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;
	if (!dumping)
		mtx_assert(&adv->lock, MA_OWNED);

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;	
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */             
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0,
	    adv_timeout, csio, 0);
}