Example #1
/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
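
The handler above (and bfa_ioim_sm_active in Example #7) follows the driver's function-pointer state-machine convention: each state is a function taking (object, event), bfa_sm_set_state() just stores the function pointer, and delivering an event calls through the current pointer. Below is a minimal, self-contained sketch of that pattern; every name in it is illustrative, not part of the driver.

/* Minimal sketch of a function-pointer state machine, as used above.
 * All names here are hypothetical; only the pattern matches the driver. */
#include <stdio.h>

struct io;
typedef void (*io_state_fn)(struct io *io, int event);

struct io {
	io_state_fn state;	/* current state == current handler */
};

#define sm_set_state(io, fn)	((io)->state = (fn))
#define sm_send_event(io, ev)	((io)->state((io), (ev)))

enum { EV_START, EV_DONE };

static void st_idle(struct io *io, int event);
static void st_active(struct io *io, int event);

static void st_idle(struct io *io, int event)
{
	if (event == EV_START)
		sm_set_state(io, st_active);	/* transition */
}

static void st_active(struct io *io, int event)
{
	if (event == EV_DONE)
		sm_set_state(io, st_idle);
}

int main(void)
{
	struct io io = { .state = st_idle };

	sm_send_event(&io, EV_START);	/* idle -> active */
	sm_send_event(&io, EV_DONE);	/* active -> idle */
	printf("back to idle: %d\n", io.state == st_idle);
	return 0;
}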
Example #2
/**
 * Send port disable message to firmware.
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
Example #3
/**
 * Send port enable message to firmware.
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
				bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
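
bfa_fcport_send_disable() and bfa_fcport_send_enable() share one discipline: increment the message tag first, so a late response to an earlier request can be recognized as stale, then try to reserve a request-queue entry; on a full queue, park a wait element and return BFA_FALSE so the caller retries later. Below is a reduced, runnable sketch of that control flow, assuming a toy one-slot queue; all names are hypothetical.

/* Self-contained sketch of the "check for room, else register a wait"
 * send pattern above. The tiny one-slot queue is illustrative only. */
#include <stdio.h>
#include <stdbool.h>

struct msg { unsigned msgtag; };

struct reqq {
	struct msg slot;
	bool full;		/* one-slot queue keeps the sketch small */
};

static struct msg *reqq_next(struct reqq *q)
{
	return q->full ? NULL : &q->slot;
}

static void reqq_produce(struct reqq *q)
{
	q->full = true;		/* entry now owned by "firmware" */
}

struct port {
	struct reqq *q;
	unsigned msgtag;
	bool waiting;		/* stands in for the parked wait element */
};

static bool port_send_disable(struct port *p)
{
	struct msg *m;

	p->msgtag++;		/* responses carrying older tags are stale */

	m = reqq_next(p->q);
	if (!m) {
		p->waiting = true;	/* retry when space frees up */
		return false;
	}

	m->msgtag = p->msgtag;
	reqq_produce(p->q);
	return true;
}

int main(void)
{
	struct reqq q = { .full = false };
	struct port p = { .q = &q };

	printf("first send:  %d\n", port_send_disable(&p));	/* 1 */
	printf("second send: %d (parked for retry: %d)\n",
	       port_send_disable(&p), p.waiting);		/* 0, 1 */
	return 0;
}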
Example #4
/**
 * Send port stats clear request to firmware.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
Example #5
/**
 * Send port QoS stats clear request to firmware.
 */
static void
bfa_port_qos_stats_clear(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_clear_qos_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);

	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
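
The two stats-clear routines above handle a full queue differently from the send functions in Examples #2 and #3: via bfa_reqq_winit() they register themselves as the wait callback, so exactly the same function re-runs, with the same cbarg, once a queue entry frees up. Below is a self-contained sketch of that self-rearming shape; the tiny queue and all names are illustrative.

/* Sketch of the self-rearming "queue-full" callback used above: on a
 * full queue the function parks itself and returns, and is re-invoked
 * with the same argument once an entry becomes free. */
#include <stdio.h>
#include <stddef.h>

typedef void (*resume_fn)(void *arg);

struct reqq {
	int free_entries;
	resume_fn waiter;	/* who to call when space frees up */
	void *waiter_arg;
};

struct port { struct reqq *q; int stats_qfull; };

static void port_stats_clear(void *cbarg)
{
	struct port *p = cbarg;

	if (p->q->free_entries == 0) {
		p->stats_qfull = 1;
		p->q->waiter = port_stats_clear;	/* rearm self */
		p->q->waiter_arg = p;
		return;
	}
	p->stats_qfull = 0;
	p->q->free_entries--;	/* build and queue the real message here */
	printf("stats-clear request queued\n");
}

/* Called when the hardware consumes an entry and space frees up. */
static void reqq_entry_freed(struct reqq *q)
{
	resume_fn fn = q->waiter;

	q->free_entries++;
	q->waiter = NULL;
	if (fn)
		fn(q->waiter_arg);	/* re-run the parked sender */
}

int main(void)
{
	struct reqq q = { .free_entries = 0 };
	struct port p = { .q = &q };

	port_stats_clear(&p);	/* queue full: parks itself */
	reqq_entry_freed(&q);	/* space frees: sender re-runs, succeeds */
	return 0;
}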
Example #6
/**
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s      *sge;
	u32        pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				  &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through: read-write uses the generic I/O message */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
					FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
				m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
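
One easy-to-miss step above is the final message-class override: sequence-record mode, or a transfer length that is not a multiple of 4 bytes, forces the generic BFI_MC_IOIM_IO class. The test bfa_cb_ioim_get_size(...) & (sizeof(u32) - 1) is the standard power-of-two remainder trick; here is a small standalone sketch of just that check.

/* The alignment test used above: for a power-of-two divisor, masking
 * with (divisor - 1) yields the remainder, so a nonzero result means
 * the size is not 32-bit aligned. */
#include <stdio.h>
#include <stdint.h>

static int is_unaligned_u32(uint32_t size)
{
	return (size & (sizeof(uint32_t) - 1)) != 0;	/* size % 4 != 0 */
}

int main(void)
{
	printf("%d %d %d\n",
	       is_unaligned_u32(512),	/* 0: aligned */
	       is_unaligned_u32(513),	/* 1: unaligned */
	       is_unaligned_u32(516));	/* 0: aligned */
	return 0;
}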
Example #7
/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq,
					  &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_assert(0);
	}
}