Example #1
/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
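Example #2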
static void
bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
			    enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
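Every *_sm_* handler in these examples follows the same convention: bfa_sm_set_state() stores a state-handler function pointer on the object, and bfa_sm_send_event() calls whatever handler is currently stored, passing the event. The standalone sketch below illustrates that dispatch pattern; the demo_* names are hypothetical stand-ins, not the driver's actual bfa_sm macros.

/*
 * Minimal sketch (hypothetical demo_* names, assumptions only) of the
 * function-pointer state machine behind bfa_sm_set_state()/bfa_sm_send_event():
 * each state is a handler function, and sending an event calls the current one.
 */
#include <stdio.h>

struct demo_sm;
typedef void (*demo_sm_t)(struct demo_sm *sm, int event);

struct demo_sm {
	demo_sm_t sm;		/* current state handler */
};

#define demo_sm_set_state(_sm, _state)	((_sm)->sm = (_state))
#define demo_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))

static void demo_sm_offline(struct demo_sm *sm, int event);

static void demo_sm_online(struct demo_sm *sm, int event)
{
	if (event == 1 /* PORT_OFFLINE */)
		demo_sm_set_state(sm, demo_sm_offline);
}

static void demo_sm_offline(struct demo_sm *sm, int event)
{
	(void)sm;
	printf("offline: event %d ignored\n", event);
}

int main(void)
{
	struct demo_sm sm;

	demo_sm_set_state(&sm, demo_sm_online);
	demo_sm_send_event(&sm, 1);	/* online handler transitions to offline */
	demo_sm_send_event(&sm, 2);	/* now handled by the offline state */
	return 0;
}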
Example #3
/**
 * Get default minimum ratelim speed
 */
enum bfa_pport_speed
bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, pport->cfg.trl_def_speed);
	return pport->cfg.trl_def_speed;
}
Example #4
/*
 * Setup MSI-X vector for catapult
 */
void
bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
{
	WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
	bfa_trc(bfa, nvecs);

	bfa->msix.nvecs = nvecs;
	bfa_hwct_msix_uninstall(bfa);
}
Example #5
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
Example #6
/**
 * 		Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #7
static void
bfa_fcs_port_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	struct ct_hdr_s       *cthdr = NULL;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_rffid_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		port->stats.ns_rffid_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		return;
	}

	port->stats.ns_rffid_rejects++;
	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);

	if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
		/*
		 * if this command is not supported, we don't retry
		 */
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
	} else {
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
Example #8
static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
			    enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_TIMEOUT:
		if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) {
			itnim->prli_retries++;
			bfa_trc(itnim->fcs, itnim->prli_retries);
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
			bfa_fcs_itnim_send_prli(itnim, NULL);
		} else {
			/* invoke target offline */
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
			bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP);
		}
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_timer_stop(&itnim->timer);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
Example #9
static void
bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi,
			  enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_RSP_ERROR:
		/*
		 * if max retries have not been reached, start timer for a
		 * delayed retry
		 */
		if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rprt_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
					&fdmi->timer, bfa_fcs_port_fdmi_timeout,
					fdmi, BFA_FCS_RETRY_TIMEOUT);

		} else {
			/*
			 * set state to offline
			 */
			bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
			fdmi->retry_cnt = 0;
		}
		break;

	case FDMISM_EVENT_RSP_OK:
		fdmi->retry_cnt = 0;
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_online);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		bfa_fcxp_discard(fdmi->fcxp);
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
Example #10
/**
 * 		BFA callback for unsolicited frame receive handler.
 *
 * @param[in]		cbarg		callback arg for receive handler
 * @param[in]		uf		unsolicited frame descriptor
 *
 * @return None
 */
static void
bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct bfa_fcs_s      *fcs = (struct bfa_fcs_s *) cbarg;
	struct fchs_s         *fchs = bfa_uf_get_frmbuf(uf);
	u16        len = bfa_uf_get_frmlen(uf);
	struct fc_vft_s       *vft;
	struct bfa_fcs_fabric_s *fabric;

	/**
	 * check for VFT header
	 */
	if (fchs->routing == FC_RTG_EXT_HDR &&
		fchs->cat_info == FC_CAT_VFT_HDR) {
		bfa_stats(fcs, uf.tagged);
		vft = bfa_uf_get_frmbuf(uf);
		if (fcs->port_vfid == vft->vf_id)
			fabric = &fcs->fabric;
		else
			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);

		/**
		 * drop frame if vfid is unknown
		 */
		if (!fabric) {
			bfa_assert(0);
			bfa_stats(fcs, uf.vfid_unknown);
			bfa_uf_free(uf);
			return;
		}

		/**
		 * skip vft header
		 */
		fchs = (struct fchs_s *) (vft + 1);
		len -= sizeof(struct fc_vft_s);

		bfa_trc(fcs, vft->vf_id);
	} else {
		bfa_stats(fcs, uf.untagged);
		fabric = &fcs->fabric;
	}

	bfa_trc(fcs, ((u32 *) fchs)[0]);
	bfa_trc(fcs, ((u32 *) fchs)[1]);
	bfa_trc(fcs, ((u32 *) fchs)[2]);
	bfa_trc(fcs, ((u32 *) fchs)[3]);
	bfa_trc(fcs, ((u32 *) fchs)[4]);
	bfa_trc(fcs, ((u32 *) fchs)[5]);
	bfa_trc(fcs, len);

	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
	bfa_uf_free(uf);
}
Example #11
static void
bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
		      enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_RSP_OK:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online);
		bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry);
		bfa_timer_start(itnim->fcs->bfa, &itnim->timer,
				bfa_fcs_itnim_timeout, itnim,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		/*
		 * don't discard fcxp; accept will reach the same state
		 */
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #12
/**
 * FDISC is sent and awaiting reply from fabric.
 */
static void
bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
		       enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
		bfa_lps_discard(vport->lps);
		bfa_fcs_vport_do_logo(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_lps_discard(vport->lps);
		break;

	case BFA_FCS_VPORT_SM_RSP_OK:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
		bfa_fcs_port_online(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_RSP_ERROR:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
		bfa_timer_start(__vport_bfa(vport), &vport->timer,
				bfa_fcs_vport_timeout, vport,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_VPORT_SM_RSP_FAILED:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		break;

	case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
		break;

	default:
		bfa_assert(0);
	}
}
Example #13
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
		fcport->myalpa =
			pevent->link_state.tl.loop_info.myalpa;

	/* QoS Details */
	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
Example #14
/*
 * ADISC to rport
 * Already did offline actions
 */
static void
bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_ACCEPTED:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_FAILED:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
			bfa_fcs_rport_timeout, rport,
			bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_DELETE:
	case RPSM_EVENT_SCN_OFFLINE:
	case RPSM_EVENT_LOGO_IMP:
	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_discard(rport->fcxp);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
			bfa_fcs_rport_timeout, rport,
			bfa_fcs_rport_del_timeout);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example #15
/**
 *   Called by fcs/port to notify transition to offline state.
 */
void
bfa_fcs_port_n2n_offline(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_n2n_s *n2n_port = &port->port_topo.pn2n;

	bfa_trc(port->fcs, port->pid);
	port->pid = 0;
	n2n_port->rem_port_wwn = 0;
	n2n_port->reply_oxid = 0;
}
Example #16
/**
 * Called when Rport becomes online
 */
void
bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
{
	bfa_trc(rport->fcs, rport->pid);

	if (__fcs_min_cfg(rport->port->fcs))
		return;

	if (bfa_fcs_fabric_is_switched(rport->port->fabric))
		bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
}
Example #17
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, fcport->cfg.maxfrsize);

	/* within range */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/* power of 2, unless it is the max frame size of 2112 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	fcport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
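The maxfrsize validation above combines a range check with the classic (v & (v - 1)) power-of-two test: a value v > 0 is a power of two exactly when that expression is zero. The sketch below reproduces the logic on its own; the DEMO_* constants are hypothetical stand-ins for FC_MIN_PDUSZ/FC_MAX_PDUSZ.

/*
 * Standalone sketch of the frame-size checks (assumed DEMO_* stand-ins, not
 * the driver's real constants).
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MIN_PDUSZ 512U	/* assumed stand-in for FC_MIN_PDUSZ */
#define DEMO_MAX_PDUSZ 2112U	/* FC maximum payload size */

static bool demo_maxfrsize_valid(unsigned int maxfrsize)
{
	if (maxfrsize > DEMO_MAX_PDUSZ || maxfrsize < DEMO_MIN_PDUSZ)
		return false;	/* out of range */

	/* power of 2, unless it is the maximum frame size */
	if (maxfrsize != DEMO_MAX_PDUSZ && (maxfrsize & (maxfrsize - 1)))
		return false;

	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_maxfrsize_valid(1024),	/* 1: power of two in range */
	       demo_maxfrsize_valid(2112),	/* 1: FC maximum */
	       demo_maxfrsize_valid(2000));	/* 0: not a power of two */
	return 0;
}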
Example #18
static void
bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_OFFLINE:
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example #19
/**
 * Configure default minimum ratelim speed
 */
bfa_status_t
bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, speed);

	/*
	 * Auto and speeds greater than the supported speed are invalid
	 */
	if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
		bfa_trc(bfa, pport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	pport->cfg.trl_def_speed = speed;

	return BFA_STATUS_OK;
}
Example #20
static void
bfa_fcs_port_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			       void *cbarg, bfa_status_t req_status,
			       u32 rsp_len, u32 resid_len,
			       struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s   *els_cmd;
	struct fc_ls_rjt_s    *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {

	case FC_ELS_ACC:
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);

		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);

		port->stats.ns_rejects++;

		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
Example #21
/**
 * 		IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
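The *_qfull states here and in Example #6 implement a park-and-resume pattern: when the request CQ has no room, the IO queues a wait element carrying a resume callback, and bfa_reqq_wcancel() removes it if the IO is aborted or fails before space frees up. The sketch below shows only the wait/resume half of that idea with hypothetical demo_* names; it is not the driver's bfa_reqq API.

/*
 * Minimal sketch (hypothetical demo_* names) of a queue-full wait list:
 * callers park a wait element with a resume callback, and the queue fires
 * the callbacks once space is available again.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_wqe {
	void (*resume)(void *cbarg);	/* called when queue space frees up */
	void *cbarg;
	struct demo_wqe *next;
};

static struct demo_wqe *demo_wait_head;

static void demo_reqq_wait(struct demo_wqe *wqe,
			   void (*resume)(void *), void *cbarg)
{
	wqe->resume = resume;
	wqe->cbarg = cbarg;
	wqe->next = demo_wait_head;
	demo_wait_head = wqe;
}

static void demo_reqq_resume(void)
{
	while (demo_wait_head) {
		struct demo_wqe *wqe = demo_wait_head;

		demo_wait_head = wqe->next;
		wqe->resume(wqe->cbarg);	/* e.g. send a QRESUME-style event */
	}
}

static void demo_io_resume(void *cbarg)
{
	printf("IO %s resumed\n", (const char *)cbarg);
}

int main(void)
{
	struct demo_wqe wqe;
	char tag[] = "iotag-7";

	demo_reqq_wait(&wqe, demo_io_resume, tag);
	demo_reqq_resume();	/* queue space available again */
	return 0;
}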
Example #22
static void
bfa_fcs_port_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			     bfa_status_t req_status, u32 rsp_len,
			       u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ms_s *ms = (struct bfa_fcs_port_ms_s *)cbarg;
	struct bfa_fcs_port_s *port = ms->port;
	struct ct_hdr_s       *cthdr = NULL;
	wwn_t          *gfn_resp;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
		return;
	}

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		gfn_resp = (wwn_t *) (cthdr + 1);
		/*
		 * check if it has actually changed
		 */
		if ((memcmp
		     ((void *)&bfa_fcs_port_get_fabric_name(port), gfn_resp,
		      sizeof(wwn_t)) != 0))
			bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
		bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
		return;
	}

	bfa_trc(port->fcs, cthdr->reason_code);
	bfa_trc(port->fcs, cthdr->exp_code);
	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
Example #23
/**
 * 		Start in offline state - awaiting linkup
 */
static void
bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns,
			   enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PORT_ONLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_sending);
		bfa_fcs_port_ns_send_plogi(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		break;

	default:
		bfa_assert(0);
	}
}
Example #24
/**
 * Lport cleanup is in progress since vport is being deleted. Fabric is
 * offline, so no LOGO is needed to complete vport deletion.
 */
static void
bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
			 enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELCOMP:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
		bfa_fcs_vport_free(vport);
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		break;

	default:
		bfa_assert(0);
	}
}
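Example #25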
static void
bfa_iocfc_stats_timeout(void *bfa_arg)
{
	struct bfa_s		*bfa = bfa_arg;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa_trc(bfa, 0);

	iocfc->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
}
Example #26
/**
 * 		Start in offline state - awaiting NS to send start.
 */
static void
bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms,
			   enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_ONLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_sending);
		bfa_fcs_port_ms_send_plogi(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
Example #27
/**
 * 		IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_assert(0);
	}
}
Example #28
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s	*mod = BFA_SGPG_MOD(bfa);
	int				i;
	struct bfa_sgpg_s		*hsgpg;
	struct bfi_sgpg_s 	*sgpg;
	u64		align_len;

	union {
		u64        pa;
		union bfi_addr_u      addr;
	} sgpg_pa;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
		bfa_os_memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		hsgpg->sgpg_pa = sgpg_pa.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
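bfa_sgpg_attach() first rounds the carved-out DMA area up to an SG-page boundary and advances the kva/dma pointers by the same align_len, which is why the later bfa_assert() on sgpg_pa.pa holds. The sketch below shows that roundup arithmetic in isolation; DEMO_SGPG_SZ and demo_roundup() are assumptions standing in for sizeof(struct bfi_sgpg_s) and BFA_SGPG_ROUNDUP().

/*
 * Standalone sketch of the alignment arithmetic (assumed DEMO_SGPG_SZ and
 * demo_roundup(), not the driver's actual macro): round a DMA address up to
 * the next SG-page multiple and advance by the resulting offset.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SGPG_SZ 512ULL	/* assumed power-of-two SG page size */

/* round x up to the next multiple of the (power-of-two) page size */
static uint64_t demo_roundup(uint64_t x)
{
	return (x + (DEMO_SGPG_SZ - 1)) & ~(DEMO_SGPG_SZ - 1);
}

int main(void)
{
	uint64_t pa = 0x1000ULL + 40;		/* unaligned DMA base */
	uint64_t align_len = demo_roundup(pa) - pa;

	printf("pa=0x%llx align_len=%llu aligned=0x%llx\n",
	       (unsigned long long)pa,
	       (unsigned long long)align_len,
	       (unsigned long long)(pa + align_len));
	return 0;
}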
Example #29
/*
 * bfa_cee_get_attr_isr()
 *
 * @brief CEE ISR for get-attributes responses from f/w
 *
 * @param[in] cee - Pointer to the CEE module
 *		    status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
	struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;

	cee->get_attr_status = status;
	bfa_trc(cee, 0);
	if (status == BFA_STATUS_OK) {
		bfa_trc(cee, 0);
		memcpy(cee->attr, cee->attr_dma.kva,
			sizeof(struct bfa_cee_attr_s));
		lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
		lldp_cfg->enabled_system_cap =
				be16_to_cpu(lldp_cfg->enabled_system_cap);
	}
	cee->get_attr_pending = BFA_FALSE;
	if (cee->cbfn.get_attr_cbfn) {
		bfa_trc(cee, 0);
		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
	}
}
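The ISR above copies the attribute block out of the DMA buffer and then converts the 16-bit LLDP fields from big-endian wire order to host order with be16_to_cpu(). The user-space sketch below demonstrates the same fixup; demo_lldp_cfg and demo_be16_to_cpu() are hypothetical stand-ins for the driver's structure and the kernel helper.

/*
 * User-space sketch of the endianness fixup (hypothetical demo_* stand-ins):
 * fields copied from a big-endian wire buffer are byte-swapped to host order.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_lldp_cfg {
	uint16_t time_to_live;		/* big-endian on the wire */
	uint16_t enabled_system_cap;	/* big-endian on the wire */
};

static uint16_t demo_be16_to_cpu(uint16_t be)
{
	const uint8_t *p = (const uint8_t *)&be;

	/* interpret the stored bytes as big-endian, regardless of host order */
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* pretend these four bytes came straight out of the DMA buffer */
	uint8_t wire[4] = { 0x00, 0x78, 0x00, 0x05 };
	struct demo_lldp_cfg cfg;

	memcpy(&cfg, wire, sizeof(cfg));
	cfg.time_to_live = demo_be16_to_cpu(cfg.time_to_live);
	cfg.enabled_system_cap = demo_be16_to_cpu(cfg.enabled_system_cap);

	printf("ttl=%u caps=0x%04x\n",
	       (unsigned)cfg.time_to_live,
	       (unsigned)cfg.enabled_system_cap);
	return 0;
}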
Example #30
/**
 * Configure port topology.
 */
bfa_status_t
bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, topology);
	bfa_trc(bfa, fcport->cfg.topology);

	switch (topology) {
	case BFA_PPORT_TOPOLOGY_P2P:
	case BFA_PPORT_TOPOLOGY_LOOP:
	case BFA_PPORT_TOPOLOGY_AUTO:
		break;

	default:
		return BFA_STATUS_EINVAL;
	}

	fcport->cfg.topology = topology;
	return BFA_STATUS_OK;
}