Example no. 1
/*
 * Rport has sent LOGO. Awaiting FC-4 offline completion callback.
 */
static void
bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
		bfa_fcs_rport_hal_offline(rport);
		break;

	case RPSM_EVENT_DELETE:
		if (rport->pid && (rport->prlo == BFA_TRUE))
			bfa_fcs_rport_send_prlo_acc(rport);
		if (rport->pid && (rport->prlo == BFA_FALSE))
			bfa_fcs_rport_send_logo_acc(rport);

		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
		break;

	case RPSM_EVENT_SCN_ONLINE:
	case RPSM_EVENT_SCN_OFFLINE:
	case RPSM_EVENT_HCB_ONLINE:
	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example no. 2
static void
bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns,
				 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
		bfa_fcs_port_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_timer_stop(&ns->timer);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 3
static void
bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * TBD: for certain reject codes, we don't need to retry
		 */
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_gid_ft_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 4
static void
bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_TIMEOUT:
		/* re-send the RPSC */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		bfa_timer_stop(&rpf->timer);
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example no. 5
/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
Example no. 6
static void
bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms,
			     enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_timer_stop(&ms->timer);
		break;

	default:
		bfa_sm_fault(ms->port->fcs, event);
	}
}
Example no. 7
/*
 * This state is set when a discovered rport is also in initiator mode.
 * This ITN is marked as no_op; it is not active and will not be turned
 * into the online state.
 */
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	/*
	 * fcs_online is expected here for well known initiator ports
	 */
	case BFA_FCS_ITNIM_SM_FCS_ONLINE:
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
	case BFA_FCS_ITNIM_SM_INITIATOR:
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
Example no. 8
/**
 * Memory initialization.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;

	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/**
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PPORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
Example no. 9
/**
 * Offline state - awaiting ONLINE event from fabric SM.
 */
static void
bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
			 enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->fdisc_retries = 0;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * This can happen if the vport couldn't be initialized
		 * because NPIV was not enabled on the switch; in that case
		 * we put the vport in the offline state. However, the link
		 * can also go down and cause this event to be sent when we
		 * are already offline. Ignore it.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 10
static void
bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns,
			 enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_plogi_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_rspn_id);
		bfa_fcs_port_ns_send_rspn_id(ns, NULL);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 11
static void
bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
			 enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_START:
		if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
		    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
			bfa_fcs_vport_do_fdisc(vport);
		} else {
			/*
			 * Fabric is not online or NPIV is not supported;
			 * leave the vport offline.
			 */
			vport->vport_stats.fab_no_npiv++;
			bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		}
		break;

	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_ONLINE:
	case BFA_FCS_VPORT_SM_OFFLINE:
		/*
		 * Ignore fabric ONLINE/OFFLINE events until the vport
		 * is started.
		 */
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 12
/*
 *		Beginning state.
 */
static void
bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_PLOGI_SEND:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
		rport->plogi_retries = 0;
		bfa_fcs_rport_send_plogi(rport, NULL);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
	case RPSM_EVENT_ADDRESS_DISC:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_nsdisc(rport, NULL);
		break;
	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example no. 13
/*
 *		Rport is being deleted. FC-4s are offline. Awaiting BFA rport
 *		offline callback to send LOGO.
 */
static void
bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
		 enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending);
		bfa_fcs_rport_send_logo(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_fcs_rport_send_logo_acc(rport);
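		/* fall through */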
	case RPSM_EVENT_PRLO_RCVD:
		if (rport->prlo == BFA_TRUE)
			bfa_fcs_rport_send_prlo_acc(rport);

		bfa_sm_set_state(rport, bfa_fcs_rport_sm_delete_pending);
		break;

	case RPSM_EVENT_SCN_ONLINE:
	case RPSM_EVENT_SCN_OFFLINE:
	case RPSM_EVENT_ADDRESS_CHANGE:
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example no. 14
/*
 *		LOGO needs to be sent to rport. Awaiting FC-4 offline completion
 *		callback.
 */
static void
bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
	 enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
		bfa_fcs_rport_hal_offline(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_fcs_rport_send_logo_acc(rport);
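		/* fall through */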
	case RPSM_EVENT_PRLO_RCVD:
		if (rport->prlo == BFA_TRUE)
			bfa_fcs_rport_send_prlo_acc(rport);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_off_delete);
		break;

	case RPSM_EVENT_HCB_ONLINE:
	case RPSM_EVENT_DELETE:
		/* Rport is being deleted */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example no. 15
static void
bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms,
			  enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		/*
		 * now invoke MS related sub-modules
		 */
		bfa_fcs_port_fdmi_offline(ms);
		break;

	case MSSM_EVENT_PORT_FABRIC_RSCN:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_sending);
		ms->retry_cnt = 0;
		bfa_fcs_port_ms_send_gfn(ms, NULL);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 16
/**
 * FDISC attempt failed - a timer is active to retry FDISC.
 */
static void
bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
			     enum bfa_fcs_vport_event event)
{
	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), event);

	switch (event) {
	case BFA_FCS_VPORT_SM_DELETE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
		bfa_timer_stop(&vport->timer);
		bfa_fcs_port_delete(&vport->lport);
		break;

	case BFA_FCS_VPORT_SM_OFFLINE:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
		bfa_timer_stop(&vport->timer);
		break;

	case BFA_FCS_VPORT_SM_TIMEOUT:
		bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
		vport->vport_stats.fdisc_retries++;
		vport->fdisc_retries++;
		bfa_fcs_vport_do_fdisc(vport);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 17
static void
bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event)
{
	bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
	bfa_trc(ms->port->fcs, event);

	switch (event) {
	case MSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gfn_retry);
			bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
					&ms->timer, bfa_fcs_port_ms_timeout, ms,
					BFA_FCS_RETRY_TIMEOUT);
		} else {
			bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
			ms->retry_cnt = 0;
		}
		break;

	case MSSM_EVENT_RSP_OK:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online);
		break;

	case MSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
		bfa_fcxp_discard(ms->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 18
static void
bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
			    enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_TIMEOUT:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_timer_stop(&itnim->timer);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_timer_stop(&itnim->timer);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 19
static void
bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FCS_ONLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		itnim->prli_retries = 0;
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
Example no. 20
static void
bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
		bfa_fcb_itnim_offline(itnim->itnim_drv);
		bfa_itnim_offline(itnim->bfa_itnim);
		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE)
			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
		else
			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 21
static void
bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi,
			       enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_TIMEOUT:
		/*
		 * Retry Timer Expired. Re-send
		 */
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa);
		bfa_fcs_port_fdmi_send_rpa(fdmi, NULL);
		break;

	case FDMISM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		bfa_timer_stop(&fdmi->timer);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
Example no. 22
/*
 * This state is set when a discovered rport is also in initiator mode.
 * This ITN is marked as no_op; it is not active and will not be turned
 * into the online state.
 */
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
			   enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_rport_itnim_ack(itnim->rport);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
	case BFA_FCS_ITNIM_SM_ONLINE:
	case BFA_FCS_ITNIM_SM_INITIATOR:
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 23
static void
bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
		 enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FRMSENT:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
Example no. 24
static void
bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		break;

	case NSSM_EVENT_NS_QUERY:
		/*
		 * If the port role is Initiator Mode, issue an NS query.
		 * If it is Target Mode, skip this and go to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
			bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		}
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 25
static void
bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);
		bfa_assert(pport->event_cbfn);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
		/**
		 * If QoS is enabled and it is not online,
		 * send a separate event.
		 */
		if ((pport->cfg.qos_enabled)
		    && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);

		break;

	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
Example no. 26
/*
 *		An SCN event is received in ONLINE state. ADISC is sent to the rport.
 *		FC-4s are paused.
 */
static void
bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport,
				enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_ACCEPTED:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		/*
		 * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
		 * At least go offline when a PLOGI is received.
		 */
		bfa_fcxp_discard(rport->fcxp);
		/*
		 * !!! fall through !!!
		 */

	case RPSM_EVENT_FAILED:
	case RPSM_EVENT_ADDRESS_CHANGE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcs_rport_hal_offline_action(rport);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_hal_offline_action(rport);
		break;

	case RPSM_EVENT_FAB_SCN:
		/*
		 * already processing RSCN
		 */
		break;

	case RPSM_EVENT_LOGO_IMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_hal_offline_action(rport);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_hal_offline_action(rport);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
Example no. 27
static void
bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns,
			  enum vport_ns_event event)
{
	bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
	bfa_trc(ns->port->fcs, event);

	switch (event) {
	case NSSM_EVENT_RSP_OK:

		/*
		 * If min cfg mode is enabled, we do not initiate rport
		 * discovery with the fabric. Instead, we will retrieve the
		 * boot targets from HAL/FW.
		 */
		if (__fcs_min_cfg(ns->port->fcs)) {
			bfa_fcs_port_ns_boot_target_disc(ns->port);
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
			return;
		}

		/*
		 * If the port role is Initiator Mode, issue an NS query.
		 * If it is Target Mode, skip this and go to online.
		 */
		if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft);
			bfa_fcs_port_ns_send_gid_ft(ns, NULL);
		} else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) {
			bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online);
		}
		/*
		 * kick off mgmt srvr state machine
		 */
		bfa_fcs_port_ms_online(ns->port);
		break;

	case NSSM_EVENT_RSP_ERROR:
		/*
		 * Start timer for a delayed retry
		 */
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id_retry);
		ns->port->stats.ns_retries++;
		bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer,
				bfa_fcs_port_ns_timeout, ns,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case NSSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline);
		bfa_fcxp_discard(ns->fcxp);
		break;

	default:
		bfa_assert(0);
	}
}
Example no. 28
/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can already be in cleanup state due to a TM command;
		 * the second cleanup request comes from the ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
Example no. 29
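/**
 * FDMI initialization: start FDMI in the offline state if it is enabled,
 * otherwise mark it disabled.
 */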
void
bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms)
{
	struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;

	fdmi->ms = ms;
	if (ms->port->fcs->fdmi_enabled)
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
	else
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
}
Example no. 30
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}