/* * An SCN event is received in ONLINE state. ADISC is to rport. * FC-4s are paused. */ static void bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); break; case RPSM_EVENT_PLOGI_RCVD: /* * Too complex to cleanup FC-4 & rport and then acc to PLOGI. * At least go offline when a PLOGI is received. */ bfa_fcxp_discard(rport->fcxp); /* * !!! fall through !!! */ case RPSM_EVENT_FAILED: case RPSM_EVENT_ADDRESS_CHANGE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: /* * already processing RSCN */ break; case RPSM_EVENT_LOGO_IMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } }
/* * PLOGI is being sent. */ static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_free(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_FAB_SCN: /* query the NS */ bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); WARN_ON(!(bfa_fcport_get_topology(rport->port->fcs->bfa) != BFA_PORT_TOPOLOGY_LOOP)); bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); break; case RPSM_EVENT_LOGO_IMP: rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; default: bfa_sm_fault(rport->fcs, event); } }
/** * IO is being cleaned up (implicit abort), waiting for completion from * firmware. */ static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: case BFA_IOIM_SM_DONE: case BFA_IOIM_SM_FREE: break; case BFA_IOIM_SM_ABORT: /** * IO is already being aborted implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; break; case BFA_IOIM_SM_ABORT_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_ABORT_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_COMP_UTAG: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; case BFA_IOIM_SM_CLEANUP: /** * IO can be in cleanup state already due to TM command. 2nd cleanup * request comes from ITN offline event. */ break; default: bfa_sm_fault(ioim->bfa, event); } }
static void bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns, enum vport_ns_event event) { bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn); bfa_trc(ns->port->fcs, event); switch (event) { case NSSM_EVENT_RSP_OK: /* * If min cfg mode is enabled, we donot initiate rport * discovery with the fabric. Instead, we will retrieve the * boot targets from HAL/FW. */ if (__fcs_min_cfg(ns->port->fcs)) { bfa_fcs_port_ns_boot_target_disc(ns->port); bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online); return; } /* * If the port role is Initiator Mode issue NS query. * If it is Target Mode, skip this and go to online. */ if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) { bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_sending_gid_ft); bfa_fcs_port_ns_send_gid_ft(ns, NULL); } else if (BFA_FCS_VPORT_IS_TARGET_MODE(ns->port)) { bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_online); } /* * kick off mgmt srvr state machine */ bfa_fcs_port_ms_online(ns->port); break; case NSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_rff_id_retry); ns->port->stats.ns_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port), &ns->timer, bfa_fcs_port_ns_timeout, ns, BFA_FCS_RETRY_TIMEOUT); break; case NSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ns, bfa_fcs_port_ns_sm_offline); bfa_fcxp_discard(ns->fcxp); break; default: bfa_sm_fault(ns->port->fcs, event); } }
/** * IO is being aborted, waiting for completion from firmware. */ static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: case BFA_IOIM_SM_DONE: case BFA_IOIM_SM_FREE: break; case BFA_IOIM_SM_ABORT_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_ABORT_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_COMP_UTAG: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_CLEANUP: bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); ioim->iosp->abort_explicit = BFA_FALSE; if (bfa_ioim_send_abort(ioim)) bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); else { bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); bfa_reqq_wait(ioim->bfa, ioim->itnim->reqq, &ioim->iosp->reqq_wait); } break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } }
/* * An SCN event is received in ONLINE state. NS query is sent to rport. * FC-4s are paused. */ static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); bfa_fcs_rport_send_adisc(rport, NULL); break; case RPSM_EVENT_FAILED: rport->ns_retries++; if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery_sending); bfa_fcs_rport_send_nsdisc(rport, NULL); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); } break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_PLOGI_COMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_LOGO_IMP: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } }
static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_QRESUME: bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); bfa_fcport_send_enable(fcport); break; case BFA_FCPORT_SM_STOP: bfa_reqq_wcancel(&fcport->reqq_wait); bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: /** * Already enable is in progress. */ break; case BFA_FCPORT_SM_DISABLE: /** * Just send disable request to firmware when room becomes * available in request queue. */ bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); bfa_reqq_wcancel(&fcport->reqq_wait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /** * Possible to get link events when doing back-to-back * enable/disables. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_reqq_wcancel(&fcport->reqq_wait); bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } }
static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_RSP_OK: if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); else bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hal_rport_online); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_RSP_ERROR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry); bfa_timer_start(itnim->fcs->bfa, &itnim->timer, bfa_fcs_itnim_timeout, itnim, BFA_FCS_RETRY_TIMEOUT); break; case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_fcxp_discard(itnim->fcxp); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } }
/** * Active IO is being aborted, waiting for room in request CQ. */ static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_QRESUME: bfa_sm_set_state(ioim, bfa_ioim_sm_abort); bfa_ioim_send_abort(ioim); break; case BFA_IOIM_SM_CLEANUP: bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); ioim->iosp->abort_explicit = BFA_FALSE; bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); break; case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } }
static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_FWRSP: case BFA_FCPORT_SM_LINKDOWN: bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); break; case BFA_FCPORT_SM_LINKUP: bfa_fcport_update_linkinfo(fcport); bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); bfa_assert(fcport->event_cbfn); bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); break; case BFA_FCPORT_SM_ENABLE: /** * Already being enabled. */ break; case BFA_FCPORT_SM_DISABLE: if (bfa_fcport_send_disable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: bfa_sm_fault(fcport->bfa, event); } }
static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_FWRSP: bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; case BFA_FCPORT_SM_DISABLE: /** * Already being disabled. */ break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: /** * Possible to get link events when doing back-to-back * enable/disables. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } }
static void bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) { bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn); bfa_trc(ms->port->fcs, event); switch (event) { case MSSM_EVENT_RSP_ERROR: /* * Start timer for a delayed retry */ bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_plogi_retry); ms->port->stats.ms_retries++; bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port), &ms->timer, bfa_fcs_port_ms_timeout, ms, BFA_FCS_RETRY_TIMEOUT); break; case MSSM_EVENT_RSP_OK: /* * since plogi is done, now invoke MS related sub-modules */ bfa_fcs_port_fdmi_online(ms); /** * if this is a Vport, go to online state. */ if (ms->port->vport) { bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_online); break; } /* * For a base port we need to get the * switch's IP address. */ bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_gmal_sending); bfa_fcs_port_ms_send_gmal(ms, NULL); break; case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); bfa_fcxp_discard(ms->fcxp); break; default: bfa_sm_fault(ms->port->fcs, event); } }
/* * Rport is ONLINE. FC-4s active. */ static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FAB_SCN: if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery_sending); rport->ns_retries = 0; bfa_fcs_rport_send_nsdisc(rport, NULL); } else { bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online_sending); bfa_fcs_rport_send_adisc(rport, NULL); } break; case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_SCN_ONLINE: case RPSM_EVENT_PLOGI_COMP: break; default: bfa_sm_fault(rport->fcs, event); } }
/**
 * Link-notify state machine: link is up and the host has been notified.
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		     enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Queue a link-down callback and wait for its delivery */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/** * Active IO is being cleaned up, waiting for room in request CQ. */ static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_QRESUME: bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup); bfa_ioim_send_abort(ioim); break; case BFA_IOIM_SM_ABORT: /** * IO is already being cleaned up implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; break; case BFA_IOIM_SM_COMP_GOOD: case BFA_IOIM_SM_COMP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_DONE: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_reqq_wcancel(&ioim->iosp->reqq_wait); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } }
static void bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi, enum port_fdmi_event event) { struct bfa_fcs_port_s *port = fdmi->ms->port; bfa_trc(port->fcs, port->port_cfg.pwwn); bfa_trc(port->fcs, event); switch (event) { case FDMISM_EVENT_RSP_ERROR: /* * if max retries have not been reached, start timer for a * delayed retry */ if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) { bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_rhba_retry); bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port), &fdmi->timer, bfa_fcs_port_fdmi_timeout, fdmi, BFA_FCS_RETRY_TIMEOUT); } else { /* * set state to offline */ bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); } break; case FDMISM_EVENT_RSP_OK: /* * Initiate Register Port Attributes */ bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_sending_rpa); fdmi->retry_cnt = 0; bfa_fcs_port_fdmi_send_rpa(fdmi, NULL); break; case FDMISM_EVENT_PORT_OFFLINE: bfa_fcxp_discard(fdmi->fcxp); bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); break; default: bfa_sm_fault(port->fcs, event); } }
/* * PLOGI is complete. Awaiting BFA rport online callback. FC-4s * are offline. */ static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_HCB_ONLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_online); bfa_fcs_rport_hal_online_action(rport); break; case RPSM_EVENT_PLOGI_COMP: break; case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_LOGO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: case RPSM_EVENT_SCN_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_PLOGI_RCVD: rport->plogi_pending = BFA_TRUE; bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcs_rport_fcs_offline_action(rport); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcs_rport_fcs_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } }
static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_RPSC_COMP: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); /* Update speed info in f/w via BFA */ if (rpf->rpsc_speed != BFA_PPORT_SPEED_UNKNOWN) bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed); else if (rpf->assigned_speed != BFA_PPORT_SPEED_UNKNOWN) bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed); break; case RPFSM_EVENT_RPSC_FAIL: /* RPSC not supported by rport */ bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); break; case RPFSM_EVENT_RPSC_ERROR: /* need to retry...delayed a bit. */ if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) { bfa_timer_start(rport->fcs->bfa, &rpf->timer, bfa_fcs_rpf_timeout, rpf, BFA_FCS_RPF_RETRY_TIMEOUT); bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry); } else { bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online); } break; case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); bfa_fcxp_discard(rpf->fcxp); rpf->rpsc_retries = 0; break; default: bfa_sm_fault(rport->fcs, event); } }
/* * Rport is going offline. Awaiting FC-4 offline completion callback. */ static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FC4_OFFLINE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); bfa_fcs_rport_hal_offline(rport); break; case RPSM_EVENT_SCN_ONLINE: break; case RPSM_EVENT_LOGO_RCVD: /* * Rport is going offline. Just ack the logo */ bfa_fcs_rport_send_logo_acc(rport); break; case RPSM_EVENT_PRLO_RCVD: bfa_fcs_rport_send_prlo_acc(rport); break; case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_HCB_ONLINE: case RPSM_EVENT_FAB_SCN: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: /* * rport is already going offline. * SCN - ignore and wait till transitioning to offline state */ break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); break; default: bfa_sm_fault(rport->fcs, event); } }
/*
 * Response queue is stopped; only a START event moves it forward.
 */
static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* Already stopped - nothing to do */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * Command queue is ready to accept postings.
 */
static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_POST:
		/* A command was posted - ring the doorbell */
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* * An SCN event is received in ONLINE state. ADISC is being sent for * authenticating with rport. FC-4s are paused. */ static void bfa_fcs_rport_sm_adisc_online_sending(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_FCXP_SENT: bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_online); break; case RPSM_EVENT_DELETE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_ADDRESS_CHANGE: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; case RPSM_EVENT_FAB_SCN: break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline); bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe); bfa_fcs_rport_hal_offline_action(rport); break; default: bfa_sm_fault(rport->fcs, event); } }
/*
 * Response queue is ready and waiting for responses.
 */
static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_RESP:
		/* A response arrived - acknowledge via doorbell */
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/** * IO is waiting for SG pages. */ static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) { bfa_trc(ioim->bfa, ioim->iotag); bfa_trc(ioim->bfa, event); switch (event) { case BFA_IOIM_SM_SGALLOCED: if (!bfa_ioim_send_ioreq(ioim)) { bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); break; } bfa_sm_set_state(ioim, bfa_ioim_sm_active); break; case BFA_IOIM_SM_CLEANUP: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); bfa_ioim_notify_cleanup(ioim); break; case BFA_IOIM_SM_ABORT: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort, ioim); break; case BFA_IOIM_SM_HWFAIL: bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe); list_del(&ioim->qe); list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed, ioim); break; default: bfa_sm_fault(ioim->bfa, event); } }
/*
 * FDMI registration complete; steady ONLINE state.
 */
static void
bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
			     enum port_fdmi_event event)
{
	struct bfa_fcs_port_s *port = fdmi->ms->port;

	bfa_trc(port->fcs, port->port_cfg.pwwn);
	bfa_trc(port->fcs, event);

	switch (event) {
	case FDMISM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
		break;

	default:
		bfa_sm_fault(port->fcs, event);
	}
}
static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_TIMEOUT: if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) { itnim->prli_retries++; bfa_trc(itnim->fcs, itnim->prli_retries); bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); bfa_fcs_itnim_send_prli(itnim, NULL); } else { /* invoke target offline */ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); } break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_timer_stop(&itnim->timer); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_timer_stop(&itnim->timer); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_timer_stop(&itnim->timer); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } }
/* * ADISC to rport * Already did offline actions */ static void bfa_fcs_rport_sm_adisc_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) { bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPSM_EVENT_ACCEPTED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online); bfa_fcs_rport_hal_online(rport); break; case RPSM_EVENT_PLOGI_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); bfa_fcxp_discard(rport->fcxp); bfa_fcs_rport_send_plogiacc(rport, NULL); break; case RPSM_EVENT_FAILED: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; case RPSM_EVENT_DELETE: case RPSM_EVENT_SCN_OFFLINE: case RPSM_EVENT_LOGO_IMP: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); bfa_fcxp_discard(rport->fcxp); bfa_timer_start(rport->fcs->bfa, &rport->timer, bfa_fcs_rport_timeout, rport, bfa_fcs_rport_del_timeout); break; default: bfa_sm_fault(rport->fcs, event); } }
static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { bfa_trc(fcport->bfa, event); switch (event) { case BFA_FCPORT_SM_START: /** * Ignore start event for a port that is disabled. */ break; case BFA_FCPORT_SM_STOP: bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; case BFA_FCPORT_SM_ENABLE: if (bfa_fcport_send_enable(fcport)) bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; case BFA_FCPORT_SM_DISABLE: /** * Already disabled. */ break; case BFA_FCPORT_SM_HWFAIL: bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: bfa_sm_fault(fcport->bfa, event); } }
/**
 * Link-notify state machine: link is up, waiting for the up
 * notification to be delivered to the host.
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up notification delivered */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Link dropped before the up notification went out */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) { struct bfa_fcs_rport_s *rport = rpf->rport; bfa_trc(rport->fcs, rport->pwwn); bfa_trc(rport->fcs, rport->pid); bfa_trc(rport->fcs, event); switch (event) { case RPFSM_EVENT_RPORT_OFFLINE: bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline); rpf->rpsc_retries = 0; break; default: bfa_sm_fault(rport->fcs, event); } }