/*
 * Completion callback for the FDMI RPRT request: validate the transport
 * status and the CT response code, then drive the FDMI state machine.
 */
static void
bfa_fcs_port_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
				void *cbarg, bfa_status_t req_status,
				u32 rsp_len, u32 resid_len,
				struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)cbarg;
	struct bfa_fcs_port_s *port = fdmi->ms->port;
	struct ct_hdr_s *ct_rsp;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/* Transport-level failure: report an error to the state machine. */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
		return;
	}

	ct_rsp = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	ct_rsp->cmd_rsp_code = bfa_os_ntohs(ct_rsp->cmd_rsp_code);

	if (ct_rsp->cmd_rsp_code != CT_RSP_ACCEPT) {
		/* CT reject: trace reason/explanation codes before failing. */
		bfa_trc(port->fcs, ct_rsp->reason_code);
		bfa_trc(port->fcs, ct_rsp->exp_code);
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
		return;
	}

	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
}
/*
 * ITNIM state: a PRLI frame has been handed to the transmit path (or is
 * waiting on an fcxp). Handle completion of the send, or events that
 * abort it before the response arrives.
 */
static void
bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FRMSENT:
		/* PRLI queued for transmit; wait for the response. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		/*
		 * Remote port turned out to be an initiator: cancel the
		 * pending fcxp-wait and park in initiator state.
		 */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		/* Port went offline before the PRLI could be sent. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		/* ITNIM is being torn down; cancel the wait and free it. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
/*
 * This state is set when a discovered rport is also in initiator mode.
 * This ITN is marked as no_op and is not active and will not be turned
 * into online state.
 */
static void
bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	/*
	 * fcs_online is expected here for well known initiator ports
	 */
	case BFA_FCS_ITNIM_SM_FCS_ONLINE:
		/* Acknowledge to the rport SM; ITNIM itself stays put. */
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
	case BFA_FCS_ITNIM_SM_INITIATOR:
		/* Already in initiator mode; nothing more to do. */
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
/*
 * Completion callback for the RPSC2 (Report Port Speed Capabilities)
 * request. On accept, extract the remote port's operating speed and
 * complete the RPF state machine; on reject or error, fail it.
 */
static void
bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			bfa_status_t req_status, u32 rsp_len,
			u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct fc_ls_rjt_s *ls_rjt;
	struct fc_rpsc2_acc_s *rpsc2_acc;
	u16 num_ents;

	bfa_trc(rport->fcs, req_status);

	/* Transport-level failure: count timeouts and report an error. */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		if (req_status == BFA_STATUS_ETIMER)
			rport->stats.rpsc_failed++;
		bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
		return;
	}

	rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
	if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
		rport->stats.rpsc_accs++;
		num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
		bfa_trc(rport->fcs, num_ents);
		if (num_ents > 0) {
			/*
			 * NOTE(review): asserting on remote-supplied payload;
			 * presumably the first entry must not be our own
			 * rport's PID -- TODO confirm against the RPSC2 spec.
			 */
			bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
			bfa_trc(rport->fcs,
				bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
			bfa_trc(rport->fcs,
				bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
			bfa_trc(rport->fcs,
				bfa_os_ntohs(rpsc2_acc->port_info[0].index));
			bfa_trc(rport->fcs, rpsc2_acc->port_info[0].type);

			/* A zero speed field means no usable speed info. */
			if (rpsc2_acc->port_info[0].speed == 0) {
				bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
				return;
			}

			rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
				bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
		} else {
			/*
			 * Fix: an accept carrying no port entries previously
			 * sent no event at all, leaving the RPF state machine
			 * stuck waiting for a response. Treat it as an error
			 * so the state machine can recover/retry.
			 */
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
		}
	} else {
		/* LS_RJT: distinguish "command not supported" from errors. */
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
		bfa_trc(rport->fcs, ls_rjt->reason_code);
		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
		rport->stats.rpsc_rejects++;
		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
		else
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
	}
}
/*
 * Completion callback for the PLOGI sent to the fabric Name Server.
 * Classifies the ELS response (ACC / LS_RJT / unknown), updates the NS
 * statistics counters, and drives the NS state machine accordingly.
 */
static void
bfa_fcs_port_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			void *cbarg, bfa_status_t req_status,
			u32 rsp_len, u32 resid_len,
			struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)cbarg;
	struct bfa_fcs_port_s *port = ns->port;
	/* struct fc_logi_s *plogi_resp; */
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, req_status);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		port->stats.ns_plogi_rsp_err++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {
	case FC_ELS_ACC:
		/* An accept shorter than a full PLOGI payload is invalid. */
		if (rsp_len < sizeof(struct fc_logi_s)) {
			bfa_trc(port->fcs, rsp_len);
			port->stats.ns_plogi_acc_err++;
			bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
			break;
		}
		port->stats.ns_plogi_accepts++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		/* Rejected: trace the reason codes for diagnosis. */
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);
		port->stats.ns_rejects++;
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
		break;

	default:
		/* Unexpected ELS code in the response. */
		port->stats.ns_plogi_unknown_rsp++;
		bfa_trc(port->fcs, els_cmd->els_code);
		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
	}
}
/**
 * FDISC Response
 *
 * Completion callback from the LPS module for a vport's FDISC. Maps the
 * completion status onto vport statistics and state-machine events.
 */
void
bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
{
	struct bfa_fcs_vport_s *vport = uarg;

	bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
	bfa_trc(__vport_fcs(vport), status);

	switch (status) {
	case BFA_STATUS_OK:
		/*
		 * Initialiaze the V-Port fields
		 */
		__vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
		vport->vport_stats.fdisc_accepts++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
		break;

	case BFA_STATUS_INVALID_MAC:
		/*
		 * Only for CNA
		 */
		vport->vport_stats.fdisc_acc_bad++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		break;

	case BFA_STATUS_EPROTOCOL:
		/* Classify the protocol failure for statistics only. */
		switch (bfa_lps_get_extstatus(vport->lps)) {
		case BFA_EPROTO_BAD_ACCEPT:
			vport->vport_stats.fdisc_acc_bad++;
			break;

		case BFA_EPROTO_UNKNOWN_RSP:
			vport->vport_stats.fdisc_unknown_rsp++;
			break;

		default:
			break;
		}
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric rejected the FDISC; reject handling may retry. */
		vport->vport_stats.fdisc_rejects++;
		bfa_fcs_vport_fdisc_rejected(vport);
		break;

	default:
		vport->vport_stats.fdisc_rsp_err++;
		bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
	}
}
/*
 * ITNIM state: PRLI has been sent; waiting for the PRLI response.
 */
static void
bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
		enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_RSP_OK:
		/*
		 * PRLI accepted. If the remote advertised initiator-only
		 * function, park in initiator state; otherwise proceed to
		 * bring the HAL rport online.
		 */
		if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR)
			bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		else
			bfa_sm_set_state(itnim,
				bfa_fcs_itnim_sm_hal_rport_online);

		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_RSP_ERROR:
		/* Response error: back off and retry after a timeout. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry);
		bfa_timer_start(itnim->fcs->bfa, &itnim->timer,
				bfa_fcs_itnim_timeout, itnim,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
		/* Remote does not support FCP target function; go offline. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		/* Discard the outstanding PRLI exchange and go offline. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		/* Remote is an initiator; abandon the PRLI exchange. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		/* Tear down: discard the exchange and free the ITNIM. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
		bfa_fcxp_discard(itnim->fcxp);
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
/**
 * Use this function to instantiate a new FCS vport object. This
 * function will not trigger any HW initialization process (which will be
 * done in vport_start() call)
 *
 * param[in] vport - pointer to bfa_fcs_vport_t. This space
 *  needs to be allocated by the driver.
 * param[in] fcs - FCS instance
 * param[in] vport_cfg - vport configuration
 * param[in] vf_id - VF_ID if vport is created within a VF.
 *  FC_VF_ID_NULL to specify base fabric.
 * param[in] vport_drv - Opaque handle back to the driver's vport
 *  structure
 *
 * retval BFA_STATUS_OK - on success.
 * retval BFA_STATUS_FAILED - on failure.
 */
bfa_status_t
bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
		u16 vf_id, struct bfa_port_cfg_s *vport_cfg,
		struct bfad_vport_s *vport_drv)
{
	/* A vport must have a valid, non-zero PWWN. */
	if (vport_cfg->pwwn == 0)
		return BFA_STATUS_INVALID_WWN;

	/* The vport PWWN must not collide with the base port's PWWN. */
	if (bfa_fcs_port_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
		return BFA_STATUS_VPORT_WWN_BP;

	/* Reject duplicate vports within the same VF. */
	if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
		return BFA_STATUS_VPORT_EXISTS;

	/* Enforce the fabric-wide vport count limit. */
	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
		bfa_fcs_vport_get_max(fcs))
		return BFA_STATUS_VPORT_MAX;

	/*
	 * NOTE(review): LPS allocation failure reuses BFA_STATUS_VPORT_MAX
	 * rather than a dedicated out-of-resource code -- confirm callers
	 * do not need to distinguish the two cases.
	 */
	vport->lps = bfa_lps_alloc(fcs->bfa);
	if (!vport->lps)
		return BFA_STATUS_VPORT_MAX;

	vport->vport_drv = vport_drv;
	bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);

	bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport);

	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);

	return BFA_STATUS_OK;
}
/**
 * Driver I/O abort request.
 *
 * Traces the IO tag, bumps the abort statistics counter, and posts the
 * abort event to the IO state machine.
 */
void
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);

	bfa_fcpim_stats(ioim->fcpim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
}
static void bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg; struct bfa_fcs_rport_s *rport = rpf->rport; struct bfa_fcs_port_s *port = rport->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(rport->fcs, rport->pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) { bfa_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe, bfa_fcs_rpf_send_rpsc2, rpf); return; } rpf->fcxp = fcxp; len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_port_get_fcid(port), &rport->pid, 1); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response, rpf, FC_MAX_PDUSZ, FC_RA_TOV); rport->stats.rpsc_sent++; bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT); }
/*
 * Timer callback: forward the timeout to the FDMI state machine.
 */
static void
bfa_fcs_port_fdmi_timeout(void *arg)
{
	struct bfa_fcs_port_fdmi_s *fdmi = (struct bfa_fcs_port_fdmi_s *)arg;

	bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
}
bfa_status_t bfa_fcport_enable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); struct bfa_iocfc_s *iocfc = &bfa->iocfc; struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; /* if port is PBC disabled, return error */ if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) { bfa_trc(bfa, fcport->pwwn); return BFA_STATUS_PBC; } if (bfa_ioc_is_disabled(&bfa->ioc)) return BFA_STATUS_IOC_DISABLED; if (fcport->diag_busy) return BFA_STATUS_DIAG_BUSY; else if (bfa_sm_cmp_state (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait)) return BFA_STATUS_DEVBUSY; bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); return BFA_STATUS_OK; }
static void bfa_fcs_port_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_port_ms_s *ms = ms_cbarg; struct bfa_fcs_port_s *port = ms->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) { bfa_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe, bfa_fcs_port_ms_send_gfn, ms); return; } ms->fcxp = fcxp; len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_port_get_fcid(port), bfa_lps_get_peer_nwwn(port->fabric->lps)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_gfn_response, (void *)ms, FC_MAX_PDUSZ, FC_FCCT_TOV); bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT); }
/**
 * Notification from rport that PLOGI is complete to initiate FC-4 session.
 *
 * Well-known-address ports are never real SCSI targets, so they are
 * driven to initiator state instead of online.
 */
void
bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
{
	itnim->stats.onlines++;

	if (BFA_FCS_PID_IS_WKA(itnim->rport->pid)) {
		/* Well known address: mark as initiator, not online. */
		itnim->stats.initiator++;
		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
		return;
	}

	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
}
/**
 * Use this function to delete a vport object. Fabric object should
 * be stopped before this function call.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * retval BFA_STATUS_OK - always; deletion proceeds asynchronously
 *         through the vport state machine.
 */
bfa_status_t
bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);

	return BFA_STATUS_OK;
}
/**
 * Use this function to quiesce the vport object. This function returns
 * immediately; when the vport is actually stopped, the
 * bfa_drv_vport_stop_cb() will be called.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * retval BFA_STATUS_OK - always; stop proceeds asynchronously through
 *         the vport state machine.
 */
bfa_status_t
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
{
	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);

	return BFA_STATUS_OK;
}
static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_itnim_s *itnim = itnim_cbarg; struct bfa_fcs_rport_s *rport = itnim->rport; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_trc(itnim->fcs, itnim->rport->pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { itnim->stats.fcxp_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, bfa_fcs_itnim_send_prli, itnim, BFA_TRUE); return; } itnim->fcxp = fcxp; len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0); bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, FC_ELS_TOV); itnim->stats.prli_sent++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); }
/*
 * Request-queue resume callback: notify the fcport state machine that
 * queue space is available again.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
/** * Register FC4-Types * TBD, Need to retrieve this from the OS driver, in case IPFC is enabled ? */ static void bfa_fcs_port_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_port_ns_s *ns = ns_cbarg; struct bfa_fcs_port_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->port_cfg.pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) { port->stats.ns_rftid_alloc_wait++; bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_port_ns_send_rft_id, ns); return; } ns->fcxp = fcxp; len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_fcs_port_get_fcid(port), 0, port->port_cfg.roles); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_rft_id_response, (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV); port->stats.ns_rftid_sent++; bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT); }
/** * Query Fabric for FC4-Types Devices. * * TBD : Need to use a local (FCS private) response buffer, since the response * can be larger than 2K. */ static void bfa_fcs_port_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_port_ns_s *ns = ns_cbarg; struct bfa_fcs_port_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) { port->stats.ns_gidft_alloc_wait++; bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_port_ns_send_gid_ft, ns); return; } ns->fcxp = fcxp; /* * This query is only initiated for FCP initiator mode. */ len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), ns->port->pid, FC_TYPE_FCP); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_gid_ft_response, (void *)ns, bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_RA_TOV); port->stats.ns_gidft_sent++; bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT); }
/*
 * Request-queue resume callback: notify the pport state machine that
 * queue space is available again.
 */
static void
bfa_pport_qresume(void *cbarg)
{
	struct bfa_pport_s *port = cbarg;

	bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
}
static void bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_port_ns_s *ns = ns_cbarg; struct bfa_fcs_port_s *port = ns->port; struct fchs_s fchs; int len; struct bfa_fcxp_s *fcxp; bfa_trc(port->fcs, port->pid); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); if (!fcxp) { port->stats.ns_plogi_alloc_wait++; bfa_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe, bfa_fcs_port_ns_send_plogi, ns); return; } ns->fcxp = fcxp; len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_os_hton3b(FC_NAME_SERVER), bfa_fcs_port_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, bfa_pport_get_maxfrsize(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response, (void *)ns, FC_MAX_PDUSZ, FC_RA_TOV); port->stats.ns_plogi_sent++; bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT); }
/*
 * ITNIM state: offline (initial/idle state). Waits for the rport to come
 * online, at which point a PRLI is initiated.
 */
static void
bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
			enum bfa_fcs_itnim_event event)
{
	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_trc(itnim->fcs, event);

	switch (event) {
	case BFA_FCS_ITNIM_SM_FCS_ONLINE:
		/* Start the FC-4 login: reset retries and send a PRLI. */
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send);
		itnim->prli_retries = 0;
		bfa_fcs_itnim_send_prli(itnim, NULL);
		break;

	case BFA_FCS_ITNIM_SM_OFFLINE:
		/* Already offline; just acknowledge to the rport SM. */
		bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE);
		break;

	case BFA_FCS_ITNIM_SM_INITIATOR:
		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator);
		break;

	case BFA_FCS_ITNIM_SM_DELETE:
		bfa_fcs_itnim_free(itnim);
		break;

	default:
		bfa_sm_fault(itnim->fcs, event);
	}
}
/*
 * Called by rport when the remote port is known to be an initiator from
 * a received PRLI. Counts the event and moves the ITNIM to initiator
 * handling.
 */
void
bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim)
{
	bfa_trc(itnim->fcs, itnim->rport->pid);

	itnim->stats.initiator++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
}
/*
 * Timer callback: count the timeout and forward it to the ITNIM state
 * machine (used e.g. for PRLI retry back-off).
 */
static void
bfa_fcs_itnim_timeout(void *arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg;

	itnim->stats.timeout++;
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
}
/*
 * Trigger a Name Server query for the given port by posting the
 * NS_QUERY event to its NS state machine.
 */
void
bfa_fcs_port_ns_query(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);

	bfa_trc(port->fcs, port->pid);
	bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
}
/*
 * Port-online notification: bind the NS object to its port and post the
 * online event to the NS state machine.
 */
void
bfa_fcs_port_ns_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);

	ns->port = port;
	bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
}
/**
 * This routine will be called by bfa_timer on timer timeouts.
 *
 * Counts the timeout in the port statistics and posts the timeout event
 * to the NS state machine.
 *
 * param[in] arg - pointer to bfa_fcs_port_ns_t, passed as void *.
 *
 * return void
 */
static void
bfa_fcs_port_ns_timeout(void *arg)
{
	struct bfa_fcs_port_ns_s *ns = (struct bfa_fcs_port_ns_s *)arg;

	ns->port->stats.ns_timeouts++;
	bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
}
/*
 * BFA completion callback for bfa_itnim_offline(): the HAL has finished
 * taking the ITN offline; notify the FCS ITNIM state machine.
 */
void
bfa_cb_itnim_offline(void *cb_arg)
{
	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;

	bfa_trc(itnim->fcs, itnim->rport->pwwn);
	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
}
/*
 * Notification from rport that PLOGI is complete to initiate FC-4 session.
 *
 * Well-known-address ports are skipped entirely here; only regular
 * remote ports get the HAL-online event.
 */
void
bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim)
{
	itnim->stats.onlines++;

	if (BFA_FCS_PID_IS_WKA(itnim->rport->pid))
		return;

	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE);
}