/**
 * or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/**
	 * If path tov timer expired, failback with PATHTOV status - these
	 * IO requests are not normally retried by IO stack.
	 *
	 * Otherwise device came back online and fail it with normal failed
	 * status so that IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else
		ioim->io_cbfn = __bfa_cb_ioim_failed;

	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/**
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}
/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_comp, ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_comp, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
/**
 * Record the link state change and queue the event callback for delivery.
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
{
	ln->ln_event = event;
	bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
}
/**
 * Firmware message handler.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.pdisable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		switch (i2hmsg.event->link_state.linkstate) {
		case BFA_PPORT_LINKUP:
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
			break;
		case BFA_PPORT_LINKDOWN:
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
			break;
		case BFA_PPORT_TRUNK_LINKDOWN:
			/** todo: event notification */
			break;
		}
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	default:
		bfa_assert(0);
		break;
	}
}