/*
 * Poll the event queue; if an exception was reported, try to recover by
 * restarting the attached Rx/Tx queues under the adapter lock.
 */
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
static void
sfc_mcdi_timeout(struct sfc_adapter *sa)
{
	sfc_warn(sa, "MC TIMEOUT");

	sfc_panic(sa, "MCDI timeout handling is not implemented\n");
}
/*
 * Handle an exception event on the EvQ. Unknown sensor events are ignored;
 * any other exception marks the queue so that the poll loop can recover it.
 */
static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EvQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}
/* Poll the management EvQ periodically, re-arming itself via an EAL alarm */
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}
static void
sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
{
	struct sfc_adapter *sa = (struct sfc_adapter *)arg;

	sfc_warn(sa, "MC %s",
		 (eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" :
		 (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ? "BADASSERT" :
		 "UNKNOWN");

	sfc_panic(sa, "MCDI exceptions handling is not implemented\n");
}
/* Initialise the Tx datapath and start all Tx queues not awaiting deferred start */
int
sfc_tx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc = 0;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	if (sa->tso) {
		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
			sfc_warn(sa, "TSO support was unable to be restored");
			sa->tso = B_FALSE;
		}
	}

	rc = efx_tx_init(sa->nic);
	if (rc != 0)
		goto fail_efx_tx_init;

	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
		if (!(sa->txq_info[sw_index].deferred_start) ||
		    sa->txq_info[sw_index].deferred_started) {
			rc = sfc_tx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_tx_qstart;
		}
	}

	return 0;

fail_tx_qstart:
	while (sw_index-- > 0)
		sfc_tx_qstop(sa, sw_index);

	efx_tx_fini(sa->nic);

fail_efx_tx_init:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}