/*
 * Poll the event queue and, on an exception event, attempt recovery by
 * restarting the Rx/Tx queues bound to this EvQ.
 *
 * Must only be called while the EvQ is STARTING or STARTED (asserted).
 * Caller context is the poll loop; the function never blocks on the
 * adapter lock (see trylock note below).
 */
void sfc_ev_qpoll(struct sfc_evq *evq) { SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED || evq->init_state == SFC_EVQ_STARTING); /* Synchronize the DMA memory for reading not required */ efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq); /*
 * Recovery needs the adapter lock. Use trylock so the datapath never
 * deadlocks/blocks if the control path already holds the lock; in that
 * case recovery is simply deferred to a later poll (the exception flag
 * stays set).
 */ if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) { struct sfc_adapter *sa = evq->sa; int rc; /* Restart the Rx queue (if any) served by this EvQ */ if (evq->dp_rxq != NULL) { unsigned int rxq_sw_index; rxq_sw_index = evq->dp_rxq->dpq.queue_id; sfc_warn(sa, "restart RxQ %u because of exception on its EvQ %u", rxq_sw_index, evq->evq_index); sfc_rx_qstop(sa, rxq_sw_index); rc = sfc_rx_qstart(sa, rxq_sw_index); if (rc != 0) sfc_err(sa, "cannot restart RxQ %u", rxq_sw_index); } /* Restart the Tx queue (if any) served by this EvQ */ if (evq->dp_txq != NULL) { unsigned int txq_sw_index; txq_sw_index = evq->dp_txq->dpq.queue_id; sfc_warn(sa, "restart TxQ %u because of exception on its EvQ %u", txq_sw_index, evq->evq_index); sfc_tx_qstop(sa, txq_sw_index); rc = sfc_tx_qstart(sa, txq_sw_index); if (rc != 0) sfc_err(sa, "cannot restart TxQ %u", txq_sw_index); } /*
 * Restarting the queues is expected to clear the exception flag
 * (presumably via the EvQ stop/start done inside qstop/qstart —
 * TODO confirm); if it is still set, the condition is unrecoverable.
 */ if (evq->exception) sfc_panic(sa, "unrecoverable exception on EvQ %u", evq->evq_index); sfc_adapter_unlock(sa); } /* Poll-mode driver does not re-prime the event queue for interrupts */ }
int sfc_tx_start(struct sfc_adapter *sa) { unsigned int sw_index; int rc = 0; sfc_log_init(sa, "txq_count = %u", sa->txq_count); if (sa->tso) { if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) { sfc_warn(sa, "TSO support was unable to be restored"); sa->tso = B_FALSE; } } rc = efx_tx_init(sa->nic); if (rc != 0) goto fail_efx_tx_init; for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) { if (!(sa->txq_info[sw_index].deferred_start) || sa->txq_info[sw_index].deferred_started) { rc = sfc_tx_qstart(sa, sw_index); if (rc != 0) goto fail_tx_qstart; } } return 0; fail_tx_qstart: while (sw_index-- > 0) sfc_tx_qstop(sa, sw_index); efx_tx_fini(sa->nic); fail_efx_tx_init: sfc_log_init(sa, "failed (rc = %d)", rc); return rc; }