static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_txq *dp_txq;
        struct sfc_efx_txq *txq;
        unsigned int stop;
        unsigned int delta;

        dp_txq = evq->dp_txq;
        SFC_ASSERT(dp_txq != NULL);

        txq = sfc_efx_txq_by_dp_txq(dp_txq);
        SFC_ASSERT(txq->evq == evq);

        if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
                goto done;

        stop = (id + 1) & txq->ptr_mask;
        id = txq->pending & txq->ptr_mask;

        delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

        txq->pending += delta;

done:
        return B_FALSE;
}
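/*
 * Illustrative sketch (guarded out of the build, not part of the driver):
 * the ring wraparound arithmetic used by sfc_ev_tx() above to count how
 * many descriptors a completion event covers.  The helper name and its
 * parameters are hypothetical.
 */
#if 0
static unsigned int
sfc_sketch_ring_delta(unsigned int pending, unsigned int last_done_id,
                      unsigned int ptr_mask)
{
        /* Index one past the last completed descriptor */
        unsigned int stop = (last_done_id + 1) & ptr_mask;
        /* Oldest descriptor not yet accounted for */
        unsigned int start = pending & ptr_mask;

        /* Distance on a power-of-two ring, accounting for wraparound */
        return (stop >= start) ? (stop - start) :
                                 (ptr_mask + 1 - start + stop);
}
/*
 * For example, with ptr_mask = 7 (ring of 8 entries), pending = 6 and
 * last_done_id = 1: stop = 2, start = 6, delta = 8 - 6 + 2 = 4 descriptors.
 */
#endif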
void
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_txq_info *txq_info;
        struct sfc_txq *txq;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        txq = txq_info->txq;
        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        sa->dp_tx->qdestroy(txq->dp);
        txq->dp = NULL;

        txq_info->txq = NULL;
        txq_info->entries = 0;

        sfc_dma_free(sa, &txq->mem);

        sfc_ev_qfini(txq->evq);
        txq->evq = NULL;

        rte_free(txq);
}
static void
sfc_mcdi_ev_cpl(void *arg)
{
        struct sfc_adapter *sa = (struct sfc_adapter *)arg;
        struct sfc_mcdi *mcdi __rte_unused;

        mcdi = &sa->mcdi;
        SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);

        /* MCDI is polled, completions are not expected */
        SFC_ASSERT(0);
}
static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_txq *dp_txq;

        dp_txq = evq->dp_txq;
        SFC_ASSERT(dp_txq != NULL);

        SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
        return evq->sa->dp_tx->qtx_ev(dp_txq, id);
}
void
sfc_ev_qfini(struct sfc_evq *evq)
{
        struct sfc_adapter *sa = evq->sa;

        SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

        sfc_dma_free(sa, &evq->mem);

        rte_free(evq);

        SFC_ASSERT(sa->evq_count > 0);
        sa->evq_count--;
}
static void
sfc_mcdi_poll(struct sfc_adapter *sa)
{
        efx_nic_t *enp;
        unsigned int delay_total;
        unsigned int delay_us;
        boolean_t aborted __rte_unused;

        delay_total = 0;
        delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
        enp = sa->nic;

        do {
                if (efx_mcdi_request_poll(enp))
                        return;

                if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
                        aborted = efx_mcdi_request_abort(enp);
                        SFC_ASSERT(aborted);
                        sfc_mcdi_timeout(sa);
                        return;
                }

                rte_delay_us(delay_us);

                delay_total += delay_us;

                /* Exponentially back off the poll frequency */
                RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
                delay_us *= 2;
                if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
                        delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;

        } while (1);
}
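/*
 * Illustrative sketch (guarded out of the build, not part of the driver):
 * the backoff step used by sfc_mcdi_poll() above between polls of an
 * outstanding MCDI request.  The helper name and parameters are
 * hypothetical; the real bounds come from SFC_MCDI_POLL_INTERVAL_MIN_US,
 * SFC_MCDI_POLL_INTERVAL_MAX_US and SFC_MCDI_WATCHDOG_INTERVAL_US.
 */
#if 0
static unsigned int
sfc_sketch_next_poll_delay_us(unsigned int delay_us, unsigned int max_us)
{
        /* Double the delay after an unsuccessful poll, saturating at the cap */
        delay_us *= 2;
        return (delay_us > max_us) ? max_us : delay_us;
}
#endif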
static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_txq *dp_txq;
        struct sfc_txq *txq;

        dp_txq = evq->dp_txq;
        SFC_ASSERT(dp_txq != NULL);

        txq = sfc_txq_by_dp_txq(dp_txq);
        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->hw_index == txq_hw_index);
        SFC_ASSERT(txq->evq == evq);
        sfc_tx_qflush_done(txq);

        return B_FALSE;
}
static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_rxq *dp_rxq;
        struct sfc_rxq *rxq;

        dp_rxq = evq->dp_rxq;
        SFC_ASSERT(dp_rxq != NULL);

        rxq = sfc_rxq_by_dp_rxq(dp_rxq);
        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->hw_index == rxq_hw_index);
        SFC_ASSERT(rxq->evq == evq);
        sfc_rx_qflush_failed(rxq);

        return B_FALSE;
}
struct sfc_txq *
sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
{
        const struct sfc_dp_queue *dpq = &dp_txq->dpq;
        struct rte_eth_dev *eth_dev;
        struct sfc_adapter *sa;
        struct sfc_txq *txq;

        SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
        eth_dev = &rte_eth_devices[dpq->port_id];

        sa = eth_dev->data->dev_private;

        SFC_ASSERT(dpq->queue_id < sa->txq_count);
        txq = sa->txq_info[dpq->queue_id].txq;

        SFC_ASSERT(txq != NULL);
        return txq;
}
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
        SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
                   evq->init_state == SFC_EVQ_STARTING);

        /* Synchronizing the DMA memory for reading is not required */

        efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

        if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
                struct sfc_adapter *sa = evq->sa;
                int rc;

                if (evq->dp_rxq != NULL) {
                        unsigned int rxq_sw_index;

                        rxq_sw_index = evq->dp_rxq->dpq.queue_id;

                        sfc_warn(sa,
                                 "restart RxQ %u because of exception on its EvQ %u",
                                 rxq_sw_index, evq->evq_index);

                        sfc_rx_qstop(sa, rxq_sw_index);
                        rc = sfc_rx_qstart(sa, rxq_sw_index);
                        if (rc != 0)
                                sfc_err(sa, "cannot restart RxQ %u",
                                        rxq_sw_index);
                }

                if (evq->dp_txq != NULL) {
                        unsigned int txq_sw_index;

                        txq_sw_index = evq->dp_txq->dpq.queue_id;

                        sfc_warn(sa,
                                 "restart TxQ %u because of exception on its EvQ %u",
                                 txq_sw_index, evq->evq_index);

                        sfc_tx_qstop(sa, txq_sw_index);
                        rc = sfc_tx_qstart(sa, txq_sw_index);
                        if (rc != 0)
                                sfc_err(sa, "cannot restart TxQ %u",
                                        txq_sw_index);
                }

                if (evq->exception)
                        sfc_panic(sa, "unrecoverable exception on EvQ %u",
                                  evq->evq_index);

                sfc_adapter_unlock(sa);
        }

        /* Poll-mode driver does not re-prime the event queue for interrupts */
}
int
sfc_mcdi_init(struct sfc_adapter *sa)
{
        struct sfc_mcdi *mcdi;
        size_t max_msg_size;
        efx_mcdi_transport_t *emtp;
        int rc;

        sfc_log_init(sa, "entry");

        mcdi = &sa->mcdi;
        SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);

        rte_spinlock_init(&mcdi->lock);

        mcdi->state = SFC_MCDI_INITIALIZED;

        max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
        rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
                           &mcdi->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        /* Convert negative error to positive used in the driver */
        rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
                                sfc_kvarg_bool_handler, &mcdi->logging);
        if (rc != 0)
                goto fail_kvargs_process;

        emtp = &mcdi->transport;
        emtp->emt_context = sa;
        emtp->emt_dma_mem = &mcdi->mem;
        emtp->emt_execute = sfc_mcdi_execute;
        emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
        emtp->emt_exception = sfc_mcdi_exception;
        emtp->emt_logger = sfc_mcdi_logger;

        sfc_log_init(sa, "init MCDI");
        rc = efx_mcdi_init(sa->nic, emtp);
        if (rc != 0)
                goto fail_mcdi_init;

        return 0;

fail_mcdi_init:
        memset(emtp, 0, sizeof(*emtp));

fail_kvargs_process:
        sfc_dma_free(sa, &mcdi->mem);

fail_dma_alloc:
        mcdi->state = SFC_MCDI_UNINITIALIZED;
        return rc;
}
static boolean_t
sfc_ev_initialized(void *arg)
{
        struct sfc_evq *evq = arg;

        /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
        SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
                   evq->init_state == SFC_EVQ_STARTED);

        evq->init_state = SFC_EVQ_STARTED;

        return B_FALSE;
}
static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
        switch (type) {
        case SFC_EVQ_TYPE_MGMT:
                return "mgmt-evq";
        case SFC_EVQ_TYPE_RX:
                return "rx-evq";
        case SFC_EVQ_TYPE_TX:
                return "tx-evq";
        default:
                SFC_ASSERT(B_FALSE);
                return NULL;
        }
}
/* It is not actually used on the datapath, but is required for RxQ flush */
static boolean_t
sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
                __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
{
        struct sfc_evq *evq = arg;
        struct sfc_dp_rxq *dp_rxq;

        dp_rxq = evq->dp_rxq;
        SFC_ASSERT(dp_rxq != NULL);

        if (evq->sa->dp_rx->qrx_ps_ev != NULL)
                return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
        else
                return B_FALSE;
}
/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
{
        int sw_index;

        SFC_ASSERT(nb_tx_queues <= sa->txq_count);

        sw_index = sa->txq_count;
        while (--sw_index >= (int)nb_tx_queues) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qfini(sa, sw_index);
        }

        sa->txq_count = nb_tx_queues;
}
static void
sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
{
        struct sfc_adapter *sa = (struct sfc_adapter *)arg;
        struct sfc_mcdi *mcdi = &sa->mcdi;

        rte_spinlock_lock(&mcdi->lock);

        SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);

        efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
        sfc_mcdi_poll(sa);

        rte_spinlock_unlock(&mcdi->lock);
}
int
sfc_ev_qinit(struct sfc_adapter *sa, enum sfc_evq_type type,
             unsigned int type_index, unsigned int entries, int socket_id,
             struct sfc_evq **evqp)
{
        struct sfc_evq *evq;
        int rc;

        sfc_log_init(sa, "type=%s type_index=%u",
                     sfc_evq_type2str(type), type_index);

        SFC_ASSERT(rte_is_power_of_2(entries));

        rc = ENOMEM;
        evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (evq == NULL)
                goto fail_evq_alloc;

        evq->sa = sa;
        evq->type = type;
        evq->entries = entries;

        /* Allocate DMA space */
        rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
                           EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        evq->init_state = SFC_EVQ_INITIALIZED;

        sa->evq_count++;

        *evqp = evq;

        return 0;

fail_dma_alloc:
        rte_free(evq);

fail_evq_alloc:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}
static int
sfc_filter_cache_match_supported(struct sfc_adapter *sa)
{
        struct sfc_filter *filter = &sa->filter;
        size_t num = filter->supported_match_num;
        uint32_t *buf = filter->supported_match;
        unsigned int retry;
        int rc;

        /* Just a guess of possibly sufficient entries */
        if (num == 0)
                num = 16;

        for (retry = 0; retry < 2; ++retry) {
                if (num != filter->supported_match_num) {
                        rc = ENOMEM;
                        buf = rte_realloc(buf, num * sizeof(*buf), 0);
                        if (buf == NULL)
                                goto fail_realloc;
                }
                rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
                if (rc == 0) {
                        filter->supported_match_num = num;
                        filter->supported_match = buf;

                        return 0;
                } else if (rc != ENOSPC) {
                        goto fail_efx_filter_supported_filters;
                }
        }

        SFC_ASSERT(rc == ENOSPC);

fail_efx_filter_supported_filters:
fail_realloc:
        /* Original pointer is not freed by rte_realloc() on failure */
        rte_free(buf);
        filter->supported_match = NULL;
        filter->supported_match_num = 0;
        return rc;
}
/*
 * The function is used to insert or update a VLAN tag;
 * the firmware keeps per-TxQ state of the tag to insert
 * (controlled by option descriptors), hence, if the tag of the
 * packet to be sent differs from the one remembered by the firmware,
 * the function updates it
 */
static unsigned int
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
                            efx_desc_t **pend)
{
        uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
                             m->vlan_tci : 0);

        if (this_tag == txq->hw_vlan_tci)
                return 0;

        /*
         * The expression inside SFC_ASSERT() should not be checked in
         * a non-debug build because it might be too expensive on the data path
         */
        SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);

        efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
                                    *pend);
        (*pend)++;
        txq->hw_vlan_tci = this_tag;

        return 1;
}
unsigned int
sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
                       struct rte_mbuf **in_seg, size_t *in_off)
{
        struct rte_mbuf *m = *in_seg;
        size_t bytes_to_copy = 0;
        size_t bytes_left = header_len;
        unsigned int segments_copied = 0;

        do {
                bytes_to_copy = MIN(bytes_left, m->data_len);

                rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
                           bytes_to_copy);

                bytes_left -= bytes_to_copy;
                tsoh += bytes_to_copy;

                if (bytes_left > 0) {
                        m = m->next;
                        SFC_ASSERT(m != NULL);
                        segments_copied++;
                }
        } while (bytes_left > 0);

        if (bytes_to_copy == m->data_len) {
                *in_seg = m->next;
                *in_off = 0;
                segments_copied++;
        } else {
                *in_seg = m;
                *in_off = bytes_to_copy;
        }

        return segments_copied;
}
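/*
 * Worked example for sfc_tso_prepare_header() above (figures are
 * illustrative, not taken from the driver): with header_len = 66 and a
 * chain of two segments with data_len 50 and 1400, the first iteration
 * copies 50 bytes and moves to the second segment (segments_copied = 1),
 * the second iteration copies the remaining 16 bytes.  Since 16 is less
 * than the second segment's data_len, *in_seg stays on that segment,
 * *in_off becomes 16 (where the payload starts) and the function
 * returns 1.
 */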
void
sfc_mcdi_fini(struct sfc_adapter *sa)
{
        struct sfc_mcdi *mcdi;
        efx_mcdi_transport_t *emtp;

        sfc_log_init(sa, "entry");

        mcdi = &sa->mcdi;
        emtp = &mcdi->transport;

        rte_spinlock_lock(&mcdi->lock);

        SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
        mcdi->state = SFC_MCDI_UNINITIALIZED;

        sfc_log_init(sa, "fini MCDI");
        efx_mcdi_fini(sa->nic);
        memset(emtp, 0, sizeof(*emtp));

        rte_spinlock_unlock(&mcdi->lock);

        sfc_dma_free(sa, &mcdi->mem);
}
static uint16_t
sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
        struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
        unsigned int added = txq->added;
        unsigned int pushed = added;
        unsigned int pkts_sent = 0;
        efx_desc_t *pend = &txq->pend_desc[0];
        const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
        const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
        unsigned int fill_level = added - txq->completed;
        boolean_t reap_done;
        int rc __rte_unused;
        struct rte_mbuf **pktp;

        if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
                goto done;

        /*
         * If there is insufficient space for a single packet, reap now;
         * otherwise, avoid reaping on every call to keep latency low
         */
        reap_done = (fill_level > soft_max_fill);

        if (reap_done) {
                sfc_efx_tx_reap(txq);
                /*
                 * Recalculate fill level since 'txq->completed'
                 * might have changed on reap
                 */
                fill_level = added - txq->completed;
        }

        for (pkts_sent = 0, pktp = &tx_pkts[0];
             (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
             pkts_sent++, pktp++) {
                struct rte_mbuf *m_seg = *pktp;
                size_t pkt_len = m_seg->pkt_len;
                unsigned int pkt_descs = 0;
                size_t in_off = 0;

                /*
                 * Here VLAN TCI is expected to be zero if no
                 * DEV_TX_VLAN_OFFLOAD capability is advertised;
                 * if the calling app ignores the absence of
                 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
                 * TX_ERROR will occur
                 */
                pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);

                if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
                        /*
                         * We expect the 'pkt->l[2, 3, 4]_len' values
                         * to be set correctly by the caller
                         */
                        if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
                                           &pkt_descs, &pkt_len) != 0) {
                                /*
                                 * We may have reached this place for
                                 * one of the following reasons:
                                 *
                                 * 1) Packet header length is greater
                                 *    than SFC_TSOH_STD_LEN
                                 * 2) TCP header starts at more than
                                 *    208 bytes into the frame
                                 *
                                 * We will deceive RTE saying that we have sent
                                 * the packet, but we will actually drop it.
                                 * Hence, we should revert 'pend' to the
                                 * previous state (in case we have added
                                 * a VLAN descriptor) and start processing
                                 * the next packet. But the original
                                 * mbuf shouldn't be orphaned
                                 */
                                pend -= pkt_descs;

                                rte_pktmbuf_free(*pktp);

                                continue;
                        }

                        /*
                         * We've only added 2 FATSOv2 option descriptors
                         * and 1 descriptor for the linearized packet header.
                         * The remaining work will be done in the same manner
                         * as for the usual non-TSO path
                         */
                }

                for (; m_seg != NULL; m_seg = m_seg->next) {
                        efsys_dma_addr_t next_frag;
                        size_t seg_len;

                        seg_len = m_seg->data_len;
                        next_frag = rte_mbuf_data_dma_addr(m_seg);

                        /*
                         * If we've started a TSO transaction a few steps
                         * earlier, skip the packet header using an offset
                         * into the current segment (which has been set to
                         * the first one containing payload)
                         */
                        seg_len -= in_off;
                        next_frag += in_off;
                        in_off = 0;

                        do {
                                efsys_dma_addr_t frag_addr = next_frag;
                                size_t frag_len;

                                /*
                                 * It is assumed here that there is no
                                 * limitation on address boundary
                                 * crossing by DMA descriptor.
                                 */
                                frag_len = MIN(seg_len,
                                               txq->dma_desc_size_max);
                                next_frag += frag_len;
                                seg_len -= frag_len;
                                pkt_len -= frag_len;

                                efx_tx_qdesc_dma_create(txq->common,
                                                        frag_addr, frag_len,
                                                        (pkt_len == 0),
                                                        pend++);

                                pkt_descs++;
                        } while (seg_len != 0);
                }

                added += pkt_descs;
                fill_level += pkt_descs;

                if (unlikely(fill_level > hard_max_fill)) {
                        /*
                         * Our estimation for the maximum number of
                         * descriptors required to send a packet seems
                         * to be wrong. Try to reap (if we haven't yet).
                         */
                        if (!reap_done) {
                                sfc_efx_tx_reap(txq);
                                reap_done = B_TRUE;
                                fill_level = added - txq->completed;
                                if (fill_level > hard_max_fill) {
                                        pend -= pkt_descs;
                                        break;
                                }
                        } else {
                                pend -= pkt_descs;
                                break;
                        }
                }

                /* Assign mbuf to the last used desc */
                txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
        }

        if (likely(pkts_sent > 0)) {
                rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
                                       pend - &txq->pend_desc[0],
                                       txq->completed, &txq->added);
                SFC_ASSERT(rc == 0);

                if (likely(pushed != txq->added))
                        efx_tx_qpush(txq->common, txq->added, pushed);
        }

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
        if (!reap_done)
                sfc_efx_tx_reap(txq);
#endif

done:
        return pkts_sent;
}
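/*
 * Worked example for the fill-level accounting in sfc_efx_xmit_pkts()
 * above (values are illustrative assumptions, not taken from the driver):
 * if EFX_TXQ_LIMIT(ring size) evaluated to 1008 for a 1024-entry ring and
 * free_thresh were 64, then hard_max_fill = 1008 and soft_max_fill = 944.
 * With added = 3000 and completed = 2500, fill_level = 500, so no reap is
 * done before descriptors are built; reaping is deferred until fill_level
 * would exceed soft_max_fill (or hard_max_fill in the middle of a burst).
 */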
void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        struct sfc_txq *txq;
        unsigned int retry_count;
        unsigned int wait_count;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        txq = txq_info->txq;

        if (txq->state == SFC_TXQ_INITIALIZED)
                return;

        SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

        sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);

        /*
         * Retry TX queue flushing if the flush fails or times out;
         * in the worst case it can delay for 6 seconds
         */
        for (retry_count = 0;
             ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
             (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_tx_qflush(txq->common) != 0) {
                        txq->state |= SFC_TXQ_FLUSHING;
                        break;
                }

                /*
                 * Wait for TX queue flush done or flush failed event at least
                 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(txq->evq);
                } while ((txq->state & SFC_TXQ_FLUSHING) &&
                         wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

                if (txq->state & SFC_TXQ_FLUSHING)
                        sfc_err(sa, "TxQ %u flush timed out", sw_index);

                if (txq->state & SFC_TXQ_FLUSHED)
                        sfc_info(sa, "TxQ %u flushed", sw_index);
        }

        sa->dp_tx->qreap(txq->dp);

        txq->state = SFC_TXQ_INITIALIZED;

        efx_tx_qdestroy(txq->common);

        sfc_ev_qstop(txq->evq);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
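/*
 * The 6 second worst case mentioned in the retry comment above comes from
 * SFC_TX_QFLUSH_ATTEMPTS flush attempts, each waiting up to
 * SFC_TX_QFLUSH_POLL_ATTEMPTS polls of SFC_TX_QFLUSH_POLL_WAIT_MS each;
 * e.g. 3 attempts x 2000 polls x 1 ms = 6 s.  These figures are
 * illustrative and consistent with the 2 second per-attempt bound noted in
 * the inner comment; the actual constants are defined elsewhere in the
 * driver.
 */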
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        struct sfc_txq *txq;
        struct sfc_evq *evq;
        uint16_t flags;
        unsigned int desc_index;
        int rc = 0;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        txq = txq_info->txq;

        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        evq = txq->evq;

        rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
        if (rc != 0)
                goto fail_ev_qstart;

        /*
         * It seems that DPDK has no controls regarding IPv4 offloads,
         * hence, we always enable it here
         */
        if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
            (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
                flags = EFX_TXQ_CKSUM_IPV4;
        } else {
                flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;

                if (sa->tso)
                        flags |= EFX_TXQ_FATSOV2;
        }

        rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
                            txq_info->entries, 0 /* not used on EF10 */,
                            flags, evq->common,
                            &txq->common, &desc_index);
        if (rc != 0) {
                if (sa->tso && (rc == ENOSPC))
                        sfc_err(sa, "ran out of TSO contexts");

                goto fail_tx_qcreate;
        }

        efx_tx_qenable(txq->common);

        txq->state |= SFC_TXQ_STARTED;

        rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
        if (rc != 0)
                goto fail_dp_qstart;

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_dp_qstart:
        txq->state = SFC_TXQ_INITIALIZED;
        efx_tx_qdestroy(txq->common);

fail_tx_qcreate:
        sfc_ev_qstop(evq);

fail_ev_qstart:
        return rc;
}
int
sfc_ev_qprime(struct sfc_evq *evq)
{
        SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
        return efx_ev_qprime(evq->common, evq->read_ptr);
}
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
        struct sfc_adapter *sa = evq->sa;
        efsys_mem_t *esmp;
        uint32_t evq_flags = sa->evq_flags;
        unsigned int total_delay_us;
        unsigned int delay_us;
        int rc;

        sfc_log_init(sa, "hw_index=%u", hw_index);

        esmp = &evq->mem;

        evq->evq_index = hw_index;

        /* Clear all events */
        (void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

        if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
                evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
        else
                evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

        /* Create the common code event queue */
        rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
                            0 /* unused on EF10 */, 0, evq_flags,
                            &evq->common);
        if (rc != 0)
                goto fail_ev_qcreate;

        SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
        if (evq->dp_rxq != NULL) {
                if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
                        evq->callbacks = &sfc_ev_callbacks_efx_rx;
                else
                        evq->callbacks = &sfc_ev_callbacks_dp_rx;
        } else if (evq->dp_txq != NULL) {
                if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
                        evq->callbacks = &sfc_ev_callbacks_efx_tx;
                else
                        evq->callbacks = &sfc_ev_callbacks_dp_tx;
        } else {
                evq->callbacks = &sfc_ev_callbacks;
        }

        evq->init_state = SFC_EVQ_STARTING;

        /* Wait for the initialization event */
        total_delay_us = 0;
        delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
        do {
                sfc_ev_qpoll(evq);

                /*
                 * Check to see if the initialization complete indication
                 * has been posted by the hardware.
                 */
                if (evq->init_state == SFC_EVQ_STARTED)
                        goto done;

                /* Give the event queue some time to init */
                rte_delay_us(delay_us);

                total_delay_us += delay_us;

                /* Exponential backoff */
                delay_us *= 2;
                if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
                        delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

        } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

        rc = ETIMEDOUT;
        goto fail_timedout;

done:
        return 0;

fail_timedout:
        evq->init_state = SFC_EVQ_INITIALIZED;
        efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
        sfc_log_init(sa, "failed %d", rc);
        return rc;
}
int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_tx_desc, unsigned int socket_id,
             const struct rte_eth_txconf *tx_conf)
{
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        struct sfc_txq_info *txq_info;
        struct sfc_evq *evq;
        struct sfc_txq *txq;
        int rc = 0;
        struct sfc_dp_tx_qcreate_info info;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
        txq_info->entries = nb_tx_desc;

        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
                          txq_info->entries, socket_id, &evq);
        if (rc != 0)
                goto fail_ev_qinit;

        rc = ENOMEM;
        txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
        if (txq == NULL)
                goto fail_txq_alloc;

        txq_info->txq = txq;

        txq->hw_index = sw_index;
        txq->evq = evq;
        txq->free_thresh =
                (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
                SFC_TX_DEFAULT_FREE_THRESH;
        txq->flags = tx_conf->txq_flags;

        rc = sfc_dma_alloc(sa, "txq", sw_index,
                           EFX_TXQ_SIZE(txq_info->entries),
                           socket_id, &txq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        memset(&info, 0, sizeof(info));
        info.free_thresh = txq->free_thresh;
        info.flags = tx_conf->txq_flags;
        info.txq_entries = txq_info->entries;
        info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
        info.txq_hw_ring = txq->mem.esm_base;
        info.evq_entries = txq_info->entries;
        info.evq_hw_ring = evq->mem.esm_base;
        info.hw_index = txq->hw_index;
        info.mem_bar = sa->mem_bar.esb_base;

        rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
                                &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
                                socket_id, &info, &txq->dp);
        if (rc != 0)
                goto fail_dp_tx_qinit;

        evq->dp_txq = txq->dp;

        txq->state = SFC_TXQ_INITIALIZED;

        txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

        return 0;

fail_dp_tx_qinit:
        sfc_dma_free(sa, &txq->mem);

fail_dma_alloc:
        txq_info->txq = NULL;
        rte_free(txq);

fail_txq_alloc:
        sfc_ev_qfini(evq);

fail_ev_qinit:
        txq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
        return rc;
}
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
              uint32_t size, uint16_t flags)
{
        struct sfc_evq *evq = arg;
        struct sfc_efx_rxq *rxq;
        unsigned int stop;
        unsigned int pending_id;
        unsigned int delta;
        unsigned int i;
        struct sfc_efx_rx_sw_desc *rxd;

        if (unlikely(evq->exception))
                goto done;

        rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

        SFC_ASSERT(rxq != NULL);
        SFC_ASSERT(rxq->evq == evq);
        SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

        stop = (id + 1) & rxq->ptr_mask;
        pending_id = rxq->pending & rxq->ptr_mask;
        delta = (stop >= pending_id) ? (stop - pending_id) :
                (rxq->ptr_mask + 1 - pending_id + stop);

        if (delta == 0) {
                /*
                 * Rx event with no new descriptors done and zero length
                 * is used to abort scattered packet when there is no room
                 * for the tail.
                 */
                if (unlikely(size != 0)) {
                        evq->exception = B_TRUE;
                        sfc_err(evq->sa,
                                "EVQ %u RxQ %u invalid RX abort "
                                "(id=%#x size=%u flags=%#x); needs restart",
                                evq->evq_index, rxq->dp.dpq.queue_id,
                                id, size, flags);
                        goto done;
                }

                /* Add discard flag to the first fragment */
                rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
                /* Remove continue flag from the last fragment */
                rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
        } else if (unlikely(delta > rxq->batch_max)) {
                evq->exception = B_TRUE;

                sfc_err(evq->sa,
                        "EVQ %u RxQ %u completion out of order "
                        "(id=%#x delta=%u flags=%#x); needs restart",
                        evq->evq_index, rxq->dp.dpq.queue_id,
                        id, delta, flags);

                goto done;
        }

        for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
                rxd = &rxq->sw_desc[i];

                rxd->flags = flags;

                SFC_ASSERT(size < (1 << 16));
                rxd->size = (uint16_t)size;
        }

        rxq->pending += delta;

done:
        return B_FALSE;
}