int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Management event queue start polls the queue, but it cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), periodic polling is required to process link
	 * status change and other device-level events to avoid an
	 * unrecoverable error caused by event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
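/*
 * A minimal sketch (not part of the driver) of the synchronization
 * pattern above: the mgmt_evq_running flag is flipped only under
 * mgmt_evq_lock, so a concurrent poller that takes the same lock is
 * guaranteed to see a consistent value before touching the queue.
 * The struct and function names below are hypothetical; <stdbool.h>
 * and <rte_spinlock.h> are assumed to be included.
 */
struct mgmt_poll_ctx {
	rte_spinlock_t lock;
	bool running;
};

static void
mgmt_poll_example(struct mgmt_poll_ctx *ctx)
{
	rte_spinlock_lock(&ctx->lock);
	if (ctx->running) {
		/* Poll the queue here: start/stop cannot race with us. */
	}
	rte_spinlock_unlock(&ctx->lock);
}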
int
sfc_mcdi_init(struct sfc_adapter *sa)
{
	struct sfc_mcdi *mcdi;
	size_t max_msg_size;
	efx_mcdi_transport_t *emtp;
	int rc;

	sfc_log_init(sa, "entry");

	mcdi = &sa->mcdi;

	SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);

	rte_spinlock_init(&mcdi->lock);

	mcdi->state = SFC_MCDI_INITIALIZED;

	max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
	rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
			   &mcdi->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	/* Convert negative error to positive used in the driver */
	rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
				sfc_kvarg_bool_handler, &mcdi->logging);
	if (rc != 0)
		goto fail_kvargs_process;

	emtp = &mcdi->transport;
	emtp->emt_context = sa;
	emtp->emt_dma_mem = &mcdi->mem;
	emtp->emt_execute = sfc_mcdi_execute;
	emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
	emtp->emt_exception = sfc_mcdi_exception;
	emtp->emt_logger = sfc_mcdi_logger;

	sfc_log_init(sa, "init MCDI");
	rc = efx_mcdi_init(sa->nic, emtp);
	if (rc != 0)
		goto fail_mcdi_init;

	return 0;

fail_mcdi_init:
	memset(emtp, 0, sizeof(*emtp));

fail_kvargs_process:
	sfc_dma_free(sa, &mcdi->mem);

fail_dma_alloc:
	mcdi->state = SFC_MCDI_UNINITIALIZED;
	return rc;
}
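/*
 * Illustrative sketch of the error-unwind idiom used in sfc_mcdi_init()
 * and throughout this file: each failure label undoes, in reverse
 * order, exactly the steps that succeeded before the failing one.
 * step_a(), step_b() and undo_a() are hypothetical placeholders.
 */
extern int step_a(void);
extern int step_b(void);
extern void undo_a(void);

static int
init_unwind_example(void)
{
	int rc;

	rc = step_a();			/* e.g. a DMA allocation */
	if (rc != 0)
		goto fail_a;

	rc = step_b();			/* e.g. a common code init */
	if (rc != 0)
		goto fail_b;

	return 0;

fail_b:
	undo_a();			/* undo step_a() only */
fail_a:
	return rc;
}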
void
sfc_filter_detach(struct sfc_adapter *sa)
{
	struct sfc_filter *filter = &sa->filter;

	sfc_log_init(sa, "entry");

	rte_free(filter->supported_match);
	filter->supported_match = NULL;
	filter->supported_match_num = 0;

	sfc_log_init(sa, "done");
}
static int
sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "TxQ = %u", sw_index);

	return 0;
}
void
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);

	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	sa->dp_tx->qdestroy(txq->dp);
	txq->dp = NULL;

	txq_info->txq = NULL;
	txq_info->entries = 0;

	sfc_dma_free(sa, &txq->mem);

	sfc_ev_qfini(txq->evq);
	txq->evq = NULL;

	rte_free(txq);
}
int
sfc_ev_qinit(struct sfc_adapter *sa, enum sfc_evq_type type,
	     unsigned int type_index, unsigned int entries, int socket_id,
	     struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
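/*
 * sfc_ev_qinit() above asserts that the entry count is a power of 2.
 * A hedged sketch of how a caller could satisfy that requirement:
 * rte_align32pow2() from <rte_common.h> rounds a requested depth up to
 * the next power of two.  The helper name is hypothetical.
 */
static unsigned int
evq_entries_example(unsigned int nb_desc)
{
	/* e.g. 1000 -> 1024; a value already a power of 2 is kept as-is */
	return rte_align32pow2(nb_desc);
}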
int
sfc_filter_attach(struct sfc_adapter *sa)
{
	int rc;
	unsigned int i;

	sfc_log_init(sa, "entry");

	rc = efx_filter_init(sa->nic);
	if (rc != 0)
		goto fail_filter_init;

	rc = sfc_filter_cache_match_supported(sa);
	if (rc != 0)
		goto fail_cache_match_supported;

	efx_filter_fini(sa->nic);

	sa->filter.supports_ip_proto_or_addr_filter = B_FALSE;
	sa->filter.supports_rem_or_local_port_filter = B_FALSE;

	for (i = 0; i < sa->filter.supported_match_num; ++i) {
		if (sa->filter.supported_match[i] &
		    (EFX_FILTER_MATCH_IP_PROTO |
		     EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST))
			sa->filter.supports_ip_proto_or_addr_filter = B_TRUE;

		if (sa->filter.supported_match[i] &
		    (EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))
			sa->filter.supports_rem_or_local_port_filter = B_TRUE;
	}

	sfc_log_init(sa, "done");

	return 0;

fail_cache_match_supported:
	efx_filter_fini(sa->nic);

fail_filter_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}
int
sfc_tx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc = 0;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	if (sa->tso) {
		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
			sfc_warn(sa, "TSO support was unable to be restored");
			sa->tso = B_FALSE;
		}
	}

	rc = efx_tx_init(sa->nic);
	if (rc != 0)
		goto fail_efx_tx_init;

	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
		if (!(sa->txq_info[sw_index].deferred_start) ||
		    sa->txq_info[sw_index].deferred_started) {
			rc = sfc_tx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_tx_qstart;
		}
	}

	return 0;

fail_tx_qstart:
	while (sw_index-- > 0)
		sfc_tx_qstop(sa, sw_index);

	efx_tx_fini(sa->nic);

fail_efx_tx_init:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_mcdi_fini(struct sfc_adapter *sa)
{
	struct sfc_mcdi *mcdi;
	efx_mcdi_transport_t *emtp;

	sfc_log_init(sa, "entry");

	mcdi = &sa->mcdi;
	emtp = &mcdi->transport;

	rte_spinlock_lock(&mcdi->lock);

	SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
	mcdi->state = SFC_MCDI_UNINITIALIZED;

	sfc_log_init(sa, "fini MCDI");
	efx_mcdi_fini(sa->nic);
	memset(emtp, 0, sizeof(*emtp));

	rte_spinlock_unlock(&mcdi->lock);

	sfc_dma_free(sa, &mcdi->mem);
}
void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}
void
sfc_tx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	sw_index = sa->txq_count;
	while (sw_index-- > 0) {
		if (sa->txq_info[sw_index].txq != NULL)
			sfc_tx_qstop(sa, sw_index);
	}

	efx_tx_fini(sa->nic);
}
void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
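/*
 * Standalone sketch of the bounded exponential backoff used above while
 * waiting for the EVQ initialization event: double the delay on every
 * iteration, cap the step, and give up after a total timeout.  The
 * constants and the done() callback below are hypothetical, not the
 * driver's SFC_EVQ_INIT_* values; <errno.h>, <stdbool.h> and
 * <rte_cycles.h> are assumed to be included.
 */
static int
wait_backoff_example(bool (*done)(void *), void *arg)
{
	unsigned int total_us = 0;
	unsigned int delay_us = 10;		/* initial step */
	const unsigned int max_step_us = 10000;	/* cap on a single step */
	const unsigned int timeout_us = 100000;	/* overall budget */

	do {
		if (done(arg))
			return 0;

		rte_delay_us(delay_us);
		total_us += delay_us;

		delay_us *= 2;			/* exponential growth */
		if (delay_us > max_step_us)
			delay_us = max_step_us;
	} while (total_us < timeout_us);

	return ETIMEDOUT;
}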
void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	unsigned int retry_count;
	unsigned int wait_count;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	if (txq->state == SFC_TXQ_INITIALIZED)
		return;

	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

	sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);

	/*
	 * Retry TX queue flushing in the case of flush failure or
	 * timeout; in the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_tx_qflush(txq->common) != 0) {
			txq->state |= SFC_TXQ_FLUSH_FAILED;
			break;
		}

		/*
		 * Wait for the TX queue flush done or flush failed event
		 * for at least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds
		 * and not more than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS
		 * multiplied by SFC_TX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(txq->evq);
		} while ((txq->state & SFC_TXQ_FLUSHING) &&
			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

		if (txq->state & SFC_TXQ_FLUSHING)
			sfc_err(sa, "TxQ %u flush timed out", sw_index);

		if (txq->state & SFC_TXQ_FLUSHED)
			sfc_info(sa, "TxQ %u flushed", sw_index);
	}

	sa->dp_tx->qreap(txq->dp);

	txq->state = SFC_TXQ_INITIALIZED;

	efx_tx_qdestroy(txq->common);

	sfc_ev_qstop(txq->evq);

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
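/*
 * Worked bound for the flush loop above (constants inferred from the
 * comments, not checked against sfc_tx.h): one attempt polls for at
 * most SFC_TX_QFLUSH_POLL_ATTEMPTS * SFC_TX_QFLUSH_POLL_WAIT_MS = 2 s,
 * so the stated 6 s worst case implies SFC_TX_QFLUSH_ATTEMPTS = 3:
 * 3 attempts * 2 s/attempt = 6 s.
 */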
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	struct sfc_evq *evq;
	uint16_t flags;
	unsigned int desc_index;
	int rc = 0;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	evq = txq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	/*
	 * It seems that DPDK has no controls regarding IPv4 offloads,
	 * hence, we always enable it here
	 */
	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
		flags = EFX_TXQ_CKSUM_IPV4;
	} else {
		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;

		if (sa->tso)
			flags |= EFX_TXQ_FATSOV2;
	}

	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
			    txq_info->entries, 0 /* not used on EF10 */,
			    flags, evq->common,
			    &txq->common, &desc_index);
	if (rc != 0) {
		if (sa->tso && (rc == ENOSPC))
			sfc_err(sa, "ran out of TSO contexts");

		goto fail_tx_qcreate;
	}

	efx_tx_qenable(txq->common);

	txq->state |= SFC_TXQ_STARTED;

	rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
	if (rc != 0)
		goto fail_dp_qstart;

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_dp_qstart:
	txq->state = SFC_TXQ_INITIALIZED;
	efx_tx_qdestroy(txq->common);

fail_tx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}
int
sfc_tx_configure(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
	int rc = 0;

	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
		     nb_tx_queues, sa->txq_count);

	/*
	 * The datapath implementation assumes absence of boundary
	 * limits on Tx DMA descriptors. Addition of these checks on
	 * datapath would simply make the datapath slower.
	 */
	if (encp->enc_tx_dma_desc_boundary != 0) {
		rc = ENOTSUP;
		goto fail_tx_dma_desc_boundary;
	}

	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_tx_queues == sa->txq_count)
		goto done;

	if (sa->txq_info == NULL) {
		sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
						 sizeof(sa->txq_info[0]), 0,
						 sa->socket_id);
		if (sa->txq_info == NULL) {
			rc = ENOMEM;
			goto fail_txqs_alloc;
		}
	} else {
		struct sfc_txq_info *new_txq_info;

		if (nb_tx_queues < sa->txq_count)
			sfc_tx_fini_queues(sa, nb_tx_queues);

		new_txq_info =
			rte_realloc(sa->txq_info,
				    nb_tx_queues * sizeof(sa->txq_info[0]), 0);
		if (new_txq_info == NULL && nb_tx_queues > 0) {
			rc = ENOMEM;
			goto fail_txqs_realloc;
		}

		sa->txq_info = new_txq_info;
		if (nb_tx_queues > sa->txq_count)
			memset(&sa->txq_info[sa->txq_count], 0,
			       (nb_tx_queues - sa->txq_count) *
			       sizeof(sa->txq_info[0]));
	}

	while (sa->txq_count < nb_tx_queues) {
		rc = sfc_tx_qinit_info(sa, sa->txq_count);
		if (rc != 0)
			goto fail_tx_qinit_info;

		sa->txq_count++;
	}

done:
	return 0;

fail_tx_qinit_info:
fail_txqs_realloc:
fail_txqs_alloc:
	sfc_tx_close(sa);

fail_check_mode:
fail_tx_dma_desc_boundary:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
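/*
 * A generic sketch (hypothetical helper, same idea as the realloc path
 * in sfc_tx_configure() above): when shrinking, excess queues must be
 * finalized before the array shrinks; when growing, the new tail is
 * zeroed so per-queue init can assume a clean slate.
 */
static int
txq_info_resize_example(struct sfc_txq_info **infop, unsigned int *countp,
			unsigned int new_count)
{
	struct sfc_txq_info *new_info;

	new_info = rte_realloc(*infop, new_count * sizeof(**infop), 0);
	if (new_info == NULL && new_count > 0)
		return ENOMEM;

	if (new_count > *countp)
		memset(&new_info[*countp], 0,
		       (new_count - *countp) * sizeof(**infop));

	*infop = new_info;
	*countp = new_count;
	return 0;
}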
int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_tx_desc, unsigned int socket_id,
	     const struct rte_eth_txconf *tx_conf)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_txq_info *txq_info;
	struct sfc_evq *evq;
	struct sfc_txq *txq;
	int rc = 0;
	struct sfc_dp_tx_qcreate_info info;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
	txq_info->entries = nb_tx_desc;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
			  txq_info->entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	txq_info->txq = txq;

	txq->hw_index = sw_index;
	txq->evq = evq;
	txq->free_thresh =
		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
		SFC_TX_DEFAULT_FREE_THRESH;
	txq->flags = tx_conf->txq_flags;

	rc = sfc_dma_alloc(sa, "txq", sw_index,
			   EFX_TXQ_SIZE(txq_info->entries),
			   socket_id, &txq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.free_thresh = txq->free_thresh;
	info.flags = tx_conf->txq_flags;
	info.txq_entries = txq_info->entries;
	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
	info.txq_hw_ring = txq->mem.esm_base;
	info.evq_entries = txq_info->entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = txq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;

	rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
				&SFC_DEV_TO_PCI(sa->eth_dev)->addr,
				socket_id, &info, &txq->dp);
	if (rc != 0)
		goto fail_dp_tx_qinit;

	evq->dp_txq = txq->dp;

	txq->state = SFC_TXQ_INITIALIZED;

	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

	return 0;

fail_dp_tx_qinit:
	sfc_dma_free(sa, &txq->mem);

fail_dma_alloc:
	txq_info->txq = NULL;
	rte_free(txq);

fail_txq_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	txq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
	return rc;
}
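/*
 * Hypothetical usage sketch tying the TxQ functions in this file
 * together: the ethdev layer drives a queue through
 * init -> start -> stop -> fini.  The 512-descriptor depth and the
 * tx_conf argument are placeholders, and error handling is trimmed
 * for brevity.
 */
static int
txq_lifecycle_example(struct sfc_adapter *sa, unsigned int sw_index,
		      const struct rte_eth_txconf *tx_conf)
{
	int rc;

	rc = sfc_tx_qinit(sa, sw_index, 512, sa->socket_id, tx_conf);
	if (rc != 0)
		return rc;

	rc = sfc_tx_qstart(sa, sw_index);
	if (rc != 0) {
		sfc_tx_qfini(sa, sw_index);
		return rc;
	}

	sfc_tx_qstop(sa, sw_index);
	sfc_tx_qfini(sa, sw_index);

	return 0;
}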