Example 1
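Checks the requested Tx mode at configuration time: only the ETH_MQ_TX_NONE multi-queue mode is accepted, and the i40e-specific VLAN reject/PVID-insert flags are explicitly refused.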
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
	int rc = 0;

	switch (txmode->mq_mode) {
	case ETH_MQ_TX_NONE:
		break;
	default:
		sfc_err(sa, "Tx multi-queue mode %u not supported",
			txmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * These features are claimed to be i40e-specific,
	 * but it does make sense to double-check their absence
	 */
	if (txmode->hw_vlan_reject_tagged) {
		sfc_err(sa, "Rejecting tagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_reject_untagged) {
		sfc_err(sa, "Rejecting untagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_insert_pvid) {
		sfc_err(sa, "Port-based VLAN insertion not supported");
		rc = EINVAL;
	}

	return rc;
}
Example 2
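Polls an event queue: libefx dispatches pending events to the registered callbacks; if a callback flagged an exception and the adapter lock can be taken, the Rx/Tx queue served by the EvQ is restarted, and an exception that survives the restart is fatal (sfc_panic).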
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
Example 3
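No-op callback that logs an unexpected Tx event on an event queue that serves no Tx queue.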
static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}
Example 4
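No-op callback that logs an unexpected link change event.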
static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}
Example 5
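Callback that logs an unexpected timer event.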
static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}
Example 6
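Callback that logs an unexpected SRAM event.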
static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}
Example 7
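Callback that logs an unexpected software event.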
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}
Example 8
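No-op callback that logs an unexpected TxQ flush done event.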
static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}
Example 9
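No-op callback that logs an unexpected RxQ flush failed event.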
static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}
Example 10
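No-op callback that logs an unexpected Rx event on an event queue that serves no Rx queue.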
static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}
Example 11
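No-op callback that logs an unexpected packed stream Rx event.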
static boolean_t
sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
		 uint32_t pkt_count, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
		evq->evq_index, label, id, pkt_count, flags);
	return B_TRUE;
}
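The no-op handlers in examples 3 through 11 only make sense as entries of a libefx event dispatch table. A minimal sketch of how such a table could be populated (the efx_ev_callbacks_t type and the eec_* member names are assumed here from libefx naming conventions; a real table also wires non-trivial handlers, e.g. for exceptions and initialization, which this section does not show):

static const efx_ev_callbacks_t sfc_ev_nop_callbacks = {
	/* Report-and-ignore handlers from the examples above */
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
};

A table of this shape is what sfc_ev_qpoll() passes to efx_ev_qpoll() as evq->callbacks (see example 2).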
Example 12
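Detaches the event subsystem: finalizes the management event queue and reports any EvQs that were not destroyed beforehand.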
void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}
Example 13
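Polls the management event queue periodically, re-arming itself through an EAL alarm; if alarms are unsupported, the EvQ must be polled indirectly via no-wait link status updates.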
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}
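Since sfc_ev_mgmt_periodic_qpoll() re-arms itself via the alarm, it only needs to be kicked once at start and cancelled at stop. A minimal sketch of such a start/stop pair (the helper names are hypothetical; rte_eal_alarm_cancel() is the standard EAL counterpart of rte_eal_alarm_set()):

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	/* The first invocation polls immediately and schedules the next one */
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	/* Cancel any pending alarm matching the callback and its argument */
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}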
Example 14
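Attaches the event subsystem: parses the performance profile kvarg into EvQ flags and creates the management event queue; Rx/Tx event queues are created later, together with the corresponding Rx/Tx queues.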
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
Example 15
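Validates a Tx queue configuration against adapter and datapath capabilities: the RS bit, the free threshold, the prefetch/host/writeback thresholds, multi-segment support, and the VLAN and checksum offload flags.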
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
		   const struct rte_eth_txconf *tx_conf)
{
	unsigned int flags = tx_conf->txq_flags;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc = 0;

	if (tx_conf->tx_rs_thresh != 0) {
		sfc_err(sa, "RS bit in transmit descriptor is not supported");
		rc = EINVAL;
	}

	if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
		sfc_err(sa,
			"TxQ free threshold too large: %u vs maximum %u",
			tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
		rc = EINVAL;
	}

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfc_err(sa,
			"prefetch/host/writeback thresholds are not supported");
		rc = EINVAL;
	}

	if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
		sfc_err(sa, "Multi-segment is not supported by %s datapath",
			sa->dp_tx->dp.name);
		rc = EINVAL;
	}

	if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
		if (!encp->enc_hw_tx_insert_vlan_enabled) {
			sfc_err(sa, "VLAN offload is not supported");
			rc = EINVAL;
		} else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
			sfc_err(sa,
				"VLAN offload is not supported by %s datapath",
				sa->dp_tx->dp.name);
			rc = EINVAL;
		}
	}

	if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
		sfc_err(sa, "SCTP offload is not supported");
		rc = EINVAL;
	}

	/* We either perform both TCP and UDP offload, or no offload at all */
	if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
	    ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
		sfc_err(sa, "TCP and UDP offloads can't be set independently");
		rc = EINVAL;
	}

	return rc;
}
Example 16
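Stops a Tx queue: the datapath is stopped first, then the hardware flush is retried with bounded polling, and finally the queue is reaped, destroyed, and its event queue stopped.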
void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	unsigned int retry_count;
	unsigned int wait_count;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	if (txq->state == SFC_TXQ_INITIALIZED)
		return;

	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

	sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);

	/*
	 * Retry Tx queue flushing in case of a flush failure or
	 * timeout; in the worst case it can take up to 6 seconds
	 */
	for (retry_count = 0;
	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_tx_qflush(txq->common) != 0) {
			txq->state |= SFC_TXQ_FLUSHING;
			break;
		}

		/*
		 * Wait for the Tx queue flush done or flush failed event for at
		 * least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and no more
		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(txq->evq);
		} while ((txq->state & SFC_TXQ_FLUSHING) &&
			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

		if (txq->state & SFC_TXQ_FLUSHING)
			sfc_err(sa, "TxQ %u flush timed out", sw_index);

		if (txq->state & SFC_TXQ_FLUSHED)
			sfc_info(sa, "TxQ %u flushed", sw_index);
	}

	sa->dp_tx->qreap(txq->dp);

	txq->state = SFC_TXQ_INITIALIZED;

	efx_tx_qdestroy(txq->common);

	sfc_ev_qstop(txq->evq);

	/*
	 * The queue state seems to be used by DPDK for debug purposes only
	 * ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
Example 17
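Starts a Tx queue: the event queue is started, the hardware Tx queue is created with checksum/TSO flags derived from the queue configuration, and the datapath is started; failures are unwound in reverse order via the fail labels.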
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	struct sfc_evq *evq;
	uint16_t flags;
	unsigned int desc_index;
	int rc = 0;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	evq = txq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	/*
	 * It seems that DPDK has no control over the IPv4 checksum offload,
	 * hence we always enable it here
	 */
	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
		flags = EFX_TXQ_CKSUM_IPV4;
	} else {
		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;

		if (sa->tso)
			flags |= EFX_TXQ_FATSOV2;
	}

	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
			    txq_info->entries, 0 /* not used on EF10 */,
			    flags, evq->common,
			    &txq->common, &desc_index);
	if (rc != 0) {
		if (sa->tso && (rc == ENOSPC))
			sfc_err(sa, "ran out of TSO contexts");

		goto fail_tx_qcreate;
	}

	efx_tx_qenable(txq->common);

	txq->state |= SFC_TXQ_STARTED;

	rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
	if (rc != 0)
		goto fail_dp_qstart;

	/*
	 * The queue state seems to be used by DPDK for debug purposes only
	 * ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_dp_qstart:
	txq->state = SFC_TXQ_INITIALIZED;
	efx_tx_qdestroy(txq->common);

fail_tx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}
Example 18
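Rx event handler of the EFX datapath: it derives the number of completed descriptors from the ring-buffer delta between the pending pointer and the reported id (for example, with ptr_mask = 511, pending_id = 510 and stop = 2, the wrap-around branch gives delta = 512 - 510 + 2 = 4), treats a zero-delta zero-length event as a scattered packet abort, and flags an exception on out-of-order completion.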
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}