Example 1
static void
port_ieee1588_tx_timestamp_check(portid_t pi)
{
	struct port_ieee1588_ops *ieee_ops;
	uint64_t tx_tmst;
	unsigned wait_us;

	ieee_ops = (struct port_ieee1588_ops *)ports[pi].fwd_ctx;
	wait_us = 0;
	while ((ieee_ops->tx_tmst_read(pi, &tx_tmst) < 0) &&
	       (wait_us < MAX_TX_TMST_WAIT_MICROSECS)) {
		rte_delay_us(1);
		wait_us++;
	}
	if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) {
		printf("Port %u: TX timestamp registers not valid after"
		       "%u micro-seconds\n",
		       (unsigned) pi, (unsigned) MAX_TX_TMST_WAIT_MICROSECS);
		return;
	}
	printf("Port %u TX timestamp value 0x%"PRIu64" validated after "
	       "%u micro-second%s\n",
	       (unsigned) pi, tx_tmst, wait_us,
	       (wait_us == 1) ? "" : "s");
}
Example 2
static void
sfc_mcdi_poll(struct sfc_adapter *sa)
{
	efx_nic_t *enp;
	unsigned int delay_total;
	unsigned int delay_us;
	boolean_t aborted __rte_unused;

	delay_total = 0;
	delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
	enp = sa->nic;

	do {
		if (efx_mcdi_request_poll(enp))
			return;

		if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
			aborted = efx_mcdi_request_abort(enp);
			SFC_ASSERT(aborted);
			sfc_mcdi_timeout(sa);
			return;
		}

		rte_delay_us(delay_us);

		delay_total += delay_us;

		/* Exponentially back off the poll frequency */
		RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
		delay_us *= 2;
		if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
			delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;

	} while (1);
}
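The example above shows a recurring structure: poll a completion condition, sleep with rte_delay_us() between polls, back the interval off exponentially up to a cap, and abort once a total time budget is exceeded. The following is a minimal, driver-agnostic sketch of that pattern; the callback type, the constants and the function name are illustrative and not part of any DPDK or driver API.

#include <stdbool.h>
#include <rte_cycles.h>

#define POLL_INTERVAL_MIN_US	10
#define POLL_INTERVAL_MAX_US	10000
#define POLL_TIMEOUT_US		1000000

/* Poll done(ctx) with capped exponential backoff; 0 on success, -1 on timeout */
static int
poll_with_backoff(bool (*done)(void *ctx), void *ctx)
{
	unsigned int total_us = 0;
	unsigned int delay_us = POLL_INTERVAL_MIN_US;

	for (;;) {
		if (done(ctx))
			return 0;

		if (total_us > POLL_TIMEOUT_US)
			return -1; /* caller decides how to abort */

		rte_delay_us(delay_us);
		total_us += delay_us;

		/* Exponentially back off, capped at the maximum interval */
		delay_us *= 2;
		if (delay_us > POLL_INTERVAL_MAX_US)
			delay_us = POLL_INTERVAL_MAX_US;
	}
}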
Example 3
/*
 * Softnic packet forward
 */
static void
softnic_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* Receive a burst of packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
			pkts_burst, nb_pkt_per_burst);
	fs->rx_packets += nb_rx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
			pkts_burst, nb_rx);

	/* Retry if necessary */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif

	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
Example 4
static inline int
mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
			void *rxmsg, uint16_t rxsize)
{
	int res = 0, wait;
	uint16_t len;
	struct mbox_ram_hdr rx_hdr;
	uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
	uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);

	/* Wait for response */
	wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
	while (wait > 0) {
		rte_delay_us(100);
		rx_hdr.u64 = rte_read64(ram_mbox_hdr);
		if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
			break;
		--wait;
	}

	hdr->res_code = rx_hdr.res_code;
	m->tag_own++;

	/* Timeout */
	if (wait <= 0) {
		res = -ETIMEDOUT;
		goto error;
	}

	/* Tag mismatch */
	if (m->tag_own != rx_hdr.tag) {
		res = -EINVAL;
		goto error;
	}

	/* PF nacked the msg */
	if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
		res = -EBADMSG;
		goto error;
	}

	len = RTE_MIN(rx_hdr.len, rxsize);
	if (rxmsg)
		mbox_msgcpy(rxmsg, ram_mbox_msg, len);

	return len;

error:
	mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
			m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
			hdr->res_code);
	return res;
}
Example 5
/* Test that the flush function is able to move packets between workers when
 * one worker shuts down.
 */
static int
test_flush_with_worker_shutdown(struct worker_params *wp,
		struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	struct rte_mbuf *bufs[BURST];
	unsigned i;

	printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);

	clear_packet_count();
	if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
		printf("line %d: Error getting mbufs from pool\n", __LINE__);
		return -1;
	}

	/* now set all hash values in all buffers to zero, so all pkts go to the
	 * one worker thread */
	for (i = 0; i < BURST; i++)
		bufs[i]->hash.usr = 0;

	rte_distributor_process(d, bufs, BURST);
	/* at this point, we will have processed some packets and have a full
	 * backlog for the other ones at worker 0.
	 */

	/* get worker zero to quit */
	zero_quit = 1;

	/* flush the distributor */
	rte_distributor_flush(d);

	rte_delay_us(10000);

	zero_quit = 0;
	for (i = 0; i < rte_lcore_count() - 1; i++)
		printf("Worker %u handled %u packets\n", i,
				worker_stats[i].handled_packets);

	if (total_packet_count() != BURST) {
		printf("Line %d: Error, not all packets flushed. "
				"Expected %u, got %u\n",
				__LINE__, BURST, total_packet_count());
		return -1;
	}

	printf("Flush test with worker shutdown passed\n\n");
	return 0;
}
Example 6
int
test_cycles(void)
{
	unsigned i;
	uint64_t start_cycles, cycles, prev_cycles;
	uint64_t hz = rte_get_hpet_hz();
	uint64_t max_inc = (hz / 100); /* 10 ms max between 2 reads */

	/* check that the timer is always incrementing */
	start_cycles = rte_get_hpet_cycles();
	prev_cycles = start_cycles;
	for (i = 0; i < N; i++) {
		cycles = rte_get_hpet_cycles();
		if ((uint64_t)(cycles - prev_cycles) > max_inc) {
			printf("increment too high or going backwards\n");
			return -1;
		}
		prev_cycles = cycles;
	}

	/* check that waiting 1 second is precise */
	prev_cycles = rte_get_hpet_cycles();
	rte_delay_us(1000000);
	cycles = rte_get_hpet_cycles();

	if ((uint64_t)(cycles - prev_cycles) > (hz + max_inc)) {
		printf("delay_us is not accurate: too long\n");
		return -1;
	}
	if ((uint64_t)(cycles - prev_cycles) < (hz - max_inc)) {
		printf("delay_us is not accurate: too short\n");
		return -1;
	}

	return 0;
}
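The same 1-second accuracy check can be written against DPDK's default timer source (the TSC unless HPET is selected) using rte_get_timer_hz() and rte_get_timer_cycles(), which avoids the HPET-only build requirement of the test above. A minimal sketch under that assumption follows; the hz/100 tolerance mirrors the 10 ms slack used above.

#include <stdio.h>
#include <stdint.h>
#include <rte_cycles.h>

/* Check that rte_delay_us(1s) sleeps roughly one second (+/- ~10 ms) */
static int
check_delay_us_accuracy(void)
{
	uint64_t hz = rte_get_timer_hz();
	uint64_t tolerance = hz / 100;	/* ~10 ms of slack */
	uint64_t start, elapsed;

	start = rte_get_timer_cycles();
	rte_delay_us(1000000);		/* request a 1 second delay */
	elapsed = rte_get_timer_cycles() - start;

	if (elapsed > hz + tolerance || elapsed < hz - tolerance) {
		printf("rte_delay_us(1s) is off by more than the tolerance\n");
		return -1;
	}
	return 0;
}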
Example 7
/* Perform a sanity test of the distributor with a large number of packets,
 * where we allocate a new set of mbufs for each burst. The workers then
 * free the mbufs. This ensures that we don't have any packet leaks in the
 * library.
 */
static int
sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
{
	struct rte_distributor *d = wp->dist;
	unsigned i;
	struct rte_mbuf *bufs[BURST];

	printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);

	clear_packet_count();
	for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
		unsigned j;
		while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
			rte_distributor_process(d, NULL, 0);
		for (j = 0; j < BURST; j++) {
			bufs[j]->hash.usr = (i+j) << 1;
			rte_mbuf_refcnt_set(bufs[j], 1);
		}

		rte_distributor_process(d, bufs, BURST);
	}

	rte_distributor_flush(d);

	rte_delay_us(10000);

	if (total_packet_count() < (1<<ITER_POWER)) {
		printf("Line %u: Packet count is incorrect, %u, expected %u\n",
				__LINE__, total_packet_count(),
				(1<<ITER_POWER));
		return -1;
	}

	printf("Sanity test with mbuf alloc/free passed\n\n");
	return 0;
}
Example 8
/**
 * Perform the VF reset operation.
 */
int
i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
{
	uint32_t val, i;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint16_t vf_id, abs_vf_id, vf_msix_num;
	int ret;
	struct i40e_virtchnl_queue_select qsel;

	if (vf == NULL)
		return -EINVAL;

	vf_id = vf->vf_idx;
	abs_vf_id = vf_id + hw->func_caps.vf_base_id;

	/* Notify VF that we are in VFR progress */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);

	/*
	 * If a SW VF reset is required, a VFLR interrupt will be generated
	 * and this function will be called again. To avoid that,
	 * disable the interrupt first.
	 */
	if (do_hw_reset) {
		vf->state = I40E_VF_INRESET;
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
		val |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
		I40E_WRITE_FLUSH(hw);
	}

#define VFRESET_MAX_WAIT_CNT 100
	/* Wait until VF reset is done */
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(10);
		val = I40E_READ_REG(hw, I40E_VPGEN_VFRSTAT(vf_id));
		if (val & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "VF reset timeout\n");
		return -ETIMEDOUT;
	}

	/* If this is not the first reset, do the cleanup job first */
	if (vf->vsi) {
		/* Disable queues */
		memset(&qsel, 0, sizeof(qsel));
		for (i = 0; i < vf->vsi->nb_qps; i++)
			qsel.rx_queues |= 1 << i;
		qsel.tx_queues = qsel.rx_queues;
		ret = i40e_pf_host_switch_queues(vf, &qsel, false);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Disable VF queues failed\n");
			return -EFAULT;
		}

		/* Disable VF interrupt setting */
		vf_msix_num = hw->func_caps.num_msix_vectors_vf;
		for (i = 0; i < vf_msix_num; i++) {
			if (!i)
				val = I40E_VFINT_DYN_CTL0(vf_id);
			else
				val = I40E_VFINT_DYN_CTLN(((vf_msix_num - 1) *
							(vf_id)) + (i - 1));
			I40E_WRITE_REG(hw, val, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		}
		I40E_WRITE_FLUSH(hw);

		/* remove VSI */
		ret = i40e_vsi_release(vf->vsi);
		if (ret != I40E_SUCCESS) {
			PMD_DRV_LOG(ERR, "Release VSI failed\n");
			return -EFAULT;
		}
	}

#define I40E_VF_PCI_ADDR  0xAA
#define I40E_VF_PEND_MASK 0x20
	/* Check the pending transactions of this VF */
	/* Use absolute VF id, refer to datasheet for details */
	I40E_WRITE_REG(hw, I40E_PF_PCI_CIAA, I40E_VF_PCI_ADDR |
		(abs_vf_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < VFRESET_MAX_WAIT_CNT; i++) {
		rte_delay_us(1);
		val = I40E_READ_REG(hw, I40E_PF_PCI_CIAD);
		if ((val & I40E_VF_PEND_MASK) == 0)
			break;
	}

	if (i >= VFRESET_MAX_WAIT_CNT) {
		PMD_DRV_LOG(ERR, "Wait VF PCI transaction end timeout\n");
		return -ETIMEDOUT;
	}

	/* Reset done, Set COMPLETE flag and clear reset bit */
	I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
	val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
	val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
	vf->reset_cnt++;
	I40E_WRITE_FLUSH(hw);

	/* Allocate resource again */
	vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV,
			vf->pf->main_vsi, vf->vf_idx);
	if (vf->vsi == NULL) {
		PMD_DRV_LOG(ERR, "Add vsi failed\n");
		return -EFAULT;
	}

	ret = i40e_pf_vf_queues_mapping(vf);
	if (ret != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "queue mapping error\n");
		i40e_vsi_release(vf->vsi);
		return -EFAULT;
	}

	return ret;
}
Example 9
static enum test_result func_test3(struct test_config *tcfg)
{
	enum test_result result = PASS;
	uint32_t i = 0;

	printf("%s", tcfg->msg);

	if (test_rte_red_init(tcfg) != PASS) {
		result = FAIL;
		goto out;
	}

	rte_red_rt_data_init(tcfg->tqueue->rdata);

	if (increase_actual_qsize(tcfg->tconfig->rconfig,
				  tcfg->tqueue->rdata,
				  tcfg->tqueue->q,
				  *tcfg->tlevel,
				  tcfg->tqueue->q_ramp_up) != 0) {
		result = FAIL;
		goto out;
	}

	if (increase_average_qsize(tcfg->tconfig->rconfig,
				   tcfg->tqueue->rdata,
				   tcfg->tqueue->q,
				   *tcfg->tlevel,
				   tcfg->tqueue->avg_ramp_up) != 0) {
		result = FAIL;
		goto out;
	}

	printf("%s", tcfg->htxt);

	for (i = 0; i < tcfg->tvar->num_iterations; i++) {
		double avg_before = 0;
		double avg_after = 0;
		double exp_avg = 0;
		double diff = 0.0;

		avg_before = rte_red_get_avg_float(tcfg->tconfig->rconfig, tcfg->tqueue->rdata);

		/**
		 * empty the queue
		 */
		*tcfg->tqueue->q = 0;
		rte_red_mark_queue_empty(tcfg->tqueue->rdata, get_port_ts());

		rte_delay_us(tcfg->tvar->wait_usec);

		/**
		 * enqueue one packet to recalculate average queue size
		 */
		if (rte_red_enqueue(tcfg->tconfig->rconfig,
				    tcfg->tqueue->rdata,
				    *tcfg->tqueue->q,
				    get_port_ts()) == 0) {
			(*tcfg->tqueue->q)++;
		} else {
			printf("%s:%d: packet enqueued on empty queue was dropped\n", __func__, __LINE__);
			result = FAIL;
		}

		exp_avg = calc_exp_avg_on_empty(avg_before,
					      (1 << *tcfg->tconfig->wq_log2),
					      tcfg->tvar->wait_usec);
		avg_after = rte_red_get_avg_float(tcfg->tconfig->rconfig,
						  tcfg->tqueue->rdata);
		if (!check_avg(&diff, avg_after, exp_avg, (double)tcfg->tqueue->avg_tolerance))
			result = FAIL;

		printf("%-15.4lf%-15.4lf%-15.4lf%-15.4lf%-15.4lf%-15s\n",
		       avg_before, avg_after, exp_avg, diff,
		       (double)tcfg->tqueue->avg_tolerance,
		       diff <= (double)tcfg->tqueue->avg_tolerance ? "pass" : "fail");
	}
out:
	return result;
}
Example 10
/**
 * Performance test function to measure enqueue performance when the
 * queue is empty. This runs performance tests 4, 5 and 6
 */
static enum test_result perf2_test(struct test_config *tcfg)
{
	enum test_result result = PASS;
	struct rdtsc_prof prof = {0, 0, 0, 0, 0.0, NULL};
	uint32_t total = 0;
	uint32_t i = 0;

	printf("%s", tcfg->msg);

	rdtsc_prof_init(&prof, "enqueue");

	if (test_rte_red_init(tcfg) != PASS) {
		result = FAIL;
		goto out;
	}

	printf("%s", tcfg->htxt);

	for (i = 0; i < tcfg->tvar->num_iterations; i++) {
		uint32_t count = 0;
		uint64_t ts = 0;
		double avg_before = 0;
		int ret = 0;

		/**
		 * set average queue size to target level
		 */
		*tcfg->tqueue->q = *tcfg->tlevel;
		count = (*tcfg->tqueue->rdata).count;

		/**
		 * initialize the rte_red run time data structure
		 */
		rte_red_rt_data_init(tcfg->tqueue->rdata);
		(*tcfg->tqueue->rdata).count = count;

		/**
		 * set the queue average
		 */
		rte_red_set_avg_int(tcfg->tconfig->rconfig, tcfg->tqueue->rdata, *tcfg->tlevel);
		avg_before = rte_red_get_avg_float(tcfg->tconfig->rconfig, tcfg->tqueue->rdata);
		if ((avg_before < *tcfg->tlevel) || (avg_before > *tcfg->tlevel)) {
			result = FAIL;
			goto out;
		}

		/**
		 * empty the queue
		 */
		*tcfg->tqueue->q = 0;
		rte_red_mark_queue_empty(tcfg->tqueue->rdata, get_port_ts());

		/**
		 * wait for specified period of time
		 */
		rte_delay_us(tcfg->tvar->wait_usec);

		/**
		 * measure performance of enqueue operation while queue is empty
		 */
		ts = get_port_ts();
		rdtsc_prof_start(&prof);
		ret = rte_red_enqueue(tcfg->tconfig->rconfig, tcfg->tqueue->rdata,
				      *tcfg->tqueue->q, ts);
		rdtsc_prof_end(&prof);

		/**
		 * gather enqueued/dropped statistics
		 */
		if (ret == 0)
			(*tcfg->tvar->enqueued)++;
		else
			(*tcfg->tvar->dropped)++;

		/**
		 * on first and last iteration, confirm that
		 * average queue size was computed correctly
		 */
		if ((i == 0) || (i == tcfg->tvar->num_iterations - 1)) {
			double avg_after = 0;
			double exp_avg = 0;
			double diff = 0.0;
			int ok = 0;

			avg_after = rte_red_get_avg_float(tcfg->tconfig->rconfig, tcfg->tqueue->rdata);
			exp_avg = calc_exp_avg_on_empty(avg_before,
						  (1 << *tcfg->tconfig->wq_log2),
						  tcfg->tvar->wait_usec);
			if (check_avg(&diff, avg_after, exp_avg, (double)tcfg->tqueue->avg_tolerance))
				ok = 1;
			printf("%-15u%-15.4lf%-15.4lf%-15.4lf%-15.4lf%-15.4lf%-15s\n",
				i, avg_before, avg_after, exp_avg, diff,
				(double)tcfg->tqueue->avg_tolerance, ok ? "pass" : "fail");
			if (!ok) {
				result = FAIL;
				goto out;
			}
		}
	}
	total = *tcfg->tvar->enqueued + *tcfg->tvar->dropped;
	printf("\ntotal: %u, enqueued: %u (%.2lf%%), dropped: %u (%.2lf%%)\n", total,
	       *tcfg->tvar->enqueued, ((double)(*tcfg->tvar->enqueued) / (double)total) * 100.0,
	       *tcfg->tvar->dropped, ((double)(*tcfg->tvar->dropped) / (double)total) * 100.0);

	rdtsc_prof_print(&prof);
out:
	return result;
}
Example 11
File: env.c Project: spdk/spdk
void spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}
Example 12
/*
 * Forwarding of packets in MAC mode with a wait and retry on TX to reduce packet loss.
 * Change the source and the destination Ethernet addresses of packets
 * before forwarding them.
 */
static void
pkt_burst_mac_retry_forward(struct fwd_stream *fs)
{
	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf  *mb;
	struct ether_hdr *eth_hdr;
	uint32_t retry;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	for (i = 0; i < nb_rx; i++) {
		mb = pkts_burst[i];
		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);

	/*
	 * If not all packets have been TX'd then wait and retry.
	 */
	if (unlikely(nb_tx < nb_rx)) {
		for (retry = 0; retry < burst_tx_retry_num; retry++) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
				&pkts_burst[nb_tx], nb_rx - nb_tx);
			if (nb_tx == nb_rx)
				break;
		}
	}

	fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
Example 13
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/* Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
Example 14
/*
 * Forwarding of packets in MAC mode.
 * Change the source and the destination Ethernet addresses of packets
 * before forwarding them.
 */
static void
pkt_burst_mac_forward(struct fwd_stream *fs)
{
	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
	struct rte_port  *txp;
	struct rte_mbuf  *mb;
	struct ether_hdr *eth_hdr;
	uint32_t retry;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags = 0;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	txp = &ports[fs->tx_port];
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
		ol_flags = PKT_TX_VLAN_PKT;
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
		ol_flags |= PKT_TX_QINQ_PKT;
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
		ol_flags |= PKT_TX_MACSEC;
	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));
		mb = pkts_burst[i];
		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		mb->ol_flags = ol_flags;
		mb->l2_len = sizeof(struct ether_hdr);
		mb->l3_len = sizeof(struct ipv4_hdr);
		mb->vlan_tci = txp->tx_vlan_id;
		mb->vlan_tci_outer = txp->tx_vlan_id_outer;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}

	fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
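Examples 3, 12 and 14 all repeat the same bounded TX retry idiom: transmit a burst, and if the driver accepted only part of it, wait with rte_delay_us() and retry until either everything is sent or a retry limit is hit, then drop whatever remains. Below is a standalone sketch of that idiom; the helper name and the max_retries/delay_us parameters are illustrative stand-ins rather than testpmd's globals (burst_tx_retry_num, burst_tx_delay_time).

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>

/*
 * Transmit a burst with bounded retries. Returns the number of packets
 * actually sent; the caller must free pkts[nb_tx..nb_pkts-1] on shortfall.
 */
static uint16_t
tx_burst_with_retry(uint16_t port_id, uint16_t queue_id,
		    struct rte_mbuf **pkts, uint16_t nb_pkts,
		    uint32_t max_retries, unsigned int delay_us)
{
	uint16_t nb_tx;
	uint32_t retry;

	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	for (retry = 0; nb_tx < nb_pkts && retry < max_retries; retry++) {
		/* Give the TX queue time to drain, then retry the remainder */
		rte_delay_us(delay_us);
		nb_tx += rte_eth_tx_burst(port_id, queue_id,
					  &pkts[nb_tx], nb_pkts - nb_tx);
	}

	return nb_tx;
}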