Example #1
static inline void flush_one_port(struct output_buffer *outbuf, uint8_t outp) {
	unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs, outbuf->count);
	if (unlikely(nb_tx < outbuf->count)) {
		/* free the mbufs that the NIC did not accept for transmit */
		pktmbuf_free_bulk(&outbuf->mbufs[nb_tx], outbuf->count - nb_tx);
	}
	outbuf->count = 0;
}
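Every example on this page calls pktmbuf_free_bulk() to drop the mbufs that could not be transmitted or enqueued. That helper is not part of the public DPDK API and its definition is not shown here; a minimal sketch of what it presumably looks like, simply returning each mbuf in the range to its mempool with rte_pktmbuf_free():

static inline void
pktmbuf_free_bulk(struct rte_mbuf *mbuf_table[], unsigned n)
{
	unsigned int i;

	/* assumed helper: free each mbuf in the table back to its mempool */
	for (i = 0; i < n; i++)
		rte_pktmbuf_free(mbuf_table[i]);
}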
Example #2
File: main.c Project: Cosios/dpdk
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
	unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
			outbuf->count);
	app_stats.tx.ro_tx_pkts += nb_tx;

	if (unlikely(nb_tx < outbuf->count)) {
		/* free the mbufs which failed from transmit */
		app_stats.tx.ro_tx_failed_pkts += (outbuf->count - nb_tx);
		LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
		pktmbuf_free_bulk(&outbuf->mbufs[nb_tx], outbuf->count - nb_tx);
	}
	outbuf->count = 0;
}
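For context, a hedged sketch of how flush_one_port() is typically driven from the TX path: mbufs are accumulated into a per-port struct output_buffer and the buffer is flushed once it fills up. The helper name buffer_packet() and the MAX_PKTS_BURST capacity are assumptions for illustration, not code taken from the project.

/* hypothetical helper, assuming outbuf->mbufs holds up to MAX_PKTS_BURST entries */
static inline void
buffer_packet(struct output_buffer *outbuf, uint8_t outp, struct rte_mbuf *m)
{
	outbuf->mbufs[outbuf->count++] = m;

	/* transmit and reset the buffer once it is full */
	if (outbuf->count == MAX_PKTS_BURST)
		flush_one_port(outbuf, outp);
}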
Example #3
File: main.c Project: Cosios/dpdk
/**
 * This thread receives mbufs from the ports and assigns each of them an
 * internal sequence number (stored in the mbuf) to keep track of their
 * order of arrival.
 * The mbufs are then passed to the worker threads via the rx_to_workers
 * ring.
 */
static int
rx_thread(struct rte_ring *ring_out)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint32_t seqn = 0;
	uint16_t i, ret = 0;
	uint16_t nb_rx_pkts;
	uint8_t port_id;
	struct rte_mbuf *pkts[MAX_PKTS_BURST];

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());

	while (!quit_signal) {

		for (port_id = 0; port_id < nb_ports; port_id++) {
			if ((portmask & (1 << port_id)) != 0) {

				/* receive packets */
				nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
								pkts, MAX_PKTS_BURST);
				if (nb_rx_pkts == 0) {
					LOG_DEBUG(REORDERAPP,
					"%s():Received zero packets\n",	__func__);
					continue;
				}
				app_stats.rx.rx_pkts += nb_rx_pkts;

				/* mark sequence number */
				for (i = 0; i < nb_rx_pkts; )
					pkts[i++]->seqn = seqn++;

				/* enqueue to rx_to_workers ring */
				ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
								nb_rx_pkts);
				app_stats.rx.enqueue_pkts += ret;
				if (unlikely(ret < nb_rx_pkts)) {
					app_stats.rx.enqueue_failed_pkts +=
									(nb_rx_pkts-ret);
					pktmbuf_free_bulk(&pkts[ret], nb_rx_pkts - ret);
				}
			}
		}
	}
	return 0;
}
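rx_thread() only receives the ring_out ring; its creation is not shown in these snippets. A minimal sketch of the assumed setup with rte_ring_create(), using single-producer enqueue because only the RX thread writes to this ring (the wrapper name and RING_SIZE are placeholders):

/* assumed one-time setup, performed in main() before the threads are launched */
static struct rte_ring *
create_rx_to_workers_ring(void)
{
	struct rte_ring *r = rte_ring_create("rx_to_workers", RING_SIZE,
			rte_socket_id(), RING_F_SP_ENQ);

	if (r == NULL)
		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
	return r;
}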
Example #4
File: main.c Project: Cosios/dpdk
/**
 * This thread takes bursts of packets from the rx_to_workers ring,
 * changes the input port value to the output port value and feeds
 * them to the workers_to_tx ring.
 */
static int
worker_thread(void *args_ptr)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint16_t i, ret = 0;
	uint16_t burst_size = 0;
	struct worker_thread_args *args;
	struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL };
	struct rte_ring *ring_in, *ring_out;
	const unsigned xor_val = (nb_ports > 1);

	args = (struct worker_thread_args *) args_ptr;
	ring_in  = args->ring_in;
	ring_out = args->ring_out;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());

	while (!quit_signal) {

		/* dequeue the mbufs from rx_to_workers ring */
		burst_size = rte_ring_dequeue_burst(ring_in,
				(void *)burst_buffer, MAX_PKTS_BURST);
		if (unlikely(burst_size == 0))
			continue;

		__sync_fetch_and_add(&app_stats.wkr.dequeue_pkts, burst_size);

		/* just do some operation on mbuf */
		for (i = 0; i < burst_size;)
			burst_buffer[i++]->port ^= xor_val;

		/* enqueue the modified mbufs to workers_to_tx ring */
		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
		__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
		if (unlikely(ret < burst_size)) {
			/* Return the mbufs to their respective pool, dropping packets */
			__sync_fetch_and_add(&app_stats.wkr.enqueue_failed_pkts,
					(int)burst_size - ret);
			pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret);
		}
	}
	return 0;
}
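worker_thread() matches the lcore_function_t signature, so it is presumably started with rte_eal_remote_launch(); a hedged sketch of the assumed launch logic, with rx_thread() running on the calling lcore (the launch_threads() wrapper itself is hypothetical):

/* hypothetical wrapper, assumed to be called from main() after port and ring setup */
static void
launch_threads(struct worker_thread_args *wt_args)
{
	unsigned lcore_id;

	/* one worker per remaining slave lcore, all sharing the same rings */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(worker_thread, wt_args, lcore_id);

	/* run the RX stage on the current lcore */
	rx_thread(wt_args->ring_in);

	/* wait for all launched lcores to finish */
	rte_eal_mp_wait_lcore();
}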