Example #1
static void
kni_ring_to_kni(struct kni_port_params *p)
{
	uint8_t i, port_id;
	unsigned nb_rx, num;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	port_id = p->port_id;

	/* Burst rx from ring */
	nb_rx = rte_ring_dequeue_burst(p->ring, (void **)pkts_burst, PKT_BURST_SZ);

	if (unlikely(nb_rx > PKT_BURST_SZ)) {
		RTE_LOG(ERR, APP, "Error receiving from eth\n");
		return;
	}

	/* Burst tx to kni */
	num = rte_kni_tx_burst(p->kni, pkts_burst, nb_rx);
	//kni_stats[port_id].rx_packets += num;

	rte_kni_handle_request(p->kni);
	if (unlikely(num < nb_rx)) {
		/* Free mbufs not tx to kni interface */
		kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
		//kni_stats[port_id].rx_dropped += nb_rx - num;
	}

	return;
}
Example #2
static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair,
		struct rte_mbuf **bufs,	uint16_t nb_bufs)
{
	struct aesni_mb_qp *qp = queue_pair;

	unsigned nb_dequeued;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)bufs, nb_bufs);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
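The snippets on this page only show the consumer side of the ring. The following is a minimal sketch, not taken from any of the quoted sources, of a full producer/consumer round trip using the same three-argument rte_ring_enqueue_burst()/rte_ring_dequeue_burst() signatures these examples assume (older DPDK releases; newer ones add a fourth parameter). The ring and the mbufs are assumed to have been created elsewhere.

#include <rte_ring.h>
#include <rte_mbuf.h>

#define BURST 32

/* Sketch: push a batch of mbufs through a ring and pull them back out. */
static void
ring_round_trip(struct rte_ring *r, struct rte_mbuf **pkts, unsigned nb_pkts)
{
	struct rte_mbuf *out[BURST];
	unsigned nb_enq, nb_deq, i;

	/* Producer side: enqueue as many mbufs as fit, free the rest. */
	nb_enq = rte_ring_enqueue_burst(r, (void **)pkts, nb_pkts);
	for (i = nb_enq; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);

	/* Consumer side: drain up to BURST mbufs in a single call. */
	nb_deq = rte_ring_dequeue_burst(r, (void **)out, BURST);

	/* ... hand out[0..nb_deq-1] to the next pipeline stage here ... */
	for (i = 0; i < nb_deq; i++)
		rte_pktmbuf_free(out[i]);
}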
Example #3
/**
 * This thread takes bursts of packets from the rx_to_workers ring,
 * changes each packet's input port value to the output port value
 * and feeds the burst to the workers_to_tx ring.
 */
static int
worker_thread(void *args_ptr)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint16_t i, ret = 0;
	uint16_t burst_size = 0;
	struct worker_thread_args *args;
	struct rte_mbuf *burst_buffer[MAX_PKTS_BURST] = { NULL };
	struct rte_ring *ring_in, *ring_out;
	const unsigned xor_val = (nb_ports > 1);

	args = (struct worker_thread_args *) args_ptr;
	ring_in  = args->ring_in;
	ring_out = args->ring_out;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());

	while (!quit_signal) {

		/* dequeue the mbufs from rx_to_workers ring */
		burst_size = rte_ring_dequeue_burst(ring_in,
				(void *)burst_buffer, MAX_PKTS_BURST);
		if (unlikely(burst_size == 0))
			continue;

		__sync_fetch_and_add(&app_stats.wkr.dequeue_pkts, burst_size);

		/* just do some operation on the mbufs */
		for (i = 0; i < burst_size;)
			burst_buffer[i++]->port ^= xor_val;

		/* enqueue the modified mbufs to workers_to_tx ring */
		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
		__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
		if (unlikely(ret < burst_size)) {
			/* Return the mbufs to their respective pool, dropping packets */
			__sync_fetch_and_add(&app_stats.wkr.enqueue_failed_pkts,
					(int)burst_size - ret);
			pktmbuf_free_bulk(&burst_buffer[ret], burst_size - ret);
		}
	}
	return 0;
}
Example #4
/**
 * Dequeue mbufs from the workers_to_tx ring and transmit them
 */
static int
tx_thread(struct rte_ring *ring_in)
{
	uint32_t i, dqnum;
	uint8_t outp;
	static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct output_buffer *outbuf;

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
							rte_lcore_id());
	while (!quit_signal) {

		/* dequeue the mbufs from the workers_to_tx ring */
		dqnum = rte_ring_dequeue_burst(ring_in,
				(void *)mbufs, MAX_PKTS_BURST);

		if (unlikely(dqnum == 0))
			continue;

		app_stats.tx.dequeue_pkts += dqnum;

		for (i = 0; i < dqnum; i++) {
			outp = mbufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp)) == 0) {
				rte_pktmbuf_free(mbufs[i]);
				continue;
			}

			outbuf = &tx_buffers[outp];
			outbuf->mbufs[outbuf->count++] = mbufs[i];
			if (outbuf->count == MAX_PKTS_BURST)
				flush_one_port(outbuf, outp);
		}
	}

	return 0;
}
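Example #4 buffers mbufs per output port and calls flush_one_port() once a burst is full, but that helper is not part of the quoted snippet. Below is a minimal sketch of what such a flush helper might look like, assuming struct output_buffer is simply an mbufs[] array plus a count, as the usage suggests; it is an illustration, not the application's actual implementation.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_branch_prediction.h>

/*
 * Hypothetical flush helper: transmit the mbufs buffered for one port and
 * free whatever the NIC did not accept.
 */
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
	uint16_t nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs, outbuf->count);

	/* free the mbufs that failed to transmit */
	while (unlikely(nb_tx < outbuf->count))
		rte_pktmbuf_free(outbuf->mbufs[nb_tx++]);

	outbuf->count = 0;
}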
Example #5
/**
 * Dequeue mbufs from the workers_to_tx ring and reorder them before
 * transmitting.
 */
static int
send_thread(struct send_thread_args *args)
{
	int ret;
	unsigned int i, dret;
	uint16_t nb_dq_mbufs;
	uint8_t outp;
	static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
	struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL};

	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id());

	while (!quit_signal) {

		/* dequeue the mbufs from the workers_to_tx ring */
		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
				(void *)mbufs, MAX_PKTS_BURST);

		if (unlikely(nb_dq_mbufs == 0))
			continue;

		app_stats.tx.dequeue_pkts += nb_dq_mbufs;

		for (i = 0; i < nb_dq_mbufs; i++) {
			/* send dequeued mbufs for reordering */
			ret = rte_reorder_insert(args->buffer, mbufs[i]);

			if (ret == -1 && rte_errno == ERANGE) {
				/* Too early pkts should be transmitted out directly */
				LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
						"direct enqueuing to TX\n", __func__);
				outp = mbufs[i]->port;
				if ((portmask & (1 << outp)) == 0) {
					rte_pktmbuf_free(mbufs[i]);
					continue;
				}
				if (rte_eth_tx_burst(outp, 0, &mbufs[i], 1) != 1) {
					rte_pktmbuf_free(mbufs[i]);
					app_stats.tx.early_pkts_tx_failed_woro++;
				} else
					app_stats.tx.early_pkts_txtd_woro++;
			} else if (ret == -1 && rte_errno == ENOSPC) {
				/**
				 * Early pkts just outside of window should be dropped
				 */
				rte_pktmbuf_free(mbufs[i]);
			}
		}

		/*
		 * drain MAX_PKTS_BURST of reordered
		 * mbufs for transmit
		 */
		dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST);
		for (i = 0; i < dret; i++) {

			struct output_buffer *outbuf;
			uint8_t outp1;

			outp1 = rombufs[i]->port;
			/* skip ports that are not enabled */
			if ((portmask & (1 << outp1)) == 0) {
				rte_pktmbuf_free(rombufs[i]);
				continue;
			}

			outbuf = &tx_buffers[outp1];
			outbuf->mbufs[outbuf->count++] = rombufs[i];
			if (outbuf->count == MAX_PKTS_BURST)
				flush_one_port(outbuf, outp1);
		}
	}
	return 0;
}
void send_loop(void)
{
	RTE_LOG(INFO, APP, "send_loop()\n");
	char pkt[PKT_SIZE] = {0};
	int nreceived;

	int retval = 0;
	(void) retval;
#ifdef CALC_CHECKSUM
	unsigned int kk = 0;
#endif
	srand(time(NULL));

	//Initialize packet contents
	int i;
	for(i = 0; i < PKT_SIZE; i++)
		pkt[i] = rand()%256;

#if ALLOC_METHOD == ALLOC_APP
	struct rte_mempool * packets_pool = rte_mempool_lookup("ovs_mp_1500_0_262144");
	//struct rte_mempool * packets_pool = rte_mempool_lookup("packets");

	//Create mempool
	//struct rte_mempool * packets_pool = rte_mempool_create(
	//	"packets",
	//	NUM_PKTS,
	//	MBUF_SIZE,
	//	CACHE_SIZE,					//This is the size of the mempool cache
	//	sizeof(struct rte_pktmbuf_pool_private),
	//	rte_pktmbuf_pool_init,
	//	NULL,
	//	rte_pktmbuf_init,
	//	NULL,
	//	rte_socket_id(),
	//	0 /*NO_FLAGS*/);


	if(packets_pool == NULL)
	{
		RTE_LOG(INFO, APP, "rte_errno: %s\n", rte_strerror(rte_errno));
		rte_exit(EXIT_FAILURE, "Cannot find memory pool\n");
	}

	RTE_LOG(INFO, APP, "There are %d free packets in the pool\n",
		rte_mempool_count(packets_pool));

#endif

#ifdef USE_BURST
	struct rte_mbuf * packets_array[BURST_SIZE] = {0};
	struct rte_mbuf * packets_array_rx[BURST_SIZE] = {0};
	int ntosend;
	int n;
	(void) n;

	/* prealloc packets */
	do
	{
		n = rte_mempool_get_bulk(packets_pool, (void **) packets_array, BURST_SIZE);
	} while(n != 0 && !stop);
	ntosend = BURST_SIZE;

#else
	struct rte_mbuf * mbuf;
	/* prealloc packet */
	do {
		mbuf = rte_pktmbuf_alloc(packets_pool);
	} while(mbuf == NULL);

#endif

	RTE_LOG(INFO, APP, "Starting sender loop\n");
	signal (SIGINT, crtl_c_handler);
	stop = 0;
	while(likely(!stop))
	{
		while(pause_);
#ifdef USE_BURST

	#if ALLOC_METHOD == ALLOC_OVS
		//Try to get BURST_SIZE free slots
		ntosend = rte_ring_dequeue_burst(alloc_q, (void **) packets_array, BURST_SIZE);
	#elif ALLOC_METHOD == ALLOC_APP
		//do
		//{
		//	n = rte_mempool_get_bulk(packets_pool, (void **) packets_array, BURST_SIZE);
		//} while(n != 0 && !stop);
		//ntosend = BURST_SIZE;
	#else
		#error "No implemented"
	#endif

		//Copy data to the buffers
		for(i = 0; i < ntosend; i++)
		{
			rte_memcpy(packets_array[i]->buf_addr, pkt, PKT_SIZE);
			//fill_packet(packets_array[i]->pkt.data);
			packets_array[i]->next = NULL;
			packets_array[i]->pkt_len = PKT_SIZE;
			packets_array[i]->data_len = PKT_SIZE;

		#ifdef CALC_CHECKSUM
			/* checksum eight 64-bit words of this packet's buffer */
			for(kk = 0; kk < 8; kk++)
				checksum += ((uint64_t *)packets_array[i]->buf_addr)[kk];
		#endif
		}

		//Enqueue data (try until all the allocated packets are enqueued)
		i = 0;
		while(i < ntosend && !stop)
		{
			i += rte_ring_enqueue_burst(tx_ring, (void **) &packets_array[i], ntosend - i);

			/* also dequeue some packets */
			nreceived = rte_ring_dequeue_burst(rx_ring, (void **) packets_array_rx, BURST_SIZE);
			rx += nreceived; /* update statistics */
		}

#else	// [NO] USE_BURST
	#if ALLOC_METHOD  == ALLOC_OVS //Method 1
		//Get a free buffer to use for a packet
		retval = rte_ring_dequeue(alloc_q, (void **)&mbuf);
		if(retval != 0)
		{
		#ifdef CALC_ALLOC_STATS
			//stats.alloc_fails++;
		#endif
			continue;
		}
	#elif ALLOC_METHOD  == ALLOC_APP //Method 2
		//mbuf = rte_pktmbuf_alloc(packets_pool);
		//if(mbuf == NULL)
		//{
		//#ifdef CALC_ALLOC_STATS
		//	stats.alloc_fails++;
		//#endif
		//	continue;
		//}
	#else
		#error "ALLOC_METHOD has a non valid value"
	#endif

	#if DELAY_CYCLES > 0
		//This loop increases the number of packets per second (don't ask me why)
		unsigned long long j = 0;
		for(j = 0; j < DELAY_CYCLES; j++)
			asm("");
	#endif

		//Copy packet to the correct buffer
		rte_memcpy(mbuf->buf_addr, pkt, PKT_SIZE);
		//fill_packet(mbuf->pkt.data);
		//mbuf->pkt.next = NULL;
		//mbuf->pkt.pkt_len = PKT_SIZE;
		//mbuf->pkt.data_len = PKT_SIZE;
		(void) pkt;
		mbuf->next = NULL;
		mbuf->pkt_len = PKT_SIZE;
		mbuf->data_len = PKT_SIZE;

	#ifdef CALC_CHECKSUM
		for(kk = 0; kk < 8; kk++)
			checksum += ((uint64_t *)mbuf->buf_addr)[kk];
	#endif

		//this method avoids dropping packets:
		//simply retries until the packet is inserted in the queue
		tryagain:
		retval = rte_ring_enqueue(tx_ring, (void *) mbuf);
		if(retval == -ENOBUFS && !stop)
		{
	#ifdef CALC_TX_TRIES
			//stats.tx_retries++;
	#endif
			goto tryagain;
		}

	#ifdef CALC_TX_STATS
		//stats.tx++;
	#endif

#endif //USE_BURST
	}

#ifdef CALC_CHECKSUM
	printf("Checksum was %" PRIu64 "\n", checksum);
#endif

}
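Example #5 drains a reorder buffer that the quoted code receives ready-made in args->buffer. The following is a minimal sketch of how such a buffer could be created with librte_reorder; the name "pkt_reorder" and the size 8192 are placeholders, not values from the example.

#include <stdlib.h>
#include <rte_reorder.h>
#include <rte_lcore.h>
#include <rte_debug.h>

/* Sketch: allocate a reorder buffer on the local NUMA socket before
 * starting send_thread(). */
static struct rte_reorder_buffer *
setup_reorder_buffer(void)
{
	struct rte_reorder_buffer *b;

	b = rte_reorder_create("pkt_reorder", rte_socket_id(), 8192);
	if (b == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create reorder buffer\n");

	return b;
}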