Example #1
static inline void
send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
{
	uint32_t nb_tx = 0, i;

	nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
				p->tx_buf_count);

	/* We sent all the packets on the first try */
	if (nb_tx >= p->tx_buf_count) {
		p->tx_buf_count = 0;
		return;
	}

	for (i = 0; i < p->n_retries; i++) {
		nb_tx += rte_ring_sp_enqueue_burst(p->ring,
				(void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);

		/* We sent all the packets after one or more retries */
		if (nb_tx >= p->tx_buf_count) {
			p->tx_buf_count = 0;
			return;
		}
	}

	/* We couldn't send all the packets within the maximum number of retries */
	RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
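
The pattern above is the nodrop variant: it retries the enqueue up to p->n_retries times and only frees the leftover mbufs once the retry budget is exhausted. Every example on this page relies on the same contract: rte_ring_sp_enqueue_burst() attempts to enqueue up to n objects on a single-producer ring and returns how many it actually placed, anywhere from 0 to n. A minimal, self-contained sketch of that contract and of the caller's cleanup duty (the function name is illustrative; recent DPDK releases also add a fourth free_space argument, while these examples use the older three-argument form):

#include <rte_ring.h>
#include <rte_mbuf.h>

static inline void
enqueue_or_free(struct rte_ring *r, struct rte_mbuf **pkts, unsigned int n)
{
	/* Returns the number of objects actually enqueued (0..n). */
	unsigned int sent = rte_ring_sp_enqueue_burst(r, (void **)pkts, n);

	/* Everything past 'sent' is still owned by the caller: free it
	 * (drop policy) or keep it buffered for a retry (nodrop policy). */
	for ( ; sent < n; sent++)
		rte_pktmbuf_free(pkts[sent]);
}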
Example #2
static inline int
rte_port_ring_writer_tx_bulk_internal(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask,
		uint32_t is_multi)
{
	struct rte_port_ring_writer *p =
		(struct rte_port_ring_writer *) port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
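	/*
	 * expr == 0 iff pkts_mask is a contiguous run of ones starting at
	 * bit 0 (no holes) that also contains the burst-size bit, i.e. a
	 * full burst that can take the fast path below.
	 */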
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count) {
			if (is_multi)
				send_burst_mp(p);
			else
				send_burst(p);
		}

		RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
		if (is_multi)
			n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
				n_pkts);
		else
			n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
				n_pkts);

		RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
		for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			rte_pktmbuf_free(pkt);
		}
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz) {
			if (is_multi)
				send_burst_mp(p);
			else
				send_burst(p);
		}
	}

	return 0;
}
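
The expr test is worth unpacking: pkts_mask & (pkts_mask + 1) is zero exactly when the mask has the form 2^k - 1 (a gap-free run of ones from bit 0), and (pkts_mask & bsz_mask) ^ bsz_mask is zero exactly when every bit of bsz_mask is present in pkts_mask. In the DPDK ring port, bsz_mask carries a single bit at position tx_burst_sz - 1, so both terms vanish only for a contiguous mask of at least tx_burst_sz packets. A standalone illustration (values made up):

#include <stdint.h>

static inline int
is_full_burst(uint64_t pkts_mask, uint64_t bsz_mask)
{
	uint64_t has_holes = pkts_mask & (pkts_mask + 1);       /* 0 iff mask == 2^k - 1 */
	uint64_t too_small = (pkts_mask & bsz_mask) ^ bsz_mask; /* 0 iff burst bit set   */

	return (has_holes | too_small) == 0;
}

/* is_full_burst(0xFF, 0x80) == 1: 8 contiguous packets, burst size 8.
 * is_full_burst(0xF7, 0x80) == 0: the hole at bit 3 forces the slow path. */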
Example #3
/**
 * Flush any mbufs still buffered for the worker queues.
 * This function is called from the I/O (input) thread.
 */
static inline void
app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers) {
  uint32_t worker;

  for (worker = 0; worker < n_workers; worker ++) {
    uint32_t ret, n_mbufs;

    n_mbufs = lp->rx.mbuf_out[worker].n_mbufs;
    if (likely((lp->rx.mbuf_out_flush[worker] == 0) ||
               (n_mbufs == 0))) {
      continue;
    }
    ret = rte_ring_sp_enqueue_burst(lp->rx.rings[worker],
                                    (void **) lp->rx.mbuf_out[worker].array,
                                    n_mbufs);
    if (unlikely(ret < n_mbufs)) {
      uint32_t k;
      for (k = ret; k < n_mbufs; k ++) {
        struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
        rte_pktmbuf_free(pkt_to_free);
      }
    }
    lp->rx.mbuf_out[worker].n_mbufs = 0;
    lp->rx.mbuf_out_flush[worker] = 0;
  }
}
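
Examples 3 and 5 are two halves of one batching scheme: app_lcore_io_rx_buffer_to_send() accumulates mbufs per worker and raises mbuf_out_flush when a partial batch is left behind, while app_lcore_io_rx_flush() pushes those leftovers out. A hedged sketch of how the two are typically wired together in the I/O lcore main loop (FLUSH_PERIOD and the loop skeleton are hypothetical):

for (i = 0; ; i++) {
	if (unlikely(i == FLUSH_PERIOD)) {
		/* Push out any partially filled per-worker buffers. */
		app_lcore_io_rx_flush(lp, n_workers);
		i = 0;
	}
	/* ... receive a burst and hand each mbuf to
	 * app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz) ... */
}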
Example #4
static inline void
send_burst(struct rte_port_ring_writer *p)
{
	uint32_t nb_tx;

	nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
			p->tx_buf_count);

	RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
	for ( ; nb_tx < p->tx_buf_count; nb_tx++)
		rte_pktmbuf_free(p->tx_buf[nb_tx]);

	p->tx_buf_count = 0;
}
Example #5
/**
 * Buffer an mbuf for a worker queue and enqueue the buffer once it
 * holds bsz packets. The function is called from the I/O (input) thread.
 */
static inline void
app_lcore_io_rx_buffer_to_send (
  struct app_lcore_params_io *lp,
  uint32_t worker,
  struct rte_mbuf *mbuf,
  uint32_t bsz) {
  uint32_t pos;
  int ret;

  pos = lp->rx.mbuf_out[worker].n_mbufs;
  lp->rx.mbuf_out[worker].array[pos ++] = mbuf;
  if (likely(pos < bsz)) {
    lp->rx.mbuf_out[worker].n_mbufs = pos;
    lp->rx.mbuf_out_flush[worker] = 1;
    return;
  }

  lp->rx.mbuf_out_flush[worker] = 0;
  ret = rte_ring_sp_enqueue_burst(lp->rx.rings[worker],
                                  (void **) lp->rx.mbuf_out[worker].array,
                                  bsz);

  if (unlikely(ret < (int)bsz)) {
    uint32_t k;
    for (k = (uint32_t)ret; k < bsz; k++) {
      struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
      rte_pktmbuf_free(m);
    }
  }

  lp->rx.mbuf_out[worker].n_mbufs = 0;

#if APP_STATS
  lp->rx.rings_iters[worker] += bsz;
  lp->rx.rings_count[worker] += ret;
  if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
    unsigned lcore = rte_lcore_id();

    printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
           lcore,
           (unsigned)worker,
           ((double) lp->rx.rings_count[worker]) / ((double)
               lp->rx.rings_iters[worker]));
    lp->rx.rings_iters[worker] = 0;
    lp->rx.rings_count[worker] = 0;
  }
#endif
}
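
The APP_STATS block reports the per-worker enqueue success rate: rings_iters accumulates mbufs submitted to the ring and rings_count accumulates mbufs the ring actually accepted, so the printed figure is their ratio. With made-up numbers, 950,000 accepted out of 1,000,000 submitted prints enq success rate = 0.95; values well below 1.0 suggest the workers are not draining their rings fast enough.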
Example #6
static inline int
rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
		struct rte_mbuf **pkts,
		uint64_t pkts_mask,
		uint32_t is_multi)
{
	struct rte_port_ring_writer_nodrop *p =
		(struct rte_port_ring_writer_nodrop *) port;

	uint64_t bsz_mask = p->bsz_mask;
	uint32_t tx_buf_count = p->tx_buf_count;
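	/* As in Example #2: expr == 0 selects the full-burst fast path
	 * (a contiguous pkts_mask that contains the burst-size bit). */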
	uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
			((pkts_mask & bsz_mask) ^ bsz_mask);

	if (expr == 0) {
		uint64_t n_pkts = __builtin_popcountll(pkts_mask);
		uint32_t n_pkts_ok;

		if (tx_buf_count) {
			if (is_multi)
				send_burst_mp_nodrop(p);
			else
				send_burst_nodrop(p);
		}

		RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
		if (is_multi)
			n_pkts_ok =
				rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
		else
			n_pkts_ok =
				rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);

		if (n_pkts_ok >= n_pkts)
			return 0;

		/*
		 * If we didn't manage to send all packets in a single burst,
		 * move the remaining packets to the buffer and call send burst.
		 */
		for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
			struct rte_mbuf *pkt = pkts[n_pkts_ok];

			p->tx_buf[p->tx_buf_count++] = pkt;
		}
		if (is_multi)
			send_burst_mp_nodrop(p);
		else
			send_burst_nodrop(p);
	} else {
		for ( ; pkts_mask; ) {
			uint32_t pkt_index = __builtin_ctzll(pkts_mask);
			uint64_t pkt_mask = 1LLU << pkt_index;
			struct rte_mbuf *pkt = pkts[pkt_index];

			p->tx_buf[tx_buf_count++] = pkt;
			RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
			pkts_mask &= ~pkt_mask;
		}

		p->tx_buf_count = tx_buf_count;
		if (tx_buf_count >= p->tx_burst_sz) {
			if (is_multi)
				send_burst_mp_nodrop(p);
			else
				send_burst_nodrop(p);
		}
	}

	return 0;
}
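
Examples 2 and 6 differ only in their tail policy after a partial enqueue: the plain writer frees whatever did not fit, while the nodrop writer re-buffers the tail and lets send_burst_nodrop() retry before giving up. Condensed from the two functions above:

/* Drop policy (Example #2): free the unsent tail immediately. */
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++)
	rte_pktmbuf_free(pkts[n_pkts_ok]);

/* No-drop policy (Example #6): re-buffer the tail and retry. */
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++)
	p->tx_buf[p->tx_buf_count++] = pkts[n_pkts_ok];
send_burst_nodrop(p); /* frees mbufs only after p->n_retries failed attempts */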
Example #7
int do_nf(void *useless)
{
	(void) useless; //XXX: suppresses the "unused-parameter" warning

	int i;
	unsigned int p;
	mbuf_array_t pkts_received;

	//Init the regex engine
	if(!initializeRegEx(&re_bbc, re_extra_bbc,BBC))
		return 0;

	mbuf_array_t *pkts_to_send = (mbuf_array_t*)malloc(NUM_PORTS * sizeof(mbuf_array_t));
	for(p = 0; p < NUM_PORTS; p++)
		pkts_to_send[p].n_mbufs = 0;

	while(1)
	{
#ifdef ENABLE_SEMAPHORE
		sem_wait(nf_params.semaphore);
#endif

		/*0) Iterates on all the ports */
		for(p = 0; p < NUM_PORTS; p++)
		{
			/*1) Receive incoming packets */

			pkts_received.n_mbufs = rte_ring_sc_dequeue_burst(nf_params.ports[p].to_nf_queue,(void **)&pkts_received.array[0],PKT_TO_NF_THRESHOLD);

			if(likely(pkts_received.n_mbufs > 0))
			{
#ifdef ENABLE_LOG
				fprintf(logFile,"[%s] Received %d pkts on port %d (%s)\n", NAME, pkts_received.n_mbufs,p,nf_params.ports[p].name);
#endif

				for (i=0;i < pkts_received.n_mbufs;i++)
				{
					/*2) Operate on the packet */

					unsigned char *pkt = rte_pktmbuf_mtod(pkts_received.array[i],unsigned char *);
#ifdef ENABLE_LOG
					fprintf(logFile,"[%s] Packet size: %d\n",NAME,rte_pktmbuf_pkt_len(pkts_received.array[i]));
					fprintf(logFile,"[%s] %.2x:%.2x:%.2x:%.2x:%.2x:%.2x -> %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",NAME,pkt[6],pkt[7],pkt[8],pkt[9],pkt[10],pkt[11],pkt[0],pkt[1],pkt[2],pkt[3],pkt[4],pkt[5]);
#endif

					/**
					*	If the packet arrives from the first port, check if it must be dropped
					*/
					if(p == 0)
					{
#ifdef ENABLE_LOG
						fprintf(logFile,"[%s] I'm going to check if the packet must be dropped.\n", NAME);
#endif
						if(drop(pkt,rte_pktmbuf_pkt_len(pkts_received.array[i])))
						{
							//The packet must be dropped
#ifdef ENABLE_LOG
 		                                       fprintf(logFile,"[%s] The packet is dropped.\n", NAME);
#endif
							rte_pktmbuf_free(pkts_received.array[i]);
							continue;
						}
					}
					unsigned int output_port = (p+1) % NUM_PORTS;

					pkts_to_send[output_port].array[pkts_to_send[output_port].n_mbufs] = pkts_received.array[i];
					pkts_to_send[output_port].n_mbufs++;
				}//end of iteration on the packets received from the current port
			} //end if(likely(pkts_received.n_mbufs > 0))
		}//end iteration on the ports

		/*3) Send the processed packet not transmitted yet*/
		for(p = 0; p < NUM_PORTS; p++)
		{
			if(likely(pkts_to_send[p].n_mbufs > 0))
			{
#ifdef ENABLE_LOG
				fprintf(logFile,"[%s] Sending %d packets on port %x (%s).\n", NAME,pkts_to_send[p].n_mbufs,p,nf_params.ports[p].name);
#endif
				int ret = rte_ring_sp_enqueue_burst(nf_params.ports[p].to_xdpd_queue,(void *const*)pkts_to_send[p].array,(unsigned)pkts_to_send[p].n_mbufs);

				if (unlikely(ret < pkts_to_send[p].n_mbufs))
				{
					fprintf(logFile,"[%s] Not enough room in port %d towards xDPD to enqueue; the remaining packets will be dropped.\n", NAME,p);
					do {
						struct rte_mbuf *pkt_to_free = pkts_to_send[p].array[ret];
						rte_pktmbuf_free(pkt_to_free);
					} while (++ret < pkts_to_send[p].n_mbufs);
				}
			}
			pkts_to_send[p].n_mbufs = 0;
		}/* End of iteration on the ports */

	}/* End of while(1) */

	return 0;
}
Example #8
/* Port tests */
int
test_port_ring_reader(void)
{
	int status, i;
	struct rte_port_ring_reader_params port_ring_reader_params;
	void *port;

	/* Invalid params */
	port = rte_port_ring_reader_ops.f_create(NULL, 0);
	if (port != NULL)
		return -1;

	status = rte_port_ring_reader_ops.f_free(port);
	if (status >= 0)
		return -2;

	/* Create and free */
	port_ring_reader_params.ring = RING_RX;
	port = rte_port_ring_reader_ops.f_create(&port_ring_reader_params, 0);
	if (port == NULL)
		return -3;

	status = rte_port_ring_reader_ops.f_free(port);
	if (status != 0)
		return -4;

	/* -- Traffic RX -- */
	int expected_pkts, received_pkts;
	struct rte_mbuf *res_mbuf[RTE_PORT_IN_BURST_SIZE_MAX];
	void *mbuf[RTE_PORT_IN_BURST_SIZE_MAX];

	port_ring_reader_params.ring = RING_RX;
	port = rte_port_ring_reader_ops.f_create(&port_ring_reader_params, 0);

	/* Single packet */
	mbuf[0] = (void *)rte_pktmbuf_alloc(pool);

	expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
		mbuf, 1);
	received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);

	if (received_pkts < expected_pkts)
		return -5;

	rte_pktmbuf_free(res_mbuf[0]);

	/* Multiple packets */
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		mbuf[i] = rte_pktmbuf_alloc(pool);

	expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
		(void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
	received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
		RTE_PORT_IN_BURST_SIZE_MAX);

	if (received_pkts < expected_pkts)
		return -6;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	return 0;
}
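
The negative return values identify the failing stage (-1 for the invalid-params create through -6 for the multi-packet RX), so a harness only needs to check for non-zero. A minimal hedged driver (the surrounding harness is hypothetical):

int ret = test_port_ring_reader();

if (ret != 0)
	printf("test_port_ring_reader failed with code %d\n", ret);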