示例#1
0
/*
 * Drain every task's control rings and dispatch the queued messages.
 *
 * Two ring/handler pairs exist per task: the "_m" pair carries generic
 * message pointers, the "_p" pair carries packet mbufs. Returns the
 * control-path timeout so the caller can reschedule this handler.
 */
static uint64_t tsc_ctrl(struct lcore_cfg *lconf)
{
	const uint8_t task_count = lconf->n_tasks_all;
	void *burst[MAX_RING_BURST];
	uint16_t n_deq;

	for (uint8_t tid = 0; tid < task_count; ++tid) {
		/* Message control ring: only poll when both ring and handler exist. */
		if (lconf->ctrl_rings_m[tid] && lconf->ctrl_func_m[tid]) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
			n_deq = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[tid], burst, MAX_RING_BURST);
#else
			/* DPDK >= 17.05 adds a trailing "available" out-param. */
			n_deq = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[tid], burst, MAX_RING_BURST, NULL);
#endif
			if (n_deq)
				lconf->ctrl_func_m[tid](lconf->tasks_all[tid], burst, n_deq);
		}
		/* Packet control ring: entries are mbufs, hence the cast. */
		if (lconf->ctrl_rings_p[tid] && lconf->ctrl_func_p[tid]) {
#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1)
			n_deq = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[tid], burst, MAX_RING_BURST);
#else
			n_deq = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[tid], burst, MAX_RING_BURST, NULL);
#endif
			if (n_deq)
				lconf->ctrl_func_p[tid](lconf->tasks_all[tid], (struct rte_mbuf **)burst, n_deq);
		}
	}
	return lconf->ctrl_timeout;
}
示例#2
0
/*
 * Dequeue a burst of packets from the task's software rx rings into rx_mbuf.
 * Rings are serviced round-robin via last_read_ring (wrapping at nb_rxrings).
 * Returns the number of mbufs delivered; empty polls are accounted via
 * INCR_EMPTY_CYCLES.
 */
uint16_t rx_pkt_sw(struct rte_mbuf **rx_mbuf, struct task_base *ptask)
{
	START_EMPTY_MEASSURE();
#ifdef BRAS_RX_BULK
	/*
	 * Bulk dequeue is all-or-nothing: a negative return means fewer than
	 * MAX_RING_BURST entries were available and nothing was dequeued.
	 *
	 * BUG FIX: the "< 0" test was previously applied OUTSIDE unlikely().
	 * Since unlikely(x) expands to __builtin_expect(!!(x), 0), its value is
	 * always 0 or 1 and can never be negative, so the failure branch was
	 * dead code and the function returned MAX_RING_BURST with an unfilled
	 * rx_mbuf[] whenever the ring held fewer entries than the burst size.
	 */
	if (unlikely(rte_ring_sc_dequeue_bulk(ptask->rx_params_sw.rx_rings[ptask->rx_params_sw.last_read_ring], (void **)rx_mbuf, MAX_RING_BURST) < 0)) {
		/* Nothing dequeued: move on to the next ring and account idle time. */
		++ptask->rx_params_sw.last_read_ring;
		if (unlikely(ptask->rx_params_sw.last_read_ring == ptask->rx_params_sw.nb_rxrings)) {
			ptask->rx_params_sw.last_read_ring = 0;
		}
		INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
	else {
		/* Keep draining the same ring while it can satisfy full bursts. */
		return MAX_RING_BURST;
	}
#else
	uint16_t nb_rx = rte_ring_sc_dequeue_burst(ptask->rx_params_sw.rx_rings[ptask->rx_params_sw.last_read_ring], (void **)rx_mbuf, MAX_RING_BURST);
	/* Always advance to the next ring so all rings get serviced fairly. */
	++ptask->rx_params_sw.last_read_ring;
	if (unlikely(ptask->rx_params_sw.last_read_ring == ptask->rx_params_sw.nb_rxrings)) {
		ptask->rx_params_sw.last_read_ring = 0;
	}

	if (nb_rx != 0) {
		return nb_rx;
	}
	else {
		INCR_EMPTY_CYCLES(ptask->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
#endif
}
示例#3
0
/*
 * Dequeue up to MAX_RING_BURST mbufs from ring r into mbufs.
 * Bulk builds (BRAS_RX_BULK) are all-or-nothing: either a full burst
 * or zero. Burst builds return however many entries were available.
 */
static uint16_t ring_deq(struct rte_ring* r, struct rte_mbuf **mbufs)
{
#ifdef BRAS_RX_BULK
	if (rte_ring_sc_dequeue_bulk(r, (void **)mbufs, MAX_RING_BURST) < 0)
		return 0;
	return MAX_RING_BURST;
#else
	return rte_ring_sc_dequeue_burst(r, (void **)mbufs, MAX_RING_BURST);
#endif
}
示例#4
0
/*
 * Ring-reader input port: pull up to n_pkts mbufs from the backing
 * ring into pkts[] and account them in the port's rx statistics.
 * Returns the number of packets actually dequeued.
 */
static int
rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
	struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
	uint32_t n;

	n = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
	RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, n);
	return n;
}
/*
 * Drain the global packet ring, writing each mbuf to the usocket's parent
 * packet socket and freeing it. Per-interface tx counters are updated for
 * each success/failure. Runs with the RCU thread marked offline since
 * usock_mbuf_write may block.
 */
static void
vr_dpdk_packet_ring_drain(struct vr_usocket *usockp)
{
    unsigned i;
    unsigned nb_pkts;
    struct rte_mbuf *mbuf_arr[VR_DPDK_RX_BURST_SZ];
    const unsigned lcore_id = rte_lcore_id();
    struct vr_interface_stats *stats;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: draining packet ring...\n", __func__,
            pthread_self());

    /* Nothing to account against without a vif. */
    if (unlikely(usockp->usock_parent->usock_vif == NULL))
        return;

    rcu_thread_offline();

    stats = vif_get_stats(usockp->usock_parent->usock_vif, lcore_id);
    do {
        /*
         * BUG FIX: previously passed (void **)&mbuf_arr — the address of
         * the array, which has type struct rte_mbuf *(*)[N]. It aliased
         * to the same address but was an incompatible pointer type; pass
         * the array itself (decays to struct rte_mbuf **).
         */
        nb_pkts = rte_ring_sc_dequeue_burst(vr_dpdk.packet_ring,
            (void **)mbuf_arr, VR_DPDK_RX_BURST_SZ);
        /* 'i' is unsigned to match nb_pkts (was int: signed/unsigned compare). */
        for (i = 0; i < nb_pkts; i++) {
            if (usock_mbuf_write(usockp->usock_parent, mbuf_arr[i]) >= 0)
                stats->vis_port_opackets++;
            else {
                stats->vis_port_oerrors++;
                RTE_LOG(DEBUG, USOCK,
                        "%s: Error writing mbuf to packet socket: %s (%d)\n",
                        __func__, rte_strerror(errno), errno);
            }

            /* The mbuf is consumed either way. */
            rte_pktmbuf_free(mbuf_arr[i]);
        }
    } while (nb_pkts > 0);

    rcu_thread_online();
}
示例#6
0
/**
 * Dequeue mbufs from output queue and send to ethernet port.
 * This function is called from I/O (Output) thread.
 */
/**
 * Dequeue mbufs from output queue and send to ethernet port.
 * This function is called from I/O (Output) thread.
 *
 * For each (worker, port) pair: top up the per-port staging buffer from
 * the worker's ring, run optional QoS metering/scheduling, transmit a
 * full burst, and free anything the NIC did not accept.
 */
static inline void
app_lcore_io_tx(struct app_lcore_params_io *lp,
                uint32_t n_workers,
                uint32_t bsz_rd,
                uint32_t bsz_wr) {
  uint32_t worker;

  for (worker = 0; worker < n_workers; worker ++) {
    uint32_t i;

    for (i = 0; i < lp->tx.n_nic_ports; i ++) {
      uint8_t port = lp->tx.nic_ports[i];
      struct rte_ring *ring = lp->tx.rings[port][worker];
      struct interface *ifp;
      uint32_t n_mbufs, n_pkts;
      int ret;

      /* Append a fresh burst behind any mbufs already staged for this port. */
      n_mbufs = lp->tx.mbuf_out[port].n_mbufs;
      ret = rte_ring_sc_dequeue_burst(ring,
                                      (void **) &lp->tx.mbuf_out[port].array[n_mbufs],
                                      bsz_rd - n_mbufs);

      if (unlikely(ret == 0)) {
        continue;
      }

      n_mbufs += (uint32_t)ret;

#if APP_IO_TX_DROP_ALL_PACKETS
      {
        /* Drop-all mode: free every staged mbuf instead of transmitting. */
        uint32_t j;
        APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[0]);
        APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[1]);

        for (j = 0; j < n_mbufs; j ++) {
          if (likely(j < n_mbufs - 2)) {
            APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[j + 2]);
          }
          rte_pktmbuf_free(lp->tx.mbuf_out[port].array[j]);
        }
        lp->tx.mbuf_out[port].n_mbufs = 0;
        continue;
      }
#endif
      /* Not enough for a full write burst yet: stage and mark for flush. */
      if (unlikely(n_mbufs < bsz_wr)) {
        lp->tx.mbuf_out[port].n_mbufs = n_mbufs;
        lp->tx.mbuf_out_flush[port] = 1;
        continue;
      }
      ifp = dpdk_interface_lookup(port);
      if (ifp != NULL && ifp->sched_port != NULL) {
        struct lagopus_packet *pkt;
        struct rte_mbuf *m;
        uint32_t j;
        int qidx, color;

        /*
         * BUG FIX: this loop previously reused 'i', the index of the
         * enclosing nic_ports loop. After QoS processing the outer loop
         * resumed from i == n_mbufs, skipping the remaining NIC ports.
         * A dedicated index 'j' is used instead.
         */
        for (j = 0; j < n_mbufs; j++) {
          m = lp->tx.mbuf_out[port].array[j];
          pkt = (struct lagopus_packet *)
                (m->buf_addr + APP_DEFAULT_MBUF_LOCALDATA_OFFSET);
          if (unlikely(pkt->queue_id != 0)) {
            qidx = dpdk_interface_queue_id_to_index(ifp, pkt->queue_id);
            color = rte_meter_trtcm_color_blind_check(&ifp->ifqueue.meters[qidx],
                    rte_rdtsc(),
                    OS_M_PKTLEN(m));
            rte_sched_port_pkt_write(m, 0, 0, 0, qidx, color);
          }
        }
        /* Pass the burst through the scheduler; it may hold some back. */
        n_mbufs = rte_sched_port_enqueue(ifp->sched_port,
                                         lp->tx.mbuf_out[port].array,
                                         n_mbufs);
        n_mbufs = rte_sched_port_dequeue(ifp->sched_port,
                                         lp->tx.mbuf_out[port].array,
                                         n_mbufs);
      }
      DPRINTF("send %d pkts\n", n_mbufs);
      n_pkts = rte_eth_tx_burst(port,
                                0,
                                lp->tx.mbuf_out[port].array,
                                (uint16_t) n_mbufs);
      DPRINTF("sent %d pkts\n", n_pkts);

#if APP_STATS
      lp->tx.nic_ports_iters[port] ++;
      lp->tx.nic_ports_count[port] += n_pkts;
      if (unlikely(lp->tx.nic_ports_iters[port] == APP_STATS)) {
        unsigned lcore = rte_lcore_id();

        printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
               lcore,
               (unsigned) port,
               ((double) lp->tx.nic_ports_count[port]) / ((double)
                   lp->tx.nic_ports_iters[port]));
        lp->tx.nic_ports_iters[port] = 0;
        lp->tx.nic_ports_count[port] = 0;
      }
#endif

      /* Free whatever the NIC did not accept to avoid leaking mbufs. */
      if (unlikely(n_pkts < n_mbufs)) {
        uint32_t k;
        for (k = n_pkts; k < n_mbufs; k ++) {
          struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
          rte_pktmbuf_free(pkt_to_free);
        }
      }
      lp->tx.mbuf_out[port].n_mbufs = 0;
      lp->tx.mbuf_out_flush[port] = 0;
    }
  }
}
示例#7
0
int do_nf(void *useless)
{
	(void) useless; //XXX: this line suppresses the "unused-parameter" error

	int i;
	unsigned int p;
	mbuf_array_t pkts_received;

	//Init the regex engine
	if(!initializeRegEx(&re_bbc, re_extra_bbc,BBC))
		return 0;

	mbuf_array_t *pkts_to_send = (mbuf_array_t*)malloc(NUM_PORTS * sizeof(mbuf_array_t));
	for(p = 0; p < NUM_PORTS; p++)
		pkts_to_send[p].n_mbufs = 0;

	while(1)
	{
#ifdef ENABLE_SEMAPHORE
		sem_wait(nf_params.semaphore);
#endif

		/*0) Iterates on all the ports */
		for(p = 0; p < NUM_PORTS; p++)
		{
			/*1) Receive incoming packets */

			pkts_received.n_mbufs = rte_ring_sc_dequeue_burst(nf_params.ports[p].to_nf_queue,(void **)&pkts_received.array[0],PKT_TO_NF_THRESHOLD);

			if(likely(pkts_received.n_mbufs > 0))
			{
#ifdef ENABLE_LOG
				fprintf(logFile,"[%s] Received %d pkts on port %d (%s)\n", NAME, pkts_received.n_mbufs,p,nf_params.ports[p].name);
#endif

				for (i=0;i < pkts_received.n_mbufs;i++)
				{
					/*2) Operate on the packet */

					unsigned char *pkt = rte_pktmbuf_mtod(pkts_received.array[i],unsigned char *);
#ifdef ENABLE_LOG
					fprintf(logFile,"[%s] Packet size: %d\n",NAME,rte_pktmbuf_pkt_len(pkts_received.array[i]));
					fprintf(logFile,"[%s] %.2x:%.2x:%.2x:%.2x:%.2x:%.2x -> %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",NAME,pkt[6],pkt[7],pkt[8],pkt[9],pkt[10],pkt[11],pkt[0],pkt[1],pkt[2],pkt[3],pkt[4],pkt[5]);
#endif

					/**
					*	If the packet arrives from the first port, check if it must be dropped
					*/
					if(p == 0)
					{
#ifdef ENABLE_LOG
						fprintf(logFile,"[%s] I'm going to check if the packet must be dropped.\n", NAME);
#endif
						if(drop(pkt,rte_pktmbuf_pkt_len(pkts_received.array[i])))
						{
							//The packet must be dropped
#ifdef ENABLE_LOG
 		                                       fprintf(logFile,"[%s] The packet is dropped.\n", NAME);
#endif
							rte_pktmbuf_free(pkts_received.array[i]);
							continue;
						}
					}
					unsigned int output_port = (p+1) % NUM_PORTS;

					pkts_to_send[output_port].array[pkts_to_send[output_port].n_mbufs] = pkts_received.array[i];
					pkts_to_send[output_port].n_mbufs++;
				}//end of iteration on the packets received from the current port
			} //end if(likely(pkts_received.n_mbufs > 0))
		}//end iteration on the ports

		/*3) Send the processed packet not transmitted yet*/
		for(p = 0; p < NUM_PORTS; p++)
		{
			if(likely(pkts_to_send[p].n_mbufs > 0))
			{
#ifdef ENABLE_LOG
				fprintf(logFile,"[%s] Sending %d packets on port %x (%s).\n", NAME,pkts_to_send[p].n_mbufs,p,nf_params.ports[p].name);
#endif
				int ret = rte_ring_sp_enqueue_burst(nf_params.ports[p].to_xdpd_queue,(void *const*)pkts_to_send[p].array,(unsigned)pkts_to_send[p].n_mbufs);

	        	if (unlikely(ret < pkts_to_send[p].n_mbufs))
		        {
		        	fprintf(logFile,"[%s] Not enough room in port %d towards xDPD to enqueue; the packet will be dropped.\n", NAME,p);
					do {
						struct rte_mbuf *pkt_to_free = pkts_to_send[p].array[ret];
						rte_pktmbuf_free(pkt_to_free);
					} while (++ret < pkts_to_send[p].n_mbufs);
				}
			}
			pkts_to_send[p].n_mbufs = 0;
		}/* End of iteration on the ports */

	}/*End of while true*/
示例#8
0
/*
 * Unit test for the ring-writer port ops: invalid-parameter handling,
 * create/free lifecycle, then single-packet, multi-packet and bulk TX,
 * each verified by dequeuing from the backing ring.
 *
 * Returns 0 on success, or a negative code unique to the failing step.
 */
int
test_port_ring_writer(void)
{
	int status, i;
	struct rte_port_ring_writer_params port_ring_writer_params;
	void *port;

	/* Invalid params */
	port = rte_port_ring_writer_ops.f_create(NULL, 0);
	if (port != NULL)
		return -1;

	status = rte_port_ring_writer_ops.f_free(port);
	if (status >= 0)
		return -2;

	port_ring_writer_params.ring = NULL;

	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
	if (port != NULL)
		return -3;

	port_ring_writer_params.ring = RING_TX;
	port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX + 1;

	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
	if (port != NULL)
		return -4;

	/* Create and free */
	port_ring_writer_params.ring = RING_TX;
	port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX;

	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);
	if (port == NULL)
		return -5;

	status = rte_port_ring_writer_ops.f_free(port);
	if (status != 0)
		return -6;

	/* -- Traffic TX -- */
	int expected_pkts, received_pkts;
	struct rte_mbuf *mbuf[RTE_PORT_IN_BURST_SIZE_MAX];
	struct rte_mbuf *res_mbuf[RTE_PORT_IN_BURST_SIZE_MAX];

	port_ring_writer_params.ring = RING_TX;
	port_ring_writer_params.tx_burst_sz = RTE_PORT_IN_BURST_SIZE_MAX;
	port = rte_port_ring_writer_ops.f_create(&port_ring_writer_params, 0);

	/* Single packet */
	mbuf[0] = rte_pktmbuf_alloc(pool);

	rte_port_ring_writer_ops.f_tx(port, mbuf[0]);
	rte_port_ring_writer_ops.f_flush(port);
	expected_pkts = 1;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz);

	if (received_pkts < expected_pkts)
		return -7;

	rte_pktmbuf_free(res_mbuf[0]);

	/* Multiple packets: a full burst is auto-flushed on tx_burst_sz. */
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
		mbuf[i] = rte_pktmbuf_alloc(pool);
		rte_port_ring_writer_ops.f_tx(port, mbuf[i]);
	}

	expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz);

	if (received_pkts < expected_pkts)
		return -8;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	/* TX Bulk: all-ones mask sends the whole array at once. */
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		mbuf[i] = rte_pktmbuf_alloc(pool);
	rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)-1);

	expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz);

	if (received_pkts < expected_pkts)
		/* BUG FIX: this step previously returned -8, duplicating the
		 * multi-packet failure code; renumbered so every failing step
		 * has a unique diagnostic code. */
		return -9;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	/* TX Bulk with split masks: (uint64_t)-3 covers all slots except
	 * bit 1; (uint64_t)2 covers bit 1 — together, a full burst. */
	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		mbuf[i] = rte_pktmbuf_alloc(pool);
	rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)-3);
	rte_port_ring_writer_ops.f_tx_bulk(port, mbuf, (uint64_t)2);

	expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
	received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
		(void **)res_mbuf, port_ring_writer_params.tx_burst_sz);

	if (received_pkts < expected_pkts)
		/* Renumbered from -9 to keep codes unique after the fix above. */
		return -10;

	for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
		rte_pktmbuf_free(res_mbuf[i]);

	return 0;
}
示例#9
0
/*
 * Ring-reader rx with transparent IPv4 fragmentation of jumbo packets.
 *
 * Fills pkts[] with up to n_pkts mbufs. Packets whose pkt_len is at most
 * IPV4_MTU_DEFAULT pass through unchanged; larger ones are fragmented into
 * p->frags[] and the fragments are emitted in their place. Fragments left
 * over from a previous call are drained first; any surplus produced here is
 * carried across calls in p->frags / p->pos_frags / p->n_frags, and a
 * partially consumed ring burst is carried in p->pkts / p->pos_pkts /
 * p->n_pkts.
 *
 * Returns the number of mbufs written to pkts[] (<= n_pkts).
 */
static int
rte_port_ring_reader_ipv4_frag_rx(void *port,
		struct rte_mbuf **pkts,
		uint32_t n_pkts)
{
	struct rte_port_ring_reader_ipv4_frag *p =
			(struct rte_port_ring_reader_ipv4_frag *) port;
	uint32_t n_pkts_out;

	n_pkts_out = 0;

	/* Get packets from the "frag" buffer: leftover fragments from a
	 * previous call can satisfy the whole request by themselves. */
	if (p->n_frags >= n_pkts) {
		memcpy(pkts, &p->frags[p->pos_frags], n_pkts * sizeof(void *));
		p->pos_frags += n_pkts;
		p->n_frags -= n_pkts;

		return n_pkts;
	}

	/* Drain all remaining leftover fragments before touching the ring. */
	memcpy(pkts, &p->frags[p->pos_frags], p->n_frags * sizeof(void *));
	n_pkts_out = p->n_frags;
	p->n_frags = 0;

	/* Look to "pkts" buffer to get more packets */
	for ( ; ; ) {
		struct rte_mbuf *pkt;
		uint32_t n_pkts_to_provide, i;
		int status;

		/* If "pkts" buffer is empty, read packet burst from ring */
		if (p->n_pkts == 0) {
			p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
				(void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
			if (p->n_pkts == 0)
				return n_pkts_out;
			p->pos_pkts = 0;
		}

		/* Read next packet from "pkts" buffer */
		pkt = p->pkts[p->pos_pkts++];
		p->n_pkts--;

		/* If not jumbo, pass current packet to output.
		 * NOTE(review): the jumbo test uses IPV4_MTU_DEFAULT while the
		 * fragmentation below uses p->mtu — confirm these are intended
		 * to be the same threshold. */
		if (pkt->pkt_len <= IPV4_MTU_DEFAULT) {
			pkts[n_pkts_out++] = pkt;

			n_pkts_to_provide = n_pkts - n_pkts_out;
			if (n_pkts_to_provide == 0)
				return n_pkts;

			continue;
		}

		/* Fragment current packet into the "frags" buffer */
		status = rte_ipv4_fragment_packet(
			pkt,
			p->frags,
			IPV4_MAX_FRAGS_PER_PACKET,
			p->mtu,
			p->pool_direct,
			p->pool_indirect
		);

		/* Fragmentation failed (e.g. pool exhausted): drop the jumbo. */
		if (status < 0) {
			rte_pktmbuf_free(pkt);
			continue;
		}

		p->n_frags = (uint32_t) status;
		p->pos_frags = 0;

		/* Copy meta-data from input jumbo packet to its fragments */
		for (i = 0; i < p->n_frags; i++) {
			uint8_t *src = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
			uint8_t *dst =
				RTE_MBUF_METADATA_UINT8_PTR(p->frags[i], 0);

			memcpy(dst, src, p->metadata_size);
		}

		/* Free input jumbo packet */
		rte_pktmbuf_free(pkt);

		/* Get packets from "frag" buffer; if the fragments alone fill
		 * the request, stash the surplus for the next call and return.
		 * (pos_frags is 0 here, so copying from p->frags is correct.) */
		n_pkts_to_provide = n_pkts - n_pkts_out;
		if (p->n_frags >= n_pkts_to_provide) {
			memcpy(&pkts[n_pkts_out], p->frags,
				n_pkts_to_provide * sizeof(void *));
			p->n_frags -= n_pkts_to_provide;
			p->pos_frags += n_pkts_to_provide;

			return n_pkts;
		}

		/* Fewer fragments than needed: emit them all and keep looping. */
		memcpy(&pkts[n_pkts_out], p->frags,
			p->n_frags * sizeof(void *));
		n_pkts_out += p->n_frags;
		p->n_frags = 0;
	}
}