/* Same as handle_lb_qinq_bulk() below, but first overwrites each mbuf's
 * port field with the id of the physical port the burst was received on. */
int handle_lb_qinq_bulk_set_port(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_lb_qinq *task = (struct task_lb_qinq *)tbase;
	uint8_t out[MAX_PKT_BURST];
	uint16_t j;
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
	uint32_t port_id = mbufs[0]->pkt.in_port;
#else
	uint32_t port_id = mbufs[0]->port;
#endif

	if (tbase->rx_pkt == rx_pkt_hw) {
		/* The rx queue index was advanced after the read, so step
		 * back one (modulo nb_rxports) to find the queue this burst
		 * actually came from, then look up its port. */
		port_id = tbase->rx_params_hw.last_read_portid + tbase->rx_params_hw.nb_rxports;
		port_id = (port_id - 1) % tbase->rx_params_hw.nb_rxports;
		port_id = tbase->rx_params_hw.rx_pq[port_id].port;
	}
	else if (tbase->rx_pkt == rx_pkt_hw1) {
		port_id = tbase->rx_params_hw1.rx_pq.port;
	}

	prefetch_first(mbufs, n_pkts);

	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef PROX_PREFETCH_OFFSET
		PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
#endif
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
		mbufs[j]->pkt.in_port = port_id;
#else
		mbufs[j]->port = port_id;
#endif
		out[j] = handle_lb_qinq(task, mbufs[j]);
	}
#ifdef PROX_PREFETCH_OFFSET
	PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
	/* Handle the tail of the burst skipped by the prefetch look-ahead. */
	for (; j < n_pkts; ++j) {
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
		mbufs[j]->pkt.in_port = port_id;
#else
		mbufs[j]->port = port_id;
#endif
		out[j] = handle_lb_qinq(task, mbufs[j]);
	}
#endif

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
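/*
 * For reference, a minimal sketch (disabled) of what the prefetch_first()
 * helper used above is expected to do, modeled on the warm-up loops of the
 * older handler below; the real definition lives in a prefetch header and
 * may differ.
 */
#if 0
static inline void prefetch_first(struct rte_mbuf **mbufs, uint16_t n_pkts)
{
#ifdef PROX_PREFETCH_OFFSET
	uint16_t j;

	/* Prefetch the first few mbuf structures themselves... */
	for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j)
		PREFETCH0(mbufs[j]);
	/* ...then the packet data, one mbuf behind, so the struct prefetch
	 * above has had time to land. */
	for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j)
		PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *));
#endif
}
#endif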
/* Older-style bulk handler (BRAS_* prefetch macros, per-packet tx buffering):
 * handle_lb_qinq() may drop packets here, so kept mbufs are compacted to the
 * front of rx_mbuf[] alongside their destination worker in dest_wt[]. */
void handle_lb_qinq_bulk(struct rte_mbuf **rx_mbuf, struct task_base *ptask, uint16_t n_pkts)
{
	struct task_lb_qinq *port = (struct task_lb_qinq *)ptask;
	uint16_t j;
	uint8_t dest_wt[MAX_RING_BURST];
	uint16_t not_dropped = 0;

#ifdef BRAS_PREFETCH_OFFSET
	/* Warm up: prefetch the first few mbuf structures, then their packet
	 * data one mbuf behind. */
	for (j = 0; (j < BRAS_PREFETCH_OFFSET) && (j < n_pkts); ++j) {
		PREFETCH0(rx_mbuf[j]);
	}
	for (j = 1; (j < BRAS_PREFETCH_OFFSET) && (j < n_pkts); ++j) {
		PREFETCH0(rte_pktmbuf_mtod(rx_mbuf[j - 1], struct ether_hdr *));
	}
#endif
	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef BRAS_PREFETCH_OFFSET
		PREFETCH0(rx_mbuf[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(rx_mbuf[j + PREFETCH_OFFSET - 1], struct ether_hdr *));
#endif
		/* Compact in place: the kept mbuf is copied down to index
		 * not_dropped; handle_lb_qinq() returns 1 if the packet is
		 * kept (advancing not_dropped) and 0 if it is dropped. */
		rx_mbuf[not_dropped] = rx_mbuf[j];
		not_dropped += handle_lb_qinq(rx_mbuf[j], port, &dest_wt[not_dropped]);
	}
#ifdef BRAS_PREFETCH_OFFSET
	PREFETCH0(rte_pktmbuf_mtod(rx_mbuf[n_pkts - 1], struct ether_hdr *));
	for (; (j < n_pkts); ++j) {
		rx_mbuf[not_dropped] = rx_mbuf[j];
		not_dropped += handle_lb_qinq(rx_mbuf[j], port, &dest_wt[not_dropped]);
	}
#endif

	if (likely(not_dropped)) {
		for (j = 0; j < not_dropped; ++j) {
			tx_buf_pkt_single(&port->base, rx_mbuf[j], dest_wt[j]);
		}
		port->tx_pkt(&port->base);
	}
}
/* Per-burst entry point: classify each packet with handle_lb_qinq(), which
 * fills out[] with the destination worker, then hand the whole burst to
 * tx_pkt() for routing. */
int handle_lb_qinq_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_lb_qinq *task = (struct task_lb_qinq *)tbase;
	uint8_t out[MAX_PKT_BURST];
	uint16_t j;

	prefetch_first(mbufs, n_pkts);

	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef PROX_PREFETCH_OFFSET
		PREFETCH0(mbufs[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
#endif
		out[j] = handle_lb_qinq(task, mbufs[j]);
	}
#ifdef PROX_PREFETCH_OFFSET
	PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
	/* Handle the tail of the burst skipped by the prefetch look-ahead. */
	for (; j < n_pkts; ++j) {
		out[j] = handle_lb_qinq(task, mbufs[j]);
	}
#endif

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
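/*
 * A sketch (disabled) of how the bulk handler would typically be wired into
 * PROX's task framework. The field names (mode_str, init, handle, size),
 * reg_task() and init_task_lb_qinq() are assumptions based on the pattern
 * used elsewhere in PROX, not taken from this file.
 */
#if 0
static struct task_init task_init_lb_qinq = {
	.mode_str = "lbqinq",
	.init = init_task_lb_qinq,
	.handle = handle_lb_qinq_bulk,
	.size = sizeof(struct task_lb_qinq),
};

/* Runs before main(): registers the mode string with the task framework so
 * configs selecting this mode resolve to the handlers above. */
__attribute__((constructor)) static void reg_task_lbqinq(void)
{
	reg_task(&task_init_lb_qinq);
}
#endif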