Example no. 1
0
/* Classify a received frame by Ethernet type: decapsulate MPLS unicast,
 * forward plain IPv4/IPv6, drop LLDP and anything unrecognized.
 * Frees or forwards the mbuf on every path; never returns it to the caller. */
static inline void handle_unmpls(struct task_unmpls *task, struct rte_mbuf *mbuf)
{
	struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);

	switch (peth->ether_type) {
	case ETYPE_MPLSU:
		/* MPLS Decapsulation: strip the label stack, re-read the header
		 * pointer (decap moves the packet start), rewrite the destination
		 * MAC with the configured egress address, then transmit.
		 * BUG FIX: the original broke out of the switch without sending,
		 * leaking the decapsulated mbuf. */
		mpls_decap(mbuf);
		peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
		ether_addr_copy(&task->edaddr, &peth->d_addr);
		tx_buf_pkt_single(&task->base, mbuf, 0);
		break;
	case ETYPE_LLDP:
		/* Link-layer discovery frames are not forwarded: count and drop. */
		INCR_TX_DROP_COUNT(task->base.stats, 1);
		rte_pktmbuf_free(mbuf);
		return;
	case ETYPE_IPv4:
		/* fallthrough: untagged IP traffic is forwarded unchanged */
	case ETYPE_IPv6:
		tx_buf_pkt_single(&task->base, mbuf, 0);
		break;
	default:
		mprintf("Core %u Error Removing MPLS: ether_type = %#06x\n", task->lconf->id, peth->ether_type);
		/* Count the drop so statistics stay consistent with the LLDP path. */
		INCR_TX_DROP_COUNT(task->base.stats, 1);
		rte_pktmbuf_free(mbuf);
	}
}
Example no. 2
0
/* Bulk QinQ load-balancer entry point: classify each mbuf in rx_mbuf[],
 * compact the survivors to the front of the array (dropped packets are
 * freed inside handle_lb_qinq, which returns 0/1 = kept), record each
 * survivor's destination worker in dest_wt[], then enqueue and flush.
 * Prefetching of mbuf structs and headers is compiled in only when
 * BRAS_PREFETCH_OFFSET is defined. */
void handle_lb_qinq_bulk(struct rte_mbuf **rx_mbuf, struct task_base *ptask, uint16_t n_pkts)
{
	struct task_lb_qinq *port = (struct task_lb_qinq *)ptask;
	uint16_t j;
	uint8_t dest_wt[MAX_RING_BURST];
	uint16_t not_dropped = 0;

#ifdef BRAS_PREFETCH_OFFSET
	/* Warm-up: prefetch the first few mbuf structs, then their headers. */
	for (j = 0; (j < BRAS_PREFETCH_OFFSET) && (j < n_pkts); ++j) {
		PREFETCH0(rx_mbuf[j]);
	}
	for (j = 1; (j < BRAS_PREFETCH_OFFSET) && (j < n_pkts); ++j) {
		PREFETCH0(rte_pktmbuf_mtod(rx_mbuf[j - 1], struct ether_hdr *));
	}
#endif
	/* Main loop stops PREFETCH_OFFSET packets early so the prefetches
	 * below never index past the end of rx_mbuf[]. */
	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef BRAS_PREFETCH_OFFSET
		PREFETCH0(rx_mbuf[j + PREFETCH_OFFSET]);
		PREFETCH0(rte_pktmbuf_mtod(rx_mbuf[j + PREFETCH_OFFSET - 1], struct ether_hdr *));
#endif
		rx_mbuf[not_dropped] = rx_mbuf[j];
		not_dropped += handle_lb_qinq(rx_mbuf[j], port, &dest_wt[not_dropped]);
	}
#ifdef BRAS_PREFETCH_OFFSET
	/* BUG FIX: guard against n_pkts == 0 — unguarded, the uint16_t index
	 * wraps to 65535 and reads far past the end of rx_mbuf[]. */
	if (n_pkts) {
		PREFETCH0(rte_pktmbuf_mtod(rx_mbuf[n_pkts - 1], struct ether_hdr *));
	}
#endif
	/* Tail loop for the last PREFETCH_OFFSET packets.
	 * BUG FIX: this was inside #ifdef BRAS_PREFETCH_OFFSET, but the main
	 * loop stops early unconditionally — with prefetching disabled the
	 * tail packets were silently leaked. It must always be compiled. */
	for (; (j < n_pkts); ++j) {
		rx_mbuf[not_dropped] = rx_mbuf[j];
		not_dropped += handle_lb_qinq(rx_mbuf[j], port, &dest_wt[not_dropped]);
	}

	if (likely(not_dropped)) {
		for (j = 0; j < not_dropped; ++j) {
			tx_buf_pkt_single(&port->base, rx_mbuf[j], dest_wt[j]);
		}
		port->tx_pkt(&port->base);
	}
}