int odp_eth_rx_burst(pktio_entry_t *pktio_entry, odp_packet_t *rx_pkts,
		     unsigned int nb_pkts)
{
	int nb_rx;
	struct odp_eth_dev *dev;
	uint8_t port_id = pktio_entry->s.pkt_odp.portid;
	uint16_t queue_id = pktio_entry->s.pkt_odp.queueid;

	dev = &odp_eth_devices[port_id];

	if (pktio_cls_enabled(pktio_entry, queue_id)) {
		odp_packet_t tmpbuf[64];
		odp_packet_t onepkt;
		odp_packet_hdr_t *pkt_hdr;
		odp_pktio_t id;
		int i, j;

		/* Receive into a temporary table so the classifier can
		 * consume packets before they reach the caller. */
		if (nb_pkts > 64)
			nb_pkts = 64;

		nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
					     (void **)tmpbuf, nb_pkts);
		if (odp_unlikely(nb_rx <= 0))
			return nb_rx;

		id = pktio_entry->s.handle;
		for (i = 0, j = 0; i < nb_rx; i++) {
			onepkt = tmpbuf[i];
			pkt_hdr = odp_packet_hdr(onepkt);
			pkt_hdr->input = id;
			packet_parse_reset(pkt_hdr);
			packet_parse_l2(pkt_hdr);
			/* A negative return means the classifier did not
			 * consume the packet; hand it back to the caller. */
			if (0 > _odp_packet_classifier(pktio_entry, queue_id,
						       onepkt)) {
				rx_pkts[j++] = onepkt;
			}
		}
		nb_rx = j;
	} else {
		nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
					     (void **)rx_pkts, nb_pkts);
	}

#ifdef ODP_ETHDEV_RXTX_CALLBACKS
	/* Run any registered post-RX callbacks on the returned burst. */
	struct odp_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];

	if (odp_unlikely(cb)) {
		do {
			nb_rx = cb->fn.rx(port_id, queue_id, (void **)rx_pkts,
					  nb_rx, nb_pkts, cb->param);
			cb = cb->next;
		} while (cb);
	}
#endif

	return nb_rx;
}
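/*
 * Illustrative only: a minimal sketch of how a poll-mode caller might drive
 * odp_eth_rx_burst() above. MAX_BURST, rx_poll_loop() and process_packet()
 * are hypothetical names introduced here, not part of the sources shown.
 */
#define MAX_BURST 32

static void process_packet(odp_packet_t pkt)
{
	/* Placeholder consumer: a real application would inspect the packet
	 * here; this sketch only releases it back to its pool. */
	odp_packet_free(pkt);
}

static void rx_poll_loop(pktio_entry_t *pktio_entry)
{
	odp_packet_t pkts[MAX_BURST];
	int i, num;

	for (;;) {
		num = odp_eth_rx_burst(pktio_entry, pkts, MAX_BURST);
		if (num <= 0)
			continue; /* nothing ready on this queue */

		/* Packets consumed by the classifier were already enqueued
		 * to their destination queues; only the rest arrive here. */
		for (i = 0; i < num; i++)
			process_packet(pkts[i]);
	}
}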
static int loopback_recv(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
			 unsigned len)
{
	int nbr, i, j;
	odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
	queue_entry_t *qentry;
	odp_packet_hdr_t *pkt_hdr;
	odp_packet_t pkt;

	/* Dequeue buffers that the transmit side placed on the internal
	 * loop queue. */
	qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
	nbr = queue_deq_multi(qentry, hdr_tbl, len);

	if (pktio_cls_enabled(pktio_entry)) {
		for (i = 0, j = 0; i < nbr; i++) {
			pkt = _odp_packet_from_buffer(odp_hdr_to_buf(hdr_tbl[i]));
			pkt_hdr = odp_packet_hdr(pkt);
			packet_parse_reset(pkt_hdr);
			packet_parse_l2(pkt_hdr);
			/* Keep only packets the classifier did not consume */
			if (0 > _odp_packet_classifier(pktio_entry, pkt))
				pkts[j++] = pkt;
		}
		nbr = j;
	} else {
		for (i = 0; i < nbr; ++i) {
			pkts[i] = _odp_packet_from_buffer(odp_hdr_to_buf(hdr_tbl[i]));
			pkt_hdr = odp_packet_hdr(pkts[i]);
			packet_parse_reset(pkt_hdr);
			packet_parse_l2(pkt_hdr);
		}
	}

	return nbr;
}
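/*
 * For context, a hedged sketch of the matching transmit side, assuming the
 * loop interface simply enqueues outgoing packets onto the same loopq that
 * loopback_recv() drains. odp_buf_to_hdr(), _odp_packet_to_buffer() and
 * queue_enq_multi() are assumed counterparts of the helpers used above; the
 * real implementation and signatures may differ.
 */
static int loopback_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_tbl[],
			 unsigned len)
{
	odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
	queue_entry_t *qentry;
	unsigned i;

	/* Convert packets to buffer headers and push them onto the loop
	 * queue, where loopback_recv() will pick them up. */
	for (i = 0; i < len; ++i)
		hdr_tbl[i] = odp_buf_to_hdr(_odp_packet_to_buffer(pkt_tbl[i]));

	qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
	return queue_enq_multi(qentry, hdr_tbl, len);
}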
static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
				      pkt_sock_mmap_t *pkt_sock,
				      odp_packet_t pkt_table[], unsigned len,
				      unsigned char if_mac[])
{
	union frame_map ppd;
	odp_time_t ts_val;
	odp_time_t *ts = NULL;
	unsigned frame_num, next_frame_num;
	uint8_t *pkt_buf;
	int pkt_len;
	struct ethhdr *eth_hdr;
	unsigned i;
	unsigned nb_rx;
	struct ring *ring;
	int ret;

	if (pktio_entry->s.config.pktin.bit.ts_all ||
	    pktio_entry->s.config.pktin.bit.ts_ptp)
		ts = &ts_val;

	ring = &pkt_sock->rx_ring;
	frame_num = ring->frame_num;

	for (i = 0, nb_rx = 0; i < len; i++) {
		odp_packet_hdr_t *hdr;
		odp_packet_hdr_t parsed_hdr;
		odp_pool_t pool = pkt_sock->pool;
		int num;

		if (!mmap_rx_kernel_ready(ring->rd[frame_num].iov_base))
			break;

		if (ts != NULL)
			ts_val = odp_time_global();

		ppd.raw = ring->rd[frame_num].iov_base;
		next_frame_num = (frame_num + 1) % ring->rd_num;

		pkt_buf = (uint8_t *)ppd.raw + ppd.v2->tp_h.tp_mac;
		pkt_len = ppd.v2->tp_h.tp_snaplen;

		/* Don't receive packets sent by ourselves */
		eth_hdr = (struct ethhdr *)pkt_buf;
		if (odp_unlikely(ethaddrs_equal(if_mac,
						eth_hdr->h_source))) {
			mmap_rx_user_ready(ppd.raw); /* drop */
			frame_num = next_frame_num;
			continue;
		}

		if (ppd.v2->tp_h.tp_status & TP_STATUS_VLAN_VALID)
			pkt_buf = pkt_mmap_vlan_insert(pkt_buf,
						       ppd.v2->tp_h.tp_mac,
						       ppd.v2->tp_h.tp_vlan_tci,
						       &pkt_len);

		if (pktio_cls_enabled(pktio_entry)) {
			if (cls_classify_packet(pktio_entry, pkt_buf, pkt_len,
						pkt_len, &pool, &parsed_hdr)) {
				mmap_rx_user_ready(ppd.raw); /* drop */
				frame_num = next_frame_num;
				continue;
			}
		}

		num = packet_alloc_multi(pool, pkt_len, &pkt_table[nb_rx], 1);
		if (odp_unlikely(num != 1)) {
			pkt_table[nb_rx] = ODP_PACKET_INVALID;
			mmap_rx_user_ready(ppd.raw); /* drop */
			frame_num = next_frame_num;
			continue;
		}

		hdr = odp_packet_hdr(pkt_table[nb_rx]);
		ret = odp_packet_copy_from_mem(pkt_table[nb_rx], 0,
					       pkt_len, pkt_buf);
		if (ret != 0) {
			odp_packet_free(pkt_table[nb_rx]);
			mmap_rx_user_ready(ppd.raw); /* drop */
			frame_num = next_frame_num;
			continue;
		}

		hdr->input = pktio_entry->s.handle;

		if (pktio_cls_enabled(pktio_entry))
			copy_packet_cls_metadata(&parsed_hdr, hdr);
		else
			packet_parse_l2(&hdr->p, pkt_len);

		packet_set_ts(hdr, ts);

		mmap_rx_user_ready(ppd.raw);
		frame_num = next_frame_num;
		nb_rx++;
	}

	ring->frame_num = frame_num;
	return nb_rx;
}
static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
				      pkt_sock_mmap_t *pkt_sock,
				      odp_packet_t pkt_table[], unsigned len,
				      unsigned char if_mac[])
{
	union frame_map ppd;
	unsigned frame_num, next_frame_num;
	uint8_t *pkt_buf;
	int pkt_len;
	struct ethhdr *eth_hdr;
	unsigned i = 0;
	unsigned nb_rx = 0;
	struct ring *ring;
	int ret;

	ring = &pkt_sock->rx_ring;
	frame_num = ring->frame_num;

	while (i < len) {
		if (!mmap_rx_kernel_ready(ring->rd[frame_num].iov_base))
			break;

		ppd.raw = ring->rd[frame_num].iov_base;
		next_frame_num = (frame_num + 1) % ring->rd_num;

		pkt_buf = (uint8_t *)ppd.raw + ppd.v2->tp_h.tp_mac;
		pkt_len = ppd.v2->tp_h.tp_snaplen;

		/* Don't receive packets sent by ourselves */
		eth_hdr = (struct ethhdr *)pkt_buf;
		if (odp_unlikely(ethaddrs_equal(if_mac,
						eth_hdr->h_source))) {
			mmap_rx_user_ready(ppd.raw); /* drop */
			frame_num = next_frame_num;
			continue;
		}

		if (pktio_cls_enabled(pktio_entry)) {
			ret = _odp_packet_cls_enq(pktio_entry, pkt_buf,
						  pkt_len, &pkt_table[nb_rx]);
			if (ret)
				nb_rx++;
		} else {
			odp_packet_hdr_t *hdr;

			pkt_table[i] = packet_alloc(pkt_sock->pool,
						    pkt_len, 1);
			if (odp_unlikely(pkt_table[i] ==
					 ODP_PACKET_INVALID)) {
				mmap_rx_user_ready(ppd.raw); /* drop */
				frame_num = next_frame_num;
				continue;
			}

			hdr = odp_packet_hdr(pkt_table[i]);
			ret = odp_packet_copydata_in(pkt_table[i], 0,
						     pkt_len, pkt_buf);
			if (ret != 0) {
				odp_packet_free(pkt_table[i]);
				mmap_rx_user_ready(ppd.raw); /* drop */
				frame_num = next_frame_num;
				continue;
			}

			packet_parse_l2(hdr);
			nb_rx++;
		}

		mmap_rx_user_ready(ppd.raw);
		frame_num = next_frame_num;
		i++;
	}

	ring->frame_num = frame_num;
	return nb_rx;
}
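/*
 * A minimal sketch of the ethaddrs_equal() helper both receive paths above
 * rely on, assuming it does a plain byte comparison of two 6-byte MAC
 * addresses; the actual helper may be implemented differently.
 */
#include <string.h>		/* memcmp */
#include <linux/if_ether.h>	/* ETH_ALEN */

static inline int ethaddrs_equal(unsigned char mac_a[], unsigned char mac_b[])
{
	/* Non-zero when the two station addresses match byte for byte */
	return !memcmp(mac_a, mac_b, ETH_ALEN);
}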