/*
 * Transmit up to 'pkts' packets from pkt_tbl on the given pktout queue,
 * retrying partial sends. Packets that could not be sent are freed.
 *
 * Returns the number of packets actually transmitted.
 */
static int send_packets(odp_pktout_queue_t pktout, odp_packet_t *pkt_tbl,
			unsigned pkts)
{
	unsigned sent = 0;
	unsigned dropped;

	if (pkts == 0)
		return 0;

	/* odp_pktout_send() may accept fewer packets than offered;
	 * keep pushing the remainder until it errors or we are done. */
	do {
		int ret = odp_pktout_send(pktout, &pkt_tbl[sent], pkts - sent);

		if (odp_unlikely(ret <= 0))
			break;

		sent += ret;
	} while (sent < pkts);

	/* Anything left over is dropped; release the buffers. */
	dropped = pkts - sent;
	if (odp_unlikely(dropped))
		odp_packet_free_multi(&pkt_tbl[sent], dropped);

	return sent;
}
/*
 * Ethernet/VLAN input processing: parse the L2 header, resolve the
 * per-VLAN interface when an 802.1Q tag is present, and dispatch the
 * packet to the network-layer handler matching its ethertype.
 *
 * Returns OFP_PKT_DROP on parse/interface failure, otherwise the
 * return code of the selected protocol handler, or OFP_PKT_CONTINUE
 * for unhandled ethertypes.
 */
enum ofp_return_code ofp_eth_vlan_processing(odp_packet_t *pkt)
{
	struct ofp_ifnet *ifnet = odp_packet_user_ptr(*pkt);
	struct ofp_ether_header *eth;
	uint16_t ethtype;
	uint16_t vlan = 0;

	eth = (struct ofp_ether_header *)odp_packet_l2_ptr(*pkt, NULL);
	if (odp_unlikely(eth == NULL)) {
		OFP_DBG("eth is NULL");
		return OFP_PKT_DROP;
	}

	ethtype = odp_be_to_cpu_16(eth->ether_type);

	if (ethtype == OFP_ETHERTYPE_VLAN) {
		struct ofp_ether_vlan_header *vhdr =
			(struct ofp_ether_vlan_header *)eth;

		/* Peel the 802.1Q tag and re-read the inner ethertype. */
		vlan = OFP_EVL_VLANOFTAG(odp_be_to_cpu_16(vhdr->evl_tag));
		ethtype = odp_be_to_cpu_16(vhdr->evl_proto);

		/* Switch to the VLAN sub-interface for this tag. */
		ifnet = ofp_get_ifnet(ifnet->port, vlan);
		if (ifnet == NULL)
			return OFP_PKT_DROP;

		if (odp_likely(ofp_if_type(ifnet) != OFP_IFT_VXLAN))
			odp_packet_user_ptr_set(*pkt, ifnet);
	}

	OFP_DBG("ETH TYPE = %04x", ethtype);

	/* network layer classifier */
	switch (ethtype) {
	/* STUB: except for ARP, just terminate all traffic to slowpath.
	 * FIXME: test/implement other cases */
#ifdef INET
	case OFP_ETHERTYPE_IP:
		return ofp_ipv4_processing(pkt);
#endif /* INET */
#ifdef INET6
	case OFP_ETHERTYPE_IPV6:
		return ofp_ipv6_processing(pkt);
#endif /* INET6 */
	case OFP_ETHERTYPE_ARP:
		return ofp_arp_processing(pkt);
	default:
		return OFP_PKT_CONTINUE;
	}
}
static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring, const odp_packet_t pkt_table[], unsigned len) { union frame_map ppd; uint32_t pkt_len; unsigned first_frame_num, frame_num, frame_count; int ret; uint8_t *buf; unsigned n, i = 0; unsigned nb_tx = 0; int send_errno; int total_len = 0; first_frame_num = ring->frame_num; frame_num = first_frame_num; frame_count = ring->rd_num; while (i < len) { ppd.raw = ring->rd[frame_num].iov_base; if (!odp_unlikely(mmap_tx_kernel_ready(ppd.raw))) break; pkt_len = odp_packet_len(pkt_table[i]); ppd.v2->tp_h.tp_snaplen = pkt_len; ppd.v2->tp_h.tp_len = pkt_len; total_len += pkt_len; buf = (uint8_t *)ppd.raw + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll); odp_packet_copy_to_mem(pkt_table[i], 0, pkt_len, buf); mmap_tx_user_ready(ppd.raw); if (++frame_num >= frame_count) frame_num = 0; i++; } ret = sendto(sock, NULL, 0, MSG_DONTWAIT, NULL, 0); send_errno = errno; /* On success, the return value indicates the number of bytes sent. On * failure a value of -1 is returned, even if the failure occurred * after some of the packets in the ring have already been sent, so we * need to inspect the packet status to determine which were sent. */ if (odp_likely(ret == total_len)) { nb_tx = i; ring->frame_num = frame_num; } else if (ret == -1) { for (frame_num = first_frame_num, n = 0; n < i; ++n) { struct tpacket2_hdr *hdr = ring->rd[frame_num].iov_base; if (odp_likely(hdr->tp_status == TP_STATUS_AVAILABLE || hdr->tp_status == TP_STATUS_SENDING)) { nb_tx++; } else { /* The remaining frames weren't sent, clear * their status to indicate we're not waiting * for the kernel to process them. 
*/ hdr->tp_status = TP_STATUS_AVAILABLE; } if (++frame_num >= frame_count) frame_num = 0; } ring->frame_num = (first_frame_num + nb_tx) % frame_count; if (nb_tx == 0 && SOCK_ERR_REPORT(send_errno)) { __odp_errno = send_errno; /* ENOBUFS indicates that the transmit queue is full, * which will happen regularly when overloaded so don't * print it */ if (errno != ENOBUFS) ODP_ERR("sendto(pkt mmap): %s\n", strerror(send_errno)); return -1; } } else { /* Short send, return value is number of bytes sent so use this * to determine number of complete frames sent. */ for (n = 0; n < i && ret > 0; ++n) { ret -= odp_packet_len(pkt_table[n]); if (ret >= 0) nb_tx++; } ring->frame_num = (first_frame_num + nb_tx) % frame_count; } for (i = 0; i < nb_tx; ++i) odp_packet_free(pkt_table[i]); return nb_tx; }
int default_event_dispatcher(void *arg) { odp_event_t ev; odp_packet_t pkt; odp_queue_t in_queue; int event_idx = 0; int event_cnt = 0; ofp_pkt_processing_func pkt_func = (ofp_pkt_processing_func)arg; odp_bool_t *is_running = NULL; if (ofp_init_local()) { OFP_ERR("ofp_init_local failed"); return -1; } int rx_burst = global_param->evt_rx_burst_size; odp_event_t events[rx_burst]; is_running = ofp_get_processing_state(); if (is_running == NULL) { OFP_ERR("ofp_get_processing_state failed"); ofp_term_local(); return -1; } /* PER CORE DISPATCHER */ while (*is_running) { event_cnt = odp_schedule_multi(&in_queue, ODP_SCHED_WAIT, events, rx_burst); for (event_idx = 0; event_idx < event_cnt; event_idx++) { odp_event_type_t ev_type; ev = events[event_idx]; if (ev == ODP_EVENT_INVALID) continue; ev_type = odp_event_type(ev); if (odp_likely(ev_type == ODP_EVENT_PACKET)) { pkt = odp_packet_from_event(ev); #if 0 if (odp_unlikely(odp_packet_has_error(pkt))) { OFP_DBG("Dropping packet with error"); odp_packet_free(pkt); continue; } #endif ofp_packet_input(pkt, in_queue, pkt_func); continue; } if (ev_type == ODP_EVENT_TIMEOUT) { ofp_timer_handle(ev); continue; } OFP_ERR("Unexpected event type: %u", ev_type); odp_event_free(ev); } ofp_send_pending_pkt(); } if (ofp_term_local()) OFP_ERR("ofp_term_local failed"); return 0; }