/*
 * Transmit a single packet onto a VIRTIO device RX virtqueue.
 *
 * @param data  Optional remote VIF (struct vif *).  When non-NULL and
 *              different from the packet's own VIF, the packet is switched
 *              locally to that remote VM's device instead of being echoed
 *              back to its source device.
 * @param pkt   Packet to transmit; pkt->q_no selects the queue pair.
 * @return      Always 0; drops are accounted in the per-VIF/per-queue
 *              counters rather than reported to the caller.
 */
static int virtio_tx_packet(CC_UNUSED void* data, struct packet* pkt)
{
    struct vif* rvif = (struct vif*)data;
    struct vif* vif = (struct vif*)pkt->lldev->vif;
    /* Map the 0-based queue-pair index to the device's RX vring index. */
    int q_no = pkt->q_no * VIRTIO_QNUM + VIRTIO_RXQ;

    /* rvif can be NULL, as this function is called from various paths. */
    if (rvif && (rvif != vif)) {
        /*
         * Local-VM to local-VM switching: try to deliver on the matching
         * queue of the remote VIF (same core).  The remote VM may expose
         * fewer queue pairs than the transmitting VM, so fall back to the
         * first queue when pkt->q_no is out of range.  Queue-pair indices
         * are 0-based, hence ">=": q_no == virt_qp_nb is already invalid
         * (the original ">" allowed an out-of-range queue through).
         */
        if (pkt->q_no >= rvif->lldev->dev->virt_qp_nb) {
            /* Send on the first queue. */
            q_no = VIRTIO_RXQ;
        }
        /* TX on the remote VIF. */
        vif = rvif;
    }

    /* TODO: Handle burst mode */
    if (rte_vhost_enqueue_burst(vif->lldev->dev, q_no, &pkt->mbuf, 1) == 1) {
        rte_atomic64_inc(&vif->tx_packets);
        rte_atomic64_inc(&vif->lldev->queue[q_no].tx_packets);
    } else {
        rte_atomic64_inc(&vif->dropped_packets);
        rte_atomic64_inc(&vif->lldev->queue[q_no].dropped_packets);
        printf("virtio_tx_packet Packet dropped\n");
    }
    return 0;
}
/* Check for decapsulation and pass packets directly to VIRTIO device */ int vxlan_rx_pkts(struct virtio_net *dev, struct rte_mbuf **pkts_burst, uint32_t rx_count) { uint32_t i = 0; uint32_t count = 0; int ret; struct rte_mbuf *pkts_valid[rx_count]; for (i = 0; i < rx_count; i++) { if (enable_stats) { rte_atomic64_add( &dev_statistics[dev->device_fh].rx_bad_ip_csum, (pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0); rte_atomic64_add( &dev_statistics[dev->device_fh].rx_bad_ip_csum, (pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0); } ret = vxlan_rx_process(pkts_burst[i]); if (unlikely(ret < 0)) continue; pkts_valid[count] = pkts_burst[i]; count++; } ret = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts_valid, count); return ret; }