/*
 * Drain packets queued by the kernel on each KNI device bound to this I/O
 * lcore and burst-transmit them on the matching NIC port (queue 0).
 * Packets the NIC refuses are freed rather than retried.
 *
 * NOTE(review): the original body began with a bare `return;`, which made
 * every statement below it unreachable and turned this function into a
 * silent no-op.  That dead-code guard has been removed here; if KNI TX is
 * meant to stay disabled, gate it behind an explicit config flag instead
 * of an unconditional early return.
 */
static inline void app_lcore_io_tx_kni(struct app_lcore_params_io *lp,
                                       uint32_t bsz) {
  struct rte_mbuf *pkts_burst[bsz]; /* VLA sized by the burst parameter */
  unsigned num;
  uint8_t portid;
  uint16_t nb_tx;
  unsigned i, j;

  for (i = 0; i < lp->tx.n_nic_ports; i++) {
    portid = lp->tx.nic_ports[i];
    if (lagopus_kni[portid] == NULL) {
      /* No KNI device attached to this port. */
      continue;
    }
    num = rte_kni_rx_burst(lagopus_kni[portid], pkts_burst, bsz);
    if (num == 0 || (uint32_t)num > bsz) {
      /* Nothing received, or the burst count came back out of range. */
      continue;
    }
    nb_tx = rte_eth_tx_burst(portid, 0, pkts_burst, (uint16_t)num);
    if (unlikely(nb_tx < (uint16_t)num)) {
      /* Free mbufs not tx to NIC */
      for (j = nb_tx; j < num; j++) {
        rte_pktmbuf_free(pkts_burst[j]);
      }
    }
  }
}
/*
 * Thin pass-through wrapper: forwards directly to rte_kni_rx_burst()
 * with no additional processing.
 */
unsigned rw_piot_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs,
                              unsigned num) {
  unsigned received = rte_kni_rx_burst(kni, mbufs, num);
  return received;
}
/** * Interface to dequeue mbufs from tx_q and burst tx */ static void kni_egress(struct kni_port_params* p, uint32_t lcore_id) { uint8_t i, port_id; unsigned nb_tx, num; uint32_t nb_kni; struct rte_mbuf* pkts_burst[MAX_PKT_BURST]; uint16_t queue_num; if (p == NULL) return; nb_kni = p->nb_kni; port_id = p->port_id; queue_num = p->tx_queue_id; for (i = 0; i < nb_kni; i++) { /* Burst rx from kni */ num = rte_kni_rx_burst(p->kni[i], pkts_burst, MAX_PKT_BURST); if (unlikely(num > MAX_PKT_BURST)) { RTE_LOG(ERR, KNI, "Error receiving from KNI\n"); return; } /* Burst tx to eth */ nb_tx = rte_eth_tx_burst(port_id, queue_num, pkts_burst, (uint16_t)num); rte_kni_handle_request(p->kni[i]); stats[lcore_id].nb_kni_rx += num; stats[lcore_id].nb_tx += nb_tx; if (unlikely(nb_tx < num)) { /* Free mbufs not tx to NIC */ kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx); stats[lcore_id].nb_kni_dropped += num - nb_tx; } } }
/** * Interface to dequeue mbufs from tx_q and burst tx */ static void kni_kni_to_eth(struct kni_port_params *p) { uint8_t i, port_id; unsigned nb_tx, num; uint32_t nb_kni; struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; if (p == NULL) return; port_id = p->port_id; /* Burst rx from kni */ num = rte_kni_rx_burst(p->kni, pkts_burst, PKT_BURST_SZ); if (unlikely(num > PKT_BURST_SZ)) { RTE_LOG(ERR, APP, "Error receiving from KNI\n"); return; } /* Burst tx to eth */ nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num); // kni_stats[port_id].tx_packets += nb_tx; if (unlikely(nb_tx < num)) { /* Free mbufs not tx to NIC */ kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx); // kni_stats[port_id].tx_dropped += num - nb_tx; } return; }
/*
 * Port-reader callback: fetch up to n_pkts mbufs from the KNI device
 * behind `port`, record the count in the reader's input stats, and
 * return how many packets were received.
 */
static int dpdk_knidev_reader_rx(void *port, struct rte_mbuf **pkts,
                                 uint32_t n_pkts) {
  struct dpdk_knidev_reader *reader = port;
  uint32_t received = rte_kni_rx_burst(reader->kni, pkts, n_pkts);

  DPDK_KNIDEV_READER_STATS_PKTS_IN_ADD(reader, received);
  return received;
}
/*
 * Receive a burst of packets from the KNI fifo belonging to `vportid`,
 * account them as transmitted for that vport, then hand each packet to
 * the software switch.
 */
static void receive_from_kni(uint8_t vportid) {
  struct rte_mbuf *pkts[PKT_BURST_SIZE] = {0};
  struct statistics *stats = &vport_stats[vportid];
  int count;
  int idx;

  count = rte_kni_rx_burst(&rte_kni_list[vportid & KNI_MASK], pkts,
                           PKT_BURST_SIZE);
  if (count != 0) {
    stats->tx += count;
    for (idx = 0; idx < count; idx++) {
      switch_packet(pkts[idx], vportid);
    }
  }
}
/*
 * RX entry point for a KNI-backed device: receive up to nb_pkts mbufs
 * from the KNI instance registered for port_id.  queue_id is accepted
 * for interface compatibility but unused.  The result is bounded by
 * nb_pkts, so the narrowing cast to uint16_t is lossless.
 */
uint16_t kni_dev_rx_burst(uint8_t port_id, uint16_t queue_id,
                          struct rte_mbuf **rx_pkts,
                          const uint16_t nb_pkts) {
  struct rte_kni *kni = dev_list[port_id]->kni;

  return (uint16_t)rte_kni_rx_burst(kni, rx_pkts, nb_pkts);
}