/**
 * Dequeue mbufs from each KNI device owned by @p and burst-transmit them
 * to the NIC port; mbufs the NIC refuses are freed and counted as drops.
 *
 * @param p        Port parameters (KNI handles, port id, TX queue id).
 * @param lcore_id Index into the per-lcore stats array.
 */
static void
kni_egress(struct kni_port_params *p, uint32_t lcore_id)
{
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
    uint8_t port_id;
    uint16_t queue_num;
    /* BUG FIX: the index must be as wide as nb_kni (uint32_t).  The
     * original used uint8_t, which wraps and loops forever whenever
     * nb_kni > 255. */
    uint32_t i, nb_kni;
    unsigned nb_tx, num;

    if (p == NULL)
        return;

    nb_kni = p->nb_kni;
    port_id = p->port_id;
    queue_num = p->tx_queue_id;

    for (i = 0; i < nb_kni; i++) {
        /* Burst rx from kni */
        num = rte_kni_rx_burst(p->kni[i], pkts_burst, MAX_PKT_BURST);
        if (unlikely(num > MAX_PKT_BURST)) {
            RTE_LOG(ERR, KNI, "Error receiving from KNI\n");
            return;
        }

        /* Burst tx to eth */
        nb_tx = rte_eth_tx_burst(port_id, queue_num, pkts_burst,
                                 (uint16_t)num);

        /* Service pending KNI control requests (MTU change, if up/down). */
        rte_kni_handle_request(p->kni[i]);

        stats[lcore_id].nb_kni_rx += num;
        stats[lcore_id].nb_tx += nb_tx;

        if (unlikely(nb_tx < num)) {
            /* Free mbufs not tx to NIC */
            kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
            stats[lcore_id].nb_kni_dropped += num - nb_tx;
        }
    }
}
static void kni_ring_to_kni(struct kni_port_params *p) { uint8_t i, port_id; unsigned nb_rx, num; struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; if (p == NULL) return; port_id = p->port_id; /* Burst rx from ring */ nb_rx = rte_ring_dequeue_burst(p->ring,(void **)&pkts_burst, PKT_BURST_SZ); if (unlikely(nb_rx > PKT_BURST_SZ)) { RTE_LOG(ERR, APP, "Error receiving from eth\n"); return; } /* Burst tx to kni */ num = rte_kni_tx_burst(p->kni, pkts_burst, nb_rx); //kni_stats[port_id].rx_packets += num; rte_kni_handle_request(p->kni); if (unlikely(num < nb_rx)) { /* Free mbufs not tx to kni interface */ kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num); //kni_stats[port_id].rx_dropped += nb_rx - num; } return; }
/* * Application main function - loops through * receiving and processing packets. Never returns */ int main(int argc, char *argv[]) { int retval = 0; uint8_t port = 0; char *port_name; if ((retval = rte_eal_init(argc, argv)) < 0) { RTE_LOG(INFO, APP, "EAL init failed.\n"); return -1; } argc -= retval; argv += retval; if (parse_app_args(argc, argv) < 0) rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n"); memset(kni_list, 0, sizeof(struct rte_kni) * MAX_KNI_PORTS); /* Open KNI or exit */ kni_fd = open("/dev/" KNI_DEVICE, O_RDWR); if (kni_fd < 0) { RTE_LOG(ERR, KNI, "Can not open /dev/%s\n", KNI_DEVICE); return -1; } /* Lookup for vports struct */ if (ovs_vport_lookup_vport_info() == NULL) return -1; /* Initialise the devices for each port*/ for (port = 0; port < ports_n; port++) { port_name = port_names[port]; RTE_LOG(INFO, KNI, "Attaching queues for port '%s'\n", port_name); if (create_kni_device(&kni_list[port], port_name, port) < 0) return -1; } RTE_LOG(INFO, KNI, "\nKNI client handling packets \n"); RTE_LOG(INFO, KNI, "[Press Ctrl-C to quit ...]\n"); for (;;) { for (port = 0; port < ports_n; port++) { /* Sleep to reduce processor load. As long as we respond * before rtnetlink times out we will still be able to ifup * and change mtu */ sleep(1); rte_kni_handle_request(&kni_list[port]); } } return 0; }
/* Service pending control requests on every attached KNI interface,
 * holding the interface lock for the duration of the scan. */
void
vr_dpdk_knidev_all_handle(void)
{
    int idx;

    vr_dpdk_if_lock();
    for (idx = 0; idx < VR_DPDK_MAX_KNI_INTERFACES; idx++) {
        struct rte_kni *kni = vr_dpdk.knis[idx];

        if (kni != NULL)
            rte_kni_handle_request(kni);
    }
    vr_dpdk_if_unlock();
}
static inline void app_lcore_io_tx_flush(struct app_lcore_params_io *lp, void *arg) { uint8_t portid, i; for (i = 0; i < lp->tx.n_nic_ports; i++) { uint32_t n_pkts; struct port *port; portid = lp->tx.nic_ports[i]; #ifdef __linux__ rte_kni_handle_request(lagopus_kni[portid]); #endif /* __linux__ */ port = dp_port_lookup(portid); if (port != NULL) { update_port_link_status(port); } if (likely((lp->tx.mbuf_out_flush[portid] == 0) || (lp->tx.mbuf_out[portid].n_mbufs == 0))) { continue; } DPRINTF("flush: send %d pkts\n", lp->tx.mbuf_out[portid].n_mbufs); n_pkts = rte_eth_tx_burst(portid, 0, lp->tx.mbuf_out[portid].array, (uint16_t)lp->tx.mbuf_out[portid].n_mbufs); DPRINTF("flus: sent %d pkts\n", n_pkts); if (unlikely(n_pkts < lp->tx.mbuf_out[portid].n_mbufs)) { uint32_t k; for (k = n_pkts; k < lp->tx.mbuf_out[portid].n_mbufs; k ++) { struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[portid].array[k]; rte_pktmbuf_free(pkt_to_free); } } lp->tx.mbuf_out[portid].n_mbufs = 0; lp->tx.mbuf_out_flush[portid] = 0; } }
/* Thin wrapper: forward a KNI control-request poll to the DPDK API,
 * returning its result unchanged. */
int
rw_piot_kni_handle_request(struct rte_kni *kni)
{
    return rte_kni_handle_request(kni);
}
int kni_dev_init(struct rte_mempool *mbuf_pool) { struct conf_sect *s = conf_get_sect("interface"); struct conf_opt *opt; struct rte_kni_conf conf; struct rte_kni_ops ops; struct rte_kni *kni; struct net_device *dev; struct knidev *knidev; pthread_t tid; struct ifconfig_arg arg; int i = 0, x = rte_eth_dev_count(); for (opt = s->opt; opt; opt = opt->next) { const char *busid = conf_get_subopt(opt, "busid"); if (!strcmp(busid, "kni")) kni_cnt++; } if (!kni_cnt) return 0; rte_kni_init(kni_cnt); dev_list = rte_malloc(NULL, kni_cnt * sizeof(void *), 0); memset(&conf, 0, sizeof(conf)); memset(&ops, 0, sizeof(ops)); ops.change_mtu = kni_change_mtu; ops.config_network_if = kni_config_network_if; for (opt = s->opt; opt; opt = opt->next) { const char *busid = conf_get_subopt(opt, "busid"); if (strcmp(busid, "kni")) continue; strcpy(conf.name, opt->name); conf.group_id = i; conf.mbuf_size = ETHER_MAX_LEN + 8; ops.port_id = i; kni = rte_kni_alloc(mbuf_pool, &conf, &ops); if (!kni) { fprintf(stderr, "failed to create %s\n", opt->name); return -1; } dev = netdev_alloc(opt->name, sizeof(*knidev), NULL); dev->xmit = knidev_xmit; knidev = netdev_priv(dev); knidev->port = i; knidev->xport = i + x; knidev->dev = dev; knidev->kni = kni; dev_list[i] = knidev; arg.dev = knidev; arg.opt = opt; arg.err = -1; pthread_create(&tid, NULL, kni_ifconfig, &arg); while (arg.err == -1) rte_kni_handle_request(kni); pthread_join(tid, NULL); if (arg.err != 0) return -1; } return 0; }