int kni_alloc(uint8_t port_id, struct rte_mempool *pktmbuf_pool)
{
    uint8_t i;
    struct rte_kni *kni;
    struct rte_kni_conf conf;
    struct kni_port_params **params = kni_port_params_array;

    if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
        return -1;

    params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
                params[port_id]->nb_lcore_k : 1;

    for (i = 0; i < params[port_id]->nb_kni; i++) {
        /* Clear conf at first */
        memset(&conf, 0, sizeof(conf));
        if (params[port_id]->nb_lcore_k > 1) {
            snprintf(conf.name, RTE_KNI_NAMESIZE,
                    "dpdk%u_%u", port_id, i);
            conf.core_id = params[port_id]->lcore_k[i];
            conf.force_bind = 1;
        } else
            snprintf(conf.name, RTE_KNI_NAMESIZE,
                    "dpdk%u", port_id);
        conf.group_id = (uint16_t)port_id;
        conf.mbuf_size = MAX_PACKET_SZ;
        /*
         * The first KNI device associated to a port
         * is the master, for multiple kernel thread
         * environment.
         */
        if (i == 0) {
            struct rte_kni_ops ops;
            struct rte_eth_dev_info dev_info;

            memset(&dev_info, 0, sizeof(dev_info));
            rte_eth_dev_info_get(port_id, &dev_info);
            conf.addr = dev_info.pci_dev->addr;
            conf.id = dev_info.pci_dev->id;

            memset(&ops, 0, sizeof(ops));
            ops.port_id = port_id;
            ops.change_mtu = kni_change_mtu;
            ops.config_network_if = kni_config_network_interface;

            kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
        } else
            kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);

        if (!kni)
            rte_exit(EXIT_FAILURE, "Fail to create kni for "
                        "port: %d\n", port_id);
        params[port_id]->kni[i] = kni;
    }

    return 0;
}
static void app_init_kni(struct app_params *app)
{
    uint32_t i;

    if (app->n_pktq_kni == 0)
        return;

    rte_kni_init(app->n_pktq_kni);

    for (i = 0; i < app->n_pktq_kni; i++) {
        struct app_pktq_kni_params *p_kni = &app->kni_params[i];
        struct app_link_params *p_link;
        struct rte_eth_dev_info dev_info;
        struct app_mempool_params *mempool_params;
        struct rte_mempool *mempool;
        struct rte_kni_conf conf;
        struct rte_kni_ops ops;

        /* LINK */
        p_link = app_get_link_for_kni(app, p_kni);
        memset(&dev_info, 0, sizeof(dev_info));
        rte_eth_dev_info_get(p_link->pmd_id, &dev_info);

        /* MEMPOOL */
        mempool_params = &app->mempool_params[p_kni->mempool_id];
        mempool = app->mempool[p_kni->mempool_id];

        /* KNI */
        memset(&conf, 0, sizeof(conf));
        snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
        conf.force_bind = p_kni->force_bind;
        if (conf.force_bind) {
            int lcore_id;

            lcore_id = cpu_core_map_get_lcore_id(app->core_map,
                p_kni->socket_id,
                p_kni->core_id,
                p_kni->hyper_th_id);

            if (lcore_id < 0)
                rte_panic("%s invalid CPU core\n", p_kni->name);

            conf.core_id = (uint32_t) lcore_id;
        }
        conf.group_id = p_link->pmd_id;
        conf.mbuf_size = mempool_params->buffer_size;
        conf.addr = dev_info.pci_dev->addr;
        conf.id = dev_info.pci_dev->id;

        memset(&ops, 0, sizeof(ops));
        ops.port_id = (uint8_t) p_link->pmd_id;
        ops.change_mtu = kni_change_mtu;
        ops.config_network_if = kni_config_network_interface;

        APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
        app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
        if (!app->kni[i])
            rte_panic("%s init error\n", p_kni->name);
    }
}
/* Alloc KNI Devices for PORT_ID */
static int odp_kni_alloc(uint8_t port_id)
{
    struct rte_kni *kni;
    struct rte_kni_conf conf;
    struct rte_kni_ops ops;
    struct rte_eth_dev_info dev_info;
    struct kni_port_params **params = kni_port_params_array;
    unsigned lcore_id;
    unsigned lcore_socket;
    struct rte_mempool *kni_mempool;
    char ring_name[32];

    /* Validate the port before dereferencing its parameters */
    if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
        return -1;

    lcore_id = params[port_id]->lcore_id;
    lcore_socket = rte_lcore_to_socket_id(lcore_id);
    kni_mempool = odp_pktmbuf_pool[lcore_socket];

    memset(&conf, 0, sizeof(conf));
    snprintf(conf.name, RTE_KNI_NAMESIZE, "keth%u", port_id);
    conf.group_id = (uint16_t)port_id;
    conf.mbuf_size = MAX_PACKET_SZ;

    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);
    conf.addr = dev_info.pci_dev->addr;
    conf.id = dev_info.pci_dev->id;

    memset(&ops, 0, sizeof(ops));
    ops.port_id = port_id;
    ops.change_mtu = kni_change_mtu;
    ops.config_network_if = kni_config_network_interface;

    kni = rte_kni_alloc(kni_mempool, &conf, &ops);
    if (!kni)
        rte_exit(EXIT_FAILURE, "Fail to create kni for "
                    "port: %d\n", port_id);
    params[port_id]->kni = kni;

    /* Create ring to receive the pkts from other cores */
    snprintf(ring_name, sizeof(ring_name), "kni_ring_s%u_p%u",
             lcore_socket, port_id);
    params[port_id]->ring = rte_ring_create(ring_name, ODP_KNI_RING_SIZE,
                                            lcore_socket, RING_F_SC_DEQ);
    if (!params[port_id]->ring)
        rte_exit(EXIT_FAILURE, "Fail to create ring for kni %s", ring_name);

    return 0;
}
/*
 * @dev - real device kni attach to.
 * @kniname - optional, kni device name or auto generate.
 */
int kni_add_dev(struct netif_port *dev, const char *kniname)
{
    struct rte_kni_conf conf;
    struct rte_kni *kni;
    int err;

    if (!dev)
        return EDPVS_INVAL;

    if (dev->type == PORT_TYPE_BOND_SLAVE)
        return EDPVS_NOTSUPP;

    if (kni_dev_exist(dev)) {
        RTE_LOG(ERR, Kni, "%s: dev %s has already attached with kni\n",
                __func__, dev->name);
        return EDPVS_EXIST;
    }

    kni_fill_conf(dev, kniname, &conf);

    kni = rte_kni_alloc(kni_mbuf_pool[dev->socket], &conf, NULL);
    if (!kni)
        return EDPVS_DPDKAPIFAIL;

    err = kni_rtnl_init(dev);
    if (err != EDPVS_OK) {
        rte_kni_release(kni);
        return err;
    }

    /*
     * kni device should use same mac as real device,
     * because it may config same IP of real device.
     * diff mac means kni cannot accept packets sent
     * to real-device.
     */
    err = linux_set_if_mac(conf.name, (unsigned char *)&dev->addr);
    if (err != EDPVS_OK) {
        char mac[18];
        ether_format_addr(mac, sizeof(mac), &dev->addr);
        RTE_LOG(WARNING, Kni, "%s: fail to set mac %s for %s: %s\n",
                __func__, mac, conf.name, strerror(errno));
    }

    snprintf(dev->kni.name, sizeof(dev->kni.name), "%s", conf.name);
    dev->kni.addr = dev->addr;
    dev->kni.kni = kni;
    return EDPVS_OK;
}
void app_init_kni(void)
{
    uint8_t portid;

    for (portid = 0; portid < rte_eth_dev_count(); portid++) {
        struct rte_kni *kni;
        struct rte_kni_ops ops;
        struct rte_kni_conf conf;
        struct rte_eth_dev_info dev_info;

        continue; /* XXX */

        /* Clear conf at first */
        memset(&conf, 0, sizeof(conf));
        memset(&dev_info, 0, sizeof(dev_info));
        memset(&ops, 0, sizeof(ops));

        snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", portid);
        conf.group_id = (uint16_t)portid;
        conf.mbuf_size = MAX_PACKET_SZ;

        rte_eth_dev_info_get(portid, &dev_info);
        conf.addr = dev_info.pci_dev->addr;
        conf.id = dev_info.pci_dev->id;

        ops.port_id = portid;
        ops.change_mtu = kni_change_mtu;
        ops.config_network_if = kni_config_network_interface;

        /* XXX socket 0 */
        kni = rte_kni_alloc(app.pools[0], &conf, &ops);
        if (kni == NULL) {
            lagopus_msg_error("Fail to create kni dev for port: %d\n",
                              portid);
            continue;
        }
        lagopus_kni[portid] = kni;
        printf("KNI: %s is configured.\n", conf.name);
        printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
               (unsigned) portid,
               lagopus_ports_eth_addr[portid].addr_bytes[0],
               lagopus_ports_eth_addr[portid].addr_bytes[1],
               lagopus_ports_eth_addr[portid].addr_bytes[2],
               lagopus_ports_eth_addr[portid].addr_bytes[3],
               lagopus_ports_eth_addr[portid].addr_bytes[4],
               lagopus_ports_eth_addr[portid].addr_bytes[5]);

        /* initialize port stats */
        memset(&port_statistics, 0, sizeof(port_statistics));
    }
}
struct rte_kni *
mg_create_kni(uint8_t port_id, uint8_t core_id, void *mempool_ptr,
              const char name[])
{
    struct rte_kni *kni;
    struct rte_kni_conf conf;
    struct rte_kni_ops ops;
    struct rte_eth_dev_info dev_info;
    struct rte_mempool *pktmbuf_pool = (struct rte_mempool *)mempool_ptr;

    /* Clear conf at first */
    memset(&conf, 0, sizeof(conf));
    snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
    conf.core_id = core_id;
    conf.force_bind = 1;
    conf.group_id = (uint16_t)port_id;
    conf.mbuf_size = MAX_PACKET_SZ;

    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);
    conf.addr = dev_info.pci_dev->addr;
    conf.id = dev_info.pci_dev->id;

    memset(&ops, 0, sizeof(ops));
    ops.port_id = port_id;
    ops.change_mtu = kni_change_mtu;
    ops.config_network_if = kni_config_network_interface;

    kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
    return kni;
}
/*
 * Create KNI for specified device
 */
struct rte_kni *
rw_piot_kni_create(rw_piot_api_handle_t api_handle,
                   struct rte_kni_conf *conf,
                   struct rte_mempool *pktmbuf_pool)
{
    struct rte_kni_ops ops;
    struct rte_eth_dev_info dev_info;
    struct rte_kni *kni;
    rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);

    if (conf->name[0] == 0) {
        ASSERT(rw_piot_dev);
        rte_snprintf(conf->name, RTE_KNI_NAMESIZE, "vEth%u",
                     rw_piot_dev->rte_port_id);
    }
    conf->mbuf_size = MAX_PACKET_SZ;

    memset(&dev_info, 0, sizeof(dev_info));
    memset(&ops, 0, sizeof(ops));

    if (rw_piot_dev) {
        rte_eth_dev_info_get(rw_piot_dev->rte_port_id, &dev_info);
        conf->group_id = (uint16_t)rw_piot_dev->rte_port_id;
        conf->addr = dev_info.pci_dev->addr;
        conf->id = dev_info.pci_dev->id;
        ops.port_id = rw_piot_dev->rte_port_id;
    }
    ops.change_mtu = rw_piot_kni_change_mtu;
    ops.config_network_if = rw_piot_kni_config_network_interface;

    kni = rte_kni_alloc(pktmbuf_pool, conf, &ops);
    if (NULL == kni) {
        printf("rte_kni_alloc returned failure\n");
        return NULL;
    }
    return kni;
}
/* It is deprecated and just for backward compatibility */
struct rte_kni *
rte_kni_create(uint8_t port_id,
               unsigned mbuf_size,
               struct rte_mempool *pktmbuf_pool,
               struct rte_kni_ops *ops)
{
    struct rte_kni_conf conf;
    struct rte_eth_dev_info info;

    memset(&info, 0, sizeof(info));
    memset(&conf, 0, sizeof(conf));
    rte_eth_dev_info_get(port_id, &info);

    snprintf(conf.name, sizeof(conf.name), "vEth%u", port_id);
    conf.addr = info.pci_dev->addr;
    conf.id = info.pci_dev->id;
    conf.group_id = (uint16_t)port_id;
    conf.mbuf_size = mbuf_size;

    /* Save the port id for request handling */
    ops->port_id = port_id;

    return rte_kni_alloc(pktmbuf_pool, &conf, ops);
}
/* Init KNI RX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_rx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *rx_queue = &lcore->lcore_rx_queues[vif_idx];
    struct vr_dpdk_queue_params *rx_queue_params
                    = &lcore->lcore_rx_queue_params[vif_idx];

    if (vif->vif_type == VIF_TYPE_HOST) {
        port_id = (((struct vr_dpdk_ethdev *)(vif->vif_bridge->vif_os))->
                ethdev_port_id);
    }

    /* init queue */
    rx_queue->rxq_ops = dpdk_knidev_reader_ops;
    rx_queue->q_queue_h = NULL;
    rx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_reader_params reader_params = {
        .kni = vif->vif_os,
    };
    rx_queue->q_queue_h = rx_queue->rxq_ops.f_create(&reader_params, socket_id);
    if (rx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s RX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    rx_queue_params->qp_release_op = &dpdk_kni_rx_queue_release;

    return rx_queue;
}

/* Release KNI TX queue */
static void
dpdk_kni_tx_queue_release(unsigned lcore_id, struct vr_interface *vif)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif->vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                    = &lcore->lcore_tx_queue_params[vif->vif_idx];

    tx_queue->txq_ops.f_tx = NULL;
    rte_wmb();

    /* flush and free the queue */
    if (tx_queue->txq_ops.f_free(tx_queue->q_queue_h)) {
        RTE_LOG(ERR, VROUTER, "    error freeing lcore %u KNI device TX queue\n",
                    lcore_id);
    }

    /* reset the queue */
    vrouter_put_interface(tx_queue->q_vif);
    memset(tx_queue, 0, sizeof(*tx_queue));
    memset(tx_queue_params, 0, sizeof(*tx_queue_params));
}

/* Init KNI TX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_tx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                    = &lcore->lcore_tx_queue_params[vif_idx];
    struct vr_dpdk_ethdev *ethdev;

    if (vif->vif_type == VIF_TYPE_HOST) {
        ethdev = vif->vif_bridge->vif_os;
        if (ethdev == NULL) {
            RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue:"
                " bridge vif %u ethdev is not initialized\n",
                vif->vif_name, vif->vif_bridge->vif_idx);
            return NULL;
        }
        port_id = ethdev->ethdev_port_id;
    }

    /* init queue */
    tx_queue->txq_ops = dpdk_knidev_writer_ops;
    tx_queue->q_queue_h = NULL;
    tx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_writer_params writer_params = {
        .kni = vif->vif_os,
        .tx_burst_sz = VR_DPDK_TX_BURST_SZ,
    };
    tx_queue->q_queue_h = tx_queue->txq_ops.f_create(&writer_params, socket_id);
    if (tx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    tx_queue_params->qp_release_op = &dpdk_kni_tx_queue_release;

    return tx_queue;
}

/* Change KNI MTU size callback */
static int
dpdk_knidev_change_mtu(uint8_t port_id, unsigned new_mtu)
{
    struct vrouter *router = vrouter_get(0);
    struct vr_interface *vif;
    int i, ret;
    uint8_t ethdev_port_id, slave_port_id;
    struct vr_dpdk_ethdev *ethdev = NULL;

    RTE_LOG(INFO, VROUTER, "Changing eth device %" PRIu8 " MTU to %u\n",
                    port_id, new_mtu);
    if (port_id >= rte_eth_dev_count()) {
        RTE_LOG(ERR, VROUTER, "Error changing eth device %" PRIu8
            " MTU: invalid eth device\n", port_id);
        return -EINVAL;
    }

    /*
     * TODO: DPDK bond PMD does not implement mtu_set op, so we need to
     * set the MTU manually for all the slaves.
     */
    /* Bond vif uses first slave port ID. */
    if (router->vr_eth_if) {
        ethdev = (struct vr_dpdk_ethdev *)router->vr_eth_if->vif_os;
        if (ethdev && ethdev->ethdev_nb_slaves > 0) {
            for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
                if (port_id == ethdev->ethdev_slaves[i])
                    break;
            }
            /* Clear ethdev if no port match. */
            if (i >= ethdev->ethdev_nb_slaves)
                ethdev = NULL;
        }
    }

    if (ethdev && ethdev->ethdev_nb_slaves > 0) {
        for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
            slave_port_id = ethdev->ethdev_slaves[i];
            RTE_LOG(INFO, VROUTER, "    changing bond member eth device %" PRIu8
                " MTU to %u\n", slave_port_id, new_mtu);

            ret = rte_eth_dev_set_mtu(slave_port_id, new_mtu);
            if (ret < 0) {
                RTE_LOG(ERR, VROUTER,
                    "    error changing bond member eth device %" PRIu8
                    " MTU: %s (%d)\n",
                    slave_port_id, rte_strerror(-ret), -ret);
                return ret;
            }
        }
    } else {
        ret = rte_eth_dev_set_mtu(port_id, new_mtu);
        if (ret < 0) {
            RTE_LOG(ERR, VROUTER, "Error changing eth device %" PRIu8
                " MTU: %s (%d)\n", port_id, rte_strerror(-ret), -ret);
        }
        return ret;
    }

    /* On success, inform vrouter about new MTU */
    for (i = 0; i < router->vr_max_interfaces; i++) {
        vif = __vrouter_get_interface(router, i);
        if (vif && (vif->vif_type == VIF_TYPE_PHYSICAL)) {
            ethdev_port_id = (((struct vr_dpdk_ethdev *)(vif->vif_os))->
                    ethdev_port_id);
            if (ethdev_port_id == port_id) {
                /* Ethernet header size */
                new_mtu += sizeof(struct vr_eth);
                if (vr_dpdk.vlan_tag != VLAN_ID_INVALID) {
                    /* 802.1q header size */
                    new_mtu += sizeof(uint32_t);
                }
                vif->vif_mtu = new_mtu;
                if (vif->vif_bridge)
                    vif->vif_bridge->vif_mtu = new_mtu;
            }
        }
    }

    return 0;
}

/* Configure KNI state callback */
static int
dpdk_knidev_config_network_if(uint8_t port_id, uint8_t if_up)
{
    int ret = 0;

    RTE_LOG(INFO, VROUTER, "Configuring eth device %" PRIu8 " %s\n",
                    port_id, if_up ? "UP" : "DOWN");
    if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
        RTE_LOG(ERR, VROUTER, "Invalid eth device %" PRIu8 "\n", port_id);
        return -EINVAL;
    }

    if (if_up)
        ret = rte_eth_dev_start(port_id);
    else
        rte_eth_dev_stop(port_id);

    if (ret < 0) {
        RTE_LOG(ERR, VROUTER, "Configuring eth device %" PRIu8 " UP "
                    "failed (%d)\n", port_id, ret);
    }

    return ret;
}

/* Init KNI */
int
vr_dpdk_knidev_init(uint8_t port_id, struct vr_interface *vif)
{
    int i;
    struct rte_eth_dev_info dev_info;
    struct rte_kni_conf kni_conf;
    struct rte_kni_ops kni_ops;
    struct rte_kni *kni;
    struct rte_config *rte_conf = rte_eal_get_configuration();

    if (!vr_dpdk.kni_inited) {
        /*
         * If the host does not support KNIs (i.e. RedHat), we'll get
         * a panic here.
         */
        rte_kni_init(VR_DPDK_MAX_KNI_INTERFACES);
        vr_dpdk.kni_inited = true;
    }

    /* get eth device info */
    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);

    /* create KNI configuration */
    memset(&kni_conf, 0, sizeof(kni_conf));
    strncpy(kni_conf.name, (char *)vif->vif_name, sizeof(kni_conf.name) - 1);

    kni_conf.addr = dev_info.pci_dev->addr;
    kni_conf.id = dev_info.pci_dev->id;
    kni_conf.group_id = port_id;
    kni_conf.mbuf_size = VR_DPDK_MAX_PACKET_SZ;

    /*
     * Due to DPDK commit 41a6ebd, now to prevent packet reordering in KNI
     * we have to bind KNI kernel thread to a first online unused CPU.
     */
    for (i = 0; i < RTE_MAX_LCORE; i++) {
        if (lcore_config[i].detected
                && rte_conf->lcore_role[VR_DPDK_FWD_LCORE_ID + i] == ROLE_OFF) {
            kni_conf.force_bind = 1;
            kni_conf.core_id = i;
            RTE_LOG(INFO, VROUTER, "    bind KNI kernel thread to CPU %d\n", i);
            break;
        }
    }

    /* KNI options
     *
     * Changing state of the KNI interface can change state of the physical
     * interface. This is useful for the vhost, but not for the VLAN
     * forwarding interface.
     */
    if (vif->vif_type == VIF_TYPE_VLAN) {
        memset(&kni_ops, 0, sizeof(kni_ops));
    } else {
        kni_ops.port_id = port_id;
        kni_ops.change_mtu = dpdk_knidev_change_mtu;
        kni_ops.config_network_if = dpdk_knidev_config_network_if;
    }

    /* allocate KNI device */
    kni = rte_kni_alloc(vr_dpdk.rss_mempool, &kni_conf, &kni_ops);
    if (kni == NULL) {
        RTE_LOG(ERR, VROUTER, "    error allocating KNI device %s"
                " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return -ENOMEM;
    }

    /* store pointer to KNI for further use */
    vif->vif_os = kni;

    /* add interface to the table of KNIs */
    for (i = 0; i < VR_DPDK_MAX_KNI_INTERFACES; i++) {
        if (vr_dpdk.knis[i] == NULL) {
            vr_dpdk.knis[i] = vif->vif_os;
            break;
        }
    }

    return 0;
}
int kni_dev_init(struct rte_mempool *mbuf_pool)
{
    struct conf_sect *s = conf_get_sect("interface");
    struct conf_opt *opt;
    struct rte_kni_conf conf;
    struct rte_kni_ops ops;
    struct rte_kni *kni;
    struct net_device *dev;
    struct knidev *knidev;
    pthread_t tid;
    struct ifconfig_arg arg;
    int i = 0, x = rte_eth_dev_count();

    /* count the configured interfaces backed by KNI */
    for (opt = s->opt; opt; opt = opt->next) {
        const char *busid = conf_get_subopt(opt, "busid");
        if (!strcmp(busid, "kni"))
            kni_cnt++;
    }
    if (!kni_cnt)
        return 0;

    rte_kni_init(kni_cnt);
    dev_list = rte_malloc(NULL, kni_cnt * sizeof(void *), 0);

    memset(&conf, 0, sizeof(conf));
    memset(&ops, 0, sizeof(ops));
    ops.change_mtu = kni_change_mtu;
    ops.config_network_if = kni_config_network_if;

    for (opt = s->opt; opt; opt = opt->next) {
        const char *busid = conf_get_subopt(opt, "busid");
        if (strcmp(busid, "kni"))
            continue;

        snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", opt->name);
        conf.group_id = i;
        conf.mbuf_size = ETHER_MAX_LEN + 8;
        ops.port_id = i;

        kni = rte_kni_alloc(mbuf_pool, &conf, &ops);
        if (!kni) {
            fprintf(stderr, "failed to create %s\n", opt->name);
            return -1;
        }

        dev = netdev_alloc(opt->name, sizeof(*knidev), NULL);
        dev->xmit = knidev_xmit;
        knidev = netdev_priv(dev);
        knidev->port = i;
        knidev->xport = i + x;
        knidev->dev = dev;
        knidev->kni = kni;
        dev_list[i] = knidev;

        /* configure the interface from a helper thread while servicing
         * KNI requests here, then wait for the helper to finish */
        arg.dev = knidev;
        arg.opt = opt;
        arg.err = -1;
        pthread_create(&tid, NULL, kni_ifconfig, &arg);
        while (arg.err == -1)
            rte_kni_handle_request(kni);
        pthread_join(tid, NULL);
        if (arg.err != 0)
            return -1;

        i++;    /* next KNI port index */
    }

    return 0;
}
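All of the allocation examples above leave out the datapath side: once rte_kni_alloc() has returned a handle, packets still have to be moved between the ethdev and the KNI device, and rte_kni_handle_request() has to be called periodically so that the change_mtu/config_network_if callbacks registered in rte_kni_ops actually run. A minimal sketch of that forwarding loop is given below; it assumes a single-queue port that is already configured and started, and the burst size, function name, and infinite-loop structure are illustrative assumptions rather than code from any of the projects above.

#include <rte_ethdev.h>
#include <rte_kni.h>
#include <rte_mbuf.h>

#define PKT_BURST_SZ 32   /* illustrative burst size */

/* Hedged sketch: shuttle packets between ethdev queue 0 and an
 * already-allocated KNI handle, and service pending kernel requests. */
static void
kni_forward_loop(uint8_t port_id, struct rte_kni *kni)
{
    struct rte_mbuf *pkts[PKT_BURST_SZ];
    unsigned nb_rx, nb_tx, i;

    for (;;) {
        /* NIC -> kernel: receive from the port, push into the KNI fifo. */
        nb_rx = rte_eth_rx_burst(port_id, 0, pkts, PKT_BURST_SZ);
        nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);
        for (i = nb_tx; i < nb_rx; i++)
            rte_pktmbuf_free(pkts[i]);          /* drop what the fifo refused */

        /* kernel -> NIC: pull from the KNI fifo, transmit on the port. */
        nb_rx = rte_kni_rx_burst(kni, pkts, PKT_BURST_SZ);
        nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
        for (i = nb_tx; i < nb_rx; i++)
            rte_pktmbuf_free(pkts[i]);

        /* Run pending ifconfig/MTU requests from the kernel; these end up
         * in the change_mtu/config_network_if callbacks set at alloc time. */
        rte_kni_handle_request(kni);
    }
}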