int
rte_kni_release(struct rte_kni *kni)
{
	struct rte_kni_device_info dev_info;
	uint32_t slot_id;

	if (!kni || !kni->in_use)
		return -1;

	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Failed to release KNI device\n");
		return -1;
	}

	/* mbufs in all fifos should be released, except request/response */
	kni_free_fifo(kni->tx_q);
	kni_free_fifo(kni->rx_q);
	kni_free_fifo(kni->alloc_q);
	kni_free_fifo(kni->free_q);

	slot_id = kni->slot_id;

	/* Memset the KNI struct */
	memset(kni, 0, sizeof(struct rte_kni));

	/* Release memzone; valid slot IDs are 0 .. max_ifaces - 1 */
	if (slot_id >= kni_memzone_pool.max_ifaces) {
		rte_panic("KNI pool: corrupted slot ID: %d, max: %d\n",
			slot_id, kni_memzone_pool.max_ifaces);
	}
	kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);

	return 0;
}
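/*
 * A minimal teardown sketch (illustrative only, not part of this library):
 * it assumes "kni" was returned by rte_kni_alloc() and that any datapath
 * cores using it have already been stopped, since rte_kni_release() frees
 * the FIFOs and recycles the memzone slot.
 */
static void
app_kni_teardown(struct rte_kni *kni)
{
	if (kni == NULL)
		return;

	if (rte_kni_release(kni) < 0)
		RTE_LOG(WARNING, KNI, "app: KNI release failed\n");
}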
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	const struct rte_mempool *mp;
	struct rte_kni_memzone_slot *slot = NULL;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_memzone_pool.initialized != 1) {
		RTE_LOG(ERR, KNI,
			"KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	/* Get an available slot from the pool */
	slot = kni_memzone_pool_alloc();
	if (!slot) {
		RTE_LOG(ERR, KNI,
			"Cannot allocate more KNI interfaces; increase max_kni_ifaces (current %d) or release unused ones.\n",
			kni_memzone_pool.max_ifaces);
		return NULL;
	}

	/* Recover ctx */
	ctx = slot->m_ctx->addr;
	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		return NULL;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;
#ifdef RTE_LIBRW_PIOT
	dev_info.no_data = conf->no_data;
	dev_info.no_pci = conf->no_pci;
	dev_info.ifindex = conf->ifindex;
	dev_info.always_up = conf->always_up;
	dev_info.no_tx = conf->no_tx;
	dev_info.loopback = conf->loopback;
	dev_info.no_user_ring = conf->no_user_ring;
	dev_info.mtu = conf->mtu;
	dev_info.vlanid = conf->vlanid;
	memcpy(dev_info.mac, conf->mac, 6);
	strncpy(dev_info.netns_name, conf->netns_name,
		sizeof(dev_info.netns_name));
	dev_info.netns_fd = conf->netns_fd;
	dev_info.pid = getpid();
#ifdef RTE_LIBRW_NOHUGE
	dev_info.nohuge = conf->nohuge;
	dev_info.nl_pid = conf->nl_pid;
#endif
#endif
	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
		dev_info.vendor_id, dev_info.device_id);

	/* TX RING */
	mz = slot->m_tx_q;
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	mz = slot->m_rx_q;
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	mz = slot->m_alloc_q;
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	mz = slot->m_free_q;
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

#ifndef RTE_LIBRW_PIOT
	/* Request RING */
	mz = slot->m_req_q;
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	mz = slot->m_resp_q;
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	mz = slot->m_sync_addr;
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;
#endif

	/* MBUF mempool */
	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT,
		pktmbuf_pool->name);
	mz = rte_memzone_lookup(mz_name);
	KNI_MEM_CHECK(mz == NULL);
	mp = (struct rte_mempool *)mz->addr;
	/* KNI currently requires the mempool to have a single memory chunk */
	if (mp->nb_mem_chunks != 1)
		goto kni_fail;
	dev_info.mbuf_va = STAILQ_FIRST(&mp->mem_list)->addr;
	dev_info.mbuf_phys = STAILQ_FIRST(&mp->mem_list)->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->slot_id = slot->id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MEM_CHECK(ret < 0);

	ctx->in_use = 1;

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(ctx);

	return ctx;

kni_fail:
	if (slot)
		kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);

	return NULL;
}
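/*
 * A minimal allocation sketch (illustrative only, not part of this library).
 * The pool name, interface name, pool sizing and the NULL ops argument are
 * assumptions for the example; a real application would also fill conf.addr
 * and conf.id from the bound device and pass rte_kni_ops callbacks so the
 * kernel's config requests can be serviced. rte_kni_init() must have been
 * called once beforehand.
 */
static struct rte_kni *
app_kni_setup(void)
{
	struct rte_mempool *mp;
	struct rte_kni_conf conf;

	/* Single-chunk pktmbuf pool, as required by the check above */
	mp = rte_pktmbuf_pool_create("kni_mbuf_pool", 4096, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return NULL;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth0"); /* hypothetical name */
	conf.group_id = 0;
	conf.mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE;

	return rte_kni_alloc(mp, &conf, NULL);
}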