Example #1
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	const struct rte_mempool *mp;
	struct rte_kni_memzone_slot *slot = NULL;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_memzone_pool.initialized != 1) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	/* Get an available slot from the pool */
	slot = kni_memzone_pool_alloc();
	if (!slot) {
		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
			kni_memzone_pool.max_ifaces);
		return NULL;
	}

	/* Recover ctx */
	ctx = slot->m_ctx->addr;
	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		/* Release the slot acquired above; returning directly would leak it */
		goto kni_fail;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;
#ifdef RTE_LIBRW_PIOT
	dev_info.no_data = conf->no_data;
	dev_info.no_pci = conf->no_pci;
	dev_info.ifindex = conf->ifindex;
	dev_info.always_up = conf->always_up;
	dev_info.no_tx = conf->no_tx;
	dev_info.loopback = conf->loopback;
	dev_info.no_user_ring = conf->no_user_ring;
	dev_info.mtu = conf->mtu;
	dev_info.vlanid = conf->vlanid;
	memcpy(dev_info.mac, conf->mac, 6);
	strncpy(dev_info.netns_name, conf->netns_name,
		sizeof(dev_info.netns_name));
	dev_info.netns_fd = conf->netns_fd;
	dev_info.pid = getpid();
#ifdef RTE_LIBRW_NOHUGE
	dev_info.nohuge = conf->nohuge;
	dev_info.nl_pid = conf->nl_pid;
#endif
#endif
	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
			dev_info.vendor_id, dev_info.device_id);
	/* TX RING */
	mz = slot->m_tx_q;
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	mz = slot->m_rx_q;
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	mz = slot->m_alloc_q;
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	mz = slot->m_free_q;
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;
#ifndef RTE_LIBRW_PIOT
	/* Request RING */
	mz = slot->m_req_q;
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	mz = slot->m_resp_q;
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	mz = slot->m_sync_addr;
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;
#endif

	/* MBUF mempool */
	snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT,
		pktmbuf_pool->name);
	mz = rte_memzone_lookup(mz_name);
	KNI_MEM_CHECK(mz == NULL);
	mp = (struct rte_mempool *)mz->addr;
	/* KNI currently requires to have only one memory chunk */
	if (mp->nb_mem_chunks != 1)
		goto kni_fail;

	dev_info.mbuf_va = STAILQ_FIRST(&mp->mem_list)->addr;
	dev_info.mbuf_phys = STAILQ_FIRST(&mp->mem_list)->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->slot_id = slot->id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MEM_CHECK(ret < 0);

	ctx->in_use = 1;

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(ctx);

	return ctx;

kni_fail:
	if (slot)
		kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);

	return NULL;
}
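For context, a minimal caller sketch for the pool-based allocator above. The interface name "vEth0", the 2048-byte mbuf size, and the callback wiring are illustrative assumptions, not taken from the example.

/* Hedged usage sketch for Example #1: initialize the memzone pool once,
 * then allocate one interface. All concrete values here are assumptions. */
static struct rte_kni *
create_kni_iface(struct rte_mempool *mb_pool)
{
	struct rte_kni_conf conf;
	struct rte_kni_ops ops;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth0");
	conf.group_id = 0;
	conf.mbuf_size = 2048;	/* should match the pool's data room size */

	memset(&ops, 0, sizeof(ops));
	/* optional change_mtu/config_network_if handlers would go here */

	rte_kni_init(1);	/* reserve memzone slots for one interface */
	return rte_kni_alloc(mb_pool, &conf, &ops);
}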
Example #2
struct rte_kni *
rte_kni_create(uint8_t port_id,
		unsigned mbuf_size,
		struct rte_mempool *pktmbuf_pool,
		struct rte_kni_ops *ops)
{
	struct rte_kni_device_info dev_info;
	struct rte_eth_dev_info eth_dev_info;
	struct rte_kni *ctx;
	char itf_name[IFNAMSIZ];
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	const struct rte_memzone *mz;

	if (port_id >= RTE_MAX_ETHPORTS || pktmbuf_pool == NULL || !ops)
		return NULL;

	/* Check FD and open once */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI, "Can not open /dev/%s\n",
							KNI_DEVICE);
			return NULL;
		}
	}

	rte_eth_dev_info_get(port_id, &eth_dev_info);
	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
					eth_dev_info.pci_dev->addr.bus,
					eth_dev_info.pci_dev->addr.devid,
					eth_dev_info.pci_dev->addr.function,
					eth_dev_info.pci_dev->id.vendor_id,
					eth_dev_info.pci_dev->id.device_id);
	/* Zero dev_info first so unset fields do not pass stack garbage
	 * to the ioctl below */
	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = eth_dev_info.pci_dev->addr.bus;
	dev_info.devid = eth_dev_info.pci_dev->addr.devid;
	dev_info.function = eth_dev_info.pci_dev->addr.function;
	dev_info.vendor_id = eth_dev_info.pci_dev->id.vendor_id;
	dev_info.device_id = eth_dev_info.pci_dev->id.device_id;

	ctx = rte_zmalloc("kni devs", sizeof(struct rte_kni), 0);
	if (ctx == NULL)
		rte_panic("Cannot allocate memory for kni dev\n");
	memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	rte_snprintf(itf_name, IFNAMSIZ, "vEth%u", port_id);
	rte_snprintf(ctx->name, IFNAMSIZ, "%s", itf_name);
	rte_snprintf(dev_info.name, IFNAMSIZ, "%s", itf_name);

	/* TX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_tx_%d queue\n", port_id);
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_rx_%d queue\n", port_id);
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_alloc_%d queue\n", port_id);
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_free_%d queue\n", port_id);
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_req_%d ring\n", port_id);
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_resp_%d ring\n", port_id);
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_sync_%d mem\n", port_id);
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	/* MBUF mempool */
	mz = rte_memzone_lookup("MP_mbuf_pool");
	if (mz == NULL) {
		RTE_LOG(ERR, KNI, "Can not find MP_mbuf_pool\n");
		goto fail;
	}
	dev_info.mbuf_va = mz->addr;
	dev_info.mbuf_phys = mz->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->port_id = port_id;
	ctx->mbuf_size = mbuf_size;

	/* Configure the buffer size which will be checked in kernel module */
	dev_info.mbuf_size = ctx->mbuf_size;

	if (ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to create kni device\n");
		goto fail;
	}

	return ctx;

fail:
	if (ctx != NULL)
		rte_free(ctx);

	return NULL;
}
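A matching caller sketch for this older per-port variant. The ops wiring and the 2048-byte mbuf size are assumptions for illustration; note that this API requires a non-NULL ops pointer.

/* Hedged usage sketch for Example #2: one KNI context per Ethernet port. */
static struct rte_kni *
create_kni_for_port(uint8_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_kni_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.port_id = port_id;
	/* ops.change_mtu / ops.config_network_if would be set here if the
	 * application services kernel requests */

	return rte_kni_create(port_id, 2048 /* assumed mbuf size */, mb_pool, &ops);
}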
Example #3
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
	const struct rte_memzone *mz;
	struct rte_kni_memzone_slot *slot = NULL;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_memzone_pool.initialized != 1) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	/* Get an available slot from the pool */
	slot = kni_memzone_pool_alloc();
	if (!slot) {
		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
			kni_memzone_pool.max_ifaces);
		return NULL;
	}

	/* Recover ctx */
	ctx = slot->m_ctx->addr;
	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		/* Release the slot acquired above; returning directly would leak it */
		goto kni_fail;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;

	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
			dev_info.vendor_id, dev_info.device_id);
	/* TX RING */
	mz = slot->m_tx_q;
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	mz = slot->m_rx_q;
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	mz = slot->m_alloc_q;
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	mz = slot->m_free_q;
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	mz = slot->m_req_q;
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	mz = slot->m_resp_q;
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	mz = slot->m_sync_addr;
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->slot_id = slot->id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MEM_CHECK(ret < 0);

	ctx->in_use = 1;

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(ctx);

	return ctx;

kni_fail:
	if (slot)
		kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);

	return NULL;
}
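Once an interface exists, the data path is symmetric bursts over the rings set up above. A hedged forwarding sketch follows; port 0, queue 0, and the burst size of 32 are assumptions.

/* Shuttle packets between Ethernet port 0 and the KNI device, and service
 * MTU/link-state requests queued by the kernel module. */
static void
kni_forward_loop(struct rte_kni *kni)
{
	struct rte_mbuf *pkts[32];
	unsigned nb, sent;

	for (;;) {
		/* NIC -> kernel: rte_kni_tx_burst() enqueues onto the rx_q
		 * ring; free whatever did not fit */
		nb = rte_eth_rx_burst(0, 0, pkts, 32);
		sent = rte_kni_tx_burst(kni, pkts, nb);
		while (sent < nb)
			rte_pktmbuf_free(pkts[sent++]);

		/* kernel -> NIC: rte_kni_rx_burst() dequeues from the tx_q ring */
		nb = rte_kni_rx_burst(kni, pkts, 32);
		sent = rte_eth_tx_burst(0, 0, pkts, nb);
		while (sent < nb)
			rte_pktmbuf_free(pkts[sent++]);

		rte_kni_handle_request(kni);
	}
}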
Example #4
File: kni_vhost.c  Project: fleitner/dpdk
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

	q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					       &kni_raw_proto);
	if (q == NULL)
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
			   GFP_KERNEL);
	if (!q->cache) {
		err = -ENOMEM;	/* err still holds 0 from sock_create_lite() */
		goto free_fd;
	}

	fifo = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *) +
		       sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo) {
		err = -ENOMEM;
		goto free_cache;
	}

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void**)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	wmb();

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
		  "sk->sk_wq=0x%16llx",
		  q->sockfd, (uint64_t)q->sock->wq,
		  (uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
		  "sk->sk_sleep=0x%16llx",
		  q->sockfd, (uint64_t)&q->sock->wait,
		  (uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	sock_release(q->sock);
	/* sock_release() has freed the socket; clearing q->sock->ops here
	 * would be a use-after-free */
	q->sock = NULL;

free_sk:
	sk_free((struct sock*)q);

	return err;
}
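The backend above leans on kni_fifo_init()/kni_fifo_put(). A minimal sketch of the single-producer/single-consumer ring these helpers operate on; the field names follow DPDK's rte_kni_fifo layout but are reproduced here as an assumption, not from the project source.

/* Lock-free SPSC ring shared between userspace and the kernel module.
 * Each side writes only its own index, so no locking is needed. */
struct kni_fifo_sketch {
	volatile unsigned write;	/* producer index */
	volatile unsigned read;		/* consumer index */
	unsigned len;			/* ring size, a power of two */
	unsigned elem_size;
	void *volatile buffer[];	/* queued element pointers */
};

/* Enqueue up to num pointers; returns how many actually fit. */
static inline unsigned
fifo_sketch_put(struct kni_fifo_sketch *fifo, void **data, unsigned num)
{
	unsigned i, next, wr = fifo->write;

	for (i = 0; i < num; i++) {
		next = (wr + 1) & (fifo->len - 1);
		if (next == fifo->read)		/* ring full */
			break;
		fifo->buffer[wr] = data[i];
		wr = next;
	}
	fifo->write = wr;	/* publish entries after the stores above */
	return i;
}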
Example #5
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check FD and open once */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI, "Can not open /dev/%s\n",
							KNI_DEVICE);
			return NULL;
		}
	}

	rte_snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);
	rte_snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%s", intf_name);
	mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
				SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx = mz->addr;

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		goto fail;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;

	rte_snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
	rte_snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
			dev_info.vendor_id, dev_info.device_id);

	/* TX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%s", intf_name);
	mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MZ_CHECK(mz == NULL);
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	/* MBUF mempool */
	rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", pktmbuf_pool->name);
	mz = rte_memzone_lookup(mz_name);
	KNI_MZ_CHECK(mz == NULL);
	dev_info.mbuf_va = mz->addr;
	dev_info.mbuf_phys = mz->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MZ_CHECK(ret < 0);

	ctx->in_use = 1;

	return ctx;

fail:
	return NULL;
}
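Whichever variant created the interface, teardown goes through rte_kni_release(). A minimal sketch, assuming the standard API is available.

/* Hedged teardown sketch: unregister the kernel netdev and mark the
 * context unused. */
static void
destroy_kni_iface(struct rte_kni *kni)
{
	if (kni == NULL)
		return;
	if (rte_kni_release(kni) < 0)
		RTE_LOG(ERR, KNI, "Failed to release KNI interface\n");
}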