static inline void
send_burst(struct dpdk_knidev_writer *p)
{
    uint32_t sent;
    uint32_t i;

    /* Push all buffered mbufs to the KNI device in a single burst. */
    sent = rte_kni_tx_burst(p->kni, p->tx_buf, p->tx_buf_count);

    /* Account for and release every mbuf KNI did not accept. */
    DPDK_KNIDEV_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - sent);
    for (i = sent; i < p->tx_buf_count; i++) {
        /* TODO: a separate counter for this drop */
        vr_dpdk_pfree(p->tx_buf[i], VP_DROP_INTERFACE_DROP);
    }

    p->tx_buf_count = 0;
}
static int
dpdk_knidev_writer_tx(void *port, struct rte_mbuf *pkt)
{
    struct dpdk_knidev_writer *p = (struct dpdk_knidev_writer *) port;
    uintptr_t data_addr;

    /*
     * KNI kernel module uses a trick to speed up packet processing. It takes
     * a physical address of a memory pool, converts it to the kernel virtual
     * address with phys_to_virt() and saves the address.
     *
     * Then in kni_net_rx_normal() instead of using phys_to_virt() per each
     * packet, KNI just calculates the difference between the previously
     * converted physical address of the given mempool and the packets
     * physical address.
     *
     * It works well for the mbufs from the same mempool. It also works fine
     * with any mempool allocated from the same physically contiguous memory
     * segment.
     *
     * As soon as we get a mempool allocated from another memory segment, the
     * difference calculations fail and thus we might have a crash.
     *
     * So we make sure the packet is from the RSS mempool. If not, we make
     * a copy to the RSS mempool.
     */
    data_addr = rte_pktmbuf_mtod(pkt, uintptr_t);
    if (unlikely(pkt->pool != vr_dpdk.rss_mempool
            /* An indirect mbuf's data may live outside the RSS mempool. */
            || data_addr < vr_dpdk.rss_mempool->elt_va_start
            || data_addr > vr_dpdk.rss_mempool->elt_va_end)) {
        struct rte_mbuf *pkt_copy;

        pkt_copy = vr_dpdk_pktmbuf_copy(pkt, vr_dpdk.rss_mempool);
        /* Whether or not the copy succeeded, the original is not needed. */
        vr_dpdk_pfree(pkt, VP_DROP_CLONED_ORIGINAL);

        if (unlikely(pkt_copy == NULL)) {
            DPDK_KNIDEV_WRITER_STATS_PKTS_DROP_ADD(p, 1);
            return -1;
        }

        pkt = pkt_copy;
    }

    /* Buffer the mbuf; flush once the burst threshold is reached. */
    p->tx_buf[p->tx_buf_count++] = pkt;
    DPDK_KNIDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
    if (p->tx_buf_count >= p->tx_burst_sz)
        send_burst(p);

    return 0;
}
/*
 * vr_dpdk_packet_receive - handle a packet read from the agent's socket.
 *
 * The mbuf holding the received data is usockp->usock_mbuf; on return the
 * usocket's mbuf/buffer fields are reset regardless of the outcome (the
 * mbuf is either handed to vRouter or freed here).
 */
static void
vr_dpdk_packet_receive(struct vr_usocket *usockp)
{
    const unsigned lcore_id = rte_lcore_id();
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_interface_stats *stats;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d\n", __func__, pthread_self(),
                usockp->usock_fd);
    if (usockp->usock_vif) {
        /*
         * Fetch the per-lcore stats only when a vif is attached: calling
         * vif_get_stats() with a NULL vif (as the previous code did
         * unconditionally) dereferences the NULL pointer.
         */
        stats = vif_get_stats(usockp->usock_vif, lcore_id);
        /* Count a packet dequeued from the interface. */
        stats->vis_port_ipackets++;
        /* buf_addr and data_off do not change */
        usockp->usock_mbuf->data_len = usockp->usock_read_len;
        usockp->usock_mbuf->pkt_len = usockp->usock_read_len;
        /* convert mbuf to vr_packet */
        vr_dpdk_packet_get(usockp->usock_mbuf, usockp->usock_vif);
        /* send the mbuf to vRouter */
        vr_dpdk_lcore_vroute(lcore, usockp->usock_vif, &usockp->usock_mbuf, 1);
        /* flush packet TX queues immediately */
        vr_dpdk_lcore_flush(lcore);
    } else {
        /*
         * No vif attached: there is no per-vif stats block to update, so
         * just log the error and drop the mbuf.
         */
        RTE_LOG(ERR, VROUTER, "Error receiving from packet socket: no vif attached\n");
        vr_dpdk_pfree(usockp->usock_mbuf, VP_DROP_INTERFACE_DROP);
    }

    /* Ownership of the mbuf has been passed on (or it was freed). */
    usockp->usock_mbuf = NULL;
    usockp->usock_rx_buf = NULL;
    usockp->usock_buf_len = 0;

    return;
}