/*
 * Tear down the per-forwarding-lcore fragment queues. Safe to call
 * more than once: the array pointer is nulled after release.
 */
static void
dpdk_fragment_queue_exit(void)
{
    int lcore;

    if (per_cpu_queues == NULL)
        return;

    /* Release any fragments still parked on a non-empty queue. */
    for (lcore = 0; lcore < vr_dpdk.nb_fwd_lcores; lcore++) {
        if (per_cpu_queues[lcore].queue.vfq_tail == NULL)
            continue;
        vr_fragment_queue_free(&per_cpu_queues[lcore].queue);
    }

    vr_free(per_cpu_queues, VR_FRAGMENT_QUEUE_OBJECT);
    per_cpu_queues = NULL;  /* guard against double-free on re-entry */
}
/*
 * Tear down the Linux per-cpu fragment queues and their assembler
 * workqueue. Idempotent: both global pointers are nulled once freed.
 */
static void
vr_linux_fragment_queue_exit(void)
{
    int cpu;

    if (vr_lfq_pcpu_queues == NULL)
        return;

    /*
     * Quiesce the assembler workqueue first so no worker is touching
     * the per-cpu queues while we free them below.
     */
    if (vr_linux_assembler_wq != NULL) {
        for (cpu = 0; cpu < vr_num_cpus; cpu++)
            cancel_work_sync(&vr_lfq_pcpu_queues[cpu].vrlfq_work);
        flush_workqueue(vr_linux_assembler_wq);
        destroy_workqueue(vr_linux_assembler_wq);
        vr_linux_assembler_wq = NULL;
    }

    /* Drop any fragments still queued, then the queue array itself. */
    for (cpu = 0; cpu < vr_num_cpus; cpu++)
        vr_fragment_queue_free(&vr_lfq_pcpu_queues[cpu].vrlfq_queue);

    vr_free(vr_lfq_pcpu_queues, VR_FRAGMENT_QUEUE_OBJECT);
    vr_lfq_pcpu_queues = NULL;  /* guard against double-free on re-entry */
}