/**
 * @name Private functions
 * @{
 */
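/**
 * dpdk_fragment_assembler - drain the per-CPU fragment queue passed in @a arg.
 *
 * The pending list is detached atomically, reversed to restore arrival order,
 * and each queued packet is hashed into the assembler bucket that belongs to
 * this forwarding lcore.
 */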
static void
dpdk_fragment_assembler(void *arg)
{
    uint32_t hash, index;
    unsigned int cpu;
    struct vr_packet *pkt;
    struct fragment_bucket *bucket;
    struct vr_fragment_queue_element *tail, *tail_n, *tail_p, *tail_pn;
    struct per_cpu_fragment_queue *queue =
        (struct per_cpu_fragment_queue *)arg;

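    /* atomically detach the pending list; producers keep enqueueing onto an
     * empty queue while this batch is drained */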
    tail = __sync_lock_test_and_set(&queue->queue.vfq_tail, NULL);
    if (!tail)
        return;

    cpu = vr_get_cpu() - VR_DPDK_FWD_LCORE_ID;
    /* cpu is unsigned, so a vr_get_cpu() below VR_DPDK_FWD_LCORE_ID wraps to a
     * large value and is still caught by the upper-bound check */
    assert(cpu < (vr_num_cpus - VR_DPDK_FWD_LCORE_ID));

    /*
     * first, reverse the list, since packets that came later are at the
     * head of the list
     */
    tail_p = tail->fqe_next;
    tail->fqe_next = NULL;
    while (tail_p) {
        tail_pn = tail_p->fqe_next;
        tail_p->fqe_next = tail;
        tail = tail_p;
        tail_p = tail_pn;
    }

    /* walk the list and insert each element into the assembler work area */
    while (tail) {
        tail_n = tail->fqe_next;
        tail->fqe_next = NULL;

        pkt = tail->fqe_pnode.pl_packet;
        if (pkt) {
            hash = vr_fragment_get_hash(&tail->fqe_pnode);
            index = (hash % VR_ASSEMBLER_BUCKET_COUNT);
            bucket = &assembler_table[cpu][index];

            vr_fragment_assemble(&bucket->frag_list, tail);
        }

        tail = tail_n;
    }

    return;
}
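
/**
 * lh_fragment_sync_assemble - assemble a queued fragment in the calling
 * context: hash its packet node into the Linux assembler table and insert it
 * into the matching bucket under the bucket's bottom-half spinlock.
 */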
void
lh_fragment_sync_assemble(struct vr_fragment_queue_element *vfqe)
{
    uint32_t hash, index;
    struct vr_linux_fragment_bucket *vfb;

    hash = vr_fragment_get_hash(&vfqe->fqe_pnode);
    index = (hash % VR_LINUX_ASSEMBLER_BUCKETS);
    vfb = &vr_linux_assembler_table[index];

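    /* the bottom-half lock serializes with other contexts updating this
     * bucket's fragment list */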
    spin_lock_bh(&vfb->vfb_lock);
    vr_fragment_assemble(&vfb->vfb_frag_list, vfqe);
    spin_unlock_bh(&vfb->vfb_lock);
}