/**
 * Enqueue a packet to the assembler.
 *
 * Executed only from the forwarding lcores.
 */
int
dpdk_fragment_assembler_enqueue(struct vrouter *router, struct vr_packet *pkt,
                                struct vr_forwarding_md *fmd)
{
    int ret;
    unsigned int cpu;
    struct vr_dpdk_lcore *lcore;

    cpu = vr_get_cpu();
    if (cpu >= vr_num_cpus || cpu < VR_DPDK_FWD_LCORE_ID) {
        RTE_LOG(ERR, VROUTER, "%s:%d Enqueue to the assembler can only be "
                "done on forwarding lcores, not on cpu %u\n",
                __FUNCTION__, __LINE__, cpu);
        vr_pfree(pkt, VP_DROP_FRAGMENTS);
        return -EINVAL;
    }

    ret = vr_fragment_enqueue(router,
                              &per_cpu_queues[cpu - VR_DPDK_FWD_LCORE_ID].queue, pkt, fmd);

    if (!ret) {
        lcore = vr_dpdk.lcores[cpu];
        vr_dpdk_lcore_schedule_assembler_work(lcore, dpdk_fragment_assembler,
                                              &per_cpu_queues[cpu - VR_DPDK_FWD_LCORE_ID].queue);
    }

    /*
     * On success the queue owns the packet; on failure vr_fragment_enqueue
     * is expected to free it. Either way the caller sees success.
     */
    return 0;
}
}
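/*
 * A minimal caller sketch for the function above. The helper names
 * (dpdk_handle_ip_packet, is_ip_fragment, dpdk_forward_packet) are
 * hypothetical and only illustrate the intended call site: IP fragments
 * go to the assembler, everything else continues down the fast path.
 */
static int
dpdk_handle_ip_packet(struct vrouter *router, struct vr_packet *pkt,
                      struct vr_forwarding_md *fmd)
{
    struct vr_ip *ip = (struct vr_ip *)pkt_network_header(pkt);

    /* a fragment cannot be flow-classified until it is reassembled */
    if (is_ip_fragment(ip))
        return dpdk_fragment_assembler_enqueue(router, pkt, fmd);

    return dpdk_forward_packet(router, pkt, fmd);
}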
/**
 * Scan the assembler table for timed-out fragments.
 *
 * Called on forwarding lcores only.
 */
void
dpdk_fragment_assembler_table_scan(void *arg)
{
    unsigned int i, j, scanned = 0;
    unsigned int cpu;
    struct fragment_bucket *vfb;

    cpu = vr_get_cpu() - VR_DPDK_FWD_LCORE_ID;
    /* cpu is unsigned, so an underflow shows up here as a huge value */
    assert(cpu < (unsigned int)(vr_num_cpus - VR_DPDK_FWD_LCORE_ID));

    /* resume the round-robin scan where the previous call stopped */
    i = assembler_scan_index;
    for (j = 0; j < VR_LINUX_ASSEMBLER_BUCKETS; j++) {
        vfb = &assembler_table[cpu][(i + j) % VR_LINUX_ASSEMBLER_BUCKETS];

        if (vfb->frag_list)
            scanned += vr_assembler_table_scan(&vfb->frag_list);

        /* bound the amount of work done in a single invocation */
        if (scanned > assembler_scan_thresh) {
            j++;
            break;
        }
    }

    assembler_scan_index = (i + j) % VR_LINUX_ASSEMBLER_BUCKETS;
    return;
}
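/*
 * A sketch of how the scan could be driven periodically on a forwarding
 * lcore with DPDK's rte_timer API. The once-per-second period and the
 * timer object are assumptions for illustration; the real scheduling
 * hook in the source may differ. The owning lcore must call
 * rte_timer_manage() in its loop for the callback to fire.
 */
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_timer.h>

static struct rte_timer assembler_scan_timer;

static void
dpdk_assembler_scan_timer_cb(struct rte_timer *tim __rte_unused, void *arg)
{
    dpdk_fragment_assembler_table_scan(arg);
}

static void
dpdk_assembler_scan_timer_start(unsigned int lcore_id)
{
    rte_timer_init(&assembler_scan_timer);
    /* PERIODICAL: the timer re-arms itself, firing roughly once a second */
    rte_timer_reset(&assembler_scan_timer, rte_get_timer_hz(), PERIODICAL,
                    lcore_id, dpdk_assembler_scan_timer_cb, NULL);
}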
Example #3
static void
vr_flow_entry_set_hold(struct vrouter *router, struct vr_flow_entry *flow_e)
{
    unsigned int cpu;
    uint64_t act_count;
    struct vr_flow_table_info *infop = router->vr_flow_table_info;

    cpu = vr_get_cpu();
    if (cpu >= vr_num_cpus) {
        vr_printf("vrouter: Set HOLD failed (cpu %u num_cpus %u)\n",
                cpu, vr_num_cpus);
        return;
    }

    flow_e->fe_action = VR_FLOW_ACTION_HOLD;

    /*
     * If the per-cpu hold counter is about to wrap, record the overflow
     * and cancel as much of the counter as possible against the shared
     * action count before incrementing.
     */
    if (infop->vfti_hold_count[cpu] + 1 < infop->vfti_hold_count[cpu]) {
        (void)__sync_add_and_fetch(&infop->vfti_oflows, 1);
        act_count = infop->vfti_action_count;
        if (act_count > infop->vfti_hold_count[cpu]) {
            (void)__sync_sub_and_fetch(&infop->vfti_action_count,
                    infop->vfti_hold_count[cpu]);
            infop->vfti_hold_count[cpu] = 0;
        } else {
            infop->vfti_hold_count[cpu] -= act_count;
            (void)__sync_sub_and_fetch(&infop->vfti_action_count,
                    act_count);
        }
    }

    infop->vfti_hold_count[cpu]++;

    return;
}
Example #4
static void
vr_flow_entry_set_hold(struct vrouter *router, struct vr_flow_entry *flow_e)
{
    unsigned int cpu;
    uint64_t act_count;
    struct vr_flow_table_info *infop = router->vr_flow_table_info;

    cpu = vr_get_cpu();
    flow_e->fe_action = VR_FLOW_ACTION_HOLD;

    if (infop->vfti_hold_count[cpu] + 1 < infop->vfti_hold_count[cpu]) {
        act_count = infop->vfti_action_count;
        if (act_count > infop->vfti_hold_count[cpu]) {
            (void)__sync_sub_and_fetch(&infop->vfti_action_count,
                    infop->vfti_hold_count[cpu]);
            infop->vfti_hold_count[cpu] = 0;
        } else {
            infop->vfti_hold_count[cpu] -= act_count;
            (void)__sync_sub_and_fetch(&infop->vfti_action_count,
                    act_count);
        }
    }

    infop->vfti_hold_count[cpu]++;

    return;
}
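/*
 * The "count + 1 < count" test above is a wrap-around guard: before the
 * per-cpu hold counter overflows, the code drains it against the shared
 * vfti_action_count. A sketch of the reader side under that scheme
 * (assumed here, not taken from the original source): the number of
 * flows still in HOLD is the per-cpu hold total minus the actions
 * already applied.
 */
static uint64_t
vr_flow_hold_flows_sketch(struct vrouter *router)
{
    unsigned int cpu;
    uint64_t holds = 0;
    struct vr_flow_table_info *infop = router->vr_flow_table_info;

    for (cpu = 0; cpu < vr_num_cpus; cpu++)
        holds += infop->vfti_hold_count[cpu];

    /* racy by design: an approximate gauge, not an exact counter */
    return holds - infop->vfti_action_count;
}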
Example #5
void
vr_free_stats(unsigned int object)
{
    struct vrouter *router = vrouter_get(0);
    unsigned int cpu;

    cpu = vr_get_cpu();
    if (router->vr_malloc_stats && router->vr_malloc_stats[cpu])
        router->vr_malloc_stats[cpu][object].ms_free++;

    return;
}
/**
 * @name Private functions
 * @{
 */
static void
dpdk_fragment_assembler(void *arg)
{
    uint32_t hash, index;
    unsigned int cpu;
    struct vr_packet *pkt;
    struct fragment_bucket *bucket;
    struct vr_fragment_queue_element *tail, *tail_n, *tail_p, *tail_pn;
    struct per_cpu_fragment_queue *queue =
        (struct per_cpu_fragment_queue *)arg;

    /* atomically detach the entire pending list from the producers */
    tail = __sync_lock_test_and_set(&queue->queue.vfq_tail, NULL);
    if (!tail)
        return;

    cpu = vr_get_cpu() - VR_DPDK_FWD_LCORE_ID;
    /* cpu is unsigned, so an underflow shows up here as a huge value */
    assert(cpu < (unsigned int)(vr_num_cpus - VR_DPDK_FWD_LCORE_ID));

    /*
     * first, reverse the list, since packets that came later are at the
     * head of the list
     */
    tail_p = tail->fqe_next;
    tail->fqe_next = NULL;
    while (tail_p) {
        tail_pn = tail_p->fqe_next;
        tail_p->fqe_next = tail;
        tail = tail_p;
        tail_p = tail_pn;
    }

    /* go through the list and insert it in the assembler work area */
    while (tail) {
        tail_n = tail->fqe_next;
        tail->fqe_next = NULL;

        pkt = tail->fqe_pnode.pl_packet;
        if (pkt) {
            hash = vr_fragment_get_hash(tail->fqe_pnode.pl_vrf, pkt);
            index = (hash % VR_LINUX_ASSEMBLER_BUCKETS);
            bucket = &assembler_table[cpu][index];

            vr_fragment_assembler(&bucket->frag_list, tail);
        }

        tail = tail_n;
    }

    return;
}
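/*
 * Why the consumer above must reverse the list: producers push new
 * fragments at the head of the queue with a lock-free compare-and-swap,
 * so the most recent packet ends up first. A sketch of that producer
 * side (the real vr_fragment_enqueue lives in shared code and may
 * differ in detail):
 */
static void
fragment_queue_push_sketch(struct vr_fragment_queue *queue,
                           struct vr_fragment_queue_element *fqe)
{
    struct vr_fragment_queue_element *head;

    do {
        head = queue->vfq_tail;
        fqe->fqe_next = head;
        /* retry if another producer (or the consumer's swap-to-NULL)
         * changed the head underneath us */
    } while (!__sync_bool_compare_and_swap(&queue->vfq_tail, head, fqe));
}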
Example #7
void
vr_malloc_stats(unsigned int size, unsigned int object)
{
    struct vrouter *router = vrouter_get(0);
    unsigned int cpu;

    cpu = vr_get_cpu();
    if (router->vr_malloc_stats && router->vr_malloc_stats[cpu]) {
        router->vr_malloc_stats[cpu][object].ms_size += size;
        router->vr_malloc_stats[cpu][object].ms_alloc++;
    }

    return;
}
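/*
 * A sketch of how the accounting hook pairs up with the allocator
 * itself. This is simplified: the real vr_malloc is host-specific and
 * must also remember the object type somewhere (e.g. a small header in
 * front of the buffer) so that vr_free can credit ms_free correctly.
 */
#include <stdlib.h>

void *
vr_malloc_sketch(unsigned int size, unsigned int object)
{
    void *mem = malloc(size);

    /* account only for allocations that actually succeeded */
    if (mem)
        vr_malloc_stats(size, object);

    return mem;
}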
Example #8
static int
vr_flow_schedule_transition(struct vrouter *router, vr_flow_req *req,
        struct vr_flow_entry *fe)
{
    struct vr_flow_md *flmd = NULL;

    flmd = (struct vr_flow_md *)vr_malloc(sizeof(*flmd));
    if (!flmd)
        return -ENOMEM;

    flmd->flmd_router = router;
    flmd->flmd_index = req->fr_index;
    flmd->flmd_action = req->fr_action;
    flmd->flmd_flags = req->fr_flags;

    vr_schedule_work(vr_get_cpu(), vr_flow_flush, (void *)flmd);
    return 0;
}
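/*
 * The receiving end of the deferred work scheduled above. This is only
 * a sketch of the expected shape of vr_flow_flush: the lookup helper
 * vr_flow_entry_get_sketch is hypothetical, and the real flush logic
 * lives elsewhere in the source. The one hard requirement visible from
 * the scheduler is that the callback frees the flmd it receives.
 */
static void
vr_flow_flush_sketch(void *arg)
{
    struct vr_flow_md *flmd = (struct vr_flow_md *)arg;
    struct vr_flow_entry *fe;

    fe = vr_flow_entry_get_sketch(flmd->flmd_router, flmd->flmd_index);
    if (fe) {
        /* ...apply flmd->flmd_action and flmd->flmd_flags to fe... */
    }

    vr_free(flmd);
}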
int
lh_enqueue_to_assembler(struct vrouter *router, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd)
{
    int ret;
    unsigned int cpu;

    cpu = vr_get_cpu();
    if (cpu >= vr_num_cpus) {
        printk("vrouter: cpu %u is out of range (only %u cpus)\n",
                cpu, vr_num_cpus);
        vr_pfree(pkt, VP_DROP_FRAGMENTS);
        return -EINVAL;
    }

    ret = vr_fragment_enqueue(router, &vr_lfq_pcpu_queues[cpu].vrlfq_queue,
            pkt, fmd);
    if (!ret)
        queue_work(vr_linux_assembler_wq, &vr_lfq_pcpu_queues[cpu].vrlfq_work);

    return 0;
}
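/*
 * A sketch of the setup the function above depends on: a dedicated
 * workqueue plus one work_struct per cpu. The handler name
 * vr_linux_fragment_assembler and the exact init flow are assumptions;
 * only vr_linux_assembler_wq and vr_lfq_pcpu_queues appear in the
 * snippet itself.
 */
static void vr_linux_fragment_assembler(struct work_struct *work);

static int
vr_linux_assembler_wq_init_sketch(void)
{
    unsigned int cpu;

    vr_linux_assembler_wq = create_workqueue("vr_linux_assembler");
    if (!vr_linux_assembler_wq)
        return -ENOMEM;

    for (cpu = 0; cpu < vr_num_cpus; cpu++)
        INIT_WORK(&vr_lfq_pcpu_queues[cpu].vrlfq_work,
                  vr_linux_fragment_assembler);

    return 0;
}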
Example #10
static int
vr_malloc_stats_init(struct vrouter *router)
{
    unsigned int i, size, cpu, total_size = 0;

    if (router->vr_malloc_stats)
        return 0;

    size = vr_num_cpus * sizeof(void *);
    router->vr_malloc_stats = vr_zalloc(size, VR_MALLOC_OBJECT);
    if (!router->vr_malloc_stats)
        return -ENOMEM;
    total_size += size;

    size = VR_VROUTER_MAX_OBJECT * sizeof(struct vr_malloc_stats);
    /*
     * align the allocation to the cache line size so that the per-cpu
     * counters do not share cache lines (avoiding false sharing)
     */
    if (size % 64) {
        size = size + (64 - (size % 64));
    }

    for (i = 0; i < vr_num_cpus; i++) {
        router->vr_malloc_stats[i] = vr_zalloc(size, VR_MALLOC_OBJECT);
        if (!router->vr_malloc_stats[i])
            return -ENOMEM;
        total_size += size;
    }

    /* account for the allocations made above: the table plus one array per cpu */
    cpu = vr_get_cpu();
    router->vr_malloc_stats[cpu][VR_MALLOC_OBJECT].ms_alloc = vr_num_cpus + 1;
    router->vr_malloc_stats[cpu][VR_MALLOC_OBJECT].ms_size = total_size;

    return 0;
}
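/*
 * The matching teardown, sketched under the assumption that vr_free
 * mirrors the two-argument vr_zalloc used above; the source's actual
 * exit path may differ. Per-cpu arrays go first, then the pointer table.
 */
static void
vr_malloc_stats_exit_sketch(struct vrouter *router)
{
    unsigned int i;

    if (!router->vr_malloc_stats)
        return;

    for (i = 0; i < vr_num_cpus; i++) {
        if (router->vr_malloc_stats[i])
            vr_free(router->vr_malloc_stats[i], VR_MALLOC_OBJECT);
    }

    vr_free(router->vr_malloc_stats, VR_MALLOC_OBJECT);
    router->vr_malloc_stats = NULL;
}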