Example #1
void
vr_htable_release_hentry(vr_htable_t htable, vr_hentry_t *ent)
{
    unsigned int cpu_num, delete_index;
    struct vr_hentry_delete_data *delete_data;
    vr_hentry_t *head_ent;
    struct vr_htable *table = (struct vr_htable *)htable;

    if (!(ent->hentry_flags & VR_HENTRY_FLAG_VALID))
        return;

    /* Account for the released entry in the usage counter */
    (void)__sync_sub_and_fetch(&table->ht_used_entries, 1);

    /* Mark it as Invalid */
    ent->hentry_flags &= ~VR_HENTRY_FLAG_VALID;

    if (ent->hentry_index < table->ht_hentries)
        return;

    if (vr_not_ready)
        return;

    ent->hentry_flags |= VR_HENTRY_FLAG_DELETE_MARKED;

    head_ent = __vr_htable_get_hentry_by_index(htable, ent->hentry_bucket_index);
    delete_index = head_ent->hentry_index / table->ht_bucket_size;
    delete_data = vr_btable_get(table->ht_dtable, delete_index);

    (void)__sync_add_and_fetch(&delete_data->hd_count, 1);

    /* Schedule the deletion only if it is not already scheduled */
    if (__sync_bool_compare_and_swap(&delete_data->hd_scheduled, 0, 1)) {

        delete_data->hd_table = (struct vr_htable *)htable;
        delete_data->hd_index = head_ent->hentry_index;

        /* Schedule the deletion on a cpu based on bucket index */
        cpu_num = head_ent->hentry_index % vr_num_cpus;
        if (vr_schedule_work(cpu_num, vr_htable_hentry_scheduled_delete,
                                                (void *)delete_data)) {
            /*
             * We can only write the status back as not scheduled. Some
             * entries might get marked as Deleted but will not be pushed
             * to the free list, since the work item was not scheduled.
             * Those marked entries are deleted only when this hash
             * bucket is revisited.
             */
            (void)__sync_bool_compare_and_swap(&delete_data->hd_scheduled, 1, 0);
        }
    }

    return;
}
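
Example #1 guards the work-queue submission with a compare-and-swap on hd_scheduled, so at most one deletion work item per delete-data slot is in flight, while hd_count separately tracks how many entries are pending. Below is a minimal standalone sketch of that schedule-once pattern; work_slot, slot_enqueue, and submit_work are illustrative stand-ins, not vrouter APIs.

/* Sketch: schedule-once guard built on the GCC __sync builtins used
 * above. Illustrative names only; submit_work() stands in for
 * vr_schedule_work() and returns 0 on success. */
#include <stdio.h>

struct work_slot {
    unsigned int pending;   /* entries waiting to be freed */
    unsigned int scheduled; /* 0/1: is a work item already queued? */
};

static int
submit_work(struct work_slot *slot)
{
    printf("work item queued, pending=%u\n", slot->pending);
    return 0;
}

static void
slot_enqueue(struct work_slot *slot)
{
    (void)__sync_add_and_fetch(&slot->pending, 1);

    /* Only the thread that flips scheduled from 0 to 1 submits work */
    if (__sync_bool_compare_and_swap(&slot->scheduled, 0, 1)) {
        if (submit_work(slot)) {
            /* Submission failed: clear the flag so a later caller retries */
            (void)__sync_bool_compare_and_swap(&slot->scheduled, 1, 0);
        }
    }
}

int
main(void)
{
    struct work_slot slot = { 0, 0 };
    slot_enqueue(&slot);
    slot_enqueue(&slot); /* bumps pending but does not resubmit */
    return 0;
}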
Example #2
static int
vr_flow_schedule_transition(struct vrouter *router, vr_flow_req *req,
        struct vr_flow_entry *fe)
{
    struct vr_flow_md *flmd = NULL;

    flmd = (struct vr_flow_md *)vr_malloc(sizeof(*flmd));
    if (!flmd)
        return -ENOMEM;

    flmd->flmd_router = router;
    flmd->flmd_index = req->fr_index;
    flmd->flmd_action = req->fr_action;
    flmd->flmd_flags = req->fr_flags;

    vr_schedule_work(vr_get_cpu(), vr_flow_flush, (void *)flmd);
    return 0;
}
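
Note that, unlike Example #1, this caller discards the return value of vr_schedule_work; if scheduling can fail, flmd is never freed. A more defensive sketch follows, under two assumptions not confirmed by the snippet above: that vr_schedule_work returns non-zero on failure (as the check in Example #1 suggests), and that vr_free is the counterpart to vr_malloc.

/* Sketch: same allocation/dispatch flow, but releasing flmd if the
 * work item cannot be scheduled. Not the upstream code. */
static int
vr_flow_schedule_transition_checked(struct vrouter *router, vr_flow_req *req,
        struct vr_flow_entry *fe)
{
    struct vr_flow_md *flmd;

    flmd = (struct vr_flow_md *)vr_malloc(sizeof(*flmd));
    if (!flmd)
        return -ENOMEM;

    flmd->flmd_router = router;
    flmd->flmd_index = req->fr_index;
    flmd->flmd_action = req->fr_action;
    flmd->flmd_flags = req->fr_flags;

    /* Assumption: vr_schedule_work() returns non-zero on failure */
    if (vr_schedule_work(vr_get_cpu(), vr_flow_flush, (void *)flmd)) {
        vr_free(flmd); /* assumed counterpart to vr_malloc() */
        return -EBUSY; /* illustrative error code */
    }

    return 0;
}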
Example #3
void
vr_htable_release_hentry(vr_htable_t htable, vr_hentry_t *ent)
{
    unsigned int cpu_num, delete_index;
    struct vr_hentry_delete_data *delete_data;
    vr_hentry_t *head_ent;
    struct vr_htable *table = (struct vr_htable *)htable;

    if (!(ent->hentry_flags & VR_HENTRY_FLAG_VALID))
        return;

    /* Mark it as Invalid */
    ent->hentry_flags &= ~VR_HENTRY_FLAG_VALID;

    if (ent->hentry_index < table->ht_hentries)
        return;

    if (vr_not_ready) {
        __vr_htable_oentry_invalidate(table, ent);
        return;
    }

    ent->hentry_flags |= VR_HENTRY_FLAG_DELETE_MARKED;

    head_ent = __vr_htable_get_hentry_by_index(htable, ent->hentry_bucket_index);
    delete_index = head_ent->hentry_index / table->ht_bucket_size;
    delete_data = vr_btable_get(table->ht_dtable, delete_index);

    /* Schedule the deletion only if it is not already scheduled */
    if (__sync_add_and_fetch(&delete_data->hd_scheduled, 1) == 1) {

        delete_data->hd_table = (struct vr_htable *)htable;
        delete_data->hd_index = head_ent->hentry_index;

        /* Schedule the deletion on a cpu based on bucket index */
        cpu_num = head_ent->hentry_index % vr_num_cpus;
        vr_schedule_work(cpu_num, vr_htable_hentry_scheduled_delete,
                                                (void *)delete_data);
    }

    return;
}
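
Example #3 drops the separate hd_count and folds the pending count and the scheduled flag into the single hd_scheduled counter: only the increment that takes it from 0 to 1 submits the work item, and the worker is presumably expected to reset the counter after draining the bucket. Reduced to a sketch with illustrative names (counted_slot and submit_counted_work are stand-ins, not vrouter APIs):

/* Sketch: counter-as-flag variant of the schedule-once guard. */
struct counted_slot {
    unsigned int scheduled; /* doubles as pending count and 0/1 flag */
};

/* Hypothetical stand-in for vr_schedule_work() */
extern int submit_counted_work(struct counted_slot *slot);

static void
counted_enqueue(struct counted_slot *slot)
{
    /* The first enqueuer (0 -> 1) owns the submission. Unlike
     * Example #1, a failed submission is not rolled back here,
     * matching Example #3, which also ignores the return value. */
    if (__sync_add_and_fetch(&slot->scheduled, 1) == 1)
        (void)submit_counted_work(slot);
}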