Exemplo n.º 1
0
int
__vr_mirror_del(struct vrouter *router, unsigned int index)
{
    struct vr_mirror_entry *entry;
    struct vr_defer_data *vdd;
    struct vr_nexthop *saved_nh;

    /* Reject out-of-range indices and already-empty slots. */
    if (index >= router->vr_max_mirror_indices)
        return -EINVAL;

    entry = router->vr_mirrors[index];
    if (!entry)
        return -EINVAL;

    /*
     * Unlink the entry from the table before releasing anything, and
     * stash the nexthop so its reference can be dropped after the
     * entry itself has been handed off for destruction.
     */
    saved_nh = entry->mir_nh;
    router->vr_mirrors[index] = NULL;
    entry->mir_nh = NULL;

    if (vr_not_ready) {
        /* Module not up: no concurrent users expected, free inline. */
        vr_free(entry, VR_MIRROR_OBJECT);
    } else {
        vdd = vr_get_defer_data(sizeof(*vdd));
        if (vdd) {
            /* Hand the entry to the defer machinery for later release. */
            vdd->vdd_data = (void *)entry;
            vr_defer(router, vr_mirror_defer_delete, (void *)vdd);
        } else {
            /*
             * No defer record available: wait out in-flight users
             * (vr_delay_op), then free synchronously.
             */
            vr_delay_op();
            vr_free(entry, VR_MIRROR_OBJECT);
        }
    }

    vrouter_put_nexthop(saved_nh);

    return 0;
}
Exemplo n.º 2
0
static int
mtrie_free_bkt_defer(struct vrouter *router, struct ip_bucket *bkt)
{

    struct vr_defer_data *defer;

    defer = vr_get_defer_data(sizeof(*defer));
    if (!defer)
        return -ENOMEM;

    defer->vdd_data = bkt;
    vr_defer(router, mtrie_free_bkt_cb, (void *)defer);

    return 0;
}
Exemplo n.º 3
0
static int
vr_qos_map_free_fc_defer(struct vrouter *router,
        struct vr_forwarding_class *fc_p)
{
    struct vr_defer_data *defer;

    defer = vr_get_defer_data(sizeof(*defer));
    if (!defer)
        return -ENOMEM;

    defer->vdd_data = fc_p;
    vr_defer(router, vr_qos_map_free_fc_cb, (void *)defer);

    return 0;
}
Exemplo n.º 4
0
/*
 * Index-table destructor for a mirror meta entry. In the normal
 * (module-ready) case the entry is handed to the defer machinery so
 * destruction happens after in-flight users are done; if no defer
 * record can be allocated, wait (vr_delay_op) and destroy inline.
 * When the module is not ready, destroy immediately.
 */
static void
vr_mirror_meta_entry_destroy(unsigned int index, void *arg)
{
    struct vr_mirror_meta_entry *me = (struct vr_mirror_meta_entry *)arg;
    struct vr_defer_data *defer;

    if (me && me != VR_ITABLE_ERR_PTR) {
        if (!vr_not_ready) {
            defer = vr_get_defer_data(sizeof(*defer));
            if (!defer) {
                /* No defer memory: wait out users, destroy inline. */
                vr_delay_op();
                vr_mirror_meta_destroy(me);
                return;
            }
            defer->vdd_data = (void *)me;
            vr_defer(me->mirror_router, vr_mirror_meta_destructor, (void *)defer);
        } else {
            /*
             * Bug fix: the entry was previously leaked on this path.
             * Module is not ready, so destroy directly — same as the
             * vr_not_ready path in __vr_mirror_del.
             */
            vr_mirror_meta_destroy(me);
        }
    }

    return;
}
Exemplo n.º 5
0
/*
 * Work-item callback that unlinks delete-marked overflow entries from
 * one hash bucket's collision chain. Only the entries that were delete
 * marked at entry (hd_count) are processed; anything marked later is
 * handled by a subsequent work item. Unlinking from the head entry is
 * done with compare-and-swap because inserts race at the head;
 * unlinked entries are then deferred for invalidation (or invalidated
 * inline when the module is not ready).
 */
static void
vr_htable_hentry_scheduled_delete(void *arg)
{
    unsigned int count;
    struct vr_hentry_delete_data *delete_data, *defer_data;
    vr_hentry_t *head_ent, *ent, *prev, *next;
    struct vr_htable *table;


    delete_data = (struct vr_hentry_delete_data *)arg;
    table = delete_data->hd_table;

    /* Resolve the bucket's head entry from the stored index. */
    head_ent = __vr_htable_get_hentry_by_index((vr_htable_t)(table),
                                                delete_data->hd_index);

    if (!head_ent)
        return;

    /* Clear the "work scheduled" flag so new deletes can reschedule. */
    (void)__sync_bool_compare_and_swap(&delete_data->hd_scheduled, 1, 0);

    /*
     * We attempt to delete only those many entries that have been
     * delete marked. If some new entries are delete marked while
     * processing these, they will get scheduled in new work item
     */
    count = delete_data->hd_count;
    (void)__sync_sub_and_fetch(&delete_data->hd_count, count);

    prev = head_ent;
    ent = head_ent->hentry_next;

    while (count && ent) {

        /*
         * Process only if delete marked. If already processed,
         * delete marking is changed to delete processed
         */
        if (ent->hentry_flags & VR_HENTRY_FLAG_DELETE_MARKED) {

            /*
             * As the insertion happens only at head entry, it has
             * to be verified if something is inserted while delete
             * attempted. If inserted, traversal needs to restart, to
             * get hold of the new previous
             */
            if (prev == head_ent) {
                /* CAS the head's next pointer; on failure an insert
                 * won the race — restart from the head. */
                if (!__sync_bool_compare_and_swap(&prev->hentry_next,
                                                      ent, ent->hentry_next)) {
                    prev = head_ent;
                    ent = head_ent->hentry_next;
                    continue;
                }
            } else {
                /* Non-head links are only touched here; plain store. */
                prev->hentry_next = ent->hentry_next;
            }

            count--;

            /* update next index for the previous */
            if (ent->hentry_next)
                prev->hentry_next_index = ent->hentry_next->hentry_index;
            else
                prev->hentry_next_index = VR_INVALID_HENTRY_INDEX;

            /* Mark unlinked: MARKED -> PROCESSED. */
            ent->hentry_flags &= ~VR_HENTRY_FLAG_DELETE_MARKED;
            ent->hentry_flags |= VR_HENTRY_FLAG_DELETE_PROCESSED;
        }

        /* Grab the successor before the entry is deferred away. */
        next = ent->hentry_next;

        /*
         * A separate check for VR_HENTRY_FLAG_DELETE_PROCESSED flag to
         * defer the entry if we ever failed to allocate memory while
         * deferring it
         */
        if (ent->hentry_flags & VR_HENTRY_FLAG_DELETE_PROCESSED) {

            /*
             * Defer the entry to reset the values. If alloc of
             * defer data fails, this entry will be in delete state
             * forever
             */
            if (!vr_not_ready) {
                defer_data = vr_get_defer_data(sizeof(*defer_data));
                if (defer_data) {
                    defer_data->hd_table = delete_data->hd_table;
                    defer_data->hd_index = ent->hentry_index;
                    vr_defer(delete_data->hd_table->ht_router,
                         vr_htable_hentry_defer_delete, (void *)defer_data);
                }
            } else {
                /* Module not ready: invalidate inline, no defer. */
                vr_htable_oentry_invalidate(table, ent);
                ent = next;
                continue;
            }
        }

        /* Previous should not be under deletion */
        if (!(ent->hentry_flags & VR_HENTRY_FLAG_UNDER_DELETION))
            prev = ent;

        ent = next;
    }

    return;
}