示例#1
0
/*
 * Allocate the per-VRF and invalid-VRF per-CPU stats arrays for the
 * mtrie route table. Returns 0 on success or a negative errno. On
 * failure everything allocated by THIS invocation is released.
 */
static int
mtrie_stats_init(struct vr_rtable *rtable)
{
    int ret = 0, i = 0;
    unsigned int stats_memory;

    if (!mtrie_vrf_stats) {
        stats_memory = sizeof(void *) * rtable->algo_max_vrfs;
        mtrie_vrf_stats = vr_zalloc(stats_memory, VR_MTRIE_STATS_OBJECT);
        if (!mtrie_vrf_stats)
            return vr_module_error(-ENOMEM, __FUNCTION__,
                    __LINE__, stats_memory);
        for (i = 0; i < rtable->algo_max_vrfs; i++) {
            stats_memory = sizeof(struct vr_vrf_stats) * vr_num_cpus;
            mtrie_vrf_stats[i] = vr_zalloc(stats_memory,
                    VR_MTRIE_STATS_OBJECT);
            if (!mtrie_vrf_stats[i]) {
                ret = -ENOMEM;
                vr_module_error(ret, __FUNCTION__, __LINE__, i);
                goto cleanup;
            }
        }

        rtable->vrf_stats = mtrie_vrf_stats;
    }

    if (!invalid_vrf_stats) {
        invalid_vrf_stats = vr_zalloc(sizeof(struct vr_vrf_stats) *
                vr_num_cpus, VR_MTRIE_STATS_OBJECT);
        if (!invalid_vrf_stats) {
            ret = -ENOMEM;
            vr_module_error(ret, __FUNCTION__, __LINE__, -1);
            goto cleanup;
        }
    }

    return 0;

cleanup:
    /*
     * i is non-zero only when the per-VRF table was allocated by this
     * call; a table that pre-existed this invocation is left untouched.
     */
    if (i) {
        for (--i; i >= 0; i--) {
            if (mtrie_vrf_stats[i]) {
                vr_free(mtrie_vrf_stats[i], VR_MTRIE_STATS_OBJECT);
                mtrie_vrf_stats[i] = NULL;
            }
        }

        vr_free(mtrie_vrf_stats, VR_MTRIE_STATS_OBJECT);
        mtrie_vrf_stats = NULL;
        /*
         * bug fix: rtable->vrf_stats may already point at the array we
         * just freed (set on the success path above); clear it so the
         * caller is not left holding a dangling pointer.
         */
        rtable->vrf_stats = NULL;
    }

    if (invalid_vrf_stats) {
        vr_free(invalid_vrf_stats, VR_MTRIE_STATS_OBJECT);
        invalid_vrf_stats = NULL;
    }

    return ret;
}
示例#2
0
/*
 * Allocate the per-CPU packet-drop counter arrays for the router.
 * Idempotent: returns 0 immediately when the arrays already exist.
 * On any allocation failure, all partial allocations are released via
 * vr_pkt_drop_stats_exit() and -ENOMEM is returned.
 */
static int
vr_pkt_drop_stats_init(struct vrouter *router)
{
    unsigned int cpu;
    unsigned int alloc_size;

    if (router->vr_pdrop_stats)
        return 0;

    /* outer array: one counter-table pointer per CPU */
    alloc_size = sizeof(void *) * vr_num_cpus;
    router->vr_pdrop_stats = vr_zalloc(alloc_size);
    if (!router->vr_pdrop_stats) {
        vr_module_error(-ENOMEM, __FUNCTION__,
                        __LINE__, alloc_size);
        goto cleanup;
    }

    /* one zeroed counter per drop reason, per CPU */
    alloc_size = VP_DROP_MAX * sizeof(uint64_t);
    for (cpu = 0; cpu < vr_num_cpus; cpu++) {
        router->vr_pdrop_stats[cpu] = vr_zalloc(alloc_size);
        if (!router->vr_pdrop_stats[cpu]) {
            vr_module_error(-ENOMEM, __FUNCTION__,
                            __LINE__, cpu);
            goto cleanup;
        }
    }

    return 0;

cleanup:
    vr_pkt_drop_stats_exit(router);
    return -ENOMEM;
}
static int
dpdk_assembler_table_init(void)
{
    unsigned int size;
    int i;

    /* Allocate array of pointers to the assembler tables for each fwd lcore */
    size = sizeof(struct fragment_bucket *) * vr_dpdk.nb_fwd_lcores;
    assembler_table = vr_zalloc(size, VR_ASSEMBLER_TABLE_OBJECT);
    if (!assembler_table) {
        RTE_LOG(ERR, VROUTER, "%s:%d Allocation for %u failed\n",
                __FUNCTION__, __LINE__, size);
        return -ENOMEM;
    }

    /* Now allocate the assembler tables for each fwd lcore */
    size = sizeof(struct fragment_bucket) * VR_LINUX_ASSEMBLER_BUCKETS;
    for (i = 0; i < vr_dpdk.nb_fwd_lcores; ++i) {
        assembler_table[i] = vr_zalloc(size, VR_ASSEMBLER_TABLE_OBJECT);
        if (!assembler_table[i]) {
            RTE_LOG(ERR, VROUTER, "%s:%d Allocation for %u failed\n",
                    __FUNCTION__, __LINE__, size);
            return -ENOMEM;
        }
    }

    /* Intentionally the vr_assembler_table_scan_init() is not called here as
     * it would set up timers on the timer lcore. For the timers the forwarding
     * lcores are used, therefore allowing for complete lock elimination. */

    return 0;
}
示例#4
0
/*
 * Initialize the inet routing tables (unicast and multicast) from the
 * algorithm initializers registered in the family spec. Fails with
 * -EEXIST if either table is already set up. Returns 0 on success or a
 * negative module error.
 */
static int
inet_rtb_family_init(struct rtable_fspec *fs, struct vrouter *router)
{
    int ret;
    struct vr_rtable *table = NULL;
    unsigned int i;

    if (router->vr_inet_rtable || router->vr_inet_mcast_rtable)
        return vr_module_error(-EEXIST, __FUNCTION__, __LINE__, 0);

    for (i = 0; i < RT_MAX; i++) {
        if (!fs->algo_init[i])
            continue;

        table = vr_zalloc(sizeof(struct vr_rtable));
        if (!table)
            return vr_module_error(-ENOMEM, __FUNCTION__,
                    __LINE__, i);

        ret = fs->algo_init[i](table, fs);
        if (ret) {
            /* bug fix: the freshly allocated table was leaked here */
            vr_free(table);
            return vr_module_error(ret, __FUNCTION__, __LINE__, i);
        }

        if (i == RT_UCAST)
            router->vr_inet_rtable = table;

        if (i == RT_MCAST)
            router->vr_inet_mcast_rtable = table;
    }

    return 0;
}
示例#5
0
/*
 * Allocate an mtrie bucket for the given level, with every entry
 * initialized from the parent entry: the payload (nexthop pointer or
 * opaque vdata, selected by data_is_nh) plus the prefix-length, label
 * and bridge-index metadata. Returns NULL on allocation failure.
 */
static struct ip_bucket *
mtrie_alloc_bucket(struct mtrie_bkt_info *ip_bkt_info, unsigned char level,
                   struct ip_bucket_entry *parent, int data_is_nh)
{
    unsigned int                n_entries;
    unsigned int                idx;
    struct ip_bucket           *bucket;
    struct ip_bucket_entry     *entry;

    n_entries = ip_bkt_info[level].bi_size;
    bucket = vr_zalloc(sizeof(struct ip_bucket) +
                       n_entries * sizeof(struct ip_bucket_entry),
                       VR_MTRIE_BUCKET_OBJECT);
    if (!bucket)
        return NULL;

    for (idx = 0; idx < n_entries; idx++) {
        entry = &bucket->bkt_data[idx];

        /* propagate the parent's payload into every child slot */
        if (data_is_nh)
            set_entry_to_nh(entry, parent->entry_nh_p);
        else
            set_entry_to_vdata(entry, parent->entry_vdata_p);

        entry->entry_prefix_len = parent->entry_prefix_len;
        entry->entry_label_flags = parent->entry_label_flags;
        entry->entry_label = parent->entry_label;
        entry->entry_bridge_index = parent->entry_bridge_index;
    }

    return bucket;
}
/*
 * bind a child socket to the parent. binding in this context means adding
 * a child usocket to parent poll list. an example where this will be required
 * is when one has created an event usocket. An event usocket by itself cannot
 * do anything useful in the context of dpdk vrouter application. Hence it needs
 * to be bound to the parent socket that does something useful, in this case
 * the packet socket. Another example is that of netlink socket. when the
 * netlink socket accepts new connection and the new connected socket has to be
 * polled, in which case we will need to bind it to the parent socket poll list.
 *
 * Returns 0 on success, -ENOSPC when the parent already holds the
 * maximum number of children, or another negative errno on failure.
 */
static int
usock_bind_usockets(struct vr_usocket *parent, struct vr_usocket *child)
{
    unsigned int i;
    int ret;
    struct vr_usocket *child_pair;

    if (parent->usock_state == LIMITED)
        return -ENOSPC;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: parent FD %d child FD %d\n", __func__,
            pthread_self(), parent->usock_fd, child->usock_fd);

    if (child->usock_proto == EVENT) {
        /* event usockets are re-created as a fresh RAW event socket */
        child_pair = vr_usocket(EVENT, RAW);
        if (!child_pair)
            return -ENOMEM;

        RTE_LOG(DEBUG, USOCK, "%s[%lx]: parent FD %d closing child FD %d\n",
                    __func__, pthread_self(), parent->usock_fd, child->usock_fd);
        close(child->usock_fd);
        child->usock_fd = child_pair->usock_fd;
        child = child_pair;
    }

    ret = usock_init_poll(parent);
    if (ret)
        return ret;

    if (!parent->usock_children) {
        /*
         * bug fix: the child array is indexed from 1 through
         * usock_max_cfds, so it needs (USOCK_MAX_CHILD_FDS + 1) pointer
         * slots. The old expression, due to operator precedence,
         * allocated MAX pointers plus a single extra byte.
         */
        parent->usock_children = vr_zalloc(sizeof(struct vr_usocket *) *
                (USOCK_MAX_CHILD_FDS + 1), VR_USOCK_OBJECT);
        if (!parent->usock_children) {
            usock_set_error(parent, -ENOMEM);
            return -ENOMEM;
        }
    }

    child->usock_parent = parent;
    parent->usock_cfds++;
    if (parent->usock_cfds == USOCK_MAX_CHILD_FDS)
        parent->usock_state = LIMITED;

    /* find the first free child slot; slot 0 is never used */
    for (i = 1; i <= parent->usock_max_cfds; i++) {
        if (!parent->usock_children[i]) {
            parent->usock_children[i] = child;
            parent->usock_pfds[i].fd = child->usock_fd;
            parent->usock_pfds[i].events = POLLIN;
            child->usock_child_index = i;
            break;
        }
    }

    if (child->usock_proto == EVENT)
        child->usock_state = READING_DATA;

    usock_read_init(child);

    return 0;
}
示例#7
0
/*
 * Exact-match get: fills the request's nexthop id from an mtrie lookup
 * (-1 when no nexthop matches) and, when the entry carries a valid
 * bridge index, allocates and fills the MAC via a bridge lookup.
 * Returns 0 on success, -ENOMEM if the MAC buffer cannot be allocated.
 */
static int
mtrie_get(unsigned int vrf_id, struct vr_route_req *rt)
{
    struct vr_nexthop *nh;
    struct vr_route_req breq;
    vr_route_req *req = &rt->rtr_req;

    nh = mtrie_lookup(vrf_id, rt);
    if (nh)
        req->rtr_nh_id = nh->nh_id;
    else
        req->rtr_nh_id = -1;

    if (req->rtr_index != VR_BE_INVALID_INDEX) {
        req->rtr_mac = vr_zalloc(VR_ETHER_ALEN, VR_ROUTE_REQ_MAC_OBJECT);
        /*
         * bug fix: the allocation result was never checked; a NULL MAC
         * buffer would have been handed to vr_bridge_lookup().
         */
        if (!req->rtr_mac) {
            req->rtr_mac_size = 0;
            return -ENOMEM;
        }
        req->rtr_mac_size = VR_ETHER_ALEN;

        breq.rtr_req.rtr_mac = req->rtr_mac;
        breq.rtr_req.rtr_index = req->rtr_index;
        breq.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
        vr_bridge_lookup(req->rtr_vrf_id, &breq);

    } else {
        req->rtr_mac_size = 0;
        req->rtr_mac = NULL;
    }

    return 0;
}
static int
vr_linux_fragment_queue_init(void)
{
    unsigned int i, size;

    size = sizeof(struct vr_linux_fragment_queue) * vr_num_cpus;
    vr_lfq_pcpu_queues = vr_zalloc(size, VR_FRAGMENT_QUEUE_OBJECT);
    if (!vr_lfq_pcpu_queues) {
        printk("%s:%d Allocation for %u failed\n",
                __FUNCTION__, __LINE__, size);
        return -ENOMEM;
    }

    for (i = 0; i < vr_num_cpus; i++) {
        INIT_WORK(&vr_lfq_pcpu_queues[i].vrlfq_work,
                vr_linux_fragment_assembler);
    }

    vr_linux_assembler_wq = create_workqueue("vr_linux_assembler");
    if (!vr_linux_assembler_wq) {
        printk("%s:%d Failed to create assembler work queue\n",
                __FUNCTION__, __LINE__);
        return -ENOMEM;
    }

    return 0;
}
示例#9
0
/*
 * Build and send a drop-stats response. core == (unsigned)-1 requests
 * the sum over all CPUs; a valid core index requests that CPU only;
 * any other value yields all-zero counters. On error, a NULL response
 * is sent along with the error code.
 */
static void
vr_drop_stats_get(unsigned int core)
{
    int err = 0;
    unsigned int cpu;
    struct vrouter *router = vrouter_get(0);
    vr_drop_stats_req *resp = NULL;

    if (!router) {
        err = -ENOENT;
        goto send_response;
    }

    resp = vr_zalloc(sizeof(*resp), VR_DROP_STATS_REQ_OBJECT);
    if (!resp) {
        err = -ENOMEM;
        goto send_response;
    }

    if (core == (unsigned)-1) {
        /* summed up stats */
        for (cpu = 0; cpu < vr_num_cpus; cpu++)
            vr_drop_stats_add_response(resp, router->vr_pdrop_stats[cpu]);
    } else if (core < vr_num_cpus) {
        /* stats for a specific core */
        vr_drop_stats_add_response(resp, router->vr_pdrop_stats[core]);
    }
    /* otherwise the counters will be zeros */

send_response:
    vr_message_response(VR_DROP_STATS_OBJECT_ID, err ? NULL : resp, err);

    if (resp != NULL)
        vr_free(resp, VR_DROP_STATS_REQ_OBJECT);

    return;
}
示例#10
0
/*
 * Create a dump context for the given request: one zeroed dumper plus
 * one transport-allocated page used as the dump buffer. Returns NULL
 * if the message handler is not fully set up or any allocation fails.
 */
struct vr_message_dumper *
vr_message_dump_init(void *req)
{
    char *page;
    struct vr_message_dumper *dmp;
    struct vr_mproto *proto = message_h.vm_proto;
    struct vr_mtransport *trans = message_h.vm_trans;

    /* both the protocol and the transport must be registered */
    if (!proto || !trans)
        return NULL;

    dmp = vr_zalloc(sizeof(*dmp));
    if (!dmp)
        return NULL;

    page = trans->mtrans_alloc(VR_MESSAGE_PAGE_SIZE);
    if (!page) {
        vr_free(dmp);
        return NULL;
    }

    dmp->dump_buffer = page;
    dmp->dump_buf_len = VR_MESSAGE_PAGE_SIZE;
    dmp->dump_offset = 0;
    dmp->dump_req = req;

    return dmp;
}
示例#11
0
/*
 * Initialize every inet route-table type that has a registered
 * algorithm and is not already present on the router. Returns 0 on
 * success or a negative module error.
 */
static int
inet_rtb_family_init(struct rtable_fspec *fs, struct vrouter *router)
{
    int ret;
    struct vr_rtable *table = NULL;
    unsigned int i;

    for (i = 0; i < RT_MAX; i++) {

        if (!fs->algo_init[i])
            continue;

        /* table of this type already installed — skip it */
        if (vr_get_inet_table(router, i))
            continue;

        table = vr_zalloc(sizeof(struct vr_rtable));
        if (!table)
            return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, i);

        ret = fs->algo_init[i](table, fs);
        if (ret) {
            /* bug fix: the freshly allocated table was leaked here */
            vr_free(table);
            return vr_module_error(ret, __FUNCTION__, __LINE__, i);
        }

        vr_put_inet_table(router, i, table);
    }

    return 0;
}
示例#12
0
/*
 * Fill a dump response from an mtrie bucket entry, copying the prefix
 * and entry metadata from the original dump request. When the entry
 * carries a valid bridge index, a MAC buffer is allocated and resolved
 * through the bridge table.
 */
static void
mtrie_dumper_make_response(struct vr_message_dumper *dumper, vr_route_req *resp,
        struct ip_bucket_entry *ent, int8_t *prefix, unsigned int prefix_len)
{
    vr_route_req *req = (vr_route_req *)dumper->dump_req;
    struct vr_route_req lreq;

    resp->rtr_vrf_id = req->rtr_vrf_id;
    resp->rtr_family = req->rtr_family;
    memcpy(resp->rtr_prefix, prefix, prefix_len / IPBUCKET_LEVEL_BITS);
    resp->rtr_prefix_size = req->rtr_prefix_size;
    resp->rtr_marker_size = 0;
    resp->rtr_marker = NULL;
    resp->rtr_prefix_len = prefix_len;
    resp->rtr_rid = req->rtr_rid;
    resp->rtr_label_flags = ent->entry_label_flags;
    resp->rtr_label = ent->entry_label;
    /* NOTE(review): assumes entry_nh_p is non-NULL here — confirm
     * against the dumper's traversal invariants */
    resp->rtr_nh_id = ent->entry_nh_p->nh_id;
    resp->rtr_index = ent->entry_bridge_index;
    if (resp->rtr_index != VR_BE_INVALID_INDEX) {
        resp->rtr_mac = vr_zalloc(VR_ETHER_ALEN, VR_ROUTE_REQ_MAC_OBJECT);
        /*
         * bug fix: the allocation was not checked; on failure we now
         * report an empty MAC instead of passing a NULL buffer to the
         * bridge lookup.
         */
        if (resp->rtr_mac) {
            resp->rtr_mac_size = VR_ETHER_ALEN;
            lreq.rtr_req.rtr_mac = resp->rtr_mac;
            lreq.rtr_req.rtr_index = resp->rtr_index;
            lreq.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
            vr_bridge_lookup(resp->rtr_vrf_id, &lreq);
        } else {
            resp->rtr_mac_size = 0;
        }
    } else {
        resp->rtr_mac_size = 0;
        resp->rtr_mac = NULL;
    }
    resp->rtr_replace_plen = ent->entry_prefix_len;

    return;
}
示例#13
0
/* Allocate one zeroed per-entry byte array for a qos map request. */
static uint8_t *
vr_qos_map_req_alloc_array(unsigned int num_entries)
{
    return vr_zalloc(num_entries * sizeof(uint8_t), VR_QOS_MAP_OBJECT);
}

/*
 * Allocate a vr_qos_map_req together with all six of its per-entry
 * arrays (dscp, mpls and dotonep maps plus their fc-id twins). Returns
 * NULL on any allocation failure; everything allocated so far is then
 * released through vr_qos_map_req_destroy().
 */
static vr_qos_map_req *
vr_qos_map_req_get(void)
{
    vr_qos_map_req *req;

    req = vr_zalloc(sizeof(vr_qos_map_req), VR_QOS_MAP_OBJECT);
    if (!req)
        return NULL;

    req->qmr_dscp = vr_qos_map_req_alloc_array(VR_DSCP_QOS_ENTRIES);
    if (!req->qmr_dscp)
        goto alloc_failure;
    req->qmr_dscp_size = VR_DSCP_QOS_ENTRIES;

    req->qmr_dscp_fc_id = vr_qos_map_req_alloc_array(VR_DSCP_QOS_ENTRIES);
    if (!req->qmr_dscp_fc_id)
        goto alloc_failure;
    req->qmr_dscp_fc_id_size = VR_DSCP_QOS_ENTRIES;

    req->qmr_mpls_qos = vr_qos_map_req_alloc_array(VR_MPLS_QOS_ENTRIES);
    if (!req->qmr_mpls_qos)
        goto alloc_failure;
    req->qmr_mpls_qos_size = VR_MPLS_QOS_ENTRIES;

    req->qmr_mpls_qos_fc_id = vr_qos_map_req_alloc_array(VR_MPLS_QOS_ENTRIES);
    if (!req->qmr_mpls_qos_fc_id)
        goto alloc_failure;
    req->qmr_mpls_qos_fc_id_size = VR_MPLS_QOS_ENTRIES;

    req->qmr_dotonep = vr_qos_map_req_alloc_array(VR_DOTONEP_QOS_ENTRIES);
    if (!req->qmr_dotonep)
        goto alloc_failure;
    req->qmr_dotonep_size = VR_DOTONEP_QOS_ENTRIES;

    req->qmr_dotonep_fc_id = vr_qos_map_req_alloc_array(VR_DOTONEP_QOS_ENTRIES);
    if (!req->qmr_dotonep_fc_id)
        goto alloc_failure;
    req->qmr_dotonep_fc_id_size = VR_DOTONEP_QOS_ENTRIES;

    return req;

alloc_failure:
    /* destroy() frees req and every non-NULL array hanging off it */
    vr_qos_map_req_destroy(req);
    return NULL;
}
示例#14
0
/*
 * One-time initialization of the mtrie routing algorithm for the given
 * route table: allocates the per-VRF mtrie pointer tables (v4 + v6),
 * sets up the per-VRF stats, installs the mtrie operation callbacks on
 * the rtable, and primes the file-local caches. Idempotent via the
 * algo_init_done flag. Returns 0 on success or a negative module error.
 */
int
mtrie_algo_init(struct vr_rtable *rtable, struct rtable_fspec *fs)
{
    int ret = 0;
    unsigned int table_memory;

    /* already initialized — nothing to do */
    if (algo_init_done)
        return 0;

    if (!rtable->algo_data) {
        /* two pointer tables (ipv4 and ipv6), rtb_max_vrfs slots each */
        table_memory = 2 * sizeof(void *) * fs->rtb_max_vrfs;
        rtable->algo_data = vr_zalloc(table_memory, VR_MTRIE_TABLE_OBJECT);
        if (!rtable->algo_data)
            return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, table_memory);
    }

    rtable->algo_max_vrfs = fs->rtb_max_vrfs;
    if ((ret = mtrie_stats_init(rtable))) {
        vr_module_error(ret, __FUNCTION__, __LINE__, 0);
        goto init_fail;
    }

    /* wire the mtrie implementations into the generic rtable interface */
    rtable->algo_add = mtrie_add;
    rtable->algo_del = mtrie_delete;
    rtable->algo_lookup = mtrie_lookup;
    rtable->algo_get = mtrie_get;
    rtable->algo_dump = mtrie_dump;
    rtable->algo_stats_get = mtrie_stats_get;
    rtable->algo_stats_dump = mtrie_stats_dump;

    vr_inet_vrf_stats = mtrie_stats;
    /* local cache */
    /* ipv4 table */
    vn_rtable[0] = (struct ip_mtrie **)rtable->algo_data;
    /* ipv6 table: second half of the algo_data pointer block */
    vn_rtable[1] = (struct ip_mtrie **)((unsigned char **)rtable->algo_data
                                                 + fs->rtb_max_vrfs);

    mtrie_ip_bkt_info_init(ip4_bkt_info, IP4_PREFIX_LEN);
    mtrie_ip_bkt_info_init(ip6_bkt_info, IP6_PREFIX_LEN);

    algo_init_done = 1;
    return 0;

init_fail:
    /*
     * NOTE(review): this frees algo_data even when it was allocated by a
     * previous caller (the !algo_data guard above skipped allocation) —
     * confirm the intended ownership on the failure path.
     */
    if (rtable->algo_data) {
        vr_free(rtable->algo_data, VR_MTRIE_TABLE_OBJECT);
        rtable->algo_data = NULL;
    }

    return ret;
}
示例#15
0
/*
 * Add or update the mirror entry at req->mirr_index. A new entry is
 * allocated when the slot is empty; otherwise the existing entry is
 * updated in place and its previous nexthop reference is released after
 * the new one is installed. A response carrying the result code is
 * always sent.
 */
int
vr_mirror_add(vr_mirror_req *req)
{
    int err = 0;
    struct vrouter *router;
    struct vr_nexthop *new_nh;
    struct vr_nexthop *prev_nh = NULL;
    struct vr_mirror_entry *entry;

    router = vrouter_get(req->mirr_rid);
    if (!router) {
        err = -EINVAL;
        goto send_resp;
    }

    if ((unsigned int)req->mirr_index >= router->vr_max_mirror_indices) {
        err = -EINVAL;
        goto send_resp;
    }

    /* take a reference on the nexthop the mirror should use */
    new_nh = vrouter_get_nexthop(req->mirr_rid, req->mirr_nhid);
    if (!new_nh) {
        err = -EINVAL;
        goto send_resp;
    }

    entry = router->vr_mirrors[req->mirr_index];
    if (entry) {
        /* updating in place: remember the nexthop to release later */
        prev_nh = entry->mir_nh;
    } else {
        entry = vr_zalloc(sizeof(*entry), VR_MIRROR_OBJECT);
        if (!entry) {
            err = -ENOMEM;
            vrouter_put_nexthop(new_nh);
            goto send_resp;
        }
    }

    entry->mir_nh = new_nh;
    entry->mir_rid = req->mirr_rid;
    entry->mir_flags = req->mirr_flags;
    entry->mir_vni = req->mirr_vni;
    entry->mir_vlan_id = req->mirr_vlan;
    router->vr_mirrors[req->mirr_index] = entry;

    if (prev_nh)
        vrouter_put_nexthop(prev_nh);

send_resp:
    vr_send_response(err);

    return err;
}
示例#16
0
/*
 * Allocate a bridge-table data object; when a bridge table file path is
 * configured, also allocate the path buffer. Returns NULL on any
 * allocation failure, releasing the partially built object.
 */
static vr_bridge_table_data *
vr_bridge_table_data_get(void)
{
    vr_bridge_table_data *table_data;

    table_data = vr_zalloc(sizeof(*table_data), VR_BRIDGE_TABLE_DATA_OBJECT);
    if (!table_data)
        return NULL;

    if (vr_bridge_table_path) {
        table_data->btable_file_path =
            vr_zalloc(VR_UNIX_PATH_MAX, VR_BRIDGE_TABLE_DATA_OBJECT);
        if (!table_data->btable_file_path) {
            vr_bridge_table_data_destroy(table_data);
            return NULL;
        }
    }

    return table_data;
}
示例#17
0
/*
 * Append a packet to a flow entry's hold list. The packet is dropped
 * (never queued) when the list already holds VR_MAX_FLOW_QUEUE_ENTRIES
 * packets or when the node allocation fails. Always returns 0.
 */
static int
vr_enqueue_flow(struct vr_flow_entry *fe, struct vr_packet *pkt,
        unsigned short proto, struct vr_forwarding_md *fmd)
{
    unsigned int queued = 0;
    unsigned short reason = 0;
    struct vr_list_node **tail = &fe->fe_hold_list.node_p;
    struct vr_packet_node *node;

    /* walk to the end of the hold list, counting queued packets */
    for (; *tail; tail = &(*tail)->node_n)
        queued++;

    if (queued >= VR_MAX_FLOW_QUEUE_ENTRIES) {
        reason = VP_DROP_FLOW_QUEUE_LIMIT_EXCEEDED;
        goto drop;
    }

    node = (struct vr_packet_node *)vr_zalloc(sizeof(struct vr_packet_node));
    if (!node) {
        reason = VP_DROP_FLOW_NO_MEMORY;
        goto drop;
    }

    /*
     * we cannot cache nexthop here. to cache, we need to hold reference
     * to the nexthop. to hold a reference, we will have to hold a lock,
     * which we cannot. the only known case of misbehavior if we do not
     * cache is ECMP. when the packet comes from the fabric, the nexthop
     * actually points to a local composite, whereas a route lookup actually
     * returns a different nexthop, in which case the ecmp index will return
     * a bad nexthop. to avoid that, we will cache the label, and reuse it
     */
    pkt->vp_nh = NULL;

    node->pl_packet = pkt;
    node->pl_proto = proto;
    node->pl_vif_idx = pkt->vp_if->vif_idx;
    if (fmd) {
        node->pl_outer_src_ip = fmd->fmd_outer_src_ip;
        node->pl_label = fmd->fmd_label;
    }

    /* link the new node in at the tail */
    *tail = &node->pl_node;

    return 0;

drop:
    vr_pfree(pkt, reason);
    return 0;
}
示例#18
0
/*
 * Allocate the router's qos map (forwarding-class pointer table) and
 * its forwarding-class table, skipping whichever already exists.
 * Returns 0 on success or a negative module error on allocation
 * failure.
 */
int
vr_qos_init(struct vrouter *router)
{
    unsigned long bytes;

    if (!router->vr_qos_map) {
        bytes = vr_qos_map_entries * sizeof(struct vr_forwarding_class *);
        router->vr_qos_map = vr_zalloc(bytes, VR_QOS_MAP_OBJECT);
        if (!router->vr_qos_map)
            return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, bytes);
    }

    if (!router->vr_fc_table) {
        bytes = vr_fc_map_entries * sizeof(struct vr_forwarding_class);
        router->vr_fc_table = vr_zalloc(bytes, VR_FC_OBJECT);
        if (!router->vr_fc_table)
            return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, bytes);
    }

    return 0;
}
示例#19
0
/*
 * Allocate the per-CPU allocation-accounting tables and record this
 * function's own memory usage in them. Idempotent. Returns 0 on
 * success or -ENOMEM.
 */
static int
vr_malloc_stats_init(struct vrouter *router)
{
    unsigned int i, size, cpu, total_size = 0;

    if (router->vr_malloc_stats)
        return 0;

    size = vr_num_cpus * sizeof(void *);
    router->vr_malloc_stats = vr_zalloc(size, VR_MALLOC_OBJECT);
    if (!router->vr_malloc_stats)
        return -ENOMEM;
    total_size += size;

    size = VR_VROUTER_MAX_OBJECT * sizeof(struct vr_malloc_stats);
    /*
     * align the allocation to cache line size so that per-cpu variable
     * do not result in cache thrashing
     */
    if (size % 64) {
        size = size + (64 - (size % 64));
    }

    for (i = 0; i < vr_num_cpus; i++) {
        router->vr_malloc_stats[i] = vr_zalloc(size, VR_MALLOC_OBJECT);
        /*
         * bug fix: check the per-CPU pointer just allocated; the old
         * code re-tested the outer array, which is always non-NULL at
         * this point, so a failed per-CPU allocation went undetected.
         */
        if (!router->vr_malloc_stats[i])
            return -ENOMEM;
        total_size += size;
    }

    /* account for the stats tables themselves on the current CPU */
    cpu = vr_get_cpu();
    router->vr_malloc_stats[cpu][VR_MALLOC_OBJECT].ms_alloc = vr_num_cpus + 1;
    router->vr_malloc_stats[cpu][VR_MALLOC_OBJECT].ms_size = total_size;

    return 0;
}
示例#20
0
/*
 * API to initialize vdata mtrie
 */
struct ip_mtrie *
vdata_mtrie_init (unsigned int prefix_len, void *data)
{
    struct ip_mtrie *mtrie;

    mtrie = vr_zalloc(sizeof(struct ip_mtrie), VR_MTRIE_OBJECT);
    if (mtrie) {
        mtrie->root.entry_type = ENTRY_TYPE_VDATA;
        mtrie->root.entry_prefix_len = prefix_len;
        mtrie->root.entry_vdata_p = data;
        mtrie->root.entry_bridge_index =  VR_BE_INVALID_INDEX;
        mtrie->root.entry_label = 0xFFFFFF;
    }

    return mtrie;
}
示例#21
0
/*
 * Wrap a response buffer in a vr_message and enqueue it on the handler's
 * response queue. The buffer's ownership moves to the message. Returns
 * 0 on success, -ENOMEM if the wrapper cannot be allocated.
 */
static int
vr_message_queue_response(char *buf, int len)
{
    struct vr_message *msg;

    msg = vr_zalloc(sizeof(*msg));
    if (!msg)
        return -ENOMEM;

    msg->vr_message_buf = buf;
    msg->vr_message_len = len;
    vr_queue_enqueue(&message_h.vm_response_queue, &msg->vr_message_queue);

    return 0;
}
示例#22
0
/*
 * Allocate the router's incoming-label map (nexthop pointer per label)
 * if it does not exist yet, sizing it for VR_MAX_LABELS entries.
 * Returns 0 on success or a negative module error.
 */
int
vr_mpls_init(struct vrouter *router)
{
    int label_table_bytes;

    if (router->vr_ilm)
        return 0;

    router->vr_max_labels = VR_MAX_LABELS;
    label_table_bytes = sizeof(struct vr_nexthop *) * router->vr_max_labels;
    router->vr_ilm = vr_zalloc(label_table_bytes);
    if (!router->vr_ilm)
        return vr_module_error(-ENOMEM, __FUNCTION__,
                __LINE__, label_table_bytes);

    return 0;
}
/*
 * Allocate the per-forwarding-lcore fragment queues.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
dpdk_fragment_queue_init(void)
{
    unsigned int queues_size;

    queues_size = vr_dpdk.nb_fwd_lcores *
        sizeof(struct per_cpu_fragment_queue);
    per_cpu_queues = vr_zalloc(queues_size, VR_FRAGMENT_QUEUE_OBJECT);
    if (!per_cpu_queues) {
        RTE_LOG(ERR, VROUTER, "%s: Error allocating fragmentation queues\n",
                __func__);
        return -ENOMEM;
    }

    return 0;
}
示例#24
0
/*
 * Wrap a response buffer in a vr_message (optionally flagged for
 * broadcast) and enqueue it on the handler's response queue. The
 * buffer's ownership moves to the message. Returns 0 on success,
 * -ENOMEM if the wrapper cannot be allocated.
 */
static int
vr_message_queue_response(char *buf, int len, bool broadcast)
{
    struct vr_message *msg;

    msg = vr_zalloc(sizeof(*msg), VR_MESSAGE_RESPONSE_OBJECT);
    if (!msg)
        return -ENOMEM;

    msg->vr_message_buf = buf;
    msg->vr_message_len = len;
    msg->vr_message_broadcast = broadcast;
    vr_queue_enqueue(&message_h.vm_response_queue, &msg->vr_message_queue);

    return 0;
}
示例#25
0
/*
 * Set up a 1-second periodic scanner over the given fragment table.
 * Returns the created timer, or NULL on failure — in which case both
 * the scanner context and the timer object are released.
 */
static struct vr_timer *
fragment_table_scanner_init(struct vrouter *router, struct vr_btable *table)
{
    unsigned int num_entries;
    struct vr_timer *vtimer = NULL;
    struct scanner_params *scanner;

    if (!table)
        return NULL;

    num_entries = vr_btable_entries(table);

    scanner = vr_zalloc(sizeof(*scanner), VR_FRAGMENT_SCANNER_OBJECT);
    if (!scanner) {
        vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, num_entries);
        return NULL;
    }

    scanner->sp_router = router;
    scanner->sp_fragment_table = table;
    scanner->sp_num_entries = num_entries;
    /* -1 so the first scan starts at entry 0 */
    scanner->sp_last_scanned_entry = -1;

    vtimer = vr_malloc(sizeof(*vtimer), VR_TIMER_OBJECT);
    if (!vtimer) {
        vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, num_entries);
        goto fail_init;
    }

    vtimer->vt_timer = fragment_table_scanner;
    vtimer->vt_vr_arg = scanner;
    vtimer->vt_msecs = 1000;

    if (vr_create_timer(vtimer)) {
        vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, num_entries);
        /* bug fix: vtimer was leaked on this path (only scanner freed) */
        vr_free(vtimer, VR_TIMER_OBJECT);
        goto fail_init;
    }

    return vtimer;

fail_init:
    vr_free(scanner, VR_FRAGMENT_SCANNER_OBJECT);

    return NULL;
}
示例#26
0
static int
bridge_entry_make_req(struct vr_route_req *resp, struct vr_bridge_entry *ent)
{
    memset(resp, 0, sizeof(struct vr_route_req));
    resp->rtr_req.rtr_mac_size = VR_ETHER_ALEN;
    resp->rtr_req.rtr_mac = vr_zalloc(VR_ETHER_ALEN);
    if (!resp->rtr_req.rtr_mac)
        return -ENOMEM;
    VR_MAC_COPY(resp->rtr_req.rtr_mac, ent->be_key.be_mac);
    resp->rtr_req.rtr_vrf_id = ent->be_key.be_vrf_id;
    if (ent->be_nh)
        resp->rtr_req.rtr_nh_id = ent->be_nh->nh_id;
    resp->rtr_req.rtr_family = AF_BRIDGE;
    resp->rtr_req.rtr_label = ent->be_label;
    if (ent->be_flags & VR_BE_FLAG_LABEL_VALID)
        resp->rtr_req.rtr_label_flags = VR_RT_LABEL_VALID_FLAG;
    return 0;
}
示例#27
0
/*
 * Allocate the flow table info block, sized for one uint32_t per CPU
 * after the fixed header. Idempotent. Returns 0 on success or a
 * negative module error.
 */
static int
vr_flow_table_info_init(struct vrouter *router)
{
    unsigned int info_size;
    struct vr_flow_table_info *info;

    if (router->vr_flow_table_info)
        return 0;

    info_size = sizeof(struct vr_flow_table_info) +
        vr_num_cpus * sizeof(uint32_t);
    info = (struct vr_flow_table_info *)vr_zalloc(info_size);
    if (!info)
        return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, info_size);

    router->vr_flow_table_info = info;
    router->vr_flow_table_info_size = info_size;

    return 0;
}
示例#28
0
static int
bridge_entry_make_req(struct vr_route_req *resp, struct vr_bridge_entry *ent)
{
    memset(resp, 0, sizeof(struct vr_route_req));
    resp->rtr_req.rtr_mac_size = VR_ETHER_ALEN;
    resp->rtr_req.rtr_mac = vr_zalloc(VR_ETHER_ALEN, VR_ROUTE_REQ_MAC_OBJECT);
    if (!resp->rtr_req.rtr_mac)
        return -ENOMEM;
    VR_MAC_COPY(resp->rtr_req.rtr_mac, ent->be_key.be_mac);
    resp->rtr_req.rtr_vrf_id = ent->be_key.be_vrf_id;
    if (ent->be_nh)
        resp->rtr_req.rtr_nh_id = ent->be_nh->nh_id;
    resp->rtr_req.rtr_family = AF_BRIDGE;
    resp->rtr_req.rtr_label = ent->be_label;
    resp->rtr_req.rtr_label_flags = ent->be_flags;
    resp->rtr_req.rtr_index = ent->be_hentry.hentry_index;

    return 0;
}
示例#29
0
/*
 * Initialize the single inet routing table through the spec's
 * algorithm initializer. Returns 1 when no initializer is registered,
 * 0 on success, or a negative module error.
 */
static int
inet_rtb_family_init(struct rtable_fspec *fs, struct vrouter *router)
{
    int err;

    if (!fs->algo_init)
        return 1;

    if (!router->vr_inet_rtable) {
        router->vr_inet_rtable = vr_zalloc(sizeof(struct vr_rtable));
        if (!router->vr_inet_rtable)
            return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, 0);
    }

    err = fs->algo_init(router->vr_inet_rtable, fs);
    if (err)
        return vr_module_error(err, __FUNCTION__, __LINE__, 0);

    return 0;
}
/*
 * Allocate the assembler hash-bucket table, initialize each bucket's
 * spinlock, and start the periodic table scan. Returns 0 on success,
 * -ENOMEM on allocation failure.
 */
static int
vr_linux_assembler_table_init(void)
{
    unsigned int bkt, table_size;

    table_size = VR_ASSEMBLER_BUCKET_COUNT *
        sizeof(struct vr_linux_fragment_bucket);
    vr_linux_assembler_table = vr_zalloc(table_size, VR_ASSEMBLER_TABLE_OBJECT);
    if (!vr_linux_assembler_table) {
        printk("%s:%d Allocation for %u failed\n",
                __FUNCTION__, __LINE__, table_size);
        return -ENOMEM;
    }

    for (bkt = 0; bkt < VR_ASSEMBLER_BUCKET_COUNT; bkt++)
        spin_lock_init(&vr_linux_assembler_table[bkt].vfb_lock);

    vr_assembler_table_scan_init(vr_linux_assembler_table_scan);

    return 0;
}