void
vr_btable_free(struct vr_btable *table)
{
    unsigned int i;

    if (!table)
        return;

    if (table->vb_mem) {
        for (i = 0; i < table->vb_partitions; i++) {
            if (table->vb_mem[i]) {
                vr_page_free(table->vb_mem[i],
                        table->vb_table_info[i].vb_mem_size);
            }
        }
    }

    if (table->vb_table_info)
        vr_free(table->vb_table_info);

    if (table->vb_mem)
        vr_free(table->vb_mem);

    vr_free(table);

    return;
}

static int
mtrie_stats_init(struct vr_rtable *rtable)
{
    int ret = 0, i = 0;
    unsigned int stats_memory;

    if (!mtrie_vrf_stats) {
        stats_memory = sizeof(void *) * rtable->algo_max_vrfs;
        mtrie_vrf_stats = vr_zalloc(stats_memory, VR_MTRIE_STATS_OBJECT);
        if (!mtrie_vrf_stats)
            return vr_module_error(-ENOMEM, __FUNCTION__,
                    __LINE__, stats_memory);

        for (i = 0; i < rtable->algo_max_vrfs; i++) {
            stats_memory = sizeof(struct vr_vrf_stats) * vr_num_cpus;
            mtrie_vrf_stats[i] = vr_zalloc(stats_memory,
                    VR_MTRIE_STATS_OBJECT);
            if (!mtrie_vrf_stats[i] && (ret = -ENOMEM)) {
                vr_module_error(ret, __FUNCTION__, __LINE__, i);
                goto cleanup;
            }
        }

        rtable->vrf_stats = mtrie_vrf_stats;
    }

    if (!invalid_vrf_stats) {
        invalid_vrf_stats = vr_zalloc(sizeof(struct vr_vrf_stats) * vr_num_cpus,
                VR_MTRIE_STATS_OBJECT);
        if (!invalid_vrf_stats && (ret = -ENOMEM)) {
            vr_module_error(ret, __FUNCTION__, __LINE__, -1);
            goto cleanup;
        }
    }

    return 0;

cleanup:
    if (!i)
        return ret;

    for (--i; i >= 0; i--) {
        if (mtrie_vrf_stats[i]) {
            vr_free(mtrie_vrf_stats[i], VR_MTRIE_STATS_OBJECT);
            mtrie_vrf_stats[i] = NULL;
        }
    }

    if (mtrie_vrf_stats) {
        vr_free(mtrie_vrf_stats, VR_MTRIE_STATS_OBJECT);
        mtrie_vrf_stats = NULL;
    }

    if (invalid_vrf_stats) {
        vr_free(invalid_vrf_stats, VR_MTRIE_STATS_OBJECT);
        invalid_vrf_stats = NULL;
    }

    return ret;
}

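/*
 * Illustrative sketch only (not part of the source tree): given the per-VRF
 * array of per-CPU blocks that mtrie_stats_init() above allocates, a reader
 * typically sums a counter across all CPUs.  The helper name and the
 * vrf_discards field are assumptions used purely for illustration, and
 * bounds checking of 'vrf' against the table size is omitted.
 */
static uint64_t
mtrie_stats_sum_discards(unsigned int vrf)
{
    unsigned int cpu;
    uint64_t sum = 0;

    if (!mtrie_vrf_stats || !mtrie_vrf_stats[vrf])
        return 0;

    for (cpu = 0; cpu < vr_num_cpus; cpu++)
        sum += mtrie_vrf_stats[vrf][cpu].vrf_discards;

    return sum;
}
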
static void
mtrie_stats_cleanup(struct vr_rtable *rtable, bool soft_reset)
{
    unsigned int i, stats_memory_size;

    if (!mtrie_vrf_stats)
        return;

    stats_memory_size = sizeof(struct vr_vrf_stats) * vr_num_cpus;
    for (i = 0; i < rtable->algo_max_vrfs; i++) {
        if (mtrie_vrf_stats[i]) {
            if (soft_reset) {
                memset(mtrie_vrf_stats[i], 0, stats_memory_size);
            } else {
                vr_free(mtrie_vrf_stats[i], VR_MTRIE_STATS_OBJECT);
                mtrie_vrf_stats[i] = NULL;
            }
        }
    }

    if (!soft_reset) {
        vr_free(mtrie_vrf_stats, VR_MTRIE_STATS_OBJECT);
        rtable->vrf_stats = mtrie_vrf_stats = NULL;

        if (invalid_vrf_stats) {
            vr_free(invalid_vrf_stats, VR_MTRIE_STATS_OBJECT);
            invalid_vrf_stats = NULL;
        }
    } else {
        if (invalid_vrf_stats)
            memset(invalid_vrf_stats, 0, stats_memory_size);
    }

    return;
}

aeEventLoop *aeCreateEventLoop(int setsize) {
    aeEventLoop *eventLoop;
    int i;

    if ((eventLoop = vr_alloc(sizeof(*eventLoop))) == NULL) goto err;
    eventLoop->events = vr_alloc(sizeof(aeFileEvent)*setsize);
    eventLoop->fired = vr_alloc(sizeof(aeFiredEvent)*setsize);
    if (eventLoop->events == NULL || eventLoop->fired == NULL) goto err;
    eventLoop->setsize = setsize;
    eventLoop->lastTime = time(NULL);
    eventLoop->timeEventHead = NULL;
    eventLoop->timeEventNextId = 0;
    eventLoop->stop = 0;
    eventLoop->maxfd = -1;
    eventLoop->beforesleep = NULL;
    eventLoop->bsdata = NULL;
    if (aeApiCreate(eventLoop) == -1) goto err;
    /* Events with mask == AE_NONE are not set. So let's initialize the
     * vector with it. */
    for (i = 0; i < setsize; i++)
        eventLoop->events[i].mask = AE_NONE;
    return eventLoop;

err:
    if (eventLoop) {
        vr_free(eventLoop->events);
        vr_free(eventLoop->fired);
        vr_free(eventLoop);
    }
    return NULL;
}

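/*
 * The matching teardown is not part of this listing.  Upstream redis ae.c
 * pairs aeCreateEventLoop() with aeDeleteEventLoop(); the sketch below shows
 * what a vr_free()-based port of it would look like.  aeApiFree() and the
 * exact member set freed here follow the upstream code and are assumptions
 * about this tree, not code taken from it.
 */
void aeDeleteEventLoop(aeEventLoop *eventLoop) {
    aeApiFree(eventLoop);           /* release the poll backend state */
    vr_free(eventLoop->events);
    vr_free(eventLoop->fired);
    vr_free(eventLoop);
}
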
int
__vr_mirror_del(struct vrouter *router, unsigned int index)
{
    struct vr_nexthop *nh;
    struct vr_mirror_entry *mirror;
    struct vr_defer_data *defer;

    if (index >= router->vr_max_mirror_indices)
        return -EINVAL;

    mirror = router->vr_mirrors[index];
    if (!mirror)
        return -EINVAL;

    nh = mirror->mir_nh;
    router->vr_mirrors[index] = NULL;
    mirror->mir_nh = NULL;

    if (!vr_not_ready) {
        defer = vr_get_defer_data(sizeof(*defer));
        if (defer) {
            defer->vdd_data = (void *)mirror;
            vr_defer(router, vr_mirror_defer_delete, (void *)defer);
        } else {
            vr_delay_op();
            vr_free(mirror, VR_MIRROR_OBJECT);
        }
    } else {
        vr_free(mirror, VR_MIRROR_OBJECT);
    }

    vrouter_put_nexthop(nh);

    return 0;
}

void
vr_qos_exit(struct vrouter *router, bool soft_reset)
{
    unsigned int i;
    unsigned long size;

    if (soft_reset) {
        size = vr_qos_map_entries * sizeof(struct vr_forwarding_class *);
        if (router->vr_qos_map) {
            memset(router->vr_qos_map, 0, size);
        }

        size = vr_fc_map_entries * sizeof(struct vr_forwarding_class);
        if (router->vr_fc_table) {
            memset(router->vr_fc_table, 0, size);
        }
    } else {
        if (router->vr_qos_map) {
            for (i = 0; i < vr_qos_map_entries; i++) {
                if (router->vr_qos_map[i]) {
                    vr_free(router->vr_qos_map[i], VR_QOS_MAP_OBJECT);
                    router->vr_qos_map[i] = NULL;
                }
            }

            vr_free(router->vr_qos_map, VR_QOS_MAP_OBJECT);
            router->vr_qos_map = NULL;
        }

        if (router->vr_fc_table) {
            vr_free(router->vr_fc_table, VR_FC_OBJECT);
            router->vr_fc_table = NULL;
        }
    }

    return;
}

static void
vr_mirror_meta_destroy(struct vr_mirror_meta_entry *me)
{
    if (!me)
        return;

    if (me->mirror_md)
        vr_free(me->mirror_md, VR_MIRROR_META_OBJECT);

    vr_free(me, VR_MIRROR_META_OBJECT);

    return;
}

void redisOpArrayFree(redisOpArray *oa) {
    while(oa->numops) {
        int j;
        redisOp *op;

        oa->numops--;
        op = oa->ops+oa->numops;
        for (j = 0; j < op->argc; j++)
            decrRefCount(op->argv[j]);
        vr_free(op->argv);
    }
    vr_free(oa->ops);
}

static void
vr_bridge_table_data_destroy(vr_bridge_table_data *data)
{
    if (data) {
        if (data->btable_file_path) {
            vr_free(data->btable_file_path, VR_BRIDGE_TABLE_DATA_OBJECT);
            data->btable_file_path = NULL;
        }

        vr_free(data, VR_BRIDGE_TABLE_DATA_OBJECT);
    }

    return;
}

static void
usock_close(struct vr_usocket *usockp)
{
    int i;
    struct vr_usocket *parent;

    RTE_SET_USED(parent);

    if (!usockp)
        return;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d\n", __func__, pthread_self(),
                usockp->usock_fd);

    usock_unbind(usockp);
    usock_deinit_poll(usockp);

    for (i = 0; i < usockp->usock_cfds; i++) {
        usock_close(usockp->usock_children[i]);
    }

    RTE_LOG(DEBUG, USOCK, "%s: closing FD %d\n", __func__, usockp->usock_fd);
    close(usockp->usock_fd);

    if (!usockp->usock_mbuf_pool && usockp->usock_rx_buf) {
        vr_free(usockp->usock_rx_buf, VR_USOCK_BUF_OBJECT);
        usockp->usock_rx_buf = NULL;
    }

    if (usockp->usock_iovec) {
        vr_free(usockp->usock_iovec, VR_USOCK_IOVEC_OBJECT);
        usockp->usock_iovec = NULL;
    }

    if (usockp->usock_mbuf_pool) {
        /* no api to destroy a pool */
    }

    if (usockp->usock_proto == PACKET) {
        RTE_LOG(DEBUG, USOCK, "%s[%lx]: unlinking %s\n", __func__,
                pthread_self(), VR_PACKET_UNIX_FILE);
        unlink(VR_PACKET_UNIX_FILE);
    }

    usockp->usock_io_in_progress = 0;

    vr_free(usockp, VR_USOCK_OBJECT);

    return;
}

static void
vr_drop_stats_get(unsigned int core)
{
    int ret = 0;
    unsigned int cpu;
    struct vrouter *router = vrouter_get(0);
    vr_drop_stats_req *response = NULL;

    if (!router && (ret = -ENOENT))
        goto exit_get;

    response = vr_zalloc(sizeof(*response), VR_DROP_STATS_REQ_OBJECT);
    if (!response && (ret = -ENOMEM))
        goto exit_get;

    if (core == (unsigned)-1) {
        /* summed up stats */
        for (cpu = 0; cpu < vr_num_cpus; cpu++) {
            vr_drop_stats_add_response(response, router->vr_pdrop_stats[cpu]);
        }
    } else if (core < vr_num_cpus) {
        /* stats for a specific core */
        vr_drop_stats_add_response(response, router->vr_pdrop_stats[core]);
    }
    /* otherwise the counters will be zeros */

exit_get:
    vr_message_response(VR_DROP_STATS_OBJECT_ID, ret ? NULL : response, ret);

    if (response != NULL)
        vr_free(response, VR_DROP_STATS_REQ_OBJECT);

    return;
}

static void
vr_message_default_free(char *buf)
{
    if (buf)
        vr_free(buf);

    return;
}

static void
mtrie_free_entry(struct ip_bucket_entry *entry, unsigned int level)
{
    unsigned int i;
    struct ip_bucket *bkt;

    if (!ENTRY_IS_BUCKET(entry)) {
        if (ENTRY_IS_NEXTHOP(entry)) {
            vrouter_put_nexthop(entry->entry_nh_p);
            entry->entry_nh_p = NULL;
        } else {
            entry->entry_vdata_p = NULL;
        }

        return;
    }

    bkt = entry_to_bucket(entry);
    if (!bkt)
        return;

    for (i = 0; i < IPBUCKET_LEVEL_SIZE; i++) {
        if (ENTRY_IS_BUCKET(&bkt->bkt_data[i])) {
            mtrie_free_entry(&bkt->bkt_data[i], level + 1);
        } else {
            if (ENTRY_IS_NEXTHOP(&bkt->bkt_data[i])) {
                vrouter_put_nexthop(bkt->bkt_data[i].entry_nh_p);
            }
        }
    }

    entry->entry_bkt_p = NULL;
    vr_free(bkt, VR_MTRIE_BUCKET_OBJECT);

    return;
}

static void
vr_message_default_free(char *buf)
{
    if (buf)
        vr_free(buf, VR_MESSAGE_OBJECT);

    return;
}

struct vr_message_dumper *
vr_message_dump_init(void *req)
{
    char *buf;
    struct vr_message_dumper *dumper;
    struct vr_mproto *proto;
    struct vr_mtransport *trans;

    proto = message_h.vm_proto;
    trans = message_h.vm_trans;
    if (!proto || !trans)
        return NULL;

    dumper = vr_zalloc(sizeof(*dumper));
    if (!dumper)
        return NULL;

    buf = trans->mtrans_alloc(VR_MESSAGE_PAGE_SIZE);
    if (!buf) {
        vr_free(dumper);
        return NULL;
    }

    dumper->dump_buffer = buf;
    dumper->dump_buf_len = VR_MESSAGE_PAGE_SIZE;
    dumper->dump_offset = 0;
    dumper->dump_req = req;

    return dumper;
}

static void
vr_qos_map_delete(vr_qos_map_req *req)
{
    int ret = 0;
    struct vr_forwarding_class *fc_p;
    struct vrouter *router = vrouter_get(req->qmr_rid);

    if (req->qmr_id >= vr_qos_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p) {
        ret = 0;
        goto generate_response;
    }

    vr_qos_map_set_fc(router, req->qmr_id, NULL);
    if (vr_qos_map_free_fc_defer(router, fc_p)) {
        vr_delay_op();
        vr_free(fc_p, VR_QOS_MAP_OBJECT);
    }

    (void)vr_offload_qos_map_del(req);

generate_response:
    vr_send_response(ret);
    return;
}

static void
vr_interface_service_disable(struct vr_interface *vif)
{
    vif->vif_rx = eth_rx;

    /*
     * once everybody sees the change, we are free to do whatever
     * we want with the vrf assign table
     */
    vr_delay_op();

    /*
     * it is possible that when this function is called from
     * interface delete, the table users are +ve, and hence
     * the memory will not be freed here. our saving grace
     * is vif_free (called from last put operation), which
     * takes care of freeing the memory
     */
    if (vif->vif_vrf_table && !vif->vif_vrf_table_users) {
        vr_free(vif->vif_vrf_table);
        vif->vif_vrf_table = NULL;
    }

    return;
}

int
vr_mirror_meta_entry_set(struct vrouter *router, unsigned int index,
        unsigned int mir_sip, unsigned short mir_sport,
        void *meta_data, unsigned int meta_data_len,
        unsigned short mirror_vrf)
{
    char *buf;
    struct vr_mirror_meta_entry *me, *me_old;

    me = vr_malloc(sizeof(*me), VR_MIRROR_META_OBJECT);
    if (!me)
        return -ENOMEM;

    buf = vr_malloc(meta_data_len, VR_MIRROR_META_OBJECT);
    if (!buf) {
        vr_free(me, VR_MIRROR_META_OBJECT);
        return -ENOMEM;
    }

    memcpy(buf, meta_data, meta_data_len);
    me->mirror_router = router;
    me->mirror_md = buf;
    me->mirror_md_len = meta_data_len;
    me->mirror_sip = mir_sip;
    me->mirror_sport = mir_sport;
    me->mirror_vrf = mirror_vrf;

    me_old = vr_itable_set(router->vr_mirror_md, index, me);
    if (me_old && me_old != VR_ITABLE_ERR_PTR)
        vr_mirror_meta_entry_destroy(index, (void *)me_old);

    return 0;
}

void
vr_message_dump_exit(void *context, int ret)
{
    struct vr_mproto *proto;
    struct vr_mtransport *trans;
    struct vr_message_dumper *dumper =
        (struct vr_message_dumper *)context;

    proto = message_h.vm_proto;
    trans = message_h.vm_trans;
    if (!proto || !trans)
        return;

    if (dumper)
        ret = dumper->dump_num_dumped;

    vr_send_response(ret);

    if (dumper) {
        if (!dumper->dump_offset) {
            if (dumper->dump_buffer)
                trans->mtrans_free(dumper->dump_buffer);
        } else {
            vr_message_queue_response(dumper->dump_buffer,
                    dumper->dump_offset);
        }

        vr_free(dumper);
    }

    return;
}

static int
mtrie_dump_entry(struct vr_message_dumper *dumper,
        struct ip_bucket_entry *orig_ent, int8_t *prefix, int level)
{
    int i = 0, j, ret;
    uint32_t rt_prefix[4];
    struct ip_bucket *bkt;
    struct ip_bucket_entry *ent;
    struct mtrie_bkt_info *ip_bkt_info;
    vr_route_req *req = dumper->dump_req;
    vr_route_req dump_resp;    /* per-entry response being built */

    if (!orig_ent || level > ip_bkt_get_max_level(req->rtr_family))
        return 0;

    ip_bkt_info = ip_bkt_info_get(req->rtr_family);

    if (ENTRY_IS_BUCKET(orig_ent)) {
        bkt = entry_to_bucket(orig_ent);

        if (!dumper->dump_been_to_marker) {
            i = ip_bkt_info[level].bi_mask &
                (PREFIX_TO_INDEX(req->rtr_marker, level));
            ent = index_to_entry(bkt, i);
            prefix[level] = i;
            if (mtrie_dump_entry(dumper, ent, prefix, level + 1))
                return -1;
            i++;
        }

        j = ip_bkt_info[level].bi_size - i;
        for (; j > 0; j--, i++) {
            ent = index_to_entry(bkt, i);
            prefix[level] = i;
            if (mtrie_dump_entry(dumper, ent, prefix, level + 1) < 0)
                return -1;
        }
    } else if (orig_ent->entry_nh_p) {
        if (!dumper->dump_been_to_marker) {
            dumper->dump_been_to_marker = 1;
            return 0;
        }

        memset(rt_prefix, 0, sizeof(rt_prefix));
        dump_resp.rtr_prefix = (uint8_t *)&rt_prefix;
        mtrie_dumper_make_response(dumper, &dump_resp, orig_ent,
                prefix, ip_bkt_info[level - 1].bi_pfx_len);

        ret = mtrie_dumper_route_encode(dumper, &dump_resp);

        if (dump_resp.rtr_mac_size) {
            vr_free(dump_resp.rtr_mac, VR_ROUTE_REQ_MAC_OBJECT);
            dump_resp.rtr_mac_size = 0;
            dump_resp.rtr_mac = NULL;
        }
        dump_resp.rtr_prefix = NULL;

        if (ret <= 0)
            return -1;
    }

    return 0;
}

static void
dpdk_nl_trans_free(char *buf)
{
    /* the buffer was handed out HDR_LEN bytes past its true start,
     * so step back before freeing */
    buf -= HDR_LEN;
    vr_free(buf, VR_MESSAGE_OBJECT);

    return;
}

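/*
 * The allocation side of this transport is not part of this listing; the
 * sketch below only illustrates what dpdk_nl_trans_free() above implies:
 * HDR_LEN bytes are reserved in front of the buffer handed to callers, so
 * the free path has to step back by the same amount.  The function name and
 * the use of vr_malloc() here are assumptions, not the tree's actual code.
 */
static char *
example_nl_trans_alloc(unsigned int size)
{
    char *buf;

    buf = vr_malloc(size + HDR_LEN, VR_MESSAGE_OBJECT);
    if (!buf)
        return NULL;

    /* callers see only the payload area past the header */
    return buf + HDR_LEN;
}
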
static void
vr_mirror_defer_delete(struct vrouter *router, void *arg)
{
    struct vr_defer_data *defer = (struct vr_defer_data *)arg;

    vr_free(defer->vdd_data, VR_MIRROR_OBJECT);

    return;
}

static void
vr_malloc_stats_exit(struct vrouter *router)
{
    unsigned int i;

    if (!router->vr_malloc_stats)
        return;

    for (i = 0; i < vr_num_cpus; i++) {
        if (router->vr_malloc_stats[i]) {
            vr_free(router->vr_malloc_stats[i], VR_MALLOC_OBJECT);
            router->vr_malloc_stats[i] = NULL;
        }
    }

    vr_free(router->vr_malloc_stats, VR_MALLOC_OBJECT);

    return;
}

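/*
 * Hypothetical counterpart to vr_malloc_stats_exit() above, sketched only to
 * show the layout it tears down: a per-CPU array of pointers, each pointing
 * at a block of per-object-type counters.  The helper name, the constant
 * VR_MAX_OBJECTS, the struct vr_malloc_stats element type, and leaving the
 * unwind to the exit path are all assumptions made for illustration.
 */
static int
example_malloc_stats_init(struct vrouter *router)
{
    unsigned int i;

    router->vr_malloc_stats =
        vr_zalloc(vr_num_cpus * sizeof(struct vr_malloc_stats *),
                VR_MALLOC_OBJECT);
    if (!router->vr_malloc_stats)
        return -ENOMEM;

    for (i = 0; i < vr_num_cpus; i++) {
        router->vr_malloc_stats[i] =
            vr_zalloc(VR_MAX_OBJECTS * sizeof(struct vr_malloc_stats),
                    VR_MALLOC_OBJECT);
        if (!router->vr_malloc_stats[i])
            return -ENOMEM;    /* caller unwinds via vr_malloc_stats_exit() */
    }

    return 0;
}
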
void
vr_bitmap_delete(vr_bmap_t b)
{
    struct vr_bitmap *bmap = (struct vr_bitmap *)b;

    if (bmap)
        vr_free(bmap, VR_BITMAP_OBJECT);

    return;
}

static void
vr_pkt_drop_stats_exit(struct vrouter *router)
{
    unsigned int i;

    if (!router->vr_pdrop_stats)
        return;

    for (i = 0; i < vr_num_cpus; i++) {
        if (!router->vr_pdrop_stats[i])
            break;

        vr_free(router->vr_pdrop_stats[i]);
        router->vr_pdrop_stats[i] = NULL;
    }

    vr_free(router->vr_pdrop_stats);
    router->vr_pdrop_stats = NULL;

    return;
}

void
vr_message_free(struct vr_message *message)
{
    if (message) {
        if (message->vr_message_buf)
            vr_mtrans_free(message->vr_message_buf);

        vr_free(message);
    }

    return;
}

/* RCU callback called on packet lcore */
void
vr_dpdk_packet_rcu_cb(struct rcu_head *rh)
{
    struct vr_dpdk_rcu_cb_data *cb_data;

    cb_data = CONTAINER_OF(rcd_rcu, struct vr_dpdk_rcu_cb_data, rh);
    /* Call the user call back */
    cb_data->rcd_user_cb(cb_data->rcd_router, cb_data->rcd_user_data);
    vr_free(cb_data, VR_DEFER_OBJECT);
}

static void
vr_qos_map_free_fc_cb(struct vrouter *router, void *data)
{
    struct vr_defer_data *defer = (struct vr_defer_data *)data;

    if (!defer)
        return;

    vr_free(defer->vdd_data, VR_QOS_MAP_OBJECT);

    return;
}

void
vr_assembler_table_scan_exit(void)
{
    if (vr_assembler_table_scan_timer) {
        vr_delete_timer(vr_assembler_table_scan_timer);
        vr_free(vr_assembler_table_scan_timer, VR_TIMER_OBJECT);
        vr_assembler_table_scan_timer = NULL;
    }

    return;
}

static void
vr_fragment_queue_element_free(struct vr_fragment_queue_element *vfqe,
        unsigned int drop_reason)
{
    if (vfqe->fqe_pnode.pl_packet) {
        vr_pfree(vfqe->fqe_pnode.pl_packet, drop_reason);
    }

    vr_free(vfqe, VR_FRAGMENT_QUEUE_ELEMENT_OBJECT);

    return;
}