/*
 * Handler for a route GET request: locate the inet routing table for
 * the requested table type and look the route up in it. The result
 * (or the error) is sent back via vr_message_response().
 */
int
vr_route_get(vr_route_req *req)
{
    int ret = 0;
    struct vr_route_req vr_req;
    struct vr_rtable *rtable = NULL;
    struct vrouter *router;

    vr_req.rtr_req = *req;

    router = vrouter_get(req->rtr_rid);
    if (!router) {
        ret = -ENOENT;
        goto generate_response;
    }

    rtable = vr_get_inet_table(router, req->rtr_rt_type);
    if (!rtable) {
        ret = -ENOENT;
        goto generate_response;
    }

    ret = rtable->algo_get(vr_req.rtr_req.rtr_vrf_id, &vr_req);

generate_response:
    /* on failure, send a bare status instead of a route object */
    vr_message_response(VR_ROUTE_OBJECT_ID, ret ? NULL : &vr_req, ret);

    return ret;
}
/*
 * Handler for an MPLS label GET request: map the label to its nexthop
 * and respond with the label entry, or with an error status.
 *
 * Always returns 0; the error, if any, travels in the response.
 */
int
vr_mpls_get(vr_mpls_req *req)
{
    int ret = 0;
    struct vr_nexthop *nh = NULL;
    struct vrouter *router;

    router = vrouter_get(req->mr_rid);
    /*
     * Labels are indices in [0, vr_max_labels). The unsigned cast also
     * rejects negative labels; the previous signed '> (int)vr_max_labels'
     * comparison was off by one at the upper bound and let negative
     * labels through to vrouter_get_label(). This matches the check the
     * other vr_mpls_get() variant in this file performs.
     */
    if (!router || (unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -ENODEV;
    } else {
        nh = vrouter_get_label(req->mr_rid, req->mr_label);
        if (!nh)
            ret = -ENOENT;
    }

    if (!ret)
        vr_mpls_make_req(req, nh, req->mr_label);
    else
        req = NULL;

    vr_message_response(VR_MPLS_OBJECT_ID, req, ret);

    return 0;
}
static void vr_drop_stats_get(unsigned int core) { int ret = 0; unsigned int cpu; struct vrouter *router = vrouter_get(0); vr_drop_stats_req *response = NULL; if (!router && (ret = -ENOENT)) goto exit_get; response = vr_zalloc(sizeof(*response), VR_DROP_STATS_REQ_OBJECT); if (!response && (ret = -ENOMEM)) goto exit_get; if (core == (unsigned)-1) { /* summed up stats */ for (cpu = 0; cpu < vr_num_cpus; cpu++) { vr_drop_stats_add_response(response, router->vr_pdrop_stats[cpu]); } } else if (core < vr_num_cpus) { /* stats for a specific core */ vr_drop_stats_add_response(response, router->vr_pdrop_stats[core]); } /* otherwise the counters will be zeros */ exit_get: vr_message_response(VR_DROP_STATS_OBJECT_ID, ret ? NULL : response, ret); if (response != NULL) vr_free(response, VR_DROP_STATS_REQ_OBJECT); return; }
/* * sandesh handler for vr_flow_req */ void vr_flow_req_process(void *s_req) { int ret = 0; struct vrouter *router; vr_flow_req *req = (vr_flow_req *)s_req; router = vrouter_get(req->fr_rid); switch (req->fr_op) { case FLOW_OP_FLOW_TABLE_GET: req->fr_ftable_size = vr_flow_table_size(router) + vr_oflow_table_size(router); #ifdef __KERNEL__ req->fr_ftable_dev = vr_flow_major; #endif break; case FLOW_OP_FLOW_SET: ret = vr_flow_set(router, req); break; default: ret = -EINVAL; } vr_message_response(VR_FLOW_OBJECT_ID, req, ret); return; }
/*
 * Handler for an MPLS label GET: validate the router and the label
 * range, look up the nexthop programmed for the label, and respond.
 * Always returns 0; failures are reported through the response.
 */
int
vr_mpls_get(vr_mpls_req *req)
{
    struct vrouter *router;
    struct vr_nexthop *nh = NULL;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -ENODEV;
        goto generate_response;
    }

    /* labels live in [0, vr_max_labels); the cast rejects negatives too */
    if (((unsigned int)req->mr_label >= router->vr_max_labels)) {
        ret = -EINVAL;
        goto generate_response;
    }

    nh = vrouter_get_label(req->mr_rid, req->mr_label);
    if (!nh) {
        ret = -ENOENT;
        goto generate_response;
    }

    vr_mpls_make_req(req, nh, req->mr_label);

generate_response:
    /* on any failure, send a bare status instead of the request object */
    if (ret)
        req = NULL;
    vr_message_response(VR_MPLS_OBJECT_ID, req, ret);

    return 0;
}
/*
 * Broadcast an object to all netlink listeners, when the transport
 * supports it. Only operations that succeeded (code >= 0) are
 * broadcast; error codes are simply handed back to the caller.
 */
int
vr_send_broadcast(unsigned int object_type, void *object,
        unsigned int sandesh_op, int code)
{
    if (!vr_nl_broadcast_supported)
        return 0;

    if (code < 0)
        return code;

    // We only broadcast requests that have succeeded
    return vr_message_response(object_type, object, code, true);
}
/*
 * Handler for a forwarding-class map GET: look up one forwarding class
 * by id and send its QoS mappings (DSCP, MPLS EXP, 802.1p, queue)
 * back to the requester.
 */
static void
vr_fc_map_get(vr_fc_map_req *req)
{
    vr_fc_map_req *resp = NULL;
    struct vr_forwarding_class *fc_p;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    int ret = 0;

    /* the id array must be present and the id within the map's range */
    if (!req->fmr_id || req->fmr_id[0] >= vr_fc_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_fc_map_get_fc(router, req->fmr_id[0]);
    if (!fc_p || !fc_p->vfc_valid) {
        ret = -ENOENT;
        goto generate_response;
    }

    resp = vr_fc_map_req_get(1);
    if (!resp) {
        ret = -ENOMEM;
        goto generate_response;
    }

    resp->fmr_id[0] = req->fmr_id[0];
    resp->fmr_dscp[0] = fc_p->vfc_dscp;
    resp->fmr_mpls_qos[0] = fc_p->vfc_mpls_qos;
    resp->fmr_dotonep[0] = fc_p->vfc_dotonep_qos;
    resp->fmr_queue_id[0] = fc_p->vfc_queue_id;

    /* Debug comparison to check if matching entry is programmed on NIC */
    (void)vr_offload_fc_map_get(resp);

generate_response:
    vr_message_response(VR_FC_MAP_OBJECT_ID, ret < 0 ? NULL : resp, ret, false);
    if (resp)
        vr_fc_map_req_destroy(resp);

    return;
}
void vr_bridge_table_data_process(void *s_req) { int ret = 0; struct vrouter *router; vr_bridge_table_data *resp = NULL, *req = (vr_bridge_table_data *)s_req; router = vrouter_get(req->btable_rid); if (!router) { ret = -EINVAL; goto generate_response; } resp = vr_bridge_table_data_get(); if (!resp) { ret = -ENOMEM; goto generate_response; } resp->btable_op = req->btable_op; switch (req->btable_op) { case SANDESH_OP_GET: resp->btable_size = vr_bridge_table_size(router); #if defined(__linux__) && defined(__KERNEL__) resp->btable_dev = vr_bridge_table_major; #endif if (vr_bridge_table_path) { strncpy(resp->btable_file_path, vr_bridge_table_path, VR_UNIX_PATH_MAX - 1); } break; default: ret = -EINVAL; break; } generate_response: vr_message_response(VR_BRIDGE_TABLE_DATA_OBJECT_ID, resp, ret); if (resp) { vr_bridge_table_data_destroy(resp); resp = NULL; } return; }
/*
 * Handler for a QoS map GET: look up the forwarding class bound to the
 * requested QoS map id and respond with its mappings. Errors are sent
 * as a bare status via vr_send_response().
 */
static void
vr_qos_map_get(vr_qos_map_req *req)
{
    int ret = 0;
    vr_qos_map_req *resp;
    struct vrouter *router = vrouter_get(req->qmr_rid);
    struct vr_forwarding_class *fc_p = NULL;

    /*
     * Fix: 'router' was previously handed to vr_qos_map_get_fc()
     * unchecked; a request with a bad rid would pass it a NULL router.
     */
    if (!router) {
        ret = -EINVAL;
        goto get_error;
    }

    if (req->qmr_id >= vr_qos_map_entries) {
        ret = -EINVAL;
        goto get_error;
    }

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p) {
        ret = -ENOENT;
        goto get_error;
    }

    resp = vr_qos_map_req_get();
    if (!resp) {
        ret = -ENOMEM;
        goto get_error;
    }

    vr_qos_map_make_req(req->qmr_id, resp, fc_p);

    /* Debug comparison to check if matching entry is programmed on NIC */
    (void)vr_offload_qos_map_get(resp);

    vr_message_response(VR_QOS_MAP_OBJECT_ID, resp, ret, false);
    /* resp is non-NULL here; the old 'if (resp)' guard was redundant */
    vr_qos_map_req_destroy(resp);

    return;

get_error:
    vr_send_response(ret);
    return;
}
/*
 * Handler for a route GET request carrying a variable-size prefix.
 * The request is copied locally, its prefix re-pointed at a local
 * buffer, and the route looked up in the router's inet table.
 */
int
vr_route_get(vr_route_req *req)
{
    struct vr_route_req vr_req;
    struct vrouter *router;
    int ret = 0;
    /* 16 bytes: large enough for an IPv6 address */
    uint32_t rt_prefix[4];
    struct vr_rtable *rtable;

    vr_req.rtr_req = *req;
    /* a plain GET carries no dump marker */
    vr_req.rtr_req.rtr_marker_size = 0;
    vr_req.rtr_req.rtr_prefix_size = req->rtr_prefix_size;
    if (req->rtr_prefix_size) {
        /*
         * NOTE(review): the copy length comes from the address family,
         * not from rtr_prefix_size — assumes the caller always supplies
         * at least RT_IP_ADDR_SIZE(family) bytes; confirm upstream
         * validation before relying on this.
         */
        vr_req.rtr_req.rtr_prefix = (uint8_t*)&rt_prefix;
        memcpy(vr_req.rtr_req.rtr_prefix, req->rtr_prefix,
                RT_IP_ADDR_SIZE(req->rtr_family));
    } else
        vr_req.rtr_req.rtr_prefix = NULL;

    router = vrouter_get(req->rtr_rid);
    if (!router) {
        ret = -ENOENT;
        goto generate_response;
    } else {
        rtable = router->vr_inet_rtable;
        if (!rtable) {
            ret = -ENOENT;
            goto generate_response;
        }

        ret = rtable->algo_get(vr_req.rtr_req.rtr_vrf_id, &vr_req);
    }

generate_response:
    /* on failure, send a bare status instead of a route object */
    vr_message_response(VR_ROUTE_OBJECT_ID, ret ? NULL : &vr_req, ret);

    return ret;
}
/*
 * Collect per-VRF statistics from the router's inet routing table and
 * send them back to the requester.
 */
static void
vr_inet_vrf_stats_get(struct vrouter *router, vr_vrf_stats_req *req)
{
    vr_vrf_stats_req response;
    struct vr_rtable *rtable;
    int ret = 0;

    rtable = router->vr_inet_rtable;
    if (!rtable) {
        ret = -ENOENT;
        goto generate_error;
    }

    /*
     * A non-negative vrf id must fall inside the table's range;
     * negative ids deliberately bypass the check — presumably a
     * wildcard understood by algo_stats_get().
     */
    if (req->vsr_vrf >= 0 &&
            (unsigned int)req->vsr_vrf >= rtable->algo_max_vrfs) {
        ret = -EINVAL;
        goto generate_error;
    }

    ret = rtable->algo_stats_get(req, &response);

generate_error:
    vr_message_response(VR_VRF_STATS_OBJECT_ID, ret ? NULL : &response, ret);

    return;
}
/*
 * Handler for a mirror-entry GET: respond with the mirror entry at the
 * requested index, or with an error status if it does not exist.
 */
static int
vr_mirror_get(vr_mirror_req *req)
{
    struct vr_mirror_entry *mirror = NULL;
    struct vrouter *router;
    int ret = 0;

    router = vrouter_get(req->mirr_rid);
    /* the unsigned cast also rejects negative indices */
    if (!router ||
            (unsigned int)req->mirr_index >= router->vr_max_mirror_indices) {
        ret = -ENODEV;
    } else {
        mirror = router->vr_mirrors[req->mirr_index];
        if (!mirror)
            ret = -ENOENT;
    }

    if (!mirror)
        req = NULL;
    else
        vr_mirror_make_req(req, mirror, req->mirr_index);

    return vr_message_response(VR_MIRROR_OBJECT_ID, req, ret, false);
}
/*
 * Send a bare status response (no object payload) back to the
 * requester; 'code' is 0 on success or a negative errno value.
 */
int
vr_send_response(int code)
{
    return vr_message_response(VR_NULL_OBJECT_ID, NULL, code);
}
static void vr_drop_stats_get(unsigned int core) { int ret = 0; unsigned int cpu; struct vrouter *router = vrouter_get(0); vr_drop_stats_req *response = NULL; struct vr_drop_stats *stats_block, *stats = NULL; if (!router && (ret = -ENOENT)) goto exit_get; stats = vr_zalloc(sizeof(*stats)); if (!stats && (ret = -ENOMEM)) goto exit_get; response = vr_zalloc(sizeof(*response)); if (!response && (ret = -ENOMEM)) goto exit_get; if (core == 0) { /* user or agent wants summed up stats */ for (cpu = 0; cpu < vr_num_cpus; cpu++) { stats_block = (struct vr_drop_stats *)router->vr_pdrop_stats[cpu]; stats->vds_discard += stats_block->vds_discard; stats->vds_pull += stats_block->vds_pull; stats->vds_invalid_if += stats_block->vds_invalid_if; stats->vds_arp_no_where_to_go += stats_block->vds_arp_no_where_to_go; stats->vds_garp_from_vm += stats_block->vds_garp_from_vm; stats->vds_invalid_arp += stats_block->vds_invalid_arp; stats->vds_trap_no_if += stats_block->vds_trap_no_if; stats->vds_nowhere_to_go += stats_block->vds_nowhere_to_go; stats->vds_flow_queue_limit_exceeded += stats_block->vds_flow_queue_limit_exceeded; stats->vds_flow_no_memory += stats_block->vds_flow_no_memory; stats->vds_flow_invalid_protocol += stats_block->vds_flow_invalid_protocol; stats->vds_flow_nat_no_rflow += stats_block->vds_flow_nat_no_rflow; stats->vds_flow_action_drop += stats_block->vds_flow_action_drop; stats->vds_flow_action_invalid += stats_block->vds_flow_action_invalid; stats->vds_flow_unusable += stats_block->vds_flow_unusable; stats->vds_flow_table_full += stats_block->vds_flow_table_full; stats->vds_interface_tx_discard += stats_block->vds_interface_tx_discard; stats->vds_interface_drop += stats_block->vds_interface_drop; stats->vds_duplicated += stats_block->vds_duplicated; stats->vds_push += stats_block->vds_push; stats->vds_ttl_exceeded += stats_block->vds_ttl_exceeded; stats->vds_invalid_nh += stats_block->vds_invalid_nh; stats->vds_invalid_label += stats_block->vds_invalid_label; 
stats->vds_invalid_protocol += stats_block->vds_invalid_protocol; stats->vds_interface_rx_discard += stats_block->vds_interface_rx_discard; stats->vds_invalid_mcast_source += stats_block->vds_invalid_mcast_source; stats->vds_head_alloc_fail += stats_block->vds_head_alloc_fail; stats->vds_head_space_reserve_fail += stats_block->vds_head_space_reserve_fail; stats->vds_pcow_fail += stats_block->vds_pcow_fail; stats->vds_mcast_df_bit += stats_block->vds_mcast_df_bit; stats->vds_mcast_clone_fail += stats_block->vds_mcast_clone_fail; stats->vds_composite_invalid_interface += stats_block->vds_composite_invalid_interface; stats->vds_rewrite_fail += stats_block->vds_rewrite_fail; stats->vds_misc += stats_block->vds_misc; stats->vds_invalid_packet += stats_block->vds_invalid_packet; stats->vds_cksum_err += stats_block->vds_cksum_err; stats->vds_clone_fail += stats_block->vds_clone_fail; stats->vds_no_fmd += stats_block->vds_no_fmd; stats->vds_cloned_original += stats_block->vds_cloned_original; stats->vds_invalid_vnid += stats_block->vds_invalid_vnid; stats->vds_frag_err += stats_block->vds_frag_err; stats->vds_invalid_source += stats_block->vds_invalid_source; stats->vds_arp_no_route += stats_block->vds_arp_no_route; stats->vds_l2_no_route += stats_block->vds_l2_no_route; stats->vds_arp_reply_no_route += stats_block->vds_arp_reply_no_route; } } else if (core > 0) { /** * What user really asks for is real_core = core - 1. * If a request came for stats for 1st core, it means user * asked for 0th core. If request was made for 2nd, user wanted * the 1st, and so on. * * TODO: This would be much simplier if agent could explicitly ask * for stats for 'minus 1st' core, meaning 'all the cores'. 
*/ stats_block = (struct vr_drop_stats *)router->vr_pdrop_stats[core - 1]; stats->vds_discard = stats_block->vds_discard; stats->vds_pull = stats_block->vds_pull; stats->vds_invalid_if = stats_block->vds_invalid_if; stats->vds_arp_no_where_to_go = stats_block->vds_arp_no_where_to_go; stats->vds_garp_from_vm = stats_block->vds_garp_from_vm; stats->vds_invalid_arp = stats_block->vds_invalid_arp; stats->vds_trap_no_if = stats_block->vds_trap_no_if; stats->vds_nowhere_to_go = stats_block->vds_nowhere_to_go; stats->vds_flow_queue_limit_exceeded = stats_block->vds_flow_queue_limit_exceeded; stats->vds_flow_no_memory = stats_block->vds_flow_no_memory; stats->vds_flow_invalid_protocol = stats_block->vds_flow_invalid_protocol; stats->vds_flow_nat_no_rflow = stats_block->vds_flow_nat_no_rflow; stats->vds_flow_action_drop = stats_block->vds_flow_action_drop; stats->vds_flow_action_invalid = stats_block->vds_flow_action_invalid; stats->vds_flow_unusable = stats_block->vds_flow_unusable; stats->vds_flow_table_full = stats_block->vds_flow_table_full; stats->vds_interface_tx_discard = stats_block->vds_interface_tx_discard; stats->vds_interface_drop = stats_block->vds_interface_drop; stats->vds_duplicated = stats_block->vds_duplicated; stats->vds_push = stats_block->vds_push; stats->vds_ttl_exceeded = stats_block->vds_ttl_exceeded; stats->vds_invalid_nh = stats_block->vds_invalid_nh; stats->vds_invalid_label = stats_block->vds_invalid_label; stats->vds_invalid_protocol = stats_block->vds_invalid_protocol; stats->vds_interface_rx_discard = stats_block->vds_interface_rx_discard; stats->vds_invalid_mcast_source = stats_block->vds_invalid_mcast_source; stats->vds_head_alloc_fail = stats_block->vds_head_alloc_fail; stats->vds_head_space_reserve_fail = stats_block->vds_head_space_reserve_fail; stats->vds_pcow_fail = stats_block->vds_pcow_fail; stats->vds_mcast_df_bit = stats_block->vds_mcast_df_bit; stats->vds_mcast_clone_fail = stats_block->vds_mcast_clone_fail; 
stats->vds_composite_invalid_interface = stats_block->vds_composite_invalid_interface; stats->vds_rewrite_fail = stats_block->vds_rewrite_fail; stats->vds_misc = stats_block->vds_misc; stats->vds_invalid_packet = stats_block->vds_invalid_packet; stats->vds_cksum_err = stats_block->vds_cksum_err; stats->vds_clone_fail = stats_block->vds_clone_fail; stats->vds_no_fmd = stats_block->vds_no_fmd; stats->vds_cloned_original = stats_block->vds_cloned_original; stats->vds_invalid_vnid = stats_block->vds_invalid_vnid; stats->vds_frag_err = stats_block->vds_frag_err; stats->vds_invalid_source = stats_block->vds_invalid_source; stats->vds_arp_no_route = stats_block->vds_arp_no_route; stats->vds_l2_no_route = stats_block->vds_l2_no_route; stats->vds_arp_reply_no_route = stats_block->vds_arp_reply_no_route; } vr_drop_stats_fill_response(response, stats); exit_get: vr_message_response(VR_DROP_STATS_OBJECT_ID, ret ? NULL : response, ret); if (stats != NULL) vr_free(stats); if (response != NULL) vr_free(response); return; }
/*
 * Handler for a memory-statistics GET: walk every CPU's per-object
 * allocation counters and report, per object type, the number of
 * outstanding allocations (allocs minus frees), plus file-wide
 * alloc/free totals.
 */
static void
vr_mem_stats_get(void)
{
    int ret = 0;
    unsigned int cpu, i;
    int64_t alloced = 0, freed = 0;
    struct vrouter *router = vrouter_get(0);
    struct vr_malloc_stats *stats_block;
    vr_mem_stats_req *response = NULL;

    /* -ENOENT is non-zero, so the assignment keeps the condition true */
    if (!router && (ret = -ENOENT))
        goto exit_get;

    response = vr_zalloc(sizeof(*response), VR_MEM_STATS_REQ_OBJECT);
    if (!response && (ret = -ENOMEM))
        goto exit_get;

    /* accumulate each object type's outstanding count across all CPUs */
    for (cpu = 0; cpu < vr_num_cpus; cpu++) {
        stats_block = (struct vr_malloc_stats *)router->vr_malloc_stats[cpu];
        response->vms_assembler_table_object +=
            (stats_block[VR_ASSEMBLER_TABLE_OBJECT].ms_alloc -
             stats_block[VR_ASSEMBLER_TABLE_OBJECT].ms_free);
        response->vms_bridge_mac_object +=
            (stats_block[VR_BRIDGE_MAC_OBJECT].ms_alloc -
             stats_block[VR_BRIDGE_MAC_OBJECT].ms_free);
        response->vms_btable_object +=
            (stats_block[VR_BTABLE_OBJECT].ms_alloc -
             stats_block[VR_BTABLE_OBJECT].ms_free);
        response->vms_build_info_object +=
            (stats_block[VR_BUILD_INFO_OBJECT].ms_alloc -
             stats_block[VR_BUILD_INFO_OBJECT].ms_free);
        response->vms_defer_object +=
            (stats_block[VR_DEFER_OBJECT].ms_alloc -
             stats_block[VR_DEFER_OBJECT].ms_free);
        response->vms_drop_stats_object +=
            (stats_block[VR_DROP_STATS_OBJECT].ms_alloc -
             stats_block[VR_DROP_STATS_OBJECT].ms_free);
        response->vms_drop_stats_req_object +=
            (stats_block[VR_DROP_STATS_REQ_OBJECT].ms_alloc -
             stats_block[VR_DROP_STATS_REQ_OBJECT].ms_free);
        response->vms_flow_queue_object +=
            (stats_block[VR_FLOW_QUEUE_OBJECT].ms_alloc -
             stats_block[VR_FLOW_QUEUE_OBJECT].ms_free);
        response->vms_flow_req_object +=
            (stats_block[VR_FLOW_REQ_OBJECT].ms_alloc -
             stats_block[VR_FLOW_REQ_OBJECT].ms_free);
        response->vms_flow_req_path_object +=
            (stats_block[VR_FLOW_REQ_PATH_OBJECT].ms_alloc -
             stats_block[VR_FLOW_REQ_PATH_OBJECT].ms_free);
        response->vms_flow_hold_stat_object +=
            (stats_block[VR_FLOW_HOLD_STAT_OBJECT].ms_alloc -
             stats_block[VR_FLOW_HOLD_STAT_OBJECT].ms_free);
        response->vms_flow_link_local_object +=
            (stats_block[VR_FLOW_LINK_LOCAL_OBJECT].ms_alloc -
             stats_block[VR_FLOW_LINK_LOCAL_OBJECT].ms_free);
        response->vms_flow_metadata_object +=
            (stats_block[VR_FLOW_METADATA_OBJECT].ms_alloc -
             stats_block[VR_FLOW_METADATA_OBJECT].ms_free);
        response->vms_flow_table_info_object +=
            (stats_block[VR_FLOW_TABLE_INFO_OBJECT].ms_alloc -
             stats_block[VR_FLOW_TABLE_INFO_OBJECT].ms_free);
        response->vms_fragment_object +=
            (stats_block[VR_FRAGMENT_OBJECT].ms_alloc -
             stats_block[VR_FRAGMENT_OBJECT].ms_free);
        response->vms_fragment_queue_object +=
            (stats_block[VR_FRAGMENT_QUEUE_OBJECT].ms_alloc -
             stats_block[VR_FRAGMENT_QUEUE_OBJECT].ms_free);
        response->vms_fragment_queue_element_object +=
            (stats_block[VR_FRAGMENT_QUEUE_ELEMENT_OBJECT].ms_alloc -
             stats_block[VR_FRAGMENT_QUEUE_ELEMENT_OBJECT].ms_free);
        response->vms_fragment_scanner_object +=
            (stats_block[VR_FRAGMENT_SCANNER_OBJECT].ms_alloc -
             stats_block[VR_FRAGMENT_SCANNER_OBJECT].ms_free);
        response->vms_hpacket_pool_object +=
            (stats_block[VR_HPACKET_POOL_OBJECT].ms_alloc -
             stats_block[VR_HPACKET_POOL_OBJECT].ms_free);
        response->vms_htable_object +=
            (stats_block[VR_HTABLE_OBJECT].ms_alloc -
             stats_block[VR_HTABLE_OBJECT].ms_free);
        response->vms_interface_object +=
            (stats_block[VR_INTERFACE_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_OBJECT].ms_free);
        response->vms_interface_mac_object +=
            (stats_block[VR_INTERFACE_MAC_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_MAC_OBJECT].ms_free);
        response->vms_interface_req_object +=
            (stats_block[VR_INTERFACE_REQ_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_REQ_OBJECT].ms_free);
        response->vms_interface_req_mac_object +=
            (stats_block[VR_INTERFACE_REQ_MAC_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_REQ_MAC_OBJECT].ms_free);
        response->vms_interface_req_name_object +=
            (stats_block[VR_INTERFACE_REQ_NAME_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_REQ_NAME_OBJECT].ms_free);
        response->vms_interface_stats_object +=
            (stats_block[VR_INTERFACE_STATS_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_STATS_OBJECT].ms_free);
        response->vms_interface_table_object +=
            (stats_block[VR_INTERFACE_TABLE_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_TABLE_OBJECT].ms_free);
        response->vms_interface_vrf_table_object +=
            (stats_block[VR_INTERFACE_VRF_TABLE_OBJECT].ms_alloc -
             stats_block[VR_INTERFACE_VRF_TABLE_OBJECT].ms_free);
        response->vms_itable_object +=
            (stats_block[VR_ITABLE_OBJECT].ms_alloc -
             stats_block[VR_ITABLE_OBJECT].ms_free);
        response->vms_malloc_object +=
            (stats_block[VR_MALLOC_OBJECT].ms_alloc -
             stats_block[VR_MALLOC_OBJECT].ms_free);
        response->vms_message_object +=
            (stats_block[VR_MESSAGE_OBJECT].ms_alloc -
             stats_block[VR_MESSAGE_OBJECT].ms_free);
        response->vms_message_response_object +=
            (stats_block[VR_MESSAGE_RESPONSE_OBJECT].ms_alloc -
             stats_block[VR_MESSAGE_RESPONSE_OBJECT].ms_free);
        response->vms_message_dump_object +=
            (stats_block[VR_MESSAGE_DUMP_OBJECT].ms_alloc -
             stats_block[VR_MESSAGE_DUMP_OBJECT].ms_free);
        response->vms_mem_stats_req_object +=
            (stats_block[VR_MEM_STATS_REQ_OBJECT].ms_alloc -
             stats_block[VR_MEM_STATS_REQ_OBJECT].ms_free);
        response->vms_mirror_object +=
            (stats_block[VR_MIRROR_OBJECT].ms_alloc -
             stats_block[VR_MIRROR_OBJECT].ms_free);
        response->vms_mirror_table_object +=
            (stats_block[VR_MIRROR_TABLE_OBJECT].ms_alloc -
             stats_block[VR_MIRROR_TABLE_OBJECT].ms_free);
        response->vms_mirror_meta_object +=
            (stats_block[VR_MIRROR_META_OBJECT].ms_alloc -
             stats_block[VR_MIRROR_META_OBJECT].ms_free);
        response->vms_mtrie_object +=
            (stats_block[VR_MTRIE_OBJECT].ms_alloc -
             stats_block[VR_MTRIE_OBJECT].ms_free);
        response->vms_mtrie_bucket_object +=
            (stats_block[VR_MTRIE_BUCKET_OBJECT].ms_alloc -
             stats_block[VR_MTRIE_BUCKET_OBJECT].ms_free);
        response->vms_mtrie_stats_object +=
            (stats_block[VR_MTRIE_STATS_OBJECT].ms_alloc -
             stats_block[VR_MTRIE_STATS_OBJECT].ms_free);
        response->vms_mtrie_table_object +=
            (stats_block[VR_MTRIE_TABLE_OBJECT].ms_alloc -
             stats_block[VR_MTRIE_TABLE_OBJECT].ms_free);
        response->vms_nexthop_object +=
            (stats_block[VR_NEXTHOP_OBJECT].ms_alloc -
             stats_block[VR_NEXTHOP_OBJECT].ms_free);
        response->vms_nexthop_component_object +=
            (stats_block[VR_NEXTHOP_COMPONENT_OBJECT].ms_alloc -
             stats_block[VR_NEXTHOP_COMPONENT_OBJECT].ms_free);
        response->vms_nexthop_req_list_object +=
            (stats_block[VR_NEXTHOP_REQ_LIST_OBJECT].ms_alloc -
             stats_block[VR_NEXTHOP_REQ_LIST_OBJECT].ms_free);
        response->vms_nexthop_req_encap_object +=
            (stats_block[VR_NEXTHOP_REQ_ENCAP_OBJECT].ms_alloc -
             stats_block[VR_NEXTHOP_REQ_ENCAP_OBJECT].ms_free);
        response->vms_nexthop_req_object +=
            (stats_block[VR_NEXTHOP_REQ_OBJECT].ms_alloc -
             stats_block[VR_NEXTHOP_REQ_OBJECT].ms_free);
        response->vms_route_table_object +=
            (stats_block[VR_ROUTE_TABLE_OBJECT].ms_alloc -
             stats_block[VR_ROUTE_TABLE_OBJECT].ms_free);
        response->vms_route_req_mac_object +=
            (stats_block[VR_ROUTE_REQ_MAC_OBJECT].ms_alloc -
             stats_block[VR_ROUTE_REQ_MAC_OBJECT].ms_free);
        response->vms_timer_object +=
            (stats_block[VR_TIMER_OBJECT].ms_alloc -
             stats_block[VR_TIMER_OBJECT].ms_free);
        response->vms_usock_object +=
            (stats_block[VR_USOCK_OBJECT].ms_alloc -
             stats_block[VR_USOCK_OBJECT].ms_free);
        response->vms_usock_poll_object +=
            (stats_block[VR_USOCK_POLL_OBJECT].ms_alloc -
             stats_block[VR_USOCK_POLL_OBJECT].ms_free);
        response->vms_usock_buf_object +=
            (stats_block[VR_USOCK_BUF_OBJECT].ms_alloc -
             stats_block[VR_USOCK_BUF_OBJECT].ms_free);
        response->vms_usock_iovec_object +=
            (stats_block[VR_USOCK_IOVEC_OBJECT].ms_alloc -
             stats_block[VR_USOCK_IOVEC_OBJECT].ms_free);
        response->vms_vrouter_req_object +=
            (stats_block[VR_VROUTER_REQ_OBJECT].ms_alloc -
             stats_block[VR_VROUTER_REQ_OBJECT].ms_free);
        /* grand totals across every object type on this CPU */
        for (i = 0; i < VR_VROUTER_MAX_OBJECT; i++) {
            alloced += stats_block[i].ms_alloc;
            freed += stats_block[i].ms_free;
        }
    }
    response->vms_alloced = alloced;
    response->vms_freed = freed;

exit_get:
    vr_message_response(VR_MEM_STATS_OBJECT_ID, ret ? NULL : response, ret);
    if (response != NULL)
        vr_free(response, VR_MEM_STATS_REQ_OBJECT);

    return;
}