/*
 * agent_rx - receive handler for packets injected by the agent interface.
 *
 * The agent prepends an ethernet header and a struct agent_hdr to each
 * injected packet. Both are pulled off here; the command in the agent
 * header decides whether the packet re-enters the forwarding path
 * (AGENT_CMD_ROUTE) or is transmitted directly out of the interface
 * named by hdr_ifindex.
 *
 * Returns 0 (packet consumed or handed off) or the result of vif_tx.
 */
static int
agent_rx(struct vr_interface *vif, struct vr_packet *pkt,
        unsigned short vlan_id __attribute__((unused)))
{
    struct agent_hdr *hdr;
    struct vr_interface *agent_vif;
    struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu);

    /* account the packet against the agent interface before parsing */
    stats->vis_ibytes += pkt_len(pkt);
    stats->vis_ipackets++;

    /* strip the outer ethernet header, then the agent header itself */
    hdr = (struct agent_hdr *)pkt_pull(pkt, sizeof(struct vr_eth));
    if (!hdr || !pkt_pull(pkt, sizeof(*hdr))) {
        /* packet too short to carry both headers - drop */
        stats->vis_ierrors++;
        vr_pfree(pkt, VP_DROP_PULL);
        return 0;
    }

    /*
     * Update the original (OS visible) packet to point to the
     * l2 header of the injected packet
     */
    vr_pset_data(pkt, pkt->vp_data);

    if (ntohs(hdr->hdr_cmd) & AGENT_CMD_ROUTE) {
        /*
         * XXX
         * Packet with command "route" from agent may
         * result in flow setup, this breaks the
         * assumption that all packets for a flow will
         * reach same CPU. Need a better way to handle this
         */
        agent_vif = __vrouter_get_interface(vrouter_get(0),
                ntohs(hdr->hdr_ifindex));
        if (!agent_vif) {
            /* unknown ifindex: fall back to the agent vif itself */
            agent_vif = vif;
        }
        pkt->vp_if = agent_vif;
        /* re-inject into the forwarding path in the requested vrf */
        vr_interface_input(ntohs(hdr->hdr_vrf), agent_vif, pkt,
                VLAN_ID_INVALID);
    } else {
        /* direct transmit: resolve the target interface from the header */
        vif = __vrouter_get_interface(vrouter_get(0),
                ntohs(hdr->hdr_ifindex));
        if (!vif) {
            stats->vis_ierrors++;
            vr_pfree(pkt, VP_DROP_INVALID_IF);
            return 0;
        }

        pkt->vp_type = VP_TYPE_AGENT;
        /* network headers sit right after the (re-exposed) l2 header */
        pkt_set_network_header(pkt, pkt->vp_data + sizeof(struct vr_eth));
        pkt_set_inner_network_header(pkt,
                pkt->vp_data + sizeof(struct vr_eth));
        return vif->vif_tx(vif, pkt);
    }

    return 0;
}
/*
 * vr_mpls_del - sandesh handler that removes the ILM entry for the
 * label in the request. Validates the router id and the label range,
 * delegates the actual removal to __vr_mpls_del, and always sends a
 * response carrying the result.
 */
int
vr_mpls_del(vr_mpls_req *req)
{
    struct vrouter *router = vrouter_get(req->mr_rid);
    int ret;

    if (!router || ((unsigned int)req->mr_label >= router->vr_max_labels))
        ret = -EINVAL;
    else
        ret = __vr_mpls_del(router, req->mr_label);

    vr_send_response(ret);
    return ret;
}
/*
 * vrouter_get_label - convenience wrapper: look up the router for rid
 * and resolve the nexthop installed for the given MPLS label.
 */
static struct vr_nexthop *
vrouter_get_label(unsigned int rid, unsigned int label)
{
    return __vrouter_get_label(vrouter_get(rid), label);
}
/*
 * vr_mpls_get - sandesh handler that returns the MPLS request filled in
 * with the nexthop bound to the requested label. On any failure the
 * response carries a NULL object and the error code:
 *   -ENODEV  unknown router id
 *   -EINVAL  label outside the ILM table
 *   -ENOENT  no nexthop installed for the label
 */
int
vr_mpls_get(vr_mpls_req *req)
{
    struct vrouter *router = vrouter_get(req->mr_rid);
    struct vr_nexthop *nh = NULL;
    int ret = 0;

    if (!router) {
        ret = -ENODEV;
    } else if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
    } else {
        nh = vrouter_get_label(req->mr_rid, req->mr_label);
        if (!nh)
            ret = -ENOENT;
    }

    if (!ret)
        vr_mpls_make_req(req, nh, req->mr_label);
    else
        req = NULL;

    vr_message_response(VR_MPLS_OBJECT_ID, req, ret);
    return 0;
}
/*
 * vr_dpdk_packet_wakeup - wake the packet (pkt0) thread by writing to
 * its eventfd socket. A NULL vif falls back to the global agent
 * interface so the syscall counter can still be attributed. On a
 * failed write the event socket is closed and cleared, disabling
 * further wakeups.
 */
void
vr_dpdk_packet_wakeup(struct vr_interface *vif)
{
    struct vr_interface_stats *stats;
    struct vrouter *router;

    if (unlikely(vif == NULL)) {
        /* get global agent vif */
        router = vrouter_get(0);
        vif = router->vr_agent_if;
    }

    if (likely(vr_dpdk.packet_event_sock != NULL)) {
        if (likely(vif != NULL)) {
            /* count the wakeup syscall on this lcore's stats */
            stats = vif_get_stats(vif, rte_lcore_id());
            stats->vis_port_osyscalls++;
        } else {
            /* no agent interface - no counter */
        }
        if (vr_usocket_eventfd_write(vr_dpdk.packet_event_sock) < 0) {
            /* write failed: tear down the event socket */
            vr_usocket_close(vr_dpdk.packet_event_sock);
            vr_dpdk.packet_event_sock = NULL;
        }
    }
}
/*
 * vr_vrf_stats_op - dispatch a VRF stats request by address family.
 * For AF_INET the per-family handler emits its own response; every
 * error path responds with -EINVAL here.
 */
static void
vr_vrf_stats_op(vr_vrf_stats_req *req)
{
    struct vrouter *router = vrouter_get(req->vsr_rid);

    if (!router) {
        vr_send_response(-EINVAL);
        return;
    }

    switch (req->vsr_family) {
    case AF_INET:
        /* handler generates the response itself */
        vr_inet_vrf_stats_op(router, req);
        return;
    default:
        vr_send_response(-EINVAL);
        return;
    }
}
/*
 * vr_mpls_add - sandesh handler that installs a label -> nexthop
 * binding in the ILM table. Takes a reference on the nexthop via
 * vrouter_get_nexthop and drops the reference held by any previously
 * installed nexthop for the same label. Always sends a response.
 */
int
vr_mpls_add(vr_mpls_req *req)
{
    struct vrouter *router;
    struct vr_nexthop *nh, *old_nh;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /*
     * was '>': a label equal to vr_max_labels would index one slot past
     * the end of vr_ilm[] (valid indices are 0 .. vr_max_labels - 1)
     */
    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    nh = vrouter_get_nexthop(req->mr_rid, req->mr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /*
     * release the reference held by the nexthop this label previously
     * mapped to (mirrors the del path), otherwise it is leaked on
     * every re-add
     */
    old_nh = router->vr_ilm[req->mr_label];
    router->vr_ilm[req->mr_label] = nh;
    if (old_nh)
        vrouter_put_nexthop(old_nh);

generate_resp:
    vr_send_response(ret);
    return ret;
}
static void vr_drop_stats_get(unsigned int core) { int ret = 0; unsigned int cpu; struct vrouter *router = vrouter_get(0); vr_drop_stats_req *response = NULL; if (!router && (ret = -ENOENT)) goto exit_get; response = vr_zalloc(sizeof(*response), VR_DROP_STATS_REQ_OBJECT); if (!response && (ret = -ENOMEM)) goto exit_get; if (core == (unsigned)-1) { /* summed up stats */ for (cpu = 0; cpu < vr_num_cpus; cpu++) { vr_drop_stats_add_response(response, router->vr_pdrop_stats[cpu]); } } else if (core < vr_num_cpus) { /* stats for a specific core */ vr_drop_stats_add_response(response, router->vr_pdrop_stats[core]); } /* otherwise the counters will be zeros */ exit_get: vr_message_response(VR_DROP_STATS_OBJECT_ID, ret ? NULL : response, ret); if (response != NULL) vr_free(response, VR_DROP_STATS_REQ_OBJECT); return; }
/*
 * vr_route_get - sandesh handler that looks up a single route.
 * Copies the wire request into a kernel request, resolves the router
 * and inet table, and lets the table's algo_get fill in the result.
 * The response carries the filled request on success, NULL on error.
 */
int
vr_route_get(vr_route_req *req)
{
    struct vr_route_req vr_req;
    struct vr_rtable *rtable;
    struct vrouter *router;
    int ret = 0;

    vr_req.rtr_req = *req;

    router = vrouter_get(req->rtr_rid);
    if (!router) {
        ret = -ENOENT;
        goto generate_response;
    }

    rtable = vr_get_inet_table(router, req->rtr_rt_type);
    if (!rtable) {
        ret = -ENOENT;
        goto generate_response;
    }

    ret = rtable->algo_get(vr_req.rtr_req.rtr_vrf_id, &vr_req);

generate_response:
    vr_message_response(VR_ROUTE_OBJECT_ID, ret ? NULL : &vr_req, ret);
    return ret;
}
/*
 * vr_mpls_get - sandesh handler returning the nexthop bound to a label.
 * Error codes: -ENODEV unknown router, -EINVAL label out of range,
 * -ENOENT no nexthop installed. On error the response carries NULL.
 */
int
vr_mpls_get(vr_mpls_req *req)
{
    int ret = 0;
    struct vr_nexthop *nh = NULL;
    struct vrouter *router;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -ENODEV;
    } else if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        /*
         * was a signed '>' compare: negative labels and a label equal
         * to vr_max_labels passed validation and reached the lookup.
         * -EINVAL for a bad label is consistent with the other mpls
         * request handlers.
         */
        ret = -EINVAL;
    } else {
        nh = vrouter_get_label(req->mr_rid, req->mr_label);
        if (!nh)
            ret = -ENOENT;
    }

    if (!ret)
        vr_mpls_make_req(req, nh, req->mr_label);
    else
        req = NULL;

    vr_message_response(VR_MPLS_OBJECT_ID, req, ret);
    return 0;
}
/*
 * bridge_table_init - attach the bridge (MAC) hash table and wire the
 * rtable method pointers. Safe to call again: an existing algo_data is
 * left untouched. Also publishes the lookup function and table pointer
 * through the vr_bridge_lookup / vn_rtable globals.
 *
 * Returns 0 on success, -ENOMEM (via vr_module_error) if the hash
 * table cannot be attached.
 */
int
bridge_table_init(struct vr_rtable *rtable, struct rtable_fspec *fs)
{
    /* If table already exists, dont create again */
    if (rtable->algo_data)
        return 0;

    /*
     * default overflow table to ~20% of the main table, rounded up to
     * a multiple of 1024
     */
    if (!vr_bridge_oentries)
        vr_bridge_oentries = ((vr_bridge_entries / 5) + 1023) & ~1023;

    rtable->algo_data = vr_htable_attach(vrouter_get(0), vr_bridge_entries,
            vr_bridge_table, vr_bridge_oentries, vr_bridge_otable,
            sizeof(struct vr_bridge_entry),
            sizeof(struct vr_bridge_entry_key), 0, bridge_entry_key);
    if (!rtable->algo_data)
        return vr_module_error(-ENOMEM, __FUNCTION__, __LINE__,
                vr_bridge_entries);

    /* Max VRF's does not matter as Bridge table is not per VRF. But
     * still this can be maintained in table */
    rtable->algo_max_vrfs = fs->rtb_max_vrfs;
    rtable->algo_add = bridge_table_add;
    rtable->algo_del = bridge_table_delete;
    rtable->algo_lookup = bridge_table_lookup;
    rtable->algo_get = bridge_table_get;
    rtable->algo_dump = bridge_table_dump;

    /* export the lookup entry point and table for the datapath */
    vr_bridge_lookup = bridge_table_lookup;
    vn_rtable = rtable->algo_data;

    return 0;
}
/*
 * vr_mpls_del - sandesh handler that removes the ILM entry for a label,
 * releasing the reference held by the installed nexthop, if any.
 * Always sends a response with the result.
 */
int
vr_mpls_del(vr_mpls_req *req)
{
    struct vrouter *router;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /*
     * was a signed '>' compare: a negative label or a label equal to
     * vr_max_labels passed validation and indexed vr_ilm[] out of
     * bounds (valid indices are 0 .. vr_max_labels - 1)
     */
    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if (router->vr_ilm[req->mr_label])
        vrouter_put_nexthop(router->vr_ilm[req->mr_label]);
    router->vr_ilm[req->mr_label] = NULL;

generate_resp:
    vr_send_response(ret);
    return ret;
}
/*
 * vr_fc_map_delete - sandesh handler that clears a forwarding-class
 * entry identified by fmr_id[0] and propagates the delete to the
 * offload layer. Always sends a response.
 *
 * NOTE(review): router is not checked for NULL before being passed to
 * vr_fc_map_get_fc - presumably that helper tolerates a NULL router;
 * confirm against its definition.
 */
static void
vr_fc_map_delete(vr_fc_map_req *req)
{
    int ret = 0;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    struct vr_forwarding_class *fc_p;

    /* the id array must be present and in range */
    if (!req->fmr_id) {
        ret = -EINVAL;
        goto generate_response;
    }

    if (req->fmr_id[0] >= vr_fc_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_fc_map_get_fc(router, req->fmr_id[0]);
    if (!fc_p) {
        ret = -EINVAL;
        goto generate_response;
    }

    /* invalidate the entry in place (vfc_valid goes to 0 too) */
    memset(fc_p, 0, sizeof(*fc_p));

    /* best-effort delete on the offload device; result ignored */
    (void)vr_offload_fc_map_del(req);

    vr_send_response(0);
    return;

generate_response:
    vr_send_response(ret);
    return;
}
/*
 * vr_qos_map_delete - sandesh handler that unlinks and frees the
 * forwarding-class array for qos map qmr_id, then propagates the
 * delete to the offload layer. Always sends a response.
 *
 * NOTE(review): router is not checked for NULL before use - presumably
 * vr_qos_map_get_fc/set_fc tolerate it; confirm against their
 * definitions.
 */
static void
vr_qos_map_delete(vr_qos_map_req *req)
{
    int ret = 0;
    struct vr_forwarding_class *fc_p;
    struct vrouter *router = vrouter_get(req->qmr_rid);

    if (req->qmr_id >= vr_qos_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p) {
        /* nothing installed: deleting is a successful no-op */
        ret = 0;
        goto generate_response;
    }

    /* unlink first so no new lookups can reach fc_p */
    vr_qos_map_set_fc(router, req->qmr_id, NULL);

    /*
     * if a deferred free cannot be scheduled, wait for readers to
     * drain and free synchronously
     */
    if (vr_qos_map_free_fc_defer(router, fc_p)) {
        vr_delay_op();
        vr_free(fc_p, VR_QOS_MAP_OBJECT);
    }

    /* best-effort delete on the offload device; result ignored */
    (void)vr_offload_qos_map_del(req);

generate_response:
    vr_send_response(ret);
    return;
}
int inet_route_add(struct rtable_fspec *fs, struct vr_route_req *req) { struct vr_rtable *rtable; struct vrouter *router; unsigned int pmask; router = vrouter_get(req->rtr_req.rtr_rid); if (!router) return -EINVAL; rtable = vr_get_inet_table(router, req->rtr_req.rtr_rt_type); if (!rtable || ((unsigned int)req->rtr_req.rtr_vrf_id > fs->rtb_max_vrfs) || ((unsigned int)(req->rtr_req.rtr_prefix_len) > VR_INET_MAX_PLEN)) return -EINVAL; if (req->rtr_req.rtr_prefix_len) { pmask = ~((1 << (32 - req->rtr_req.rtr_prefix_len)) - 1); req->rtr_req.rtr_prefix &= pmask; } else req->rtr_req.rtr_prefix = 0; return rtable->algo_add(rtable, req); }
/* * sandesh handler for vr_flow_req */ void vr_flow_req_process(void *s_req) { int ret = 0; struct vrouter *router; vr_flow_req *req = (vr_flow_req *)s_req; router = vrouter_get(req->fr_rid); switch (req->fr_op) { case FLOW_OP_FLOW_TABLE_GET: req->fr_ftable_size = vr_flow_table_size(router) + vr_oflow_table_size(router); #ifdef __KERNEL__ req->fr_ftable_dev = vr_flow_major; #endif break; case FLOW_OP_FLOW_SET: ret = vr_flow_set(router, req); break; default: ret = -EINVAL; } vr_message_response(VR_FLOW_OBJECT_ID, req, ret); return; }
/*
 * inet_route_add - validate an IPv4/IPv6 route request, canonicalize
 * the prefix by zeroing host bits (byte-wise for the variable-length
 * prefix buffer), and hand it to the inet table's add method.
 * Returns -EINVAL on validation failure, -1 if the table has no add
 * method, otherwise the result of algo_add.
 */
int
inet_route_add(struct rtable_fspec *fs, struct vr_route_req *req)
{
    int i;
    struct vr_rtable *rtable;
    struct vrouter *router;
    unsigned int pmask, pmask_byte;

    router = vrouter_get(req->rtr_req.rtr_rid);
    if (!router)
        return -EINVAL;

    /* V4 and V6 only */
    if (req->rtr_req.rtr_family != AF_INET &&
            req->rtr_req.rtr_family != AF_INET6)
        return -EINVAL;

    /* There has to be some prefix to add */
    if (!req->rtr_req.rtr_prefix_size)
        return -EINVAL;

    rtable = router->vr_inet_rtable;
    /* prefix length is bounded by the address size in bits */
    if (!rtable ||
            ((unsigned int)req->rtr_req.rtr_vrf_id >= fs->rtb_max_vrfs) ||
            ((unsigned int)(req->rtr_req.rtr_prefix_len) >
             (RT_IP_ADDR_SIZE(req->rtr_req.rtr_family)*8)))
        return -EINVAL;

    if (req->rtr_req.rtr_prefix) {
        /*
         * build a 32-bit mask for the partial byte; for V6 start from
         * 0 (see the TBD below)
         */
        if (req->rtr_req.rtr_family == AF_INET)
            pmask = ~((1 << (32 - req->rtr_req.rtr_prefix_len)) - 1);
        else
            pmask = 0;

        /* TBD: Assume V6 prefix length will be multiple of 8 */
        pmask_byte = req->rtr_req.rtr_prefix_len/8;
        if (pmask_byte < (RT_IP_ADDR_SIZE(req->rtr_req.rtr_family)-1)) {
            /* zero every whole byte past the prefix boundary */
            for (i=pmask_byte+1;
                    i<RT_IP_ADDR_SIZE(req->rtr_req.rtr_family); i++) {
                req->rtr_req.rtr_prefix[i] = 0;
                /* shift the mask so its low byte lines up with pmask_byte */
                pmask = pmask >> 8;
            }
            /* mask the partial byte at the prefix boundary */
            req->rtr_req.rtr_prefix[pmask_byte] =
                req->rtr_req.rtr_prefix[pmask_byte] & (pmask & 0xff);
        }
    }

    if (rtable) {
        if (rtable->algo_add)
            return rtable->algo_add(rtable, req);
        else
            return -1;
    } else {
        return -1;
    }
}
/*
 * vrouter_get_label - return the nexthop installed in the ILM table for
 * the given label, or NULL if the router does not exist or the label is
 * out of range.
 */
static struct vr_nexthop *
vrouter_get_label(unsigned int rid, unsigned int label)
{
    struct vrouter *router = vrouter_get(rid);

    /*
     * was '>': a label equal to vr_max_labels read one slot past the
     * end of vr_ilm[] (the mirror-table accessor uses '>=' for the
     * same bound)
     */
    if (!router || label >= router->vr_max_labels)
        return NULL;

    return router->vr_ilm[label];
}
/*
 * vrouter_get_mirror - return the mirror entry at the given index, or
 * NULL if the router does not exist or the index is out of range.
 */
struct vr_mirror_entry *
vrouter_get_mirror(unsigned int rid, unsigned int index)
{
    struct vrouter *router = vrouter_get(rid);

    if (router && (index < router->vr_max_mirror_indices))
        return router->vr_mirrors[index];

    return NULL;
}
/*
 * vr_route_dump - sandesh handler that starts a route-table dump.
 * Copies the wire request's prefix and marker into stack buffers
 * (rt_prefix / rt_marker) so the kernel request owns its own storage,
 * picks the bridge or inet table by family, and delegates to the
 * table's algo_dump, which generates the responses. Only the error
 * paths send a response from here.
 */
int
vr_route_dump(vr_route_req *req)
{
    struct vr_route_req vr_req;
    struct vrouter *router;
    struct vr_rtable *rtable = NULL;
    int ret;
    uint32_t rt_prefix[4], rt_marker[4];

    vr_req.rtr_req = *req;

    /* re-point the prefix at local storage and copy it in */
    vr_req.rtr_req.rtr_prefix_size = req->rtr_prefix_size;
    if (req->rtr_prefix_size) {
        vr_req.rtr_req.rtr_prefix = (uint8_t*)&rt_prefix;
        memcpy(vr_req.rtr_req.rtr_prefix, req->rtr_prefix,
                RT_IP_ADDR_SIZE(req->rtr_family));
    } else {
        vr_req.rtr_req.rtr_prefix = NULL;
    }

    vr_req.rtr_req.rtr_marker_size = req->rtr_marker_size;
    /*
     * NOTE(review): the marker prefix-length is taken from
     * rtr_prefix_len rather than a marker-specific field - looks
     * intentional (marker is the last dumped prefix) but confirm
     * against the dump consumers.
     */
    vr_req.rtr_req.rtr_marker_plen = req->rtr_prefix_len;
    if (req->rtr_marker_size) {
        vr_req.rtr_req.rtr_marker = (uint8_t*)&rt_marker;
        memcpy(vr_req.rtr_req.rtr_marker, req->rtr_marker,
                RT_IP_ADDR_SIZE(req->rtr_family));
    } else {
        vr_req.rtr_req.rtr_marker = NULL;
    }

    router = vrouter_get(req->rtr_rid);
    if (!router) {
        ret = -ENOENT;
        goto generate_error;
    } else {
        /* AF_BRIDGE dumps the MAC table; everything else the inet table */
        if (req->rtr_family == AF_BRIDGE) {
            rtable = router->vr_bridge_rtable;
        } else {
            rtable = router->vr_inet_rtable;
        }

        if (!rtable) {
            ret = -ENOENT;
            goto generate_error;
        }

        /* algo_dump sends its own responses */
        ret = rtable->algo_dump(NULL, &vr_req);
    }

    return ret;

generate_error:
    vr_send_response(ret);
    return ret;
}
/*
 * vr_free_stats - bump the per-cpu free counter for an allocation
 * object class. Silently does nothing if the stats arrays are not
 * allocated (e.g. during early init/teardown).
 */
void
vr_free_stats(unsigned int object)
{
    struct vrouter *router = vrouter_get(0);
    unsigned int cpu = vr_get_cpu();

    if (router->vr_malloc_stats && router->vr_malloc_stats[cpu])
        router->vr_malloc_stats[cpu][object].ms_free++;
}
/*
 * vr_mirror_add - sandesh handler that creates or updates a mirror
 * entry. Takes a reference on the new nexthop before touching the
 * entry, swaps it in, and only then releases the reference held by the
 * old nexthop, so the entry always points at a referenced nexthop.
 * Always sends a response.
 */
int
vr_mirror_add(vr_mirror_req *req)
{
    int ret = 0;
    struct vrouter *router;
    struct vr_nexthop *nh, *old_nh = NULL;
    struct vr_mirror_entry *mirror;

    router = vrouter_get(req->mirr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mirr_index >= router->vr_max_mirror_indices) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* takes a reference on the nexthop */
    nh = vrouter_get_nexthop(req->mirr_rid, req->mirr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    mirror = router->vr_mirrors[req->mirr_index];
    if (!mirror) {
        /* new slot: allocate a zeroed entry */
        mirror = vr_zalloc(sizeof(*mirror), VR_MIRROR_OBJECT);
        if (!mirror) {
            ret = -ENOMEM;
            /* give back the reference taken above */
            vrouter_put_nexthop(nh);
            goto generate_resp;
        }
    } else {
        /* updating in place: remember the old nexthop to release later */
        old_nh = mirror->mir_nh;
    }

    mirror->mir_nh = nh;
    mirror->mir_rid = req->mirr_rid;
    mirror->mir_flags = req->mirr_flags;
    mirror->mir_vni = req->mirr_vni;
    mirror->mir_vlan_id = req->mirr_vlan;
    router->vr_mirrors[req->mirr_index] = mirror;

    /* release only after the new nexthop is installed */
    if (old_nh)
        vrouter_put_nexthop(old_nh);

generate_resp:
    vr_send_response(ret);
    return ret;
}
/*
 * vr_add_flow - locate the flow entry for a key, creating one if
 * needed. An existing entry for the key is returned as-is; otherwise a
 * free slot is claimed. *fe_index receives the entry's index. Returns
 * NULL if the table is full.
 */
static struct vr_flow_entry *
vr_add_flow(unsigned int rid, struct vr_flow_key *key,
        unsigned int *fe_index)
{
    struct vrouter *router = vrouter_get(rid);
    struct vr_flow_entry *fe;

    fe = vr_find_flow(router, key, fe_index);
    if (fe)
        return fe;

    return vr_find_free_entry(router, key, fe_index);
}
/*
 * vr_mirror_del - sandesh handler that removes the mirror entry at the
 * requested index. Responds with -EINVAL when the router id is
 * unknown, otherwise with the result of __vr_mirror_del.
 */
int
vr_mirror_del(vr_mirror_req *req)
{
    struct vrouter *router = vrouter_get(req->mirr_rid);
    int ret;

    ret = router ? __vr_mirror_del(router, req->mirr_index) : -EINVAL;
    vr_send_response(ret);
    return ret;
}
/*
 * vr_fc_map_dump - sandesh dump handler for forwarding-class entries.
 * Resumes after the marker (fmr_marker + 1) and emits one response
 * object per valid entry until the table ends or the dump buffer
 * fills (vr_message_dump_object returning <= 0). The dumper owns the
 * final response and is always closed via vr_message_dump_exit.
 *
 * NOTE(review): router is not checked for NULL before being passed to
 * vr_fc_map_get_fc - presumably that helper tolerates it; confirm.
 */
static void
vr_fc_map_dump(vr_fc_map_req *req)
{
    int ret = 0;
    unsigned int i;
    vr_fc_map_req *resp;
    struct vr_forwarding_class *fc_p;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    struct vr_message_dumper *dumper = NULL;

    /* marker already at or past the last entry: nothing to dump */
    if (req->fmr_marker + 1 >= vr_fc_map_entries)
        goto generate_response;

    dumper = vr_message_dump_init(req);
    if (!dumper) {
        ret = -ENOMEM;
        goto generate_response;
    }

    /* walk entries after the marker, skipping invalid ones */
    for (i = (req->fmr_marker + 1); i < vr_fc_map_entries; i++) {
        fc_p = vr_fc_map_get_fc(router, i);
        if (!fc_p || !fc_p->vfc_valid)
            continue;

        resp = vr_fc_map_req_get(1);
        if (!resp) {
            ret = -ENOMEM;
            goto generate_response;
        }

        resp->fmr_id[0] = i;
        resp->fmr_dscp[0] = fc_p->vfc_dscp;
        resp->fmr_mpls_qos[0] = fc_p->vfc_mpls_qos;
        resp->fmr_dotonep[0] = fc_p->vfc_dotonep_qos;
        resp->fmr_queue_id[0] = fc_p->vfc_queue_id;

        /* best-effort: overlay offload state into the response */
        (void)vr_offload_fc_map_get(resp);

        ret = vr_message_dump_object(dumper, VR_FC_MAP_OBJECT_ID, resp);
        vr_fc_map_req_destroy(resp);
        /* <= 0 means the dump buffer is full (or errored): stop here */
        if (ret <= 0)
            break;
    }

generate_response:
    vr_message_dump_exit(dumper, ret);
    return;
}
/*
 * vr_malloc_stats - account an allocation of `size` bytes against the
 * per-cpu counters for the given object class. Silently does nothing
 * if the stats arrays are not allocated.
 */
void
vr_malloc_stats(unsigned int size, unsigned int object)
{
    struct vrouter *router = vrouter_get(0);
    unsigned int cpu = vr_get_cpu();

    if (router->vr_malloc_stats && router->vr_malloc_stats[cpu]) {
        router->vr_malloc_stats[cpu][object].ms_size += size;
        router->vr_malloc_stats[cpu][object].ms_alloc++;
    }
}
int bridge_entry_add(struct rtable_fspec *fs, struct vr_route_req *req) { struct vrouter *router; router = vrouter_get(req->rtr_req.rtr_rid); if (!router) return -EINVAL; if (!router->vr_bridge_rtable || ((unsigned int)req->rtr_req.rtr_vrf_id >= fs->rtb_max_vrfs) || ((unsigned int)(req->rtr_req.rtr_mac_size) != VR_ETHER_ALEN)) return -EINVAL; return router->vr_bridge_rtable->algo_add(router->vr_bridge_rtable, req); }
/*
 * vr_fc_map_get - sandesh handler that returns one forwarding-class
 * entry identified by fmr_id[0]. Builds a response object, overlays
 * the offload state for comparison, and always sends a response (NULL
 * object on error). The response object is freed before returning.
 *
 * NOTE(review): router is not checked for NULL before being passed to
 * vr_fc_map_get_fc - presumably that helper tolerates it; confirm.
 */
static void
vr_fc_map_get(vr_fc_map_req *req)
{
    int ret = 0;
    vr_fc_map_req *resp = NULL;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    struct vr_forwarding_class *fc_p;

    /* the id array must be present and in range */
    if (!req->fmr_id) {
        ret = -EINVAL;
        goto generate_response;
    }

    if (req->fmr_id[0] >= vr_fc_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_fc_map_get_fc(router, req->fmr_id[0]);
    if (!fc_p || !fc_p->vfc_valid) {
        ret = -ENOENT;
        goto generate_response;
    }

    resp = vr_fc_map_req_get(1);
    if (!resp) {
        ret = -ENOMEM;
        goto generate_response;
    }

    resp->fmr_id[0] = req->fmr_id[0];
    resp->fmr_dscp[0] = fc_p->vfc_dscp;
    resp->fmr_mpls_qos[0] = fc_p->vfc_mpls_qos;
    resp->fmr_dotonep[0] = fc_p->vfc_dotonep_qos;
    resp->fmr_queue_id[0] = fc_p->vfc_queue_id;

    /* Debug comparison to check if matching entry is programmed on NIC */
    (void)vr_offload_fc_map_get(resp);

generate_response:
    vr_message_response(VR_FC_MAP_OBJECT_ID, ret < 0 ? NULL : resp, ret,
            false);
    if (resp)
        vr_fc_map_req_destroy(resp);
    return;
}
/*
 * vr_fc_map_add - sandesh handler that programs a batch of
 * forwarding-class entries from the request's parallel arrays
 * (id/dscp/mpls_qos/dotonep/queue_id, one entry per index). If the
 * offload layer rejects the batch, every entry just written is rolled
 * back (zeroed). Always sends a response.
 *
 * NOTE(review): router is not checked for NULL before being passed to
 * vr_fc_map_get_fc - presumably that helper tolerates it; confirm.
 */
static void
vr_fc_map_add(vr_fc_map_req *req)
{
    int ret = 0;
    unsigned int i;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    struct vr_forwarding_class *fc_p;

    /* all five parallel arrays must be present and non-empty */
    if (!req->fmr_id || !req->fmr_id_size ||
            !req->fmr_dscp || !req->fmr_dscp_size ||
            !req->fmr_mpls_qos || !req->fmr_mpls_qos_size ||
            !req->fmr_dotonep || !req->fmr_dotonep_size ||
            !req->fmr_queue_id || !req->fmr_queue_id_size) {
        ret = -EINVAL;
        goto generate_response;
    }

    for (i = 0; i < req->fmr_id_size; i++) {
        fc_p = vr_fc_map_get_fc(router, req->fmr_id[i]);
        if (!fc_p) {
            ret = -EINVAL;
            goto generate_response;
        }

        fc_p->vfc_id = req->fmr_id[i];
        fc_p->vfc_dscp = req->fmr_dscp[i];
        fc_p->vfc_mpls_qos = req->fmr_mpls_qos[i];
        fc_p->vfc_dotonep_qos = req->fmr_dotonep[i];
        fc_p->vfc_queue_id = req->fmr_queue_id[i];
        fc_p->vfc_valid = 1;
    }

    ret = vr_offload_fc_map_add(req);
    if (ret) {
        /* offload rejected the batch: undo the software entries */
        vr_printf("offload FC map not supported - not configuring\n");
        for (i = 0; i < req->fmr_id_size; i++) {
            fc_p = vr_fc_map_get_fc(router, req->fmr_id[i]);
            if (fc_p)
                memset(fc_p, 0, sizeof(*fc_p));
        }
    }

generate_response:
    vr_send_response(ret);
    return;
}
void vr_bridge_table_data_process(void *s_req) { int ret = 0; struct vrouter *router; vr_bridge_table_data *resp = NULL, *req = (vr_bridge_table_data *)s_req; router = vrouter_get(req->btable_rid); if (!router) { ret = -EINVAL; goto generate_response; } resp = vr_bridge_table_data_get(); if (!resp) { ret = -ENOMEM; goto generate_response; } resp->btable_op = req->btable_op; switch (req->btable_op) { case SANDESH_OP_GET: resp->btable_size = vr_bridge_table_size(router); #if defined(__linux__) && defined(__KERNEL__) resp->btable_dev = vr_bridge_table_major; #endif if (vr_bridge_table_path) { strncpy(resp->btable_file_path, vr_bridge_table_path, VR_UNIX_PATH_MAX - 1); } break; default: ret = -EINVAL; break; } generate_response: vr_message_response(VR_BRIDGE_TABLE_DATA_OBJECT_ID, resp, ret); if (resp) { vr_bridge_table_data_destroy(resp); resp = NULL; } return; }