/*
 * vr_fc_map_delete: handle a forwarding-class map delete request.
 *
 * Validates the request, clears the forwarding-class entry identified
 * by fmr_id[0], notifies the offload layer, and sends a sandesh
 * response carrying the result (0 on success, -EINVAL on bad input).
 *
 * Fix: fmr_id is a variable-size array from the wire; only the pointer
 * was checked before dereferencing element 0, so an empty array could
 * be read out of bounds. fmr_id_size is now validated as well.
 */
static void
vr_fc_map_delete(vr_fc_map_req *req)
{
    int ret = 0;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    struct vr_forwarding_class *fc_p;

    if (!req->fmr_id || !req->fmr_id_size) {
        ret = -EINVAL;
        goto generate_response;
    }

    if (req->fmr_id[0] >= vr_fc_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_fc_map_get_fc(router, req->fmr_id[0]);
    if (!fc_p) {
        ret = -EINVAL;
        goto generate_response;
    }

    /* wiping the entry also clears vfc_valid, marking the slot free */
    memset(fc_p, 0, sizeof(*fc_p));

    /* best effort: the offload result does not change the response */
    (void)vr_offload_fc_map_del(req);
    vr_send_response(0);
    return;

generate_response:
    vr_send_response(ret);
    return;
}
/*
 * vr_message_response: encode 'object' (plus the trailing vr_response
 * object) into a transport buffer and queue it as the reply message.
 *
 * Returns 0 when no protocol/transport is registered, -ENOMEM on
 * allocation failure, the queueing result on success, or the negative
 * encode error after reporting it via vr_send_response().
 */
int
vr_message_response(unsigned int object_type, void *object, int ret)
{
    char *msg_buf;
    unsigned int msg_len;
    struct vr_mproto *proto = message_h.vm_proto;
    struct vr_mtransport *trans = message_h.vm_trans;

    if (!proto || !trans)
        return 0;

    /* room for the object itself plus the mandatory response header */
    msg_len = proto->mproto_buf_len(object_type, object) +
        proto->mproto_buf_len(VR_RESPONSE_OBJECT_ID, NULL);

    msg_buf = trans->mtrans_alloc(msg_len);
    if (!msg_buf)
        return -ENOMEM;

    ret = proto->mproto_encode_response(msg_buf, msg_len, object_type,
            object, ret);
    if (ret >= 0) {
        /* on success, ret holds the number of encoded bytes */
        return vr_message_queue_response(msg_buf, ret);
    }

    /* encode failed: release the buffer and report the error instead */
    trans->mtrans_free(msg_buf);
    vr_send_response(ret);
    return ret;
}
/*
 * vr_message_dump_exit: finish a dump operation and release the dumper
 * state.
 *
 * 'ret' is the caller's status; when a dumper context exists it is
 * replaced with dump_num_dumped so the client learns how many entries
 * were returned. The response header is sent first; buffered dump data
 * (dump_offset != 0) is then queued, otherwise the unused buffer is
 * freed. The dumper itself is always freed when present.
 */
void
vr_message_dump_exit(void *context, int ret)
{
    struct vr_mproto *proto;
    struct vr_mtransport *trans;
    struct vr_message_dumper *dumper = (struct vr_message_dumper *)context;

    proto = message_h.vm_proto;
    trans = message_h.vm_trans;
    if (!proto || !trans)
        return;

    if (dumper)
        ret = dumper->dump_num_dumped;

    vr_send_response(ret);

    if (dumper) {
        if (!dumper->dump_offset) {
            /* nothing was encoded; the buffer was never handed off */
            if (dumper->dump_buffer)
                trans->mtrans_free(dumper->dump_buffer);
        } else
            /* ownership of dump_buffer passes to the response queue */
            vr_message_queue_response(dumper->dump_buffer,
                    dumper->dump_offset);
        vr_free(dumper);
    }

    return;
}
/*
 * vr_qos_map_delete: remove the QoS map identified by qmr_id.
 *
 * The forwarding-class table is first unhooked from the router
 * (vr_qos_map_set_fc(..., NULL)) so new lookups cannot reach it, then
 * released. NOTE(review): a non-zero return from
 * vr_qos_map_free_fc_defer() is treated here as "defer not scheduled",
 * falling back to waiting out concurrent readers (vr_delay_op) and
 * freeing synchronously -- confirm against that function's contract.
 *
 * A missing map is not an error: the delete is idempotent (ret = 0).
 */
static void
vr_qos_map_delete(vr_qos_map_req *req)
{
    int ret = 0;
    struct vr_forwarding_class *fc_p;
    struct vrouter *router = vrouter_get(req->qmr_rid);

    if (req->qmr_id >= vr_qos_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p) {
        /* already gone; report success */
        ret = 0;
        goto generate_response;
    }

    vr_qos_map_set_fc(router, req->qmr_id, NULL);
    if (vr_qos_map_free_fc_defer(router, fc_p)) {
        vr_delay_op();
        vr_free(fc_p, VR_QOS_MAP_OBJECT);
    }

    /* best effort: the offload result does not change the response */
    (void)vr_offload_qos_map_del(req);

generate_response:
    vr_send_response(ret);
    return;
}
/*
 * vr_mpls_del: delete the incoming-label-map (ILM) entry for mr_label
 * and drop the nexthop reference it held.
 *
 * Fix: vr_ilm[] holds vr_max_labels entries (indices
 * 0 .. vr_max_labels - 1), so a label *equal to* the maximum must be
 * rejected too (was '>'). The label is also compared as unsigned so a
 * negative value from the wire cannot index below the array, matching
 * the checks used elsewhere in the MPLS handlers.
 */
int
vr_mpls_del(vr_mpls_req *req)
{
    struct vrouter *router;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* release the reference held by the ILM slot, if any */
    if (router->vr_ilm[req->mr_label])
        vrouter_put_nexthop(router->vr_ilm[req->mr_label]);

    router->vr_ilm[req->mr_label] = NULL;

generate_resp:
    vr_send_response(ret);
    return ret;
}
/*
 * vr_mpls_add: install a nexthop as the incoming-label-map (ILM)
 * entry for mr_label.
 *
 * Fixes:
 *  - off-by-one: ILM indices run 0 .. vr_max_labels - 1, so a label
 *    equal to vr_max_labels must be rejected (was '>');
 *  - reference leak: an existing entry's nexthop reference was
 *    silently overwritten; the old nexthop is now released first
 *    (mirroring what vr_mpls_del does on delete).
 */
int
vr_mpls_add(vr_mpls_req *req)
{
    struct vrouter *router;
    struct vr_nexthop *nh;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* takes a reference that the ILM slot will own */
    nh = vrouter_get_nexthop(req->mr_rid, req->mr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* drop the reference held by any entry being replaced */
    if (router->vr_ilm[req->mr_label])
        vrouter_put_nexthop(router->vr_ilm[req->mr_label]);

    router->vr_ilm[req->mr_label] = nh;

generate_resp:
    vr_send_response(ret);
    return ret;
}
/*
 * vr_vrf_stats_op: dispatch a per-VRF statistics request by address
 * family. AF_INET requests are handed to vr_inet_vrf_stats_op(),
 * which generates its own response; every failure path here answers
 * the client directly with -EINVAL.
 */
static void
vr_vrf_stats_op(vr_vrf_stats_req *req)
{
    struct vrouter *router = vrouter_get(req->vsr_rid);

    if (!router)
        goto generate_error;

    if (req->vsr_family != AF_INET)
        goto generate_error;

    /* the family handler owns the response from here on */
    vr_inet_vrf_stats_op(router, req);
    return;

generate_error:
    vr_send_response(-EINVAL);
    return;
}
/*
 * vr_mpls_del: validate the router id and label range, then delegate
 * the ILM teardown to __vr_mpls_del(). Always answers the request
 * with the resulting status and returns it to the caller.
 */
int
vr_mpls_del(vr_mpls_req *req)
{
    struct vrouter *router = vrouter_get(req->mr_rid);
    int status = -EINVAL;

    /* unsigned comparison rejects negative labels as well */
    if (router && ((unsigned int)req->mr_label < router->vr_max_labels))
        status = __vr_mpls_del(router, req->mr_label);

    vr_send_response(status);
    return status;
}
/*
 * vr_route_add: add a route via the family-specific table operations.
 *
 * The request is copied into a kernel-side vr_route_req and, when a
 * prefix is present, the prefix bytes are duplicated into local
 * storage so the table code never aliases the wire message. The
 * marker size is reset since an add carries no pagination state.
 * The outcome is sent to the client and returned.
 */
int
vr_route_add(vr_route_req *req)
{
    int status = -ENOENT;
    uint32_t prefix_storage[4];
    struct vr_route_req table_req;
    struct rtable_fspec *fs = vr_get_family(req->rtr_family);

    if (fs) {
        table_req.rtr_req = *req;

        if (!req->rtr_prefix_size) {
            table_req.rtr_req.rtr_prefix = NULL;
        } else {
            /* detach the prefix from the incoming request buffer */
            table_req.rtr_req.rtr_prefix = (uint8_t *)&prefix_storage;
            memcpy(table_req.rtr_req.rtr_prefix, req->rtr_prefix,
                    RT_IP_ADDR_SIZE(req->rtr_family));
            table_req.rtr_req.rtr_marker_size = 0;
            table_req.rtr_req.rtr_prefix_size = req->rtr_prefix_size;
        }

        status = fs->route_add(fs, &table_req);
    }

    vr_send_response(status);
    return status;
}
/*
 * vr_mem_stats_req_process: sandesh handler for vr_mem_stats_req.
 *
 * Only SANDESH_OP_GET is supported. Fix: the unsupported-op branch
 * previously fell through into vr_mem_stats_get(), generating a
 * second response after the -EOPNOTSUPP one; it now returns
 * immediately.
 */
void
vr_mem_stats_req_process(void *s_req)
{
    vr_mem_stats_req *req = (vr_mem_stats_req *)s_req;

    if (req->h_op != SANDESH_OP_GET) {
        vr_send_response(-EOPNOTSUPP);
        return;
    }

    /* vr_mem_stats_get() generates the success response itself */
    vr_mem_stats_get();
    return;
}
/*
 * vr_route_dump: dump routes for the requested family, resuming from
 * the client-supplied marker.
 *
 * Prefix and marker bytes are copied into local storage so the table
 * walker never references the incoming message buffer.
 *
 * Fix: the marker prefix length was populated from rtr_prefix_len,
 * which belongs to the route prefix, not the dump marker; it now uses
 * rtr_marker_plen so pagination resumes at the correct entry.
 *
 * On success the dump machinery owns the response; error paths answer
 * the client here with -ENOENT.
 */
int
vr_route_dump(vr_route_req *req)
{
    struct vr_route_req vr_req;
    struct vrouter *router;
    struct vr_rtable *rtable = NULL;
    int ret;
    uint32_t rt_prefix[4], rt_marker[4];

    vr_req.rtr_req = *req;

    vr_req.rtr_req.rtr_prefix_size = req->rtr_prefix_size;
    if (req->rtr_prefix_size) {
        vr_req.rtr_req.rtr_prefix = (uint8_t *)&rt_prefix;
        memcpy(vr_req.rtr_req.rtr_prefix, req->rtr_prefix,
                RT_IP_ADDR_SIZE(req->rtr_family));
    } else {
        vr_req.rtr_req.rtr_prefix = NULL;
    }

    vr_req.rtr_req.rtr_marker_size = req->rtr_marker_size;
    vr_req.rtr_req.rtr_marker_plen = req->rtr_marker_plen;
    if (req->rtr_marker_size) {
        vr_req.rtr_req.rtr_marker = (uint8_t *)&rt_marker;
        memcpy(vr_req.rtr_req.rtr_marker, req->rtr_marker,
                RT_IP_ADDR_SIZE(req->rtr_family));
    } else {
        vr_req.rtr_req.rtr_marker = NULL;
    }

    router = vrouter_get(req->rtr_rid);
    if (!router) {
        ret = -ENOENT;
        goto generate_error;
    }

    if (req->rtr_family == AF_BRIDGE)
        rtable = router->vr_bridge_rtable;
    else
        rtable = router->vr_inet_rtable;

    if (!rtable) {
        ret = -ENOENT;
        goto generate_error;
    }

    return rtable->algo_dump(NULL, &vr_req);

generate_error:
    vr_send_response(ret);
    return ret;
}
int vr_message_multi_response(struct vr_message_multi *objects) { char *buf = NULL; int ret = 0; unsigned int i, buf_len = 0, len = 0; struct vr_mproto *proto = NULL; struct vr_mtransport *trans = NULL; if ((!objects) || (objects->vr_mm_object_count >= VR_MESSAGE_MULTI_MAX_OBJECTS)) goto response_fail; proto = message_h.vm_proto; trans = message_h.vm_trans; if (!proto || !trans) goto response_fail; for (i = 0; i < objects->vr_mm_object_count; i++) { buf_len += proto->mproto_buf_len(objects->vr_mm_object_type[i], objects->vr_mm_object[i]); } if (!buf_len) goto response_fail; buf = trans->mtrans_alloc(buf_len); if (!buf) { ret = -ENOMEM; goto response_fail; } for (i = 0; i < objects->vr_mm_object_count; i++) { ret = proto->mproto_encode(buf + len, buf_len - len, objects->vr_mm_object_type[i], objects->vr_mm_object[i], VR_MESSAGE_TYPE_RESPONSE); if (ret < 0) goto response_fail; len += ret; } return vr_message_queue_response(buf, len, false); response_fail: if (trans && buf) trans->mtrans_free(buf); vr_send_response(ret); return ret; }
/*
 * vr_mirror_add: create or update the mirror entry at mirr_index,
 * pointing it at the nexthop named by mirr_nhid.
 *
 * A reference is taken on the new nexthop before the entry is
 * touched; when updating an existing entry, the reference on its old
 * nexthop is released only after the switch-over completes.
 */
int
vr_mirror_add(vr_mirror_req *req)
{
    int err = 0;
    struct vrouter *router;
    struct vr_nexthop *new_nh;
    struct vr_nexthop *prev_nh = NULL;
    struct vr_mirror_entry *entry;

    router = vrouter_get(req->mirr_rid);
    if (!router) {
        err = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mirr_index >= router->vr_max_mirror_indices) {
        err = -EINVAL;
        goto generate_resp;
    }

    new_nh = vrouter_get_nexthop(req->mirr_rid, req->mirr_nhid);
    if (!new_nh) {
        err = -EINVAL;
        goto generate_resp;
    }

    entry = router->vr_mirrors[req->mirr_index];
    if (entry) {
        /* updating in place: remember the nexthop being replaced */
        prev_nh = entry->mir_nh;
    } else {
        entry = vr_zalloc(sizeof(*entry), VR_MIRROR_OBJECT);
        if (!entry) {
            /* give back the reference taken above */
            vrouter_put_nexthop(new_nh);
            err = -ENOMEM;
            goto generate_resp;
        }
    }

    entry->mir_nh = new_nh;
    entry->mir_rid = req->mirr_rid;
    entry->mir_flags = req->mirr_flags;
    entry->mir_vni = req->mirr_vni;
    entry->mir_vlan_id = req->mirr_vlan;
    router->vr_mirrors[req->mirr_index] = entry;

    if (prev_nh)
        vrouter_put_nexthop(prev_nh);

generate_resp:
    vr_send_response(err);
    return err;
}
/*
 * vr_drop_stats_req_process: sandesh handler for vr_drop_stats_req.
 *
 * Only SANDESH_OP_GET is supported. Fix: the unsupported-op branch
 * previously fell through into vr_drop_stats_get(), generating a
 * second response after the -EOPNOTSUPP one; it now returns.
 *
 * The core argument is passed as (unsigned)(vds_core - 1).
 * NOTE(review): for vds_core == 0 ("sum all cores", per the original
 * comment) this yields UINT_MAX -- presumably the sentinel
 * vr_drop_stats_get() expects; confirm against its definition.
 */
void
vr_drop_stats_req_process(void *s_req)
{
    vr_drop_stats_req *req = (vr_drop_stats_req *)s_req;

    if (req->h_op != SANDESH_OP_GET) {
        vr_send_response(-EOPNOTSUPP);
        return;
    }

    /* zero vds_core means to sum up all the per-core stats */
    vr_drop_stats_get((unsigned)(req->vds_core - 1));
    return;
}
/*
 * vr_mirror_del: delete the mirror entry at mirr_index on the given
 * router. Responds with -EINVAL when the router id is unknown,
 * otherwise with whatever __vr_mirror_del() reports.
 */
int
vr_mirror_del(vr_mirror_req *req)
{
    struct vrouter *router = vrouter_get(req->mirr_rid);
    int status;

    status = router ? __vr_mirror_del(router, req->mirr_index) : -EINVAL;

    vr_send_response(status);
    return status;
}
/*
 * vr_fc_map_add: install (or refresh) a batch of forwarding-class map
 * entries from the request's parallel arrays.
 *
 * Fix: the loop indexes fmr_dscp/fmr_mpls_qos/fmr_dotonep/fmr_queue_id
 * with the same i that walks fmr_id, but only the pointers and
 * non-zero sizes were validated -- a request whose arrays were shorter
 * than fmr_id_size was read out of bounds. All array sizes must now
 * equal fmr_id_size.
 *
 * If the offload layer rejects the map, every entry touched here is
 * rolled back (zeroed) so software and hardware state stay in sync.
 */
static void
vr_fc_map_add(vr_fc_map_req *req)
{
    int ret = 0;
    unsigned int i;
    struct vrouter *router = vrouter_get(req->fmr_rid);
    struct vr_forwarding_class *fc_p;

    if (!req->fmr_id || !req->fmr_id_size ||
            !req->fmr_dscp || !req->fmr_mpls_qos ||
            !req->fmr_dotonep || !req->fmr_queue_id) {
        ret = -EINVAL;
        goto generate_response;
    }

    /* all parallel arrays must describe the same number of entries */
    if ((req->fmr_dscp_size != req->fmr_id_size) ||
            (req->fmr_mpls_qos_size != req->fmr_id_size) ||
            (req->fmr_dotonep_size != req->fmr_id_size) ||
            (req->fmr_queue_id_size != req->fmr_id_size)) {
        ret = -EINVAL;
        goto generate_response;
    }

    for (i = 0; i < req->fmr_id_size; i++) {
        fc_p = vr_fc_map_get_fc(router, req->fmr_id[i]);
        if (!fc_p) {
            ret = -EINVAL;
            goto generate_response;
        }

        fc_p->vfc_id = req->fmr_id[i];
        fc_p->vfc_dscp = req->fmr_dscp[i];
        fc_p->vfc_mpls_qos = req->fmr_mpls_qos[i];
        fc_p->vfc_dotonep_qos = req->fmr_dotonep[i];
        fc_p->vfc_queue_id = req->fmr_queue_id[i];
        fc_p->vfc_valid = 1;
    }

    ret = vr_offload_fc_map_add(req);
    if (ret) {
        vr_printf("offload FC map not supported - not configuring\n");
        /* undo the software entries installed above */
        for (i = 0; i < req->fmr_id_size; i++) {
            fc_p = vr_fc_map_get_fc(router, req->fmr_id[i]);
            if (fc_p)
                memset(fc_p, 0, sizeof(*fc_p));
        }
    }

generate_response:
    vr_send_response(ret);
    return;
}
/*
 * vr_mpls_add: bind mr_label to nexthop mr_nhid in the incoming label
 * map. Any entry previously installed for the label is deleted first.
 * When the platform supports it, unicast encap labels are also pushed
 * to hardware packet filtering (Flow Director).
 */
int
vr_mpls_add(vr_mpls_req *req)
{
    int err = 0;
    struct vr_nexthop *nh;
    struct vrouter *router;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        err = -EINVAL;
        goto generate_resp;
    }

    /* unsigned comparison also rejects negative labels */
    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        err = -EINVAL;
        goto generate_resp;
    }

    /* clear out whatever the label pointed to before */
    err = __vr_mpls_del(router, req->mr_label);
    if (err)
        goto generate_resp;

    nh = vrouter_get_nexthop(req->mr_rid, req->mr_nhid);
    if (!nh) {
        err = -EINVAL;
        goto generate_resp;
    }

    err = __vrouter_set_label(router, req->mr_label, nh);
    if (err) {
        /* the label slot never took ownership; drop our reference */
        vrouter_put_nexthop(nh);
        goto generate_resp;
    }

    /* hardware packet filtering (Flow Director) support */
    if (vrouter_host->hos_add_mpls &&
            (nh->nh_type == NH_ENCAP) && !(nh->nh_flags & NH_FLAG_MCAST))
        vrouter_host->hos_add_mpls(router, req->mr_label);

generate_resp:
    vr_send_response(err);
    return err;
}
/*
 * vr_inet_vrf_stats_dump: walk the inet routing table's per-VRF
 * statistics. The table's dump algorithm owns the response on
 * success; if the router has no inet table, -ENOENT is reported here.
 */
static void
vr_inet_vrf_stats_dump(struct vrouter *router, vr_vrf_stats_req *req)
{
    struct vr_rtable *rtable = router->vr_inet_rtable;

    if (!rtable) {
        vr_send_response(-ENOENT);
        return;
    }

    rtable->algo_stats_dump(rtable, req);
    return;
}
/*
 * vr_route_add: family-dispatched route addition. Copies the incoming
 * request into a kernel vr_route_req, invokes the family's route_add
 * operation, and answers the client with the result (-ENOENT when the
 * address family has no registered table operations).
 */
int
vr_route_add(vr_route_req *req)
{
    int status = -ENOENT;
    struct vr_route_req table_req;
    struct rtable_fspec *fs = vr_get_family(req->rtr_family);

    if (fs) {
        table_req.rtr_req = *req;
        status = fs->route_add(fs, &table_req);
    }

    vr_send_response(status);
    return status;
}
/*
 * dpdk_netlink_receive: entry point for a netlink message received on
 * the DPDK control socket.
 *
 * Fix: nl_len is unsigned, so a buffer shorter than the netlink
 * header made 'nl_len - HDR_LEN' wrap to a huge length; such runt
 * messages are now rejected up front.
 *
 * vr_message_request() queues the reply (or we queue the error), and
 * dpdk_nl_process_response() drains the queue back to the socket.
 */
int
dpdk_netlink_receive(void *usockp, char *nl_buf, unsigned int nl_len)
{
    int ret;
    struct vr_message request;

    /* guard against unsigned underflow on truncated messages */
    if (nl_len < HDR_LEN)
        return -EINVAL;

    memset(&request, 0, sizeof(request));
    request.vr_message_buf = nl_buf + HDR_LEN;
    request.vr_message_len = nl_len - HDR_LEN;

    ret = vr_message_request(&request);
    if (ret < 0)
        vr_send_response(ret);

    dpdk_nl_process_response(usockp, (struct nlmsghdr *)nl_buf);

    return 0;
}
/*
 * vr_qos_map_get: return the QoS map entry for qmr_id as a sandesh
 * response. Allocates a response object, fills it from the in-memory
 * forwarding-class table, and (for debugging) asks the offload layer
 * to compare the entry programmed on the NIC.
 */
static void
vr_qos_map_get(vr_qos_map_req *req)
{
    int err = 0;
    vr_qos_map_req *resp;
    struct vr_forwarding_class *fc_p;
    struct vrouter *router = vrouter_get(req->qmr_rid);

    if (req->qmr_id >= vr_qos_map_entries) {
        err = -EINVAL;
        goto get_error;
    }

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p) {
        err = -ENOENT;
        goto get_error;
    }

    resp = vr_qos_map_req_get();
    if (!resp) {
        err = -ENOMEM;
        goto get_error;
    }

    vr_qos_map_make_req(req->qmr_id, resp, fc_p);

    /* Debug comparison to check if matching entry is programmed on NIC */
    (void)vr_offload_qos_map_get(resp);

    vr_message_response(VR_QOS_MAP_OBJECT_ID, resp, err, false);
    vr_qos_map_req_destroy(resp);
    return;

get_error:
    vr_send_response(err);
    return;
}
/*
 * vr_route_delete: family-dispatched route deletion.
 *
 * Inet-family requests must carry a prefix (copied into local storage
 * so the table code never touches the wire buffer); bridge-family
 * requests must carry a MAC address. The result of the family's
 * route_del operation -- or the validation error -- is sent back to
 * the client and returned.
 */
int
vr_route_delete(vr_route_req *req)
{
    int status;
    uint32_t prefix_storage[4];
    struct vr_route_req table_req;
    struct rtable_fspec *fs;

    fs = vr_get_family(req->rtr_family);
    if (!fs) {
        status = -ENOENT;
        goto send_reply;
    }

    table_req.rtr_req = *req;

    if ((req->rtr_family != AF_BRIDGE) && !req->rtr_prefix_size) {
        /* non-bridge deletes are keyed by prefix */
        status = -EINVAL;
        goto send_reply;
    }

    if ((req->rtr_family == AF_BRIDGE) &&
            (!req->rtr_mac_size || !req->rtr_mac)) {
        /* bridge deletes are keyed by MAC address */
        status = -EINVAL;
        goto send_reply;
    }

    if (req->rtr_family != AF_BRIDGE) {
        /* detach the prefix bytes from the incoming message */
        table_req.rtr_req.rtr_prefix = (uint8_t *)&prefix_storage;
        memcpy(table_req.rtr_req.rtr_prefix, req->rtr_prefix,
                RT_IP_ADDR_SIZE(req->rtr_family));
    }

    table_req.rtr_req.rtr_marker_size = 0;
    status = fs->route_del(fs, &table_req);

send_reply:
    vr_send_response(status);
    return status;
}
/*
 * vr_drop_stats_req_process: sandesh handler for vr_drop_stats_req.
 *
 * Only SANDESH_OP_GET is supported. Fix: the unsupported-op branch
 * previously fell through into vr_drop_stats_get(), generating a
 * second response after the -EOPNOTSUPP one; it now returns.
 */
void
vr_drop_stats_req_process(void *s_req)
{
    vr_drop_stats_req *req = (vr_drop_stats_req *)s_req;
    unsigned int core;

    if (req->h_op != SANDESH_OP_GET) {
        vr_send_response(-EOPNOTSUPP);
        return;
    }

    /**
     * Check if requested core number is sane. If not, let's assume the
     * request was made for summed up stats for all the cores.
     */
    if (req->vds_core > 0 && req->vds_core <= vr_num_cpus) {
        core = req->vds_core;
    } else {
        core = 0;
    }

    vr_drop_stats_get(core);
    return;
}
/*
 * vr_route_dump: start a route dump for the requested family and
 * routing-table type. On success the table's dump algorithm owns the
 * response; lookup failures (unknown router, missing table, unknown
 * family) are answered here with -ENOENT.
 */
int
vr_route_dump(vr_route_req *req)
{
    int status = -ENOENT;
    struct vr_rtable *table = NULL;
    struct vr_route_req dump_req;
    struct vrouter *router;

    dump_req.rtr_req = *req;

    router = vrouter_get(req->rtr_rid);
    if (!router)
        goto generate_error;

    switch (req->rtr_family) {
    case AF_INET:
        table = vr_get_inet_table(router, req->rtr_rt_type);
        break;
    case AF_BRIDGE:
        table = router->vr_bridge_rtable;
        break;
    default:
        /* table stays NULL; reported as -ENOENT below */
        break;
    }

    if (!table)
        goto generate_error;

    return table->algo_dump(NULL, &dump_req);

generate_error:
    vr_send_response(status);
    return status;
}
/*
 * KsyncHandleWrite: process one sandesh request written to the ksync
 * device and queue the netlink-framed response(s) back to the client.
 *
 * The incoming buffer carries tightly packed netlink headers
 * (nlmsghdr, genlmsghdr, nlattr) followed by the sandesh payload.
 * Each queued vr_message is wrapped back into the same framing; when
 * more than one response is pending, NLM_F_MULTI is set and a final
 * NLMSG_DONE record terminates the multipart sequence.
 *
 * Fix: if KsyncResponseCreate() failed while draining the response
 * queue, the dequeued vr_message was leaked; it is now freed before
 * bailing out with STATUS_INSUFFICIENT_RESOURCES.
 */
NTSTATUS
KsyncHandleWrite(PKSYNC_DEVICE_CONTEXT ctx, uint8_t *buffer, size_t buffer_size)
{
    struct vr_message request;
    struct vr_message *response;
    uint32_t multi_flag;
    int ret;

    /* Received buffer contains tightly packed Netlink headers, thus we can
       just increment appropriate headers */
    struct nlmsghdr *nlh = (struct nlmsghdr *)(buffer);
    struct genlmsghdr *genlh = (struct genlmsghdr *)(nlh + 1);
    struct nlattr *nla = (struct nlattr *)(genlh + 1);

    request.vr_message_buf = NLA_DATA(nla);
    request.vr_message_len = NLA_LEN(nla);

    ret = vr_message_request(&request);
    if (ret) {
        if (vr_send_response(ret)) {
            return STATUS_INVALID_PARAMETER;
        }
    }

    multi_flag = 0;
    while ((response = vr_message_dequeue_response())) {
        /* more than one pending response => mark the stream multipart */
        if (!multi_flag && !vr_response_queue_empty())
            multi_flag = NLM_F_MULTI;

        char *data = response->vr_message_buf - NETLINK_HEADER_LEN;
        size_t data_len =
            NLMSG_ALIGN(response->vr_message_len + NETLINK_HEADER_LEN);

        struct nlmsghdr *nlh_resp = (struct nlmsghdr *)(data);
        nlh_resp->nlmsg_len = data_len;
        nlh_resp->nlmsg_type = nlh->nlmsg_type;
        nlh_resp->nlmsg_flags = multi_flag;
        nlh_resp->nlmsg_seq = nlh->nlmsg_seq;
        nlh_resp->nlmsg_pid = 0;

        /* 'genlmsghdr' should be put directly after 'nlmsghdr', thus we can
           just increment previous header pointer */
        struct genlmsghdr *genlh_resp = (struct genlmsghdr *)(nlh_resp + 1);
        WinRawMemCpy(genlh_resp, genlh, sizeof(*genlh_resp));

        /* 'nlattr' should be put directly after 'genlmsghdr', thus we can
           just increment previous header pointer */
        struct nlattr *nla_resp = (struct nlattr *)(genlh_resp + 1);
        nla_resp->nla_len = response->vr_message_len;
        nla_resp->nla_type = NL_ATTR_VR_MESSAGE_PROTOCOL;

        PKSYNC_RESPONSE ks_resp = KsyncResponseCreate();
        if (ks_resp != NULL) {
            ks_resp->message_len = data_len;
            WinRawMemCpy(ks_resp->buffer, data, ks_resp->message_len);
            KsyncAppendResponse(ctx, ks_resp);
        } else {
            /* fix: do not leak the dequeued message on alloc failure */
            vr_message_free(response);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        vr_message_free(response);
    }

    if (multi_flag) {
        /* terminate the multipart stream with an NLMSG_DONE record */
        PKSYNC_RESPONSE ks_resp = KsyncResponseCreate();
        if (ks_resp == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        struct nlmsghdr *nlh_done = (struct nlmsghdr *)ks_resp->buffer;
        nlh_done->nlmsg_len = NLMSG_HDRLEN;
        nlh_done->nlmsg_type = NLMSG_DONE;
        nlh_done->nlmsg_flags = 0;
        nlh_done->nlmsg_seq = nlh->nlmsg_seq;
        nlh_done->nlmsg_pid = 0;

        ks_resp->message_len = NLMSG_HDRLEN;
        KsyncAppendResponse(ctx, ks_resp);
    }

    return STATUS_SUCCESS;
}
/*
 * vr_qos_message_error: report a QoS request failure to the client.
 * Thin wrapper so the QoS handlers share a single error-reply path.
 */
static void
vr_qos_message_error(int err)
{
    vr_send_response(err);
}
/*
 * vr_qos_map_add: create or update the QoS map identified by qmr_id.
 *
 * The map is a single vr_forwarding_class array partitioned into
 * DSCP, MPLS-EXP and 802.1p sections; each request entry marks its
 * slot valid. A freshly allocated table is hooked into the router
 * (vr_qos_map_set_fc) only after the offload layer accepts the map.
 *
 * Fix: when the offload add failed, fc_p was freed unconditionally --
 * including the case where fc_p was an existing table still installed
 * in the router, leaving a dangling pointer behind. The table is now
 * freed only when it was allocated by this call (need_set).
 */
static void
vr_qos_map_add(vr_qos_map_req *req)
{
    int ret = 0;
    bool need_set = false;
    unsigned int size, i;
    struct vrouter *router = vrouter_get(req->qmr_rid);
    struct vr_forwarding_class *fc_p, *fc_e;

    ret = vr_qos_map_request_validate(req);
    if (ret)
        goto generate_response;

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p) {
        size = vr_qos_map_entry_size * sizeof(struct vr_forwarding_class);
        fc_p = vr_zalloc(size, VR_QOS_MAP_OBJECT);
        if (!fc_p) {
            ret = -ENOMEM;
            goto generate_response;
        }
        need_set = true;
    }

    /* DSCP section: indices [0, VR_DSCP_QOS_ENTRIES) */
    for (i = 0; i < req->qmr_dscp_size; i++) {
        if (req->qmr_dscp[i] >= VR_DSCP_QOS_ENTRIES)
            continue;
        fc_e = &fc_p[req->qmr_dscp[i]];
        fc_e->vfc_dscp = req->qmr_dscp[i];
        fc_e->vfc_id = req->qmr_dscp_fc_id[i];
        fc_e->vfc_valid = 1;
    }

    /* MPLS EXP section follows the DSCP section */
    for (i = 0; i < req->qmr_mpls_qos_size; i++) {
        if (req->qmr_mpls_qos[i] >= VR_MPLS_QOS_ENTRIES)
            continue;
        fc_e = &fc_p[VR_DSCP_QOS_ENTRIES + req->qmr_mpls_qos[i]];
        fc_e->vfc_mpls_qos = req->qmr_mpls_qos[i];
        fc_e->vfc_id = req->qmr_mpls_qos_fc_id[i];
        fc_e->vfc_valid = 1;
    }

    /* 802.1p section follows the MPLS section */
    for (i = 0; i < req->qmr_dotonep_size; i++) {
        if (req->qmr_dotonep[i] >= VR_DOTONEP_QOS_ENTRIES)
            continue;
        fc_e = &fc_p[VR_DSCP_QOS_ENTRIES + VR_MPLS_QOS_ENTRIES +
            req->qmr_dotonep[i]];
        fc_e->vfc_dotonep_qos = req->qmr_dotonep[i];
        fc_e->vfc_id = req->qmr_dotonep_fc_id[i];
        fc_e->vfc_valid = 1;
    }

    ret = vr_offload_qos_map_add(req);
    if (ret) {
        vr_printf("offload QoS map not supported - not configuring\n");
        /* only free a table we allocated; an installed one stays live */
        if (need_set)
            vr_free(fc_p, VR_QOS_MAP_OBJECT);
        goto generate_response;
    }

    if (need_set)
        vr_qos_map_set_fc(router, req->qmr_id, fc_p);

generate_response:
    vr_send_response(ret);
    return;
}