static int bridge_table_add(struct vr_rtable * _unused, struct vr_route_req *rt) { int ret; if (!vn_rtable) return -EINVAL; if (IS_MAC_ZERO(rt->rtr_req.rtr_mac)) return -EINVAL; rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (!rt->rtr_nh) return -ENOENT; if ((!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) && (rt->rtr_nh->nh_type == NH_TUNNEL)) { vrouter_put_nexthop(rt->rtr_nh); return -EINVAL; } ret = __bridge_table_add(rt); vrouter_put_nexthop(rt->rtr_nh); return ret; }
/*
 * Dump the bridge table entries to a message response. An all-zero MAC in
 * the request means "start from the beginning". Always generates a
 * response (even on error) and returns 0.
 */
static int
bridge_table_dump(struct vr_rtable * _unused, struct vr_route_req *rt)
{
    int ret = 0;
    struct vr_message_dumper *dumper;
    char *mac;

    dumper = vr_message_dump_init(&rt->rtr_req);
    if (!dumper) {
        ret = -ENOMEM;
        goto generate_response;
    }

    /*
     * BUG FIX: this check used to 'return -EINVAL' directly, leaking the
     * dumper allocated above and never sending a response. Route the
     * error through generate_response instead.
     */
    if (rt->rtr_req.rtr_mac_size != VR_ETHER_ALEN) {
        ret = -EINVAL;
        goto generate_response;
    }

    mac = (char *)(((vr_route_req *)(dumper->dump_req))->rtr_mac);
    if (!mac) {
        ret = -EINVAL;
        goto generate_response;
    }

    /* a zero MAC marker means the dump starts from the first entry */
    if (IS_MAC_ZERO(mac))
        dumper->dump_been_to_marker = 1;

    ret = __bridge_table_dump(dumper);

generate_response:
    /* NOTE(review): dumper may be NULL here (original behavior);
     * presumably vr_message_dump_exit tolerates that — verify */
    vr_message_dump_exit(dumper, ret);

    return 0;
}
/* * adds a route to the corresponding vrf table. returns 0 on * success and non-zero otherwise */ static int mtrie_add(struct vr_rtable * _unused, struct vr_route_req *rt) { unsigned int vrf_id = rt->rtr_req.rtr_vrf_id; struct ip_mtrie *mtrie = vrfid_to_mtrie(vrf_id, rt->rtr_req.rtr_family); int ret; struct vr_route_req tmp_req; mtrie = (mtrie ? mtrie : mtrie_alloc_vrf(vrf_id, rt->rtr_req.rtr_family)); if (!mtrie) return -ENOMEM; rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (!rt->rtr_nh) return -ENOENT; if ((!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) && (rt->rtr_nh->nh_type == NH_TUNNEL)) { vrouter_put_nexthop(rt->rtr_nh); return -EINVAL; } rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX; if ((rt->rtr_req.rtr_mac_size == VR_ETHER_ALEN) && (!IS_MAC_ZERO(rt->rtr_req.rtr_mac))) { tmp_req.rtr_req.rtr_index = rt->rtr_req.rtr_index; tmp_req.rtr_req.rtr_mac_size = VR_ETHER_ALEN; tmp_req.rtr_req.rtr_mac = rt->rtr_req.rtr_mac; tmp_req.rtr_req.rtr_vrf_id = rt->rtr_req.rtr_vrf_id; if (!vr_bridge_lookup(tmp_req.rtr_req.rtr_vrf_id, &tmp_req)) return -ENOENT; rt->rtr_req.rtr_index = tmp_req.rtr_req.rtr_index; } if (!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) { rt->rtr_req.rtr_label = 0xFFFFFF; } else { rt->rtr_req.rtr_label &= 0xFFFFFF; } ret = __mtrie_add(mtrie, rt, 1); vrouter_put_nexthop(rt->rtr_nh); return ret; }
/* * Delete a route from the table. * prefix is in network byte order. * returns 0 on failure; or non-zero if an entry was found. * * When deleting a route: * - Move all descendent bucket (not covered by more-specifics) with the * parent of this node. * - If any buckets contain the same next-hop result, the bucket can be * deleted. Memory should be freed after a delay in order to deal with * concurrency. */ static int mtrie_delete(struct vr_rtable * _unused, struct vr_route_req *rt) { int vrf_id = rt->rtr_req.rtr_vrf_id; struct ip_mtrie *rtable; struct vr_route_req lreq; rtable = vrfid_to_mtrie(vrf_id, rt->rtr_req.rtr_family); if (!rtable) return -ENOENT; rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (!rt->rtr_nh) return -ENOENT; rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX; if ((rt->rtr_req.rtr_mac_size == VR_ETHER_ALEN) && (!IS_MAC_ZERO(rt->rtr_req.rtr_mac))) { lreq.rtr_req.rtr_index = rt->rtr_req.rtr_index; lreq.rtr_req.rtr_mac_size = VR_ETHER_ALEN; lreq.rtr_req.rtr_mac = rt->rtr_req.rtr_mac; lreq.rtr_req.rtr_vrf_id = vrf_id; if (!vr_bridge_lookup(vrf_id, &lreq)) return -ENOENT; rt->rtr_req.rtr_index = lreq.rtr_req.rtr_index; } if (!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) { rt->rtr_req.rtr_label = 0xFFFFFF; } else { rt->rtr_req.rtr_label &= 0xFFFFFF; } __mtrie_delete(rt, &rtable->root, 0, 0, 1); vrouter_put_nexthop(rt->rtr_nh); return 0; }