static struct vr_nexthop * bridge_table_lookup(unsigned int vrf_id, struct vr_route_req *rt) { struct vr_bridge_entry *be; struct vr_bridge_entry_key key; if (rt->rtr_req.rtr_index != VR_BE_INVALID_INDEX) { be = vr_get_hentry_by_index(vn_rtable, rt->rtr_req.rtr_index); if (!be) return NULL; rt->rtr_nh = be->be_nh; if (rt->rtr_req.rtr_mac) VR_MAC_COPY(rt->rtr_req.rtr_mac, be->be_key.be_mac); return rt->rtr_nh; } rt->rtr_nh = NULL; rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX; VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac); key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be = vr_find_bridge_entry(&key); if (be) { if (be->be_flags & VR_BE_FLAG_LABEL_VALID) rt->rtr_req.rtr_label_flags |= VR_RT_LABEL_VALID_FLAG; rt->rtr_req.rtr_label = be->be_label; rt->rtr_nh = be->be_nh; rt->rtr_req.rtr_index = be->be_index; } return rt->rtr_nh; }
/*
 * bridge_add - insert or update the bridge entry for (mac, vrf).
 *
 * If no entry exists yet a free slot is claimed and initialized. When the
 * requested nexthop id differs from the entry's current one, a reference
 * on the new nexthop is taken before the old reference is released, and
 * the mac-moved marker is cleared.
 *
 * Returns the entry, or NULL when no free slot could be claimed.
 */
static struct vr_bridge_entry *
bridge_add(unsigned int router_id, unsigned int vrf, uint8_t *mac, int nh_id)
{
    struct vr_bridge_entry_key lookup;
    struct vr_bridge_entry *entry;
    struct vr_nexthop *prev_nh;

    VR_MAC_COPY(lookup.be_mac, mac);
    lookup.be_vrf_id = vrf;

    entry = vr_find_bridge_entry(&lookup);
    if (!entry) {
        /* No entry for (mac, vrf) yet - claim a free slot and seed it. */
        entry = vr_find_free_bridge_entry(vrf, mac);
        if (!entry)
            return NULL;

        VR_MAC_COPY(entry->be_key.be_mac, mac);
        entry->be_key.be_vrf_id = vrf;
        entry->be_packets = 0;
        entry->be_flags = VR_BE_VALID_FLAG;
    }

    if (entry->be_nh_id != nh_id) {
        /* Take the new reference before dropping the old one. */
        prev_nh = entry->be_nh;
        entry->be_nh = vrouter_get_nexthop(router_id, nh_id);
        entry->be_nh_id = entry->be_nh ? entry->be_nh->nh_id : -1;

        /* A nexthop change clears any pending mac-moved marker. */
        if (entry->be_flags & VR_BE_MAC_MOVED_FLAG)
            entry->be_flags &= ~VR_BE_MAC_MOVED_FLAG;

        if (prev_nh)
            vrouter_put_nexthop(prev_nh);
    }

    return entry;
}
/*
 * vif_bridge_get - look up a per-interface bridge entry keyed on
 * (vlan, mac) in the given hash table. The slot index, if any, is
 * returned through 'index'. Returns NULL when no entry matches.
 */
static struct vif_bridge_entry *
vif_bridge_get(vr_htable_t htable, unsigned short vlan,
               unsigned char *mac, int *index)
{
    struct vif_bridge_key search_key;

    VR_MAC_COPY(search_key.vbk_mac, mac);
    search_key.vbk_vlan = vlan;

    return (struct vif_bridge_entry *)vr_find_hentry(htable, &search_key,
                                                     index);
}
struct vr_bridge_entry * vr_find_free_bridge_entry(unsigned int vrf_id, char *mac) { struct vr_bridge_entry_key key; if (!vn_rtable || !mac) return NULL; key.be_vrf_id = vrf_id; VR_MAC_COPY(key.be_mac, mac); return vr_find_free_hentry(vn_rtable, &key, NULL); }
static int __bridge_table_add(struct vr_route_req *rt) { struct vr_bridge_entry *be; struct vr_nexthop *old_nh; struct vr_bridge_entry_key key; VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac); key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be = vr_find_bridge_entry(&key); if (!be) { be = vr_find_free_bridge_entry(rt->rtr_req.rtr_vrf_id, (char *)rt->rtr_req.rtr_mac); if (!be) return -ENOMEM; VR_MAC_COPY(be->be_key.be_mac, rt->rtr_req.rtr_mac); be->be_key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be->be_flags |= VR_BE_FLAG_VALID; } if (be->be_nh != rt->rtr_nh) { /* Un ref the old nexthop */ old_nh = be->be_nh; be->be_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (old_nh) vrouter_put_nexthop(old_nh); } if (rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG) { be->be_label = rt->rtr_req.rtr_label; be->be_flags |= VR_BE_FLAG_LABEL_VALID; } return 0; }
unsigned short vr_bridge_route_flags(unsigned int vrf_id, unsigned char *mac) { struct vr_bridge_entry *be; struct vr_bridge_entry_key key; VR_MAC_COPY(key.be_mac, mac); key.be_vrf_id = vrf_id; be = vr_find_bridge_entry(&key); if (be && (be->be_flags & VR_BE_VALID_FLAG)) return be->be_flags; return 0; }
/*
 * bridge_update_route_req - copy a bridge entry's forwarding state
 * (nexthop, label, flags) into the route request. For a key-based lookup
 * (no index in the request) the entry's slot index is returned; for an
 * index-based lookup the entry's mac is returned when a buffer exists.
 */
static void
bridge_update_route_req(struct vr_bridge_entry *be, struct vr_route_req *rt)
{
    rt->rtr_nh = be->be_nh;
    rt->rtr_req.rtr_label = be->be_label;
    rt->rtr_req.rtr_label_flags = be->be_flags;

    if (rt->rtr_req.rtr_index == VR_BE_INVALID_INDEX) {
        /* Looked up by key: hand the slot index back. */
        rt->rtr_req.rtr_index = be->be_hentry.hentry_index;
    } else if (rt->rtr_req.rtr_mac) {
        /* Looked up by index: hand the mac back. */
        VR_MAC_COPY(rt->rtr_req.rtr_mac, be->be_key.be_mac);
    }
}
static int bridge_entry_make_req(struct vr_route_req *resp, struct vr_bridge_entry *ent) { memset(resp, 0, sizeof(struct vr_route_req)); resp->rtr_req.rtr_mac_size = VR_ETHER_ALEN; resp->rtr_req.rtr_mac = vr_zalloc(VR_ETHER_ALEN); if (!resp->rtr_req.rtr_mac) return -ENOMEM; VR_MAC_COPY(resp->rtr_req.rtr_mac, ent->be_key.be_mac); resp->rtr_req.rtr_vrf_id = ent->be_key.be_vrf_id; if (ent->be_nh) resp->rtr_req.rtr_nh_id = ent->be_nh->nh_id; resp->rtr_req.rtr_family = AF_BRIDGE; resp->rtr_req.rtr_label = ent->be_label; if (ent->be_flags & VR_BE_FLAG_LABEL_VALID) resp->rtr_req.rtr_label_flags = VR_RT_LABEL_VALID_FLAG; return 0; }
static int bridge_table_delete(struct vr_rtable * _unused, struct vr_route_req *rt) { struct vr_bridge_entry_key key; struct vr_bridge_entry *be; if (!vn_rtable) return -EINVAL; VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac); key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be = vr_find_bridge_entry(&key); if (!be) return -ENOENT; bridge_table_entry_free(vn_rtable, (vr_hentry_t )be, 0, NULL); return 0; }
struct vr_bridge_entry * vr_find_free_bridge_entry(unsigned int vrf_id, char *mac) { unsigned int index; struct vr_bridge_entry *be; struct vr_bridge_entry_key key; if (!vn_rtable || !mac) return NULL; key.be_vrf_id = vrf_id; VR_MAC_COPY(key.be_mac, mac); be = vr_find_free_hentry(vn_rtable, &key, &index); if (be) { be->be_index = index; } return be; }
static int bridge_entry_make_req(struct vr_route_req *resp, struct vr_bridge_entry *ent) { memset(resp, 0, sizeof(struct vr_route_req)); resp->rtr_req.rtr_mac_size = VR_ETHER_ALEN; resp->rtr_req.rtr_mac = vr_zalloc(VR_ETHER_ALEN, VR_ROUTE_REQ_MAC_OBJECT); if (!resp->rtr_req.rtr_mac) return -ENOMEM; VR_MAC_COPY(resp->rtr_req.rtr_mac, ent->be_key.be_mac); resp->rtr_req.rtr_vrf_id = ent->be_key.be_vrf_id; if (ent->be_nh) resp->rtr_req.rtr_nh_id = ent->be_nh->nh_id; resp->rtr_req.rtr_family = AF_BRIDGE; resp->rtr_req.rtr_label = ent->be_label; resp->rtr_req.rtr_label_flags = ent->be_flags; resp->rtr_req.rtr_index = ent->be_hentry.hentry_index; return 0; }
static struct vr_bridge_entry * __bridge_lookup(unsigned int vrf_id, struct vr_route_req *rt) { struct vr_bridge_entry *be; struct vr_bridge_entry_key key; rt->rtr_req.rtr_label_flags = 0; rt->rtr_nh = NULL; if (rt->rtr_req.rtr_index != VR_BE_INVALID_INDEX) { be = (struct vr_bridge_entry *) vr_htable_get_hentry_by_index(vn_rtable, rt->rtr_req.rtr_index); } else { VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac); key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be = vr_find_bridge_entry(&key); } return be; }
static struct vr_nexthop * bridge_table_lookup(unsigned int vrf_id, struct vr_route_req *rt, struct vr_packet *pkt) { struct vr_bridge_entry *be; struct vr_bridge_entry_key key; VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac); key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be = vr_find_bridge_entry(&key); if (be) { if (be->be_flags & VR_BE_FLAG_LABEL_VALID) rt->rtr_req.rtr_label_flags = VR_RT_LABEL_VALID_FLAG; rt->rtr_req.rtr_label = be->be_label; rt->rtr_nh = be->be_nh; return be->be_nh; } return NULL; }
/*
 * vr_get_proxy_mac - decide how an ARP request should be handled:
 * proxied (MR_PROXY), flooded (MR_FLOOD), or dropped (MR_DROP).
 *
 * pkt  - the ARP request packet; its receiving interface drives the
 *        fabric/virtual split.
 * fmd  - forwarding metadata; fmd_ecmp_src_nh_index is cleared here as a
 *        side effect when an ECMP source is present.
 * rt   - the route lookup result for the target; rtr_nh, rtr_index and
 *        the flood/label flags are consulted, and rtr_mac may be
 *        rewritten by vr_bridge_lookup().
 * dmac - out: on MR_PROXY only, receives the mac to answer with (either
 *        a stitched mac from the bridge table or the interface mac).
 *
 * Per-VRF ARP counters are bumped along each path when stats are
 * available.
 */
mac_response_t
vr_get_proxy_mac(struct vr_packet *pkt, struct vr_forwarding_md *fmd,
        struct vr_route_req *rt, unsigned char *dmac)
{
    bool from_fabric, stitched, flood;
    bool to_gateway, no_proxy, to_vcp;
    unsigned char *resp_mac;
    struct vr_nexthop *nh = NULL;
    struct vr_interface *vif = pkt->vp_if;
    struct vr_vrf_stats *stats;

    from_fabric = stitched = flood = to_gateway = to_vcp = no_proxy = false;

    /* stats may be NULL here; every use below is guarded. */
    stats = vr_inet_vrf_stats(fmd->fmd_dvrf, pkt->vp_cpu);

    /* Classify the request from the interface and route state. */
    if (vif->vif_type == VIF_TYPE_PHYSICAL)
        from_fabric = true;

    if (vif->vif_flags & VIF_FLAG_NO_ARP_PROXY)
        no_proxy = true;

    if (rt->rtr_req.rtr_label_flags & VR_RT_ARP_FLOOD_FLAG)
        flood = true;

    if (vr_gateway_nexthop(rt->rtr_nh))
        to_gateway = true;

    /*
     * the no_proxy flag is set for the vcp ports. From such ports
     * vrouter should proxy only for the gateway ip.
     */
    if (no_proxy && !to_gateway)
        return MR_DROP;

    if (from_fabric) {
        if (vr_nexthop_is_vcp(rt->rtr_nh)) {
            to_vcp = true;
        }
    }

    /* Default answer is our own interface mac, unless stitching applies. */
    resp_mac = vif->vif_mac;
    if (rt->rtr_req.rtr_index != VR_BE_INVALID_INDEX) {
        /* Bridge entry exists: answer with the stitched (learned) mac. */
        if ((nh = vr_bridge_lookup(fmd->fmd_dvrf, rt))) {
            resp_mac = rt->rtr_req.rtr_mac;
            stitched = true;
        }
    }

    /* If ECMP source, we force routing: answer with our mac and clear
     * the ECMP source marker. */
    if (fmd->fmd_ecmp_src_nh_index != -1) {
        resp_mac = vif->vif_mac;
        fmd->fmd_ecmp_src_nh_index = -1;
    }

    /*
     * situations that are handled here (from_fabric)
     *
     * . arp request from vm, but not proxied at the source because of lack
     *   of information at the source. only the compute that hosts the
     *   destination should respond, and that too only if the mac information
     *   is present (and hence the ENCAP check).
     *
     * . arp request from a baremetal arriving at a TSN, which if possesses
     *   the mac information for the destination vm, should proxy. If it does
     *   not hold the mac information, the request should be flooded
     *
     * . arp request from the uplink port of a vcp
     */
    if (from_fabric) {
        if (flood && !stitched) {
            if (stats)
                stats->vrf_arp_physical_flood++;
            return MR_FLOOD;
        }

        /*
         * arp requests to gateway coming from the fabric should be dropped
         * unless the request was for the TSN DNS service (which appears as
         * the gateway, with the current set of checks). We should not respond
         * for gateway ip if we are TSN and the request came from baremetal.
         * TSN does not have gateway route and hence the to_gateway will be
         * true only for the DNS ip.
         */
        if (to_gateway) {
            if (fmd->fmd_src != TOR_SOURCE) {
                return MR_DROP;
            }
        }

        /*
         * we should proxy if the vm is hosted by us, in which case nh will be
         * of ENCAP type. we should also proxy for a host in vcp port. In all
         * other cases, we should proxy only if
         *
         * i am a TSN (fmd->fmd_src),
         * i am the dns IP or
         * i have the mac information (nh - (mostly tunnel)) and
         * the originator is a bare metal (fmd->fmd_src)
         */
        if (to_vcp || to_gateway ||
                (nh && ((nh->nh_type == NH_ENCAP) ||
                        (fmd->fmd_src == TOR_SOURCE)))) {
            if (stats)
                stats->vrf_arp_physical_stitch++;
        } else {
            if (stats)
                stats->vrf_arp_physical_flood++;
            return MR_FLOOD;
        }
    } else {
        /* Request came from a virtual interface. */
        if (!stitched && flood) {
            /*
             * if there is no stitching information, but flood flag is set
             * we should flood
             */
            if (stats)
                stats->vrf_arp_virtual_flood++;
            return MR_FLOOD;
        }

        if (stats) {
            if (stitched) {
                stats->vrf_arp_virtual_stitch++;
            } else {
                stats->vrf_arp_virtual_proxy++;
            }
        }
    }

    /* Proxy: tell the caller which mac to answer with. */
    VR_MAC_COPY(dmac, resp_mac);
    return MR_PROXY;
}