/*
 * vr_mpls_add - install an incoming-label-map (ILM) entry mapping
 * req->mr_label to the nexthop identified by req->mr_nhid.
 *
 * Returns 0 on success, negative errno otherwise. A response carrying
 * ret is always sent back via vr_send_response().
 */
int
vr_mpls_add(vr_mpls_req *req)
{
    struct vrouter *router;
    struct vr_nexthop *nh, *old_nh;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /*
     * vr_ilm[] has vr_max_labels slots, so a label equal to
     * vr_max_labels is out of bounds: reject with '>=', not '>'
     */
    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    nh = vrouter_get_nexthop(req->mr_rid, req->mr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /*
     * if the slot was already populated, drop the reference held by the
     * nexthop we are replacing; otherwise it would leak
     */
    old_nh = router->vr_ilm[req->mr_label];
    router->vr_ilm[req->mr_label] = nh;
    if (old_nh)
        vrouter_put_nexthop(old_nh);

generate_resp:
    vr_send_response(ret);
    return ret;
}
static int __mcast_add(struct vr_route_req *rt) { struct vr_nexthop *old_nh; struct vr_mcast_entry *ent; struct vr_mcast_entry_key key; key.vrf_id = rt->rtr_req.rtr_vrf_id; key.src_ip = rt->rtr_req.rtr_src; key.dst_ip = rt->rtr_req.rtr_prefix; ent = vr_find_mcast_entry(&key); if (!ent) { ent = vr_find_free_mcast_entry(&key); if (!ent) return -ENOMEM; ent->key.vrf_id = key.vrf_id; ent->key.src_ip = key.src_ip; ent->key.dst_ip = key.dst_ip; ent->flags |= VR_MCAST_FLAG_VALID; } /* The nexthop can be changed though entry exits */ if (ent->nh != rt->rtr_nh) { old_nh = ent->nh; ent->nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (old_nh) vrouter_put_nexthop(old_nh); } return 0; }
static int bridge_table_add(struct vr_rtable * _unused, struct vr_route_req *rt) { int ret; if (!vn_rtable) return -EINVAL; if (IS_MAC_ZERO(rt->rtr_req.rtr_mac)) return -EINVAL; rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (!rt->rtr_nh) return -ENOENT; if ((!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) && (rt->rtr_nh->nh_type == NH_TUNNEL)) { vrouter_put_nexthop(rt->rtr_nh); return -EINVAL; } ret = __bridge_table_add(rt); vrouter_put_nexthop(rt->rtr_nh); return ret; }
/*
 * vr_mirror_add - create or update the mirror entry at req->mirr_index.
 * A fresh entry is allocated when the slot is empty; otherwise the
 * existing entry is updated in place and the reference on its previous
 * nexthop is dropped after the new one is installed. Returns 0 on
 * success, negative errno otherwise; always responds via
 * vr_send_response().
 */
int
vr_mirror_add(vr_mirror_req *req)
{
    struct vrouter *router;
    struct vr_mirror_entry *entry;
    struct vr_nexthop *new_nh;
    struct vr_nexthop *prev_nh = NULL;
    int ret = 0;

    router = vrouter_get(req->mirr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mirr_index >= router->vr_max_mirror_indices) {
        ret = -EINVAL;
        goto generate_resp;
    }

    new_nh = vrouter_get_nexthop(req->mirr_rid, req->mirr_nhid);
    if (!new_nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    entry = router->vr_mirrors[req->mirr_index];
    if (entry) {
        /* updating in place: remember the nexthop being displaced */
        prev_nh = entry->mir_nh;
    } else {
        entry = vr_zalloc(sizeof(*entry), VR_MIRROR_OBJECT);
        if (!entry) {
            ret = -ENOMEM;
            vrouter_put_nexthop(new_nh);
            goto generate_resp;
        }
    }

    entry->mir_nh = new_nh;
    entry->mir_rid = req->mirr_rid;
    entry->mir_flags = req->mirr_flags;
    entry->mir_vni = req->mirr_vni;
    entry->mir_vlan_id = req->mirr_vlan;
    router->vr_mirrors[req->mirr_index] = entry;

    /* release the displaced nexthop only after the new one is visible */
    if (prev_nh)
        vrouter_put_nexthop(prev_nh);

generate_resp:
    vr_send_response(ret);
    return ret;
}
/*
 * mcast_add - resolve the request's nexthop, delegate the insert to
 * __mcast_add, then drop the temporary reference. Returns 0 on success,
 * negative errno otherwise.
 */
static int
mcast_add(struct vr_rtable * _unused, struct vr_route_req *rt)
{
    int ret;

    rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
            rt->rtr_req.rtr_nh_id);
    if (!rt->rtr_nh)
        return -ENOENT;

    ret = __mcast_add(rt);

    vrouter_put_nexthop(rt->rtr_nh);
    return ret;
}
/*
 * the nh pointer is something which will be retained. So, call this function
 * with an nh * that you are willing to forget about in your function
 *
 * Points *entry at nh (taking a fresh reference through the router's
 * nexthop table), revalidating that nh is still the live nexthop for its
 * id. If it is not, the entry falls back to the discard nexthop. Any
 * nexthop previously held by the entry is released last, so a concurrent
 * reader never sees the entry without a valid reference.
 */
static void
set_entry_to_nh(struct ip_bucket_entry *entry, struct vr_nexthop *nh)
{
    struct vr_nexthop *tmp_nh;
    /* remember whether the entry currently holds a nexthop reference */
    int orig_entry_nh = ENTRY_IS_NEXTHOP(entry);

    /* re-look up nh by id: confirms it is still the table's live pointer */
    tmp_nh = vrouter_get_nexthop(nh->nh_rid, nh->nh_id);
    if (tmp_nh != nh) {
        /*
         * if the original nexthop was deleted, then there are
         * two cases
         *
         * 1. no new nexthop was created (& hence the null check)
         * 2. new nexthop has taken it's place (in which case, we need to
         * put the reference we took above
         */
        if (tmp_nh)
            vrouter_put_nexthop(tmp_nh);
        /* stale nh: route the entry to the discard nexthop instead */
        nh = vrouter_get_nexthop(nh->nh_rid, NH_DISCARD_ID);
    }

    /* save the original */
    tmp_nh = entry->entry_nh_p;
    /* update the entry */
    entry->entry_nh_p = nh;
    /* set entry type */
    entry->entry_type = ENTRY_TYPE_NEXTHOP;

    /*
     * ...and then take steps to release original; only put it if the
     * entry actually held a nexthop (not some other entry type)
     */
    if (tmp_nh && orig_entry_nh) {
        vrouter_put_nexthop(tmp_nh);
    }

    return;
}
/* * adds a route to the corresponding vrf table. returns 0 on * success and non-zero otherwise */ static int mtrie_add(struct vr_rtable * _unused, struct vr_route_req *rt) { unsigned int vrf_id = rt->rtr_req.rtr_vrf_id; struct ip_mtrie *mtrie = vrfid_to_mtrie(vrf_id, rt->rtr_req.rtr_family); int ret; struct vr_route_req tmp_req; mtrie = (mtrie ? mtrie : mtrie_alloc_vrf(vrf_id, rt->rtr_req.rtr_family)); if (!mtrie) return -ENOMEM; rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (!rt->rtr_nh) return -ENOENT; if ((!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) && (rt->rtr_nh->nh_type == NH_TUNNEL)) { vrouter_put_nexthop(rt->rtr_nh); return -EINVAL; } rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX; if ((rt->rtr_req.rtr_mac_size == VR_ETHER_ALEN) && (!IS_MAC_ZERO(rt->rtr_req.rtr_mac))) { tmp_req.rtr_req.rtr_index = rt->rtr_req.rtr_index; tmp_req.rtr_req.rtr_mac_size = VR_ETHER_ALEN; tmp_req.rtr_req.rtr_mac = rt->rtr_req.rtr_mac; tmp_req.rtr_req.rtr_vrf_id = rt->rtr_req.rtr_vrf_id; if (!vr_bridge_lookup(tmp_req.rtr_req.rtr_vrf_id, &tmp_req)) return -ENOENT; rt->rtr_req.rtr_index = tmp_req.rtr_req.rtr_index; } if (!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) { rt->rtr_req.rtr_label = 0xFFFFFF; } else { rt->rtr_req.rtr_label &= 0xFFFFFF; } ret = __mtrie_add(mtrie, rt, 1); vrouter_put_nexthop(rt->rtr_nh); return ret; }
/*
 * vr_mpls_add - (re)install the label-to-nexthop binding for
 * req->mr_label. Any existing binding for the label is deleted first,
 * then the new nexthop is installed; hardware MPLS filtering is
 * programmed for unicast encap nexthops when the host supports it.
 * Returns 0 on success, negative errno otherwise; always responds via
 * vr_send_response().
 */
int
vr_mpls_add(vr_mpls_req *req)
{
    int ret = 0;
    struct vr_nexthop *nh;
    struct vrouter *router;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* clear out whatever the label currently points at */
    ret = __vr_mpls_del(router, req->mr_label);
    if (ret)
        goto generate_resp;

    nh = vrouter_get_nexthop(req->mr_rid, req->mr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    ret = __vrouter_set_label(router, req->mr_label, nh);
    if (ret) {
        vrouter_put_nexthop(nh);
        goto generate_resp;
    }

    /* hardware packet filtering (Flow Director) support */
    if (vrouter_host->hos_add_mpls) {
        if ((nh->nh_type == NH_ENCAP) && !(nh->nh_flags & NH_FLAG_MCAST))
            vrouter_host->hos_add_mpls(router, req->mr_label);
    }

generate_resp:
    vr_send_response(ret);
    return ret;
}
static struct vr_bridge_entry * bridge_add(unsigned int router_id, unsigned int vrf, uint8_t *mac, int nh_id) { struct vr_bridge_entry *be; struct vr_bridge_entry_key key; struct vr_nexthop *old_nh; VR_MAC_COPY(key.be_mac, mac); key.be_vrf_id = vrf; be = vr_find_bridge_entry(&key); if (!be) { be = vr_find_free_bridge_entry(vrf, mac); if (!be) return NULL; VR_MAC_COPY(be->be_key.be_mac, mac); be->be_key.be_vrf_id = vrf; be->be_packets = 0; be->be_flags = VR_BE_VALID_FLAG; } /* Un ref the old nexthop */ if (be->be_nh_id != nh_id) { old_nh = be->be_nh; be->be_nh = vrouter_get_nexthop(router_id, nh_id); if (be->be_nh) { be->be_nh_id = be->be_nh->nh_id; } else { be->be_nh_id = -1; } if (be->be_flags & VR_BE_MAC_MOVED_FLAG) { be->be_flags &= ~VR_BE_MAC_MOVED_FLAG; } if (old_nh) vrouter_put_nexthop(old_nh); } return be; }
static int __bridge_table_add(struct vr_route_req *rt) { struct vr_bridge_entry *be; struct vr_nexthop *old_nh; struct vr_bridge_entry_key key; VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac); key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be = vr_find_bridge_entry(&key); if (!be) { be = vr_find_free_bridge_entry(rt->rtr_req.rtr_vrf_id, (char *)rt->rtr_req.rtr_mac); if (!be) return -ENOMEM; VR_MAC_COPY(be->be_key.be_mac, rt->rtr_req.rtr_mac); be->be_key.be_vrf_id = rt->rtr_req.rtr_vrf_id; be->be_flags |= VR_BE_FLAG_VALID; } if (be->be_nh != rt->rtr_nh) { /* Un ref the old nexthop */ old_nh = be->be_nh; be->be_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (old_nh) vrouter_put_nexthop(old_nh); } if (rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG) { be->be_label = rt->rtr_req.rtr_label; be->be_flags |= VR_BE_FLAG_LABEL_VALID; } return 0; }
/* * Delete a route from the table. * prefix is in network byte order. * returns 0 on failure; or non-zero if an entry was found. * * When deleting a route: * - Move all descendent bucket (not covered by more-specifics) with the * parent of this node. * - If any buckets contain the same next-hop result, the bucket can be * deleted. Memory should be freed after a delay in order to deal with * concurrency. */ static int mtrie_delete(struct vr_rtable * _unused, struct vr_route_req *rt) { int vrf_id = rt->rtr_req.rtr_vrf_id; struct ip_mtrie *rtable; struct vr_route_req lreq; rtable = vrfid_to_mtrie(vrf_id, rt->rtr_req.rtr_family); if (!rtable) return -ENOENT; rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid, rt->rtr_req.rtr_nh_id); if (!rt->rtr_nh) return -ENOENT; rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX; if ((rt->rtr_req.rtr_mac_size == VR_ETHER_ALEN) && (!IS_MAC_ZERO(rt->rtr_req.rtr_mac))) { lreq.rtr_req.rtr_index = rt->rtr_req.rtr_index; lreq.rtr_req.rtr_mac_size = VR_ETHER_ALEN; lreq.rtr_req.rtr_mac = rt->rtr_req.rtr_mac; lreq.rtr_req.rtr_vrf_id = vrf_id; if (!vr_bridge_lookup(vrf_id, &lreq)) return -ENOENT; rt->rtr_req.rtr_index = lreq.rtr_req.rtr_index; } if (!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) { rt->rtr_req.rtr_label = 0xFFFFFF; } else { rt->rtr_req.rtr_label &= 0xFFFFFF; } __mtrie_delete(rt, &rtable->root, 0, 0, 1); vrouter_put_nexthop(rt->rtr_nh); return 0; }
static struct ip_mtrie * mtrie_alloc_vrf(unsigned int vrf_id, unsigned int family) { struct ip_mtrie *mtrie; struct ip_mtrie **mtrie_table; int index = 0; if (family == AF_INET6) index = 1; mtrie = vr_zalloc(sizeof(struct ip_mtrie), VR_MTRIE_OBJECT); if (mtrie) { mtrie->root.entry_nh_p = vrouter_get_nexthop(0, NH_DISCARD_ID); mtrie->root.entry_bridge_index = VR_BE_INVALID_INDEX; mtrie->root.entry_type = ENTRY_TYPE_NEXTHOP; mtrie_table = vn_rtable[index]; mtrie_table[vrf_id] = mtrie; mtrie->root.entry_label = 0xFFFFFF; mtrie->root.entry_label_flags = 0; } return mtrie; }