static int
bridge_table_add(struct vr_rtable *_unused, struct vr_route_req *rt)
{
    int ret;

    if (!vn_rtable)
        return -EINVAL;

    if (IS_MAC_ZERO(rt->rtr_req.rtr_mac))
        return -EINVAL;

    rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
            rt->rtr_req.rtr_nh_id);
    if (!rt->rtr_nh)
        return -ENOENT;

    /* a tunnel nexthop is unusable without a valid label */
    if ((!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) &&
            (rt->rtr_nh->nh_type == NH_TUNNEL)) {
        vrouter_put_nexthop(rt->rtr_nh);
        return -EINVAL;
    }

    ret = __bridge_table_add(rt);
    vrouter_put_nexthop(rt->rtr_nh);

    return ret;
}
static void
mtrie_free_entry(struct ip_bucket_entry *entry, unsigned int level)
{
    unsigned int i;
    struct ip_bucket *bkt;

    if (!ENTRY_IS_BUCKET(entry)) {
        if (ENTRY_IS_NEXTHOP(entry)) {
            vrouter_put_nexthop(entry->entry_nh_p);
            entry->entry_nh_p = NULL;
        } else {
            entry->entry_vdata_p = NULL;
        }
        return;
    }

    bkt = entry_to_bucket(entry);
    if (!bkt)
        return;

    for (i = 0; i < IPBUCKET_LEVEL_SIZE; i++) {
        if (ENTRY_IS_BUCKET(&bkt->bkt_data[i])) {
            mtrie_free_entry(&bkt->bkt_data[i], level + 1);
        } else if (ENTRY_IS_NEXTHOP(&bkt->bkt_data[i])) {
            vrouter_put_nexthop(bkt->bkt_data[i].entry_nh_p);
        }
    }

    entry->entry_bkt_p = NULL;
    vr_free(bkt, VR_MTRIE_BUCKET_OBJECT);

    return;
}
int
vr_mirror_add(vr_mirror_req *req)
{
    int ret = 0;
    struct vrouter *router;
    struct vr_nexthop *nh, *old_nh = NULL;
    struct vr_mirror_entry *mirror;

    router = vrouter_get(req->mirr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mirr_index >= router->vr_max_mirror_indices) {
        ret = -EINVAL;
        goto generate_resp;
    }

    nh = vrouter_get_nexthop(req->mirr_rid, req->mirr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    mirror = router->vr_mirrors[req->mirr_index];
    if (!mirror) {
        mirror = vr_zalloc(sizeof(*mirror), VR_MIRROR_OBJECT);
        if (!mirror) {
            ret = -ENOMEM;
            vrouter_put_nexthop(nh);
            goto generate_resp;
        }
    } else {
        /* an existing entry is being updated; remember its nexthop */
        old_nh = mirror->mir_nh;
    }

    mirror->mir_nh = nh;
    mirror->mir_rid = req->mirr_rid;
    mirror->mir_flags = req->mirr_flags;
    mirror->mir_vni = req->mirr_vni;
    mirror->mir_vlan_id = req->mirr_vlan;
    router->vr_mirrors[req->mirr_index] = mirror;

    /* release the reference the entry used to hold */
    if (old_nh)
        vrouter_put_nexthop(old_nh);

generate_resp:
    vr_send_response(ret);

    return ret;
}
int
__vr_mirror_del(struct vrouter *router, unsigned int index)
{
    struct vr_nexthop *nh;
    struct vr_mirror_entry *mirror;
    struct vr_defer_data *defer;

    if (index >= router->vr_max_mirror_indices)
        return -EINVAL;

    mirror = router->vr_mirrors[index];
    if (!mirror)
        return -EINVAL;

    /* unlink the entry first, so the datapath stops finding it */
    nh = mirror->mir_nh;
    router->vr_mirrors[index] = NULL;
    mirror->mir_nh = NULL;

    if (!vr_not_ready) {
        /* prefer a deferred free; fall back to a synchronous delay */
        defer = vr_get_defer_data(sizeof(*defer));
        if (defer) {
            defer->vdd_data = (void *)mirror;
            vr_defer(router, vr_mirror_defer_delete, (void *)defer);
        } else {
            vr_delay_op();
            vr_free(mirror, VR_MIRROR_OBJECT);
        }
    } else {
        vr_free(mirror, VR_MIRROR_OBJECT);
    }

    vrouter_put_nexthop(nh);

    return 0;
}
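/*
 * A minimal sketch (not from the source) of the deferred callback that
 * __vr_mirror_del() above schedules via vr_defer(). It assumes the
 * callback signature (router, arg) and that vdd_data carries the
 * unlinked mirror entry; the real implementation may differ.
 */
static void
vr_mirror_defer_delete(struct vrouter *router, void *arg)
{
    struct vr_defer_data *defer = (struct vr_defer_data *)arg;

    if (!defer)
        return;

    /* by now the defer delay guarantees no datapath reference remains */
    vr_free(defer->vdd_data, VR_MIRROR_OBJECT);

    return;
}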
static int
__mcast_add(struct vr_route_req *rt)
{
    struct vr_nexthop *old_nh;
    struct vr_mcast_entry *ent;
    struct vr_mcast_entry_key key;

    key.vrf_id = rt->rtr_req.rtr_vrf_id;
    key.src_ip = rt->rtr_req.rtr_src;
    key.dst_ip = rt->rtr_req.rtr_prefix;

    ent = vr_find_mcast_entry(&key);
    if (!ent) {
        ent = vr_find_free_mcast_entry(&key);
        if (!ent)
            return -ENOMEM;

        ent->key.vrf_id = key.vrf_id;
        ent->key.src_ip = key.src_ip;
        ent->key.dst_ip = key.dst_ip;
        ent->flags |= VR_MCAST_FLAG_VALID;
    }

    /* the nexthop can change even though the entry already exists */
    if (ent->nh != rt->rtr_nh) {
        old_nh = ent->nh;
        ent->nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
                rt->rtr_req.rtr_nh_id);
        if (old_nh)
            vrouter_put_nexthop(old_nh);
    }

    return 0;
}
void
vr_mpls_exit(struct vrouter *router, bool soft_reset)
{
    unsigned int i;
    struct vr_nexthop *nh;

    if (!router->vr_max_labels || !router->vr_ilm)
        return;

    for (i = 0; i < router->vr_max_labels; i++) {
        nh = __vrouter_get_label(router, i);
        if (nh) {
            vrouter_put_nexthop(nh);
            __vrouter_set_label(router, i, NULL);
        }
    }

    if (soft_reset == false) {
        vr_btable_free(router->vr_ilm);
        router->vr_ilm = NULL;
        router->vr_max_labels = 0;
    }

    return;
}
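/*
 * Sketches (assumptions, not from the source) of the label-map accessors
 * used above. They presume vr_ilm is a vr_btable of nexthop pointers
 * indexed by label, consistent with the vr_btable_free() call in this
 * variant of vr_mpls_exit(); other variants in this section index vr_ilm
 * directly as an array, and the real helpers may differ.
 */
static struct vr_nexthop *
__vrouter_get_label(struct vrouter *router, unsigned int label)
{
    struct vr_nexthop **nh_p;

    if (!router->vr_ilm || label >= router->vr_max_labels)
        return NULL;

    nh_p = (struct vr_nexthop **)vr_btable_get(router->vr_ilm, label);
    return nh_p ? *nh_p : NULL;
}

static int
__vrouter_set_label(struct vrouter *router, unsigned int label,
        struct vr_nexthop *nh)
{
    struct vr_nexthop **nh_p;

    if (!router->vr_ilm || label >= router->vr_max_labels)
        return -EINVAL;

    nh_p = (struct vr_nexthop **)vr_btable_get(router->vr_ilm, label);
    if (!nh_p)
        return -EINVAL;

    /* the slot holds a reference; callers manage get/put around this */
    *nh_p = nh;
    return 0;
}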
int
vr_mpls_del(vr_mpls_req *req)
{
    struct vrouter *router;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* valid labels run from 0 to vr_max_labels - 1 */
    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if (router->vr_ilm[req->mr_label])
        vrouter_put_nexthop(router->vr_ilm[req->mr_label]);
    router->vr_ilm[req->mr_label] = NULL;

generate_resp:
    vr_send_response(ret);

    return ret;
}
static void
bridge_table_entry_free(vr_htable_t table, vr_hentry_t *hentry,
        unsigned int index, void *data)
{
    struct vr_nexthop *nh;
    struct vr_bridge_entry *be = (struct vr_bridge_entry *)hentry;

    if (!be)
        return;

    /* Mark this entry as invalid */
    be->be_flags &= ~VR_BE_VALID_FLAG;

    if (be->be_nh) {
        nh = be->be_nh;
        be->be_nh = NULL;
        be->be_nh_id = -1;
        vrouter_put_nexthop(nh);
    }

    be->be_packets = 0;
    vr_htable_release_hentry(table, hentry);

    return;
}
/*
 * adds a route to the corresponding vrf table. returns 0 on
 * success and non-zero otherwise
 */
static int
mtrie_add(struct vr_rtable *_unused, struct vr_route_req *rt)
{
    unsigned int vrf_id = rt->rtr_req.rtr_vrf_id;
    struct ip_mtrie *mtrie = vrfid_to_mtrie(vrf_id, rt->rtr_req.rtr_family);
    int ret;
    struct vr_route_req tmp_req;

    mtrie = (mtrie ? mtrie : mtrie_alloc_vrf(vrf_id, rt->rtr_req.rtr_family));
    if (!mtrie)
        return -ENOMEM;

    rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
            rt->rtr_req.rtr_nh_id);
    if (!rt->rtr_nh)
        return -ENOENT;

    /* a tunnel nexthop is unusable without a valid label */
    if ((!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG)) &&
            (rt->rtr_nh->nh_type == NH_TUNNEL)) {
        vrouter_put_nexthop(rt->rtr_nh);
        return -EINVAL;
    }

    rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX;
    if ((rt->rtr_req.rtr_mac_size == VR_ETHER_ALEN) &&
            (!IS_MAC_ZERO(rt->rtr_req.rtr_mac))) {
        tmp_req.rtr_req.rtr_index = rt->rtr_req.rtr_index;
        tmp_req.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
        tmp_req.rtr_req.rtr_mac = rt->rtr_req.rtr_mac;
        tmp_req.rtr_req.rtr_vrf_id = rt->rtr_req.rtr_vrf_id;
        if (!vr_bridge_lookup(tmp_req.rtr_req.rtr_vrf_id, &tmp_req)) {
            /* drop the reference taken above before bailing out */
            vrouter_put_nexthop(rt->rtr_nh);
            return -ENOENT;
        }
        rt->rtr_req.rtr_index = tmp_req.rtr_req.rtr_index;
    }

    /* mask the label to its 24-bit field; 0xFFFFFF marks it invalid */
    if (!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG))
        rt->rtr_req.rtr_label = 0xFFFFFF;
    else
        rt->rtr_req.rtr_label &= 0xFFFFFF;

    ret = __mtrie_add(mtrie, rt, 1);
    vrouter_put_nexthop(rt->rtr_nh);

    return ret;
}
static int
mcast_add(struct vr_rtable *_unused, struct vr_route_req *rt)
{
    int ret;

    rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
            rt->rtr_req.rtr_nh_id);
    if (!rt->rtr_nh)
        return -ENOENT;

    ret = __mcast_add(rt);
    vrouter_put_nexthop(rt->rtr_nh);

    return ret;
}
/*
 * the nh pointer is something which will be retained. So, call this function
 * with an nh * that you are willing to forget about in your function
 */
static void
set_entry_to_nh(struct ip_bucket_entry *entry, struct vr_nexthop *nh)
{
    struct vr_nexthop *tmp_nh;
    int orig_entry_nh = ENTRY_IS_NEXTHOP(entry);

    tmp_nh = vrouter_get_nexthop(nh->nh_rid, nh->nh_id);
    if (tmp_nh != nh) {
        /*
         * if the original nexthop was deleted, then there are
         * two cases
         *
         * 1. no new nexthop was created (& hence the null check)
         * 2. a new nexthop has taken its place (in which case, we need to
         *    put the reference we took above)
         */
        if (tmp_nh)
            vrouter_put_nexthop(tmp_nh);
        nh = vrouter_get_nexthop(nh->nh_rid, NH_DISCARD_ID);
    }

    /* save the original */
    tmp_nh = entry->entry_nh_p;
    /* update the entry */
    entry->entry_nh_p = nh;
    /* set the entry type */
    entry->entry_type = ENTRY_TYPE_NEXTHOP;

    /* ...and then take steps to release the original */
    if (tmp_nh && orig_entry_nh)
        vrouter_put_nexthop(tmp_nh);

    return;
}
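/*
 * Illustrative caller pattern for set_entry_to_nh() (a sketch, not from
 * the source; the function name is hypothetical): the entry keeps the
 * reference the caller holds, so the caller must not put the nexthop
 * afterwards.
 */
static void
example_entry_update(struct ip_bucket_entry *entry, unsigned int rid,
        unsigned int nh_id)
{
    struct vr_nexthop *nh;

    nh = vrouter_get_nexthop(rid, nh_id);
    if (!nh)
        return;

    /* ownership of the reference taken above transfers to the entry */
    set_entry_to_nh(entry, nh);

    /* no vrouter_put_nexthop(nh) here: the entry now owns it */
}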
static void
mcast_entry_free(vr_htable_t table, vr_hentry_t hentry,
        unsigned int index, void *data)
{
    struct vr_mcast_entry *ent = (struct vr_mcast_entry *)hentry;

    if (!ent)
        return;

    ent->flags &= ~VR_MCAST_FLAG_VALID;
    if (ent->nh)
        vrouter_put_nexthop(ent->nh);
    memset(ent, 0, sizeof(struct vr_mcast_entry));

    return;
}
int
__vr_mpls_del(struct vrouter *router, unsigned int label)
{
    struct vr_nexthop *nh;

    nh = __vrouter_get_label(router, label);
    if (nh) {
        /* hardware packet filtering (Flow Director) support */
        if (vrouter_host->hos_del_mpls && nh->nh_type == NH_ENCAP &&
                !(nh->nh_flags & NH_FLAG_MCAST))
            vrouter_host->hos_del_mpls(router, label);
        vrouter_put_nexthop(nh);
    }

    return __vrouter_set_label(router, label, NULL);
}
static void
bridge_table_entry_free(vr_htable_t table, vr_hentry_t hentry,
        unsigned int index, void *data)
{
    struct vr_bridge_entry *be = (struct vr_bridge_entry *)hentry;

    if (!be)
        return;

    /* Mark this entry as invalid */
    be->be_flags &= ~VR_BE_FLAG_VALID;

    if (be->be_nh)
        vrouter_put_nexthop(be->be_nh);
    memset(be, 0, sizeof(struct vr_bridge_entry));

    return;
}
int
vr_mpls_add(vr_mpls_req *req)
{
    struct vrouter *router;
    struct vr_nexthop *nh;
    int ret = 0;

    router = vrouter_get(req->mr_rid);
    if (!router) {
        ret = -EINVAL;
        goto generate_resp;
    }

    if ((unsigned int)req->mr_label >= router->vr_max_labels) {
        ret = -EINVAL;
        goto generate_resp;
    }

    /* release any nexthop already installed for this label */
    ret = __vr_mpls_del(router, req->mr_label);
    if (ret)
        goto generate_resp;

    nh = vrouter_get_nexthop(req->mr_rid, req->mr_nhid);
    if (!nh) {
        ret = -EINVAL;
        goto generate_resp;
    }

    ret = __vrouter_set_label(router, req->mr_label, nh);
    if (ret) {
        vrouter_put_nexthop(nh);
        goto generate_resp;
    }

    /* hardware packet filtering (Flow Director) support */
    if (vrouter_host->hos_add_mpls && nh->nh_type == NH_ENCAP &&
            !(nh->nh_flags & NH_FLAG_MCAST))
        vrouter_host->hos_add_mpls(router, req->mr_label);

generate_resp:
    vr_send_response(ret);

    return ret;
}
static void
set_entry_to_bucket(struct ip_bucket_entry *ent, struct ip_bucket *bkt)
{
    struct vr_nexthop *tmp_nh = NULL;

    if (ENTRY_IS_NEXTHOP(ent)) {
        /* save the old nexthop... */
        tmp_nh = ent->entry_nh_p;
    }

    /* ...update the entry... */
    ent->entry_long_i = (uintptr_t)bkt;
    /* ...set the entry type... */
    ent->entry_type = ENTRY_TYPE_BUCKET;

    /* ...and release the old reference */
    if (tmp_nh)
        vrouter_put_nexthop(tmp_nh);

    return;
}
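/*
 * Presumed layout of the bucket entry (an illustration inferred from the
 * accessors used in this section; the struct name is hypothetical and the
 * real definition may differ). The entry is a tagged union discriminated
 * by entry_type: it can point at a nexthop, at opaque vdata, or at a
 * child bucket, and entry_long_i exposes the same word as an integer so
 * the entry can be swapped with a single store, as set_entry_to_bucket()
 * above does.
 */
struct ip_bucket_entry_sketch {
    union {
        struct vr_nexthop *entry_nh_p;    /* ENTRY_TYPE_NEXTHOP */
        void *entry_vdata_p;              /* opaque per-route data */
        struct ip_bucket *entry_bkt_p;    /* ENTRY_TYPE_BUCKET */
        uintptr_t entry_long_i;           /* raw word, single-store update */
    };
    unsigned int entry_type;
    unsigned int entry_label_flags;
    unsigned int entry_label;
    unsigned int entry_bridge_index;
};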
static struct vr_bridge_entry *
bridge_add(unsigned int router_id, unsigned int vrf, uint8_t *mac, int nh_id)
{
    struct vr_bridge_entry *be;
    struct vr_bridge_entry_key key;
    struct vr_nexthop *old_nh;

    VR_MAC_COPY(key.be_mac, mac);
    key.be_vrf_id = vrf;

    be = vr_find_bridge_entry(&key);
    if (!be) {
        be = vr_find_free_bridge_entry(vrf, mac);
        if (!be)
            return NULL;

        VR_MAC_COPY(be->be_key.be_mac, mac);
        be->be_key.be_vrf_id = vrf;
        be->be_packets = 0;
        be->be_flags = VR_BE_VALID_FLAG;
    }

    /* swap in the new nexthop and unreference the old one */
    if (be->be_nh_id != nh_id) {
        old_nh = be->be_nh;
        be->be_nh = vrouter_get_nexthop(router_id, nh_id);
        if (be->be_nh)
            be->be_nh_id = be->be_nh->nh_id;
        else
            be->be_nh_id = -1;

        if (be->be_flags & VR_BE_MAC_MOVED_FLAG)
            be->be_flags &= ~VR_BE_MAC_MOVED_FLAG;

        if (old_nh)
            vrouter_put_nexthop(old_nh);
    }

    return be;
}
static int
__bridge_table_add(struct vr_route_req *rt)
{
    struct vr_bridge_entry *be;
    struct vr_nexthop *old_nh;
    struct vr_bridge_entry_key key;

    VR_MAC_COPY(key.be_mac, rt->rtr_req.rtr_mac);
    key.be_vrf_id = rt->rtr_req.rtr_vrf_id;

    be = vr_find_bridge_entry(&key);
    if (!be) {
        be = vr_find_free_bridge_entry(rt->rtr_req.rtr_vrf_id,
                (char *)rt->rtr_req.rtr_mac);
        if (!be)
            return -ENOMEM;

        VR_MAC_COPY(be->be_key.be_mac, rt->rtr_req.rtr_mac);
        be->be_key.be_vrf_id = rt->rtr_req.rtr_vrf_id;
        be->be_flags |= VR_BE_FLAG_VALID;
    }

    if (be->be_nh != rt->rtr_nh) {
        /* unreference the old nexthop after installing the new one */
        old_nh = be->be_nh;
        be->be_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
                rt->rtr_req.rtr_nh_id);
        if (old_nh)
            vrouter_put_nexthop(old_nh);
    }

    if (rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG) {
        be->be_label = rt->rtr_req.rtr_label;
        be->be_flags |= VR_BE_FLAG_LABEL_VALID;
    }

    return 0;
}
static void
mtrie_delete_bkt(struct ip_bucket_entry *ent, struct vr_route_req *rt,
        int defer_delete, int data_is_nh)
{
    struct ip_bucket *bkt;

    if (!ENTRY_IS_BUCKET(ent)) {
        if (ENTRY_IS_NEXTHOP(ent)) {
            vrouter_put_nexthop(ent->entry_nh_p);
            ent->entry_nh_p = NULL;
        } else {
            ent->entry_vdata_p = NULL;
        }
        return;
    }

    bkt = entry_to_bucket(ent);

    /* overwrite the entry before freeing the bucket that hangs off it */
    if (data_is_nh)
        set_entry_to_nh(ent, rt->rtr_nh);
    else
        set_entry_to_vdata(ent, (void *)rt->rtr_nh);

    ent->entry_label_flags = rt->rtr_req.rtr_label_flags;
    ent->entry_label = rt->rtr_req.rtr_label;
    ent->entry_bridge_index = rt->rtr_req.rtr_index;

    if (defer_delete) {
        mtrie_free_bkt_defer(vrouter_get(0), bkt);
    } else {
        if (!vr_not_ready) {
            if (!mtrie_free_bkt_defer(rt->rtr_nh->nh_router, bkt))
                return;
            vr_delay_op();
        }
        mtrie_free_bkt(bkt);
    }

    return;
}
/*
 * Delete a route from the table.
 * prefix is in network byte order.
 * returns 0 on success and non-zero otherwise.
 *
 * When deleting a route:
 * - Move all descendant buckets (not covered by more-specifics) to the
 *   parent of this node.
 * - If any buckets contain the same next-hop result, the bucket can be
 *   deleted. Memory should be freed after a delay in order to deal with
 *   concurrency.
 */
static int
mtrie_delete(struct vr_rtable *_unused, struct vr_route_req *rt)
{
    int vrf_id = rt->rtr_req.rtr_vrf_id;
    struct ip_mtrie *rtable;
    struct vr_route_req lreq;

    rtable = vrfid_to_mtrie(vrf_id, rt->rtr_req.rtr_family);
    if (!rtable)
        return -ENOENT;

    rt->rtr_nh = vrouter_get_nexthop(rt->rtr_req.rtr_rid,
            rt->rtr_req.rtr_nh_id);
    if (!rt->rtr_nh)
        return -ENOENT;

    rt->rtr_req.rtr_index = VR_BE_INVALID_INDEX;
    if ((rt->rtr_req.rtr_mac_size == VR_ETHER_ALEN) &&
            (!IS_MAC_ZERO(rt->rtr_req.rtr_mac))) {
        lreq.rtr_req.rtr_index = rt->rtr_req.rtr_index;
        lreq.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
        lreq.rtr_req.rtr_mac = rt->rtr_req.rtr_mac;
        lreq.rtr_req.rtr_vrf_id = vrf_id;
        if (!vr_bridge_lookup(vrf_id, &lreq)) {
            /* drop the reference taken above before bailing out */
            vrouter_put_nexthop(rt->rtr_nh);
            return -ENOENT;
        }
        rt->rtr_req.rtr_index = lreq.rtr_req.rtr_index;
    }

    /* mask the label to its 24-bit field; 0xFFFFFF marks it invalid */
    if (!(rt->rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG))
        rt->rtr_req.rtr_label = 0xFFFFFF;
    else
        rt->rtr_req.rtr_label &= 0xFFFFFF;

    __mtrie_delete(rt, &rtable->root, 0, 0, 1);
    vrouter_put_nexthop(rt->rtr_nh);

    return 0;
}
void
vr_mpls_exit(struct vrouter *router, bool soft_reset)
{
    unsigned int i;

    if (!router->vr_max_labels || !router->vr_ilm)
        return;

    for (i = 0; i < router->vr_max_labels; i++) {
        if (router->vr_ilm[i]) {
            vrouter_put_nexthop(router->vr_ilm[i]);
            router->vr_ilm[i] = NULL;
        }
    }

    if (soft_reset == false) {
        vr_free(router->vr_ilm);
        router->vr_ilm = NULL;
        router->vr_max_labels = 0;
    }

    return;
}