static void vr_interface_service_disable(struct vr_interface *vif) { vif->vif_rx = eth_rx; /* * once everybody sees the change, we are free to do whatever * we want with the vrf assign table */ vr_delay_op(); /* * it is possible that when this function is called from * interface delete, the table users are +ve, and hence * the memory will not be freed here. our saving grace * is vif_free (called from last put operation), which * takes care of freeing the memory */ if (vif->vif_vrf_table && !vif->vif_vrf_table_users) { vr_free(vif->vif_vrf_table); vif->vif_vrf_table = NULL; } return; }
int __vr_mirror_del(struct vrouter *router, unsigned int index) { struct vr_nexthop *nh; struct vr_mirror_entry *mirror; struct vr_defer_data *defer; if (index >= router->vr_max_mirror_indices) return -EINVAL; mirror = router->vr_mirrors[index]; if (!mirror) return -EINVAL; nh = mirror->mir_nh; router->vr_mirrors[index] = NULL; mirror->mir_nh = NULL; if (!vr_not_ready) { defer = vr_get_defer_data(sizeof(*defer)); if (defer) { defer->vdd_data = (void *)mirror; vr_defer(router, vr_mirror_defer_delete, (void *)defer); } else { vr_delay_op(); vr_free(mirror, VR_MIRROR_OBJECT); } } else { vr_free(mirror, VR_MIRROR_OBJECT); } vrouter_put_nexthop(nh); return 0; }
/*
 * vr_qos_map_delete -- handle a qos-map delete request.
 *
 * Unhooks the forwarding class entry for req->qmr_id from the router's
 * qos map and frees it, preferring deferred reclamation; falls back to
 * vr_delay_op() plus a synchronous free if the defer fails. Always sends
 * a response carrying the result code.
 */
static void
vr_qos_map_delete(vr_qos_map_req *req)
{
    struct vrouter *router = vrouter_get(req->qmr_rid);
    struct vr_forwarding_class *fc_p;
    int ret = 0;

    if (req->qmr_id >= vr_qos_map_entries) {
        ret = -EINVAL;
        goto generate_response;
    }

    fc_p = vr_qos_map_get_fc(router, req->qmr_id);
    if (!fc_p)
        /* slot already empty: deleting nothing is a success */
        goto generate_response;

    /* unhook first so no reader can reach the entry while it is freed */
    vr_qos_map_set_fc(router, req->qmr_id, NULL);
    if (vr_qos_map_free_fc_defer(router, fc_p)) {
        /* defer failed: wait for in-flight readers, then free inline */
        vr_delay_op();
        vr_free(fc_p, VR_QOS_MAP_OBJECT);
    }

    (void)vr_offload_qos_map_del(req);

generate_response:
    vr_send_response(ret);
    return;
}
/*
 * mtrie_reset_entry -- overwrite a bucket entry with new data (a nexthop
 * when data_is_nh, otherwise opaque vdata) and tear down whatever bucket
 * subtree the entry used to reference.
 *
 * Ordering is deliberate and must not change: the live entry is
 * republished first, and only then is the OLD subtree freed, working from
 * a saved copy, so concurrent lookups never follow a pointer into freed
 * memory. The free is deferred when possible; otherwise readers are
 * drained with vr_delay_op() before freeing synchronously.
 */
static void
mtrie_reset_entry(struct ip_bucket_entry *ent, int level, void *data,
        int data_is_nh)
{
    struct ip_bucket_entry cp_ent;
    struct ip_bucket *bkt;
    struct vr_nexthop *nh;
    struct vrouter *vrouter;

    /* save the old entry; the live one is overwritten just below */
    memcpy(&cp_ent, ent, sizeof(cp_ent));

    /* remove from the tree */
    if (data) {
        if (data_is_nh) {
            nh = (struct vr_nexthop *) data;
            set_entry_to_nh(ent, nh);
        } else {
            set_entry_to_vdata(ent, data);
        }
    }

    /* ...and then work with the copy */
    bkt = entry_to_bucket(&cp_ent);
    if (!bkt)
        /* old entry was not a bucket: nothing further to free */
        return;

    if (data_is_nh) {
        /*
         * NOTE(review): assumes data is non-NULL whenever data_is_nh is
         * set -- a NULL here would be dereferenced for nh_router; confirm
         * against the callers.
         */
        nh = (struct vr_nexthop *) data;
        vrouter = nh->nh_router;
    } else {
        /* vdata carries no router back-pointer; use the default router */
        vrouter = vrouter_get(0);
    }

    if (!vr_not_ready) {
        /* a zero return means the free was successfully deferred */
        if (!mtrie_free_bkt_defer(vrouter, bkt))
            return;
        /* defer failed: wait for readers to drain, free synchronously */
        vr_delay_op();
    }

    mtrie_free_entry(&cp_ent, level);

    return;
}
static void vr_mirror_meta_entry_destroy(unsigned int index, void *arg) { struct vr_mirror_meta_entry *me = (struct vr_mirror_meta_entry *)arg; struct vr_defer_data *defer; if (me && me != VR_ITABLE_ERR_PTR) { if (!vr_not_ready) { defer = vr_get_defer_data(sizeof(*defer)); if (!defer) { vr_delay_op(); vr_mirror_meta_destroy(me); return; } defer->vdd_data = (void *)me; vr_defer(me->mirror_router, vr_mirror_meta_destructor, (void *)defer); } } return; }
/*
 * mtrie_delete_bkt -- release whatever a bucket entry points at while
 * repointing the entry at the route's new data.
 *
 * For a non-bucket entry this just drops the nexthop reference (or clears
 * the vdata pointer). For a bucket entry, the live entry is republished
 * with the route's nexthop/vdata and label fields FIRST, and only then is
 * the old bucket freed -- deferred when possible, otherwise after
 * draining readers with vr_delay_op(). This publish-then-free order must
 * be preserved so concurrent lookups never reach freed memory.
 */
static void
mtrie_delete_bkt(struct ip_bucket_entry *ent, struct vr_route_req *rt,
        int defer_delete, int data_is_nh)
{
    struct ip_bucket *bkt;

    if (!ENTRY_IS_BUCKET(ent)) {
        if (ENTRY_IS_NEXTHOP(ent)) {
            /* drop the reference the entry held on its nexthop */
            vrouter_put_nexthop(ent->entry_nh_p);
            ent->entry_nh_p = NULL;
        } else {
            ent->entry_vdata_p = NULL;
        }
        return;
    }

    bkt = entry_to_bucket(ent);

    /* republish the entry before the old bucket is torn down */
    if (data_is_nh) {
        set_entry_to_nh(ent, rt->rtr_nh);
    } else {
        /* rtr_nh is reused as an opaque vdata carrier in this mode */
        set_entry_to_vdata(ent, (void *)rt->rtr_nh);
    }
    ent->entry_label_flags = rt->rtr_req.rtr_label_flags;
    ent->entry_label = rt->rtr_req.rtr_label;
    ent->entry_bridge_index = rt->rtr_req.rtr_index;

    if (defer_delete) {
        /* caller demands deferral; best effort, result ignored */
        mtrie_free_bkt_defer(vrouter_get(0), bkt);
    } else {
        if (!vr_not_ready) {
            /*
             * zero return means the free was deferred successfully.
             * NOTE(review): rt->rtr_nh is dereferenced for nh_router even
             * in the vdata case above -- confirm callers always supply a
             * genuine nexthop here.
             */
            if (!mtrie_free_bkt_defer(rt->rtr_nh->nh_router, bkt))
                return;
            /* defer failed: drain readers before freeing inline */
            vr_delay_op();
        }
        mtrie_free_bkt(bkt);
    }

    return;
}