/*
 * fib4_rule_flush_cache - invalidate the IPv4 routing cache after a
 * policy-rule change.
 *
 * Called by the generic fib_rules core whenever rules for this ops'
 * namespace are added/removed, so stale cached routes are dropped.
 * The optional FIB_RULE_DEBUG printk traces the invocation.
 */
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
{
#ifdef FIB_RULE_DEBUG
	printk(KERN_DEBUG "[NET][IPV4][RULE] %s \n", __func__);
#endif
	/* -1 flushes immediately (no delayed timeout) for ops' namespace */
	rt_cache_flush(ops->fro_net, -1);
}
/*
 * fib_flush_1 - remove every route going through @dev from all zones.
 *
 * Walks each FIB zone; hashed zones are flushed bucket by bucket,
 * unhashed zones via their single list.  Zone entry counts are kept
 * in sync, and the route cache is invalidated only if anything was
 * actually removed.
 */
static __inline__ void fib_flush_1(struct device *dev)
{
	struct fib_zone *zone;
	int deleted = 0;

	for (zone = fib_zone_list; zone; zone = zone->fz_next) {
		int n = 0;

		if (zone->fz_hash_table) {
			int bucket;

			for (bucket = 0; bucket < RTZ_HASH_DIVISOR; bucket++)
				n += rt_flush_list(&zone->fz_hash_table[bucket], dev);
		} else {
			n = rt_flush_list(&zone->fz_list, dev);
		}

		zone->fz_nent -= n;
		deleted += n;
	}

	if (deleted)
		rt_cache_flush();
}
static void fib_flush(struct net *net) { int flushed = 0; struct fib_table *tb; struct hlist_node *node; struct hlist_head *head; unsigned int h; for (h = 0; h < FIB_TABLE_HASHSZ; h++) { head = &net->ipv4.fib_table_hash[h]; hlist_for_each_entry(tb, node, head, tb_hlist) flushed += tb->tb_flush(tb); } if (flushed) rt_cache_flush(net, -1); }
/*
 * fib_del_1 - delete matching route(s) for @dst.
 *
 * With a zero @mask the destination is matched against every zone
 * (wildcard delete); otherwise only the zone corresponding to @mask
 * is searched.  Zone entry counts are adjusted and the route cache
 * flushed when something was removed.
 *
 * Returns 0 on success, -ESRCH if no matching route existed.
 */
static __inline__ int fib_del_1(__u32 dst, __u32 mask,
		struct device * dev, __u32 gtw, short flags, short metric)
{
	struct fib_node **chain;
	struct fib_zone *fz;
	int removed = 0;

	if (!mask) {
		/* Wildcard mask: try every zone in order. */
		for (fz = fib_zone_list; fz; fz = fz->fz_next) {
			int n;

			if (fz->fz_hash_table)
				chain = &fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
			else
				chain = &fz->fz_list;

			n = fib_del_list(chain, dst, dev, gtw, flags, metric, mask);
			fz->fz_nent -= n;
			removed += n;
		}
	} else {
		/* Exact mask: only the zone for this prefix length. */
		fz = fib_zones[rt_logmask(mask)];
		if (fz != NULL) {
			if (fz->fz_hash_table)
				chain = &fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
			else
				chain = &fz->fz_list;

			removed = fib_del_list(chain, dst, dev, gtw, flags, metric, mask);
			fz->fz_nent -= removed;
		}
	}

	if (!removed)
		return -ESRCH;

	rt_cache_flush();
	return 0;
}
/*
 * fib_flush - purge dead entries from every routing table.
 *
 * With CONFIG_IP_MULTIPLE_TABLES every table id from RT_TABLE_MAX
 * down to 1 is flushed (ids without a table are skipped); otherwise
 * only the fixed main and local tables exist.  The route cache is
 * invalidated only if some entry was actually dropped.
 */
void fib_flush(void)
{
	int nflushed = 0;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	int tid;

	for (tid = RT_TABLE_MAX; tid > 0; tid--) {
		struct fib_table *table = fib_get_table(tid);

		if (table == NULL)
			continue;
		nflushed += table->tb_flush(table);
	}
#else /* CONFIG_IP_MULTIPLE_TABLES */
	nflushed += main_table->tb_flush(main_table);
	nflushed += local_table->tb_flush(local_table);
#endif /* CONFIG_IP_MULTIPLE_TABLES */

	if (nflushed)
		rt_cache_flush(-1);
}
/*
 * fib4_rule_flush_cache - invalidate the IPv4 routing cache after a
 * policy-rule change.
 *
 * Invoked by the generic fib_rules core when rules in this ops'
 * namespace change; -1 requests an immediate (non-delayed) flush.
 */
static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
{
	rt_cache_flush(ops->fro_net, -1);
}
/*
 * fib_add_1 - insert a new route into the FIB.
 *
 * Allocates a fib_node for (dst, metric), attaches shared fib_info
 * created from (gw, dev, flags, mss, window, irtt), and links it into
 * the zone selected by the netmask's prefix length.  Also:
 *   - creates the zone on first use, splicing it into the ordered
 *     zone list under cli()/sti() protection;
 *   - converts a zone's plain list into a hash table once it exceeds
 *     RTZ_HASHING_LIMIT entries (host-route zone, logmask 32, stays
 *     a list);
 *   - keeps entries for the same destination sorted by metric;
 *   - after inserting, removes at most one older route with the same
 *     destination and gateway (the "dup"), emitting netlink messages
 *     for both the add and the delete;
 *   - flushes the route cache at the end.
 *
 * Failure to allocate is silent: the function simply returns.
 *
 * Fix: the hash-table kmalloc previously sized the buffer with
 * sizeof(struct rtable*) although it stores struct fib_node*
 * (the matching memset already used sizeof(struct fib_node*)).
 * Both pointer sizes are equal, so this worked by accident; use the
 * correct element type consistently.
 */
static __inline__ void fib_add_1(short flags, __u32 dst, __u32 mask,
	__u32 gw, struct device *dev, unsigned short mss,
	unsigned long window, unsigned short irtt, short metric)
{
	struct fib_node *f, *f1;
	struct fib_node **fp;
	struct fib_node **dup_fp = NULL;
	struct fib_zone * fz;
	struct fib_info * fi;
	int logmask;

	/*
	 * Allocate an entry and fill it in.
	 */

	f = (struct fib_node *) kmalloc(sizeof(struct fib_node), GFP_KERNEL);
	if (f == NULL)
		return;

	memset(f, 0, sizeof(struct fib_node));
	f->fib_dst = dst;
	f->fib_metric = metric;
	f->fib_tos    = 0;

	/* fib_info is shared/refcounted between routes with identical
	 * gateway/device/flags; creation failure aborts the add. */
	if  ((fi = fib_create_info(gw, dev, flags, mss, window, irtt)) == NULL) {
		kfree_s(f, sizeof(struct fib_node));
		return;
	}
	f->fib_info = fi;

	logmask = rt_logmask(mask);
	fz = fib_zones[logmask];

	if (!fz) {
		/* First route with this prefix length: create its zone and
		 * splice it into the zone list, which is kept ordered by
		 * increasing logmask (most-specific first). */
		int i;
		fz = kmalloc(sizeof(struct fib_zone), GFP_KERNEL);
		if (!fz) {
			fib_free_node(f);
			return;
		}
		memset(fz, 0, sizeof(struct fib_zone));
		fz->fz_logmask = logmask;
		fz->fz_mask = mask;
		for (i=logmask-1; i>=0; i--)
			if (fib_zones[i])
				break;
		cli();
		if (i<0) {
			fz->fz_next = fib_zone_list;
			fib_zone_list = fz;
		} else {
			fz->fz_next = fib_zones[i]->fz_next;
			fib_zones[i]->fz_next = fz;
		}
		fib_zones[logmask] = fz;
		sti();
	}

	/*
	 * If zone overgrows RTZ_HASHING_LIMIT, create hash table.
	 */

	if (fz->fz_nent >= RTZ_HASHING_LIMIT && !fz->fz_hash_table && logmask<32) {
		struct fib_node ** ht;
#if RT_CACHE_DEBUG >= 2
		printk("fib_add_1: hashing for zone %d started\n", logmask);
#endif
		/* was sizeof(struct rtable*) — wrong element type */
		ht = kmalloc(RTZ_HASH_DIVISOR*sizeof(struct fib_node*), GFP_KERNEL);

		if (ht) {
			memset(ht, 0, RTZ_HASH_DIVISOR*sizeof(struct fib_node*));
			cli();
			/* Rehash the existing list into the buckets, preserving
			 * relative order within each bucket (append at tail). */
			f1 = fz->fz_list;
			while (f1) {
				struct fib_node * next, **end;
				unsigned hash = fz_hash_code(f1->fib_dst, logmask);
				next = f1->fib_next;
				f1->fib_next = NULL;
				end = &ht[hash];
				while(*end != NULL)
					end = &(*end)->fib_next;
				*end = f1;
				f1 = next;
			}
			fz->fz_list = NULL;
			fz->fz_hash_table = ht;
			sti();
		}
	}

	if (fz->fz_hash_table)
		fp = &fz->fz_hash_table[fz_hash_code(dst, logmask)];
	else
		fp = &fz->fz_list;

	/*
	 * Scan list to find the first route with the same destination
	 */

	while ((f1 = *fp) != NULL) {
		if (f1->fib_dst == dst)
			break;
		fp = &f1->fib_next;
	}

	/*
	 * Find route with the same destination and less (or equal) metric.
	 */

	while ((f1 = *fp) != NULL && f1->fib_dst == dst) {
		if (f1->fib_metric >= metric)
			break;
		/*
		 * Record route with the same destination and gateway,
		 * but less metric. We'll delete it
		 * after instantiation of new route.
		 */
		if (f1->fib_info->fib_gateway == gw &&
		    (gw || f1->fib_info->fib_dev == dev))
			dup_fp = fp;
		fp = &f1->fib_next;
	}

	/*
	 * Is it already present?
	 */

	if (f1 && f1->fib_metric == metric && f1->fib_info == fi) {
		fib_free_node(f);
		return;
	}

	/*
	 * Insert new entry to the list.
	 */

	cli();
	f->fib_next = f1;
	*fp = f;
	if (!fib_loopback && (fi->fib_dev->flags & IFF_LOOPBACK))
		fib_loopback = f;
	sti();
	fz->fz_nent++;
	ip_netlink_msg(RTMSG_NEWROUTE, dst, gw, mask, flags, metric, fi->fib_dev->name);

	/*
	 * Delete route with the same destination and gateway.
	 * Note that we should have at most one such route.
	 */
	if (dup_fp)
		fp = dup_fp;
	else
		fp = &f->fib_next;

	while ((f1 = *fp) != NULL && f1->fib_dst == dst) {
		if (f1->fib_info->fib_gateway == gw &&
		    (gw || f1->fib_info->fib_dev == dev)) {
			cli();
			*fp = f1->fib_next;
			if (fib_loopback == f1)
				fib_loopback = NULL;
			sti();
			ip_netlink_msg(RTMSG_DELROUTE, dst, gw, mask, flags, metric, f1->fib_info->fib_dev->name);
			fib_free_node(f1);
			fz->fz_nent--;
			break;
		}
		fp = &f1->fib_next;
	}
	rt_cache_flush();
	return;
}