/*
 * fib6_restore_by_slot - replay IPv6 routes of every VRF toward a newly
 * hot-plugged board slot.
 *
 * For each VRF whose refcount we can take, walk the main IPv6 FIB table
 * and invoke fib6_restore_rt on every route, carrying the target slot in
 * the LPM hotplug message.
 *
 * @vsm_board: destination slot id written into the LPM message.
 *
 * Returns 0 always (per-VRF lookup failures are skipped, not reported).
 */
s32 fib6_restore_by_slot(u32 vsm_board)
{
	struct fib6_table *tb6 = NULL;
	struct vrf *vrf = NULL;
	struct fib6_walker_t w;
	lpm_rt_msg_s rt_msg;

	for_each_vrf(vrf) {
		if (!maybe_get_vrf(vrf))
			continue;

		tb6 = fib6_get_table(vrf, 0, RT_TABLE_MAIN);
		if (!tb6) {
			/* No IPv6 main table for this VRF: the original code
			 * dereferenced tb6->tb6_root unconditionally and would
			 * crash here. Drop the reference and move on. */
			put_vrf(vrf);
			continue;
		}

		/* NOTE(review): only msg_type, vrf_id and slot are set;
		 * remaining rt_msg fields are left uninitialized — presumably
		 * fib6_restore_rt reads nothing else. TODO confirm. */
		rt_msg.msg_type = LPM_MSG_IPV6_HOTPLUG;
		rt_msg.skey.ipv6_key.vrf_id = vrf->vrf_id;
		RT_LPM_INFO(&rt_msg).slot = vsm_board;

		/* Walker visits every node of the table's radix tree root. */
		memset(&w, 0, sizeof(struct fib6_walker_t));
		w.func = fib6_restore_rt;
		w.root = &tb6->tb6_root;
		w.args = &rt_msg;
		w.prune = 0;
		fib6_iterate_table(tb6, &w);

		put_vrf(vrf);
	}
	return 0;
}
/*
 * fib6_new_table - look up an IPv6 FIB table by id, creating and linking
 * it if it does not exist yet.
 *
 * An id of 0 is treated as an alias for the main table.
 *
 * Returns the table, or NULL if allocation failed.
 */
struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	u32 table_id = id ? id : RT6_TABLE_MAIN;
	struct fib6_table *existing;
	struct fib6_table *created;

	/* Fast path: table already present. */
	existing = fib6_get_table(net, table_id);
	if (existing)
		return existing;

	/* Slow path: allocate a fresh table and publish it. */
	created = fib6_alloc_table(net, table_id);
	if (created)
		fib6_link_table(net, created);

	return created;
}
static int vrf_rt6_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); struct dst_entry *dst; struct rt6_info *rt6; int cpu; int rc = -ENOMEM; rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, DST_OBSOLETE_NONE, (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); if (!rt6) goto out; dst = &rt6->dst; rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL); if (!rt6->rt6i_pcpu) { dst_destroy(dst); goto out; } for_each_possible_cpu(cpu) { struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); *p = NULL; } memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst)); INIT_LIST_HEAD(&rt6->rt6i_siblings); INIT_LIST_HEAD(&rt6->rt6i_uncached); rt6->dst.input = vrf_input6; rt6->dst.output = vrf_output6; rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); atomic_set(&rt6->dst.__refcnt, 2); vrf->rt6 = rt6; rc = 0; out: return rc; }
/*
 * fib6_new_table - single-table flavor: there is only one IPv6 FIB table,
 * so "creating" a table is just looking it up.
 *
 * NOTE(review): this duplicates the fib6_new_table defined earlier in the
 * file — presumably the two are alternatives under a config #ifdef (as in
 * upstream CONFIG_IPV6_MULTIPLE_TABLES) that is not visible here; confirm
 * before building both into one object.
 */
struct fib6_table *fib6_new_table(struct net *net, u32 id)
{
	struct fib6_table *tb = fib6_get_table(net, id);

	return tb;
}