/*
 * Allocate and attach an IPv4 in_device to @dev.  Caller must hold RTNL.
 * Returns the new in_device, or NULL on allocation failure.
 *
 * The in_device is published via rcu_assign_pointer() as the very last
 * step, so readers never see a partially initialized structure.
 */
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	INIT_RCU_HEAD(&in_dev->rcu_head);
	/* Seed per-device config from this netns' default template. */
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
	       sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
		goto out_kfree;
	/* Reference in_dev->dev */
	dev_hold(dev);
	/* Account for reference dev->ip_ptr (below) */
	in_dev_hold(in_dev);

	devinet_sysctl_register(in_dev);
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev;
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
/*
 * xfrm4_dst_ifdown - detach an xfrm bundle from a device going away.
 *
 * On unregister, if the bundle's rtable references the dying device,
 * walk the child chain and retarget every rtable's idev reference at
 * the loopback device (taking one extra hold per link), so the dying
 * device's in_device can be released.  The initial reference taken by
 * in_dev_get() is dropped again with __in_dev_put() once the chain has
 * been rewritten.  Finishes by delegating to xfrm_dst_ifdown().
 */
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	struct xfrm_dst *xdst;

	if (!unregister)
		return;

	xdst = (struct xfrm_dst *)dst;
	if (xdst->u.rt.idev->dev == dev) {
		struct in_device *loopback_idev = in_dev_get(&loopback_dev);
		BUG_ON(!loopback_idev);

		do {
			in_dev_put(xdst->u.rt.idev);
			xdst->u.rt.idev = loopback_idev;
			in_dev_hold(loopback_idev);
			xdst = (struct xfrm_dst *)xdst->u.dst.child;
		} while (xdst->u.dst.xfrm);

		/* Drop the reference taken by in_dev_get() above. */
		__in_dev_put(loopback_idev);
	}

	xfrm_dst_ifdown(dst, dev);
}
/*
 * inet_rtm_newaddr - RTM_NEWADDR netlink handler (legacy rtattr parsing).
 *
 * Creates the device's in_device on demand and inserts the new address.
 * Called by the rtnetlink core with RTNL held.  Returns 0 on success or
 * a negative errno (-EINVAL bad message, -ENODEV no such device,
 * -ENOBUFS allocation failure).
 */
static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct net_device *dev;
	struct in_device *in_dev;
	struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
	struct in_ifaddr *ifa;
	int rc = -EINVAL;

	ASSERT_RTNL();

	/* IPv4 prefix length is at most 32; a local address is mandatory. */
	if (ifm->ifa_prefixlen > 32 || !rta[IFA_LOCAL - 1])
		goto out;
	rc = -ENODEV;
	if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL)
		goto out;
	rc = -ENOBUFS;
	if ((in_dev = __in_dev_get(dev)) == NULL) {
		in_dev = inetdev_init(dev);
		if (!in_dev)
			goto out;
	}
	if ((ifa = inet_alloc_ifa()) == NULL)
		goto out;
	/* IFA_ADDRESS defaults to IFA_LOCAL (non point-to-point case). */
	if (!rta[IFA_ADDRESS - 1])
		rta[IFA_ADDRESS - 1] = rta[IFA_LOCAL - 1];
	memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL - 1]), 4);
	memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS - 1]), 4);
	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
	if (rta[IFA_BROADCAST - 1])
		memcpy(&ifa->ifa_broadcast, RTA_DATA(rta[IFA_BROADCAST - 1]), 4);
	if (rta[IFA_ANYCAST - 1])
		memcpy(&ifa->ifa_anycast, RTA_DATA(rta[IFA_ANYCAST - 1]), 4);
	ifa->ifa_flags = ifm->ifa_flags;
	ifa->ifa_scope = ifm->ifa_scope;
	/* ifa holds a reference on the in_device for its lifetime. */
	in_dev_hold(in_dev);
	ifa->ifa_dev = in_dev;
	if (rta[IFA_LABEL - 1])
		rtattr_strlcpy(ifa->ifa_label, rta[IFA_LABEL - 1], IFNAMSIZ);
	else
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

	rc = inet_insert_ifa(ifa);
out:
	return rc;
}
struct in_device *inetdev_init(struct net_device *dev) { struct in_device *in_dev; ASSERT_RTNL(); in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); if (!in_dev) goto out; memset(in_dev, 0, sizeof(*in_dev)); INIT_RCU_HEAD(&in_dev->rcu_head); memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); in_dev->cnf.sysctl = NULL; in_dev->dev = dev; if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) goto out_kfree; /* Reference in_dev->dev */ dev_hold(dev); #ifdef CONFIG_SYSCTL neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4, NET_IPV4_NEIGH, "ipv4", NULL); #endif /* Account for reference dev->ip_ptr (below) */ in_dev_hold(in_dev); #ifdef CONFIG_SYSCTL devinet_sysctl_register(in_dev, &in_dev->cnf); #endif ip_mc_init_dev(in_dev); if (dev->flags & IFF_UP) ip_mc_up(in_dev); out: /* we can receive as soon as ip_ptr is set -- do this last */ smp_wmb(); dev->ip_ptr = in_dev; return in_dev; out_kfree: kfree(in_dev); in_dev = NULL; goto out; }
/*
 * ip_mc_inc_group - join @in_dev to multicast group @addr (RTNL held).
 *
 * If the group is already joined, just bump its user count.  Otherwise
 * allocate a new ip_mc_list entry, link it at the head of
 * in_dev->mc_list under the device's lock, and announce the membership
 * via igmp_group_added().  Allocation failure is silently ignored, as
 * in the original.
 *
 * Fix: dropped the unnecessary cast on kmalloc()'s return value
 * (void * converts implicitly in C).
 */
void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	for (im = in_dev->mc_list; im; im = im->next) {
		if (im->multiaddr == addr) {
			im->users++;
			goto out;
		}
	}

	im = kmalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	atomic_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	im->tm_running = 0;
	init_timer(&im->timer);
	im->timer.data = (unsigned long)im;
	im->timer.function = &igmp_timer_expire;
	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
	im->reporter = 0;
#endif
	im->loaded = 0;
	write_lock_bh(&in_dev->lock);
	im->next = in_dev->mc_list;
	in_dev->mc_list = im;
	write_unlock_bh(&in_dev->lock);
	igmp_group_added(im);
	if (in_dev->dev->flags & IFF_UP)
		ip_rt_multicast_event(in_dev);
out:
	return;
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) { struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); if (!in_dev) { inet_free_ifa(ifa); return -ENOBUFS; } ipv4_devconf_setall(in_dev); if (ifa->ifa_dev != in_dev) { WARN_ON(ifa->ifa_dev); in_dev_hold(in_dev); ifa->ifa_dev = in_dev; } if (ipv4_is_loopback(ifa->ifa_local)) ifa->ifa_scope = RT_SCOPE_HOST; return inet_insert_ifa(ifa); }
/*
 * Allocate and attach an IPv4 in_device to @dev (pre-RCU variant:
 * dev->ip_ptr is published under inetdev_lock).  Caller must hold RTNL.
 * Returns the new in_device, or NULL on allocation failure.
 */
struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memset(in_dev, 0, sizeof(*in_dev));
	in_dev->lock = RW_LOCK_UNLOCKED;
	/* Seed per-device config from the global default template. */
	memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
		goto out_kfree;
	inet_dev_count++;
	/* Reference in_dev->dev */
	dev_hold(dev);
#ifdef CONFIG_SYSCTL
	neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4,
			      NET_IPV4_NEIGH, "ipv4", NULL);
#endif
	write_lock_bh(&inetdev_lock);
	dev->ip_ptr = in_dev;
	/* Account for reference dev->ip_ptr */
	in_dev_hold(in_dev);
	write_unlock_bh(&inetdev_lock);
#ifdef CONFIG_SYSCTL
	devinet_sysctl_register(in_dev, &in_dev->cnf);
#endif
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);
out:
	return in_dev;
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) { struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); if (!in_dev) { in_dev = inetdev_init(dev); if (!in_dev) { inet_free_ifa(ifa); return -ENOBUFS; } } if (ifa->ifa_dev != in_dev) { BUG_TRAP(!ifa->ifa_dev); in_dev_hold(in_dev); ifa->ifa_dev = in_dev; } if (LOOPBACK(ifa->ifa_local)) ifa->ifa_scope = RT_SCOPE_HOST; return inet_insert_ifa(ifa); }
/*
 * __xfrm4_bundle_create - build a chain of xfrm_dst entries ("bundle")
 * for an IPv4 flow, one per transform state in @xfrm[0..nx-1].
 *
 * Pass 1 allocates the dst chain top-down and, for tunnel-mode states,
 * re-routes toward the tunnel endpoint (IPv4 or IPv6 encapsulation,
 * selected by the state's family).  Pass 2 walks the finished chain
 * and fills in routing metadata copied from the inner route (@rt/@rt0).
 * On success *dst_p points at the head of the bundle and 0 is
 * returned; on error the partially built chain is freed with
 * dst_free() and a negative errno is returned.
 */
static int __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx, struct flowi *fl, struct dst_entry **dst_p)
{
	struct dst_entry *dst, *dst_prev;
	struct rtable *rt0 = (struct rtable*)(*dst_p);
	struct rtable *rt = rt0;
	struct flowi fl_tunnel = {
		.nl_u = {
			.ip4_u = {
				.saddr = fl->fl4_src,
				.daddr = fl->fl4_dst,
				.tos = fl->fl4_tos
			}
		}
	};
	int i;
	int err;
	int header_len = 0;
	int trailer_len = 0;

	dst = dst_prev = NULL;
	dst_hold(&rt->u.dst);

	/* Pass 1: allocate one xfrm_dst per state, chaining via ->child. */
	for (i = 0; i < nx; i++) {
		struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops);
		struct xfrm_dst *xdst;

		if (unlikely(dst1 == NULL)) {
			err = -ENOBUFS;
			dst_release(&rt->u.dst);
			goto error;
		}

		if (!dst)
			dst = dst1;
		else {
			dst_prev->child = dst1;
			dst1->flags |= DST_NOHASH;
			dst_clone(dst1);
		}

		xdst = (struct xfrm_dst *)dst1;
		xdst->route = &rt->u.dst;
		xdst->genid = xfrm[i]->genid;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;

		if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
			/* Re-route toward the tunnel endpoint. */
			unsigned short encap_family = xfrm[i]->props.family;
			switch(encap_family) {
			case AF_INET:
				fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
				fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
				break;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
			case AF_INET6:
				ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
				ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
				break;
#endif
			default:
				BUG_ON(1);
			}
			err = xfrm_dst_lookup((struct xfrm_dst **)&rt, &fl_tunnel, encap_family);
			if (err)
				goto error;
		} else
			dst_hold(&rt->u.dst);
	}

	dst_prev->child = &rt->u.dst;
	dst->path = &rt->u.dst;
	*dst_p = dst;
	dst = dst_prev;

	/* Pass 2: fill per-link routing metadata from the inner route. */
	dst_prev = *dst_p;
	i = 0;
	for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
		struct xfrm_state_afinfo *afinfo;
		x->u.rt.fl = *fl;

		dst_prev->xfrm = xfrm[i++];
		dst_prev->dev = rt->u.dst.dev;
		if (rt->u.dst.dev)
			dev_hold(rt->u.dst.dev);
		dst_prev->obsolete = -1;
		dst_prev->flags |= DST_HOST;
		dst_prev->lastuse = jiffies;
		dst_prev->header_len = header_len;
		dst_prev->nfheader_len = 0;
		dst_prev->trailer_len = trailer_len;
		memcpy(&dst_prev->metrics, &x->route->metrics, sizeof(dst_prev->metrics));

		/* Copy neighbour for reachability confirmation */
		dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
		dst_prev->input = rt->u.dst.input;
		/* XXX: When IPv6 module can be unloaded, we should manage reference
		 * to xfrm6_output in afinfo->output. Miyazawa
		 * */
		afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
		if (!afinfo) {
			dst = *dst_p;
			err = -EAFNOSUPPORT;
			goto error;
		}
		dst_prev->output = afinfo->output;
		xfrm_state_put_afinfo(afinfo);
		if (dst_prev->xfrm->props.family == AF_INET && rt->peer)
			atomic_inc(&rt->peer->refcnt);
		x->u.rt.peer = rt->peer;
		/* Sheit... I remember I did this right. Apparently,
		 * it was magically lost, so this code needs audit */
		x->u.rt.rt_flags = rt0->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
		x->u.rt.rt_type = rt->rt_type;
		x->u.rt.rt_src = rt0->rt_src;
		x->u.rt.rt_dst = rt0->rt_dst;
		x->u.rt.rt_gateway = rt->rt_gateway;
		x->u.rt.rt_spec_dst = rt0->rt_spec_dst;
		x->u.rt.idev = rt0->idev;
		in_dev_hold(rt0->idev);
		header_len -= x->u.dst.xfrm->props.header_len;
		trailer_len -= x->u.dst.xfrm->props.trailer_len;
	}

	xfrm_init_pmtu(dst);
	return 0;

error:
	if (dst)
		dst_free(dst);
	return err;
}
/*
 * rtm_to_ifaddr - translate an RTM_NEWADDR netlink message into a
 * freshly allocated in_ifaddr.  Caller holds RTNL.
 *
 * Returns the new ifaddr (holding a reference on the device's
 * in_device) or an ERR_PTR(): -EINVAL on a malformed message, -ENODEV
 * when the interface index does not resolve, -ENOBUFS when the device
 * has no in_device or the ifaddr allocation fails.
 */
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
{
	struct nlattr *tb[IFA_MAX+1];
	struct in_ifaddr *ifa;
	struct ifaddrmsg *ifm;
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
	if (err < 0)
		goto errout;

	ifm = nlmsg_data(nlh);
	err = -EINVAL;
	/* IPv4 prefix is at most 32 bits; a local address is mandatory. */
	if (ifm->ifa_prefixlen > 32 || tb[IFA_LOCAL] == NULL)
		goto errout;

	dev = __dev_get_by_index(net, ifm->ifa_index);
	err = -ENODEV;
	if (dev == NULL)
		goto errout;

	in_dev = __in_dev_get_rtnl(dev);
	err = -ENOBUFS;
	if (in_dev == NULL)
		goto errout;

	ifa = inet_alloc_ifa();
	if (ifa == NULL)
		/*
		 * A potential indev allocation can be left alive, it stays
		 * assigned to its device and is destroyed with it.
		 */
		goto errout;

	ipv4_devconf_setall(in_dev);
	in_dev_hold(in_dev);

	/* IFA_ADDRESS defaults to IFA_LOCAL (non point-to-point case). */
	if (tb[IFA_ADDRESS] == NULL)
		tb[IFA_ADDRESS] = tb[IFA_LOCAL];

	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
	ifa->ifa_flags = ifm->ifa_flags;
	ifa->ifa_scope = ifm->ifa_scope;
	ifa->ifa_dev = in_dev;
	ifa->ifa_local = nla_get_be32(tb[IFA_LOCAL]);
	ifa->ifa_address = nla_get_be32(tb[IFA_ADDRESS]);

	if (tb[IFA_BROADCAST])
		ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]);

	if (tb[IFA_LABEL])
		nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
	else
		memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

	return ifa;

errout:
	return ERR_PTR(err);
}
int rst_restore_ifaddr(struct cpt_context *ctx) { struct net *net = get_exec_env()->ve_netns; int err; loff_t sec = ctx->sections[CPT_SECT_NET_IFADDR]; loff_t endsec; struct cpt_section_hdr h; struct cpt_ifaddr_image di; struct net_device *dev; if (sec == CPT_NULL) return 0; err = ctx->pread(&h, sizeof(h), ctx, sec); if (err) return err; if (h.cpt_section != CPT_SECT_NET_IFADDR || h.cpt_hdrlen < sizeof(h)) return -EINVAL; endsec = sec + h.cpt_next; sec += h.cpt_hdrlen; while (sec < endsec) { int cindex = -1; int err; err = rst_get_object(CPT_OBJ_NET_IFADDR, sec, &di, ctx); if (err) return err; cindex = di.cpt_index; rtnl_lock(); dev = __dev_get_by_index(net, cindex); if (dev && di.cpt_family == AF_INET) { struct in_device *in_dev; struct in_ifaddr *ifa; if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) in_dev = inetdev_init(dev); ifa = inet_alloc_ifa(); if (ifa) { ifa->ifa_local = di.cpt_address[0]; ifa->ifa_address = di.cpt_peer[0]; ifa->ifa_broadcast = di.cpt_broadcast[0]; ifa->ifa_prefixlen = di.cpt_masklen; ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen); ifa->ifa_flags = di.cpt_flags; ifa->ifa_scope = di.cpt_scope; memcpy(ifa->ifa_label, di.cpt_label, IFNAMSIZ); in_dev_hold(in_dev); ifa->ifa_dev = in_dev; err = inet_insert_ifa(ifa); if (err && err != -EEXIST) { rtnl_unlock(); eprintk_ctx("add ifaddr err %d for %d %s\n", err, di.cpt_index, di.cpt_label); return err; } } #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) } else if (dev && di.cpt_family == AF_INET6) { __u32 prefered_lft; __u32 valid_lft; struct net *net = get_exec_env()->ve_ns->net_ns; prefered_lft = (di.cpt_flags & IFA_F_DEPRECATED) ? 0 : di.cpt_prefered_lft; valid_lft = (di.cpt_flags & IFA_F_PERMANENT) ? 
0xFFFFFFFF : di.cpt_valid_lft; err = inet6_addr_add(net, dev->ifindex, (struct in6_addr *)di.cpt_address, di.cpt_masklen, 0, prefered_lft, valid_lft); if (err && err != -EEXIST) { rtnl_unlock(); eprintk_ctx("add ifaddr err %d for %d %s\n", err, di.cpt_index, di.cpt_label); return err; } #endif } else { rtnl_unlock(); eprintk_ctx("unknown ifaddr 2 for %d\n", di.cpt_index); return -EINVAL; } rtnl_unlock(); sec += di.cpt_next; } return 0; }
/*
 * __xfrm4_bundle_create - build a chain of xfrm_dst entries ("bundle")
 * for an IPv4 flow, one per transform state in @xfrm[0..nx-1]
 * (older variant: IPv4-only tunnel endpoints, fixed xfrm4_output).
 *
 * Pass 1 allocates the dst chain top-down and, for non-transport-mode
 * states, re-routes toward the tunnel endpoint.  Pass 2 walks the
 * finished chain and fills in routing metadata copied from the inner
 * route (@rt/@rt0).  On success *dst_p points at the head of the
 * bundle and 0 is returned; on error the partially built chain is
 * freed with dst_free() and a negative errno is returned.
 */
static int __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx, struct flowi *fl, struct dst_entry **dst_p)
{
	struct dst_entry *dst, *dst_prev;
	struct rtable *rt0 = (struct rtable*)(*dst_p);
	struct rtable *rt = rt0;
	u32 remote = fl->fl4_dst;
	u32 local = fl->fl4_src;
	struct flowi fl_tunnel = {
		.nl_u = {
			.ip4_u = {
				.saddr = local,
				.daddr = remote,
				.tos = fl->fl4_tos
			}
		}
	};
	int i;
	int err;
	int header_len = 0;
	int trailer_len = 0;

	dst = dst_prev = NULL;
	dst_hold(&rt->u.dst);

	/* Pass 1: allocate one xfrm_dst per state, chaining via ->child. */
	for (i = 0; i < nx; i++) {
		struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops);
		struct xfrm_dst *xdst;
		int tunnel = 0;

		if (unlikely(dst1 == NULL)) {
			err = -ENOBUFS;
			dst_release(&rt->u.dst);
			goto error;
		}

		if (!dst)
			dst = dst1;
		else {
			dst_prev->child = dst1;
			dst1->flags |= DST_NOHASH;
			dst_clone(dst1);
		}

		xdst = (struct xfrm_dst *)dst1;
		xdst->route = &rt->u.dst;
		xdst->genid = xfrm[i]->genid;

		dst1->next = dst_prev;
		dst_prev = dst1;
		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			/* Tunnel-ish mode: subsequent routing targets the
			 * tunnel endpoint addresses. */
			remote = xfrm[i]->id.daddr.a4;
			local = xfrm[i]->props.saddr.a4;
			tunnel = 1;
		}
		header_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;

		if (tunnel) {
			fl_tunnel.fl4_src = local;
			fl_tunnel.fl4_dst = remote;
			err = xfrm_dst_lookup((struct xfrm_dst **)&rt,
					      &fl_tunnel, AF_INET);
			if (err)
				goto error;
		} else
			dst_hold(&rt->u.dst);
	}

	dst_prev->child = &rt->u.dst;
	dst->path = &rt->u.dst;
	*dst_p = dst;
	dst = dst_prev;

	/* Pass 2: fill per-link routing metadata from the inner route. */
	dst_prev = *dst_p;
	i = 0;
	for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
		x->u.rt.fl = *fl;

		dst_prev->xfrm = xfrm[i++];
		dst_prev->dev = rt->u.dst.dev;
		if (rt->u.dst.dev)
			dev_hold(rt->u.dst.dev);
		dst_prev->obsolete = -1;
		dst_prev->flags |= DST_HOST;
		dst_prev->lastuse = jiffies;
		dst_prev->header_len = header_len;
		dst_prev->nfheader_len = 0;
		dst_prev->trailer_len = trailer_len;
		memcpy(&dst_prev->metrics, &x->route->metrics, sizeof(dst_prev->metrics));

		/* Copy neighbour for reachability confirmation */
		dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
		dst_prev->input = rt->u.dst.input;
		dst_prev->output = xfrm4_output;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);
		x->u.rt.peer = rt->peer;
		/* Sheit... I remember I did this right. Apparently,
		 * it was magically lost, so this code needs audit */
		x->u.rt.rt_flags = rt0->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
		x->u.rt.rt_type = rt->rt_type;
		x->u.rt.rt_src = rt0->rt_src;
		x->u.rt.rt_dst = rt0->rt_dst;
		x->u.rt.rt_gateway = rt->rt_gateway;
		x->u.rt.rt_spec_dst = rt0->rt_spec_dst;
		x->u.rt.idev = rt0->idev;
		in_dev_hold(rt0->idev);
		header_len -= x->u.dst.xfrm->props.header_len;
		trailer_len -= x->u.dst.xfrm->props.trailer_len;
	}

	xfrm_init_pmtu(dst);
	return 0;

error:
	if (dst)
		dst_free(dst);
	return err;
}