int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct rtattr **rta = arg; struct in_device *in_dev; struct ifaddrmsg *ifm = NLMSG_DATA(nlh); struct in_ifaddr *ifa, **ifap; ASSERT_RTNL(); if ((in_dev = inetdev_by_index(ifm->ifa_index)) == NULL) return -EADDRNOTAVAIL; __in_dev_put(in_dev); for (ifap=&in_dev->ifa_list; (ifa=*ifap)!=NULL; ifap=&ifa->ifa_next) { if ((rta[IFA_LOCAL-1] && memcmp(RTA_DATA(rta[IFA_LOCAL-1]), &ifa->ifa_local, 4)) || (rta[IFA_LABEL-1] && strcmp(RTA_DATA(rta[IFA_LABEL-1]), ifa->ifa_label)) || (rta[IFA_ADDRESS-1] && (ifm->ifa_prefixlen != ifa->ifa_prefixlen || !inet_ifa_match(*(u32*)RTA_DATA(rta[IFA_ADDRESS-1]), ifa)))) continue; inet_del_ifa(in_dev, ifap, 1); return 0; } return -EADDRNOTAVAIL; }
/*
 * ip_mc_leave_group - leave a multicast group previously joined on this socket.
 *
 * Searches the socket's membership list for an entry matching the group
 * address, the local interface address, and (if non-zero) the interface
 * index from @imr.  Memberships are reference counted: only when the
 * count drops to zero is the entry unlinked, the device-level group
 * membership released, and the entry freed.
 *
 * Returns 0 on success, -EADDRNOTAVAIL if no matching membership exists.
 * Takes and releases the RTNL lock internally.
 */
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct ip_mc_socklist *iml, **imlp;

	rtnl_lock();
	for (imlp = &sk->protinfo.af_inet.mc_list; (iml = *imlp) != NULL;
	     imlp = &iml->next) {
		if (iml->multi.imr_multiaddr.s_addr == imr->imr_multiaddr.s_addr &&
		    iml->multi.imr_address.s_addr == imr->imr_address.s_addr &&
		    (!imr->imr_ifindex ||
		     iml->multi.imr_ifindex == imr->imr_ifindex)) {
			struct in_device *in_dev;

			/* Still referenced by other joins: just decrement. */
			if (--iml->count) {
				rtnl_unlock();
				return 0;
			}

			/* Last reference: unlink from the socket's list. */
			*imlp = iml->next;

			/*
			 * Drop the device-level membership.  inetdev_by_index()
			 * returns a referenced in_device here, balanced by
			 * in_dev_put().  The device may already be gone, in
			 * which case the group count died with it.
			 */
			in_dev = inetdev_by_index(iml->multi.imr_ifindex);
			if (in_dev) {
				ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
				in_dev_put(in_dev);
			}
			/* Unlock before freeing; iml is already unreachable. */
			rtnl_unlock();
			sock_kfree_s(sk, iml, sizeof(*iml));
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}
/*
 * ip_mc_join_group - join a multicast group on behalf of a socket.
 *
 * Resolves the target interface (by local address when no ifindex is
 * given, by index otherwise), records the membership on the socket's
 * mc_list, and bumps the device-level group membership via
 * ip_mc_inc_group().  A join that exactly duplicates an existing entry
 * is reference counted instead of added again — but only for "new
 * style" requests (imr_address == 0); old-style duplicates return
 * -EADDRINUSE.
 *
 * Returns 0 on success; -EINVAL for a non-multicast address; -ENODEV
 * if no interface matches; -EADDRINUSE for an old-style duplicate;
 * -ENOBUFS on allocation failure or when the per-socket membership
 * limit (sysctl_igmp_max_memberships) is reached.
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	int err;
	u32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml, *i;
	struct in_device *in_dev;
	int count = 0;

	if (!MULTICAST(addr))
		return -EINVAL;

	rtnl_shlock();

	if (!imr->imr_ifindex)
		in_dev = ip_mc_find_dev(imr);
	else {
		in_dev = inetdev_by_index(imr->imr_ifindex);
		/*
		 * Drop the lookup reference immediately; the shared RTNL
		 * lock presumably keeps in_dev alive for the rest of this
		 * function — NOTE(review): confirm against this kernel's
		 * in_device lifetime rules.
		 */
		if (in_dev)
			__in_dev_put(in_dev);
	}

	if (!in_dev) {
		iml = NULL;	/* nothing to free at done: */
		err = -ENODEV;
		goto done;
	}

	/*
	 * Allocate before scanning: the scan decides whether the new
	 * entry is needed, and any unused allocation is released at done:.
	 * A NULL result is tolerated until after the duplicate check.
	 */
	iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);

	err = -EADDRINUSE;
	for (i = sk->protinfo.af_inet.mc_list; i; i = i->next) {
		if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
			/* New style additions are reference counted */
			if (imr->imr_address.s_addr == 0) {
				i->count++;
				err = 0;
			}
			goto done;
		}
		count++;
	}
	err = -ENOBUFS;
	if (iml == NULL || count >= sysctl_igmp_max_memberships)
		goto done;

	/* Link the new membership at the head of the socket's list. */
	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next = sk->protinfo.af_inet.mc_list;
	iml->count = 1;
	sk->protinfo.af_inet.mc_list = iml;
	ip_mc_inc_group(in_dev, addr);
	iml = NULL;	/* ownership transferred to mc_list; don't free */
	err = 0;

done:
	rtnl_shunlock();
	if (iml)
		sock_kfree_s(sk, iml, sizeof(*iml));
	return err;
}
/*
 * ipgre_close - net_device ndo_stop handler for a GRE tunnel.
 *
 * When the tunnel's remote endpoint is a multicast address and a
 * membership link (t->mlink) was established at open time, release the
 * corresponding device-level multicast group membership.  Always
 * returns 0.
 */
static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct in_device *in_dev;

	/* Nothing to undo for unicast tunnels or without a stored mlink. */
	if (!ipv4_is_multicast(t->parms.iph.daddr) || !t->mlink)
		return 0;

	in_dev = inetdev_by_index(t->net, t->mlink);
	if (in_dev)
		ip_mc_dec_group(in_dev, t->parms.iph.daddr);

	return 0;
}
/*
 * ip_mc_drop_socket - release every multicast membership held by a socket.
 *
 * Pops each entry off sk->ip_mc_list in turn, drops the device-level
 * group membership if the interface still exists, and frees the entry.
 * Called when the socket is being destroyed.
 */
void ip_mc_drop_socket(struct sock *sk)
{
	struct ip_mc_socklist *entry;

	/* Detach the head each iteration until the list is empty. */
	for (entry = sk->ip_mc_list; entry != NULL; entry = sk->ip_mc_list) {
		struct in_device *idev;

		sk->ip_mc_list = entry->next;

		idev = inetdev_by_index(entry->multi.imr_ifindex);
		if (idev != NULL)
			ip_mc_dec_group(idev, entry->multi.imr_multiaddr.s_addr);

		sock_kfree_s(sk, entry, sizeof(*entry));
	}
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) { struct net *net = sock_net(skb->sk); struct nlattr *tb[IFA_MAX+1]; struct in_device *in_dev; struct ifaddrmsg *ifm; struct in_ifaddr *ifa, **ifap; int err = -EINVAL; ASSERT_RTNL(); err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy); if (err < 0) goto errout; ifm = nlmsg_data(nlh); in_dev = inetdev_by_index(net, ifm->ifa_index); if (in_dev == NULL) { err = -ENODEV; goto errout; } __in_dev_put(in_dev); for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && ifa->ifa_local != nla_get_be32(tb[IFA_LOCAL])) continue; if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue; if (tb[IFA_ADDRESS] && (ifm->ifa_prefixlen != ifa->ifa_prefixlen || !inet_ifa_match(nla_get_be32(tb[IFA_ADDRESS]), ifa))) continue; __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).pid); return 0; } err = -EADDRNOTAVAIL; errout: return err; }
/*
 * ip_mc_drop_socket - release every multicast membership held by a socket.
 *
 * Pops each entry off the socket's mc_list under the RTNL lock, drops
 * the device-level group membership if the interface still exists, and
 * frees the entry.  Called when the socket is being destroyed.
 */
void ip_mc_drop_socket(struct sock *sk)
{
	struct ip_mc_socklist *iml;

	/* Fast path: avoid taking RTNL for sockets with no memberships. */
	if (sk->protinfo.af_inet.mc_list == NULL)
		return;

	rtnl_lock();
	while ((iml = sk->protinfo.af_inet.mc_list) != NULL) {
		struct in_device *in_dev;

		/* Unlink the head before releasing its resources. */
		sk->protinfo.af_inet.mc_list = iml->next;

		/*
		 * inetdev_by_index() returns a referenced in_device in this
		 * version, balanced by in_dev_put() below.  The interface
		 * may already be gone; its group counts died with it.
		 */
		if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) {
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
			in_dev_put(in_dev);
		}
		sock_kfree_s(sk, iml, sizeof(*iml));
	}
	rtnl_unlock();
}
/*
 * ip_mc_leave_group - leave a multicast group previously joined on this socket.
 *
 * Searches the socket's membership list for an entry matching the group
 * address, the local interface address, and (if non-zero) the interface
 * index from @imr.  Memberships are reference counted: only when the
 * count drops to zero is the entry unlinked, the device-level group
 * membership released, and the entry freed.
 *
 * Returns 0 on success, -EADDRNOTAVAIL if no matching membership exists.
 */
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct ip_mc_socklist *iml, **imlp;

	for (imlp = &sk->ip_mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
		if (iml->multi.imr_multiaddr.s_addr == imr->imr_multiaddr.s_addr &&
		    iml->multi.imr_address.s_addr == imr->imr_address.s_addr &&
		    (!imr->imr_ifindex ||
		     iml->multi.imr_ifindex == imr->imr_ifindex)) {
			struct in_device *in_dev;

			/* Still referenced by other joins: just decrement. */
			if (--iml->count)
				return 0;

			/* Last reference: unlink from the socket's list. */
			*imlp = iml->next;

			/*
			 * Wait for concurrent bottom-half users to finish
			 * before the entry is released — the ordering of
			 * this barrier between unlink and free is essential.
			 */
			synchronize_bh();

			in_dev = inetdev_by_index(iml->multi.imr_ifindex);
			if (in_dev)
				ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
			sock_kfree_s(sk, iml, sizeof(*iml));
			return 0;
		}
	}
	return -EADDRNOTAVAIL;
}