/*
 * sk_attach_filter - attach a user-supplied BPF filter program to a socket.
 * @fprog: user-space filter description (instruction count + user pointer).
 * @sk:    socket the filter is attached to.
 *
 * Returns 0 on success, -EINVAL on a bad program, -ENOMEM on allocation
 * failure, -EFAULT if the instructions cannot be copied from user space.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp;
	/* Byte size of the instruction array described by fprog. */
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS)
		return (-EINVAL);

	/* Header and instructions live in one allocation charged against
	 * the socket's option memory. */
	fp = (struct sk_filter *)sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if(fp == NULL)
		return (-ENOMEM);

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	if ((err = sk_chk_filter(fp->insns, fp->len))==0) {
		/* Validation passed: publish the new filter first, then wait
		 * for bottom halves to drain before touching the old one. */
		struct sk_filter *old_fp = sk->filter;
		sk->filter = fp;
		synchronize_bh();
		fp = old_fp;
	}

	/* On success fp is now the displaced old filter (possibly NULL);
	 * on validation failure it is the rejected new one. Either way,
	 * drop one reference. */
	if (fp)
		sk_filter_release(sk, fp);

	return (err);
}
/*
 * ipv6_del_addr - remove an IPv6 address from the global hash and from
 * its interface's address list, then notify and free it.
 *
 * If the global address list is currently being traversed
 * (addr_list_lock non-zero), the address is only marked ADDR_INVALID
 * and a RTM_DELADDR notification is sent; the actual unlinking is
 * deferred.
 * NOTE(review): presumably whoever drops addr_list_lock reaps entries
 * flagged ADDR_INVALID — confirm against addrconf_unlock/readers.
 */
static void ipv6_del_addr(struct inet6_ifaddr *ifp)
{
	struct inet6_ifaddr *iter, **back;
	int hash;

	if (atomic_read(&addr_list_lock)) {
		ifp->flags |= ADDR_INVALID;
		ipv6_ifa_notify(RTM_DELADDR, ifp);
		return;
	}

	/* Unlink from the global address hash chain. */
	hash = ipv6_addr_hash(&ifp->addr);

	iter = inet6_addr_lst[hash];
	back = &inet6_addr_lst[hash];

	for (; iter; iter = iter->lst_next) {
		if (iter == ifp) {
			*back = ifp->lst_next;
			/* Unlink first, let BH-level readers drain, only
			 * then clear the forward pointer. */
			synchronize_bh();
			ifp->lst_next = NULL;
			break;
		}
		back = &(iter->lst_next);
	}

	/* Unlink from the per-device address list, same discipline. */
	iter = ifp->idev->addr_list;
	back = &ifp->idev->addr_list;

	for (; iter; iter = iter->if_next) {
		if (iter == ifp) {
			*back = ifp->if_next;
			synchronize_bh();
			ifp->if_next = NULL;
			break;
		}
		back = &(iter->if_next);
	}

	ipv6_ifa_notify(RTM_DELADDR, ifp);

	del_timer(&ifp->timer);

	kfree(ifp);
}
/*
 * ipip_tunnel_destroy - tear down an IPIP tunnel device.
 *
 * The built-in fallback tunnel is a static object: it is only removed
 * from its hash slot, never freed. Dynamically created tunnels are
 * unhashed, freed, and release their module reference.
 */
static void ipip_tunnel_destroy(struct device *dev)
{
	if (dev == &ipip_fb_tunnel_dev) {
		tunnels_wc[0] = NULL;
		/* Let BH-level users of the old pointer drain. */
		synchronize_bh();
		return;
	}

	ipip_tunnel_unlink((struct ip_tunnel *)dev->priv);
	kfree(dev);
	MOD_DEC_USE_COUNT;
}
static void ipip_tunnel_unlink(struct ip_tunnel *t) { struct ip_tunnel **tp; for (tp = ipip_bucket(t); *tp; tp = &(*tp)->next) { if (t == *tp) { *tp = t->next; synchronize_bh(); break; } } }
/*
 * ip_mc_dec_group - drop one reference on a device multicast group.
 * @in_dev: device whose multicast list is searched.
 * @addr:   multicast group address.
 *
 * When the last user goes away the entry is unlinked (with a BH drain
 * before the group is dropped), the routing layer is told if the
 * device is up, and the entry is freed.
 *
 * Returns 0 if the group was found, -ESRCH otherwise.
 */
int ip_mc_dec_group(struct in_device *in_dev, u32 addr)
{
	struct ip_mc_list **link;

	for (link = &in_dev->mc_list; *link != NULL; link = &(*link)->next) {
		struct ip_mc_list *im = *link;

		if (im->multiaddr != addr)
			continue;

		if (--im->users == 0) {
			/* Unlink first, then wait for BH readers. */
			*link = im->next;
			synchronize_bh();

			igmp_group_dropped(im);
			if (in_dev->dev->flags & IFF_UP)
				ip_rt_multicast_event(in_dev);

			kfree_s(im, sizeof(*im));
		}
		return 0;
	}
	return -ESRCH;
}
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) { struct ip_mc_socklist *iml, **imlp; for (imlp=&sk->ip_mc_list; (iml=*imlp)!=NULL; imlp=&iml->next) { if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr && iml->multi.imr_address.s_addr==imr->imr_address.s_addr && (!imr->imr_ifindex || iml->multi.imr_ifindex==imr->imr_ifindex)) { struct in_device *in_dev; if (--iml->count) return 0; *imlp = iml->next; synchronize_bh(); in_dev = inetdev_by_index(iml->multi.imr_ifindex); if (in_dev) ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr); sock_kfree_s(sk, iml, sizeof(*iml)); return 0; } } return -EADDRNOTAVAIL; }
/*
 * addrconf_lock - mark the address lists as being traversed.
 *
 * Bumps addr_list_lock (readers such as ipv6_del_addr test it with
 * atomic_read and defer destructive work while it is non-zero), then
 * waits for bottom halves so any BH-level walker already in flight
 * has finished before the caller proceeds.
 */
static __inline__ void addrconf_lock(void)
{
	atomic_inc(&addr_list_lock);
	synchronize_bh();
}