/*
 * netlink_broadcast - deliver @skb to every netlink socket listening on
 * @group for the same protocol as @ssk, excluding @ssk itself and any
 * socket whose pid matches @pid.
 *
 * Returns 0 if at least one delivery succeeded, -ENOBUFS if cloning the
 * skb failed (all listeners are then notified of overrun), or -ESRCH if
 * no matching listener exists.
 *
 * Ownership: consumes one reference to @skb unconditionally (kfree_skb
 * at the end); on successful delivery the per-listener clone/reference
 * skb2 is handed off to the receiving socket.
 */
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct sock *sk;
	struct sk_buff *skb2 = NULL;
	int protocol = ssk->protocol;
	int failure = 0, delivered = 0;

	/* While we sleep in clone, do not allow to change socket list */
	netlink_lock_table();

	for (sk = nl_table[protocol]; sk; sk = sk->next) {
		/* Never deliver back to the sender. */
		if (ssk == sk)
			continue;

		/* Skip sockets not subscribed to this multicast group,
		 * and the socket owned by the excluded pid. */
		if (sk->protinfo.af_netlink->pid == pid ||
		    !(sk->protinfo.af_netlink->groups & group))
			continue;

		/* A previous clone failure means we can no longer deliver
		 * consistently; mark every remaining listener as overrun. */
		if (failure) {
			netlink_overrun(sk);
			continue;
		}

		/* Pin the socket across the (possibly sleeping) clone. */
		sock_hold(sk);
		if (skb2 == NULL) {
			if (atomic_read(&skb->users) != 1) {
				/* skb is shared: must clone for delivery. */
				skb2 = skb_clone(skb, allocation);
			} else {
				/* Sole owner: reuse skb itself by taking an
				 * extra reference, avoiding the clone. */
				skb2 = skb;
				atomic_inc(&skb->users);
			}
		}
		if (skb2 == NULL) {
			netlink_overrun(sk);
			/* Clone failed. Notify ALL listeners. */
			failure = 1;
		} else if (netlink_broadcast_deliver(sk, skb2)) {
			/* Receiver queue full: count as overrun, keep skb2
			 * for the next listener. */
			netlink_overrun(sk);
		} else {
			/* Delivered: skb2 now belongs to the receiver. */
			delivered = 1;
			skb2 = NULL;
		}
		sock_put(sk);
	}

	netlink_unlock_table();

	/* Drop the unconsumed clone (if any) and our reference to skb. */
	if (skb2)
		kfree_skb(skb2);
	kfree_skb(skb);

	if (delivered)
		return 0;
	if (failure)
		return -ENOBUFS;
	return -ESRCH;
}
/*
 * netlink_broadcast - deliver @skb to every socket bound to multicast
 * group @group on @ssk's protocol, excluding @ssk and the socket owned
 * by @pid.
 *
 * Per-listener work (clone/refcount, queue checks, overrun accounting)
 * is delegated to do_one_broadcast(), which communicates through the
 * netlink_broadcast_data bundle filled in here.
 *
 * Returns 0 if at least one delivery succeeded, -ENOBUFS if allocation
 * failed during delivery, or -ESRCH if no listener matched.
 *
 * Ownership: consumes one reference to @skb unconditionally.
 */
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	/* Bundle the broadcast parameters and accumulators for
	 * do_one_broadcast(); skb2 caches the per-listener clone. */
	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* Shrink the skb before fan-out; semantics of the trim are in
	 * netlink_trim() — presumably drops excess tailroom/clones. */
	netlink_trim(skb, allocation);

	/* While we sleep in clone, do not allow to change socket list */
	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	netlink_unlock_table();

	/* Drop the unconsumed clone (if any) and our reference to skb. */
	if (info.skb2)
		kfree_skb(info.skb2);
	kfree_skb(skb);

	if (info.delivered) {
		/* If receivers were congested and the caller may block,
		 * give them a chance to drain before returning. */
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}