/*
 *	IP input processing comes here for RAW socket delivery.
 *	Caller owns SKB, so we must make clones.
 *
 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
 *	-> It does. And not only TOS, but all IP header.
 */
static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
{
	struct sock *sk;
	struct hlist_head *head;
	int delivered = 0;
	struct net *net;

	read_lock(&raw_v4_hashinfo.lock);
	head = &raw_v4_hashinfo.ht[hash];
	if (hlist_empty(head))
		goto out;

	net = dev_net(skb->dev);
	sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);

	while (sk) {
		delivered = 1;
		if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
		sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
				     iph->saddr, iph->daddr,
				     skb->dev->ifindex);
	}
out:
	read_unlock(&raw_v4_hashinfo.lock);
	return delivered;
}
/*
 *	IP input processing comes here for RAW socket delivery.
 *	Caller owns SKB, so we must make clones.
 *
 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
 *	-> It does. And not only TOS, but all IP header.
 */
void raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
{
	struct sock *sk;
	struct hlist_head *head;

	read_lock(&raw_v4_lock);
	head = &raw_v4_htable[hash];
	if (hlist_empty(head))
		goto out;
	sk = __raw_v4_lookup(__sk_head(head), iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);
	while (sk) {
		if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
		sk = __raw_v4_lookup(sk_next(sk), iph->protocol,
				     iph->saddr, iph->daddr,
				     skb->dev->ifindex);
	}
out:
	read_unlock(&raw_v4_lock);
}
/*
 *	IP input processing comes here for RAW socket delivery.
 *	This is fun as to avoid copies we want to make no surplus
 *	copies.
 *
 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
 *	-> It does. And not only TOS, but all IP header.
 */
struct sock *raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
{
	struct sock *sk;

	read_lock(&raw_v4_lock);
	if ((sk = raw_v4_htable[hash]) == NULL)
		goto out;
	sk = __raw_v4_lookup(sk, iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);

	while (sk) {
		struct sock *sknext = __raw_v4_lookup(sk->next, iph->protocol,
						      iph->saddr, iph->daddr,
						      skb->dev->ifindex);

		if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
			struct sk_buff *clone;

			if (!sknext)
				break;
			clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
		sk = sknext;
	}
out:
	if (sk)
		sock_hold(sk);
	read_unlock(&raw_v4_lock);

	return sk;
}
/*
 *	IP input processing comes here for RAW socket delivery.
 *	Caller owns SKB, so we must make clones.
 *
 *	RFC 1122: SHOULD pass TOS value up to the transport layer.
 *	-> It does. And not only TOS, but all IP header.
 */
static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
{
	struct sock *sk;
	struct hlist_head *head;
	int delivered = 0;
	struct net *net;

	read_lock(&raw_v4_hashinfo.lock);
	head = &raw_v4_hashinfo.ht[hash];
	if (hlist_empty(head))
		goto out;

	/* head points to a hash chain whose struct sock entries all hash
	 * to the same protocol bucket; walk that chain to see whether any
	 * struct sock should handle this packet. */
	net = dev_net(skb->dev);
	sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
			     iph->saddr, iph->daddr,
			     skb->dev->ifindex);

	while (sk) {
		/* Reaching this point means at least one struct sock can
		 * handle the packet, so the return value becomes 1. */
		delivered = 1;
		if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				raw_rcv(sk, clone);
		}
		/* Several struct sock entries may handle the same packet;
		 * it comes down to the lookup's matching rule, which here
		 * checks only the protocol, source address, destination
		 * address (and the bound device index). */
		sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
				     iph->saddr, iph->daddr,
				     skb->dev->ifindex);
	}
out:
	read_unlock(&raw_v4_hashinfo.lock);
	return delivered;
}
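In every raw_v4_input() variant above, the delivery targets are sockets that user space opened with socket(AF_INET, SOCK_RAW, protocol). As a rough illustration only (a minimal user-space sketch, not part of the kernel sources quoted here), the program below opens a raw ICMP socket; a socket like this is what __raw_v4_lookup() matches, and each matching packet is cloned and queued to it via raw_rcv(), IP header included:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>

int main(void)
{
	char buf[2048];
	ssize_t n;

	/* Raw sockets need CAP_NET_RAW; without it this fails with EPERM. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Each received datagram carries the full IP header, as the
	 * kernel comment notes ("not only TOS, but all IP header"). */
	n = recv(fd, buf, sizeof(buf), 0);
	if (n > 0) {
		struct iphdr *iph = (struct iphdr *)buf;

		printf("got %zd bytes, proto %d, ihl %d\n",
		       n, iph->protocol, iph->ihl);
	}
	close(fd);
	return 0;
}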
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
	int hash;
	struct sock *raw_sk;
	const struct iphdr *iph;
	struct net *net;

	hash = protocol & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v4_hashinfo.lock);
	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
	if (raw_sk != NULL) {
		iph = (const struct iphdr *)skb->data;
		net = dev_net(skb->dev);

		while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
						 iph->daddr, iph->saddr,
						 skb->dev->ifindex)) != NULL) {
			raw_err(raw_sk, skb, info);
			raw_sk = sk_next(raw_sk);
			iph = (const struct iphdr *)skb->data;
		}
	}
	read_unlock(&raw_v4_hashinfo.lock);
}
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
	int hash;
	struct sock *raw_sk;
	struct iphdr *iph;
	struct vrf *vrf;

	hash = protocol & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v4_hashinfo.lock);
	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
	if (raw_sk != NULL) {
		iph = (struct iphdr *)skb->data;
		vrf = if_dev_vrf(skb->dev);

		while ((raw_sk = __raw_v4_lookup(vrf, skb->litevrf_id,
						 raw_sk, protocol,
						 iph->daddr, iph->saddr,
						 skb->dev->ifindex,
						 ip_hdr(skb))) != NULL) {
			raw_err(raw_sk, skb, info);
			raw_sk = sk_next(raw_sk);
			iph = (struct iphdr *)skb->data;
		}
	}
	read_unlock(&raw_v4_hashinfo.lock);
}
void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
{
	int hash;
	struct sock *raw_sk;
	struct iphdr *iph;

	hash = protocol & (RAWV4_HTABLE_SIZE - 1);

	read_lock(&raw_v4_lock);
	raw_sk = sk_head(&raw_v4_htable[hash]);
	if (raw_sk != NULL) {
		iph = (struct iphdr *)skb->data;

		while ((raw_sk = __raw_v4_lookup(raw_sk, protocol,
						 iph->daddr, iph->saddr,
						 skb->dev->ifindex)) != NULL) {
			raw_err(raw_sk, skb, info);
			raw_sk = sk_next(raw_sk);
			iph = (struct iphdr *)skb->data;
		}
	}
	read_unlock(&raw_v4_lock);
}
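All of the raw_icmp_error() variants above walk the same hash chain and hand the ICMP error to each matching socket via raw_err(). Whether user space ever sees that error depends on the socket's error-reporting settings; as a hedged sketch of typical usage (my assumption, not code from the sources quoted here), a raw socket with IP_RECVERR enabled can drain such errors from its error queue with MSG_ERRQUEUE:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

/* Read one queued error, if any, and print its ICMP type/code/info. */
static void read_icmp_error(int fd)
{
	char data[512], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;	/* nothing queued */

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP &&
		    cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			printf("icmp error: type %u code %u info %u\n",
			       ee->ee_type, ee->ee_code, ee->ee_info);
		}
	}
}

int main(void)
{
	int on = 1;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

	if (fd < 0)
		return 1;	/* needs CAP_NET_RAW */
	if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on)) < 0)
		return 1;

	/* ... send something that elicits an ICMP error, then: */
	read_icmp_error(fd);
	close(fd);
	return 0;
}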