int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { const struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; struct net *net = dev_net(skb->dev); if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); return NET_RX_DROP; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; } memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji */ IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex; if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) goto err; hdr = ipv6_hdr(skb); if (hdr->version != 6) goto err; /* * RFC4291 2.5.3 * A packet received on an interface with a destination address * of loopback must be dropped. */ if (!(dev->flags & IFF_LOOPBACK) && ipv6_addr_loopback(&hdr->daddr)) goto err; /* RFC4291 2.7 * Nodes must not originate a packet to a multicast address whose scope * field contains the reserved value 0; if such a packet is received, it * must be silently dropped. */ if (ipv6_addr_is_multicast(&hdr->daddr) && IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0) goto err; /* * RFC4291 2.7 * Multicast addresses must not be used as source addresses in IPv6 * packets or appear in any Routing header. */ if (ipv6_addr_is_multicast(&hdr->saddr)) goto err; skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb) < 0) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return NET_RX_DROP; } } rcu_read_unlock(); /* Must drop socket now because of tproxy. */ skb_orphan(skb); return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); err: IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); return NET_RX_DROP; }
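The receive path above trims the skb to the length advertised in the fixed header, with the one exception that a zero payload_len together with a hop-by-hop next header may indicate a Jumbo Payload option. Below is a minimal userspace sketch of those same length checks, assuming a raw packet buffer rather than an sk_buff; struct ipv6hdr_min and check_ipv6_lengths are made-up names for illustration, not kernel API.

/* Hypothetical userspace sketch (not kernel code): validate an IPv6 header the
 * way ipv6_rcv() does before trimming - version must be 6, and payload_len must
 * fit the captured buffer unless it is 0 with a hop-by-hop header (possible
 * Jumbo Payload option). */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define NEXTHDR_HOP 0	/* hop-by-hop extension header number */

struct ipv6hdr_min {		/* minimal fixed IPv6 header layout */
	uint8_t  ver_tc_fl[4];	/* version (top 4 bits) + traffic class + flow label */
	uint16_t payload_len;	/* network byte order */
	uint8_t  nexthdr;
	uint8_t  hop_limit;
	uint8_t  saddr[16];
	uint8_t  daddr[16];
};

/* Returns 0 if the packet passes the ipv6_rcv()-style length checks. */
static int check_ipv6_lengths(const uint8_t *pkt, size_t caplen)
{
	const struct ipv6hdr_min *hdr = (const struct ipv6hdr_min *)pkt;
	size_t pkt_len;

	if (caplen < sizeof(*hdr))
		return -1;			/* cannot even pull the header */
	if ((hdr->ver_tc_fl[0] >> 4) != 6)
		return -1;			/* not IPv6 */

	pkt_len = ntohs(hdr->payload_len);
	/* pkt_len may be zero if a Jumbo Payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(*hdr) > caplen)
			return -1;		/* truncated on the wire */
		/* the kernel would trim the skb to pkt_len + header here */
	}
	return 0;
}

int main(void)
{
	uint8_t pkt[sizeof(struct ipv6hdr_min)] = { 0x60 };	/* version 6 */
	struct ipv6hdr_min *hdr = (struct ipv6hdr_min *)pkt;

	hdr->payload_len = htons(0);
	hdr->nexthdr = 59;		/* "no next header" */
	printf("check: %d\n", check_ipv6_lengths(pkt, sizeof(pkt)));
	return 0;
}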
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->pkt_type == PACKET_OTHERHOST)
		goto drop;

	IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES);

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = &loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex;

	if (skb->len < sizeof(struct ipv6hdr))
		goto err;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	if (hdr->version != 6)
		goto err;

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			goto truncated;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
			goto drop;
		}
		hdr = skb->nh.ipv6h;
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		skb->h.raw = (u8*)(hdr+1);
		if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
			return 0;
		}
		hdr = skb->nh.ipv6h;
	}

	/* Tail restored for completeness: the labels targeted by the gotos above
	 * were cut off in this excerpt. The lines below follow the same-era
	 * upstream ipv6_rcv() and are an assumption, not part of the excerpt. */
	return NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish);

truncated:
	IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
err:
	IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb(skb);
out:
	return 0;
}
static inline int ip6_input_finish(struct sk_buff *skb) { struct inet6_protocol *ipprot; struct sock *raw_sk; unsigned int nhoff; int nexthdr; u8 hash; struct inet6_dev *idev; /* * Parse extension headers */ rcu_read_lock(); resubmit: idev = ip6_dst_idev(skb->dst); if (!pskb_pull(skb, skb->h.raw - skb->data)) goto discard; nhoff = IP6CB(skb)->nhoff; nexthdr = skb->nh.raw[nhoff]; raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]); if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) raw_sk = NULL; hash = nexthdr & (MAX_INET_PROTOS - 1); if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) { int ret; if (ipprot->flags & INET6_PROTO_FINAL) { struct ipv6hdr *hdr; /* Free reference early: we don't need it any more, and it may hold ip_conntrack module loaded indefinitely. */ nf_reset(skb); skb_postpull_rcsum(skb, skb->nh.raw, skb->h.raw - skb->nh.raw); hdr = skb->nh.ipv6h; if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, &hdr->saddr) && !ipv6_is_mld(skb, nexthdr)) goto discard; } if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; ret = ipprot->handler(&skb); if (ret > 0) goto resubmit; else if (ret == 0) IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS); } else { if (!raw_sk) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS); icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_UNK_NEXTHDR, nhoff, skb->dev); } } else IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS); kfree_skb(skb); } rcu_read_unlock(); return 0; discard: IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); kfree_skb(skb); return 0; }
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	int res;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);
	}

	rcu_read_lock();
	if (dst->hh) {
		res = neigh_hh_output(dst->hh, skb);
		rcu_read_unlock();
		return res;
	} else {
		neigh = dst_get_neighbour(dst);
		if (neigh) {
			res = neigh->output(skb);
			rcu_read_unlock();
			return res;
		}
		rcu_read_unlock();
	}

	IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
			 IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb->dst; struct ipv6hdr *hdr = skb->nh.ipv6h; struct inet6_skb_parm *opt = IP6CB(skb); if (ipv6_devconf.forwarding == 0) goto error; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } skb->ip_summed = CHECKSUM_NONE; /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without ane WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. --ANK */ if (opt->ra) { u8 *ptr = skb->nh.raw + opt->ra; if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0, skb->dev); IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb->dst; /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. */ if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) { struct in6_addr *target = NULL; struct rt6_info *rt; struct neighbour *n = dst->neighbour; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if ((rt->rt6i_flags & RTF_GATEWAY)) target = (struct in6_addr*)&n->primary_key; else target = &hdr->daddr; /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (xrlim_allow(dst, 1*HZ)) ndisc_send_redirect(skb, n, target); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. */ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0, skb->dev); goto error; } } if (skb->len > dst_mtu(dst)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = skb->nh.ipv6h; /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; }
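Two of the checks in ip6_forward() above decide whether the packet can be forwarded at all: the hop limit must still allow one more hop, and because IPv6 routers never fragment in transit, the packet must fit the outgoing MTU or an ICMPv6 Packet Too Big is returned. Below is a small sketch of just that decision, assuming plain integers instead of skb/dst state; ip6_forward_checks and fwd_verdict are illustrative names, not kernel symbols.

/* Hypothetical sketch (userspace, not the kernel function): the two per-packet
 * checks ip6_forward() performs before handing the frame to the output path. */
#include <stdio.h>

enum fwd_verdict {
	FWD_OK,
	FWD_ICMP_TIME_EXCEEDED,	/* hop limit exhausted at this router */
	FWD_ICMP_PKT_TOO_BIG,	/* sender must lower its path MTU */
};

static enum fwd_verdict ip6_forward_checks(unsigned int hop_limit,
					   unsigned int pkt_len,
					   unsigned int mtu)
{
	if (hop_limit <= 1)
		return FWD_ICMP_TIME_EXCEEDED;
	if (pkt_len > mtu)
		return FWD_ICMP_PKT_TOO_BIG;
	return FWD_OK;		/* caller then decrements hop_limit and forwards */
}

int main(void)
{
	printf("%d\n", ip6_forward_checks(64, 1400, 1500));	/* FWD_OK */
	printf("%d\n", ip6_forward_checks(1, 1400, 1500));	/* time exceeded */
	printf("%d\n", ip6_forward_checks(64, 1600, 1500));	/* packet too big */
	return 0;
}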
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, struct frag_hdr *fhdr, int nhoff) { struct sk_buff *prev, *next; struct net_device *dev; int offset, end; struct net *net = dev_net(skb_dst(skb)->dev); if (fq->q.last_in & INET_FRAG_COMPLETE) goto err; offset = ntohs(fhdr->frag_off) & ~0x7; end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((u8 *)&fhdr->frag_off - skb_network_header(skb))); return -1; } if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < fq->q.len || ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) goto err; fq->q.last_in |= INET_FRAG_LAST_IN; fq->q.len = end; } else { /* Check if the fragment is rounded to 8 bytes. * Required by the RFC. */ if (end & 0x7) { /* RFC2460 says always send parameter problem in * this case. -DaveM */ IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, offsetof(struct ipv6hdr, payload_len)); return -1; } if (end > fq->q.len) { /* Some bits beyond end -> corruption. */ if (fq->q.last_in & INET_FRAG_LAST_IN) goto err; fq->q.len = end; } } if (end == offset) goto err; /* Point into the IP datagram 'data' part. */ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) goto err; if (pskb_trim_rcsum(skb, end - offset)) goto err; /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put * this fragment, right? */ prev = fq->q.fragments_tail; if (!prev || FRAG6_CB(prev)->offset < offset) { next = NULL; goto found; } prev = NULL; for(next = fq->q.fragments; next != NULL; next = next->next) { if (FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; } found: /* RFC5722, Section 4, amended by Errata ID : 3089 * When reassembling an IPv6 datagram, if * one or more its constituent fragments is determined to be an * overlapping fragment, the entire datagram (and any constituent * fragments) MUST be silently discarded. */ /* Check for overlap with preceding fragment. */ if (prev && (FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; /* Look for overlap with succeeding segment. */ if (next && FRAG6_CB(next)->offset < end) goto discard_fq; FRAG6_CB(skb)->offset = offset; /* Insert this fragment in the chain of fragments. */ skb->next = next; if (!next) fq->q.fragments_tail = skb; if (prev) prev->next = skb; else fq->q.fragments = skb; dev = skb->dev; if (dev) { fq->iif = dev->ifindex; skb->dev = NULL; } fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; atomic_add(skb->truesize, &fq->q.net->mem); /* The first fragment. * nhoffset is obtained from the first fragment, of course. 
*/ if (offset == 0) { fq->nhoffset = nhoff; fq->q.last_in |= INET_FRAG_FIRST_IN; } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len) { int res; unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; res = ip6_frag_reasm(fq, prev, dev); skb->_skb_refdst = orefdst; return res; } skb_dst_drop(skb); write_lock(&ip6_frags.lock); list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); write_unlock(&ip6_frags.lock); return -1; discard_fq: fq_kill(fq); err: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -1; }
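The RFC 5722 comment in the queueing function above is enforced by two comparisons: the new fragment must start at or after the end of its predecessor and end at or before the start of its successor, otherwise the whole reassembly queue is killed (the discard_fq path). Below is a sketch of that test with plain offsets, assuming prev_end/next_off stand in for the queued fragments' control-block offsets; frag_overlaps is an illustrative helper, not kernel API.

/* Hypothetical sketch of the RFC 5722 rule: any overlap with the fragment
 * before or after the new one causes the entire datagram to be discarded. */
#include <stdbool.h>
#include <stdio.h>

/* prev_end: end offset of the fragment queued just before the new one (0 if
 * none); next_off: start offset of the fragment queued just after it (use a
 * large sentinel if none); [offset, end) is the new fragment. */
static bool frag_overlaps(unsigned int prev_end, unsigned int next_off,
			  unsigned int offset, unsigned int end)
{
	if (prev_end > offset)	/* tail of the previous fragment covers us */
		return true;
	if (next_off < end)	/* we run into the following fragment */
		return true;
	return false;
}

int main(void)
{
	/* new fragment [0,1232), no predecessor, successor starts at 1232: ok */
	printf("%d\n", frag_overlaps(0, 1232, 0, 1232));
	/* new fragment [1200,2432), predecessor ends at 1232: overlap, discard */
	printf("%d\n", frag_overlaps(1232, 4096, 1200, 2432));
	return 0;
}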
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, struct frag_hdr *fhdr, int nhoff) { struct sk_buff *prev, *next; struct net_device *dev; int offset, end; struct net *net = dev_net(skb_dst(skb)->dev); if (fq->q.last_in & INET_FRAG_COMPLETE) goto err; offset = ntohs(fhdr->frag_off) & ~0x7; end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((u8 *)&fhdr->frag_off - skb_network_header(skb))); return -1; } if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); } if (!(fhdr->frag_off & htons(IP6_MF))) { if (end < fq->q.len || ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) goto err; fq->q.last_in |= INET_FRAG_LAST_IN; fq->q.len = end; } else { if (end & 0x7) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, offsetof(struct ipv6hdr, payload_len)); return -1; } if (end > fq->q.len) { if (fq->q.last_in & INET_FRAG_LAST_IN) goto err; fq->q.len = end; } } if (end == offset) goto err; if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) goto err; if (pskb_trim_rcsum(skb, end - offset)) goto err; prev = fq->q.fragments_tail; if (!prev || FRAG6_CB(prev)->offset < offset) { next = NULL; goto found; } prev = NULL; for(next = fq->q.fragments; next != NULL; next = next->next) { if (FRAG6_CB(next)->offset >= offset) break; prev = next; } found: if (prev && (FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; if (next && FRAG6_CB(next)->offset < end) goto discard_fq; FRAG6_CB(skb)->offset = offset; skb->next = next; if (!next) fq->q.fragments_tail = skb; if (prev) prev->next = skb; else fq->q.fragments = skb; dev = skb->dev; if (dev) { fq->iif = dev->ifindex; skb->dev = NULL; } fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; atomic_add(skb->truesize, &fq->q.net->mem); if (offset == 0) { fq->nhoffset = nhoff; fq->q.last_in |= INET_FRAG_FIRST_IN; } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len) return ip6_frag_reasm(fq, prev, dev); write_lock(&ip6_frags.lock); list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); write_unlock(&ip6_frags.lock); return -1; discard_fq: fq_kill(fq); err: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -1; }
int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(dst->dev); if (net->ipv6.devconf_all->forwarding == 0) goto error; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } if (skb->pkt_type != PACKET_HOST) goto drop; skb_forward_csum(skb); /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without ane WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. --ANK */ if (opt->ra) { u8 *ptr = skb_network_header(skb) + opt->ra; if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0, skb->dev); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } /* XXX: idev->cnf.proxy_ndp? */ if (net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct rt6_info *rt; struct neighbour *n = dst->neighbour; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if ((rt->rt6i_flags & RTF_GATEWAY)) target = (struct in6_addr*)&n->primary_key; else target = &hdr->daddr; /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (xrlim_allow(dst, 1*HZ)) ndisc_send_redirect(skb, n, target); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. */ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0, skb->dev); goto error; } } if (skb->len > dst_mtu(dst) && !skb_is_gso(skb)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } /* * We try to optimize forwarding of VE packets: * do not decrement TTL (and so save skb_cow) * during forwarding of outgoing pkts from VE. * For incoming pkts we still do ttl decr, * since such skb is not cloned and does not require * actual cow. 
So, there is at least one place * in pkts path with mandatory ttl decr, that is * sufficient to prevent routing loops. */ hdr = ipv6_hdr(skb); if (skb->dev->features & NETIF_F_VENET) /* src is VENET device */ goto no_ttl_decr; if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = ipv6_hdr(skb); /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; no_ttl_decr: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; }
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); return 0; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); goto drop; } memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = &loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji */ IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex; if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) goto err; hdr = ipv6_hdr(skb); if (hdr->version != 6) goto err; skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb) < 0) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return 0; } } rcu_read_unlock(); return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); err: IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); return 0; }
int ip6_mc_input(struct sk_buff *skb) { struct ipv6hdr *hdr; int deliver; IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS); hdr = ipv6_hdr(skb); deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); #ifdef CONFIG_IPV6_MROUTE /* * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate * packets. */ struct sk_buff *skb2; struct inet6_skb_parm *opt = IP6CB(skb); /* Check for MLD */ if (unlikely(opt->ra)) { /* Check if this is a mld message */ u8 *ptr = skb_network_header(skb) + opt->ra; struct icmp6hdr *icmp6; u8 nexthdr = hdr->nexthdr; int offset; /* Check if the value of Router Alert * is for MLD (0x0000). */ if ((ptr[2] | ptr[3]) == 0) { deliver = 0; if (!ipv6_ext_hdr(nexthdr)) { /* BUG */ goto out; } offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr); if (offset < 0) goto out; if (nexthdr != IPPROTO_ICMPV6) goto out; if (!pskb_may_pull(skb, (skb_network_header(skb) + offset + 1 - skb->data))) goto out; icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); switch (icmp6->icmp6_type) { case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: deliver = 1; break; } goto out; } /* unknown RA - process it normally */ } if (deliver) skb2 = skb_clone(skb, GFP_ATOMIC); else { skb2 = skb; skb = NULL; } if (skb2) { skb2->dev = skb2->dst->dev; ip6_mr_input(skb2); } } out: #endif if (likely(deliver)) ip6_input(skb); else { /* discard */ kfree_skb(skb); } return 0; }
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; if (skb->pkt_type == PACKET_OTHERHOST) goto drop; rcu_read_lock(); idev = __in6_dev_get(skb->dev); IP6_INC_STATS_BH(IPSTATS_MIB_INRECEIVES); /* purpose : CONFIG_IPV6 author : ChunRu date : 2011-04-21 */ /* description : add dad failure to hw addr mechanism */ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS); /* purpose : CONFIG_IPV6 author : ChunRu date : 2011-10-21 */ /* description : fix memory keeping growing after dad fail */ goto drop; } /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = &loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji */ IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex; if (skb->len < sizeof(struct ipv6hdr)) goto err; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = skb->nh.ipv6h; if (hdr->version != 6) goto err; skb->h.raw = (u8 *)(hdr + 1); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) goto truncated; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = skb->nh.ipv6h; } if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb, IP6CB(skb)->nhoff) < 0) { IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return 0; } hdr = skb->nh.ipv6h; } rcu_read_unlock(); return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); truncated: IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS); err: IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); out: return 0; }
int ip6_mc_input(struct sk_buff *skb) { struct ipv6hdr *hdr; int deliver; IP6_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS); hdr = skb->nh.ipv6h; deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) || ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); #ifdef CONFIG_IPV6_MROUTE /* * IPv6 multicast router mode is now supported ;) */ if ((ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_MULTICAST) && (IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)) goto out; if (ipv6_devconf.mc_forwarding && !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate * packets. */ struct sk_buff *skb2; struct inet6_skb_parm *opt = IP6CB(skb); /* Check for MLD */ if (unlikely(opt->ra)) { /* Check if this is a mld message */ //u8 *ptr = skb_network_header(skb) + opt->ra; u8 *ptr; struct icmp6hdr *icmp6; u8 nexthdr = hdr->nexthdr; int offset; ptr = (u8 *) (skb->nh.raw + opt->ra); //chunru: copy from ip6_output /* Check if the value of Router Alert * is for MLD (0x0000). */ if ((ptr[2] | ptr[3]) == 0) { deliver = 0; if (!ipv6_ext_hdr(nexthdr)) { /* BUG */ goto out; } offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr); if (offset < 0) goto out; if (nexthdr != IPPROTO_ICMPV6) goto out; if (!pskb_may_pull(skb, (skb->nh.raw + offset + 1 - skb->data))) goto out; icmp6 = (struct icmp6hdr *)( skb->nh.raw + offset); switch (icmp6->icmp6_type) { case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: case ICMPV6_MLD2_REPORT: deliver = 1; break; } goto out; } /* unknown RA - process it normally */ } if (deliver) { skb2 = skb_clone(skb, GFP_ATOMIC); } else { skb2 = skb; skb = NULL; } if (skb2) { ip6_mr_input(skb2); } } out: #endif if (likely(deliver)) { ip6_input(skb); return 0; } #ifdef CONFIG_IPV6_MROUTE else if(skb) { /* discard */ kfree_skb(skb); } #else /* discard */ kfree_skb(skb); #endif return 0; }
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev = NULL; int saddr_type, daddr_type; if (skb->pkt_type == PACKET_OTHERHOST) goto drop; idev = in6_dev_get(dev); IP6_INC_STATS_BH(idev,Ip6InReceives); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) goto out; /* Store incoming device index. When the packet will be queued, we cannot refer to skb->dev anymore. */ ((struct inet6_skb_parm *)skb->cb)->iif = dev->ifindex; if (skb->len < sizeof(struct ipv6hdr)) goto err; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto drop; hdr = skb->nh.ipv6h; if (hdr->version != 6) goto err; saddr_type = ipv6_addr_type(&hdr->saddr); daddr_type = ipv6_addr_type(&hdr->daddr); if ((saddr_type & IPV6_ADDR_MULTICAST) || (daddr_type == IPV6_ADDR_ANY)) goto drop; /*XXX*/ if (((saddr_type & IPV6_ADDR_LOOPBACK) || (daddr_type & IPV6_ADDR_LOOPBACK)) && !(dev->flags & IFF_LOOPBACK)) goto drop; /*XXX*/ #ifdef CONFIG_IPV6_DROP_FAKE_V4MAPPED if (saddr_type == IPV6_ADDR_MAPPED || daddr_type == IPV6_ADDR_MAPPED) goto drop; /*XXX*/ #endif pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) goto truncated; if (pkt_len + sizeof(struct ipv6hdr) < skb->len) { if (__pskb_trim(skb, pkt_len + sizeof(struct ipv6hdr))) goto drop; hdr = skb->nh.ipv6h; if (skb->ip_summed == CHECKSUM_HW) skb->ip_summed = CHECKSUM_NONE; } } if (hdr->nexthdr == NEXTHDR_HOP) { skb->h.raw = (u8*)(hdr+1); if (ipv6_parse_hopopts(skb, offsetof(struct ipv6hdr, nexthdr)) < 0) { IP6_INC_STATS_BH(idev,Ip6InHdrErrors); if (idev) in6_dev_put(idev); return 0; } hdr = skb->nh.ipv6h; }
static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; struct in6_addr *nexthop; int ret; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. */ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <= IPV6_ADDR_SCOPE_NODELOCAL && !(dev->flags & IFF_LOOPBACK)) { kfree_skb(skb); return 0; } } rcu_read_lock_bh(); nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; } rcu_read_unlock_bh(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; }
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(fq->q.net, struct net, ipv6.frags); struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; fq_kill(fq); if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_oom; fp->next = head->next; if (!fp->next) fq->q.fragments_tail = fp; prev->next = fp; skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; kfree_skb(fq->q.fragments); fq->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG6_CB(head)->offset != 0); payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) goto out_oversize; if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_oom; if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &fq->q.net->mem); } nhoff = fq->nhoffset; skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } atomic_sub(head->truesize, &fq->q.net->mem); head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; fq->q.fragments_tail = NULL; return 1; out_oversize: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); return -1; }
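The payload_len computed in ip6_frag_reasm() above is the unfragmented part carried by the first fragment (everything between the IPv6 header and its data) plus the total fragmentable length fq->q.len, minus the 8-byte fragment header that reassembly removes, and it must stay within IPV6_MAXPLEN. Below is a worked sketch of that arithmetic, assuming fixed header sizes and no extra extension headers before the fragment header; reasm_payload_len is an illustrative helper.

/* Hypothetical sketch of the reassembled-payload-length computation. */
#include <stdio.h>

#define IPV6_HDR_LEN	40
#define FRAG_HDR_LEN	8
#define IPV6_MAXPLEN	65535

/* first_data_off: offset of the first fragment's data from the start of the
 * IPv6 header; total_len: fq->q.len, i.e. offset + length of the last
 * fragment. Returns -1 when the result would be oversize. */
static int reasm_payload_len(unsigned int first_data_off, unsigned int total_len)
{
	unsigned int payload_len = (first_data_off - IPV6_HDR_LEN)
				   + total_len - FRAG_HDR_LEN;

	if (payload_len > IPV6_MAXPLEN)
		return -1;
	return (int)payload_len;
}

int main(void)
{
	/* first fragment's data starts right after the IPv6 + fragment headers,
	 * and the fragmentable part is 3000 bytes in total -> payload 3000 */
	printf("%d\n", reasm_payload_len(IPV6_HDR_LEN + FRAG_HDR_LEN, 3000));
	return 0;
}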
static int ip6_dst_lookup_tail(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { struct net *net = sock_net(sk); #ifdef CONFIG_IPV6_OPTIMISTIC_DAD struct neighbour *n; struct rt6_info *rt; #endif int err; if (*dst == NULL) *dst = ip6_route_output(net, sk, fl6); if ((err = (*dst)->error)) goto out_err_release; if (ipv6_addr_any(&fl6->saddr)) { struct rt6_info *rt = (struct rt6_info *) *dst; err = ip6_route_get_saddr(net, rt, &fl6->daddr, sk ? inet6_sk(sk)->srcprefs : 0, &fl6->saddr); if (err) goto out_err_release; } #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * Here if the dst entry we've looked up * has a neighbour entry that is in the INCOMPLETE * state and the src address from the flow is * marked as OPTIMISTIC, we release the found * dst entry and replace it instead with the * dst entry of the nexthop router */ rt = (struct rt6_info *) *dst; rcu_read_lock_bh(); n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; rcu_read_unlock_bh(); if (err) { struct inet6_ifaddr *ifp; struct flowi6 fl_gw6; int redirect; ifp = ipv6_get_ifaddr(net, &fl6->saddr, (*dst)->dev, 1); redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); if (ifp) in6_ifa_put(ifp); if (redirect) { /* * We need to get the dst entry for the * default router instead */ dst_release(*dst); memcpy(&fl_gw6, fl6, sizeof(struct flowi6)); memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr)); *dst = ip6_route_output(net, sk, &fl_gw6); if ((err = (*dst)->error)) goto out_err_release; } } #endif return 0; out_err_release: if (err == -ENETUNREACH) IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES); dst_release(*dst); *dst = NULL; return err; }
/* * Check if this packet is complete. * Returns NULL on failure by any reason, and pointer * to current nexthdr field in reassembled frame. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. */ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in, struct net_device *dev) { struct sk_buff *fp, *head = fq->fragments; int remove_fraghdr = 0; int payload_len; int nhoff; fq_kill(fq); BUG_TRAP(head != NULL); BUG_TRAP(FRAG6_CB(head)->offset == 0); /* Unfragmented part is taken from the first segment. */ payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len; nhoff = head->h.raw - head->nh.raw; if (payload_len > 65535) { payload_len -= 8; if (payload_len > 65535) goto out_oversize; remove_fraghdr = 1; } /* Head of list must not be cloned. */ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_oom; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_shinfo(head)->frag_list) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_shinfo(head)->frag_list = NULL; for (i=0; i<skb_shinfo(head)->nr_frags; i++) plen += skb_shinfo(head)->frags[i].size; clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &ip6_frag_mem); } /* Normally we do not remove frag header from datagram, but * we have to do this and to relocate header, when payload * is > 65535-8. */ if (remove_fraghdr) { nhoff = fq->nhoffset; head->nh.raw[nhoff] = head->h.raw[0]; memmove(head->head+8, head->head, (head->data-head->head)-8); head->mac.raw += 8; head->nh.raw += 8; } else { ((struct frag_hdr*)head->h.raw)->frag_off = 0; } skb_shinfo(head)->frag_list = head->next; head->h.raw = head->data; skb_push(head, head->data - head->nh.raw); atomic_sub(head->truesize, &ip6_frag_mem); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_HW) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; atomic_sub(fp->truesize, &ip6_frag_mem); } head->next = NULL; head->dev = dev; head->stamp = fq->stamp; head->nh.ipv6h->payload_len = ntohs(payload_len); *skb_in = head; /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_HW) head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); IP6_INC_STATS_BH(Ip6ReasmOKs); fq->fragments = NULL; return nhoff; out_oversize: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: if (net_ratelimit()) printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n"); out_fail: IP6_INC_STATS_BH(Ip6ReasmFails); return -1; }
int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(dst->dev); u32 mtu; if (net->ipv6.devconf_all->forwarding == 0) goto error; if (skb->pkt_type != PACKET_HOST) goto drop; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } skb_forward_csum(skb); /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without ane WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. --ANK */ if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { if (ip6_call_ra_chain(skb, ntohs(opt->ra))) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } /* XXX: idev->cnf.proxy_ndp? */ if (net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct inet_peer *peer; struct rt6_info *rt; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) target = &rt->rt6i_gateway; else target = &hdr->daddr; peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (inet_peer_xrlim_allow(peer, 1*HZ)) ndisc_send_redirect(skb, target); if (peer) inet_putpeer(peer); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. 
*/ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0); goto error; } } mtu = ip6_dst_mtu_forward(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (ip6_pkt_too_big(skb, mtu)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = ipv6_hdr(skb); /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; }
static int ipv6_frag_rcv(struct sk_buff *skb) { struct frag_hdr *fhdr; struct frag_queue *fq; const struct ipv6hdr *hdr = ipv6_hdr(skb); struct net *net = dev_net(skb_dst(skb)->dev); int evicted; if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) goto fail_hdr; IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); /* Jumbo payload inhibits frag. header */ if (hdr->payload_len==0) goto fail_hdr; if (!pskb_may_pull(skb, (skb_transport_offset(skb) + sizeof(struct frag_hdr)))) goto fail_hdr; hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); if (!(fhdr->frag_off & htons(0xFFF9))) { /* It is not a fragmented frame */ skb->transport_header += sizeof(struct frag_hdr); IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); IP6CB(skb)->flags |= IP6SKB_FRAGMENTED; return 1; } evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false); if (evicted) IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS, evicted); fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, ip6_frag_ecn(hdr)); if (fq != NULL) { int ret; spin_lock(&fq->q.lock); ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); spin_unlock(&fq->q.lock); inet_frag_put(&fq->q, &ip6_frags); return ret; } IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -1; fail_hdr: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb)); return -1; }
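The `frag_off & htons(0xFFF9)` test in ipv6_frag_rcv() above treats a fragment header with offset zero and the M flag clear (the two reserved bits are ignored by the mask) as a packet that is not really fragmented, so reassembly is skipped. Otherwise the offset is the 16-bit field with its low three bits masked, which is already a byte count because the 13-bit offset field counts 8-octet units. Below is a sketch of that field handling on a raw 16-bit value; frag_offset_bytes and frag_is_atomic are illustrative names, and IP6_MF/IP6_OFFSET are defined locally for the sketch.

/* Hypothetical sketch of fragment-header frag_off interpretation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define IP6_MF		0x0001	/* more-fragments flag (host order here) */
#define IP6_OFFSET	0xFFF8	/* 13-bit offset field, low 3 bits masked */

static unsigned int frag_offset_bytes(uint16_t frag_off_net)
{
	/* offset field is in 8-octet units, so masking the low 3 bits of the
	 * host-order value yields the offset in bytes directly */
	return ntohs(frag_off_net) & IP6_OFFSET;
}

static bool frag_is_atomic(uint16_t frag_off_net)
{
	/* same idea as the ipv6_frag_rcv() test: neither offset nor M set */
	return (frag_off_net & htons(0xFFF9)) == 0;
}

int main(void)
{
	uint16_t first = htons(0x0001);	/* offset 0, M=1: first fragment */
	uint16_t last  = htons(0x04D0);	/* offset 1232 bytes, M=0: last fragment */

	printf("first: off=%u atomic=%d\n", frag_offset_bytes(first),
	       frag_is_atomic(first));
	printf("last:  off=%u atomic=%d\n", frag_offset_bytes(last),
	       frag_is_atomic(last));
	return 0;
}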
static int ip6_dst_lookup_tail(struct sock *sk, struct dst_entry **dst, struct flowi *fl) { int err; if (*dst == NULL) *dst = ip6_route_output(sk, fl); if ((err = (*dst)->error)) goto out_err_release; if (ipv6_addr_any(&fl->fl6_src)) { err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); if (err) goto out_err_release; } #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * Here if the dst entry we've looked up * has a neighbour entry that is in the INCOMPLETE * state and the src address from the flow is * marked as OPTIMISTIC, we release the found * dst entry and replace it instead with the * dst entry of the nexthop router */ if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) { struct inet6_ifaddr *ifp; struct flowi fl_gw; int redirect; ifp = ipv6_get_ifaddr(&fl->fl6_src, (*dst)->dev, 1); redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); if (ifp) in6_ifa_put(ifp); if (redirect) { /* * We need to get the dst entry for the * default router instead */ dst_release(*dst); memcpy(&fl_gw, fl, sizeof(struct flowi)); memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr)); *dst = ip6_route_output(sk, &fl_gw); if ((err = (*dst)->error)) goto out_err_release; } } #endif return 0; out_err_release: if (err == -ENETUNREACH) IP6_INC_STATS_BH(NULL, IPSTATS_MIB_OUTNOROUTES); dst_release(*dst); *dst = NULL; return err; }
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, struct frag_hdr *fhdr, int nhoff) { struct sk_buff *prev, *next; struct net_device *dev; int offset, end; struct net *net = dev_net(skb->dst->dev); if (fq->q.last_in & INET_FRAG_COMPLETE) goto err; offset = ntohs(fhdr->frag_off) & ~0x7; end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, ((u8 *)&fhdr->frag_off - skb_network_header(skb))); return -1; } if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < fq->q.len || ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) goto err; fq->q.last_in |= INET_FRAG_LAST_IN; fq->q.len = end; } else { /* Check if the fragment is rounded to 8 bytes. * Required by the RFC. */ if (end & 0x7) { /* RFC2460 says always send parameter problem in * this case. -DaveM */ IP6_INC_STATS_BH(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, offsetof(struct ipv6hdr, payload_len)); return -1; } if (end > fq->q.len) { /* Some bits beyond end -> corruption. */ if (fq->q.last_in & INET_FRAG_LAST_IN) goto err; fq->q.len = end; } } if (end == offset) goto err; /* Point into the IP datagram 'data' part. */ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) goto err; if (pskb_trim_rcsum(skb, end - offset)) goto err; /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put * this fragment, right? */ prev = NULL; for(next = fq->q.fragments; next != NULL; next = next->next) { if (FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; } /* We found where to put this one. Check for overlap with * preceding fragment, and, if needed, align things so that * any overlaps are eliminated. */ if (prev) { int i = (FRAG6_CB(prev)->offset + prev->len) - offset; if (i > 0) { offset += i; if (end <= offset) goto err; if (!pskb_pull(skb, i)) goto err; if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_NONE; } } /* Look for overlap with succeeding segments. * If we can merge fragments, do it. */ while (next && FRAG6_CB(next)->offset < end) { int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */ if (i < next->len) { /* Eat head of the next overlapped fragment * and leave the loop. The next ones cannot overlap. */ if (!pskb_pull(next, i)) goto err; FRAG6_CB(next)->offset += i; /* next fragment */ fq->q.meat -= i; if (next->ip_summed != CHECKSUM_UNNECESSARY) next->ip_summed = CHECKSUM_NONE; break; } else { struct sk_buff *free_it = next; /* Old fragment is completely overridden with * new one drop it. */ next = next->next; if (prev) prev->next = next; else fq->q.fragments = next; fq->q.meat -= free_it->len; frag_kfree_skb(fq->q.net, free_it, NULL); } } FRAG6_CB(skb)->offset = offset; /* Insert this fragment in the chain of fragments. 
*/ skb->next = next; if (prev) prev->next = skb; else fq->q.fragments = skb; dev = skb->dev; if (dev) { fq->iif = dev->ifindex; skb->dev = NULL; } fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; atomic_add(skb->truesize, &fq->q.net->mem); /* The first fragment. * nhoffset is obtained from the first fragment, of course. */ if (offset == 0) { fq->nhoffset = nhoff; fq->q.last_in |= INET_FRAG_FIRST_IN; } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len) return ip6_frag_reasm(fq, prev, dev); write_lock(&ip6_frags.lock); list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); write_unlock(&ip6_frags.lock); return -1; err: IP6_INC_STATS(net, ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -1; }
/* * Check if this packet is complete. * Returns NULL on failure by any reason, and pointer * to current nexthdr field in reassembled frame. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. */ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev) { struct net *net = container_of(fq->q.net, struct net, ipv6.frags); struct sk_buff *fp, *head = fq->q.fragments; int payload_len; unsigned int nhoff; fq_kill(fq); /* Make the one we just received the head. */ if (prev) { head = prev->next; fp = skb_clone(head, GFP_ATOMIC); if (!fp) goto out_oom; fp->next = head->next; if (!fp->next) fq->q.fragments_tail = fp; prev->next = fp; skb_morph(head, fq->q.fragments); head->next = fq->q.fragments->next; kfree_skb(fq->q.fragments); fq->q.fragments = head; } WARN_ON(head == NULL); WARN_ON(FRAG6_CB(head)->offset != 0); /* Unfragmented part is taken from the first segment. */ payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) goto out_oversize; /* Head of list must not be cloned. */ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) goto out_oom; /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. */ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; atomic_add(clone->truesize, &fq->q.net->mem); } /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. */ nhoff = fq->nhoffset; skb_network_header(head)[nhoff] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); if (skb_mac_header_was_set(head)) head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } atomic_sub(head->truesize, &fq->q.net->mem); head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; IP6CB(head)->flags |= IP6SKB_FRAGMENTED; /* Yes, and fold redundant checksum back. 
8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; fq->q.fragments_tail = NULL; return 1; out_oversize: net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n"); out_fail: rcu_read_lock(); IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); rcu_read_unlock(); return -1; }
static int ip6_input_finish(struct sk_buff *skb) { struct net *net = dev_net(skb_dst(skb)->dev); const struct inet6_protocol *ipprot; struct inet6_dev *idev; unsigned int nhoff; int nexthdr; bool raw; /* * Parse extension headers */ rcu_read_lock(); resubmit: idev = ip6_dst_idev(skb_dst(skb)); if (!pskb_pull(skb, skb_transport_offset(skb))) goto discard; nhoff = IP6CB(skb)->nhoff; nexthdr = skb_network_header(skb)[nhoff]; raw = raw6_local_deliver(skb, nexthdr); if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) { int ret; if (ipprot->flags & INET6_PROTO_FINAL) { const struct ipv6hdr *hdr; /* Free reference early: we don't need it any more, and it may hold ip_conntrack module loaded indefinitely. */ nf_reset(skb); skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); hdr = ipv6_hdr(skb); if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, &hdr->saddr) && !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) goto discard; } if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; ret = ipprot->handler(skb); if (ret > 0) goto resubmit; else if (ret == 0) IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); } else { if (!raw) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INUNKNOWNPROTOS); icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_UNK_NEXTHDR, nhoff); } kfree_skb(skb); } else { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); consume_skb(skb); } } rcu_read_unlock(); return 0; discard: IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); kfree_skb(skb); return 0; }
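ip6_input_finish() above dispatches on the next-header value stored at IP6CB(skb)->nhoff and resubmits whenever a handler strips an extension header and returns a positive value. Below is a very reduced sketch of that resubmit loop, assuming a plain function-pointer table in place of inet6_protos[]; handle_hopopts, handle_tcp, and deliver are made-up stand-ins, not the kernel handlers.

/* Hypothetical sketch of the next-header dispatch / resubmit pattern. */
#include <stdio.h>

#define MAX_PROTOS 256

/* handler returns >0 to resubmit with that next-header value,
 * 0 when the packet has been delivered, <0 on error */
typedef int (*l4_handler)(void);

static int handle_hopopts(void) { return 6; }	/* pretend next header is TCP */
static int handle_tcp(void)     { printf("delivered to TCP\n"); return 0; }

static l4_handler protos[MAX_PROTOS] = {
	[0] = handle_hopopts,	/* hop-by-hop options */
	[6] = handle_tcp,	/* TCP */
};

static void deliver(int nexthdr)
{
	int ret;

resubmit:
	if (!protos[nexthdr]) {
		printf("unknown next header %d: would send ICMPv6 param problem\n",
		       nexthdr);
		return;
	}
	ret = protos[nexthdr]();
	if (ret > 0) {
		nexthdr = ret;
		goto resubmit;	/* same pattern as the kernel's resubmit label */
	}
}

int main(void)
{
	deliver(0);	/* hop-by-hop -> TCP */
	deliver(250);	/* no handler registered */
	return 0;
}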
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; struct net_device_extended *ext = dev_extended(dev); if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); return 0; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || (ext && unlikely(ext->ipv6_devconf_ext.disable_ipv6))) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); goto out; } memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = &loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji */ IP6CB(skb)->iif = skb->dst ? ((struct rt6_info *)skb->dst)->rt6i_idev->dev->ifindex : dev->ifindex; if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) goto err; hdr = skb->nh.ipv6h; if (hdr->version != 6) goto err; /* * RFC4291 2.5.3 * A packet received on an interface with a destination address * of loopback must be dropped. */ if (!(dev->flags & IFF_LOOPBACK) && ipv6_addr_loopback(&hdr->daddr)) goto err; skb->h.raw = (u8 *)(hdr + 1); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) goto truncated; if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = skb->nh.ipv6h; } if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb) < 0) { IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return 0; } } rcu_read_unlock(); return NF_HOOK(PF_INET6,NF_IP6_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); truncated: IP6_INC_STATS_BH(idev, IPSTATS_MIB_INTRUNCATEDPKTS); err: IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); out: return 0; }
int ipv6_reassembly(struct sk_buff **skbp, int nhoff) { struct sk_buff *skb = *skbp; struct inet6_dev *idev = in6_dev_get(skb->dev); struct net_device *dev = skb->dev; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; hdr = skb->nh.ipv6h; IP6_INC_STATS_BH(idev,Ip6ReasmReqds); /* Jumbo payload inhibits frag. header */ if (hdr->payload_len==0) { if (idev) in6_dev_put(idev); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); return -1; } if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+sizeof(struct frag_hdr))) { if (idev) in6_dev_put(idev); icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb->h.raw-skb->nh.raw); return -1; } hdr = skb->nh.ipv6h; fhdr = (struct frag_hdr *)skb->h.raw; if (!(fhdr->frag_off & htons(0xFFF9))) { /* It is not a fragmented frame */ skb->h.raw += sizeof(struct frag_hdr); IP6_INC_STATS_BH(idev,Ip6ReasmOKs); if (idev) in6_dev_put(idev); return (u8*)fhdr - skb->nh.raw; } if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh) ip6_evictor(idev); if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr, idev)) != NULL) { int ret = -1; spin_lock(&fq->lock); ip6_frag_queue(fq, skb, fhdr, nhoff); if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) ret = ip6_frag_reasm(fq, skbp, dev); spin_unlock(&fq->lock); fq_put(fq); if (idev) in6_dev_put(idev); return ret; } IP6_INC_STATS_BH(idev,Ip6ReasmFails); if (idev) in6_dev_put(idev); kfree_skb(skb); return -1; }