/*
 * xttarpit_tarpit - build a tarpit reply TCP header for an inbound segment.
 * @tcph: reply TCP header to fill in (seq/ack/flags/window)
 * @oth:  TCP header of the original (offending) packet
 *
 * Returns false when no reply should be sent (RST, FIN, or a segment with
 * neither SYN nor ACK set); returns true when @tcph has been populated and
 * the caller should transmit it.
 *
 * Tarpit behaviour: a SYN is answered with a SYN-ACK advertising a tiny
 * but non-zero window, so the peer completes the handshake and then stalls;
 * an unsolicited SYN-ACK is answered with a RST; anything else is ACKed
 * with a zero window.
 */
static bool xttarpit_tarpit(struct tcphdr *tcph, const struct tcphdr *oth)
{
    /* No replies for RST, FIN or !SYN,!ACK */
    if (oth->rst || oth->fin || (!oth->syn && !oth->ack))
        return false;

    /* Echo the peer's ACK number as our sequence number, or start at 0. */
    tcph->seq = oth->ack ? oth->ack_seq : 0;

    /* Our SYN-ACKs must have a >0 window */
    tcph->window = (oth->syn && !oth->ack) ? htons(5) : 0;

    if (oth->syn && oth->ack) {
        /* Unsolicited SYN-ACK: reset the connection, no ACK number. */
        tcph->rst = true;
        /*
         * Fixed: was "tcph->ack_seq = false;" -- a bool literal written to
         * a 32-bit sequence field. Same value, but wrong type idiom and
         * inconsistent with the sibling implementation, which uses 0.
         */
        tcph->ack_seq = 0;
    } else {
        /* SYN or bare ACK: acknowledge it (a SYN consumes one sequence). */
        tcph->syn = oth->syn;
        tcph->ack = true;
        tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn);
    }

#if 0
    /*
     * Rate-limit replies to !SYN,ACKs -- disabled. NOTE(review): "ort" is
     * not in scope in this function, so this block would not compile if
     * re-enabled as-is.
     */
    if (!oth->syn && oth->ack)
        if (!xrlim_allow(&ort->dst, HZ))
            return false;
#endif

    return true;
}
static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code) { struct dst_entry *dst = &rt->u.dst; if (type > NR_ICMP_TYPES || !icmp_pointers[type].timeout) return 1; /* Don't limit PMTU discovery. */ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) return 1; /* Redirect has its own rate limit mechanism */ if (type == ICMP_REDIRECT) return 1; /* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) return 1; return xrlim_allow(dst, *(icmp_pointers[type].timeout)); }
/*
 * ip6_forward - forward an IPv6 packet not addressed to this host.
 * @skb: the packet; consumed on every path (forwarded or freed).
 *
 * Returns 0 on success/handled, a negative errno when the packet is
 * dropped (an ICMPv6 error may have been emitted first).
 */
int ip6_forward(struct sk_buff *skb)
{
    struct dst_entry *dst = skb->dst;
    struct ipv6hdr *hdr = skb->nh.ipv6h;
    struct inet6_skb_parm *opt = IP6CB(skb);

    /* Forwarding must be enabled globally. */
    if (ipv6_devconf.forwarding == 0)
        goto error;

    /* IPsec policy must permit forwarding this packet. */
    if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
        IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
        goto drop;
    }

    /* Invalidate any receive-side checksum state before mangling. */
    skb->ip_summed = CHECKSUM_NONE;

    /*
     * We DO NOT make any processing on
     * RA packets, pushing them to user level AS IS
     * without any WARRANTY that application will be able
     * to interpret them. The reason is that we
     * cannot make anything clever here.
     *
     * We are not end-node, so that if packet contains
     * AH/ESP, we cannot make anything.
     * Defragmentation also would be mistake, RA packets
     * cannot be fragmented, because there is no warranty
     * that different fragments will go along one path. --ANK
     */
    if (opt->ra) {
        u8 *ptr = skb->nh.raw + opt->ra;
        /* Router Alert: hand off to registered RA chain listeners. */
        if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
            return 0;
    }

    /*
     * check and decrement ttl
     */
    if (hdr->hop_limit <= 1) {
        /* Force OUTPUT device used as source address */
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                    0, skb->dev);
        kfree_skb(skb);
        return -ETIMEDOUT;
    }

    if (!xfrm6_route_forward(skb)) {
        IP6_INC_STATS(IPSTATS_MIB_INDISCARDS);
        goto drop;
    }
    /* xfrm6_route_forward() may have replaced the route. */
    dst = skb->dst;

    /* IPv6 specs say nothing about it, but it is clear that we cannot
       send redirects to source routed frames. */
    if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
        struct in6_addr *target = NULL;
        struct rt6_info *rt;
        struct neighbour *n = dst->neighbour;

        /*
         * incoming and outgoing devices are the same
         * send a redirect.
         */
        rt = (struct rt6_info *) dst;
        if ((rt->rt6i_flags & RTF_GATEWAY))
            target = (struct in6_addr*)&n->primary_key;
        else
            target = &hdr->daddr;

        /* Limit redirects both by destination (here)
           and by source (inside ndisc_send_redirect) */
        if (xrlim_allow(dst, 1*HZ))
            ndisc_send_redirect(skb, n, target);
    } else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
                                            |IPV6_ADDR_LINKLOCAL)) {
        /* This check is security critical: never forward packets with
           multicast, loopback or link-local source addresses. */
        goto error;
    }

    if (skb->len > dst_mtu(dst)) {
        /* Again, force OUTPUT device used as source address */
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
        IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS);
        IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
    }

    /* Ensure private, writable header space before modifying it. */
    if (skb_cow(skb, dst->dev->hard_header_len)) {
        IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        goto drop;
    }

    /* skb_cow() may have reallocated; refetch the header pointer. */
    hdr = skb->nh.ipv6h;

    /* Mangling hops number delayed to point after skb COW */
    hdr->hop_limit--;

    IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);
    return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish);

error:
    IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
drop:
    kfree_skb(skb);
    return -EINVAL;
}
/*
 * tarpit_tcp - send a tarpit TCP reply for @oskb.
 * @oskb:  offending packet (not consumed; a copy is made for the reply)
 * @ort:   route of the original packet, used for reply rate limiting
 * @local: non-zero when the original destination address was local;
 *         then the reply is sourced from that address, else route-chosen
 *
 * Builds the reply by copying the original skb, swapping addresses/ports,
 * setting tarpit flags (SYN-ACK with tiny window, RST for SYN-ACK, zero-
 * window ACK otherwise), re-routing and transmitting it directly.
 */
static void tarpit_tcp(struct sk_buff *oskb, struct rtable *ort, int local)
{
    struct sk_buff *nskb;
    struct rtable *nrt;
    struct tcphdr *otcph, *ntcph;
    struct flowi fl = {};
    unsigned int otcplen;
    u_int16_t tmp;

    /* A truncated TCP header isn't going to be useful */
    if (oskb->len < (ip_hdr(oskb)->ihl*4) + sizeof(struct tcphdr))
        return;

    otcph = (struct tcphdr *)((u_int32_t*)ip_hdr(oskb) + ip_hdr(oskb)->ihl);
    otcplen = oskb->len - ip_hdr(oskb)->ihl*4;

    /* No replies for RST or FIN */
    if (otcph->rst || otcph->fin)
        return;

    /* No reply to !SYN,!ACK.  Rate-limit replies to !SYN,ACKs */
    if (!otcph->syn && (!otcph->ack || !xrlim_allow(&ort->u.dst, 1*HZ)))
        return;

    /* Check checksum: silently ignore corrupted segments. */
    if (tcp_v4_check(otcplen, ip_hdr(oskb)->saddr, ip_hdr(oskb)->daddr,
                     csum_partial((char *)otcph, otcplen, 0)) != 0)
        return;

    /* Copy skb (even if skb is about to be dropped, we can't just
       clone it because there may be other things, such as tcpdump,
       interested in it) */
    nskb = skb_copy(oskb, GFP_ATOMIC);
    if (!nskb)
        return;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    /* This packet will not be the same as the other: clear nf fields */
    nf_conntrack_put(nskb->nfct);
    nskb->nfct = NULL;
#endif /* CONFIG_NF_CONNTRACK */

    ntcph = (struct tcphdr *)((u_int32_t*)ip_hdr(nskb) + ip_hdr(nskb)->ihl);

    /* Truncate to length (no data) */
    ntcph->doff = sizeof(struct tcphdr)/4;
    skb_trim(nskb, ip_hdr(nskb)->ihl*4 + sizeof(struct tcphdr));
    ip_hdr(nskb)->tot_len = htons(nskb->len);

    /* Swap source and dest */
    ip_hdr(nskb)->daddr = xchg(&ip_hdr(nskb)->saddr, ip_hdr(nskb)->daddr);
    tmp = ntcph->source;
    ntcph->source = ntcph->dest;
    ntcph->dest = tmp;

    /* Use supplied sequence number or make a new one */
    ntcph->seq = otcph->ack ? otcph->ack_seq
        : htonl(secure_tcp_sequence_number(ip_hdr(nskb)->saddr,
                                           ip_hdr(nskb)->daddr,
                                           ntcph->source,
                                           ntcph->dest));

    /* Our SYN-ACKs must have a >0 window */
    ntcph->window = (otcph->syn && !otcph->ack) ? htons(5) : 0;

    ntcph->urg_ptr = 0;

    /* Reset flags: byte 13 of the TCP header holds all flag bits. */
    ((u_int8_t *)ntcph)[13] = 0;

    if (otcph->syn && otcph->ack) {
        /* Unsolicited SYN-ACK: answer with a RST. */
        ntcph->rst = 1;
        ntcph->ack_seq = 0;
    } else {
        /* SYN or bare ACK: acknowledge (a SYN consumes one sequence). */
        ntcph->syn = otcph->syn;
        ntcph->ack = 1;
        ntcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn);
    }

    /* Adjust TCP checksum */
    ntcph->check = 0;
    ntcph->check = tcp_v4_check(sizeof(struct tcphdr),
                                ip_hdr(nskb)->saddr,
                                ip_hdr(nskb)->daddr,
                                csum_partial((char *)ntcph,
                                             sizeof(struct tcphdr), 0));

    /* Route the reply; source is forced only for locally-addressed hits. */
    fl.nl_u.ip4_u.daddr = ip_hdr(nskb)->daddr;
    fl.nl_u.ip4_u.saddr = local ? ip_hdr(nskb)->saddr : 0;
    fl.nl_u.ip4_u.tos = RT_TOS(ip_hdr(nskb)->tos) | RTO_CONN;
    fl.oif = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
    if (ip_route_output_key(&init_net, &nrt, &fl))
#else
    if (ip_route_output_key(&nrt, &fl))
#endif
        goto free_nskb;

    dst_release(nskb->dst);
    nskb->dst = &nrt->u.dst;

    /* Adjust IP TTL */
    ip_hdr(nskb)->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
    /* Set DF, id = 0 */
    ip_hdr(nskb)->frag_off = htons(IP_DF);
    ip_hdr(nskb)->id = 0;

    /* Adjust IP checksum */
    ip_hdr(nskb)->check = 0;
    ip_hdr(nskb)->check = ip_fast_csum((unsigned char *)ip_hdr(nskb),
                                       ip_hdr(nskb)->ihl);

    /* "Never happens": the header-only reply should always fit the MTU. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
    if (nskb->len > dst_mtu(nskb->dst))
#else
    if (nskb->len > dst_pmtu(nskb->dst))
#endif
        goto free_nskb;

    ip_direct_send (nskb);
    return;

free_nskb:
    kfree_skb(nskb);
}
/*
 * send_unreach - emit an ICMP destination-unreachable for @skb_in.
 * @skb_in: packet being rejected
 * @code:   ICMP unreachable code to send
 *
 * Applies the usual ICMP suppression rules (rate limit, no replies to
 * broadcast/multicast, non-initial fragments, bad UDP checksums, or to
 * ICMP errors), then routes the reply.
 *
 * NOTE(review): the visible body ends right after ip_route_output_key();
 * the remainder of the function (building/sending the reply skb, which
 * presumably uses nskb/data/hh_len/length declared below) appears to be
 * truncated in this chunk.
 */
static void send_unreach(struct sk_buff *skb_in, int code)
{
    struct iphdr *iph;
    struct udphdr *udph;
    struct icmphdr *icmph;
    struct sk_buff *nskb;
    u32 saddr;
    u8 tos;
    int hh_len, length;
    struct rtable *rt = (struct rtable*)skb_in->dst;
    unsigned char *data;

    if (!rt)
        return;

    /* FIXME: Use sysctl number. --RR */
    if (!xrlim_allow(&rt->u.dst, 1*HZ))
        return;

    iph = skb_in->nh.iph;

    /* No replies to physical multicast/broadcast */
    if (skb_in->pkt_type!=PACKET_HOST)
        return;

    /* Now check at the protocol level */
    if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
        return;

    /* Only reply to fragment 0. */
    if (iph->frag_off&htons(IP_OFFSET))
        return;

    /* Ensure we have at least 8 bytes of proto header. */
    if (skb_in->len < skb_in->nh.iph->ihl*4 + 8)
        return;

    /* if UDP checksum is set, verify it's correct */
    if (iph->protocol == IPPROTO_UDP
        && skb_in->tail-(u8*)iph >= sizeof(struct udphdr)) {
        int datalen = skb_in->len - (iph->ihl<<2);
        udph = (struct udphdr *)((char *)iph + (iph->ihl<<2));
        if (udph->check
            && csum_tcpudp_magic(iph->saddr, iph->daddr,
                                 datalen, IPPROTO_UDP,
                                 csum_partial((char *)udph, datalen,
                                              0)) != 0)
            return;
    }

    /* If we send an ICMP error to an ICMP error a mess would result.. */
    if (iph->protocol == IPPROTO_ICMP
        && skb_in->tail-(u8*)iph >= sizeof(struct icmphdr)) {
        icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));
        /* NOTE(review): copies the ICMP header into the skb's own linear
           area via skb_copy_bits -- presumably to handle nonlinear skbs;
           confirm against the surrounding version's conventions. */
        if (skb_copy_bits(skb_in, skb_in->nh.iph->ihl*4,
                          icmph, sizeof(*icmph)) < 0)
            return;

        /* Between echo-reply (0) and timestamp (13),
           everything except echo-request (8) is an error.
           Also, anything greater than NR_ICMP_TYPES is
           unknown, and hence should be treated as an error... */
        if ((icmph->type < ICMP_TIMESTAMP
             && icmph->type != ICMP_ECHOREPLY
             && icmph->type != ICMP_ECHO)
            || icmph->type > NR_ICMP_TYPES)
            return;
    }

    /* Reply from the original destination when it was local, else let
       routing pick the source address. */
    saddr = iph->daddr;
    if (!(rt->rt_flags & RTCF_LOCAL))
        saddr = 0;

    tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL;

    {
        struct flowi fl = {
            .nl_u = {
                .ip4_u = {
                    .daddr = skb_in->nh.iph->saddr,
                    .saddr = saddr,
                    .tos = RT_TOS(tos) } },
            .proto = IPPROTO_ICMP,
            .uli_u = {
                .icmpt = {
                    .type = ICMP_DEST_UNREACH,
                    .code = code } } };
        if (ip_route_output_key(&rt, &fl))
            return;
    }
    /* NOTE(review): chunk ends here -- function continues elsewhere. */
/*
 * icmp_send - build and transmit an ICMP message in response to @skb_in.
 * @skb_in: packet that triggered the ICMP message
 * @type:   ICMP type to send
 * @code:   ICMP code to send
 * @info:   extra header info (e.g. gateway address or MTU), stored in
 *          the ICMP header's un.gateway field
 * @dev:    device the original packet arrived on; its address may be
 *          used as the reply source
 *
 * Applies the standard suppression rules (no replies to broadcast/
 * multicast, non-initial fragments, or ICMP errors; rate limiting),
 * then constructs the reply and hands it to icmp_build_xmit().
 */
void icmp_send(struct sk_buff *skb_in, int type, int code, unsigned long info, struct device *dev)
{
    struct iphdr *iph;
    struct icmphdr *icmph;
    int atype, room;
    struct icmp_bxm icmp_param;
    __u32 saddr;

    /*
     * Find the original header
     */
    iph = skb_in->ip_hdr;

    /*
     * No replies to physical multicast/broadcast
     */
    if(skb_in->pkt_type!=PACKET_HOST)
        return;

    /*
     * Now check at the protocol level
     */
    atype=ip_chk_addr(iph->daddr);
    if(atype==IS_BROADCAST||atype==IS_MULTICAST)
        return;

    /*
     * Only reply to fragment 0. We byte re-order the constant
     * mask for efficiency.
     */
    if(iph->frag_off&htons(IP_OFFSET))
        return;

    /*
     * If we send an ICMP error to an ICMP error a mess would result..
     */
    if(icmp_pointers[type].error)
    {
        /*
         * We are an error, check if we are replying to an ICMP error
         */
        if(iph->protocol==IPPROTO_ICMP)
        {
            icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));
            /*
             * Assume any unknown ICMP type is an error. This isn't
             * specified by the RFC, but think about it..
             */
            if(icmph->type>18 || icmp_pointers[icmph->type].error)
                return;
        }
    }

    /*
     * Check the rate limit
     */
#ifndef CONFIG_NO_ICMP_LIMIT
    if (!xrlim_allow(type, iph->saddr))
        return;
#endif

    /*
     * Construct source address and options: prefer the original
     * destination, falling back to the device's address when the
     * destination is not ours.
     */
    saddr=iph->daddr;
    if(saddr!=dev->pa_addr && dev->pa_addr != 0 && ip_chk_addr(saddr)!=IS_MYADDR)
        saddr=dev->pa_addr;
    if(ip_options_echo(&icmp_param.replyopts, NULL, saddr, iph->saddr, skb_in))
        return;

    /*
     * Prepare data for ICMP header.
     */
    icmp_param.icmph.type=type;
    icmp_param.icmph.code=code;
    icmp_param.icmph.un.gateway = info;
    icmp_param.data_ptr=iph;
    room = 576 - sizeof(struct iphdr) - icmp_param.replyopts.optlen;
    icmp_param.data_len=(iph->ihl<<2)+skb_in->len;
    /* RFC says return as much as we can without exceeding 576 bytes */
    if (icmp_param.data_len > room)
        icmp_param.data_len = room;

    /*
     * Build and send the packet. Errors get TOS precedence
     * INTERNETCONTROL (0xC0) with the low TOS bits preserved.
     */
    icmp_build_xmit(&icmp_param, saddr, iph->saddr,
                    icmp_pointers[type].error ?
                    (iph->tos & 0x1E) | 0xC0 : iph->tos);
}
/*
 * ip6_forward - forward an IPv6 packet not addressed to this host
 * (variant with per-net config, NDP proxying and a VENET fast path).
 * @skb: the packet; consumed on every path (forwarded or freed).
 *
 * Returns 0 on success/handled, a negative errno when the packet is
 * dropped (an ICMPv6 error may have been emitted first).
 */
int ip6_forward(struct sk_buff *skb)
{
    struct dst_entry *dst = skb_dst(skb);
    struct ipv6hdr *hdr = ipv6_hdr(skb);
    struct inet6_skb_parm *opt = IP6CB(skb);
    struct net *net = dev_net(dst->dev);

    /* Forwarding must be enabled in this namespace. */
    if (net->ipv6.devconf_all->forwarding == 0)
        goto error;

    /* LRO-merged skbs must not be forwarded. */
    if (skb_warn_if_lro(skb))
        goto drop;

    /* IPsec policy must permit forwarding this packet. */
    if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
        IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
        goto drop;
    }

    /* Never forward packets not addressed to us at L2. */
    if (skb->pkt_type != PACKET_HOST)
        goto drop;

    skb_forward_csum(skb);

    /*
     * We DO NOT make any processing on
     * RA packets, pushing them to user level AS IS
     * without any WARRANTY that application will be able
     * to interpret them. The reason is that we
     * cannot make anything clever here.
     *
     * We are not end-node, so that if packet contains
     * AH/ESP, we cannot make anything.
     * Defragmentation also would be mistake, RA packets
     * cannot be fragmented, because there is no warranty
     * that different fragments will go along one path. --ANK
     */
    if (opt->ra) {
        u8 *ptr = skb_network_header(skb) + opt->ra;
        /* Router Alert: hand off to registered RA chain listeners. */
        if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
            return 0;
    }

    /*
     * check and decrement ttl
     */
    if (hdr->hop_limit <= 1) {
        /* Force OUTPUT device used as source address */
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                    0, skb->dev);
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
        kfree_skb(skb);
        return -ETIMEDOUT;
    }

    /* XXX: idev->cnf.proxy_ndp? */
    if (net->ipv6.devconf_all->proxy_ndp &&
        pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
        /* Destination is an NDP-proxied address: deliver locally
           instead of forwarding, or drop on a hard failure. */
        int proxied = ip6_forward_proxy_check(skb);
        if (proxied > 0)
            return ip6_input(skb);
        else if (proxied < 0) {
            IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
            goto drop;
        }
    }

    if (!xfrm6_route_forward(skb)) {
        IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
        goto drop;
    }
    /* xfrm6_route_forward() may have replaced the route. */
    dst = skb_dst(skb);

    /* IPv6 specs say nothing about it, but it is clear that we cannot
       send redirects to source routed frames.
       We don't send redirects to frames decapsulated from IPsec.
     */
    if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
        !skb_sec_path(skb)) {
        struct in6_addr *target = NULL;
        struct rt6_info *rt;
        struct neighbour *n = dst->neighbour;

        /*
         * incoming and outgoing devices are the same
         * send a redirect.
         */
        rt = (struct rt6_info *) dst;
        if ((rt->rt6i_flags & RTF_GATEWAY))
            target = (struct in6_addr*)&n->primary_key;
        else
            target = &hdr->daddr;

        /* Limit redirects both by destination (here)
           and by source (inside ndisc_send_redirect) */
        if (xrlim_allow(dst, 1*HZ))
            ndisc_send_redirect(skb, n, target);
    } else {
        int addrtype = ipv6_addr_type(&hdr->saddr);

        /* This check is security critical: reject unspecified,
           multicast and loopback source addresses, and answer
           link-local sources with a not-neighbour error. */
        if (addrtype == IPV6_ADDR_ANY ||
            addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
            goto error;
        if (addrtype & IPV6_ADDR_LINKLOCAL) {
            icmpv6_send(skb, ICMPV6_DEST_UNREACH,
                        ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
            goto error;
        }
    }

    if (skb->len > dst_mtu(dst) && !skb_is_gso(skb)) {
        /* Again, force OUTPUT device used as source address */
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
    }

    /*
     * We try to optimize forwarding of VE packets:
     * do not decrement TTL (and so save skb_cow)
     * during forwarding of outgoing pkts from VE.
     * For incoming pkts we still do ttl decr,
     * since such skb is not cloned and does not require
     * actual cow. So, there is at least one place
     * in pkts path with mandatory ttl decr, that is
     * sufficient to prevent routing loops.
     */
    hdr = ipv6_hdr(skb);
    if (skb->dev->features & NETIF_F_VENET) /* src is VENET device */
        goto no_ttl_decr;

    /* Ensure private, writable header space before modifying it. */
    if (skb_cow(skb, dst->dev->hard_header_len)) {
        IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
        goto drop;
    }

    /* skb_cow() may have reallocated; refetch the header pointer. */
    hdr = ipv6_hdr(skb);

    /* Mangling hops number delayed to point after skb COW */
    hdr->hop_limit--;

no_ttl_decr:
    IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
    return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
                   ip6_forward_finish);

error:
    IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
    kfree_skb(skb);
    return -EINVAL;
}
/*
 * send_unreach - build and transmit an ICMP destination-unreachable
 * in response to @skb_in.
 * @skb_in: packet being rejected (not consumed)
 * @code:   ICMP unreachable code to send
 *
 * Applies the usual ICMP suppression rules (rate limit, no replies to
 * broadcast/multicast, non-initial fragments, bad UDP checksums, or to
 * ICMP errors), then allocates a fresh skb, fills in IP and ICMP
 * headers plus as much of the original packet as fits in 576 bytes,
 * and injects it via the LOCAL_OUT netfilter hook.
 */
static void send_unreach(struct sk_buff *skb_in, int code)
{
    struct iphdr *iph;
    struct udphdr *udph;
    struct icmphdr *icmph;
    struct sk_buff *nskb;
    u32 saddr;
    u8 tos;
    int hh_len, length;
    struct rtable *rt = (struct rtable*)skb_in->dst;
    unsigned char *data;

    if (!rt)
        return;

    /* FIXME: Use sysctl number. --RR */
    if (!xrlim_allow(&rt->u.dst, 1*HZ))
        return;

    iph = skb_in->nh.iph;

    /* No replies to physical multicast/broadcast */
    if (skb_in->pkt_type!=PACKET_HOST)
        return;

    /* Now check at the protocol level */
    if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST))
        return;

    /* Only reply to fragment 0. */
    if (iph->frag_off&htons(IP_OFFSET))
        return;

    /* if UDP checksum is set, verify it's correct */
    if (iph->protocol == IPPROTO_UDP
        && skb_in->tail-(u8*)iph >= sizeof(struct udphdr)) {
        int datalen = skb_in->len - (iph->ihl<<2);
        udph = (struct udphdr *)((char *)iph + (iph->ihl<<2));
        if (udph->check
            && csum_tcpudp_magic(iph->saddr, iph->daddr,
                                 datalen, IPPROTO_UDP,
                                 csum_partial((char *)udph, datalen,
                                              0)) != 0)
            return;
    }

    /* If we send an ICMP error to an ICMP error a mess would result.. */
    if (iph->protocol == IPPROTO_ICMP
        && skb_in->tail-(u8*)iph >= sizeof(struct icmphdr)) {
        icmph = (struct icmphdr *)((char *)iph + (iph->ihl<<2));

        /* Between echo-reply (0) and timestamp (13),
           everything except echo-request (8) is an error.
           Also, anything greater than NR_ICMP_TYPES is
           unknown, and hence should be treated as an error... */
        if ((icmph->type < ICMP_TIMESTAMP
             && icmph->type != ICMP_ECHOREPLY
             && icmph->type != ICMP_ECHO)
            || icmph->type > NR_ICMP_TYPES)
            return;
    }

    /* Reply from the original destination when it was local, else let
       routing pick the source address. */
    saddr = iph->daddr;
    if (!(rt->rt_flags & RTCF_LOCAL))
        saddr = 0;

    tos = (iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL;

    if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
        return;

    /* RFC says return as much as we can without exceeding 576 bytes.
     */
    length = skb_in->len + sizeof(struct iphdr) + sizeof(struct icmphdr);
    if (length > rt->u.dst.pmtu)
        length = rt->u.dst.pmtu;
    if (length > 576)
        length = 576;

    /* Reserve hardware header space, rounded up to 16 bytes. */
    hh_len = (rt->u.dst.dev->hard_header_len + 15)&~15;

    nskb = alloc_skb(hh_len+15+length, GFP_ATOMIC);
    if (!nskb) {
        ip_rt_put(rt);
        return;
    }

    nskb->priority = 0;
    nskb->dst = &rt->u.dst;
    skb_reserve(nskb, hh_len);

    /* Set up IP header */
    iph = nskb->nh.iph
        = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
    iph->version=4;
    iph->ihl=5;
    iph->tos=tos;
    iph->tot_len = htons(length);

    /* PMTU discovery never applies to ICMP packets. */
    iph->frag_off = 0;

    iph->ttl = MAXTTL;
    ip_select_ident(iph, &rt->u.dst, NULL);
    iph->protocol=IPPROTO_ICMP;
    iph->saddr=rt->rt_src;
    iph->daddr=rt->rt_dst;
    iph->check=0;
    iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

    /* Set up ICMP header. */
    icmph = nskb->h.icmph
        = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
    icmph->type = ICMP_DEST_UNREACH;
    icmph->code = code;
    icmph->un.gateway = 0;
    icmph->checksum = 0;

    /* Copy as much of original packet as will fit */
    data = skb_put(nskb,
                   length - sizeof(struct iphdr) - sizeof(struct icmphdr));
    /* FIXME: won't work with nonlinear skbs --RR */
    memcpy(data, skb_in->nh.iph,
           length - sizeof(struct iphdr) - sizeof(struct icmphdr));
    icmph->checksum = ip_compute_csum((unsigned char *)icmph,
                                      length - sizeof(struct iphdr));

    /* Associate the reply with the original's conntrack entry. */
    nf_ct_attach(nskb, skb_in);

    NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
            ip_finish_output);
}
/*
 * ip6_forward - forward an IPv6 packet not addressed to this host
 * (early variant without xfrm hooks or netfilter forwarding hook).
 * @skb: the packet; consumed on every path (forwarded or freed),
 *       except when skb_cow() fails, which returns 0 directly.
 *
 * Returns 0 on success/handled, a negative errno when the packet is
 * dropped (an ICMPv6 error may have been emitted first).
 */
int ip6_forward(struct sk_buff *skb)
{
    struct dst_entry *dst = skb->dst;
    struct ipv6hdr *hdr = skb->nh.ipv6h;
    struct inet6_skb_parm *opt =(struct inet6_skb_parm*)skb->cb;

    /* Forwarding must be enabled, unless the packet carries a source
       route (srcrt), which is processed regardless. */
    if (ipv6_devconf.forwarding == 0 && opt->srcrt == 0)
        goto drop;

    /*
     * We DO NOT make any processing on
     * RA packets, pushing them to user level AS IS
     * without any WARRANTY that application will be able
     * to interpret them. The reason is that we
     * cannot make anything clever here.
     *
     * We are not end-node, so that if packet contains
     * AH/ESP, we cannot make anything.
     * Defragmentation also would be mistake, RA packets
     * cannot be fragmented, because there is no warranty
     * that different fragments will go along one path. --ANK
     */
    if (opt->ra) {
        u8 *ptr = skb->nh.raw + opt->ra;
        /* Router Alert: hand off to registered RA chain listeners. */
        if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
            return 0;
    }

    /*
     * check and decrement ttl
     */
    if (hdr->hop_limit <= 1) {
        /* Force OUTPUT device used as source address */
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                    0, skb->dev);
        kfree_skb(skb);
        return -ETIMEDOUT;
    }

    /* IPv6 specs say nothing about it, but it is clear that we cannot
       send redirects to source routed frames. */
    if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
        struct in6_addr *target = NULL;
        struct rt6_info *rt;
        struct neighbour *n = dst->neighbour;

        /*
         * incoming and outgoing devices are the same
         * send a redirect.
         */
        rt = (struct rt6_info *) dst;
        if ((rt->rt6i_flags & RTF_GATEWAY))
            target = (struct in6_addr*)&n->primary_key;
        else
            target = &hdr->daddr;

        /* Limit redirects both by destination (here)
           and by source (inside ndisc_send_redirect) */
        if (xrlim_allow(dst, 1*HZ))
            ndisc_send_redirect(skb, n, target);
    } else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
                                            |IPV6_ADDR_LINKLOCAL)) {
        /* This check is security critical: never forward packets with
           multicast, loopback or link-local source addresses. */
        goto drop;
    }

    if (skb->len > dst->pmtu) {
        /* Again, force OUTPUT device used as source address */
        skb->dev = dst->dev;
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
        ipv6_statistics.Ip6InTooBigErrors++;
        kfree_skb(skb);
        return -EMSGSIZE;
    }

    /* Old skb_cow() API: returns a (possibly new) skb, or NULL on
       failure, in which case the original has already been consumed. */
    if ((skb = skb_cow(skb, dst->dev->hard_header_len)) == NULL)
        return 0;

    /* skb_cow() may have reallocated; refetch the header pointer. */
    hdr = skb->nh.ipv6h;

    /* Mangling hops number delayed to point after skb COW */
    hdr->hop_limit--;

    ipv6_statistics.Ip6OutForwDatagrams++;
    return dst->output(skb);

drop:
    ipv6_statistics.Ip6InAddrErrors++;
    kfree_skb(skb);
    return -EINVAL;
}