__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
            unsigned int dataoff, u_int8_t protocol)
{
    struct ipv6hdr *ip6h = ipv6_hdr(skb);
    __sum16 csum = 0;

    switch (skb->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
            break;
        if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                     skb->len - dataoff, protocol,
                     csum_sub(skb->csum,
                          skb_checksum(skb, 0, dataoff, 0)))) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            break;
        }
        /* fall through */
    case CHECKSUM_NONE:
        skb->csum = ~csum_unfold(
                csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                        skb->len - dataoff, protocol,
                        csum_sub(0, skb_checksum(skb, 0,
                                      dataoff, 0))));
        csum = __skb_checksum_complete(skb);
    }
    return csum;
}
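/*
 * A minimal user-space sketch of the ones'-complement arithmetic the
 * helpers above rely on. fold() and onec_sum() are hypothetical
 * stand-ins for csum_fold()/csum_partial(); the asserts demonstrate
 * the csum_add()/csum_sub() identities nf_ip6_checksum() uses to strip
 * the extension-header bytes out of skb->csum before csum_ipv6_magic().
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reduce a 32-bit accumulator to a 16-bit ones'-complement sum; the
 * kernel's csum_fold() additionally complements the result. */
static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Ones'-complement sum over big-endian 16-bit words, padding an odd
 * trailing byte with zero, as csum_partial() accumulates. */
static uint16_t onec_sum(const uint8_t *p, size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    return fold(sum);
}

int main(void)
{
    uint8_t pkt[10] = { 0x45, 0x00, 0x00, 0x2e, 0x12, 0x34,
                0x40, 0x11, 0xde, 0xad };
    uint16_t whole = onec_sum(pkt, 10, 0);
    uint16_t head = onec_sum(pkt, 4, 0);
    uint16_t tail = onec_sum(pkt + 4, 6, 0);

    /* csum_add(): sums over adjacent byte ranges simply combine. */
    assert(fold((uint32_t)head + tail) == whole);

    /* csum_sub(): subtracting a range is adding its complement. */
    assert(fold((uint32_t)whole + (uint16_t)~head) == tail);

    printf("whole=%04x head=%04x tail=%04x\n",
           (unsigned)whole, (unsigned)head, (unsigned)tail);
    return 0;
}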
/**
 * Netfilter is nice enough to defragment the packet for us and store
 * details about the fragments in skb->data_len and skb_shinfo(skb)->frag_list.
 * All the fragments are there; we just have to add them all up by
 * traversing the frag_list linked list.
 */
void calc_checksum(struct sk_buff *skb)
{
    struct iphdr *iph = NULL;
    struct tcphdr *th = NULL;
    struct udphdr *uh = NULL;
    uint16_t l4len = 0;
    uint16_t iph_len = 0;
    uint16_t tcpdata_len = 0;
    uint16_t tcph_len = 0;
    uint16_t tcp_len = 0;
    uint16_t l3len = 0;
    void *l4ptr = NULL;

    iph = ip_hdr(skb);
    if (!iph)
        return;
    l3len = iph->ihl << 2;

    if (iph->protocol == IPPROTO_UDP) {
        uh = (struct udphdr *)((unsigned char *)iph + (iph->ihl << 2));
        iph_len = l3len;
        l4ptr = uh;
        l4len = ntohs(uh->len);
        uh->check = 0x0000;
    }
    if (iph->protocol == IPPROTO_TCP) {
        th = (struct tcphdr *)((unsigned char *)iph + (iph->ihl << 2));
        tcph_len = th->doff << 2;
        iph_len = l3len;
        tcpdata_len = ntohs(iph->tot_len) - iph_len - tcph_len;
        tcp_len = tcph_len + tcpdata_len;
        l4ptr = th;
        l4len = tcp_len;
        th->check = 0x0000;
    }

    if (th)
        th->check = csum_tcpudp_magic(iph->saddr, iph->daddr, l4len,
                          IPPROTO_TCP,
                          skb_checksum(skb, iph_len, l4len, 0));
    if (uh)
        uh->check = csum_tcpudp_magic(iph->saddr, iph->daddr, l4len,
                          IPPROTO_UDP,
                          skb_checksum(skb, iph_len, l4len, 0));

    iph->check = 0;
    iph->check = ip_fast_csum((void *)iph, iph->ihl);
}
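/*
 * A hedged user-space sketch of what the csum_tcpudp_magic() calls in
 * calc_checksum() compute: the complement of the ones'-complement sum
 * of the RFC 768/793 pseudo-header plus the L4 header and payload.
 * fold()/onec_sum() are the stand-in helpers from the sketch above,
 * repeated so the program stands alone; the ports, addresses and
 * payload are made up for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

static uint16_t onec_sum(const uint8_t *p, size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    return fold(sum);
}

int main(void)
{
    /* UDP datagram: 8-byte header (checksum zeroed) plus "ping". */
    uint8_t udp[12] = {
        0x04, 0xd2,        /* source port 1234 */
        0x16, 0x2e,        /* dest port 5678 */
        0x00, 0x0c,        /* UDP length 12 */
        0x00, 0x00,        /* checksum, zero while summing */
        'p', 'i', 'n', 'g',
    };
    /* RFC 768 pseudo-header: saddr, daddr, zero, proto, UDP length. */
    uint8_t pseudo[12] = {
        192, 0, 2, 1,      /* source 192.0.2.1 */
        192, 0, 2, 2,      /* dest 192.0.2.2 */
        0, 17,             /* zero pad, IPPROTO_UDP */
        0x00, 0x0c,        /* UDP length again */
    };
    uint16_t check = (uint16_t)~onec_sum(udp, sizeof(udp),
                         onec_sum(pseudo, sizeof(pseudo), 0));

    /* 0 on the wire means "no checksum" for UDP, so a computed 0 is
     * transmitted as 0xffff -- the kernel's CSUM_MANGLED_0. */
    if (check == 0)
        check = 0xffff;
    printf("udp check = 0x%04x\n", (unsigned)check);
    return 0;
}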
static inline int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
    struct inet6_dev *idev = in6_dev_get(skb->dev);

    if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
        if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len,
                                skb->csum))) {
            UDP6_INC_STATS_BH(UdpInErrors);
            IP6_INC_STATS_BH(idev, Ip6InDiscards);
            if (idev)
                in6_dev_put(idev);
            kfree_skb(skb);
            return 0;
        }
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    }

    if (sock_queue_rcv_skb(sk, skb) < 0) {
        UDP6_INC_STATS_BH(UdpInErrors);
        IP6_INC_STATS_BH(idev, Ip6InDiscards);
        if (idev)
            in6_dev_put(idev);
        kfree_skb(skb);
        return 0;
    }

    IP6_INC_STATS_BH(idev, Ip6InDelivers);
    UDP6_INC_STATS_BH(UdpInDatagrams);
    if (idev)
        in6_dev_put(idev);
    return 0;
}
static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
                       unsigned int dataoff, unsigned int len,
                       u_int8_t protocol)
{
    struct ipv6hdr *ip6h = ipv6_hdr(skb);
    __wsum hsum;
    __sum16 csum = 0;

    switch (skb->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (len == skb->len - dataoff)
            return nf_ip6_checksum(skb, hook, dataoff, protocol);
        /* fall through */
    case CHECKSUM_NONE:
        hsum = skb_checksum(skb, 0, dataoff, 0);
        skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
                             &ip6h->daddr,
                             skb->len - dataoff,
                             protocol,
                             csum_sub(0, hsum)));
        skb->ip_summed = CHECKSUM_NONE;
        return __skb_checksum_complete_head(skb, dataoff + len);
    }
    return csum;
}
static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
             __be16 proto, __be32 key, __be32 seq)
{
    struct gre_base_hdr *greh;

    skb_push(skb, hdr_len);
    skb_reset_transport_header(skb);
    greh = (struct gre_base_hdr *)skb->data;
    greh->flags = tnl_flags_to_gre_flags(flags);
    greh->protocol = proto;

    if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
        __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

        if (flags & TUNNEL_SEQ) {
            *ptr = seq;
            ptr--;
        }
        if (flags & TUNNEL_KEY) {
            *ptr = key;
            ptr--;
        }
        if (flags & TUNNEL_CSUM &&
            !(skb_shinfo(skb)->gso_type &
              (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
            *ptr = 0;
            *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
                                 skb->len, 0));
        }
    }
}
void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
              int hdr_len)
{
    struct gre_base_hdr *greh;

    skb_push(skb, hdr_len);

    greh = (struct gre_base_hdr *)skb->data;
    greh->flags = tnl_flags_to_gre_flags(tpi->flags);
    greh->protocol = tpi->proto;

    if (tpi->flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
        __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

        if (tpi->flags & TUNNEL_SEQ) {
            *ptr = tpi->seq;
            ptr--;
        }
        if (tpi->flags & TUNNEL_KEY) {
            *ptr = tpi->key;
            ptr--;
        }
        if (tpi->flags & TUNNEL_CSUM &&
            !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) {
            *ptr = 0;
            *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
                                 skb->len, 0));
        }
    }
}
static void __gre_build_header(struct sk_buff *skb, int tunnel_hlen,
                   bool is_gre64)
{
    const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
    __be32 *options = (__be32 *)(skb_network_header(skb) + tunnel_hlen -
                     GRE_HEADER_SECTION);
    struct gre_base_hdr *greh = (struct gre_base_hdr *)
                    skb_transport_header(skb);

    greh->protocol = htons(ETH_P_TEB);
    greh->flags = 0;

    /* Work backwards over the options so the checksum is last. */
    if (tun_key->tun_flags & OVS_TNL_F_KEY || is_gre64) {
        greh->flags |= GRE_KEY;
        if (is_gre64) {
            /* Set higher 32 bits to seq. */
            *options = be64_get_high32(tun_key->tun_id);
            options--;
            greh->flags |= GRE_SEQ;
        }
        *options = be64_get_low32(tun_key->tun_id);
        options--;
    }

    if (tun_key->tun_flags & OVS_TNL_F_CSUM) {
        greh->flags |= GRE_CSUM;
        *options = 0;
        *(__sum16 *)options = csum_fold(skb_checksum(skb,
                    skb_transport_offset(skb),
                    skb->len - skb_transport_offset(skb),
                    0));
    }
}
int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen,
                     struct iovec *iov)
{
    unsigned int csum;
    int chunk = skb->len - hlen;

    /* Skip filled elements.
     * Pretty silly, look at memcpy_toiovec, though 8)
     */
    while (iov->iov_len == 0)
        iov++;

    if (iov->iov_len < chunk) {
        if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen,
                               skb->csum)))
            goto csum_error;
        if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
            goto fault;
    } else {
        csum = csum_partial(skb->data, hlen, skb->csum);
        if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
                           chunk, &csum))
            goto fault;
        if ((unsigned short)csum_fold(csum))
            goto csum_error;
        iov->iov_len -= chunk;
        iov->iov_base += chunk;
    }
    return 0;

csum_error:
    return -EINVAL;
fault:
    return -EFAULT;
}
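/*
 * skb_copy_and_csum_datagram() lets the function above checksum the
 * data while it is being copied to user space, so the bytes are only
 * traversed once. A minimal sketch of that pattern under the same
 * ones'-complement rules as the earlier sketches; copy_and_csum() is
 * a hypothetical stand-in, not the kernel routine.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Copy src to dst while accumulating the ones'-complement sum, in the
 * spirit of csum_partial_copy(): one pass instead of copy + checksum. */
static uint16_t copy_and_csum(uint8_t *dst, const uint8_t *src,
                  size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2) {
        dst[i] = src[i];
        dst[i + 1] = src[i + 1];
        sum += (uint32_t)src[i] << 8 | src[i + 1];
    }
    if (len & 1) {
        dst[len - 1] = src[len - 1];
        sum += (uint32_t)src[len - 1] << 8;
    }
    return fold(sum);
}

int main(void)
{
    uint8_t src[6] = { 0xde, 0xad, 0xbe, 0xef, 0x01, 0x02 };
    uint8_t dst[6];
    uint16_t sum = copy_and_csum(dst, src, sizeof(src), 0);

    printf("sum = 0x%04x, copy ok = %d\n", (unsigned)sum,
           memcmp(src, dst, sizeof(src)) == 0);
    return 0;
}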
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
static int tcp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
             unsigned int hooknum)
{
    struct iphdr *iph = skb->nh.iph;
    struct tcphdr _tcph, *th;
    unsigned int tcplen = skb->len - iph->ihl * 4;
    u_int8_t tcpflags;

    /* Smaller than minimal TCP header? */
    th = skb_header_pointer(skb, iph->ihl * 4, sizeof(_tcph), &_tcph);
    if (th == NULL) {
        if (LOG_INVALID(IPPROTO_TCP))
            nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
                      "ip_ct_tcp: short packet ");
        return -NF_ACCEPT;
    }

    if (sysctl_spi_enable) {
        /* Not whole TCP header or malformed packet */
        if (th->doff * 4 < sizeof(struct tcphdr) ||
            tcplen < th->doff * 4) {
            if (LOG_INVALID(IPPROTO_TCP))
                nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
                          "ip_ct_tcp: truncated/malformed packet ");
            printk(KERN_NOTICE "Blocked incoming TCP packet "
                   "from %u.%u.%u.%u:%hu to %u.%u.%u.%u:%hu "
                   "with unexpected sequence\n",
                   NIPQUAD(iph->saddr), ntohs(th->source),
                   NIPQUAD(iph->daddr), ntohs(th->dest));
            return -NF_ACCEPT;
        }

        /* Checksum invalid? Ignore.
         * We skip checking packets on the outgoing path
         * because the semantic of CHECKSUM_HW is different there
         * and moreover root might send raw packets.
         */
        /* FIXME: Source route IP option packets --RR */
        if (hooknum == NF_IP_PRE_ROUTING &&
            skb->ip_summed != CHECKSUM_UNNECESSARY &&
            csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen,
                      IPPROTO_TCP,
                      skb->ip_summed == CHECKSUM_HW ?
                      skb->csum :
                      skb_checksum(skb, iph->ihl * 4, tcplen, 0))) {
            if (LOG_INVALID(IPPROTO_TCP))
                nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
                          "ip_ct_tcp: bad TCP checksum ");
            printk(KERN_NOTICE "Blocked incoming TCP packet "
                   "from %u.%u.%u.%u:%hu to %u.%u.%u.%u:%hu "
                   "with unexpected sequence\n",
                   NIPQUAD(iph->saddr), ntohs(th->source),
                   NIPQUAD(iph->daddr), ntohs(th->dest));
            return -NF_ACCEPT;
        }
    }

    /* Check TCP flags. */
    tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE | TH_CWR));
    if (!tcp_valid_flags[tcpflags]) {
        if (LOG_INVALID(IPPROTO_TCP))
            nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
                      "ip_ct_tcp: invalid TCP flag combination ");
        return -NF_ACCEPT;
    }

    return NF_ACCEPT;
}
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                     netdev_features_t features)
{
    struct sk_buff *segs = ERR_PTR(-EINVAL);
    unsigned int mss;
    __wsum csum;
    struct udphdr *uh;
    struct iphdr *iph;

    if (skb->encapsulation &&
        (skb_shinfo(skb)->gso_type &
         (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
        segs = skb_udp_tunnel_segment(skb, features, false);
        goto out;
    }

    if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
        goto out;

    if (!pskb_may_pull(skb, sizeof(struct udphdr)))
        goto out;

    if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
        return __udp_gso_segment(skb, features);

    mss = skb_shinfo(skb)->gso_size;
    if (unlikely(skb->len <= mss))
        goto out;

    /* Do software UFO. Complete and fill in the UDP checksum as
     * HW cannot do checksum of UDP packets sent as multiple
     * IP fragments.
     */
    uh = udp_hdr(skb);
    iph = ip_hdr(skb);

    uh->check = 0;
    csum = skb_checksum(skb, 0, skb->len, 0);
    uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
    if (uh->check == 0)
        uh->check = CSUM_MANGLED_0;

    skb->ip_summed = CHECKSUM_UNNECESSARY;

    /* If there is no outer header we can fake a checksum offload
     * due to the fact that we have already done the checksum in
     * software prior to segmenting the frame.
     */
    if (!skb->encap_hdr_csum)
        features |= NETIF_F_HW_CSUM;

    /* Fragment the skb. IP headers of the fragments are updated in
     * inet_gso_segment()
     */
    segs = skb_segment(skb, features);
out:
    return segs;
}
static int csum6(const struct sk_buff *skb, unsigned int dataoff)
{
    return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
                   skb->len - dataoff, IPPROTO_UDP,
                   skb->ip_summed == CHECKSUM_HW ? skb->csum :
                   skb_checksum(skb, dataoff,
                        skb->len - dataoff, 0));
}
static int pim6_rcv(struct sk_buff *skb)
{
    struct pimreghdr *pim;
    struct ipv6hdr *encap;
    struct net_device *reg_dev = NULL;
    struct net *net = dev_net(skb->dev);
    int reg_vif_num = net->ipv6.mroute_reg_vif_num;

    if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
        goto drop;

    pim = (struct pimreghdr *)skb_transport_header(skb);
    if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
        (pim->flags & PIM_NULL_REGISTER) ||
        (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                 sizeof(*pim), IPPROTO_PIM,
                 csum_partial((void *)pim, sizeof(*pim), 0)) &&
         csum_fold(skb_checksum(skb, 0, skb->len, 0))))
        goto drop;

    /* check if the inner packet is destined to mcast group */
    encap = (struct ipv6hdr *)(skb_transport_header(skb) + sizeof(*pim));
    if (!ipv6_addr_is_multicast(&encap->daddr) ||
        encap->payload_len == 0 ||
        ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
        goto drop;

    read_lock(&mrt_lock);
    if (reg_vif_num >= 0)
        reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
    if (reg_dev)
        dev_hold(reg_dev);
    read_unlock(&mrt_lock);

    if (reg_dev == NULL)
        goto drop;

    skb->mac_header = skb->network_header;
    skb_pull(skb, (u8 *)encap - skb->data);
    skb_reset_network_header(skb);
    skb->dev = reg_dev;
    skb->protocol = htons(ETH_P_IPV6);
    skb->ip_summed = 0;
    skb->pkt_type = PACKET_HOST;
    skb_dst_drop(skb);
    reg_dev->stats.rx_bytes += skb->len;
    reg_dev->stats.rx_packets++;
    nf_reset(skb);
    netif_rx(skb);
    dev_put(reg_dev);
    return 0;
drop:
    kfree_skb(skb);
    return 0;
}
static __sum16 gre_checksum(struct sk_buff *skb)
{
    __wsum csum;

    if (skb->ip_summed == CHECKSUM_PARTIAL)
        csum = lco_csum(skb);
    else
        csum = skb_checksum(skb, 0, skb->len, 0);
    return csum_fold(csum);
}
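/*
 * lco_csum() (local checksum offload) works because a segment whose
 * checksum field holds its final value sums, in ones'-complement, to
 * the complement of the pseudo-header sum regardless of the payload;
 * the outer tunnel checksum can therefore be derived without reading
 * the inner packet again. A sketch of that identity using the same
 * stand-in helpers as the earlier sketches; the bytes and the
 * pseudo-header sum are arbitrary example values.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

static uint16_t onec_sum(const uint8_t *p, size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    return fold(sum);
}

int main(void)
{
    uint8_t seg[8] = { 0xab, 0xcd, 0x00, 0x00, 0x12, 0x34, 0x56, 0x78 };
    uint16_t pseudo = 0x1122;    /* pretend pseudo-header sum */

    /* Fill the checksum field (bytes 2..3) the usual way. */
    uint16_t data = onec_sum(seg, sizeof(seg), 0);
    uint16_t check = (uint16_t)~fold((uint32_t)data + pseudo);

    seg[2] = check >> 8;
    seg[3] = check & 0xff;

    /* The on-wire bytes now sum to ~pseudo, whatever the payload. */
    printf("wire sum = 0x%04x, ~pseudo = 0x%04x\n",
           (unsigned)onec_sum(seg, sizeof(seg), 0),
           (unsigned)(uint16_t)~pseudo);
    return 0;
}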
/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
static int tcp_error(struct sk_buff *skb, int hooknum)
{
    struct iphdr *iph = skb->nh.iph;
    size_t len = skb->len;
    struct tcphdr *tcph = (struct tcphdr *)((u_int32_t *)iph + iph->ihl);
    unsigned int tcplen = len - iph->ihl * 4;
    u_int8_t tcpflags;

    /* Smaller than minimal TCP header? Should always be false. */
    if (len < iph->ihl * 4 + sizeof(struct tcphdr)) {
        if (NET_RATELIMIT(ip_ct_tcp_log_invalid))
            nf_log(PF_INET, (char *)iph, len,
                   "ip_ct_tcp: short packet ");
        return -NF_ACCEPT;
    }

    /* Not whole TCP header or malformed packet */
    if (tcph->doff * 4 < sizeof(struct tcphdr) ||
        tcplen < tcph->doff * 4) {
        if (NET_RATELIMIT(ip_ct_tcp_log_invalid))
            nf_log(PF_INET, (char *)iph, len,
                   "ip_ct_tcp: truncated/malformed packet ");
        return -NF_ACCEPT;
    }

    /* Checksum invalid? Ignore.
     * We skip checking packets on the outgoing path
     * because the semantic of CHECKSUM_HW is different there
     * and moreover root might send raw packets.
     */
    /* FIXME: Source route IP option packets --RR */
    if (hooknum == NF_IP_PRE_ROUTING &&
        skb->ip_summed != CHECKSUM_UNNECESSARY) {
        if (skb->ip_summed != CHECKSUM_HW) {
            skb->csum = skb_checksum(skb, iph->ihl * 4, tcplen, 0);
            skb->ip_summed = CHECKSUM_HW;
        }
        if (csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen,
                      IPPROTO_TCP, skb->csum)) {
            if (NET_RATELIMIT(ip_ct_tcp_log_invalid))
                nf_log(PF_INET, (char *)iph, len,
                       "ip_ct_tcp: bad TCP checksum ");
            return -NF_ACCEPT;
        }
    }

    /* Check TCP flags. */
    tcpflags = (((u_int8_t *)tcph)[13] & ~(TH_ECE | TH_CWR));
    if (!tcp_valid_flags[tcpflags]) {
        if (NET_RATELIMIT(ip_ct_tcp_log_invalid))
            nf_log(PF_INET, (char *)iph, len,
                   "ip_ct_tcp: invalid TCP flag combination ");
        return -NF_ACCEPT;
    }

    return NF_ACCEPT;
}
static int pim6_rcv(struct sk_buff *skb)
{
    struct pimreghdr *pim;
    struct ipv6hdr *encap;
    struct net_device *reg_dev = NULL;

    if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
        goto drop;

    pim = (struct pimreghdr *)skb_transport_header(skb);
    if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
        (pim->flags & PIM_NULL_REGISTER) ||
        (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
         csum_fold(skb_checksum(skb, 0, skb->len, 0))))
        goto drop;

    /* check if the inner packet is destined to mcast group */
    encap = (struct ipv6hdr *)(skb_transport_header(skb) + sizeof(*pim));
    if (!ipv6_addr_is_multicast(&encap->daddr) ||
        encap->payload_len == 0 ||
        ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
        goto drop;

    read_lock(&mrt_lock);
    if (reg_vif_num >= 0)
        reg_dev = vif6_table[reg_vif_num].dev;
    if (reg_dev)
        dev_hold(reg_dev);
    read_unlock(&mrt_lock);

    if (reg_dev == NULL)
        goto drop;

    skb->mac_header = skb->network_header;
    skb_pull(skb, (u8 *)encap - skb->data);
    skb_reset_network_header(skb);
    skb->dev = reg_dev;
    skb->protocol = htons(ETH_P_IP);
    skb->ip_summed = 0;
    skb->pkt_type = PACKET_HOST;
    dst_release(skb->dst);
    ((struct net_device_stats *)netdev_priv(reg_dev))->rx_bytes += skb->len;
    ((struct net_device_stats *)netdev_priv(reg_dev))->rx_packets++;
    skb->dst = NULL;
    nf_reset(skb);
    netif_rx(skb);
    dev_put(reg_dev);
    return 0;
drop:
    kfree_skb(skb);
    return 0;
}
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
    __sum16 sum;

    sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
    if (likely(!sum)) {
        if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
            netdev_rx_csum_fault(skb->dev);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    }
    return sum;
}
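/*
 * The receive-side identity __skb_checksum_complete_head() relies on:
 * summing the seed in skb->csum (e.g. as primed by the netfilter
 * helpers above) together with the on-wire bytes, including the
 * embedded checksum field, folds to zero for an intact packet. A
 * sketch with the same stand-in helpers; the bytes continue the
 * example from the lco_csum() sketch, and one flipped bit makes the
 * result non-zero.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

static uint16_t onec_sum(const uint8_t *p, size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    return fold(sum);
}

int main(void)
{
    /* Segment with its checksum field (bytes 2..3) already filled. */
    uint8_t seg[8] = { 0xab, 0xcd, 0xda, 0x63, 0x12, 0x34, 0x56, 0x78 };
    uint16_t pseudo = 0x1122;

    printf("intact:  0x%04x\n",
           (unsigned)(uint16_t)~onec_sum(seg, sizeof(seg), pseudo));

    seg[6] ^= 0x01;    /* simulate corruption in flight */
    printf("corrupt: 0x%04x\n",
           (unsigned)(uint16_t)~onec_sum(seg, sizeof(seg), pseudo));
    return 0;
}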
int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{
    struct ip6_mh *mh;
    int mhlen;

    if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) ||
        !pskb_may_pull(skb, (skb->h.raw - skb->data) +
                ((skb->h.raw[1] + 1) << 3)))
        return -1;

    mh = (struct ip6_mh *)skb->h.raw;

    if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
        LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
                   mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
        mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw);
        return -1;
    }
    mhlen = (mh->ip6mh_hdrlen + 1) << 3;

    if (skb->ip_summed == CHECKSUM_COMPLETE) {
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
                    &skb->nh.ipv6h->daddr,
                    mhlen, IPPROTO_MH, skb->csum)) {
            LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH hw checksum failed\n");
            skb->ip_summed = CHECKSUM_NONE;
        }
    }
    if (skb->ip_summed == CHECKSUM_NONE) {
        if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
                    &skb->nh.ipv6h->daddr,
                    mhlen, IPPROTO_MH,
                    skb_checksum(skb, 0, mhlen, 0))) {
            LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH checksum failed "
                       "[" NIP6_FMT " > " NIP6_FMT "]\n",
                       NIP6(skb->nh.ipv6h->saddr),
                       NIP6(skb->nh.ipv6h->daddr));
            return -1;
        }
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    }

    if (mh->ip6mh_proto != IPPROTO_NONE) {
        LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
                   mh->ip6mh_proto);
        mip6_param_prob(skb, 0, (&mh->ip6mh_proto) - skb->nh.raw);
        return -1;
    }

    return 0;
}
static void gre_csum_fix(struct sk_buff *skb)
{
    struct gre_base_hdr *greh;
    __be32 *options;
    int gre_offset = skb_transport_offset(skb);

    greh = (struct gre_base_hdr *)skb_transport_header(skb);
    options = ((__be32 *)greh + 1);

    *options = 0;
    *(__sum16 *)options = csum_fold(skb_checksum(skb, gre_offset,
                             skb->len - gre_offset,
                             0));
}
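/*
 * The option word gre_csum_fix() rewrites is laid out per RFC 2784:
 * the first GRE_HEADER_SECTION after the base header carries a 16-bit
 * checksum over the entire GRE header and payload (no pseudo-header),
 * followed by 16 reserved bits. A sketch over a made-up packet, using
 * the same stand-in helpers as the earlier sketches.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

static uint16_t onec_sum(const uint8_t *p, size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    return fold(sum);
}

int main(void)
{
    /* Base header with the C bit set, checksum word zeroed, then the
     * first bytes of an inner IPv4 header as payload. */
    uint8_t gre[12] = {
        0x80, 0x00,              /* flags: GRE_CSUM */
        0x08, 0x00,              /* protocol: ETH_P_IP */
        0x00, 0x00, 0x00, 0x00,  /* checksum + reserved1 */
        0x45, 0x00, 0x00, 0x14,  /* inner IPv4 header bytes */
    };
    uint16_t check = (uint16_t)~onec_sum(gre, sizeof(gre), 0);

    /* Store into the first 16 bits of the option word, as the
     * *(__sum16 *)options assignment above does. */
    gre[4] = check >> 8;
    gre[5] = check & 0xff;

    /* The whole packet now verifies to zero. */
    printf("check = 0x%04x, verify = 0x%04x\n", (unsigned)check,
           (unsigned)(uint16_t)~onec_sum(gre, sizeof(gre), 0));
    return 0;
}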
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                     netdev_features_t features)
{
    struct sk_buff *segs = ERR_PTR(-EINVAL);
    unsigned int mss;
    int offset;
    __wsum csum;

    if (skb->encapsulation &&
        skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
        segs = skb_udp_tunnel_segment(skb, features);
        goto out;
    }

    mss = skb_shinfo(skb)->gso_size;
    if (unlikely(skb->len <= mss))
        goto out;

    if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
        /* Packet is from an untrusted source, reset gso_segs. */
        int type = skb_shinfo(skb)->gso_type;

        if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                      SKB_GSO_UDP_TUNNEL |
                      SKB_GSO_GRE | SKB_GSO_MPLS) ||
                 !(type & (SKB_GSO_UDP))))
            goto out;

        skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

        segs = NULL;
        goto out;
    }

    /* Do software UFO. Complete and fill in the UDP checksum as
     * HW cannot do checksum of UDP packets sent as multiple
     * IP fragments.
     */
    offset = skb_checksum_start_offset(skb);
    csum = skb_checksum(skb, offset, skb->len - offset, 0);
    offset += skb->csum_offset;
    *(__sum16 *)(skb->data + offset) = csum_fold(csum);
    skb->ip_summed = CHECKSUM_NONE;

    /* Fragment the skb. IP headers of the fragments are updated in
     * inet_gso_segment()
     */
    segs = skb_segment(skb, features);
out:
    return segs;
}
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
                  int tlen, int offset)
{
    __wsum csum = skb->csum;

    if (skb->ip_summed != CHECKSUM_COMPLETE)
        return;

    if (offset != 0) {
        int tend_off = skb_transport_offset(skb) + tlen;

        csum = csum_sub(csum, skb_checksum(skb, tend_off, offset, 0));
    }

    put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
/* Compute the whole skb csum in s/w and store it, then verify GRO csum
 * starting from gro_offset.
 */
static __sum16 gro_skb_checksum(struct sk_buff *skb)
{
    __sum16 sum;

    skb->csum = skb_checksum(skb, 0, skb->len, 0);
    NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum,
                      csum_partial(skb->data,
                               skb_gro_offset(skb),
                               0));
    sum = csum_fold(NAPI_GRO_CB(skb)->csum);
    if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
        if (unlikely(!sum))
            netdev_rx_csum_fault(skb->dev);
    } else
        skb->ip_summed = CHECKSUM_COMPLETE;

    return sum;
}
static int tcp_csum_check(int af, struct sk_buff *skb,
              struct dispatcher_protocol *pp)
{
    unsigned int tcphoff;

#ifdef CONFIG_DISPATCHER_IPV6
    if (af == AF_INET6)
        tcphoff = sizeof(struct ipv6hdr);
    else
#endif
        tcphoff = ip_hdrlen(skb);

    switch (skb->ip_summed) {
    case CHECKSUM_NONE:
        skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
        /* fall through */
    case CHECKSUM_COMPLETE:
#ifdef CONFIG_DISPATCHER_IPV6
        if (af == AF_INET6) {
            if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                        &ipv6_hdr(skb)->daddr,
                        skb->len - tcphoff,
                        ipv6_hdr(skb)->nexthdr,
                        skb->csum)) {
                DISPATCHER_DBG_RL_PKT(0, pp, skb, 0,
                              "Failed checksum for");
                return 0;
            }
        } else
#endif
            if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
                          ip_hdr(skb)->daddr,
                          skb->len - tcphoff,
                          ip_hdr(skb)->protocol,
                          skb->csum)) {
                DISPATCHER_DBG_RL_PKT(0, pp, skb, 0,
                              "Failed checksum for");
                return 0;
            }
        break;
    default:
        /* No need to checksum. */
        break;
    }

    return 1;
}
static inline int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
#if defined(CONFIG_FILTER)
    if (sk->filter && skb->ip_summed != CHECKSUM_UNNECESSARY) {
        if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len,
                                skb->csum))) {
            UDP6_INC_STATS_BH(UdpInErrors);
            IP6_INC_STATS_BH(Ip6InDiscards);
            kfree_skb(skb);
            return 0;
        }
        skb->ip_summed = CHECKSUM_UNNECESSARY;
    }
#endif
    if (sock_queue_rcv_skb(sk, skb) < 0) {
        UDP6_INC_STATS_BH(UdpInErrors);
        IP6_INC_STATS_BH(Ip6InDiscards);
        kfree_skb(skb);
        return 0;
    }
    IP6_INC_STATS_BH(Ip6InDelivers);
    UDP6_INC_STATS_BH(UdpInDatagrams);
    return 0;
}
static void vxlan_gso(struct sk_buff *skb)
{
    int udp_offset = skb_transport_offset(skb);
    struct udphdr *uh;

    uh = udp_hdr(skb);
    uh->len = htons(skb->len - udp_offset);

    /* csum segment if tunnel sets skb with csum. */
    if (unlikely(uh->check)) {
        struct iphdr *iph = ip_hdr(skb);

        /* Seed the check field with the (uncomplemented) pseudo-header
         * sum; the skb_checksum() pass below includes that field, so
         * folding its result yields the final UDP checksum. */
        uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                           skb->len - udp_offset,
                           IPPROTO_UDP, 0);
        uh->check = csum_fold(skb_checksum(skb, udp_offset,
                           skb->len - udp_offset, 0));
        if (uh->check == 0)
            uh->check = CSUM_MANGLED_0;
    }
    skb->ip_summed = CHECKSUM_NONE;
}
/**
 * dccp_invalid_packet - check for malformed packets
 * Implements RFC 4340, 8.5: Step 1: Check header basics
 * Packets that fail these checks are ignored and do not receive Resets.
 */
int dccp_invalid_packet(struct sk_buff *skb)
{
    const struct dccp_hdr *dh;
    unsigned int cscov;

    if (skb->pkt_type != PACKET_HOST)
        return 1;

    /* If the packet is shorter than 12 bytes, drop packet and return */
    if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
        DCCP_WARN("pskb_may_pull failed\n");
        return 1;
    }

    dh = dccp_hdr(skb);

    /* If P.type is not understood, drop packet and return */
    if (dh->dccph_type >= DCCP_PKT_INVALID) {
        DCCP_WARN("invalid packet type\n");
        return 1;
    }

    /*
     * If P.Data Offset is too small for packet type, drop packet and return
     */
    if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
        DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
        return 1;
    }
    /*
     * If P.Data Offset is too large for packet, drop packet and return
     */
    if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
        DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
        return 1;
    }

    /*
     * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
     * has short sequence numbers), drop packet and return
     */
    if ((dh->dccph_type < DCCP_PKT_DATA ||
         dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) {
        DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
              dccp_packet_name(dh->dccph_type));
        return 1;
    }

    /*
     * If P.CsCov is too large for the packet size, drop packet and return.
     * This must come _before_ checksumming (not as RFC 4340 suggests).
     */
    cscov = dccp_csum_coverage(skb);
    if (cscov > skb->len) {
        DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
              dh->dccph_cscov, skb->len);
        return 1;
    }

    /* If header checksum is incorrect, drop packet and return.
     * (This step is completed in the AF-dependent functions.)
     */
    skb->csum = skb_checksum(skb, 0, cscov, 0);

    return 0;
}
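/*
 * dccp_csum_coverage() above applies the RFC 4340, 9.2 rule that the
 * skb_checksum() call depends on: a CsCov of 0 covers the whole
 * packet, while CsCov n covers the header plus (n - 1) * 4 bytes of
 * application data. A sketch of that rule; csum_coverage() is a
 * hypothetical stand-in (the kernel derives the header length from
 * dccph_doff), and the caller must still reject coverage beyond the
 * packet end, which is the cscov > skb->len check above.
 */
#include <stddef.h>
#include <stdio.h>

static size_t csum_coverage(size_t pkt_len, size_t hdr_len,
                unsigned int cscov)
{
    if (cscov == 0)
        return pkt_len;
    return hdr_len + (cscov - 1) * 4;
}

int main(void)
{
    size_t pkt_len = 100, hdr_len = 16;

    printf("cscov=0 -> %zu bytes (whole packet)\n",
           csum_coverage(pkt_len, hdr_len, 0));
    printf("cscov=1 -> %zu bytes (header only)\n",
           csum_coverage(pkt_len, hdr_len, 1));
    printf("cscov=4 -> %zu bytes\n",
           csum_coverage(pkt_len, hdr_len, 4));
    return 0;
}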
/*
 * Create a SYN packet and send it to the real server (rs).
 * ATTENTION: we also store the SYN skb in cp if SYN retransmission
 * is turned on.
 */
static int syn_proxy_send_rs_syn(int af, const struct tcphdr *th,
                 struct ip_vs_conn *cp, struct sk_buff *skb,
                 struct ip_vs_protocol *pp,
                 struct ip_vs_synproxy_opt *opt)
{
    struct sk_buff *syn_skb;
    int tcp_hdr_size;
    __u8 tcp_flags = TCPCB_FLAG_SYN;
    unsigned int tcphoff;
    struct tcphdr *new_th;

    if (!cp->packet_xmit) {
        IP_VS_ERR_RL("warning: packet_xmit is null");
        return 0;
    }

    syn_skb = alloc_skb(MAX_TCP_HEADER + 15, GFP_ATOMIC);
    if (unlikely(syn_skb == NULL)) {
        IP_VS_ERR_RL("alloc skb failed when sending rs syn packet\n");
        return 0;
    }

    /* Reserve space for headers */
    skb_reserve(syn_skb, MAX_TCP_HEADER);

    tcp_hdr_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
            (opt->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
            (opt->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
            /* SACK_PERM is in the place of NOP NOP of TS */
            ((opt->sack_ok && !opt->tstamp_ok) ?
             TCPOLEN_SACKPERM_ALIGNED : 0));

    new_th = (struct tcphdr *)skb_push(syn_skb, tcp_hdr_size);

    /* Compose tcp header */
    skb_reset_transport_header(syn_skb);
    syn_skb->csum = 0;

    /* Set tcp hdr */
    new_th->source = th->source;
    new_th->dest = th->dest;
    new_th->seq = htonl(ntohl(th->seq) - 1);
    new_th->ack_seq = 0;
    *(((__u16 *)new_th) + 6) = htons(((tcp_hdr_size >> 2) << 12) |
                     tcp_flags);
    /* FIXME: what window should we use? */
    new_th->window = htons(5000);
    new_th->check = 0;
    new_th->urg_ptr = 0;
    new_th->urg = 0;
    new_th->ece = 0;
    new_th->cwr = 0;

    syn_proxy_syn_build_options((__be32 *)(new_th + 1), opt);

    /*
     * Set ip hdr
     * Attention: set source and dest addr to ack skb's.
     * We rely on the packet_xmit func to do the NAT work.
     */
#ifdef CONFIG_IP_VS_IPV6
    if (af == AF_INET6) {
        struct ipv6hdr *ack_iph = ipv6_hdr(skb);
        struct ipv6hdr *iph =
            (struct ipv6hdr *)skb_push(syn_skb,
                           sizeof(struct ipv6hdr));

        tcphoff = sizeof(struct ipv6hdr);
        skb_reset_network_header(syn_skb);
        memcpy(&iph->saddr, &ack_iph->saddr, sizeof(struct in6_addr));
        memcpy(&iph->daddr, &ack_iph->daddr, sizeof(struct in6_addr));

        iph->version = 6;
        iph->nexthdr = NEXTHDR_TCP;
        iph->payload_len = htons(tcp_hdr_size);
        iph->hop_limit = IPV6_DEFAULT_HOPLIMIT;

        new_th->check = 0;
        syn_skb->csum = skb_checksum(syn_skb, tcphoff,
                         syn_skb->len - tcphoff, 0);
        new_th->check = csum_ipv6_magic(&iph->saddr, &iph->daddr,
                        syn_skb->len - tcphoff,
                        IPPROTO_TCP, syn_skb->csum);
    } else
#endif
    {
        struct iphdr *ack_iph = ip_hdr(skb);
        u32 rtos = RT_TOS(ack_iph->tos);
        struct iphdr *iph =
            (struct iphdr *)skb_push(syn_skb, sizeof(struct iphdr));

        tcphoff = sizeof(struct iphdr);
        skb_reset_network_header(syn_skb);
        *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (rtos & 0xff));
        iph->tot_len = htons(syn_skb->len);
        iph->frag_off = htons(IP_DF);
        /* FIXME: what TTL should we use? */
        iph->ttl = IPDEFTTL;
        iph->protocol = IPPROTO_TCP;
        iph->saddr = ack_iph->saddr;
        iph->daddr = ack_iph->daddr;

        ip_send_check(iph);

        new_th->check = 0;
        syn_skb->csum = skb_checksum(syn_skb, tcphoff,
                         syn_skb->len - tcphoff, 0);
        new_th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                          syn_skb->len - tcphoff,
                          IPPROTO_TCP, syn_skb->csum);
    }

    /* Save syn_skb if syn retransmission is on */
    if (sysctl_ip_vs_synproxy_syn_retry > 0) {
        cp->syn_skb = skb_copy(syn_skb, GFP_ATOMIC);
        atomic_set(&cp->syn_retry_max,
               sysctl_ip_vs_synproxy_syn_retry);
    }

    /* Save info for fast_response_xmit */
    if (sysctl_ip_vs_fast_xmit && skb->dev &&
        likely(skb->dev->type == ARPHRD_ETHER) &&
        skb_mac_header_was_set(skb)) {
        struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);

        if (likely(cp->indev == NULL)) {
            cp->indev = skb->dev;
            dev_hold(cp->indev);
        }
        if (unlikely(cp->indev != skb->dev)) {
            dev_put(cp->indev);
            cp->indev = skb->dev;
            dev_hold(cp->indev);
        }
        memcpy(cp->src_hwaddr, eth->h_source, ETH_ALEN);
        memcpy(cp->dst_hwaddr, eth->h_dest, ETH_ALEN);
        IP_VS_INC_ESTATS(ip_vs_esmib, FAST_XMIT_SYNPROXY_SAVE);
        IP_VS_DBG_RL("syn_proxy_send_rs_syn netdevice:%s\n",
                 netdev_name(skb->dev));
    }

    /* count in the syn packet */
    ip_vs_in_stats(cp, skb);

    /* If xmit failed, syn_skb will be freed correctly. */
    cp->packet_xmit(syn_skb, cp, pp);

    return 1;
}
/*
 * Reuse the skb for syn proxy, called by syn_proxy_syn_rcv().
 * Do the following things:
 * 1) set tcp options;
 * 2) compute seq with the cookie func;
 * 3) set tcp seq and ack_seq;
 * 4) exchange ip addr and tcp port;
 * 5) compute iphdr and tcp check.
 */
static void syn_proxy_reuse_skb(int af, struct sk_buff *skb,
                struct ip_vs_synproxy_opt *opt)
{
    __u32 isn;
    unsigned short tmpport;
    unsigned int tcphoff;
    struct tcphdr *th;

#ifdef CONFIG_IP_VS_IPV6
    if (af == AF_INET6)
        tcphoff = sizeof(struct ipv6hdr);
    else
#endif
        tcphoff = ip_hdrlen(skb);
    th = (void *)skb_network_header(skb) + tcphoff;

    /* deal with tcp options */
    syn_proxy_parse_set_opts(skb, th, opt);

    /* get cookie */
    skb_set_transport_header(skb, tcphoff);
#ifdef CONFIG_IP_VS_IPV6
    if (af == AF_INET6)
        isn = ip_vs_synproxy_cookie_v6_init_sequence(skb, opt);
    else
#endif
        isn = ip_vs_synproxy_cookie_v4_init_sequence(skb, opt);

    /* Set syn-ack flag
     * the tcp opt in syn/ack packet : 00010010 = 0x12
     */
    ((u_int8_t *)th)[13] = 0x12;

    /* Exchange ports */
    tmpport = th->dest;
    th->dest = th->source;
    th->source = tmpport;

    /* Set seq(cookie) and ack_seq */
    th->ack_seq = htonl(ntohl(th->seq) + 1);
    th->seq = htonl(isn);

    /* Exchange addresses and compute checksums */
#ifdef CONFIG_IP_VS_IPV6
    if (af == AF_INET6) {
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct in6_addr tmpAddr;

        memcpy(&tmpAddr, &iph->saddr, sizeof(struct in6_addr));
        memcpy(&iph->saddr, &iph->daddr, sizeof(struct in6_addr));
        memcpy(&iph->daddr, &tmpAddr, sizeof(struct in6_addr));

        iph->hop_limit = sysctl_ip_vs_synproxy_synack_ttl;

        th->check = 0;
        skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
        th->check = csum_ipv6_magic(&iph->saddr, &iph->daddr,
                        skb->len - tcphoff,
                        IPPROTO_TCP, skb->csum);
    } else
#endif
    {
        struct iphdr *iph = ip_hdr(skb);
        __be32 tmpAddr;

        tmpAddr = iph->saddr;
        iph->saddr = iph->daddr;
        iph->daddr = tmpAddr;

        iph->ttl = sysctl_ip_vs_synproxy_synack_ttl;
        iph->tos = 0;

        ip_send_check(iph);

        th->check = 0;
        skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
        th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                          skb->len - tcphoff,
                          IPPROTO_TCP, skb->csum);
    }
}
/* Send RST reply */
static void send_reset(struct net *net, struct sk_buff *oldskb)
{
    struct sk_buff *nskb;
    struct tcphdr otcph, *tcph;
    unsigned int otcplen, hh_len;
    int tcphoff, needs_ack;
    const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
    struct ipv6hdr *ip6h;
#define DEFAULT_TOS_VALUE 0x0U
    const __u8 tclass = DEFAULT_TOS_VALUE;
    struct dst_entry *dst = NULL;
    u8 proto;
    struct flowi fl;

    if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
        (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
        pr_debug("addr is not unicast.\n");
        return;
    }

    proto = oip6h->nexthdr;
    tcphoff = ipv6_skip_exthdr(oldskb,
                   ((u8 *)(oip6h + 1) - oldskb->data),
                   &proto);

    if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
        pr_debug("Cannot get TCP header.\n");
        return;
    }

    otcplen = oldskb->len - tcphoff;

    /* IP header checks: fragment, too short. */
    if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
        pr_debug("proto(%d) != IPPROTO_TCP, "
             "or too short. otcplen = %d\n",
             proto, otcplen);
        return;
    }

    if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
        BUG();

    /* No RST for RST. */
    if (otcph.rst) {
        pr_debug("RST is set\n");
        return;
    }

    /* Check checksum. */
    if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen,
                IPPROTO_TCP,
                skb_checksum(oldskb, tcphoff, otcplen, 0))) {
        pr_debug("TCP checksum is invalid\n");
        return;
    }

    memset(&fl, 0, sizeof(fl));
    fl.proto = IPPROTO_TCP;
    ipv6_addr_copy(&fl.fl6_src, &oip6h->daddr);
    ipv6_addr_copy(&fl.fl6_dst, &oip6h->saddr);
    fl.fl_ip_sport = otcph.dest;
    fl.fl_ip_dport = otcph.source;
    security_skb_classify_flow(oldskb, &fl);
    dst = ip6_route_output(net, NULL, &fl);
    if (dst == NULL || dst->error) {
        dst_release(dst);
        return;
    }
    if (xfrm_lookup(net, &dst, &fl, NULL, 0))
        return;

    hh_len = (dst->dev->hard_header_len + 15) & ~15;
    nskb = alloc_skb(hh_len + 15 + dst->header_len +
             sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
             dst->trailer_len, GFP_ATOMIC);

    if (!nskb) {
        if (net_ratelimit())
            pr_debug("cannot alloc skb\n");
        dst_release(dst);
        return;
    }

    skb_dst_set(nskb, dst);

    skb_reserve(nskb, hh_len + dst->header_len);

    skb_put(nskb, sizeof(struct ipv6hdr));
    skb_reset_network_header(nskb);
    ip6h = ipv6_hdr(nskb);
    *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
    ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
    ip6h->nexthdr = IPPROTO_TCP;
    ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
    ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);

    tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
    /* Truncate to length (no data) */
    tcph->doff = sizeof(struct tcphdr) / 4;
    tcph->source = otcph.dest;
    tcph->dest = otcph.source;

    if (otcph.ack) {
        needs_ack = 0;
        tcph->seq = otcph.ack_seq;
        tcph->ack_seq = 0;
    } else {
        needs_ack = 1;
        tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn +
                      otcph.fin + otcplen -
                      (otcph.doff << 2));
        tcph->seq = 0;
    }

    /* Reset flags */
    ((u_int8_t *)tcph)[13] = 0;
    tcph->rst = 1;
    tcph->ack = needs_ack;

    tcph->window = 0;
    tcph->urg_ptr = 0;
    tcph->check = 0;

    /* Adjust TCP checksum */
    tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
                      &ipv6_hdr(nskb)->daddr,
                      sizeof(struct tcphdr), IPPROTO_TCP,
                      csum_partial(tcph,
                               sizeof(struct tcphdr),
                               0));

    nf_ct_attach(nskb, oldskb);

    ip6_local_out(nskb);
}
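/*
 * Both csum_ipv6_magic() calls above (verifying the old segment and
 * checksumming the new RST) reduce to the RFC 2460, 8.1 pseudo-header:
 * the two 16-byte addresses, a 32-bit upper-layer length, three zero
 * bytes and the next-header value, summed together with the transport
 * packet. A user-space sketch; ipv6_magic() is a stand-in, and the
 * addresses and TCP header bytes are made up for illustration.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

static uint16_t onec_sum(const uint8_t *p, size_t len, uint32_t seed)
{
    uint32_t sum = seed;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)p[i] << 8 | p[i + 1];
    if (len & 1)
        sum += (uint32_t)p[len - 1] << 8;
    return fold(sum);
}

static uint16_t ipv6_magic(const uint8_t saddr[16], const uint8_t daddr[16],
               uint32_t len, uint8_t nexthdr, uint32_t l4sum)
{
    uint32_t sum = l4sum;

    sum = onec_sum(saddr, 16, sum);
    sum = onec_sum(daddr, 16, sum);
    sum += len >> 16;        /* 32-bit length, as two 16-bit words */
    sum += len & 0xffff;
    sum += nexthdr;          /* three zero bytes, then next header */
    return (uint16_t)~fold(sum);
}

int main(void)
{
    uint8_t saddr[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x01 };
    uint8_t daddr[16] = { 0x20, 0x01, 0x0d, 0xb8, [15] = 0x02 };
    uint8_t tcp[20] = { 0x04, 0xd2, 0x00, 0x50 };    /* rest zero */
    uint32_t data = onec_sum(tcp, sizeof(tcp), 0);

    /* A result of 0 when re-summing a received segment (checksum
     * field included) means the checksum was valid. */
    printf("tcp check = 0x%04x\n",
           (unsigned)ipv6_magic(saddr, daddr, sizeof(tcp), 6, data));
    return 0;
}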
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                       netdev_features_t features)
{
    struct sk_buff *segs = ERR_PTR(-EINVAL);
    netdev_features_t enc_features;
    int ghl = GRE_HEADER_SECTION;
    struct gre_base_hdr *greh;
    u16 mac_offset = skb->mac_header;
    int mac_len = skb->mac_len;
    __be16 protocol = skb->protocol;
    int tnl_hlen;
    bool csum;

    if (unlikely(skb_shinfo(skb)->gso_type &
                ~(SKB_GSO_TCPV4 |
                  SKB_GSO_TCPV6 |
                  SKB_GSO_UDP |
                  SKB_GSO_DODGY |
                  SKB_GSO_TCP_ECN |
                  SKB_GSO_GRE |
                  SKB_GSO_IPIP)))
        goto out;

    if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
        goto out;

    greh = (struct gre_base_hdr *)skb_transport_header(skb);

    if (greh->flags & GRE_KEY)
        ghl += GRE_HEADER_SECTION;
    if (greh->flags & GRE_SEQ)
        ghl += GRE_HEADER_SECTION;
    if (greh->flags & GRE_CSUM) {
        ghl += GRE_HEADER_SECTION;
        csum = true;
    } else
        csum = false;

    if (unlikely(!pskb_may_pull(skb, ghl)))
        goto out;

    /* setup inner skb. */
    skb->protocol = greh->protocol;
    skb->encapsulation = 0;

    __skb_pull(skb, ghl);
    skb_reset_mac_header(skb);
    skb_set_network_header(skb, skb_inner_network_offset(skb));
    skb->mac_len = skb_inner_network_offset(skb);

    /* segment inner packet. */
    enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
    segs = skb_mac_gso_segment(skb, enc_features);
    if (!segs || IS_ERR(segs)) {
        skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
        goto out;
    }

    skb = segs;
    tnl_hlen = skb_tnl_header_len(skb);
    do {
        __skb_push(skb, ghl);
        if (csum) {
            __be32 *pcsum;

            if (skb_has_shared_frag(skb)) {
                int err;

                err = __skb_linearize(skb);
                if (err) {
                    kfree_skb_list(segs);
                    segs = ERR_PTR(err);
                    goto out;
                }
            }

            greh = (struct gre_base_hdr *)(skb->data);
            pcsum = (__be32 *)(greh + 1);
            *pcsum = 0;
            *(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0,
                                   skb->len,
                                   0));
        }
        __skb_push(skb, tnl_hlen - ghl);

        skb_reset_inner_headers(skb);
        skb->encapsulation = 1;

        skb_reset_mac_header(skb);
        skb_set_network_header(skb, mac_len);
        skb->mac_len = mac_len;
        skb->protocol = protocol;
    } while ((skb = skb->next));
out:
    return segs;
}
int ip6_route_me_harder(struct sk_buff *skb)
{
    struct net *net = dev_net(skb_dst(skb)->dev);
    const struct ipv6hdr *iph = ipv6_hdr(skb);
    unsigned int hh_len;
    struct dst_entry *dst;
    struct flowi6 fl6 = {
        .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
        .flowi6_mark = skb->mark,
        .daddr = iph->daddr,
        .saddr = iph->saddr,
    };
    int err;

    dst = ip6_route_output(net, skb->sk, &fl6);
    err = dst->error;
    if (err) {
        IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
        dst_release(dst);
        return err;
    }

    /* Drop old route. */
    skb_dst_drop(skb);

    skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
    if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
        xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
        skb_dst_set(skb, NULL);
        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
        if (IS_ERR(dst))
            return PTR_ERR(dst);
        skb_dst_set(skb, dst);
    }
#endif

    /* Change in oif may mean change in hh_len. */
    hh_len = skb_dst(skb)->dev->hard_header_len;
    if (skb_headroom(skb) < hh_len &&
        pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
                 0, GFP_ATOMIC))
        return -ENOMEM;

    return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

/*
 * Extra routing may be needed on local out, as the QUEUE target never
 * returns control to the table.
 */

struct ip6_rt_info {
    struct in6_addr daddr;
    struct in6_addr saddr;
    u_int32_t mark;
};

static void nf_ip6_saveroute(const struct sk_buff *skb,
                 struct nf_queue_entry *entry)
{
    struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

    if (entry->hook == NF_INET_LOCAL_OUT) {
        const struct ipv6hdr *iph = ipv6_hdr(skb);

        rt_info->daddr = iph->daddr;
        rt_info->saddr = iph->saddr;
        rt_info->mark = skb->mark;
    }
}

static int nf_ip6_reroute(struct sk_buff *skb,
              const struct nf_queue_entry *entry)
{
    struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

    if (entry->hook == NF_INET_LOCAL_OUT) {
        const struct ipv6hdr *iph = ipv6_hdr(skb);

        if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
            !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
            skb->mark != rt_info->mark)
            return ip6_route_me_harder(skb);
    }
    return 0;
}

static int nf_ip6_route(struct net *net, struct dst_entry **dst,
            struct flowi *fl, bool strict)
{
    static const struct ipv6_pinfo fake_pinfo;
    static const struct inet_sock fake_sk = {
        /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
        .sk.sk_bound_dev_if = 1,
        .pinet6 = (struct ipv6_pinfo *)&fake_pinfo,
    };
    const void *sk = strict ? &fake_sk : NULL;
    struct dst_entry *result;
    int err;

    result = ip6_route_output(net, sk, &fl->u.ip6);
    err = result->error;
    if (err)
        dst_release(result);
    else
        *dst = result;
    return err;
}

__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
            unsigned int dataoff, u_int8_t protocol)
{
    const struct ipv6hdr *ip6h = ipv6_hdr(skb);
    __sum16 csum = 0;

    switch (skb->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
            break;
        if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                     skb->len - dataoff, protocol,
                     csum_sub(skb->csum,
                          skb_checksum(skb, 0, dataoff, 0)))) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            break;
        }
        /* fall through */
    case CHECKSUM_NONE:
        skb->csum = ~csum_unfold(
                csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                        skb->len - dataoff, protocol,
                        csum_sub(0, skb_checksum(skb, 0,
                                      dataoff, 0))));
        csum = __skb_checksum_complete(skb);
    }
    return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);

static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
                       unsigned int dataoff, unsigned int len,
                       u_int8_t protocol)
{
    const struct ipv6hdr *ip6h = ipv6_hdr(skb);
    __wsum hsum;
    __sum16 csum = 0;

    switch (skb->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (len == skb->len - dataoff)
            return nf_ip6_checksum(skb, hook, dataoff, protocol);
        /* fall through */
    case CHECKSUM_NONE:
        hsum = skb_checksum(skb, 0, dataoff, 0);
        skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
                             &ip6h->daddr,
                             skb->len - dataoff,
                             protocol,
                             csum_sub(0, hsum)));
        skb->ip_summed = CHECKSUM_NONE;
        return __skb_checksum_complete_head(skb, dataoff + len);
    }
    return csum;
}

static const struct nf_ipv6_ops ipv6ops = {
    .chk_addr = ipv6_chk_addr,
};

static const struct nf_afinfo nf_ip6_afinfo = {
    .family = AF_INET6,
    .checksum = nf_ip6_checksum,
    .checksum_partial = nf_ip6_checksum_partial,
    .route = nf_ip6_route,
    .saveroute = nf_ip6_saveroute,
    .reroute = nf_ip6_reroute,
    .route_key_size = sizeof(struct ip6_rt_info),
};

int __init ipv6_netfilter_init(void)
{
    RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
    return nf_register_afinfo(&nf_ip6_afinfo);
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
    RCU_INIT_POINTER(nf_ipv6_ops, NULL);
    nf_unregister_afinfo(&nf_ip6_afinfo);
}