/* Validate an IPv6 packet against the MAP downlink checksum trailer computed
 * by the hardware.  Returns 0 when the transport checksum in the packet
 * matches the value derived from the trailer.
 */
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

	if (!csum_field)
		return -EPROTONOSUPPORT;

	/* Strip the IPv6 header's contribution from the trailer checksum to
	 * obtain the checksum over the transport payload alone.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);

	/* Add the pseudo-header, then back out the checksum field itself,
	 * leaving the value the sender should have written there.
	 */
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field))
		return 0;
	else
		return -EINVAL;
}
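The checksum folding above (and in the IPv4 variant and the UDP GSO path below) leans on the 16-bit one's-complement helpers from include/net/checksum.h. A sketch of those helpers for reference; the authoritative definitions live in the kernel header:

/* 16-bit one's-complement helpers (see include/net/checksum.h).
 * csum16_add() folds the end-around carry back into the low 16 bits;
 * csum16_sub() is addition of the one's complement of the subtrahend.
 */
static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
	u16 res = (__force u16)csum;

	res += (__force u16)addend;
	return (__force __sum16)(res + (res < (__force u16)addend));
}

static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
	return csum16_add(csum, ~addend);
}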
/* Segment a GSO UDP skb into mss-sized datagrams, fixing up each segment's
 * UDP length and checksum and preserving socket write-memory accounting.
 */
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (unlikely(IS_ERR_OR_NULL(segs))) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta,
							   &sk->sk_wmem_alloc));
	}
	return segs;
}
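The csum16_add(csum16_sub(uh->check, uh->len), newlen) fix-up works because, in the CHECKSUM_PARTIAL case, uh->check holds a running (non-inverted) folded sum that already covers the UDP length word, so rewriting that word only requires subtracting the old value and adding the new one. A hypothetical, self-contained user-space demo of that arithmetic (the pseudo-header words and lengths are made up for illustration, not taken from the kernel):

/* Demo: incremental one's-complement update vs. full recomputation. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's csum16_add()/csum16_sub(). */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint16_t res = csum + addend;

	return res + (res < addend);	/* fold the end-around carry */
}

static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, ~addend);
}

/* Fold a buffer of 16-bit words into a one's-complement sum. */
static uint16_t csum16_words(const uint16_t *words, int n)
{
	uint16_t sum = 0;

	while (n--)
		sum = csum16_add(sum, *words++);
	return sum;
}

int main(void)
{
	/* Toy "pseudo-header": saddr, daddr, proto, length. */
	uint16_t words[] = { 0xc0a8, 0x0101, 0xc0a8, 0x0102, 0x0011, 0x05dc };
	uint16_t oldlen = words[5], newlen = 0x05a8;
	uint16_t check, fixed;

	check = csum16_words(words, 6);

	/* Incremental update: drop the old length word, add the new one. */
	fixed = csum16_add(csum16_sub(check, oldlen), newlen);

	/* Full recomputation over the modified buffer must agree. */
	words[5] = newlen;
	assert(fixed == csum16_words(words, 6));
	printf("old %#06x -> fixed %#06x (recomputed %#06x)\n",
	       check, fixed, csum16_words(words, 6));
	return 0;
}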
/* Validate an IPv4 packet against the MAP downlink checksum trailer.
 * Fragments are not handled here and are reported as unsupported.
 */
static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
		return -EOPNOTSUPP;

	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	if (!csum_field)
		return -EPROTONOSUPPORT;

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
		return 0;

	/* Remove the IPv4 header's contribution from the trailer checksum,
	 * then add the pseudo-header and back out the transport checksum
	 * field to recover the value the sender should have written.
	 */
	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;
		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field))
		return 0;
	else
		return -EINVAL;
}
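Both trailer validators locate the transport checksum field through rmnet_map_get_csum_field(). A minimal sketch of what that helper presumably looks like, assuming only TCP and UDP are recognized (anything else takes the -EPROTONOSUPPORT path above); the driver's actual helper may differ in detail:

/* Sketch of the checksum-field lookup used by both trailer validators:
 * return a pointer to the transport checksum field, or NULL for
 * protocols the MAP trailer logic cannot validate.
 */
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 void *txporthdr)
{
	__sum16 *ret = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		ret = &((struct tcphdr *)txporthdr)->check;
		break;
	case IPPROTO_UDP:
		ret = &((struct udphdr *)txporthdr)->check;
		break;
	default:
		/* No checksum field the trailer math can be compared against */
		ret = NULL;
		break;
	}

	return ret;
}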