static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path)
{
	struct iphdr *iph = ip_hdr(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	struct sk_buff *skb1 = skb;
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet to gso, to get around segmentation bug on older kernel. */
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
	 * make copy of it to restore it back. */
	memcpy(cb, skb->cb, sizeof(cb));

	segs = __skb_gso_segment(skb, 0, tx_path);
	if (!segs || IS_ERR(segs))
		goto free;

	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		skb->mac_len = 0;

		memcpy(ip_hdr(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));
		if (OVS_GSO_CB(skb)->fix_segment)
			OVS_GSO_CB(skb)->fix_segment(skb);

		skb->protocol = proto;
		skb = skb->next;
	}
free:
	consume_skb(skb1);
	return segs;
}
void rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		       __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
		       __be16 df, bool xnet)
{
	struct net_device *dev = skb->dev;
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);

#if 0
	/* Do not clear ovs_skb_cb.  It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= proto;
	iph->tos	= tos;
	iph->daddr	= dst;
	iph->saddr	= src;
	iph->ttl	= ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#elif defined(HAVE_IP_SELECT_IDENT_USING_NET)
	__ip_select_ident(dev_net(rt->dst.dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
#else
	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;

	iptunnel_xmit_stats(dev, pkt_len);
}
void rpl_ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
			struct net_device *dev)
{
	int pkt_len, err;

	pkt_len = skb->len - skb_inner_network_offset(skb);
#ifdef HAVE_IP6_LOCAL_OUT_SK
	err = ip6_local_out_sk(sk, skb);
#else
	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
#endif
	if (net_xmit_eval(err))
		pkt_len = -1;

	iptunnel_xmit_stats(dev, pkt_len);
}
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
		   __be16 df, bool xnet)
{
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct net_device *dev = skb->dev;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= proto;
	iph->tos	= tos;
	iph->daddr	= dst;
	iph->saddr	= src;
	iph->ttl	= ttl;
	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out(net, sk, skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;

	iptunnel_xmit_stats(dev, pkt_len);
}
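For reference, a hedged caller-side sketch of iptunnel_xmit(): the route lookup, the flowi4 setup, and the helper name xmit_one_tunnel_skb() are illustrative assumptions, not part of the code above. It assumes the caller has already pushed the tunnel (here UDP) header onto the skb and only needs the outer IPv4 header built and the packet sent.

#include <net/route.h>
#include <net/ip_tunnels.h>

/* Hypothetical illustration only: hand an already-encapsulated skb to
 * iptunnel_xmit(). Hard-coded df = 0 and xnet = false keep the sketch short.
 */
static int xmit_one_tunnel_skb(struct net *net, struct sock *sk,
			       struct sk_buff *skb,
			       __be32 saddr, __be32 daddr, __u8 tos, __u8 ttl)
{
	struct flowi4 fl4;
	struct rtable *rt;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* iptunnel_xmit() pushes the outer IPv4 header, selects the IP ID,
	 * transmits via ip_local_out() and updates the tunnel stats. */
	iptunnel_xmit(sk, rt, skb, fl4.saddr, daddr, IPPROTO_UDP,
		      tos, ttl, 0, false);
	return 0;
}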
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t enc_features;
	int ghl = GRE_HEADER_SECTION;
	struct gre_base_hdr *greh;
	u16 mac_offset = skb->mac_header;
	int mac_len = skb->mac_len;
	__be16 protocol = skb->protocol;
	int tnl_hlen;
	bool csum;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_IPIP)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
		goto out;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);

	if (greh->flags & GRE_KEY)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_SEQ)
		ghl += GRE_HEADER_SECTION;
	if (greh->flags & GRE_CSUM) {
		ghl += GRE_HEADER_SECTION;
		csum = true;
	} else
		csum = false;

	if (unlikely(!pskb_may_pull(skb, ghl)))
		goto out;

	/* setup inner skb. */
	skb->protocol = greh->protocol;
	skb->encapsulation = 0;

	__skb_pull(skb, ghl);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);

	/* segment inner packet. */
	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
	segs = skb_mac_gso_segment(skb, enc_features);
	if (!segs || IS_ERR(segs)) {
		skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
		goto out;
	}

	skb = segs;
	tnl_hlen = skb_tnl_header_len(skb);
	do {
		__skb_push(skb, ghl);
		if (csum) {
			__be32 *pcsum;

			if (skb_has_shared_frag(skb)) {
				int err;

				err = __skb_linearize(skb);
				if (err) {
					kfree_skb_list(segs);
					segs = ERR_PTR(err);
					goto out;
				}
			}

			greh = (struct gre_base_hdr *)(skb->data);
			pcsum = (__be32 *)(greh + 1);
			*pcsum = 0;
			*(__sum16 *)pcsum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
		}
		__skb_push(skb, tnl_hlen - ghl);

		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb->mac_len = mac_len;
		skb->protocol = protocol;
	} while ((skb = skb->next));
out:
	return segs;
}
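When GRE_CSUM is set, the loop above refills the checksum field of every segment with the plain Internet checksum of the GRE header plus everything behind it (csum_fold(skb_checksum(skb, 0, skb->len, 0))). The following stand-alone, user-space sketch shows the same 16-bit ones'-complement computation on a toy buffer; the buffer contents are made up for illustration and none of the kernel helpers are used.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* RFC 1071 Internet checksum: ones'-complement sum of 16-bit words,
 * folded and complemented. */
static uint16_t internet_csum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(data[i] << 8 | data[i + 1]);
	if (len & 1)			/* odd trailing byte, padded with zero */
		sum += (uint32_t)(data[len - 1] << 8);
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* GRE base header with the C bit set, protocol 0x0800, checksum and
	 * reserved fields zeroed, followed by a dummy payload. The checksum
	 * is written into bytes 4-5 after summing the whole buffer. */
	uint8_t pkt[] = { 0x80, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,
			  0xde, 0xad, 0xbe, 0xef };
	uint16_t csum = internet_csum(pkt, sizeof(pkt));

	pkt[4] = csum >> 8;
	pkt[5] = csum & 0xff;
	printf("GRE checksum: 0x%04x\n", csum);
	return 0;
}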
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   bool tx_path,
					   sa_family_t sa_family)
{
	void *iph = skb_network_header(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	int outer_l3_offset = skb_network_offset(skb);
	int outer_l4_offset = skb_transport_offset(skb);
	struct sk_buff *skb1 = skb;
	struct dst_entry *dst = skb_dst(skb);
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	BUILD_BUG_ON(sizeof(struct ovs_gso_cb) > FIELD_SIZEOF(struct sk_buff, cb));
	OVS_GSO_CB(skb)->ipv6 = (sa_family == AF_INET6);

	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet to gso, to get around segmentation bug on older kernel. */
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
	 * make copy of it to restore it back. */
	memcpy(cb, skb->cb, sizeof(cb));

	skb->encapsulation = 0;
	/* We are handling offloads by segmenting l3 packet, so
	 * no need to call OVS compat segmentation function. */
#ifdef HAVE___SKB_GSO_SEGMENT
#undef __skb_gso_segment
	segs = __skb_gso_segment(skb, 0, tx_path);
#else
#undef skb_gso_segment
	segs = skb_gso_segment(skb, 0);
#endif
	if (!segs || IS_ERR(segs))
		goto free;

	skb = segs;
	while (skb) {
		__skb_push(skb, pkt_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, outer_l3_offset);
		skb_set_transport_header(skb, outer_l4_offset);
		skb->mac_len = 0;

		memcpy(skb_network_header(skb), iph, pkt_hlen);
		memcpy(skb->cb, cb, sizeof(cb));

		skb->protocol = proto;
		if (skb->next)
			dst = dst_clone(dst);

		skb_dst_set(skb, dst);
		OVS_GSO_CB(skb)->fix_segment(skb);

		skb = skb->next;
	}
free:
	consume_skb(skb1);
	return segs;
}
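The fix_segment callback invoked above is whatever the tunnel implementation stored in the OVS_GSO_CB before segmenting. The sketch below is a hypothetical, minimal IPv4-only callback that patches the outer total length and header checksum per segment; it is not the actual OVS implementation, which also has to fix the tunnel (GRE/UDP) header and handle IPv6.

/* Hypothetical example of a fix_segment callback: after the saved outer
 * headers have been copied onto each L3 segment, make the outer IPv4
 * total length and header checksum match that segment. */
static void example_ipv4_fix_segment(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len - skb_network_offset(skb));
	ip_send_check(iph);		/* recompute the IPv4 header checksum */
}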
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));

	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
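The per-segment uh->check computation above patches the saved partial sum rather than recomputing the outer UDP checksum from scratch: the length originally folded into uh->check is subtracted once (the csum_sub()/csum_unfold() step before segmenting) and each segment's own length is added back per segment. The stand-alone user-space sketch below demonstrates, on toy 16-bit values, that this patching is equivalent to recomputing the sum with the new length (up to the ones'-complement representation of zero); the helpers mirror, but do not reuse, the kernel's csum_* routines.

#include <stdint.h>
#include <stdio.h>

/* ones'-complement addition of two 16-bit quantities */
static uint16_t oc_add16(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;

	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* ones'-complement subtraction: -b is ~b in this arithmetic */
static uint16_t oc_sub16(uint16_t a, uint16_t b)
{
	return oc_add16(a, (uint16_t)~b);
}

int main(void)
{
	/* Toy "pseudo-header": two fixed words plus a length word. */
	uint16_t word_a = 0x1234, word_b = 0xabcd;
	uint16_t old_len = 64000, new_len = 1472;

	uint16_t old_sum = oc_add16(oc_add16(word_a, word_b), old_len);

	/* Recompute from scratch with the new length ... */
	uint16_t full = oc_add16(oc_add16(word_a, word_b), new_len);
	/* ... versus patching the old sum, as the partial/csum_add path does. */
	uint16_t patched = oc_add16(oc_sub16(old_sum, old_len), new_len);

	printf("recomputed: 0x%04x  patched: 0x%04x\n", full, patched);
	return 0;
}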
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo;

	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_TCP_FIXEDID |
				  SKB_GSO_GRE |
				  SKB_GSO_GRE_CSUM |
				  SKB_GSO_IPIP |
				  SKB_GSO_SIT |
				  SKB_GSO_PARTIAL)))
		goto out;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags based
	 * on the fact that we will be computing our checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		*(pcsum + 1) = 0;
		*pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}