/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed. */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
{
        struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);

        if (!skb->dev) {
                OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
                return -EINVAL;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
                int err;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(skb, user);
                if (err)
                        return err;

                ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
                struct sk_buff *reasm;

                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                reasm = nf_ct_frag6_gather(skb, user);
                if (!reasm)
                        return -EINPROGRESS;

                if (skb == reasm) {
                        kfree_skb(skb);
                        return -EINVAL;
                }

                /* Don't free 'skb' even though it is one of the original
                 * fragments, as we're going to morph it into the head.
                 */
                skb_get(skb);
                nf_ct_frag6_consume_orig(reasm);

                key->ip.proto = ipv6_hdr(reasm)->nexthdr;
                skb_morph(skb, reasm);
                skb->next = reasm->next;
                consume_skb(reasm);
                ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
        } else {
                kfree_skb(skb);
                return -EPFNOSUPPORT;
        }

        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        *OVS_GSO_CB(skb) = ovs_cb;

        return 0;
}
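For reference, a minimal caller sketch (hypothetical; the real OVS conntrack caller differs) honoring the ownership contract documented above: 0 leaves 'skb' with the caller, -EINPROGRESS means the defragmenter stole it, and any other error means it was already freed.

/* Hypothetical caller illustrating the ownership contract of
 * handle_fragments(); process_packet() is an assumed downstream hook. */
static int defrag_and_continue(struct net *net, struct sw_flow_key *key,
                               u16 zone, struct sk_buff *skb)
{
        int err = handle_fragments(net, key, zone, skb);

        if (err) {
                /* Whether stolen (-EINPROGRESS) or freed (other errors),
                 * 'skb' must not be touched again on this path. */
                return err;
        }

        /* On success, 'skb' now holds the reassembled packet. */
        return process_packet(net, key, skb);
}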
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
                                           netdev_features_t features,
                                           bool tx_path)
{
        struct iphdr *iph = ip_hdr(skb);
        int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
        int mac_offset = skb_inner_mac_offset(skb);
        struct sk_buff *skb1 = skb;
        struct sk_buff *segs;
        __be16 proto = skb->protocol;
        char cb[sizeof(skb->cb)];

        /* Set up the whole inner packet to get its protocol. */
        __skb_pull(skb, mac_offset);
        skb->protocol = __skb_network_protocol(skb);

        /* Set up an L3 packet for GSO to work around a segmentation bug on
         * older kernels. */
        __skb_pull(skb, (pkt_hlen - mac_offset));
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        /* From the 3.9 kernel on, skb->cb is used by skb GSO, so make a
         * copy here to restore it afterwards. */
        memcpy(cb, skb->cb, sizeof(cb));

        segs = __skb_gso_segment(skb, 0, tx_path);
        if (!segs || IS_ERR(segs))
                goto free;

        skb = segs;
        while (skb) {
                __skb_push(skb, pkt_hlen);
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb_set_transport_header(skb, sizeof(struct iphdr));
                skb->mac_len = 0;

                memcpy(ip_hdr(skb), iph, pkt_hlen);
                memcpy(skb->cb, cb, sizeof(cb));
                if (OVS_GSO_CB(skb)->fix_segment)
                        OVS_GSO_CB(skb)->fix_segment(skb);

                skb->protocol = proto;
                skb = skb->next;
        }
free:
        consume_skb(skb1);
        return segs;
}
int rpl_ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        if (!OVS_GSO_CB(skb)->fix_segment)
                return output_ipv6(skb);

        if (skb_is_gso(skb)) {
                int ret;

                skb = tnl_skb_gso_segment(skb, 0, false, AF_INET6);
                if (!skb || IS_ERR(skb))
                        return NET_XMIT_DROP;

                do {
                        struct sk_buff *next_skb = skb->next;

                        skb->next = NULL;
                        ret = output_ipv6(skb);
                        skb = next_skb;
                } while (skb);
                return ret;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int err;

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        return NET_XMIT_DROP;
        }

        return output_ipv6(skb);
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed. */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
{
        struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);
        int err;

        if (!skb->dev) {
                OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
                return -EINVAL;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(net, skb, user);
                if (err)
                        return err;

                ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

                skb_orphan(skb);
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err)
                        return err;

                key->ip.proto = ipv6_hdr(skb)->nexthdr;
                ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
        } else {
                kfree_skb(skb);
                return -EPFNOSUPPORT;
        }

        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        *OVS_GSO_CB(skb) = ovs_cb;

        return 0;
}
int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
                                 bool csum_help, int gso_type_mask,
                                 void (*fix_segment)(struct sk_buff *))
{
        int err;

        if (likely(!skb_is_encapsulated(skb))) {
                skb_reset_inner_headers(skb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
                skb->encapsulation = 1;
#endif
        } else if (skb_is_gso(skb)) {
                err = -ENOSYS;
                goto error;
        }

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
        if (gso_type_mask)
                fix_segment = NULL;

        OVS_GSO_CB(skb)->fix_segment = fix_segment;
#endif

        if (skb_is_gso(skb)) {
                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        goto error;
                skb_shinfo(skb)->gso_type |= gso_type_mask;
                return 0;
        }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
        /* If packet is not gso and we are resolving any partial checksum,
         * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
         * on the outer header without confusing devices that implement
         * NETIF_F_IP_CSUM with encapsulation.
         */
        if (csum_help)
                skb->encapsulation = 0;
#endif

        if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error;
        } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                skb->ip_summed = CHECKSUM_NONE;

        return 0;
error:
        return err;
}
struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
                                             bool csum_help,
                                             void (*fix_segment)(struct sk_buff *))
{
        int err;

        /* XXX: synchronize inner header reset for compat and non-compat code
         * so that we can do it here.
         */
        /* skb_reset_inner_headers(skb); */

        /* OVS compat code does not maintain the encapsulation bit.
         * skb->encapsulation = 1; */

        if (skb_is_gso(skb)) {
                if (skb_is_encapsulated(skb)) {
                        err = -ENOSYS;
                        goto error;
                }

                OVS_GSO_CB(skb)->fix_segment = fix_segment;
                return skb;
        }

        /* If packet is not gso and we are resolving any partial checksum,
         * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
         * on the outer header without confusing devices that implement
         * NETIF_F_IP_CSUM with encapsulation.
         */
        /* if (csum_help)
         *         skb->encapsulation = 0; */

        if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error;
        } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                skb->ip_summed = CHECKSUM_NONE;

        return skb;
error:
        kfree_skb(skb);
        return ERR_PTR(err);
}
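A hedged usage sketch for this sk_buff *-returning variant: unlike the int-returning versions, failure comes back as an ERR_PTR() and 'skb' has already been freed, so the caller must reassign its pointer and test it with IS_ERR(). my_tunnel_xmit() and my_fix_segment() are illustrative names, not code from the OVS tree.

/* Illustrative caller; my_fix_segment() and the surrounding transmit
 * path are assumptions. */
static int my_tunnel_xmit(struct sk_buff *skb)
{
        skb = ovs_iptunnel_handle_offloads(skb, true, my_fix_segment);
        if (IS_ERR(skb))
                return PTR_ERR(skb);    /* skb was already freed on error */

        /* ... push outer headers and hand off to the IP stack ... */
        return 0;
}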
int ovs_iptunnel_handle_offloads(struct sk_buff *skb,
                                 int gso_type_mask,
                                 void (*fix_segment)(struct sk_buff *))
{
        int err;

        if (likely(!skb_is_encapsulated(skb))) {
                skb_reset_inner_headers(skb);
                skb->encapsulation = 1;
        } else if (skb_is_gso(skb)) {
                err = -ENOSYS;
                goto error;
        }

        if (skb_is_gso(skb)) {
                err = skb_unclone(skb, GFP_ATOMIC);
                if (unlikely(err))
                        goto error;
                skb_shinfo(skb)->gso_type |= gso_type_mask;

#ifndef USE_UPSTREAM_TUNNEL_GSO
                if (gso_type_mask)
                        fix_segment = NULL;

                OVS_GSO_CB(skb)->fix_segment = fix_segment;
#endif
                return 0;
        }

        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                skb->ip_summed = CHECKSUM_NONE;
                skb->encapsulation = 0;
        }

        return 0;
error:
        return err;
}
struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
        int err;

        skb_reset_inner_headers(skb);

        if (skb_is_gso(skb)) {
                if (gre_csum)
                        OVS_GSO_CB(skb)->fix_segment = gre_csum_fix;
        } else {
                if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
                        err = skb_checksum_help(skb);
                        if (err)
                                goto error;
                } else if (skb->ip_summed != CHECKSUM_PARTIAL)
                        skb->ip_summed = CHECKSUM_NONE;
        }

        return skb;
error:
        kfree_skb(skb);
        return ERR_PTR(err);
}
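gre_csum_fix() itself is not shown in this section. As a rough sketch of what such a fix_segment callback does (an assumption, not the actual OVS implementation), it zeroes and recomputes the GRE checksum that segmentation invalidated in each resulting segment:

/* Sketch of a per-segment GRE checksum fixer; this is not the real
 * gre_csum_fix() and assumes the checksum word directly follows the
 * GRE base header. */
static void gre_csum_fix_sketch(struct sk_buff *skb)
{
        struct gre_base_hdr *greh;
        int gre_offset = skb_transport_offset(skb);

        greh = (struct gre_base_hdr *)skb_transport_header(skb);

        /* Zero the checksum field, then fold a checksum over the GRE
         * header and payload of this segment. */
        *(__sum16 *)(greh + 1) = 0;
        *(__sum16 *)(greh + 1) =
                csum_fold(skb_checksum(skb, gre_offset,
                                       skb->len - gre_offset, 0));
}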
int rpl_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        if (!OVS_GSO_CB(skb)->fix_segment)
                return output_ip(skb);

        /* Leaving this bit set can confuse some drivers on older kernels. */
        skb->encapsulation = 0;

        if (skb_is_gso(skb)) {
                int ret;
                int id;

                skb = tnl_skb_gso_segment(skb, 0, false, AF_INET);
                if (!skb || IS_ERR(skb))
                        return NET_XMIT_DROP;

                id = ntohs(ip_hdr(skb)->id);
                do {
                        struct sk_buff *next_skb = skb->next;

                        skb->next = NULL;
                        ip_hdr(skb)->id = htons(id++);
                        ret = output_ip(skb);
                        skb = next_skb;
                } while (skb);
                return ret;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int err;

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        return NET_XMIT_DROP;
        }

        return output_ip(skb);
}
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
                                           netdev_features_t features,
                                           bool tx_path,
                                           sa_family_t sa_family)
{
        void *iph = skb_network_header(skb);
        int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
        int mac_offset = skb_inner_mac_offset(skb);
        int outer_l3_offset = skb_network_offset(skb);
        int outer_l4_offset = skb_transport_offset(skb);
        struct sk_buff *skb1 = skb;
        struct dst_entry *dst = skb_dst(skb);
        struct sk_buff *segs;
        __be16 proto = skb->protocol;
        char cb[sizeof(skb->cb)];

        BUILD_BUG_ON(sizeof(struct ovs_gso_cb) > FIELD_SIZEOF(struct sk_buff, cb));
        OVS_GSO_CB(skb)->ipv6 = (sa_family == AF_INET6);

        /* Set up the whole inner packet to get its protocol. */
        __skb_pull(skb, mac_offset);
        skb->protocol = __skb_network_protocol(skb);

        /* Set up an L3 packet for GSO to work around a segmentation bug on
         * older kernels. */
        __skb_pull(skb, (pkt_hlen - mac_offset));
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        /* From the 3.9 kernel on, skb->cb is used by skb GSO, so make a
         * copy here to restore it afterwards. */
        memcpy(cb, skb->cb, sizeof(cb));

        skb->encapsulation = 0;

        /* We are handling offloads by segmenting an L3 packet, so there is
         * no need to call the OVS compat segmentation function. */
#ifdef HAVE___SKB_GSO_SEGMENT
#undef __skb_gso_segment
        segs = __skb_gso_segment(skb, 0, tx_path);
#else
#undef skb_gso_segment
        segs = skb_gso_segment(skb, 0);
#endif
        if (!segs || IS_ERR(segs))
                goto free;

        skb = segs;
        while (skb) {
                __skb_push(skb, pkt_hlen);
                skb_reset_mac_header(skb);
                skb_set_network_header(skb, outer_l3_offset);
                skb_set_transport_header(skb, outer_l4_offset);
                skb->mac_len = 0;

                memcpy(skb_network_header(skb), iph, pkt_hlen);
                memcpy(skb->cb, cb, sizeof(cb));

                skb->protocol = proto;
                if (skb->next)
                        dst = dst_clone(dst);

                skb_dst_set(skb, dst);
                OVS_GSO_CB(skb)->fix_segment(skb);

                skb = skb->next;
        }
free:
        consume_skb(skb1);
        return segs;
}
static void tnl_fix_segment(struct sk_buff *skb)
{
        if (OVS_GSO_CB(skb)->fix_segment)
                OVS_GSO_CB(skb)->fix_segment(skb);
}
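All of the helpers above funnel through OVS_GSO_CB(). For orientation, here is a reconstruction of the control block implied by the fields this section touches (dp_cb.mru, fix_segment, ipv6); it is a sketch assembled from usage, not necessarily the exact layout in the OVS compat headers.

/* Reconstructed from the fields used in this section; the real
 * definition in the OVS compat headers may differ. */
struct ovs_gso_cb {
        struct ovs_skb_cb dp_cb;                /* datapath cb; holds mru */
        void (*fix_segment)(struct sk_buff *);  /* per-segment fixup hook */
        bool ipv6;                              /* outer header family */
};
#define OVS_GSO_CB(skb) ((struct ovs_gso_cb *)(skb)->cb)

The BUILD_BUG_ON in tnl_skb_gso_segment() above enforces that whatever the real layout is, it must fit inside the 48-byte skb->cb area.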