/* Compatibility wrapper around the kernel's gre_handle_offloads().
 *
 * A GSO packet that is already encapsulated cannot be handled by this
 * compat layer, so such packets are rejected with -ENOSYS before the
 * skb is passed on to the real kernel helper.
 */
static int rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
	bool unsupported = skb_is_gso(skb) && skb_is_encapsulated(skb);

	if (unsupported)
		return -ENOSYS;

	/* A compat macro remaps gre_handle_offloads to this function;
	 * undefine it here so the call below reaches the kernel's own
	 * implementation instead of recursing into this wrapper.
	 */
#undef gre_handle_offloads
	return gre_handle_offloads(skb, gre_csum);
}
/* Prepare an skb for transmission through an IP tunnel, handling GSO and
 * checksum-offload bookkeeping across kernel versions.
 *
 * @skb:           packet being encapsulated.
 * @csum_help:     if true, resolve a pending CHECKSUM_PARTIAL in software
 *                 before transmission.
 * @gso_type_mask: SKB_GSO_* bits to OR into the packet's gso_type.
 * @fix_segment:   compat segmentation fixup callback; only stored on
 *                 kernels older than 3.18 (see below).
 *
 * Returns 0 on success or a negative errno.  The skb is NOT freed on
 * error; the caller keeps ownership.
 */
int ovs_iptunnel_handle_offloads(struct sk_buff *skb, bool csum_help,
				 int gso_type_mask,
				 void (*fix_segment)(struct sk_buff *))
{
	int err;

	if (likely(!skb_is_encapsulated(skb))) {
		/* First level of encapsulation: record the current headers
		 * as the inner headers before the caller pushes outer ones.
		 */
		skb_reset_inner_headers(skb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
		/* skb->encapsulation only exists on 3.8+. */
		skb->encapsulation = 1;
#endif
	} else if (skb_is_gso(skb)) {
		/* Double encapsulation of a GSO packet is not supported
		 * by this compat layer.
		 */
		err = -ENOSYS;
		goto error;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
	/* NOTE(review): on pre-3.18 kernels the compat GSO path uses a
	 * per-skb fixup callback; presumably a non-zero gso_type_mask means
	 * the kernel can segment natively, so the callback is suppressed —
	 * confirm against the compat segmentation code.
	 */
	if (gso_type_mask)
		fix_segment = NULL;
	OVS_GSO_CB(skb)->fix_segment = fix_segment;
#endif

	if (skb_is_gso(skb)) {
		/* gso_type lives in the shared info area, so make sure we
		 * own it before modifying it.
		 */
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			goto error;
		skb_shinfo(skb)->gso_type |= gso_type_mask;
		return 0;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
	/* If packet is not gso and we are resolving any partial checksum,
	 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
	 * on the outer header without confusing devices that implement
	 * NETIF_F_IP_CSUM with encapsulation.
	 */
	if (csum_help)
		skb->encapsulation = 0;
#endif

	if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
		/* Resolve the pending inner checksum in software. */
		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
error:
	return err;
}
/* Prepare an skb for transmission through an IP tunnel (older compat
 * variant that returns the skb or an ERR_PTR).
 *
 * @skb:         packet being encapsulated; ownership passes back to the
 *               caller on success and is consumed (freed) on error.
 * @csum_help:   if true, resolve a pending CHECKSUM_PARTIAL in software.
 * @fix_segment: compat segmentation fixup callback stored for GSO packets.
 *
 * Note: unlike the newer variants, this code does not reset the inner
 * headers or set skb->encapsulation here — the compat layer handles
 * that elsewhere, so those steps are intentionally absent.
 *
 * Returns @skb on success, or an ERR_PTR with the skb already freed.
 */
struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
					     bool csum_help,
					     void (*fix_segment)(struct sk_buff *))
{
	if (skb_is_gso(skb)) {
		/* Double encapsulation of GSO packets is unsupported. */
		if (skb_is_encapsulated(skb)) {
			kfree_skb(skb);
			return ERR_PTR(-ENOSYS);
		}
		OVS_GSO_CB(skb)->fix_segment = fix_segment;
		return skb;
	}

	if (csum_help && skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Resolve the pending checksum in software before the
		 * outer headers are pushed.
		 */
		int err = skb_checksum_help(skb);

		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
	}

	return skb;
}
int ovs_iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask, void (*fix_segment)(struct sk_buff *)) { int err; if (likely(!skb_is_encapsulated(skb))) { skb_reset_inner_headers(skb); skb->encapsulation = 1; } else if (skb_is_gso(skb)) { err = -ENOSYS; goto error; } if (skb_is_gso(skb)) { err = skb_unclone(skb, GFP_ATOMIC); if (unlikely(err)) goto error; skb_shinfo(skb)->gso_type |= gso_type_mask; #ifndef USE_UPSTREAM_TUNNEL_GSO if (gso_type_mask) fix_segment = NULL; OVS_GSO_CB(skb)->fix_segment = fix_segment; #endif return 0; } if (skb->ip_summed != CHECKSUM_PARTIAL) { skb->ip_summed = CHECKSUM_NONE; skb->encapsulation = 0; } return 0; error: return err; }