static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	/* Profiling instrumentation: record a TSC timestamp on entry. */
	if (OVTYPE_IS_GRETAP(skb))
		gretap_gre_tap_xmit_in(skb) = rdtsc();

	/* Handle GSO/checksum offloads before the GRE header is pushed. */
	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
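For context, __gre_xmit() is the shared tail of both GRE transmit paths shown in this section. A minimal sketch of its shape, following the mainline ip_gre.c of the same era (field names such as tunnel->hlen vary across kernel versions, so treat this as an approximation rather than the exact body):

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info tpi;

	/* Describe the GRE header to be pushed. */
	tpi.flags = tunnel->parms.o_flags;
	tpi.proto = proto;
	tpi.key = tunnel->parms.o_key;
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;
	tpi.seq = htonl(tunnel->o_seqno);

	/* Push GRE header and hand off to the generic IP tunnel path. */
	gre_build_header(skb, &tpi, tunnel->hlen);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}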
static int rpl_gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
	/* GSO of an already-encapsulated packet is not supported here. */
	if (skb_is_gso(skb) && skb_is_encapsulated(skb))
		return -ENOSYS;

	/* Drop the compat alias so this call reaches the kernel helper. */
#undef gre_handle_offloads
	return gre_handle_offloads(skb, gre_csum);
}
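The #undef above only works because the OVS compat headers presumably alias the kernel symbol to this wrapper, along the lines of the usual datapath/linux/compat convention (an assumption; the header itself is not part of this listing):

/* Assumed compat-header alias: callers transparently get the wrapper,
 * while the #undef inside rpl_gre_handle_offloads() lets the wrapper
 * itself reach the real kernel gre_handle_offloads().
 */
#define gre_handle_offloads rpl_gre_handle_offloads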
static struct sk_buff *__build_header(struct sk_buff *skb, int tunnel_hlen)
{
	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->egress_tun_key;
	struct tnl_ptk_info tpi;

	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		return NULL;

	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
	tpi.proto = htons(ETH_P_TEB);
	tpi.key = be64_get_low32(tun_key->tun_id);
	tpi.seq = 0;
	gre_build_header(skb, &tpi, tunnel_hlen);

	return skb;
}
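The two small helpers used above come from the OVS GRE vport code; sketches of their likely shape follow (the exact flag mask may differ between versions):

/* Keep only the tunnel flags GRE can encode in its header (sketch;
 * assumed mask, per vport-gre.c of this vintage).
 */
static __be16 filter_tnl_flags(__be16 flags)
{
	return flags & (TUNNEL_CSUM | TUNNEL_KEY);
}

/* GRE key = low 32 bits of the big-endian 64-bit tunnel ID. */
static __be32 be64_get_low32(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}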
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	/* Profiling instrumentation: record a TSC timestamp on entry. */
	if (OVTYPE_IS_GRE(skb))
		gre_ipgre_xmit_in(skb) = rdtsc();

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
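The OVTYPE_IS_* guards and the rdtsc() assignments in gre_tap_xmit() and ipgre_xmit() are measurement instrumentation, not stock kernel code. One hypothetical way the per-skb timestamp slot could be backed is shown below; the real definitions are not part of this listing, and any use of skb->cb like this would have to avoid clashing with the tunnel code's own use of the control buffer:

/* Hypothetical backing for the instrumentation macros: a TSC timestamp
 * stashed with the skb as it enters the GRE transmit path.
 */
struct ovtype_ts {
	u64 xmit_in;		/* cycles at *_xmit() entry */
};

#define gre_ipgre_xmit_in(skb) \
	(((struct ovtype_ts *)(skb)->cb)->xmit_in)
#define gretap_gre_tap_xmit_in(skb) \
	(((struct ovtype_ts *)(skb)->cb)->xmit_in)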
	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
		       + tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb)) {
		skb = NULL;
		goto err_free_rt;
	}

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
		     tunnel_id_to_key(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->ipv4_dst,
			    IPPROTO_GRE, key->ipv4_tos, key->ipv4_ttl, df,
			    false);
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

	return;
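tunnel_id_to_key() here plays the same role as be64_get_low32() in __build_header() above; in the mainline flow-based GRE path it looks roughly like this (a sketch, per ip_gre.c of the same era):

/* 64-bit tunnel ID -> 32-bit GRE key: on little-endian hosts the key
 * sits in the upper half of the big-endian value.
 */
static __be32 tunnel_id_to_key(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}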