static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info tpi;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	tpi.flags = tunnel->parms.o_flags;
	tpi.proto = proto;
	tpi.key = tunnel->parms.o_key;
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;
	tpi.seq = htonl(tunnel->o_seqno);

	/* Push GRE header. */
	skb = gre_build_header(skb, &tpi, tunnel->hlen);
	if (unlikely(!skb)) {
		dev->stats.tx_dropped++;
		return;
	}

	ip_tunnel_xmit(skb, dev, tnl_params);
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info tpi;

	/* Record a TSC timestamp on entry for per-layer latency instrumentation. */
	if (OVTYPE_IS_GRE(skb)) {
		gre_gre_xmit_in(skb) = rdtsc();
	}
	if (OVTYPE_IS_GRETAP(skb)) {
		gretap_gre_xmit_in(skb) = rdtsc();
	}

	tpi.flags = tunnel->parms.o_flags;
	tpi.proto = proto;
	tpi.key = tunnel->parms.o_key;
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;
	tpi.seq = htonl(tunnel->o_seqno);

	/* Push GRE header. */
	gre_build_header(skb, &tpi, tunnel->tun_hlen);

	skb_set_inner_protocol(skb, tpi.proto);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tiph;
	struct iphdr fiph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	/* Fan tunnels build their own outer IP header instead of using
	 * the configured tunnel parameters.
	 */
	if (ipip_tunnel_is_fan(tunnel)) {
		if (ipip_build_fan_iphdr(tunnel, skb, &fiph))
			goto tx_error;
		tiph = &fiph;
	} else {
		tiph = &tunnel->parms.iph;
	}

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));

	skb_set_inner_protocol(skb, proto);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info tpi;

	tpi.flags = tunnel->parms.o_flags;
	tpi.proto = proto;
	tpi.key = tunnel->parms.o_key;
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;
	tpi.seq = htonl(tunnel->o_seqno);

	/* Push GRE header. */
	gre_build_header(skb, &tpi, tunnel->hlen);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tiph = &tunnel->parms.iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tiph = &tunnel->parms.iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tiph = &tunnel->parms.iph;
	u8 ipproto;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	if (tunnel->collect_md)
		ip_md_tunnel_xmit(skb, dev, ipproto);
	else
		ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tiph = &tunnel->parms.iph;
	struct net_device *mcdev;	/* madcap device */

#ifdef OVBENCH
	/* Record a TSC timestamp on entry for latency instrumentation. */
	if (SKB_OVBENCH(skb)) {
		skb->ipip_tunnel_xmit_in = rdtsc();
	}
#endif

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	if (madcap_enable) {
		/* Divert the packet to the madcap-capable device bound to
		 * this tunnel, if any.
		 */
		mcdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
		if (mcdev && get_madcap_ops(mcdev)) {
			madcap_queue_xmit(skb, mcdev);
			return NETDEV_TX_OK;
		}
	}

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}