/*
 * __gre_xmit - build a GRE header on @skb and pass it to the IP tunnel layer.
 * @skb:        packet to encapsulate (consumed by ip_tunnel_xmit)
 * @dev:        GRE tunnel net_device whose priv holds the tunnel config
 * @tnl_params: outer IP header template for the tunnel
 * @proto:      inner protocol id carried in the GRE header
 *
 * Instrumented variant: records a TSC timestamp on entry for GRE and
 * GRETAP frames before encapsulating.
 */
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info info;

	/* Entry timestamps for the measurement probes. */
	if (OVTYPE_IS_GRE(skb))
		gre_gre_xmit_in(skb) = rdtsc();
	if (OVTYPE_IS_GRETAP(skb))
		gretap_gre_xmit_in(skb) = rdtsc();

	/* Bump the output sequence number only when sequencing is on;
	 * the seq field itself is always filled in. */
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	info.proto = proto;
	info.flags = tunnel->parms.o_flags;
	info.key   = tunnel->parms.o_key;
	info.seq   = htonl(tunnel->o_seqno);

	/* Push GRE header. */
	gre_build_header(skb, &info, tunnel->tun_hlen);

	skb_set_inner_protocol(skb, info.proto);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
/*
 * __gre_xmit - encapsulate @skb in a GRE header and transmit via the tunnel.
 * @skb:        packet to encapsulate
 * @dev:        tunnel net_device (priv area holds struct ip_tunnel)
 * @tnl_params: outer IP header template
 * @proto:      inner protocol id for the GRE header
 *
 * Drops the packet (counting it in tx_dropped) if gre_build_header()
 * fails; otherwise hands the encapsulated skb to ip_tunnel_xmit().
 */
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info tpi;

	/* Record where the inner headers start before we prepend the
	 * outer ones, unless an earlier layer already marked them. */
	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	tpi.flags = tunnel->parms.o_flags;
	tpi.proto = proto;
	tpi.key = tunnel->parms.o_key;
	/* Advance the sequence counter only when TUNNEL_SEQ is set;
	 * the seq field is populated unconditionally. */
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;
	tpi.seq = htonl(tunnel->o_seqno);

	/* Push GRE header.  May reallocate or free the skb on failure,
	 * so the returned pointer must be checked. */
	skb = gre_build_header(skb, &tpi, tunnel->hlen);
	if (unlikely(!skb)) {
		dev->stats.tx_dropped++;
		return;
	}

	ip_tunnel_xmit(skb, dev, tnl_params);
}
/*
 * __gre_xmit - prepend a GRE header to @skb and send it down the tunnel.
 * @skb:        packet to encapsulate (consumed by ip_tunnel_xmit)
 * @dev:        GRE tunnel net_device
 * @tnl_params: outer IP header template
 * @proto:      inner protocol id for the GRE header
 *
 * Variant where gre_build_header() takes the header fields directly
 * instead of a struct tnl_ptk_info.
 */
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be32 seq;

	/* Only sequenced tunnels advance the counter; the value is
	 * transmitted either way. */
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;
	seq = htonl(tunnel->o_seqno);

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
			 proto, tunnel->parms.o_key, seq);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
/*
 * __build_header - GSO-prepare @skb and push a GRE header for OVS egress.
 * @skb:         packet being sent out the vport
 * @tunnel_hlen: length of the GRE header to push
 *
 * Builds the header from the per-packet egress tunnel key stashed in
 * OVS_CB(skb).  Returns the (possibly reallocated) skb on success, or
 * NULL if gre_handle_offloads() failed.
 *
 * NOTE(review): on failure the original skb has already been disposed of
 * by gre_handle_offloads() and the error code is dropped here — callers
 * are expected to test for NULL, not IS_ERR(); confirm against the
 * caller in this file's send path.
 */
static struct sk_buff *__build_header(struct sk_buff *skb, int tunnel_hlen)
{
	const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->egress_tun_key;
	struct tnl_ptk_info tpi;

	/* Apply checksum/GSO fixups; may replace or free the skb. */
	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		return NULL;

	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
	tpi.proto = htons(ETH_P_TEB);	/* inner payload is an Ethernet frame */
	/* GRE key is only 32 bits; take the low half of the 64-bit tun_id. */
	tpi.key = be64_get_low32(tun_key->tun_id);
	tpi.seq = 0;
	gre_build_header(skb, &tpi, tunnel_hlen);

	return skb;
}
/*
 * __gre_xmit - wrap @skb in a GRE header and hand it to ip_tunnel_xmit().
 * @skb:        packet to encapsulate (consumed by ip_tunnel_xmit)
 * @dev:        GRE tunnel net_device whose priv holds the tunnel state
 * @tnl_params: outer IP header template
 * @proto:      inner protocol id carried in the GRE header
 */
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params, __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct tnl_ptk_info info;

	/* Advance the output sequence number only for sequenced
	 * tunnels; the field itself is always transmitted. */
	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	info.proto = proto;
	info.flags = tunnel->parms.o_flags;
	info.key   = tunnel->parms.o_key;
	info.seq   = htonl(tunnel->o_seqno);

	/* Push GRE header. */
	gre_build_header(skb, &info, tunnel->hlen);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}