/* Receive one GRE-over-IPv4 packet that has already been parsed into @tpi.
 *
 * Looks up the matching tunnel device and hands the skb to the generic
 * IP-tunnel receive path.  For collect_md (lightweight/flow-based) tunnels
 * the per-packet tunnel metadata is attached as a metadata_dst first.
 *
 * Returns PACKET_RCVD when a tunnel consumed the skb, PACKET_REJECT
 * otherwise (caller is then responsible for the skb).
 */
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	/* ETH_P_TEB (transparent Ethernet bridging) payloads belong to
	 * gretap devices; everything else goes to plain ipgre devices. */
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		skb_pop_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			/* Only CSUM and KEY are meaningful to forward as
			 * per-flow tunnel metadata. */
			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key_to_tunnel_id(tpi->key);
			/* Builds a metadata_dst from the outer IP header;
			 * allocation may fail under memory pressure. */
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}
		/* ip_tunnel_rcv() takes ownership of skb and tun_dst. */
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}
/* Parse the GRE base header that follows the IP header at @iph.
 *
 * Outputs:
 *   @flags    - raw GRE flag bits from the header.
 *   @tun_id   - tunnel id built from the GRE key (and sequence number for
 *               64-bit keys); 0 when no key is present.
 *   @is_gre64 - true when both KEY and SEQ are present, i.e. the 64-bit
 *               key extension is in use.
 *
 * Returns the total GRE header length in bytes on success, or -EINVAL for
 * unsupported headers (version != 0, routing present, or a non-TEB
 * protocol).  Caller must have validated that the full header is linear in
 * the skb; only the IHL is assumed pre-checked (see comment below).
 */
static int parse_header(struct iphdr *iph, __be16 *flags, __be64 *tun_id,
			bool *is_gre64)
{
	/* IP and ICMP protocol handlers check that the IHL is valid. */
	struct gre_base_hdr *greh = (struct gre_base_hdr *)((u8 *)iph +
							    (iph->ihl << 2));
	/* Optional fields (checksum, key, sequence) follow the base header
	 * as a sequence of 32-bit words. */
	__be32 *options = (__be32 *)(greh + 1);
	int hdr_len;

	*flags = greh->flags;

	/* Only GRE version 0 without source routing is supported. */
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	/* Only transparent Ethernet bridging payloads are accepted. */
	if (unlikely(greh->protocol != htons(ETH_P_TEB)))
		return -EINVAL;

	hdr_len = GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM) {
		/* Checksum + reserved word; skip it (not verified here). */
		hdr_len += GRE_HEADER_SECTION;
		options++;
	}

	if (greh->flags & GRE_KEY) {
		__be32 seq;
		__be32 gre_key;

		gre_key = *options;
		hdr_len += GRE_HEADER_SECTION;
		options++;

		if (greh->flags & GRE_SEQ) {
			/* KEY + SEQ together carry a 64-bit tunnel id. */
			seq = *options;
			*is_gre64 = true;
		} else {
			seq = 0;
			*is_gre64 = false;
		}

		*tun_id = key_to_tunnel_id(gre_key, seq);
	} else {
		*tun_id = 0;
		/* Ignore GRE seq if there is no key present. */
		*is_gre64 = false;
	}

	/* The sequence word still occupies header space even when it was
	 * ignored above. */
	if (greh->flags & GRE_SEQ)
		hdr_len += GRE_HEADER_SECTION;

	return hdr_len;
}
/* Receive one GRE-over-IPv4 packet that has already been parsed into @tpi.
 *
 * Same flow as the generic ipgre receive path, but for collect_md tunnels
 * the tunnel metadata_dst is allocated and populated by hand instead of
 * via a helper: outer addresses/tos/ttl are copied from the IP header and
 * the GRE key becomes the tunnel id.
 *
 * Returns PACKET_RCVD when a tunnel consumed the skb, PACKET_REJECT
 * otherwise.
 */
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	/* TEB payloads are for gretap devices, the rest for ipgre ones. */
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		skb_pop_mac_header(skb);
		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;

			/* GFP_ATOMIC: we are in softirq context. */
			tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
			if (!tun_dst)
				return PACKET_REJECT;

			/* Record the outer header so the flow layer can
			 * match on it. */
			info = &tun_dst->u.tun_info;
			info->key.ipv4_src = iph->saddr;
			info->key.ipv4_dst = iph->daddr;
			info->key.ipv4_tos = iph->tos;
			info->key.ipv4_ttl = iph->ttl;

			info->mode = IP_TUNNEL_INFO_RX;
			/* Only CSUM and KEY are meaningful as metadata. */
			info->key.tun_flags = tpi->flags &
					      (TUNNEL_CSUM | TUNNEL_KEY);
			info->key.tun_id = key_to_tunnel_id(tpi->key);

			/* GRE has no transport ports. */
			info->key.tp_src = 0;
			info->key.tp_dst = 0;
		}
		/* ip_tunnel_rcv() takes ownership of skb and tun_dst. */
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}
/* Called with rcu_read_lock and BH disabled. */ static int gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { struct ovs_key_ipv4_tunnel tun_key; struct ovs_net *ovs_net; struct vport *vport; __be64 key; ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); vport = rcu_dereference(ovs_net->vport_net.gre_vport); if (unlikely(!vport)) return PACKET_REJECT; key = key_to_tunnel_id(tpi->key, tpi->seq); ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key, filter_tnl_flags(tpi->flags)); ovs_vport_receive(vport, skb, &tun_key); return PACKET_RCVD; }
/* Receive one GRE packet for the OVS compat lightweight-tunnel path.
 *
 * Only TEB (Ethernet) payloads are accepted, and only when a collect_md
 * gretap tunnel exists in this netns.  The tunnel metadata is built into
 * a stack-allocated metadata_dst, ECN is decapsulated explicitly, and the
 * skb is handed to the OVS tunnel receive helper.
 *
 * Returns PACKET_RCVD when consumed, PACKET_REJECT otherwise.
 */
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct net *net = dev_net(skb->dev);
	/* On-stack metadata_dst: valid only for the duration of this call;
	 * ovs_ip_tunnel_rcv() is presumed not to keep a reference — the
	 * compat helpers are expected to copy it. */
	struct metadata_dst tun_dst;
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (tpi->proto != htons(ETH_P_TEB))
		return PACKET_REJECT;

	itn = net_generic(net, gre_tap_net_id);

	iph = ip_hdr(skb);
	/* Only the single collect_md tunnel is considered — no per-key
	 * device lookup on this path. */
	tunnel = rcu_dereference(itn->collect_md_tun);
	if (tunnel) {
		__be16 flags;
		__be64 tun_id;
		int err;

		skb_pop_mac_header(skb);
		/* Only CSUM and KEY are meaningful as tunnel metadata. */
		flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
		tun_id = key_to_tunnel_id(tpi->key);
		/* Populate the on-stack dst from the outer IP header. */
		ovs_ip_tun_rx_dst(&tun_dst, skb, flags, tun_id, 0);

		skb_reset_network_header(skb);
		/* Propagate outer ECN bits into the inner header; err > 1
		 * means the combination is invalid and the frame must be
		 * dropped (err == 1 is a loggable but tolerable case). */
		err = IP_ECN_decapsulate(iph, skb);
		if (unlikely(err)) {
			if (err > 1) {
				++tunnel->dev->stats.rx_frame_errors;
				++tunnel->dev->stats.rx_errors;
				return PACKET_REJECT;
			}
		}

		ovs_ip_tunnel_rcv(tunnel->dev, skb, &tun_dst);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}
/* Called with rcu_read_lock and BH disabled. */ static int gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { struct ovs_tunnel_info tun_info; struct ovs_net *ovs_net; struct vport *vport; __be64 key; ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ)) vport = rcu_dereference(ovs_net->vport_net.gre64_vport); else vport = rcu_dereference(ovs_net->vport_net.gre_vport); if (unlikely(!vport)) return PACKET_REJECT; key = key_to_tunnel_id(tpi->key, tpi->seq); ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, filter_tnl_flags(tpi->flags), NULL, 0); ovs_vport_receive(vport, skb, &tun_info); return PACKET_RCVD; }