/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
        struct vport *vport;

        vport = ovs_netdev_get_vport(skb->dev);
        if (unlikely(!vport))
                goto error;

        if (unlikely(skb_warn_if_lro(skb)))
                goto error;

        /* Make our own copy of the packet.  Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
         */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return;

        skb_push(skb, ETH_HLEN);
        skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
        ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
        return;
error:
        kfree_skb(skb);
}
/* Transmit an skb on a flow-based GRE device using the tunnel metadata
 * attached to the skb.
 */
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
        int tunnel_hlen;
        __be16 df, flags;
        int err;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto err_free_skb;

        key = &tun_info->key;
        rt = gre_get_rt(skb, dev, &fl, key);
        if (IS_ERR(rt))
                goto err_free_skb;

        tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);

                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }
/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return RX_HANDLER_PASS;

#ifndef HAVE_METADATA_DST
        netdev_port_receive(skb, NULL);
#else
        netdev_port_receive(skb, skb_tunnel_info(skb));
#endif
        return RX_HANDLER_CONSUMED;
}
int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
                               struct net *net,
                               struct sk_buff *skb,
                               u8 ipproto,
                               __be16 tp_src,
                               __be16 tp_dst)
{
        struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
        struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
        const struct ip_tunnel_key *tun_key;
        u32 skb_mark = skb->mark;
        struct rtable *rt;
        struct flowi4 fl;

        if (unlikely(!tun_info))
                return -EINVAL;
        if (ip_tunnel_info_af(tun_info) != AF_INET)
                return -EINVAL;

        tun_key = &tun_info->key;

        /* Route lookup to get the source IP address.
         * This may need to change if the corresponding route lookup
         * in the vport ops changes.
         */
        rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
        if (IS_ERR(rt))
                return PTR_ERR(rt);

        ip_rt_put(rt);

        /* Generate egress_tun_info based on tun_info,
         * saddr, tp_src and tp_dst.
         */
        ip_tunnel_key_init(&egress_tun_info->key,
                           fl.saddr, tun_key->u.ipv4.dst,
                           tun_key->tos,
                           tun_key->ttl,
                           tp_src, tp_dst,
                           tun_key->tun_id,
                           tun_key->tun_flags);
        egress_tun_info->options_len = tun_info->options_len;
        egress_tun_info->mode = tun_info->mode;
        upcall->egress_tun_opts = ip_tunnel_info_opts(tun_info);

        return 0;
}
/* Variant of gre_fb_xmit() that builds the IPv4 flow key and performs the
 * route lookup inline instead of going through gre_get_rt().
 */
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel_info *tun_info;
        struct net *net = dev_net(dev);
        const struct ip_tunnel_key *key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
        int tunnel_hlen;
        __be16 df, flags;
        int err;

        tun_info = skb_tunnel_info(skb, AF_INET);
        if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX))
                goto err_free_skb;

        key = &tun_info->key;
        memset(&fl, 0, sizeof(fl));
        fl.daddr = key->ipv4_dst;
        fl.saddr = key->ipv4_src;
        fl.flowi4_tos = RT_TOS(key->ipv4_tos);
        fl.flowi4_mark = skb->mark;
        fl.flowi4_proto = IPPROTO_GRE;

        rt = ip_route_output_key(net, &fl);
        if (IS_ERR(rt))
                goto err_free_skb;

        tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);

                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }
/* Resolve (and optionally cache) the IPv4 route for a flow-based transmit
 * and make sure the skb has enough headroom for the encapsulation headers.
 */
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
                                      struct net_device *dev,
                                      struct flowi4 *fl,
                                      int tunnel_hlen)
{
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        struct rtable *rt = NULL;
        int min_headroom;
        bool use_cache;
        int err;

        tun_info = skb_tunnel_info(skb);
        key = &tun_info->key;
        use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

        if (use_cache)
                rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
        if (!rt) {
                rt = gre_get_rt(skb, dev, fl, key);
                if (IS_ERR(rt))
                        goto err_free_skb;
                if (use_cache)
                        dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
                                          fl->saddr);
        }

        min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
                        + tunnel_hlen + sizeof(struct iphdr);
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);

                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }
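The headroom check above is the same idiom gre_fb_xmit() uses; a minimal sketch of it as a standalone helper follows. The name example_expand_headroom() is hypothetical and only illustrates the pattern, assuming the usual skbuff headers.

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Hypothetical helper: grow the skb head when the writable headroom is too
 * small for the encapsulation headers, mirroring the check in
 * gre_fb_xmit()/prepare_fb_xmit() above.
 */
static int example_expand_headroom(struct sk_buff *skb, int min_headroom)
{
        int head_delta;

        if (skb_headroom(skb) >= min_headroom && !skb_header_cloned(skb))
                return 0;       /* enough private headroom already */

        /* Round the shortfall (plus 16 bytes of slack) up and reallocate. */
        head_delta = SKB_DATA_ALIGN(min_headroom - skb_headroom(skb) + 16);
        return pskb_expand_head(skb, max_t(int, head_delta, 0), 0, GFP_ATOMIC);
}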
/* Transmit using the tunnel metadata attached to the skb (metadata-based
 * "collect_md" mode) rather than the per-device tunnel parameters.
 */
void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        u32 headroom = sizeof(struct iphdr);
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
        const struct iphdr *inner_iph;
        struct rtable *rt;
        struct flowi4 fl4;
        __be16 df = 0;
        u8 tos, ttl;

        tun_info = skb_tunnel_info(skb);
        if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
                     ip_tunnel_info_af(tun_info) != AF_INET))
                goto tx_error;

        key = &tun_info->key;
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        tos = key->tos;
        if (tos == 1) {
                if (skb->protocol == htons(ETH_P_IP))
                        tos = inner_iph->tos;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
        }

        init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
                         RT_TOS(tos), tunnel->parms.link);
        if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
                goto tx_error;

        rt = ip_route_output_key(tunnel->net, &fl4);
        if (IS_ERR(rt)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error;
        }
        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = key->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }

        if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
                df = htons(IP_DF);
        else if (skb->protocol == htons(ETH_P_IP))
                df = inner_iph->frag_off & htons(IP_DF);

        headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
        if (headroom > dev->needed_headroom)
                dev->needed_headroom = headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                ip_rt_put(rt);
                goto tx_dropped;
        }

        iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
                      key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        return;

tx_error:
        dev->stats.tx_errors++;
        goto kfree;
tx_dropped:
        dev->stats.tx_dropped++;
kfree:
        kfree_skb(skb);
}
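gre_fb_xmit() and ip_md_tunnel_xmit() both start by validating the metadata attached to the skb before transmitting. A minimal sketch of that precondition as a predicate follows; the helper name example_md_tx_check() is hypothetical, assuming the standard dst_metadata and ip_tunnels headers.

#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>

/* Hypothetical helper: true if the skb carries IPv4 tunnel metadata that is
 * marked for transmit, i.e. the check performed at the top of gre_fb_xmit()
 * and ip_md_tunnel_xmit() above.
 */
static bool example_md_tx_check(struct sk_buff *skb)
{
        struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);

        return tun_info &&
               (tun_info->mode & IP_TUNNEL_INFO_TX) &&
               ip_tunnel_info_af(tun_info) == AF_INET;
}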