/* Pop the outermost VLAN tag.  The tag may live either in the skb's
 * hardware-accel field (vlan_tci) or in-band in the packet data; after
 * popping, promote any next in-band tag to the accel field. */
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		/* Outermost tag is in the accel field: just clear it. */
		vlan_set_tci(skb, 0);
	} else {
		/* Otherwise it must be in-band; nothing to do if not. */
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, ntohs(tci));
	return 0;
}
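__pop_vlan_tci() is not shown above. For orientation, here is a minimal sketch of what such a helper has to do, assuming CHECKSUM_COMPLETE bookkeeping and the usual skb helpers; it is an illustration, not the exact OVS implementation:

/* Sketch only: strip an in-band 802.1Q header and return its TCI.
 * Assumes the caller already verified skb->protocol and skb->len. */
static int example_pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;

	if (unlikely(skb_cow_head(skb, 0) < 0))
		return -ENOMEM;

	/* CHECKSUM_COMPLETE covers the removed bytes; subtract them. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(skb->data + (2 * ETH_ALEN),
						  VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;
	skb->protocol = vhdr->h_vlan_encapsulated_proto;

	/* Slide the MAC addresses over the tag and drop it. */
	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);
	skb->mac_header += VLAN_HLEN;

	return 0;
}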
/**
 * ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tun_key: tunnel key extracted from the encapsulating IP packet,
 *	     used to copy ToS/ECN bits among other metadata
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb,
		 struct ovs_key_ipv4_tunnel *tun_key)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb, tun_key);
}
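The h_proto test above implements the classic Ethernet II vs. 802.3 distinction: values of at least ETH_P_802_3_MIN (0x0600) are EtherTypes, anything smaller is an 802.3 length field. The same logic appears again in iptunnel_pull_header() below; a hypothetical helper makes the rule explicit:

/* Illustrative helper (not in the OVS tree): classify an Ethernet
 * header's type/length field per IEEE 802.3. */
static inline __be16 example_parse_eth_type(const struct ethhdr *eh)
{
	/* >= 0x0600: an EtherType (Ethernet II framing). */
	if (ntohs(eh->h_proto) >= ETH_P_802_3_MIN)
		return eh->h_proto;

	/* Otherwise the field is a length, so the frame is 802.3 with
	 * an 802.2 LLC header following. */
	return htons(ETH_P_802_2);
}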
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;

		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	} else {
		skb->protocol = inner_proto;
	}

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash(skb);
	skb_dst_drop(skb);
	vlan_set_tci(skb, 0);
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
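A hedged usage sketch: a tunnel receive path stripping its encapsulation header with iptunnel_pull_header(). Passing ETH_P_TEB matches the VXLAN case above, where the inner payload is a complete Ethernet frame; the function name is made up for illustration:

/* Illustration only: strip a VXLAN-style outer header on receive. */
static int example_tunnel_pull(struct sk_buff *skb)
{
	/* Inner payload is a complete Ethernet frame, so pass ETH_P_TEB
	 * and let iptunnel_pull_header() resolve the inner protocol. */
	int err = iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB));

	if (unlikely(err))
		kfree_skb(skb);
	return err;
}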
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit
	int err = -ENOMEM;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return err;
		vlan_set_tci(skb, 0);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
					     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
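dev_supports_vlan_tx() is compat glue not shown here. A plausible sketch of its shape, assuming the workaround is only compiled in for old kernels whose drivers mishandled accel VLAN tags (the config symbol is an assumption):

/* Sketch, not the exact OVS definition. */
static inline bool example_dev_supports_vlan_tx(struct net_device *dev)
{
#ifdef HAVE_VLAN_BUG_WORKAROUND	/* assumed config symbol */
	/* Trust only drivers that advertise hardware VLAN insertion. */
	return dev->features & NETIF_F_HW_VLAN_TX;
#else
	/* Kernels where the core inserts the tag need no fallback. */
	return true;
#endif
}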
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							vlan_tx_tag_get(skb))))
			return -ENOMEM;

		vlan_set_tci(skb, 0);
	}

	skb_reset_inner_headers(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(vs->sock->sk, skb);

	skb = handle_offloads(skb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
			     tos, ttl, df, false);
}
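Note that the vni argument arrives pre-shifted: RFC 7348 places the 24-bit VNI in the top three bytes of vx_vni. A hedged sketch of how a caller might build it from a 64-bit tunnel ID (the helper name is illustrative):

/* Illustration: pack a 24-bit VNI into the wire format expected above. */
static inline __be32 example_vni_from_tun_id(__be64 tun_id)
{
	/* Low 24 bits of the tunnel ID, shifted into bits 31..8. */
	return htonl((u32)(be64_to_cpu(tun_id) & 0xffffff) << 8);
}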
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb))))
			return 0;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum,
					     csum_partial(skb->data + (2 * ETH_ALEN),
							  VLAN_HLEN, 0));

		vlan_set_tci(skb, 0);
	}
#endif

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
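Two checksum fix-ups bracket this function: csum_add() folds the four inserted VLAN bytes into a CHECKSUM_COMPLETE value, and skb_postpull_rcsum() subtracts the Ethernet header once eth_type_trans() has pulled it. A sketch of the push-side operation as a standalone helper, mirroring skb_postpull_rcsum() (the name here is made up; newer kernels provide skb_postpush_rcsum()):

/* Illustration: account for bytes inserted into checksummed data. */
static inline void example_postpush_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	/* CHECKSUM_COMPLETE must cover every byte of the packet, so add
	 * in the sum of the newly inserted region. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
}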
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		if (net_ratelimit())
			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
				ovs_dp_name(vport->dp),
				packet_length(skb), mtu);
		goto error;
	}

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	skb->dev = netdev_vport->dev;
	forward_ip_summed(skb, true);

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
					     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
					kfree_skb(skb);
					return 0;
				}

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto tag;
			}

			if (IS_ERR(nskb)) {
				kfree_skb(skb);
				return 0;
			}
			consume_skb(skb);
			skb = nskb;

			len = 0;
			do {
				nskb = skb->next;
				skb->next = NULL;

				skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
				if (likely(skb)) {
					len += skb->len;
					vlan_set_tci(skb, 0);
					dev_queue_xmit(skb);
				}

				skb = nskb;
			} while (skb);

			return len;
		}

tag:
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return 0;
		vlan_set_tci(skb, 0);
	}

	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
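packet_length() is not shown. For the MTU comparison above to make sense it must exclude the L2 header (MTU is an L3 budget) and, conventionally, one VLAN tag. A plausible sketch under those assumptions:

/* Sketch: payload length as compared against the device MTU. */
static int example_packet_length(const struct sk_buff *skb)
{
	int length = skb->len - ETH_HLEN;

	/* A single 802.1Q tag is traditionally not charged against MTU. */
	if (skb->protocol == htons(ETH_P_8021Q))
		length -= VLAN_HLEN;

	return length;
}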
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit
	int err = -ENOMEM;
	bool vlan, mpls;

	vlan = mpls = false;

	/* Avoid traversing any VLAN tags that are present to determine if
	 * the ethtype is MPLS. Instead compare the mac_len (end of L2) and
	 * skb_network_offset() (beginning of L3) whose inequality will
	 * indicate the presence of an MPLS label stack. */
	if (skb->mac_len != skb_network_offset(skb) && !supports_mpls_gso())
		mpls = true;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev))
		vlan = true;

	if (vlan || mpls) {
		int features;

		features = netif_skb_features(skb);

		if (vlan) {
			if (!vlan_tso)
				features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
					      NETIF_F_UFO | NETIF_F_FSO);

			skb = __vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				return err;
			vlan_set_tci(skb, 0);
		}

		/* As of v3.11 the kernel provides an mpls_features field in
		 * struct net_device which allows devices to advertise which
		 * features they support for MPLS. This value defaults to
		 * NETIF_F_SG as of v3.16.
		 *
		 * This compatibility code is intended for kernels older
		 * than v3.16 that do not support MPLS GSO and do not
		 * use mpls_features. Thus this code uses NETIF_F_SG
		 * directly in place of mpls_features. */
		if (mpls)
			features &= NETIF_F_SG;

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
					     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
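A worked example of the mac_len/network-offset test, isolated into a hypothetical helper: for an Ethernet frame carrying one MPLS label, mac_len is 14 (end of the Ethernet header) while skb_network_offset() is 18 (start of the IP header), so the 4-byte gap flags a label stack.

/* Illustration only: the same test in isolation. */
static inline bool example_has_mpls_stack(const struct sk_buff *skb)
{
	/* Ethernet + one label: mac_len == 14, network offset == 18;
	 * plain IP over Ethernet: both are 14. Inequality means a
	 * label stack (or other shim) sits between L2 and L3. */
	return skb->mac_len != skb_network_offset(skb);
}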