static int __send(struct vport *vport, struct sk_buff *skb,
                  int tunnel_hlen,
                  __be32 seq, __be16 gre64_flag)
{
        struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
        struct rtable *rt;
        int min_headroom;
        __be16 df;
        __be32 saddr;
        int err;

        /* Route lookup */
        saddr = tun_key->ipv4_src;
        rt = find_route(ovs_dp_get_net(vport->dp),
                        &saddr, tun_key->ipv4_dst,
                        IPPROTO_GRE, tun_key->ipv4_tos,
                        skb->mark);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
        }

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + tunnel_hlen + sizeof(struct iphdr)
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) + 16);

                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                       0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        /* The GRE header push and the final iptunnel_xmit() call, which
         * returns on success, are elided in this excerpt; the labels
         * below unwind the two error paths taken above. */

err_free_rt:
        ip_rt_put(rt);
error:
        kfree_skb(skb);
        return err;
}
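/*
 * Worked example (illustrative numbers, not from this excerpt): with
 * 32 bytes of headroom left on the skb and min_headroom = 16 (link
 * layer) + 0 (header_len) + 8 (GRE with key) + 20 (outer iphdr) = 44,
 * the expansion request is SKB_DATA_ALIGN(44 - 32 + 16) =
 * SKB_DATA_ALIGN(28), i.e. 28 rounded up to SMP_CACHE_BYTES.  The
 * extra 16 bytes are slack so that small later pushes do not
 * immediately force another reallocation.
 */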
int vxlan_xmit_skb(struct vxlan_sock *vs,
                   struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, __be32 vni)
{
        struct vxlanhdr *vxh;
        struct udphdr *uh;
        int min_headroom;
        int err;

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + VXLAN_HLEN + sizeof(struct iphdr)
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err)) {
                kfree_skb(skb);
                return err;
        }

        if (vlan_tx_tag_present(skb)) {
                if (unlikely(!vlan_insert_tag_set_proto(skb,
                                                        skb->vlan_proto,
                                                        vlan_tx_tag_get(skb))))
                        return -ENOMEM;

                vlan_set_tci(skb, 0);
        }

        skb_reset_inner_headers(skb);

        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_FLAGS);
        vxh->vx_vni = vni;

        __skb_push(skb, sizeof(*uh));
        skb_reset_transport_header(skb);
        uh = udp_hdr(skb);

        uh->dest = dst_port;
        uh->source = src_port;
        uh->len = htons(skb->len);
        uh->check = 0;

        vxlan_set_owner(vs->sock->sk, skb);

        skb = handle_offloads(skb);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
                             tos, ttl, df, false);
}
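/*
 * Hedged usage sketch (caller names are illustrative, not from this
 * excerpt): vx_vni carries the 24-bit VNI in the upper bits of a
 * network-order word, the low 8 bits being reserved, so a caller
 * holding a 64-bit tunnel ID would typically shift before passing it
 * in, e.g.:
 *
 *      err = vxlan_xmit_skb(vxlan_port->vs, rt, skb, saddr, dst,
 *                           tos, ttl, df, src_port, dst_port,
 *                           htonl(be64_to_cpu(tun_key->tun_id) << 8));
 */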
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        int pkt_len = skb->len;
        struct iphdr *iph;
        int err;

        /* inlined skb_scrub_packet() */
        if (xnet)
                skb_orphan(skb);
        skb->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)
        skb->skb_iif = 0;
#endif
        skb_dst_drop(skb);
        skb->mark = 0;
        secpath_reset(skb);
        nf_reset(skb);
        skb->rxhash = 0;

        skb_dst_set(skb, &rt_dst(rt));

#if 0
        /* Do not clear ovs_skb_cb.  It will be done in gso code. */
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

        /* Push down and install the IP header. */
        __skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;
        tunnel_ip_select_ident(skb,
                               (const struct iphdr *)skb_inner_network_header(skb),
                               &rt_dst(rt));

        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;
        return pkt_len;
}
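/*
 * A minimal sketch (an assumption, not code from this excerpt) of what
 * the tunnel_ip_select_ident() compat helper used above might look
 * like: reuse the inner IPv4 ID when one exists, otherwise fall back
 * to the stack's ID selector with one ID reserved per GSO segment.
 */
static inline void tunnel_ip_select_ident(struct sk_buff *skb,
                                          const struct iphdr *old_iph,
                                          struct dst_entry *dst)
{
        struct iphdr *iph = ip_hdr(skb);

        /* Prefer the inner packet's ID so inner and outer fragments
         * correlate. */
        if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
                iph->id = old_iph->id;
        else
                __ip_select_ident(iph, dst,
                                  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
}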
int vxlan_xmit_skb(struct vxlan_sock *vs,
                   struct rtable *rt, struct sk_buff *skb,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port,
                   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
        struct vxlanhdr *vxh;
        int min_headroom;
        int err;
        bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);

        skb = udp_tunnel_handle_offloads(skb, udp_sum, true);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + VXLAN_HLEN + sizeof(struct iphdr)
                        + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

        /* Need space for new headers (invalidates iph ptr) */
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err)) {
                kfree_skb(skb);
                return err;
        }

        skb = vlan_hwaccel_push_inside(skb);
        if (WARN_ON(!skb))
                return -ENOMEM;

        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_HF_VNI);
        vxh->vx_vni = md->vni;

        if (vxflags & VXLAN_F_GBP)
                vxlan_build_gbp_hdr(vxh, vxflags, md);

        vxlan_set_owner(vs->sock->sk, skb);

        ovs_skb_set_inner_protocol(skb, htons(ETH_P_TEB));

        return udp_tunnel_xmit_skb(rt, skb, src, dst, tos,
                                   ttl, df, src_port, dst_port, xnet,
                                   !udp_sum);
}
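/*
 * A sketch of the Group Based Policy header builder called above,
 * following the upstream layout in which the GBP fields overlay the
 * reserved VXLAN flag/ID space (an assumption about this tree; the
 * struct vxlanhdr_gbp bitfields come from the mainline driver).
 */
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
                                struct vxlan_metadata *md)
{
        struct vxlanhdr_gbp *gbp;

        if (!md->gbp)
                return;

        gbp = (struct vxlanhdr_gbp *)vxh;
        vxh->vx_flags |= htonl(VXLAN_HF_GBP);

        if (md->gbp & VXLAN_GBP_DONT_LEARN)
                gbp->dont_learn = 1;

        if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
                gbp->policy_applied = 1;

        gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}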
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        int pkt_len = skb->len;
        struct iphdr *iph;
        int err;

        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_hash(skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt_dst(rt));

#if 0
        /* Do not clear ovs_skb_cb.  It will be done in gso code. */
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

        /* Push down and install the IP header. */
        __skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
        __ip_select_ident(iph, &rt_dst(rt),
                          (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#else
        __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;
        return pkt_len;
}
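/*
 * A minimal compat shim (an illustrative sketch, not from this
 * excerpt) hiding the two __ip_select_ident() signatures behind one
 * helper; HAVE_IP_SELECT_IDENT_USING_DST_ENTRY is assumed to be probed
 * by the build system, as in the #ifdef above.  "gso_segs ?: 1"
 * reserves one IP ID per segment GSO will later emit, so the segments
 * of one large skb get consecutive IDs.
 */
static inline void iptunnel_select_ident(struct sk_buff *skb,
                                         struct iphdr *iph,
                                         struct dst_entry *dst)
{
        int segs = skb_shinfo(skb)->gso_segs ?: 1;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
        /* Newer signature: dst plus a 0-based increment. */
        __ip_select_ident(iph, dst, segs - 1);
#else
        /* Older signature: plain segment count. */
        __ip_select_ident(iph, segs);
#endif
}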
int iptunnel_xmit(struct net *net, struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df)
{
        int pkt_len = skb->len;
        struct iphdr *iph;
        int err;

        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_rxhash(skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt_dst(rt));

#if 0
        /* Do not clear ovs_skb_cb.  It will be done in gso code. */
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

        /* Push down and install the IP header. */
        __skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;
        tunnel_ip_select_ident(skb,
                               (const struct iphdr *)skb_inner_network_header(skb),
                               &rt_dst(rt));

        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;
        return pkt_len;
}
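/*
 * Hedged caller sketch (function and stats names are illustrative):
 * the return value is the transmitted byte count, or 0 when
 * ip_local_out() reported a drop, so tunnel drivers typically feed it
 * straight into their per-cpu tx statistics.
 */
static void tunnel_send_example(struct net *net, struct rtable *rt,
                                struct sk_buff *skb, __be32 saddr,
                                __be32 daddr,
                                struct pcpu_sw_netstats *stats)
{
        int len;

        len = iptunnel_xmit(net, rt, skb, saddr, daddr, IPPROTO_GRE,
                            0, 64, htons(IP_DF));
        if (len > 0) {
                u64_stats_update_begin(&stats->syncp);
                stats->tx_bytes += len;
                stats->tx_packets++;
                u64_stats_update_end(&stats->syncp);
        }
}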
static int ip_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */
        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
                                 iph->saddr, tunnel->parms.o_key,
                                 RT_TOS(iph->tos), tunnel->parms.link);
                rt = ip_route_output_key(tunnel->net, &fl4);

                if (!IS_ERR(rt)) {
                        tdev = rt_dst(rt).dev;
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);

        if (mtu < 68)
                mtu = 68;

        return mtu;
}
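/*
 * Worked example (illustrative numbers, not from this excerpt): for a
 * plain GRE tunnel whose underlay is 1500-byte Ethernet, tunnel->hlen
 * is the 4-byte GRE base header, so t_hlen = 4 + 20 = 24.  A GRE
 * netdev has hard_header_len 0, giving mtu = 1500 - (0 + 24) = 1476,
 * the familiar GRE tunnel MTU.  The 68-byte floor is the IPv4 minimum
 * MTU from RFC 791.
 */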