Example #1
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tiph;
	struct iphdr fiph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	if (ipip_tunnel_is_fan(tunnel)) {
		if (ipip_build_fan_iphdr(tunnel, skb, &fiph))
			goto tx_error;
		tiph = &fiph;
	} else {
		tiph = &tunnel->parms.iph;
	}

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
Example #2
static bool expand_for_outer_iph(struct sk_buff *skb,
				 const struct net_device *out_dev)
{
	unsigned int max_headroom;
	unsigned int len_to_expand;

	/* Room needed in front of the data: the outer IPv4 header plus
	 * the output device's link-layer header. */
	max_headroom = sizeof(struct iphdr) + LL_RESERVED_SPACE(out_dev);
	if (skb_headroom(skb) < max_headroom) {
		len_to_expand = max_headroom - skb_headroom(skb);
		if (unlikely(skb_cow_head(skb, len_to_expand))) {
			printk(KERN_INFO "[FlexPath] Failed to expand sk_buff\n");
			return false;
		}
	}

	/* The old iptunnel_handle_offloads() returns the skb on success or
	 * an ERR_PTR on failure (in which case it has already freed the
	 * skb), so the result must be checked before it is dereferenced. */
	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		return false;

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	return true;
}
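A hedged usage sketch (the caller below is hypothetical, not code from the FlexPath project):

/* Hypothetical transmit-path caller: bail out unless the skb has
 * headroom for the outer header and its offload state is fixed up.
 * Note the asymmetric ownership on failure: skb_cow_head() leaves
 * the skb alive, while the old iptunnel_handle_offloads() frees it. */
if (!expand_for_outer_iph(skb, out_dev))
	return NETDEV_TX_OK;
/* The outer struct iphdr can now be written at ip_hdr(skb). */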
Example #3
File: ipip.c Project: 7799/linux
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
Example #4
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	u8 ipproto;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
#if IS_ENABLED(CONFIG_MPLS)
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
#endif
	default:
		goto tx_error;
	}

	if (tiph->protocol != ipproto && tiph->protocol != 0)
		goto tx_error;

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
		goto tx_error;

	skb_set_inner_ipproto(skb, ipproto);

	if (tunnel->collect_md)
		ip_md_tunnel_xmit(skb, dev, ipproto);
	else
		ip_tunnel_xmit(skb, dev, tiph, ipproto);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
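Note the API difference from examples #1-#3 and #5: around Linux 4.7, iptunnel_handle_offloads() was changed to return an int instead of a (possibly error-encoded) sk_buff pointer, and SKB_GSO_IPIP was folded into SKB_GSO_IPXIP4. A minimal sketch of the two calling conventions, using the error labels from the examples above:

/* Older API: returns the skb on success, an ERR_PTR on failure; the
 * skb has already been freed on the error path, which is why the
 * earlier examples use "goto out" and skip kfree_skb(). */
skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
if (IS_ERR(skb))
	goto out;

/* Newer API: returns 0 on success or a negative errno; the skb is
 * not freed on failure, so the caller frees it ("goto tx_error"). */
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP4))
	goto tx_error;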
Example #5
File: ipip.c Project: upa/madcap
/*
 *	This function assumes it is being called from dev_queue_xmit()
 *	and that skb is filled properly by that function.
 */
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr  *tiph = &tunnel->parms.iph;
	struct net_device *mcdev;	/* madcap device */

#ifdef OVBENCH
	if (SKB_OVBENCH (skb)) {
		skb->ipip_tunnel_xmit_in = rdtsc ();
	}
#endif

	if (unlikely(skb->protocol != htons(ETH_P_IP)))
		goto tx_error;

	skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
	if (IS_ERR(skb))
		goto out;

	if (madcap_enable) {
		mcdev = __dev_get_by_index (dev_net (dev), tunnel->parms.link);
		if (mcdev && get_madcap_ops (mcdev)) {
			madcap_queue_xmit (skb, mcdev);
			return NETDEV_TX_OK;
		}
	}

	skb_set_inner_ipproto(skb, IPPROTO_IPIP);

	ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
	return NETDEV_TX_OK;

tx_error:
	kfree_skb(skb);
out:
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
Example #6
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet whose
 *   destination is set to cp->daddr. Most of the code in this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing through the load balancer. This can greatly increase
 *   the scalability of the virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct net *net = skb_net(skb);
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct rtable *rt;			/* Route to the other host */
	__be32 saddr;				/* Source for tunnel */
	struct net_device *tdev;		/* Device to other host */
	__u8 next_protocol = 0;
	__u8 dsfield = 0;
	__u8 ttl = 0;
	__be16 df = 0;
	__be16 *dfp = NULL;
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
				   IP_VS_RT_MODE_LOCAL |
				   IP_VS_RT_MODE_NON_LOCAL |
				   IP_VS_RT_MODE_CONNECT |
				   IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
	if (local < 0)
		goto tx_error;
	if (local) {
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
	}

	rt = skb_rtable(skb);
	tdev = rt->dst.dev;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	/* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
	dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
					 &next_protocol, NULL, &dsfield,
					 &ttl, dfp);
	if (IS_ERR(skb))
		goto tx_error;

	skb = iptunnel_handle_offloads(
		skb, false, __tun_gso_type_mask(AF_INET, cp->af));
	if (IS_ERR(skb))
		goto tx_error;

	skb->transport_header = skb->network_header;

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 *	Push down and install the IPIP header.
	 */
	iph			=	ip_hdr(skb);
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr)>>2;
	iph->frag_off		=	df;
	iph->protocol		=	next_protocol;
	iph->tos		=	dsfield;
	iph->daddr		=	cp->daddr.ip;
	iph->saddr		=	saddr;
	iph->ttl		=	ttl;
	ip_select_ident(net, skb, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->ignore_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);

	return NF_STOLEN;

  tx_error:
	if (!IS_ERR(skb))
		kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
Example #7
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct rt6_info *rt;		/* Route to the other host */
	struct in6_addr saddr;		/* Source for tunnel */
	struct net_device *tdev;	/* Device to other host */
	__u8 next_protocol = 0;
	__u32 payload_len = 0;
	__u8 dsfield = 0;
	__u8 ttl = 0;
	struct ipv6hdr  *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
				      &saddr, ipvsh, 1,
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL |
				      IP_VS_RT_MODE_TUNNEL);
	if (local < 0)
		goto tx_error;
	if (local) {
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
	}

	rt = (struct rt6_info *) skb_dst(skb);
	tdev = rt->dst.dev;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
					 &next_protocol, &payload_len,
					 &dsfield, &ttl, NULL);
	if (IS_ERR(skb))
		goto tx_error;

	skb = iptunnel_handle_offloads(
		skb, false, __tun_gso_type_mask(AF_INET6, cp->af));
	if (IS_ERR(skb))
		goto tx_error;

	skb->transport_header = skb->network_header;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 *	Push down and install the IPIP header.
	 */
	iph			=	ipv6_hdr(skb);
	iph->version		=	6;
	iph->nexthdr		=	next_protocol;
	iph->payload_len	=	htons(payload_len);
	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
	ipv6_change_dsfield(iph, 0, dsfield);
	iph->daddr = cp->daddr.in6;
	iph->saddr = saddr;
	iph->hop_limit		=	ttl;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->ignore_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip6_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);

	return NF_STOLEN;

tx_error:
	if (!IS_ERR(skb))
		kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
Example #8
static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
					   bool csum)
{
	return iptunnel_handle_offloads(skb, csum,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
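For context, a sketch of how this wrapper was typically invoked from the GRE transmit path in kernels of the same era (the exact flags test is an assumption based on ip_gre.c of that period, not part of the snippet above):

/* Request checksum offload handling iff the tunnel was configured
 * with TUNNEL_CSUM in its output flags. The old API returns an
 * ERR_PTR on failure and has already freed the skb by that point,
 * so the caller must not free it again. */
skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
	goto out;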