static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
	if (unlikely(vlan_deaccel_tag(skb)))
		return 0;
#endif

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	forward_ip_summed(skb, false);

	netif_rx(skb);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
	netdev->last_rx = jiffies;
#endif

	return len;
}
Example #2
/**
 *	ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 * @tun_key: tunnel metadata from the encapsulating IP header (the ToS is used to copy ECN bits)
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb,
		 struct ovs_key_ipv4_tunnel *tun_key)
{
	struct ethhdr *eh;

	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);

	if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);

	skb_dst_drop(skb);
	nf_reset(skb);
	skb_clear_rxhash(skb);
	secpath_reset(skb);
	vlan_set_tci(skb, 0);

	if (unlikely(compute_ip_summed(skb, false))) {
		kfree_skb(skb);
		return;
	}

	ovs_vport_receive(vport, skb, tun_key);
}
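
A hedged caller sketch for ovs_tnl_rcv() above: how a tunnel receive
path might hand a decapsulated packet up once it has located the vport.
The tun_key handling shown is an illustrative assumption, not taken
from these examples.

static void example_tunnel_input(struct vport *vport, struct sk_buff *skb)
{
	struct ovs_key_ipv4_tunnel tun_key;

	/* caller holds rcu_read_lock(), as ovs_tnl_rcv() requires */
	memset(&tun_key, 0, sizeof(tun_key));
	/* ... populate tun_key from the outer IP header (elided) ... */

	ovs_tnl_rcv(vport, skb, &tun_key);
}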
Example #3
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash_if_not_l4(skb);
	skb_dst_drop(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
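
A hedged usage sketch for iptunnel_pull_header() above: stripping a
hypothetical 8-byte encapsulation header that carries an Ethernet
(ETH_P_TEB) payload. The header length and the error handling are
illustrative assumptions.

static int example_decap(struct sk_buff *skb)
{
	/* strip the (assumed) 8-byte tunnel header; failure means the
	 * packet was too short to contain it */
	if (iptunnel_pull_header(skb, 8, htons(ETH_P_TEB))) {
		kfree_skb(skb);
		return -1;
	}

	/* skb is now scrubbed and skb->protocol names the inner frame */
	return 0;
}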
Example #4
static int ipip_rcv(struct sk_buff *skb)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);

	read_lock(&ipip_lock);
	if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
					iph->saddr, iph->daddr)) != NULL) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ipip_lock);
			kfree_skb(skb);
			return 0;
		}

		secpath_reset(skb);

		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->protocol = htons(ETH_P_IP);
		skb->pkt_type = PACKET_HOST;

		tunnel->dev->stats.rx_packets++;
		tunnel->dev->stats.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);
		ipip_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip_lock);
		return 0;
	}
	read_unlock(&ipip_lock);

	return -1;
}
Example #5
static void sc_send_8023(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eh;

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		return;

	/* drop conntrack reference */
	nf_reset(skb);

	/* detach skb from CAPWAP */
	skb_orphan(skb);
	secpath_reset(skb);

	/* drop any routing info */
	skb_dst_drop(skb);

	skb->dev = dev;
	skb_reset_mac_header(skb);
	eh = eth_hdr(skb);
	if (likely(eth_proto_is_802_3(eh->h_proto)))
		skb->protocol = eh->h_proto;
	else
		skb->protocol = htons(ETH_P_802_2);
	skb_set_network_header(skb, ETH_HLEN);

	/* Force the device to verify it. */
	skb->ip_summed = CHECKSUM_NONE;

	dev_queue_xmit(skb);
}
Example #6
static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
{
	struct net_device *netdev = skb->dev;
	struct pcpu_sw_netstats *stats;

	if (unlikely(!(netdev->flags & IFF_UP))) {
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	stats = this_cpu_ptr(netdev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);
	return NETDEV_TX_OK;
}
Example #7
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

	if (unlikely(!(netdev->flags & IFF_UP))) {
		kfree_skb(skb);
		return 0;
	}

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
Example #8
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length, cpu;

	skb_orphan(skb);

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	cpu = smp_processor_id();
	stats = per_cpu_ptr(priv->stats, cpu);
	rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	if (skb->len > (rcv->mtu + MTU_PAD))
		goto rx_drop;

	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, rcv);
	if (dev->features & NETIF_F_NO_CSUM)
		skb->ip_summed = rcv_priv->ip_summed;

	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);

	length = skb->len;

	stats->tx_bytes += length;
	stats->tx_packets++;

	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;

	netif_rx(skb);
	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;

rx_drop:
	kfree_skb(skb);
	rcv_stats->rx_dropped++;
	return NETDEV_TX_OK;
}
Example #9
static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
		       __u8 ipproto,
		       void (*dscp_ecn_decapsulate)(struct ip6_tnl *t,
						    struct ipv6hdr *ipv6h,
						    struct sk_buff *skb))
{
	struct ip6_tnl *t;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);

	read_lock(&ip6_tnl_lock);

	if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
					&ipv6h->daddr)) != NULL) {
		if (t->parms.proto != ipproto && t->parms.proto != 0) {
			read_unlock(&ip6_tnl_lock);
			goto discard;
		}

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ip6_tnl_lock);
			goto discard;
		}

		if (!ip6_tnl_rcv_ctl(t)) {
			t->dev->stats.rx_dropped++;
			read_unlock(&ip6_tnl_lock);
			goto discard;
		}
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		skb->protocol = htons(protocol);
		skb->pkt_type = PACKET_HOST;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		skb->dev = t->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);

		dscp_ecn_decapsulate(t, ipv6h, skb);

		t->dev->stats.rx_packets++;
		t->dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
		read_unlock(&ip6_tnl_lock);
		return 0;
	}
	read_unlock(&ip6_tnl_lock);
	return 1;

discard:
	kfree_skb(skb);
	return 0;
}
Example #10
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		sp = secpath_dup(skb->sp);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sp)
			secpath_put(skb->sp);
		skb->sp = sp;
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[skb->sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;

			return xfrm_output2(net, sk, skb);
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	}

	if (skb_is_gso(skb))
		return xfrm_output_gso(net, sk, skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
Example #11
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	/* inlined skb_scrub_packet() */
	if (xnet)
		skb_orphan(skb);
	skb->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)
	skb->skb_iif = 0;
#endif
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);

	skb->rxhash = 0;
	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb.  It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	tunnel_ip_select_ident(skb,
			       (const struct iphdr *)skb_inner_network_header(skb),
			       &rt_dst(rt));

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
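
For reference, a simplified sketch of the upstream helper that the
"inlined skb_scrub_packet()" block above mirrors; the exact field set
varies by kernel version, so treat this as approximate rather than
authoritative.

static void example_skb_scrub(struct sk_buff *skb, bool xnet)
{
	if (xnet)
		skb_orphan(skb);	/* crossing netns: drop the socket ref */
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
}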
Example #12
static int
ip6ip6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
	struct sk_buff *skb = *pskb;
	struct ipv6hdr *ipv6h;
	struct ip6_tnl *t;

	if (!pskb_may_pull(skb, sizeof (*ipv6h)))
		goto discard;

	ipv6h = skb->nh.ipv6h;

	read_lock(&ip6ip6_lock);

	if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ip6ip6_lock);
			kfree_skb(skb);
			return 0;
		}

		if (!ip6_tnl_rcv_ctl(t)) {
			t->stat.rx_dropped++;
			read_unlock(&ip6ip6_lock);
			goto discard;
		}
		secpath_reset(skb);
		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		skb->dev = t->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
			ipv6_copy_dscp(ipv6h, skb->nh.ipv6h);
		ip6ip6_ecn_decapsulate(ipv6h, skb);
		t->stat.rx_packets++;
		t->stat.rx_bytes += skb->len;
		netif_rx(skb);
		read_unlock(&ip6ip6_lock);
		return 0;
	}
	read_unlock(&ip6ip6_lock);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
discard:
	return 1;
}
Example #13
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;
		int offset;
		u8 *ptr;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		/* read skb->data only after pskb_may_pull(), which may
		 * reallocate the skb head and invalidate earlier pointers */
		ptr = skb->data;

		printk(KERN_DEBUG "%s: eth recv: ", session->name);

		offset = 0;
		do {
			printk(" %02X", ptr[offset]);
		} while (++offset < length);

		printk("\n");
	}

	if (data_len < ETH_HLEN)
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += data_len;
	} else
		dev->stats.rx_errors++;

	return;

error:
	dev->stats.rx_errors++;
	kfree_skb(skb);
}
Example #14
static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_private *np = netdev_priv(dev);

	if (!skb_remove_foreign_references(skb)) {
		np->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	dst_release(skb->dst);
	skb->dst = NULL;

	skb_orphan(skb);

	np->stats.tx_bytes += skb->len;
	np->stats.tx_packets++;

	/* Switch to loopback context. */
	dev = np->loopback_dev;
	np  = netdev_priv(dev);

	np->stats.rx_bytes += skb->len;
	np->stats.rx_packets++;

	if (skb->ip_summed == CHECKSUM_HW) {
		/* Defer checksum calculation. */
		skb->proto_csum_blank = 1;
		/* Must be a local packet: assert its integrity. */
		skb->proto_data_valid = 1;
	}

	skb->ip_summed = skb->proto_data_valid ?
		CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

	skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
	skb->protocol = eth_type_trans(skb, dev);
	skb->dev      = dev;
	dev->last_rx  = jiffies;

	/* Flush netfilter context: rx'ed skbuffs not expected to have any. */
	nf_reset(skb);
	secpath_reset(skb);

	netif_rx(skb);

	return 0;
}
Example #15
static int veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats;
	int length, cpu;

	skb_orphan(skb);

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	cpu = smp_processor_id();
	stats = per_cpu_ptr(priv->stats, cpu);

	if (!(rcv->flags & IFF_UP))
		goto outf;

	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, rcv);
	if (dev->features & NETIF_F_NO_CSUM)
		skb->ip_summed = rcv_priv->ip_summed;

	dst_release(skb->dst);
	skb->dst = NULL;
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);

	length = skb->len;

	stats->tx_bytes += length;
	stats->tx_packets++;

	stats = per_cpu_ptr(rcv_priv->stats, cpu);
	stats->rx_bytes += length;
	stats->rx_packets++;

	netif_rx(skb);
	return 0;

outf:
	kfree_skb(skb);
	stats->tx_dropped++;
	return 0;
}
Example #16
int iptunnel_xmit(struct sock *sk, struct rtable *rt,
		  struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash(skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb.  It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
	__ip_select_ident(iph, &rt_dst(rt), (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#else
	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
Example #17
static int ipip6_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	iph = ip_hdr(skb);

	read_lock(&ipip6_lock);
	if ((tunnel = ipip6_tunnel_lookup(dev_net(skb->dev),
					iph->saddr, iph->daddr)) != NULL) {
		secpath_reset(skb);
		skb->mac_header = skb->network_header;
		skb_reset_network_header(skb);
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;

		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
		    !isatap_chksrc(skb, iph, tunnel)) {
			tunnel->dev->stats.rx_errors++;
			read_unlock(&ipip6_lock);
			kfree_skb(skb);
			return 0;
		}
		tunnel->dev->stats.rx_packets++;
		tunnel->dev->stats.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipip6_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip6_lock);
		return 0;
	}

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
	read_unlock(&ipip6_lock);
out:
	kfree_skb(skb);
	return 0;
}
Example #18
static int ipip6_rcv(struct sk_buff *skb)
{
	int err;
	struct iphdr *iph;
	struct ip_tunnel *tunnel = tunnels_wc[0];

	/* Returning nonzero yields to the next tunnel device */

	if (!(tunnel && tunnel->dev->flags&IFF_UP))
		return -1;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return -1;

	err = check_stubl_source(skb);
	if (err == -ERANGE) {
		/* Not my prefix, let the next tunnel handle it. */
		return -1;
	} else if (err < 0) {
		/* For any other errors, send an ICMPv4 reject. */
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
		kfree_skb(skb);
		return 0;
	}

	iph = ip_hdr(skb);

	secpath_reset(skb);
	skb->mac_header = skb->network_header;
	skb_reset_network_header(skb);
	IPCB(skb)->flags = 0;
	skb->protocol = htons(ETH_P_IPV6);
	skb->pkt_type = PACKET_HOST;
	tunnel->stat.rx_packets++;
	tunnel->stat.rx_bytes += skb->len;
	skb->dev = tunnel->dev;
	dst_release(skb->dst);
	skb->dst = NULL;
	nf_reset(skb);
	ipip6_ecn_decapsulate(iph, skb);
	netif_rx(skb);
	return 0;
}
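
A hedged sketch of how a handler like ipip6_rcv() above can be chained:
registered as an IPv6-in-IPv4 tunnel endpoint, where a nonzero return
passes the packet to the next registered handler. The two-argument
xfrm4_tunnel_register() form is the later-kernel signature, and the
priority value here is an assumption.

static struct xfrm_tunnel example_sit_handler __read_mostly = {
	.handler	= ipip6_rcv,
	.priority	= 1,	/* assumed */
};

static int __init example_init(void)
{
	return xfrm4_tunnel_register(&example_sit_handler, AF_INET6);
}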
Example #19
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);

	netif_rx(skb);

	return len;
}
Example #20
int iptunnel_xmit(struct net *net, struct rtable *rt,
		  struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_rxhash(skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb.  It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	tunnel_ip_select_ident(skb,
			       (const struct iphdr *)skb_inner_network_header(skb),
			       &rt_dst(rt));

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
Example #21
static int ipip_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	iph = skb->nh.iph;

	read_lock(&ipip_lock);
	if ((tunnel = ipip_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
			read_unlock(&ipip_lock);
			kfree_skb(skb);
			return 0;
		}

		secpath_reset(skb);

		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
		skb->protocol = htons(ETH_P_IP);
		skb->pkt_type = PACKET_HOST;

		tunnel->stat.rx_packets++;
		tunnel->stat.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipip_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip_lock);
		return 0;
	}
	read_unlock(&ipip_lock);

out:
	return -1;
}
Example #22
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;
	struct l2tp_eth *priv = netdev_priv(dev);

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: eth recv\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		atomic_long_inc(&priv->rx_packets);
		atomic_long_add(data_len, &priv->rx_bytes);
	} else {
		atomic_long_inc(&priv->rx_errors);
	}
	return;

error:
	atomic_long_inc(&priv->rx_errors);
	kfree_skb(skb);
}
Example #23
static int ipip6_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	iph = skb->nh.iph;

	read_lock(&ipip6_lock);
	if ((tunnel = ipip6_tunnel_lookup(iph->saddr, iph->daddr)) != NULL) {
		secpath_reset(skb);
		skb->mac.raw = skb->nh.raw;
		skb->nh.raw = skb->data;
		memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;
		tunnel->stat.rx_packets++;
		tunnel->stat.rx_bytes += skb->len;
		skb->dev = tunnel->dev;
		dst_release(skb->dst);
		skb->dst = NULL;
		nf_reset(skb);
		ipip6_ecn_decapsulate(iph, skb);
		netif_rx(skb);
		read_unlock(&ipip6_lock);
		return 0;
	}

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);
	kfree_skb(skb);
	read_unlock(&ipip6_lock);
out:
	return 0;
}
Example #24
int iptunnel_xmit(struct net *net, struct rtable *rt,
		  struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	nf_reset(skb);
	secpath_reset(skb);
	skb->rxhash = 0;
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	=	4;
	iph->ihl	=	sizeof(struct iphdr) >> 2;
	iph->frag_off	=	df;
	iph->protocol	=	proto;
	iph->tos	=	tos;
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
Example #25
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;

		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);

	} else {
		skb->protocol = inner_proto;
	}

	if (unlikely(compute_ip_summed(skb, false)))
		return -EPROTO;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_rxhash(skb);
	skb_dst_drop(skb);
	vlan_set_tci(skb, 0);
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
Example #26
static void sc_send_80211(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr;
	int hdrlen;

	printk(KERN_DEBUG "capwap inject: %s: hdr: %p\n",
	       dev->name, skb->data);

	/* detach skb from CAPWAP */
	skb_orphan(skb);
	secpath_reset(skb);

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);

	skb->dev = dev;

	skb_set_mac_header(skb, hdrlen);
	skb_set_network_header(skb, hdrlen);
	skb_set_transport_header(skb, hdrlen);

	skb->protocol = htons(ETH_P_CONTROL);
	info->flags |= IEEE80211_TX_CTL_INJECTED;

	/* Force the device to verify it. */
	skb->ip_summed = CHECKSUM_NONE;

	dev_queue_xmit(skb);
}
Example #27
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb))))
			return 0;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum,
					     csum_partial(skb->data + (2 * ETH_ALEN),
							  VLAN_HLEN, 0));

		vlan_set_tci(skb, 0);
	}
#endif

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
Example #28
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	unsigned int len;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct net_device_stats *stats = &tunnel->dev->stats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb_rtable(skb)->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		len = skb->len;

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				stats->rx_length_errors++;
				stats->rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		stats->rx_packets++;
		stats->rx_bytes += len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return(0);
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return(0);
}
Example #29
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

	secpath_reset(skb);

	skb->protocol = tpi->proto;

	skb->mac_header = skb->network_header;
	__pskb_pull(skb, tunnel->hlen);
	skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		iph = ip_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	}

	skb->pkt_type = PACKET_HOST;
	__skb_tunnel_rx(skb, tunnel->dev);

	skb_reset_network_header(skb);
	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
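
A hedged caller sketch for ip_tunnel_rcv() above: a receive handler
that has already parsed the encapsulation into a tnl_ptk_info and
looked up the tunnel. The lookup helper named here is hypothetical.

static int example_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct ip_tunnel *tunnel;

	tunnel = example_tunnel_lookup(skb, tpi);	/* hypothetical */
	if (tunnel)
		return ip_tunnel_rcv(tunnel, skb, tpi, true /* log_ecn_error */);

	kfree_skb(skb);
	return 0;
}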