Example #1
/* derived from ip_tunnel_rcv(). */
void ovs_ip_tunnel_rcv(struct net_device *dev, struct sk_buff *skb,
		       struct metadata_dst *tun_dst)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
	struct pcpu_sw_netstats *tstats;

	tstats = this_cpu_ptr((struct pcpu_sw_netstats __percpu *)dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);
#endif

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, false);
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	ovs_skb_dst_set(skb, (struct dst_entry *)tun_dst);

#ifndef USE_UPSTREAM_TUNNEL
	netdev_port_receive(skb, &tun_dst->u.tun_info);
#else
	netif_rx(skb);
#endif
}
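Every example in this collection follows the same contract: when bytes are removed from the front of the skb, skb_postpull_rcsum() must be told about them so that a CHECKSUM_COMPLETE value stays in sync. Below is a minimal sketch of the pull-side helper's core, assuming only the CHECKSUM_COMPLETE case (the in-tree version also handles CHECKSUM_PARTIAL; example_postpull_rcsum is an illustrative name, not a kernel symbol):

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Sketch of the CHECKSUM_COMPLETE branch of skb_postpull_rcsum():
 * subtract the checksum of the bytes just pulled (e.g. the Ethernet
 * header consumed by eth_type_trans() above) from the running sum.
 */
static inline void example_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(start, len, 0));
}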
Example #2
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
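Note the ordering in this example: skb_postpull_rcsum() runs while the MPLS header bytes are still in place, before the memmove() slides the Ethernet header right over them and __skb_pull() drops them, because the helper has to checksum exactly the bytes being removed.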
Example #3
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype field correctly in the
		 * presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
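Relative to example #2, this later revision also resets the network header after the pull and guards the ethertype rewrite with ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET, which lets the same path serve packets that carry no Ethernet header at all (L3 tunnel ports).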
Example #4
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

	if (unlikely(!(netdev->flags & IFF_UP))) {
		kfree_skb(skb);
		return 0;
	}

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
Example #5
static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
{
	struct net_device *netdev = skb->dev;
	struct pcpu_sw_netstats *stats;

	if (unlikely(!(netdev->flags & IFF_UP))) {
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	stats = this_cpu_ptr(netdev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);
	return NETDEV_TX_OK;
}
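For completeness, a hedged sketch of the matching read side for these per-CPU counters: the syncp seqcount written with u64_stats_update_begin()/end() above is read back with a fetch/retry loop. The _irq variants below match kernels of roughly this vintage (newer trees spell it u64_stats_fetch_begin()); example_fold_tstats is an illustrative name, not from the source:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Fold the per-CPU rx counters into totals, retrying any CPU whose
 * writer raced with us (as detected via the syncp seqcount).
 */
static void example_fold_tstats(struct net_device *dev,
				u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *s =
			per_cpu_ptr(dev->tstats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			p = s->rx_packets;
			b = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		*packets += p;
		*bytes += b;
	}
}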
Example #6
/* Called with rcu_read_lock and BH disabled. */
static int gre_rcv(struct sk_buff *skb)
{
	struct ovs_net *ovs_net;
	struct vport *vport;
	int hdr_len;
	struct iphdr *iph;
	struct ovs_key_ipv4_tunnel tun_key;
	__be16 gre_flags;
	u32 tnl_flags;
	__be64 key;
	bool is_gre64;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr) + ETH_HLEN)))
		goto error;
	if (unlikely(!check_checksum(skb)))
		goto error;

	hdr_len = parse_header(ip_hdr(skb), &gre_flags, &key, &is_gre64);
	if (unlikely(hdr_len < 0))
		goto error;

	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
	if (is_gre64)
		vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
	else
		vport = rcu_dereference(ovs_net->vport_net.gre_vport);
	if (unlikely(!vport))
		goto error;

	if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN)))
		goto error;

	iph = ip_hdr(skb);
	tnl_flags = gre_flags_to_tunnel_flags(gre_flags, is_gre64);
	tnl_tun_key_init(&tun_key, iph, key, tnl_flags);
	OVS_CB(skb)->tun_key = &tun_key;

	__skb_pull(skb, hdr_len);
	skb_postpull_rcsum(skb, skb_transport_header(skb), hdr_len + ETH_HLEN);

	ovs_tnl_rcv(vport, skb);
	return 0;

error:
	kfree_skb(skb);
	return 0;
}
Example #7
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;
	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}
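This example brackets an in-place rewrite with the pull/push pair: subtract the checksum of the two old MAC addresses, overwrite them, then add the checksum of the new bytes back. A minimal sketch of the push-side counterpart, mirroring what the in-tree skb_postpush_rcsum() (and the OVS compat wrapper ovs_skb_postpush_rcsum() above) does for CHECKSUM_COMPLETE; example_postpush_rcsum is an illustrative name:

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Add the checksum of `len` bytes at `start` back into a
 * CHECKSUM_COMPLETE value after they have been (re)written.
 */
static inline void example_postpush_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum,
				     csum_partial(start, len, 0));
}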
Example #8
static bool udpencap_remove_header(struct sk_buff *skb, const struct xt_udpencap_tginfo *info)
{
	unsigned int nlen = skb_transport_offset(skb);
	if (!skb_make_writable(&skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return false;

	if (skb->len < nlen + sizeof(struct udphdr))
		return false;

	if (!pskb_pull(skb, sizeof(struct udphdr)))
		return false;
	skb_postpull_rcsum(skb, skb_transport_header(skb), sizeof(struct udphdr));
	memmove(skb->data, skb->data - sizeof(struct udphdr), nlen);
	skb->network_header += sizeof(struct udphdr);
	skb->transport_header += sizeof(struct udphdr);

	return true;
}
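The same shift-right trick as in pop_mpls() appears here: the checksum of the UDP header is subtracted while those bytes are still addressable, the preceding nlen bytes of headers are slid over it with memmove(), and the stored network/transport header offsets are then advanced by the removed length so they keep pointing at the same (now relocated) headers.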
Example #9
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
	/* added by daolicloud: answer ARP requests in place, turning the
	 * request into a reply addressed back to the sender (struct arpft
	 * is apparently a project-local layout of the ARP payload).
	 */
	if (ETH_P_ARP == ntohs(eth_hdr(skb)->h_proto)) {
		struct arphdr *p = arp_hdr(skb);
		unsigned char ip[4];
		struct arpft *pdata = (struct arpft *)((unsigned char *)p +
						       sizeof(struct arphdr));

		if (ARPOP_REQUEST == ntohs(p->ar_op)) {
			p->ar_op = htons(ARPOP_REPLY);
			memcpy(ip, pdata->ar_sip, 4);
			memcpy(pdata->ar_sha, key->eth_src, ETH_ALEN);
			memcpy(pdata->ar_sip, pdata->ar_tip, 4);
			memcpy(pdata->ar_tha, eth_hdr(skb)->h_source, ETH_ALEN);
			memcpy(pdata->ar_tip, ip, 4);
			memcpy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source,
			       ETH_ALEN);
		} else {
			ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
					       mask->eth_dst);
		}
	} else {
		ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
				       mask->eth_dst);
	}
	/* end of daolicloud addition */

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
Example #10
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
Example #11
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb))))
			return 0;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum,
					     csum_partial(skb->data + (2 * ETH_ALEN),
							  VLAN_HLEN, 0));

		vlan_set_tci(skb, 0);
	}
#endif

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
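The pre-2.6.37 compatibility block performs the inverse operation: __vlan_put_tag() re-inserts the 4-byte VLAN tag into the packet data, so csum_add() folds the checksum of those inserted bytes (located just after the two MAC addresses) into a CHECKSUM_COMPLETE value, the mirror image of the skb_postpull_rcsum() call further down.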
Example #12
/* <DTS2012022805249  g00206962 2012/2/27 add begin */
static inline int ip6_input_icmp_cheat(struct sk_buff *skb)
{
	struct inet6_protocol *ipprot;
	unsigned int nhoff;
	int nexthdr, raw;
	u8 hash;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dst->dev);

	/*
	 *	Parse extension headers
	 */

	rcu_read_lock();
resubmit:
	idev = ip6_dst_idev(skb->dst);
	if (!pskb_pull(skb, skb_transport_offset(skb)))
		goto discard;
	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	raw = raw6_local_deliver(skb, nexthdr);

	hash = nexthdr & (MAX_INET_PROTOS - 1);
	if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
		int ret;

		if (ipprot->flags & INET6_PROTO_FINAL) {
			struct ipv6hdr *hdr;

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset(skb);

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);
			/*
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
			    &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr))
				goto discard;
			*/
			if (ipv6_addr_is_multicast(&hdr->daddr) && IPPROTO_ICMPV6 != nexthdr)
				goto discard;
			else if (!ipv6_addr_is_multicast(&hdr->daddr))
				goto discard;
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		ret = ipprot->handler(skb);
		if (ret > 0)
			goto resubmit;
		else if (ret == 0)
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				IP6_INC_STATS_BH(net, idev,
						 IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff,
					    skb->dev);
			}
		} else
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
		kfree_skb(skb);
	}
	rcu_read_unlock();
	return 0;

discard:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}
Example #13
File: ip6_input.c  Project: oldzhu/linux
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
    const struct inet6_protocol *ipprot;
    struct inet6_dev *idev;
    unsigned int nhoff;
    int nexthdr;
    bool raw;
    bool have_final = false;

    /*
     *	Parse extension headers
     */

    rcu_read_lock();
resubmit:
    idev = ip6_dst_idev(skb_dst(skb));
    if (!pskb_pull(skb, skb_transport_offset(skb)))
        goto discard;
    nhoff = IP6CB(skb)->nhoff;
    nexthdr = skb_network_header(skb)[nhoff];

resubmit_final:
    raw = raw6_local_deliver(skb, nexthdr);
    ipprot = rcu_dereference(inet6_protos[nexthdr]);
    if (ipprot) {
        int ret;

        if (have_final) {
            if (!(ipprot->flags & INET6_PROTO_FINAL)) {
                /* Once we've seen a final protocol don't
                 * allow encapsulation on any non-final
                 * ones. This allows foo in UDP encapsulation
                 * to work.
                 */
                goto discard;
            }
        } else if (ipprot->flags & INET6_PROTO_FINAL) {
            const struct ipv6hdr *hdr;

            /* Only do this once for first final protocol */
            have_final = true;

            /* Free reference early: we don't need it any more,
               and it may hold ip_conntrack module loaded
               indefinitely. */
            nf_reset(skb);

            skb_postpull_rcsum(skb, skb_network_header(skb),
                               skb_network_header_len(skb));
            hdr = ipv6_hdr(skb);
            if (ipv6_addr_is_multicast(&hdr->daddr) &&
                    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
                                         &hdr->saddr) &&
                    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
                goto discard;
        }
        if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
                !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
            goto discard;

        ret = ipprot->handler(skb);
        if (ret > 0) {
            if (ipprot->flags & INET6_PROTO_FINAL) {
                /* Not an extension header, most likely UDP
                 * encapsulation. Use return value as nexthdr
                 * protocol not nhoff (which presumably is
                 * not set by handler).
                 */
                nexthdr = ret;
                goto resubmit_final;
            } else {
                goto resubmit;
            }
        } else if (ret == 0) {
            __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
        }
    } else {
        if (!raw) {
            if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                __IP6_INC_STATS(net, idev,
                                IPSTATS_MIB_INUNKNOWNPROTOS);
                icmpv6_send(skb, ICMPV6_PARAMPROB,
                            ICMPV6_UNK_NEXTHDR, nhoff);
            }
            kfree_skb(skb);
        } else {
            __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
            consume_skb(skb);
        }
    }
    rcu_read_unlock();
    return 0;

discard:
    __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
    rcu_read_unlock();
    kfree_skb(skb);
    return 0;
}
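Example #12 above is a vendor variant of this same function from an older kernel: its upstream multicast check is commented out and replaced by logic that delivers only ICMPv6 to multicast destinations and discards everything else on that path, while the skb_postpull_rcsum() call over the network and extension headers is unchanged.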
Example #14
/*
 *	Determine the packet's protocol ID. The rule here is that we 
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *  Also, at this point we assume that we ARE dealing exclusively with
 *  VLAN packets, or packets that should be made into VLAN packets based
 *  on a default VLAN ID.
 *
 *  NOTE:  Should be similar to ethernet/eth.c.
 *
 *  SANITY NOTE:  This method is called when a packet is moving up the stack
 *                towards userland.  To get here, it would have already passed
 *                through the ethernet/eth.c eth_type_trans() method.
 *  SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                 stored UNALIGNED in the memory.  RISC systems don't like
 *                 such cases very much...
 *  SANITY NOTE 2a:  According to Dave Miller & Alexey, it will always be aligned,
 *                 so there doesn't need to be any of the unaligned stuff.  It has
 *                 been commented out now...  --Ben
 *
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type* ptype, struct net_device *orig_dev)
{
	unsigned char *rawp = NULL;
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
	unsigned short vid;
	struct net_device_stats *stats;
	unsigned short vlan_TCI;
	__be16 proto;

	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
	vlan_TCI = ntohs(vhdr->h_vlan_TCI);

	vid = (vlan_TCI & VLAN_VID_MASK);

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n",
		__FUNCTION__, skb, vid);
#endif

	/* Ok, we will find the correct VLAN device, strip the header,
	 * and then go on as usual.
	 */

	/* We have 12 bits of vlan ID.
	 *
	 * We must not allow preemption to be re-enabled (i.e. we
	 * must hold the RCU read lock) until we hold a reference
	 * to the device (netif_rx does that) or we fail.
	 */

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vid);
	if (!skb->dev) {
		rcu_read_unlock();

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
			__FUNCTION__, (unsigned int)(vid), dev->name, dev->ifindex);
#endif
		kfree_skb(skb);
		return -1;
	}

	skb->dev->last_rx = jiffies;

	/* Bump the rx counters for the VLAN device. */
	stats = vlan_dev_get_stats(skb->dev);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	skb_pull(skb, VLAN_HLEN); /* take off the VLAN header (4 bytes currently) */

	/* Need to correct hardware checksum */
	skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);

	/* Ok, lets check to make sure the device (dev) we
	 * came in on is what this VLAN is attached to.
	 */

	if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
		rcu_read_unlock();

#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s  real_dev: %s, skb_dev: %s\n",
			__FUNCTION__, skb, dev->name, 
			VLAN_DEV_INFO(skb->dev)->real_dev->name, 
			skb->dev->name);
#endif
		kfree_skb(skb);
		stats->rx_errors++;
		return -1;
	}

	/*
	 * Deal with ingress priority mapping.
	 */
	skb->priority = vlan_get_ingress_priority(skb->dev, ntohs(vhdr->h_vlan_TCI));

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: priority: %lu  for TCI: %hu (hbo)\n",
		__FUNCTION__, (unsigned long)(skb->priority), 
		ntohs(vhdr->h_vlan_TCI));
#endif

	/* The ethernet driver already did the pkt_type calculations
	 * for us...
	 */
	switch (skb->pkt_type) {
	case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
		// stats->broadcast ++; // no such counter :-(
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST: 
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the underlying
		 * device, and still route correctly.
		 */
		if (memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN) == 0) {
			/* It is for our (changed) MAC-address! */
			skb->pkt_type = PACKET_HOST;
		}
		break;
	default:
		break;
	}

	/*  Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */
	/* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
	proto = vhdr->h_vlan_encapsulated_proto;

	skb->protocol = proto;
	if (ntohs(proto) >= 1536) {
		/* place it back on the queue to be handled by
		 * true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO:  Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	rawp = skb->data;

	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF) {
		skb->protocol = __constant_htons(ETH_P_802_3);
		/* place it back on the queue to be handled by true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO:  Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	/*
	 *	Real 802.2 LLC
	 */
	skb->protocol = __constant_htons(ETH_P_802_2);
	/* place it back on the queue to be handled by upper layer protocols.
	 */

	/* See if we are configured to re-write the VLAN header
	 * to make it look like ethernet...
	 */
	skb = vlan_check_reorder_header(skb);

	/* Can be null if skb-clone fails when re-ordering */
	if (skb) {
		netif_rx(skb);
	} else {
		/* TODO:  Add a more specific counter here. */
		stats->rx_errors++;
	}
	rcu_read_unlock();
	return 0;
}
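Here the fix-up relies on the fact that skb_pull() only advances skb->data: the saved vhdr pointer still addresses the VLAN header inside the buffer, so skb_postpull_rcsum(skb, vhdr, VLAN_HLEN) can checksum the four bytes that were just pulled.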
Example #15
/* geneve receive/decap routine */
static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
{
	struct genevehdr *gnvh = geneve_hdr(skb);
	struct metadata_dst *tun_dst;
	struct geneve_dev *geneve = NULL;
#ifdef HAVE_DEV_TSTATS
	struct pcpu_sw_netstats *stats;
#endif
	struct iphdr *iph;
	u8 *vni;
	__be32 addr;
	int err;
	union {
		struct metadata_dst dst;
		char buf[sizeof(struct metadata_dst) + 256];
	} buf;

	iph = ip_hdr(skb); /* outer IP header... */

	if (gs->collect_md) {
		static u8 zero_vni[3];

		vni = zero_vni;
		addr = 0;
	} else {
		vni = gnvh->vni;
		addr = iph->saddr;
	}

	geneve = geneve_lookup(gs, addr, vni);
	if (!geneve)
		goto drop;

	if (ip_tunnel_collect_metadata() || gs->collect_md) {
		__be16 flags;

		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
			(gnvh->oam ? TUNNEL_OAM : 0) |
			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);

		tun_dst = &buf.dst;
		ovs_udp_tun_rx_dst(&tun_dst->u.tun_info, skb, AF_INET, flags,
				   vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4);
		/* Update tunnel dst according to Geneve options. */
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					gnvh->options, gnvh->opt_len * 4);
	} else {
		/* Drop packets w/ critical options,
		 * since we don't support any...
		 */
		tun_dst = NULL;
		if (gnvh->critical)
			goto drop;
	}

	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
	skb->protocol = eth_type_trans(skb, geneve->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	if (tun_dst)
		ovs_skb_dst_set(skb, &tun_dst->dst);
	else
		goto drop;
	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
		goto drop;

	skb_reset_network_header(skb);

	err = IP_ECN_decapsulate(iph, skb);

	if (unlikely(err)) {
		if (err > 1) {
			++geneve->dev->stats.rx_frame_errors;
			++geneve->dev->stats.rx_errors;
			goto drop;
		}
	}

#ifdef HAVE_DEV_TSTATS
	stats = this_cpu_ptr((struct pcpu_sw_netstats __percpu *)geneve->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);
#endif
	netdev_port_receive(skb, &tun_dst->u.tun_info);
	return;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
}
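The tun_dst here points into an on-stack union sized sizeof(struct metadata_dst) + 256 bytes, so Geneve options can be copied in with ip_tunnel_info_opts_set() without a heap allocation; the checksum fix-up itself is the familiar one for the Ethernet header consumed by eth_type_trans().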
Example #16
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;
	__be16 gre_proto;
	unsigned int len;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct net_device_stats *stats = &tunnel->dev->stats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb_rtable(skb)->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		len = skb->len;

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				stats->rx_length_errors++;
				stats->rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		stats->rx_packets++;
		stats->rx_bytes += len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return 0;
}
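This receive path adjusts the checksum twice: once for the variable-length GRE header (offset bytes) removed with __pskb_pull(), and again for the inner Ethernet header when the tunnel is of type ARPHRD_ETHER and eth_type_trans() has consumed it.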
Example #17
File: ip_tunnel.c  Project: gbtian/mpip
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	if (tunnel->net != dev_net(tunnel->dev))
		skb_scrub_packet(skb);

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}
	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
Example #18
static inline int ip6_input_finish(struct sk_buff *skb)
{
	struct inet6_protocol *ipprot;
	struct sock *raw_sk;
	unsigned int nhoff;
	int nexthdr;
	u8 hash;

	/*
	 *	Parse extension headers
	 */

	rcu_read_lock();
resubmit:
	if (!pskb_pull(skb, skb->h.raw - skb->data))
		goto discard;
	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb->nh.raw[nhoff];

	raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]);
	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
		raw_sk = NULL;

	hash = nexthdr & (MAX_INET_PROTOS - 1);
	if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
		int ret;
		
		if (ipprot->flags & INET6_PROTO_FINAL) {
			struct ipv6hdr *hdr;	

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset(skb);

			skb_postpull_rcsum(skb, skb->nh.raw,
					   skb->h.raw - skb->nh.raw);
			hdr = skb->nh.ipv6h;
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
			    &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr))
				goto discard;
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 
			goto discard;
		
		ret = ipprot->handler(&skb);
		if (ret > 0)
			goto resubmit;
		else if (ret == 0)
			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw_sk) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				IP6_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
				            ICMPV6_UNK_NEXTHDR, nhoff,
				            skb->dev);
			}
		} else
			IP6_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
		kfree_skb(skb);
	}
	rcu_read_unlock();
	return 0;

discard:
	IP6_INC_STATS_BH(IPSTATS_MIB_INDISCARDS);
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}
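This last example predates the offset-based header accessors: skb->nh.raw and skb->h.raw are the raw network/transport header pointers that skb_network_header()/skb_transport_header() later replaced, so its skb_postpull_rcsum() call covers the same span (the IPv6 header plus extension headers) as the one in example #13.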