Example 1
static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(mac_header_end(skb),
						  MPLS_HLEN, 0));

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* mac_header_end() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(mac_header_end(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;
	return 0;
}
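
The open-coded csum_sub()/csum_partial() adjustment above is the CHECKSUM_COMPLETE fix-up that later kernels wrap in skb_postpull_rcsum(), used in Example 2. A minimal sketch of that helper's core logic, assuming only the primitives already visible above (the real helper also covers other ip_summed states in newer kernels):

static inline void postpull_rcsum_sketch(struct sk_buff *skb,
					 const void *start, unsigned int len)
{
	/* Subtract the checksum of the bytes about to be pulled out of
	 * the packet from the running complete checksum.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}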
Example 2
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
Example 3
int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_comp_hdr *ipch;
	struct ipcomp_data *ipcd = x->data;

	if (skb->len < ipcd->threshold) {
		/* Don't bother compressing */
		goto out_ok;
	}

	if (skb_linearize_cow(skb))
		goto out_ok;

	err = ipcomp_compress(x, skb);

	if (err)
		goto out_ok;

	
	/* Install ipcomp header, convert into ipcomp datagram. */
	ipch = ip_comp_hdr(skb);
	ipch->nexthdr = *skb_mac_header(skb);
	ipch->flags = 0;
	ipch->cpi = htons((u16)ntohl(x->id.spi));
	*skb_mac_header(skb) = IPPROTO_COMP;
out_ok:
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
Example 4
static bool match(const struct sk_buff *skb, struct xt_action_param *par)
{
	unsigned char eui64[8];
	int i = 0;

	if (!(skb_mac_header(skb) >= skb->head &&
	      (skb_mac_header(skb) + ETH_HLEN) <= skb->data) &&
	    par->fragoff != 0) {
		par->hotdrop = true;
		return false;
	}

	memset(eui64, 0, sizeof(eui64));

	if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
		if (ipv6_hdr(skb)->version == 0x6) {
			memcpy(eui64, eth_hdr(skb)->h_source, 3);
			memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
			eui64[3] = 0xff;
			eui64[4] = 0xfe;
			eui64[0] ^= 0x02;

			i = 0;
			while (i < 8 &&
			       ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i])
				i++;

			if (i == 8)
				return true;
		}
	}

	return false;
}
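
For reference, the EUI-64 construction inlined above (and again in Example 10) follows RFC 4291 Appendix A: split the 48-bit MAC, insert 0xff,0xfe in the middle, and flip the universal/local bit. A self-contained sketch; mac_to_eui64() is our illustrative name, not a kernel helper:

static void mac_to_eui64(const unsigned char *mac, unsigned char eui64[8])
{
	memcpy(eui64, mac, 3);		/* upper 24 bits of the MAC */
	eui64[3] = 0xff;		/* fixed filler bytes ... */
	eui64[4] = 0xfe;		/* ... per RFC 4291 Appendix A */
	memcpy(eui64 + 5, mac + 3, 3);	/* lower 24 bits of the MAC */
	eui64[0] ^= 0x02;		/* toggle the universal/local bit */
}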
Example 5
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const unsigned char *old_mac;
	int err = -EINVAL;

	if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;

	if (skb_cloned(skb) &&
	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		goto out;

	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
	if (!(x->props.flags & XFRM_STATE_NOECN))
		ipip_ecn_decapsulate(skb);

	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	skb_reset_network_header(skb);
	err = 0;

out:
	return err;
}
Example 6
static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *ip6h;
	const unsigned char *old_mac;
	int size = sizeof(struct ipv6hdr);
	int err;

	err = skb_cow_head(skb, size + skb->mac_len);
	if (err)
		goto out;

	__skb_push(skb, size);
	skb_reset_network_header(skb);

	old_mac = skb_mac_header(skb);
	skb_set_mac_header(skb, -skb->mac_len);
	memmove(skb_mac_header(skb), old_mac, skb->mac_len);

	xfrm6_beet_make_header(skb);

	ip6h = ipv6_hdr(skb);
	ip6h->payload_len = htons(skb->len - size);
	ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
	ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
	err = 0;
out:
	return err;
}
Example 7
static int
bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
		  const struct xt_action_param *par,
		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
{
	struct bitmap_ipmac *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct ipmac data;

	/* MAC can be src only */
	if (!(opt->flags & IPSET_DIM_TWO_SRC))
		return 0;

	data.id = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
	if (data.id < map->first_ip || data.id > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* Backward compatibility: we don't check the second flag */
	if (skb_mac_header(skb) < skb->head ||
	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
		return -EINVAL;

	data.id -= map->first_ip;
	data.ether = eth_hdr(skb)->h_source;

	return adtfn(set, &data, opt_timeout(opt, map), opt->cmdflags);
}
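
The pointer comparison above (the same test appears in Examples 4, 10 and 16) verifies that a complete Ethernet header really lies inside the skb head before eth_hdr() is dereferenced. Factored out as a sketch, under our own name rather than a kernel API:

static bool mac_header_is_complete(const struct sk_buff *skb)
{
	/* The MAC header must start within the allocated head and the
	 * full Ethernet header must end at or before skb->data.
	 */
	return skb_mac_header(skb) >= skb->head &&
	       skb_mac_header(skb) + ETH_HLEN <= skb->data;
}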
Example 8
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
Example 9
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
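
The variant above writes the label through struct mpls_shim_hdr, whose single label_stack_entry field packs the 20-bit label, 3-bit traffic class, bottom-of-stack flag and TTL into one big-endian word. A sketch of building such an entry with the UAPI shift/mask constants from linux/mpls.h; the helper name is ours, and the caller is assumed to pass a valid 20-bit label:

static __be32 mpls_lse_build(u32 label, u8 tc, bool bos, u8 ttl)
{
	/* 20-bit label | 3-bit TC | 1-bit bottom-of-stack | 8-bit TTL */
	return cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
			   (tc << MPLS_LS_TC_SHIFT) |
			   (bos ? MPLS_LS_S_MASK : 0) |
			   (ttl << MPLS_LS_TTL_SHIFT));
}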
Example 10
static bool
eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
{
	unsigned char eui64[8];

	if (!(skb_mac_header(skb) >= skb->head &&
	      skb_mac_header(skb) + ETH_HLEN <= skb->data) &&
	    par->fragoff != 0) {
		*par->hotdrop = true;
		return false;
	}

	memset(eui64, 0, sizeof(eui64));

	if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
		if (ipv6_hdr(skb)->version == 0x6) {
			memcpy(eui64, eth_hdr(skb)->h_source, 3);
			memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3);
			eui64[3] = 0xff;
			eui64[4] = 0xfe;
			eui64[0] ^= 0x02;

			if (!memcmp(ipv6_hdr(skb)->saddr.s6_addr + 8, eui64,
				    sizeof(eui64)))
				return true;
		}
	}

	return false;
}
Example 11
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) {
		struct ethhdr *hdr;

		/* mpls_hdr() is used to locate the ethertype field correctly in the
		 * presence of VLAN tags.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		update_ethertype(skb, hdr, ethertype);
	}
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
Example 12
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
Example 13
static int push_mpls(struct sk_buff *skb,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)mac_header_end(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;
	return 0;
}
Example 14
static void dump_ipv6_mac_header(struct nf_log_buf *m,
				 const struct nf_loginfo *info,
				 const struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	unsigned int logflags = 0;

	if (info->type == NF_LOG_TYPE_LOG)
		logflags = info->u.log.logflags;

	if (!(logflags & NF_LOG_MACDECODE))
		goto fallback;

	switch (dev->type) {
	case ARPHRD_ETHER:
		nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
		       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
		       ntohs(eth_hdr(skb)->h_proto));
		return;
	default:
		break;
	}

fallback:
	nf_log_buf_add(m, "MAC=");
	if (dev->hard_header_len &&
	    skb->mac_header != skb->network_header) {
		const unsigned char *p = skb_mac_header(skb);
		unsigned int len = dev->hard_header_len;
		unsigned int i;

		if (dev->type == ARPHRD_SIT) {
			p -= ETH_HLEN;

			if (p < skb->head)
				p = NULL;
		}

		if (p != NULL) {
			nf_log_buf_add(m, "%02x", *p++);
			for (i = 1; i < len; i++)
				nf_log_buf_add(m, ":%02x", *p++);
		}
		nf_log_buf_add(m, " ");

		if (dev->type == ARPHRD_SIT) {
			const struct iphdr *iph =
				(struct iphdr *)skb_mac_header(skb);
			nf_log_buf_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
				       &iph->daddr);
		}
	} else {
		nf_log_buf_add(m, " ");
	}
}
Example 15
static void
ip6t_log_packet(unsigned int pf,
        unsigned int hooknum,
        const struct sk_buff *skb,
        const struct net_device *in,
        const struct net_device *out,
        const struct nf_loginfo *loginfo,
        const char *prefix)
{
    if (!loginfo)
        loginfo = &default_loginfo;

    spin_lock_bh(&log_lock);
    printk("<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
        prefix,
        in ? in->name : "",
        out ? out->name : "");
    if (in && !out) {
        unsigned int len;
        /* MAC logging for input chain only. */
        printk("MAC=");
        if (skb->dev && (len = skb->dev->hard_header_len) &&
            skb->mac_header != skb->network_header) {
            const unsigned char *p = skb_mac_header(skb);
            int i;

            if (skb->dev->type == ARPHRD_SIT &&
                (p -= ETH_HLEN) < skb->head)
                p = NULL;

            if (p != NULL) {
                for (i = 0; i < len; i++)
                    printk("%02x%s", p[i],
                           i == len - 1 ? "" : ":");
            }
            printk(" ");

            if (skb->dev->type == ARPHRD_SIT) {
                const struct iphdr *iph =
                    (struct iphdr *)skb_mac_header(skb);
                printk("TUNNEL=%u.%u.%u.%u->%u.%u.%u.%u ",
                       NIPQUAD(iph->saddr),
                       NIPQUAD(iph->daddr));
            }
        } else
            printk(" ");
    }

    dump_packet(loginfo, skb, skb_network_offset(skb), 1);
    printk("\n");
    spin_unlock_bh(&log_lock);
}
Example 16
static bool mac_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_mac_info *info = par->matchinfo;
	bool ret;

	if (skb->dev == NULL || skb->dev->type != ARPHRD_ETHER)
		return false;
	if (skb_mac_header(skb) < skb->head)
		return false;
	if (skb_mac_header(skb) + ETH_HLEN > skb->data)
		return false;
	ret  = compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr) == 0;
	ret ^= info->invert;
	return ret;
}
Example 17
int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
{
	int err;
	struct xfrm_state *x;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (skb_is_gso(skb))
		return 0;

	if (xo) {
		x = skb->sp->xvec[skb->sp->len - 1];
		if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
			return 0;

		x->outer_mode->xmit(x, skb);

		err = x->type_offload->xmit(x, skb, features);
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			return err;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
	}

	return 0;
}
Example 18
static int nf_trace_fill_pkt_info(struct sk_buff *nlskb,
				  const struct nft_pktinfo *pkt)
{
	const struct sk_buff *skb = pkt->skb;
	unsigned int len = min_t(unsigned int,
				 pkt->xt.thoff - skb_network_offset(skb),
				 NFT_TRACETYPE_NETWORK_HSIZE);
	int off = skb_network_offset(skb);

	if (trace_fill_header(nlskb, NFTA_TRACE_NETWORK_HEADER, skb, off, len))
		return -1;

	len = min_t(unsigned int, skb->len - pkt->xt.thoff,
		    NFT_TRACETYPE_TRANSPORT_HSIZE);

	if (trace_fill_header(nlskb, NFTA_TRACE_TRANSPORT_HEADER, skb,
			      pkt->xt.thoff, len))
		return -1;

	if (!skb_mac_header_was_set(skb))
		return 0;

	if (skb_vlan_tag_get(skb))
		return nf_trace_fill_ll_header(nlskb, skb);

	off = skb_mac_header(skb) - skb->data;
	len = min_t(unsigned int, -off, NFT_TRACETYPE_LL_HSIZE);
	return trace_fill_header(nlskb, NFTA_TRACE_LL_HEADER,
				 skb, off, len);
}
Example 19
/**
 * brcmf_skb_is_iapp - checks if skb is an IAPP packet
 *
 * @skb: skb to check
 */
static bool brcmf_skb_is_iapp(struct sk_buff *skb)
{
	static const u8 iapp_l2_update_packet[6] __aligned(2) = {
		0x00, 0x01, 0xaf, 0x81, 0x01, 0x00,
	};
	unsigned char *eth_data;
#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	const u16 *a, *b;
#endif

	if (skb->len - skb->mac_len != 6 ||
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	eth_data = skb_mac_header(skb) + ETH_HLEN;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return !(((*(const u32 *)eth_data) ^ (*(const u32 *)iapp_l2_update_packet)) |
		 ((*(const u16 *)(eth_data + 4)) ^ (*(const u16 *)(iapp_l2_update_packet + 4))));
#else
	a = (const u16 *)eth_data;
	b = (const u16 *)iapp_l2_update_packet;

	return !((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2]));
#endif
}
Example 20
static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t mpls_features;
	__be16 mpls_protocol;

	/* Setup inner SKB. */
	mpls_protocol = skb->protocol;
	skb->protocol = skb->inner_protocol;

	/* Push back the mac header that skb_mac_gso_segment() has pulled.
	 * It will be re-pulled by the call to skb_mac_gso_segment() below
	 */
	__skb_push(skb, skb->mac_len);

	/* Segment inner packet. */
	mpls_features = skb->dev->mpls_features & features;
	segs = skb_mac_gso_segment(skb, mpls_features);

	/* Restore outer protocol. */
	skb->protocol = mpls_protocol;

	/* Re-pull the mac header that the call to skb_mac_gso_segment()
	 * above pulled.  It will be re-pushed after returning
	 * skb_mac_gso_segment(), an indirect caller of this function.
	 */
	__skb_pull(skb, skb->data - skb_mac_header(skb));

	return segs;
}
Example 21
static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
			   unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (skb_bond_should_drop(skb))
		goto drop;

	skb->vlan_tci = vlan_tci;
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return GRO_DROP;
}
Example 22
/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
			      bool is_sup)
{
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *) skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
			return node;
		if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
			return node;
	}

	if (!is_sup)
		return NULL; /* Only supervision frame may create node entry */

	if (ethhdr->h_proto == htons(ETH_P_PRP)) {
		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
		seq_out = 0;
	}

	return hsr_add_node(node_db, ethhdr->h_source, seq_out);
}
Example 23
bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Only the last call to vlan_do_receive() should change
		 * pkt_type to PACKET_OTHERHOST
		 */
		if (vlan_id && last_handler)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expect skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
Example 24
static void ccmni_dbg_skb_addr(int md_id, bool tx, struct sk_buff *skb, int idx)
{
	CCMNI_INF_MSG(md_id, "[SKB][%s] idx=%d addr=%p len=%d data_len=%d, L2_addr=%p L3_addr=%p L4_addr=%p\n",
		tx?"TX":"RX", idx,
		(void *)skb->data, skb->len, skb->data_len, (void *)skb_mac_header(skb),
		(void *)skb_network_header(skb), (void *)skb_transport_header(skb));
}
Example 25
static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		goto drop;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return GRO_DROP;
}
Example 26
/* Get the start of the PTP header in this skb */
static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
{
	u8 *data = skb_mac_header(skb);
	unsigned int offset = 0;

	if (type & PTP_CLASS_VLAN)
		offset += VLAN_HLEN;

	switch (type & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
		break;
	case PTP_CLASS_IPV6:
		offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
		break;
	case PTP_CLASS_L2:
		offset += ETH_HLEN;
		break;
	default:
		return NULL;
	}

	/* Ensure that the entire header is present in this packet. */
	if (skb->len + ETH_HLEN < offset + 34)
		return NULL;

	return data + offset;
}
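
A hypothetical caller pairs this parser with the classifier from linux/ptp_classify.h: classify the frame first, then hand the class to parse_ptp_header(). Sketch only; the pr_debug() consumer is illustrative:

static void ptp_rx_sketch(struct sk_buff *skb)
{
	unsigned int type = ptp_classify_raw(skb);
	u8 *hdr;

	if (type == PTP_CLASS_NONE)
		return;

	hdr = parse_ptp_header(skb, type);
	if (hdr)
		/* Low nibble of byte 0 is the IEEE 1588 messageType. */
		pr_debug("PTP message type %u\n", hdr[0] & 0x0f);
}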
Example 27
/* hfi1_vnic_update_tx_counters - update transmit counters */
static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
					 u8 q_idx, struct sk_buff *skb, int err)
{
	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
	struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
	struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp;
	u16 vlan_tci;

	stats->netstats.tx_packets++;
	stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;

	update_len_counters(tx_grp, skb->len);

	/* rest of the counts are for good packets only */
	if (unlikely(err))
		return;

	if (is_multicast_ether_addr(mac_hdr->h_dest))
		tx_grp->mcastbcast++;
	else
		tx_grp->unicast++;

	if (!__vlan_get_tag(skb, &vlan_tci))
		tx_grp->vlan++;
	else
		tx_grp->untagged++;
}
Example 28
static struct sk_buff* cfg(struct aoedev *d, struct sk_buff *skb)
{
	struct aoe_hdr *aoe;
	struct aoe_cfghdr *cfg;
	int len, cslen, ccmd;

	aoe = (struct aoe_hdr *) skb_mac_header(skb);
	cfg = (struct aoe_cfghdr *) aoe->data;
	cslen = ntohs(cfg->cslen);
	ccmd = cfg->aoeccmd & 0xf;
	len = sizeof *aoe;

	cfg->bufcnt = htons(nelem(d->reqs));
	cfg->scnt = MAXSECTORS(d->netdev->mtu);
	cfg->fwver = __constant_htons(0x0002);
	cfg->aoeccmd = AOE_HVER;

	if (cslen > nelem(d->config))
		goto drop;

	switch (ccmd) {
	case AOECCMD_TEST:
		if (d->nconfig != cslen)
			goto drop;
		// fall thru
	case AOECCMD_PTEST:
		if (cslen > d->nconfig)
			goto drop;
		if (memcmp(cfg->data, d->config, cslen) != 0)
			goto drop;
		// fall thru
	case AOECCMD_READ:
		cfg->cslen = cpu_to_be16(d->nconfig);
		memcpy(cfg->data, d->config, d->nconfig);
		len += sizeof *cfg + d->nconfig;
		break;
	case AOECCMD_SET:
		if (d->nconfig &&
		    (d->nconfig != cslen ||
		     memcmp(cfg->data, d->config, cslen) != 0)) {
			aoe->verfl |= AOEFL_ERR;
			aoe->err = AOEERR_CFG;
			break;
		}
		// fall thru
	case AOECCMD_FSET:
		d->nconfig = cslen;
		memcpy(d->config, cfg->data, cslen);
		len += sizeof *cfg + cslen;
		break;
	default:
		aoe->verfl |= AOEFL_ERR;
		aoe->err = AOEERR_ARG;
	}
	skb_trim(skb, len);
	return skb;
drop:
	dev_kfree_skb(skb);
	return NULL;
}
Example 29
inline struct ethhdr *skb2ethhdr(const struct sk_buff *skb)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
	return (struct ethhdr *)skb->mac.raw;
#else
	return (struct ethhdr *)skb_mac_header(skb);
#endif
}
Example 30
static void
ipt_log_packet(unsigned int pf,
               unsigned int hooknum,
               const struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
               const struct nf_loginfo *loginfo,
               const char *prefix)
{
    static char buf[4096];
    size_t buf_siz = sizeof(buf);
    size_t buf_len = 0;

    *buf = '\0';

    if (!loginfo)
        loginfo = &default_loginfo;

    spin_lock_bh(&log_lock);
    BUF_ADD(buf,buf_siz,buf_len, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
            prefix[0] ? prefix : "netfilter: ",
            in ? in->name : "",
            out ? out->name : "");
#ifdef CONFIG_BRIDGE_NETFILTER
    if (skb->nf_bridge) {
        const struct net_device *physindev;
        const struct net_device *physoutdev;

        physindev = skb->nf_bridge->physindev;
        if (physindev && in != physindev)
            BUF_ADD(buf,buf_siz,buf_len, "PHYSIN=%s ", physindev->name);

        physoutdev = skb->nf_bridge->physoutdev;
        if (physoutdev && out != physoutdev)
            BUF_ADD(buf,buf_siz,buf_len, "PHYSOUT=%s ", physoutdev->name);
    }
#endif

    if (in && !out) {
        /* MAC logging for input chain only. */
        BUF_ADD(buf,buf_siz,buf_len, "MAC=");
        if (skb->dev && skb->dev->hard_header_len
                && skb->mac_header != skb->network_header) {
            int i;
            const unsigned char *p = skb_mac_header(skb);
            for (i = 0; i < skb->dev->hard_header_len; i++,p++)
                BUF_ADD(buf,buf_siz,buf_len, "%02x%c", *p,
                        i==skb->dev->hard_header_len - 1
                        ? ' ':':');
        } else
            BUF_ADD(buf,buf_siz,buf_len, " ");
    }

    dump_packet(loginfo, skb, 0, buf, buf_siz);
    BUF_ADD(buf,buf_siz,buf_len, "\n");
    send_data(buf, strlen(buf));
    spin_unlock_bh(&log_lock);
}