static int bpf_dp_ctx_init(struct bpf_dp_context *ctx)
{
	struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(ctx->skb)->tun_key;

	if (skb_headroom(ctx->skb) < 64) {
		if (pskb_expand_head(ctx->skb, 64, 0, GFP_ATOMIC))
			return -ENOMEM;
	}
	ctx->context.length = ctx->skb->len;
	ctx->context.vlan_tag = vlan_tx_tag_present(ctx->skb) ?
			vlan_tx_tag_get(ctx->skb) : 0;
	ctx->context.hw_csum = (ctx->skb->ip_summed == CHECKSUM_PARTIAL);
	if (tun_key) {
		ctx->context.tun_key.tun_id =
				be32_to_cpu(be64_get_low32(tun_key->tun_id));
		ctx->context.tun_key.src_ip = be32_to_cpu(tun_key->ipv4_src);
		ctx->context.tun_key.dst_ip = be32_to_cpu(tun_key->ipv4_dst);
		ctx->context.tun_key.tos = tun_key->ipv4_tos;
		ctx->context.tun_key.ttl = tun_key->ipv4_ttl;
	} else {
		memset(&ctx->context.tun_key, 0,
		       sizeof(struct bpf_ipv4_tun_key));
	}

	return 0;
}
int bpf_push_vlan(struct bpf_context *pctx, u16 proto, u16 vlan)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct sk_buff *skb = ctx->skb;
	u16 current_tag;

	if (unlikely(!skb))
		return -EINVAL;

	if (vlan_tx_tag_present(skb)) {
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) {
			ctx->skb = NULL;
			return -ENOMEM;
		}

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
		ctx->context.length = skb->len;
	}
	__vlan_hwaccel_put_tag(skb, proto, vlan);
	ctx->context.vlan_tag = vlan;

	return 0;
}
Example 3
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		/* Update mac_len for subsequent MPLS actions */
		skb->mac_len += VLAN_HLEN;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

		invalidate_skb_flow_key(skb);
	} else {
		flow_key_set_vlan_tci(skb,  vlan->vlan_tci);
	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
Example 4
uint32_t
__adf_net_get_vlantag(struct sk_buff *skb)
{

    if(!vlan_tx_tag_present(skb))
        return A_STATUS_ENOTSUPP;

    return vlan_tx_tag_get(skb);
}
Example 5
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit
	int err = -ENOMEM;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return err;
		vlan_set_tci(skb, 0);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}
Example 6
int vxlan_xmit_skb(struct vxlan_sock *vs,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!vlan_insert_tag_set_proto(skb,
							skb->vlan_proto,
							vlan_tx_tag_get(skb))))
			return -ENOMEM;

		vlan_set_tci(skb, 0);
	}

	skb_reset_inner_headers(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(vs->sock->sk, skb);

	skb = handle_offloads(skb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
			     tos, ttl, df, false);
}
int bpf_pop_vlan(struct bpf_context *pctx)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct sk_buff *skb = ctx->skb;

	if (unlikely(!skb))
		return -EINVAL;

	ctx->context.vlan_tag = 0;
	if (vlan_tx_tag_present(skb)) {
		skb->vlan_tci = 0;
	} else {
		if (skb->protocol != htons(ETH_P_8021Q) ||
		    skb->len < VLAN_ETH_HLEN)
			return 0;

		if (!pskb_may_pull(skb, ETH_HLEN))
			return 0;

		__skb_pull(skb, ETH_HLEN);
		skb = vlan_untag(skb);
		if (!skb) {
			ctx->skb = NULL;
			return -ENOMEM;
		}
		__skb_push(skb, ETH_HLEN);

		skb->vlan_tci = 0;
		ctx->context.length = skb->len;
		ctx->skb = skb;
	}
	/* move next vlan tag to hw accel tag */
	if (skb->protocol != htons(ETH_P_8021Q) ||
	    skb->len < VLAN_ETH_HLEN)
		return 0;

	if (!pskb_may_pull(skb, ETH_HLEN))
		return 0;

	__skb_pull(skb, ETH_HLEN);
	skb = vlan_untag(skb);
	if (!skb) {
		ctx->skb = NULL;
		return -ENOMEM;
	}
	__skb_push(skb, ETH_HLEN);

	ctx->context.vlan_tag = vlan_tx_tag_get(skb);
	ctx->context.length = skb->len;
	ctx->skb = skb;

	return 0;
}
Example 8
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}
Example 9
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep(br, vlan_tx_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}
Example 10
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}
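
The comment above leans on the 802.1Q TCI layout: the 3-bit priority code point (PCP) occupies the top three bits of the tag, so "vlan_tag >> 13" yields a priority in the range 0-7, which the driver adds to MLX4_EN_NUM_TX_RINGS to pick a per-priority ring. A minimal user-space sketch of that arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Extract the 802.1Q priority code point (PCP) from a 16-bit TCI. */
static unsigned int tci_to_pcp(uint16_t tci)
{
	return tci >> 13;	/* top 3 bits: priority 0..7 */
}

int main(void)
{
	uint16_t tci = (5u << 13) | 100;	/* priority 5, VLAN ID 100 */

	/* Mirrors the driver's "MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13)" mapping. */
	printf("PCP=%u VID=%u\n", tci_to_pcp(tci), (unsigned int)(tci & 0x0fff));
	return 0;
}
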
Example 11
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, current_tag))
			return -ENOMEM;

		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	}
	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
Example 12
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
						current_tag);
		if (!skb)
			return -ENOMEM;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
Example 13
/* Transmit a fully formatted Geneve frame.
 *
 * When calling this function, skb->data should point
 * to the Geneve header, which must already be fully formed.
 *
 * This function will add the remaining UDP tunnel headers.
 */
int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
		    bool xnet)
{
	struct genevehdr *gnvh;
	int min_headroom;
	int err;

	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb)))) {
			err = -ENOMEM;
			return err;
		}
		skb->vlan_tci = 0;
	}

	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
				   tos, ttl, df, src_port, dst_port, xnet);
}
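
The min_headroom computation above budgets space for everything that still has to be pushed in front of the inner frame: the Geneve header plus options, the outer IPv4 header, the link-layer reserve, and four extra bytes if a hardware-accelerated VLAN tag must first be written back into the frame. A user-space sketch of that budget, with all sizes passed in explicitly because the kernel constants (LL_RESERVED_SPACE, GENEVE_BASE_HLEN, VLAN_HLEN, struct iphdr) are only symbolic here; the numbers in main() are illustrative, not authoritative:

#include <stdio.h>

/* Mirror of the headroom budget in geneve_xmit_skb(); every size is a
 * parameter so that no kernel constant is assumed. */
static int geneve_min_headroom(int ll_reserved, int dst_header_len,
			       int geneve_base_hlen, int opt_len,
			       int outer_ip_hlen, int vlan_tagged, int vlan_hlen)
{
	return ll_reserved + dst_header_len
		+ geneve_base_hlen + opt_len + outer_ip_hlen
		+ (vlan_tagged ? vlan_hlen : 0);
}

int main(void)
{
	/* Example only: 16B link-layer reserve, no extra dst header, 16B for
	 * the UDP + Geneve base header, 8B of Geneve options, 20B outer IPv4,
	 * plus 4B for a VLAN tag pushed back into the frame. */
	printf("min_headroom = %d bytes\n",
	       geneve_min_headroom(16, 0, 16, 8, 20, 1, 4));
	return 0;
}
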
Example 14
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;

	if (enic->vlan_group && vlan_tx_tag_present(skb)) {
		
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag);
	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag);
}
Example 15
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	/*
	 * If a hardware-accelerated VLAN tag is already present, push it
	 * into the skb data first so that the tag order stays correct.
	 */
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, current_tag))
			return -ENOMEM;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	}

	__vlan_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
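
The comment at the top of this variant is about tag ordering: a tag that is still sitting in the skb's hardware-accel slot has to be written back into the frame before the new tag is applied, otherwise the outer/inner order would be inverted. A small user-space sketch of what that write-back does to the packet bytes, assuming the standard 802.1Q layout (a 4-byte TPID+TCI header inserted right after the two MAC addresses); the kernel's __vlan_put_tag() additionally handles headroom, checksums and skb bookkeeping:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htons */

#define MAC_ADDR_LEN	6
#define VLAN_TAG_LEN	4

/* Insert an 802.1Q tag (TPID + TCI) at offset 12 of an Ethernet frame.
 * Returns the new length, or 0 if the buffer is too small. */
static size_t push_vlan_tag(uint8_t *frame, size_t len, size_t cap,
			    uint16_t tpid, uint16_t tci)
{
	uint16_t be_tpid = htons(tpid), be_tci = htons(tci);

	if (len < 2 * MAC_ADDR_LEN || len + VLAN_TAG_LEN > cap)
		return 0;

	/* Make room right after the destination and source MAC addresses. */
	memmove(frame + 2 * MAC_ADDR_LEN + VLAN_TAG_LEN,
		frame + 2 * MAC_ADDR_LEN, len - 2 * MAC_ADDR_LEN);
	memcpy(frame + 2 * MAC_ADDR_LEN, &be_tpid, 2);
	memcpy(frame + 2 * MAC_ADDR_LEN + 2, &be_tci, 2);
	return len + VLAN_TAG_LEN;
}

int main(void)
{
	uint8_t frame[64] = { 0 };	/* starts as a bare Ethernet header */
	size_t len = 14;

	/* Push the tag that was held in the hw-accel slot into the frame. */
	len = push_vlan_tag(frame, len, sizeof(frame), 0x8100, (5u << 13) | 100);
	printf("frame length after push: %zu\n", len);
	return 0;
}
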
Example 16
static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
{
	struct net_device *netdev = netdev_vport_priv(vport)->dev;
	int len;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
	if (vlan_tx_tag_present(skb)) {
		if (unlikely(!__vlan_put_tag(skb,
					     skb->vlan_proto,
					     vlan_tx_tag_get(skb))))
			return 0;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum,
					     csum_partial(skb->data + (2 * ETH_ALEN),
							  VLAN_HLEN, 0));

		vlan_set_tci(skb, 0);
	}
#endif

	len = skb->len;

	skb_dst_drop(skb);
	nf_reset(skb);
	secpath_reset(skb);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, netdev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	netif_rx(skb);

	return len;
}
Example 17
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		if (net_ratelimit())
			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
				ovs_dp_name(vport->dp), packet_length(skb), mtu);
		goto error;
	}

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	skb->dev = netdev_vport->dev;
	forward_ip_summed(skb, true);

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
					kfree_skb(skb);
					return 0;
				}

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto tag;
			}

			if (IS_ERR(nskb)) {
				kfree_skb(skb);
				return 0;
			}
			consume_skb(skb);
			skb = nskb;

			len = 0;
			do {
				nskb = skb->next;
				skb->next = NULL;

				skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
				if (likely(skb)) {
					len += skb->len;
					vlan_set_tci(skb, 0);
					dev_queue_xmit(skb);
				}

				skb = nskb;
			} while (skb);

			return len;
		}

tag:
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return 0;
		vlan_set_tci(skb, 0);
	}

	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
Example 18
static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb; /* to be queued to userspace */
	struct nlattr *nla;
	unsigned int len;
	int err;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	len = sizeof(struct ovs_header);
	len += nla_total_size(skb->len);
	len += nla_total_size(FLOW_BUFSIZE);
	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
		len += nla_total_size(8);

	user_skb = genlmsg_new(len, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	ovs_flow_to_nlattrs(upcall_info->key, user_skb);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
			    nla_get_u64(upcall_info->userdata));

	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

	skb_copy_and_csum_dev(skb, nla_data(nla));

	err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);

out:
	kfree_skb(nskb);
	return err;
}
Example 19
ASF_int32_t asfctrl_delete_dev_map(struct net_device *dev)
{
	ASF_int32_t  cii;
	ASF_uint32_t ulVSGId;
	ASFCTRL_FUNC_ENTRY;
#ifdef CONFIG_PPPOE
	if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_PPP)) {
#else
	if (dev->type == ARPHRD_ETHER) {
#endif
		cii = asfctrl_dev_get_cii(dev);
		if (cii < 0) {
			ASFCTRL_DBG("Failed to determine cii for device %s\n",
				dev->name);
			return T_FAILURE;
		}
		ASFCTRL_DBG("UNMAP interface %s\n",  dev->name);
		/* Get the VSG Id from asfctrl_netns Subha 11/3 */
		ulVSGId = asfctrl_netns_net_to_vsg(dev_net(dev));
		printk("UnRegister Device: ulVSGId = %d\n", ulVSGId);
		ASFUnBindDeviceToVSG(ulVSGId, cii);
		ASFUnMapInterface(cii);
		dev_put(dev);
		p_asfctrl_netdev_cii[cii] = NULL;
		return T_SUCCESS;
	}

	ASFCTRL_FUNC_EXIT;
	return T_FAILURE;
}
EXPORT_SYMBOL(asfctrl_delete_dev_map);

#if (ASFCTRL_DEBUG_LEVEL >= LOGS)
char *print_netevent(int event)
{
	switch (event) {
	case NETDEV_UP:
		return (char *)"NETDEV_UP";
	case NETDEV_DOWN:
		return (char *)"NETDEV_DOWN";
	case NETDEV_REBOOT:
		return (char *)"NETDEV_REBOOT";
	case NETDEV_CHANGE:
		return (char *)"NETDEV_CHANGE";
	case NETDEV_REGISTER:
		return (char *)"NETDEV_REGISTER";
	case NETDEV_UNREGISTER:
		return (char *)"NETDEV_UNREGISTER";
	case NETDEV_CHANGEMTU:
		return (char *)"NETDEV_CHANGEMTU";
	case NETDEV_CHANGEADDR:
		return (char *)"NETDEV_CHANGEADDR";
	case NETDEV_GOING_DOWN:
		return (char *)"NETDEV_GOING_DOWN";
	case NETDEV_CHANGENAME:
		return (char *)"NETDEV_CHANGENAME";
	case NETDEV_PRE_UP:
		return (char *)"NETDEV_PRE_UP";
	default:
		return (char *)"UNKNOWN";
	}
}
#endif

static int asfctrl_dev_notifier_fn(struct notifier_block *this,
				unsigned long event, void *ptr)
{
#if 0 /* Linux versions less than 3.8 */
	struct net_device *dev = (struct net_device *)(ptr);
#else
	/* Versions above 3.8 */
	struct net_device *dev = ((struct netdev_notifier_info *)(ptr))->dev;
#endif

	if (dev == NULL)
	{
		ASFCTRL_DBG("asfctrl_dev_notifier: NULL String for dev? \n");
		return NOTIFY_DONE;
	}

	ASFCTRL_FUNC_ENTRY;
	printk(KERN_INFO "Subha: asfctrl_dev_notifier called for dev = 0x%x\n", dev);
	ASFCTRL_DBG("%s - event %ld (%s)\n",
			dev->name, event, print_netevent(event));

	/* handle only ethernet, vlan, bridge and pppoe (ppp) interfaces */
	switch (event) {
	case NETDEV_REGISTER: /* A new device is allocated */
		printk(KERN_INFO "Subha: NETDEV_REGISTER\n");
		printk(KERN_INFO "dev->type = %d, ARPHDR_ETHER =%d device=%s\n", dev->type, ARPHRD_ETHER, dev->name);
		ASFCTRL_INFO("Register Device type %d mac %pM\n", dev->type,
			dev->dev_addr);
		if (dev->type == ARPHRD_ETHER)
			asfctrl_create_dev_map(dev, 1);
		break;

	case NETDEV_UNREGISTER: /* A device is deallocated */
		ASFCTRL_INFO("Unregister Device type %d mac %pM\n", dev->type,
			dev->dev_addr);
#ifdef CONFIG_PPPOE
		if (dev->type == ARPHRD_ETHER  || dev->type == ARPHRD_PPP)
#else
		if (dev->type == ARPHRD_ETHER)
#endif
			asfctrl_delete_dev_map(dev);
		break;

#ifdef CONFIG_PPPOE
	case NETDEV_UP:
		if (dev->type == ARPHRD_PPP)
			asfctrl_create_dev_map(dev, 1);
		break;
#endif
	}
	ASFCTRL_FUNC_EXIT;
	return NOTIFY_DONE;
}

int asfctrl_dev_fp_tx_hook(struct sk_buff *skb, struct net_device *dev)
{
	ASF_uint16_t	usEthType;
	ASF_int32_t		hh_len;
	ASF_boolean_t	bPPPoE = 0;
	struct iphdr       *iph = 0;
	struct ipv6hdr       *ipv6h;
	unsigned int proto;
	unsigned int  tun_hdr = 0;

	ASFCTRL_FUNC_ENTRY;

	if (!asfctrl_skb_is_dummy(skb))
		return AS_FP_PROCEED;
	

	asfctrl_skb_unmark_dummy(skb);

	if (dev->type != ARPHRD_ETHER)
		goto drop;

	ASFCTRL_INFO("asfctrl_dev_fp_tx: 2\n");

	usEthType = skb->protocol;
	hh_len = ETH_HLEN;

	if (usEthType == __constant_htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data+hh_len);
		ASFCTRL_TRACE("8021Q packet");
		hh_len += VLAN_HLEN;
		usEthType = vhdr->h_vlan_encapsulated_proto;
	}

	if (usEthType == __constant_htons(ETH_P_PPP_SES)) {
		unsigned char *poe_hdr = skb->data+hh_len;
		unsigned short ppp_proto;

		ASFCTRL_TRACE("PPPoE packet");

		/* The PPPoE header is 6 bytes */
		ppp_proto = *(unsigned short *)(poe_hdr+6);
		/* PPPOE: VER=1,TYPE=1,CODE=0 and  PPP:_PROTO=0x0021 (IP) */
		if ((poe_hdr[0] != 0x11) || (poe_hdr[1] != 0) ||
			(ppp_proto != __constant_htons(0x0021))) {
				goto drop;
		}

		hh_len += (8); /* 6+2 -- pppoe+ppp headers */
		usEthType = __constant_htons(ETH_P_IP);
		bPPPoE = 1;
	}

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 3\n");
	if (usEthType != __constant_htons(ETH_P_IP) &&
		usEthType != __constant_htons(ETH_P_IPV6))
		goto drop;

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 4\n");

	if (usEthType == __constant_htons(ETH_P_IP)) {
		iph = (struct iphdr *)(skb->data+hh_len);
		proto = iph->protocol;
		if (proto == IPPROTO_IPV6) {
			ipv6h = (struct ipv6hdr *)(skb->data+hh_len+sizeof(struct iphdr));
			proto = ipv6h->nexthdr;
			tun_hdr = sizeof(struct iphdr);
		}
	} else {
		ipv6h = (struct ipv6hdr *)(skb->data+hh_len);
		proto = ipv6h->nexthdr;
		if (proto == IPPROTO_IPIP) {
			iph = (struct iphdr *)(skb->data+hh_len+sizeof(struct ipv6hdr));
			proto = iph->protocol;
			tun_hdr = sizeof(struct ipv6hdr);
		}
	}

	ASFCTRL_INFO("subha: asfctrl_dev_fp_tx: 5\n");
	switch (proto) {
		asf_linux_L2blobPktData_t *pData;
		ASFFFPUpdateFlowParams_t  cmd;

	case ASFCTRL_IPPROTO_DUMMY_L2BLOB:

		/*
		 * If the packet is coming in on a PPP interface, the network
		 * header points to the start of the PPPoE header instead of
		 * the IP header, so always identify the start of the IP
		 * header dynamically.
		 */

		memset(&cmd, 0, sizeof(cmd));
		cmd.u.l2blob.bUpdatePPPoELen = bPPPoE;


		ASFCTRL_INFO(
			"DUMMY_L2BLOB: %pM:%pM..%02x%02x (skb->proto 0x%04x) "
			"data 0x%p nw_hdr 0x%p tr_hdr 0x%p\n",
			skb->data, skb->data+6, skb->data[12], skb->data[13],
			skb->protocol, skb->data, skb_network_header(skb),
			skb_transport_header(skb));

		if (usEthType == __constant_htons(ETH_P_IP)) {
			pData = (asf_linux_L2blobPktData_t *)(skb->data+hh_len +
							(iph->ihl * 4) + (tun_hdr ? sizeof(struct ipv6hdr) : 0));
			cmd.u.l2blob.tunnel.bIP6IP4Out = tun_hdr ? 1 : 0;
		} else {
			pData = (asf_linux_L2blobPktData_t *)(skb->data+hh_len +
							sizeof(struct ipv6hdr) + (tun_hdr ? sizeof(struct iphdr) : 0));
			cmd.u.l2blob.tunnel.bIP4IP6Out = tun_hdr ? 1 : 0;
		}

		memcpy(&cmd.tuple, &pData->tuple, sizeof(cmd.tuple));
		cmd.ulZoneId = pData->ulZoneId;
		cmd.bL2blobUpdate = 1;
		cmd.u.l2blob.ulDeviceId = asfctrl_dev_get_cii(dev);
		cmd.u.l2blob.ulPathMTU = pData->ulPathMTU;

		cmd.u.l2blob.ulL2blobMagicNumber = asfctrl_vsg_l2blobconfig_id;

		/* need to include PPPOE+PPP header if any */
		cmd.u.l2blob.l2blobLen = hh_len + tun_hdr;

		memcpy(cmd.u.l2blob.l2blob, skb->data, cmd.u.l2blob.l2blobLen);
#ifdef CONFIG_VLAN_8021Q
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
		if (vlan_tx_tag_present(skb)) {
#else
		if (skb_vlan_tag_present(skb)) {
#endif
			cmd.u.l2blob.bTxVlan = 1;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
			cmd.u.l2blob.usTxVlanId = (vlan_tx_tag_get(skb)
							| VLAN_TAG_PRESENT);
#else
			cmd.u.l2blob.usTxVlanId = (skb_vlan_tag_get(skb)
							| VLAN_TAG_PRESENT);
#endif
		} else
#endif
			cmd.u.l2blob.bTxVlan = 0;

		ASFFFPRuntime(pData->ulVsgId, ASF_FFP_MODIFY_FLOWS, &cmd,
			sizeof(cmd), NULL, 0);
		break;

#ifdef ASFCTRL_IPSEC_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_IPSEC_L2BLOB:
		ASFCTRL_INFO("DUMMY_IPSEC_L2BLOB");

		skb->protocol = usEthType;
		if (fn_ipsec_l2blob_update)
			fn_ipsec_l2blob_update(skb,
				hh_len, asfctrl_dev_get_cii(dev));

		break;
#endif

#ifdef ASFCTRL_FWD_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_FWD_L2BLOB:
		ASFCTRL_INFO("DUMMY_FWD_L2BLOB");

		if (fn_fwd_l2blob_update)
			fn_fwd_l2blob_update(skb, hh_len,
				asfctrl_dev_get_cii(dev));

		break;
#endif
#ifdef ASFCTRL_TERM_FP_SUPPORT
	case ASFCTRL_IPPROTO_DUMMY_TERM_L2BLOB:
		ASFCTRL_INFO("DUMMY_TERM_L2BLOB");

		if (fn_term_l2blob_update)
			fn_term_l2blob_update(skb, hh_len,
				asfctrl_dev_get_cii(dev));

		break;
#endif
	}
drop:
	ASFCTRLKernelSkbFree(skb);
	ASFCTRL_FUNC_EXIT;
	return AS_FP_STOLEN;
}
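
As the comment in the DUMMY_L2BLOB case notes, the IP header cannot be located from the network-header pointer alone when VLAN or PPPoE headers are in the way, so the function walks the link-layer headers itself. A condensed user-space sketch of that walk, mirroring the ethertype checks above (hypothetical helper, not part of the ASF sources):

#include <stdint.h>
#include <stdio.h>

/* Walk Ethernet -> optional 802.1Q tag -> optional PPPoE session header
 * to find the offset of the IP header, or return 0 if the frame is not
 * plain IPv4/IPv6.  Offsets follow the checks in asfctrl_dev_fp_tx_hook(). */
static unsigned int l3_offset(const uint8_t *p, unsigned int len)
{
	unsigned int off = 14;			/* Ethernet header */
	unsigned int ethtype;

	if (len < off)
		return 0;
	ethtype = (p[12] << 8) | p[13];

	if (ethtype == 0x8100) {		/* 802.1Q tag */
		if (len < off + 4)
			return 0;
		ethtype = (p[off + 2] << 8) | p[off + 3];
		off += 4;
	}
	if (ethtype == 0x8864) {		/* PPPoE session */
		if (len < off + 8)
			return 0;
		off += 8;			/* 6-byte PPPoE + 2-byte PPP proto */
		ethtype = 0x0800;		/* code above only accepts PPP proto 0x0021 (IPv4) */
	}
	if (ethtype != 0x0800 && ethtype != 0x86dd)	/* IPv4 or IPv6 */
		return 0;
	return off;
}

int main(void)
{
	/* 802.1Q-tagged frame (VID 10) carrying IPv4. */
	uint8_t frame[64] = { [12] = 0x81, [13] = 0x00, [15] = 10,
			      [16] = 0x08, [17] = 0x00 };

	printf("IP header starts at offset %u\n", l3_offset(frame, sizeof(frame)));
	return 0;
}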

static struct notifier_block asfctrl_dev_notifier = {
	.notifier_call = asfctrl_dev_notifier_fn,
};

ASF_void_t  asfctrl_fnInterfaceNotFound(
			ASFBuffer_t Buffer,
			genericFreeFn_t pFreeFn,
			ASF_void_t *freeArg)
{
	struct sk_buff  *skb;
	int bVal = in_softirq();

	ASFCTRL_FUNC_ENTRY;
	skb = AsfBuf2Skb(Buffer);

	if (!bVal)
		local_bh_disable();
	/* Send it for normal path handling */
	ASFCTRL_netif_receive_skb(skb);

	if (!bVal)
		local_bh_enable();
	ASFCTRL_FUNC_EXIT;
}

ASF_void_t  asfctrl_fnVSGMappingNotFound(
			ASF_uint32_t ulCommonInterfaceId,
			ASFBuffer_t Buffer,
			genericFreeFn_t pFreeFn,
			ASF_void_t *freeArg)
{
	struct sk_buff  *skb;
	int bVal = in_softirq();

	ASFCTRL_FUNC_ENTRY;
	skb = AsfBuf2Skb(Buffer);

	if (!bVal)
		local_bh_disable();
	/* Send it for normal path handling */
	ASFCTRL_netif_receive_skb(skb);

	if (!bVal)
		local_bh_enable();
	ASFCTRL_FUNC_EXIT;
}


static int __init asfctrl_init(void)
{
	int ret;
	ASFFFPConfigIdentity_t cmd;
	ASFFFPCallbackFns_t asfctrl_Cbs = {
		asfctrl_fnInterfaceNotFound,
		asfctrl_fnVSGMappingNotFound,
		asfctrl_fnZoneMappingNotFound,
		asfctrl_fnNoFlowFound,
		asfctrl_fnRuntime,
		asfctrl_fnFlowRefreshL2Blob,
		asfctrl_fnFlowActivityRefresh,
		asfctrl_fnFlowTcpSpecialPkts,
		asfctrl_fnFlowValidate,
		asfctrl_fnAuditLog
	};

	ASFCTRL_FUNC_ENTRY;

	memset(p_asfctrl_netdev_cii, 0, sizeof(p_asfctrl_netdev_cii));

	ASFGetCapabilities(&g_cap);

	if (!g_cap.bBufferHomogenous) {
		ASFCTRL_ERR("ASF capabilities: Non homogenous buffer");
		return -1;
	}
	asfctrl_vsg_config_id = jiffies;
	memset(&cmd, 0, sizeof(cmd));
	cmd.ulConfigMagicNumber = asfctrl_vsg_config_id;
	ASFFFPUpdateConfigIdentity(ASF_DEF_VSG, cmd);

	memset(&cmd, 0, sizeof(cmd));
	cmd.bL2blobMagicNumber = 1;
	cmd.l2blobConfig.ulL2blobMagicNumber = asfctrl_vsg_l2blobconfig_id;
	ASFFFPUpdateConfigIdentity(ASF_DEF_VSG, cmd);


	ASFFFPRegisterCallbackFns(&asfctrl_Cbs);

	ret = asfctrl_netns_vsg_init();
	printk("asfctrl_netns_vsg_init returned %d\n", ret);

	printk(KERN_INFO "Subha: before asfctrl_dev_notifier\r\n");
	register_netdevice_notifier(&asfctrl_dev_notifier);
	printk(KERN_INFO "Subha: before devfp_register_hook\r\n");
	printk(KERN_INFO "Subha: devfp_register_hook called asf_ffp_devfp_rx=0x%x, asfctrl_dev_fp_tx_hook=%x\r\n",
		(int)asf_ffp_devfp_rx, (int)asfctrl_dev_fp_tx_hook);
	//devfp_register_hook(asf_ffp_devfp_rx, asfctrl_dev_fp_tx_hook);
	devfp_register_rx_hook_veth(asf_ffp_devfp_rx_veth);
	devfp_register_tx_hook_veth(asfctrl_dev_fp_tx_hook);
	route_hook_fn_register(&asfctrl_l3_route_flush);
#ifdef ASF_IPV6_FP_SUPPORT
	ipv6_route_hook_fn_register(&asfctrl_l3_ipv6_route_flush);
#endif

	asfctrl_sysfs_init();


	if (g_cap.mode & fwMode)
		asfctrl_linux_register_ffp();

	if (ASFGetStatus() == 0)
		ASFDeploy();

	ASFCTRL_INFO("ASF Control Module - Core Loaded.\n");
	ASFCTRL_FUNC_EXIT;
	return 0;
}
Example 20
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
						 FirstFrag | LastFrag);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = mapping;
		cp->tx_skb[entry].frag = 0;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = first_mapping;
		cp->tx_skb[entry].frag = 1;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			if (skb->ip_summed == CHECKSUM_HW) {
				ctrl = eor | len | DescOwn | IPCS;
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= UDPCS;
				else
					BUG();
			} else
				ctrl = eor | len | DescOwn;

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].mapping = mapping;
			cp->tx_skb[entry].frag = frag + 2;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
Example 21
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @filter is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_MOD_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;
		case BPF_S_ALU_MOD_K:
			A %= K;
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X:
			A ^= X;
			continue;
		case BPF_S_ALU_XOR_K:
			A ^= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_VLAN_TAG:
			A = vlan_tx_tag_get(skb);
			continue;
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			A = !!vlan_tx_tag_present(skb);
			continue;
		case BPF_S_ANC_PAY_OFFSET:
			A = __skb_get_poff(skb);
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > A - skb->len)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
#ifdef CONFIG_SECCOMP_FILTER
		case BPF_S_ANC_SECCOMP_LD_W:
			A = seccomp_bpf_load(fentry->k);
			continue;
#endif
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
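
The kernel-doc above spells out the classic BPF contract: the program returns how many bytes of the packet to keep, or 0 to drop it, and the VLAN state handled in the BPF_S_ANC_VLAN_TAG / BPF_S_ANC_VLAN_TAG_PRESENT cases is reachable from user space through the SKF_AD_OFF ancillary offsets. A minimal sketch, assuming Linux 3.8+ (for SKF_AD_VLAN_TAG_PRESENT) and CAP_NET_RAW, that attaches a classic filter keeping only VLAN-tagged frames:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	struct sock_filter code[] = {
		/* A = !!vlan_tx_tag_present(skb) (ancillary load) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
		/* if (A == 0) goto drop; else goto keep */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop: keep 0 bytes */
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* keep whole packet */
	};
	struct sock_fprog prog = {
		.len = (unsigned short)(sizeof(code) / sizeof(code[0])),
		.filter = code,
	};

	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)); /* needs CAP_NET_RAW */
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
		perror("setsockopt(SO_ATTACH_FILTER)");
		close(fd);
		return 1;
	}
	/* recv() on fd now only returns frames that carried a VLAN tag. */
	close(fd);
	return 0;
}
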
Example 22
int rpl_dev_queue_xmit(struct sk_buff *skb)
{
#undef dev_queue_xmit
	int err = -ENOMEM;
	bool vlan, mpls;

	vlan = mpls = false;

	/* Avoid traversing any VLAN tags that are present to determine if
	 * the ethtype is MPLS. Instead compare the mac_len (end of L2) and
	 * skb_network_offset() (beginning of L3) whose inequality will
	 * indicate the presence of an MPLS label stack. */
	if (skb->mac_len != skb_network_offset(skb) && !supports_mpls_gso())
		mpls = true;

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev))
		vlan = true;

	if (vlan || mpls) {
		int features;

		features = netif_skb_features(skb);

		if (vlan) {
			if (!vlan_tso)
				features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
					      NETIF_F_UFO | NETIF_F_FSO);

			skb = __vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				return err;
			vlan_set_tci(skb, 0);
		}

		/* As of v3.11 the kernel provides an mpls_features field in
		 * struct net_device which allows devices to advertise which
		 * features they support for MPLS. This value defaults to
		 * NETIF_F_SG.
		 *
		 * This compatibility code is intended for kernels older
		 * than v3.16 that do not support MPLS GSO and do not
		 * use mpls_features. Thus this code uses NETIF_F_SG
		 * directly in place of mpls_features.
		 */
		if (mpls)
			features &= NETIF_F_SG;

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
					goto drop;

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto xmit;
			}

			if (IS_ERR(nskb)) {
				err = PTR_ERR(nskb);
				goto drop;
			}
			consume_skb(skb);
			skb = nskb;

			do {
				nskb = skb->next;
				skb->next = NULL;
				err = dev_queue_xmit(skb);
				skb = nskb;
			} while (skb);

			return err;
		}
	}
xmit:
	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return err;
}