Example #1
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* The user must make sure that every key target offset is
		 * within the boundaries of an unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes the control and basic
	 * keys. That way we can avoid handling their absence in the fast
	 * path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
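A minimal usage sketch for skb_flow_dissector_init(), modeled on the way the
core flow dissector sets up its default key lists; the container struct and
the choice of keys below are illustrative, not taken from the examples above:

static const struct flow_dissector_key my_dissector_keys[] = {
	{
		/* Control and basic keys are mandatory, as enforced by the
		 * BUG_ON()s at the end of skb_flow_dissector_init().
		 */
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static struct flow_dissector my_dissector __read_mostly;

static int __init my_dissector_setup(void)
{
	skb_flow_dissector_init(&my_dissector, my_dissector_keys,
				ARRAY_SIZE(my_dissector_keys));
	return 0;
}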
Example #2
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}
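For reference, dissector_uses_key() amounts to a bit test against the
dissector's used_keys bitmap, consistent with the explicit
BIT(FLOW_DISSECTOR_KEY_*) checks in Example #11; a sketch of the expected
inline helper:

static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
				      enum flow_dissector_key_id key_id)
{
	return flow_dissector->used_keys & (1 << key_id);
}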
Example #3
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	/* ECN is the lower two bits of the TOS byte. */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP is the upper six bits of the TOS byte, so shift by 2;
	 * shifting by 6 would keep only the top two DSCP bits.
	 */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}
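A quick worked example of the TOS split above, using a hypothetical value:
0xb8 is the common Expedited Forwarding marking.

	u8 tos = 0xb8;		/* 0b10111000 */

	u8 ecn  = tos & 0x3;	/* = 0: lower two bits, not ECN-capable */
	u8 dscp = tos >> 2;	/* = 46 (EF): upper six bits */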
Example #4
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}
Example #5
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}
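The src/dst fields read above come from flow_dissector_key_ports, which
overlays both 16-bit ports on a single 32-bit word so callers can also
compare them in one load; the layout as defined in
include/net/flow_dissector.h:

struct flow_dissector_key_ports {
	union {
		__be32 ports;
		struct {
			__be16 src;
			__be16 dst;
		};
	};
};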
Example #6
static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct mpls_label *hdr, _hdr[2];
	u32 entry, label;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr[0].entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		key_mpls->mpls_label = label;
		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
					>> MPLS_LS_TTL_SHIFT;
		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
					>> MPLS_LS_TC_SHIFT;
		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
					>> MPLS_LS_S_SHIFT;
	}

	if (label == MPLS_LABEL_ENTROPY &&
	    dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
	}
	return FLOW_DISSECT_RET_OUT_GOOD;
}
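The masks and shifts above decompose a 32-bit MPLS label stack entry; for
reference, the layout after ntohl() (per RFC 3032):

	/*
	 *  31            12 11   9   8   7          0
	 * +----------------+------+---+--------------+
	 * |     Label      |  TC  | S |     TTL      |
	 * +----------------+------+---+--------------+
	 *
	 * MPLS_LS_LABEL_MASK = 0xfffff000, shift 12
	 * MPLS_LS_TC_MASK    = 0x00000e00, shift  9
	 * MPLS_LS_S_MASK     = 0x00000100, shift  8
	 * MPLS_LS_TTL_MASK   = 0x000000ff, shift  0
	 */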
Example #7
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
Example #8
static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}
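ipv6_get_dsfield() has to straddle two bytes, since the IPv6 traffic class
occupies the low nibble of the first header byte and the high nibble of the
second; a sketch of the equivalent extraction (the real helper lives in
include/net/dsfield.h):

static inline __u8 ipv6_get_dsfield(const struct ipv6hdr *ipv6h)
{
	/* The first 16 bits of the header hold version (4 bits), traffic
	 * class (8 bits) and the top of the flow label (4 bits); shifting
	 * out the flow-label nibble leaves the version bits to be dropped
	 * by the truncation to __u8.
	 */
	return ntohs(*(const __be16 *)ipv6h) >> 4;
}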
Example #9
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}
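The htons(0x0FFF) mask above keeps the low 12 bits of the 16-bit word at
TCP header offset 12, dropping only the 4-bit data offset; the word looks
like this in network byte order:

	/*
	 *  15     12 11    9   8   7                               0
	 * +---------+-------+----+---------------------------------+
	 * |  doff   | rsvd  | NS | CWR ECE URG ACK PSH RST SYN FIN |
	 * +---------+-------+----+---------------------------------+
	 *
	 * Masking with htons(0x0FFF) clears the data offset and keeps the
	 * reserved bits plus all nine flag bits, still in network byte
	 * order (key_tcp->flags is __be16).
	 */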
Example #10
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tunnel dissector is used then the required tunnel set
	 * must be used as well.
	 */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *flow_vlan;

		flow_vlan = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_VLAN,
						      flow->mask);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    flow_vlan->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_enc_opts *enc_op = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);
		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (dissector_uses_key(flow->dissector,
				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
			enc_op = skb_flow_dissector_target(flow->dissector,
							   FLOW_DISSECTOR_KEY_ENC_OPTS,
							   flow->key);
		}

		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to an egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other IP proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we
		 * have not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
Example #11
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
		   cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
		   cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
				GET_MASK(tc_flow_cmd,
					 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
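Example #11 relies on GET_KEY()/GET_MASK() helpers that are not shown;
judging by how they are called, they are thin wrappers around
skb_flow_dissector_target(), along these lines (a sketch assuming the bnxt
driver's definitions):

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)

#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)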
Example #12
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into the target
 * specified by @flow_dissector from either the skbuff or a raw buffer
 * specified by the remaining parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	bool skip_vlan = false;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	/* It is ensured by skb_flow_dissector_init() that the control key
	 * will always be present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that the basic key
	 * will always be present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			goto out_bad;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				goto out_good;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
					goto out_good;
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			goto out_bad;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
				goto out_good;
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
		bool vlan_tag_present = skb && skb_vlan_tag_present(skb);

		if (vlan_tag_present)
			proto = skb->protocol;

		if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan)
				goto out_bad;
			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
			if (skip_vlan)
				goto proto_again;
		}

		skip_vlan = true;
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLAN)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLAN,
							     target_container);

			if (vlan_tag_present) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority =
					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
		}

		goto proto_again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			goto out_bad;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		goto out_good;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
mpls:
		switch (__skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
		default:
			goto out_bad;
		}
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN)
			goto out_bad;

		nhoff += FCOE_HEADER_LEN;
		goto out_good;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		switch (__skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen)) {
		case FLOW_DISSECT_RET_OUT_GOOD:
			goto out_good;
		case FLOW_DISSECT_RET_OUT_BAD:
		default:
			goto out_bad;
		}
	default:
		goto out_bad;
	}
Example #13
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for versions 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP, and the key flag must be set */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof(((struct gre_full_hdr *) 0)->csum) +
			  sizeof(((struct gre_full_hdr *) 0)->reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof(((struct gre_full_hdr *) 0)->key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof(((struct pptp_gre_header *) 0)->seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof(((struct pptp_gre_header *) 0)->ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_OUT_PROTO_AGAIN;
}
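The sizeof(((struct gre_full_hdr *)0)->csum) construct used above measures
a struct member's size without needing an instance; the kernel wraps the
same idiom in a macro (named sizeof_field(), formerly FIELD_SIZEOF()), so
the offset arithmetic could equally be written with it:

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

	if (hdr->flags & GRE_CSUM)
		offset += sizeof_field(struct gre_full_hdr, csum) +
			  sizeof_field(struct gre_full_hdr, reserved1);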
Example #14
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}