Code example #1
File: bnxt_flow.c  Project: btw616/dpdk
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
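
Usage note: on failure each check fills *error through rte_flow_error_set() and returns -rte_errno, so a caller can simply propagate the return value. A minimal caller sketch follows; the wrapper function below is hypothetical and not part of bnxt_flow.c.

static int
example_flow_validate(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error)
{
	int rc;

	/* Reject NULL attr/pattern/actions before any parsing work;
	 * *error is already populated on failure. */
	rc = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (rc != 0)
		return rc;

	/* ... continue with item and action parsing ... */
	return 0;
}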
Code example #2
File: sfc_flow.c  Project: cleveritcz/f-stack
/*
 * Validate item and prepare structures spec and mask for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask does not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		supp = ((const uint8_t *)supp_mask)[i];

		if (~supp & mask[i]) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
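
Note: sfc_flow_is_zero() is referenced above but not included in this excerpt. A minimal sketch of a byte-wise all-zero check of that shape is shown below; the actual sfc implementation may differ, so the helper name is suffixed to mark it as illustrative.

/* Illustrative stand-in for sfc_flow_is_zero(): nonzero if all "size"
 * bytes of "buf" are zero. Not the verbatim sfc implementation. */
static int
sfc_flow_is_zero_sketch(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return sum == 0;
}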
Code example #3
File: sfc_flow.c  Project: cleveritcz/f-stack
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. In addition to full and
 *   empty masks of destination address, individual/group mask is
 *   also supported. If the mask is NULL, default mask will be used.
 *   Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};
	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
			  EFX_MAC_ADDR_LEN) == 0) {
		if (is_unicast_ether_addr(&spec->dst))
			efx_spec->efs_match_flags |=
				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
		else
			efx_spec->efs_match_flags |=
				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
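
A hedged usage sketch: the item below asks for an exact destination-MAC match, which this parser is expected to turn into an EFX_FILTER_MATCH_LOC_MAC filter. The wrapper function name and MAC value are illustrative only, not part of sfc_flow.c.

static int
example_parse_eth_dst(efx_filter_spec_t *efx_spec,
		      struct rte_flow_error *error)
{
	static const struct rte_flow_item_eth spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	static const struct rte_flow_item_eth mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &spec,
		.mask = &mask,
		/* .last left NULL: ranging is rejected by the parser */
	};

	/* Expected to set EFX_FILTER_MATCH_LOC_MAC and copy the MAC
	 * into efx_spec->efs_loc_mac. */
	return sfc_flow_parse_eth(&item, efx_spec, error);
}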

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		/* Apply mask to keep VID only */
		vid = rte_bswap16(spec->tci & mask->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}
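
A hedged usage sketch: a VLAN item with the full VID mask, which this parser requires. The wrapper name and VID value are illustrative only, not part of sfc_flow.c.

static int
example_parse_vlan_vid(efx_filter_spec_t *efx_spec,
		       struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan spec = {
		.tci = rte_cpu_to_be_16(123),	/* VID 123, illustrative */
	};
	const struct rte_flow_item_vlan mask = {
		.tci = rte_cpu_to_be_16(0x0fff), /* full VID mask, as required */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.spec = &spec,
		.mask = &mask,
	};

	/* Sets EFX_FILTER_MATCH_OUTER_VID (or INNER_VID for a second
	 * VLAN item) with the byte-swapped VID. */
	return sfc_flow_parse_vlan(&item, efx_spec, error);
}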
Code example #4
File: sfc_flow.c  Project: cleveritcz/f-stack
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		}
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}
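
A hedged usage sketch: an IPv4 item matching fixed source and destination addresses. The wrapper name and addresses are illustrative only, not part of sfc_flow.c.

static int
example_parse_ipv4_pair(efx_filter_spec_t *efx_spec,
			struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xc0a80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xc0a80132), /* 192.168.1.50 */
		},
	};
	const struct rte_flow_item_ipv4 mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
		},
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &spec,
		.mask = &mask,
	};

	/* Sets EFX_FILTER_MATCH_REM_HOST/LOC_HOST and, if not already
	 * present, EFX_FILTER_MATCH_ETHER_TYPE for IPv4. */
	return sfc_flow_parse_ipv4(&item, efx_spec, error);
}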
Code example #5
/**
 * Parse the rule to see if it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be COUNT.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -EINVAL;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -EINVAL;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -EINVAL;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					item,
					"Not supported last point for range");
			return -EINVAL;

		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported by ntuple filter");
			return -EINVAL;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported by ntuple filter");
			return -EINVAL;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -EINVAL;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -EINVAL;

	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
		ipv4_mask->hdr.type_of_service ||
		ipv4_mask->hdr.total_length ||
		ipv4_mask->hdr.packet_id ||
		ipv4_mask->hdr.fragment_offset ||
		ipv4_mask->hdr.time_to_live ||
		ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto  = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -EINVAL;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -EINVAL;

	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
		filter->src_port_mask  = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports count,
	 * check if the first not void action is COUNT.
	 */
	/* "action" is declared outside this excerpt (at file scope in the
	 * original source). */
	memset(&action, 0, sizeof(action));
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -EINVAL;
	}
	action.type = RTE_FLOW_ACTION_TYPE_COUNT;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -EINVAL;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -EINVAL;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -EINVAL;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -EINVAL;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority >  FLOW_RULE_MIN_PRIORITY)
		filter->priority = FLOW_RULE_MAX_PRIORITY;

	return 0;
}
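
A hedged usage sketch: pattern and action arrays corresponding to the documented example above (ETH / IPV4 / UDP / END with a COUNT action), passed to the parser. The wrapper name and the address/port values are illustrative only, not part of the original source.

static int
example_build_ntuple_rule(struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xc0a80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xc0a70332), /* 192.167.3.50 */
			.next_proto_id = 17,			  /* UDP */
		},
	};
	const struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		},
	};
	const struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	const struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = 0xffff,
			.dst_port = 0xffff,
		},
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask must be NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* On success "filter" holds the parsed n-tuple match fields. */
	return classify_parse_ntuple_filter(&attr, pattern, actions,
					    filter, error);
}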