Example #1
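/*
 * Apply the VLAN offload settings requested in rxmode.offloads:
 * ingress VLAN stripping is toggled on or off, while VLAN filtering
 * and extended (QinQ) VLAN only trigger "not supported" warnings.
 */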
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	if ((mask & ETH_VLAN_FILTER_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if ((mask & ETH_VLAN_EXTEND_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return enic_set_vlan_strip(enic);
}
Example #2
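/*
 * Older variant of the VLAN offload handler: it reads the legacy
 * rxmode.hw_vlan_strip flag, pushes the change through
 * enic_set_rss_nic_cfg(), and returns nothing rather than a status.
 */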
static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	enic_set_rss_nic_cfg(enic);

	if (mask & ETH_VLAN_FILTER_MASK) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}
}
Example #3
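/*
 * Filter-control callback: hands out the rte_flow ops table for
 * generic filters and dispatches flow-director requests. The
 * dev_warning() in the default case compiles without a local 'enic'
 * presumably because the enic macro discards its device argument.
 */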
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
Example #4
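/*
 * Allocate one receive queue, its completion queue and its mbuf ring,
 * clamping nb_desc to the UCSM/CIMC adapter-policy limit. Partial
 * allocations are unwound through the error labels, and -ENOMEM is
 * returned on any failure.
 */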
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc)
{
	int rc;
	struct vnic_rq *rq = &enic->rq[queue_idx];

	rq->socket_id = socket_id;
	rq->mp = mp;

	if (nb_desc) {
		if (nb_desc > enic->config.rq_desc_count) {
			dev_warning(enic,
				"RQ %d - number of rx desc in cmd line (%d)"\
				"is greater than that in the UCSM/CIMC adapter"\
				"policy.  Applying the value in the adapter "\
				"policy (%d).\n",
				queue_idx, nb_desc, enic->config.rq_desc_count);
			nb_desc = enic->config.rq_desc_count;
		}
		dev_info(enic, "RX Queues - effective number of descs:%d\n",
			 nb_desc);
	}

	/* Allocate queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
		nb_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of rq\n");
		goto err_exit;
	}

	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
		socket_id, nb_desc,
		sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_exit;
	}

	/* Allocate the mbuf ring */
	rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
			sizeof(struct rte_mbuf *) * nb_desc,
			RTE_CACHE_LINE_SIZE, rq->socket_id);

	if (rq->mbuf_ring != NULL)
		return 0;

	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_exit:
	vnic_rq_free(rq);
err_exit:
	return -ENOMEM;
}
Example #5
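/*
 * Minimal filter-control callback: only flow-director requests are
 * dispatched; every other filter type is rejected with -EINVAL.
 */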
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = -EINVAL;

	if (RTE_ETH_FILTER_FDIR == filter_type)
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
	else
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);

	return ret;
}
Example #6
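/*
 * Older enic_alloc_rq: a smaller nb_desc overwrites
 * enic->config.rq_desc_count instead of being tracked locally, and no
 * mbuf ring is allocated here.
 */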
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc)
{
	int err;
	struct vnic_rq *rq = &enic->rq[queue_idx];

	rq->socket_id = socket_id;
	rq->mp = mp;

	if (nb_desc) {
		if (nb_desc > enic->config.rq_desc_count) {
			dev_warning(enic,
				"RQ %d - number of rx desc in cmd line (%d)"\
				"is greater than that in the UCSM/CIMC adapter"\
				"policy.  Applying the value in the adapter "\
				"policy (%d).\n",
				queue_idx, nb_desc, enic->config.rq_desc_count);
		} else if (nb_desc != enic->config.rq_desc_count) {
			enic->config.rq_desc_count = nb_desc;
			dev_info(enic,
				"RX Queues - effective number of descs:%d\n",
				nb_desc);
		}
	}

	/* Allocate queue resources */
	err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
		enic->config.rq_desc_count,
		sizeof(struct rq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of rq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
		socket_id, enic->config.rq_desc_count,
		sizeof(struct cq_enet_rq_desc));
	if (err) {
		vnic_rq_free(rq);
		dev_err(enic, "error in allocation of cq for rq\n");
	}

	return err;
}
Example #7
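/*
 * Dispatch a flow-director operation to the matching enic_fdir_*
 * helper. NOP succeeds immediately, FLUSH is unsupported, and a NULL
 * arg is rejected for every operation that needs one.
 */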
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
Example #8
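/*
 * Allocate one transmit queue and its completion queue. A requested
 * descriptor count above the adapter policy is ignored with a warning;
 * a smaller one replaces the configured count.
 */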
int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);

	wq->socket_id = socket_id;
	if (nb_desc) {
		if (nb_desc > enic->config.wq_desc_count) {
			dev_warning(enic,
				"WQ %d - number of tx desc in cmd line (%d)"\
				"is greater than that in the UCSM/CIMC adapter"\
				"policy.  Applying the value in the adapter "\
				"policy (%d)\n",
				queue_idx, nb_desc, enic->config.wq_desc_count);
		} else if (nb_desc != enic->config.wq_desc_count) {
			enic->config.wq_desc_count = nb_desc;
			dev_info(enic,
				"TX Queues - effective number of descs:%d\n",
				nb_desc);
		}
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		enic->config.wq_desc_count,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, enic->config.wq_desc_count,
		sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
	}

	return err;
}
Example #9
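/*
 * Bring the port up: publish link speed and duplex, seed every receive
 * queue with buffers, enable the WQs and RQs, then register and unmask
 * the error interrupt.
 */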
int enic_enable(struct enic *enic)
{
	unsigned int index;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed."\
			"Flow director feature will not work\n");

	/* Fill RQ bufs */
	for (index = 0; index < enic->rq_count; index++) {
		vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);

		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
			dev_err(enic, "Unable to alloc receive buffers\n");
			return -1;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
Example #10
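/*
 * Newer enic_enable: Rx rings are seeded via
 * enic_alloc_rx_queue_mbufs() and its error code is propagated,
 * instead of the bare -1 returned by the older version above.
 */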
int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed."\
			"Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
		if (err) {
			dev_err(enic, "Failed to alloc RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
Example #11
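/*
 * Add or update a flow-director classifier entry using the v2 filter
 * and action structures; supported flow types come from the adapter's
 * advertised fdir.types_mask, and a replacement entry is installed
 * before the old one is deleted so packets do not fall through to the
 * default queue in between.
 */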
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;
	struct filter_action_v2 action;

	memset(&fltr, 0, sizeof(fltr));
	memset(&action, 0, sizeof(action));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
		(params->input.flow_ext.vlan_tci & 0xFFF) ||
		!flowtype_supported || flex_bytes ||
		params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;

	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);
	action.type = FILTER_ACTION_RQ_STEERING;
	action.rq_idx = queue;

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
	    &action)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
				    NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}
Example #12
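/*
 * Older enic_fdir_add_fltr: only non-fragmented IPv4 TCP/UDP flows are
 * accepted, and the filter is built as a fixed IPv4 5-tuple rather
 * than through a copy_fltr_fn hook.
 */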
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter fltr = {0};
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;

	flowtype_supported = (
		(RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
		(RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
		(params->input.flow_ext.vlan_tci & 0xFFF) ||
		!flowtype_supported || flex_bytes ||
		params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	queue = params->action.rx_queue;
	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	fltr.type = FILTER_IPV4_5TUPLE;
	fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
		params->input.flow.ip4_flow.src_ip);
	fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
		params->input.flow.ip4_flow.dst_ip);
	fltr.u.ipv4.src_port = rte_be_to_cpu_16(
		params->input.flow.udp4_flow.src_port);
	fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
		params->input.flow.udp4_flow.dst_port);

	if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
		fltr.u.ipv4.protocol = PROTO_TCP;
	else
		fltr.u.ipv4.protocol = PROTO_UDP;

	fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}