Code example #1
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
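
Usage note (a minimal sketch added for illustration, not from the source; the helper name app_start_eventdev() is hypothetical): the device is expected to be configured, with queues and ports set up, before it is started.

#include <stdio.h>
#include <rte_eventdev.h>

/* Hypothetical helper: start an already configured event device. */
static int
app_start_eventdev(uint8_t dev_id)
{
	int ret = rte_event_dev_start(dev_id);

	if (ret < 0) {
		/* -EINVAL: bad dev_id, -ENOTSUP: driver has no dev_start op */
		printf("cannot start eventdev %u: %d\n", dev_id, ret);
		return ret;
	}
	/* Starting an already started device only logs an error and
	 * still returns 0, as the implementation above shows. */
	return 0;
}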
Code example #2
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
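
Usage note (an illustrative sketch, not from the source; the helper name app_print_dev_attrs() is hypothetical): querying the three device-level attributes handled above.

#include <stdio.h>
#include <rte_eventdev.h>

/* Hypothetical query of the device-level attributes exposed above. */
static void
app_print_dev_attrs(uint8_t dev_id)
{
	uint32_t nb_ports, nb_queues, started;

	if (rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_PORT_COUNT,
				   &nb_ports) == 0 &&
	    rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				   &nb_queues) == 0 &&
	    rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_STARTED,
				   &started) == 0)
		printf("dev %u: %u ports, %u queues, started=%u\n",
		       dev_id, nb_ports, nb_queues, started);
}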
Code example #3
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Code example #4
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
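
Usage note (an illustrative sketch; the helper name is made up): the caller provides arrays large enough for every possible queue, since the implementation above walks the whole per-port region of links_map.

#include <stdio.h>
#include <rte_eventdev.h>

/* Hypothetical dump of the queues currently linked to one event port. */
static void
app_print_port_links(uint8_t dev_id, uint8_t port_id)
{
	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, nb_links;

	nb_links = rte_event_port_links_get(dev_id, port_id,
					    queues, priorities);
	for (i = 0; i < nb_links; i++)
		printf("port %u -> queue %u (priority %u)\n",
		       port_id, queues[i], priorities[i]);
}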
Code example #5
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
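
Usage note (a sketch under assumptions: the helper name and the atomic-only configuration are illustrative): setting up one atomic queue; passing NULL instead of &conf would make the function above fall back to the driver's queue_def_conf.

#include <rte_eventdev.h>

/* Hypothetical setup of one atomic queue. nb_flows must be non-zero and
 * no larger than the configured nb_event_queue_flows, or the checks
 * above return -EINVAL; the device must not be started (-EBUSY). */
static int
app_setup_atomic_queue(uint8_t dev_id, uint8_t queue_id, uint32_t nb_flows)
{
	struct rte_event_queue_conf conf = {
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = nb_flows,
		.nb_atomic_order_sequences = nb_flows,
	};

	return rte_event_queue_setup(dev_id, queue_id, &conf);
}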
Code example #6
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}
Code example #7
File: rte_eventdev.c  Project: cleveritcz/f-stack
int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}
Code example #8
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
Code example #9
File: rte_eventdev.c  Project: cleveritcz/f-stack
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}
Code example #10
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
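
Usage note (an illustrative sketch; the 10 us value and the helper name are assumptions): converting a nanosecond timeout into the device-specific tick value expected by rte_event_dequeue_burst().

#include <rte_eventdev.h>

/* Hypothetical conversion of a 10 us dequeue timeout into ticks. */
static uint64_t
app_timeout_ticks_10us(uint8_t dev_id)
{
	uint64_t ticks = 0;

	if (rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000, &ticks) < 0)
		ticks = 0; /* conversion not supported by this driver */
	return ticks;
}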
Code example #11
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}
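
Usage note (a sketch, assuming the chosen lcore has already been added as a service core with rte_service_lcore_add(); the helper name is hypothetical): software eventdevs expose their scheduler as a service, while devices without a service component return -ESRCH and need no service core.

#include <errno.h>
#include <rte_eventdev.h>
#include <rte_service.h>

/* Hypothetical mapping of an eventdev's scheduling service to an lcore. */
static int
app_map_eventdev_service(uint8_t dev_id, uint32_t service_lcore)
{
	uint32_t service_id;
	int ret = rte_event_dev_service_id_get(dev_id, &service_id);

	if (ret == -ESRCH)
		return 0; /* no service component, nothing to map */
	if (ret < 0)
		return ret;

	ret = rte_service_map_lcore_set(service_id, service_lcore, 1);
	if (ret == 0)
		ret = rte_service_runstate_set(service_id, 1);
	return ret;
}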
Code example #12
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
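
Usage note (an illustrative sketch; the UINT32_MAX sentinel and the helper name are assumptions): for queues created with RTE_EVENT_QUEUE_CFG_ALL_TYPES the schedule type is carried per event, so the attribute read above reports -EOVERFLOW.

#include <errno.h>
#include <stdint.h>
#include <rte_eventdev.h>

/* Hypothetical query of a queue's schedule type. */
static int
app_queue_sched_type(uint8_t dev_id, uint8_t queue_id, uint32_t *sched_type)
{
	int ret = rte_event_queue_attr_get(dev_id, queue_id,
			RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, sched_type);

	if (ret == -EOVERFLOW)
		*sched_type = UINT32_MAX; /* all-types queue: per-event type */
	return ret;
}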
Code example #13
File: rte_eventdev.c  Project: cleveritcz/f-stack
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}
Code example #14
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
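
Usage note (a minimal sketch with a hypothetical helper name): the device has to be stopped before it can be closed, otherwise the check above returns -EBUSY; rte_event_dev_stop() itself returns void.

#include <rte_eventdev.h>

/* Hypothetical teardown sequence for an event device. */
static int
app_teardown_eventdev(uint8_t dev_id)
{
	rte_event_dev_stop(dev_id);
	return rte_event_dev_close(dev_id);
}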
Code example #15
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}
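
Usage note (an illustrative sketch; the helper name is assumed): probing the device limits before building an rte_event_dev_config.

#include <stdio.h>
#include <rte_eventdev.h>

/* Hypothetical capability probe printed for debugging. */
static void
app_print_eventdev_limits(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	if (rte_event_dev_info_get(dev_id, &info) != 0)
		return;
	printf("dev %u: max_queues=%u max_ports=%u max_num_events=%d\n",
	       dev_id, info.max_event_queues, info.max_event_ports,
	       info.max_num_events);
}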
Code example #16
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}
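
Usage note (a sketch; the helper name is made up): a set RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT bit means the eventdev can pull packets from this ethdev directly; otherwise the Rx adapter needs a service core.

#include <rte_eventdev.h>

/* Hypothetical check whether the Rx adapter path needs a service core. */
static int
app_rx_adapter_needs_service(uint8_t dev_id, uint8_t eth_port_id)
{
	uint32_t caps = 0;

	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) < 0)
		return -1;
	return !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
}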
Code example #17
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
Code example #18
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}
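
Usage note (an illustrative sketch; the helper name and the device-level mode choice are assumptions): calling names_get() with a NULL array first returns only the required count, as the size check above shows; a second call then fills the names, and xstats_get() fetches the matching values by id.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_eventdev.h>

/* Hypothetical dump of the device-level extended statistics. */
static void
app_dump_dev_xstats(uint8_t dev_id)
{
	int i, n;
	struct rte_event_dev_xstats_name *names;
	unsigned int *ids;
	uint64_t *values;

	n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
					   0, NULL, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	ids = calloc(n, sizeof(*ids));
	values = calloc(n, sizeof(*values));
	if (names != NULL && ids != NULL && values != NULL) {
		rte_event_dev_xstats_names_get(dev_id,
				RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, n);
		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
				0, ids, values, n);
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
	}
	free(values);
	free(ids);
	free(names);
}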
Code example #19
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				 info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
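
Usage note (a minimal single-queue, single-port sketch; the helper name and the choice to clamp every field to the reported maxima are assumptions): the values mirror the limits checked above, so the call should pass validation on a conforming driver.

#include <string.h>
#include <rte_eventdev.h>

/* Hypothetical configuration built from the driver-reported limits. */
static int
app_configure_eventdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config config;
	int ret = rte_event_dev_info_get(dev_id, &info);

	if (ret < 0)
		return ret;

	memset(&config, 0, sizeof(config));
	config.nb_event_queues = 1;
	config.nb_event_ports = 1;
	config.nb_events_limit = info.max_num_events;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth =
			info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth =
			info.max_event_port_enqueue_depth;
	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &config);
}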
Code example #20
int
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
				rte_event_eth_rx_adapter_conf_cb conf_cb,
				void *conf_arg)
{
	struct rte_event_eth_rx_adapter *rx_adapter;
	int ret;
	int socket_id;
	uint8_t i;
	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
	const uint8_t default_rss_key[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	};

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_eth_rx_adapter == NULL) {
		ret = rte_event_eth_rx_adapter_init();
		if (ret)
			return ret;
	}

	rx_adapter = id_to_rx_adapter(id);
	if (rx_adapter != NULL) {
		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
		"rte_event_eth_rx_adapter_%d",
		id);

	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (rx_adapter == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
		return -ENOMEM;
	}

	rx_adapter->eventdev_id = dev_id;
	rx_adapter->socket_id = socket_id;
	rx_adapter->conf_cb = conf_cb;
	rx_adapter->conf_arg = conf_arg;
	strcpy(rx_adapter->mem_name, mem_name);
	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
					rte_eth_dev_count() *
					sizeof(struct eth_device_info), 0,
					socket_id);
	rte_convert_rss_key((const uint32_t *)default_rss_key,
			(uint32_t *)rx_adapter->rss_key_be,
			    RTE_DIM(default_rss_key));

	if (rx_adapter->eth_devices == NULL) {
		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
		rte_free(rx_adapter);
		return -ENOMEM;
	}
	rte_spinlock_init(&rx_adapter->rx_lock);
	for (i = 0; i < rte_eth_dev_count(); i++)
		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];

	event_eth_rx_adapter[id] = rx_adapter;
	if (conf_cb == default_conf_cb)
		rx_adapter->default_cb_arg = 1;
	return 0;
}
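
Usage note (a sketch under assumptions: in this DPDK version the plain rte_event_eth_rx_adapter_create() wrapper reaches the _ext variant above with a default conf_cb; the helper name, event queue 0 and the atomic schedule type are illustrative choices).

#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical adapter creation feeding all Rx queues of one ethdev
 * into event queue 0. An rx_queue_id of -1 selects every Rx queue. */
static int
app_create_rx_adapter(uint8_t adapter_id, uint8_t dev_id, uint8_t eth_port,
		      struct rte_event_port_conf *port_config)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	int ret;

	ret = rte_event_eth_rx_adapter_create(adapter_id, dev_id, port_config);
	if (ret < 0)
		return ret;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = 0;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port,
						  -1, &qconf);
}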
Code example #21
File: rte_eventdev.c  Project: cleveritcz/f-stack
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port (default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
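
Usage note (a minimal sketch; the helper name is hypothetical): setting a port up with the driver defaults by passing NULL, then linking it, since port_setup() above deliberately leaves the port unlinked.

#include <rte_eventdev.h>

/* Hypothetical port bring-up with default config and an all-queue link. */
static int
app_setup_and_link_port(uint8_t dev_id, uint8_t port_id)
{
	int ret = rte_event_port_setup(dev_id, port_id, NULL);

	if (ret < 0)
		return ret;
	/* NULL queues/priorities links every queue at normal priority. */
	ret = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
	return ret < 0 ? ret : 0;
}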