Example #1: ssovf_start(). The device start hook resets every event port, drains each event queue through HWS0, enables the SSO groups, and installs the fast-path handlers.
static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}
Example #2: ssovf_stop(). The device stop hook resets the ports, drains each queue while handing flushed events to ssows_handle_event, and disables the SSO groups.
static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}
Example #3: ssovf_dump(). Writes the SSOWVF debug registers of every event port to the given file.
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}
Example #4: ssovf_port_def_conf(). Fills in the default port configuration: a new-event threshold equal to the device limit and enqueue/dequeue depths of 1.
static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}
Example #5: ssovf_port_setup(). Allocates the per-port ssows structure, resolves the HWS and group BAR addresses, and precomputes the get-work register address.
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
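For orientation, the port hooks in examples #4 and #5 are not called directly by an application; they are reached through the public eventdev library. A minimal application-side sketch, assuming the standard rte_event_port_* calls and a single linked queue (the helper name and the ID parameters are illustrative only, not part of the driver above):

#include <errno.h>
#include <rte_eventdev.h>

/* Hypothetical port bring-up; it exercises ssovf_port_def_conf() and
 * ssovf_port_setup() indirectly through the rte_eventdev library. */
static int
setup_one_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	struct rte_event_port_conf pconf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, port_id, &pconf);
	if (ret < 0)
		return ret;

	ret = rte_event_port_setup(dev_id, port_id, &pconf);
	if (ret < 0)
		return ret;

	/* NULL priorities requests normal priority for the linked queue */
	return rte_event_port_link(dev_id, port_id, &queue_id, NULL, 1) == 1
		? 0 : -EIO;
}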
Example #6: ssovf_close(). Unlinks every event queue from every event port.
static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}
Example #7: ssovf_fastpath_fns_set(). Installs the fast-path enqueue/dequeue handlers, switching to the timeout dequeue variants when per-dequeue timeouts are configured.
static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue       = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue       = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue       = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}
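The function pointers installed in example #7 sit behind rte_event_enqueue_burst() and rte_event_dequeue_burst(). A minimal worker-loop sketch, assuming an already configured device and a linked port (the loop shape and the ID parameters are placeholders, not driver code):

#include <rte_eventdev.h>

/* Hypothetical worker loop; the library dispatches these calls to the
 * ssows enqueue/dequeue handlers installed by ssovf_fastpath_fns_set(). */
static void
worker_loop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;

	for (;;) {
		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
			continue;

		/* ... process the event here ... */

		/* Forward the processed event to its next stage */
		ev.op = RTE_EVENT_OP_FORWARD;
		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
	}
}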
Example #8: ssovf_info_get(). Reports the driver name, device limits, and capability flags.
static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}
Example #9: ssovf_configure(). Derives the dequeue timeout, records the configured queue and port counts, and programs the get-work timeout through the mailbox.
static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}
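Example #9 consumes a struct rte_event_dev_config filled in by the application. A minimal configuration sketch using the limits reported by ssovf_info_get(); the helper name and the choice to request the maximums are illustrative assumptions:

#include <rte_eventdev.h>

/* Hypothetical device configuration; the values end up in
 * ssovf_configure() through the eventdev library. */
static int
configure_sso(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	cfg.nb_event_queues = info.max_event_queues;
	cfg.nb_event_ports = info.max_event_ports;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	/* 0 lets the PMD fall back to its minimum dequeue timeout */
	cfg.dequeue_timeout_ns = 0;

	return rte_event_dev_configure(dev_id, &cfg);
}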
Example #10: ssovf_vdev_probe(). Creates the eventdev vdev, queries the SSOVF resources over the mailbox, and populates the private device data; only one instance is supported and only the primary process performs the full setup.
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
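For completeness, example #10 assigns &ssovf_ops and is itself a vdev probe hook. In a driver of this shape the two are typically tied together roughly as below; the ops-table members and the registration follow the generic eventdev vdev PMD pattern of this API era and are assumptions, not code taken from the examples above.

/* Sketch only: how the callbacks above are usually wired together.
 * Assumes the driver's usual headers; member names follow the
 * rte_eventdev_ops layout contemporary with this code. */
static const struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup    = ssovf_port_setup,
	.port_release  = ssovf_port_release,
	.port_unlink   = ssovf_port_unlink,
	.dev_start     = ssovf_start,
	.dev_stop      = ssovf_stop,
	.dev_close     = ssovf_close,
	.dump          = ssovf_dump,
};

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);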