int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                      const struct rte_event_queue_conf *queue_conf)
{
    struct rte_eventdev *dev;
    struct rte_event_queue_conf def_conf;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (!is_valid_queue(dev, queue_id)) {
        RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
        return -EINVAL;
    }

    /* Check nb_atomic_flows limit */
    if (is_valid_atomic_queue_conf(queue_conf)) {
        if (queue_conf->nb_atomic_flows == 0 ||
            queue_conf->nb_atomic_flows >
                dev->data->dev_conf.nb_event_queue_flows) {
            RTE_EDEV_LOG_ERR(
                "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
                dev_id, queue_id, queue_conf->nb_atomic_flows,
                dev->data->dev_conf.nb_event_queue_flows);
            return -EINVAL;
        }
    }

    /* Check nb_atomic_order_sequences limit */
    if (is_valid_ordered_queue_conf(queue_conf)) {
        if (queue_conf->nb_atomic_order_sequences == 0 ||
            queue_conf->nb_atomic_order_sequences >
                dev->data->dev_conf.nb_event_queue_flows) {
            RTE_EDEV_LOG_ERR(
                "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
                dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
                dev->data->dev_conf.nb_event_queue_flows);
            return -EINVAL;
        }
    }

    if (dev->data->dev_started) {
        RTE_EDEV_LOG_ERR(
            "device %d must be stopped to allow queue setup", dev_id);
        return -EBUSY;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

    if (queue_conf == NULL) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
        (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
        queue_conf = &def_conf;
    }

    dev->data->queues_cfg[queue_id] = *queue_conf;
    return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
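/*
 * Usage sketch (illustrative, not part of the library): configure one
 * queue with the PMD's default configuration and one with an explicit
 * configuration. Assumes <rte_eventdev.h> is included and the device is
 * already configured via rte_event_dev_configure(); the priority value
 * chosen here is only an example.
 */
static int
example_setup_queues(uint8_t dev_id)
{
    struct rte_event_queue_conf qconf;
    int ret;

    /* Queue 0: let the PMD pick its defaults by passing NULL. */
    ret = rte_event_queue_setup(dev_id, 0, NULL);
    if (ret < 0)
        return ret;

    /* Queue 1: start from the default conf and override the priority. */
    ret = rte_event_queue_default_conf_get(dev_id, 1, &qconf);
    if (ret < 0)
        return ret;
    qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;

    return rte_event_queue_setup(dev_id, 1, &qconf);
}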
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
    uint8_t old_nb_queues = dev->data->nb_queues;
    struct rte_event_queue_conf *queues_cfg;
    unsigned int i;

    RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
                       dev->data->dev_id);

    /* First time configuration */
    if (dev->data->queues_cfg == NULL && nb_queues != 0) {
        /* Allocate memory to store queue configuration */
        dev->data->queues_cfg = rte_zmalloc_socket(
            "eventdev->data->queues_cfg",
            sizeof(dev->data->queues_cfg[0]) * nb_queues,
            RTE_CACHE_LINE_SIZE, dev->data->socket_id);
        if (dev->data->queues_cfg == NULL) {
            dev->data->nb_queues = 0;
            RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
                             "nb_queues %u", nb_queues);
            return -(ENOMEM);
        }
    /* Re-configure */
    } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

        for (i = nb_queues; i < old_nb_queues; i++)
            (*dev->dev_ops->queue_release)(dev, i);

        /* Re allocate memory to store queue configuration */
        queues_cfg = dev->data->queues_cfg;
        queues_cfg = rte_realloc(queues_cfg,
            sizeof(queues_cfg[0]) * nb_queues, RTE_CACHE_LINE_SIZE);
        if (queues_cfg == NULL) {
            RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
                             " nb_queues %u", nb_queues);
            return -(ENOMEM);
        }
        dev->data->queues_cfg = queues_cfg;

        if (nb_queues > old_nb_queues) {
            uint8_t new_qs = nb_queues - old_nb_queues;

            memset(queues_cfg + old_nb_queues, 0,
                   sizeof(queues_cfg[0]) * new_qs);
        }
    } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

        for (i = nb_queues; i < old_nb_queues; i++)
            (*dev->dev_ops->queue_release)(dev, i);
    }

    dev->data->nb_queues = nb_queues;
    return 0;
}
const struct rte_security_capability *
rte_security_capability_get(struct rte_security_ctx *instance,
                            struct rte_security_capability_idx *idx)
{
    const struct rte_security_capability *capabilities;
    const struct rte_security_capability *capability;
    uint16_t i = 0;

    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->capabilities_get, NULL);
    capabilities = instance->ops->capabilities_get(instance->device);

    if (capabilities == NULL)
        return NULL;

    while ((capability = &capabilities[i++])->action !=
            RTE_SECURITY_ACTION_TYPE_NONE) {
        if (capability->action == idx->action &&
            capability->protocol == idx->protocol) {
            if (idx->protocol == RTE_SECURITY_PROTOCOL_IPSEC) {
                if (capability->ipsec.proto == idx->ipsec.proto &&
                    capability->ipsec.mode == idx->ipsec.mode &&
                    capability->ipsec.direction == idx->ipsec.direction)
                    return capability;
            }
        }
    }

    return NULL;
}
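/*
 * Usage sketch (illustrative): check whether a device advertises inline
 * crypto for ESP tunnel egress. The idx fields mirror those compared in
 * rte_security_capability_get() above; the ctx is assumed to come from
 * rte_eth_dev_get_sec_ctx() or an equivalent helper.
 */
static int
example_has_inline_esp_tunnel_egress(struct rte_security_ctx *ctx)
{
    struct rte_security_capability_idx idx = {
        .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
        .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
        .ipsec = {
            .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
            .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
            .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
        },
    };

    return rte_security_capability_get(ctx, &idx) != NULL;
}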
int
rte_event_dev_start(uint8_t dev_id)
{
    struct rte_eventdev *dev;
    int diag;

    RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

    if (dev->data->dev_started != 0) {
        RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
                         dev_id);
        return 0;
    }

    diag = (*dev->dev_ops->dev_start)(dev);
    if (diag == 0)
        dev->data->dev_started = 1;
    else
        return diag;

    return 0;
}
int
rte_security_session_stats_get(struct rte_security_ctx *instance,
                               struct rte_security_session *sess,
                               struct rte_security_stats *stats)
{
    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_stats_get, -ENOTSUP);
    return instance->ops->session_stats_get(instance->device, sess, stats);
}
int
rte_security_session_update(struct rte_security_ctx *instance,
                            struct rte_security_session *sess,
                            struct rte_security_session_conf *conf)
{
    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_update, -ENOTSUP);
    return instance->ops->session_update(instance->device, sess, conf);
}
int
rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
                              struct rte_security_session *sess,
                              struct rte_mbuf *m, void *params)
{
    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->set_pkt_metadata, -ENOTSUP);
    return instance->ops->set_pkt_metadata(instance->device, sess, m, params);
}
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

    (*dev->dev_ops->dump)(dev, f);
    return 0;
}
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                                uint64_t *timeout_ticks)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

    if (timeout_ticks == NULL)
        return -EINVAL;

    return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
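/*
 * Usage sketch (illustrative): convert a nanosecond timeout into the
 * device-specific tick value expected by rte_event_dequeue_burst().
 * The 100 us timeout, the fallback behaviour and the burst size are
 * placeholder choices, not values required by the API.
 */
static uint16_t
example_dequeue_with_timeout(uint8_t dev_id, uint8_t port_id,
                             struct rte_event *ev, uint16_t nb_events)
{
    uint64_t ticks = 0;

    /* 100 microseconds expressed in nanoseconds. */
    if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
        ticks = 0; /* fall back to a non-blocking dequeue */

    return rte_event_dequeue_burst(dev_id, port_id, ev, nb_events, ticks);
}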
int
rte_security_session_destroy(struct rte_security_ctx *instance,
                             struct rte_security_session *sess)
{
    int ret;

    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_destroy, -ENOTSUP);

    if (instance->sess_cnt)
        instance->sess_cnt--;

    ret = instance->ops->session_destroy(instance->device, sess);
    if (!ret)
        rte_mempool_put(rte_mempool_from_obj(sess), (void *)sess);

    return ret;
}
int
rte_event_dev_close(uint8_t dev_id)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

    /* Device must be stopped before it can be closed */
    if (dev->data->dev_started == 1) {
        RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
                         dev_id);
        return -EBUSY;
    }

    return (*dev->dev_ops->dev_close)(dev);
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (dev_info == NULL)
        return -EINVAL;

    memset(dev_info, 0, sizeof(struct rte_event_dev_info));

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
    (*dev->dev_ops->dev_infos_get)(dev, dev_info);

    dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
    dev_info->dev = dev->dev;

    return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                                struct rte_event_port_conf *port_conf)
{
    struct rte_eventdev *dev;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (port_conf == NULL)
        return -EINVAL;

    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
    memset(port_conf, 0, sizeof(struct rte_event_port_conf));
    (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
    return 0;
}
struct rte_security_session *
rte_security_session_create(struct rte_security_ctx *instance,
                            struct rte_security_session_conf *conf,
                            struct rte_mempool *mp)
{
    struct rte_security_session *sess = NULL;

    if (conf == NULL)
        return NULL;

    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_create, NULL);

    if (rte_mempool_get(mp, (void **)&sess))
        return NULL;

    if (instance->ops->session_create(instance->device, conf, sess, mp)) {
        rte_mempool_put(mp, (void *)sess);
        return NULL;
    }
    instance->sess_cnt++;

    return sess;
}
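/*
 * Usage sketch (illustrative): create and then destroy a security session.
 * The session_conf contents (IPsec SA options, crypto transforms) are
 * application-specific and left unset here; only action_type and protocol
 * are filled in. The mempool is assumed to have been created with elements
 * large enough for the PMD's session private data.
 */
static int
example_session_roundtrip(struct rte_security_ctx *ctx,
                          struct rte_mempool *sess_mp)
{
    struct rte_security_session_conf conf = {
        .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
        .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
        /* .ipsec and .crypto_xform would be filled in by a real caller */
    };
    struct rte_security_session *sess;

    sess = rte_security_session_create(ctx, &conf, sess_mp);
    if (sess == NULL)
        return -1;

    return rte_security_session_destroy(ctx, sess);
}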
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf)
{
    struct rte_eventdev *dev;
    struct rte_event_port_conf def_conf;
    int diag;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    if (!is_valid_port(dev, port_id)) {
        RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
        return -EINVAL;
    }

    /* Check new_event_threshold limit */
    if ((port_conf && !port_conf->new_event_threshold) ||
        (port_conf && port_conf->new_event_threshold >
             dev->data->dev_conf.nb_events_limit)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
            dev_id, port_id, port_conf->new_event_threshold,
            dev->data->dev_conf.nb_events_limit);
        return -EINVAL;
    }

    /* Check dequeue_depth limit */
    if ((port_conf && !port_conf->dequeue_depth) ||
        (port_conf && port_conf->dequeue_depth >
             dev->data->dev_conf.nb_event_port_dequeue_depth)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
            dev_id, port_id, port_conf->dequeue_depth,
            dev->data->dev_conf.nb_event_port_dequeue_depth);
        return -EINVAL;
    }

    /* Check enqueue_depth limit */
    if ((port_conf && !port_conf->enqueue_depth) ||
        (port_conf && port_conf->enqueue_depth >
             dev->data->dev_conf.nb_event_port_enqueue_depth)) {
        RTE_EDEV_LOG_ERR(
            "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
            dev_id, port_id, port_conf->enqueue_depth,
            dev->data->dev_conf.nb_event_port_enqueue_depth);
        return -EINVAL;
    }

    if (dev->data->dev_started) {
        RTE_EDEV_LOG_ERR(
            "device %d must be stopped to allow port setup", dev_id);
        return -EBUSY;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

    if (port_conf == NULL) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
        (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
        port_conf = &def_conf;
    }

    dev->data->ports_cfg[port_id] = *port_conf;

    diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

    /* Unlink all the queues from this port(default state after setup) */
    if (!diag)
        diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

    if (diag < 0)
        return diag;

    return 0;
}
int
rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf)
{
    struct rte_eventdev *dev;
    struct rte_event_dev_info info;
    int diag;

    RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
    dev = &rte_eventdevs[dev_id];

    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
    RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

    if (dev->data->dev_started) {
        RTE_EDEV_LOG_ERR(
            "device %d must be stopped to allow configuration", dev_id);
        return -EBUSY;
    }

    if (dev_conf == NULL)
        return -EINVAL;

    (*dev->dev_ops->dev_infos_get)(dev, &info);

    /* Check dequeue_timeout_ns value is in limit */
    if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
        if (dev_conf->dequeue_timeout_ns &&
            (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns ||
             dev_conf->dequeue_timeout_ns > info.max_dequeue_timeout_ns)) {
            RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
                " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
                dev_id, dev_conf->dequeue_timeout_ns,
                info.min_dequeue_timeout_ns,
                info.max_dequeue_timeout_ns);
            return -EINVAL;
        }
    }

    /* Check nb_events_limit is in limit */
    if (dev_conf->nb_events_limit > info.max_num_events) {
        RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
            dev_id, dev_conf->nb_events_limit, info.max_num_events);
        return -EINVAL;
    }

    /* Check nb_event_queues is in limit */
    if (!dev_conf->nb_event_queues) {
        RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero", dev_id);
        return -EINVAL;
    }
    if (dev_conf->nb_event_queues > info.max_event_queues) {
        RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
            dev_id, dev_conf->nb_event_queues, info.max_event_queues);
        return -EINVAL;
    }

    /* Check nb_event_ports is in limit */
    if (!dev_conf->nb_event_ports) {
        RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
        return -EINVAL;
    }
    if (dev_conf->nb_event_ports > info.max_event_ports) {
        RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
            dev_id, dev_conf->nb_event_ports, info.max_event_ports);
        return -EINVAL;
    }

    /* Check nb_event_queue_flows is in limit */
    if (!dev_conf->nb_event_queue_flows) {
        RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
        return -EINVAL;
    }
    if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
        RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
            dev_id, dev_conf->nb_event_queue_flows,
            info.max_event_queue_flows);
        return -EINVAL;
    }

    /* Check nb_event_port_dequeue_depth is in limit */
    if (!dev_conf->nb_event_port_dequeue_depth) {
        RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero", dev_id);
        return -EINVAL;
    }
    if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
        (dev_conf->nb_event_port_dequeue_depth >
             info.max_event_port_dequeue_depth)) {
        RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
            dev_id, dev_conf->nb_event_port_dequeue_depth,
            info.max_event_port_dequeue_depth);
        return -EINVAL;
    }

    /* Check nb_event_port_enqueue_depth is in limit */
    if (!dev_conf->nb_event_port_enqueue_depth) {
        RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero", dev_id);
        return -EINVAL;
    }
    if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
        (dev_conf->nb_event_port_enqueue_depth >
             info.max_event_port_enqueue_depth)) {
        RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
            dev_id, dev_conf->nb_event_port_enqueue_depth,
            info.max_event_port_enqueue_depth);
        return -EINVAL;
    }

    /* Copy the dev_conf parameter into the dev structure */
    memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

    /* Setup new number of queues and reconfigure device. */
    diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
    if (diag != 0) {
        RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
            dev_id, diag);
        return diag;
    }

    /* Setup new number of ports and reconfigure device. */
    diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
    if (diag != 0) {
        rte_event_dev_queue_config(dev, 0);
        RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
            dev_id, diag);
        return diag;
    }

    /* Configure the device */
    diag = (*dev->dev_ops->dev_configure)(dev);
    if (diag != 0) {
        RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
        rte_event_dev_queue_config(dev, 0);
        rte_event_dev_port_config(dev, 0);
    }

    dev->data->event_dev_cap = info.event_dev_cap;
    return diag;
}
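/*
 * Usage sketch (illustrative): the typical bring-up sequence an application
 * follows against the configuration APIs above. Sizes are simply taken from
 * the device's advertised limits; a real application would pick values that
 * match its workload and check every return code in more detail.
 */
static int
example_eventdev_bringup(uint8_t dev_id)
{
    struct rte_event_dev_info info;
    struct rte_event_dev_config config = {0};
    int ret;

    ret = rte_event_dev_info_get(dev_id, &info);
    if (ret < 0)
        return ret;

    config.nb_event_queues = 1;
    config.nb_event_ports = 1;
    config.nb_events_limit = info.max_num_events;
    config.nb_event_queue_flows = info.max_event_queue_flows;
    config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
    config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
    config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

    ret = rte_event_dev_configure(dev_id, &config);
    if (ret < 0)
        return ret;

    /* Default queue/port configuration, then link queue 0 to port 0. */
    ret = rte_event_queue_setup(dev_id, 0, NULL);
    if (ret < 0)
        return ret;
    ret = rte_event_port_setup(dev_id, 0, NULL);
    if (ret < 0)
        return ret;
    ret = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
    if (ret < 0)
        return ret;

    return rte_event_dev_start(dev_id);
}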
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
    uint8_t old_nb_ports = dev->data->nb_ports;
    void **ports;
    uint16_t *links_map;
    struct rte_event_port_conf *ports_cfg;
    unsigned int i;

    RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
                       dev->data->dev_id);

    /* First time configuration */
    if (dev->data->ports == NULL && nb_ports != 0) {
        dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
            sizeof(dev->data->ports[0]) * nb_ports,
            RTE_CACHE_LINE_SIZE, dev->data->socket_id);
        if (dev->data->ports == NULL) {
            dev->data->nb_ports = 0;
            RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
                             "nb_ports %u", nb_ports);
            return -(ENOMEM);
        }

        /* Allocate memory to store port configurations */
        dev->data->ports_cfg = rte_zmalloc_socket("eventdev->ports_cfg",
            sizeof(dev->data->ports_cfg[0]) * nb_ports,
            RTE_CACHE_LINE_SIZE, dev->data->socket_id);
        if (dev->data->ports_cfg == NULL) {
            dev->data->nb_ports = 0;
            RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
                             "nb_ports %u", nb_ports);
            return -(ENOMEM);
        }

        /* Allocate memory to store queue to port link connection */
        dev->data->links_map = rte_zmalloc_socket("eventdev->links_map",
            sizeof(dev->data->links_map[0]) * nb_ports *
                RTE_EVENT_MAX_QUEUES_PER_DEV,
            RTE_CACHE_LINE_SIZE, dev->data->socket_id);
        if (dev->data->links_map == NULL) {
            dev->data->nb_ports = 0;
            RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
                             "nb_ports %u", nb_ports);
            return -(ENOMEM);
        }
        for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
            dev->data->links_map[i] =
                EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
    } else if (dev->data->ports != NULL && nb_ports != 0) { /* re-config */
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

        ports = dev->data->ports;
        ports_cfg = dev->data->ports_cfg;
        links_map = dev->data->links_map;

        for (i = nb_ports; i < old_nb_ports; i++)
            (*dev->dev_ops->port_release)(ports[i]);

        /* Realloc memory for ports */
        ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
                            RTE_CACHE_LINE_SIZE);
        if (ports == NULL) {
            RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
                             " nb_ports %u", nb_ports);
            return -(ENOMEM);
        }

        /* Realloc memory for ports_cfg */
        ports_cfg = rte_realloc(ports_cfg,
            sizeof(ports_cfg[0]) * nb_ports, RTE_CACHE_LINE_SIZE);
        if (ports_cfg == NULL) {
            RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
                             " nb_ports %u", nb_ports);
            return -(ENOMEM);
        }

        /* Realloc memory to store queue to port link connection */
        links_map = rte_realloc(links_map,
            sizeof(dev->data->links_map[0]) * nb_ports *
                RTE_EVENT_MAX_QUEUES_PER_DEV,
            RTE_CACHE_LINE_SIZE);
        if (links_map == NULL) {
            dev->data->nb_ports = 0;
            RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
                             "nb_ports %u", nb_ports);
            return -(ENOMEM);
        }

        if (nb_ports > old_nb_ports) {
            uint8_t new_ps = nb_ports - old_nb_ports;
            unsigned int old_links_map_end =
                old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
            unsigned int links_map_end =
                nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

            memset(ports + old_nb_ports, 0,
                   sizeof(ports[0]) * new_ps);
            memset(ports_cfg + old_nb_ports, 0,
                   sizeof(ports_cfg[0]) * new_ps);
            for (i = old_links_map_end; i < links_map_end; i++)
                links_map[i] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
        }

        dev->data->ports = ports;
        dev->data->ports_cfg = ports_cfg;
        dev->data->links_map = links_map;
    } else if (dev->data->ports != NULL && nb_ports == 0) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

        ports = dev->data->ports;
        for (i = nb_ports; i < old_nb_ports; i++)
            (*dev->dev_ops->port_release)(ports[i]);
    }

    dev->data->nb_ports = nb_ports;
    return 0;
}
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
    struct scheduler_ctx *sched_ctx = dev->data->dev_private;
    uint32_t i;
    int ret;

    if (dev->data->dev_started)
        return 0;

    /* scheduler_attach_init_slave() may be invoked multiple times,
     * but only one invocation performs meaningful work.
     */
    ret = scheduler_attach_init_slave(dev);
    if (ret < 0)
        return ret;

    for (i = 0; i < dev->data->nb_queue_pairs; i++) {
        ret = update_order_ring(dev, i);
        if (ret < 0) {
            CS_LOG_ERR("Failed to update reorder buffer");
            return ret;
        }
    }

    if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
        CS_LOG_ERR("Scheduler mode is not set");
        return -1;
    }

    if (!sched_ctx->nb_slaves) {
        CS_LOG_ERR("No slave in the scheduler");
        return -1;
    }

    RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

        if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
            CS_LOG_ERR("Failed to attach slave");
            return -ENOTSUP;
        }
    }

    RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

    if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
        CS_LOG_ERR("Scheduler start failed");
        return -1;
    }

    /* start all slaves */
    for (i = 0; i < sched_ctx->nb_slaves; i++) {
        uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
        struct rte_cryptodev *slave_dev =
            rte_cryptodev_pmd_get_dev(slave_dev_id);

        ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
        if (ret < 0) {
            CS_LOG_ERR("Failed to start slave dev %u", slave_dev_id);
            return ret;
        }
    }

    return 0;
}
int
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
                                   int32_t rx_queue_id)
{
    int ret = 0;
    struct rte_eventdev *dev;
    struct rte_event_eth_rx_adapter *rx_adapter;
    struct eth_device_info *dev_info;
    uint32_t cap;
    uint16_t i;

    RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
    RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

    rx_adapter = id_to_rx_adapter(id);
    if (rx_adapter == NULL)
        return -EINVAL;

    dev = &rte_eventdevs[rx_adapter->eventdev_id];
    ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
                                            eth_dev_id, &cap);
    if (ret)
        return ret;

    if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
        rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
        RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
                         (uint16_t)rx_queue_id);
        return -EINVAL;
    }

    dev_info = &rx_adapter->eth_devices[eth_dev_id];

    if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
                                -ENOTSUP);
        ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
                &rte_eth_devices[eth_dev_id], rx_queue_id);
        if (ret == 0) {
            update_queue_info(rx_adapter,
                              &rx_adapter->eth_devices[eth_dev_id],
                              rx_queue_id, 0);
            if (dev_info->nb_dev_queues == 0) {
                rte_free(dev_info->rx_queue);
                dev_info->rx_queue = NULL;
            }
        }
    } else {
        int rc;

        rte_spinlock_lock(&rx_adapter->rx_lock);
        if (rx_queue_id == -1) {
            for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
                event_eth_rx_adapter_queue_del(rx_adapter, dev_info, i);
        } else {
            event_eth_rx_adapter_queue_del(rx_adapter, dev_info,
                                           (uint16_t)rx_queue_id);
        }

        rc = eth_poll_wrr_calc(rx_adapter);
        if (rc)
            RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32, rc);

        if (dev_info->nb_dev_queues == 0) {
            rte_free(dev_info->rx_queue);
            dev_info->rx_queue = NULL;
        }

        rte_spinlock_unlock(&rx_adapter->rx_lock);
        rte_service_component_runstate_set(rx_adapter->service_id,
                sw_rx_adapter_queue_count(rx_adapter));
    }

    return ret;
}
int
rte_event_eth_rx_adapter_queue_add(uint8_t id, uint8_t eth_dev_id,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
    int ret;
    uint32_t cap;
    struct rte_event_eth_rx_adapter *rx_adapter;
    struct rte_eventdev *dev;
    struct eth_device_info *dev_info;
    int start_service;

    RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
    RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

    rx_adapter = id_to_rx_adapter(id);
    if ((rx_adapter == NULL) || (queue_conf == NULL))
        return -EINVAL;

    dev = &rte_eventdevs[rx_adapter->eventdev_id];
    ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
                                            eth_dev_id, &cap);
    if (ret) {
        RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
                         "eth port %" PRIu8, id, eth_dev_id);
        return ret;
    }

    if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0 &&
        (queue_conf->rx_queue_flags &
         RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
        RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
                         " eth port: %" PRIu8 " adapter id: %" PRIu8,
                         eth_dev_id, id);
        return -EINVAL;
    }

    if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
        (rx_queue_id != -1)) {
        RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
                         "event queue id %u eth port %u", id, eth_dev_id);
        return -EINVAL;
    }

    if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
        rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
        RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
                         (uint16_t)rx_queue_id);
        return -EINVAL;
    }

    start_service = 0;
    dev_info = &rx_adapter->eth_devices[eth_dev_id];

    if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
                                -ENOTSUP);
        if (dev_info->rx_queue == NULL) {
            dev_info->rx_queue =
                rte_zmalloc_socket(rx_adapter->mem_name,
                    dev_info->dev->data->nb_rx_queues *
                        sizeof(struct eth_rx_queue_info),
                    0, rx_adapter->socket_id);
            if (dev_info->rx_queue == NULL)
                return -ENOMEM;
        }

        ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
                &rte_eth_devices[eth_dev_id], rx_queue_id, queue_conf);
        if (ret == 0) {
            update_queue_info(rx_adapter,
                              &rx_adapter->eth_devices[eth_dev_id],
                              rx_queue_id, 1);
        }
    } else {
        rte_spinlock_lock(&rx_adapter->rx_lock);
        ret = init_service(rx_adapter, id);
        if (ret == 0)
            ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
                               queue_conf);
        rte_spinlock_unlock(&rx_adapter->rx_lock);
        if (ret == 0)
            start_service = !!sw_rx_adapter_queue_count(rx_adapter);
    }

    if (ret)
        return ret;

    if (start_service)
        rte_service_component_runstate_set(rx_adapter->service_id, 1);

    return 0;
}
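/*
 * Usage sketch (illustrative): create an Rx adapter on top of an already
 * configured event device and connect every Rx queue of an Ethernet port
 * to event queue 0. The adapter id, the reuse of the event port default
 * configuration for the adapter, and the ATOMIC sched_type are placeholder
 * choices, not values mandated by the API.
 */
static int
example_rx_adapter_bringup(uint8_t adapter_id, uint8_t eventdev_id,
                           uint8_t eth_port_id)
{
    struct rte_event_port_conf port_conf;
    struct rte_event_eth_rx_adapter_queue_conf queue_conf = {0};
    int ret;

    ret = rte_event_port_default_conf_get(eventdev_id, 0, &port_conf);
    if (ret < 0)
        return ret;

    ret = rte_event_eth_rx_adapter_create(adapter_id, eventdev_id,
                                          &port_conf);
    if (ret < 0)
        return ret;

    queue_conf.ev.queue_id = 0;
    queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
    queue_conf.servicing_weight = 1;

    /* rx_queue_id of -1 adds all Rx queues of the Ethernet device. */
    ret = rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1,
                                             &queue_conf);
    if (ret < 0)
        return ret;

    return rte_event_eth_rx_adapter_start(adapter_id);
}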
const struct rte_security_capability *
rte_security_capabilities_get(struct rte_security_ctx *instance)
{
    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->capabilities_get, NULL);
    return instance->ops->capabilities_get(instance->device);
}