static int scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type, void *option) { struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *) dev->data->dev_private)->private_ctx; uint32_t threshold; if ((enum rte_cryptodev_schedule_option_type)option_type != CDEV_SCHED_OPTION_THRESHOLD) { CS_LOG_ERR("Option not supported"); return -EINVAL; } threshold = ((struct rte_cryptodev_scheduler_threshold_option *) option)->threshold; if (!rte_is_power_of_2(threshold)) { CS_LOG_ERR("Threshold is not power of 2"); return -EINVAL; } psd_ctx->threshold = ~(threshold - 1); return 0; }
static int update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id]; if (sched_ctx->reordering_enabled) { char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN]; uint32_t buff_size = rte_align32pow2( sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE); if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } if (!buff_size) return 0; if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), dev->data->dev_id, qp_id) < 0) { CS_LOG_ERR("failed to create unique reorder buffer " "name"); return -ENOMEM; } qp_ctx->order_ring = rte_ring_create(order_ring_name, buff_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (!qp_ctx->order_ring) { CS_LOG_ERR("failed to create order ring"); return -ENOMEM; } } else { if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } } return 0; }
/** attaching the slaves predefined by scheduler's EAL options */ static int scheduler_attach_init_slave(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; uint8_t scheduler_id = dev->data->dev_id; int i; for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) { const char *dev_name = sched_ctx->init_slave_names[i]; struct rte_cryptodev *slave_dev = rte_cryptodev_pmd_get_named_dev(dev_name); int status; if (!slave_dev) { CS_LOG_ERR("Failed to locate slave dev %s", dev_name); return -EINVAL; } status = rte_cryptodev_scheduler_slave_attach( scheduler_id, slave_dev->data->dev_id); if (status < 0) { CS_LOG_ERR("Failed to attach slave cryptodev %u", slave_dev->data->dev_id); return status; } CS_LOG_INFO("Scheduler %s attached slave %s\n", dev->data->name, sched_ctx->init_slave_names[i]); rte_free(sched_ctx->init_slave_names[i]); sched_ctx->init_slave_names[i] = NULL; sched_ctx->nb_init_slaves -= 1; } return 0; }
/** Allocate the PSD scheduler's private context for one queue pair. */
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
	struct psd_scheduler_qp_ctx *ps_qp_ctx = rte_zmalloc_socket(NULL,
			sizeof(struct psd_scheduler_qp_ctx), 0,
			rte_socket_id());

	if (ps_qp_ctx == NULL) {
		CS_LOG_ERR("failed allocate memory for private queue pair");
		return -ENOMEM;
	}

	qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;

	return 0;
}
/** Refuse to stop while any queue pair still has crypto ops in flight. */
static int
scheduler_stop(struct rte_cryptodev *dev)
{
	uint16_t qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct scheduler_qp_ctx *qp_ctx =
				dev->data->queue_pairs[qp_id];
		struct psd_scheduler_qp_ctx *ps_qp_ctx =
				qp_ctx->private_qp_ctx;

		if (ps_qp_ctx->primary_slave.nb_inflight_cops +
				ps_qp_ctx->secondary_slave.nb_inflight_cops
				!= 0) {
			CS_LOG_ERR("Some crypto ops left in slave queue");
			return -1;
		}
	}

	return 0;
}
static int scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type, void *option) { struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *) dev->data->dev_private)->private_ctx; struct rte_cryptodev_scheduler_threshold_option *threshold_option; if ((enum rte_cryptodev_schedule_option_type)option_type != CDEV_SCHED_OPTION_THRESHOLD) { CS_LOG_ERR("Option not supported"); return -EINVAL; } threshold_option = option; threshold_option->threshold = (~psd_ctx->threshold) + 1; return 0; }
static int scheduler_start(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx; uint16_t i; /* for packet size based scheduler, nb_slaves have to >= 2 */ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) { CS_LOG_ERR("not enough slaves to start"); return -1; } for (i = 0; i < dev->data->nb_queue_pairs; i++) { struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i]; struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx; ps_qp_ctx->primary_slave.dev_id = sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id; ps_qp_ctx->primary_slave.qp_id = i; ps_qp_ctx->primary_slave.nb_inflight_cops = 0; ps_qp_ctx->secondary_slave.dev_id = sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id; ps_qp_ctx->secondary_slave.qp_id = i; ps_qp_ctx->secondary_slave.nb_inflight_cops = 0; ps_qp_ctx->threshold = psd_ctx->threshold; } if (sched_ctx->reordering_enabled) { dev->enqueue_burst = &schedule_enqueue_ordering; dev->dequeue_burst = &schedule_dequeue_ordering; } else { dev->enqueue_burst = &schedule_enqueue; dev->dequeue_burst = &schedule_dequeue; } return 0; }
static int scheduler_create_private_ctx(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct psd_scheduler_ctx *psd_ctx; if (sched_ctx->private_ctx) rte_free(sched_ctx->private_ctx); psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0, rte_socket_id()); if (!psd_ctx) { CS_LOG_ERR("failed allocate memory"); return -ENOMEM; } psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD; sched_ctx->private_ctx = (void *)psd_ctx; return 0; }
/** Setup a queue pair */ static int scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id, struct rte_mempool *session_pool) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct scheduler_qp_ctx *qp_ctx; char name[RTE_CRYPTODEV_NAME_MAX_LEN]; uint32_t i; int ret; if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "CRYTO_SCHE PMD %u QP %u", dev->data->dev_id, qp_id) < 0) { CS_LOG_ERR("Failed to create unique queue pair name"); return -EFAULT; } /* Free memory prior to re-allocation if needed. */ if (dev->data->queue_pairs[qp_id] != NULL) scheduler_pmd_qp_release(dev, qp_id); for (i = 0; i < sched_ctx->nb_slaves; i++) { uint8_t slave_id = sched_ctx->slaves[i].dev_id; /* * All slaves will share the same session mempool * for session-less operations, so the objects * must be big enough for all the drivers used. */ ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id, qp_conf, socket_id, session_pool); if (ret < 0) return ret; } /* Allocate the queue pair data structure. */ qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE, socket_id); if (qp_ctx == NULL) return -ENOMEM; /* The actual available object number = nb_descriptors - 1 */ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1; dev->data->queue_pairs[qp_id] = qp_ctx; /* although scheduler_attach_init_slave presents multiple times, * there will be only 1 meaningful execution. */ ret = scheduler_attach_init_slave(dev); if (ret < 0) { CS_LOG_ERR("Failed to attach slave"); scheduler_pmd_qp_release(dev, qp_id); return ret; } if (*sched_ctx->ops.config_queue_pair) { if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) { CS_LOG_ERR("Unable to configure queue pair"); return -1; } } return 0; }
/** Start device */
static int
scheduler_pmd_start(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;
	int ret;

	/* starting an already-started device is a harmless no-op */
	if (dev->data->dev_started)
		return 0;

	/* although scheduler_attach_init_slave presents multiple times,
	 * there will be only 1 meaningful execution.
	 */
	ret = scheduler_attach_init_slave(dev);
	if (ret < 0)
		return ret;

	/* (re)build each qp's reorder ring to match the current slave
	 * count and reordering setting
	 */
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = update_order_ring(dev, i);
		if (ret < 0) {
			CS_LOG_ERR("Failed to update reorder buffer");
			return ret;
		}
	}

	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
		CS_LOG_ERR("Scheduler mode is not set");
		return -1;
	}

	if (!sched_ctx->nb_slaves) {
		CS_LOG_ERR("No slave in the scheduler");
		return -1;
	}

	/* let the mode-specific scheduler see every slave before it starts */
	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;

		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
			CS_LOG_ERR("Failed to attach slave");
			return -ENOTSUP;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);

	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
		CS_LOG_ERR("Scheduler start failed");
		return -1;
	}

	/* start all slaves */
	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
		struct rte_cryptodev *slave_dev =
				rte_cryptodev_pmd_get_dev(slave_dev_id);

		/* NOTE(review): dev_ops->dev_start is called without a NULL
		 * check, and slaves started earlier in this loop are not
		 * stopped if a later one fails — confirm all slave PMDs
		 * provide dev_start and whether partial starts need unwinding.
		 */
		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
		if (ret < 0) {
			CS_LOG_ERR("Failed to start slave dev %u",
					slave_dev_id);
			return ret;
		}
	}

	return 0;
}
static int cryptodev_scheduler_create(const char *name, struct scheduler_init_params *init_params) { struct rte_cryptodev *dev; struct scheduler_ctx *sched_ctx; uint32_t i; int ret; if (init_params->def_p.name[0] == '\0') snprintf(init_params->def_p.name, sizeof(init_params->def_p.name), "%s", name); dev = rte_cryptodev_pmd_virtual_dev_init(init_params->def_p.name, sizeof(struct scheduler_ctx), init_params->def_p.socket_id); if (dev == NULL) { CS_LOG_ERR("driver %s: failed to create cryptodev vdev", name); return -EFAULT; } dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD; dev->dev_ops = rte_crypto_scheduler_pmd_ops; sched_ctx = dev->data->dev_private; sched_ctx->max_nb_queue_pairs = init_params->def_p.max_nb_queue_pairs; if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED && init_params->mode < CDEV_SCHED_MODE_COUNT) { ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id, init_params->mode); if (ret < 0) { rte_cryptodev_pmd_release_device(dev); return ret; } for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) { if (scheduler_mode_map[i].val != sched_ctx->mode) continue; RTE_LOG(INFO, PMD, " Scheduling mode = %s\n", scheduler_mode_map[i].name); break; } } sched_ctx->reordering_enabled = init_params->enable_ordering; for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) { if (scheduler_ordering_map[i].val != sched_ctx->reordering_enabled) continue; RTE_LOG(INFO, PMD, " Packet ordering = %s\n", scheduler_ordering_map[i].name); break; } for (i = 0; i < init_params->nb_slaves; i++) { sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] = rte_zmalloc_socket( NULL, RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0, SOCKET_ID_ANY); if (!sched_ctx->init_slave_names[ sched_ctx->nb_init_slaves]) { CS_LOG_ERR("driver %s: Insufficient memory", name); return -ENOMEM; } strncpy(sched_ctx->init_slave_names[ sched_ctx->nb_init_slaves], init_params->slave_names[i], RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1); sched_ctx->nb_init_slaves++; } /* * Initialize capabilities structure as an empty 
structure, * in case device information is requested when no slaves are attached */ sched_ctx->capabilities = rte_zmalloc_socket(NULL, sizeof(struct rte_cryptodev_capabilities), 0, SOCKET_ID_ANY); if (!sched_ctx->capabilities) { RTE_LOG(ERR, PMD, "Not enough memory for capability " "information\n"); return -ENOMEM; } return 0; }