/* Tear down every per-NUMA-node mailbox ring of every functional block. */
void
fb_mailbox_destroy(void)
{
	int blk;
	int node;

	for (blk = 0; blk < GATEKEEPER_MAX_FUNC_BLKS; blk++) {
		for (node = 0; node < GATEKEEPER_MAX_NUMA_NODES; node++)
			rte_ring_free(mailboxes[blk].func_mailbox[node]);
	}
}
static int update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id]; if (sched_ctx->reordering_enabled) { char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN]; uint32_t buff_size = rte_align32pow2( sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE); if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } if (!buff_size) return 0; if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), dev->data->dev_id, qp_id) < 0) { CS_LOG_ERR("failed to create unique reorder buffer " "name"); return -ENOMEM; } qp_ctx->order_ring = rte_ring_create(order_ring_name, buff_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (!qp_ctx->order_ring) { CS_LOG_ERR("failed to create order ring"); return -ENOMEM; } } else { if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } } return 0; }
/** Release queue pair */
static int
kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];

	/* Nothing to do for an already-released slot. */
	if (qp == NULL)
		return 0;

	rte_ring_free(qp->processed_ops);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}
/** Close device */ static int scheduler_pmd_close(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; uint32_t i; int ret; /* the dev should be stopped before being closed */ if (dev->data->dev_started) return -EBUSY; /* close all slaves first */ for (i = 0; i < sched_ctx->nb_slaves; i++) { uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; struct rte_cryptodev *slave_dev = rte_cryptodev_pmd_get_dev(slave_dev_id); ret = (*slave_dev->dev_ops->dev_close)(slave_dev); if (ret < 0) return ret; } for (i = 0; i < dev->data->nb_queue_pairs; i++) { struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i]; if (qp_ctx->order_ring) { rte_ring_free(qp_ctx->order_ring); qp_ctx->order_ring = NULL; } if (qp_ctx->private_qp_ctx) { rte_free(qp_ctx->private_qp_ctx); qp_ctx->private_qp_ctx = NULL; } } if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); sched_ctx->private_ctx = NULL; } if (sched_ctx->capabilities) { rte_free(sched_ctx->capabilities); sched_ctx->capabilities = NULL; } return 0; }
/** Release queue pair */
static int
scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];

	if (!qp_ctx)
		return 0;

	/* rte_ring_free()/rte_free() accept NULL, so the per-field guards
	 * were redundant — dropped for consistency with the other qp
	 * release handlers (e.g. kasumi_pmd_qp_release). */
	rte_ring_free(qp_ctx->order_ring);
	rte_free(qp_ctx->private_qp_ctx);

	rte_free(qp_ctx);
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}