int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	/* Only the primary process owns dev_private and the data memzone. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name),
			       "rte_eventdev_data_%u", eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}
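/*
 * A minimal sketch, not part of the eventdev sources above, of the
 * allocation side that rte_event_pmd_release() tears down.  The memzone
 * name format "rte_eventdev_data_%u" matches the release path's lookup;
 * the helper name and the size/socket parameters are hypothetical.
 */
static const struct rte_memzone *
eventdev_data_reserve_sketch(uint8_t dev_id, size_t size, int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	int n;

	/* Same name format that rte_event_pmd_release() looks up. */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n < 0 || n >= (int)sizeof(mz_name))
		return NULL;

	/* The primary process reserves; secondaries attach via lookup. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return rte_memzone_reserve(mz_name, size, socket_id, 0);

	return rte_memzone_lookup(mz_name);
}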
void
bnxt_free_ring(struct bnxt_ring *ring)
{
	/* Zero and detach the shadow memory before releasing the ring. */
	if (ring->vmem_size && *ring->vmem) {
		memset((char *)*ring->vmem, 0, ring->vmem_size);
		*ring->vmem = NULL;
	}

	rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
}
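/*
 * Illustration of the double-pointer vmem pattern used by bnxt_free_ring():
 * ring->vmem points at the owning structure's own pointer, so the free path
 * can clear the owner's reference in place.  The owner struct and helper
 * below are hypothetical, not part of the bnxt driver.
 */
struct ring_owner_sketch {
	void *vmem;	/* owner's handle to the shadow ring memory */
};

static void
ring_attach_vmem_sketch(struct bnxt_ring *ring,
			struct ring_owner_sketch *owner)
{
	/* After this, *ring->vmem in bnxt_free_ring() dereferences the
	 * owner's pointer, and *ring->vmem = NULL clears it for the owner. */
	ring->vmem = &owner->vmem;
}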
int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}
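/*
 * Usage sketch for spdk_memzone_free(), assuming its spdk_memzone_reserve()
 * counterpart from spdk/env.h; the zone name "demo_zone" and the 4 KiB size
 * are illustrative.
 */
static int
memzone_roundtrip_sketch(void)
{
	void *buf = spdk_memzone_reserve("demo_zone", 4096,
					 SPDK_ENV_SOCKET_ID_ANY, 0);

	if (buf == NULL)
		return -1;

	memset(buf, 0, 4096);	/* ... use the zone ... */

	/* Free by name; spdk_memzone_free() returns -1 if the lookup fails. */
	return spdk_memzone_free("demo_zone");
}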
int
otx_cpt_put_resource(struct cpt_instance *instance)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rte_memzone *rz;

	if (!cptvf) {
		CPT_LOG_ERR("Invalid CPTVF handle");
		return -EINVAL;
	}

	CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);

	/* The memzone handle was stashed in instance->rsvd at get time. */
	rz = (struct rte_memzone *)instance->rsvd;
	rte_memzone_free(rz);

	return 0;
}
static void
qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
				    status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
			    queue->memz_name);
	}
}
int
otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
{
	int ret = -ENOENT, len, qlen, i;
	int chunk_len, chunks, chunk_size;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;
	struct cpt_instance *cpt_instance;
	struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
	struct command_chunk *chunk = NULL;
	uint8_t *mem;
	const struct rte_memzone *rz;
	uint64_t dma_addr = 0, alloc_len, used_len;
	uint64_t *next_ptr;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);

	CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);

	cpt_instance = &cptvf->instance;

	memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
	memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));

	/* Chunks are of fixed size buffers */
	chunks = DEFAULT_CMD_QCHUNKS;
	chunk_len = DEFAULT_CMD_QCHUNK_SIZE;

	qlen = chunks * chunk_len;
	/* Chunk size includes 8 bytes of next chunk ptr */
	chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;

	/* For command chunk structures */
	len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);

	/* For pending queue */
	len += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For Instruction queues */
	len += chunks * RTE_ALIGN(chunk_size, 128);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_256MB,
					 RTE_CACHE_LINE_SIZE);
	if (!rz) {
		ret = rte_errno;
		goto cleanup;
	}

	mem = rz->addr;
	dma_addr = rz->phys_addr;
	alloc_len = len;

	memset(mem, 0, len);

	/* Stash the memzone handle so otx_cpt_put_resource() can free it. */
	cpt_instance->rsvd = (uintptr_t)rz;

	/* Pending queue setup */
	cptvf->pqueue.rid_queue = (struct rid *)mem;
	cptvf->pqueue.enq_tail = 0;
	cptvf->pqueue.deq_head = 0;
	cptvf->pqueue.pending_count = 0;

	mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
	len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
	dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* Alignment wastage */
	used_len = alloc_len - len;
	mem += RTE_ALIGN(used_len, pg_sz) - used_len;
	len -= RTE_ALIGN(used_len, pg_sz) - used_len;
	dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;

	/* Init instruction queues */
	chunk_head = &cptvf->cqueue.chead[0];

	chunk_prev = NULL;
	for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
		int csize;

		chunk = &cptvf->cqueue.chead[i];
		chunk->head = mem;
		chunk->dma_addr = dma_addr;

		csize = RTE_ALIGN(chunk_size, 128);
		mem += csize;
		dma_addr += csize;
		len -= csize;

		/* Link the previous chunk to this one via its tail pointer. */
		if (chunk_prev) {
			next_ptr = (uint64_t *)(chunk_prev->head +
						chunk_size - 8);
			*next_ptr = (uint64_t)chunk->dma_addr;
		}
		chunk_prev = chunk;
	}

	/* Circular loop: point the last chunk back at the first. */
	next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
	*next_ptr = (uint64_t)chunk_head->dma_addr;

	assert(!len);

	/* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
	cptvf->qsize = chunk_size / 8;
	cptvf->cqueue.qhead = chunk_head->head;
	cptvf->cqueue.idx = 0;
	cptvf->cqueue.cchunk = 0;

	if (cpt_vq_init(cptvf, group)) {
		CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto cleanup;
	}

	*instance = cpt_instance;

	CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);

	return 0;

cleanup:
	rte_memzone_free(rz);
	*instance = NULL;
	return ret;
}
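/*
 * Caller sketch pairing otx_cpt_get_resource() with the
 * otx_cpt_put_resource() shown earlier.  The device handle is assumed to be
 * a probed cpt_vf; error handling beyond the return codes is trimmed.
 */
static int
cpt_instance_roundtrip_sketch(void *dev, uint8_t group)
{
	struct cpt_instance *inst = NULL;
	int ret;

	ret = otx_cpt_get_resource(dev, group, &inst);
	if (ret)
		return ret;

	/* ... submit crypto work through inst ... */

	/* Releases the memzone stashed in inst->rsvd. */
	return otx_cpt_put_resource(inst);
}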