static int
set_host_identifier(struct nvme_controller *ctrlr)
{
	int ret;
	uint64_t *host_id;
	struct nvme_command cmd = {};

	cmd.opc = NVME_OPC_SET_FEATURES;
	cmd.cdw10 = NVME_FEAT_HOST_IDENTIFIER;

	host_id = rte_malloc(NULL, 8, 0);
	if (host_id == NULL) {
		/* do not dereference a failed allocation */
		return -1;
	}
	*host_id = HOST_ID;

	outstanding_commands = 0;
	set_feature_result = -1;

	fprintf(stdout, "Set Feature: Host Identifier 0x%" PRIx64 "\n", *host_id);
	ret = nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, host_id, 8,
				       set_feature_completion,
				       &features[NVME_FEAT_HOST_IDENTIFIER]);
	if (ret) {
		fprintf(stdout, "Set Feature: Failed\n");
		rte_free(host_id);
		return -1;
	}

	outstanding_commands++;
	while (outstanding_commands) {
		nvme_ctrlr_process_admin_completions(ctrlr);
	}

	if (set_feature_result)
		fprintf(stdout, "Set Feature: Host Identifier Failed\n");

	rte_free(host_id);
	return 0;
}
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
	struct virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	if (!b_op) {
		i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
					    I40E_NOT_SUPPORTED, NULL, 0);
		return ret;
	}

	/* only have 1 VSI by default */
	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem");
		ret = I40E_ERR_NO_MEMORY;
		vf_res = NULL;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 |
				   VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change below setting if PF host can support more VSIs for VF */
	vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
	ether_addr_copy(&vf->mac_addr,
		(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);
	rte_free(vf_res);

	return ret;
}
/*
 * Create a scheduler on the current lcore
 */
struct lthread_sched *_lthread_sched_create(size_t stack_size)
{
	int status;
	struct lthread_sched *new_sched;
	unsigned lcoreid = rte_lcore_id();

	RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);

	if (stack_size == 0)
		stack_size = LTHREAD_MAX_STACK_SIZE;

	new_sched = rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
				      RTE_CACHE_LINE_SIZE,
				      rte_socket_id());
	if (new_sched == NULL) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate memory for scheduler\n");
		return NULL;
	}

	_lthread_key_pool_init();

	new_sched->stack_size = stack_size;
	new_sched->birth = rte_rdtsc();
	THIS_SCHED = new_sched;

	status = _lthread_sched_alloc_resources(new_sched);
	if (status != SCHED_ALLOC_OK) {
		RTE_LOG(CRIT, LTHREAD,
			"Failed to allocate resources for scheduler code = %d\n",
			status);
		rte_free(new_sched);
		return NULL;
	}

	bzero(&new_sched->ctx, sizeof(struct ctx));

	new_sched->lcore_id = lcoreid;
	schedcore[lcoreid] = new_sched;
	new_sched->run_flag = 1;

	DIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);

	rte_wmb();
	return new_sched;
}
static void
read_complete(void *arg, const struct spdk_nvme_cpl *completion)
{
	struct hello_world_sequence *sequence = arg;

	/*
	 * The read I/O has completed.  Print the contents of the
	 * buffer, free the buffer, then mark the sequence as
	 * completed.  This will trigger the hello_world() function
	 * to exit its polling loop.
	 */
	printf("%s", sequence->buf);
	rte_free(sequence->buf);
	sequence->is_completed = 1;
}
/**
 * Release queue pair (PMD ops callback).
 *
 * @param dev Pointer to the device structure.
 * @param qp_id ID of Queue Pair to release.
 * @returns 0. Always.
 */
static int
mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct mrvl_crypto_qp *qp =
		(struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id];

	if (dev->data->queue_pairs[qp_id] != NULL) {
		sam_cio_flush(qp->cio);
		sam_cio_deinit(qp->cio);
		rte_free(dev->data->queue_pairs[qp_id]);
		dev->data->queue_pairs[qp_id] = NULL;
	}

	return 0;
}
static void
dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (bp_info) {
		bman_free_pool(bp_info->bp);
		DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d",
				  bp_info->bpid);
		rte_free(mp->pool_data);
		mp->pool_data = NULL;
	}
}
static int
reservation_ns_acquire(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		       uint16_t ns_id)
{
	int ret;
	struct spdk_nvme_reservation_acquire_data *cdata;
	struct spdk_nvme_ns *ns;

	ns = spdk_nvme_ctrlr_get_ns(ctrlr, ns_id);
	cdata = rte_zmalloc(NULL, sizeof(struct spdk_nvme_reservation_acquire_data), 0);
	if (cdata == NULL) {
		/* allocation failed; nothing to free yet */
		return -1;
	}
	cdata->crkey = CR_KEY;

	outstanding_commands = 0;
	reserve_command_result = -1;

	ret = spdk_nvme_ns_cmd_reservation_acquire(ns, qpair, cdata, 0,
						   SPDK_NVME_RESERVE_ACQUIRE,
						   SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
						   reservation_ns_completion, NULL);
	if (ret) {
		fprintf(stderr, "Reservation Acquire Failed\n");
		rte_free(cdata);
		return -1;
	}

	outstanding_commands++;
	while (outstanding_commands) {
		spdk_nvme_qpair_process_completions(qpair, 100);
	}

	if (reserve_command_result)
		fprintf(stderr, "Reservation Acquire Failed\n");

	rte_free(cdata);
	return 0;
}
static int
reservation_ns_release(struct nvme_controller *ctrlr, uint16_t ns_id)
{
	int ret;
	struct nvme_reservation_key_data *cdata;
	struct nvme_namespace *ns;

	ns = nvme_ctrlr_get_ns(ctrlr, ns_id);
	cdata = rte_zmalloc(NULL, sizeof(struct nvme_reservation_key_data), 0);
	if (cdata == NULL) {
		/* allocation failed; nothing to free yet */
		return -1;
	}
	cdata->crkey = CR_KEY;

	outstanding_commands = 0;
	reserve_command_result = -1;

	ret = nvme_ns_cmd_reservation_release(ns, cdata, 0,
					      NVME_RESERVE_RELEASE,
					      NVME_RESERVE_WRITE_EXCLUSIVE,
					      reservation_ns_completion, NULL);
	if (ret) {
		fprintf(stderr, "Reservation Release Failed\n");
		rte_free(cdata);
		return -1;
	}

	outstanding_commands++;
	while (outstanding_commands) {
		nvme_ctrlr_process_io_completions(ctrlr, 100);
	}

	if (reserve_command_result)
		fprintf(stderr, "Reservation Release Failed\n");

	rte_free(cdata);
	return 0;
}
static int
rte_port_sink_free(void *port)
{
	struct rte_port_sink *p = port;

	if (p == NULL)
		return 0;

	PCAP_SINK_CLOSE(p->dumper);

	rte_free(p);

	return 0;
}
static int
avf_dev_uninit(struct rte_eth_dev *dev)
{
	struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
	struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;
	if (hw->adapter_stopped == 0)
		avf_dev_close(dev);

	rte_free(vf->vf_res);
	vf->vsi_res = NULL;
	vf->vf_res = NULL;

	rte_free(vf->aq_resp);
	vf->aq_resp = NULL;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	if (vf->rss_lut) {
		rte_free(vf->rss_lut);
		vf->rss_lut = NULL;
	}
	if (vf->rss_key) {
		rte_free(vf->rss_key);
		vf->rss_key = NULL;
	}

	return 0;
}
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		if (ctx->pool)
			rte_mempool_free(ctx->pool);

		rte_free(ctx);
	}
}
static int
rte_table_array_free(void *table)
{
	struct rte_table_array *t = (struct rte_table_array *) table;

	/* Check input parameters */
	if (t == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	/* Free previously allocated resources */
	rte_free(t);

	return 0;
}
static void
unregister_controllers(void)
{
	struct ctrlr_entry *entry = g_controllers;

	while (entry) {
		struct ctrlr_entry *next = entry->next;

		rte_free(entry->latency_page);
		if (g_latency_tracking_enable &&
		    spdk_nvme_ctrlr_is_feature_supported(entry->ctrlr,
							 SPDK_NVME_INTEL_FEAT_LATENCY_TRACKING))
			set_latency_tracking_feature(entry->ctrlr, false);
		spdk_nvme_detach(entry->ctrlr);
		free(entry);
		entry = next;
	}
}
/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT set to 0 */
static uint32_t
dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
				    uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns))
		return 0;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = rte_zmalloc(NULL, sector_size * req->lba_count, 0x1000);
	if (!req->contig)
		return 0;

	req->metadata = rte_zmalloc(NULL, md_size * req->lba_count, 0x1000);
	if (!req->metadata) {
		rte_free(req->contig);
		return 0;
	}

	req->lba = 0x400000;
	req->use_extended_lba = false;

	/* protection information occupies the last 8 bytes when the metadata size is larger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	/* big-endian for reference tag */
	pi->ref_tag = swap32((uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size * 2 - 8);
	/* reference tag is incremented for each subsequent logical block */
	pi->ref_tag = swap32((uint32_t)req->lba + 1);

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}
int
rte_memzone_free(const struct rte_memzone *mz)
{
	struct rte_mem_config *mcfg;
	int ret = 0;
	void *addr;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
	idx = idx / sizeof(struct rte_memzone);

#ifdef RTE_LIBRTE_IVSHMEM
	/*
	 * If ioremap_addr is set, it's an IVSHMEM memzone and we cannot
	 * free it.
	 */
	if (mcfg->memzone[idx].ioremap_addr != 0) {
		rte_rwlock_write_unlock(&mcfg->mlock);
		return -EINVAL;
	}
#endif

	addr = mcfg->memzone[idx].addr;
	if (addr == NULL)
		ret = -EINVAL;
	else if (mcfg->memzone_cnt == 0) {
		rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
			  __func__);
	} else {
		memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
		mcfg->memzone_cnt--;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_free(addr);

	return ret;
}
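/*
 * Minimal usage sketch for the function above (not taken from the sources in
 * this listing; "example_mz", the 4096-byte size, and memzone_roundtrip() are
 * arbitrary illustrations): reserve a memzone, use it, then release it with
 * rte_memzone_free().
 */
#include <rte_memzone.h>

static int
memzone_roundtrip(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve("example_mz", 4096, SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return -1;

	/* ... use mz->addr / mz->len here ... */

	return rte_memzone_free(mz);
}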
static int
rte_table_lpm_ipv6_free(void *table)
{
	struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;

	/* Check input parameters */
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	/* Free previously allocated resources */
	rte_lpm6_free(lpm->lpm);
	rte_free(lpm);

	return 0;
}
static int
rte_port_ring_writer_ras_free(void *port)
{
	struct rte_port_ring_writer_ras *p = port;

	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
		return -1;
	}

	rte_port_ring_writer_ras_flush(port);
	rte_ip_frag_table_destroy(p->frag_tbl);
	rte_free(port);

	return 0;
}
int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
static inline void
vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
{
	while (ring->next2comp != ring->next2fill) {
		/* No need to worry about tx desc ownership, device is quiesced by now. */
		vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;

		if (buf_info->m) {
			rte_pktmbuf_free(buf_info->m);
			buf_info->m = NULL;
			buf_info->bufPA = 0;
			buf_info->len = 0;
		}
		vmxnet3_cmd_ring_adv_next2comp(ring);
	}

	rte_free(ring->buf_info);
	ring->buf_info = NULL;
}
static int
cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
{
	struct lio_mbox *mbox;

	PMD_INIT_FUNC_TRACE();

	if (lio_dev->mbox == NULL) {
		lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
		if (lio_dev->mbox == NULL)
			return -ENOMEM;
	}

	mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
	if (mbox == NULL) {
		rte_free(lio_dev->mbox);
		lio_dev->mbox = NULL;
		return -ENOMEM;
	}

	rte_spinlock_init(&mbox->lock);

	mbox->lio_dev = lio_dev;
	mbox->q_no = 0;
	mbox->state = LIO_MBOX_STATE_IDLE;

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
			     CN23XX_VF_SLI_PKT_MBOX_INT(0);
	/* VF reads from SIG0 reg */
	mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
			      CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
	/* VF writes into SIG1 reg */
	mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
			       CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);

	lio_dev->mbox[0] = mbox;

	rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);

	return 0;
}
/*---------------------------------------------------------------------------*/
sb_queue_t
CreateSBQueue(int capacity)
{
	sb_queue_t sq;

	sq = (sb_queue_t)rte_calloc("sb_queue", 1, sizeof(struct sb_queue), 0);
	if (!sq)
		return NULL;

	sq->_q = (struct tcp_send_buffer **)
			rte_calloc("tcp_send_buffer", capacity + 1,
				   sizeof(struct tcp_send_buffer *), 0);
	if (!sq->_q) {
		rte_free(sq);
		return NULL;
	}

	sq->_capacity = capacity;
	sq->_head = sq->_tail = 0;

	return sq;
}
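/*
 * Hypothetical teardown counterpart for the allocation above (an assumption,
 * not part of the sources in this listing; the name DestroySBQueueSketch is
 * illustrative only): release the rte_calloc()'d buffer array first, then the
 * queue object itself.
 */
static void
DestroySBQueueSketch(sb_queue_t sq)
{
	if (!sq)
		return;

	rte_free(sq->_q);	/* array of tcp_send_buffer pointers */
	rte_free(sq);
}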
/** Setup a queue pair */
static int
aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_gcm_qp *qp = NULL;
	struct aesni_gcm_private *internals = dev->data->dev_private;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_gcm_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];

	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = session_pool;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));

	return 0;

qp_setup_cleanup:
	if (qp)
		rte_free(qp);

	return -1;
}
static int
rte_port_ring_writer_free(void *port)
{
	struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;

	if (port == NULL) {
		RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
		return -EINVAL;
	}

	if (p->is_multi)
		rte_port_ring_multi_writer_flush(port);
	else
		rte_port_ring_writer_flush(port);

	rte_free(port);

	return 0;
}
void enic_clsf_destroy(struct enic *enic)
{
	u32 index;
	struct enic_fdir_node *key;

	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
					    &key->fltr_id, NULL);
			rte_free(key);
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}
static int
rte_table_acl_free(void *table)
{
	struct rte_table_acl *acl = (struct rte_table_acl *) table;

	/* Check input parameters */
	if (table == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	/* Free previously allocated resources */
	if (acl->ctx != NULL)
		rte_acl_free(acl->ctx);

	rte_free(acl);

	return 0;
}
static int
sfc_filter_cache_match_supported(struct sfc_adapter *sa)
{
	struct sfc_filter *filter = &sa->filter;
	size_t num = filter->supported_match_num;
	uint32_t *buf = filter->supported_match;
	unsigned int retry;
	int rc;

	/* Just a guess of possibly sufficient entries */
	if (num == 0)
		num = 16;

	for (retry = 0; retry < 2; ++retry) {
		if (num != filter->supported_match_num) {
			uint32_t *new_buf;

			rc = ENOMEM;
			new_buf = rte_realloc(buf, num * sizeof(*buf), 0);
			if (new_buf == NULL) {
				/*
				 * rte_realloc() keeps the old buffer on
				 * failure; keep buf so it is released below
				 * instead of being leaked.
				 */
				goto fail_realloc;
			}
			buf = new_buf;
		}

		rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
		if (rc == 0) {
			filter->supported_match_num = num;
			filter->supported_match = buf;

			return 0;
		} else if (rc != ENOSPC) {
			goto fail_efx_filter_supported_filters;
		}
	}

	SFC_ASSERT(rc == ENOSPC);

fail_efx_filter_supported_filters:
fail_realloc:
	/* Original pointer is not freed by rte_realloc() on failure */
	rte_free(buf);
	filter->supported_match = NULL;
	filter->supported_match_num = 0;

	return rc;
}
static int
cxgbe_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *e)
{
	struct port_info *pi = (struct port_info *)(dev->data->dev_private);
	struct adapter *adapter = pi->adapter;
	u32 i, err = 0;
	u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		rte_memcpy(e->data, buf + e->offset, e->length);
	rte_free(buf);
	return err;
}
static int
i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
{
	struct i40e_virtchnl_vf_resource *vf_res = NULL;
	struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
	uint32_t len = 0;
	int ret = I40E_SUCCESS;

	/* only have 1 VSI by default */
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	      I40E_DEFAULT_VF_VSI_NUM *
	      sizeof(struct i40e_virtchnl_vsi_resource);

	vf_res = rte_zmalloc("i40e_vf_res", len, 0);
	if (vf_res == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate mem\n");
		ret = I40E_ERR_NO_MEMORY;
		vf_res = NULL;
		len = 0;
		goto send_msg;
	}

	vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
				   I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
	vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf;
	vf_res->num_queue_pairs = vf->vsi->nb_qps;
	vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM;

	/* Change below setting if PF host can support more VSIs for VF */
	vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
	/* The VF is assumed to have a single VSI for now, so always return vsi_id 0 */
	vf_res->vsi_res[0].vsi_id = 0;
	vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;

send_msg:
	i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				    ret, (uint8_t *)vf_res, len);

	rte_free(vf_res);

	return ret;
}
/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int i;

	priv_lock(priv);
	DEBUG("%p: closing device \"%s\"",
	      (void *)dev,
	      ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
	/* In case mlx5_dev_stop() has not been called. */
	priv_dev_interrupt_handler_uninstall(priv, dev);
	priv_special_flow_disable_all(priv);
	priv_mac_addrs_disable(priv);
	priv_destroy_hash_rxqs(priv);

	/* Remove flow director elements. */
	priv_fdir_disable(priv);
	priv_fdir_delete_filters_list(priv);

	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		usleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i) {
			struct rxq *rxq = (*priv->rxqs)[i];
			struct rxq_ctrl *rxq_ctrl;

			if (rxq == NULL)
				continue;
			rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
			(*priv->rxqs)[i] = NULL;
			rxq_cleanup(rxq_ctrl);
			rte_free(rxq_ctrl);
		}
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	/* ... remainder of mlx5_dev_close() is not included in this excerpt ... */
/** Setup a queue pair */
static int
openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id)
{
	struct openssl_qp *qp = NULL;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		openssl_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("OPENSSL PMD Queue Pair", sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (openssl_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->processed_ops = openssl_pmd_qp_create_processed_ops_ring(qp,
			qp_conf->nb_descriptors, socket_id);
	if (qp->processed_ops == NULL)
		goto qp_setup_cleanup;

	qp->sess_mp = dev->data->session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	return 0;

qp_setup_cleanup:
	if (qp)
		rte_free(qp);

	return -1;
}