static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; /* Allocate the driver's pointer to receive buffer status */ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb_stts; return 0; err_rb_stts: dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); rxq->bd_dma = 0; rxq->bd = NULL; err_bd: return -ENOMEM; }
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) { struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); if (unlikely(!params->p || !params->g)) return -EINVAL; if (qat_dh_check_params_length(params->p_size << 3)) return -EINVAL; ctx->p_size = params->p_size; ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) return -ENOMEM; memcpy(ctx->p, params->p, ctx->p_size); /* If g equals 2 don't copy it */ if (params->g_size == 1 && *(char *)params->g == 0x02) { ctx->g2 = true; return 0; } ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); if (!ctx->g) { dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); ctx->p = NULL; return -ENOMEM; } memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, params->g_size); return 0; }
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key, unsigned int keylen) { struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev; spin_lock(&ctx->lock); if (ctx->enc_cd) { /* rekeying */ dev = &GET_DEV(ctx->inst->accel_dev); memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); } else { /* new key */ int node = get_current_node(); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(node); if (!inst) { spin_unlock(&ctx->lock); return -EINVAL; } dev = &GET_DEV(inst->accel_dev); ctx->inst = inst; ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), &ctx->enc_cd_paddr, GFP_ATOMIC); if (!ctx->enc_cd) { spin_unlock(&ctx->lock); return -ENOMEM; } ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), &ctx->dec_cd_paddr, GFP_ATOMIC); if (!ctx->dec_cd) { spin_unlock(&ctx->lock); goto out_free_enc; } } spin_unlock(&ctx->lock); if (qat_alg_aead_init_sessions(tfm, key, keylen, ICP_QAT_HW_CIPHER_CBC_MODE)) goto out_free_all; return 0; out_free_all: memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); dma_free_coherent(dev, sizeof(struct qat_alg_cd), ctx->dec_cd, ctx->dec_cd_paddr); ctx->dec_cd = NULL; out_free_enc: memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); dma_free_coherent(dev, sizeof(struct qat_alg_cd), ctx->enc_cd, ctx->enc_cd_paddr); ctx->enc_cd = NULL; return -ENOMEM; }
int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) { struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ctx->key_sz = vlen; ret = -EINVAL; /* invalid key size provided */ if (!qat_rsa_enc_fn_id(ctx->key_sz)) goto err; ret = -ENOMEM; ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); if (!ctx->n) goto err; memcpy(ctx->n, ptr, ctx->key_sz); return 0; err: ctx->key_sz = 0; ctx->n = NULL; return ret; }
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); struct dh params; int ret; if (crypto_dh_decode_key(buf, len, &params) < 0) return -EINVAL; /* Free old secret if any */ qat_dh_clear_ctx(dev, ctx); ret = qat_dh_set_params(ctx, &params); if (ret < 0) return ret; ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, GFP_KERNEL); if (!ctx->xa) { qat_dh_clear_ctx(dev, ctx); return -ENOMEM; } memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key, params.key_size); return 0; }
int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; while (!*ptr && vlen) { ptr++; vlen--; } if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) { ctx->e = NULL; return -EINVAL; } ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); if (!ctx->e) { ctx->e = NULL; return -ENOMEM; } memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen); return 0; }
int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR]; void __iomem *csr = pmisc->virt_addr; void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; uint64_t reg_val; admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!admin) return -ENOMEM; admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, &admin->phy_addr, GFP_KERNEL); if (!admin->virt_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); kfree(admin); return -ENOMEM; } reg_val = (uint64_t)admin->phy_addr; ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); mutex_init(&admin->lock); admin->mailbox_addr = mailbox; accel_dev->admin = admin; return 0; }
int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ret = -EINVAL; if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) goto err; /* In FIPS mode only allow key size 2K & 3K */ if (fips_enabled && (vlen != 256 && vlen != 384)) { pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); goto err; } ret = -ENOMEM; ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->d) goto err; memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); return 0; err: ctx->d = NULL; return ret; }
static int cmdq_common_init(struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = cmdq->ndev; u32 qsize; qsize = (ndev->qlen) * cmdq->instr_size; cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev), (qsize + PKT_IN_ALIGN), &cmdq->dma_unaligned, GFP_KERNEL); if (!cmdq->head_unaligned) return -ENOMEM; cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); cmdq->qsize = (qsize + PKT_IN_ALIGN); spin_lock_init(&cmdq->response_lock); spin_lock_init(&cmdq->cmdq_lock); spin_lock_init(&cmdq->backlog_lock); INIT_LIST_HEAD(&cmdq->response_head); INIT_LIST_HEAD(&cmdq->backlog_head); INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work); atomic_set(&cmdq->pending_count, 0); atomic_set(&cmdq->backlog_count, 0); return 0; }
int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen) { struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ret = -EINVAL; if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) goto err; ret = -ENOMEM; ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); if (!ctx->d) goto err; memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen); return 0; err: ctx->d = NULL; return ret; }
/** * Initializes both (Rx/Tx) DMA FIFOs and related management structures */ static int ccat_eth_priv_init_dma(struct ccat_eth_priv *priv) { struct ccat_dma_mem *const dma = &priv->dma_mem; struct pci_dev *const pdev = priv->func->ccat->pdev; void __iomem *const bar_2 = priv->func->ccat->bar_2; const u8 rx_chan = priv->func->info.rx_dma_chan; const u8 tx_chan = priv->func->info.tx_dma_chan; int status = 0; dma->dev = &pdev->dev; dma->size = CCAT_ALIGNMENT * 3; dma->base = dma_zalloc_coherent(dma->dev, dma->size, &dma->phys, GFP_KERNEL); if (!dma->base || !dma->phys) { pr_err("init DMA memory failed.\n"); return -ENOMEM; } priv->rx_fifo.ops = &dma_rx_fifo_ops; status = ccat_dma_init(dma, rx_chan, bar_2, &priv->rx_fifo); if (status) { pr_info("init RX DMA memory failed.\n"); ccat_dma_free(priv); return status; } priv->tx_fifo.ops = &dma_tx_fifo_ops; status = ccat_dma_init(dma, tx_chan, bar_2, &priv->tx_fifo); if (status) { pr_info("init TX DMA memory failed.\n"); ccat_dma_free(priv); return status; } return ccat_hw_disable_mac_filter(priv); }
static int ftmac100_alloc_buffers(struct ftmac100 *priv) { int i; priv->descs = dma_zalloc_coherent(priv->dev, sizeof(struct ftmac100_descs), &priv->descs_dma_addr, GFP_KERNEL); if (!priv->descs) return -ENOMEM; /* initialize RX ring */ ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); for (i = 0; i < RX_QUEUE_ENTRIES; i++) { struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) goto err; } /* initialize TX ring */ ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); return 0; err: ftmac100_free_buffers(priv); return -ENOMEM; }
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *ring; u16 size; ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) return NULL; ring->ndev = ndev; size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL); if (!ring->desc_addr) goto err; ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info), GFP_KERNEL); if (!ring->pkt_info) goto err; xge_setup_desc(ring); return ring; err: xge_delete_desc_ring(ndev, ring); return NULL; }
static void pothos_zynq_dma_buff_alloc(struct platform_device *pdev, pothos_zynq_dma_buff_t *buff) { dma_addr_t phys_addr = 0; void *virt_addr = dma_zalloc_coherent(&pdev->dev, buff->bytes, &phys_addr, GFP_KERNEL); buff->paddr = phys_addr; buff->kaddr = virt_addr; buff->uaddr = NULL; /* filled by user with mmap */ }
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, unsigned int len, gfp_t gfp_flags) { buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, &buffer->dma_addr, gfp_flags); if (!buffer->addr) return -ENOMEM; buffer->len = len; return 0; }
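Each dma_zalloc_coherent() call in these helpers needs a matching dma_free_coherent() on the teardown path. As a minimal sketch of the complementary free helper for the efx_buffer case above (the helper name and exact behaviour are assumptions here, not necessarily the driver's code):

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		/* release the coherent mapping set up by efx_nic_alloc_buffer() */
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}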
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct xge_desc_ring *tx_ring; struct xge_raw_desc *raw_desc; dma_addr_t dma_addr; u64 addr_lo, addr_hi; void *pkt_buf; u8 tail; u16 len; tx_ring = pdata->tx_ring; tail = tx_ring->tail; len = skb_headlen(skb); raw_desc = &tx_ring->raw_desc[tail]; if (!is_tx_slot_available(raw_desc)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; } /* Packet buffers should be 64B aligned */ pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, GFP_ATOMIC); if (unlikely(!pkt_buf)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } memcpy(pkt_buf, skb->data, len); addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1)); addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1)); raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) | SET_BITS(NEXT_DESC_ADDRH, addr_hi) | SET_BITS(PKT_ADDRH, upper_32_bits(dma_addr))); tx_ring->pkt_info[tail].skb = skb; tx_ring->pkt_info[tail].dma_addr = dma_addr; tx_ring->pkt_info[tail].pkt_buf = pkt_buf; dma_wmb(); raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) | SET_BITS(PKT_SIZE, len) | SET_BITS(E, 0)); skb_tx_timestamp(skb); xge_wr_csr(pdata, DMATXCTRL, 1); tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1); return NETDEV_TX_OK; }
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), size, &ring->desc_dma_addr, GFP_KERNEL); if (!ring->desc) return -ENOMEM; return 0; }
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) { struct mtk_ring **ring = cryp->ring; int i, err = -ENOMEM; for (i = 0; i < MTK_RING_MAX; i++) { ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); if (!ring[i]) goto err_cleanup; ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, MTK_DESC_RING_SZ, &ring[i]->cmd_dma, GFP_KERNEL); if (!ring[i]->cmd_base) goto err_cleanup; ring[i]->res_base = dma_zalloc_coherent(cryp->dev, MTK_DESC_RING_SZ, &ring[i]->res_dma, GFP_KERNEL); if (!ring[i]->res_base) goto err_cleanup; ring[i]->cmd_next = ring[i]->cmd_base; ring[i]->res_next = ring[i]->res_base; } return 0; err_cleanup: for (; i--; ) { dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, ring[i]->res_base, ring[i]->res_dma); dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, ring[i]->cmd_base, ring[i]->cmd_dma); kfree(ring[i]); } return err; }
/** * hinic_init_rq - Initialize HW Receive Queue * @rq: HW Receive Queue * @hwif: HW Interface for accessing HW * @wq: Work Queue for the data of the RQ * @entry: msix entry for rq * * Return 0 - Success, negative - Failure **/ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry) { struct pci_dev *pdev = hwif->pdev; size_t pi_size; int err; rq->hwif = hwif; rq->wq = wq; rq->irq = entry->vector; rq->msix_entry = entry->entry; rq->buf_sz = HINIC_RX_BUF_SZ; err = alloc_rq_skb_arr(rq); if (err) { dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); return err; } err = alloc_rq_cqe(rq); if (err) { dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); goto err_alloc_rq_cqe; } /* HW requirements: Must be at least 32 bit */ pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, &rq->pi_dma_addr, GFP_KERNEL); if (!rq->pi_virt_addr) { dev_err(&pdev->dev, "Failed to allocate PI address\n"); err = -ENOMEM; goto err_pi_virt; } return 0; err_pi_virt: free_rq_cqe(rq); err_alloc_rq_cqe: free_rq_skb_arr(rq); return err; }
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev, size_t size, dma_addr_t *dma_handle, int node) { struct mlx5_priv *priv = &dev->priv; int original_node; void *cpu_handle; mutex_lock(&priv->alloc_mutex); original_node = dev_to_node(&dev->pdev->dev); set_dev_node(&dev->pdev->dev, node); cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, dma_handle, GFP_KERNEL); set_dev_node(&dev->pdev->dev, original_node); mutex_unlock(&priv->alloc_mutex); return cpu_handle; }
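mlx5_dma_zalloc_coherent_node() temporarily re-homes the PCI device to the requested NUMA node so dma_zalloc_coherent() returns node-local memory, then restores the original node under alloc_mutex. A hypothetical caller (illustrative only, not the mlx5 API) that allocates a buffer near a given CPU might look like:

/* Hypothetical helper: allocate coherent memory close to the CPU that owns a queue. */
static void *alloc_queue_buf_near_cpu(struct mlx5_core_dev *dev, size_t size,
				      dma_addr_t *dma_handle, int cpu)
{
	int node = cpu_to_node(cpu);

	return mlx5_dma_zalloc_coherent_node(dev, size, dma_handle, node);
}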
static struct ath10k_ce_ring * ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) { struct ath10k_ce_ring *dest_ring; u32 nentries; dma_addr_t base_addr; nentries = roundup_pow_of_two(attr->dest_nentries); dest_ring = kzalloc(sizeof(*dest_ring) + (nentries * sizeof(*dest_ring->per_transfer_context)), GFP_KERNEL); if (dest_ring == NULL) return ERR_PTR(-ENOMEM); dest_ring->nentries = nentries; dest_ring->nentries_mask = nentries - 1; /* * Legacy platforms that do not support cache * coherent DMA are unsupported */ dest_ring->base_addr_owner_space_unaligned = dma_zalloc_coherent(ar->dev, (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), &base_addr, GFP_KERNEL); if (!dest_ring->base_addr_owner_space_unaligned) { kfree(dest_ring); return ERR_PTR(-ENOMEM); } dest_ring->base_addr_ce_space_unaligned = base_addr; dest_ring->base_addr_owner_space = PTR_ALIGN( dest_ring->base_addr_owner_space_unaligned, CE_DESC_RING_ALIGN); dest_ring->base_addr_ce_space = ALIGN( dest_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); return dest_ring; }
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, struct scatterlist *sghead, u32 pages, u32 pg_size) { struct scatterlist *sg; bool is_umem = false; int i; /* page ptr arrays */ pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL); if (!pbl->pg_arr) return -ENOMEM; pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL); if (!pbl->pg_map_arr) { kfree(pbl->pg_arr); pbl->pg_arr = NULL; return -ENOMEM; } pbl->pg_count = 0; pbl->pg_size = pg_size; if (!sghead) { for (i = 0; i < pages; i++) { pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, pbl->pg_size, &pbl->pg_map_arr[i], GFP_KERNEL); if (!pbl->pg_arr[i]) goto fail; pbl->pg_count++; } } else { i = 0; is_umem = true; for_each_sg(sghead, sg, pages, i) { pbl->pg_map_arr[i] = sg_dma_address(sg); pbl->pg_arr[i] = sg_virt(sg); if (!pbl->pg_arr[i]) goto fail; pbl->pg_count++; } } return 0; fail: __free_pbl(pdev, pbl, is_umem); return -ENOMEM; }
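The fail: label above unwinds the partially built PBL. A minimal sketch of what __free_pbl() could look like for the coherent-allocation case (details are assumptions, not necessarily the exact bnxt_re helper):

static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	int i;

	if (!is_umem) {
		/* Pages came from dma_zalloc_coherent(), so return them the same way. */
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  pbl->pg_arr[i],
						  pbl->pg_map_arr[i]);
		}
	}
	/* umem pages belong to the caller; only the bookkeeping arrays are freed here. */
	kfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	kfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}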
/* * allocate dram shared table, it is an aligned memory * block of ICT_SIZE. * also reset all data related to ICT table interrupt. */ int iwl_pcie_alloc_ict(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->ict_tbl = dma_zalloc_coherent(trans->dev, ICT_SIZE, &trans_pcie->ict_tbl_dma, GFP_KERNEL); if (!trans_pcie->ict_tbl) return -ENOMEM; /* just an API sanity check ... it is guaranteed to be aligned */ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { iwl_pcie_free_ict(trans); return -EINVAL; } return 0; }
int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct adf_bar *pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)]; void __iomem *csr = pmisc->virt_addr; void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET; u64 reg_val; admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!admin) return -ENOMEM; admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, &admin->phy_addr, GFP_KERNEL); if (!admin->virt_addr) { dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); kfree(admin); return -ENOMEM; } admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev), (void *) const_tab, 1024, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&GET_DEV(accel_dev), admin->const_tbl_addr))) { dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, admin->virt_addr, admin->phy_addr); kfree(admin); return -ENOMEM; } reg_val = (u64)admin->phy_addr; ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32); ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val); mutex_init(&admin->lock); admin->mailbox_addr = mailbox; accel_dev->admin = admin; return 0; }
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( struct bnxt_qplib_rcfw *rcfw, u32 size) { struct bnxt_qplib_rcfw_sbuf *sbuf; sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); if (!sbuf) return NULL; sbuf->size = size; sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, &sbuf->dma_addr, GFP_ATOMIC); if (!sbuf->sb) goto bail; return sbuf; bail: kfree(sbuf); return NULL; }
static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, u32 buf_len, void *buf) { struct be_dma_mem read_cmd; u32 read_len = 0, total_read_len = 0, chunk_size; u32 eof = 0; u8 addn_status; int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, &read_cmd.dma, GFP_ATOMIC); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure while reading dump\n"); return -ENOMEM; } while ((total_read_len < buf_len) && !eof) { chunk_size = min_t(u32, (buf_len - total_read_len), LANCER_READ_FILE_CHUNK); chunk_size = ALIGN(chunk_size, 4); status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, total_read_len, file_name, &read_len, &eof, &addn_status); if (!status) { memcpy(buf + total_read_len, read_cmd.va, read_len); total_read_len += read_len; eof &= LANCER_READ_FILE_EOF_MASK; } else { status = -EIO; break; } } dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, read_cmd.dma); return status; }
static int alloc_ringmemory(struct b43_dmaring *ring) { /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K * alignment and 8K buffers for 64-bit DMA with 8K alignment. * In practice we could use smaller buffers for the latter, but the * alignment is really important because of the hardware bug. If bit * 0x00001000 is used in DMA address, some hardware (like BCM4331) * copies that bit into B43_DMA64_RXSTATUS and we get false values from * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use * more than 256 slots for ring. */ u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, ring_mem_size, &(ring->dmabase), GFP_KERNEL); if (!ring->descbase) return -ENOMEM; return 0; }
/** * alloc_rq_cqe - allocate rq completion queue elements * @rq: HW Receive Queue * * Return 0 - Success, negative - Failure **/ static int alloc_rq_cqe(struct hinic_rq *rq) { struct hinic_hwif *hwif = rq->hwif; struct pci_dev *pdev = hwif->pdev; size_t cqe_dma_size, cqe_size; struct hinic_wq *wq = rq->wq; int j, i; cqe_size = wq->q_depth * sizeof(*rq->cqe); rq->cqe = vzalloc(cqe_size); if (!rq->cqe) return -ENOMEM; cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); rq->cqe_dma = vzalloc(cqe_dma_size); if (!rq->cqe_dma) goto err_cqe_dma_arr_alloc; for (i = 0; i < wq->q_depth; i++) { rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, sizeof(*rq->cqe[i]), &rq->cqe_dma[i], GFP_KERNEL); if (!rq->cqe[i]) goto err_cqe_alloc; } return 0; err_cqe_alloc: for (j = 0; j < i; j++) dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], rq->cqe_dma[j]); vfree(rq->cqe_dma); err_cqe_dma_arr_alloc: vfree(rq->cqe); return -ENOMEM; }
bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev) { struct stats_mem *mem = &dev->stats_mem; mutex_init(&dev->stats_lock); /* Alloc mbox command mem*/ mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), sizeof(struct ocrdma_rdma_stats_resp)); mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, &mem->pa, GFP_KERNEL); if (!mem->va) { pr_err("%s: stats mbox allocation failed\n", __func__); return false; } /* Alloc debugfs mem */ mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL); if (!mem->debugfs_mem) return false; return true; }
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag, const void *value, size_t vlen) { struct qat_rsa_ctx *ctx = context; struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); const char *ptr = value; int ret; while (!*ptr && vlen) { ptr++; vlen--; } ctx->key_sz = vlen; ret = -EINVAL; /* In FIPS mode only allow key size 2K & 3K */ if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) { pr_err("QAT: RSA: key size not allowed in FIPS mode\n"); goto err; } /* invalid key size provided */ if (!qat_rsa_enc_fn_id(ctx->key_sz)) goto err; ret = -ENOMEM; ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); if (!ctx->n) goto err; memcpy(ctx->n, ptr, ctx->key_sz); return 0; err: ctx->key_sz = 0; ctx->n = NULL; return ret; }