static struct ehci_qh *ehci_qh_alloc(struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qh *qh;
	dma_addr_t dma;

	/* Prefer on-chip i-RAM; fall back to the DMA pool when it is full. */
	dma = usb_malloc(sizeof(struct ehci_qh), flags);
	if (dma != 0)
		qh = (struct ehci_qh *)IO_ADDRESS(dma);
	else
		qh = (struct ehci_qh *)
			dma_pool_alloc(ehci->qh_pool, flags, &dma);
	++g_debug_qH_allocated;
	if (qh == NULL) {
		panic("run out of i-ram for qH allocation\n");
		return qh;	/* not reached: panic() does not return */
	}

	memset(qh, 0, sizeof *qh);
	qh->refcount = 1;
	qh->ehci = ehci;
	qh->qh_dma = dma;
	INIT_LIST_HEAD(&qh->qtd_list);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc(ehci, flags);
	if (qh->dummy == NULL) {
		ehci_dbg(ehci, "no dummy td\n");
		dma_pool_free(ehci->qh_pool, qh, qh->qh_dma);
		qh = NULL;
	}
	return qh;
}
/**
 * _hardware_dequeue: handles a request at hardware level
 * @mEp:  endpoint
 * @mReq: request
 *
 * Returns the number of bytes transferred, or a negative error code.
 */
static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{
	if (mReq->req.status != -EALREADY)
		return -EINVAL;

	if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
		return -EBUSY;

	if (mReq->zptr) {
		/* The extra zero-length TD must be done too; release it. */
		if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
			return -EBUSY;
		dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
		mReq->zptr = NULL;
	}

	mReq->req.status = 0;

	usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);

	mReq->req.status = mReq->ptr->token & TD_STATUS;
	if ((TD_STATUS_HALTED & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;
	else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
		mReq->req.status = -1;

	mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
	mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
	mReq->req.actual = mReq->req.length - mReq->req.actual;
	mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;

	return mReq->req.actual;
}
static void *skb_to_vbb(struct voicebus *vb, struct sk_buff *skb)
{
	int res;
	struct vbb *vbb;
	const int COMMON_HEADER = 30;
	dma_addr_t dma_addr;

	if (skb->len != (VOICEBUS_SFRAME_SIZE + COMMON_HEADER)) {
		dev_warn(&vb->pdev->dev, "Packet of length %d is not the "
			 "required %d.\n", skb->len,
			 VOICEBUS_SFRAME_SIZE + COMMON_HEADER);
		return NULL;
	}

	vbb = dma_pool_alloc(vb->pool, GFP_KERNEL, &dma_addr);
	if (!vbb)
		return NULL;

	vbb->dma_addr = dma_addr;
	res = skb_copy_bits(skb, COMMON_HEADER, vbb, VOICEBUS_SFRAME_SIZE);
	if (res) {
		dev_warn(&vb->pdev->dev, "Failed call to skb_copy_bits.\n");
		dma_pool_free(vb->pool, vbb, vbb->dma_addr);
		return NULL;
	}

	return vbb;
}
static struct ehci_qh *ehci_qh_alloc(struct ehci_hcd *ehci, gfp_t flags)
{
	struct ehci_qh *qh;
	dma_addr_t dma;

	qh = kzalloc(sizeof *qh, GFP_ATOMIC);
	if (!qh)
		goto done;
	qh->hw = (struct ehci_qh_hw *)
		dma_pool_alloc(ehci->qh_pool, flags, &dma);
	if (!qh->hw)
		goto fail;
	memset(qh->hw, 0, sizeof *qh->hw);
	qh->refcount = 1;
	qh->ehci = ehci;
	qh->qh_dma = dma;
	// INIT_LIST_HEAD(&qh->qh_list);
	INIT_LIST_HEAD(&qh->qtd_list);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc(ehci, flags);
	if (qh->dummy == NULL) {
		ehci_dbg(ehci, "no dummy td\n");
		goto fail1;
	}
done:
	return qh;
fail1:
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
	kfree(qh);
	return NULL;
}
static struct ehci_qh *ehci_qh_alloc(struct ehci_hcd *ehci, int flags)
{
	struct ehci_qh *qh;
	dma_addr_t dma;

	qh = (struct ehci_qh *)
		dma_pool_alloc(ehci->qh_pool, flags, &dma);
	if (!qh)
		return qh;

	memset(qh, 0, sizeof *qh);
	kref_init(&qh->kref, qh_destroy);
	qh->ehci = ehci;
	qh->qh_dma = dma;
	// INIT_LIST_HEAD(&qh->qh_list);
	INIT_LIST_HEAD(&qh->qtd_list);

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc(ehci, flags);
	if (qh->dummy == NULL) {
		ehci_dbg(ehci, "no dummy td\n");
		dma_pool_free(ehci->qh_pool, qh, qh->qh_dma);
		qh = NULL;
	}
	return qh;
}
static int __init my_init(void)
{
	/* dma_alloc_coherent method.
	 * NB: a NULL device is accepted only by older kernels; real
	 * drivers pass their own struct device. */
	printk(KERN_INFO "Loading DMA allocation test module\n");
	printk(KERN_INFO "\nTesting dma_alloc_coherent()..........\n\n");
	kbuf = dma_alloc_coherent(NULL, size, &handle, GFP_KERNEL);
	output(kbuf, handle, size, "This is the dma_alloc_coherent() string");
	dma_free_coherent(NULL, size, kbuf, handle);

	/* dma_map/unmap_single */
	printk(KERN_INFO "\nTesting dma_map_single()................\n\n");
	kbuf = kmalloc(size, GFP_KERNEL);
	handle = dma_map_single(NULL, kbuf, size, direction);
	output(kbuf, handle, size, "This is the dma_map_single() string");
	dma_unmap_single(NULL, handle, size, direction);
	kfree(kbuf);

	/* dma_pool method */
	printk(KERN_INFO "\nTesting dma_pool_alloc()..........\n\n");
	mypool = dma_pool_create("mypool", NULL, pool_size, pool_align, 0);
	kbuf = dma_pool_alloc(mypool, GFP_KERNEL, &handle);
	output(kbuf, handle, size, "This is the dma_pool_alloc() string");
	dma_pool_free(mypool, kbuf, handle);
	dma_pool_destroy(mypool);
	return 0;
}
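/*
 * The test module above relies on module-level state and an output()
 * helper that are not shown in the original source. What follows is a
 * minimal sketch of what they might look like, reconstructed purely from
 * the call sites above; every name and size here is an assumption, not
 * part of the original module.
 */
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *kbuf;
static dma_addr_t handle;
static size_t size = PAGE_SIZE;			/* assumed buffer size */
static enum dma_data_direction direction = DMA_BIDIRECTIONAL;
static struct dma_pool *mypool;
static size_t pool_size = 1024;			/* assumed pool block size */
static size_t pool_align = 8;			/* assumed pool alignment */

/* Hypothetical helper: stamp the buffer and report both addresses. */
static void output(void *buf, dma_addr_t dma, size_t sz, const char *string)
{
	strlcpy(buf, string, sz);
	printk(KERN_INFO "virt=%p dma=%pad size=%zu: %s\n",
	       buf, &dma, sz, (char *)buf);
}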
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(sizeof(*fsl_desc) +
			   sizeof(struct fsl_edma_sw_tcd) * sg_len,
			   GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
int stmp3xxx_dma_free_command(int channel,
			      struct stmp3xxx_dma_descriptor *descriptor)
{
	int err = 0;

	if (!IS_VALID_CHANNEL(channel)) {
		err = -ENODEV;
		goto out;
	}
	if (!IS_USED(channel)) {
		err = -EBUSY;
		goto out;
	}

	/* Return the command memory to the pool */
	dma_pool_free(channels[channel].pool, descriptor->command,
		      descriptor->handle);

	/* Initialise descriptor so we're not tempted to use it */
	descriptor->command = NULL;
	descriptor->handle = 0;
	descriptor->virtual_buf_ptr = NULL;
	descriptor->next_descr = NULL;

out:
	WARN_ON(err);	/* after the label, so the error exits warn too */
	return err;
}
static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];

			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];

			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev, pkt->addr[i].addr,
				       pkt->addr[i].length, DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* No page: the fragment came from the header DMA pool. */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
void hcd_buffer_free(struct usb_bus *bus, size_t size, void *addr,
		     dma_addr_t dma)
{
	struct usb_hcd *hcd = bus_to_hcd(bus);
	int i;

	if (!addr)
		return;

	if (!bus->controller->dma_mask &&
	    !(hcd->driver->flags & HCD_LOCAL_MEM)) {
		kfree(addr);
		return;
	}

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (size <= pool_max[i]) {
			dma_pool_free(hcd->pool[i], addr, dma);
			return;
		}
	}
	dma_free_coherent(hcd->self.controller, size, addr, dma);
}
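/*
 * hcd_buffer_free() is the mirror of hcd_buffer_alloc(). A sketch of the
 * allocation side follows, reconstructed from the three-tier scheme
 * visible above (kmalloc for controllers that do no DMA, the per-size
 * dma_pools, then a dma_alloc_coherent() fallback); it is a sketch, not
 * a verbatim quote of the kernel source.
 */
void *hcd_buffer_alloc(struct usb_bus *bus, size_t size, gfp_t mem_flags,
		       dma_addr_t *dma)
{
	struct usb_hcd *hcd = bus_to_hcd(bus);
	int i;

	if (size == 0)
		return NULL;

	/* Some USB hosts just use PIO */
	if (!bus->controller->dma_mask &&
	    !(hcd->driver->flags & HCD_LOCAL_MEM)) {
		*dma = ~(dma_addr_t)0;
		return kmalloc(size, mem_flags);
	}

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (size <= pool_max[i])
			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
	}
	return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
}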
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
					       int sg_len)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = kzalloc(sizeof(*fdesc) +
			sizeof(struct st_fdma_sw_node) * sg_len,
			GFP_NOWAIT);
	if (!fdesc)
		return NULL;

	fdesc->fchan = fchan;
	fdesc->n_nodes = sg_len;
	for (i = 0; i < sg_len; i++) {
		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
				GFP_NOWAIT, &fdesc->node[i].pdesc);
		if (!fdesc->node[i].desc)
			goto err;
	}
	return fdesc;

err:
	while (--i >= 0)
		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
			      fdesc->node[i].pdesc);
	kfree(fdesc);
	return NULL;
}
/**
 * @brief   Initialize & alloc RX buffer
 * @details This function executes;\n
 *            # Create DMA pool\n
 *            # Alloc RX buffer\n
 *            # Call RX buffer clear
 * @param   N/A
 * @retval  0 : Success
 * @retval  -ENOMEM : Error, not enough memory.
 * @note
 */
int felica_rxbuf_init(void)
{
	int i;

	pr_debug(PRT_NAME ": %s\n", __func__);

	rxbuf.dmapool = dma_pool_create(DMAPOOL_NAME, NULL, DMAPOOL_SIZE,
					DMAPOOL_ALIGN,
					DMAPOOL_ALIGN * RXBUF_N);
	if (!rxbuf.dmapool) {
		pr_err(PRT_NAME ": Error. Cannot create DMA pool for RXbuf.\n");
		return -ENOMEM;
	}
	for (i = 0; i < RXBUF_N; i++) {
		rxbuf.slot[i].buf = dma_pool_alloc(rxbuf.dmapool, GFP_KERNEL,
						   &rxbuf.slot[i].dmabase);
		if (!rxbuf.slot[i].buf) {
			pr_err(PRT_NAME ": Error. Not enough memory for RXbuf.\n");
			goto err_alloc_rx_buf;
		}
	}
	felica_rxbuf_clear();
	return 0;

err_alloc_rx_buf:
	/* Free only the slots allocated before the failure. */
	for (i--; i >= 0; i--)
		dma_pool_free(rxbuf.dmapool, rxbuf.slot[i].buf,
			      rxbuf.slot[i].dmabase);
	dma_pool_destroy(rxbuf.dmapool);
	rxbuf.dmapool = NULL;
	return -ENOMEM;
}
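/*
 * A matching teardown would mirror the error path of felica_rxbuf_init():
 * return every slot to the pool, then destroy the pool. Only the init side
 * appears in the original source, so the function below is a sketch and
 * its name is an assumption.
 */
void felica_rxbuf_exit(void)
{
	int i;

	if (!rxbuf.dmapool)
		return;
	for (i = 0; i < RXBUF_N; i++)
		dma_pool_free(rxbuf.dmapool, rxbuf.slot[i].buf,
			      rxbuf.slot[i].dmabase);
	dma_pool_destroy(rxbuf.dmapool);
	rxbuf.dmapool = NULL;
}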
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
static int __init my_init(void)
{
	printk(KERN_INFO "Satish testing DMA module\n");

	printk(KERN_INFO "testing DMA coherent mapping dma_alloc_coherent()\n");
	kbuf = dma_alloc_coherent(NULL, size, &handle, GFP_KERNEL);
	output(kbuf, handle, size, "dma_alloc_coherent string");
	dma_free_coherent(NULL, size, kbuf, handle);

	printk(KERN_INFO "Testing DMA Mapping dma_map_single()\n");
	kbuf = kmalloc(size, GFP_KERNEL);
	/* dma_map_single() takes the CPU buffer, its size, and a direction. */
	handle = dma_map_single(NULL, kbuf, size, direction);
	output(kbuf, handle, size, "this is dma_map_single string");
	dma_unmap_single(NULL, handle, size, direction);
	kfree(kbuf);

	printk(KERN_INFO "Testing DMA Pool method\n");
	mypool = dma_pool_create("mypool", NULL, pool_size, pool_align, 0);
	kbuf = dma_pool_alloc(mypool, GFP_KERNEL, &handle);
	output(kbuf, handle, size, "This is dma_pool_alloc string");
	dma_pool_free(mypool, kbuf, handle);
	dma_pool_destroy(mypool);
	return 0;
}
struct usb_request *usb_ept_alloc_req(struct msm_endpoint *ept,
				      unsigned bufsize, gfp_t gfp_flags)
{
	struct usb_info *ui = ept->ui;
	struct msm_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		goto fail1;

	req->item = dma_pool_alloc(ui->pool, gfp_flags, &req->item_dma);
	if (!req->item)
		goto fail2;

	if (bufsize) {
		req->req.buf = kmalloc(bufsize, gfp_flags);
		if (!req->req.buf)
			goto fail3;
		req->alloced = 1;
	}

	return &req->req;

fail3:
	dma_pool_free(ui->pool, req->item, req->item_dma);
fail2:
	kfree(req);
fail1:
	return NULL;
}
static inline void ehci_qtd_free(struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	/* tony.yu map between PHY addr & BUS addr */
#if defined(CONFIG_ARM) && (MP_USB_MSTAR == 1)
	qtd->qtd_dma = PA2BUS(qtd->qtd_dma);
#endif
	dma_pool_free(ehci->qtd_pool, qtd, qtd->qtd_dma);
}
static void do_free_req(struct usb_info *ui, struct msm_request *req)
{
	if (req->alloced)
		kfree(req->req.buf);

	dma_pool_free(ui->pool, req->item, req->item_dma);
	kfree(req);
}
void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}
void unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;

	/* HI3630++ DX: for SCCC bug */
	/* HI3630: was "if (!areq_ctx->mac_buf_dma_addr)" */
	if (areq_ctx->mac_buf_dma_addr != 0)
	/* HI3630-- */
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		/* HI3630++ DX: for SCCC bug */
		/* HI3630: was "if (!areq_ctx->ccm_iv0_dma_addr)" */
		if (areq_ctx->ccm_iv0_dma_addr != 0)
		/* HI3630-- */
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);

		/* ccm_adata_sg is embedded in the context, so its address
		 * can never be NULL; unmap it unconditionally. */
		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}

	/* HI3630++ DX: for SCCC bug */
	/* HI3630: was "if (!areq_ctx->gen_ctx.iv_dma_addr)" */
	if (areq_ctx->gen_ctx.iv_dma_addr != 0)
	/* HI3630-- */
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);

	/* In case a pool was set, a table was allocated and should be
	 * released */
	if (areq_ctx->mlli_params.curr_pool != NULL) {
		DX_LOG_DEBUG("free MLLI buffer: dma=0x%08lX virt=0x%08X\n",
			     (unsigned long)areq_ctx->mlli_params.mlli_dma_addr,
			     (uint32_t)areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (areq_ctx->assoc_dma_buf_type != DX_DMA_BUF_NULL) {
		DX_LOG_DEBUG("Unmapping sg assoc: req->assoc=0x%08lX\n",
			     (unsigned long)sg_virt(req->assoc));
		dma_unmap_sg(dev, req->assoc, areq_ctx->assoc_nents,
			     DMA_TO_DEVICE);
	}

	DX_LOG_DEBUG("Unmapping sg src: req->src=0x%08lX\n",
		     (unsigned long)sg_virt(req->src));
	dma_unmap_sg(dev, req->src, areq_ctx->in_nents, DMA_BIDIRECTIONAL);

	if (unlikely(req->src != req->dst)) {
		DX_LOG_DEBUG("Unmapping sg dst: req->dst=0x%08lX\n",
			     (unsigned long)sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->out_nents,
			     DMA_BIDIRECTIONAL);
	}
}
static int fdma_resize_nodelist_mem(struct fdma *fdma,
				    struct fdma_xfer_descriptor *desc,
				    unsigned int new_nnodes, gfp_t context)
{
	int old_list_size, new_list_size;
	unsigned int cur_nnodes;
	struct fdma_llu_node *new_nodes;

	/* This holds the number of allocated nodes, which may differ
	 * from the old or new size. It must be maintained so that
	 * free_list works. */
	cur_nnodes = desc->alloced_nodes;

	/* The only resize down we need to support is freeing everything. */
	if (new_nnodes == 0)
		goto free_list;

	/* this happens if the DMA firmware was not loaded yet. */
	if (!fdma->llu_pool)
		return -ENOMEM;

	old_list_size = sizeof(struct fdma_llu_node) * desc->alloced_nodes;
	new_list_size = sizeof(struct fdma_llu_node) * new_nnodes;

	new_nodes = kmalloc(new_list_size, context);
	if (new_nodes == NULL)
		goto free_list;

	if (old_list_size > 0) {
		memcpy(new_nodes, desc->llu_nodes, old_list_size);
		kfree(desc->llu_nodes);
	}
	desc->llu_nodes = new_nodes;

	/* Allocate pool entries for the nodes beyond the old size. */
	for (new_nodes += desc->alloced_nodes; cur_nnodes < new_nnodes;
	     cur_nnodes++, new_nodes++) {
		new_nodes->virt_addr = dma_pool_alloc(fdma->llu_pool, context,
						      &new_nodes->dma_addr);
		if (new_nodes->virt_addr == NULL)
			goto free_list;
	}
	desc->alloced_nodes = new_nnodes;
	return 0;

free_list:
	for (new_nodes = desc->llu_nodes; cur_nnodes;
	     cur_nnodes--, new_nodes++)
		dma_pool_free(fdma->llu_pool, new_nodes->virt_addr,
			      new_nodes->dma_addr);
	kfree(desc->llu_nodes);
	desc->llu_nodes = NULL;
	desc->alloced_nodes = 0;
	return -ENOMEM;
}
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
static inline void ehci_qtd_free(struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
	/* qTDs whose DMA address falls inside the USB i-RAM window were
	 * carved out of i-RAM rather than the DMA pool. */
	if ((qtd->qtd_dma & (USB_IRAM_BASE_ADDR & 0xFFF00000)) ==
	    (USB_IRAM_BASE_ADDR & 0xFFF00000))
		usb_free(qtd->qtd_dma);
	else
		dma_pool_free(ehci->qtd_pool, qtd, qtd->qtd_dma);
	--g_debug_qtd_allocated;
}
static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
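/*
 * For context, the allocation side in uhci-hcd follows the usual pool
 * pattern: grab a TD, record its DMA handle, and initialise the list
 * heads that uhci_free_td() checks above. A sketch based on that pattern,
 * not a verbatim quote of the driver.
 */
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;		/* not yet scheduled in any frame */

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}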
void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;

		/* For op descriptors, the op context's DMA handle is kept
		 * in the descriptor's src field. */
		if (tdma->flags & CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}
/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct ctx_hdr *ctxp;

	if (!ctx)
		return;

	/* The header sits immediately before the pointer handed out. */
	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}
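/*
 * crypto_free_context() only works because the allocator stores a struct
 * ctx_hdr (pool pointer and DMA handle) immediately before the pointer it
 * hands out. A minimal sketch of such an allocator follows, assuming a
 * struct ctx_hdr with the pool/dma fields used above; the function name
 * and signature are assumptions, not the original driver's API.
 */
void *crypto_alloc_context(struct dma_pool *pool, gfp_t flags)
{
	struct ctx_hdr *ctxp;
	dma_addr_t dma;

	ctxp = dma_pool_alloc(pool, flags, &dma);
	if (!ctxp)
		return NULL;

	/* Record where this block came from so free can return it. */
	ctxp->pool = pool;
	ctxp->dma = dma;

	return (u8 *)ctxp + sizeof(struct ctx_hdr);
}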
static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool,
			      fsl_desc->tcd[i].vtcd, fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct st_fdma_desc *fdesc;
	int i;

	fdesc = to_st_fdma_desc(vdesc);
	for (i = 0; i < fdesc->n_nodes; i++)
		dma_pool_free(fdesc->fchan->node_pool,
			      fdesc->node[i].desc, fdesc->node[i].pdesc);
	kfree(fdesc);
}
static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		ehci_dbg(ehci, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		ehci_qtd_free(ehci, qh->dummy);
	dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
	kfree(qh);
}