static void fthd_buffer_cleanup(struct vb2_buffer *vb)
{
	struct fthd_private *dev_priv = vb2_get_drv_priv(vb->vb2_queue);
	struct h2t_buf_ctx *ctx = NULL;
	int i;

	pr_debug("%p\n", vb);

	/* Find the h2t context bound to this vb2 buffer. */
	for (i = 0; i < FTHD_BUFFERS; i++) {
		if (dev_priv->h2t_bufs[i].vb == vb) {
			ctx = dev_priv->h2t_bufs + i;
			break;
		}
	}

	/* Nothing to do if the buffer was never set up or is already free. */
	if (!ctx || ctx->state == BUF_FREE)
		return;

	/* Release the DMA descriptor object and the per-plane IOMMU mappings. */
	ctx->state = BUF_FREE;
	ctx->vb = NULL;
	isp_mem_destroy(ctx->dma_desc_obj);
	for (i = 0; i < dev_priv->fmt.planes; i++) {
		iommu_free(dev_priv, ctx->plane[i]);
		ctx->plane[i] = NULL;
	}
	ctx->dma_desc_obj = NULL;
}
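/*
 * Illustrative sketch, not part of the original source: a cleanup callback
 * like the one above is normally wired into videobuf2 through a struct
 * vb2_ops table, so the vb2 core invokes it when a buffer is torn down.
 * The sibling callbacks named below (fthd_buffer_queue_setup,
 * fthd_buffer_init, fthd_buffer_queue) are hypothetical placeholders for
 * the driver's other vb2 hooks.
 */
static const struct vb2_ops fthd_vb2_ops = {
	.queue_setup	= fthd_buffer_queue_setup,	/* hypothetical */
	.buf_init	= fthd_buffer_init,		/* hypothetical */
	.buf_cleanup	= fthd_buffer_cleanup,		/* defined above */
	.buf_queue	= fthd_buffer_queue,		/* hypothetical */
};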
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}
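/*
 * Hedged usage sketch, not from the original source: iommu_unmap_single()
 * is the teardown half of a map/unmap pair. The iommu_map_single()
 * signature and its DMA_ERROR_CODE failure convention assumed here are
 * assumptions about the matching allocation-side helper, and
 * example_dma_roundtrip() is a hypothetical caller.
 */
static int example_dma_roundtrip(struct iommu_table *tbl, void *buf,
				 size_t len, unsigned long mask)
{
	dma_addr_t handle;

	/* Assumed counterpart: map the buffer for device access. */
	handle = iommu_map_single(tbl, buf, len, mask, DMA_TO_DEVICE);
	if (handle == DMA_ERROR_CODE)
		return -ENOMEM;

	/* ... device performs DMA from the buffer here ... */

	/* Size and direction must match the original mapping. */
	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
	return 0;
}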
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		/* Round up to whole IOMMU pages before computing the count. */
		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);

		/* size is already page-aligned from above. */
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
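/*
 * Hedged usage sketch, not from the original source: iommu_free_coherent()
 * releases memory obtained from an allocation-side counterpart. The
 * iommu_alloc_coherent() signature used here (mask, gfp flags, NUMA node)
 * is an assumption about that counterpart, and example_coherent_cycle()
 * is a hypothetical caller.
 */
static void example_coherent_cycle(struct iommu_table *tbl, size_t len,
				   unsigned long mask, int node)
{
	dma_addr_t handle;
	void *vaddr;

	/* Assumed counterpart: returns a kernel virtual address. */
	vaddr = iommu_alloc_coherent(tbl, len, &handle, mask,
				     GFP_KERNEL, node);
	if (!vaddr)
		return;

	/* ... use the buffer for coherent DMA ... */

	/* The same size must be passed back so the page counts match. */
	iommu_free_coherent(tbl, len, vaddr, handle);
}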