/*
 * m2m1shot_unmap_dma_buf - tear down the DMA view of one buffer plane.
 * @dev:   device the plane was mapped for
 * @plane: per-plane DMA bookkeeping (dmabuf/attachment/sgt/bytes_used)
 * @dir:   DMA direction the plane was mapped with
 *
 * dma-buf planes are synced and then detached from the device mapping;
 * userptr planes (no dmabuf) only need their scatter-gather list synced
 * back for CPU access.
 *
 * NOTE(review): the dma-buf branch syncs *for device* on the unmap path,
 * mirroring the vendor driver — confirm this is the intended cache
 * maintenance direction.
 */
void m2m1shot_unmap_dma_buf(struct device *dev,
			    struct m2m1shot_buffer_plane_dma *plane,
			    enum dma_data_direction dir)
{
	if (!plane->dmabuf) {
		/* userptr buffer: CPU cache sync only, nothing to detach */
		exynos_ion_sync_sg_for_cpu(dev, plane->bytes_used,
					   plane->sgt, dir);
		return;
	}

	exynos_ion_sync_dmabuf_for_device(dev, plane->dmabuf,
					  plane->bytes_used, dir);
	dma_buf_unmap_attachment(plane->attachment, plane->sgt, dir);
}
/*
 * decon_map_sec_dma_buf - attach and IOVMM-map a dma-buf for DECON DMA.
 * @dbuf:  dma-buf to map for the display controller
 * @plane: hardware plane index; must be in [0, MAX_BUF_PLANE_CNT)
 *
 * Attaches @dbuf to the DECON device, maps its scatter-gather table for
 * DMA_TO_DEVICE, maps it into the device's IO virtual address space and
 * performs a cache sync for the device. Returns the IO virtual address
 * on success.
 *
 * NOTE(review): 'dma' is not declared in this function — presumably a
 * file-scope struct holding the mapping state (single outstanding
 * mapping at a time). Confirm against the rest of the file.
 * NOTE(review): error returns are inconsistent: the parameter check
 * returns -EINVAL (cast into dma_addr_t, an unsigned type) while the
 * goto cleanup paths return 0 — verify what callers actually test for.
 */
dma_addr_t decon_map_sec_dma_buf(struct dma_buf *dbuf, int plane)
{
	struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */

	/* reject NULL buffers and out-of-range plane indices */
	if (!dbuf || (plane >= MAX_BUF_PLANE_CNT) || (plane < 0))
		return -EINVAL;

	dma.ion_handle = NULL;
	dma.fence = NULL;
	dma.dma_buf = dbuf;

	dma.attachment = dma_buf_attach(dbuf, decon->dev);
	if (IS_ERR(dma.attachment)) {
		decon_err("dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma.attachment));
		goto err_buf_map_attach;
	}

	dma.sg_table = dma_buf_map_attachment(dma.attachment, DMA_TO_DEVICE);
	if (IS_ERR(dma.sg_table)) {
		decon_err("dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma.sg_table));
		goto err_buf_map_attachment;
	}

	/* map the whole buffer into the device's IO virtual address space */
	dma.dma_addr = ion_iovmm_map(dma.attachment, 0, dma.dma_buf->size,
			DMA_TO_DEVICE, plane);
	if (IS_ERR_VALUE(dma.dma_addr)) {
		decon_err("iovmm_map() failed: %pa\n", &dma.dma_addr);
		goto err_iovmm_map;
	}

	/* flush CPU caches so the device sees the buffer contents */
	exynos_ion_sync_dmabuf_for_device(decon->dev, dma.dma_buf,
			dma.dma_buf->size, DMA_TO_DEVICE);

	return dma.dma_addr;

	/* unwind in reverse order of acquisition */
err_iovmm_map:
	dma_buf_unmap_attachment(dma.attachment, dma.sg_table, DMA_TO_DEVICE);
err_buf_map_attachment:
	dma_buf_detach(dma.dma_buf, dma.attachment);
err_buf_map_attach:
	return 0;
}
/*
 * m2m1shot_map_dma_buf - make one buffer plane visible to the device.
 * @dev:   device the plane is mapped for
 * @plane: per-plane DMA bookkeeping (dmabuf/attachment/sgt/bytes_used)
 * @dir:   DMA direction to map with
 *
 * dma-buf planes get their attachment mapped (filling @plane->sgt) and
 * are then cache-synced for the device; userptr planes already carry a
 * scatter-gather list and only need the device-direction cache sync.
 *
 * Returns 0 on success or the PTR_ERR() of a failed attachment mapping.
 */
int m2m1shot_map_dma_buf(struct device *dev,
			 struct m2m1shot_buffer_plane_dma *plane,
			 enum dma_data_direction dir)
{
	if (plane->dmabuf) {
		plane->sgt = dma_buf_map_attachment(plane->attachment, dir);
		if (IS_ERR(plane->sgt)) {
			/* fixed typo: "attacment" -> "attachment" */
			dev_err(dev, "%s: failed to map attachment of dma_buf\n",
				__func__);
			return PTR_ERR(plane->sgt);
		}
		exynos_ion_sync_dmabuf_for_device(dev, plane->dmabuf,
						  plane->bytes_used, dir);
	} else { /* userptr */
		exynos_ion_sync_sg_for_device(dev, plane->bytes_used,
					      plane->sgt, dir);
	}

	return 0;
}