static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	struct device *dev = g2d->dev;

	exynos_drm_iommu_unmap(dev, g2d->cmdlist_pool_map);
	kfree(g2d->cmdlist_node);
	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE,
			  g2d->cmdlist_pool_virt, g2d->cmdlist_pool);
}
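/*
 * For reference: a minimal sketch of the g2d_data fields the helpers above and
 * below touch, inferred from how they are used here. It is not the driver's
 * full definition; the real structure has more members and may differ in types
 * and layout. The cmdlist_mutex member is an assumption made for the locking
 * example further down.
 */
struct g2d_data {
	struct device			*dev;
	struct exynos_drm_subdrv	subdrv;
	struct mutex			cmdlist_mutex;		/* assumed: protects free_cmdlist */
	void				*cmdlist_pool_virt;	/* CPU address from dma_alloc_coherent() */
	dma_addr_t			cmdlist_pool;		/* DMA handle of the pool */
	dma_addr_t			cmdlist_pool_map;	/* device address mapped through the iommu */
	struct g2d_cmdlist_node		*cmdlist_node;		/* bookkeeping array, one entry per cmdlist */
	struct list_head		free_cmdlist;		/* nodes not currently queued to the engine */
	/* further fields omitted */
};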
static int g2d_init_cmdlist(struct g2d_data *g2d,
			    struct exynos_drm_private *drm_priv)
{
	struct device *dev = g2d->dev;
	struct g2d_cmdlist_node *node;
	int nr;
	int ret;

	g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
						    &g2d->cmdlist_pool,
						    GFP_KERNEL);
	if (!g2d->cmdlist_pool_virt) {
		dev_err(dev, "failed to allocate dma memory\n");
		return -ENOMEM;
	}

	/*
	 * Allocate device address space for the command list pool and map all
	 * pages contained in its sg list into the iommu page table. The
	 * command list pool is also accessed by the DMA engine through this
	 * device address via the iommu.
	 */
	g2d->cmdlist_pool_map = exynos_drm_iommu_map(drm_priv->vmm,
						     g2d->cmdlist_pool,
						     G2D_CMDLIST_POOL_SIZE);
	if (!g2d->cmdlist_pool_map) {
		dev_err(dev, "failed to map into iommu\n");
		ret = -EFAULT;
		goto err;
	}

	/* kcalloc() already multiplies by the element size. */
	node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
	if (!node) {
		dev_err(dev, "failed to allocate memory\n");
		ret = -ENOMEM;
		goto err_iommu_unmap;
	}

	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
		node[nr].cmdlist =
			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
		node[nr].dma_addr =
			g2d->cmdlist_pool_map + nr * G2D_CMDLIST_SIZE;
		list_add_tail(&node[nr].list, &g2d->free_cmdlist);
	}

	/* Remember the array so g2d_fini_cmdlist() can free it. */
	g2d->cmdlist_node = node;

	return 0;

err_iommu_unmap:
	exynos_drm_iommu_unmap(drm_priv->vmm, g2d->cmdlist_pool_map);
err:
	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
			  g2d->cmdlist_pool);
	return ret;
}
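/*
 * Illustrative sketch (not part of the code shown above): how a caller would
 * pull a node off the free_cmdlist populated by g2d_init_cmdlist() and hand it
 * back once the hardware is done with it. The helper names and the
 * cmdlist_mutex field are assumptions made for this example.
 */
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
{
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	if (list_empty(&g2d->free_cmdlist)) {
		dev_err(g2d->dev, "no free cmdlist\n");
		mutex_unlock(&g2d->cmdlist_mutex);
		return NULL;
	}

	/* Take the first unused node out of the free list. */
	node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
				list);
	list_del_init(&node->list);
	mutex_unlock(&g2d->cmdlist_mutex);

	return node;
}

static void g2d_put_cmdlist(struct g2d_data *g2d,
			    struct g2d_cmdlist_node *node)
{
	/* Return the node to the pool for reuse. */
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}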
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
	struct exynos_drm_private *drm_priv;

	drm_priv = subdrv->drm_dev->dev_private;

	if (drm_priv->vmm)
		exynos_drm_iommu_unmap(drm_priv->vmm, g2d->cmdlist_pool_map);

	kfree(g2d->cmdlist_node);
	dma_free_coherent(g2d->dev, G2D_CMDLIST_POOL_SIZE,
			  g2d->cmdlist_pool_virt, g2d->cmdlist_pool);
}
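/*
 * Illustrative sketch: how the init/fini pair would typically be paired on a
 * hypothetical probe/remove path. Everything except
 * g2d_init_cmdlist()/g2d_fini_cmdlist() is an assumption for this example;
 * teardown simply mirrors setup (iommu unmap first, then the node array and
 * the coherent pool).
 */
static int g2d_example_probe(struct g2d_data *g2d,
			     struct exynos_drm_private *drm_priv)
{
	int ret;

	INIT_LIST_HEAD(&g2d->free_cmdlist);
	mutex_init(&g2d->cmdlist_mutex);

	ret = g2d_init_cmdlist(g2d, drm_priv);
	if (ret < 0) {
		dev_err(g2d->dev, "failed to initialize cmdlist pool\n");
		return ret;
	}

	return 0;
}

static void g2d_example_remove(struct g2d_data *g2d)
{
	/* Releases the iommu mapping, the node array and the DMA pool. */
	g2d_fini_cmdlist(g2d);
}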