/*
 * mtk_drm_gem_create - allocate a DMA-backed GEM object.
 * @dev:        DRM device the object will belong to.
 * @size:       requested buffer size in bytes.
 * @alloc_kmap: when false, DMA_ATTR_NO_KERNEL_MAPPING is set so no kernel
 *              virtual mapping is created for the buffer.
 *
 * Allocates the backing storage with dma_alloc_attrs() (write-combined)
 * and builds an sg_table describing it.  The buffer and the sg_table are
 * owned by the returned object.
 *
 * Returns the new object on success or an ERR_PTR() on failure.
 */
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   unsigned long size, bool alloc_kmap)
{
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	init_dma_attrs(&mtk_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);

	if (!alloc_kmap)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);

	mtk_gem->kvaddr = dma_alloc_attrs(dev->dev, obj->size,
					  (dma_addr_t *)&mtk_gem->dma_addr,
					  GFP_KERNEL, &mtk_gem->dma_attrs);
	if (!mtk_gem->kvaddr) {
		/* Fix: message previously lacked the trailing newline the
		 * other DRM_ERROR() in this function has; without it the
		 * next printk line gets appended to this one. */
		DRM_ERROR("failed to allocate %zx byte dma buffer\n",
			  obj->size);
		ret = -ENOMEM;
		goto err_dma;
	}

	mtk_gem->sgt = kzalloc(sizeof(*mtk_gem->sgt), GFP_KERNEL);
	if (!mtk_gem->sgt) {
		ret = -ENOMEM;
		goto err_sgt;
	}

	ret = dma_get_sgtable_attrs(dev->dev, mtk_gem->sgt, mtk_gem->kvaddr,
				    mtk_gem->dma_addr, obj->size,
				    &mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		goto err_get_sgtable;
	}

	DRM_INFO("kvaddr = %p dma_addr = %pad\n", mtk_gem->kvaddr,
		 &mtk_gem->dma_addr);

	return mtk_gem;

err_get_sgtable:
	kfree(mtk_gem->sgt);
err_sgt:
	dma_free_attrs(dev->dev, obj->size, mtk_gem->kvaddr,
		       mtk_gem->dma_addr, &mtk_gem->dma_attrs);
err_dma:
	/* NOTE(review): if mtk_drm_gem_init() called drm_gem_object_init(),
	 * a drm_gem_object_release(obj) is needed here before the kfree() —
	 * confirm against mtk_drm_gem_init(), which is outside this chunk. */
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
/* * Allocate a sg_table for this GEM object. * Note: Both the table's contents, and the sg_table itself must be freed by * the caller. * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. */ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); struct drm_device *drm = obj->dev; struct sg_table *sgt; int ret; sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return ERR_PTR(-ENOMEM); ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, &rk_obj->dma_attrs); if (ret) { DRM_ERROR("failed to allocate sgt, %d\n", ret); kfree(sgt); return ERR_PTR(ret); } return sgt; }