/*
 * msm_gem_new - allocate a new GEM buffer object of at least @size bytes.
 *
 * @dev:   drm device the object belongs to
 * @size:  requested size in bytes (rounded up to page granularity)
 * @flags: MSM_BO_* caching/placement flags, forwarded to msm_gem_new_impl()
 *
 * Caller must hold dev->struct_mutex.  On success returns the new object
 * with one reference held; on failure returns an ERR_PTR().
 */
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int err;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	err = msm_gem_new_impl(dev, size, flags, NULL, &obj);
	if (err)
		goto fail;

	if (!use_pages(obj)) {
		/* not shmem-backed: initialize without a backing store */
		drm_gem_private_object_init(dev, obj, size);
	} else {
		err = drm_gem_object_init(dev, obj, size);
		if (err)
			goto fail;
	}

	return obj;

fail:
	/* safe with obj == NULL (unreference is a no-op then) */
	drm_gem_object_unreference(obj);
	return ERR_PTR(err);
}
static void put_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); if (msm_obj->pages) { /* For non-cached buffers, ensure the new pages are clean * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, msm_obj->sgt->nents, DMA_BIDIRECTIONAL); sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); if (use_pages(obj)) drm_gem_put_pages(obj, msm_obj->pages, true, false); else { drm_mm_remove_node(msm_obj->vram_node); drm_free_large(msm_obj->pages); } msm_obj->pages = NULL; } }