/*
 * Tear down a GEM buffer object and free all of its resources.
 *
 * Drops the fake mmap offset (when one was set up), releases the GEM
 * core state, frees the backing storage and finally the wrapper struct.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* only drop the mmap offset if a map entry actually exists */
	if (gem->map_list.map)
		drm_gem_free_mmap_offset(gem);

	drm_gem_object_release(gem);
	tegra_bo_destroy(gem->dev, bo);

	kfree(bo);
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, unsigned long flags) { struct tegra_bo *bo; int err; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) return ERR_PTR(-ENOMEM); host1x_bo_init(&bo->base, &tegra_bo_ops); size = round_up(size, PAGE_SIZE); bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { dev_err(drm->dev, "failed to allocate buffer with size %u\n", size); err = -ENOMEM; goto err_dma; } err = drm_gem_object_init(drm, &bo->gem, size); if (err) goto err_init; err = drm_gem_create_mmap_offset(&bo->gem); if (err) goto err_mmap; if (flags & DRM_TEGRA_GEM_CREATE_TILED) bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) bo->flags |= TEGRA_BO_BOTTOM_UP; return bo; err_mmap: drm_gem_object_release(&bo->gem); err_init: tegra_bo_destroy(drm, bo); err_dma: kfree(bo); return ERR_PTR(err); }
/*
 * Free a GEM buffer object and its backing storage.
 *
 * Buffers imported through PRIME are unmapped and handed back to the
 * exporter via drm_prime_gem_destroy(); natively allocated buffers have
 * their DMA memory released by tegra_bo_destroy(). In both cases the
 * mmap offset and GEM core state are dropped before freeing the wrapper.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!gem->import_attach) {
		/* locally allocated buffer: release its DMA memory */
		tegra_bo_destroy(gem->dev, bo);
	} else {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	}

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);

	kfree(bo);
}