Example #1
File: gem.c Project: JaneDu/ath
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
Example #2
static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}
Example #3
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	drm_gem_object_unreference_unlocked(&obj->base);
	*handle_p = handle;
	return 0;
}
Example #4
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
Example #5
/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
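The comment above stresses that __drm_gem_cma_create() only initializes the GEM object and allocates no backing memory, so the caller owns both the allocation and the unwind. Below is a minimal, hypothetical caller sketch in the same style as Examples 27 and 28 (writecombined backing); the name my_cma_create and its labels are illustrative and not taken from any of the projects listed here.

static struct drm_gem_cma_object *
my_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	/* GEM init + mmap offset only; no backing pages yet */
	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	/* the caller provides the backing memory ... */
	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, &cma_obj->paddr,
						GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	/* ... and therefore also the unwind: release the GEM state, then the wrapper */
	drm_gem_object_release(&cma_obj->base);
	kfree(cma_obj);
	return ERR_PTR(ret);
}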
Example #6
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	if (bo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->gem_base.import_attach)
		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	kfree(bo->metadata);
	kfree(bo);
}
Example #7
File: gem.c Project: JaneDu/ath
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
Example #8
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  uint64_t size,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->gem_base);
		return ret;
	}

	*obj_p = &obj->gem_base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->gem_base);

	*handle_p = handle;
	return 0;
}
Example #9
/*
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: The drm device
 * @size: The GEM object size
 *
 * This function creates and initializes a GEM CMA object of the given size, but
 * doesn't allocate any memory to back the object.
 *
 * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, unsigned int size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = &cma_obj->base;

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
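This variant differs from Example 5 only in how the wrapper is allocated (a plain kzalloc() instead of an optional driver gem_create_object hook); the release/kfree unwind is identical. Once such an object exists it is normally exposed to userspace through drm_gem_handle_create(), and the same release/kfree pair covers a failure there, as in Examples 3, 8 and 10. A condensed, hypothetical sketch of that step follows; my_gem_create is a placeholder name that reuses __drm_gem_cma_create() from the listing above, and backing-memory allocation is omitted for brevity.

static int my_gem_create(struct drm_file *file, struct drm_device *dev,
			 size_t size, u32 *handle)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	cma_obj = __drm_gem_cma_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(cma_obj))
		return PTR_ERR(cma_obj);

	ret = drm_gem_handle_create(file, &cma_obj->base, handle);
	if (ret) {
		/* no handle exists yet, so tear the object down by hand */
		drm_gem_object_release(&cma_obj->base);
		kfree(cma_obj);
		return ret;
	}

	/* the handle now holds the reference taken at creation time */
	drm_gem_object_unreference_unlocked(&cma_obj->base);
	return 0;
}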
Example #10
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
				       struct drm_file *file,
				       u32 *handle,
				       u64 size)
{
	struct vkms_gem_object *obj;
	int ret;

	if (!file || !dev || !handle)
		return ERR_PTR(-EINVAL);

	obj = __vkms_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->gem, handle);
	drm_gem_object_put_unlocked(&obj->gem);
	if (ret) {
		drm_gem_object_release(&obj->gem);
		kfree(obj);
		return ERR_PTR(ret);
	}

	return &obj->gem;
}
Example #11
File: gem.c Project: JaneDu/ath
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
Example #12
void pscnv_gem_free_object (struct drm_gem_object *obj) {
	struct pscnv_bo *vo = obj->driver_private;
#ifndef PSCNV_KAPI_DRM_GEM_OBJECT_HANDLE_COUNT
	atomic_dec(&obj->handle_count);
#endif
	pscnv_mem_free(vo);
	drm_gem_object_release(obj);
	kfree(obj);
}
Example #13
File: gem.c Project: 03199618/linux
void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);

	/* Remove the list map if one is present */
	drm_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);

	/* This must occur last as it frees up the memory of the GEM object */
	psb_gtt_free_range(obj->dev, gtt);
}
Example #14
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
Example #15
File: gem.c Project: monojo/xu3
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->map_list.map)
		drm_gem_free_mmap_offset(gem);

	drm_gem_object_release(gem);
	tegra_bo_destroy(gem->dev, bo);

	kfree(bo);
}
Example #16
void vkms_gem_free_object(struct drm_gem_object *obj)
{
	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
						   gem);

	WARN_ON(gem->pages);
	WARN_ON(gem->vaddr);

	mutex_destroy(&gem->pages_lock);
	drm_gem_object_release(obj);
	kfree(gem);
}
Example #17
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gobj->driver_private;

	gobj->driver_private = NULL;
	if (robj) {
		radeon_bo_unref(&robj);
	}

	drm_gem_object_release(gobj);
	kfree(gobj);
}
Example #18
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}
Example #19
void drm_gem_object_release_wrap(struct drm_gem_object *obj)
{
	/* Remove the list map if one is present */
	if (obj->map_list.map) {
		struct drm_gem_mm *mm = obj->dev->mm_private;
		struct drm_map_list *list = &obj->map_list;
		drm_ht_remove_item(&mm->offset_hash, &list->hash);
		drm_mm_put_block(list->file_offset_node);
		kfree(list->map);
		list->map = NULL;
	}
	drm_gem_object_release(obj);
}
Example #20
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);

	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	dma_free_attrs(obj->dev->dev, obj->size, mtk_gem->kvaddr,
			mtk_gem->dma_addr, &mtk_gem->dma_attrs);

	kfree(mtk_gem);
}
Example #21
File: gem.c Project: 03199618/linux
static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
						int size, u32 *handle)
{
	struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
	if (gtt == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &gtt->gem, size);
	if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
		return 0;

	drm_gem_object_release(&gtt->gem);
	psb_gtt_free_range(dev, gtt);
	return -ENOMEM;
}
Example #22
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;

	if (bo->hw_res_handle)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
Example #23
/*
 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);

	drm_gem_object_release(gem_obj);

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);

	kfree(cma_obj);
}
Example #24
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	mutex_lock(&bo->adev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->adev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo->metadata);
	kfree(bo);
}
Example #25
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}
Example #26
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
Example #27
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!bo->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
			size);
		err = -ENOMEM;
		goto err_dma;
	}

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err)
		goto err_init;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err)
		goto err_mmap;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

err_mmap:
	drm_gem_object_release(&bo->gem);
err_init:
	tegra_bo_destroy(drm, bo);
err_dma:
	kfree(bo);

	return ERR_PTR(err);
}
Example #28
/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their DRM driver's
 * ->gem_free_object() callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (cma_obj->vaddr) {
		dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
				      cma_obj->vaddr, cma_obj->paddr);
	} else if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
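The comment above says that drivers using the CMA helpers should install this function as their DRM driver's ->gem_free_object() callback. Here is a minimal wiring sketch, assuming a struct drm_driver of the same vintage as these examples (the field was later renamed and eventually replaced by per-object funcs); every field other than .gem_free_object, and the my_drm_driver name itself, is illustrative only.

/* Hedged sketch: hooking the CMA free path into a driver of this era. */
static struct drm_driver my_drm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	/* the GEM core calls this when the last reference to an object drops */
	.gem_free_object	= drm_gem_cma_free_object,
	/* typical companions from the CMA helper set */
	.dumb_create		= drm_gem_cma_dumb_create,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	.name			= "my-drm",
	/* .fops and the remaining mandatory fields are omitted in this sketch */
};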
Example #29
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_destroy(gem->dev, bo);
	}

	drm_gem_free_mmap_offset(gem);
	drm_gem_object_release(gem);

	kfree(bo);
}
Example #30
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}