Example #1
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
			      flags, NULL);
}
Example #2
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
Example #3
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}
Example #4
int ioctl_request_fd(void)
{
/* The buffer exporter announces its wish to export a buffer. In doing so, it
   connects its own private buffer data, provides an implementation of the
   operations that can be performed on the exported dma_buf, and flags for the
   file associated with this buffer.

   Interface:
      struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
				     size_t size, int flags)
*/
/*
   A userspace entity requests a file descriptor (fd), which is a handle to
   the anonymous file associated with the buffer. It can then share the fd
   with other drivers and/or processes.

   Interface:
      int dma_buf_fd(struct dma_buf *dmabuf)
*/
	int fd;

	/* in a real exporter, priv would point at the exporter's private
	 * buffer data rather than NULL */
	curr_dma_buf = dma_buf_export(NULL, &dma_ops, dma_size, 0);
	if (IS_ERR(curr_dma_buf))
		return PTR_ERR(curr_dma_buf);

	fd = dma_buf_fd(curr_dma_buf);
	if (fd < 0)
		dma_buf_put(curr_dma_buf);
	return fd;
}
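Later examples in this listing (e.g. #7 and #9) use the newer export interface, where the parameters are packed into a struct dma_buf_export_info initialized by DEFINE_DMA_BUF_EXPORT_INFO(). A minimal sketch of the same request-fd flow against that interface, keeping the hypothetical dma_ops, dma_size and curr_dma_buf names from the example above:

int ioctl_request_fd(void)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;

	exp_info.ops = &dma_ops;	/* exporter's dma_buf_ops, as above */
	exp_info.size = dma_size;	/* size of the backing buffer */
	exp_info.flags = O_RDWR;	/* mode flags for the dma-buf's file */
	exp_info.priv = NULL;		/* exporter's private buffer data */

	curr_dma_buf = dma_buf_export(&exp_info);
	if (IS_ERR(curr_dma_buf))
		return PTR_ERR(curr_dma_buf);

	/* dma_buf_fd() takes file flags in this API generation */
	fd = dma_buf_fd(curr_dma_buf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(curr_dma_buf);

	return fd;
}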
Example #5
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, flags);
}
Example #6
struct dma_buf *rockchip_dmabuf_prime_export(struct drm_device *drm_dev,
					     struct drm_gem_object *obj,
					     int flags)
{
	struct rockchip_drm_gem_obj *rockchip_gem_obj =
				to_rockchip_gem_obj(obj);

	return dma_buf_export(rockchip_gem_obj, &rockchip_dmabuf_ops,
			      rockchip_gem_obj->base.size, flags);
}
Example #7
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}
Example #8
struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *obj, int flags)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
	if (ret)
		return ERR_PTR(ret);

	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
}
Example #9
File: gem.c Project: JaneDu/ath
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return dma_buf_export(&exp_info);
}
Example #10
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
                              NULL);
}
Example #11
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *obj,
					int flags)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int ret = 0;

	/* pin buffer into GTT */
	ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
	if (ret)
		return ERR_PTR(ret);

	return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}
Example #12
/**
 * adf_memblock_export - export a memblock reserved area as a dma-buf
 *
 * @base: base physical address
 * @size: memblock size
 * @flags: mode flags for the dma-buf's file
 *
 * @base and @size must be page-aligned.
 *
 * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
 */
struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
{
	struct adf_memblock_pdata *pdata;
	struct dma_buf *buf;

	if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
		return ERR_PTR(-EINVAL);

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	pdata->base = base;
	buf = dma_buf_export(pdata, &adf_memblock_ops, size, flags);
	if (IS_ERR(buf))
		kfree(pdata);

	return buf;
}
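A hypothetical call site for the helper above, assuming a page-aligned region reserved from memblock at boot (rmem_base and rmem_size are illustrative names, not part of the original):

	struct dma_buf *buf;

	/* export the reserved region as a read/write dma-buf */
	buf = adf_memblock_export(rmem_base, rmem_size, O_RDWR);
	if (IS_ERR(buf))
		return PTR_ERR(buf);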
Example #13
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(&exp_info);
}
Example #14
static int dmabuf_ioctl_create(struct dmabuf_file *priv,
			       const void __user *data)
{
	struct dmabuf_create args;
	int ret = 0;

	if (priv->buf || priv->virt)
		return -EBUSY;

	if (copy_from_user(&args, data, sizeof(args)))
		return -EFAULT;

	priv->virt = dma_alloc_writecombine(priv->dev, args.size, &priv->phys,
					    GFP_KERNEL | __GFP_NOWARN);
	if (!priv->virt)
		return -ENOMEM;

	args.flags |= O_RDWR;

	priv->buf = dma_buf_export(priv, &dmabuf_ops, args.size, args.flags,
				   NULL);
	if (IS_ERR(priv->buf)) {
		ret = PTR_ERR(priv->buf);
		priv->buf = NULL;
		goto free;
	}

	priv->size = args.size;

	return 0;

free:
	dma_free_writecombine(priv->dev, args.size, priv->virt, priv->phys);
	priv->virt = NULL;
	return ret;
}
Example #15
static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
Example #16
static struct dma_buf *mock_dmabuf(int npages)
{
	struct mock_dmabuf *mock;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int i;

	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
		       GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);

	mock->npages = npages;
	for (i = 0; i < npages; i++) {
		mock->pages[i] = alloc_page(GFP_KERNEL);
		if (!mock->pages[i])
			goto err;
	}

	exp_info.ops = &mock_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = mock;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		goto err;

	return dmabuf;

err:
	while (i--)
		put_page(mock->pages[i]);
	kfree(mock);
	return ERR_PTR(-ENOMEM);
}
Example #17
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
}
Example #18
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
}
Example #19
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
Example #20
	.unmap_dma_buf = _tee_shm_dma_buf_unmap_dma_buf,
	.release = _tee_shm_dma_buf_release,
	.kmap_atomic = _tee_shm_dma_buf_kmap_atomic,
	.kmap = _tee_shm_dma_buf_kmap,
	.kunmap = _tee_shm_dma_buf_kunmap,
	.mmap = _tee_shm_dma_buf_mmap,
};

/******************************************************************************/

static int export_buf(struct tee *tee, struct tee_shm *shm, int *export)
{
	struct dma_buf *dmabuf;
	int ret = 0;

	dmabuf = dma_buf_export(shm, &_tee_shm_dma_buf_ops, shm->size_alloc,
				O_RDWR, NULL);
	if (IS_ERR_OR_NULL(dmabuf)) {
		dev_err(_DEV(tee), "%s: dmabuf: couldn't export buffer (%ld)\n",
			__func__, PTR_ERR(dmabuf));
		ret = -EINVAL;
		goto out;
	}

	*export = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (*export < 0) {
		ret = *export;
		dma_buf_put(dmabuf);
	}
out:
	OUTMSG(ret);
	return ret;
}

int tee_shm_alloc_io(struct tee_context *ctx, struct tee_shm_io *shm_io)
{
Example #21
static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
				       struct tee_device *teedev,
				       size_t size, u32 flags)
{
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (ctx && ctx->teedev != teedev) {
		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->teedev = teedev;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
	else
		poolm = teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}

	if (ctx) {
		teedev_ctx_get(ctx);
		mutex_lock(&teedev->mutex);
		list_add_tail(&shm->link, &ctx->list_shm);
		mutex_unlock(&teedev->mutex);
	}

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
Example #22
struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
	struct tee_shm *shm;
	void *ret;
	int rc;
	int num_pages;
	unsigned long start;

	if (flags != req_flags)
		return ERR_PTR(-ENOTSUPP);

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->desc->ops->shm_register ||
	    !teedev->desc->ops->shm_unregister) {
		tee_device_put(teedev);
		return ERR_PTR(-ENOTSUPP);
	}

	teedev_ctx_get(ctx);

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->teedev = teedev;
	shm->ctx = ctx;
	shm->id = -1;
	start = rounddown(addr, PAGE_SIZE);
	shm->offset = addr - start;
	shm->size = length;
	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
	if (!shm->pages) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
	if (rc > 0)
		shm->num_pages = rc;
	if (rc != num_pages) {
		if (rc >= 0)
			rc = -ENOMEM;
		ret = ERR_PTR(rc);
		goto err;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);

	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err;
	}

	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
					     shm->num_pages, start);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			teedev->desc->ops->shm_unregister(ctx, shm);
			goto err;
		}
	}

	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err:
	if (shm) {
		size_t n;

		if (shm->id >= 0) {
			mutex_lock(&teedev->mutex);
			idr_remove(&teedev->idr, shm->id);
			mutex_unlock(&teedev->mutex);
		}
		if (shm->pages) {
			for (n = 0; n < shm->num_pages; n++)
				put_page(shm->pages[n]);
			kfree(shm->pages);
		}
	}
	kfree(shm);
	teedev_ctx_put(ctx);
	tee_device_put(teedev);
	return ret;
}
Example #23
int exynos_dmabuf_prime_handle_to_fd(struct drm_device *drm_dev,
					struct drm_file *file,
					unsigned int handle, int *prime_fd)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	obj = drm_gem_object_lookup(drm_dev, file, handle);
	if (!obj) {
		DRM_DEBUG_KMS("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto err1;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (obj->prime_fd != -1) {
		/* we have a prime fd already referencing the object. */
		goto have_fd;
	}

	/*
	 * Get a dmabuf object for the gem object, registering the gem
	 * object with the newly allocated dmabuf.
	 *
	 * Note: the dma_buf_export function performs the following:
	 *	- creates a new dmabuf object.
	 *	- dmabuf->priv = gem object.
	 *	- file->private_data = dmabuf.
	 */
	obj->export_dma_buf = dma_buf_export(obj, &exynos_dmabuf_ops,
						obj->size, 0600);
	if (IS_ERR(obj->export_dma_buf)) {
		ret = PTR_ERR(obj->export_dma_buf);
		goto err2;
	}

	/* get file descriptor for a given dmabuf object. */
	obj->prime_fd = dma_buf_fd(obj->export_dma_buf);
	if (obj->prime_fd < 0) {
		DRM_DEBUG_KMS("failed to get fd from dmabuf.\n");
		dma_buf_put(obj->export_dma_buf);
		ret = obj->prime_fd;
		goto err2;
	}

	/*
	 * this gem object is referenced by the fd so
	 * the object refcount should be increased.
	 * after that when dmabuf_ops->release() is called,
	 * it will be decreased again.
	 */
	drm_gem_object_reference(obj);

have_fd:
	*prime_fd = obj->prime_fd;
err2:
	drm_gem_object_unreference(obj);
err1:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}
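The comments in the example above rely on the dmabuf_ops->release() hook dropping the extra gem reference taken when the fd was created. A minimal sketch of such a hook, assuming this era's exynos field names (illustrative, not the verbatim driver code):

static void exynos_dmabuf_release(struct dma_buf *dmabuf)
{
	struct drm_gem_object *obj = dmabuf->priv;

	/* the exported fd no longer refers to the object */
	obj->export_dma_buf = NULL;
	obj->prime_fd = -1;

	/* drop the reference taken in exynos_dmabuf_prime_handle_to_fd() */
	drm_gem_object_unreference_unlocked(obj);
}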