Example #1
1
File: gem.c Project: JaneDu/ath
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
Example #2
0
int ioctl_detach(int fd)
{
	/*
	 * When the buffer-user is done using this buffer, it 'disconnects'
	 * itself from the buffer:
	 *
	 * - it first detaches itself from the buffer.
	 *
	 *   Interface:
	 *      void dma_buf_detach(struct dma_buf *dmabuf,
	 *                          struct dma_buf_attachment *dmabuf_attach);
	 *
	 * - then the buffer-user returns the buffer reference to the exporter.
	 *
	 *   Interface:
	 *      void dma_buf_put(struct dma_buf *dmabuf);
	 */

	/* todo - use fd from userland to get back the contexts */
	dma_buf_detach(curr_dma_buf, curr_dma_buf_attachment);
	dma_buf_put(curr_dma_buf);
	return 0;
}
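The comment block above only describes the tail of the importer protocol. As a point of reference for the examples that follow, here is a minimal, self-contained sketch of the full importer-side sequence (get, attach, map, then unmap, detach, put in reverse order); my_import_example, my_dev and fd are illustrative placeholders, not part of any of the drivers quoted in this listing.

/* Hypothetical helper: the canonical dma-buf importer life cycle.
 * Needs <linux/dma-buf.h>, <linux/err.h> and <linux/scatterlist.h>. */
static int my_import_example(struct device *my_dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);	/* takes a reference on the fd's dma-buf */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, my_dev);	/* register the importing device */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto out_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto out_detach;
	}

	/* ... program the device with sg_dma_address(sgt->sgl) ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);
	return ret;
}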
Example #3
0
void nvhost_dmabuf_put(struct mem_handle *handle)
{
	struct dma_buf_attachment *attach = to_dmabuf_att(handle);
	struct dma_buf *dmabuf = attach->dmabuf;
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
}
Example #4
0
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret) {
		goto fail_unmap;
	}

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #5
0
/**
 * rppc_alloc_dmabuf - import a buffer and store in a rppc buffer descriptor
 * @rpc - rppc instance handle
 * @fd - dma_buf file descriptor
 * @autoreg: flag indicating the mode of creation
 *
 * This function primarily imports a buffer into the driver and holds
 * a reference to the buffer on behalf of the remote processor. The
 * buffer to be imported is represented by a dma-buf file descriptor,
 * and as such is agnostic of the buffer allocator and/or exporter.
 * The buffer is imported using the dma-buf api, and a driver specific
 * buffer descriptor is used to store the imported buffer properties.
 * The imported buffers are all stored in a rppc instance specific
 * idr, to be used for looking up and cleaning up the driver buffer
 * descriptors.
 *
 * The @autoreg field is used to dictate the manner in which the buffer
 * is imported. The user-side can pre-register the buffers with the driver
 * (which will import the buffers) if the application is going to use
 * these repeatedly in consecutive function invocations. The buffers
 * are auto-imported if the user-side has not registered them previously
 * and are un-imported once the remote function call returns.
 *
 * This function is to be called only after checking that buffer has
 * not been imported already (see rppc_find_dmabuf).
 *
 * Return: allocated rppc_dma_buf or error
 */
struct rppc_dma_buf *rppc_alloc_dmabuf(struct rppc_instance *rpc, int fd,
				       bool autoreg)
{
	struct rppc_dma_buf *dma;
	void *ret;
	int id;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return ERR_PTR(-ENOMEM);

	dma->fd = fd;
	dma->autoreg = !!autoreg;
	dma->buf = dma_buf_get(dma->fd);
	if (IS_ERR(dma->buf)) {
		ret = dma->buf;
		goto free_dma;
	}

	dma->attach = dma_buf_attach(dma->buf, rpc->dev);
	if (IS_ERR(dma->attach)) {
		ret = dma->attach;
		goto put_buf;
	}

	dma->sgt = dma_buf_map_attachment(dma->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(dma->sgt)) {
		ret = dma->sgt;
		goto detach_buf;
	}

	dma->pa = sg_dma_address(dma->sgt->sgl);
	mutex_lock(&rpc->lock);
	id = idr_alloc(&rpc->dma_idr, dma, 0, 0, GFP_KERNEL);
	dma->id = id;
	mutex_unlock(&rpc->lock);
	if (id < 0) {
		ret = ERR_PTR(id);
		goto unmap_buf;
	}

	return dma;

unmap_buf:
	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
detach_buf:
	dma_buf_detach(dma->buf, dma->attach);
put_buf:
	dma_buf_put(dma->buf);
free_dma:
	kfree(dma);

	return ret;
}
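The kernel-doc above notes that every imported buffer descriptor is stored in an instance-specific idr so it can be looked up and cleaned up later. A minimal sketch of that bookkeeping pattern in isolation, using invented my_buf/my_idr/my_lock names rather than the rppc structures:

/* Hypothetical idr bookkeeping for imported-buffer descriptors. */
struct my_buf {
	int id;
	struct dma_buf *buf;
};

static DEFINE_MUTEX(my_lock);
static DEFINE_IDR(my_idr);

static int my_buf_track(struct my_buf *b)
{
	int id;

	mutex_lock(&my_lock);
	id = idr_alloc(&my_idr, b, 0, 0, GFP_KERNEL);	/* 0, 0: any free id */
	mutex_unlock(&my_lock);
	if (id < 0)
		return id;
	b->id = id;
	return 0;
}

static struct my_buf *my_buf_lookup(int id)
{
	struct my_buf *b;

	mutex_lock(&my_lock);
	b = idr_find(&my_idr, id);	/* the lookup the kernel-doc refers to */
	mutex_unlock(&my_lock);
	return b;
}

static void my_buf_untrack(struct my_buf *b)
{
	mutex_lock(&my_lock);
	idr_remove(&my_idr, b->id);
	mutex_unlock(&my_lock);
}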
Example #6
static void vb2_ion_detach_dmabuf(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (buf->kva != NULL) {
		dma_buf_kunmap(buf->dma_buf, 0, buf->kva);
		dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, 0);
	}

	dma_buf_detach(buf->dma_buf, buf->attachment);
	kfree(buf);
}
Example #7
static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}
Example #8
0
static void fimg2d_unmap_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma)
{
	if (!dma->dma_addr)
		return;

	iovmm_unmap(info->dev, dma->dma_addr);
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			dma->direction);
	dma_buf_detach(dma->dma_buf, dma->attachment);
	dma_buf_put(dma->dma_buf);
	memset(dma, 0, sizeof(struct fimg2d_dma));
}
Example #9
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #10
0
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #11
dma_addr_t decon_map_sec_dma_buf(struct dma_buf *dbuf, int plane)
{
	struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */

	if (!dbuf || (plane >= MAX_BUF_PLANE_CNT) || (plane < 0))
		return -EINVAL;

	dma.ion_handle = NULL;
	dma.fence = NULL;

	dma.dma_buf = dbuf;
	dma.attachment = dma_buf_attach(dbuf, decon->dev);

	if (IS_ERR(dma.attachment)) {
		decon_err("dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma.attachment));
		goto err_buf_map_attach;
	}

	dma.sg_table = dma_buf_map_attachment(dma.attachment,
			DMA_TO_DEVICE);

	if (IS_ERR(dma.sg_table)) {
		decon_err("dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma.sg_table));
		goto err_buf_map_attachment;
	}

	dma.dma_addr = ion_iovmm_map(dma.attachment, 0,
			dma.dma_buf->size, DMA_TO_DEVICE, plane);

	if (IS_ERR_VALUE(dma.dma_addr)) {
		decon_err("iovmm_map() failed: %pa\n", &dma.dma_addr);
		goto err_iovmm_map;
	}

	exynos_ion_sync_dmabuf_for_device(decon->dev, dma.dma_buf,
			dma.dma_buf->size, DMA_TO_DEVICE);

	return dma.dma_addr;

err_iovmm_map:
	dma_buf_unmap_attachment(dma.attachment, dma.sg_table,
			DMA_TO_DEVICE);
err_buf_map_attachment:
	dma_buf_detach(dma.dma_buf, dma.attachment);
err_buf_map_attach:
	return 0;
}
Example #12
0
static unsigned int fimg2d_map_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma, int fd,
		enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	dma->direction = direction;
	dma->dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma->dma_buf)) {
		dev_err(info->dev, "dma_buf_get() failed: %ld\n",
				PTR_ERR(dma->dma_buf));
		goto err_buf_get;
	}

	dma->attachment = dma_buf_attach(dma->dma_buf, info->dev);
	if (IS_ERR_OR_NULL(dma->attachment)) {
		dev_err(info->dev, "dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma->attachment));
		goto err_buf_attach;
	}

	dma->sg_table = dma_buf_map_attachment(dma->attachment,
			direction);
	if (IS_ERR_OR_NULL(dma->sg_table)) {
		dev_err(info->dev, "dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma->sg_table));
		goto err_buf_map_attachment;
	}

	dma_addr = iovmm_map(info->dev, dma->sg_table->sgl, 0,
			dma->dma_buf->size);
	if (!dma_addr || IS_ERR_VALUE(dma_addr)) {
		dev_err(info->dev, "iovmm_map() failed: %d\n", dma->dma_addr);
		goto err_iovmm_map;
	}

	dma->dma_addr = dma_addr;
	return dma->dma_buf->size;

err_iovmm_map:
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			direction);
err_buf_map_attachment:
	dma_buf_detach(dma->dma_buf, dma->attachment);
err_buf_attach:
	dma_buf_put(dma->dma_buf);
err_buf_get:
	return 0;
}
Example #13
0
/**
 * _mfc_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void _mfc_dmabuf_put(struct vb2_plane *planes)
{
	unsigned int plane;

	for (plane = 0; plane < MFC_NUM_PLANE; ++plane) {
		void *mem_priv = planes[plane].mem_priv;

		if (mem_priv) {
			dma_buf_detach(planes[plane].dbuf,
					planes[plane].mem_priv);
			dma_buf_put(planes[plane].dbuf);
			planes[plane].dbuf = NULL;
			planes[plane].mem_priv = NULL;
		}
	}
}
Example #14
0
static void vb2_dma_contig_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	if (!buf)
		return;

	if (buf->dma_addr)
		vb2_dma_contig_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	buf->db_attach = NULL;

	kfree(buf);
}
Example #15
0
/**
 * rppc_free_dmabuf - release the imported buffer
 * @id: idr index of the imported buffer descriptor
 * @p: imported buffer descriptor allocated during rppc_alloc_dmabuf
 * @data: rpc instance handle
 *
 * This function is used to release a buffer that has been previously
 * imported through a rppc_alloc_dmabuf call. The function can be used
 * either individually for releasing a specific buffer or in a loop iterator
 * for releasing all the buffers associated with a remote function call, or
 * during cleanup of the rpc instance.
 *
 * Return: 0 on success, and -ENOENT if invalid pointers passed in
 */
int rppc_free_dmabuf(int id, void *p, void *data)
{
	struct rppc_dma_buf *dma = p;
	struct rppc_instance *rpc = data;

	if (!dma || !rpc)
		return -ENOENT;

	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma->buf, dma->attach);
	dma_buf_put(dma->buf);
	WARN_ON(id != dma->id);
	idr_remove(&rpc->dma_idr, id);
	kfree(dma);

	return 0;
}
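rppc_free_dmabuf has the int (*fn)(int id, void *p, void *data) shape that idr_for_each() expects, which is what the kernel-doc means by using it "in a loop iterator": tearing down an instance can hand every still-tracked descriptor to this function in one pass. A sketch of that use, assuming the caller serialises against concurrent imports with the same rpc->lock as above:

/* Illustrative instance teardown: release every tracked buffer, then the idr. */
static void my_release_all(struct rppc_instance *rpc)
{
	mutex_lock(&rpc->lock);
	idr_for_each(&rpc->dma_idr, rppc_free_dmabuf, rpc);
	idr_destroy(&rpc->dma_idr);
	mutex_unlock(&rpc->lock);
}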
Example #16
void decon_free_sec_dma_buf(int plane)
{
	struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */ 

	if (IS_ERR_VALUE(dma.dma_addr) || !dma.dma_buf)
		return;

	ion_iovmm_unmap(dma.attachment, dma.dma_addr);

	dma_buf_unmap_attachment(dma.attachment, dma.sg_table,
		DMA_TO_DEVICE);

	exynos_ion_sync_dmabuf_for_cpu(decon->dev, dma.dma_buf,
				dma.dma_buf->size, DMA_FROM_DEVICE);

	dma_buf_detach(dma.dma_buf, dma.attachment);
	memset(&dma, 0, sizeof(dma));
}
Example #17
0
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);


	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		kfree(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #18
0
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				dma_buf_put(dma_buf);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #19
0
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			dma_buf_put(dma_buf);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #20
0
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 fd,
			    struct tegra_dc_dmabuf **dc_buf,
			    dma_addr_t *phys_addr)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_dmabuf *dc_dmabuf;

	*dc_buf = NULL;
	*phys_addr = -1;
	if (!fd)
		return 0;

	dc_dmabuf = kzalloc(sizeof(*dc_dmabuf), GFP_KERNEL);
	if (!dc_dmabuf)
		return -ENOMEM;

	dc_dmabuf->buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dc_dmabuf->buf))
		goto buf_fail;

	dc_dmabuf->attach = dma_buf_attach(dc_dmabuf->buf, ext->dev->parent);
	if (IS_ERR_OR_NULL(dc_dmabuf->attach))
		goto attach_fail;

	dc_dmabuf->sgt = dma_buf_map_attachment(dc_dmabuf->attach,
						DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(dc_dmabuf->sgt))
		goto sgt_fail;

	*phys_addr = sg_dma_address(dc_dmabuf->sgt->sgl);
	*dc_buf = dc_dmabuf;

	return 0;
sgt_fail:
	dma_buf_detach(dc_dmabuf->buf, dc_dmabuf->attach);
attach_fail:
	dma_buf_put(dc_dmabuf->buf);
buf_fail:
	kfree(dc_dmabuf);
	return -ENOMEM;
}
Example #21
int fimc_is_video_s_ctrl(struct file *file,
	struct fimc_is_video_ctx *vctx,
	struct v4l2_control *ctrl)
{
	int ret = 0;
	/* hack for 64bit addr */
	ulong value_to_addr = 0;
	struct fimc_is_video *video;
	struct fimc_is_device_ischain *device;
	struct fimc_is_resourcemgr *resourcemgr;

	BUG_ON(!vctx);
	BUG_ON(!GET_DEVICE(vctx));
	BUG_ON(!GET_VIDEO(vctx));
	BUG_ON(!ctrl);

	device = GET_DEVICE(vctx);
	video = GET_VIDEO(vctx);
	resourcemgr = device->resourcemgr;

	switch (ctrl->id) {
	case V4L2_CID_IS_END_OF_STREAM:
		ret = fimc_is_ischain_open_wrap(device, true);
		if (ret) {
			merr("fimc_is_ischain_open_wrap is fail(%d)", device, ret);
			goto p_err;
		}
		break;
	case V4L2_CID_IS_SET_SETFILE:
		if (test_bit(FIMC_IS_ISCHAIN_START, &device->state)) {
			merr("device is already started, setfile applying is fail", device);
			ret = -EINVAL;
			goto p_err;
		}

		device->setfile = ctrl->value;
		break;
	case V4L2_CID_IS_HAL_VERSION:
		if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) {
			merr("hal version(%d) is invalid", device, ctrl->value);
			ret = -EINVAL;
			goto p_err;
		}
		resourcemgr->hal_version = ctrl->value;
		break;
	case V4L2_CID_IS_DEBUG_DUMP:
		info("Print fimc-is info dump by HAL");
		fimc_is_hw_logdump(device->interface);
		fimc_is_hw_regdump(device->interface);
		CALL_POPS(device, print_clk);

		if (ctrl->value)
			panic("intentional panic from camera HAL");
		break;
	case V4L2_CID_IS_DVFS_CLUSTER0:
	case V4L2_CID_IS_DVFS_CLUSTER1:
		fimc_is_resource_ioctl(resourcemgr, ctrl);
		break;
	case V4L2_CID_IS_DEBUG_SYNC_LOG:
		fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG);
		break;
	case V4L2_CID_IS_MAP_BUFFER:
		{
			struct fimc_is_queue *queue;
			struct fimc_is_framemgr *framemgr;
			struct fimc_is_frame *frame;
			struct dma_buf *dmabuf;
			struct dma_buf_attachment *attachment;
			dma_addr_t dva;
			struct v4l2_buffer *buf;
			struct v4l2_plane *planes;
			size_t size;
			u32 plane, group_id;

			size = sizeof(struct v4l2_buffer);
			buf = kmalloc(size, GFP_KERNEL);
			if (!buf) {
				mverr("kmalloc is fail(%p)", device, video, buf);
				ret = -EINVAL;
				goto p_err;
			}
			/* hack for 64bit addr */
			value_to_addr = ctrl->value;

			ret = copy_from_user(buf, (void __user *)value_to_addr, size);
			if (ret) {
				mverr("copy_from_user is fail(%d)", device, video, ret);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_OUTPUT(buf->type)) {
				mverr("capture video type is not supported", device, video);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
				mverr("single plane is not supported", device, video);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->index >= FRAMEMGR_MAX_REQUEST) {
				mverr("buffer index is invalid(%d)", device, video, buf->index);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->length > VIDEO_MAX_PLANES) {
				mverr("planes[%d] is invalid", device, video, buf->length);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			queue = GET_QUEUE(vctx);
			if (queue->vbq->memory != V4L2_MEMORY_DMABUF) {
				mverr("memory type(%d) is not supported", device, video, queue->vbq->memory);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			size = sizeof(struct v4l2_plane) * buf->length;
			planes = kmalloc(size, GFP_KERNEL);
			if (!planes) {
				mverr("kmalloc is fail(%p)", device, video, planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			ret = copy_from_user(planes, (void __user *)buf->m.planes, size);
			if (ret) {
				mverr("copy_from_user is fail(%d)", device, video, ret);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			framemgr = &queue->framemgr;
			frame = &framemgr->frame[buf->index];
			if (test_bit(FRAME_MAP_MEM, &frame->memory)) {
				mverr("this buffer(%d) is already mapped", device, video, buf->index);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only the last buffer needs to be mapped */
			if (buf->length <= 1) {
				mverr("this buffer(%d) have no meta plane", device, video, buf->length);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			plane = buf->length - 1;
			dmabuf = dma_buf_get(planes[plane].m.fd);
			if (IS_ERR(dmabuf)) {
				mverr("dma_buf_get is fail(%p)", device, video, dmabuf);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			attachment = dma_buf_attach(dmabuf, &device->pdev->dev);
			if (IS_ERR(attachment)) {
				mverr("dma_buf_attach is fail(%p)", device, video, attachment);
				kfree(planes);
				kfree(buf);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only support output(read) video node */
			dva = ion_iovmm_map(attachment, 0, dmabuf->size, 0, plane);
			if (IS_ERR_VALUE(dva)) {
				mverr("ion_iovmm_map is fail(%pa)", device, video, &dva);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			group_id = GROUP_ID(device->group_3aa.id);
			ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size);
			if (ret) {
				mverr("fimc_is_itf_map is fail(%d)", device, video, ret);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				goto p_err;
			}

			mvinfo(" B%d.P%d MAP\n", device, video, buf->index, plane);
			set_bit(FRAME_MAP_MEM, &frame->memory);
			dma_buf_detach(dmabuf, attachment);
			dma_buf_put(dmabuf);
			kfree(planes);
			kfree(buf);
		}
		break;
	default:
		err("unsupported ioctl(0x%X)", ctrl->id);
		ret = -EINVAL;
		break;
	}

p_err:
	return ret;
}
Example #22
0
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0)
		goto err_release_gem;

	DRM_DEBUG_PRIME("ump id = %d, dma_addr = 0x%x, size = 0x%lx\n",
			exynos_gem_obj->priv_id,
			buffer->dma_addr,
			buffer->size);

	return &exynos_gem_obj->base;

err_release_gem:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #23
0
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}
Example #24
0
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #25
static int fimc_is_isp_video_s_ctrl(struct file *file, void *priv,
					struct v4l2_control *ctrl)
{
	int ret = 0;
	int i2c_clk;
	struct fimc_is_video *video;
	struct fimc_is_video_ctx *vctx = file->private_data;
	struct fimc_is_device_ischain *device;
	struct fimc_is_core *core;

	BUG_ON(!vctx);
	BUG_ON(!vctx->device);
	BUG_ON(!vctx->video);

	dbg_isp("%s\n", __func__);

	device = vctx->device;
	video = vctx->video;
	core = container_of(video, struct fimc_is_core, video_isp);

	if (core->resourcemgr.dvfs_ctrl.cur_int_qos == DVFS_L0)
		i2c_clk = I2C_L0;
	else
		i2c_clk = I2C_L1;

	switch (ctrl->id) {
	case V4L2_CID_IS_DEBUG_DUMP:
		info("Print fimc-is info dump by HAL");
		if (device != NULL) {
			fimc_is_hw_logdump(device->interface);
			fimc_is_hw_regdump(device->interface);
			CALL_POPS(device, print_clk, device->pdev);
		}
		if (ctrl->value) {
			err("BUG_ON from HAL");
			BUG();
		}
		break;
	case V4L2_CID_IS_DEBUG_SYNC_LOG:
		fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG);
		break;
	case V4L2_CID_IS_HAL_VERSION:
		if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) {
			merr("hal version(%d) is invalid", vctx, ctrl->value);
			ret = -EINVAL;
			goto p_err;
		}
		core->resourcemgr.hal_version = ctrl->value;
		break;
	case V4L2_CID_IS_G_CAPABILITY:
		ret = fimc_is_ischain_g_capability(device, ctrl->value);
		dbg_isp("V4L2_CID_IS_G_CAPABILITY : %X\n", ctrl->value);
		break;
	case V4L2_CID_IS_FORCE_DONE:
		set_bit(FIMC_IS_GROUP_REQUEST_FSTOP, &device->group_isp.state);
		break;
	case V4L2_CID_IS_DVFS_LOCK:
		ret = fimc_is_itf_i2c_lock(device, I2C_L0, true);
		if (ret) {
			err("fimc_is_itf_i2_clock fail\n");
			break;
		}
		pm_qos_add_request(&device->user_qos, PM_QOS_DEVICE_THROUGHPUT,
					ctrl->value);
		ret = fimc_is_itf_i2c_lock(device, I2C_L0, false);
		if (ret) {
			err("fimc_is_itf_i2c_unlock fail\n");
			break;
		}
		dbg_isp("V4L2_CID_IS_DVFS_LOCK : %d\n", ctrl->value);
		break;
	case V4L2_CID_IS_DVFS_UNLOCK:
		ret = fimc_is_itf_i2c_lock(device, i2c_clk, true);
		if (ret) {
			err("fimc_is_itf_i2_clock fail\n");
			break;
		}
		pm_qos_remove_request(&device->user_qos);
		ret = fimc_is_itf_i2c_lock(device, i2c_clk, false);
		if (ret) {
			err("fimc_is_itf_i2c_unlock fail\n");
			break;
		}
		dbg_isp("V4L2_CID_IS_DVFS_UNLOCK : %d I2C(%d)\n", ctrl->value, i2c_clk);
		break;
	case V4L2_CID_IS_SET_SETFILE:
		if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) {
			err("Setting setfile is only avaiable before starting device!! (0x%08x)",
					ctrl->value);
			ret = -EINVAL;
		} else {
			device->setfile = ctrl->value;
			minfo("[ISP:V] setfile: 0x%08X\n", vctx, ctrl->value);
		}
		break;
	case V4L2_CID_IS_COLOR_RANGE:
		if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) {
			err("failed to change color range: device started already (0x%08x)",
					ctrl->value);
			ret = -EINVAL;
		} else {
			device->color_range &= ~FIMC_IS_ISP_CRANGE_MASK;

			if (ctrl->value)
				device->color_range	|=
					(FIMC_IS_CRANGE_LIMITED << FIMC_IS_ISP_CRANGE_SHIFT);
		}
		break;
	case V4L2_CID_IS_MAP_BUFFER:
		{
			/* hack for 64bit addr */
			ulong value_to_addr;
			struct fimc_is_queue *queue;
			struct fimc_is_framemgr *framemgr;
			struct fimc_is_frame *frame;
			struct dma_buf *dmabuf;
			struct dma_buf_attachment *attachment;
			dma_addr_t dva;
			struct v4l2_buffer *buf;
			struct v4l2_plane *planes;
			size_t size;
			u32 write, plane, group_id;

			size = sizeof(struct v4l2_buffer);
			buf = kmalloc(size, GFP_KERNEL);
			if (!buf) {
				merr("kmalloc is fail", vctx);
				ret = -EINVAL;
				goto p_err;
			}
			/* hack for 64bit addr */
			value_to_addr = ctrl->value;

			ret = copy_from_user(buf, (void __user *)value_to_addr, size);
			if (ret) {
				merr("copy_from_user is fail(%d)", vctx, ret);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
				merr("single plane is not supported", vctx);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->index >= FRAMEMGR_MAX_REQUEST) {
				merr("buffer index is invalid(%d)", vctx, buf->index);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->length > VIDEO_MAX_PLANES) {
				merr("planes[%d] is invalid", vctx, buf->length);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			queue = GET_QUEUE(vctx, buf->type);
			if (queue->vbq->memory != V4L2_MEMORY_DMABUF) {
				merr("memory type(%d) is not supported", vctx, queue->vbq->memory);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			size = sizeof(struct v4l2_plane) * buf->length;
			planes = kmalloc(size, GFP_KERNEL);
			if (!planes) {
				merr("kmalloc is fail(%p)", vctx, planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			ret = copy_from_user(planes, (void __user *)buf->m.planes, size);
			if (ret) {
				merr("copy_from_user is fail(%d)", vctx, ret);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			framemgr = &queue->framemgr;
			frame = &framemgr->frame[buf->index];
			if (test_bit(FRAME_MAP_MEM, &frame->memory)) {
				merr("this buffer(%d) is already mapped", vctx, buf->index);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only the last buffer needs to be mapped */
			if (buf->length >= 1) {
				plane = buf->length - 1;
			} else {
				merr("buffer length is not correct(%d)", vctx, buf->length);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			dmabuf = dma_buf_get(planes[plane].m.fd);
			if (IS_ERR(dmabuf)) {
				merr("dma_buf_get is fail(%p)", vctx, dmabuf);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			attachment = dma_buf_attach(dmabuf, &device->pdev->dev);
			if (IS_ERR(attachment)) {
				merr("dma_buf_attach is fail(%p)", vctx, attachment);
				kfree(planes);
				kfree(buf);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			write = !V4L2_TYPE_IS_OUTPUT(buf->type);
			dva = ion_iovmm_map(attachment, 0, dmabuf->size, write, plane);
			if (IS_ERR_VALUE(dva)) {
				merr("ion_iovmm_map is fail(%pa)", vctx, &dva);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			group_id = GROUP_ID(device->group_isp.id);
			ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size);
			if (ret) {
				merr("fimc_is_itf_map is fail(%d)", vctx, ret);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				goto p_err;
			}

			minfo("[ISP:V] buffer%d.plane%d mapping\n", vctx, buf->index, plane);
			set_bit(FRAME_MAP_MEM, &frame->memory);
			dma_buf_detach(dmabuf, attachment);
			dma_buf_put(dmabuf);
			kfree(planes);
			kfree(buf);
		}
		break;
	default:
		err("unsupported ioctl(%d)\n", ctrl->id);
		ret = -EINVAL;
		break;
	}

p_err:
	return ret;
}
Example #26
0
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* really need? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dmu_buf_unmap;
	}

	sgl = sgt->sgl;

	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -EFAULT;
		goto err_free_block;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret =  -EFAULT;
		goto err_release_ump_handle;
	}

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dmu_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}
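The TODO in the example above describes a missing dedup step: before attaching, check whether this dma_buf has already been imported by the session, and if so drop the extra reference taken by dma_buf_get() and reuse the existing UMP handle. A sketch of that check, using an invented my_import bookkeeping entry (the real UMP session-memory structures are not shown in this listing):

/* Hypothetical per-session record of already-imported dma-bufs. */
struct my_import {
	struct list_head node;
	struct dma_buf *dmabuf;
	ump_dd_handle handle;
};

static ump_dd_handle my_find_import(struct list_head *imports,
				    struct dma_buf *dmabuf)
{
	struct my_import *imp;

	list_for_each_entry(imp, imports, node) {
		if (imp->dmabuf == dmabuf) {
			/* already imported: drop the dma_buf_get() reference
			 * and hand back the existing handle */
			dma_buf_put(dmabuf);
			return imp->handle;
		}
	}
	return UMP_DD_HANDLE_INVALID;
}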
Example #27
int tegra_dc_ext_set_cursor_image(struct tegra_dc_ext_user *user,
				  struct tegra_dc_ext_cursor_image *args)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc *dc = ext->dc;
	struct tegra_dc_dmabuf *handle, *old_handle;
	dma_addr_t phys_addr;
	int ret;
	u32 extformat = TEGRA_DC_EXT_CURSOR_FORMAT_FLAGS(args->flags);
	u32 fg = CURSOR_COLOR(args->foreground.r,
			      args->foreground.g,
			      args->foreground.b);
	u32 bg = CURSOR_COLOR(args->background.r,
			      args->background.g,
			      args->background.b);
	unsigned extsize = TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE(args->flags);
	enum tegra_dc_cursor_size size;
	enum tegra_dc_cursor_format format;

	switch (extsize) {
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_32x32:
		size = TEGRA_DC_CURSOR_SIZE_32X32;
		break;
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_64x64:
		size = TEGRA_DC_CURSOR_SIZE_64X64;
		break;
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_128x128:
		size = TEGRA_DC_CURSOR_SIZE_128X128;
		break;
	case TEGRA_DC_EXT_CURSOR_IMAGE_FLAGS_SIZE_256x256:
		size = TEGRA_DC_CURSOR_SIZE_256X256;
		break;
	default:
		return -EINVAL;
	}

	switch (extformat) {
	case TEGRA_DC_EXT_CURSOR_FORMAT_2BIT_LEGACY:
		format = TEGRA_DC_CURSOR_FORMAT_2BIT_LEGACY;
		break;
	case TEGRA_DC_EXT_CURSOR_FORMAT_RGBA_NON_PREMULT_ALPHA:
		format = TEGRA_DC_CURSOR_FORMAT_RGBA_NON_PREMULT_ALPHA;
		break;
	case TEGRA_DC_EXT_CURSOR_FORMAT_RGBA_PREMULT_ALPHA:
		format = TEGRA_DC_CURSOR_FORMAT_RGBA_PREMULT_ALPHA;
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&ext->cursor.lock);

	if (ext->cursor.user != user) {
		ret = -EACCES;
		goto unlock;
	}

	if (!ext->enabled) {
		ret = -ENXIO;
		goto unlock;
	}

	old_handle = ext->cursor.cur_handle;

	ret = tegra_dc_ext_pin_window(user, args->buff_id, &handle, &phys_addr);
	if (ret)
		goto unlock;

	ext->cursor.cur_handle = handle;

	ret = tegra_dc_cursor_image(dc, format, size, fg, bg, phys_addr);

	mutex_unlock(&ext->cursor.lock);

	if (old_handle) {
		dma_buf_unmap_attachment(old_handle->attach,
			old_handle->sgt, DMA_TO_DEVICE);
		dma_buf_detach(old_handle->buf, old_handle->attach);
		dma_buf_put(old_handle->buf);
		kfree(old_handle);
	}

	return ret;

unlock:
	mutex_unlock(&ext->cursor.lock);

	return ret;
}
Example #28
0
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #29
int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
					struct drm_file *file,
					int prime_fd, unsigned int *handle)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	dmabuf = dma_buf_get(prime_fd);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * if there is same dmabuf as the one to prime_fd
	 * in file_priv->prime list then return the handle.
	 *
	 * Note:
	 * but if the prime_fd from user belongs to another process
	 * then there couldn't be the dmabuf in file_priv->prime list
	 * because file_priv is unique to process.
	 */
	ret = drm_prime_lookup_fd_handle_mapping(&file_priv->prime,
							dmabuf, handle);
	if (!ret) {
		/* drop reference we got above. */
		dma_buf_put(dmabuf);
		goto out;
	}

	attach = dma_buf_attach(dmabuf, drm_dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto fail_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto fail_unmap;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dmabuf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_handle_create(file, &exynos_gem_obj->base, handle);
	if (ret < 0)
		goto fail_handle;

	/* consider physically non-continuous memory with IOMMU. */

	buffer->dma_addr = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->sgt = sgt;

	/*
	 * import(fd to handle) means that the physical memory region
	 * from the sgt is being shared with others so shared_refcount
	 * should be 1.
	 */
	atomic_set(&buffer->shared_refcount, 1);

	exynos_gem_obj->base.import_attach = attach;

	ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
							dmabuf, *handle);
	if (ret < 0)
		goto fail_handle;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0) {
		drm_prime_remove_fd_handle_mapping(&file_priv->prime, dmabuf);
		goto fail_handle;
	}

	DRM_DEBUG_KMS("fd = %d, handle = %d, dma_addr = 0x%x, size = 0x%lx\n",
			prime_fd, *handle, buffer->dma_addr, buffer->size);

	drm_gem_object_unreference(&exynos_gem_obj->base);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;

fail_handle:
	drm_gem_object_unreference(&exynos_gem_obj->base);
	kfree(buffer);
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt);
fail_detach:
	dma_buf_detach(dmabuf, attach);
fail_put:
	dma_buf_put(dmabuf);
out:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}