Example #1
struct mem_handle *nvhost_dmabuf_get(u32 id, struct platform_device *dev)
{
	struct mem_handle *h;
	struct dma_buf *buf;

	buf = dma_buf_get(to_dmabuf_fd(id));
	if (IS_ERR(buf))
		return (struct mem_handle *)buf;

	/* Attach the device; on failure drop the reference taken above. */
	h = (struct mem_handle *)dma_buf_attach(buf, &dev->dev);
	if (IS_ERR(h)) {
		dma_buf_put(buf);
		return h;
	}

	/* Tag the handle with the manager type in its low bits; cast via
	 * unsigned long rather than u32 to avoid pointer truncation. */
	return (struct mem_handle *)((unsigned long)h | mem_mgr_type_dmabuf);
}
Example #2
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 fd,
			    struct tegra_dc_dmabuf **dc_buf,
			    dma_addr_t *phys_addr)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_dmabuf *dc_dmabuf;

	*dc_buf = NULL;
	*phys_addr = -1;
	if (!fd)
		return 0;

	dc_dmabuf = kzalloc(sizeof(*dc_dmabuf), GFP_KERNEL);
	if (!dc_dmabuf)
		return -ENOMEM;

	dc_dmabuf->buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dc_dmabuf->buf))
		goto buf_fail;

	dc_dmabuf->attach = dma_buf_attach(dc_dmabuf->buf, ext->dev->parent);
	if (IS_ERR_OR_NULL(dc_dmabuf->attach))
		goto attach_fail;

	dc_dmabuf->sgt = dma_buf_map_attachment(dc_dmabuf->attach,
						DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(dc_dmabuf->sgt))
		goto sgt_fail;

	*phys_addr = sg_dma_address(dc_dmabuf->sgt->sgl);
	*dc_buf = dc_dmabuf;

	return 0;
sgt_fail:
	dma_buf_detach(dc_dmabuf->buf, dc_dmabuf->attach);
attach_fail:
	dma_buf_put(dc_dmabuf->buf);
buf_fail:
	kfree(dc_dmabuf);
	return -ENOMEM;
}
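Example #2 pins a buffer through the full import pipeline (get, attach, map). The matching teardown must undo those steps in reverse order: unmap the attachment, detach the device, then drop the buffer reference. Below is a minimal sketch of that unpin path, reusing struct tegra_dc_dmabuf from above; the name tegra_dc_ext_unpin_window is illustrative, not taken from the driver:

void tegra_dc_ext_unpin_window(struct tegra_dc_dmabuf *dc_buf)
{
	if (!dc_buf)
		return;

	/* Reverse order of tegra_dc_ext_pin_window() */
	dma_buf_unmap_attachment(dc_buf->attach, dc_buf->sgt, DMA_TO_DEVICE);
	dma_buf_detach(dc_buf->buf, dc_buf->attach);
	dma_buf_put(dc_buf->buf);
	kfree(dc_buf);
}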
Example #3
int ioctl_connect(int fd)
{
/*
 * Each buffer-user 'connects' itself to the buffer: it takes its own
 * reference to the buffer, using the fd passed to it.
 *
 * Interface:
 *    struct dma_buf *dma_buf_get(int fd)
 *
 * After this, the buffer-user needs to attach its device to the buffer,
 * which helps the exporter learn of device buffer constraints.
 *
 * Interface:
 *    struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 *                                              struct device *dev)
 */
	struct dma_buf *dma_buf_stored;

	/* TODO: store the buffer in some per-client context */
	dma_buf_stored = dma_buf_get(fd);
	if (IS_ERR(dma_buf_stored))
		return PTR_ERR(dma_buf_stored);

	curr_dma_buf_attachment = dma_buf_attach(dma_buf_stored, dmabufr_device);
	if (IS_ERR(curr_dma_buf_attachment)) {
		dma_buf_put(dma_buf_stored);
		return PTR_ERR(curr_dma_buf_attachment);
	}

	return 0;
}
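Once attached, a buffer-user that actually needs the backing storage maps the attachment to obtain an sg_table, and unmaps it when done. A minimal sketch of that next step, assuming the attachment stored by ioctl_connect() above; ioctl_use_buffer() is illustrative, while dma_buf_map_attachment()/dma_buf_unmap_attachment() are the real dma-buf interfaces:

int ioctl_use_buffer(void)
{
	struct sg_table *sgt;

	/* Ask the exporter to map the backing storage for DMA */
	sgt = dma_buf_map_attachment(curr_dma_buf_attachment,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... program the device using sg_dma_address(sgt->sgl) ... */

	dma_buf_unmap_attachment(curr_dma_buf_attachment, sgt,
				 DMA_BIDIRECTIONAL);
	return 0;
}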
/*
 * Start requested lock.
 *
 * Allocates required memory, copies dma_buf_fd list from userspace,
 * acquires related KDS resources, and starts the lock.
 */
static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
{
	dma_buf_lock_resource *resource;
	int size;
	int fd;
	int i;
	int ret;

	if (NULL == request->list_of_dma_buf_fds)
	{
		return -EINVAL;
	}
	if (request->count <= 0)
	{
		return -EINVAL;
	}
	if (request->count > DMA_BUF_LOCK_BUF_MAX)
	{
		return -EINVAL;
	}
	if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE &&
	    request->exclusive != DMA_BUF_LOCK_EXCLUSIVE)
	{
		return -EINVAL;
	}

	resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL);
	if (NULL == resource)
	{
		return -ENOMEM;
	}

	atomic_set(&resource->locked, 0);
	kref_init(&resource->refcount);
	INIT_LIST_HEAD(&resource->link);
	resource->count = request->count;

	/* Allocate space to store dma_buf_fds received from user space */
	size = request->count * sizeof(int);
	resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->list_of_dma_buf_fds)
	{
		kfree(resource);
		return -ENOMEM;
	}

	/* Allocate space to store dma_buf pointers associated with dma_buf_fds */
	size = sizeof(struct dma_buf *) * request->count;
	resource->dma_bufs = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->dma_bufs)
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}
	/* Allocate space to store kds_resources associated with dma_buf_fds */
	size = sizeof(struct kds_resource *) * request->count;
	resource->kds_resources = kmalloc(size, GFP_KERNEL);

	if (NULL == resource->kds_resources)
	{
		kfree(resource->dma_bufs);
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource);
		return -ENOMEM;
	}

	/* Copy requested list of dma_buf_fds from user space */
	size = request->count * sizeof(int);
	if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size))
	{
		kfree(resource->list_of_dma_buf_fds);
		kfree(resource->dma_bufs);
		kfree(resource->kds_resources);
		kfree(resource);
		return -EFAULT;
	}
#if DMA_BUF_LOCK_DEBUG
	for (i = 0; i < request->count; i++)
	{
		printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]);
	}
#endif

	/* Add resource to global list */
	mutex_lock(&dma_buf_lock_mutex);

	list_add(&resource->link, &dma_buf_lock_resource_list);

	mutex_unlock(&dma_buf_lock_mutex);

	for (i = 0; i < request->count; i++)
	{
		/* Convert fd into dma_buf structure */
		resource->dma_bufs[i] = dma_buf_get(resource->list_of_dma_buf_fds[i]);

		if (IS_ERR(resource->dma_bufs[i]))
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}

		/*Get kds_resource associated with dma_buf */
		resource->kds_resources[i] = get_dma_buf_kds_resource(resource->dma_bufs[i]);

		if (NULL == resource->kds_resources[i])
		{
			mutex_lock(&dma_buf_lock_mutex);
			kref_put(&resource->refcount, dma_buf_lock_dounlock);
			mutex_unlock(&dma_buf_lock_mutex);
			return -EINVAL;
		}
#if DMA_BUF_LOCK_DEBUG
		printk("dma_buf_lock_dolock : dma_buf_fd %i dma_buf %X kds_resource %X\n", resource->list_of_dma_buf_fds[i],
		       (unsigned int)resource->dma_bufs[i], (unsigned int)resource->kds_resources[i]);
#endif	
	}

	kds_callback_init(&resource->cb, 1, dma_buf_lock_kds_callback);
	init_waitqueue_head(&resource->wait);

	kref_get(&resource->refcount);

	/* Create file descriptor associated with lock request */
	fd = anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops, 
	                      (void *)resource, 0);
	if (fd < 0)
	{
		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);
		return fd;
	}

	resource->exclusive = request->exclusive;

	/* Start locking process */
	ret = kds_async_waitall(&resource->resource_set,KDS_FLAG_LOCKED_ACTION,
	                        &resource->cb, resource, NULL,
	                        request->count,  &resource->exclusive,
	                        resource->kds_resources);

	if (IS_ERR_VALUE(ret))
	{
		put_unused_fd(fd);

		mutex_lock(&dma_buf_lock_mutex);
		kref_put(&resource->refcount, dma_buf_lock_dounlock);
		mutex_unlock(&dma_buf_lock_mutex);

		return ret;
	}

#if DMA_BUF_LOCK_DEBUG
	printk("dma_buf_lock_dolock : complete\n");
#endif
	mutex_lock(&dma_buf_lock_mutex);
	kref_put(&resource->refcount, dma_buf_lock_dounlock);
	mutex_unlock(&dma_buf_lock_mutex);

	return fd;
}
int fimc_is_video_s_ctrl(struct file *file,
	struct fimc_is_video_ctx *vctx,
	struct v4l2_control *ctrl)
{
	int ret = 0;
	/* hack for 64bit addr */
	ulong value_to_addr = 0;
	struct fimc_is_video *video;
	struct fimc_is_device_ischain *device;
	struct fimc_is_resourcemgr *resourcemgr;

	BUG_ON(!vctx);
	BUG_ON(!GET_DEVICE(vctx));
	BUG_ON(!GET_VIDEO(vctx));
	BUG_ON(!ctrl);

	device = GET_DEVICE(vctx);
	video = GET_VIDEO(vctx);
	resourcemgr = device->resourcemgr;

	switch (ctrl->id) {
	case V4L2_CID_IS_END_OF_STREAM:
		ret = fimc_is_ischain_open_wrap(device, true);
		if (ret) {
			merr("fimc_is_ischain_open_wrap is fail(%d)", device, ret);
			goto p_err;
		}
		break;
	case V4L2_CID_IS_SET_SETFILE:
		if (test_bit(FIMC_IS_ISCHAIN_START, &device->state)) {
			merr("device is already started, setfile applying is fail", device);
			ret = -EINVAL;
			goto p_err;
		}

		device->setfile = ctrl->value;
		break;
	case V4L2_CID_IS_HAL_VERSION:
		if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) {
			merr("hal version(%d) is invalid", device, ctrl->value);
			ret = -EINVAL;
			goto p_err;
		}
		resourcemgr->hal_version = ctrl->value;
		break;
	case V4L2_CID_IS_DEBUG_DUMP:
		info("Print fimc-is info dump by HAL");
		fimc_is_hw_logdump(device->interface);
		fimc_is_hw_regdump(device->interface);
		CALL_POPS(device, print_clk);

		if (ctrl->value)
			panic("intentional panic from camera HAL");
		break;
	case V4L2_CID_IS_DVFS_CLUSTER0:
	case V4L2_CID_IS_DVFS_CLUSTER1:
		fimc_is_resource_ioctl(resourcemgr, ctrl);
		break;
	case V4L2_CID_IS_DEBUG_SYNC_LOG:
		fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG);
		break;
	case V4L2_CID_IS_MAP_BUFFER:
		{
			struct fimc_is_queue *queue;
			struct fimc_is_framemgr *framemgr;
			struct fimc_is_frame *frame;
			struct dma_buf *dmabuf;
			struct dma_buf_attachment *attachment;
			dma_addr_t dva;
			struct v4l2_buffer *buf;
			struct v4l2_plane *planes;
			size_t size;
			u32 plane, group_id;

			size = sizeof(struct v4l2_buffer);
			buf = kmalloc(size, GFP_KERNEL);
			if (!buf) {
				mverr("kmalloc is fail(%p)", device, video, buf);
				ret = -EINVAL;
				goto p_err;
			}
			/* hack for 64bit addr */
			value_to_addr = ctrl->value;

			ret = copy_from_user(buf, (void __user *)value_to_addr, size);
			if (ret) {
				mverr("copy_from_user is fail(%d)", device, video, ret);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_OUTPUT(buf->type)) {
				mverr("capture video type is not supported", device, video);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
				mverr("single plane is not supported", device, video);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->index >= FRAMEMGR_MAX_REQUEST) {
				mverr("buffer index is invalid(%d)", device, video, buf->index);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->length > VIDEO_MAX_PLANES) {
				mverr("planes[%d] is invalid", device, video, buf->length);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			queue = GET_QUEUE(vctx);
			if (queue->vbq->memory != V4L2_MEMORY_DMABUF) {
				mverr("memory type(%d) is not supported", device, video, queue->vbq->memory);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			size = sizeof(struct v4l2_plane) * buf->length;
			planes = kmalloc(size, GFP_KERNEL);
			if (!planes) {
				mverr("kmalloc is fail(%p)", device, video, planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			ret = copy_from_user(planes, (void __user *)buf->m.planes, size);
			if (ret) {
				mverr("copy_from_user is fail(%d)", device, video, ret);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			framemgr = &queue->framemgr;
			frame = &framemgr->frame[buf->index];
			if (test_bit(FRAME_MAP_MEM, &frame->memory)) {
				mverr("this buffer(%d) is already mapped", device, video, buf->index);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only the last plane needs to be mapped */
			if (buf->length <= 1) {
				mverr("this buffer(%d) have no meta plane", device, video, buf->length);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			plane = buf->length - 1;
			dmabuf = dma_buf_get(planes[plane].m.fd);
			if (IS_ERR(dmabuf)) {
				mverr("dma_buf_get is fail(%p)", device, video, dmabuf);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			attachment = dma_buf_attach(dmabuf, &device->pdev->dev);
			if (IS_ERR(attachment)) {
				mverr("dma_buf_attach is fail(%p)", device, video, attachment);
				kfree(planes);
				kfree(buf);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only support output(read) video node */
			dva = ion_iovmm_map(attachment, 0, dmabuf->size, 0, plane);
			if (IS_ERR_VALUE(dva)) {
				mverr("ion_iovmm_map is fail(%pa)", device, video, &dva);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			group_id = GROUP_ID(device->group_3aa.id);
			ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size);
			if (ret) {
				mverr("fimc_is_itf_map is fail(%d)", device, video, ret);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				goto p_err;
			}

			mvinfo(" B%d.P%d MAP\n", device, video, buf->index, plane);
			set_bit(FRAME_MAP_MEM, &frame->memory);
			dma_buf_detach(dmabuf, attachment);
			dma_buf_put(dmabuf);
			kfree(planes);
			kfree(buf);
		}
		break;
	default:
		err("unsupported ioctl(0x%X)", ctrl->id);
		ret = -EINVAL;
		break;
	}

p_err:
	return ret;
}
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* really need? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dma_buf_unmap;
	}

	sgl = sgt->sgl;

	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -ENOMEM;
		goto err_free_block;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret =  -EFAULT;
		goto err_release_ump_handle;
	}

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dma_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}
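The TODO in the comment above spells out the usual importer rule: importing the same dma-buf twice should drop the extra reference taken by dma_buf_get() and hand back the existing handle instead of building a second one. A minimal sketch of that check, intended to sit right after the dma_buf_get() call; find_session_mem_by_dmabuf() is a hypothetical lookup over the session's memory list, not a real UMP interface:

	/* Hypothetical dedup check (see the TODO above) */
	ump_session_memory_list_element *existing;

	existing = find_session_mem_by_dmabuf(session_data, dma_buf);
	if (existing) {
		dma_buf_put(dma_buf);	/* drop the reference we just took */
		ump_dmabuf.ump_handle = (uint32_t)existing->mem;
		ump_dmabuf.size = ump_dd_size_get((ump_dd_handle *)existing->mem);
		if (copy_to_user(argument, &ump_dmabuf,
					sizeof(struct ump_uk_dmabuf)))
			return -EFAULT;
		return 0;
	}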
static int fimc_is_isp_video_s_ctrl(struct file *file, void *priv,
					struct v4l2_control *ctrl)
{
	int ret = 0;
	int i2c_clk;
	struct fimc_is_video *video;
	struct fimc_is_video_ctx *vctx = file->private_data;
	struct fimc_is_device_ischain *device;
	struct fimc_is_core *core;

	BUG_ON(!vctx);
	BUG_ON(!vctx->device);
	BUG_ON(!vctx->video);

	dbg_isp("%s\n", __func__);

	device = vctx->device;
	video = vctx->video;
	core = container_of(video, struct fimc_is_core, video_isp);

	if (core->resourcemgr.dvfs_ctrl.cur_int_qos == DVFS_L0)
		i2c_clk = I2C_L0;
	else
		i2c_clk = I2C_L1;

	switch (ctrl->id) {
	case V4L2_CID_IS_DEBUG_DUMP:
		info("Print fimc-is info dump by HAL");
		if (device != NULL) {
			fimc_is_hw_logdump(device->interface);
			fimc_is_hw_regdump(device->interface);
			CALL_POPS(device, print_clk, device->pdev);
		}
		if (ctrl->value) {
			err("BUG_ON from HAL");
			BUG();
		}
		break;
	case V4L2_CID_IS_DEBUG_SYNC_LOG:
		fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG);
		break;
	case V4L2_CID_IS_HAL_VERSION:
		if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) {
			merr("hal version(%d) is invalid", vctx, ctrl->value);
			ret = -EINVAL;
			goto p_err;
		}
		core->resourcemgr.hal_version = ctrl->value;
		break;
	case V4L2_CID_IS_G_CAPABILITY:
		ret = fimc_is_ischain_g_capability(device, ctrl->value);
		dbg_isp("V4L2_CID_IS_G_CAPABILITY : %X\n", ctrl->value);
		break;
	case V4L2_CID_IS_FORCE_DONE:
		set_bit(FIMC_IS_GROUP_REQUEST_FSTOP, &device->group_isp.state);
		break;
	case V4L2_CID_IS_DVFS_LOCK:
		ret = fimc_is_itf_i2c_lock(device, I2C_L0, true);
		if (ret) {
			err("fimc_is_itf_i2_clock fail\n");
			break;
		}
		pm_qos_add_request(&device->user_qos, PM_QOS_DEVICE_THROUGHPUT,
					ctrl->value);
		ret = fimc_is_itf_i2c_lock(device, I2C_L0, false);
		if (ret) {
			err("fimc_is_itf_i2c_unlock fail\n");
			break;
		}
		dbg_isp("V4L2_CID_IS_DVFS_LOCK : %d\n", ctrl->value);
		break;
	case V4L2_CID_IS_DVFS_UNLOCK:
		ret = fimc_is_itf_i2c_lock(device, i2c_clk, true);
		if (ret) {
			err("fimc_is_itf_i2_clock fail\n");
			break;
		}
		pm_qos_remove_request(&device->user_qos);
		ret = fimc_is_itf_i2c_lock(device, i2c_clk, false);
		if (ret) {
			err("fimc_is_itf_i2c_unlock fail\n");
			break;
		}
		dbg_isp("V4L2_CID_IS_DVFS_UNLOCK : %d I2C(%d)\n", ctrl->value, i2c_clk);
		break;
	case V4L2_CID_IS_SET_SETFILE:
		if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) {
			err("Setting setfile is only avaiable before starting device!! (0x%08x)",
					ctrl->value);
			ret = -EINVAL;
		} else {
			device->setfile = ctrl->value;
			minfo("[ISP:V] setfile: 0x%08X\n", vctx, ctrl->value);
		}
		break;
	case V4L2_CID_IS_COLOR_RANGE:
		if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) {
			err("failed to change color range: device started already (0x%08x)",
					ctrl->value);
			ret = -EINVAL;
		} else {
			device->color_range &= ~FIMC_IS_ISP_CRANGE_MASK;

			if (ctrl->value)
				device->color_range	|=
					(FIMC_IS_CRANGE_LIMITED << FIMC_IS_ISP_CRANGE_SHIFT);
		}
		break;
	case V4L2_CID_IS_MAP_BUFFER:
		{
			/* hack for 64bit addr */
			ulong value_to_addr;
			struct fimc_is_queue *queue;
			struct fimc_is_framemgr *framemgr;
			struct fimc_is_frame *frame;
			struct dma_buf *dmabuf;
			struct dma_buf_attachment *attachment;
			dma_addr_t dva;
			struct v4l2_buffer *buf;
			struct v4l2_plane *planes;
			size_t size;
			u32 write, plane, group_id;

			size = sizeof(struct v4l2_buffer);
			buf = kmalloc(size, GFP_KERNEL);
			if (!buf) {
				merr("kmalloc is fail", vctx);
				ret = -EINVAL;
				goto p_err;
			}
			/* hack for 64bit addr */
			value_to_addr = ctrl->value;

			ret = copy_from_user(buf, (void __user *)value_to_addr, size);
			if (ret) {
				merr("copy_from_user is fail(%d)", vctx, ret);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
				merr("single plane is not supported", vctx);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->index >= FRAMEMGR_MAX_REQUEST) {
				merr("buffer index is invalid(%d)", vctx, buf->index);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->length > VIDEO_MAX_PLANES) {
				merr("planes[%d] is invalid", vctx, buf->length);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			queue = GET_QUEUE(vctx, buf->type);
			if (queue->vbq->memory != V4L2_MEMORY_DMABUF) {
				merr("memory type(%d) is not supported", vctx, queue->vbq->memory);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			size = sizeof(struct v4l2_plane) * buf->length;
			planes = kmalloc(size, GFP_KERNEL);
			if (!planes) {
				merr("kmalloc is fail(%p)", vctx, planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			ret = copy_from_user(planes, (void __user *)buf->m.planes, size);
			if (ret) {
				merr("copy_from_user is fail(%d)", vctx, ret);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			framemgr = &queue->framemgr;
			frame = &framemgr->frame[buf->index];
			if (test_bit(FRAME_MAP_MEM, &frame->memory)) {
				merr("this buffer(%d) is already mapped", vctx, buf->index);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only the last plane needs to be mapped */
			if (buf->length >= 1) {
				plane = buf->length - 1;
			} else {
				merr("buffer length is not correct(%d)", vctx, buf->length);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			dmabuf = dma_buf_get(planes[plane].m.fd);
			if (IS_ERR(dmabuf)) {
				merr("dma_buf_get is fail(%p)", vctx, dmabuf);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			attachment = dma_buf_attach(dmabuf, &device->pdev->dev);
			if (IS_ERR(attachment)) {
				merr("dma_buf_attach is fail(%p)", vctx, attachment);
				kfree(planes);
				kfree(buf);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			write = !V4L2_TYPE_IS_OUTPUT(buf->type);
			dva = ion_iovmm_map(attachment, 0, dmabuf->size, write, plane);
			if (IS_ERR_VALUE(dva)) {
				merr("ion_iovmm_map is fail(%pa)", vctx, &dva);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			group_id = GROUP_ID(device->group_isp.id);
			ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size);
			if (ret) {
				merr("fimc_is_itf_map is fail(%d)", vctx, ret);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				goto p_err;
			}

			minfo("[ISP:V] buffer%d.plane%d mapping\n", vctx, buf->index, plane);
			set_bit(FRAME_MAP_MEM, &frame->memory);
			dma_buf_detach(dmabuf, attachment);
			dma_buf_put(dmabuf);
			kfree(planes);
			kfree(buf);
		}
		break;
	default:
		err("unsupported ioctl(%d)\n", ctrl->id);
		ret = -EINVAL;
		break;
	}

p_err:
	return ret;
}
int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
					struct drm_file *file,
					int prime_fd, unsigned int *handle)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	dmabuf = dma_buf_get(prime_fd);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * if there is same dmabuf as the one to prime_fd
	 * in file_priv->prime list then return the handle.
	 *
	 * Note:
	 * but if the prime_fd from user belongs to another process
	 * then there couldn't be the dmabuf in file_priv->prime list
	 * because file_priv is unique to process.
	 */
	ret = drm_prime_lookup_fd_handle_mapping(&file_priv->prime,
							dmabuf, handle);
	if (!ret) {
		/* drop reference we got above. */
		dma_buf_put(dmabuf);
		goto out;
	}

	attach = dma_buf_attach(dmabuf, drm_dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto fail_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto fail_unmap;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dmabuf->size);
	if (!exynos_gem_obj) {
		kfree(buffer);
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_handle_create(file, &exynos_gem_obj->base, handle);
	if (ret < 0)
		goto fail_handle;

	/* consider physically non-continuous memory with IOMMU. */

	buffer->dma_addr = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->sgt = sgt;

	/*
	 * import(fd to handle) means that the physical memory region
	 * from the sgt is being shared with others so shared_refcount
	 * should be 1.
	 */
	atomic_set(&buffer->shared_refcount, 1);

	exynos_gem_obj->base.import_attach = attach;

	ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
							dmabuf, *handle);
	if (ret < 0)
		goto fail_handle;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0) {
		drm_prime_remove_fd_handle_mapping(&file_priv->prime, dmabuf);
		goto fail_handle;
	}

	DRM_DEBUG_KMS("fd = %d, handle = %d, dma_addr = 0x%x, size = 0x%lx\n",
			prime_fd, *handle, buffer->dma_addr, buffer->size);

	drm_gem_object_unreference(&exynos_gem_obj->base);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;

fail_handle:
	drm_gem_object_unreference(&exynos_gem_obj->base);
	kfree(buffer);
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dmabuf, attach);
fail_put:
	dma_buf_put(dmabuf);
out:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}