/*
 * g2d_map_cmdlist_gem - map the GEM buffers referenced by a command list
 *
 * Resolves each GEM handle recorded at the tail of the command list to a
 * device (IOMMU) address and patches that address back into the command
 * stream in place of the handle.
 */
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
			       struct g2d_cmdlist_node *node,
			       struct drm_device *drm_dev,
			       struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	struct iommu_gem_map_params params;
	unsigned int gem_handle;
	dma_addr_t addr;
	int offset;
	int i;

	params.dev = g2d->dev;
	params.drm_dev = drm_dev;
	params.file = file;

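	/*
	 * GEM handles were stored at every other slot from the tail of
	 * the command list: data[last - 1], data[last - 3], ...
	 */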
	for (i = 0; i < node->map_nr; i++) {
		offset = cmdlist->last - (i * 2 + 1);
		gem_handle = cmdlist->data[offset];

		addr = exynos_drm_iommu_map_gem(&params,
						&g2d_priv->iommu_map_list,
						gem_handle,
						IOMMU_G2D);
		if (!addr) {
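			/*
			 * Record how many handles were mapped so far so
			 * that cleanup unmaps only those entries.
			 */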
			node->map_nr = i;
			return -EFAULT;
		}

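		/* patch the device address over the handle in the stream */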
		cmdlist->data[offset] = addr;
		node->gem_obj[i] = params.gem_obj;
	}

	return 0;
}

/*
 * exynos_dmabuf_prime_import - import a dma-buf as an exynos GEM object
 *
 * If the dma-buf was exported by this device, the underlying GEM object
 * is reused directly. Otherwise the dma-buf is attached, its sg table is
 * mapped and the result is wrapped in a newly allocated GEM object.
 */
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
						  struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_private *private = drm_dev->dev_private;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * object takes a reference on the GEM object itself
			 * instead of on the dmabuf's f_count.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kcalloc(sgt->nents, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

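	/* record the DMA layout described by the imported sg table */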
	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i;

		buffer->dma_addr = sg_dma_address(sgt->sgl);
		for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
		}

		/*
		 * This buffer could be CONTIG or NONCONTIG; without a way
		 * for the exporter to tell the importer which type it is,
		 * assume NONCONTIG. TODO
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

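	/* hook the imported backing storage up to the new GEM object */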
	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	if (private->vmm) {
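		/*
		 * An IOMMU is available: map the buffer into the device's
		 * address space and do DMA through that address instead.
		 */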
		exynos_gem_obj->vmm = private->vmm;
		buffer->dev_addr = exynos_drm_iommu_map_gem(drm_dev,
							&exynos_gem_obj->base);
		if (!buffer->dev_addr) {
			DRM_ERROR("failed to map gem with iommu table.\n");
			exynos_drm_free_buf(drm_dev, exynos_gem_obj->flags,
					    buffer);
			drm_gem_object_release(&exynos_gem_obj->base);
			/* release() does not free the object itself */
			kfree(exynos_gem_obj);
			ret = -EFAULT;
			goto err_free_pages;
		}

		buffer->dma_addr = buffer->dev_addr;
	}

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
				buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
err_free_buffer:
	kfree(buffer);
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}