Code example #1
File: gem.c Project: JaneDu/ath
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
Code example #2
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sg_table;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->sg_table.sgl)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sg_table = dma_buf_map_attachment(buf->db_attach,
				buf->write ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(sg_table)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->sg_table = *sg_table;
	buf->num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;

	return 0;
}
Code example #3
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}
Code example #4
static int vb2_ion_map_dmabuf(void *buf_priv)
{
    struct vb2_ion_buf *buf = buf_priv;

    if (WARN_ON(!buf->attachment)) {
        pr_err("%s error: trying to map a non attached buffer\n", __func__);
        return -EINVAL;
    }

    if (WARN_ON(buf->cookie.sgt)) {
        pr_err("%s error: dmabuf buffer is already mapped\n", __func__);
        return 0;
    }

    buf->cookie.sgt = dma_buf_map_attachment(buf->attachment,
            buf->direction);
    if (IS_ERR_OR_NULL(buf->cookie.sgt)) {
        pr_err("%s error: fail to get dmabuf scatterlist\n", __func__);
        return -EINVAL;
    }

    buf->cookie.offset = 0;

    return 0;
}
Code example #5
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret) {
		goto fail_unmap;
	}

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Code example #6
/**
 * rppc_alloc_dmabuf - import a buffer and store in a rppc buffer descriptor
 * @rpc: rppc instance handle
 * @fd: dma_buf file descriptor
 * @autoreg: flag indicating the mode of creation
 *
 * This function primarily imports a buffer into the driver and holds
 * a reference to the buffer on behalf of the remote processor. The
 * buffer to be imported is represented by a dma-buf file descriptor,
 * and as such is agnostic of the buffer allocator and/or exporter.
 * The buffer is imported using the dma-buf api, and a driver specific
 * buffer descriptor is used to store the imported buffer properties.
 * The imported buffers are all stored in a rppc instance specific
 * idr, to be used for looking up and cleaning up the driver buffer
 * descriptors.
 *
 * The @autoreg field is used to dictate the manner in which the buffer
 * is imported. The user-side can pre-register the buffers with the driver
 * (which will import the buffers) if the application is going to use
 * these repeatedly in consecutive function invocations. The buffers
 * are auto-imported if the user-side has not registered them previously
 * and are un-imported once the remote function call returns.
 *
 * This function is to be called only after checking that buffer has
 * not been imported already (see rppc_find_dmabuf).
 *
 * Return: allocated rppc_dma_buf or error
 */
struct rppc_dma_buf *rppc_alloc_dmabuf(struct rppc_instance *rpc, int fd,
				       bool autoreg)
{
	struct rppc_dma_buf *dma;
	void *ret;
	int id;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return ERR_PTR(-ENOMEM);

	dma->fd = fd;
	dma->autoreg = !!autoreg;
	dma->buf = dma_buf_get(dma->fd);
	if (IS_ERR(dma->buf)) {
		ret = dma->buf;
		goto free_dma;
	}

	dma->attach = dma_buf_attach(dma->buf, rpc->dev);
	if (IS_ERR(dma->attach)) {
		ret = dma->attach;
		goto put_buf;
	}

	dma->sgt = dma_buf_map_attachment(dma->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(dma->sgt)) {
		ret = dma->sgt;
		goto detach_buf;
	}

	dma->pa = sg_dma_address(dma->sgt->sgl);
	mutex_lock(&rpc->lock);
	id = idr_alloc(&rpc->dma_idr, dma, 0, 0, GFP_KERNEL);
	dma->id = id;
	mutex_unlock(&rpc->lock);
	if (id < 0) {
		ret = ERR_PTR(id);
		goto unmap_buf;
	}

	return dma;

unmap_buf:
	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
detach_buf:
	dma_buf_detach(dma->buf, dma->attach);
put_buf:
	dma_buf_put(dma->buf);
free_dma:
	kfree(dma);

	return ret;
}
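The kernel-doc above notes that auto-imported buffers are released again once the remote function call returns. For reference, a minimal teardown sketch that mirrors rppc_alloc_dmabuf() in reverse might look like the following; the helper name and the idr/field usage are assumptions derived from the allocation path above, not code taken from the rppc driver.

/*
 * Hypothetical cleanup mirroring rppc_alloc_dmabuf(): drop the idr entry,
 * then undo map/attach/get in reverse order and free the descriptor.
 */
static void rppc_free_dmabuf_sketch(struct rppc_instance *rpc,
				    struct rppc_dma_buf *dma)
{
	mutex_lock(&rpc->lock);
	idr_remove(&rpc->dma_idr, dma->id);
	mutex_unlock(&rpc->lock);

	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma->buf, dma->attach);
	dma_buf_put(dma->buf);
	kfree(dma);
}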
Code example #7
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	return 0;
}
Code example #8
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Code example #9
static unsigned int fimg2d_map_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma, int fd,
		enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	dma->direction = direction;
	dma->dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma->dma_buf)) {
		dev_err(info->dev, "dma_buf_get() failed: %ld\n",
				PTR_ERR(dma->dma_buf));
		goto err_buf_get;
	}

	dma->attachment = dma_buf_attach(dma->dma_buf, info->dev);
	if (IS_ERR_OR_NULL(dma->attachment)) {
		dev_err(info->dev, "dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma->attachment));
		goto err_buf_attach;
	}

	dma->sg_table = dma_buf_map_attachment(dma->attachment,
			direction);
	if (IS_ERR_OR_NULL(dma->sg_table)) {
		dev_err(info->dev, "dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma->sg_table));
		goto err_buf_map_attachment;
	}

	dma_addr = iovmm_map(info->dev, dma->sg_table->sgl, 0,
			dma->dma_buf->size);
	if (!dma_addr || IS_ERR_VALUE(dma_addr)) {
		dev_err(info->dev, "iovmm_map() failed: %d\n", dma->dma_addr);
		goto err_iovmm_map;
	}

	dma->dma_addr = dma_addr;
	return dma->dma_buf->size;

err_iovmm_map:
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			direction);
err_buf_map_attachment:
	dma_buf_detach(dma->dma_buf, dma->attachment);
err_buf_attach:
	dma_buf_put(dma->dma_buf);
err_buf_get:
	return 0;
}
Code example #10
dma_addr_t decon_map_sec_dma_buf(struct dma_buf *dbuf, int plane)
{
	struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */

	if (!dbuf || (plane >= MAX_BUF_PLANE_CNT) || (plane < 0))
		return -EINVAL;

	dma.ion_handle = NULL;
	dma.fence = NULL;

	dma.dma_buf = dbuf;
	dma.attachment = dma_buf_attach(dbuf, decon->dev);

	if (IS_ERR(dma.attachment)) {
		decon_err("dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma.attachment));
		goto err_buf_map_attach;
	}

	dma.sg_table = dma_buf_map_attachment(dma.attachment,
			DMA_TO_DEVICE);

	if (IS_ERR(dma.sg_table)) {
		decon_err("dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma.sg_table));
		goto err_buf_map_attachment;
	}

	dma.dma_addr = ion_iovmm_map(dma.attachment, 0,
			dma.dma_buf->size, DMA_TO_DEVICE, plane);

	if (IS_ERR_VALUE(dma.dma_addr)) {
		decon_err("iovmm_map() failed: %pa\n", &dma.dma_addr);
		goto err_iovmm_map;
	}

	exynos_ion_sync_dmabuf_for_device(decon->dev, dma.dma_buf,
			dma.dma_buf->size, DMA_TO_DEVICE);

	return dma.dma_addr;

err_iovmm_map:
	dma_buf_unmap_attachment(dma.attachment, dma.sg_table,
			DMA_TO_DEVICE);
err_buf_map_attachment:
	dma_buf_detach(dma.dma_buf, dma.attachment);
err_buf_map_attach:
	return 0;
}
Code example #11
File: dmabufr.c Project: prabindh/dmabufr
int ioctl_use_buffer(int fd)
{
/*
buffer-user requests access to the buffer

   Whenever a buffer-user wants to use the buffer for any DMA, it asks for
   access to the buffer using dma_buf_map_attachment API. At least one attach to
   the buffer must have happened before map_dma_buf can be called.

   Interface:
      struct sg_table * dma_buf_map_attachment(struct dma_buf_attachment *,
                                         enum dma_data_direction);
*/
	curr_sg_table = dma_buf_map_attachment(curr_dma_buf_attachment, DMA_BIDIRECTIONAL);
	/* todo - use these sg buffers to do some operation on the device */
	return 0;
}
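The comment above only documents the map step, and this example keeps the attachment in globals defined elsewhere in the project. For orientation, a self-contained sketch of the complete importer sequence the other examples follow (get, attach, map, use, unmap, detach, put) could look like this; the function name, the device argument, and the collapsed error handling are illustrative assumptions, not code from the dmabufr project.

/*
 * Hypothetical end-to-end import of a dma-buf fd for one DMA operation.
 * Requires <linux/dma-buf.h>; 'dev' is the importing struct device.
 */
static int dmabuf_import_sketch(struct device *dev, int fd)
{
	struct dma_buf *dbuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dbuf = dma_buf_get(fd);			/* take a reference from the fd */
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);

	attach = dma_buf_attach(dbuf, dev);	/* attach the importing device */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program sg_dma_address(sgt->sgl) / sg_dma_len(sgt->sgl) into the device ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dbuf, attach);
put:
	dma_buf_put(dbuf);
	return ret;
}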
Code example #12
File: nouveau_prime.c Project: aywq2008/omniplay
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				dma_buf_put(dma_buf);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Code example #13
static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
                          struct adf_buffer_mapping *mapping)
{
    int ret = 0;
    size_t i;

    for (i = 0; i < buf->n_planes; i++) {
        struct dma_buf_attachment *attachment;
        struct sg_table *sg_table;

        attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
        if (IS_ERR(attachment)) {
            ret = PTR_ERR(attachment);
            dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
                    i, ret);
            goto done;
        }
        mapping->attachments[i] = attachment;

        sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
        if (IS_ERR(sg_table)) {
            ret = PTR_ERR(sg_table);
            dev_err(&dev->base.dev, "mapping plane %zu failed: %d",
                    i, ret);
            goto done;
        } else if (!sg_table) {
            ret = -ENOMEM;
            dev_err(&dev->base.dev, "mapping plane %zu failed\n",
                    i);
            goto done;
        }
        mapping->sg_tables[i] = sg_table;
    }

done:
    if (ret < 0) {
        adf_buffer_mapping_cleanup(mapping, buf);
        memset(mapping, 0, sizeof(*mapping));
    }

    return ret;
}
Code example #14
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			dma_buf_put(dma_buf);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Code example #15
File: util.c Project: Lloir/nvidia-linux-3.10
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 fd,
			    struct tegra_dc_dmabuf **dc_buf,
			    dma_addr_t *phys_addr)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_dmabuf *dc_dmabuf;

	*dc_buf = NULL;
	*phys_addr = -1;
	if (!fd)
		return 0;

	dc_dmabuf = kzalloc(sizeof(*dc_dmabuf), GFP_KERNEL);
	if (!dc_dmabuf)
		return -ENOMEM;

	dc_dmabuf->buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dc_dmabuf->buf))
		goto buf_fail;

	dc_dmabuf->attach = dma_buf_attach(dc_dmabuf->buf, ext->dev->parent);
	if (IS_ERR_OR_NULL(dc_dmabuf->attach))
		goto attach_fail;

	dc_dmabuf->sgt = dma_buf_map_attachment(dc_dmabuf->attach,
						DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(dc_dmabuf->sgt))
		goto sgt_fail;

	*phys_addr = sg_dma_address(dc_dmabuf->sgt->sgl);
	*dc_buf = dc_dmabuf;

	return 0;
sgt_fail:
	dma_buf_detach(dc_dmabuf->buf, dc_dmabuf->attach);
attach_fail:
	dma_buf_put(dc_dmabuf->buf);
buf_fail:
	kfree(dc_dmabuf);
	return -ENOMEM;
}
Code example #16
int m2m1shot_map_dma_buf(struct device *dev,
			struct m2m1shot_buffer_plane_dma *plane,
			enum dma_data_direction dir)
{
	if (plane->dmabuf) {
		plane->sgt = dma_buf_map_attachment(plane->attachment, dir);
		if (IS_ERR(plane->sgt)) {
			dev_err(dev, "%s: failed to map attacment of dma_buf\n",
					__func__);
			return PTR_ERR(plane->sgt);
		}

		exynos_ion_sync_dmabuf_for_device(dev, plane->dmabuf,
							plane->bytes_used, dir);
	} else { /* userptr */
		exynos_ion_sync_sg_for_device(dev, plane->bytes_used,
							plane->sgt, dir);
	}

	return 0;
}
Code example #17
static void vb2_dma_contig_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct dma_buf *dmabuf;
	struct sg_table *sg;
	enum dma_data_direction dir;

	if (!buf || !buf->db_attach)
		return;

	WARN_ON(buf->dma_addr);

	dmabuf = buf->db_attach->dmabuf;

	/* TODO need a way to know if we are camera or display, etc.. */
	dir = DMA_BIDIRECTIONAL;

	/* get the associated sg for this buffer */
	sg = dma_buf_map_attachment(buf->db_attach, dir);
	if (!sg)
		return;

	/*
	 *  convert sglist to paddr:
	 *  Assumption: for dma-contig, dmabuf would map to single entry
	 *  Will print a warning if it has more than one.
	 */
	if (sg->nents > 1)
		printk(KERN_WARNING
			"dmabuf scatterlist has more than 1 entry\n");

	buf->dma_addr = sg_dma_address(sg->sgl);
	buf->size = sg_dma_len(sg->sgl);

	/* save this sg in dmabuf for put_scatterlist */
	dmabuf->priv = sg;
}
Code example #18
int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
					struct drm_file *file,
					int prime_fd, unsigned int *handle)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	dmabuf = dma_buf_get(prime_fd);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * if there is same dmabuf as the one to prime_fd
	 * in file_priv->prime list then return the handle.
	 *
	 * Note:
	 * but if the prime_fd from user belongs to another process
	 * then there couldn't be the dmabuf in file_priv->prime list
	 * because file_priv is unique to process.
	 */
	ret = drm_prime_lookup_fd_handle_mapping(&file_priv->prime,
							dmabuf, handle);
	if (!ret) {
		/* drop reference we got above. */
		dma_buf_put(dmabuf);
		goto out;
	}

	attach = dma_buf_attach(dmabuf, drm_dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto fail_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto fail_unmap;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dmabuf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_handle_create(file, &exynos_gem_obj->base, handle);
	if (ret < 0)
		goto fail_handle;

	/* consider physically non-continuous memory with IOMMU. */

	buffer->dma_addr = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->sgt = sgt;

	/*
	 * import(fd to handle) means that the physical memory region
	 * from the sgt is being shared with others so shared_refcount
	 * should be 1.
	 */
	atomic_set(&buffer->shared_refcount, 1);

	exynos_gem_obj->base.import_attach = attach;

	ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
							dmabuf, *handle);
	if (ret < 0)
		goto fail_handle;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0) {
		drm_prime_remove_fd_handle_mapping(&file_priv->prime, dmabuf);
		goto fail_handle;
	}

	DRM_DEBUG_KMS("fd = %d, handle = %d, dma_addr = 0x%x, size = 0x%lx\n",
			prime_fd, *handle, buffer->dma_addr, buffer->size);

	drm_gem_object_unreference(&exynos_gem_obj->base);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;

fail_handle:
	drm_gem_object_unreference(&exynos_gem_obj->base);
	kfree(buffer);
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dmabuf, attach);
fail_put:
	dma_buf_put(dmabuf);
out:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}
Code example #19
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Code example #20
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0)
		goto err_release_gem;

	DRM_DEBUG_PRIME("ump id = %d, dma_addr = 0x%x, size = 0x%lx\n",
			exynos_gem_obj->priv_id,
			buffer->dma_addr,
			buffer->size);

	return &exynos_gem_obj->base;

err_release_gem:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Code example #21
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Code example #22
File: gem.c Project: mikuhatsune001/linux2.6.32
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}
Code example #23
struct sg_table *nvhost_dmabuf_pin(struct mem_handle *handle)
{
	return dma_buf_map_attachment(to_dmabuf_att(handle),
				DMA_BIDIRECTIONAL);
}
Code example #24
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* really need? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dma_buf_unmap;
	}

	sgl = sgt->sgl;

	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -EFAULT;
		goto err_free_block;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret =  -EFAULT;
		goto err_release_ump_handle;
	}

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dma_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}