Example #1
File: gem.c Project: JaneDu/ath
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
Example #2
static void *vb2_ion_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
                    unsigned long size, int write)
{
    struct vb2_ion_buf *buf;
    struct dma_buf_attachment *attachment;

    if (dbuf->size < size) {
        printk(KERN_ERR "%s: size error(buf->size %lu/request size %lu)\n",
                __func__, (unsigned long)dbuf->size, size);
        return ERR_PTR(-EFAULT);
    }

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf) {
        printk(KERN_ERR "%s: error line %d\n", __func__, __LINE__);
        return ERR_PTR(-ENOMEM);
    }

    buf->ctx = alloc_ctx;

    attachment = dma_buf_attach(dbuf, buf->ctx->dev);
    if (IS_ERR(attachment)) {
        printk(KERN_ERR "%s error: fail to attach dmabuf\n", __func__);
        kfree(buf);
        return attachment;
    }

    buf->direction = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
    buf->size      = size;
    buf->dma_buf   = dbuf;
    buf->attachment = attachment;

    return buf;
}
Example #3
static void *vb2_dma_contig_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, conf->dev);
	if (IS_ERR(dba)) {
		printk(KERN_ERR "failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->conf = conf;
	buf->size = dba->dmabuf->size;
	buf->db_attach = dba;
	buf->dma_addr = 0; /* dma_addr is available only after acquire */

	return buf;
}
Example #4
static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
Example #5
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret) {
		goto fail_unmap;
	}

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #6
/**
 * rppc_alloc_dmabuf - import a buffer and store in a rppc buffer descriptor
 * @rpc: rppc instance handle
 * @fd: dma_buf file descriptor
 * @autoreg: flag indicating the mode of creation
 *
 * This function primarily imports a buffer into the driver and holds
 * a reference to the buffer on behalf of the remote processor. The
 * buffer to be imported is represented by a dma-buf file descriptor,
 * and as such is agnostic of the buffer allocator and/or exporter.
 * The buffer is imported using the dma-buf api, and a driver specific
 * buffer descriptor is used to store the imported buffer properties.
 * The imported buffers are all stored in a rppc instance specific
 * idr, to be used for looking up and cleaning up the driver buffer
 * descriptors.
 *
 * The @autoreg field is used to dictate the manner in which the buffer
 * is imported. The user-side can pre-register the buffers with the driver
 * (which will import the buffers) if the application is going to use
 * these repeatedly in consecutive function invocations. The buffers
 * are auto-imported if the user-side has not registered them previously
 * and are un-imported once the remote function call returns.
 *
 * This function is to be called only after checking that buffer has
 * not been imported already (see rppc_find_dmabuf).
 *
 * Return: allocated rppc_dma_buf or error
 */
struct rppc_dma_buf *rppc_alloc_dmabuf(struct rppc_instance *rpc, int fd,
				       bool autoreg)
{
	struct rppc_dma_buf *dma;
	void *ret;
	int id;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return ERR_PTR(-ENOMEM);

	dma->fd = fd;
	dma->autoreg = !!autoreg;
	dma->buf = dma_buf_get(dma->fd);
	if (IS_ERR(dma->buf)) {
		ret = dma->buf;
		goto free_dma;
	}

	dma->attach = dma_buf_attach(dma->buf, rpc->dev);
	if (IS_ERR(dma->attach)) {
		ret = dma->attach;
		goto put_buf;
	}

	dma->sgt = dma_buf_map_attachment(dma->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(dma->sgt)) {
		ret = dma->sgt;
		goto detach_buf;
	}

	dma->pa = sg_dma_address(dma->sgt->sgl);
	mutex_lock(&rpc->lock);
	id = idr_alloc(&rpc->dma_idr, dma, 0, 0, GFP_KERNEL);
	dma->id = id;
	mutex_unlock(&rpc->lock);
	if (id < 0) {
		ret = ERR_PTR(id);
		goto unmap_buf;
	}

	return dma;

unmap_buf:
	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
detach_buf:
	dma_buf_detach(dma->buf, dma->attach);
put_buf:
	dma_buf_put(dma->buf);
free_dma:
	kfree(dma);

	return ret;
}
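
The kerneldoc above describes the import half of the lifecycle; for illustration, here is a minimal sketch of the matching teardown. It is not part of the driver above: the helper name rppc_free_dmabuf_sketch is hypothetical, and it only assumes the rppc_dma_buf fields (buf, attach, sgt, id) and the rpc->dma_idr/lock used in the example.

/*
 * Hypothetical teardown counterpart to rppc_alloc_dmabuf() (a sketch, not
 * part of the original driver): drop the idr entry, then undo the import
 * in reverse order.
 */
static void rppc_free_dmabuf_sketch(struct rppc_instance *rpc,
				    struct rppc_dma_buf *dma)
{
	mutex_lock(&rpc->lock);
	idr_remove(&rpc->dma_idr, dma->id);	/* forget the descriptor */
	mutex_unlock(&rpc->lock);

	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma->buf, dma->attach);
	dma_buf_put(dma->buf);			/* release the reference taken at import */
	kfree(dma);
}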
Example #7
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #8
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #9
dma_addr_t decon_map_sec_dma_buf(struct dma_buf *dbuf, int plane)
{
	struct decon_device *decon = get_decon_drvdata(0); /* 0: decon Int ID */

	if (!dbuf || (plane >= MAX_BUF_PLANE_CNT) || (plane < 0))
		return -EINVAL;

	dma.ion_handle = NULL;
	dma.fence = NULL;

	dma.dma_buf = dbuf;
	dma.attachment = dma_buf_attach(dbuf, decon->dev);

	if (IS_ERR(dma.attachment)) {
		decon_err("dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma.attachment));
		goto err_buf_map_attach;
	}

	dma.sg_table = dma_buf_map_attachment(dma.attachment,
			DMA_TO_DEVICE);

	if (IS_ERR(dma.sg_table)) {
		decon_err("dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma.sg_table));
		goto err_buf_map_attachment;
	}

	dma.dma_addr = ion_iovmm_map(dma.attachment, 0,
			dma.dma_buf->size, DMA_TO_DEVICE, plane);

	if (IS_ERR_VALUE(dma.dma_addr)) {
		decon_err("iovmm_map() failed: %pa\n", &dma.dma_addr);
		goto err_iovmm_map;
	}

	exynos_ion_sync_dmabuf_for_device(decon->dev, dma.dma_buf,
			dma.dma_buf->size, DMA_TO_DEVICE);

	return dma.dma_addr;

err_iovmm_map:
	dma_buf_unmap_attachment(dma.attachment, dma.sg_table,
			DMA_TO_DEVICE);
err_buf_map_attachment:
	dma_buf_detach(dma.dma_buf, dma.attachment);
err_buf_map_attach:
	return 0;
}
Example #10
static unsigned int fimg2d_map_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma, int fd,
		enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	dma->direction = direction;
	dma->dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma->dma_buf)) {
		dev_err(info->dev, "dma_buf_get() failed: %ld\n",
				PTR_ERR(dma->dma_buf));
		goto err_buf_get;
	}

	dma->attachment = dma_buf_attach(dma->dma_buf, info->dev);
	if (IS_ERR_OR_NULL(dma->attachment)) {
		dev_err(info->dev, "dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma->attachment));
		goto err_buf_attach;
	}

	dma->sg_table = dma_buf_map_attachment(dma->attachment,
			direction);
	if (IS_ERR_OR_NULL(dma->sg_table)) {
		dev_err(info->dev, "dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma->sg_table));
		goto err_buf_map_attachment;
	}

	dma_addr = iovmm_map(info->dev, dma->sg_table->sgl, 0,
			dma->dma_buf->size);
	if (!dma_addr || IS_ERR_VALUE(dma_addr)) {
		dev_err(info->dev, "iovmm_map() failed: %d\n", dma->dma_addr);
		goto err_iovmm_map;
	}

	dma->dma_addr = dma_addr;
	return dma->dma_buf->size;

err_iovmm_map:
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			direction);
err_buf_map_attachment:
	dma_buf_detach(dma->dma_buf, dma->attachment);
err_buf_attach:
	dma_buf_put(dma->dma_buf);
err_buf_get:
	return 0;
}
Example #11
struct mem_handle *nvhost_dmabuf_get(u32 id, struct nvhost_device *dev)
{
	struct mem_handle *h;
	struct dma_buf *buf;

	buf = dma_buf_get(to_dmabuf_fd(id));
	if (IS_ERR_OR_NULL(buf))
		return (struct mem_handle *)buf;
	else {
		h = (struct mem_handle *)dma_buf_attach(buf, &dev->dev);
		if (IS_ERR_OR_NULL(h))
			dma_buf_put(buf);
	}

	return (struct mem_handle *) ((u32)h | mem_mgr_type_dmabuf);
}
Example #12
struct mem_handle *nvhost_dmabuf_get(ulong id, struct platform_device *dev)
{
	struct mem_handle *h;
	struct dma_buf *buf;

	buf = dma_buf_get(to_dmabuf_fd(id));
	if (IS_ERR(buf))
		return (struct mem_handle *)buf;
	else {
		h = (struct mem_handle *)dma_buf_attach(buf, &dev->dev);
		if (IS_ERR(h)) {
			dma_buf_put(buf);
			return (struct mem_handle *)h;
		}
	}

	return (struct mem_handle *) ((uintptr_t)h | mem_mgr_type_dmabuf);
}
Example #13
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);


	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		kfree(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #14
struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct nouveau_bo *nvbo;
	int ret;

	if (dma_buf->ops == &nouveau_dmabuf_ops) {
		nvbo = dma_buf->priv;
		if (nvbo->gem) {
			if (nvbo->gem->dev == dev) {
				drm_gem_object_reference(nvbo->gem);
				dma_buf_put(dma_buf);
				return nvbo->gem;
			}
		}
	}
	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
	if (ret)
		goto fail_unmap;

	nvbo->gem->import_attach = attach;

	return nvbo->gem;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #15
struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct radeon_bo *bo;
	int ret;

	if (dma_buf->ops == &radeon_dmabuf_ops) {
		bo = dma_buf->priv;
		if (bo->gem_base.dev == dev) {
			drm_gem_object_reference(&bo->gem_base);
			dma_buf_put(dma_buf);
			return &bo->gem_base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
	if (ret)
		goto fail_unmap;

	bo->gem_base.import_attach = attach;

	return &bo->gem_base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #16
static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
                          struct adf_buffer_mapping *mapping)
{
    int ret = 0;
    size_t i;

    for (i = 0; i < buf->n_planes; i++) {
        struct dma_buf_attachment *attachment;
        struct sg_table *sg_table;

        attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
        if (IS_ERR(attachment)) {
            ret = PTR_ERR(attachment);
            dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
                    i, ret);
            goto done;
        }
        mapping->attachments[i] = attachment;

        sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
        if (IS_ERR(sg_table)) {
            ret = PTR_ERR(sg_table);
            dev_err(&dev->base.dev, "mapping plane %zu failed: %d",
                    i, ret);
            goto done;
        } else if (!sg_table) {
            ret = -ENOMEM;
            dev_err(&dev->base.dev, "mapping plane %zu failed\n",
                    i);
            goto done;
        }
        mapping->sg_tables[i] = sg_table;
    }

done:
    if (ret < 0) {
        adf_buffer_mapping_cleanup(mapping, buf);
        memset(mapping, 0, sizeof(*mapping));
    }

    return ret;
}
Example #17
int tegra_dc_ext_pin_window(struct tegra_dc_ext_user *user, u32 fd,
			    struct tegra_dc_dmabuf **dc_buf,
			    dma_addr_t *phys_addr)
{
	struct tegra_dc_ext *ext = user->ext;
	struct tegra_dc_dmabuf *dc_dmabuf;

	*dc_buf = NULL;
	*phys_addr = -1;
	if (!fd)
		return 0;

	dc_dmabuf = kzalloc(sizeof(*dc_dmabuf), GFP_KERNEL);
	if (!dc_dmabuf)
		return -ENOMEM;

	dc_dmabuf->buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dc_dmabuf->buf))
		goto buf_fail;

	dc_dmabuf->attach = dma_buf_attach(dc_dmabuf->buf, ext->dev->parent);
	if (IS_ERR_OR_NULL(dc_dmabuf->attach))
		goto attach_fail;

	dc_dmabuf->sgt = dma_buf_map_attachment(dc_dmabuf->attach,
						DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(dc_dmabuf->sgt))
		goto sgt_fail;

	*phys_addr = sg_dma_address(dc_dmabuf->sgt->sgl);
	*dc_buf = dc_dmabuf;

	return 0;
sgt_fail:
	dma_buf_detach(dc_dmabuf->buf, dc_dmabuf->attach);
attach_fail:
	dma_buf_put(dc_dmabuf->buf);
buf_fail:
	kfree(dc_dmabuf);
	return -ENOMEM;
}
Example #18
int ioctl_connect(int fd)
{
/*
 Each buffer-user 'connects' itself to the buffer

   Each buffer-user now gets a reference to the buffer, using the fd passed to
   it.

   Interface:
      struct dma_buf *dma_buf_get(int fd)

After this, the buffer-user needs to attach its device with the buffer, which
   helps the exporter to know of device buffer constraints.

   Interface:
      struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                                struct device *dev)
*/
	struct dma_buf *dma_buf_stored = dma_buf_get(fd); //todo - store this in some context

	if (IS_ERR(dma_buf_stored))
		return PTR_ERR(dma_buf_stored);

	curr_dma_buf_attachment = dma_buf_attach(dma_buf_stored, dmabufr_device);
	if (IS_ERR(curr_dma_buf_attachment)) {
		dma_buf_put(dma_buf_stored);
		return PTR_ERR(curr_dma_buf_attachment);
	}

	return 0;
}
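
The comment above only covers dma_buf_get() and dma_buf_attach(); the remaining importer-side steps (map, unmap, detach, release) appear throughout the other examples. For reference, here is a minimal self-contained sketch of the whole cycle using only the documented dma-buf calls; the function name and the my_dev parameter are placeholders, not taken from any driver above.

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>

/* Sketch: one-shot import of a dma-buf fd for DMA by 'my_dev'. */
static int dmabuf_import_example(int fd, struct device *my_dev)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);			/* take a reference from the fd */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, my_dev);	/* announce our device to the exporter */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_detach;
	}

	/* ... program the device with sg_dma_address(sgt->sgl) ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_detach:
	dma_buf_detach(dmabuf, attach);
err_put:
	dma_buf_put(dmabuf);
	return ret;
}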
Example #19
int fimc_is_video_s_ctrl(struct file *file,
	struct fimc_is_video_ctx *vctx,
	struct v4l2_control *ctrl)
{
	int ret = 0;
	/* hack for 64bit addr */
	ulong value_to_addr = 0;
	struct fimc_is_video *video;
	struct fimc_is_device_ischain *device;
	struct fimc_is_resourcemgr *resourcemgr;

	BUG_ON(!vctx);
	BUG_ON(!GET_DEVICE(vctx));
	BUG_ON(!GET_VIDEO(vctx));
	BUG_ON(!ctrl);

	device = GET_DEVICE(vctx);
	video = GET_VIDEO(vctx);
	resourcemgr = device->resourcemgr;

	switch (ctrl->id) {
	case V4L2_CID_IS_END_OF_STREAM:
		ret = fimc_is_ischain_open_wrap(device, true);
		if (ret) {
			merr("fimc_is_ischain_open_wrap is fail(%d)", device, ret);
			goto p_err;
		}
		break;
	case V4L2_CID_IS_SET_SETFILE:
		if (test_bit(FIMC_IS_ISCHAIN_START, &device->state)) {
			merr("device is already started, setfile applying is fail", device);
			ret = -EINVAL;
			goto p_err;
		}

		device->setfile = ctrl->value;
		break;
	case V4L2_CID_IS_HAL_VERSION:
		if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) {
			merr("hal version(%d) is invalid", device, ctrl->value);
			ret = -EINVAL;
			goto p_err;
		}
		resourcemgr->hal_version = ctrl->value;
		break;
	case V4L2_CID_IS_DEBUG_DUMP:
		info("Print fimc-is info dump by HAL");
		fimc_is_hw_logdump(device->interface);
		fimc_is_hw_regdump(device->interface);
		CALL_POPS(device, print_clk);

		if (ctrl->value)
			panic("intentional panic from camera HAL");
		break;
	case V4L2_CID_IS_DVFS_CLUSTER0:
	case V4L2_CID_IS_DVFS_CLUSTER1:
		fimc_is_resource_ioctl(resourcemgr, ctrl);
		break;
	case V4L2_CID_IS_DEBUG_SYNC_LOG:
		fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG);
		break;
	case V4L2_CID_IS_MAP_BUFFER:
		{
			struct fimc_is_queue *queue;
			struct fimc_is_framemgr *framemgr;
			struct fimc_is_frame *frame;
			struct dma_buf *dmabuf;
			struct dma_buf_attachment *attachment;
			dma_addr_t dva;
			struct v4l2_buffer *buf;
			struct v4l2_plane *planes;
			size_t size;
			u32 plane, group_id;

			size = sizeof(struct v4l2_buffer);
			buf = kmalloc(size, GFP_KERNEL);
			if (!buf) {
				mverr("kmalloc is fail(%p)", device, video, buf);
				ret = -EINVAL;
				goto p_err;
			}
			/* hack for 64bit addr */
			value_to_addr = ctrl->value;

			ret = copy_from_user(buf, (void __user *)value_to_addr, size);
			if (ret) {
				mverr("copy_from_user is fail(%d)", device, video, ret);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_OUTPUT(buf->type)) {
				mverr("capture video type is not supported", device, video);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
				mverr("single plane is not supported", device, video);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->index >= FRAMEMGR_MAX_REQUEST) {
				mverr("buffer index is invalid(%d)", device, video, buf->index);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->length > VIDEO_MAX_PLANES) {
				mverr("planes[%d] is invalid", device, video, buf->length);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			queue = GET_QUEUE(vctx);
			if (queue->vbq->memory != V4L2_MEMORY_DMABUF) {
				mverr("memory type(%d) is not supported", device, video, queue->vbq->memory);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			size = sizeof(struct v4l2_plane) * buf->length;
			planes = kmalloc(size, GFP_KERNEL);
			if (!planes) {
				mverr("kmalloc is fail(%p)", device, video, planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			ret = copy_from_user(planes, (void __user *)buf->m.planes, size);
			if (ret) {
				mverr("copy_from_user is fail(%d)", device, video, ret);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			framemgr = &queue->framemgr;
			frame = &framemgr->frame[buf->index];
			if (test_bit(FRAME_MAP_MEM, &frame->memory)) {
				mverr("this buffer(%d) is already mapped", device, video, buf->index);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only last buffer need to map */
			if (buf->length <= 1) {
				mverr("this buffer(%d) have no meta plane", device, video, buf->length);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			plane = buf->length - 1;
			dmabuf = dma_buf_get(planes[plane].m.fd);
			if (IS_ERR(dmabuf)) {
				mverr("dma_buf_get is fail(%p)", device, video, dmabuf);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			attachment = dma_buf_attach(dmabuf, &device->pdev->dev);
			if (IS_ERR(attachment)) {
				mverr("dma_buf_attach is fail(%p)", device, video, attachment);
				kfree(planes);
				kfree(buf);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only support output(read) video node */
			dva = ion_iovmm_map(attachment, 0, dmabuf->size, 0, plane);
			if (IS_ERR_VALUE(dva)) {
				mverr("ion_iovmm_map is fail(%pa)", device, video, &dva);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			group_id = GROUP_ID(device->group_3aa.id);
			ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size);
			if (ret) {
				mverr("fimc_is_itf_map is fail(%d)", device, video, ret);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				goto p_err;
			}

			mvinfo(" B%d.P%d MAP\n", device, video, buf->index, plane);
			set_bit(FRAME_MAP_MEM, &frame->memory);
			dma_buf_detach(dmabuf, attachment);
			dma_buf_put(dmabuf);
			kfree(planes);
			kfree(buf);
		}
		break;
	default:
		err("unsupported ioctl(0x%X)", ctrl->id);
		ret = -EINVAL;
		break;
	}

p_err:
	return ret;
}
Example #20
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0)
		goto err_release_gem;

	DRM_DEBUG_PRIME("ump id = %d, dma_addr = 0x%x, size = 0x%lx\n",
			exynos_gem_obj->priv_id,
			buffer->dma_addr,
			buffer->size);

	return &exynos_gem_obj->base;

err_release_gem:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #21
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size)
{
	void *err = ERR_PTR(-ENOMEM);
	struct nvmap_handle *h;
	struct nvmap_handle_ref *ref = NULL;

	if (!client)
		return ERR_PTR(-EINVAL);

	if (!size)
		return ERR_PTR(-EINVAL);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		goto ref_alloc_fail;

	atomic_set(&h->ref, 1);
	atomic_set(&h->pin, 0);
	h->owner = client;
	BUG_ON(!h->owner);
	h->size = h->orig_size = size;
	h->flags = NVMAP_HANDLE_WRITE_COMBINE;
	h->peer = NVMAP_IVM_INVALID_PEER;
	mutex_init(&h->lock);
	INIT_LIST_HEAD(&h->vmas);
	INIT_LIST_HEAD(&h->lru);

	/*
	 * This takes out 1 ref on the dmabuf. This corresponds to the
	 * handle_ref that gets automatically made by nvmap_create_handle().
	 */
	h->dmabuf = __nvmap_make_dmabuf(client, h);
	if (IS_ERR(h->dmabuf)) {
		err = h->dmabuf;
		goto make_dmabuf_fail;
	}

	/*
	 * Pre-attach nvmap to this new dmabuf. This gets unattached during the
	 * dma_buf_release() operation.
	 */
	h->attachment = dma_buf_attach(h->dmabuf, nvmap_dev->dev_user.parent);
	if (IS_ERR(h->attachment)) {
		err = h->attachment;
		goto dma_buf_attach_fail;
	}

	nvmap_handle_add(nvmap_dev, h);

	/*
	 * Major assumption here: the dma_buf object that the handle contains
	 * is created with a ref count of 1.
	 */
	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	trace_nvmap_create_handle(client, client->name, h, size, ref);
	return ref;

dma_buf_attach_fail:
	dma_buf_put(h->dmabuf);
make_dmabuf_fail:
	kfree(ref);
ref_alloc_fail:
	kfree(h);
	return err;
}
Example #22
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	struct page *page;
	int ret;

	DRM_DEBUG_PRIME("%s\n", __FILE__);

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		exynos_gem_obj = dma_buf->priv;
		obj = &exynos_gem_obj->base;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(dma_buf);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);


	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
	if (!buffer->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_pages;
	}

	sgl = sgt->sgl;

	if (sgt->nents == 1) {
		buffer->dma_addr = sg_dma_address(sgt->sgl);
		buffer->size = sg_dma_len(sgt->sgl);

		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		unsigned int i = 0;

		buffer->dma_addr = sg_dma_address(sgl);
		while (i < sgt->nents) {
			buffer->pages[i] = sg_page(sgl);
			buffer->size += sg_dma_len(sgl);
			sgl = sg_next(sgl);
			i++;
		}

		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_pages:
	kfree(buffer->pages);
	buffer->pages = NULL;
err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}
Example #23
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}
Example #24
static int fimc_is_isp_video_s_ctrl(struct file *file, void *priv,
					struct v4l2_control *ctrl)
{
	int ret = 0;
	int i2c_clk;
	struct fimc_is_video *video;
	struct fimc_is_video_ctx *vctx = file->private_data;
	struct fimc_is_device_ischain *device;
	struct fimc_is_core *core;

	BUG_ON(!vctx);
	BUG_ON(!vctx->device);
	BUG_ON(!vctx->video);

	dbg_isp("%s\n", __func__);

	device = vctx->device;
	video = vctx->video;
	core = container_of(video, struct fimc_is_core, video_isp);

	if (core->resourcemgr.dvfs_ctrl.cur_int_qos == DVFS_L0)
		i2c_clk = I2C_L0;
	else
		i2c_clk = I2C_L1;

	switch (ctrl->id) {
	case V4L2_CID_IS_DEBUG_DUMP:
		info("Print fimc-is info dump by HAL");
		if (device != NULL) {
			fimc_is_hw_logdump(device->interface);
			fimc_is_hw_regdump(device->interface);
			CALL_POPS(device, print_clk, device->pdev);
		}
		if (ctrl->value) {
			err("BUG_ON from HAL");
			BUG();
		}
		break;
	case V4L2_CID_IS_DEBUG_SYNC_LOG:
		fimc_is_logsync(device->interface, ctrl->value, IS_MSG_TEST_SYNC_LOG);
		break;
	case V4L2_CID_IS_HAL_VERSION:
		if (ctrl->value < 0 || ctrl->value >= IS_HAL_VER_MAX) {
			merr("hal version(%d) is invalid", vctx, ctrl->value);
			ret = -EINVAL;
			goto p_err;
		}
		core->resourcemgr.hal_version = ctrl->value;
		break;
	case V4L2_CID_IS_G_CAPABILITY:
		ret = fimc_is_ischain_g_capability(device, ctrl->value);
		dbg_isp("V4L2_CID_IS_G_CAPABILITY : %X\n", ctrl->value);
		break;
	case V4L2_CID_IS_FORCE_DONE:
		set_bit(FIMC_IS_GROUP_REQUEST_FSTOP, &device->group_isp.state);
		break;
	case V4L2_CID_IS_DVFS_LOCK:
		ret = fimc_is_itf_i2c_lock(device, I2C_L0, true);
		if (ret) {
			err("fimc_is_itf_i2_clock fail\n");
			break;
		}
		pm_qos_add_request(&device->user_qos, PM_QOS_DEVICE_THROUGHPUT,
					ctrl->value);
		ret = fimc_is_itf_i2c_lock(device, I2C_L0, false);
		if (ret) {
			err("fimc_is_itf_i2c_unlock fail\n");
			break;
		}
		dbg_isp("V4L2_CID_IS_DVFS_LOCK : %d\n", ctrl->value);
		break;
	case V4L2_CID_IS_DVFS_UNLOCK:
		ret = fimc_is_itf_i2c_lock(device, i2c_clk, true);
		if (ret) {
			err("fimc_is_itf_i2_clock fail\n");
			break;
		}
		pm_qos_remove_request(&device->user_qos);
		ret = fimc_is_itf_i2c_lock(device, i2c_clk, false);
		if (ret) {
			err("fimc_is_itf_i2c_unlock fail\n");
			break;
		}
		dbg_isp("V4L2_CID_IS_DVFS_UNLOCK : %d I2C(%d)\n", ctrl->value, i2c_clk);
		break;
	case V4L2_CID_IS_SET_SETFILE:
		if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) {
			err("Setting setfile is only avaiable before starting device!! (0x%08x)",
					ctrl->value);
			ret = -EINVAL;
		} else {
			device->setfile = ctrl->value;
			minfo("[ISP:V] setfile: 0x%08X\n", vctx, ctrl->value);
		}
		break;
	case V4L2_CID_IS_COLOR_RANGE:
		if (test_bit(FIMC_IS_SUBDEV_START, &device->group_isp.leader.state)) {
			err("failed to change color range: device started already (0x%08x)",
					ctrl->value);
			ret = -EINVAL;
		} else {
			device->color_range &= ~FIMC_IS_ISP_CRANGE_MASK;

			if (ctrl->value)
				device->color_range	|=
					(FIMC_IS_CRANGE_LIMITED << FIMC_IS_ISP_CRANGE_SHIFT);
		}
		break;
	case V4L2_CID_IS_MAP_BUFFER:
		{
			/* hack for 64bit addr */
			ulong value_to_addr;
			struct fimc_is_queue *queue;
			struct fimc_is_framemgr *framemgr;
			struct fimc_is_frame *frame;
			struct dma_buf *dmabuf;
			struct dma_buf_attachment *attachment;
			dma_addr_t dva;
			struct v4l2_buffer *buf;
			struct v4l2_plane *planes;
			size_t size;
			u32 write, plane, group_id;

			size = sizeof(struct v4l2_buffer);
			buf = kmalloc(size, GFP_KERNEL);
			if (!buf) {
				merr("kmalloc is fail", vctx);
				ret = -EINVAL;
				goto p_err;
			}
			/* hack for 64bit addr */
			value_to_addr = ctrl->value;

			ret = copy_from_user(buf, (void __user *)value_to_addr, size);
			if (ret) {
				merr("copy_from_user is fail(%d)", vctx, ret);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (!V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
				merr("single plane is not supported", vctx);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->index >= FRAMEMGR_MAX_REQUEST) {
				merr("buffer index is invalid(%d)", vctx, buf->index);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			if (buf->length > VIDEO_MAX_PLANES) {
				merr("planes[%d] is invalid", vctx, buf->length);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			queue = GET_QUEUE(vctx, buf->type);
			if (queue->vbq->memory != V4L2_MEMORY_DMABUF) {
				merr("memory type(%d) is not supported", vctx, queue->vbq->memory);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			size = sizeof(struct v4l2_plane) * buf->length;
			planes = kmalloc(size, GFP_KERNEL);
			if (IS_ERR(planes)) {
				merr("kmalloc is fail(%p)", vctx, planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			ret = copy_from_user(planes, (void __user *)buf->m.planes, size);
			if (ret) {
				merr("copy_from_user is fail(%d)", vctx, ret);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			framemgr = &queue->framemgr;
			frame = &framemgr->frame[buf->index];
			if (test_bit(FRAME_MAP_MEM, &frame->memory)) {
				merr("this buffer(%d) is already mapped", vctx, buf->index);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			/* only last buffer need to map */
			if (buf->length >= 1) {
				plane = buf->length - 1;
			} else {
				merr("buffer length is not correct(%d)", vctx, buf->length);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			dmabuf = dma_buf_get(planes[plane].m.fd);
			if (IS_ERR(dmabuf)) {
				merr("dma_buf_get is fail(%p)", vctx, dmabuf);
				kfree(planes);
				kfree(buf);
				ret = -EINVAL;
				goto p_err;
			}

			attachment = dma_buf_attach(dmabuf, &device->pdev->dev);
			if (IS_ERR(attachment)) {
				merr("dma_buf_attach is fail(%p)", vctx, attachment);
				kfree(planes);
				kfree(buf);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			write = !V4L2_TYPE_IS_OUTPUT(buf->type);
			dva = ion_iovmm_map(attachment, 0, dmabuf->size, write, plane);
			if (IS_ERR_VALUE(dva)) {
				merr("ion_iovmm_map is fail(%pa)", vctx, &dva);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				ret = -EINVAL;
				goto p_err;
			}

			group_id = GROUP_ID(device->group_isp.id);
			ret = fimc_is_itf_map(device, group_id, dva, dmabuf->size);
			if (ret) {
				merr("fimc_is_itf_map is fail(%d)", vctx, ret);
				kfree(planes);
				kfree(buf);
				dma_buf_detach(dmabuf, attachment);
				dma_buf_put(dmabuf);
				goto p_err;
			}

			minfo("[ISP:V] buffer%d.plane%d mapping\n", vctx, buf->index, plane);
			set_bit(FRAME_MAP_MEM, &frame->memory);
			dma_buf_detach(dmabuf, attachment);
			dma_buf_put(dmabuf);
			kfree(planes);
			kfree(buf);
		}
		break;
	default:
		err("unsupported ioctl(%d)\n", ctrl->id);
		ret = -EINVAL;
		break;
	}

p_err:
	return ret;
}
Example #25
int ump_dmabuf_import_wrapper(u32 __user *argument,
				struct ump_session_data  *session_data)
{
	ump_session_memory_list_element *session = NULL;
	struct ump_uk_dmabuf ump_dmabuf;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block *blocks;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	unsigned long block_size;
	/* FIXME */
	struct device dev;
	unsigned int i = 0, npages;
	int ret;

	/* Sanity check input parameters */
	if (!argument || !session_data) {
		MSG_ERR(("NULL parameter.\n"));
		return -EINVAL;
	}

	if (copy_from_user(&ump_dmabuf, argument,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_from_user() failed.\n"));
		return -EFAULT;
	}

	dma_buf = dma_buf_get(ump_dmabuf.fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	/*
	 * check whether dma_buf imported already exists or not.
	 *
	 * TODO
	 * if already imported then dma_buf_put() should be called
	 * and then just return dma_buf imported.
	 */

	attach = dma_buf_attach(dma_buf, &dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dma_buf_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_dma_buf_detach;
	}

	npages = sgt->nents;

	/* really need? */
	ump_dmabuf.ctx = (void *)session_data;

	block_size = sizeof(ump_dd_physical_block) * npages;

	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks\n"));
		ret = -ENOMEM;
		goto err_dmu_buf_unmap;
	}

	sgl = sgt->sgl;

	while (i < npages) {
		blocks[i].addr = sg_phys(sgl);
		blocks[i].size = sg_dma_len(sgl);
		sgl = sg_next(sgl);
		i++;
	}

	/*
	 * Initialize the session memory list element, and add it
	 * to the session object
	 */
	session = _mali_osk_calloc(1, sizeof(*session));
	if (!session) {
		DBG_MSG(1, ("Failed to allocate session.\n"));
		ret = -EFAULT;
		goto err_free_block;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
	if (UMP_DD_HANDLE_INVALID == ump_handle) {
		DBG_MSG(1, ("Failed to create ump handle.\n"));
		ret = -EFAULT;
		goto err_free_session;
	}

	session->mem = (ump_dd_mem *)ump_handle;

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session->list),
			&(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	_mali_osk_free(blocks);

	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
	ump_dmabuf.size = ump_dd_size_get(ump_handle);

	if (copy_to_user(argument, &ump_dmabuf,
				sizeof(struct ump_uk_dmabuf))) {
		MSG_ERR(("copy_to_user() failed.\n"));
		ret =  -EFAULT;
		goto err_release_ump_handle;
	}

	return 0;

err_release_ump_handle:
	ump_dd_reference_release(ump_handle);
err_free_session:
	_mali_osk_free(session);
err_free_block:
	_mali_osk_free(blocks);
err_dmu_buf_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_dma_buf_detach:
	dma_buf_detach(dma_buf, attach);
err_dma_buf_put:
	dma_buf_put(dma_buf);
	return ret;
}
Example #26
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO. we have to find a way that exporter can notify
		 * the type of its own buffer to importer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #27
int exynos_dmabuf_prime_fd_to_handle(struct drm_device *drm_dev,
					struct drm_file *file,
					int prime_fd, unsigned int *handle)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = mutex_lock_interruptible(&drm_dev->struct_mutex);
	if (ret < 0)
		return ret;

	dmabuf = dma_buf_get(prime_fd);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * if there is same dmabuf as the one to prime_fd
	 * in file_priv->prime list then return the handle.
	 *
	 * Note:
	 * but if the prime_fd from user belongs to another process
	 * then there couldn't be the dmabuf in file_priv->prime list
	 * because file_priv is unique to process.
	 */
	ret = drm_prime_lookup_fd_handle_mapping(&file_priv->prime,
							dmabuf, handle);
	if (!ret) {
		/* drop reference we got above. */
		dma_buf_put(dmabuf);
		goto out;
	}

	attach = dma_buf_attach(dmabuf, drm_dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto fail_put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		ret = -ENOMEM;
		goto fail_unmap;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dmabuf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto fail_unmap;
	}

	ret = drm_gem_handle_create(file, &exynos_gem_obj->base, handle);
	if (ret < 0)
		goto fail_handle;

	/* consider physically non-continuous memory with IOMMU. */

	buffer->dma_addr = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->sgt = sgt;

	/*
	 * import(fd to handle) means that the physical memory region
	 * from the sgt is being shared with others so shared_refcount
	 * should be 1.
	 */
	atomic_set(&buffer->shared_refcount, 1);

	exynos_gem_obj->base.import_attach = attach;

	ret = drm_prime_insert_fd_handle_mapping(&file_priv->prime,
							dmabuf, *handle);
	if (ret < 0)
		goto fail_handle;

	/* register buffer information to private buffer manager. */
	ret = register_buf_to_priv_mgr(exynos_gem_obj,
					&exynos_gem_obj->priv_handle,
					&exynos_gem_obj->priv_id);
	if (ret < 0) {
		drm_prime_remove_fd_handle_mapping(&file_priv->prime, dmabuf);
		goto fail_handle;
	}

	DRM_DEBUG_KMS("fd = %d, handle = %d, dma_addr = 0x%x, size = 0x%lx\n",
			prime_fd, *handle, buffer->dma_addr, buffer->size);

	drm_gem_object_unreference(&exynos_gem_obj->base);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;

fail_handle:
	drm_gem_object_unreference(&exynos_gem_obj->base);
	kfree(buffer);
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt);
fail_detach:
	dma_buf_detach(dmabuf, attach);
fail_put:
	dma_buf_put(dmabuf);
out:
	mutex_unlock(&drm_dev->struct_mutex);
	return ret;
}