Code example #1
File: gem.c Project: JaneDu/ath
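Imports a foreign dma-buf into a Tegra DRM buffer object. After attaching, get_dma_buf() takes a reference that the object holds for its lifetime; the attachment is then mapped and either wired through the IOMMU or, without one, required to be a single-entry scatterlist. The error paths unwind in reverse order, ending with dma_buf_put().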
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
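Code example #2
Duplicates an nvmap handle into a client. Each new nvmap_handle_ref takes its own reference on the handle's dma-buf with get_dma_buf(), so the dma-buf reference count mirrors the creation and destruction of handle_refs (see the comment in the code).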
struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
					struct nvmap_handle *h, bool skip_val)
{
	struct nvmap_handle_ref *ref = NULL;

	BUG_ON(!client);
	/* on success, the reference count for the handle should be
	 * incremented, so the success paths will not call nvmap_handle_put */
	h = nvmap_validate_get(h);

	if (!h) {
		pr_debug("%s duplicate handle failed\n",
			    current->group_leader->comm);
		return ERR_PTR(-EPERM);
	}

	if (!h->alloc) {
		pr_err("%s duplicating unallocated handle\n",
			current->group_leader->comm);
		nvmap_handle_put(h);
		return ERR_PTR(-EINVAL);
	}

	nvmap_ref_lock(client);
	ref = __nvmap_validate_locked(client, h);

	if (ref) {
		/* handle already duplicated in client; just increment
		 * the reference count rather than re-duplicating it */
		atomic_inc(&ref->dupes);
		nvmap_ref_unlock(client);
		return ref;
	}

	nvmap_ref_unlock(client);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		nvmap_handle_put(h);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);

	/*
	 * Ref counting on the dma_bufs follows the creation and destruction of
	 * nvmap_handle_refs. That is, every time a handle_ref is made the
	 * dma_buf ref count goes up, and every time a handle_ref is destroyed
	 * the dma_buf ref count goes down.
	 */
	get_dma_buf(h->dmabuf);

	trace_nvmap_duplicate_handle(client, h, ref);
	return ref;
}
Code example #3
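adf_device_post() copies the caller's interface, buffer and private-data arrays and takes a reference on every dma-buf plane in @bufs before handing the copies to adf_device_post_nocopy(); if the post fails, each of those references is dropped again with dma_buf_put().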
/**
 * adf_device_post - flip to a new set of buffers
 *
 * @dev: device targeted by the flip
 * @intfs: interfaces targeted by the flip
 * @n_intfs: number of targeted interfaces
 * @bufs: description of buffers displayed
 * @n_bufs: number of buffers displayed
 * @custom_data: driver-private data
 * @custom_data_size: size of driver-private data
 *
 * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
 * point to variables on the stack.  adf_device_post() also takes its own
 * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
 * variant transfers ownership of these resources to ADF instead.
 *
 * On success, returns a sync fence which signals when the buffers are removed
 * from the screen.  On failure, returns ERR_PTR(-errno).
 */
struct sync_fence *adf_device_post(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
		size_t custom_data_size)
{
	struct adf_interface **intfs_copy = NULL;
	struct adf_buffer *bufs_copy = NULL;
	void *custom_data_copy = NULL;
	struct sync_fence *ret;
	size_t i;

	intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
	if (!intfs_copy)
		return ERR_PTR(-ENOMEM);

	bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
	if (!bufs_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
	if (!custom_data_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	for (i = 0; i < n_bufs; i++) {
		size_t j;
		for (j = 0; j < bufs[i].n_planes; j++)
			get_dma_buf(bufs[i].dma_bufs[j]);
	}

	memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
	memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
	memcpy(custom_data_copy, custom_data, custom_data_size);

	ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
			n_bufs, custom_data_copy, custom_data_size);
	if (IS_ERR(ret))
		goto err_post;

	return ret;

err_post:
	for (i = 0; i < n_bufs; i++) {
		size_t j;
		for (j = 0; j < bufs[i].n_planes; j++)
			dma_buf_put(bufs[i].dma_bufs[j]);
	}
err_alloc:
	kfree(custom_data_copy);
	kfree(bufs_copy);
	kfree(intfs_copy);
	return ret;
}
Code example #4
File: tee_shm.c Project: AlexShiLucky/linux
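tee_shm_get_fd() pairs get_dma_buf() with dma_buf_fd(): the reference is taken before a file descriptor is installed and dropped again if installing it fails.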
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}
Code example #5
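The i915 PRIME import. Buffers exported by the same device are short-circuited by taking a GEM reference on the existing object; foreign buffers are attached, referenced with get_dma_buf(), and wrapped in a new GEM object, with the attachment and reference released on failure.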
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM increases the
			 * refcount on the GEM object itself instead of the dmabuf's f_count.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Code example #6
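The omapdrm PRIME import follows the same pattern: attach, get_dma_buf(), map the attachment, and build a GEM object around the resulting sg_table, unwinding in reverse order on failure.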
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM increases the
			 * refcount on the GEM object itself instead of the dmabuf's f_count.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Code example #7
File: tee_shm.c Project: AlexShiLucky/linux
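tee_shm_get_from_id() looks up a shared-memory object by id under the device mutex and, for DMA_BUF-backed objects, takes a dma-buf reference before returning the object to the caller.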
/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
Code example #8
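An ioctl handler that exports a buffer as a file descriptor: get_dma_buf() is called before dma_buf_fd(), the reference is dropped if no fd could be installed, and the resulting fd is copied back to user space.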
static int dmabuf_ioctl_export(struct dmabuf_file *priv, unsigned long flags)
{
	int err;

	struct dmabuf_create *buf = (struct dmabuf_create *)flags;
	get_dma_buf(priv->buf);

	err = dma_buf_fd(priv->buf, flags);
	if (err < 0)
		dma_buf_put(priv->buf);

	priv->fd = err;

	if (copy_to_user(&buf->fd, &err, 4))
		return -EFAULT;

	return 0;
}
Code example #9
File: udl_dmabuf.c Project: 168519/linux
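The udl PRIME import additionally pins the underlying device with get_device() for the lifetime of the attachment; otherwise it is the usual attach, get_dma_buf(), map and wrap sequence with a symmetric error path.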
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
	get_device(dev->dev);
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		put_device(dev->dev);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;
	uobj->flags = UDL_BO_WC;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);
	put_device(dev->dev);
	return ERR_PTR(ret);
}
Code example #10
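The Exynos PRIME import: after attaching and taking a dma-buf reference, the attachment is mapped and a GEM object is built around it; the buffer is flagged as contiguous when the scatterlist has a single entry and as non-contiguous otherwise.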
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own GEM increases the
			 * refcount on the GEM object itself instead of the dmabuf's f_count.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be CONTIG or NONCONTIG type but for now
		 * sets NONCONTIG.
		 * TODO: we have to find a way for the exporter to notify
		 * the importer of the type of its own buffer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Code example #11
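A pl111 page-flip helper that takes a reference on the framebuffer's exported dma-buf for the duration of the flip. With CONFIG_DMA_SHARED_BUFFER_USES_KDS it also queues an asynchronous wait on the buffer's KDS resource before the framebuffer is shown; without KDS the show callback is invoked directly.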
int show_framebuffer_on_crtc(struct drm_crtc *crtc,
				struct drm_framebuffer *fb, bool page_flip,
				struct drm_pending_vblank_event *event)
{
	struct pl111_gem_bo *bo;
	struct pl111_drm_flip_resource *flip_res;
	int flips_in_flight;
	int old_flips_in_flight;

	crtc->fb = fb;

	bo = PL111_BO_FROM_FRAMEBUFFER(fb);
	if (bo == NULL) {
		DRM_DEBUG_KMS("Failed to get pl111_gem_bo object\n");
		return -EINVAL;
	}

	/* If this is a full modeset, wait for all outstanding flips to complete
	 * before continuing. This avoids unnecessary complication from being
	 * able to queue up multiple modesets and queues of mixed modesets and
	 * page flips.
	 *
	 * Modesets should be uncommon and will not be performant anyway, so
	 * making them synchronous should have negligible performance impact.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				atomic_read(&priv.nr_flips_in_flight) == 0);
		if (ret)
			return ret;
	}

	/*
	 * There can be more 'early display' flips in flight than there are
	 * buffers, and there is (currently) no explicit bound on the number of
	 * flips. Hence, we need a new allocation for each one.
	 *
	 * Note: this could be optimized down if we knew a bound on the flips,
	 * since an application can only have so many buffers in flight to be
	 * useful/not hog all the memory
	 */
	flip_res = kmem_cache_alloc(priv.page_flip_slab, GFP_KERNEL);
	if (flip_res == NULL) {
		pr_err("kmem_cache_alloc failed to alloc - flip ignored\n");
		return -ENOMEM;
	}

	/*
	 * increment flips in flight, whilst blocking when we reach
	 * NR_FLIPS_IN_FLIGHT_THRESHOLD
	 */
	do {
		/*
		 * Note: use of assign-and-then-compare in the condition to set
		 * flips_in_flight
		 */
		int ret = wait_event_killable(priv.wait_for_flips,
				(flips_in_flight =
					atomic_read(&priv.nr_flips_in_flight))
				< NR_FLIPS_IN_FLIGHT_THRESHOLD);
		if (ret != 0) {
			kmem_cache_free(priv.page_flip_slab, flip_res);
			return ret;
		}

		old_flips_in_flight = atomic_cmpxchg(&priv.nr_flips_in_flight,
					flips_in_flight, flips_in_flight + 1);
	} while (old_flips_in_flight != flips_in_flight);

	flip_res->fb = fb;
	flip_res->crtc = crtc;
	flip_res->page_flip = page_flip;
	flip_res->event = event;
	INIT_LIST_HEAD(&flip_res->link);
	DRM_DEBUG_KMS("DRM alloc flip_res=%p\n", flip_res);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;
		unsigned long shared[1] = { 0 };
		struct kds_resource *resource_list[1] = {
				get_dma_buf_kds_resource(buf) };
		int err;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);

		/* Wait for the KDS resource associated with this buffer */
		err = kds_async_waitall(&flip_res->kds_res_set,
					&priv.kds_cb, flip_res, fb, 1, shared,
					resource_list);
		BUG_ON(err);
	} else {
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);

		DRM_DEBUG_KMS("No dma_buf for this flip\n");

		/* No dma-buf attached so just call the callback directly */
		flip_res->kds_res_set = NULL;
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#else
	if (bo->gem_object.export_dma_buf != NULL) {
		struct dma_buf *buf = bo->gem_object.export_dma_buf;

		get_dma_buf(buf);
		DRM_DEBUG_KMS("Got dma_buf %p\n", buf);
	} else {
		DRM_DEBUG_KMS("No dma_buf for this flip\n");
	}

	/* No dma-buf attached to this so just call the callback directly */
	{
		struct pl111_drm_crtc *pl111_crtc = to_pl111_crtc(crtc);
		pl111_crtc->show_framebuffer_cb(flip_res, fb);
	}
#endif

	/* For the same reasons as the wait at the start of this function,
	 * wait for the modeset to complete before continuing.
	 */
	if (!page_flip) {
		int ret = wait_event_killable(priv.wait_for_flips,
				flips_in_flight == 0);
		if (ret)
			return ret;
	}

	return 0;
}
Code example #12
File: gem.c Project: mikuhatsune001/linux2.6.32
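An older variant of the Tegra import from example #1, without IOMMU support: the GEM object and mmap offset are created first, then the buffer is attached, referenced with get_dma_buf(), mapped, and required to be a single-entry scatterlist.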
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);

	return ERR_PTR(err);
}
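Common to all of these examples is the ownership rule: get_dma_buf() takes an extra reference that the importer holds for as long as it keeps the attachment or mapping, and every failure path after that point drops the reference again with dma_buf_put(). Below is a minimal sketch of that pattern; it is not taken from any of the drivers above, the my_import() name and the choice of DMA_BIDIRECTIONAL are illustrative assumptions, while the dma-buf calls themselves are the standard kernel API.

#include <linux/dma-buf.h>
#include <linux/err.h>

/*
 * Minimal sketch of the shared import pattern: attach, take a reference,
 * map. The helper name and direction are illustrative, not from the
 * drivers above.
 */
static struct sg_table *my_import(struct device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Hold a reference for as long as the attachment and mapping live. */
	get_dma_buf(buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		/* Unwind in reverse order and drop the reference taken above. */
		dma_buf_detach(buf, attach);
		dma_buf_put(buf);
		return ERR_CAST(sgt);
	}

	/*
	 * The caller releases the import later with dma_buf_unmap_attachment(),
	 * dma_buf_detach() and dma_buf_put(), in that order.
	 */
	return sgt;
}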