Example #1
File: gem.c Project: JaneDu/ath
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
Example #2
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
Example #3
int sprd_ion_get_gsp_addr(struct ion_addr_data *data)
{
	int ret = 0;
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(data->fd_buffer);
	if (IS_ERR(dmabuf)) {
		pr_err("sprd_ion_get_gsp_addr() dmabuf=0x%lx dma_buf_get error!\n", (unsigned long)dmabuf);
		return -1;
	}
	/* if this memory came from ion */
#if 0
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
#endif
	buffer = dmabuf->priv;
	dma_buf_put(dmabuf);

	if (ION_HEAP_TYPE_SYSTEM == buffer->heap->type) {
#if defined(CONFIG_SPRD_IOMMU)
		mutex_lock(&buffer->lock);
		if(0 == buffer->iomap_cnt[IOMMU_GSP]) {
			buffer->iova[IOMMU_GSP] = sprd_iova_alloc(IOMMU_GSP, buffer->size);
			ret = sprd_iova_map(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer);
		}
		buffer->iomap_cnt[IOMMU_GSP]++;
		data->iova_enabled = true;
		data->iova_addr = buffer->iova[IOMMU_GSP];
		data->size = buffer->size;
		mutex_unlock(&buffer->lock);
#else
		ret = -1;
#endif
	} else {
		if (!buffer->heap->ops->phys) {
			pr_err("%s: ion_phys is not implemented by this heap.\n",
			       __func__);
			return -ENODEV;
		}
		ret = buffer->heap->ops->phys(buffer->heap, buffer, &(data->phys_addr), &(data->size));
		data->iova_enabled = false;
	}

	if (ret) {
		pr_err("sprd_ion_get_gsp_addr, error %d!\n",ret);
	}

	return ret;
}
Example #4
static int igt_dmabuf_import_ownership(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ptr = dma_buf_vmap(dmabuf);
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto err_dmabuf;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_vunmap(dmabuf, ptr);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto err_dmabuf;
	}

	dma_buf_put(dmabuf);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
		goto out_obj;
	}

	err = 0;
	i915_gem_object_unpin_pages(obj);
out_obj:
	i915_gem_object_put(obj);
	return err;

err_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
Example #5
struct drm_gem_object * omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *buffer)
{
	struct drm_gem_object *obj;

	/* is this one of own objects? */
	if (buffer->ops == &omap_dmabuf_ops) {
		obj = buffer->priv;
		/* is it from our device? */
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(buffer);
			return obj;
		}
	}

	/*
	 * TODO add support for importing buffers from other devices..
	 * for now we don't need this but would be nice to add eventually
	 */
	return ERR_PTR(-EINVAL);
}
Example #6
static int adf_fb_alloc(struct adf_fbdev *fbdev)
{
	int ret;

	ret = adf_interface_simple_buffer_alloc(fbdev->intf,
			fbdev->default_xres_virtual,
			fbdev->default_yres_virtual,
			fbdev->default_format,
			&fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
	if (ret < 0) {
		dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
		return ret;
	}

	fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
	if (!fbdev->vaddr) {
		ret = -ENOMEM;
		dev_err(fbdev->info->dev, "vmapping fb failed\n");
		goto err_vmap;
	}
	fbdev->info->fix.line_length = fbdev->pitch;
	fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
	fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
	fbdev->info->fix.smem_len = fbdev->dma_buf->size;
	fbdev->info->screen_base = fbdev->vaddr;

	return 0;

err_vmap:
	dma_buf_put(fbdev->dma_buf);
	return ret;
}
Example #7
int ioctl_detach(int fd)
{
/*
when buffer-user is done using this buffer, it 'disconnects' itself from the
   buffer.

   After the buffer-user has no more interest in using this buffer, it should
   disconnect itself from the buffer:

   - it first detaches itself from the buffer.

   Interface:
      void dma_buf_detach(struct dma_buf *dmabuf,
                          struct dma_buf_attachment *dmabuf_attach);

Then, the buffer-user returns the buffer reference to exporter.

   Interface:
     void dma_buf_put(struct dma_buf *dmabuf);
*/

	/* todo - use fd from userland to get back the contexts */
	dma_buf_detach(curr_dma_buf, curr_dma_buf_attachment);
	dma_buf_put(curr_dma_buf);
	return 0;
}
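The interface notes quoted above only describe the tail end of the importer's obligations. Below is a minimal, self-contained sketch of the full cycle they imply — get, attach, map, unmap, detach, put — using the older two-argument attach/map signatures that the rest of these examples use; the function name and the device pointer are illustrative, not taken from any of the quoted drivers.

/* Hedged sketch: one complete importer lifecycle for a dma-buf fd.
 * 'dev' is an assumed struct device belonging to the importing driver.
 */
static int example_import_cycle(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int err = 0;

	dmabuf = dma_buf_get(fd);		/* take a file reference on the dma-buf */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);	/* register the importing device */
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program the device with sg_dma_address(sgt->sgl) ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dmabuf, attach);
put:
	dma_buf_put(dmabuf);			/* balance dma_buf_get() */
	return err;
}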
Example #8
int s3cfb_extdsp_release(struct fb_info *fb, int user)
{
	struct s3cfb_extdsp_window *win = fb->par;
	struct s3cfb_extdsp_global *fbdev = get_extdsp_global(0);
	int i;

	printk("[VFB] %s\n", __func__);

	s3cfb_extdsp_release_window(fb);

	mutex_lock(&fbdev->lock);
	atomic_dec(&win->in_use);
	mutex_unlock(&fbdev->lock);

	for (i = 0; i < CONFIG_FB_S5P_EXTDSP_NR_BUFFERS; i++) {
		if (fbdev->buf_list[i].dma_buf) {
#if 0
			dma_buf_put(fbdev->buf_list[i].dma_buf);
			fbdev->buf_list[i].dma_buf = NULL;
			fbdev->buf_list[i].dma_buf_uv = NULL;
#endif
			printk("fbdev->buf_list[%d].dma_buf: %p\n", i, fbdev->buf_list[i].dma_buf);
		}
	}

	return 0;
}
Example #9
void hal_tui_free(void)
{
	decon_free_sec_dma_buf(0);
	dma_buf_put(dbuf);
	ion_free(client, handle);
	ion_client_destroy(client);
}
Example #10
void nvhost_dmabuf_put(struct mem_handle *handle)
{
	struct dma_buf_attachment *attach = to_dmabuf_att(handle);
	struct dma_buf *dmabuf = attach->dmabuf;
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
}
Example #11
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
Example #12
static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
    /* Unbreak the reference cycle if we have an exported dma_buf. */
    if (obj->dma_buf) {
        dma_buf_put(obj->dma_buf);
        obj->dma_buf = NULL;
    }
}
Example #13
int sprd_ion_free_gsp_addr(int fd)
{
	int ret = 0;
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		pr_err("sprd_ion_free_gsp_addr() dmabuf=0x%lx dma_buf_get error!\n", (unsigned long)dmabuf);
		return -1;
	}
	/* if this memory came from ion */
#if 0
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
#endif
	buffer = dmabuf->priv;
	dma_buf_put(dmabuf);

	if (ION_HEAP_TYPE_SYSTEM == buffer->heap->type) {
#if defined(CONFIG_SPRD_IOMMU)
		mutex_lock(&buffer->lock);
		if (buffer->iomap_cnt[IOMMU_GSP] > 0) {
			buffer->iomap_cnt[IOMMU_GSP]--;
			if(0 == buffer->iomap_cnt[IOMMU_GSP]) {
				ret = sprd_iova_unmap(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer);
				sprd_iova_free(IOMMU_GSP, buffer->iova[IOMMU_GSP], buffer->size);
				buffer->iova[IOMMU_GSP] = 0;
			}
		}
		mutex_unlock(&buffer->lock);
#else
		ret = -1;
#endif
	}

	if (ret) {
		pr_err("sprd_ion_free_gsp_addr, error %d!\n",ret);
	}

	return ret;
}
Example #14
/**
 * adf_device_post - flip to a new set of buffers
 *
 * @dev: device targeted by the flip
 * @intfs: interfaces targeted by the flip
 * @n_intfs: number of targeted interfaces
 * @bufs: description of buffers displayed
 * @n_bufs: number of buffers displayed
 * @custom_data: driver-private data
 * @custom_data_size: size of driver-private data
 *
 * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
 * point to variables on the stack.  adf_device_post() also takes its own
 * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
 * variant transfers ownership of these resources to ADF instead.
 *
 * On success, returns a sync fence which signals when the buffers are removed
 * from the screen.  On failure, returns ERR_PTR(-errno).
 */
struct sync_fence *adf_device_post(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
		size_t custom_data_size)
{
	struct adf_interface **intfs_copy = NULL;
	struct adf_buffer *bufs_copy = NULL;
	void *custom_data_copy = NULL;
	struct sync_fence *ret;
	size_t i;

	intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
	if (!intfs_copy)
		return ERR_PTR(-ENOMEM);

	bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
	if (!bufs_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
	if (!custom_data_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	for (i = 0; i < n_bufs; i++) {
		size_t j;
		for (j = 0; j < bufs[i].n_planes; j++)
			get_dma_buf(bufs[i].dma_bufs[j]);
	}

	memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
	memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
	memcpy(custom_data_copy, custom_data, custom_data_size);

	ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
			n_bufs, custom_data_copy, custom_data_size);
	if (IS_ERR(ret))
		goto err_post;

	return ret;

err_post:
	for (i = 0; i < n_bufs; i++) {
		size_t j;
		for (j = 0; j < bufs[i].n_planes; j++)
			dma_buf_put(bufs[i].dma_bufs[j]);
	}
err_alloc:
	kfree(custom_data_copy);
	kfree(bufs_copy);
	kfree(intfs_copy);
	return ret;
}
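As a usage sketch of the API documented above, the caller below posts a single single-plane buffer to one interface and then drops the returned sync fence. The adf_buffer field names and the overlay-engine argument are assumptions about the caller's setup; adf_device_post() copies its arguments and takes its own dma-buf references, so the on-stack arrays are safe to reuse afterwards.

/* Hedged sketch: post one buffer via adf_device_post(). The struct adf_buffer
 * fields used here are assumed from the ADF headers of this era.
 */
static int example_post_one(struct adf_device *dev, struct adf_interface *intf,
			    struct adf_overlay_engine *eng,
			    struct dma_buf *dmabuf, u32 w, u32 h, u32 format,
			    u32 pitch)
{
	struct adf_interface *intfs[] = { intf };
	struct adf_buffer buf = {
		.overlay_engine = eng,
		.w = w,
		.h = h,
		.format = format,
		.dma_bufs = { dmabuf },
		.pitch = { pitch },
		.n_planes = 1,
	};
	struct sync_fence *fence;

	fence = adf_device_post(dev, intfs, 1, &buf, 1, NULL, 0);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	sync_fence_put(fence);	/* fence signals when the buffer leaves the screen */
	return 0;
}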
Example #15
static void seqno_release(struct fence *fence)
{
	struct seqno_fence *f = to_seqno_fence(fence);

	dma_buf_put(f->sync_buf);
	if (f->ops->release)
		f->ops->release(fence);
	else
		fence_free(&f->base);
}
Example #16
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
			ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}
Example #17
/**
 * rppc_alloc_dmabuf - import a buffer and store in a rppc buffer descriptor
 * @rpc - rppc instance handle
 * @fd - dma_buf file descriptor
 * @autoreg: flag indicating the mode of creation
 *
 * This function primarily imports a buffer into the driver and holds
 * a reference to the buffer on behalf of the remote processor. The
 * buffer to be imported is represented by a dma-buf file descriptor,
 * and as such is agnostic of the buffer allocator and/or exporter.
 * The buffer is imported using the dma-buf api, and a driver specific
 * buffer descriptor is used to store the imported buffer properties.
 * The imported buffers are all stored in a rppc instance specific
 * idr, to be used for looking up and cleaning up the driver buffer
 * descriptors.
 *
 * The @autoreg field is used to dictate the manner in which the buffer
 * is imported. The user-side can pre-register the buffers with the driver
 * (which will import the buffers) if the application is going to use
 * these repeatedly in consecutive function invocations. The buffers
 * are auto-imported if the user-side has not registered them previously
 * and are un-imported once the remote function call returns.
 *
 * This function is to be called only after checking that buffer has
 * not been imported already (see rppc_find_dmabuf).
 *
 * Return: allocated rppc_dma_buf or error
 */
struct rppc_dma_buf *rppc_alloc_dmabuf(struct rppc_instance *rpc, int fd,
				       bool autoreg)
{
	struct rppc_dma_buf *dma;
	void *ret;
	int id;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return ERR_PTR(-ENOMEM);

	dma->fd = fd;
	dma->autoreg = !!autoreg;
	dma->buf = dma_buf_get(dma->fd);
	if (IS_ERR(dma->buf)) {
		ret = dma->buf;
		goto free_dma;
	}

	dma->attach = dma_buf_attach(dma->buf, rpc->dev);
	if (IS_ERR(dma->attach)) {
		ret = dma->attach;
		goto put_buf;
	}

	dma->sgt = dma_buf_map_attachment(dma->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(dma->sgt)) {
		ret = dma->sgt;
		goto detach_buf;
	}

	dma->pa = sg_dma_address(dma->sgt->sgl);
	mutex_lock(&rpc->lock);
	id = idr_alloc(&rpc->dma_idr, dma, 0, 0, GFP_KERNEL);
	dma->id = id;
	mutex_unlock(&rpc->lock);
	if (id < 0) {
		ret = ERR_PTR(id);
		goto unmap_buf;
	}

	return dma;

unmap_buf:
	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
detach_buf:
	dma_buf_detach(dma->buf, dma->attach);
put_buf:
	dma_buf_put(dma->buf);
free_dma:
	kfree(dma);

	return ret;
}
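The kernel-doc above notes that auto-imported buffers are un-imported once the remote call returns. A hedged sketch of that complementary teardown follows; the helper name and locking are illustrative rather than the driver's exact code, but the unwind order mirrors the error path of rppc_alloc_dmabuf().

/* Hedged sketch of the teardown for a descriptor created by rppc_alloc_dmabuf():
 * reverse the import in the opposite order, then free the descriptor.
 */
static void example_free_dmabuf(struct rppc_instance *rpc, struct rppc_dma_buf *dma)
{
	mutex_lock(&rpc->lock);
	idr_remove(&rpc->dma_idr, dma->id);	/* forget the descriptor */
	mutex_unlock(&rpc->lock);

	dma_buf_unmap_attachment(dma->attach, dma->sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dma->buf, dma->attach);
	dma_buf_put(dma->buf);			/* balance dma_buf_get() */
	kfree(dma);
}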
Example #18
/**
 * rppc_map_page - import and map a kernel page in a dma_buf
 * @rpc - rppc instance handle
 * @fd: file descriptor of the dma_buf to import
 * @offset: offset of the translate location within the buffer
 * @base_ptr: pointer for returning mapped kernel address
 * @dmabuf: pointer for returning the imported dma_buf
 *
 * A helper function to import the dma_buf buffer and map into kernel
 * the page containing the offset within the buffer. The function is
 * called by rppc_xlate_buffers and returns the pointers to the kernel
 * mapped address and the imported dma_buf handle in arguments. The
 * mapping is used for performing in-place translation of the user
 * provided pointer at location @offset within the buffer.
 *
 * The mapping is achieved through the appropriate dma_buf ops, and
 * the page will be unmapped after performing the translation. See
 * also rppc_unmap_page.
 *
 * Return: 0 on success, or an appropriate failure code otherwise
 */
static int rppc_map_page(struct rppc_instance *rpc, int fd, u32 offset,
			 uint8_t **base_ptr, struct dma_buf **dmabuf)
{
	int ret = 0;
	uint8_t *ptr = NULL;
	struct dma_buf *dbuf = NULL;
	uint32_t pg_offset;
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return -EINVAL;

	pg_offset = (offset & (PAGE_SIZE - 1));
	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dbuf = dma_buf_get(fd);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		dev_err(dev, "invalid dma_buf file descriptor passed! fd = %d ret = %d\n",
			fd, ret);
		goto out;
	}

	ret = dma_buf_begin_cpu_access(dbuf, begin, end, DMA_BIDIRECTIONAL);
	if (ret < 0) {
		dev_err(dev, "failed to acquire cpu access to the dma buf fd = %d offset = 0x%x, ret = %d\n",
			fd, offset, ret);
		goto put_dmabuf;
	}

	ptr = dma_buf_kmap(dbuf, pg_num);
	if (!ptr) {
		ret = -ENOBUFS;
		dev_err(dev, "failed to map the page containing the translation into kernel fd = %d offset = 0x%x\n",
			fd, offset);
		goto end_cpuaccess;
	}

	*base_ptr = ptr;
	*dmabuf = dbuf;
	dev_dbg(dev, "kmap'd base_ptr = %p buf = %p into kernel from %zu for %zu bytes, pg_offset = 0x%x\n",
		ptr, dbuf, begin, end, pg_offset);
	return 0;

end_cpuaccess:
	dma_buf_end_cpu_access(dbuf, begin, end, DMA_BIDIRECTIONAL);
put_dmabuf:
	dma_buf_put(dbuf);
out:
	return ret;
}
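The comment above points at rppc_unmap_page as the counterpart. Below is a hedged reconstruction of what that undo step looks like, using the same older dma_buf_kmap()/dma_buf_end_cpu_access() signatures as the mapping side; the function name and argument list are assumptions, not the driver's exact code.

/* Hedged sketch: undo rppc_map_page() — unmap the kernel page, end the CPU
 * access window and drop the dma-buf reference taken by dma_buf_get().
 */
static void example_unmap_page(struct dma_buf *dbuf, u32 offset, uint8_t *base_ptr)
{
	dma_buf_kunmap(dbuf, offset >> PAGE_SHIFT, base_ptr);
	dma_buf_end_cpu_access(dbuf, offset & PAGE_MASK, PAGE_SIZE, DMA_BIDIRECTIONAL);
	dma_buf_put(dbuf);
}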
Example #19
static int dmabuf_ioctl_delete(struct dmabuf_file *priv, unsigned long flags)
{
	dma_free_writecombine(NULL, priv->size, priv->virt, priv->phys);
	priv->virt = NULL;
	priv->phys = 0;
	priv->size = 0;

	dma_buf_put(priv->buf);
	priv->buf = NULL;

	return 0;
}
Example #20
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	int fd;

	if (!(shm->flags & TEE_SHM_DMA_BUF))
		return -EINVAL;

	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}
Example #21
static void fimg2d_unmap_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma)
{
	if (!dma->dma_addr)
		return;

	iovmm_unmap(info->dev, dma->dma_addr);
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			dma->direction);
	dma_buf_detach(dma->dma_buf, dma->attachment);
	dma_buf_put(dma->dma_buf);
	memset(dma, 0, sizeof(struct fimg2d_dma));
}
Example #22
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #23
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
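To make the comment above concrete: the dma-buf exported for a TEE_SHM_DMA_BUF shm carries a release callback that forwards to tee_shm_release() once the last dma_buf_put() has dropped the final reference. The sketch below is an illustrative reconstruction of that bridge, not necessarily the exact upstream callback.

/* Hedged sketch: the exporter-side dma_buf_ops.release hook that the comment
 * above relies on. 'dmabuf->priv' is assumed to hold the tee_shm pointer set
 * at export time.
 */
static void example_tee_shm_dmabuf_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}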
Example #24
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #25
static void exynos_dmabuf_detach(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* TODO */

	/*
	 * when drm_prime_handle_to_fd() is called, file->f_count of this
	 * dmabuf will be increased by dma_buf_get() so drop the reference
	 * here.
	 */
	dma_buf_put(dmabuf);
}
Example #26
static unsigned int fimg2d_map_dma_buf(struct fimg2d_control *info,
		struct fimg2d_dma *dma, int fd,
		enum dma_data_direction direction)
{
	dma_addr_t dma_addr;

	dma->direction = direction;
	dma->dma_buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dma->dma_buf)) {
		dev_err(info->dev, "dma_buf_get() failed: %ld\n",
				PTR_ERR(dma->dma_buf));
		goto err_buf_get;
	}

	dma->attachment = dma_buf_attach(dma->dma_buf, info->dev);
	if (IS_ERR_OR_NULL(dma->attachment)) {
		dev_err(info->dev, "dma_buf_attach() failed: %ld\n",
				PTR_ERR(dma->attachment));
		goto err_buf_attach;
	}

	dma->sg_table = dma_buf_map_attachment(dma->attachment,
			direction);
	if (IS_ERR_OR_NULL(dma->sg_table)) {
		dev_err(info->dev, "dma_buf_map_attachment() failed: %ld\n",
				PTR_ERR(dma->sg_table));
		goto err_buf_map_attachment;
	}

	dma_addr = iovmm_map(info->dev, dma->sg_table->sgl, 0,
			dma->dma_buf->size);
	if (!dma_addr || IS_ERR_VALUE(dma_addr)) {
		dev_err(info->dev, "iovmm_map() failed: %d\n", dma->dma_addr);
		goto err_iovmm_map;
	}

	dma->dma_addr = dma_addr;
	return dma->dma_buf->size;

err_iovmm_map:
	dma_buf_unmap_attachment(dma->attachment, dma->sg_table,
			direction);
err_buf_map_attachment:
	dma_buf_detach(dma->dma_buf, dma->attachment);
err_buf_attach:
	dma_buf_put(dma->dma_buf);
err_buf_get:
	return 0;
}
Example #27
/**
 * _mfc_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void _mfc_dmabuf_put(struct vb2_plane *planes)
{
	unsigned int plane;

	for (plane = 0; plane < MFC_NUM_PLANE; ++plane) {
		void *mem_priv = planes[plane].mem_priv;

		if (mem_priv) {
			dma_buf_detach(planes[plane].dbuf,
					planes[plane].mem_priv);
			dma_buf_put(planes[plane].dbuf);
			planes[plane].dbuf = NULL;
			planes[plane].mem_priv = NULL;
		}
	}
}
Example #28
static int dmabuf_file_release(struct inode *inode, struct file *file)
{
	struct dmabuf_file *priv = file->private_data;
	int ret = 0;

	if (priv->virt)
		dma_free_writecombine(priv->dev, priv->size, priv->virt,
				      priv->phys);

	if (priv->buf)
		dma_buf_put(priv->buf);

	kfree(priv);

	return ret;
}
Example #29
struct mem_handle *nvhost_dmabuf_get(u32 id, struct nvhost_device *dev)
{
	struct mem_handle *h;
	struct dma_buf *buf;

	buf = dma_buf_get(to_dmabuf_fd(id));
	if (IS_ERR_OR_NULL(buf))
		return (struct mem_handle *)buf;
	else {
		h = (struct mem_handle *)dma_buf_attach(buf, &dev->dev);
		if (IS_ERR_OR_NULL(h))
			dma_buf_put(buf);
	}

	return (struct mem_handle *) ((u32)h | mem_mgr_type_dmabuf);
}
Example #30
int s3cfb_extdsp_unmap_fd(void)
{
	struct s3cfb_extdsp_global *fbdev = get_extdsp_global(0);
	int i;

	printk("[VFB] %s\n", __func__);

	for (i = 0; i < CONFIG_FB_S5P_EXTDSP_NR_BUFFERS; i++) {
		if (fbdev->buf_list[i].dma_buf) {
			dma_buf_put(fbdev->buf_list[i].dma_buf);
			fbdev->buf_list[i].dma_buf = NULL;
			fbdev->buf_list[i].dma_buf_uv = NULL;
			printk("fbdev->buf_list[%d].dma_buf: %p\n", i, fbdev->buf_list[i].dma_buf);
		}
	}
	return 0;
}