static void vb2_ion_detach_dmabuf(void *buf_priv)
{
    struct vb2_ion_buf *buf = buf_priv;

    if (buf->kva != NULL) {
        /* drop the kernel mapping before ending CPU access */
        dma_buf_kunmap(buf->dma_buf, 0, buf->kva);
        dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, DMA_BIDIRECTIONAL);
    }

    dma_buf_detach(buf->dma_buf, buf->attachment);
    kfree(buf);
}
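
For reference, the attach side that this teardown undoes might look roughly like the sketch below. The struct vb2_ion_buf layout is inferred from the fields used above, and the assumption that the allocation context is the attaching device is a guess; this is not the real Exynos code.

static void *vb2_ion_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
                                   unsigned long size, int write)
{
    struct vb2_ion_buf *buf;
    struct dma_buf_attachment *attach;
    struct device *dev = alloc_ctx;    /* assumption: the context is the device */

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    attach = dma_buf_attach(dbuf, dev);
    if (IS_ERR(attach)) {
        kfree(buf);
        return attach;
    }

    buf->dma_buf = dbuf;
    buf->attachment = attach;
    buf->size = size;
    buf->kva = NULL;    /* mapped lazily; unmapped conditionally in detach above */

    return buf;
}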
Example #2
/**
 * rppc_unmap_page - unmap and release a previously mapped page
 * @rpc: rppc instance handle
 * @offset: offset of the translated location within the buffer
 * @base_ptr: kernel mapped address for the page to be unmapped
 * @dmabuf: imported dma_buf to be released
 *
 * This function is called by rppc_xlate_buffers to unmap the
 * page and release the imported buffer. It essentially undoes
 * the functionality of rppc_map_page.
 */
static void rppc_unmap_page(struct rppc_instance *rpc, u32 offset,
			    uint8_t *base_ptr, struct dma_buf *dmabuf)
{
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return;

	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dev_dbg(dev, "Unmapping base_ptr = %p of buf = %p from %zu to %zu bytes\n",
		base_ptr, dmabuf, begin, end);
	dma_buf_kunmap(dmabuf, pg_num, base_ptr);
	dma_buf_end_cpu_access(dmabuf, begin, end, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
}
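
The comment above says this undoes rppc_map_page, which is not shown on this page. Below is a minimal sketch of what the map side has to do, using the same pre-4.6 dma-buf API (dma_buf_get, the four-argument begin/end_cpu_access, dma_buf_kmap); the function name and fd parameter are illustrative, not the real rppc signature.

static uint8_t *example_rppc_map_page(int fd, u32 offset,
				      struct dma_buf **dmabuf_out)
{
	struct dma_buf *dmabuf;
	uint8_t *base_ptr;

	/* take a reference on the imported buffer */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return NULL;

	/* prepare the page containing offset for CPU access */
	if (dma_buf_begin_cpu_access(dmabuf, offset & PAGE_MASK, PAGE_SIZE,
				     DMA_BIDIRECTIONAL) < 0)
		goto put;

	base_ptr = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
	if (!base_ptr)
		goto end;

	*dmabuf_out = dmabuf;
	return base_ptr;

end:
	dma_buf_end_cpu_access(dmabuf, offset & PAGE_MASK, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
put:
	dma_buf_put(dmabuf);
	return NULL;
}

rppc_unmap_page then releases these three resources in reverse order: kunmap, end_cpu_access, put.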
Example #3
void nvhost_dmabuf_kunmap(struct mem_handle *handle, unsigned int pagenum,
		void *addr)
{
	dma_buf_kunmap(to_dmabuf(handle), pagenum, addr);
}
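
The matching map wrapper in the same driver is presumably just as thin; a sketch, assuming to_dmabuf() merely recovers the struct dma_buf behind the opaque mem_handle:

void *nvhost_dmabuf_kmap(struct mem_handle *handle, unsigned int pagenum)
{
	return dma_buf_kmap(to_dmabuf(handle), pagenum);
}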
Example #4
static int igt_dmabuf_export_kmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		pr_err("i915_gem_prime_export failed with err=%d\n", err);
		return err;
	}

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] not initialiased to zero!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, ptr);

	/* obj is still valid here: the exported dmabuf holds its own reference */
	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
		goto err;
	}
	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	ptr = dma_buf_kmap(dmabuf, 1);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 1, ptr);
		pr_err("Exported page[1] not set to 0xaa!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 1, ptr);

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}
	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] did not retain 0xc5!\n");
		err = -EINVAL;
		goto err;
	}
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = dma_buf_kmap(dmabuf, 2);
	if (ptr) {
		pr_err("Erroneously kmapped beyond the end of the object!\n");
		dma_buf_kunmap(dmabuf, 2, ptr);
		err = -EINVAL;
		goto err;
	}

	ptr = dma_buf_kmap(dmabuf, -1);
	if (ptr) {
		pr_err("Erroneously kmapped before the start of the object!\n");
		dma_buf_kunmap(dmabuf, -1, ptr);
		err = -EINVAL;
		goto err;
	}

	err = 0;
err:
	dma_buf_put(dmabuf);
	return err;
}
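
The contract this selftest checks is the general one: every successful dma_buf_kmap() of a page must be paired with a dma_buf_kunmap() of the same page index, and kmap beyond the object's pages must fail. A generic consumer-side sketch of that pairing, reusing the four-argument cpu-access API from the rppc example (the helper name is illustrative):

static int example_fill_pages(struct dma_buf *dmabuf, size_t npages, u8 val)
{
	size_t i;
	int ret;

	/* bracket the CPU writes for the exporter's cache management */
	ret = dma_buf_begin_cpu_access(dmabuf, 0, npages * PAGE_SIZE,
				       DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	for (i = 0; i < npages; i++) {
		void *vaddr = dma_buf_kmap(dmabuf, i);

		if (!vaddr) {
			ret = -ENOMEM;
			break;
		}
		memset(vaddr, val, PAGE_SIZE);
		/* pair each kmap with a kunmap on the same page index */
		dma_buf_kunmap(dmabuf, i, vaddr);
	}

	dma_buf_end_cpu_access(dmabuf, 0, npages * PAGE_SIZE, DMA_TO_DEVICE);
	return ret;
}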