Example #1
/**
 * rppc_map_page - import and map a kernel page in a dma_buf
 * @rpc: rppc instance handle
 * @fd: file descriptor of the dma_buf to import
 * @offset: offset of the location to translate within the buffer
 * @base_ptr: pointer for returning the mapped kernel address
 * @dmabuf: pointer for returning the imported dma_buf handle
 *
 * A helper function that imports a dma_buf and maps into the kernel
 * the single page containing @offset within that buffer. It is
 * called by rppc_xlate_buffers, and returns the kernel-mapped address
 * and the imported dma_buf handle through the @base_ptr and @dmabuf
 * output arguments. The mapping is used to perform an in-place
 * translation of the user-provided pointer at location @offset
 * within the buffer.
 *
 * The mapping is achieved through the appropriate dma_buf ops, and
 * the page will be unmapped after performing the translation. See
 * also rppc_unmap_page.
 *
 * Return: 0 on success, or an appropriate failure code otherwise
 */
static int rppc_map_page(struct rppc_instance *rpc, int fd, u32 offset,
			 uint8_t **base_ptr, struct dma_buf **dmabuf)
{
	int ret = 0;
	uint8_t *ptr = NULL;
	struct dma_buf *dbuf = NULL;
	uint32_t pg_offset;
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return -EINVAL;

	pg_offset = (offset & (PAGE_SIZE - 1));
	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dbuf = dma_buf_get(fd);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		dev_err(dev, "invalid dma_buf file descriptor passed! fd = %d ret = %d\n",
			fd, ret);
		goto out;
	}

	ret = dma_buf_begin_cpu_access(dbuf, begin, end, DMA_BIDIRECTIONAL);
	if (ret < 0) {
		dev_err(dev, "failed to acquire cpu access to the dma buf fd = %d offset = 0x%x, ret = %d\n",
			fd, offset, ret);
		goto put_dmabuf;
	}

	ptr = dma_buf_kmap(dbuf, pg_num);
	if (!ptr) {
		ret = -ENOBUFS;
		dev_err(dev, "failed to map the page containing the translation into kernel fd = %d offset = 0x%x\n",
			fd, offset);
		goto end_cpuaccess;
	}

	*base_ptr = ptr;
	*dmabuf = dbuf;
	dev_dbg(dev, "kmap'd base_ptr = %p buf = %p into kernel from %zu for %zu bytes, pg_offset = 0x%x\n",
		ptr, dbuf, begin, end, pg_offset);
	return 0;

end_cpuaccess:
	dma_buf_end_cpu_access(dbuf, begin, end, DMA_BIDIRECTIONAL);
put_dmabuf:
	dma_buf_put(dbuf);
out:
	return ret;
}
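
The kernel-doc above points to rppc_unmap_page as the counterpart that releases this mapping. A minimal sketch of what that teardown could look like, assuming the same pre-4.6 range-based dma_buf_end_cpu_access() signature used above; the real driver may differ in detail:

/* Hypothetical counterpart to rppc_map_page: undo the kmap, close the
 * CPU access window over the same page range, and drop the reference
 * taken by dma_buf_get().
 */
static void rppc_unmap_page(struct rppc_instance *rpc, u32 offset,
			    uint8_t *base_ptr, struct dma_buf *dmabuf)
{
	size_t begin, end = PAGE_SIZE;
	unsigned long pg_num;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return;

	pg_num = offset >> PAGE_SHIFT;
	begin = offset & PAGE_MASK;

	dma_buf_kunmap(dmabuf, pg_num, base_ptr);
	dma_buf_end_cpu_access(dmabuf, begin, end, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);

	dev_dbg(dev, "unmapped base_ptr = %p dmabuf = %p\n",
		base_ptr, dmabuf);
}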
Example #2
static void *vb2_ion_vaddr(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (WARN_ON(!buf))
		return NULL;

	/* Reuse a previously cached kernel mapping if one exists. */
	if (buf->kva != NULL)
		return buf->kva;

	if (dma_buf_begin_cpu_access(buf->dma_buf,
				     0, buf->size, buf->direction))
		return NULL;

	buf->kva = dma_buf_kmap(buf->dma_buf, 0);

	/* On kmap failure, back out of the CPU access window. */
	if (buf->kva == NULL)
		dma_buf_end_cpu_access(buf->dma_buf,
				       0, buf->size, buf->direction);

	return buf->kva;
}
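
vb2_ion_vaddr() caches the mapping in buf->kva, so something must eventually undo both the kmap and the CPU access bracket. A hedged sketch of such a release helper, reusing the vb2_ion_buf fields from the example above (the helper name itself is hypothetical):

static void vb2_ion_put_vaddr(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (WARN_ON(!buf) || !buf->kva)
		return;

	/* Release the page-0 mapping and close the access window
	 * opened in vb2_ion_vaddr().
	 */
	dma_buf_kunmap(buf->dma_buf, 0, buf->kva);
	dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, buf->direction);
	buf->kva = NULL;
}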
Example #3
void *nvhost_dmabuf_kmap(struct mem_handle *handle, unsigned int pagenum)
{
	return dma_buf_kmap(to_dmabuf(handle), pagenum);
}
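
The matching unmap wrapper is just as thin; a sketch assuming the same to_dmabuf() conversion helper, with a signature that simply mirrors dma_buf_kunmap():

void nvhost_dmabuf_kunmap(struct mem_handle *handle, unsigned int pagenum,
			  void *addr)
{
	dma_buf_kunmap(to_dmabuf(handle), pagenum, addr);
}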
Example #4
static int igt_dmabuf_export_kmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		pr_err("i915_gem_prime_export failed with err=%d\n", err);
		return err;
	}

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] not initialiased to zero!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
		goto err;
	}
	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	ptr = dma_buf_kmap(dmabuf, 1);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 1, ptr);
		pr_err("Exported page[1] not set to 0xaa!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 1, ptr);

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}
	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] did not retain 0xc5!\n");
		err = -EINVAL;
		goto err;
	}
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = dma_buf_kmap(dmabuf, 2);
	if (ptr) {
		pr_err("Erroneously kmapped beyond the end of the object!\n");
		dma_buf_kunmap(dmabuf, 2, ptr);
		err = -EINVAL;
		goto err;
	}

	ptr = dma_buf_kmap(dmabuf, -1);
	if (ptr) {
		pr_err("Erroneously kmapped before the start of the object!\n");
		dma_buf_kunmap(dmabuf, -1, ptr);
		err = -EINVAL;
		goto err;
	}

	err = 0;
err:
	dma_buf_put(dmabuf);
	return err;
}
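
Across all four examples the importer-side pattern is the same: take a reference with dma_buf_get(), bracket CPU access, kmap one page at a time, and unwind in reverse order. A condensed sketch of that sequence under the same pre-4.6 begin/end_cpu_access range signature used in Example #1 (the function name and fd parameter are illustrative, not from any of the drivers above):

static int read_first_byte(int fd, u8 *out)
{
	struct dma_buf *dbuf;
	void *vaddr;
	int ret;

	dbuf = dma_buf_get(fd);			/* take a reference */
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);

	ret = dma_buf_begin_cpu_access(dbuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		goto put;

	vaddr = dma_buf_kmap(dbuf, 0);		/* map page 0 only */
	if (!vaddr) {
		ret = -ENOMEM;
		goto end;
	}

	*out = *(u8 *)vaddr;
	dma_buf_kunmap(dbuf, 0, vaddr);		/* unwind in reverse order */
end:
	dma_buf_end_cpu_access(dbuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
put:
	dma_buf_put(dbuf);
	return ret;
}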