Example #1
/**
 * rppc_map_page - import and map a kernel page in a dma_buf
 * @rpc: rppc instance handle
 * @fd: file descriptor of the dma_buf to import
 * @offset: offset of the translate location within the buffer
 * @base_ptr: pointer for returning mapped kernel address
 * @dmabuf: pointer for returning the imported dma_buf
 *
 * A helper function to import a dma_buf and map into the kernel the
 * page containing @offset within the buffer. The function is called
 * by rppc_xlate_buffers and returns the kernel-mapped address and the
 * imported dma_buf handle through its output arguments. The mapping
 * is used for performing in-place translation of the user-provided
 * pointer at location @offset within the buffer.
 *
 * The mapping is achieved through the appropriate dma_buf ops, and
 * the page will be unmapped after performing the translation. See
 * also rppc_unmap_page.
 *
 * Return: 0 on success, or an appropriate failure code otherwise
 */
static int rppc_map_page(struct rppc_instance *rpc, int fd, u32 offset,
			 uint8_t **base_ptr, struct dma_buf **dmabuf)
{
	int ret = 0;
	uint8_t *ptr = NULL;
	struct dma_buf *dbuf = NULL;
	uint32_t pg_offset;
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return -EINVAL;

	pg_offset = (offset & (PAGE_SIZE - 1));
	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dbuf = dma_buf_get(fd);
	if (IS_ERR(dbuf)) {
		ret = PTR_ERR(dbuf);
		dev_err(dev, "invalid dma_buf file descriptor passed! fd = %d ret = %d\n",
			fd, ret);
		goto out;
	}

	ret = dma_buf_begin_cpu_access(dbuf, begin, end, DMA_BIDIRECTIONAL);
	if (ret < 0) {
		dev_err(dev, "failed to acquire cpu access to the dma buf fd = %d offset = 0x%x, ret = %d\n",
			fd, offset, ret);
		goto put_dmabuf;
	}

	ptr = dma_buf_kmap(dbuf, pg_num);
	if (!ptr) {
		ret = -ENOBUFS;
		dev_err(dev, "failed to map the page containing the translation into kernel fd = %d offset = 0x%x\n",
			fd, offset);
		goto end_cpuaccess;
	}

	*base_ptr = ptr;
	*dmabuf = dbuf;
	dev_dbg(dev, "kmap'd base_ptr = %p buf = %p into kernel from %zu for %zu bytes, pg_offset = 0x%x\n",
		ptr, dbuf, begin, end, pg_offset);
	return 0;

end_cpuaccess:
	dma_buf_end_cpu_access(dbuf, begin, end, DMA_BIDIRECTIONAL);
put_dmabuf:
	dma_buf_put(dbuf);
out:
	return ret;
}
Example #2
static void vb2_ion_detach_dmabuf(void *buf_priv)
{
    struct vb2_ion_buf *buf = buf_priv;

    if (buf->kva != NULL) {
        /* undo the lazy kmap and begin_cpu_access done in vb2_ion_vaddr */
        dma_buf_kunmap(buf->dma_buf, 0, buf->kva);
        dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, buf->direction);
    }

    dma_buf_detach(buf->dma_buf, buf->attachment);
    kfree(buf);
}
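The detach path above releases state set up by a matching attach hook. Below is a hedged sketch of what that attach side plausibly looks like, using only the vb2_ion_buf fields visible in these examples (dma_buf, attachment, size, direction, kva); the real vb2_ion_attach_dmabuf may differ in detail:

/*
 * Hypothetical sketch of the attach counterpart: it only attaches the
 * dma_buf; the kernel mapping in buf->kva is created lazily by
 * vb2_ion_vaddr (Example #8) and torn down in vb2_ion_detach_dmabuf.
 */
static void *vb2_ion_attach_dmabuf_sketch(struct device *dev,
                                          struct dma_buf *dbuf,
                                          unsigned long size, int write)
{
    struct vb2_ion_buf *buf;

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->attachment = dma_buf_attach(dbuf, dev);
    if (IS_ERR(buf->attachment)) {
        void *err = buf->attachment;

        kfree(buf);
        return err;
    }

    buf->dma_buf = dbuf;
    buf->size = size;
    buf->direction = write ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
    buf->kva = NULL;    /* mapped lazily in vb2_ion_vaddr */

    return buf;
}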
Example #3
static int evdi_user_framebuffer_dirty(struct drm_framebuffer *fb,
				       __always_unused struct drm_file *file,
				       __always_unused unsigned int flags,
				       __always_unused unsigned int color,
				       struct drm_clip_rect *clips,
				       unsigned int num_clips)
{
	struct evdi_framebuffer *ufb = to_evdi_fb(fb);
	struct drm_device *dev = ufb->base.dev;
	struct evdi_device *evdi = dev->dev_private;
	int i;
	int ret = 0;

	EVDI_CHECKPT();
	drm_modeset_lock_all(fb->dev);

	if (!ufb->active)
		goto unlock;

	if (ufb->obj->base.import_attach) {
		ret =
			dma_buf_begin_cpu_access(
					ufb->obj->base.import_attach->dmabuf,
#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
					0, ufb->obj->base.size,
#endif
					DMA_FROM_DEVICE);
		if (ret)
			goto unlock;
	}

	for (i = 0; i < num_clips; i++) {
		ret = evdi_handle_damage(ufb, clips[i].x1, clips[i].y1,
					 clips[i].x2 - clips[i].x1,
					 clips[i].y2 - clips[i].y1);
		if (ret)
			goto unlock;
	}

	if (ufb->obj->base.import_attach)
		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
				       0, ufb->obj->base.size,
#endif
				       DMA_FROM_DEVICE);
	atomic_add(1, &evdi->frame_count);
 unlock:
	drm_modeset_unlock_all(fb->dev);
	return ret;
}
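The KERNEL_VERSION guards above exist because the dma-buf CPU-access API changed in Linux 4.6: dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() lost their start/length range arguments, and the end call gained an int return. A minimal sketch of version-neutral wrappers that would hide the #if from callers follows; the evdi_begin_cpu_access/evdi_end_cpu_access helper names are hypothetical, not part of the evdi driver:

#include <linux/dma-buf.h>
#include <linux/version.h>

/* Hypothetical wrappers: fold the 4.6 signature change into one place. */
static inline int evdi_begin_cpu_access(struct dma_buf *buf, size_t len,
					enum dma_data_direction dir)
{
#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
	return dma_buf_begin_cpu_access(buf, 0, len, dir); /* pre-4.6: range-based */
#else
	return dma_buf_begin_cpu_access(buf, dir);	   /* 4.6+: whole buffer */
#endif
}

static inline void evdi_end_cpu_access(struct dma_buf *buf, size_t len,
				       enum dma_data_direction dir)
{
#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
	dma_buf_end_cpu_access(buf, 0, len, dir);
#else
	dma_buf_end_cpu_access(buf, dir);	/* int return ignored here */
#endif
}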
Example #4
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
				       obj->base.size, DMA_BIDIRECTIONAL);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}
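For context, the mapping that udl_gem_vunmap() tears down for imported buffers is created by a matching vmap path. A hedged sketch of that counterpart, covering the imported-dmabuf case only (the real udl_gem_vmap() also handles locally allocated objects and differs in its error handling):

/* Sketch of the import-path counterpart to udl_gem_vunmap() above. */
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int ret;

	if (obj->base.import_attach) {
		/* bracket the CPU mapping with begin/end_cpu_access */
		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
					       0, obj->base.size,
					       DMA_BIDIRECTIONAL);
		if (ret)
			return ret;

		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping) {
			dma_buf_end_cpu_access(obj->base.import_attach->dmabuf,
					       0, obj->base.size,
					       DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	}
	return 0;
}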
Example #5
File: evdi_fb.c  Project: ajbogh/evdi
static int evdi_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file,
                                       unsigned flags,
                                       unsigned color,
                                       struct drm_clip_rect *clips,
                                       unsigned num_clips)
{
  struct evdi_framebuffer *ufb = to_evdi_fb(fb);
  struct drm_device *dev = ufb->base.dev;
  struct evdi_device *evdi = dev->dev_private;
  int i;
  int ret = 0;

  EVDI_CHECKPT();
  drm_modeset_lock_all(fb->dev);

  if (ufb->obj->base.import_attach) {
    ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
                                   0, ufb->obj->base.size,
                                   DMA_FROM_DEVICE);
    if (ret) {
      goto unlock;
    }
  }

  for (i = 0; i < num_clips; i++) {
    ret = evdi_handle_damage(ufb, clips[i].x1, clips[i].y1,
                             clips[i].x2 - clips[i].x1,
                             clips[i].y2 - clips[i].y1);
    if (ret) {
      goto unlock;
    }
  }

  if (ufb->obj->base.import_attach) {
    dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
                           0,
                           ufb->obj->base.size,
                           DMA_FROM_DEVICE);
  }
  atomic_add(1, &evdi->frame_count);
unlock:
  drm_modeset_unlock_all(fb->dev);
  return ret;
}
Example #6
/**
 * rppc_unmap_page - unmap and release a previously mapped page
 * @rpc: rppc instance handle
 * @offset: offset of the translate location within the buffer
 * @base_ptr: kernel mapped address for the page to be unmapped
 * @dmabuf: imported dma_buf to be released
 *
 * This function is called by rppc_xlate_buffers to unmap the
 * page and release the imported buffer. It essentially undoes
 * the functionality of rppc_map_page.
 */
static void rppc_unmap_page(struct rppc_instance *rpc, u32 offset,
			    uint8_t *base_ptr, struct dma_buf *dmabuf)
{
	uint32_t pg_offset;
	unsigned long pg_num;
	size_t begin, end = PAGE_SIZE;
	struct device *dev = rpc->dev;

	if (!base_ptr || !dmabuf)
		return;

	pg_offset = (offset & (PAGE_SIZE - 1));
	begin = offset & PAGE_MASK;
	pg_num = offset >> PAGE_SHIFT;

	dev_dbg(dev, "Unkmaping base_ptr = %p of buf = %p from %zu to %zu bytes\n",
		base_ptr, dmabuf, begin, end);
	dma_buf_kunmap(dmabuf, pg_num, base_ptr);
	dma_buf_end_cpu_access(dmabuf, begin, end, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
}
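Together with rppc_map_page() from Example #1, this suggests the following call pattern. Here is a minimal sketch of the in-place translation step the two helpers bracket; rppc_xlate_one() and its device_addr argument are hypothetical stand-ins for what rppc_xlate_buffers actually does:

/* Hypothetical caller: map the page, patch the pointer in place, unmap. */
static int rppc_xlate_one(struct rppc_instance *rpc, int fd, u32 offset,
			  u32 device_addr)
{
	uint8_t *base_ptr = NULL;
	struct dma_buf *dbuf = NULL;
	int ret;

	ret = rppc_map_page(rpc, fd, offset, &base_ptr, &dbuf);
	if (ret)
		return ret;

	/* overwrite the user-provided pointer at the page offset of @offset */
	*(u32 *)(base_ptr + (offset & (PAGE_SIZE - 1))) = device_addr;

	rppc_unmap_page(rpc, offset, base_ptr, dbuf);
	return 0;
}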
Example #7
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned flags, unsigned color,
				      struct drm_clip_rect *clips,
				      unsigned num_clips)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);
	int i;
	int ret = 0;

	drm_modeset_lock_all(fb->dev);

	if (!ufb->active_16)
		goto unlock;

	if (ufb->obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
					       DMA_FROM_DEVICE);
		if (ret)
			goto unlock;
	}

	for (i = 0; i < num_clips; i++) {
		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
				  clips[i].x2 - clips[i].x1,
				  clips[i].y2 - clips[i].y1);
		if (ret)
			break;
	}

	if (ufb->obj->base.import_attach) {
		ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
					     DMA_FROM_DEVICE);
	}

 unlock:
	drm_modeset_unlock_all(fb->dev);

	return ret;
}
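This udl variant targets 4.6+ kernels: dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() take only the dma_buf and the DMA direction, with no start/length range, and the end call's int return value is propagated to the caller. Compare the guarded version in Example #3, which supports both signatures.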
Example #8
static void *vb2_ion_vaddr(void *buf_priv)
{
    struct vb2_ion_buf *buf = buf_priv;

    if (WARN_ON(!buf))
        return NULL;

    if (buf->kva != NULL)
        return buf->kva;

    if (dma_buf_begin_cpu_access(buf->dma_buf,
                0, buf->size, buf->direction))
        return NULL;

    buf->kva = dma_buf_kmap(buf->dma_buf, 0);

    if (buf->kva == NULL)
        dma_buf_end_cpu_access(buf->dma_buf,
                0, buf->size, buf->direction);

    return buf->kva;
}
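Note the lazy-mapping pattern here: buf->kva is created on the first vaddr request and cached, and there is no matching teardown in this hook. The kunmap and end_cpu_access happen later, in vb2_ion_detach_dmabuf() (Example #2), when the buffer is detached.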