Example #1
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			/* drop the usage count taken by pm_runtime_get_sync() even on failure */
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
Example #2
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}
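The unpin above is the counterpart of nouveau_bo_pin() shown in Example #20: the two share a pin_refcnt counter, so only the first pin and the last unpin actually reserve the buffer, re-validate its placement and adjust the aperture accounting. The sketch below only illustrates how a caller might balance the pair; the helper name, the surrounding context and the choice of TTM_PL_FLAG_VRAM as the pin target are assumptions, not code taken from these drivers.

/*
 * Hypothetical caller: keep the buffer resident in VRAM for the duration
 * of some operation, then drop the pin so it can be evicted again.
 */
static int example_with_pinned_bo(struct nouveau_bo *nvbo)
{
	int ret;

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	/* ... access the buffer while its placement cannot change ... */

	return nouveau_bo_unpin(nvbo);
}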
Example #3
/**
 * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct ttm_placement *placement,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
Example #4
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vma *vma;
	int ret;

	if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		/* pm_runtime_get_sync() raises the usage count even on failure */
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
Example #5
/**
 * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
Example #6
int ttm_pl_reference_ioctl(struct ttm_object_file *tfile, void *data)
{
	union ttm_pl_reference_arg *arg = data;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_bo_user_object *user_bo;
	struct ttm_buffer_object *bo;
	struct ttm_base_object *base;
	int ret;

	user_bo = ttm_bo_user_lookup(tfile, arg->req.handle);
	if (unlikely(user_bo == NULL)) {
		printk(KERN_ERR "Could not reference buffer object.\n");
		return -EINVAL;
	}

	bo = &user_bo->bo;
	ret = ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR
		       "Could not add a reference to buffer object.\n");
		goto out;
	}

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);

out:
	base = &user_bo->base;
	ttm_base_object_unref(&base);
	return ret;
}
Example #7
/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin or unpin.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *overlay_placement = &vmw_vram_placement;
	int ret;

	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->active_master->lock);

	return ret;
}
Example #8
/**
 * vmw_dmabuf_to_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to validate the buffer against.
 * @interruptible:  Use interruptible wait.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
Example #9
/**
 * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *buf,
				    bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
Example #10
/**
 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *placement;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	if (pin)
		placement = &vmw_vram_gmr_ne_placement;
	else
		placement = &vmw_vram_gmr_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto err_unreserve;


	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

err_unreserve:
	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
Example #11
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
Example #12
static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
				    struct drm_framebuffer *old_fb)
{
	struct bochs_device *bochs =
		container_of(crtc, struct bochs_device, crtc);
	struct bochs_framebuffer *bochs_fb;
	struct bochs_bo *bo;
	u64 gpu_addr = 0;
	int ret;

	if (old_fb) {
		bochs_fb = to_bochs_framebuffer(old_fb);
		bo = gem_to_bochs_bo(bochs_fb->obj);
		ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
		if (ret) {
			DRM_ERROR("failed to reserve old_fb bo\n");
		} else {
			bochs_bo_unpin(bo);
			ttm_bo_unreserve(&bo->bo);
		}
	}

	if (WARN_ON(crtc->primary->fb == NULL))
		return -EINVAL;

	bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
	bo = gem_to_bochs_bo(bochs_fb->obj);
	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
	if (ret)
		return ret;

	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	if (ret) {
		ttm_bo_unreserve(&bo->bo);
		return ret;
	}

	ttm_bo_unreserve(&bo->bo);
	bochs_hw_setbase(bochs, x, y, gpu_addr);
	return 0;
}
Example #13
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
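nouveau_bo_map() leaves the kernel mapping in nvbo->kmap; callers read the virtual address back with ttm_kmap_obj_virtual() (used the same way in Examples #16 and #30) and release it with ttm_bo_kunmap(). The following caller is a sketch for illustration only; the function name and what is done with the mapping are assumptions.

/*
 * Hypothetical caller: map the buffer, access it through the kmap object,
 * then drop the mapping again.
 */
static int example_access_mapped_bo(struct nouveau_bo *nvbo)
{
	bool is_iomem;
	void *ptr;
	int ret;

	ret = nouveau_bo_map(nvbo);
	if (ret)
		return ret;

	ptr = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	if (!is_iomem)
		memset(ptr, 0, PAGE_SIZE);	/* e.g. clear the first page */

	ttm_bo_kunmap(&nvbo->kmap);	/* undoes the ttm_bo_kmap() in nouveau_bo_map() */
	return 0;
}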
Example #14
int psb_validate_kernel_buffer(struct psb_context *context,
			       struct ttm_buffer_object *bo,
			       uint32_t fence_class,
			       uint64_t set_flags, uint64_t clr_flags)
{
	struct psb_validate_buffer *item;
	uint32_t cur_fence_type;
	int ret;

	if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
		DRM_ERROR("Out of free validation buffer entries for "
			  "kernel buffer validation.\n");
		return -ENOMEM;
	}

	item = &context->buffers[context->used_buffers];
	item->user_val_arg = NULL;
	item->base.reserved = 0;

	ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
	if (unlikely(ret != 0))
		return ret;

	ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
				       &cur_fence_type);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	item->base.bo = ttm_bo_reference(bo);
	item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
	item->base.reserved = 1;

	/* Internal locking ??? FIXMEAC */
	list_add_tail(&item->base.head, &context->kern_validate_list);
	context->used_buffers++;
	/*
	ret = ttm_bo_validate(bo, 1, 0, 0);
	if (unlikely(ret != 0))
		goto out_unlock;
	*/
	item->offset = bo->offset;
	item->flags = bo->mem.placement;
	context->fence_types |= cur_fence_type;

	return ret;
}
Example #15
/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	if (pin)
		place = vmw_vram_ne_placement.placement[0];
	else
		place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;

	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
Example #16
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
Example #17
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
Example #18
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			pm_runtime_put_autosuspend(dev);
			kfree(vma);	/* don't leak the vma allocated above */
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
Example #19
/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
//   struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	int ret = 0;

	if (pin)
		placement = vmw_vram_ne_placement;
	else
		placement = vmw_vram_placement;
	placement.lpfn = bo->num_pages;

//   ret = ttm_write_lock(&vmaster->lock, interruptible);
//   if (unlikely(ret != 0))
//       return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
//   ttm_write_unlock(&vmaster->lock);

	return ret;
}
Example #20
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}
Example #21
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}
Example #22
/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
Example #23
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
Example #24
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
Example #25
int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_setstatus_arg *arg = data;
	struct ttm_pl_setstatus_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_placement placement = default_placement;
	uint32_t flags[2];
	int ret;

	bo = ttm_buffer_object_lookup(tfile, req->handle);
	if (unlikely(bo == NULL)) {
		printk(KERN_ERR
		       "Could not find buffer object for setstatus.\n");
		return -EINVAL;
	}

	bdev = bo->bdev;

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err1;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_wait_cpu(bo, false);
	if (unlikely(ret != 0))
		goto out_err2;
#endif

	flags[0] = req->set_placement;
	flags[1] = req->clr_placement;

	placement.num_placement = 2;
	placement.placement = flags;

	/* spin_lock(&bo->lock); */ /* already holding the reservation lock */

	ret = psb_ttm_bo_check_placement(bo, &placement);
	if (unlikely(ret != 0))
		goto out_err2;

	placement.num_placement = 1;
	flags[0] = (req->set_placement | bo->mem.placement) & ~req->clr_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_validate(bo, &placement, true, false, false);
#else
	ret = ttm_bo_validate(bo, &placement, true, false);
#endif
	if (unlikely(ret != 0))
		goto out_err2;

	ttm_pl_fill_rep(bo, rep);
out_err2:
	/* spin_unlock(&bo->lock); */
	ttm_bo_unreserve(bo);
out_err1:
	ttm_read_unlock(lock);
out_err0:
	ttm_bo_unref(&bo);
	return ret;
}
Example #26
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
			   struct ttm_bo_device *bdev,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_ub_arg *arg = data;
	struct ttm_pl_create_ub_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	if (req->user_address & ~PAGE_MASK) {
		printk(KERN_ERR "User pointer buffer need page alignment\n");
		return -EFAULT;
	}

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}
	bo = &user_bo->bo;

	placement.num_placement = 1;
	placement.placement = &flags;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))

/*  For kernel 3.0, use the desired type. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user

#else
/*  TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting,
    as ttm_bo_type_user is no longer implemented.
    This will not result in working code.
    FIXME - to be removed. */

#warning warning: ttm_bo_type_user no longer supported

/*  For kernel 3.3+, use the wrong type, which will compile but not work. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel

#endif

#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
		/* Handle a frame buffer allocated in user space: convert
		 * the user-space virtual address into a list of pages.
		 */
		unsigned int page_nr = 0;
		struct vm_area_struct *vma = NULL;
		struct sg_table *sg = NULL;
		unsigned long num_pages = 0;
		struct page **pages = 0;

		num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
		if (unlikely(pages == NULL)) {
			printk(KERN_ERR "kzalloc pages failed\n");
			return -ENOMEM;
		}

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, req->user_address);
		if (unlikely(vma == NULL)) {
			up_read(&current->mm->mmap_sem);
			kfree(pages);
			printk(KERN_ERR "find_vma failed\n");
			return -EFAULT;
		}
		unsigned long before_flags = vma->vm_flags;
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			vma->vm_flags = vma->vm_flags & ((~VM_IO) & (~VM_PFNMAP));
		page_nr = get_user_pages(current, current->mm,
					 req->user_address,
					 (int)(num_pages), 1, 0, pages,
					 NULL);
		vma->vm_flags = before_flags;
		up_read(&current->mm->mmap_sem);

		/* can be written by caller, not forced */
		if (unlikely(page_nr < num_pages)) {
			kfree(pages);
			pages = 0;
			printk(KERN_ERR "get_user_pages err.\n");
			return -ENOMEM;
		}
		sg = drm_prime_pages_to_sg(pages, num_pages);
		if (unlikely(sg == NULL)) {
			kfree(pages);
			printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
			return -ENOMEM;
		}
		kfree(pages);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  TTM_HACK_WORKAROUND_ttm_bo_type_user,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  NULL,
			  &ttm_bo_user_destroy);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#endif

	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */
	ttm_read_unlock(lock);
	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	return ret;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
Example #27
int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
			struct ttm_bo_device *bdev,
			struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_arg *arg = data;
	struct ttm_pl_create_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	bo = &user_bo->bo;
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}

	placement.num_placement = 1;
	placement.placement = &flags;

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |=  TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, 0, true,
			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, true,
			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
#endif
	ttm_read_unlock(lock);
	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */

	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	return ret;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
Example #28
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
Example #29
static int bochsfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct bochs_device *bochs =
		container_of(helper, struct bochs_device, fb.helper);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct bochs_bo *bo = NULL;
	int size, ret;

	if (sizes->surface_bpp != 32)
		return -EINVAL;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	/* alloc, pin & map bo */
	ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_bochs_bo(gobj);

	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
	if (ret)
		return ret;

	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fbcon\n");
		ttm_bo_unreserve(&bo->bo);
		return ret;
	}

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
			  &bo->kmap);
	if (ret) {
		DRM_ERROR("failed to kmap fbcon\n");
		ttm_bo_unreserve(&bo->bo);
		return ret;
	}

	ttm_bo_unreserve(&bo->bo);

	/* init fb device */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->par = &bochs->fb.helper;

	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
	if (ret) {
		drm_fb_helper_release_fbi(helper);
		return ret;
	}

	bochs->fb.size = size;

	/* setup helper */
	fb = &bochs->fb.gfb.base;
	bochs->fb.helper.fb = fb;

	strcpy(info->fix.id, "bochsdrmfb");

	info->flags = FBINFO_DEFAULT;
	info->fbops = &bochsfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
			       sizes->fb_height);

	info->screen_base = bo->kmap.virtual;
	info->screen_size = size;

	drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
	info->fix.smem_start = 0;
	info->fix.smem_len = size;

	return 0;
}
Example #30
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else