Example 1
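qxl: reap an idle surface BO. The four-argument ttm_bo_wait() is called under bdev->fence_lock; when stall is false the function returns -EBUSY rather than sleeping, and when stalling it drops qdev->surf_evict_mutex around the wait.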
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret == -EBUSY)
		return -EBUSY;

	if (surf->fence.num_active_releases > 0 && stall == false) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	spin_lock(&surf->tbo.bdev->fence_lock);
	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
	spin_unlock(&surf->tbo.bdev->fence_lock);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret == -EBUSY) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
Example 2
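nouveau: VMA teardown. With more than one shared fence the code waits synchronously via ttm_bo_wait(); with a single shared or an exclusive fence it defers the delete to fence completion through nouveau_fence_work(), and unmapped or unfenced VMAs are freed immediately.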
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}
Example 3
int ttm_pl_waitidle_ioctl(struct ttm_object_file *tfile, void *data)
{
	struct ttm_pl_waitidle_arg *arg = data;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_buffer_object_lookup(tfile, arg->handle);
	if (unlikely(bo == NULL)) {
		printk(KERN_ERR "Could not find buffer object for waitidle.\n");
		return -EINVAL;
	}

	ret =
		psb_ttm_bo_block_reservation(bo, true,
					     arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
	if (unlikely(ret != 0))
		goto out;
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo,
			  arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
			  true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
	spin_unlock(&bo->bdev->fence_lock);
	psb_ttm_bo_unblock_reservation(bo);
out:
	ttm_bo_unref(&bo);
	return ret;
}
Example 4
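TTM core (older kernel): delayed-destroy helper that waits for the GPU under bdev->fence_lock, reserves the BO, and releases its memory-type resources; if another thread attached a sync object in the meantime it unreserves and retries.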
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool no_wait_reserve,
			       bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret = 0;

retry:
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0))
		return ret;

	spin_lock(&glob->lru_lock);

	if (unlikely(list_empty(&bo->ddestroy))) {
		spin_unlock(&glob->lru_lock);
		return 0;
	}

	ret = ttm_bo_reserve_locked(bo, interruptible,
				    no_wait_reserve, false, 0);

	if (unlikely(ret != 0)) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	/**
	 * We can re-check for sync object without taking
	 * the bo::lock since setting the sync object requires
	 * also bo::reserved. A busy object at this point may
	 * be caused by another thread recently starting an accelerated
	 * eviction.
	 */

	if (unlikely(bo->sync_obj)) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
Example 5
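TTM core (older kernel): eviction path that waits for the BO to idle, asks the driver for an eviction placement via evict_flags(), finds space with ttm_bo_mem_space(), and moves the buffer with ttm_bo_handle_move_mem().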
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
Example 6
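gma500/psb: validation helper that derives the new fence type and, when the requested fence class or type conflicts with the fence already attached to the BO, waits interruptibly with ttm_bo_wait(bo, 0, 1, 0) before proceeding.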
static int
psb_placement_fence_type(struct ttm_buffer_object *bo,
			 uint64_t set_val_flags,
			 uint64_t clr_val_flags,
			 uint32_t new_fence_class,
			 uint32_t *new_fence_type)
{
	int ret;
	uint32_t n_fence_type;
	/*
	uint32_t set_flags = set_val_flags & 0xFFFFFFFF;
	uint32_t clr_flags = clr_val_flags & 0xFFFFFFFF;
	*/
	struct ttm_fence_object *old_fence;
	uint32_t old_fence_type;
	struct ttm_placement placement;

	if (unlikely
	    (!(set_val_flags &
	       (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)))) {
		DRM_ERROR
		    ("GPU access type (read / write) is not indicated.\n");
		return -EINVAL;
	}

	/* User space driver doesn't set any TTM placement flags in
					set_val_flags or clr_val_flags */
	placement.num_placement = 0;/* FIXME  */
	placement.num_busy_placement = 0;
	placement.fpfn = 0;
	placement.lpfn = 0;
	ret = psb_ttm_bo_check_placement(bo, &placement);
	if (unlikely(ret != 0))
		return ret;

	switch (new_fence_class) {
	default:
		n_fence_type = _PSB_FENCE_TYPE_EXE;
	}

	*new_fence_type = n_fence_type;
	old_fence = (struct ttm_fence_object *) bo->sync_obj;
	old_fence_type = (uint32_t) (unsigned long) bo->sync_obj_arg;

	if (old_fence && ((new_fence_class != old_fence->fence_class) ||
			  ((n_fence_type ^ old_fence_type) &
			   old_fence_type))) {
		ret = ttm_bo_wait(bo, 0, 1, 0);
		if (unlikely(ret != 0))
			return ret;
	}
	/*
	bo->proposed_flags = (bo->proposed_flags | set_flags)
		& ~clr_flags & TTM_PL_MASK_MEMTYPE;
	*/
	return 0;
}
Example 7
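TTM core (older kernel): destroy-or-queue path. A non-blocking ttm_bo_wait() probes for idleness; an idle BO that can be trylock-reserved is torn down immediately, otherwise it is placed on bdev->ddestroy, its sync object is flushed, and the delayed-destroy work is scheduled.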
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver;
	void *sync_obj = NULL;
	void *sync_obj_arg;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!bo->sync_obj) {

		spin_lock(&glob->lru_lock);

		/**
		 * Lock inversion between bo:reserve and bdev::fence_lock here,
		 * but that's OK, since we're only trylocking.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		if (unlikely(ret == -EBUSY))
			goto queue;

		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	} else {
		spin_lock(&glob->lru_lock);
	}
queue:
	driver = bdev->driver;
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	sync_obj_arg = bo->sync_obj_arg;

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj, sync_obj_arg);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
Example 8
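virtio-gpu: reserve the BO, perform a lazy interruptible ttm_bo_wait(), and unreserve; with no_wait set, both the reservation and the wait return -EBUSY instead of blocking.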
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
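As a usage illustration only (not taken from the driver), a caller that merely wants to poll for idleness could wrap the helper above with no_wait = true; virtio_gpu_object_is_idle() below is a hypothetical name:

/* Hypothetical polling wrapper, for illustration only: true means the BO is
 * idle right now; -EBUSY from the reserve or the wait means work is pending. */
static bool virtio_gpu_object_is_idle(struct virtio_gpu_object *bo)
{
	return virtio_gpu_object_wait(bo, true) == 0;
}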
Example 9
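virtio-gpu (newer kernel): move callback using the three-argument ttm_bo_wait() with interruptible/no_wait_gpu taken from struct ttm_operation_ctx, followed by a null move.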
static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	virtio_gpu_move_null(bo, new_mem);
	return 0;
}
Example 10
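TTM core (older kernel): grab a CPU-write reference. The BO is reserved so the LRU lists stay consistent, ttm_bo_wait() runs under the per-BO bo->lock, and bo->cpu_writers is incremented only if the wait succeeds.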
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
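For context, this grab is normally paired with ttm_bo_synccpu_write_release() from the same era of TTM; the sketch below is illustrative only, and do_cpu_write() is a placeholder for whatever CPU access the caller performs:

/* Illustrative sketch, not taken from the examples above: bracket a CPU write
 * with the synccpu grab/release pair. */
static int cpu_write_sketch(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_synccpu_write_grab(bo, false);	/* may block on the GPU */
	if (ret)
		return ret;
	do_cpu_write(bo);			/* placeholder for the real access */
	ttm_bo_synccpu_write_release(bo);	/* drops bo->cpu_writers */
	return 0;
}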
Example 11
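qxl: move callback that waits for idle and then either performs a null move for an unpopulated system-memory BO or falls back to ttm_bo_move_memcpy().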
static int qxl_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		qxl_move_null(bo, new_mem);
		return 0;
	}
	return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu,
				  new_mem);
}
Example 12
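TTM core (older kernel): validate-time move. The GPU wait runs under bo->lock, ttm_bo_mem_space() picks the target placement, ttm_bo_handle_move_mem() performs the move, and a partially allocated mm_node is released on failure.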
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved = false;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}
Example 13
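TTM core: move into or out of system memory. Leaving non-system memory requires an idle BO, so ttm_bo_wait() runs before ttm_tt_unbind() and ttm_bo_free_old_node(); the TTM is then re-bound for a non-system destination.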
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		   struct ttm_operation_ctx *ctx,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem, ctx);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
Example 14
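nouveau (newer kernel): the same VMA teardown as Example 2, ported to struct dma_fence. The delete is queued behind the fence with nouveau_cli_work_queue(); if the work item cannot be allocated, the code falls back to a bounded dma_fence_wait_timeout() and deletes synchronously.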
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_gem_object_unmap *work;
	struct dma_fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del_init(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (!fence || !mapped) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
Example 15
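qxl (newer kernel): the same surface-reaping logic as Example 1, using the three-argument ttm_bo_wait() with no explicit fence_lock.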
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret)
		return ret;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	ret = ttm_bo_wait(&surf->tbo, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret) {
		qxl_bo_unreserve(surf);
		return ret;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
Example 16
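vmwgfx: swap_notify hook that does the driver-side bookkeeping (vmw_bo_swap_notify()) and then waits uninterruptibly for the BO to idle before its pages are swapped out. Example 17 below shows the core-side caller, which invokes this hook through bo->bdev->driver->swap_notify(bo).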
/**
 * vmw_swap_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}
Example 17
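TTM core (older kernel): shrinker callback that picks the first BO on the swap LRU, reserves it, waits for the GPU, moves it to cached system memory if necessary, notifies the driver, and swaps its pages out with ttm_tt_swapout().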
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 *
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
Example 18
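virtio-gpu: a larger slice of the driver's TTM glue (eviction placement, io_mem reserve/free, the stub GTT backend, and ttm_tt creation), ending with the same virtio_gpu_bo_move() already shown in Example 9.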
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn  = 0,
		.lpfn  = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
	};

	placement->placement = &placements;
	placement->busy_placement = &placements;
	placement->num_placement = 1;
	placement->num_busy_placement = 1;
}

static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
				    struct file *filp)
{
	return 0;
}

static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
					 struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		/* system memory */
		return 0;
	default:
		return -EINVAL;
	}
	return 0;
}

static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
				       struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct virtio_gpu_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct virtio_gpu_device	*vgdev;
	u64				offset;
};

static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
				       struct ttm_mem_reg *bo_mem)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages)
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);

	/* Not implemented */
	return 0;
}

static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return 0;
}

static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func virtio_gpu_backend_func = {
	.bind = &virtio_gpu_ttm_backend_bind,
	.unbind = &virtio_gpu_ttm_backend_unbind,
	.destroy = &virtio_gpu_ttm_backend_destroy,
};

static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
					       uint32_t page_flags)
{
	struct virtio_gpu_device *vgdev;
	struct virtio_gpu_ttm_tt *gtt;

	vgdev = virtio_gpu_get_vgdev(bo->bdev);
	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
	gtt->vgdev = vgdev;
	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
				 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
	if (ret)
		return ret;

	virtio_gpu_move_null(bo, new_mem);
	return 0;
}
Example 19
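TTM core (older kernel, per-BO lock): destroy path that probes with a non-blocking ttm_bo_wait() (blocking when remove_all is set), tears the BO down immediately when no sync object remains, and otherwise queues it on bdev->ddestroy, returning -EBUSY if it was already queued.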
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);

		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
Example 20
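FreeBSD TTM port: the vm_fault handler. It reserves the BO (retrying on -EBUSY), waits uninterruptibly for a pipelined move to finish when TTM_BO_PRIV_FLAG_MOVING is set, reserves the io space, and inserts the backing page into the VM object.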
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busied(m)) {
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs");
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_WAIT;
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_xbusy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}