static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool no_wait_reserve,
			       bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret = 0;

retry:
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0))
		return ret;

	spin_lock(&glob->lru_lock);

	if (unlikely(list_empty(&bo->ddestroy))) {
		spin_unlock(&glob->lru_lock);
		return 0;
	}

	ret = ttm_bo_reserve_locked(bo, interruptible,
				    no_wait_reserve, false, 0);

	if (unlikely(ret != 0)) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	/**
	 * We can re-check for the sync object without taking
	 * bdev::fence_lock, since setting the sync object also
	 * requires bo::reserved. A busy object at this point may
	 * be caused by another thread that recently started an
	 * accelerated eviction.
	 */

	if (unlikely(bo->sync_obj)) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;	/* the ddestroy list held a list_kref reference */

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
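This function is the consumer side of the bdev::ddestroy list that ttm_bo_cleanup_refs_or_queue (example #2 below) populates. For context, here is a minimal sketch of a delayed-destroy pass that drains the list. It is loosely modeled on ttm_bo_delayed_delete() from the same file; the simplified loop shape and the example_ name are illustrative assumptions, not the verbatim kernel implementation.

/*
 * Hedged sketch of a delayed-destroy pass, loosely modeled on
 * ttm_bo_delayed_delete() in ttm_bo.c; the loop shape and the
 * example_ name are illustrative assumptions. Pinning the current
 * entry via list_kref keeps it valid across the lru_lock drop that
 * the potentially blocking ttm_bo_cleanup_refs() call requires.
 */
static int example_delayed_delete(struct ttm_bo_device *bdev)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		entry = list_first_entry(&bdev->ddestroy,
					 struct ttm_buffer_object, ddestroy);
		kref_get(&entry->list_kref);	/* keep entry alive unlocked */
		spin_unlock(&glob->lru_lock);

		/* May block on the GPU; takes lru_lock internally. */
		ret = ttm_bo_cleanup_refs(entry, false, false, false);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

On success, ttm_bo_cleanup_refs() has already taken the entry off bdev::ddestroy, so re-reading list_first_entry() on the next iteration never revisits a freed object.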
Example #2
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver;
	void *sync_obj = NULL;
	void *sync_obj_arg;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);	/* non-blocking poll; clears bo->sync_obj if signaled */
	if (!bo->sync_obj) {
		spin_lock(&glob->lru_lock);

		/**
		 * Lock inversion between bo::reserve and bdev::fence_lock
		 * here, but that's OK, since we're only trylocking.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		if (unlikely(ret == -EBUSY))
			goto queue;

		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	} else {
		spin_lock(&glob->lru_lock);
	}
queue:
	driver = bdev->driver;
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	sync_obj_arg = bo->sync_obj_arg;

	kref_get(&bo->list_kref);	/* reference held by the ddestroy list */
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	if (sync_obj) {
		/* Flush the fence outside the locks, then drop our ref. */
		driver->sync_obj_flush(sync_obj, sync_obj_arg);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
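For reference, ttm_bo_cleanup_refs_or_queue() is invoked from the buffer object's final kref release. Below is a hedged sketch of that caller pair, shaped after ttm_bo_unref()/ttm_bo_release() from the same file; the example_ names are placeholders, and the real release function additionally tears down the bo's address-space (vm) node, which is elided here.

/*
 * Simplified sketch of the release path, assuming the shape of
 * ttm_bo_release(); the vm node teardown the real function also
 * performs is elided. Called when bo::kref drops to zero.
 */
static void example_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);

	/* Destroy now if idle, otherwise queue on bdev::ddestroy. */
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void example_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, example_bo_release);
}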