Example no. 1
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool no_wait_reserve,
			       bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret = 0;

retry:
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0))
		return ret;

	spin_lock(&glob->lru_lock);

	if (unlikely(list_empty(&bo->ddestroy))) {
		spin_unlock(&glob->lru_lock);
		return 0;
	}

	ret = ttm_bo_reserve_locked(bo, interruptible,
				    no_wait_reserve, false, 0);

	if (unlikely(ret != 0)) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	/**
	 * We can re-check for sync object without taking
	 * the bdev::fence_lock, since setting the sync object also
	 * requires bo::reserved. A busy object at this point may
	 * be caused by another thread recently starting an accelerated
	 * eviction.
	 */

	if (unlikely(bo->sync_obj)) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}
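A note on the shape of this function: when the buffer turns out to be busy after it has been reserved, every lock is dropped and control jumps back to retry, rather than sleeping with locks held. Below is a minimal userspace sketch of the same back-off-and-retry pattern, assuming pthreads; all names (cleanup_resource, resource_busy, res_lock) are hypothetical, not TTM API.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
static bool resource_busy;

static int cleanup_resource(void)
{
retry:
	pthread_mutex_lock(&res_lock);
	if (resource_busy) {
		/* Another thread is using the resource: back off fully. */
		pthread_mutex_unlock(&res_lock);
		sched_yield();	/* let the other thread make progress */
		goto retry;
	}
	/* ... tear the resource down while holding the lock ... */
	pthread_mutex_unlock(&res_lock);
	return 0;
}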
Example no. 2
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver;
	void *sync_obj = NULL;
	void *sync_obj_arg;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!bo->sync_obj) {

		spin_lock(&glob->lru_lock);

		/**
		 * Lock inversion between bo::reserve and bdev::fence_lock here,
		 * but that's OK, since we're only trylocking.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		if (unlikely(ret == -EBUSY))
			goto queue;

		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	} else {
		spin_lock(&glob->lru_lock);
	}
queue:
	driver = bdev->driver;
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	sync_obj_arg = bo->sync_obj_arg;

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj, sync_obj_arg);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
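The lock-inversion comment is the heart of this function: reserve is normally taken before bdev::fence_lock, and here the order is reversed, which is deadlock-safe only because the reserve is a trylock. A hedged userspace sketch of that rule, assuming pthreads; lock_a, lock_b and the function name are illustrative only.

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Canonical order is lock_a then lock_b; the caller already holds
 * lock_b, so taking lock_a outright could deadlock. Trylocking cannot. */
static int fast_path_holding_b(void)
{
	if (pthread_mutex_trylock(&lock_a) != 0)
		return -1;	/* contended: caller falls back to the slow path */

	/* ... fast path with both locks held ... */
	pthread_mutex_unlock(&lock_a);
	return 0;
}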
Example no. 3
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
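Note the kref_get(&bo->list_kref) taken while the LRU lock is still held: the buffer is pinned before the lock is dropped, so it cannot be freed while this thread waits on the reservation. A minimal sketch of that pin-before-unlock discipline using C11 atomics; struct object and its helpers are hypothetical stand-ins for kref, not kernel API.

#include <stdatomic.h>
#include <stdlib.h>

struct object {
	atomic_int refcount;	/* plays the role of list_kref */
	/* ... payload ... */
};

static void object_get(struct object *obj)
{
	/* Pin: must be called while the object is still reachable,
	 * i.e. under the list lock, exactly like kref_get() above. */
	atomic_fetch_add(&obj->refcount, 1);
}

static void object_put(struct object *obj)
{
	/* The thread that drops the last reference frees the object. */
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);
}

Paired get/put calls bracket every window in which the list lock is released, which is why the error paths above are careful to kref_put() before retrying or returning.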
Example no. 4
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	/* Caller holds the LRU lock; skip entries that are not reserved. */
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}
Example no. 5
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return ret;
}
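The detail worth copying here is that ttm_bo_del_from_lru() only counts references to drop, while the actual drops happen in ttm_bo_list_ref_sub() after the spinlock is released; the final put may run a destructor that must not execute under a spinlock. A sketch under that assumption, reusing the hypothetical object helpers from the earlier sketch (del_from_lists() is likewise made up for illustration).

#include <pthread.h>

struct object;				/* opaque; see the refcount sketch */
int del_from_lists(struct object *obj);	/* hypothetical: returns refs to drop */
void object_put(struct object *obj);	/* hypothetical: last put may free */

static void remove_and_put(struct object *obj, pthread_mutex_t *lock)
{
	int put_count;

	pthread_mutex_lock(lock);
	put_count = del_from_lists(obj);
	pthread_mutex_unlock(lock);

	/* Drop outside the lock: the last put may free the object. */
	while (put_count--)
		object_put(obj);
}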
Example no. 6
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
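The comment about re-checking after sleeping states the invariant that drives the whole while loop: anything observed under lru_lock is stale the moment the lock is dropped, so every wakeup re-evaluates the list from scratch. A compact sketch of that re-validate-after-wakeup loop, assuming pthreads; peek_first, try_claim and wait_for_unclaim are hypothetical helpers.

#include <pthread.h>
#include <stdbool.h>

struct object;				/* opaque, as in the earlier sketches */
struct object *peek_first(void);	/* hypothetical: list head or NULL */
bool try_claim(struct object *obj);	/* hypothetical trylock-style claim */
void wait_for_unclaim(struct object *obj);	/* hypothetical sleep */

static struct object *claim_first(pthread_mutex_t *lock)
{
	struct object *obj;

	for (;;) {
		pthread_mutex_lock(lock);
		obj = peek_first();
		if (!obj || try_claim(obj))
			break;
		/* Busy: drop the lock, sleep, then re-check from scratch. */
		pthread_mutex_unlock(lock);
		wait_for_unclaim(obj);
	}
	pthread_mutex_unlock(lock);
	return obj;	/* NULL if the list was empty */
}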
Example no. 7
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);

		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
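A small detail shared with example no. 2: the delayed-work requeue uses ((HZ / 100) < 1) ? 1 : HZ / 100, which clamps the nominal 10 ms delay to at least one jiffy on configurations where HZ < 100. A generic form might look like this (the macro name is hypothetical):

/* Nominally HZ/100 jiffies (about 10 ms), but never less than one tick. */
#define CLEANUP_DELAY_JIFFIES(hz)	(((hz) / 100) < 1 ? 1 : (hz) / 100)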