Example #1
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
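		/**
		 * Deadlock avoidance for multi-bo reserving: if the current
		 * holder reserved with an older sequence it will not back
		 * off for us, so back off here instead.
		 */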
		if (use_sequence && bo->seq_valid &&
			(sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
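		/**
		 * Publish the caller's sequence so that later contenders
		 * can decide whether to back off with -EAGAIN above.
		 */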
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
Example #2
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wake_up_all(&bo->event_queue);

		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
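Both sequence tests above (the -EAGAIN back-off and the wake-up check) rely on a wrap-safe comparison of 32-bit sequence numbers: the unsigned difference a - b is below 1 << 31 exactly when a was issued at or after b, within a window of 2^31. The following minimal user-space sketch (not part of the TTM code; the helper name seq_is_at_or_after is made up for illustration) shows the comparison in isolation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is at or after b" test, mirroring the TTM sequence checks. */
static bool seq_is_at_or_after(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (1u << 31);
}

int main(void)
{
	/* Plain ordering: 10 was issued after 5. */
	printf("%d\n", seq_is_at_or_after(10, 5));           /* prints 1 */
	/* Wrap-around: 3 counts as "after" 0xfffffffe. */
	printf("%d\n", seq_is_at_or_after(3, 0xfffffffeu));  /* prints 1 */
	/* Reverse direction fails, so that caller would back off. */
	printf("%d\n", seq_is_at_or_after(0xfffffffeu, 3));  /* prints 0 */
	return 0;
}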
Example #3
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

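	/**
	 * Try to reserve the LRU head for eviction; -EBUSY can only be
	 * returned when no_wait_reserve is set and another thread holds
	 * the reservation.
	 */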
	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}
Example #4
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

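	/**
	 * Take the list's buffers off the LRU and drop the LRU lock while
	 * sleeping on this buffer's reservation; if the wait fails, back
	 * off the reservations already taken for the list.
	 */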
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}
Example #5
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
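	/**
	 * Walk the swap LRU from the head until a buffer is reserved
	 * without waiting; dying or contended buffers are dropped and
	 * the head is retried.
	 */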
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}