Example #1
static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	obj_bump_mru(obj);

	i915_gem_object_put(obj); /* and drop the active reference */
}
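
The trylock-based pruning above is worth isolating: once every fence on the object has signaled, installing a NULL exclusive fence releases the references held by the now-stale shared-fence array. A minimal sketch of that idiom as a standalone helper (hypothetical name, derived directly from the code above):

static void prune_fences_if_idle(struct reservation_object *resv)
{
	/* Opportunistic only: if the lock is contended, skip pruning. */
	if (!reservation_object_trylock(resv))
		return;

	/*
	 * All shared and exclusive fences signaled? Then adding a NULL
	 * exclusive fence replaces (and releases) the old shared array.
	 */
	if (reservation_object_test_signaled_rcu(resv, true))
		reservation_object_add_excl_fence(resv, NULL);

	reservation_object_unlock(resv);
}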
Example #2
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything of 8 pages
	 * (32KiB with 4KiB pages) or more into the same bucket: i.e. the
	 * buckets hold objects of (1 page, 2-3 pages, 4-7 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(tmp, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(tmp))
			break;

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
								 true));

		if (tmp->base.size >= size) {
			/* Clear the set of shared fences early */
			ww_mutex_lock(&tmp->resv->lock, NULL);
			reservation_object_add_excl_fence(tmp->resv, NULL);
			ww_mutex_unlock(&tmp->resv->lock);

			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		obj = i915_gem_object_create_internal(pool->engine->i915, size);
		if (IS_ERR(obj))
			return obj;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
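
A minimal usage sketch for the function above (hypothetical caller, not taken from the driver), assuming struct_mutex is already held as the kernel-doc requires: fetch a scratch object of at least one page, fill it, then drop the page pin.

static int fill_scratch_batch(struct i915_gem_batch_pool *pool)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_batch_pool_get(pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... emit commands into the object here ... */

	/*
	 * The pool returned the object with its pages pinned; release
	 * that pin once we are done writing to it.
	 */
	i915_gem_object_unpin_pages(obj);
	return 0;
}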
Example #3
static void
i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid  */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
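
For context, the activation path that this retire callback undoes looks roughly like the sketch below (modelled on i915_vma_move_to_active() from the same kernel era; treat it as an approximation rather than the driver's verbatim code): the first engine to use the vma takes the object's active count, the per-engine bit is set, the request is tracked, and the vma moves onto the VM's active list.

static void __vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	const unsigned int idx = rq->engine->id;

	/* First engine to use this vma: the object becomes active. */
	if (!i915_vma_is_active(vma))
		vma->obj->active_count++;

	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], rq);

	list_move_tail(&vma->vm_link, &vma->vm->active_list);
}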
Example #4
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err;

	if (i915_gem_object_is_active(h->obj)) {
		struct drm_i915_gem_object *obj;
		void *vaddr;

		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
		if (IS_ERR(obj))
			return ERR_CAST(obj);

		vaddr = i915_gem_object_pin_map(obj,
						i915_coherent_map_type(h->i915));
		if (IS_ERR(vaddr)) {
			i915_gem_object_put(obj);
			return ERR_CAST(vaddr);
		}

		i915_gem_object_unpin_map(h->obj);
		i915_gem_object_put(h->obj);

		h->obj = obj;
		h->batch = vaddr;
	}

	rq = i915_request_alloc(engine, h->ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(h, rq);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
Example #5
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid  */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
Example #6
/**
 * i915_gem_shrink - Shrink buffer object caches
 * @dev_priv: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

	if (!shrinker_lock(dev_priv, &unlock))
		return 0;

	/*
	 * When shrinking the active list, also consider active contexts.
	 * Active contexts are pinned until they are retired, and so can
	 * not be simply unbound to retire and unpin their pages. To shrink
	 * the contexts, we must wait until the gpu is idle.
	 *
	 * We don't care about errors here; if we cannot wait upon the GPU,
	 * we will free as much as we can and hope to get a second chance.
	 */
	if (flags & I915_SHRINK_ACTIVE)
		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);

	trace_i915_gem_shrink(dev_priv, target, flags);
	i915_gem_retire_requests(dev_priv);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
	    !intel_runtime_pm_get_if_in_use(dev_priv))
		flags &= ~I915_SHRINK_BOUND;

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;

		if ((flags & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock(&dev_priv->mm.obj_lock);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (flags & I915_SHRINK_PURGEABLE &&
			    obj->mm.madv != I915_MADV_DONTNEED)
				continue;

			if (flags & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(flags & I915_SHRINK_ACTIVE) &&
			    (i915_gem_object_is_active(obj) ||
			     i915_gem_object_is_framebuffer(obj)))
				continue;

			if (!can_release_pages(obj))
				continue;

			spin_unlock(&dev_priv->mm.obj_lock);

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
				mutex_lock_nested(&obj->mm.lock,
						  I915_MM_SHRINKER);
				if (!i915_gem_object_has_pages(obj)) {
					__i915_gem_object_invalidate(obj);
					count += obj->base.size >> PAGE_SHIFT;
				}
				mutex_unlock(&obj->mm.lock);
			}
			scanned += obj->base.size >> PAGE_SHIFT;

			spin_lock(&dev_priv->mm.obj_lock);
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock(&dev_priv->mm.obj_lock);
	}

	/* Drop the wakeref taken above for unbinding bound objects, if any. */
	if (flags & I915_SHRINK_BOUND)
		intel_runtime_pm_put(dev_priv);

	i915_gem_retire_requests(dev_priv);

	shrinker_unlock(dev_priv, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
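
A usage sketch, modelled on i915_gem_shrink_all() from the same era (which the kernel-doc above points to): take a runtime-pm wakeref so bound objects can actually be unbound, then ask for an effectively unlimited number of pages across all caches. Treat the exact flag combination as an assumption.

static unsigned long shrink_everything(struct drm_i915_private *dev_priv)
{
	unsigned long freed;

	/* Hold the device awake so I915_SHRINK_BOUND is not dropped. */
	intel_runtime_pm_get(dev_priv);
	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
	intel_runtime_pm_put(dev_priv);

	return freed;
}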
Example #7
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must call i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything of 8 pages
	 * (32KiB with 4KiB pages) or more into the same bucket: i.e. the
	 * buckets hold objects of (1 page, 2-3 pages, 4-7 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle, clear the array of shared
			 * fences before we add a new request. Although, we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
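
To make the bucket selection concrete, here is the index computation pulled out into a standalone helper (hypothetical, for illustration only). With 4KiB pages and four buckets the mapping is: 1 page -> bucket 0, 2-3 pages -> bucket 1, 4-7 pages -> bucket 2, and anything of 8 pages or more shares bucket 3.

static int batch_pool_bucket(size_t size, int nr_buckets)
{
	/* floor(log2(size in pages)), capped at the last bucket */
	int n = fls(size >> PAGE_SHIFT) - 1;

	if (n >= nr_buckets)
		n = nr_buckets - 1;
	return n;
}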