Example #1
static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	obj_bump_mru(obj);

	i915_gem_object_put(obj); /* and drop the active reference */
}
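The retire callbacks in Examples #1 and #3 share a single fence-pruning idiom. A minimal standalone sketch of that idiom, using only the reservation-object calls that appear in the listing (the helper name is hypothetical):

/*
 * Opportunistically prune the shared-fence array: only take the lock if it
 * is free, and only drop the accumulated fences once every fence on the
 * object (including external ones) has signaled. Installing a NULL
 * exclusive fence discards the shared-fence array.
 */
static void prune_fences_if_idle(struct reservation_object *resv)
{
	if (!reservation_object_trylock(resv))
		return; /* pruning is only an optimisation, so don't wait */

	if (reservation_object_test_signaled_rcu(resv, true))
		reservation_object_add_excl_fence(resv, NULL);

	reservation_object_unlock(resv);
}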
Example #2
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
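	/*
	 * Illustrative mapping, assuming 4KiB pages and the four buckets above:
	 *   size =  4096 (1 page)    -> fls(1) - 1 = 0
	 *   size = 16384 (4 pages)   -> fls(4) - 1 = 2
	 *   size = 1 MiB (256 pages) -> fls(256) - 1 = 8, clamped to 3 below
	 */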
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(tmp, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(tmp))
			break;

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
								 true));

		if (tmp->base.size >= size) {
			/* Clear the set of shared fences early */
			ww_mutex_lock(&tmp->resv->lock, NULL);
			reservation_object_add_excl_fence(tmp->resv, NULL);
			ww_mutex_unlock(&tmp->resv->lock);

			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		obj = i915_gem_object_create_internal(pool->engine->i915, size);
		if (IS_ERR(obj))
			return obj;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
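For context, a hypothetical caller of this API following the contract in the kernel-doc above (hold struct_mutex, unpin the pages when finished); everything apart from the pool and object calls already shown is made up:

static int use_scratch_batch(struct i915_gem_batch_pool *pool, size_t len)
{
	struct drm_i915_gem_object *obj;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	obj = i915_gem_batch_pool_get(pool, len);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... fill the batch and submit a request that keeps it active ... */

	/* Caller-side obligation from the kernel-doc: drop the page pin. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}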
Example #3
static void
i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid  */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
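Example #1 folds the MRU bump into obj_bump_mru(), whereas this older version open-codes it. Reconstructed from the open-coded block above, such a helper would look roughly like the sketch below; to_i915() is the usual i915 accessor for the device-private pointer, and the exact definition should be treated as an assumption:

static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Keep the bound list in rough LRU order, as in the comment above */
	spin_lock(&i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
	spin_unlock(&i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */
}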
Example #4
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		int ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}
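A hypothetical in-kernel caller sketch showing both modes of this helper: a non-blocking probe with MSM_PREP_NOSYNC, then a blocking wait with an absolute deadline (the wrapper name is made up; MSM_PREP_READ is the companion read flag from the same msm uapi, and the timeout is assumed to be an absolute ktime as timeout_to_jiffies() expects):

static int wait_for_read_access(struct drm_gem_object *obj)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), 1000);
	int ret;

	/* Non-blocking probe: -EBUSY means a writer is still pending. */
	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_NOSYNC, NULL);
	if (ret != -EBUSY)
		return ret;

	/* Block for up to one second waiting for the writer to finish. */
	return msm_gem_cpu_prep(obj, MSM_PREP_READ, &deadline);
}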
Example #5
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle, clear the array of shared
			 * fences before we add a new request. Although, we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
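Both versions of i915_gem_batch_pool_get() index pool->cache_list by the bucket number computed with fls(), so the pool itself is little more than an engine pointer plus those buckets. A sketch of that structure, inferred from the code above (four buckets for 1, 2, 4 and 8+ pages); treat it as illustrative rather than the exact driver definition:

struct i915_gem_batch_pool {
	struct intel_engine_cs *engine;		/* pool->engine->i915 above */
	struct list_head cache_list[4];		/* 1, 2, 4 and 8+ page buckets */
};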
Example #6
/*
 * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
 *
 * Create and attach a fence to the vGEM handle. This fence is then exposed
 * via the dma-buf reservation object and visible to consumers of the exported
 * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
 * vGEM buffer is being written to by the client and is exposed as an exclusive
 * fence, otherwise the fence indicates the client is currently reading from the
 * buffer and all future writes should wait for the client to signal its
 * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
 * an exclusive fence when adding a read, or any fence when adding a write),
 * -EBUSY is reported. Serialisation between operations should be handled
 * by waiting upon the dma-buf.
 *
 * This returns the handle for the new fence that must be signaled within 10
 * seconds (or otherwise it will automatically expire). See
 * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
 *
 * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
 */
int vgem_fence_attach_ioctl(struct drm_device *dev,
			    void *data,
			    struct drm_file *file)
{
	struct drm_vgem_fence_attach *arg = data;
	struct vgem_file *vfile = file->driver_priv;
	struct reservation_object *resv;
	struct drm_gem_object *obj;
	struct dma_fence *fence;
	int ret;

	if (arg->flags & ~VGEM_FENCE_WRITE)
		return -EINVAL;

	if (arg->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, arg->handle);
	if (!obj)
		return -ENOENT;

	ret = attach_dmabuf(dev, obj);
	if (ret)
		goto err;

	fence = vgem_fence_create(vfile, arg->flags);
	if (!fence) {
		ret = -ENOMEM;
		goto err;
	}

	/* Check for a conflicting fence */
	resv = obj->dma_buf->resv;
	if (!reservation_object_test_signaled_rcu(resv,
						  arg->flags & VGEM_FENCE_WRITE)) {
		ret = -EBUSY;
		goto err_fence;
	}

	/* Expose the fence via the dma-buf */
	ret = 0;
	reservation_object_lock(resv, NULL);
	if (arg->flags & VGEM_FENCE_WRITE)
		reservation_object_add_excl_fence(resv, fence);
	else if ((ret = reservation_object_reserve_shared(resv)) == 0)
		reservation_object_add_shared_fence(resv, fence);
	reservation_object_unlock(resv);

	/* Record the fence in our idr for later signaling */
	if (ret == 0) {
		mutex_lock(&vfile->fence_mutex);
		ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
		mutex_unlock(&vfile->fence_mutex);
		if (ret > 0) {
			arg->out_fence = ret;
			ret = 0;
		}
	}
err_fence:
	if (ret) {
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
err:
	drm_gem_object_put_unlocked(obj);
	return ret;
}
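To connect the kernel-doc above with its user-space side, here is a hypothetical snippet that attaches an exclusive (write) fence to a vGEM handle. The field names of struct drm_vgem_fence_attach (handle, flags, pad, out_fence) are the ones the ioctl handler above reads; the include path assumes installed kernel uapi headers, and the wrapper function is made up:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/vgem_drm.h>

/*
 * Attach an exclusive (write) fence to a vGEM handle. Returns the fence id
 * to pass to DRM_IOCTL_VGEM_FENCE_SIGNAL within 10 seconds, or a negative
 * errno (-EBUSY if a conflicting fence is already attached).
 */
static int vgem_attach_write_fence(int vgem_fd, uint32_t handle)
{
	struct drm_vgem_fence_attach attach;

	memset(&attach, 0, sizeof(attach));
	attach.handle = handle;
	attach.flags = VGEM_FENCE_WRITE;	/* exposed as the exclusive fence */

	if (ioctl(vgem_fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -errno;

	return attach.out_fence;
}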