Example #1
static int hang_init(struct hang *h, struct drm_i915_private *i915)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->i915 = i915;

	h->ctx = kernel_context(i915);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map(h->obj,
					i915_coherent_map_type(i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}
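The unwind labels in hang_init() above already spell out the release order. As a companion, here is a minimal teardown sketch that only undoes what hang_init() acquired; hang_fini_sketch() is a hypothetical name, and the real selftest teardown may do additional work (such as terminating the batch) before releasing the objects.

static void hang_fini_sketch(struct hang *h)
{
	/* Release in the reverse order of hang_init(). */
	i915_gem_object_unpin_map(h->obj);	/* batch mapping */
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);	/* HWS seqno page mapping */
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);
}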
Example #2
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
Example #3
int i915_gem_render_state_emit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_render_state so = {}; /* keep the compiler happy */
	int err;

	so.rodata = render_state_get_rodata(engine);
	if (!so.rodata)
		return 0;

	if (so.rodata->batch_items * 4 > PAGE_SIZE)
		return -EINVAL;

	so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(so.obj))
		return PTR_ERR(so.obj);

	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(so.vma)) {
		err = PTR_ERR(so.vma);
		goto err_obj;
	}

	err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err_vma;

	err = render_state_setup(&so, rq->i915);
	if (err)
		goto err_unpin;

	err = engine->emit_bb_start(rq,
				    so.batch_offset, so.batch_size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto err_unpin;

	if (so.aux_size > 8) {
		err = engine->emit_bb_start(rq,
					    so.aux_offset, so.aux_size,
					    I915_DISPATCH_SECURE);
		if (err)
			goto err_unpin;
	}

	i915_vma_lock(so.vma);
	err = i915_vma_move_to_active(so.vma, rq, 0);
	i915_vma_unlock(so.vma);
err_unpin:
	i915_vma_unpin(so.vma);
err_vma:
	i915_vma_close(so.vma);
err_obj:
	i915_gem_object_put(so.obj);
	return err;
}
Example #4
static int populate_ggtt(struct drm_i915_private *i915,
			 struct list_head *objects)
{
	unsigned long unbound, bound, count;
	struct drm_i915_gem_object *obj;
	u64 size;

	count = 0;
	for (size = 0;
	     size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     size += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		quirk_add(obj, objects);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		count++;
	}

	unbound = 0;
	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
		if (obj->mm.quirked)
			unbound++;
	if (unbound) {
		pr_err("%s: Found %lu objects unbound, expected %u!\n",
		       __func__, unbound, 0);
		return -EINVAL;
	}

	bound = 0;
	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
		if (obj->mm.quirked)
			bound++;
	if (bound != count) {
		pr_err("%s: Found %lu objects bound, expected %lu!\n",
		       __func__, bound, count);
		return -EINVAL;
	}

	if (list_empty(&i915->ggtt.vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}
Example #5
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(tmp, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(tmp))
			break;

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
								 true));

		if (tmp->base.size >= size) {
			/* Clear the set of shared fences early */
			ww_mutex_lock(&tmp->resv->lock, NULL);
			reservation_object_add_excl_fence(tmp->resv, NULL);
			ww_mutex_unlock(&tmp->resv->lock);

			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		obj = i915_gem_object_create_internal(pool->engine->i915, size);
		if (IS_ERR(obj))
			return obj;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
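Per the kernel-doc above, the returned object comes back with its pages pinned, the caller is responsible for i915_gem_object_unpin_pages(), and struct_mutex must be held. A hypothetical caller following that contract might look like the sketch below; use_pooled_batch() and the elided command emission are illustrative assumptions, not code from the driver.

static int use_pooled_batch(struct i915_gem_batch_pool *pool, size_t size)
{
	struct drm_i915_gem_object *obj;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Pages are returned pinned by i915_gem_batch_pool_get(). */
	obj = i915_gem_batch_pool_get(pool, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... emit commands into the batch object here ... */

	/* Drop the pin taken on the caller's behalf. */
	i915_gem_object_unpin_pages(obj);
	return 0;
}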
Example #6
int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
	struct intel_render_state *so;
	const struct intel_renderstate_rodata *rodata;
	struct drm_i915_gem_object *obj;
	int ret;

	if (engine->id != RCS)
		return 0;

	rodata = render_state_get_rodata(engine);
	if (!rodata)
		return 0;

	if (rodata->batch_items * 4 > PAGE_SIZE)
		return -EINVAL;

	so = kmalloc(sizeof(*so), GFP_KERNEL);
	if (!so)
		return -ENOMEM;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free;
	}

	so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(so->vma)) {
		ret = PTR_ERR(so->vma);
		goto err_obj;
	}

	so->rodata = rodata;
	engine->render_state = so;
	return 0;

err_obj:
	i915_gem_object_put(obj);
err_free:
	kfree(so);
	return ret;
}
Example #7
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err;

	if (i915_gem_object_is_active(h->obj)) {
		struct drm_i915_gem_object *obj;
		void *vaddr;

		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
		if (IS_ERR(obj))
			return ERR_CAST(obj);

		vaddr = i915_gem_object_pin_map(obj,
						i915_coherent_map_type(h->i915));
		if (IS_ERR(vaddr)) {
			i915_gem_object_put(obj);
			return ERR_CAST(vaddr);
		}

		i915_gem_object_unpin_map(h->obj);
		i915_gem_object_put(h->obj);

		h->obj = obj;
		h->batch = vaddr;
	}

	rq = i915_request_alloc(engine, h->ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(h, rq);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
Example #8
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle, clear the array of shared
			 * fences before we add a new request. Although, we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences, rather
			 * than replace the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
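For reference, the bucket selection in both versions of i915_gem_batch_pool_get() maps a size to a cache_list index via fls(size >> PAGE_SHIFT) - 1, clamped to the last bucket. The standalone check below illustrates that mapping; it assumes 4KiB pages and a four-entry cache_list (as the bucket comment implies), and uses a local fls() stand-in so it compiles outside the kernel.

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12		/* assumption: 4KiB pages */
#define NUM_BUCKETS 4		/* assumption: ARRAY_SIZE(pool->cache_list) */

/* Local stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_local(unsigned long x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static int bucket_for_size(size_t size)
{
	int n = fls_local(size >> PAGE_SHIFT) - 1;

	if (n >= NUM_BUCKETS)
		n = NUM_BUCKETS - 1;
	return n;
}

int main(void)
{
	assert(bucket_for_size(1 << PAGE_SHIFT) == 0);	/* 4KiB  -> bucket 0 */
	assert(bucket_for_size(2 << PAGE_SHIFT) == 1);	/* 8KiB  -> bucket 1 */
	assert(bucket_for_size(4 << PAGE_SHIFT) == 2);	/* 16KiB -> bucket 2 */
	assert(bucket_for_size(8 << PAGE_SHIFT) == 3);	/* 32KiB -> bucket 3 */
	assert(bucket_for_size(64 << PAGE_SHIFT) == 3);	/* larger sizes clamp */
	return 0;
}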