Example #1
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
Example #2
static int hang_init(struct hang *h, struct drm_i915_private *i915)
{
	void *vaddr;
	int err;

	memset(h, 0, sizeof(*h));
	h->i915 = i915;

	h->ctx = kernel_context(i915);
	if (IS_ERR(h->ctx))
		return PTR_ERR(h->ctx);

	h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(h->hws)) {
		err = PTR_ERR(h->hws);
		goto err_ctx;
	}

	h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(h->obj)) {
		err = PTR_ERR(h->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	vaddr = i915_gem_object_pin_map(h->obj,
					i915_coherent_map_type(i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	h->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(h->hws);
err_obj:
	i915_gem_object_put(h->obj);
err_hws:
	i915_gem_object_put(h->hws);
err_ctx:
	kernel_context_close(h->ctx);
	return err;
}
Example #3
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
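
Examples #1 and #3 form a matched pair: igt_spinner_init() creates and pins the HWS and batch objects, while igt_spinner_fini() ends the spinner and releases them. A minimal sketch of how a selftest might bracket its work with the pair; spinner_smoke() is a hypothetical name and the body between init and fini is elided:

static int spinner_smoke(struct drm_i915_private *i915)
{
	struct igt_spinner spin;
	int err;

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	/* ... create spinning requests and exercise the engines here ... */

	igt_spinner_fini(&spin); /* ends any active spinner, then releases it */
	return 0;
}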
Example #4
static void hang_fini(struct hang *h)
{
	*h->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(h->i915);

	i915_gem_object_unpin_map(h->obj);
	i915_gem_object_put(h->obj);

	i915_gem_object_unpin_map(h->hws);
	i915_gem_object_put(h->hws);

	kernel_context_close(h->ctx);

	igt_flush_test(h->i915, I915_WAIT_LOCKED);
}
Example #5
static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	obj_bump_mru(obj);

	i915_gem_object_put(obj); /* and drop the active reference */
}
Example #6
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}
Example #7
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj = NULL;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = PAGE_ALIGN(size);

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size * 2 < ggtt->stolen_usable_size)
		obj = i915_gem_object_create_stolen(dev_priv, size);
	if (obj == NULL)
		obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = PTR_ERR(obj);
		goto out;
	}

	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		i915_gem_object_put(obj);
		ret = PTR_ERR(fb);
		goto out;
	}

	mutex_unlock(&dev->struct_mutex);

	ifbdev->fb = to_intel_framebuffer(fb);

	return 0;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #8
static int igt_dmabuf_export_vmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto err_obj;
	}
	i915_gem_object_put(obj);

	ptr = dma_buf_vmap(dmabuf);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("dma_buf_vmap failed with err=%d\n", err);
		goto out;
	}

	if (memchr_inv(ptr, 0, dmabuf->size)) {
		pr_err("Exported object not initialiased to zero!\n");
		err = -EINVAL;
		goto out;
	}

	memset(ptr, 0xc5, dmabuf->size);

	err = 0;
	dma_buf_vunmap(dmabuf, ptr);
out:
	dma_buf_put(dmabuf);
	return err;

err_obj:
	i915_gem_object_put(obj);
	return err;
}
Example #9
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
	struct render_state so;
	struct drm_i915_gem_object *obj;
	int ret;

	if (WARN_ON(req->engine->id != RCS))
		return -ENOENT;

	so.rodata = render_state_get_rodata(req);
	if (!so.rodata)
		return 0;

	if (so.rodata->batch_items * 4 > 4096)
		return -EINVAL;

	obj = i915_gem_object_create(&req->i915->drm, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
	if (IS_ERR(so.vma)) {
		ret = PTR_ERR(so.vma);
		goto err_obj;
	}

	ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
	if (ret)
		goto err_obj;

	ret = render_state_setup(&so);
	if (ret)
		goto err_unpin;

	ret = req->engine->emit_bb_start(req, so.vma->node.start,
					 so.rodata->batch_items * 4,
					 I915_DISPATCH_SECURE);
	if (ret)
		goto err_unpin;

	if (so.aux_batch_size > 8) {
		ret = req->engine->emit_bb_start(req,
						 (so.vma->node.start +
						  so.aux_batch_offset),
						 so.aux_batch_size,
						 I915_DISPATCH_SECURE);
		if (ret)
			goto err_unpin;
	}

	i915_vma_move_to_active(so.vma, req, 0);
err_unpin:
	i915_vma_unpin(so.vma);
err_obj:
	i915_gem_object_put(obj);
	return ret;
}
Example #10
int i915_gem_render_state_emit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_render_state so = {}; /* keep the compiler happy */
	int err;

	so.rodata = render_state_get_rodata(engine);
	if (!so.rodata)
		return 0;

	if (so.rodata->batch_items * 4 > PAGE_SIZE)
		return -EINVAL;

	so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(so.obj))
		return PTR_ERR(so.obj);

	so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(so.vma)) {
		err = PTR_ERR(so.vma);
		goto err_obj;
	}

	err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto err_vma;

	err = render_state_setup(&so, rq->i915);
	if (err)
		goto err_unpin;

	err = engine->emit_bb_start(rq,
				    so.batch_offset, so.batch_size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto err_unpin;

	if (so.aux_size > 8) {
		err = engine->emit_bb_start(rq,
					    so.aux_offset, so.aux_size,
					    I915_DISPATCH_SECURE);
		if (err)
			goto err_unpin;
	}

	i915_vma_lock(so.vma);
	err = i915_vma_move_to_active(so.vma, rq, 0);
	i915_vma_unlock(so.vma);
err_unpin:
	i915_vma_unpin(so.vma);
err_vma:
	i915_vma_close(so.vma);
err_obj:
	i915_gem_object_put(so.obj);
	return err;
}
Example #11
static int igt_dmabuf_import_self(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}

	if (import != &obj->base) {
		pr_err("i915_gem_prime_import created a new object!\n");
		err = -EINVAL;
		goto out_import;
	}

	err = 0;
out_import:
	i915_gem_object_put(to_intel_bo(import));
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
	return err;
}
Example #12
static void cleanup_objects(struct drm_i915_private *i915,
			    struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!obj->mm.quirked);
		obj->mm.quirked = false;
		i915_gem_object_put(obj);
	}
}
Example #13
/* Cleans up uC firmware by releasing the firmware GEM obj.
 */
static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
	struct drm_i915_gem_object *obj;

	obj = fetch_and_zero(&uc_fw->obj);
	if (obj)
		i915_gem_object_put(obj);

	uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
Example #14
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err;

	if (i915_gem_object_is_active(h->obj)) {
		struct drm_i915_gem_object *obj;
		void *vaddr;

		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
		if (IS_ERR(obj))
			return ERR_CAST(obj);

		vaddr = i915_gem_object_pin_map(obj,
						i915_coherent_map_type(h->i915));
		if (IS_ERR(vaddr)) {
			i915_gem_object_put(obj);
			return ERR_CAST(vaddr);
		}

		i915_gem_object_unpin_map(h->obj);
		i915_gem_object_put(h->obj);

		h->obj = obj;
		h->batch = vaddr;
	}

	rq = i915_request_alloc(engine, h->ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(h, rq);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
Example #15
static int igt_dmabuf_import_ownership(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ptr = dma_buf_vmap(dmabuf);
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto err_dmabuf;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_vunmap(dmabuf, ptr);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto err_dmabuf;
	}

	dma_buf_put(dmabuf);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
		goto out_obj;
	}

	err = 0;
	i915_gem_object_unpin_pages(obj);
out_obj:
	i915_gem_object_put(obj);
	return err;

err_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
Example #16
/**
 * i915_gem_batch_pool_fini() - clean up a batch buffer pool
 * @pool: the pool to clean up
 *
 * Note: Callers must hold the struct_mutex.
 */
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
	int n;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct drm_i915_gem_object *obj, *next;

		list_for_each_entry_safe(obj, next,
					 &pool->cache_list[n],
					 batch_pool_link)
			i915_gem_object_put(obj);

		INIT_LIST_HEAD(&pool->cache_list[n]);
	}
}
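
As the kerneldoc above notes, i915_gem_batch_pool_fini() must be called with struct_mutex held. A hedged sketch of a caller honouring that rule; the engine->batch_pool field and the helper name are assumptions for illustration, not taken from this example:

static void engine_release_batch_pool(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	mutex_lock(&i915->drm.struct_mutex); /* required by the kerneldoc */
	i915_gem_batch_pool_fini(&engine->batch_pool);
	mutex_unlock(&i915->drm.struct_mutex);
}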
Example #17
static void
i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid  */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
Example #18
int i915_gem_render_state_init(struct intel_engine_cs *engine)
{
	struct intel_render_state *so;
	const struct intel_renderstate_rodata *rodata;
	struct drm_i915_gem_object *obj;
	int ret;

	if (engine->id != RCS)
		return 0;

	rodata = render_state_get_rodata(engine);
	if (!rodata)
		return 0;

	if (rodata->batch_items * 4 > PAGE_SIZE)
		return -EINVAL;

	so = kmalloc(sizeof(*so), GFP_KERNEL);
	if (!so)
		return -ENOMEM;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto err_free;
	}

	so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(so->vma)) {
		ret = PTR_ERR(so->vma);
		goto err_obj;
	}

	so->rodata = rodata;
	engine->render_state = so;
	return 0;

err_obj:
	i915_gem_object_put(obj);
err_free:
	kfree(so);
	return ret;
}
Example #19
static int igt_gem_object(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = -ENOMEM;

	/* Basic test to ensure we can create an object */

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		pr_err("i915_gem_object_create failed, err=%d\n", err);
		goto out;
	}

	err = 0;
	i915_gem_object_put(obj);
out:
	return err;
}
Example #20
static void i915_clflush_work(struct work_struct *work)
{
	struct clflush *clflush = container_of(work, typeof(*clflush), work);
	struct drm_i915_gem_object *obj = clflush->obj;

	if (i915_gem_object_pin_pages(obj)) {
		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
		goto out;
	}

	__i915_do_clflush(obj);

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_put(obj);

	dma_fence_signal(&clflush->dma);
	dma_fence_put(&clflush->dma);
}
Example #21
static int igt_dmabuf_export(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	dma_buf_put(dmabuf);
	return 0;
}
Example #22
static int igt_gem_huge(void *arg)
{
	const unsigned int nreal = 509; /* just to be awkward */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	unsigned int n;
	int err;

	/* Basic sanitycheck of our huge fake object allocation */

	obj = huge_gem_object(i915,
			      nreal * PAGE_SIZE,
			      i915->ggtt.vm.total + PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
		if (i915_gem_object_get_page(obj, n) !=
		    i915_gem_object_get_page(obj, n % nreal)) {
			pr_err("Page lookup mismatch at index %u [%u]\n",
			       n, n % nreal);
			err = -EINVAL;
			goto out_unpin;
		}
	}

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
Example #23
static struct drm_i915_gem_object *
alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev_priv)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}
Example #24
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid  */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
Example #25
static void guc_free_load_err_log(struct intel_guc *guc)
{
	if (guc->load_err_log)
		i915_gem_object_put(guc->load_err_log);
}
Example #26
/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		struct i915_hw_ppgtt *ppgtt;

		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		ppgtt = dev_priv->kernel_context->ppgtt;
		if (!ppgtt || !ppgtt->vm.has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
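
The restrictions listed in the comment above are enforced against the userspace caller of the ioctl. Below is a hedged userspace sketch of satisfying restriction 1 (page alignment) when wrapping malloc'd memory in a GEM handle; wrap_user_memory() is a hypothetical helper and the <drm/i915_drm.h> include path depends on the libdrm installation:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int wrap_user_memory(int drm_fd, size_t size, uint32_t *handle, void **ptr)
{
	struct drm_i915_gem_userptr arg;

	/* Restriction 1: both the pointer and the size must be page aligned. */
	if ((size & 4095) || posix_memalign(ptr, 4096, size))
		return -EINVAL;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)*ptr;
	arg.user_size = size;
	arg.flags = 0;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg)) {
		free(*ptr);
		return -errno;
	}

	*handle = arg.handle; /* the handle now references the wrapped pages */
	return 0;
}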
Example #27
static int igt_dmabuf_import(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *obj_map, *dma_map;
	u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
	int err, i;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_dmabuf;
	}

	if (obj->base.dev != &i915->drm) {
		pr_err("i915_gem_prime_import created a non-i915 object!\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (obj->base.size != PAGE_SIZE) {
		pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
		       (long long)obj->base.size, PAGE_SIZE);
		err = -EINVAL;
		goto out_obj;
	}

	dma_map = dma_buf_vmap(dmabuf);
	if (!dma_map) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out_obj;
	}

	if (0) { /* Can not yet map dmabuf */
		obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(obj_map)) {
			err = PTR_ERR(obj_map);
			pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
			goto out_dma_map;
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(dma_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("imported vmap not all set to %x!\n", pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(obj_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("exported vmap not all set to %x!\n", pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		i915_gem_object_unpin_map(obj);
	}

	err = 0;
out_dma_map:
	dma_buf_vunmap(dmabuf, dma_map);
out_obj:
	i915_gem_object_put(obj);
out_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
Example #28
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	struct mutex *unlock = NULL;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		if (!unlock) {
			unlock = &mn->mm->i915->drm.struct_mutex;

			switch (mutex_trylock_recursive(unlock)) {
			default:
			case MUTEX_TRYLOCK_FAILED:
				if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
					i915_gem_object_put(obj);
					return -EINTR;
				}
				/* fall through */
			case MUTEX_TRYLOCK_SUCCESS:
				break;

			case MUTEX_TRYLOCK_RECURSIVE:
				unlock = ERR_PTR(-EEXIST);
				break;
			}
		}

		ret = i915_gem_object_unbind(obj);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
		i915_gem_object_put(obj);
		if (ret)
			goto unlock;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the mmu from concurrent insertion
		 * over this range, there is no guarantee that this search will
		 * terminate given a pathologic workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

unlock:
	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;

}
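
The long comment inside the loop above relies on kref_get_unless_zero() to avoid resurrecting an object that is already being destroyed. A minimal illustration of that pattern with a hypothetical structure (struct my_node and its helpers are not part of the driver):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_node {
	struct kref ref;
	/* ... payload ... */
};

static void my_node_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_node, ref));
}

static void my_node_put(struct my_node *node)
{
	kref_put(&node->ref, my_node_release);
}

/* Take a reference only if the final reference has not already been
 * dropped; a NULL return means the object is mid-destruction. */
static struct my_node *my_node_get_live(struct my_node *node)
{
	return kref_get_unless_zero(&node->ref) ? node : NULL;
}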
Example #29
static int igt_dmabuf_export_kmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		pr_err("i915_gem_prime_export failed with err=%d\n", err);
		return err;
	}

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] not initialiased to zero!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
		goto err;
	}
	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	ptr = dma_buf_kmap(dmabuf, 1);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 1, ptr);
		pr_err("Exported page[1] not set to 0xaa!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 1, ptr);

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}
	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] did not retain 0xc5!\n");
		err = -EINVAL;
		goto err;
	}
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = dma_buf_kmap(dmabuf, 2);
	if (ptr) {
		pr_err("Erroneously kmapped beyond the end of the object!\n");
		dma_buf_kunmap(dmabuf, 2, ptr);
		err = -EINVAL;
		goto err;
	}

	ptr = dma_buf_kmap(dmabuf, -1);
	if (ptr) {
		pr_err("Erroneously kmapped before the start of the object!\n");
		dma_buf_kunmap(dmabuf, -1, ptr);
		err = -EINVAL;
		goto err;
	}

	err = 0;
err:
	dma_buf_put(dmabuf);
	return err;
}
Example #30
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}