Code Example #1
File: intel_fbdev.c  Project: mkahola/drm-intel-mika
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj = NULL;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
				    DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	mutex_lock(&dev->struct_mutex);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = PAGE_ALIGN(size);

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size * 2 < ggtt->stolen_usable_size)
		obj = i915_gem_object_create_stolen(dev_priv, size);
	if (obj == NULL)
		obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = PTR_ERR(obj);
		goto out;
	}

	fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		i915_gem_object_put(obj);
		ret = PTR_ERR(fb);
		goto out;
	}

	mutex_unlock(&dev->struct_mutex);

	ifbdev->fb = to_intel_framebuffer(fb);

	return 0;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Code Example #2
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
	struct render_state so;
	struct drm_i915_gem_object *obj;
	int ret;

	if (WARN_ON(req->engine->id != RCS))
		return -ENOENT;

	so.rodata = render_state_get_rodata(req);
	if (!so.rodata)
		return 0;

	if (so.rodata->batch_items * 4 > 4096)
		return -EINVAL;

	obj = i915_gem_object_create(&req->i915->drm, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
	if (IS_ERR(so.vma)) {
		ret = PTR_ERR(so.vma);
		goto err_obj;
	}

	ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
	if (ret)
		goto err_obj;

	ret = render_state_setup(&so);
	if (ret)
		goto err_unpin;

	ret = req->engine->emit_bb_start(req, so.vma->node.start,
					 so.rodata->batch_items * 4,
					 I915_DISPATCH_SECURE);
	if (ret)
		goto err_unpin;

	if (so.aux_batch_size > 8) {
		ret = req->engine->emit_bb_start(req,
						 (so.vma->node.start +
						  so.aux_batch_offset),
						 so.aux_batch_size,
						 I915_DISPATCH_SECURE);
		if (ret)
			goto err_unpin;
	}

	i915_vma_move_to_active(so.vma, req, 0);
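	/* Note: success also flows through the labels below; once the
	 * batch has been submitted, the local pin and reference are
	 * released on the same path as the error cases.
	 */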
err_unpin:
	i915_vma_unpin(so.vma);
err_obj:
	i915_gem_object_put(obj);
	return ret;
}
Code Example #3
File: i915_gem_batch_pool.c  Project: AK101111/linux
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj = NULL;
	struct drm_i915_gem_object *tmp, *next;
	struct list_head *list;
	int n;

	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (tmp->active)
			break;

		/* While we're looping, do some clean up */
		if (tmp->madv == __I915_MADV_PURGED) {
			list_del(&tmp->batch_pool_link);
			drm_gem_object_unreference(&tmp->base);
			continue;
		}

		if (tmp->base.size >= size) {
			obj = tmp;
			break;
		}
	}

	if (obj == NULL) {
		int ret;

		obj = i915_gem_object_create(pool->dev, size);
		if (IS_ERR(obj))
			return obj;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			/* drop our reference so the new object isn't leaked */
			drm_gem_object_unreference(&obj->base);
			return ERR_PTR(ret);
		}

		obj->madv = I915_MADV_DONTNEED;
	}

	list_move_tail(&obj->batch_pool_link, list);
	i915_gem_object_pin_pages(obj);
	return obj;
}
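
As a reading aid (not from any of the projects above): a minimal sketch of how a caller might consume i915_gem_batch_pool_get() according to its kernel-doc. The example_submit() wrapper and its pool/cmd_len parameters are hypothetical; only i915_gem_batch_pool_get() and i915_gem_object_unpin_pages() appear in the sources.

/* Hypothetical caller, for illustration only. Assumes struct_mutex
 * is already held, as the kernel-doc requires.
 */
static int example_submit(struct i915_gem_batch_pool *pool, size_t cmd_len)
{
	struct drm_i915_gem_object *batch;

	batch = i915_gem_batch_pool_get(pool, cmd_len);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	/* ... emit up to batch->base.size bytes of commands ... */

	/* The buffer is returned with its pages pinned; the caller
	 * must unpin them when done.
	 */
	i915_gem_object_unpin_pages(batch);
	return 0;
}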
Code Example #4
File: i915_gem_dmabuf.c  Project: wallento/linux
static int igt_dmabuf_export_vmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto err_obj;
	}
	i915_gem_object_put(obj);

	ptr = dma_buf_vmap(dmabuf);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("dma_buf_vmap failed with err=%d\n", err);
		goto out;
	}

	if (memchr_inv(ptr, 0, dmabuf->size)) {
		pr_err("Exported object not initialiased to zero!\n");
		err = -EINVAL;
		goto out;
	}

	memset(ptr, 0xc5, dmabuf->size);

	err = 0;
	dma_buf_vunmap(dmabuf, ptr);
out:
	dma_buf_put(dmabuf);
	return err;

err_obj:
	i915_gem_object_put(obj);
	return err;
}
Code Example #5
File: i915_gem_dmabuf.c  Project: wallento/linux
static int igt_dmabuf_import_self(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}

	if (import != &obj->base) {
		pr_err("i915_gem_prime_import created a new object!\n");
		err = -EINVAL;
		goto out_import;
	}

	err = 0;
out_import:
	i915_gem_object_put(to_intel_bo(import));
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
	return err;
}
Code Example #6
File: i915_gem_dmabuf.c  Project: wallento/linux
static int igt_dmabuf_export(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;

	obj = i915_gem_object_create(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	dma_buf_put(dmabuf);
	return 0;
}
Code Example #7
File: i915_gem_context.c  Project: asmalldev/linux
static struct drm_i915_gem_object *
alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return obj;

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(dev_priv)) {
		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
		/* Failure shouldn't ever happen this early */
		if (WARN_ON(ret)) {
			i915_gem_object_put(obj);
			return ERR_PTR(ret);
		}
	}

	return obj;
}
Code Example #8
File: i915_gem_dmabuf.c  Project: wallento/linux
static int igt_dmabuf_export_kmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *ptr;
	int err;

	obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
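	/* On success the exported dmabuf holds its own reference to obj,
	 * so obj stays valid after the put below and can still be
	 * pinned and mapped later in this test.
	 */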
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		pr_err("i915_gem_prime_export failed with err=%d\n", err);
		return err;
	}

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] not initialiased to zero!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(ptr)) {
		err = PTR_ERR(ptr);
		pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
		goto err;
	}
	memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
	i915_gem_object_unpin_map(obj);

	ptr = dma_buf_kmap(dmabuf, 1);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}

	if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 1, ptr);
		pr_err("Exported page[1] not set to 0xaa!\n");
		err = -EINVAL;
		goto err;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, 1, ptr);

	ptr = dma_buf_kmap(dmabuf, 0);
	if (!ptr) {
		pr_err("dma_buf_kmap failed\n");
		err = -ENOMEM;
		goto err;
	}
	if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
		dma_buf_kunmap(dmabuf, 0, ptr);
		pr_err("Exported page[0] did not retain 0xc5!\n");
		err = -EINVAL;
		goto err;
	}
	dma_buf_kunmap(dmabuf, 0, ptr);

	ptr = dma_buf_kmap(dmabuf, 2);
	if (ptr) {
		pr_err("Erroneously kmapped beyond the end of the object!\n");
		dma_buf_kunmap(dmabuf, 2, ptr);
		err = -EINVAL;
		goto err;
	}

	ptr = dma_buf_kmap(dmabuf, -1);
	if (ptr) {
		pr_err("Erroneously kmapped before the start of the object!\n");
		dma_buf_kunmap(dmabuf, -1, ptr);
		err = -EINVAL;
		goto err;
	}

	err = 0;
err:
	dma_buf_put(dmabuf);
	return err;
}