Example #1
static int intel_fbdev_set_par(struct fb_info *info)
{
    struct drm_fb_helper *fb_helper = info->par;
    struct intel_fbdev *ifbdev =
        container_of(fb_helper, struct intel_fbdev, helper);
    int ret;

    ret = drm_fb_helper_set_par(info);

    if (ret == 0) {
        /*
         * FIXME: fbdev presumes that all callbacks also work from
         * atomic contexts and relies on that for emergency oops
         * printing. KMS totally doesn't do that and the locking here is
         * by far not the only place this goes wrong.  Ignore this for
         * now until we solve this for real.
         */
        mutex_lock(&fb_helper->dev->struct_mutex);
        ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
                                                true);
        mutex_unlock(&fb_helper->dev->struct_mutex);
    }

    return ret;
}
Example #2
static int gtt_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

	return 0;
}
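The read side of this selftest helper follows the same pin/map/unpin dance. Below is a minimal sketch assuming the same helpers as gtt_set above; in the kernel's coherency selftest the matching function is called gtt_get, but treat this as an illustration rather than a verbatim copy:

static int gtt_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err;

	/* Reading through the aperture still needs the GTT domain,
	 * but without the write flag. */
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

	return 0;
}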
Example #3
static int gpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_i915_gem_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		__i915_add_request(rq, false);
		i915_vma_unpin(vma);
		return PTR_ERR(cs);
	}

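	/* Emit MI_STORE_DWORD_IMM to write the value into the object at
	 * its GGTT address from the render engine; gen8+ takes a 64-bit
	 * address, older generations a 32-bit one, and bit 22 selects
	 * the global-GTT address space. */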
	if (INTEL_GEN(i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (INTEL_GEN(i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unpin(vma);

	reservation_object_lock(obj->resv, NULL);
	reservation_object_add_excl_fence(obj->resv, &rq->fence);
	reservation_object_unlock(obj->resv);

	__i915_add_request(rq, true);

	return 0;
}
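In the coherency selftest, write paths like these are driven through a table of named operations so each backend can be exercised in turn. A rough sketch of how gtt_set and gpu_set might be paired; the struct and table names here are illustrative, not the kernel's:

/* Hypothetical pairing of the write paths above; the real selftest
 * keeps a similar table that also carries matching read helpers. */
struct coherency_op {
	const char *name;
	int (*set)(struct drm_i915_gem_object *obj,
		   unsigned long offset, u32 v);
};

static const struct coherency_op write_ops[] = {
	{ .name = "gtt", .set = gtt_set },
	{ .name = "gpu", .set = gpu_set },
};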
Example #4
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
    struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
    struct drm_device *dev = obj->base.dev;
    int ret;

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    ret = i915_gem_object_set_to_gtt_domain(obj, false);
    mutex_unlock(&dev->struct_mutex);

    return ret;
}
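The matching begin_cpu_access hook moves the object the other way, into the CPU domain, before userspace touches the dma-buf mapping. A sketch along the lines of the same-era driver; the key detail is deriving the write flag from the DMA direction:

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf,
				     enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	/* Pull the pages into the CPU domain, flagging a write if the
	 * importer intends to modify the buffer. */
	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}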
Example #5
static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool was_interruptible;
	int ret;

	mutex_lock(&dev->struct_mutex);
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);

	dev_priv->mm.interruptible = was_interruptible;
	mutex_unlock(&dev->struct_mutex);

	if (unlikely(ret))
		DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
}
Example #6
static int render_state_setup(struct render_state *so)
{
	const struct intel_renderstate_rodata *rodata = so->rodata;
	unsigned int i = 0, reloc_index = 0;
	struct page *page;
	u32 *d;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
	if (ret)
		return ret;

	page = i915_gem_object_get_dirty_page(so->obj, 0);
	d = kmap(page);

	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		if (i * 4  == rodata->reloc[reloc_index]) {
			u64 r = s + so->ggtt_offset;
			s = lower_32_bits(r);
			if (so->gen >= 8) {
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0) {
					ret = -EINVAL;
					goto err_out;
				}

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}

	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_batch_offset = i * sizeof(u32);

	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;

	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);

	kunmap(page);

	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
	if (ret)
		return ret;

	if (rodata->reloc[reloc_index] != -1) {
		DRM_ERROR("only %d relocs resolved\n", reloc_index);
		return -EINVAL;
	}

	return 0;

err_out:
	kunmap(page);
	return ret;
}
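Examples #6, #10 and #11 emit dwords through a local OUT_BATCH macro from i915_gem_render_state.c. Roughly, as in the kernel source of that era, it bounds-checks the write index against the page and bails through the surrounding function's error label (err_out in example #6, err in example #11; some versions also set an error code first):

#define OUT_BATCH(batch, i, val)				\
	do {							\
		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32)))	\
			goto err_out;				\
		(batch)[(i)++] = (val);				\
	} while (0)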
Example #7
(Only the tail of this older render_state_setup() variant survived extraction; the function's opening, up through the start of the gen8 relocation check, is missing.)
				    rodata->batch[i + 1] != 0)
					return -EINVAL;

				d[i] = s;
				i++;
				s = (goffset & 0xffffffff00000000ull) >> 32;
			}

			reloc_index++;
		}

		d[i] = s;
		i++;
	}

	ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
	if (ret)
		return ret;

	if (rodata->reloc_items != reloc_index) {
		DRM_ERROR("not all relocs resolved, %d out of %d\n",
			  reloc_index, rodata->reloc_items);
		return -EINVAL;
	}

	so->len = rodata->batch_items * 4;

	return 0;
}

Example #8
static int do_switch(struct drm_i915_gem_object *from_obj,
		     struct i915_hw_context *to,
		     u32 seqno)
{
	struct intel_ring_buffer *ring = NULL;
	u32 hw_flags = 0;
	int ret;

	BUG_ON(to == NULL);
	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);

	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
	if (ret)
		return ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 * XXX: We need a real interface to do this instead of trickery. */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;
	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
		hw_flags |= MI_FORCE_RESTORE;

	ring = to->ring;
	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from_obj != NULL) {
		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_gem_object_move_to_active(from_obj, ring, seqno);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from_obj->dirty = 1;
		BUG_ON(from_obj->ring != to->ring);
		i915_gem_object_unpin(from_obj);

		drm_gem_object_unreference(&from_obj->base);
	}

	drm_gem_object_reference(&to->obj->base);
	ring->last_context_obj = to->obj;
	to->is_initialized = true;

	return 0;
}
Example #9
static int do_switch(struct i915_hw_context *to)
{
	struct intel_ring_buffer *ring = to->ring;
	struct i915_hw_context *from = ring->last_context;
	u32 hw_flags = 0;
	int ret, i;

	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);

	if (from == to && !to->remap_slice)
		return 0;

	ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
	if (ret)
		return ret;

	/*
	 * Pin can switch back to the default context if we end up calling into
	 * evict_everything - as a last ditch gtt defrag effort that also
	 * switches to the default context. Hence we need to reload from here.
	 */
	from = ring->last_context;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 *
	 * XXX: We need a real interface to do this instead of trickery.
	 */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1<<i)))
			continue;

		ret = i915_gem_l3_remap(ring, i);
		/* If it failed, try again next round */
		if (ret)
			DRM_DEBUG_DRIVER("L3 remapping failed\n");
		else
			to->remap_slice &= ~(1<<i);
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from != NULL) {
		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from->obj->dirty = 1;
		BUG_ON(from->obj->ring != ring);

		/* obj is kept alive until the next request by its active ref */
		i915_gem_object_unpin(from->obj);
		i915_gem_context_unreference(from);
	}

	i915_gem_context_reference(to);
	ring->last_context = to;
	to->is_initialized = true;

	return 0;
}
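For context, do_switch is driven by i915_switch_context, which resolves the target context and delegates. The sketch below is a from-memory approximation of that caller in the same-vintage kernel, not a verbatim copy; the RCS-only check and the default-context lookup are the parts to trust least:

int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct i915_hw_context *to;

	if (!HAS_HW_CONTEXTS(ring->dev))
		return 0;

	/* Hardware contexts only exist on the render ring. */
	if (ring != &dev_priv->ring[RCS])
		return 0;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		if (file == NULL)
			return -EINVAL;

		to = i915_gem_context_get(file->driver_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	return do_switch(to);
}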
Example #10
static int render_state_setup(struct render_state *so)
{
	struct drm_device *dev = so->vma->vm->dev;
	const struct intel_renderstate_rodata *rodata = so->rodata;
	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
	unsigned int i = 0, reloc_index = 0;
	struct page *page;
	u32 *d;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
	if (ret)
		return ret;

	page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
	d = kmap(page);

	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		if (i * 4  == rodata->reloc[reloc_index]) {
			u64 r = s + so->vma->node.start;
			s = lower_32_bits(r);
			if (has_64bit_reloc) {
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0) {
					ret = -EINVAL;
					goto err_out;
				}

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}

	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_batch_offset = i * sizeof(u32);

	if (HAS_POOLED_EU(dev)) {
		/*
		 * We always program 3x6 pool config but depending upon which
		 * subslice is disabled HW drops down to appropriate config
		 * shown below.
		 *
		 * In the below table 2x6 config always refers to
		 * fused-down version, native 2x6 is not available and can
		 * be ignored
		 *
		 * SNo  subslices config                eu pool configuration
		 * -----------------------------------------------------------
		 * 1    3 subslices enabled (3x6)  -    0x00777000  (9+9)
		 * 2    ss0 disabled (2x6)         -    0x00777000  (3+9)
		 * 3    ss1 disabled (2x6)         -    0x00770000  (6+6)
		 * 4    ss2 disabled (2x6)         -    0x00007000  (9+3)
		 */
		u32 eu_pool_config = 0x00777000;

		OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
		OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
		OUT_BATCH(d, i, eu_pool_config);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
	}

	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;

	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);

	kunmap(page);

	ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
	if (ret)
		return ret;

	if (rodata->reloc[reloc_index] != -1) {
		DRM_ERROR("only %d relocs resolved\n", reloc_index);
		return -EINVAL;
	}

	return 0;

err_out:
	kunmap(page);
	return ret;
}
Example #11
static int render_state_setup(struct intel_render_state *so,
			      struct drm_i915_private *i915)
{
	const struct intel_renderstate_rodata *rodata = so->rodata;
	struct drm_i915_gem_object *obj = so->vma->obj;
	unsigned int i = 0, reloc_index = 0;
	unsigned int needs_clflush;
	u32 *d;
	int ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	if (ret)
		return ret;

	d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));

	while (i < rodata->batch_items) {
		u32 s = rodata->batch[i];

		if (i * 4  == rodata->reloc[reloc_index]) {
			u64 r = s + so->vma->node.start;
			s = lower_32_bits(r);
			if (HAS_64BIT_RELOC(i915)) {
				if (i + 1 >= rodata->batch_items ||
				    rodata->batch[i + 1] != 0)
					goto err;

				d[i++] = s;
				s = upper_32_bits(r);
			}

			reloc_index++;
		}

		d[i++] = s;
	}

	if (rodata->reloc[reloc_index] != -1) {
		DRM_ERROR("only %d relocs resolved\n", reloc_index);
		goto err;
	}

	so->batch_offset = so->vma->node.start;
	so->batch_size = rodata->batch_items * sizeof(u32);

	while (i % CACHELINE_DWORDS)
		OUT_BATCH(d, i, MI_NOOP);

	so->aux_offset = i * sizeof(u32);

	if (HAS_POOLED_EU(i915)) {
		/*
		 * We always program 3x6 pool config but depending upon which
		 * subslice is disabled HW drops down to appropriate config
		 * shown below.
		 *
		 * In the below table 2x6 config always refers to
		 * fused-down version, native 2x6 is not available and can
		 * be ignored
		 *
		 * SNo  subslices config                eu pool configuration
		 * -----------------------------------------------------------
		 * 1    3 subslices enabled (3x6)  -    0x00777000  (9+9)
		 * 2    ss0 disabled (2x6)         -    0x00777000  (3+9)
		 * 3    ss1 disabled (2x6)         -    0x00770000  (6+6)
		 * 4    ss2 disabled (2x6)         -    0x00007000  (9+3)
		 */
		u32 eu_pool_config = 0x00777000;

		OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
		OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
		OUT_BATCH(d, i, eu_pool_config);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
		OUT_BATCH(d, i, 0);
	}

	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
	so->aux_size = i * sizeof(u32) - so->aux_offset;
	so->aux_offset += so->batch_offset;
	/*
	 * Since we are sending length, we need to strictly conform to
	 * all requirements. For Gen2 this must be a multiple of 8.
	 */
	so->aux_size = ALIGN(so->aux_size, 8);

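	/* The batch was written through a cacheable CPU mapping; if the
	 * object is not cache-coherent with the GPU, flush those
	 * cachelines before the GPU consumes the state. */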
	if (needs_clflush)
		drm_clflush_virt_range(d, i * sizeof(u32));
	kunmap_atomic(d);

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
out:
	i915_gem_obj_finish_shmem_access(obj);
	return ret;

err:
	kunmap_atomic(d);
	ret = -EINVAL;
	goto out;
}