/**
 * i915_switch_context() - perform a GPU context switch.
 * @ring: ring for which we'll execute the context switch
 * @file: file associated with the context, may be NULL
 * @to_id: id number of the context to switch to
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 */
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file,
			int to_id)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_file_private *file_priv = NULL;
	struct i915_hw_context *to;
	struct drm_i915_gem_object *from_obj = ring->last_context_obj;

	if (dev_priv->hw_contexts_disabled)
		return 0;

	if (ring != &dev_priv->ring[RCS])
		return 0;

	if (file)
		file_priv = file->driver_priv;

	if (to_id == DEFAULT_CONTEXT_ID) {
		to = ring->default_context;
	} else {
		to = i915_gem_context_get(file_priv, to_id);
		if (to == NULL)
			return -ENOENT;
	}

	if (from_obj == to->obj)
		return 0;

	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
}
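
The lookup helper called above, i915_gem_context_get(), is not part of this snippet. Below is a minimal sketch of what it might look like, assuming per-file contexts are tracked in an IDR at file_priv->context_idr, as in upstream drivers of this era; that field name is an assumption, not something shown in the snippet.

/* Hypothetical sketch: look up a context by id in the per-file IDR.
 * The context_idr field is assumed; only the signature is implied by the
 * caller above. Returns NULL if no context with that id exists. */
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}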
Example #2
static int do_switch(struct i915_hw_context *to)
{
	struct intel_ring_buffer *ring = to->ring;
	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
	u32 hw_flags = 0;
	int ret;

	KASSERT(!(from_obj != NULL && from_obj->pin_count == 0),
	    ("i915_gem_context: invalid \"from\" context"));

	if (from_obj == to->obj)
		return 0;

	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
	if (ret)
		return ret;

	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
	 * that thanks to write = false in this call and us not setting any gpu
	 * write domains when putting a context object onto the active list
	 * (when switching away from it), this won't block.
	 * XXX: We need a real interface to do this instead of trickery. */
	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	if (!to->obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);

	if (!to->is_initialized || is_default_context(to))
		hw_flags |= MI_RESTORE_INHIBIT;
	else if (from_obj == to->obj) /* not yet expected */
		hw_flags |= MI_FORCE_RESTORE;

	ret = mi_set_context(ring, to, hw_flags);
	if (ret) {
		i915_gem_object_unpin(to->obj);
		return ret;
	}

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. In fact, the below code
	 * is a bit suboptimal because the retiring can occur simply after the
	 * MI_SET_CONTEXT instead of when the next seqno has completed.
	 */
	if (from_obj != NULL) {
		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
		i915_gem_object_move_to_active(from_obj, ring,
		    i915_gem_next_request_seqno(ring));
		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
		 * whole damn pipeline, we don't need to explicitly mark the
		 * object dirty. The only exception is that the context must be
		 * correct in case the object gets swapped out. Ideally we'd be
		 * able to defer doing this until we know the object would be
		 * swapped, but there is no way to do that yet.
		 */
		from_obj->dirty = 1;
		KASSERT(from_obj->ring == ring, ("i915_gem_context: from_ring != ring"));
		i915_gem_object_unpin(from_obj);

		drm_gem_object_unreference(&from_obj->base);
	}

	drm_gem_object_reference(&to->obj->base);
	ring->last_context_obj = to->obj;
	to->is_initialized = true;

	return 0;
}
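
For context, the hw_flags computed above (MI_RESTORE_INHIBIT, MI_FORCE_RESTORE) are consumed by mi_set_context(), which is not included in this snippet. The following is a rough sketch of the kind of command emission it performs, assuming the ring helpers intel_ring_begin()/intel_ring_emit()/intel_ring_advance() and the MI_* defines from the driver of this era; the exact dword layout and gen-specific workarounds in the real function are omitted.

/* Hypothetical sketch of mi_set_context(): emit an MI_SET_CONTEXT command
 * whose operand is the new context object's GTT offset OR'ed with the
 * save/restore control bits, including the hw_flags computed by do_switch().
 * Padding NOOPs keep the emission length even; hardware workarounds present
 * in the real driver are not shown. */
static int mi_set_context(struct intel_ring_buffer *ring,
			  struct i915_hw_context *new_context,
			  u32 hw_flags)
{
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, new_context->obj->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			hw_flags);
	/* MI_SET_CONTEXT is documented to require a following MI_NOOP. */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}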