Example #1
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}
Example #2
static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
Example #3
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
Example #4
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid  */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}
Example #5
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
Example #6
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
Example #7
void intel_uc_fini_hw(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!intel_guc_is_loaded(guc))
		return;

	GEM_BUG_ON(!HAS_GUC(i915));

	if (USES_GUC_SUBMISSION(i915))
		intel_guc_submission_disable(guc);

	guc_disable_communication(guc);
	__uc_sanitize(i915);
}
Example #8
int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 *
	 *  +===============================+ 00B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 32B
	 *  |       DPC state header        |
	 *  +-------------------------------+ 64B
	 *  |       ISR state header        |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |           DPC logs            |
	 *  +===============================+ + DPC_SIZE
	 *  |           ISR logs            |
	 *  +===============================+ + ISR_SIZE
	 */
	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
			ISR_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;

	log->level = i915_modparams.guc_log_level;

	return 0;

err:
	DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
	return ret;
}
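
The layout comment above fixes where each log section starts inside the buffer: the state headers fill the first page, followed by the crash dump, DPC and ISR sections. The following sketch spells out that arithmetic; it is illustration only, not driver code, the helper names are made up, and it simply reuses the PAGE_SIZE/CRASH_BUFFER_SIZE/DPC_BUFFER_SIZE constants already used in guc_log_size above.

/* Illustration only, not driver code: section offsets implied by the
 * layout diagram in intel_guc_log_create(). Helper names are hypothetical.
 */
static u32 guc_log_crash_offset(void)
{
	return PAGE_SIZE;			/* state headers occupy the first page */
}

static u32 guc_log_dpc_offset(void)
{
	return PAGE_SIZE + CRASH_BUFFER_SIZE;	/* DPC logs follow the crash dump logs */
}

static u32 guc_log_isr_offset(void)
{
	return PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE;
}
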
Example #9
void intel_uc_fini(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;

	if (!USES_GUC(i915))
		return;

	GEM_BUG_ON(!HAS_GUC(i915));

	if (USES_GUC_SUBMISSION(i915))
		intel_guc_submission_fini(guc);

	if (USES_HUC(i915))
		intel_huc_fini(&i915->huc);

	intel_guc_fini(guc);
}
Example #10
int intel_uc_init(struct drm_i915_private *i915)
{
	struct intel_guc *guc = &i915->guc;
	struct intel_huc *huc = &i915->huc;
	int ret;

	if (!USES_GUC(i915))
		return 0;

	if (!HAS_GUC(i915))
		return -ENODEV;

	/* XXX: GuC submission is unavailable for now */
	GEM_BUG_ON(USES_GUC_SUBMISSION(i915));

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (USES_HUC(i915)) {
		ret = intel_huc_init(huc);
		if (ret)
			goto err_guc;
	}

	if (USES_GUC_SUBMISSION(i915)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_huc;
	}

	return 0;

err_huc:
	if (USES_HUC(i915))
		intel_huc_fini(huc);
err_guc:
	intel_guc_fini(guc);
	return ret;
}
Example #11
/**
 * huc_fw_xfer() - DMA's the firmware
 * @huc_fw: the firmware descriptor
 * @vma: the firmware image (bound into the GGTT)
 *
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Return: 0 on success, non-zero on failure
 */
static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
	struct drm_i915_private *dev_priv = huc_to_i915(huc);
	unsigned long offset = 0;
	u32 size;
	int ret;

	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
		 huc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/* Hardware doesn't look at destination address for HuC. Set it to 0,
	 * but still program the correct address space.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	size = huc_fw->header_size + huc_fw->ucode_size;
	I915_WRITE(DMA_COPY_SIZE, size);

	/* Start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);

	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);

	/* Disable the bits once DMA is over */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
Example #12
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						   &vma->vm->dev->struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}
Example #13
static int igt_wedged_reset(void *arg)
{
	struct drm_i915_private *i915 = arg;
	intel_wakeref_t wakeref;

	/* Check that we can recover a wedged device with a GPU reset */

	igt_global_reset_lock(i915);
	wakeref = intel_runtime_pm_get(i915);

	i915_gem_set_wedged(i915);

	GEM_BUG_ON(!i915_reset_failed(i915));
	i915_reset(i915, ALL_ENGINES, NULL);

	intel_runtime_pm_put(i915, wakeref);
	igt_global_reset_unlock(i915);

	return i915_reset_failed(i915) ? -EIO : 0;
}
Example #14
/**
 * intel_guc_ads_create() - creates GuC ADS
 * @guc: intel_guc struct
 *
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma, *kernel_ctx_vma;
	struct page *page;
	/* The ads obj includes the struct itself and buffers passed to GuC */
	struct {
		struct guc_ads ads;
		struct guc_policies policies;
		struct guc_mmio_reg_state reg_state;
		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
	} __packed *blob;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
	u32 base;

	GEM_BUG_ON(guc->ads_vma);

	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->ads_vma = vma;

	page = i915_vma_first_page(vma);
	blob = kmap(page);

	/* GuC scheduling policies */
	guc_policies_init(&blob->policies);

	/* MMIO reg state */
	for_each_engine(engine, dev_priv, id) {
		blob->reg_state.white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		blob->reg_state.white_list[engine->guc_id].count = 0;
	}
Example #15
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels from 0 to max; we use 0 to indicate
	 * that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(dev_priv, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}
Example #16
void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->saturated = 0;

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_request_init(&ce->active_tracker,
				 NULL, intel_context_retire);
}
Example #17
static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);

	 /* Keep the size of sub buffers same as shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to the user, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}
Example #18
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *i915 = guc_to_i915(guc);

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	if (!HAS_GUC(i915))
		return;

	if (i915_modparams.guc_firmware_path) {
		guc_fw->path = i915_modparams.guc_firmware_path;
		guc_fw->major_ver_wanted = 0;
		guc_fw->minor_ver_wanted = 0;
	} else if (IS_ICELAKE(i915)) {
		guc_fw->path = ICL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
	} else if (IS_GEMINILAKE(i915)) {
		guc_fw->path = GLK_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
	} else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		guc_fw->path = KBL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
	} else if (IS_BROXTON(i915)) {
		guc_fw->path = BXT_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
	} else if (IS_SKYLAKE(i915)) {
		guc_fw->path = SKL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
	}
}
Example #19
/*
 * Create as many clients as there are doorbells. Note that there are already
 * client(s)/doorbell(s) created during driver load, but this test creates
 * its own and does not interact with the existing ones.
 */
static int igt_guc_doorbells(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct intel_guc *guc;
	int i, err = 0;
	u16 db_id;

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);

	guc = &dev_priv->guc;
	if (!guc) {
		pr_err("No guc object!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = check_all_doorbells(guc);
	if (err)
		goto unlock;

	for (i = 0; i < ATTEMPTS; i++) {
		clients[i] = guc_client_alloc(dev_priv,
					      INTEL_INFO(dev_priv)->ring_mask,
					      i % GUC_CLIENT_PRIORITY_NUM,
					      dev_priv->kernel_context);

		if (!clients[i]) {
			pr_err("[%d] No guc client\n", i);
			err = -EINVAL;
			goto out;
		}

		if (IS_ERR(clients[i])) {
			if (PTR_ERR(clients[i]) != -ENOSPC) {
				pr_err("[%d] unexpected error\n", i);
				err = PTR_ERR(clients[i]);
				goto out;
			}

			if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
				pr_err("[%d] non-db related alloc fail\n", i);
				err = -EINVAL;
				goto out;
			}

			/* expected, ran out of dbs for this client type */
			continue;
		}

		/*
		 * The check below is only valid because we keep a doorbell
		 * assigned during the whole life of the client.
		 */
		if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
			pr_err("[%d] more clients than doorbells (%d >= %d)\n",
			       i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
			err = -EINVAL;
			goto out;
		}

		err = validate_client(clients[i],
				      i % GUC_CLIENT_PRIORITY_NUM, false);
		if (err) {
			pr_err("[%d] client_alloc sanity check failed!\n", i);
			err = -EINVAL;
			goto out;
		}

		db_id = clients[i]->doorbell_id;

		err = __guc_client_enable(clients[i]);
		if (err) {
			pr_err("[%d] Failed to create a doorbell\n", i);
			goto out;
		}

		/* doorbell id shouldn't change, we are holding the mutex */
		if (db_id != clients[i]->doorbell_id) {
			pr_err("[%d] doorbell id changed (%d != %d)\n",
			       i, db_id, clients[i]->doorbell_id);
			err = -EINVAL;
			goto out;
		}

		err = check_all_doorbells(guc);
		if (err)
			goto out;

		err = ring_doorbell_nop(clients[i]);
		if (err)
			goto out;
	}

out:
	for (i = 0; i < ATTEMPTS; i++)
		if (!IS_ERR_OR_NULL(clients[i])) {
			__guc_client_disable(clients[i]);
			guc_client_free(clients[i]);
		}
unlock:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}
Example #20
/*
 * Check that we're able to synchronize guc_clients with their doorbells
 *
 * We're creating clients and reserving doorbells once, at module load. During
 * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
 * GuC being reset. In other words - GuC clients are still around, but the
 * status of their doorbells may be incorrect. This is the reason behind
 * validating that the doorbells status expected by the driver matches what the
 * GuC/HW have.
 */
static int igt_guc_clients(void *args)
{
	struct drm_i915_private *dev_priv = args;
	struct intel_guc *guc;
	int err = 0;

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_runtime_pm_get(dev_priv);

	guc = &dev_priv->guc;
	if (!guc) {
		pr_err("No guc object!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = check_all_doorbells(guc);
	if (err)
		goto unlock;

	/*
	 * Get rid of clients created during driver load because the test will
	 * recreate them.
	 */
	guc_clients_disable(guc);
	guc_clients_destroy(guc);
	if (guc->execbuf_client || guc->preempt_client) {
		pr_err("guc_clients_destroy lied!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = guc_clients_create(guc);
	if (err) {
		pr_err("Failed to create clients\n");
		goto unlock;
	}
	GEM_BUG_ON(!guc->execbuf_client);

	err = validate_client(guc->execbuf_client,
			      GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
	if (err) {
		pr_err("execbug client validation failed\n");
		goto out;
	}

	if (guc->preempt_client) {
		err = validate_client(guc->preempt_client,
				      GUC_CLIENT_PRIORITY_KMD_HIGH, true);
		if (err) {
			pr_err("preempt client validation failed\n");
			goto out;
		}
	}

	/* each client should now have reserved a doorbell */
	if (!has_doorbell(guc->execbuf_client) ||
	    (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
		pr_err("guc_clients_create didn't reserve doorbells\n");
		err = -EINVAL;
		goto out;
	}

	/* Now enable the clients */
	guc_clients_enable(guc);

	/* each client should now have received a doorbell */
	if (!client_doorbell_in_sync(guc->execbuf_client) ||
	    !client_doorbell_in_sync(guc->preempt_client)) {
		pr_err("failed to initialize the doorbells\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * Basic test - an attempt to reallocate a valid doorbell to the
	 * client it is currently assigned should not cause a failure.
	 */
	err = create_doorbell(guc->execbuf_client);

out:
	/*
	 * Leave a clean state for other tests; besides, the driver always
	 * destroys the clients during unload.
	 */
	guc_clients_disable(guc);
	guc_clients_destroy(guc);
	guc_clients_create(guc);
	guc_clients_enable(guc);
unlock:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}
Example #21
static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}
Example #22
/*
 * Check that we're able to synchronize guc_clients with their doorbells
 *
 * We're creating clients and reserving doorbells once, at module load. During
 * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
 * GuC being reset. In other words - GuC clients are still around, but the
 * status of their doorbells may be incorrect. This is the reason behind
 * validating that the doorbells status expected by the driver matches what the
 * GuC/HW have.
 */
static int igt_guc_clients(void *args)
{
	struct drm_i915_private *dev_priv = args;
	struct intel_guc *guc;
	int err = 0;

	GEM_BUG_ON(!HAS_GUC(dev_priv));
	mutex_lock(&dev_priv->drm.struct_mutex);

	guc = &dev_priv->guc;
	if (!guc) {
		pr_err("No guc object!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = check_all_doorbells(guc);
	if (err)
		goto unlock;

	/*
	 * Get rid of clients created during driver load because the test will
	 * recreate them.
	 */
	guc_clients_destroy(guc);
	if (guc->execbuf_client || guc->preempt_client) {
		pr_err("guc_clients_destroy lied!\n");
		err = -EINVAL;
		goto unlock;
	}

	err = guc_clients_create(guc);
	if (err) {
		pr_err("Failed to create clients\n");
		goto unlock;
	}
	GEM_BUG_ON(!guc->execbuf_client);
	GEM_BUG_ON(!guc->preempt_client);

	err = validate_client(guc->execbuf_client,
			      GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
	if (err) {
		pr_err("execbug client validation failed\n");
		goto out;
	}

	err = validate_client(guc->preempt_client,
			      GUC_CLIENT_PRIORITY_KMD_HIGH, true);
	if (err) {
		pr_err("preempt client validation failed\n");
		goto out;
	}

	/* each client should now have reserved a doorbell */
	if (!has_doorbell(guc->execbuf_client) ||
	    !has_doorbell(guc->preempt_client)) {
		pr_err("guc_clients_create didn't reserve doorbells\n");
		err = -EINVAL;
		goto out;
	}

	/* Now create the doorbells */
	guc_clients_doorbell_init(guc);

	/* each client should now have received a doorbell */
	if (!client_doorbell_in_sync(guc->execbuf_client) ||
	    !client_doorbell_in_sync(guc->preempt_client)) {
		pr_err("failed to initialize the doorbells\n");
		err = -EINVAL;
		goto out;
	}

	/*
	 * Basic test - an attempt to reallocate a valid doorbell to the
	 * client it is currently assigned should not cause a failure.
	 */
	err = guc_clients_doorbell_init(guc);
	if (err)
		goto out;

	/*
	 * Negative test - a client with no doorbell (invalid db id).
	 * After destroying the doorbell, the db id is changed to
	 * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
	 * allocate a doorbell with an invalid id (db has to be reserved before
	 * allocation).
	 */
	destroy_doorbell(guc->execbuf_client);
	if (client_doorbell_in_sync(guc->execbuf_client)) {
		pr_err("destroy db did not work\n");
		err = -EINVAL;
		goto out;
	}

	unreserve_doorbell(guc->execbuf_client);
	err = guc_clients_doorbell_init(guc);
	if (err != -EIO) {
		pr_err("unexpected (err = %d)", err);
		goto out;
	}

	if (!available_dbs(guc, guc->execbuf_client->priority)) {
		pr_err("doorbell not available when it should\n");
		err = -EIO;
		goto out;
	}

	/* clean after test */
	err = reserve_doorbell(guc->execbuf_client);
	if (err) {
		pr_err("failed to reserve back the doorbell back\n");
	}
	err = create_doorbell(guc->execbuf_client);
	if (err) {
		pr_err("recreate doorbell failed\n");
		goto out;
	}

out:
	/*
	 * Leave a clean state for other tests; besides, the driver always
	 * destroys the clients during unload.
	 */
	destroy_doorbell(guc->execbuf_client);
	destroy_doorbell(guc->preempt_client);
	guc_clients_destroy(guc);
	guc_clients_create(guc);
	guc_clients_doorbell_init(guc);
unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
	return err;
}
Example #23
/**
 * i915_gem_batch_pool_get() - allocate a buffer from the pool
 * @pool: the batch buffer pool
 * @size: the minimum desired size of the returned buffer
 *
 * Returns an inactive buffer from @pool with at least @size bytes,
 * with the pages pinned. The caller must i915_gem_object_unpin_pages()
 * on the returned object.
 *
 * Note: Callers must hold the struct_mutex
 *
 * Return: the buffer object or an error pointer
 */
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
			size_t size)
{
	struct drm_i915_gem_object *obj;
	struct list_head *list;
	int n, ret;

	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);

	/* Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(size >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;
	list = &pool->cache_list[n];

	list_for_each_entry(obj, list, batch_pool_link) {
		/* The batches are strictly LRU ordered */
		if (i915_gem_object_is_active(obj)) {
			struct reservation_object *resv = obj->resv;

			if (!reservation_object_test_signaled_rcu(resv, true))
				break;

			i915_retire_requests(pool->engine->i915);
			GEM_BUG_ON(i915_gem_object_is_active(obj));

			/*
			 * The object is now idle; clear the array of shared
			 * fences before we add a new request. Although we
			 * remain on the same engine, we may be on a different
			 * timeline and so may continually grow the array,
			 * trapping a reference to all the old fences rather
			 * than replacing the existing fence.
			 */
			if (rcu_access_pointer(resv->fence)) {
				reservation_object_lock(resv, NULL);
				reservation_object_add_excl_fence(resv, NULL);
				reservation_object_unlock(resv);
			}
		}

		GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
								 true));

		if (obj->base.size >= size)
			goto found;
	}

	obj = i915_gem_object_create_internal(pool->engine->i915, size);
	if (IS_ERR(obj))
		return obj;

found:
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	list_move_tail(&obj->batch_pool_link, list);
	return obj;
}
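
The bucket-selection comment in i915_gem_batch_pool_get() maps a buffer size to one of four LRU lists via n = fls(size >> PAGE_SHIFT) - 1, clamped to the last bucket. Below is a small stand-alone sketch of that arithmetic, assuming 4 KiB pages and four buckets; fls_approx() is a made-up stand-in for the kernel's fls(), and nothing here is driver code.

/* Stand-alone illustration (not driver code) of the bucket math above. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4 KiB pages */
#define NUM_BUCKETS	4	/* 1 page, 2 pages, 4 pages, 8+ pages */

static int fls_approx(unsigned long x)	/* behaves like the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	const unsigned long sizes[] = { 4096, 8192, 12288, 16384, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int n = fls_approx(sizes[i] >> PAGE_SHIFT) - 1;

		if (n >= NUM_BUCKETS)
			n = NUM_BUCKETS - 1;
		/* prints buckets 0, 1, 1, 2 and 3 respectively */
		printf("size %lu -> bucket %d\n", sizes[i], n);
	}
	return 0;
}
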
Example #24
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
Example #25
static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct hang h;
	int err = 0;

	/* Check that we can issue an engine reset on an idle engine (no-op) */

	if (!intel_has_reset_engine(i915))
		return 0;

	if (active) {
		mutex_lock(&i915->drm.struct_mutex);
		err = hang_init(&h, i915);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err)
			return err;
	}

	for_each_engine(engine, i915, id) {
		unsigned int reset_count, reset_engine_count;
		IGT_TIMEOUT(end_time);

		if (active && !intel_engine_can_store_dword(engine))
			continue;

		if (!wait_for_idle(engine)) {
			pr_err("%s failed to idle before reset\n",
			       engine->name);
			err = -EIO;
			break;
		}

		reset_count = i915_reset_count(&i915->gpu_error);
		reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
							     engine);

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		do {
			u32 seqno = intel_engine_get_seqno(engine);

			if (active) {
				struct i915_request *rq;

				mutex_lock(&i915->drm.struct_mutex);
				rq = hang_create_request(&h, engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					mutex_unlock(&i915->drm.struct_mutex);
					break;
				}

				i915_request_get(rq);
				i915_request_add(rq);
				mutex_unlock(&i915->drm.struct_mutex);

				if (!wait_until_running(&h, rq)) {
					struct drm_printer p = drm_info_printer(i915->drm.dev);

					pr_err("%s: Failed to start request %x, at %x\n",
					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
					intel_engine_dump(engine, &p,
							  "%s\n", engine->name);

					i915_request_put(rq);
					err = -EIO;
					break;
				}

				GEM_BUG_ON(!rq->global_seqno);
				seqno = rq->global_seqno - 1;
				i915_request_put(rq);
			}

			err = i915_reset_engine(engine, NULL);
			if (err) {
				pr_err("i915_reset_engine failed\n");
				break;
			}

			if (i915_reset_count(&i915->gpu_error) != reset_count) {
				pr_err("Full GPU reset recorded! (engine reset expected)\n");
				err = -EINVAL;
				break;
			}

			reset_engine_count += active;
			if (i915_reset_engine_count(&i915->gpu_error, engine) !=
			    reset_engine_count) {
				pr_err("%s engine reset %srecorded!\n",
				       engine->name, active ? "not " : "");
				err = -EINVAL;
				break;
			}

			if (!wait_for_idle(engine)) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("%s failed to idle after reset\n",
				       engine->name);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);

				err = -EIO;
				break;
			}
		} while (time_before(jiffies, end_time));
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		if (err)
			break;

		err = igt_flush_test(i915, 0);
		if (err)
			break;
	}
Example #26
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (clflush) {
		GEM_BUG_ON(!obj->cache_dirty);

		dma_fence_init(&clflush->dma,
			       &i915_clflush_ops,
			       &clflush_lock,
			       to_i915(obj->base.dev)->mm.unordered_timeline,
			       0);
		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

		clflush->obj = i915_gem_object_get(obj);
		INIT_WORK(&clflush->work, i915_clflush_work);

		dma_fence_get(&clflush->dma);

		i915_sw_fence_await_reservation(&clflush->wait,
						obj->resv, NULL,
						true, I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);

		reservation_object_lock(obj->resv, NULL);
		reservation_object_add_excl_fence(obj->resv, &clflush->dma);
		reservation_object_unlock(obj->resv);

		i915_sw_fence_commit(&clflush->wait);
	} else if (obj->mm.pages) {
		__i915_do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}
Example #27
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);

	mutex_lock(&ggtt->vm.mutex);
	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}
Example #28
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}
Example #29
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	dev_priv->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
		return 0;

	GEM_BUG_ON(dev_priv->dsm.start == 0);
	GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);

	stolen_top = dev_priv->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(dev_priv)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(dev_priv))
			vlv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(dev_priv,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 11:
	default:
		icl_get_stolen_reserved(dev_priv, &reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	dev_priv->dsm_reserved =
		(struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &dev_priv->dsm_reserved, &dev_priv->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&dev_priv->dsm) >> 10,
			 ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);

	dev_priv->stolen_usable_size =
		resource_size(&dev_priv->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size);

	return 0;
}
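
The comment before the reserved_total computation explains that only the start of the reserved range matters, because the reserved area may end below the top of stolen memory. Here is a worked example with invented numbers (a hypothetical 64 MiB stolen region whose reserved range starts 6 MiB below the top but is only 4 MiB long); it is a sketch of the arithmetic, not real hardware values.

/* Stand-alone illustration with invented numbers, not real hardware values. */
#include <stdio.h>

int main(void)
{
	unsigned long long dsm_start = 0x80000000ULL;
	unsigned long long dsm_size = 64ULL << 20;		/* 64 MiB of stolen memory */
	unsigned long long stolen_top = dsm_start + dsm_size;
	unsigned long long reserved_base = stolen_top - (6ULL << 20);

	/* Everything from reserved_base up to stolen_top is written off, so the
	 * 2 MiB gap above the 4 MiB reserved range is also treated as unusable.
	 */
	unsigned long long reserved_total = stolen_top - reserved_base;

	printf("usable: %llu MiB\n", (dsm_size - reserved_total) >> 20);	/* 58 MiB */
	return 0;
}
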
Example #30
static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}