Example #1
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	if (!HAS_GUC(dev_priv))
		return;

	if (i915_modparams.guc_firmware_path) {
		guc_fw->path = i915_modparams.guc_firmware_path;
		guc_fw->major_ver_wanted = 0;
		guc_fw->minor_ver_wanted = 0;
	} else if (IS_SKYLAKE(dev_priv)) {
		guc_fw->path = I915_SKL_GUC_UCODE;
		guc_fw->major_ver_wanted = SKL_FW_MAJOR;
		guc_fw->minor_ver_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		guc_fw->path = I915_BXT_GUC_UCODE;
		guc_fw->major_ver_wanted = BXT_FW_MAJOR;
		guc_fw->minor_ver_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		guc_fw->path = I915_KBL_GUC_UCODE;
		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
	} else {
		DRM_WARN("%s: No firmware known for this platform!\n",
			 intel_uc_fw_type_repr(guc_fw->type));
	}
}
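
For context, a minimal sketch of how a selector like this is wired up during early driver init; the wrapper name intel_guc_fw_init_early follows the upstream driver of this era, but treat the exact call site as an assumption:

void intel_guc_fw_init_early(struct intel_guc *guc)
{
	struct intel_uc_fw *guc_fw = &guc->fw;

	/* Initialise common uC firmware state, then pick a blob */
	intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
	guc_fw_select(guc_fw);
}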
Example #2
/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 */
static int guc_xfer_ucode(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uc_fw *guc_fw = &guc->fw;
	unsigned long offset;

	/*
	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components
	 */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	return guc_wait_ucode(guc);
}
Example #3
/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded via MMIO.
	 */
	ret = guc_xfer_rsa(guc, vma);
	if (ret)
		DRM_WARN("GuC firmware signature xfer error %d\n", ret);

	ret = guc_xfer_ucode(guc, vma);
	if (ret)
		DRM_WARN("GuC firmware code xfer error %d\n", ret);

	ret = guc_wait_ucode(guc);
	if (ret)
		DRM_ERROR("GuC firmware xfer error %d\n", ret);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
Example #4
/**
 * intel_guc_fw_select() - selects GuC firmware for uploading
 *
 * @guc:	intel_guc struct
 *
 * Return: zero when a firmware is selected, non-zero otherwise
 */
int intel_guc_fw_select(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);

	if (i915_modparams.guc_firmware_path) {
		guc->fw.path = i915_modparams.guc_firmware_path;
		guc->fw.major_ver_wanted = 0;
		guc->fw.minor_ver_wanted = 0;
	} else if (IS_SKYLAKE(dev_priv)) {
		guc->fw.path = I915_SKL_GUC_UCODE;
		guc->fw.major_ver_wanted = SKL_FW_MAJOR;
		guc->fw.minor_ver_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		guc->fw.path = I915_BXT_GUC_UCODE;
		guc->fw.major_ver_wanted = BXT_FW_MAJOR;
		guc->fw.minor_ver_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		guc->fw.path = I915_KBL_GUC_UCODE;
		guc->fw.major_ver_wanted = KBL_FW_MAJOR;
		guc->fw.minor_ver_wanted = KBL_FW_MINOR;
	} else if (IS_GEMINILAKE(dev_priv)) {
		guc->fw.path = I915_GLK_GUC_UCODE;
		guc->fw.major_ver_wanted = GLK_FW_MAJOR;
		guc->fw.minor_ver_wanted = GLK_FW_MINOR;
	} else {
		DRM_ERROR("No GuC firmware known for platform with GuC!\n");
		return -ENOENT;
	}

	return 0;
}
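
A hypothetical caller sketch showing how the non-zero return is meant to be consumed; the surrounding function name and fallback policy are illustrative, not taken from the source:

static void sanitize_guc_options(struct drm_i915_private *dev_priv)
{
	/* Illustrative: disable GuC features when no firmware is known */
	if (intel_guc_fw_select(&dev_priv->guc)) {
		i915_modparams.enable_guc_loading = 0;
		i915_modparams.enable_guc_submission = 0;
	}
}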
Example #5
static void guc_prepare_xfer(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/* Must program this register before loading the ucode with DMA */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				     GUC_ENABLE_READ_CACHE_LOGIC |
				     GUC_ENABLE_MIA_CACHING |
				     GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
				     GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
				     GUC_ENABLE_MIA_CLOCK_GATING);

	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN(dev_priv, 9)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us (in 10ns units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}
}
Example #6
static int guc_log_map(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	void *vaddr;
	int ret;

	lockdep_assert_held(&log->relay.lock);

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&dev_priv->drm.struct_mutex);
	ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (ret)
		return ret;

	/*
	 * Create a WC (Uncached for read) vmalloc mapping of log
	 * buffer pages, so that we can directly get the data
	 * (up-to-date) from memory.
	 */
	vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		DRM_ERROR("Couldn't map log buffer pages: %d\n", ret);
		return ret;
	}

	log->relay.buf_addr = vaddr;

	return 0;
}
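
The teardown counterpart is a straightforward unpin; a sketch of the matching unmap under the same relay lock, assuming this pairing:

static void guc_log_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	/* Drop the WC mapping created by guc_log_map() */
	i915_gem_object_unpin_map(log->vma->obj);
	log->relay.buf_addr = NULL;
}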
Example #7
/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded via MMIO.
	 */
	guc_xfer_rsa(guc);

	ret = guc_xfer_ucode(guc);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	return ret;
}
Example #8
static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	/* Did we complete the xfer? */
	*status = I915_READ(DMA_CTRL);
	return !(*status & START_DMA);
}
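
The predicate is shaped for wait_for()-style polling; a sketch of such a poll (the wrapper name and the 10 ms timeout are illustrative):

static int guc_wait_xfer(struct intel_guc *guc)
{
	u32 status;
	int ret;

	/* Poll until the DMA engine clears START_DMA, then report status */
	ret = wait_for(guc_xfer_completed(guc, &status), 10);
	if (ret)
		DRM_ERROR("GuC DMA incomplete, DMA_CTRL=%#x\n", status);

	return ret;
}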
Example #9
static void guc_disable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	if (HAS_GUC_CT(dev_priv))
		intel_guc_disable_ct(guc);

	guc->send = intel_guc_send_nop;
}
Example #10
/*
 * Read the GuC status register (GUC_STATUS) and store it in the
 * specified location; then return a boolean indicating whether
 * the value matches either of two values representing completion
 * of the GuC boot process.
 *
 * This is used for polling the GuC status in a wait_for()
 * loop below.
 */
static inline bool guc_ready(struct intel_guc *guc, u32 *status)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;

	*status = val;
	return (uk_val == GS_UKERNEL_READY) ||
		((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
}
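
The wait_for() loop the comment refers to looks roughly like this; the 100 ms bound and the boot-ROM RSA check follow the upstream function, though the field names should be treated as era-specific:

static int guc_wait_ucode(struct intel_guc *guc)
{
	u32 status;
	int ret;

	/* Wait for the GuC to declare itself ready (or time out) */
	ret = wait_for(guc_ready(guc, &status), 100);
	DRM_DEBUG_DRIVER("GuC status %#x\n", status);

	/* A signature failure is reported by the boot ROM in GUC_STATUS */
	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
		DRM_ERROR("GuC firmware signature verification failed\n");
		ret = -ENOEXEC;
	}

	return ret;
}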
Example #11
static void guc_stop_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	if (HAS_GUC_CT(i915))
		intel_guc_ct_stop(&guc->ct);

	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;
}
Example #12
/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
		*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		I915_WRITE(guc_send_reg(guc, i), action[i]);

	POSTING_READ(guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(dev_priv,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_RECV_MASK,
					   INTEL_GUC_RECV_MASK,
					   10, 10, &status);
	if (status != INTEL_GUC_STATUS_SUCCESS) {
		/*
		 * Either the GuC explicitly returned an error (which
		 * we convert to -EIO here) or no response at all was
		 * received within the timeout limit (-ETIMEDOUT)
		 */
		if (ret != -ETIMEDOUT)
			ret = -EIO;

		DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
			 " ret=%d status=0x%08X response=0x%08X\n",
			 action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
	}

	intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
Example #13
/* Copy RSA signature from the fw image to HW for verification */
static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	int i;

	sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
			   rsa, sizeof(rsa), guc->fw.rsa_offset);

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
}
Example #14
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	guc_init_send_regs(guc);

	if (HAS_GUC_CT(dev_priv))
		return intel_guc_enable_ct(guc);

	guc->send = intel_guc_send_mmio;
	return 0;
}
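
Callers never pick a backend directly; they go through a trivial dispatch wrapper over guc->send, as implied by the assignments above:

static inline int
intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	/* Dispatch to MMIO- or CT-based transport, whichever is installed */
	return guc->send(guc, action, len);
}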
Example #15
/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
void intel_guc_init_params(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 params[GUC_CTL_MAX_DWORDS];
	int i;

	memset(params, 0, sizeof(params));

	params[GUC_CTL_DEVICE_INFO] |=
		(get_gt_type(dev_priv) << GUC_CTL_GT_TYPE_SHIFT) |
		(get_core_family(dev_priv) << GUC_CTL_CORE_FAMILY_SHIFT);

	/*
	 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
	 * second. This ARAT is calculated by:
	 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
	 */
	params[GUC_CTL_ARAT_HIGH] = 0;
	params[GUC_CTL_ARAT_LOW] = 100000000;

	params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;

	params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
			GUC_CTL_VCS2_ENABLED;

	params[GUC_CTL_LOG_PARAMS] = guc->log.flags;

	if (i915_modparams.guc_log_level >= 0) {
		params[GUC_CTL_DEBUG] =
			i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
	} else {
		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
	}

	/* If GuC submission is enabled, set up additional parameters here */
	if (i915_modparams.enable_guc_submission) {
		u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
		u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
		u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;

		params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
		params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;

		pgs >>= PAGE_SHIFT;
		params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
			(ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);

		params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;

		/* Unmask this bit to enable the GuC's internal scheduler */
		params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
	}
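
	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved, so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);

	I915_WRITE(SOFT_SCRATCH(0), 0);

	/* Hand the parameter block to the GuC */
	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		I915_WRITE(GUC_CTL(i), params[i]);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
}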
Example #16
static int guc_enable_communication(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);

	guc_enable_interrupts(guc);

	if (HAS_GUC_CT(i915))
		return intel_guc_ct_enable(&guc->ct);

	guc->send = intel_guc_send_mmio;
	guc->handler = intel_guc_to_host_event_handler_mmio;
	return 0;
}
Example #17
/* Copy RSA signature from the fw image to HW for verification */
static void guc_xfer_rsa(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uc_fw *fw = &guc->fw;
	struct sg_table *pages = fw->obj->mm.pages;
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	int i;

	sg_pcopy_to_buffer(pages->sgl, pages->nents,
			   rsa, sizeof(rsa), fw->rsa_offset);

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
}
Example #18
static void guc_log_capture_logs(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	intel_wakeref_t wakeref;

	guc_read_update_log_buffer(log);

	/*
	 * The device is generally expected to be active at this point, so
	 * the runtime pm get/put should be really quick.
	 */
	with_intel_runtime_pm(dev_priv, wakeref)
		guc_action_flush_log_complete(guc);
}
Example #19
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,bxt */
	if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
Example #20
/*
 * Basic client sanity check, handy to validate create_clients.
 */
static int validate_client(struct intel_guc_client *client,
			   int client_priority,
			   bool is_preempt_client)
{
	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
	struct i915_gem_context *ctx_owner = is_preempt_client ?
			dev_priv->preempt_context : dev_priv->kernel_context;

	if (client->owner != ctx_owner ||
	    client->engines != INTEL_INFO(dev_priv)->ring_mask ||
	    client->priority != client_priority ||
	    client->doorbell_id == GUC_DOORBELL_INVALID)
		return -EINVAL;

	return 0;
}
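
A hedged fragment showing how a selftest-style caller might use it; the client variable and error label are illustrative:

	/* Illustrative: sanity-check a freshly created normal-priority client */
	err = validate_client(client, GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
	if (err) {
		pr_err("GuC client not properly constructed\n");
		goto out;
	}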
Example #21
static void guc_init_send_regs(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
	guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}
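
The guc_send_reg() helper referenced here just indexes into the scratch range recorded in send_regs; a sketch consistent with the base/count set above:

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	/* Registers are 32-bit, hence the stride of 4 bytes */
	return _MMIO(guc->send_regs.base + 4 * i);
}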
Example #22
/* Copy RSA signature from the fw image to HW for verification */
static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uc_fw *guc_fw = &guc->fw;
	struct sg_table *sg = vma->pages;
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	int i;

	if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
			       guc_fw->rsa_offset) != sizeof(rsa))
		return -EINVAL;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}
Example #23
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_i915(guc);
	intel_wakeref_t wakeref;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete; otherwise the forceful flush may not actually
	 * happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(i915, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated log buffer by now, so capture it */
	guc_log_capture_logs(log);
}
Example #24
/*
 * Load the GuC firmware blob into the MinuteIA.
 */
static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	int ret;

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Enable MIA caching. GuC clock gating is disabled. */
	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);

	/* WaDisableMinuteIaClockGating:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
					      ~GUC_ENABLE_MIA_CLOCK_GATING));
	}

	/* WaC6DisallowByGfxPause:bxt */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

	if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
	else
		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);

	if (IS_GEN9(dev_priv)) {
		/* DOP Clock Gating Enable for GuC clocks */
		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
					    I915_READ(GEN7_MISCCPCTL)));

		/* allows for 5us (in 10ns units) before GT can go to RC6 */
		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
	}

	ret = guc_ucode_xfer_dma(dev_priv, vma);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
Example #25
/**
 * intel_guc_ads_create() - creates GuC ADS
 * @guc: intel_guc struct
 *
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct i915_vma *vma, *kernel_ctx_vma;
	struct page *page;
	/* The ads obj includes the struct itself and buffers passed to GuC */
	struct {
		struct guc_ads ads;
		struct guc_policies policies;
		struct guc_mmio_reg_state reg_state;
		u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
	} __packed *blob;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
	u32 base;

	GEM_BUG_ON(guc->ads_vma);

	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	guc->ads_vma = vma;

	page = i915_vma_first_page(vma);
	blob = kmap(page);

	/* GuC scheduling policies */
	guc_policies_init(&blob->policies);

	/* MMIO reg state */
	for_each_engine(engine, dev_priv, id) {
		blob->reg_state.white_list[engine->guc_id].mmio_start =
			engine->mmio_base + GUC_MMIO_WHITE_LIST_START;

		/* Nothing to be saved or restored for now. */
		blob->reg_state.white_list[engine->guc_id].count = 0;
	}
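
	/*
	 * The GuC requires a "Golden Context" when it reinitialises engines
	 * after a reset: use the render engine's default context, which is
	 * pinned in the GGTT, skipping our header page of GuC shared data.
	 * (The exact helper spellings for the context lookup and GGTT offset
	 * varied across versions of this function, so treat them as assumed.)
	 */
	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
	blob->ads.golden_context_lrca =
		guc_ggtt_offset(kernel_ctx_vma) + skipped_offset;

	/*
	 * The GuC expects us to exclude the skipped portion of the context
	 * image from the size it is to read.
	 */
	for_each_engine(engine, dev_priv, id)
		blob->ads.eng_state_size[engine->guc_id] =
			engine->context_size - skipped_size;

	base = guc_ggtt_offset(vma);
	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);

	kunmap(page);

	return 0;
}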
Example #26
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * The GuC recognizes log levels from 0 up to max; we use 0 as an
	 * indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(dev_priv, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}
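
The level helpers used above can be sketched as comparisons against the two reserved levels (0 = disabled, 1 = enabled but non-verbose); assumed definitions, not quoted from the source:

#define GUC_LOG_LEVEL_DISABLED		0
#define GUC_LOG_LEVEL_NON_VERBOSE	1
#define GUC_LOG_LEVEL_IS_ENABLED(x)	((x) > GUC_LOG_LEVEL_DISABLED)
#define GUC_LOG_LEVEL_IS_VERBOSE(x)	((x) > GUC_LOG_LEVEL_NON_VERBOSE)
/* Verbosity 0 maps to log level 2, so subtract the two reserved levels */
#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({		\
	typeof(x) _x = (x);				\
	GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0;	\
})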
Example #27
static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);

	/* Keep the size of each subbuffer the same as the shared log buffer */
	subbuf_size = log->vma->size;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot-time logs and gives userspace enough latency leeway for
	 * consuming them from relay, without taking up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay channel for GuC logging\n");
		return -ENOMEM;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}
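
relay_open() takes a callbacks table; a minimal sketch of relay_callbacks assuming no-overwrite mode (relay_buf_full() is part of the kernel relay API):

static int subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				 void *prev_subbuf, size_t prev_padding)
{
	/*
	 * No-overwrite mode: when the buffer is full, drop new data rather
	 * than overwriting snapshots userspace has not consumed yet.
	 */
	return !relay_buf_full(buf);
}

static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
};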
Example #28
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *i915 = guc_to_i915(guc);

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	if (!HAS_GUC(i915))
		return;

	if (i915_modparams.guc_firmware_path) {
		guc_fw->path = i915_modparams.guc_firmware_path;
		guc_fw->major_ver_wanted = 0;
		guc_fw->minor_ver_wanted = 0;
	} else if (IS_ICELAKE(i915)) {
		guc_fw->path = ICL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
	} else if (IS_GEMINILAKE(i915)) {
		guc_fw->path = GLK_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
	} else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		guc_fw->path = KBL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
	} else if (IS_BROXTON(i915)) {
		guc_fw->path = BXT_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
	} else if (IS_SKYLAKE(i915)) {
		guc_fw->path = SKL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
	}
}
Example #29
/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 */
static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	struct intel_uc_fw *guc_fw = &guc->fw;
	unsigned long offset;
	u32 status;
	int ret;

	/*
	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
	 * other components
	 */
	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);

	/* Set the source address for the new blob */
	offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/*
	 * Set the DMA destination. Current uCode expects the code to be
	 * loaded at 8k; locations below this are used for the stack.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	/* Finally start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));

	/* Wait for DMA to finish */
	ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
					   2, 100, &status);
	DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);

	return ret;
}
Example #30
static void guc_write_irq_trigger(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
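
This trigger is normally installed as the guc->notify() hook during early init and reached through a one-line wrapper; a sketch of that assumed wiring:

void intel_guc_notify(struct intel_guc *guc)
{
	/* guc->notify is set to guc_write_irq_trigger() at init time */
	guc->notify(guc);
}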