Example #1
/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * the firmware from a .bin file and copy it into internal memory.
 */
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_csr *csr = &dev_priv->csr;

	INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

	if (!HAS_CSR(dev_priv))
		return;

	if (IS_KABYLAKE(dev_priv))
		csr->fw_path = I915_CSR_KBL;
	else if (IS_SKYLAKE(dev_priv))
		csr->fw_path = I915_CSR_SKL;
	else if (IS_BROXTON(dev_priv))
		csr->fw_path = I915_CSR_BXT;
	else {
		DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
		return;
	}

	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);

	/*
	 * Obtain a runtime pm reference, until CSR is loaded,
	 * to avoid entering runtime-suspend.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	schedule_work(&dev_priv->csr.work);
}
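The work item queued above runs a deferred loader that is not shown in this snippet. A minimal sketch of its usual shape, assuming the standard request_firmware()/release_firmware() pair and a hypothetical parse_and_load_csr_fw() step (the field names used to reach the struct device are also assumptions):

static void csr_load_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, csr.work);
	const struct firmware *fw = NULL;

	/* Fetch the blob named by fw_path from the firmware loader. */
	request_firmware(&fw, dev_priv->csr.fw_path,
			 &dev_priv->drm.pdev->dev);

	/* Hypothetical helper: parse the blob and program the payload. */
	parse_and_load_csr_fw(dev_priv, fw);

	/* Drop the POWER_DOMAIN_INIT reference taken in ucode_init(). */
	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	release_firmware(fw);
}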
Example #2
static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *dev_priv)
{
	const struct stepping_info *si;
	unsigned int size;

	if (IS_ICELAKE(dev_priv)) {
		size = ARRAY_SIZE(icl_stepping_info);
		si = icl_stepping_info;
	} else if (IS_SKYLAKE(dev_priv)) {
		size = ARRAY_SIZE(skl_stepping_info);
		si = skl_stepping_info;
	} else if (IS_BROXTON(dev_priv)) {
		size = ARRAY_SIZE(bxt_stepping_info);
		si = bxt_stepping_info;
	} else {
		size = 0;
		si = NULL;
	}

	if (INTEL_REVID(dev_priv) < size)
		return si + INTEL_REVID(dev_priv);

	return &no_stepping_info;
}
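For reference, the per-platform tables consulted above are indexed by PCI revision ID. A plausible shape, with illustrative entries (the real stepping letters per revision are not shown here), is:

struct stepping_info {
	char stepping;
	char substepping;
};

static const struct stepping_info skl_stepping_info[] = {
	{'A', '0'}, {'B', '0'}, {'C', '0'},
	{'D', '0'}, {'E', '0'}, {'F', '0'},
};

/* Fallback when the revision is newer than the table. */
static const struct stepping_info no_stepping_info = { '*', '*' };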
Example #3
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *dev_priv = guc_to_i915(guc);

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	if (!HAS_GUC(dev_priv))
		return;

	if (i915_modparams.guc_firmware_path) {
		guc_fw->path = i915_modparams.guc_firmware_path;
		guc_fw->major_ver_wanted = 0;
		guc_fw->minor_ver_wanted = 0;
	} else if (IS_SKYLAKE(dev_priv)) {
		guc_fw->path = I915_SKL_GUC_UCODE;
		guc_fw->major_ver_wanted = SKL_FW_MAJOR;
		guc_fw->minor_ver_wanted = SKL_FW_MINOR;
	} else if (IS_BROXTON(dev_priv)) {
		guc_fw->path = I915_BXT_GUC_UCODE;
		guc_fw->major_ver_wanted = BXT_FW_MAJOR;
		guc_fw->minor_ver_wanted = BXT_FW_MINOR;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		guc_fw->path = I915_KBL_GUC_UCODE;
		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
	} else {
		DRM_WARN("%s: No firmware known for this platform!\n",
			 intel_uc_fw_type_repr(guc_fw->type));
	}
}
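The path and version macros selected above typically expand from a single template. A hedged sketch of that convention (the version numbers are placeholders, not the actual required versions):

#include <linux/stringify.h>

#define GUC_FW_PATH(platform, major, minor) \
	"i915/" __stringify(platform) "_guc_ver" \
	__stringify(major) "_" __stringify(minor) ".bin"

#define SKL_FW_MAJOR 9
#define SKL_FW_MINOR 33
/* expands to "i915/skl_guc_ver9_33.bin" */
#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR)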
Example #4
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	u32 tmp;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Clear old configurations */
	if (IS_BROXTON(dev_priv)) {
		tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
		tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
		tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
		tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
		tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
		I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
	} else {
		tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
		tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
		I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);

		tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
		tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
		I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
	}
	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
Example #5
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
	int err = 0;

	dev_priv->workarounds.count = 0;

	if (INTEL_GEN(dev_priv) < 8)
		err = 0;
	else if (IS_BROADWELL(dev_priv))
		err = bdw_ctx_workarounds_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_ctx_workarounds_init(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_ctx_workarounds_init(dev_priv);
	else if (IS_BROXTON(dev_priv))
		err = bxt_ctx_workarounds_init(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_ctx_workarounds_init(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_ctx_workarounds_init(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_ctx_workarounds_init(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_ctx_workarounds_init(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		err = icl_ctx_workarounds_init(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
	if (err)
		return err;

	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
			 dev_priv->workarounds.count);
	return 0;
}
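Each per-platform hook appends masked register writes to the dev_priv->workarounds list. A minimal sketch of one such hook, assuming the WA_SET_BIT_MASKED() helper defined elsewhere in the same file (the specific workaround shown is only illustrative):

static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
	/* WaDisableAsyncFlipPerfMode:bdw */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	return 0;
}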
Example #6
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 8)
		return;
	else if (IS_BROADWELL(dev_priv))
		bdw_gt_workarounds_apply(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		chv_gt_workarounds_apply(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		skl_gt_workarounds_apply(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_gt_workarounds_apply(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		kbl_gt_workarounds_apply(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		glk_gt_workarounds_apply(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		cfl_gt_workarounds_apply(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_gt_workarounds_apply(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		icl_gt_workarounds_apply(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
}
Example #7
static u32 calc_residency(struct drm_device *dev,
			  i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
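To make the default scaling concrete: each counter tick is 1.28 us, so a raw count N converts as N * 128 / 100000 = N * 1.28 us / 1000, i.e. the function reports residency in milliseconds. The VLV/CHV and BXT branches substitute that platform's actual tick period (the CZ clock, or the 833.33 ns BXT tick via div = 1200).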
Example #8
static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
					 struct whitelist *w)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	w->count = 0;
	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));

	if (INTEL_GEN(i915) < 8)
		return NULL;
	else if (IS_BROADWELL(i915))
		bdw_whitelist_build(w);
	else if (IS_CHERRYVIEW(i915))
		chv_whitelist_build(w);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(w);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(w);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(w);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(w);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(w);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(w);
	else if (IS_ICELAKE(i915))
		icl_whitelist_build(w);
	else
		MISSING_CASE(INTEL_GEN(i915));

	return w;
}
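The bookkeeping structure and the helper that each *_whitelist_build() hook uses are not shown. A sketch consistent with the code above, assuming RING_MAX_NONPRIV_SLOTS bounds the number of hardware whitelist slots:

struct whitelist {
	i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
	unsigned int count;
	u32 nopid;
};

static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
{
	if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	w->reg[w->count++] = reg;
}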
Example #9
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
			struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
	u32 dsi_clk;

	dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
				    intel_dsi->lane_count);

	/*
	 * From the clock diagram, to get the PLL ratio divider, divide
	 * double the DSI link rate (i.e., the 2*8x = 16x frequency value)
	 * by the ref clock, and make sure to round the result up.
	 */
	dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);

	if (IS_BROXTON(dev_priv)) {
		dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
		dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
	} else {
		dsi_ratio_min = GLK_DSI_PLL_RATIO_MIN;
		dsi_ratio_max = GLK_DSI_PLL_RATIO_MAX;
	}

	if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
		DRM_ERROR("Can't get a suitable ratio from DSI PLL ratios\n");
		return -ECHRNG;
	} else {
		DRM_DEBUG_KMS("DSI PLL calculation is done\n");
	}

	/*
	 * Program the DSI ratio and select the MIPIC and MIPIA PLL outputs
	 * as 8x. The spec says both have to be programmed, even if one is
	 * not getting used. The MIPI_CLOCK_CTL dividers are configured
	 * during modeset.
	 */
	config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;

	/*
	 * As per the recommendation from the hardware team, program the
	 * PVD ratio to 1 if the DSI ratio is <= 50.
	 */
	if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
		config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;

	return 0;
}
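As a worked example of the ratio computation, assuming a 19.2 MHz reference clock (BXT_REF_CLOCK_KHZ = 19200): a DSI link clock of 451200 kHz gives dsi_ratio = DIV_ROUND_UP(2 * 451200, 19200) = 47, which falls within the Broxton range and, being <= 50, also sets BXT_DSI_PLL_PVD_RATIO_1 on that platform.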
Example #10
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
		       struct intel_crtc_state *config)
{
	if (IS_BROXTON(encoder->base.dev))
		return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
	else
		return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
}
Example #11
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling things back in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #12
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_BROXTON(dev))
		bxt_dsi_reset_clocks(encoder, port);
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_dsi_reset_clocks(encoder, port);
}
Example #13
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_disable_dsi_pll(encoder);
	else if (IS_BROXTON(dev))
		bxt_disable_dsi_pll(encoder);
}
Example #14
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
	if (IS_BROXTON(dev_priv))
		return bxt_dsi_pll_is_enabled(dev_priv);

	MISSING_CASE(INTEL_DEVID(dev_priv));

	return false;
}
Example #15
void intel_enable_dsi_pll(struct intel_encoder *encoder,
			  const struct intel_crtc_state *config)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_enable_dsi_pll(encoder, config);
	else if (IS_BROXTON(dev))
		bxt_enable_dsi_pll(encoder, config);
}
Example #16
static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
{
	/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
	if (intel_vtd_active() &&
	    (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
		DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
		return true;
	}

	return false;
}
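A hedged usage sketch: the predicate is typically consulted once at FBC init time to mask the feature off for the affected platforms, e.g.:

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->has_fbc = false;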
Example #17
/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 * @dmlr: whether this is device model level reset
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void  *mmio = gvt->firmware.mmio;

	if (dmlr) {
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

		/* set bits 0:2 (Core C-State) to C0 */
		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;

		if (IS_BROXTON(vgpu->gvt->dev_priv)) {
			vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
				    ~(BIT(0) | BIT(1));
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
				    ~PHY_POWER_GOOD;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
				    ~BIT(30);
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
				    ~BXT_PHY_LANE_ENABLED;
			vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
				    BXT_PHY_CMNLANE_POWERDOWN_ACK |
				    BXT_PHY_LANE_POWERDOWN_ACK;
		}
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
		/*
		 * Only reset the engine-related MMIO, starting at 0x44200;
		 * interrupt MMIO (including DE) and display-related MMIO
		 * will not be touched.
		 */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}
}
Example #18
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		return true;
	if (IS_SKYLAKE(dev_priv))
		return true;
	if (IS_KABYLAKE(dev_priv))
		return true;
	if (IS_BROXTON(dev_priv))
		return true;
	return false;
}
Example #19
int intel_compute_dsi_pll(struct intel_encoder *encoder,
			  struct intel_crtc_state *config)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return vlv_compute_dsi_pll(encoder, config);
	else if (IS_BROXTON(dev))
		return bxt_compute_dsi_pll(encoder, config);

	return -ENODEV;
}
Example #20
/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}
Example #21
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
Example #22
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
	uint32_t val, mask;

	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

	if (IS_BROXTON(dev_priv))
		mask |= DC_STATE_DEBUG_MASK_CORES;

	/* The below bit doesn't need to be cleared ever afterwards */
	val = I915_READ(DC_STATE_DEBUG);
	if ((val & mask) != mask) {
		val |= mask;
		I915_WRITE(DC_STATE_DEBUG, val);
		POSTING_READ(DC_STATE_DEBUG);
	}
}
Example #23
/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * the firmware from a .bin file and copy it into internal memory.
 */
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_csr *csr = &dev_priv->csr;

	INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

	if (!HAS_CSR(dev_priv))
		return;

	if (i915_modparams.dmc_firmware_path)
		csr->fw_path = i915_modparams.dmc_firmware_path;
	else if (IS_ICELAKE(dev_priv))
		csr->fw_path = I915_CSR_ICL;
	else if (IS_CANNONLAKE(dev_priv))
		csr->fw_path = I915_CSR_CNL;
	else if (IS_GEMINILAKE(dev_priv))
		csr->fw_path = I915_CSR_GLK;
	else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
		csr->fw_path = I915_CSR_KBL;
	else if (IS_SKYLAKE(dev_priv))
		csr->fw_path = I915_CSR_SKL;
	else if (IS_BROXTON(dev_priv))
		csr->fw_path = I915_CSR_BXT;

	/*
	 * Obtain a runtime pm reference, until CSR is loaded,
	 * to avoid entering runtime-suspend.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (csr->fw_path == NULL) {
		DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
		WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));

		return;
	}

	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
	schedule_work(&dev_priv->csr.work);
}
Example #24
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
			const struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	enum port port;
	u32 val;

	DRM_DEBUG_KMS("\n");

	/* Configure PLL values */
	I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
	POSTING_READ(BXT_DSI_PLL_CTL);

	/* Program TX, RX, Dphy clocks */
	if (IS_BROXTON(dev_priv)) {
		for_each_dsi_port(port, intel_dsi->ports)
			bxt_dsi_program_clocks(encoder->base.dev, port, config);
	} else {
		glk_dsi_program_esc_clock(encoder->base.dev, config);
	}

	/* Enable DSI PLL */
	val = I915_READ(BXT_DSI_PLL_ENABLE);
	val |= BXT_DSI_PLL_DO_ENABLE;
	I915_WRITE(BXT_DSI_PLL_ENABLE, val);

	/* Timeout and fail if PLL not locked */
	if (intel_wait_for_register(dev_priv,
				    BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED,
				    BXT_DSI_PLL_LOCKED,
				    1)) {
		DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
		return;
	}

	DRM_DEBUG_KMS("DSI PLL locked\n");
}
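For reference, intel_wait_for_register(dev_priv, reg, mask, value, timeout) polls until (I915_READ(reg) & mask) == value or the timeout (in milliseconds) expires, returning 0 on success; the 1 ms budget above therefore bounds the wait for BXT_DSI_PLL_LOCKED.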
Example #25
static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
{
	const struct stepping_info *si;
	unsigned int size;

	if (IS_KABYLAKE(dev)) {
		size = ARRAY_SIZE(kbl_stepping_info);
		si = kbl_stepping_info;
	} else if (IS_SKYLAKE(dev)) {
		size = ARRAY_SIZE(skl_stepping_info);
		si = skl_stepping_info;
	} else if (IS_BROXTON(dev)) {
		size = ARRAY_SIZE(bxt_stepping_info);
		si = bxt_stepping_info;
	} else {
		return NULL;
	}

	if (INTEL_REVID(dev) < size)
		return si + INTEL_REVID(dev);

	return NULL;
}
Example #26
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
	u32 tiled, int stride_mask, int bpp)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
	u32 stride = stride_reg;

	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BROXTON(dev_priv)) {
		switch (tiled) {
		case PLANE_CTL_TILED_LINEAR:
			stride = stride_reg * 64;
			break;
		case PLANE_CTL_TILED_X:
			stride = stride_reg * 512;
			break;
		case PLANE_CTL_TILED_Y:
			stride = stride_reg * 128;
			break;
		case PLANE_CTL_TILED_YF:
			if (bpp == 8)
				stride = stride_reg * 64;
			else if (bpp == 16 || bpp == 32 || bpp == 64)
				stride = stride_reg * 128;
			else
				gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
			break;
		default:
			gvt_dbg_core("skl: unsupported tile format:%x\n",
				tiled);
		}
	}

	return stride;
}
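To make the tile scaling concrete: the stride register holds a count of tile widths, so a register value of 32 on an X-tiled plane (512-byte-wide tiles) decodes to 32 * 512 = 16384 bytes, while the same value on a linear plane decodes to 32 * 64 = 2048 bytes.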
Example #27
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
	struct drm_i915_private *i915 = guc_to_i915(guc);

	GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);

	if (!HAS_GUC(i915))
		return;

	if (i915_modparams.guc_firmware_path) {
		guc_fw->path = i915_modparams.guc_firmware_path;
		guc_fw->major_ver_wanted = 0;
		guc_fw->minor_ver_wanted = 0;
	} else if (IS_ICELAKE(i915)) {
		guc_fw->path = ICL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
	} else if (IS_GEMINILAKE(i915)) {
		guc_fw->path = GLK_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
	} else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		guc_fw->path = KBL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
	} else if (IS_BROXTON(i915)) {
		guc_fw->path = BXT_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
	} else if (IS_SKYLAKE(i915)) {
		guc_fw->path = SKL_GUC_FIRMWARE_PATH;
		guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
		guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
	}
}
Example #28
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
			      const struct firmware *fw)
{
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header *dmc_header;
	struct intel_csr *csr = &dev_priv->csr;
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);
	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
	uint32_t i;
	uint32_t *dmc_payload;
	uint32_t required_version;

	if (!fw)
		return NULL;

	/* Extract CSS header information */
	css_header = (struct intel_css_header *)fw->data;
	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
			  (css_header->header_len * 4));
		return NULL;
	}

	csr->version = css_header->version;

	if (IS_KABYLAKE(dev_priv)) {
		required_version = KBL_CSR_VERSION_REQUIRED;
	} else if (IS_SKYLAKE(dev_priv)) {
		required_version = SKL_CSR_VERSION_REQUIRED;
	} else if (IS_BROXTON(dev_priv)) {
		required_version = BXT_CSR_VERSION_REQUIRED;
	} else {
		MISSING_CASE(INTEL_REVID(dev_priv));
		required_version = 0;
	}

	if (csr->version != required_version) {
		DRM_INFO("Refusing to load DMC firmware v%u.%u,"
			 " please use v%u.%u [" FIRMWARE_URL "].\n",
			 CSR_VERSION_MAJOR(csr->version),
			 CSR_VERSION_MINOR(csr->version),
			 CSR_VERSION_MAJOR(required_version),
			 CSR_VERSION_MINOR(required_version));
		return NULL;
	}

	readcount += sizeof(struct intel_css_header);

	/* Extract package header information */
	package_header = (struct intel_package_header *)
		&fw->data[readcount];
	if (sizeof(struct intel_package_header) !=
	    (package_header->header_len * 4)) {
		DRM_ERROR("Firmware has wrong package header length %u bytes\n",
			  (package_header->header_len * 4));
		return NULL;
	}
	readcount += sizeof(struct intel_package_header);

	/* Search for dmc_offset to find the firmware binary. */
	for (i = 0; i < package_header->num_entries; i++) {
		if (package_header->fw_info[i].substepping == '*' &&
		    si->stepping == package_header->fw_info[i].stepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (si->stepping == package_header->fw_info[i].stepping &&
			   si->substepping == package_header->fw_info[i].substepping) {
			dmc_offset = package_header->fw_info[i].offset;
			break;
		} else if (package_header->fw_info[i].stepping == '*' &&
			   package_header->fw_info[i].substepping == '*')
			dmc_offset = package_header->fw_info[i].offset;
	}
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("Firmware not supported for %c stepping\n",
			  si->stepping);
		return NULL;
	}
	readcount += dmc_offset;

	/* Extract dmc_header information. */
	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
	if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
		DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
			  (dmc_header->header_len));
		return NULL;
	}
	readcount += sizeof(struct intel_dmc_header);

	/* Cache the dmc header info. */
	if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
		DRM_ERROR("Firmware has wrong mmio count %u\n",
			  dmc_header->mmio_count);
		return NULL;
	}
	csr->mmio_count = dmc_header->mmio_count;
	for (i = 0; i < dmc_header->mmio_count; i++) {
		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
			DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
				  dmc_header->mmioaddr[i]);
			return NULL;
		}
		csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
		csr->mmiodata[i] = dmc_header->mmiodata[i];
	}

	/* fw_size is in dwords; multiply by 4 to convert to bytes. */
	nbytes = dmc_header->fw_size * 4;
	if (nbytes > CSR_MAX_FW_SIZE) {
		DRM_ERROR("CSR firmware too big (%u bytes)\n", nbytes);
		return NULL;
	}
	csr->dmc_fw_size = dmc_header->fw_size;

	dmc_payload = kmalloc(nbytes, GFP_KERNEL);
	if (!dmc_payload) {
		DRM_ERROR("Memory allocation failed for dmc payload\n");
		return NULL;
	}

	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
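The header layouts themselves are not shown above. A sketch of struct intel_dmc_header restricted to the fields the parser actually touches (the array bound of 8 and the field ordering are assumptions):

struct intel_dmc_header {
	u32 header_len;		/* in bytes here, unlike the CSS/package headers */
	u32 fw_size;		/* payload size in dwords */
	u32 mmio_count;		/* number of valid mmioaddr/mmiodata pairs */
	u32 mmioaddr[8];
	u32 mmiodata[8];
};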
Example #29
/**
 * intel_vgpu_decode_primary_plane - Decode primary plane
 * @vgpu: input vgpu
 * @plane: primary plane to save decoded info
 *
 * This function is called to decode the primary plane.
 *
 * Returns:
 * 0 on success, non-zero if failed.
 */
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
	struct intel_vgpu_primary_plane_format *plane)
{
	u32 val, fmt;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	int pipe;

	pipe = get_active_pipe(vgpu);
	if (pipe >= I915_MAX_PIPES)
		return -ENODEV;

	val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
	plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
	if (!plane->enabled)
		return -ENODEV;

	if (IS_SKYLAKE(dev_priv) ||
	    IS_KABYLAKE(dev_priv) ||
	    IS_BROXTON(dev_priv)) {
		plane->tiled = val & PLANE_CTL_TILED_MASK;
		fmt = skl_format_to_drm(
			val & PLANE_CTL_FORMAT_MASK,
			val & PLANE_CTL_ORDER_RGBX,
			val & PLANE_CTL_ALPHA_MASK,
			val & PLANE_CTL_YUV422_ORDER_MASK);

		if (fmt >= ARRAY_SIZE(skl_pixel_formats)) {
			gvt_vgpu_err("Out-of-bounds pixel format index\n");
			return -EINVAL;
		}

		plane->bpp = skl_pixel_formats[fmt].bpp;
		plane->drm_format = skl_pixel_formats[fmt].drm_format;
	} else {
		plane->tiled = val & DISPPLANE_TILED;
		fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
		plane->bpp = bdw_pixel_formats[fmt].bpp;
		plane->drm_format = bdw_pixel_formats[fmt].drm_format;
	}

	if (!plane->bpp) {
		gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
		return -EINVAL;
	}

	plane->hw_format = fmt;

	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
		return -EINVAL;

	plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
	if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("Failed to translate primary plane gma 0x%x to gpa\n",
			     plane->base);
		return -EINVAL;
	}

	plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
		(IS_SKYLAKE(dev_priv)
		|| IS_KABYLAKE(dev_priv)
		|| IS_BROXTON(dev_priv)) ?
			(_PRI_PLANE_STRIDE_MASK >> 6) :
				_PRI_PLANE_STRIDE_MASK, plane->bpp);

	plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
		_PIPE_H_SRCSZ_SHIFT;
	plane->width += 1;
	plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
			_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
	plane->height += 1;	/* raw height is one less than the real value */

	val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
	plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
		_PRI_PLANE_X_OFF_SHIFT;
	plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
		_PRI_PLANE_Y_OFF_SHIFT;

	return 0;
}
Example #30
/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * the firmware from a .bin file and copy it into internal memory.
 */
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_csr *csr = &dev_priv->csr;

	INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

	if (!HAS_CSR(dev_priv))
		return;

	/*
	 * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working CSR for whatever
	 * reason.
	 */
	intel_csr_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * Allow loading the firmware via the module parameter,
		 * using the last known size.
		 */
		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
	} else if (IS_GEN(dev_priv, 11)) {
		csr->fw_path = ICL_CSR_PATH;
		csr->required_version = ICL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
	} else if (IS_CANNONLAKE(dev_priv)) {
		csr->fw_path = CNL_CSR_PATH;
		csr->required_version = CNL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(dev_priv)) {
		csr->fw_path = GLK_CSR_PATH;
		csr->required_version = GLK_CSR_VERSION_REQUIRED;
		csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		csr->fw_path = KBL_CSR_PATH;
		csr->required_version = KBL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(dev_priv)) {
		csr->fw_path = SKL_CSR_PATH;
		csr->required_version = SKL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
	} else if (IS_BROXTON(dev_priv)) {
		csr->fw_path = BXT_CSR_PATH;
		csr->required_version = BXT_CSR_VERSION_REQUIRED;
		csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
	}

	if (i915_modparams.dmc_firmware_path) {
		if (strlen(i915_modparams.dmc_firmware_path) == 0) {
			csr->fw_path = NULL;
			DRM_INFO("Disabling CSR firmware and runtime PM\n");
			return;
		}

		csr->fw_path = i915_modparams.dmc_firmware_path;
		/* Bypass version check for firmware override. */
		csr->required_version = 0;
	}

	if (csr->fw_path == NULL) {
		DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
		WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));

		return;
	}

	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
	schedule_work(&dev_priv->csr.work);
}