Example #1
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #2
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
	int err = 0;

	dev_priv->workarounds.count = 0;

	if (INTEL_GEN(dev_priv) < 8)
		err = 0;
	else if (IS_BROADWELL(dev_priv))
		err = bdw_ctx_workarounds_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_ctx_workarounds_init(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_ctx_workarounds_init(dev_priv);
	else if (IS_BROXTON(dev_priv))
		err = bxt_ctx_workarounds_init(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_ctx_workarounds_init(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_ctx_workarounds_init(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_ctx_workarounds_init(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_ctx_workarounds_init(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		err = icl_ctx_workarounds_init(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
	if (err)
		return err;

	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
			 dev_priv->workarounds.count);
	return 0;
}
Example #3
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 8)
		return;
	else if (IS_BROADWELL(dev_priv))
		bdw_gt_workarounds_apply(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		chv_gt_workarounds_apply(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		skl_gt_workarounds_apply(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_gt_workarounds_apply(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		kbl_gt_workarounds_apply(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		glk_gt_workarounds_apply(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		cfl_gt_workarounds_apply(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_gt_workarounds_apply(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		icl_gt_workarounds_apply(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
}
Example #4
static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
					 struct whitelist *w)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	w->count = 0;
	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));

	if (INTEL_GEN(i915) < 8)
		return NULL;
	else if (IS_BROADWELL(i915))
		bdw_whitelist_build(w);
	else if (IS_CHERRYVIEW(i915))
		chv_whitelist_build(w);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(w);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(w);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(w);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(w);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(w);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(w);
	else if (IS_ICELAKE(i915))
		icl_whitelist_build(w);
	else
		MISSING_CASE(INTEL_GEN(i915));

	return w;
}
Example #5
bool
gputop_perf_initialize(void)
{
    if (intel_dev.device)
	return true;

    drm_fd = open_render_node(&intel_dev);
    if (drm_fd < 0) {
	gputop_log(GPUTOP_LOG_LEVEL_HIGH, "Failed to open render node", -1);
	return false;
    }

    /* NB: eu_count needs to be initialized before declaring counters */
    init_dev_info(drm_fd, intel_dev.device);
    page_size = sysconf(_SC_PAGE_SIZE);

    if (IS_HASWELL(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_compute_basic_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_compute_extended_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_memory_reads_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_memory_writes_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_sampler_balance_counter_query_hsw(&gputop_devinfo);
    } else if (IS_BROADWELL(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_bdw(&gputop_devinfo);
    } else if (IS_CHERRYVIEW(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_chv(&gputop_devinfo);
    } else if (IS_SKYLAKE(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_skl(&gputop_devinfo);
    } else
	assert(0);

    return true;
}
Example #6
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
				bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}
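A minimal standalone sketch of the first-error-wins pattern used above ("if (!ret) ret = err;"): every step still runs, but only the first failure is reported. The step functions below are hypothetical stand-ins for the clock/wake calls, not part of the driver.

#include <stdio.h>

/* Hypothetical steps standing in for the force-clock/allow-wake calls. */
static int step_one(void)   { return 0; }
static int step_two(void)   { return -5; }	/* first failure */
static int step_three(void) { return -7; }	/* also fails, but ignored */

int main(void)
{
	int ret = 0, err;

	err = step_one();   if (!ret) ret = err;
	err = step_two();   if (!ret) ret = err;
	err = step_three(); if (!ret) ret = err;

	printf("%d\n", ret);	/* prints -5: the first error wins */
	return 0;
}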
Example #7
static u32 calc_residency(struct drm_device *dev,
			  i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
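As a sanity check on the fixed-point conversion above: in the default case one counter tick is 1.28 us, so raw * 128 / 100000 yields milliseconds. A standalone sketch with a made-up counter value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 781250;		/* hypothetical counter reading */
	uint64_t units = 128, div = 100000;
	uint64_t ms = (raw * units + div - 1) / div;	/* DIV_ROUND_UP_ULL */

	/* 781250 ticks * 1.28 us = 1,000,000 us, i.e. 1000 ms */
	printf("%llu ticks -> %llu ms\n",
	       (unsigned long long)raw, (unsigned long long)ms);
	return 0;
}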
Example #8
static void i915_save_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Display arbitration control */
	if (INTEL_INFO(dev)->gen <= 4)
		dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);

	/* LVDS state */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
		dev_priv->regfile.saveLVDS = I915_READ(LVDS);

	/* Panel power sequencer */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
	} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
		dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
	}

	/* save FBC interval */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
		dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
}
Example #9
static struct platform_device *
lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct platform_device_info pinfo = {};
	struct resource *rsc;
	struct platform_device *platdev;
	struct intel_hdmi_lpe_audio_pdata *pdata;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	rsc = kcalloc(2, sizeof(*rsc), GFP_KERNEL);
	if (!rsc) {
		kfree(pdata);
		return ERR_PTR(-ENOMEM);
	}

	rsc[0].start    = rsc[0].end = dev_priv->lpe_audio.irq;
	rsc[0].flags    = IORESOURCE_IRQ;
	rsc[0].name     = "hdmi-lpe-audio-irq";

	rsc[1].start    = pci_resource_start(dev->pdev, 0) +
		I915_HDMI_LPE_AUDIO_BASE;
	rsc[1].end      = pci_resource_start(dev->pdev, 0) +
		I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1;
	rsc[1].flags    = IORESOURCE_MEM;
	rsc[1].name     = "hdmi-lpe-audio-mmio";

	pinfo.parent = dev->dev;
	pinfo.name = "hdmi-lpe-audio";
	pinfo.id = -1;
	pinfo.res = rsc;
	pinfo.num_res = 2;
	pinfo.data = pdata;
	pinfo.size_data = sizeof(*pdata);
	pinfo.dma_mask = DMA_BIT_MASK(32);

	pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes;
	pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */
	pdata->port[0].pipe = -1;
	pdata->port[1].pipe = -1;
	pdata->port[2].pipe = -1;
	spin_lock_init(&pdata->lpe_audio_slock);

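	/*
	 * platform_device_register_full() deep-copies both the resources
	 * and the platform data into the new device, which is why rsc and
	 * pdata can be freed unconditionally right after the call.
	 */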
	platdev = platform_device_register_full(&pinfo);
	kfree(rsc);
	kfree(pdata);

	if (IS_ERR(platdev)) {
		DRM_ERROR("Failed to allocate LPE audio platform device\n");
		return platdev;
	}

	pm_runtime_no_callbacks(&platdev->dev);

	return platdev;
}
Example #10
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
			      size_t *values_cnt)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain power_domain;
	enum intel_pipe_crc_source source;
	u32 val = 0; /* shut up gcc */
	int ret = 0;

	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	power_domain = POWER_DOMAIN_PIPE(crtc->index);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
	if (ret != 0)
		goto out;

	if (source) {
		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(intel_crtc);
	}

	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
	POSTING_READ(PIPE_CRC_CTL(crtc->index));

	if (!source) {
		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);

		hsw_enable_ips(intel_crtc);
	}

	pipe_crc->skipped = 0;
	*values_cnt = 5;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Example #11
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_disable_dsi_pll(encoder);
	else if (IS_BROXTON(dev))
		bxt_disable_dsi_pll(encoder);
}
Example #12
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_BROXTON(dev))
		bxt_dsi_reset_clocks(encoder, port);
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_dsi_reset_clocks(encoder, port);
}
Example #13
void intel_enable_dsi_pll(struct intel_encoder *encoder,
			  const struct intel_crtc_state *config)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_enable_dsi_pll(encoder, config);
	else if (IS_BROXTON(dev))
		bxt_enable_dsi_pll(encoder, config);
}
Example #14
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
			struct intel_crtc_state *config,
			int target_dsi_clk)
{
	unsigned int m_min, m_max, p_min = 2, p_max = 6;
	unsigned int m, n, p;
	unsigned int calc_m, calc_p;
	int delta, ref_clk;

	/* target_dsi_clk is expected in kHz */
	if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
		DRM_ERROR("DSI CLK Out of Range\n");
		return -ECHRNG;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		ref_clk = 100000;
		n = 4;
		m_min = 70;
		m_max = 96;
	} else {
		ref_clk = 25000;
		n = 1;
		m_min = 62;
		m_max = 92;
	}

	calc_p = p_min;
	calc_m = m_min;
	delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));

	for (m = m_min; m <= m_max && delta; m++) {
		for (p = p_min; p <= p_max && delta; p++) {
			/*
			 * Find the optimal m and p divisors with minimal delta
			 * +/- the required clock
			 */
			int calc_dsi_clk = (m * ref_clk) / (p * n);
			int d = abs(target_dsi_clk - calc_dsi_clk);
			if (d < delta) {
				delta = d;
				calc_m = m;
				calc_p = p;
			}
		}
	}

	/* register has log2(N1), this works fine for powers of two */
	config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
	config->dsi_pll.div =
		(ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
		(u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;

	return 0;
}
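The divisor search above can be exercised in isolation. A sketch of the VLV path (ref_clk 25000 kHz, N = 1) with a made-up target frequency; for 300000 kHz it settles on m = 72, p = 6, an exact match:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int ref_clk = 25000, n = 1, target = 300000;	/* kHz */
	int best_m = 62, best_p = 2;
	int delta = abs(target - (62 * ref_clk) / (2 * n));

	for (int m = 62; m <= 92 && delta; m++)
		for (int p = 2; p <= 6 && delta; p++) {
			int d = abs(target - (m * ref_clk) / (p * n));
			if (d < delta) {
				delta = d;
				best_m = m;
				best_p = p;
			}
		}

	printf("m=%d p=%d -> %d kHz\n", best_m, best_p,
	       (best_m * ref_clk) / (best_p * n));	/* m=72 p=6 -> 300000 */
	return 0;
}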
Example #15
int intel_compute_dsi_pll(struct intel_encoder *encoder,
			  struct intel_crtc_state *config)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return vlv_compute_dsi_pll(encoder, config);
	else if (IS_BROXTON(dev))
		return bxt_compute_dsi_pll(encoder, config);

	return -ENODEV;
}
Example #16
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	int ret;

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		/*
		 * The snippet was truncated here; a reconstructed non-VLV
		 * path: read the current actual frequency from the CAGF
		 * field of GEN6_RPSTAT1 (mask/shift vary by platform).
		 */
		u32 rpstat = I915_READ(GEN6_RPSTAT1);

		ret = intel_gpu_freq(dev_priv,
				     (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
Example #17
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
			       enum pipe pipe,
			       enum intel_pipe_crc_source *source, u32 *val)
{
	if (IS_GEN2(dev_priv))
		return i8xx_pipe_crc_ctl_reg(source, val);
	else if (INTEL_GEN(dev_priv) < 5)
		return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
	else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
		return ilk_pipe_crc_ctl_reg(source, val);
	else
		return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
Example #18
/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}
Example #19
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
Example #20
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
			      size_t *values_cnt)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
	enum intel_display_power_domain power_domain;
	enum intel_pipe_crc_source source;
	u32 val = 0; /* shut up gcc */
	int ret = 0;

	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	power_domain = POWER_DOMAIN_PIPE(crtc->index);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
	if (ret != 0)
		goto out;

	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
	POSTING_READ(PIPE_CRC_CTL(crtc->index));

	if (!source) {
		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if ((IS_HASWELL(dev_priv) ||
			  IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
			hsw_pipe_A_crc_wa(dev_priv, false);
	}

	pipe_crc->skipped = 0;
	*values_cnt = 5;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Example #21
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{

	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
					_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));


	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
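The _MASKED_BIT_* writes above rely on i915's masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates, so a single bit can be set or cleared without a read-modify-write cycle. A simplified standalone sketch of the idea (the real macros live in i915_reg.h):

#include <stdio.h>

/* Simplified: upper halfword = write-enable mask, lower = new values. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a)	((a) << 16)

int main(void)
{
	unsigned int kernel_bit = 1 << 0;	/* stand-in for FORCEWAKE_KERNEL */

	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(kernel_bit));	/* 0x00010001 */
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(kernel_bit));	/* 0x00010000 */
	return 0;
}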
Example #22
static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
{
	bool lpe_present = false;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		static const struct pci_device_id atom_hdaudio_ids[] = {
			/* Baytrail */
			{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f04)},
			/* Braswell */
			{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2284)},
			{}
		};

		if (!pci_dev_present(atom_hdaudio_ids)) {
			DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
			lpe_present = true;
		}
	}
	return lpe_present;
}
Example #23
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
Example #24
static void i915_restore_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask = ~LVDS_PORT_EN;

	/* Display arbitration */
	if (INTEL_INFO(dev)->gen <= 4)
		I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);

	/* LVDS state */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);

	/* Panel power sequencer */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
	} else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
		I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
	}

	/* Only restore FBC info on platforms that support FBC */
	intel_fbc_disable(dev_priv);

	/* restore FBC interval */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
		I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);

	i915_redisable_vga(dev);
}
Example #25
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
		     struct intel_crtc_state *config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
	u32 dsi_clock, pclk;
	u32 pll_ctl, pll_div;
	u32 m = 0, p = 0, n;
	int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
	int i;

	DRM_DEBUG_KMS("\n");

	mutex_lock(&dev_priv->sb_lock);
	pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
	mutex_unlock(&dev_priv->sb_lock);

	config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
	config->dsi_pll.div = pll_div;

	/* mask out other bits and extract the P1 divisor */
	pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
	pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);

	/* N1 divisor */
	n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
	n = 1 << n; /* register has log2(N1) */

	/* mask out the other bits and extract the M1 divisor */
	pll_div &= DSI_PLL_M1_DIV_MASK;
	pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;

	while (pll_ctl) {
		pll_ctl = pll_ctl >> 1;
		p++;
	}
	p--;

	if (!p) {
		DRM_ERROR("wrong P1 divisor\n");
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
		if (lfsr_converts[i] == pll_div)
			break;
	}

	if (i == ARRAY_SIZE(lfsr_converts)) {
		DRM_ERROR("wrong m_seed programmed\n");
		return 0;
	}

	m = i + 62;

	dsi_clock = (m * refclk) / (p * n);

	/* pixel_format and pipe_bpp should agree */
	assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);

	pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);

	return pclk;
}
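Worked example of the recovery math above, with made-up divisor values: m = 72, n = 1, p = 6 on VLV (refclk 25000 kHz) gives dsi_clock = 300000 kHz; four lanes at 24 bpp then yield pclk = 300000 * 4 / 24 = 50000 kHz.

#include <stdio.h>

int main(void)
{
	int m = 72, n = 1, p = 6, refclk = 25000;	/* kHz, VLV-style */
	int lanes = 4, bpp = 24;
	int dsi_clock = (m * refclk) / (p * n);		/* 300000 kHz */
	int pclk = (dsi_clock * lanes + bpp / 2) / bpp;	/* round to closest */

	printf("dsi=%d kHz pclk=%d kHz\n", dsi_clock, pclk);
	return 0;
}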
Example #26
static void
init_dev_info(int drm_fd, uint32_t devid)
{
    i915_getparam_t test;
    int test_n_eus;
    int status;

    gputop_devinfo.devid = devid;

    test.param = I915_PARAM_EU_TOTAL;
    test.value = &test_n_eus;
    status = perf_ioctl(drm_fd, I915_IOCTL_GETPARAM, &test);
    if (status == -1)
	fprintf(stderr, "error calling I915_IOCTL_GETPARAM %m\n");

    if (IS_HASWELL(devid)) {
	if (IS_HSW_GT1(devid)) {
	    gputop_devinfo.n_eus = 10;
	    gputop_devinfo.n_eu_slices = 1;
	    gputop_devinfo.n_eu_sub_slices = 1;
	    gputop_devinfo.subslice_mask = 0x1;
	} else if (IS_HSW_GT2(devid)) {
	    gputop_devinfo.n_eus = 20;
	    gputop_devinfo.n_eu_slices = 1;
	    gputop_devinfo.n_eu_sub_slices = 2;
	    gputop_devinfo.subslice_mask = 0x3;
	} else if (IS_HSW_GT3(devid)) {
	    gputop_devinfo.n_eus = 40;
	    gputop_devinfo.n_eu_slices = 2;
	    gputop_devinfo.n_eu_sub_slices = 4;
	    gputop_devinfo.subslice_mask = 0xf;
	}
    } else {
#ifdef I915_PARAM_EU_TOTAL
	i915_getparam_t gp;
	int ret;
	int n_eus = 0;
	int slice_mask = 0;
	int ss_mask = 0;
	int s_max;
	int ss_max;
	uint64_t subslice_mask = 0;
	int s;

	if (IS_BROADWELL(devid)) {
	    s_max = 2;
	    ss_max = 3;
	} else if (IS_CHERRYVIEW(devid)) {
	    s_max = 1;
	    ss_max = 2;
	} else if (IS_SKYLAKE(devid)) {
	    s_max = 3;
	    ss_max = 4;
	} else {
	    /* Unknown platform: without this branch, s_max/ss_max below
	     * would be used uninitialized. */
	    assert(0);
	    s_max = ss_max = 0;
	}

	gp.param = I915_PARAM_EU_TOTAL;
	gp.value = &n_eus;
	ret = perf_ioctl(drm_fd, I915_IOCTL_GETPARAM, &gp);
	assert(ret == 0 && n_eus > 0);

	gp.param = I915_PARAM_SLICE_MASK;
	gp.value = &slice_mask;
	ret = perf_ioctl(drm_fd, I915_IOCTL_GETPARAM, &gp);
	assert(ret == 0 && slice_mask);

	gp.param = I915_PARAM_SUBSLICE_MASK;
	gp.value = &ss_mask;
	ret = perf_ioctl(drm_fd, I915_IOCTL_GETPARAM, &gp);
	assert(ret == 0 && ss_mask);

	gputop_devinfo.n_eus = n_eus;
	gputop_devinfo.n_eu_slices = __builtin_popcount(slice_mask);

	/* Note: some of the metrics we have (as described in XML)
	 * are conditional on a $SubsliceMask variable which is
	 * expected to also reflect the slice mask by packing
	 * together subslice masks for each slice in one value...
	 */
	for (s = 0; s < s_max; s++) {
	    if (slice_mask & (1<<s)) {
		/* Fixed: accumulate into subslice_mask (the original OR-ed
		 * back into slice_mask, leaving subslice_mask zero). */
		subslice_mask |= (uint64_t)ss_mask << (ss_max * s);
	    }
	}
	gputop_devinfo.subslice_mask = subslice_mask;
#else
	assert(0);
#endif
    }
}
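The packing loop (as fixed above) ORs each present slice's subslice mask into one value at ss_max-bit strides. A standalone sketch with made-up mask values:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_subslice_mask(int slice_mask, int ss_mask,
				   int s_max, int ss_max)
{
	uint64_t packed = 0;
	int s;

	for (s = 0; s < s_max; s++)
		if (slice_mask & (1 << s))
			packed |= (uint64_t)ss_mask << (ss_max * s);
	return packed;
}

int main(void)
{
	/* e.g. two slices with three subslices each (values made up) */
	printf("0x%llx\n", (unsigned long long)
	       pack_subslice_mask(0x3, 0x7, 2, 3));	/* prints 0x3f */
	return 0;
}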
Example #27
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	bool need_stable_symbols = false;

	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
		int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
		if (ret)
			return ret;
	}

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PIPE:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_B:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_C:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_DP_D:
		if (!IS_CHERRYVIEW(dev_priv))
			return -EINVAL;
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
		need_stable_symbols = true;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * When the pipe CRC tap point is after the transcoders we need
	 * to tweak symbol-level features to produce a deterministic series of
	 * symbols for a given frame. We need to reset those features only once
	 * a frame (instead of every nth symbol):
	 *   - DC-balance: used to ensure a better clock recovery from the data
	 *     link (SDVO)
	 *   - DisplayPort scrambling: used for EMI reduction
	 */
	if (need_stable_symbols) {
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);

		tmp |= DC_BALANCE_RESET_VLV;
		switch (pipe) {
		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
			break;
		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
			break;
		case PIPE_C:
			tmp |= PIPE_C_SCRAMBLE_RESET;
			break;
		default:
			return -EINVAL;
		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}

	return 0;
}
Example #28
static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
			       enum pipe pipe,
			       enum intel_pipe_crc_source source)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	enum intel_display_power_domain power_domain;
	u32 val = 0; /* shut up gcc */
	int ret;

	if (pipe_crc->source == source)
		return 0;

	/* forbid changing the source without going back to 'none' */
	if (pipe_crc->source && source)
		return -EINVAL;

	power_domain = POWER_DOMAIN_PIPE(pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
	if (ret != 0)
		goto out;

	/* none -> real source transition */
	if (source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));

		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
				  sizeof(pipe_crc->entries[0]),
				  GFP_KERNEL);
		if (!entries) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
		 * enabled and disabled dynamically based on package C states,
		 * user space can't make reliable use of the CRCs, so let's just
		 * completely disable it.
		 */
		hsw_disable_ips(crtc);

		spin_lock_irq(&pipe_crc->lock);
		kfree(pipe_crc->entries);
		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
	}

	pipe_crc->source = source;

	I915_WRITE(PIPE_CRC_CTL(pipe), val);
	POSTING_READ(PIPE_CRC_CTL(pipe));

	/* real source -> none transition */
	if (!source) {
		struct intel_pipe_crc_entry *entries;

		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
				 pipe_name(pipe));

		drm_modeset_lock(&crtc->base.mutex, NULL);
		if (crtc->base.state->active)
			intel_wait_for_vblank(dev_priv, pipe);
		drm_modeset_unlock(&crtc->base.mutex);

		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);

		kfree(entries);

		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, pipe);
		else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
			hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);

		hsw_enable_ips(crtc);
	}

	ret = 0;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Example #29
int main(int argc, char *argv[])
{
	uint32_t port, reg, val;
	struct pci_device *dev = intel_get_pci_device();
	int i, nregs, count = 1, reg_stride;
	const char *name;

	fprintf(stderr, "WARNING: Use of %s has been deprecated and replaced by"
		" intel_reg.\n", argv[0]);

	if (!IS_VALLEYVIEW(dev->device_id) &&
	    !IS_CHERRYVIEW(dev->device_id)) {
		usage(argv[0]);
		return 1;
	}

	for (;;) {
		int c = getopt(argc, argv, "hc:");

		if (c == -1)
			break;

		switch (c) {
		case 'h':
			usage(argv[0]);
			return 0;
		case 'c':
			count = strtol(optarg, NULL, 0);
			if (count < 1) {
				usage(argv[0]);
				return 3;
			}
			break;
		}
	}

	nregs = argc - optind;
	if (nregs < 1) {
		usage(argv[0]);
		return 2;
	}

	i = optind;
	name = argv[i++];
	port = iosf_sb_port_parse(name, &reg_stride);

	intel_register_access_init(dev, 0);

	for (; i < argc; i++) {
		int j;

		reg = strtoul(argv[i], NULL, 16);

		for (j = 0; j < count; j++) {
			val = intel_iosf_sb_read(port, reg);
			printf("0x%02x(%s)/0x%04x : 0x%08x\n", port, name, reg, val);
			reg += reg_stride;
		}
	}

	intel_register_access_fini();

	return 0;
}
Example #30
BOOL
media_driver_data_init (VADriverContextP ctx)
{
  MEDIA_DRV_CONTEXT *drv_ctx = NULL;
  MEDIA_DRV_ASSERT (ctx);
  drv_ctx = ctx->pDriverData;
  if (IS_GEN75 (drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &gen75_hw_codec_info;
  else if (IS_GEN7 (drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &gen7_hw_codec_info;
  else if (IS_GEN8(drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &gen8_hw_codec_info;
  else if (IS_CHERRYVIEW(drv_ctx->drv_data.device_id))
    drv_ctx->codec_info = &chv_hw_codec_info;
  else
    return false;

  if (object_heap_init (&drv_ctx->config_heap,
			sizeof (struct object_config), CONFIG_ID_OFFSET))
    goto err_config_heap;
  if (object_heap_init (&drv_ctx->context_heap,
			sizeof (struct object_context), CONTEXT_ID_OFFSET))
    goto err_context_heap;

  if (object_heap_init (&drv_ctx->surface_heap,
			sizeof (struct object_surface), SURFACE_ID_OFFSET))
    goto err_surface_heap;
  if (object_heap_init (&drv_ctx->buffer_heap,
			sizeof (struct object_buffer), BUFFER_ID_OFFSET))
    goto err_buffer_heap;
  if (object_heap_init (&drv_ctx->image_heap,
			sizeof (struct object_image), IMAGE_ID_OFFSET))
    goto err_image_heap;

  if (object_heap_init (&drv_ctx->subpic_heap,
                        sizeof (struct object_subpic), IMAGE_ID_OFFSET))
    goto err_subpic_heap;

  drv_ctx->batch =
    media_batchbuffer_new (&drv_ctx->drv_data, I915_EXEC_RENDER, 0);
  drv_ctx->pp_batch =
    media_batchbuffer_new (&drv_ctx->drv_data, I915_EXEC_RENDER, 0);
  drv_ctx->render_batch =
    media_batchbuffer_new (&drv_ctx->drv_data, I915_EXEC_RENDER, 0);
  media_drv_mutex_init (&drv_ctx->render_mutex);
  media_drv_mutex_init (&drv_ctx->pp_mutex);

  return true;

err_subpic_heap:
  /* subpic_heap init failed; unwind the image heap initialized before it
   * (the original wrongly destroyed the just-failed subpic heap and
   * leaked image_heap). */
  object_heap_destroy (&drv_ctx->image_heap);

err_image_heap:
  object_heap_destroy (&drv_ctx->buffer_heap);
err_buffer_heap:
  object_heap_destroy (&drv_ctx->surface_heap);
err_surface_heap:
  object_heap_destroy (&drv_ctx->context_heap);
err_context_heap:
  object_heap_destroy (&drv_ctx->config_heap);
err_config_heap:
  return false;
}