Example #1
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
		hsw_enable_pc8(dev_priv);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
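For context, a minimal sketch of how such a late suspend hook is typically wired into the driver's dev_pm_ops table so that it runs only after the normal suspend phase (where snd-hda suspends) has completed; the sibling callback names here are assumptions, not necessarily the driver's actual ones:

static const struct dev_pm_ops example_i915_pm_ops = {
	/* normal suspend phase: snd-hda still needs the GPU powered */
	.suspend = i915_pm_suspend,
	/* late phase: all normal callbacks have run, safe to power down */
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
};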
Example #2
static bool intel_fbc_can_choose(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_fbc *fbc = &dev_priv->fbc;
	bool enable_by_default = IS_BROADWELL(dev_priv);

	if (intel_vgpu_active(dev_priv->dev)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (i915.enable_fbc < 0 && !enable_by_default) {
		fbc->no_fbc_reason = "disabled per chip default";
		return false;
	}

	if (!i915.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param";
		return false;
	}

	if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
		fbc->no_fbc_reason = "no enabled pipes can have FBC";
		return false;
	}

	if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
		fbc->no_fbc_reason = "no enabled planes can have FBC";
		return false;
	}

	return true;
}
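A hedged sketch of a possible caller, assuming FBC is handed to the first eligible CRTC; the iteration macro exists in i915, but this is not necessarily the driver's actual selection logic:

	for_each_intel_crtc(dev_priv->dev, crtc) {
		if (intel_fbc_can_choose(crtc)) {
			fbc->crtc = crtc;
			break;
		}
	}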
Example #3
int Calc_CSC_Param(struct CSCCoeff_Matrix *CSC_Matrix,
				struct csc_coeff *CSC_Coeff_t, int devid)
{
    struct drm_intel_csc_params input_csc_params;
    unsigned short Hsw_Vlv_CSC_Coeff[CSC_MAX_COEFF_COUNT];

    memcpy(input_csc_params.m_CSCCoeff, CSC_Matrix->CoeffMatrix,
					sizeof(float) * CSC_MAX_COEFF_COUNT);
    Convert_Coeff_ToBinary(&input_csc_params, Hsw_Vlv_CSC_Coeff, devid);
    Convert_Coeff_ToBSpecFormat(CSC_Coeff_t->csc_coeff,
						Hsw_Vlv_CSC_Coeff, devid);

    if (IS_HASWELL(devid) || IS_BROADWELL(devid)) {
        if (CSC_Matrix->param_valid & CSC_OFFSET_VALID_MASK)
            Convert_CSC_Offset_ToBSpecFormat(CSC_Matrix, CSC_Coeff_t);

        if (CSC_Matrix->param_valid & CSC_MODE_VALID_MASK) {
            if (CSC_Matrix->CSCMode == 0x1)
                CSC_Coeff_t->csc_mode = 0x2; /* CSC is before Gamma */
            else
                CSC_Coeff_t->csc_mode = 0;
        }
        CSC_Coeff_t->param_valid = CSC_Matrix->param_valid;
    }

    return 0;
}
Example #4
void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 8)
		return;
	else if (IS_BROADWELL(dev_priv))
		bdw_gt_workarounds_apply(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		chv_gt_workarounds_apply(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		skl_gt_workarounds_apply(dev_priv);
	else if (IS_BROXTON(dev_priv))
		bxt_gt_workarounds_apply(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		kbl_gt_workarounds_apply(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		glk_gt_workarounds_apply(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		cfl_gt_workarounds_apply(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		cnl_gt_workarounds_apply(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		icl_gt_workarounds_apply(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
}
Example #5
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}
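A hedged sketch of how this guess might be consumed by the PCH detection path; the pch_found flag is hypothetical, standing in for whatever the real detection loop uses:

	/* No ISA bridge visible (e.g. passthrough without the south
	 * bridge): fall back to guessing the PCH from the CPU. */
	if (!pch_found)
		dev_priv->pch_type = intel_virt_detect_pch(dev);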
Example #6
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point in rolling things back in case of an error, as the best
	 * we can do is hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #7
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);

	if (IS_SKYLAKE(dev_priv))
		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
				SDE_PORTE_HOTPLUG_SPT);

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;

	if (IS_SKYLAKE(dev_priv) &&
			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
	}

	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
		if (IS_BROADWELL(dev_priv))
			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
				GEN8_PORT_DP_A_HOTPLUG;
		else
			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
	}
}
Example #8
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
				enum pipe pipe,
				enum intel_pipe_crc_source *source,
				uint32_t *val)
{
	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
		*source = INTEL_PIPE_CRC_SOURCE_PF;

	switch (*source) {
	case INTEL_PIPE_CRC_SOURCE_PLANE1:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PLANE2:
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_PF:
		if ((IS_HASWELL(dev_priv) ||
		     IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
			hsw_pipe_A_crc_wa(dev_priv, true);

		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
Example #9
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
							int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
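A rough sketch of the matching release path, assuming it only needs to clear the kernel's forcewake request with a masked-bit write and post it from the same cacheline; the real driver may do extra FIFO bookkeeping here:

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}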
Example #10
static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
					 struct whitelist *w)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	w->count = 0;
	w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));

	if (INTEL_GEN(i915) < 8)
		return NULL;
	else if (IS_BROADWELL(i915))
		bdw_whitelist_build(w);
	else if (IS_CHERRYVIEW(i915))
		chv_whitelist_build(w);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(w);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(w);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(w);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(w);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(w);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(w);
	else if (IS_ICELAKE(i915))
		icl_whitelist_build(w);
	else
		MISSING_CASE(INTEL_GEN(i915));

	return w;
}
Example #11
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}
Example #12
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
{
	int err = 0;

	dev_priv->workarounds.count = 0;

	if (INTEL_GEN(dev_priv) < 8)
		err = 0;
	else if (IS_BROADWELL(dev_priv))
		err = bdw_ctx_workarounds_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		err = chv_ctx_workarounds_init(dev_priv);
	else if (IS_SKYLAKE(dev_priv))
		err = skl_ctx_workarounds_init(dev_priv);
	else if (IS_BROXTON(dev_priv))
		err = bxt_ctx_workarounds_init(dev_priv);
	else if (IS_KABYLAKE(dev_priv))
		err = kbl_ctx_workarounds_init(dev_priv);
	else if (IS_GEMINILAKE(dev_priv))
		err = glk_ctx_workarounds_init(dev_priv);
	else if (IS_COFFEELAKE(dev_priv))
		err = cfl_ctx_workarounds_init(dev_priv);
	else if (IS_CANNONLAKE(dev_priv))
		err = cnl_ctx_workarounds_init(dev_priv);
	else if (IS_ICELAKE(dev_priv))
		err = icl_ctx_workarounds_init(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));
	if (err)
		return err;

	DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
			 dev_priv->workarounds.count);
	return 0;
}
Example #13
bool
gputop_perf_initialize(void)
{
    if (intel_dev.device)
	return true;

    drm_fd = open_render_node(&intel_dev);
    if (drm_fd < 0) {
	gputop_log(GPUTOP_LOG_LEVEL_HIGH, "Failed to open render node", -1);
	return false;
    }

    /* NB: eu_count needs to be initialized before declaring counters */
    init_dev_info(drm_fd, intel_dev.device);
    page_size = sysconf(_SC_PAGE_SIZE);

    if (IS_HASWELL(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_compute_basic_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_compute_extended_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_memory_reads_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_memory_writes_counter_query_hsw(&gputop_devinfo);
	gputop_oa_add_sampler_balance_counter_query_hsw(&gputop_devinfo);
    } else if (IS_BROADWELL(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_bdw(&gputop_devinfo);
    } else if (IS_CHERRYVIEW(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_chv(&gputop_devinfo);
    } else if (IS_SKYLAKE(intel_dev.device)) {
	gputop_oa_add_render_basic_counter_query_skl(&gputop_devinfo);
    } else
	assert(0);

    return true;
}
Example #14
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv))
		return true;
	if (IS_SKYLAKE(dev_priv))
		return true;
	return false;
}
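A sketch of how such a predicate would typically gate GVT initialization; the surrounding error path is an assumption:

	if (!is_supported_device(dev_priv)) {
		DRM_DEBUG_DRIVER("Unsupported device for GVT\n");
		return -ENODEV;
	}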
Example #15
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
Example #16
/*
 * Starting with Haswell, DDI port buffers must be programmed with correct
 * values in advance. The buffer values are different for FDI and DP modes,
 * but the HDMI/DVI fields are shared among those. So we program the DDI
 * in either FDI or DP modes only, as HDMI connections will work with both
 * of those.
 */
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;
	int i;
	int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
	const u32 *ddi_translations_fdi;
	const u32 *ddi_translations_dp;
	const u32 *ddi_translations_edp;
	const u32 *ddi_translations;

	if (IS_BROADWELL(dev)) {
		ddi_translations_fdi = bdw_ddi_translations_fdi;
		ddi_translations_dp = bdw_ddi_translations_dp;
		ddi_translations_edp = bdw_ddi_translations_edp;
	} else if (IS_HASWELL(dev)) {
		ddi_translations_fdi = hsw_ddi_translations_fdi;
		ddi_translations_dp = hsw_ddi_translations_dp;
		ddi_translations_edp = hsw_ddi_translations_dp;
	} else {
		WARN(1, "ddi translation table missing\n");
		ddi_translations_edp = bdw_ddi_translations_dp;
		ddi_translations_fdi = bdw_ddi_translations_fdi;
		ddi_translations_dp = bdw_ddi_translations_dp;
	}

	switch (port) {
	case PORT_A:
		ddi_translations = ddi_translations_edp;
		break;
	case PORT_B:
	case PORT_C:
		ddi_translations = ddi_translations_dp;
		break;
	case PORT_D:
		if (intel_dp_is_edp(dev, PORT_D))
			ddi_translations = ddi_translations_edp;
		else
			ddi_translations = ddi_translations_dp;
		break;
	case PORT_E:
		ddi_translations = ddi_translations_fdi;
		break;
	default:
		BUG();
	}

	for (i = 0, reg = DDI_BUF_TRANS(port);
	     i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
		I915_WRITE(reg, ddi_translations[i]);
		reg += 4;
	}
	/* Entry 9 is for HDMI: */
	for (i = 0; i < 2; i++) {
		I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
		reg += 4;
	}
}
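A hedged sketch of the expected caller, assuming every DDI port gets its buffer translations programmed once during display initialization:

void intel_prepare_ddi(struct drm_device *dev)
{
	enum port port;

	for (port = PORT_A; port <= PORT_E; port++)
		intel_prepare_ddi_buffers(dev, port);
}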
Example #17
static void gen7_fbc_enable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct drm_framebuffer *fb = crtc->base.primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = 0;
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);

	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));

	intel_fbc_nuke(dev_priv);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(crtc->plane));
}
Example #18
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
    struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
    u32 dpfc_ctl;
    int threshold = dev_priv->fbc.threshold;

    dev_priv->fbc.active = true;

    dpfc_ctl = 0;
    if (IS_IVYBRIDGE(dev_priv))
        dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);

    if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
        threshold++;

    switch (threshold) {
    case 4:
    case 3:
        dpfc_ctl |= DPFC_CTL_LIMIT_4X;
        break;
    case 2:
        dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        break;
    case 1:
        dpfc_ctl |= DPFC_CTL_LIMIT_1X;
        break;
    }

    dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

    if (dev_priv->fbc.false_color)
        dpfc_ctl |= FBC_CTL_FALSE_COLOR;

    if (IS_IVYBRIDGE(dev_priv)) {
        /* WaFbcAsynchFlipDisableFbcQueue:ivb */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
                   ILK_FBCQ_DIS);
    } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
        /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
        I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
                   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                   HSW_FBCQ_DIS);
    }

    I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

    I915_WRITE(SNB_DPFC_CTL_SA,
               SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
    I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);

    intel_fbc_recompress(dev_priv);
}
Example #19
/*
 * The DDX driver changes its behavior depending on the value it reads from
 * i915.enable_fbc, so sanitize it by translating the default value into either
 * 0 or 1 in order to allow it to know what's going on.
 *
 * Notice that this is done at driver initialization and we still allow user
 * space to change the value during runtime without sanitizing it again. IGT
 * relies on being able to change i915.enable_fbc at runtime.
 */
static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
{
	if (i915_modparams.enable_fbc >= 0)
		return !!i915_modparams.enable_fbc;

	if (!HAS_FBC(dev_priv))
		return 0;

	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
		return 1;

	return 0;
}
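A sketch of the intended call site, assuming the sanitized value is written back once at driver init so that user space only ever reads 0 or 1:

	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);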
Example #20
static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_disable_pc8(dev_priv);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
Example #21
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW the hardware loads the HDCP1.4 key when the display
	 * comes out of reset, so if the key is not already loaded it's an
	 * error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only SKL
	 * and KBL differ in the key load trigger process from other platforms.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
			          ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
Example #22
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8MB) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		end = ggtt->stolen_size - 8 * 1024 * 1024;
	else
		end = ggtt->stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we have only one fallback.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
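A hedged sketch of a caller, relying on the convention above: 0 means the CFB did not fit at all, and a value N > 1 means the allocation succeeded at 1/N of the requested size, so the hardware compression limit must be raised to match:

	int threshold = find_compression_threshold(dev_priv,
						   &fbc->compressed_fb,
						   size, fb_cpp);
	if (!threshold) {
		DRM_DEBUG_KMS("CFB allocation failed\n");
		return -ENOSPC;
	}
	dev_priv->fbc.threshold = threshold;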
Example #23
/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	int ret;

	if (IS_BROXTON(dev_priv))
		ret = bxt_suspend_complete(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}
Example #24
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev))
		ret = bxt_resume_prepare(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);

	if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

out:
	dev_priv->suspended_to_idle = false;

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}
Example #25
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
			      size_t *values_cnt)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
	enum intel_display_power_domain power_domain;
	enum intel_pipe_crc_source source;
	u32 val = 0; /* shut up gcc */
	int ret = 0;

	if (display_crc_ctl_parse_source(source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	power_domain = POWER_DOMAIN_PIPE(crtc->index);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
		return -EIO;
	}

	ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
	if (ret != 0)
		goto out;

	I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
	POSTING_READ(PIPE_CRC_CTL(crtc->index));

	if (!source) {
		if (IS_G4X(dev_priv))
			g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
		else if ((IS_HASWELL(dev_priv) ||
			  IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
			hsw_pipe_A_crc_wa(dev_priv, false);
	}

	pipe_crc->skipped = 0;
	*values_cnt = 5;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
Example #26
static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
		info->max_support_vgpus = 8;
		info->cfg_space_size = 256;
		info->mmio_size = 2 * 1024 * 1024;
		info->mmio_bar = 0;
		info->gtt_start_offset = 8 * 1024 * 1024;
		info->gtt_entry_size = 8;
		info->gtt_entry_size_shift = 3;
		info->gmadr_bytes_in_cmd = 8;
		info->max_surface_size = 36 * 1024 * 1024;
	}
	info->msi_cap_offset = pdev->msi_cap;
}
Example #27
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev)) {
		ret = snb_runtime_resume(dev_priv);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_resume(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_resume(dev_priv);
	} else {
		WARN_ON(1);
		ret = -ENODEV;
	}

	/*
	 * No point in rolling things back in case of an error, as the best
	 * we can do is hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
Example #28
/* Assigns the Binary CSC coefficients into BSpec register format */
static void Convert_Coeff_ToBSpecFormat(unsigned int *CSCCoeff,
					unsigned short *Coeff_binary, int devid)
{
    short int RegIndex;
    short int CoeffIndex;

    for (RegIndex = 0, CoeffIndex = 0; RegIndex < CSC_MAX_COEFF_REG_COUNT;
                                   RegIndex += 2, CoeffIndex += 3) {
        if (IS_HASWELL(devid) || IS_BROADWELL(devid)) {
            CSCCoeff[RegIndex] = (*(Coeff_binary + CoeffIndex) << 16 |
                                            *(Coeff_binary + (CoeffIndex + 1)));
            CSCCoeff[RegIndex+1] = *(Coeff_binary + (CoeffIndex + 2)) << 16;
        } else if (IS_VALLEYVIEW(devid)) {
            CSCCoeff[RegIndex] = (*(Coeff_binary + CoeffIndex + 1) << 16 |
                                            *(Coeff_binary + CoeffIndex));
            CSCCoeff[RegIndex+1] = *(Coeff_binary + (CoeffIndex + 2));
        }
    }
    return;
}
Example #29
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we make sure to touch the VGA MSR
	 * register, ensuring vgacon can keep working normally without
	 * triggering interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
		gen8_irq_power_well_post_enable(dev_priv);
}
Example #30
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;

	cache->vma = NULL;
	cache->flags = 0;

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->base.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
	cache->plane.visible = plane_state->base.visible;
	cache->plane.adjusted_x = plane_state->main.x;
	cache->plane.adjusted_y = plane_state->main.y;
	cache->plane.y = plane_state->base.src.y1 >> 16;

	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
	cache->flags = plane_state->flags;
	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
		cache->flags &= ~PLANE_HAS_FENCE;
}