int i915_restore_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_write_config(dev->dev, LBB, dev_priv->saveLBB, 1);

	DRM_LOCK(dev);

	/* Hardware status page */
	I915_WRITE(HWS_PGA, dev_priv->saveHWS);

	i915_restore_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(DEIER, dev_priv->saveDEIER);
		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
		I915_WRITE(GTIER, dev_priv->saveGTIER);
		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
		I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
		I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
		I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
	} else {
		I915_WRITE(IER, dev_priv->saveIER);
		I915_WRITE(IMR, dev_priv->saveIMR);
	}
	DRM_UNLOCK(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	DRM_LOCK(dev);

	/* Cache mode state */
	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
	}
	for (i = 0; i < 3; i++)
		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);

	DRM_UNLOCK(dev);

	intel_iic_reset(dev);

	return 0;
}
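/*
 * i915_save_state(): captures chipset state that is lost across
 * suspend (LBB config byte, hardware status page address, display and
 * interrupt state, cache-mode, memory-arbiter and scratch registers,
 * and the fence registers) so i915_restore_state() can write it back
 * on resume.
 */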
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (IS_IRONLAKE(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(MCHBAR_RENDER_STANDBY);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	/* Fences */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
	} else {
		for (i = 0; i < 8; i++)
			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));

		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	}

	return 0;
}
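/*
 * A later i915_save_state() variant: the register reads are done under
 * dev->struct_mutex, PCH hotplug state is saved on PCH-split parts,
 * and DRPS/RPS power management is disabled before the cache-mode,
 * arbiter and scratch registers are read.  Fence registers are not
 * saved here.
 */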
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

	mutex_lock(&dev->struct_mutex);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(RSTDBYCTL);
		dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_disable_rps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
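/*
 * Another i915_restore_state() variant: writes back the saved LBB byte,
 * hardware status page, display and interrupt state, re-enables clock
 * gating, DRPS/EMON and Gen6 RPS, then restores the cache-mode, arbiter
 * and scratch registers and resets the GMBUS (I2C) controller.
 */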
int i915_restore_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);

	/* Hardware status page */
	I915_WRITE(HWS_PGA, dev_priv->saveHWS);

	i915_restore_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(DEIER, dev_priv->saveDEIER);
		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
		I915_WRITE(GTIER, dev_priv->saveGTIER);
		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
		I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
		I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
	} else {
		I915_WRITE(IER, dev_priv->saveIER);
		I915_WRITE(IMR, dev_priv->saveIMR);
	}

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev))
		gen6_enable_rps(dev_priv);

	/* Cache mode state */
	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
	}
	for (i = 0; i < 3; i++)
		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);

	intel_i2c_reset(dev);

	return 0;
}
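/*
 * A BSD-flavoured i915_save_state(): reads the LBB byte with
 * pci_read_config(), saves the status page, display, interrupt,
 * cache-mode, arbiter and scratch state, and disables DRPS/RPS, all
 * without taking a lock around the MMIO reads.
 */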
int i915_save_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->saveLBB = pci_read_config(dev->dev, LBB, 1);

	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);

	i915_save_display(dev);

	/* Interrupt state */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->saveDEIER = I915_READ(DEIER);
		dev_priv->saveDEIMR = I915_READ(DEIMR);
		dev_priv->saveGTIER = I915_READ(GTIER);
		dev_priv->saveGTIMR = I915_READ(GTIMR);
		dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
		dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
		dev_priv->saveMCHBAR_RENDER_STANDBY =
			I915_READ(RSTDBYCTL);
		dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
	} else {
		dev_priv->saveIER = I915_READ(IER);
		dev_priv->saveIMR = I915_READ(IMR);
	}

	if (IS_IRONLAKE_M(dev))
		ironlake_disable_drps(dev);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_disable_rps(dev);

	/* Cache mode state */
	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);

	/* Memory Arbitration state */
	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);

	/* Scratch space */
	for (i = 0; i < 16; i++) {
		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
	}
	for (i = 0; i < 3; i++)
		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));

	return 0;
}
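/*
 * i915_drm_thaw(): the resume path.  Restores GTT mappings and the
 * saved register state, sets up the ACPI OpRegion, re-initializes GEM
 * hardware and the PCH reference clock under KMS, reinstalls the IRQ
 * handler, forces a modeset on the active CRTCs and wakes the fbdev
 * console.
 */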
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		drm_mode_config_reset(dev);
		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
	return error;
}
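/*
 * A BSD-flavoured i915_drm_thaw(): the same resume sequence expressed
 * with DRM_LOCK()/DRM_UNLOCK() and lockmgr() for the mode_config mutex
 * instead of the Linux mutex API, and without the fbdev console
 * handling.
 */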
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		DRM_UNLOCK(dev);
		lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
		drm_mode_config_reset(dev);
		lockmgr(&dev->mode_config.mutex, LK_RELEASE);
		drm_irq_install(dev);

		lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		lockmgr(&dev->mode_config.mutex, LK_RELEASE);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	DRM_UNLOCK(dev);

	return error;
}
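/*
 * An earlier i915_restore_state() variant for pre-PCH-split kernels:
 * it also restores the fence registers and uses the older IS_IRONLAKE()
 * and FDI_RX*_IMR names before re-enabling clock gating and DRPS and
 * resetting GMBUS.
 */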
int i915_restore_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);

	/* Hardware status page */
	I915_WRITE(HWS_PGA, dev_priv->saveHWS);

	/* Fences */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
	}

	i915_restore_display(dev);

	/* Interrupt state */
	if (IS_IRONLAKE(dev)) {
		I915_WRITE(DEIER, dev_priv->saveDEIER);
		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
		I915_WRITE(GTIER, dev_priv->saveGTIER);
		I915_WRITE(GTIMR, dev_priv->saveGTIMR);
		I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
		I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
	} else {
		I915_WRITE(IER, dev_priv->saveIER);
		I915_WRITE(IMR, dev_priv->saveIMR);
	}

	/* Clock gating state */
	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev))
		ironlake_enable_drps(dev);

	/* Cache mode state */
	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

	/* Memory arbitration state */
	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);

	for (i = 0; i < 16; i++) {
		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
	}
	for (i = 0; i < 3; i++)
		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);

	/* I2C state */
	intel_i2c_reset_gmbus(dev);

	return 0;
}