int i915_restore_state(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        pci_write_config(dev->dev, LBB, dev_priv->saveLBB, 1);

        DRM_LOCK(dev);

        /* Hardware status page */
        I915_WRITE(HWS_PGA, dev_priv->saveHWS);

        i915_restore_display(dev);

        /* Interrupt state */
        if (HAS_PCH_SPLIT(dev)) {
                I915_WRITE(DEIER, dev_priv->saveDEIER);
                I915_WRITE(DEIMR, dev_priv->saveDEIMR);
                I915_WRITE(GTIER, dev_priv->saveGTIER);
                I915_WRITE(GTIMR, dev_priv->saveGTIMR);
                I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
                I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
                I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
        } else {
                I915_WRITE(IER, dev_priv->saveIER);
                I915_WRITE(IMR, dev_priv->saveIMR);
        }

        /* Drop the lock around the clock gating and power re-init calls. */
        DRM_UNLOCK(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                intel_init_clock_gating(dev);

        if (IS_IRONLAKE_M(dev)) {
                ironlake_enable_drps(dev);
                intel_init_emon(dev);
        }

        if (INTEL_INFO(dev)->gen >= 6) {
                gen6_enable_rps(dev_priv);
                gen6_update_ring_freq(dev_priv);
        }

        DRM_LOCK(dev);

        /* Cache mode state; the high 16 bits are the write-enable mask */
        I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);

        /* Memory arbitration state */
        I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);

        /* Scratch (software flag) registers, spaced 4 bytes apart */
        for (i = 0; i < 16; i++) {
                I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
                I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
        }
        for (i = 0; i < 3; i++)
                I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);

        DRM_UNLOCK(dev);

        intel_iic_reset(dev);

        return 0;
}
static int intel_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;

        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
                return -ENODEV;

        DRM_DEBUG_KMS("Resuming device\n");

        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
        disable_rpm_wakeref_asserts(dev_priv);

        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;
        if (intel_uncore_unclaimed_mmio(dev_priv))
                DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

        intel_guc_resume(dev);

        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev);

        if (IS_BROXTON(dev))
                ret = bxt_resume_prepare(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_disable_pc8(dev_priv);
        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                ret = vlv_resume_prepare(dev_priv, true);

        /*
         * No point in rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
        gen6_update_ring_freq(dev);

        intel_runtime_pm_enable_interrupts(dev_priv);

        /*
         * On VLV/CHV display interrupts are part of the display
         * power well, so hpd is reinitialized from there. For
         * everyone else do it here.
         */
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_init(dev_priv);

        intel_enable_gt_powersave(dev);

        enable_rpm_wakeref_asserts(dev_priv);

        if (ret)
                DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
        else
                DRM_DEBUG_KMS("Device resumed\n");

        return ret;
}
static int intel_runtime_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        WARN_ON(!HAS_RUNTIME_PM(dev));

        DRM_DEBUG_KMS("Resuming device\n");

        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;

        /* Dispatch to the platform-specific resume path. */
        if (IS_GEN6(dev)) {
                ret = snb_runtime_resume(dev_priv);
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                ret = hsw_runtime_resume(dev_priv);
        } else if (IS_VALLEYVIEW(dev)) {
                ret = vlv_runtime_resume(dev_priv);
        } else {
                WARN_ON(1);
                ret = -ENODEV;
        }

        /*
         * No point in rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
        gen6_update_ring_freq(dev);

        intel_runtime_pm_restore_interrupts(dev);
        intel_reset_gt_powersave(dev);

        if (ret)
                DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
        else
                DRM_DEBUG_KMS("Device resumed\n");

        return ret;
}
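/*
 * A minimal sketch of how a runtime-resume callback such as
 * intel_runtime_resume() above gets wired into the kernel's PM core.
 * The names example_pm_ops and example_pci_driver are illustrative,
 * not the driver's actual symbols; the real i915 ops table also
 * installs a runtime-suspend hook, which is defined elsewhere in the
 * driver and therefore left NULL here to keep the sketch
 * self-contained.  Once registered, the PM core invokes the
 * runtime_resume hook when something takes a runtime PM reference
 * (e.g. pm_runtime_get_sync()) on a suspended device.
 */
#include <linux/pci.h>
#include <linux/pm.h>

static const struct dev_pm_ops example_pm_ops = {
        /* Arguments: suspend_fn, resume_fn, idle_fn; NULL hooks are unused. */
        SET_RUNTIME_PM_OPS(NULL, intel_runtime_resume, NULL)
};

static struct pci_driver example_pci_driver = {
        .name = "i915-example",
        /* The PM core finds the ops table through the device_driver. */
        .driver.pm = &example_pm_ops,
};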