/*
 * PM suspend handler: quiesce IRQs and connector polling, suspend the
 * fbdev console, save the atomic modeset state, then gate the DCU
 * clocks.  If saving the atomic state fails, everything done so far is
 * unwound in reverse order and the error is propagated.
 */
static int fsl_dcu_drm_pm_suspend(struct device *dev)
{
	struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);

	/* Nothing to do if the driver never finished probing. */
	if (!fsl_dev)
		return 0;

	disable_irq(fsl_dev->irq);
	drm_kms_helper_poll_disable(fsl_dev->drm);

	/* fbdev suspend state changes are protected by the console lock. */
	console_lock();
	drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
	console_unlock();

	fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
	if (IS_ERR(fsl_dev->state)) {
		/* Roll back: wake fbdev, restart polling and IRQs. */
		console_lock();
		drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
		console_unlock();

		drm_kms_helper_poll_enable(fsl_dev->drm);
		enable_irq(fsl_dev->irq);
		return PTR_ERR(fsl_dev->state);
	}

	clk_disable_unprepare(fsl_dev->pix_clk);
	clk_disable_unprepare(fsl_dev->clk);

	return 0;
}
/*
 * Resume entry point (NetBSD-aware variant): re-enable the PCI device
 * (skipped on NetBSD where pmf does it), reset the GT, optionally
 * restore GTT mappings, then thaw the rest of the driver.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Switcheroo has the device powered off: nothing to resume. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

#ifndef __NetBSD__		/* pmf handles this for us. */
	if (pci_enable_device(dev->pdev))
		return -EIO;
#endif

	pci_set_master(dev->pdev);

	intel_gt_reset(dev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need this since the BIOS might clear all our scratch PTEs.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    !dev_priv->opregion.header) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	ret = __i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
static int fsl_dcu_drm_pm_resume(struct device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); int ret; if (!fsl_dev) return 0; ret = clk_prepare_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); return ret; } if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); console_lock(); drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); console_unlock(); drm_kms_helper_poll_enable(fsl_dev->drm); enable_irq(fsl_dev->irq); return 0; }
static int fsl_dcu_drm_pm_resume(struct device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); int ret; if (!fsl_dev) return 0; ret = clk_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); clk_unprepare(fsl_dev->clk); return ret; } ret = clk_prepare(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to prepare dcu clk\n"); return ret; } drm_kms_helper_poll_enable(fsl_dev->drm); regcache_cache_only(fsl_dev->regmap, false); regcache_sync(fsl_dev->regmap); return 0; }
/*
 * Resume entry point, including optional vGT (mediated graphics
 * passthrough) resume when built with CONFIG_I915_VGT.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Switcheroo has the device powered off: nothing to resume. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

#ifdef CONFIG_I915_VGT
	/* Resume the mediation layer before thawing the driver proper. */
	if (i915_host_mediate) {
		int error = vgt_resume(dev->pdev);
		if (error)
			return error;
	}
#endif

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
/* System resume: restart connector polling, then resume GEM buffers. */
static int omap_drm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return omap_gem_resume(dev);
}
/* USB resume: restart connector polling and reprogram the display pipe. */
static int udl_usb_resume(struct usb_interface *interface)
{
	struct drm_device *drm = usb_get_intfdata(interface);

	drm_kms_helper_poll_enable(drm);
	udl_modeset_restore(drm);

	return 0;
}
/* PM resume stub: only connector polling is restarted for now. */
static int rcar_du_pm_resume(struct device *dev)
{
	struct rcar_du_device *du = dev_get_drvdata(dev);

	/* TODO Resume the CRTC */

	drm_kms_helper_poll_enable(du->ddev);

	return 0;
}
/* PM resume: force the connector back on, then restart HPD polling. */
static int __maybe_unused xylon_drm_pm_resume(struct device *dev)
{
	struct xylon_drm_device *xdrm = dev_get_drvdata(dev);

	drm_helper_connector_dpms(xdrm->connector, DRM_MODE_DPMS_ON);
	drm_kms_helper_poll_enable(xdrm->dev);

	return 0;
}
/* PM resume: replay the saved atomic state, then wake fbdev and polling. */
static int rcar_du_pm_resume(struct device *dev)
{
	struct rcar_du_device *du = dev_get_drvdata(dev);

	drm_atomic_helper_resume(du->ddev, du->suspend_state);
	drm_fbdev_cma_set_suspend_unlocked(du->fbdev, false);
	drm_kms_helper_poll_enable(du->ddev);

	return 0;
}
/* PM resume: power the LTDC back up, replay the saved state, restart polling. */
static __maybe_unused int drv_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ltdc_device *priv = drm->dev_private;

	ltdc_resume(drm);
	drm_atomic_helper_resume(drm, priv->suspend_state);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
/* PM resume: restart the CRTC under the modeset locks, then resume polling. */
static int shmob_drm_pm_resume(struct device *dev)
{
	struct shmob_drm_device *sdrm = dev_get_drvdata(dev);
	struct drm_device *ddev = sdrm->ddev;

	drm_modeset_lock_all(ddev);
	shmob_drm_crtc_resume(&sdrm->crtc);
	drm_modeset_unlock_all(ddev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
static int shmob_drm_pm_resume(struct device *dev) { struct shmob_drm_device *sdev = dev_get_drvdata(dev); mutex_lock(&sdev->ddev->mode_config.mutex); shmob_drm_crtc_resume(&sdev->crtc); mutex_unlock(&sdev->ddev->mode_config.mutex); drm_kms_helper_poll_enable(sdev->ddev); return 0; }
/* System resume: re-light every display, restart polling, resume GEM. */
static int omap_drm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_modeset_lock_all(ddev);
	omap_drm_resume_all_displays();
	drm_modeset_unlock_all(ddev);

	drm_kms_helper_poll_enable(ddev);

	return omap_gem_resume(ddev);
}
/* PM resume: force the mode back, wake the fb helper, restart polling. */
static int bochs_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm = pci_get_drvdata(pdev);
	struct bochs_device *bochs = drm->dev_private;

	drm_helper_resume_force_mode(drm);

	drm_fb_helper_set_suspend_unlocked(&bochs->fb.helper, 0);

	drm_kms_helper_poll_enable(drm);
	return 0;
}
/* Resume: re-enable the PCI device, thaw the hardware, restart polling. */
static int ast_drm_resume(struct drm_device *dev)
{
	int err;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	err = ast_drm_thaw(dev);
	if (err != 0)
		return err;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
/* PM resume: replay the saved atomic state, wake fbdev, restart polling. */
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct hdlcd_drm_private *hdlcd;

	hdlcd = drm ? drm->dev_private : NULL;
	if (!hdlcd)
		return 0;

	drm_atomic_helper_resume(drm, hdlcd->state);
	drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
/* PM resume: replay the saved atomic state, restart polling, mark active. */
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct hdlcd_drm_private *hdlcd;

	hdlcd = drm ? drm->dev_private : NULL;
	if (!hdlcd)
		return 0;

	drm_atomic_helper_resume(drm, hdlcd->state);
	drm_kms_helper_poll_enable(drm);
	pm_runtime_set_active(dev);

	return 0;
}
/*
 * vga_switcheroo callback: power the GPU up or down.  Polling is only
 * active while the device is switched on.
 */
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
					 enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state != VGA_SWITCHEROO_ON) {
		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
		drm_kms_helper_poll_disable(dev);
		nouveau_pci_suspend(pdev, pmm);
	} else {
		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
		nouveau_pci_resume(pdev);
		drm_kms_helper_poll_enable(dev);
	}
}
/* Resume: re-enable the PCI device, thaw the driver, restart polling. */
int i915_resume(struct drm_device *dev)
{
	int err;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	err = i915_drm_thaw(dev);
	if (err != 0)
		return err;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
/* Resume: thaw the driver (restoring GTT mappings if needed), restart polling. */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int err;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	err = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (err != 0)
		return err;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
/* PM resume: force the mode back, wake fbdev if present, restart polling. */
static int cirrus_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm = pci_get_drvdata(pdev);
	struct cirrus_device *cdev = drm->dev_private;

	drm_helper_resume_force_mode(drm);

	/* fbdev suspend state changes require the console lock. */
	if (cdev->mode_info.gfbdev) {
		console_lock();
		fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
		console_unlock();
	}

	drm_kms_helper_poll_enable(drm);
	return 0;
}
/* PM resume: force the mode back, wake fbdev if initialized, restart polling. */
static int bochs_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm = pci_get_drvdata(pdev);
	struct bochs_device *bochs = drm->dev_private;

	drm_helper_resume_force_mode(drm);

	/* fbdev suspend state changes require the console lock. */
	if (bochs->fb.initialized) {
		console_lock();
		fb_set_suspend(bochs->fb.helper.fbdev, 0);
		console_unlock();
	}

	drm_kms_helper_poll_enable(drm);
	return 0;
}
/*
 * PM suspend: stop polling, save the atomic state, then power down the
 * LTDC.  If saving the state fails, polling is restarted and the error
 * returned.
 */
static __maybe_unused int drv_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ltdc_device *priv = drm->dev_private;
	struct drm_atomic_state *saved;

	drm_kms_helper_poll_disable(drm);

	saved = drm_atomic_helper_suspend(drm);
	if (IS_ERR(saved)) {
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(saved);
	}

	priv->suspend_state = saved;
	ltdc_suspend(drm);

	return 0;
}
/* PM suspend: stop polling and stash the atomic state for resume. */
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct hdlcd_drm_private *hdlcd;

	hdlcd = drm ? drm->dev_private : NULL;
	if (!hdlcd)
		return 0;

	drm_kms_helper_poll_disable(drm);

	hdlcd->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(hdlcd->state)) {
		/* Saving failed: restart polling and report the error. */
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(hdlcd->state);
	}

	return 0;
}
/* Resume: skip if switcheroo powered us off, else re-enable PCI and thaw. */
int i915_resume(struct drm_device *dev)
{
	int err;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	err = i915_drm_thaw(dev);
	if (err != 0)
		return err;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
static int rcar_du_pm_suspend(struct device *dev) { struct rcar_du_device *rcdu = dev_get_drvdata(dev); struct drm_atomic_state *state; drm_kms_helper_poll_disable(rcdu->ddev); drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, true); state = drm_atomic_helper_suspend(rcdu->ddev); if (IS_ERR(state)) { drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false); drm_kms_helper_poll_enable(rcdu->ddev); return PTR_ERR(state); } rcdu->suspend_state = state; return 0; }
static void nouveau_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; nouveau_pmops_resume(&pdev->dev); drm_kms_helper_poll_enable(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(dev); nouveau_switcheroo_optimus_dsm(); nouveau_pmops_suspend(&pdev->dev); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } }
/*
 * BSD-style resume entry point (device_t/newbus): thaw the GPU, restart
 * connector polling, then propagate resume to child devices on the bus.
 */
static int i915_resume(device_t kdev)
{
	struct drm_device *dev;
	int ret;

	dev = device_get_softc(kdev);
	DRM_DEBUG_KMS("starting resume\n");
#if 0
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);
#endif

	ret = i915_drm_thaw(dev);
	if (ret != 0)
		/* NOTE(review): negates the Linux-style negative errno —
		 * presumably to match the bus framework's positive error
		 * convention; confirm against callers. */
		return (-ret);

	drm_kms_helper_poll_enable(dev);

	ret = bus_generic_resume(kdev);
	DRM_DEBUG_KMS("finished resume %d\n", ret);
	return (ret);
}
/*
 * Core i915 resume path: restore GTT mappings and saved register state,
 * bring interrupts and the GEM/modeset hardware back up, then re-enable
 * hotplug processing, opregion, fbdev and connector polling.  The
 * ordering of these steps is deliberate — see the inline comments.
 */
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Suppress RPM wakeref asserts while we hold the device awake. */
	disable_rpm_wakeref_asserts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		/* GPU re-init failed: mark it wedged rather than failing resume. */
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	/* HPD IRQ setup must run under the irq_lock. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might loose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}