/*
 * Flush buffered console output before a machine restart.
 *
 * If nobody holds the console lock, a trylock/unlock pair is enough to
 * drain the log buffer.  Otherwise wait briefly for the holder, then
 * bust the lock so the final messages make it out before reset.
 */
void arm_machine_flush_console(void)
{
	printk("\n");
	pr_emerg("Restarting %s\n", linux_banner);

	/* Fast path: console lock free, drain and leave. */
	if (console_trylock()) {
		console_unlock();
		return;
	}

	/* Give the current holder a moment to finish printing. */
	mdelay(50);

	local_irq_disable();
	if (console_trylock())
		pr_emerg("arm_restart: Console was locked!\n");
	else
		pr_emerg("arm_restart: Console was locked! Busting\n");
	/* Unconditional unlock: busts the lock if trylock failed. */
	console_unlock();
}
/*
 * Second half of i915 resume: restore saved register state and bring
 * the GPU back up.  Returns 0 on success or the error code from
 * i915_gem_init_hw().
 */
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		/* Mark GEM as live again before re-initialising the rings. */
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might loose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		/* Defer fbdev resume to a worker rather than block here. */
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);
	return error;
}
/*
 * Drain the console before restart, at most once per boot
 * (console_flushed latches the first call).  Busts a stuck
 * console lock after a short grace period so the last messages
 * are not lost.
 */
static void tegra_pm_flush_console(void)
{
	/* Only flush once. */
	if (console_flushed)
		return;
	console_flushed = true;

	pr_emerg("Restarting %s\n", linux_banner);

	/* Lock free?  Then the unlock drains the buffer and we're done. */
	if (console_trylock()) {
		console_unlock();
		return;
	}

	/* Let the current lock holder finish its output. */
	mdelay(50);

	local_irq_disable();
	if (console_trylock())
		pr_emerg("%s: Console was locked!\n", __func__);
	else
		pr_emerg("%s: Console was locked! Busting\n", __func__);
	/* Force the unlock even if the trylock failed. */
	console_unlock();
}
/*
 * i915 thaw path (NetBSD-aware variant): restore register state and
 * re-initialise the GPU after resume.  Returns 0 on success or the
 * error from i915_gem_init_hw().
 */
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		/* Mark GEM live again before ring re-init. */
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		/* NOTE(review): IRQs installed after HW state setup here,
		 * unlike other thaw variants — confirm intentional. */
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

#ifndef __NetBSD__		/* XXX fb */
	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}
#endif

	return error;
}
/*
 * One-shot console drain before restart.  Skips all work when the
 * console lock is not held; otherwise waits briefly and then busts
 * the lock so pending output reaches the console.
 */
static void flush_console(void)
{
	/* Latch: run at most once. */
	if (console_flushed)
		return;
	console_flushed = true;

	printk("\n");
	pr_emerg("Restarting %s\n", linux_banner);

	/* Nobody holds the lock — nothing to bust. */
	if (!is_console_locked())
		return;

	/* Grace period for the current holder. */
	mdelay(50);

	local_irq_disable();
	if (console_trylock())
		pr_emerg("flush_console: console was locked!\n");
	else
		pr_emerg("flush_console: console was locked! busting!\n");
	/* Unlock regardless; this busts a stuck lock. */
	console_unlock();
}
/*
 * Put the OMAP2 system into full retention: gate the oscillator
 * reference, clear stale wake-up events, program the MPU power
 * domain for retention, quiesce the UARTs/console, and jump to the
 * SRAM suspend code.  On wake-up the reverse sequence runs and
 * wake-up event status is cleared again.
 */
static void omap2_enter_full_retention(void)
{
	u32 l;
	struct timespec ts_preidle, ts_postidle, ts_idle;

	/* There is 1 reference hold for all children of the oscillator
	 * clock, the following will remove it. If no one else uses the
	 * oscillator itself it will be disabled if/when we enter retention
	 * mode. */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	/* REVISIT: These write to reserved bits? */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround to kill USB */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	if (omap2_pm_debug) {
		omap2_pm_dump(0, 0, 0);
		getnstimeofday(&ts_preidle);
	}

	/* One last check for pending IRQs to avoid extra latency due
	 * to sleeping unnecessarily. */
	if (omap_irq_pending())
		goto no_sleep;

	/* Block console output in case it is on one of the OMAP UARTs */
	if (!is_suspending())
		if (!console_trylock())
			goto no_sleep;

	omap_uart_prepare_idle(0);
	omap_uart_prepare_idle(1);
	omap_uart_prepare_idle(2);

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

	/* UARTs resume in reverse order of idle preparation. */
	omap_uart_resume_idle(2);
	omap_uart_resume_idle(1);
	omap_uart_resume_idle(0);

	if (!is_suspending())
		console_unlock();

no_sleep:
	if (omap2_pm_debug) {
		unsigned long long tmp;

		getnstimeofday(&ts_postidle);
		ts_idle = timespec_sub(ts_postidle, ts_preidle);
		/* NOTE(review): multiplying a nanosecond value by
		 * NSEC_PER_USEC looks suspicious — confirm the unit
		 * omap2_pm_dump() expects. */
		tmp = timespec_to_ns(&ts_idle) * NSEC_PER_USEC;
		omap2_pm_dump(0, 1, tmp);
	}
	omap2_gpio_resume_after_idle();

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
					OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
}
/*
 * Console write callback for the OMAP serial console: emit `count`
 * bytes of `s` with interrupts masked at the UART, then restore the
 * saved interrupt enable register.  Lock ordering is delicate:
 * optional console lock -> local IRQs off -> port spinlock.
 */
static void serial_omap_console_write(struct console *co, const char *s,
				      unsigned int count)
{
	struct uart_omap_port *up = serial_omap_console_ports[co->index];
	unsigned long flags;
	unsigned int ier;
	int console_lock = 0, locked = 1;

	if (console_trylock())
		console_lock = 1;

	/*
	 * If console_lock is not available and we are in suspending
	 * state then we can avoid the console usage scenario
	 * as this may introduce recursive prints.
	 * Basically this scenario occurs during boot while
	 * printing debug bootlogs.
	 */
	if (!console_lock &&
	    up->pdev->dev.power.runtime_status == RPM_SUSPENDING)
		return;

	local_irq_save(flags);
	/* sysrq/oops callers may already hold the port lock. */
	if (up->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&up->port.lock);
	else
		spin_lock(&up->port.lock);

	serial_omap_port_enable(up);

	/*
	 * First save the IER then disable the interrupts
	 */
	ier = serial_in(up, UART_IER);
	serial_out(up, UART_IER, 0);

	uart_console_write(&up->port, s, count, serial_omap_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the IER
	 */
	wait_for_xmitr(up);
	serial_out(up, UART_IER, ier);

	/*
	 * The receive handling will happen properly because the
	 * receive ready bit will still be set; it is not cleared
	 * on read. However, modem control will not, we must
	 * call it if we have saved something in the saved flags
	 * while processing with interrupts off.
	 */
	if (up->msr_saved_flags)
		check_modem_status(up);

	if (console_lock)
		console_unlock();

	serial_omap_port_disable(up);
	if (locked)
		spin_unlock(&up->port.lock);
	local_irq_restore(flags);
}
/*
 * i915 thaw: sanitize the uncore, optionally restore GTT mappings,
 * restore register state, and bring up power domains, GEM, interrupts
 * and the modeset state.  Returns 0 or the error from
 * i915_gem_init_hw().
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	/* GTT mappings are only restored when the caller asks for it
	 * (e.g. after hibernation, not after a plain suspend). */
	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		drm_mode_config_reset(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might loose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
	 * expected level. */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_runtime_pm_put(dev_priv);
	return error;
}
/*
 * i915 thaw (later variant): restore GTT mappings and register state,
 * re-init the GPU — marking it wedged on failure instead of erroring
 * out — then restore interrupts, hotplug, DP MST and modeset state.
 * Always returns 0; a GPU init failure is reported via the wedged
 * flag in gpu_error.reset_counter.
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		/* On failure the GPU is declared wedged rather than
		 * failing the whole resume. */
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		intel_runtime_pm_restore_interrupts(dev);

		intel_modeset_init_hw(dev);

		/* Re-arm hotplug IRQ generation under the irq lock. */
		{
			unsigned long irqflags;

			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		}

		intel_dp_mst_resume(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might loose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	return 0;
}
static void vigs_crtc_dpms(struct drm_crtc *crtc, int mode) { struct vigs_crtc *vigs_crtc = crtc_to_vigs_crtc(crtc); struct vigs_device *vigs_dev = crtc->dev->dev_private; int blank, i; struct fb_event event; DRM_DEBUG_KMS("enter: fb_blank = %d, mode = %d\n", vigs_crtc->in_fb_blank, mode); if (vigs_crtc->in_fb_blank) { return; } switch (mode) { case DRM_MODE_DPMS_ON: blank = FB_BLANK_UNBLANK; break; case DRM_MODE_DPMS_STANDBY: blank = FB_BLANK_NORMAL; break; case DRM_MODE_DPMS_SUSPEND: blank = FB_BLANK_VSYNC_SUSPEND; break; case DRM_MODE_DPMS_OFF: blank = FB_BLANK_POWERDOWN; break; default: DRM_ERROR("unspecified mode %d\n", mode); return; } event.info = vigs_dev->fbdev->base.fbdev; event.data = ␣ /* * We can't just 'console_lock' here, since * this may result in deadlock: * fb func: * console_lock(); * mutex_lock(&dev->mode_config.mutex); * DRM func: * mutex_lock(&dev->mode_config.mutex); * console_lock(); * * So we just try to acquire it for 5 times with a delay * and then just skip. * * This code is here only because pm is currently done via * backlight which is bad, we need to make proper pm via * kernel support. */ for (i = 0; i < 5; ++i) { if (console_trylock()) { fb_notifier_call_chain(FB_EVENT_BLANK, &event); console_unlock(); return; } msleep(100); DRM_ERROR("unable to lock console, trying again\n"); } DRM_ERROR("unable to lock console, skipping fb call chain\n"); }