/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();		/* WFI: sleeps until an interrupt is pending */
	local_irq_enable();	/* take the pending wakeup interrupt now */
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
/*
 * Configure the S5P64X0 power-config register so that the following WFI
 * enters the idle power mode, then execute WFI.
 */
static void s5p64x0_idle(void)
{
	unsigned long cfg;

	/* Replace the 2-bit mode field at bits [6:5] with 0x1. */
	cfg = __raw_readl(S5P64X0_PWR_CFG);
	cfg = (cfg & ~(0x3 << 5)) | (0x1 << 5);
	__raw_writel(cfg, S5P64X0_PWR_CFG);

	cpu_do_idle();
}
/* Actual code that puts the SoC in different idle states */
static int xilinx_enter_idle(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	struct timeval before, after;
	int idle_time;

	/* IRQs stay off so the measured interval covers only the idle time. */
	local_irq_disable();
	do_gettimeofday(&before);

	if (index == 0)
		/* Wait for interrupt state */
		cpu_do_idle();
	else if (index == 1) {
		unsigned int cpu_id = smp_processor_id();

		/* Local timer may stop in this state: switch to broadcast. */
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

		/* Devices must be stopped here */
		cpu_pm_enter();

		/* Add code for DDR self refresh start */
		cpu_do_idle();
		/*cpu_suspend(foo, bar);*/

		/* Add code for DDR self refresh stop */
		cpu_pm_exit();

		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
	}

	do_gettimeofday(&after);
	local_irq_enable();

	/* Residency in microseconds, reported back to the cpuidle governor. */
	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
		    (after.tv_usec - before.tv_usec);

	dev->last_residency = idle_time;
	return index;
}
/* Enter idle on RK3288: tweak a CRU clock-select register around WFI. */
static int rk3288_cpuidle_enter(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int index)
{
	void *sel = RK_CRU_VIRT + RK3288_CRU_CLKSELS_CON(36);
	u32 con = readl_relaxed(sel);	/* save current CLKSEL_CON36 value */
	u32 cpu = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 0);

	/*
	 * NOTE(review): each cpu appears to own a 4-bit field (cpu << 2);
	 * the 0x7000x upper half looks like a write-enable mask typical of
	 * Rockchip CRU registers — confirm against the RK3288 TRM.
	 */
	writel_relaxed(0x70007 << (cpu << 2), sel);
	cpu_do_idle();
	/* Restore the saved field contents for this cpu after wakeup. */
	writel_relaxed((0x70000 << (cpu << 2)) | con, sel);
	dsb();	/* ensure the restore reaches the CRU before returning */
	return index;
}
/**
 * zynq_cpu_die - Let a CPU core die
 * @cpu: Dying CPU
 *
 * Platform-specific code to shutdown a CPU.
 * Called with IRQs disabled on the dying CPU.
 */
static void zynq_cpu_die(unsigned int cpu)
{
	zynq_slcr_cpu_state_write(cpu, true);

	/*
	 * No power-control hardware exists on this platform, so the best
	 * we can do is park the core in WFI forever.  The caller has
	 * already disabled interrupts, so nothing can wake us into
	 * running kernel code.
	 */
	while (1)
		cpu_do_idle();
}
/*
 * Enter system suspend.  Only mem-sleep is supported, and it is
 * implemented as a plain WFI.  Returns 0 on success, -EINVAL for any
 * unsupported state.
 */
static int mxs_suspend_enter(suspend_state_t state)
{
	if (state != PM_SUSPEND_MEM)
		return -EINVAL;

	cpu_do_idle();
	return 0;
}
/*
 * platform-specific code to shutdown a CPU
 *
 */
void __ref highbank_cpu_die(unsigned int cpu)
{
	/* Push dirty cache lines out before the core loses coherency. */
	flush_cache_all();

	/* Set the address the core will jump to on a later hotplug-in. */
	highbank_set_cpu_jump(cpu, secondary_startup);
	/* Arm the power controller to cut power once the core hits WFI. */
	highbank_set_core_pwr();

	cpu_do_idle();

	/* We should never return from idle */
	panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
}
/*!
 * Put the CPU into idle ("wait") mode.  Called by default_idle() in
 * process.c.  Skipped entirely while a JTAG debugger is attached, since
 * gating the clocks would interfere with debugging.
 */
void arch_idle(void)
{
	if (mxc_jtag_enabled)
		return;

	/* Select the "wait" low-power mode, then execute WFI. */
	mxc_cpu_lp_set(WAIT_UNCLOCKED);
	cpu_do_idle();
}
/*
 * platform-specific code to shutdown a CPU
 *
 */
void platform_cpu_die(unsigned int cpu)
{
	/* Push dirty cache lines out before the core is powered off. */
	flush_cache_all();
	/* Set the address the core will jump to on a later hotplug-in. */
	highbank_set_cpu_jump(cpu, secondary_startup);
	/* Ask the SCU to power this core off once it executes WFI. */
	scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
	cpu_do_idle();

	/* We should never return from idle */
	panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
}
/*
 * Shallowest MT6735 cpuidle state ("rgidle"): a plain WFI.
 * Returns the entered state index as the cpuidle core expects.
 */
static int mt6735_rgidle_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	/*
	 * pr_warn_ratelimited() replaces the deprecated
	 * printk_ratelimit() + printk() pair: the rate-limit state is
	 * per-callsite rather than shared globally with every other
	 * printk_ratelimit() user (checkpatch flags the old form).
	 */
	pr_warn_ratelimited("MT6735 cpuidle rgidle\n");

	cpu_do_idle();

	/* Book-keeping: count entries into this state for this CPU. */
	idle_cnt_inc(IDLE_TYPE_RG, smp_processor_id());

	return index;
}
/*
 * MT6735 screen-on idle (SODI) cpuidle state: a plain WFI.
 * Returns the entered state index as the cpuidle core expects.
 */
static int mt6735_soidle_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	/*
	 * pr_warn_ratelimited() replaces the deprecated
	 * printk_ratelimit() + printk() pair: the rate-limit state is
	 * per-callsite rather than shared globally with every other
	 * printk_ratelimit() user (checkpatch flags the old form).
	 */
	pr_warn_ratelimited("MT6735 cpuidle SODI\n");

	cpu_do_idle();

	/* Book-keeping: SODI entries are counted against slot 0. */
	idle_cnt_inc(IDLE_TYPE_SO, 0);

	return index;
}
/* Final step of suspend: runs with caches about to go incoherent. */
static int highbank_suspend_finish(unsigned long val)
{
	/* The outer (L2) cache must be clean and off before power drops. */
	outer_flush_all();
	outer_disable();

	/* Arm the power controller; the WFI below actually enters suspend. */
	highbank_set_pwr_suspend();
	cpu_do_idle();

	/* We resumed instead of powering off: withdraw the power request. */
	highbank_clear_pwr_request();
	return 0;
}
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
static void socfpga_cpu_die(unsigned int cpu)
{
	/* Flush the L1 data cache. */
	flush_cache_all();

	/* This will put CPU #1 into reset.*/
	/* 0x10 is the MPU module-reset register offset in the reset manager. */
	__raw_writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr + 0x10);

	cpu_do_idle();

	/* We should have never returned from idle */
	panic("cpu %d unexpectedly exit from shutdown\n", cpu);
}
/*
 * Simple cpuidle state: execute WFI with FIQs masked, so only an IRQ
 * can wake the core.  Returns the state index actually entered.
 */
int meson_enter_idle_simple(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	local_fiq_disable();
	cpu_do_idle();
	local_fiq_enable();

	return index;
}
/*
 * Program PWRCFG so that the STANDBYWFI signal means "idle" rather than
 * a deeper sleep, then execute WFI.
 */
static void s3c2412_idle(void)
{
	unsigned long pwrcfg;

	/* ensure our idle mode is to go to idle */
	pwrcfg = __raw_readl(S3C2412_PWRCFG);
	pwrcfg = (pwrcfg & ~S3C2412_PWRCFG_STANDBYWFI_MASK) |
		 S3C2412_PWRCFG_STANDBYWFI_IDLE;
	__raw_writel(pwrcfg, S3C2412_PWRCFG);

	cpu_do_idle();
}
/*
 * Set the S3C power-config register so the WFI instruction enters the
 * low-power sleep mode, then execute WFI.
 */
static void s5p6440_idle(void)
{
	unsigned long cfg;

	/* Replace the 2-bit field at bits [6:5] with 0x1. */
	cfg = __raw_readl(S3C_PWR_CFG);
	cfg = (cfg & ~(0x3 << 5)) | (0x1 << 5);
	__raw_writel(cfg, S3C_PWR_CFG);

	cpu_do_idle();
}
/*
 * Idle handler: if no task is waiting to run, program PWR_CFG for idle
 * mode and execute WFI.  Interrupts are re-enabled on the way out in
 * every case.
 */
static void s5p64x0_idle(void)
{
	unsigned long cfg;

	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/* Replace the 2-bit mode field at bits [6:5] with 0x1. */
	cfg = __raw_readl(S5P64X0_PWR_CFG);
	cfg = (cfg & ~(0x3 << 5)) | (0x1 << 5);
	__raw_writel(cfg, S5P64X0_PWR_CFG);

	cpu_do_idle();

	local_irq_enable();
}
/*!
 * Put the CPU into idle mode.  Called by default_idle() in process.c.
 * Does nothing unless WFI has been enabled in the AVIC vector register.
 */
void arch_idle(void)
{
	unsigned long ctrl;

	if ((__raw_readl(AVIC_VECTOR) & MXC_WFI_ENABLE) == 0)
		return;

	/* Clear LPMD1 and set LPMD0 in CRM_CONTROL, then execute WFI. */
	ctrl = (__raw_readl(CRM_CONTROL) & ~LPMD1) | LPMD0;
	__raw_writel(ctrl, CRM_CONTROL);
	cpu_do_idle();
}
/*
 * i.MX5 idle handler: the gpc_dvfs clock must be running for the SoC to
 * enter the low-power state, so look it up lazily on first use.
 */
static void imx5_idle(void)
{
	if (gpc_dvfs_clk == NULL) {
		gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
		if (IS_ERR(gpc_dvfs_clk)) {
			/*
			 * Reset to NULL so the lookup is retried next time.
			 * Without this, the cached ERR_PTR passes the NULL
			 * check above on the next call and gets handed to
			 * clk_enable(), which dereferences it.
			 */
			gpc_dvfs_clk = NULL;
			return;
		}
	}
	clk_enable(gpc_dvfs_clk);
	mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
	/* Only execute WFI when no wakeup source is already pending. */
	if (!tzic_enable_wake())
		cpu_do_idle();
	clk_disable(gpc_dvfs_clk);
}
/* Enter the i.MX6SL WAIT cpuidle state (unclocked ARM core). */
static int imx6sl_enter_wait(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	imx6q_set_lpm(WAIT_UNCLOCKED);
	/*
	 * Software workaround for ERR005311, see function
	 * description for details.
	 */
	imx6sl_set_wait_clk(true);
	cpu_do_idle();
	imx6sl_set_wait_clk(false);	/* undo the errata workaround */
	imx6q_set_lpm(WAIT_CLOCKED);	/* back to the default LPM setting */

	return index;
}
/*
 * Program IDLE_CFG and PWR_CFG so that the following WFI enters the
 * S5P idle power mode, then execute WFI.
 */
static void s5p_enter_idle(void)
{
	unsigned long reg;

	/* Select the idle configuration in the two 2-bit fields at the
	 * top of IDLE_CFG and clear cfg bit 0. */
	reg = __raw_readl(S5P_IDLE_CFG);
	reg &= ~((3 << 30) | (3 << 28) | (1 << 0));
	reg |= (2 << 30) | (2 << 28);
	__raw_writel(reg, S5P_IDLE_CFG);

	/* Clear the WFI-behaviour field in PWR_CFG. */
	reg = __raw_readl(S5P_PWR_CFG);
	reg &= S5P_CFG_WFI_CLEAN;
	__raw_writel(reg, S5P_PWR_CFG);

	cpu_do_idle();
}
/*
 * Enter the i.MX6Q WAIT state.  Only the last cpu to go idle (the
 * "master") may switch the SoC into WAIT_UNCLOCKED; every other cpu
 * just executes a plain WFI.
 */
static int imx6q_enter_wait(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	if (atomic_inc_return(&master) == num_online_cpus()) {
		/*
		 * With this lock, we prevent other cpu to exit and enter
		 * this function again and become the master.
		 */
		if (!spin_trylock(&master_lock))
			goto idle;
		imx6_set_lpm(WAIT_UNCLOCKED);
		cpu_do_idle();
		imx6_set_lpm(WAIT_CLOCKED);
		spin_unlock(&master_lock);
		goto done;
	}

idle:
	cpu_do_idle();
done:
	/* No longer idle: let another cpu become the master later. */
	atomic_dec(&master);

	return index;
}
static void gemini_idle(void)
{
	/*
	 * Because of broken hardware we have to enable interrupts or the CPU
	 * will never wake up... Actually it is not very good to enable
	 * interrupts first since the scheduler can miss a tick, but there is
	 * no other way around this. Platforms that need it for power saving
	 * should enable it in init code, since by default it is
	 * disabled.
	 */

	/* FIXME: Enabling interrupts here is racy! */
	local_irq_enable();
	cpu_do_idle();
}
/*
 * Enter system suspend on i.MX25.  Only mem-sleep is supported and it
 * is implemented as a plain WFI; a no-op when CONFIG_PM is disabled.
 */
static int imx25_suspend_enter(suspend_state_t state)
{
	if (!IS_ENABLED(CONFIG_PM))
		return 0;

	if (state != PM_SUSPEND_MEM)
		return -EINVAL;

	cpu_do_idle();
	return 0;
}
/*!
 * @file mach-mx35/pm.c
 * @brief This file contains suspend operations
 *
 * @ingroup MSL_MX35
 */
static int mx35_suspend_enter(suspend_state_t state)
{
	/* Pick the low-power mode matching the requested suspend state. */
	if (state == PM_SUSPEND_MEM)
		mxc_cpu_lp_set(STOP_POWER_OFF);
	else if (state == PM_SUSPEND_STANDBY)
		mxc_cpu_lp_set(STOP_POWER_ON);
	else
		return -EINVAL;

	/* Executing CP15 (Wait-for-Interrupt) Instruction */
	cpu_do_idle();
	return 0;
}
/*
 * cpuidle entry point: configure PWR_CFG so that WFI enters "idle"
 * mode, then execute WFI.  Returns the state index actually entered.
 */
static int s3c64xx_enter_idle(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv,
			      int index)
{
	unsigned long cfg;

	/* Setup PWRCFG to enter idle mode */
	cfg = __raw_readl(S3C64XX_PWR_CFG);
	cfg = (cfg & ~S3C64XX_PWRCFG_CFG_WFI_MASK) |
	      S3C64XX_PWRCFG_CFG_WFI_IDLE;
	__raw_writel(cfg, S3C64XX_PWR_CFG);

	cpu_do_idle();

	return index;
}
/*
 * Program PWR_CFG and OTHERS so the following WFI enters the idle
 * power mode, then execute WFI.
 */
static void s5p6442_idle(void)
{
	unsigned long reg;

	/* Clear the WFI-behaviour field and select "idle". */
	reg = __raw_readl(S5P_PWR_CFG);
	reg = (reg & S5P_CFG_WFI_CLEAN) | S5P_CFG_WFI_IDLE;
	__raw_writel(reg, S5P_PWR_CFG);

	/* Set SYSC_INTOFF in the OTHERS register. */
	reg = __raw_readl(S5P_OTHERS);
	reg |= S5P_OTHER_SYSC_INTOFF;
	__raw_writel(reg, S5P_OTHERS);

	cpu_do_idle();
}
/*
 * Suspend the Zynq: if the suspend code in OCM and the DDR controller
 * are both mapped, run the OCM-resident routine that bypasses the DDR
 * PLL while DRAM sits in self-refresh; otherwise fall back to a plain
 * WFI with DRAM still clocked.
 */
static int zynq_pm_suspend(unsigned long arg)
{
	u32 reg;
	/* The suspend routine was previously copied into OCM. */
	int (*zynq_suspend_ptr)(void __iomem *, void __iomem *) =
		(__force void *)ocm_base;
	int do_ddrpll_bypass = 1;

	/* Topswitch clock stop disable */
	zynq_clk_topswitch_disable();

	if (!ocm_base || !ddrc_base) {
		/* Without both mappings the deep path is impossible. */
		do_ddrpll_bypass = 0;
	} else {
		/* enable DDRC self-refresh mode */
		reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
		reg |= DDRC_SELFREFRESH_MASK;
		writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
	}

	if (do_ddrpll_bypass) {
		/*
		 * Going this way will turn off DDR related clocks and the DDR
		 * PLL. I.e. we might break sub-systems relying on any of
		 * these clocks. And even worse: if there are any other
		 * masters in the system (e.g. in the PL) accessing DDR they
		 * are screwed.
		 */
		flush_cache_all();
		if (zynq_suspend_ptr(ddrc_base, zynq_slcr_base))
			pr_warn("DDR self refresh failed.\n");
	} else {
		WARN_ONCE(1, "DRAM self-refresh not available\n");
		cpu_do_idle();
	}

	/* disable DDRC self-refresh mode */
	if (do_ddrpll_bypass) {
		reg = readl(ddrc_base + DDRC_CTRL_REG1_OFFS);
		reg &= ~DDRC_SELFREFRESH_MASK;
		writel(reg, ddrc_base + DDRC_CTRL_REG1_OFFS);
	}

	/* Topswitch clock stop enable */
	zynq_clk_topswitch_enable();

	return 0;
}
/*
 * Enter suspend on the i.MX31: "mem" maps to State Retention mode
 * (plain WFI after CCMR setup); "standby" maps to Deep Sleep Mode with
 * resume via the low-level mxc_pm_arch_entry() path.
 */
static int mx31_suspend_enter(suspend_state_t state)
{
	unsigned long reg;

	/* Enable Well Bias and set VSTBY
	 * VSTBY pin will be asserted during SR mode. This asks the
	 * PM IC to set the core voltage to the standby voltage
	 * Must clear the MXC_CCM_CCMR_SBYCS bit as well?? */
	reg = __raw_readl(MXC_CCM_CCMR);
	reg &= ~MXC_CCM_CCMR_LPM_MASK;
	reg |= MXC_CCM_CCMR_WBEN | MXC_CCM_CCMR_VSTBY | MXC_CCM_CCMR_SBYCS;

	switch (state) {
	case PM_SUSPEND_MEM:
		/* State Retention mode */
		reg |= 2 << MXC_CCM_CCMR_LPM_OFFSET;
		__raw_writel(reg, MXC_CCM_CCMR);

		/* Executing CP15 (Wait-for-Interrupt) Instruction */
		cpu_do_idle();
		break;
	case PM_SUSPEND_STANDBY:
		/* Deep Sleep Mode */
		reg |= 3 << MXC_CCM_CCMR_LPM_OFFSET;
		__raw_writel(reg, MXC_CCM_CCMR);

		/* wake up by keypad */
		/* Clear bit 18 in the wakeup-interrupt mask register. */
		reg = __raw_readl(MXC_CCM_WIMR);
		reg &= ~(1 << 18);
		__raw_writel(reg, MXC_CCM_WIMR);

		/* Caches must be clean and L2 off before power is dropped. */
		flush_cache_all();
		l2x0_disable();
		mxc_pm_arch_entry(MX31_IO_ADDRESS(MX31_NFC_BASE_ADDR), 2048);
		printk(KERN_INFO "Resume from DSM\n");
		/* Undo the pre-sleep teardown after resume. */
		l2x0_enable();
		mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*!
 * This function puts the CPU into idle mode. It is called by default_idle()
 * in process.c file.
 */
void arch_idle(void)
{
	/*
	 * This should do all the clock switching
	 * and wait for interrupt tricks.
	 */
	/* Skip idle while a JTAG debugger is attached: clock gating would
	 * interfere with debugging. */
	if (!mxc_jtag_enabled) {
#ifdef CONFIG_MX35_DOZE_DURING_IDLE
		/*set as Doze mode */
		mxc_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
#else
		/* set as Wait mode */
		mxc_cpu_lp_set(WAIT_UNCLOCKED);
#endif
		cpu_do_idle();
	}
}