/* * arm_enter_idle_state - Programs CPU to enter the specified state * * @dev: cpuidle device * @drv: cpuidle driver * @idx: state index * * Called from the CPUidle framework to program the device to the * specified target state selected by the governor. */ static int arm_enter_idle_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { int ret = 0; struct timeval start_time, end_time; long usec_elapsed; if (cpuidle_debug) { do_gettimeofday(&start_time); } switch (idx) { case STANDBY: cpu_do_idle(); break; case L_SLEEP: light_sleep_en(); cpu_do_idle(); light_sleep_dis(); break; case CORE_PD: light_sleep_en(); cpu_pm_enter(); ret = cpu_suspend(idx); cpu_pm_exit(); light_sleep_dis(); break; case CLUSTER_PD: light_sleep_en(); cpu_pm_enter(); cpu_cluster_pm_enter(); ret = cpu_suspend(idx); cpu_cluster_pm_exit(); cpu_pm_exit(); light_sleep_dis(); break; #ifdef CONFIG_ARCH_SCX35LT8 case TOP_PD: light_sleep_en(); cpu_pm_enter(); cpu_cluster_pm_enter(); ret = cpu_suspend(idx); cpu_cluster_pm_exit(); cpu_pm_exit(); light_sleep_dis(); break; #endif default: cpu_do_idle(); WARN(1, "[CPUIDLE]: NO THIS IDLE LEVEL!!!"); } if (cpuidle_debug) { do_gettimeofday(&end_time); usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 + (end_time.tv_usec - start_time.tv_usec); pr_info("[CPUIDLE] Enter idle state: %d ,usec_elapsed = %ld \n", idx, usec_elapsed); } return ret ? -1 : idx; }
static int imx6sx_enter_wait(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { imx6q_set_lpm(WAIT_UNCLOCKED); switch (index) { case 1: cpu_do_idle(); break; case 2: imx6_enable_rbc(true); imx_gpc_set_arm_power_in_lpm(true); imx_set_cpu_jump(0, v7_cpu_resume); /* Need to notify there is a cpu pm operation. */ cpu_pm_enter(); cpu_cluster_pm_enter(); cpu_suspend(0, imx6sx_idle_finish); cpu_cluster_pm_exit(); cpu_pm_exit(); imx_gpc_set_arm_power_in_lpm(false); imx6_enable_rbc(false); break; default: break; } imx6q_set_lpm(WAIT_CLOCKED); return index; }
/*
 * tegra_idle_lp2_last - put the last active CPU (and hence the whole
 * cluster) into the LP2 power state.
 *
 * Programs the PMC for LP2 suspend, notifies cluster PM listeners,
 * saves CPU-complex state, and suspends via tegra_sleep_cpu. On resume
 * the CPU-complex state is restored and cluster PM listeners are
 * notified of the exit, in reverse order of entry.
 */
void tegra_idle_lp2_last(void)
{
	tegra_pmc_pm_set(TEGRA_SUSPEND_LP2);

	cpu_cluster_pm_enter();
	suspend_cpu_complex();

	/* First argument is the virt-to-phys offset used by the resume path. */
	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);

	restore_cpu_complex();
	cpu_cluster_pm_exit();
}
/*
 * highbank_pm_enter - system suspend entry point for Highbank.
 *
 * @state: target suspend state (unused here beyond the framework contract)
 *
 * Notifies CPU and cluster PM listeners, points the boot vector at
 * cpu_resume, and suspends through highbank_suspend_finish. After
 * resume, listeners are notified of exit, a secure-monitor call
 * (0x102, 0x1) is issued — presumably to restore firmware/secure state,
 * TODO confirm against platform firmware docs — and the SCU is
 * re-enabled if its base address was mapped. Always returns 0.
 */
static int highbank_pm_enter(suspend_state_t state)
{
	cpu_pm_enter();
	cpu_cluster_pm_enter(0);

	highbank_set_cpu_jump(0, cpu_resume);
	cpu_suspend(0, highbank_suspend_finish);

	cpu_cluster_pm_exit(0);
	cpu_pm_exit();

	highbank_smc1(0x102, 0x1);
	if (scu_base_addr)
		scu_enable(scu_base_addr);
	return 0;
}
/*
 * mmp_pm_powered_up - low-power-mode exit bookkeeping for an MMP CPU.
 *
 * Runs on a CPU that has just come back up. Identifies the cpu/cluster
 * from MPIDR, emits idle-exit statistics and trace events, then — under
 * the mmp_lpm_lock with IRQs off — restores saved wakeup configuration
 * if the whole cluster had been idle, re-notifies cluster PM listeners
 * when the GIC distributor was found disabled (i.e. the MP subsystem
 * really lost power), and resets this CPU's use-count/LPM bookkeeping.
 */
static void mmp_pm_powered_up(void)
{
	int mpidr, cpu, cluster;
	unsigned long flags;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);

	/* Account the idle-exit in the DC-stat framework. */
	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);
#ifdef CONFIG_VOLDC_STAT
	vol_dcstat_event(VLSTAT_LPM_EXIT, 0, 0);
	vol_ledstatus_event(MAX_LPM_INDEX);
#endif
	trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster);

	/* Serialize against other CPUs entering/leaving LPM. */
	local_irq_save(flags);
	arch_spin_lock(&mmp_lpm_lock);
	if (cluster_is_idle(cluster)) {
		/* Restore the wakeup sources saved on the way down. */
		if (mmp_wake_saved && mmp_idle->ops->restore_wakeup) {
			mmp_wake_saved = 0;
			mmp_idle->ops->restore_wakeup();
		}
		/* If hardware really shutdown MP subsystem */
		if (!(readl_relaxed(regs_addr_get_va(REGS_ADDR_GIC_DIST)
				    + GIC_DIST_CTRL) & 0x1)) {
			pr_debug("%s: cpu%u: cluster%u is up!\n",
				 __func__, cpu, cluster);
			cpu_cluster_pm_exit();
		}
	}

	/* Mark this CPU as in use and clear its pending LPM request. */
	if (!mmp_pm_use_count[cluster][cpu])
		mmp_pm_use_count[cluster][cpu] = 1;
	mmp_enter_lpm[cluster][cpu] = 0;
	if (mmp_idle->ops->clr_pmu)
		mmp_idle->ops->clr_pmu(cpu);
	arch_spin_unlock(&mmp_lpm_lock);
	local_irq_restore(flags);
}
/*
 * cpu_pm_resume - notify PM listeners on resume, undoing the suspend-side
 * cpu_pm_enter()/cpu_cluster_pm_enter() calls in reverse order
 * (cluster exit first, then CPU exit).
 */
static void cpu_pm_resume(void)
{
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}
/*
 * cpu_pm_resume - notify PM listeners on resume and clear the
 * from_suspend flag, undoing the suspend-side enter calls in reverse
 * order (cluster exit first, then CPU exit).
 */
static void cpu_pm_resume(void)
{
	/* Resume path reached: we are no longer coming out of suspend. */
	from_suspend = false;
	cpu_cluster_pm_exit(0);
	cpu_pm_exit();
}