/*
 * arm_enter_idle_state - Programs CPU to enter the specified state
 *
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int arm_enter_idle_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	int ret = 0;
	struct timeval start_time, end_time;
	long usec_elapsed;

	if (cpuidle_debug)
		do_gettimeofday(&start_time);

	switch (idx) {
	case STANDBY:
		cpu_do_idle();
		break;
	case L_SLEEP:
		light_sleep_en();
		cpu_do_idle();
		light_sleep_dis();
		break;
	case CORE_PD:
		light_sleep_en();
		cpu_pm_enter();
		ret = cpu_suspend(idx);
		cpu_pm_exit();
		light_sleep_dis();
		break;
	case CLUSTER_PD:
		light_sleep_en();
		cpu_pm_enter();
		cpu_cluster_pm_enter();
		ret = cpu_suspend(idx);
		cpu_cluster_pm_exit();
		cpu_pm_exit();
		light_sleep_dis();
		break;
#ifdef CONFIG_ARCH_SCX35LT8
	case TOP_PD:
		light_sleep_en();
		cpu_pm_enter();
		cpu_cluster_pm_enter();
		ret = cpu_suspend(idx);
		cpu_cluster_pm_exit();
		cpu_pm_exit();
		light_sleep_dis();
		break;
#endif
	default:
		cpu_do_idle();
		WARN(1, "[CPUIDLE]: invalid idle state index %d\n", idx);
	}

	if (cpuidle_debug) {
		do_gettimeofday(&end_time);
		usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
			       (end_time.tv_usec - start_time.tv_usec);
		pr_info("[CPUIDLE] entered idle state %d, usec_elapsed = %ld\n",
			idx, usec_elapsed);
	}

	return ret ? -1 : idx;
}
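/*
 * Illustrative sketch, not from the driver above: how a state-enter
 * callback such as arm_enter_idle_state() is typically wired into a
 * cpuidle driver's state table and registered with the core. All names
 * and latency/residency values below are made-up placeholders; the real
 * values are platform-specific.
 */
static struct cpuidle_driver example_idle_driver = {
	.name		= "example_idle",	/* hypothetical */
	.owner		= THIS_MODULE,
	.states[0] = {
		.enter			= arm_enter_idle_state,
		.exit_latency		= 1,	/* us, placeholder */
		.target_residency	= 1,	/* us, placeholder */
		.name			= "WFI",
		.desc			= "ARM WFI",
	},
	.states[1] = {
		.enter			= arm_enter_idle_state,
		.exit_latency		= 300,	/* us, placeholder */
		.target_residency	= 1000,	/* us, placeholder */
		.name			= "CORE_PD",
		.desc			= "core power down",
	},
	.state_count = 2,
};

static int __init example_idle_init(void)
{
	/* second argument is the coupled-cpus mask; NULL when unused */
	return cpuidle_register(&example_idle_driver, NULL);
}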
static int tegra114_idle_power_down(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv,
				    int index)
{
	local_fiq_disable();

	tegra_set_cpu_in_lp2();
	cpu_pm_enter();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);

	call_firmware_op(prepare_idle);

	/* Do suspend by ourselves if the firmware does not implement it */
	if (call_firmware_op(do_idle) == -ENOSYS)
		cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	cpu_pm_exit();
	tegra_clear_cpu_in_lp2();

	local_fiq_enable();

	return index;
}
static int __cpuinit tegra30_idle_lp2(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv,
				      int index)
{
	u32 cpu = is_smp() ? cpu_logical_map(dev->cpu) : dev->cpu;
	bool entered_lp2 = false;
	bool last_cpu;

	local_fiq_disable();

	last_cpu = tegra_set_cpu_in_lp2(cpu);
	cpu_pm_enter();

	if (cpu == 0) {
		if (last_cpu)
			entered_lp2 = tegra30_cpu_cluster_power_down(dev, drv,
								     index);
		else
			cpu_do_idle();
	} else {
		entered_lp2 = tegra30_cpu_core_power_down(dev, drv, index);
	}

	cpu_pm_exit();
	tegra_clear_cpu_in_lp2(cpu);

	local_fiq_enable();

	smp_rmb();

	return entered_lp2 ? index : 0;
}
static int sunxi_cpu_power_down_c2state(struct cpuidle_device *dev,
					struct cpuidle_driver *drv,
					int index)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_pm_enter();
	/* cpu_cluster_pm_enter(); */

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	smp_wmb();

	cpu_suspend(CPUIDLE_FLAG_C2_STATE, sunxi_powerdown_c2_finisher);

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&sun8i_mcpm_lock);
	sun8i_cpu_use_count[cluster][cpu]++;
	sun8i_cluster_use_count[cluster]++;
	arch_spin_unlock(&sun8i_mcpm_lock);
	local_irq_enable();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	/* cpu_cluster_pm_exit(); */
	cpu_pm_exit();

	return index;
}
static int imx6sx_enter_wait(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	imx6q_set_lpm(WAIT_UNCLOCKED);

	switch (index) {
	case 1:
		cpu_do_idle();
		break;
	case 2:
		imx6_enable_rbc(true);
		imx_gpc_set_arm_power_in_lpm(true);
		imx_set_cpu_jump(0, v7_cpu_resume);
		/* Need to notify there is a cpu pm operation. */
		cpu_pm_enter();
		cpu_cluster_pm_enter();

		cpu_suspend(0, imx6sx_idle_finish);

		cpu_cluster_pm_exit();
		cpu_pm_exit();
		imx_gpc_set_arm_power_in_lpm(false);
		imx6_enable_rbc(false);
		break;
	default:
		break;
	}

	imx6q_set_lpm(WAIT_CLOCKED);

	return index;
}
static int msm_cpuidle_enter(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	int ret = 0;
	int i = 0;
	enum msm_pm_sleep_mode pm_mode;
	struct cpuidle_state_usage *st_usage = NULL;

#ifdef CONFIG_CPU_PM
	cpu_pm_enter();
#endif

	pm_mode = msm_pm_idle_prepare(dev, drv, index);
	dev->last_residency = msm_pm_idle_enter(pm_mode);
	for (i = 0; i < dev->state_count; i++) {
		st_usage = &dev->states_usage[i];
		if ((enum msm_pm_sleep_mode)cpuidle_get_statedata(st_usage)
		    == pm_mode) {
			ret = i;
			break;
		}
	}

#ifdef CONFIG_CPU_PM
	cpu_pm_exit();
#endif

	local_irq_enable();

	return ret;
}
/*
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int bl_enter_powerdown(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx)
{
	struct timespec ts_preidle, ts_postidle, ts_idle;
	int ret;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	BUG_ON(!irqs_disabled());

	cpu_pm_enter();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);

	ret = cpu_suspend((unsigned long)dev, bl_powerdown_finisher);
	if (ret)
		BUG();

	mcpm_cpu_powered_up();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	cpu_pm_exit();

	getnstimeofday(&ts_postidle);
	local_irq_enable();
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	dev->last_residency = ts_idle.tv_nsec / NSEC_PER_USEC +
			      ts_idle.tv_sec * USEC_PER_SEC;
	return idx;
}
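/*
 * Sketch only: the residency bookkeeping above, expressed with ktime
 * helpers instead of open-coded timespec arithmetic. bl_timed_idle() is
 * a hypothetical wrapper, assuming it is called with IRQs disabled just
 * like bl_enter_powerdown().
 */
static int bl_timed_idle(struct cpuidle_device *dev, int (*idle_fn)(void))
{
	ktime_t start = ktime_get();
	int ret = idle_fn();

	/* elapsed wall-clock time in microseconds */
	dev->last_residency = (int)ktime_to_us(ktime_sub(ktime_get(), start));
	return ret;
}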
static int msm_cpuidle_enter(struct cpuidle_device *dev,
			     struct cpuidle_state *state)
{
	int ret;
#ifdef CONFIG_MSM_SLEEP_STATS
	struct atomic_notifier_head *head =
		&__get_cpu_var(msm_cpuidle_notifiers);
#endif

	local_irq_disable();

#ifdef CONFIG_MSM_SLEEP_STATS
	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_ENTER, NULL);
#endif

#ifdef CONFIG_CPU_PM
	cpu_pm_enter();
#endif

	ret = msm_pm_idle_enter((enum msm_pm_sleep_mode)(state->driver_data));

#ifdef CONFIG_CPU_PM
	cpu_pm_exit();
#endif

#ifdef CONFIG_MSM_SLEEP_STATS
	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_EXIT, NULL);
#endif

	local_irq_enable();

	return ret;
}
static int hi6xxx_pm_enter(suspend_state_t state)
{
	pr_info("%s ++\n", __func__);

	cpu_pm_enter();

	/*
	 * setup_mm_for_reboot();
	 * gic_cpu_if_down();
	 * hisi_cluster_exit_coherence(0);
	 */

	hisi_set_acpu_subsys_powerdown_flag();
	cpu_suspend(3);
	hisi_clear_acpu_subsys_powerdown_flag();

	/*
	 * coherent_init();
	 * coherent_slave_port_config();
	 */

	cpu_pm_exit();

	g_pwcAcpuWakeFlagIcc = 1;
	g_pwcAcpuWakeFlagRfile = 1;

	pr_info("%s --\n", __func__);
	pwrctrl_mcu_debug_info_show();
	pwrctrl_ccpu_debug_info_show();

	return 0;
}
static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	cpu_pm_enter();
	cpu_suspend(0, calxeda_idle_finish);
	cpu_pm_exit();

	return index;
}
static int msm_cpuidle_enter(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	int ret = 0;
	int i = 0;
	enum msm_pm_sleep_mode pm_mode;
	struct cpuidle_state_usage *st_usage = NULL;

#ifdef CONFIG_MSM_SLEEP_STATS
	struct atomic_notifier_head *head =
		&__get_cpu_var(msm_cpuidle_notifiers);
#endif

	local_irq_disable();

#ifdef CONFIG_MSM_SLEEP_STATS
	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_ENTER, NULL);
#endif

#ifdef CONFIG_CPU_PM
	cpu_pm_enter();
#endif

	pm_mode = msm_pm_idle_prepare(dev, drv, index);
	dev->last_residency = msm_pm_idle_enter(pm_mode);

#ifdef CONFIG_SHSYS_CUST
	if (sh_pm_is_idle_pc())
		sh_pm_set_idle_pc_flg_off();
#endif

	for (i = 0; i < dev->state_count; i++) {
		st_usage = &dev->states_usage[i];
		if ((enum msm_pm_sleep_mode)cpuidle_get_statedata(st_usage)
		    == pm_mode) {
			ret = i;
			break;
		}
	}

#ifdef CONFIG_CPU_PM
	cpu_pm_exit();
#endif

#ifdef CONFIG_MSM_SLEEP_STATS
	atomic_notifier_call_chain(head, MSM_CPUIDLE_STATE_EXIT, NULL);
#endif

	local_irq_enable();

	return ret;
}
/**
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int bl_enter_powerdown(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx)
{
	cpu_pm_enter();

	cpu_suspend(0, bl_powerdown_finisher);

	/* signals the MCPM core that CPU is out of low power state */
	mcpm_cpu_powered_up();

	cpu_pm_exit();

	return idx;
}
static int sunxi_cpu_core_power_down(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	cpu_pm_enter();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
	smp_wmb();

	cpu_suspend(0, sunxi_powerdown_finisher);

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
	cpu_pm_exit();

	return index;
}
static int highbank_pm_enter(suspend_state_t state)
{
	cpu_pm_enter();
	cpu_cluster_pm_enter(0);

	highbank_set_cpu_jump(0, cpu_resume);
	cpu_suspend(0, highbank_suspend_finish);

	cpu_cluster_pm_exit(0);
	cpu_pm_exit();

	highbank_smc1(0x102, 0x1);
	if (scu_base_addr)
		scu_enable(scu_base_addr);
	return 0;
}
static int msm_cpuidle_enter(struct cpuidle_device *dev,
			     struct cpuidle_state *state)
{
#ifdef CONFIG_CPU_PM
	cpu_pm_enter();
#endif

	dev->last_residency = msm_pm_idle_enter((enum msm_pm_sleep_mode)
						(state->driver_data));

#ifdef CONFIG_CPU_PM
	cpu_pm_exit();
#endif

	local_irq_enable();

	return 0;
}
/*
 * mmp_enter_powerdown - Programs CPU to enter the specified state
 *
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int mmp_enter_powerdown(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int idx)
{
	int ret;

	BUG_ON(!idx);

	cpu_pm_enter();

	ret = cpu_suspend((unsigned long)&idx, mcpm_powerdown_finisher);
	if (ret)
		pr_err("cpu%d failed to enter power down!\n", dev->cpu);

	mcpm_cpu_powered_up();

	cpu_pm_exit();

	return idx;
}
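/*
 * Sketch of the finisher contract used with cpu_suspend() above (this is
 * not the actual mcpm_powerdown_finisher, whose body is platform code).
 * The finisher runs after the core has saved its context; on success the
 * CPU powers off and resumes through cpu_resume, so the finisher never
 * returns. Returning a nonzero value means the power down was aborted,
 * and cpu_suspend() reports that as a failure.
 */
static int example_powerdown_finisher(unsigned long arg)
{
	/*
	 * The platform-specific power-down entry (an MCPM or firmware
	 * call) would go here; cpu_do_idle() is a stand-in so the sketch
	 * compiles.
	 */
	cpu_do_idle();
	return 1;	/* reached only if the power down did not happen */
}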
static int armada_370_xp_enter_idle(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv,
				    int index)
{
	int ret;
	bool deepidle = false;

	cpu_pm_enter();

	if (drv->states[index].flags & ARMADA_370_XP_FLAG_DEEP_IDLE)
		deepidle = true;

	ret = armada_370_xp_cpu_suspend(deepidle);

	/*
	 * Balance the cpu_pm_enter() above on every path; the original
	 * code leaked a CPU PM reference when the suspend call failed.
	 */
	cpu_pm_exit();

	if (ret)
		return ret;

	return index;
}
/* Actual code that puts the SoC in different idle states */
static int xilinx_enter_idle(struct cpuidle_device *dev,
			     struct cpuidle_driver *drv, int index)
{
	struct timeval before, after;
	int idle_time;

	local_irq_disable();
	do_gettimeofday(&before);

	if (index == 0) {
		/* Wait-for-interrupt state */
		cpu_do_idle();
	} else if (index == 1) {
		unsigned int cpu_id = smp_processor_id();

		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

		/* Devices must be stopped here */
		cpu_pm_enter();

		/* Add code for DDR self refresh start */
		cpu_do_idle();
		/* cpu_suspend(foo, bar); */
		/* Add code for DDR self refresh stop */

		cpu_pm_exit();

		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
	}

	do_gettimeofday(&after);
	local_irq_enable();

	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
		    (after.tv_usec - before.tv_usec);
	dev->last_residency = idle_time;
	return index;
}
static int tegra114_idle_power_down(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv,
				    int index)
{
	local_fiq_disable();

	tegra_set_cpu_in_lp2();
	cpu_pm_enter();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);

	cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	cpu_pm_exit();
	tegra_clear_cpu_in_lp2();

	local_fiq_enable();

	return index;
}
static int tegra114_idle_power_down(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv,
				    int index)
{
	local_fiq_disable();

	tegra_set_cpu_in_lp2();
	cpu_pm_enter();

	call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);

	/* Do suspend by ourselves if the firmware does not implement it */
	if (call_firmware_op(do_idle, 0) == -ENOSYS)
		cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);

	cpu_pm_exit();
	tegra_clear_cpu_in_lp2();

	local_fiq_enable();

	return index;
}
int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend((unsigned long)cache_disable,
				  nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}
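/*
 * Hypothetical usage sketch for mcpm_loopback(): a platform's MCPM init
 * soft-restarts the boot CPU once through the low-level entry path,
 * passing the cache-disable helper it will later use for real power
 * down. my_plat_power_ops and my_plat_cache_off are made-up names.
 */
extern const struct mcpm_platform_ops my_plat_power_ops;	/* hypothetical */
static void my_plat_cache_off(void);				/* hypothetical */

static int __init my_plat_mcpm_init(void)
{
	int ret = mcpm_platform_register(&my_plat_power_ops);

	if (!ret)
		ret = mcpm_loopback(my_plat_cache_off);
	return ret;
}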
static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv,
				    int index)
{
	u32 cpu = is_smp() ? cpu_logical_map(dev->cpu) : dev->cpu;
	bool entered_lp2 = false;

	if (tegra_pending_sgi())
		ACCESS_ONCE_RW(abort_flag) = true;

	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

	if (abort_flag) {
		cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
		abort_flag = false;	/* clear the flag for the next entry */
		return -EINTR;
	}

	local_fiq_disable();

	tegra_set_cpu_in_lp2(cpu);
	cpu_pm_enter();

	if (cpu == 0)
		entered_lp2 = tegra20_cpu_cluster_power_down(dev, drv, index);
	else
		entered_lp2 = tegra20_idle_enter_lp2_cpu_1(dev, drv, index);

	cpu_pm_exit();
	tegra_clear_cpu_in_lp2(cpu);

	local_fiq_enable();

	smp_rmb();

	return entered_lp2 ? index : 0;
}
/*
 * arm64_enter_idle_state - Programs CPU to enter the specified state
 *
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int arm64_enter_idle_state(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int idx)
{
	int ret;

	if (!idx) {
		cpu_do_idle();
		return idx;
	}

	ret = cpu_pm_enter();
	if (!ret) {
		/*
		 * Pass idle state index to cpu_suspend which in turn will
		 * call the CPU ops suspend protocol with idle index as a
		 * parameter.
		 */
		ret = cpu_suspend(idx);
		cpu_pm_exit();
	}

	return ret ? -1 : idx;
}
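/*
 * Sketch with assumed names: what the cpu_pm_enter()/cpu_pm_exit() calls
 * in the handlers above fan out to. A driver whose hardware state is
 * lost across a core power down (GIC, VFP, debug, PMU, ...) registers a
 * notifier like this and saves/restores its context on the CPU_PM
 * events.
 */
static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-CPU hardware context here */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-CPU hardware context here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

/* registered once from driver init: cpu_pm_register_notifier(&example_cpu_pm_nb); */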
static void cpu_pm_resume(void)
{
	from_suspend = false;
	cpu_cluster_pm_exit(0);
	cpu_pm_exit();
}
static void post_idle(unsigned int cpuid)
{
	cpu_pm_exit();
}
static void cpu_pm_resume(void)
{
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}