/*
 * Idle loop for an iSeries partition with a dedicated physical
 * processor: busy-poll at low SMT thread priority until work arrives,
 * servicing pending hypervisor LP events as they show up.
 * Does not return.
 */
static void iseries_dedicated_idle(void)
{
	/*
	 * Polling idle: setting need_resched is enough to wake us,
	 * so wakers need not send an IPI (presumed TIF_POLLING_NRFLAG
	 * semantics - confirm against kernel version).
	 */
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_stop_sched_tick(1);
		if (!need_resched()) {
			while (!need_resched()) {
				/* Runlatch off marks the thread idle. */
				ppc64_runlatch_off();
				/* Drop SMT priority while spinning. */
				HMT_low();

				if (hvlpevent_is_pending()) {
					/* Raise priority to handle the event. */
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}
			HMT_medium();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * Idle loop for an iSeries shared-processor partition: rather than
 * spinning, yield the physical processor back to the hypervisor until
 * we need to reschedule or a hypervisor LP event arrives.
 * Does not return.
 */
static void iseries_shared_idle(void)
{
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();

		/* Handle any event that ended the yield before scheduling. */
		if (hvlpevent_is_pending())
			process_iSeries_events();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * The body of the idle task.
 *
 * Alternates between a polling spin (TIF_POLLING_NRFLAG set) and, when
 * the platform provides one, a deeper ppc_md.power_save() state entered
 * with interrupts disabled.  Does not return.
 */
void cpu_idle(void)
{
	/* A platform-specific idle loop, if present, takes over for good. */
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs off for idle */
				stop_critical_timings();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/*
				 * Some power_save functions return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();

		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
/*
 * The body of the idle task.
 *
 * Variant using hrtimer-based tick stop and the delayed-resched test;
 * otherwise the same poll-or-power_save structure as the other idle
 * loops.  Does not return.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		hrtimer_stop_sched_tick();

		while (!need_resched() && !need_resched_delayed() &&
				!cpu_should_die()) {
			ppc64_runlatch_off();

			/*
			 * Suspend critical-section latency timing around the
			 * poll/power-save below; resumed by
			 * touch_critical_timing() at the bottom of the loop.
			 * NOTE(review): the original comment claimed irqs are
			 * disabled at this point, but nothing above disables
			 * them - verify against the tracer's requirements.
			 */
			stop_critical_timing();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}

			touch_critical_timing();
		}

		HMT_medium();
		ppc64_runlatch_on();

		if (cpu_should_die())
			cpu_die();

		hrtimer_restart_sched_tick();
		__preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * cpuidle state callback: enter the POWER7 "nap" state via
 * power7_idle().  The runlatch is cleared around the nap so the
 * hardware thread is accounted as idle while napping.
 *
 * Returns @index to report which cpuidle state was actually entered.
 */
static int nap_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	ppc64_runlatch_off();
	power7_idle();
	ppc64_runlatch_on();
	return index;
}
/*
 * Idle loop for a pSeries partition with dedicated processors: snooze
 * at low SMT thread priority, and once smt_snooze_delay (in usecs of
 * timebase) has elapsed, let dedicated_idle_sleep() take over.
 * Does not return (declared int, but never reaches a return).
 */
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	/* Per-cpu tunable: how long to snooze before sleeping. */
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
			/* Timebase value after which we stop snoozing. */
			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			/*
			 * We cleared TIF_NEED_RESCHED above; re-assert it so
			 * the pending reschedule is not lost.
			 */
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
/*
 * Bookkeeping performed when leaving a pseries cpuidle state: fold the
 * PURR cycles spent idle (since @in_purr was sampled on entry) into the
 * lppaca's wait_state_cycles, mark the vcpu as no longer idle,
 * re-enable interrupts if the idle routine left them off, and turn the
 * runlatch back on.
 */
static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 idle_purr = mfspr(SPRN_PURR) - in_purr;
	u64 total = be64_to_cpu(get_lppaca()->wait_state_cycles) + idle_purr;

	/* wait_state_cycles is kept big-endian in the lppaca. */
	get_lppaca()->wait_state_cycles = cpu_to_be64(total);
	get_lppaca()->idle = 0;

	if (irqs_disabled())
		local_irq_enable();
	ppc64_runlatch_on();
}
/*
 * The body of the idle task: poll for work at low SMT priority, or
 * enter the platform's power_save() state when one is provided.
 * Does not return.
 */
void cpu_idle(void)
{
	/* A platform-specific idle loop, if present, takes over for good. */
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				/*
				 * Stop polling before sleeping; the barrier
				 * orders the flag clear w.r.t. the
				 * need_resched() recheck below.
				 */
				clear_thread_flag(TIF_POLLING_NRFLAG);
				smp_mb();
				local_irq_disable();

				/* Don't trace irqs-off latency for idle. */
				stop_critical_timings();

				/* Recheck with irqs off to close the race. */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				start_critical_timings();

				/*
				 * Some power_save implementations return with
				 * interrupts enabled, some don't.
				 */
				if (irqs_disabled())
					local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				/* No power-save hook: just drop SMT priority. */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();

		if (cpu_should_die()) {
			sched_preempt_enable_no_resched();
			cpu_die();
		}
		schedule_preempt_disabled();
	}
}
/*
 * The body of the idle task.
 *
 * Variant using tick_nohz_stop/restart_sched_tick; otherwise the same
 * poll-or-power_save structure as the other idle loops in this file.
 * Does not return.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		tick_nohz_stop_sched_tick();

		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		tick_nohz_restart_sched_tick();

		if (cpu_should_die())
			cpu_die();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * Cell Broadband Engine idle loop: with interrupts disabled, atomically
 * clear the thread-enable and runlatch bits in the CTRL SPR so the
 * hardware thread pauses until an exception wakes it.
 * Does not return.
 */
static void cbe_idle(void)
{
	unsigned long ctrl;

	/* Why do we do that on every idle ? Couldn't that be done once for
	 * all or do we lose the state some way ? Also, the pm_control
	 * register setting, that can't be set once at boot ? We really want
	 * to move that away in order to implement a simple powersave
	 */
	cbe_enable_pause_zero();

	while (1) {
		if (!need_resched()) {
			local_irq_disable();
			while (!need_resched()) {
				/* go into low thread priority */
				HMT_low();

				/*
				 * atomically disable thread execution
				 * and runlatch.
				 * External and Decrementer exceptions
				 * are still handled when the thread
				 * is disabled but now enter in
				 * cbe_system_reset_exception()
				 */
				ctrl = mfspr(SPRN_CTRLF);
				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
				mtspr(SPRN_CTRLT, ctrl);
			}
			/* restore thread prio */
			HMT_medium();
			local_irq_enable();
		}

		/*
		 * turn runlatch on again before scheduling the
		 * process we just woke up
		 */
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * Idle loop for a pSeries shared-processor partition: cede the virtual
 * processor to the hypervisor instead of spinning.
 * Loops forever in practice; the trailing return 0 is unreachable.
 */
static int pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();
			/*
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
/*
 * cpuidle "snooze" state: busy-wait at the lowest SMT thread priority
 * until a reschedule is needed.  Runs with interrupts enabled and
 * TIF_POLLING_NRFLAG set so wakers need not send an IPI.
 *
 * Returns @index to report which cpuidle state was entered.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	/* Order the flag clear against subsequent loads. */
	smp_mb();
	return index;
}
/*
 * Cell Broadband Engine idle loop: with interrupts disabled, atomically
 * clear the thread-enable and runlatch bits in the CTRL SPR so the
 * hardware thread pauses until an exception wakes it.
 * Does not return.
 */
static void cbe_idle(void)
{
	unsigned long ctrl;

	cbe_enable_pause_zero();

	while (1) {
		if (!need_resched()) {
			local_irq_disable();
			while (!need_resched()) {
				/* go into low thread priority */
				HMT_low();

				/*
				 * atomically disable thread execution
				 * and runlatch.
				 * External and Decrementer exceptions
				 * are still handled when the thread
				 * is disabled but now enter in
				 * cbe_system_reset_exception()
				 */
				ctrl = mfspr(SPRN_CTRLF);
				ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
				mtspr(SPRN_CTRLT, ctrl);
			}
			/* restore thread prio */
			HMT_medium();
			local_irq_enable();
		}

		/*
		 * turn runlatch on again before scheduling the
		 * process we just woke up
		 */
		ppc64_runlatch_on();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * Yield this virtual processor back to the iSeries hypervisor for up
 * to one jiffy, with IPI, LP event, prod and timeout wakeup sources
 * masked for the duration.  The decrementer does not run while
 * yielded, so a fake decrementer interrupt is forced on return and
 * timer_interrupt() resynchronises the actual time.
 */
static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_lppaca()->int_dword.fields.decr_int = 1;
	ppc64_runlatch_on();
	process_iSeries_events();
}
/*
 * Take this CPU "dead" for hotplug on powernv: mark it dead, disable
 * decrementer wakeups via LPCR:PECE1, then nap in a loop until a
 * restart is requested.  Interrupts are briefly re-enabled after each
 * wakeup so the IPI that woke us gets delivered and cleared.
 */
static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;

	/* Standard hot unplug procedure */
	local_irq_disable();
	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	/* Publish the dead state before we start napping. */
	smp_wmb();

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 enabled.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
	while (!generic_check_cpu_restart(cpu)) {
		ppc64_runlatch_off();
		power7_nap(1);
		ppc64_runlatch_on();

		/* Reenable IRQs briefly to clear the IPI that woke us */
		local_irq_enable();
		local_irq_disable();
		mb();

		/* A core-split request is an expected wakeup: nap again. */
		if (cpu_core_split_required())
			continue;

		/* Any other wakeup without a restart request is unexpected. */
		if (!generic_check_cpu_restart(cpu))
			DBG("CPU%d Unexpected exit while offline !\n", cpu);
	}
	/* Restore decrementer wakeups before coming back online. */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}
/*
 * cpuidle "snooze" state with an exit timeout: spin at the lowest SMT
 * thread priority until a reschedule is needed, or until snooze_timeout
 * timebase ticks have elapsed (when snooze_timeout_en is set).
 *
 * Returns @index to report which cpuidle state was entered.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		/* Stop snoozing once the timeout budget is used up. */
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	/* Order the flag clear against subsequent loads. */
	smp_mb();
	return index;
}