static inline void dedicated_idle_sleep(unsigned int cpu)
{
	struct paca_struct *ppaca = &paca[cpu ^ 1];

	/* Only sleep if the other thread is not idle */
	if (!(ppaca->lppaca.idle)) {
		local_irq_disable();

		/*
		 * We are about to sleep the thread and so won't be polling
		 * any more.
		 */
		clear_thread_flag(TIF_POLLING_NRFLAG);

		/*
		 * SMT dynamic mode. Cede will result in this thread going
		 * dormant, if the partner thread is still doing work. Thread
		 * wakes up if partner goes idle, an interrupt is presented, or
		 * a prod occurs. Returning from the cede enables external
		 * interrupts.
		 */
		if (!need_resched())
			cede_processor();
		else
			local_irq_enable();
	} else {
		/*
		 * Give the HV an opportunity at the processor, since we are
		 * not doing any work.
		 */
		poll_pending();
	}
}
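For reference, cede_processor() and poll_pending() used above are thin wrappers around the H_CEDE and H_POLL_PENDING hypervisor calls. A minimal sketch, assuming the plpar_hcall_norets() helper from the pseries hcall interface; the real wrappers live in the pseries plpar_wrappers header and may differ slightly between kernel versions:

/* Sketch only: thin hcall wrappers, as found in the pseries plpar_wrappers header. */
static inline long cede_processor(void)
{
	/* Give up the physical processor until an interrupt or a prod arrives. */
	return plpar_hcall_norets(H_CEDE);
}

static inline long poll_pending(void)
{
	/* Let the hypervisor run briefly without ceding the virtual processor. */
	return plpar_hcall_norets(H_POLL_PENDING);
}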
static void check_and_cede_processor(void)
{
	/*
	 * Interrupts are soft-disabled at this point,
	 * but not hard disabled. So an interrupt might have
	 * occurred before entering NAP, and would be potentially
	 * lost (edge events, decrementer events, etc...) unless
	 * we first hard disable then check.
	 */
	hard_irq_disable();
	if (get_paca()->irq_happened == 0)
		cede_processor();
}
static void pseries_dedicated_idle_sleep(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently. If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
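For context, smt_snooze_delay is a per-CPU tunable giving the number of microseconds to poll before ceding; it has historically been defined in arch/powerpc/kernel/sysfs.c and adjusted through sysfs and a smt-snooze-delay= boot option. A rough sketch of the definition; the default value and the exact sysfs plumbing are assumptions and vary across kernel versions:

/* Sketch: per-CPU snooze delay in microseconds; 0 means cede immediately.
 * (The default of 100 us is an assumption here, not taken from this source.)
 */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = 100;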
static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked,
	 * also checks if no interrupt has occurred while we
	 * were soft-disabled
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}
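prep_irq_for_idle() lives in arch/powerpc/kernel/irq.c and replaces the open-coded hard_irq_disable()/irq_happened check from the earlier version above. A hedged sketch of its logic; field and helper names such as soft_enabled and lazy_irq_pending() reflect the kernels of that era and may differ in detail:

/* Sketch of prep_irq_for_idle(): hard-disable, bail out if an interrupt was
 * already latched while soft-disabled, otherwise mark interrupts as enabled
 * so that H_CEDE can return with EE on.
 */
bool prep_irq_for_idle(void)
{
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* An interrupt arrived while we were soft-disabled: do not cede. */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep that interrupts are about to be re-enabled by the hcall. */
	trace_hardirqs_on();

	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	return true;	/* Caller may enter the low power state. */
}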
static int pseries_shared_idle(void)
{
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		while (!need_resched() && !cpu_is_offline(cpu)) {
			local_irq_disable();
			ppc64_runlatch_off();

			/*
			 * Yield the processor to the hypervisor. We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another
			 * processor. When returning here, external interrupts
			 * are enabled.
			 *
			 * Check need_resched() again with interrupts disabled
			 * to avoid a race.
			 */
			if (!need_resched())
				cede_processor();
			else
				local_irq_enable();

			HMT_medium();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}
static void pseries_shared_idle_sleep(void)
{
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}
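The dedicated and shared sleep routines above are not called directly; the pseries setup code installs one of them as the platform power_save hook depending on whether the partition runs on shared or dedicated processors. A rough sketch of that selection, hedged since the exact condition and surrounding code differ between kernel versions:

/* Sketch: pick the idle sleep routine at platform setup time. */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
	if (get_lppaca()->shared_proc)
		ppc_md.power_save = pseries_shared_idle_sleep;
	else
		ppc_md.power_save = pseries_dedicated_idle_sleep;
}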
int dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca(), *ppaca;
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned int cpu = smp_processor_id();

	ppaca = &paca[cpu ^ 1];

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.xIdle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay == 0 ||
				    __get_tb() < start_snooze)
					continue;

				HMT_medium();

				if (!(ppaca->lppaca.xIdle)) {
					local_irq_disable();

					/*
					 * We are about to sleep the thread
					 * and so won't be polling any
					 * more.
					 */
					clear_thread_flag(TIF_POLLING_NRFLAG);

					/*
					 * SMT dynamic mode. Cede will result
					 * in this thread going dormant, if the
					 * partner thread is still doing work.
					 * Thread wakes up if partner goes idle,
					 * an interrupt is presented, or a prod
					 * occurs. Returning from the cede
					 * enables external interrupts.
					 */
					if (!need_resched())
						cede_processor();
					else
						local_irq_enable();
				} else {
					/*
					 * Give the HV an opportunity at the
					 * processor, since we are not doing
					 * any work.
					 */
					poll_pending();
				}
			}

			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		HMT_medium();
		lpaca->lppaca.xIdle = 0;

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}

	return 0;
}