void idle_loop(void)
{
    for ( ; ; )
    {
        /* Park this CPU if it has been taken offline. */
        if ( cpu_is_offline(smp_processor_id()) )
            stop_cpu();

        local_irq_disable();
        /* Only sleep if there is no pending work for this CPU. */
        if ( cpu_is_haltable(smp_processor_id()) )
        {
            dsb(sy);
            wfi();
        }
        local_irq_enable();

        do_tasklet();
        do_softirq();
    }
}
void idle_loop(void)
{
    for ( ; ; )
    {
        if ( cpu_is_offline(smp_processor_id()) )
            stop_cpu();

        local_irq_disable();
        if ( cpu_is_haltable(smp_processor_id()) )
        {
            dsb(sy);
            wfi();
        }
        local_irq_enable();

        do_tasklet();
        do_softirq();
        /*
         * We MUST be last (or before dsb, wfi). Otherwise after we get the
         * softirq we would execute dsb,wfi (and sleep) and not patch.
         */
        check_for_livepatch_work();
    }
}
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    uint64_t t1, t2 = 0;
    u32 exp = 0, pred = 0;
    u32 irq_traced[4] = { 0 };

    if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
         (next_state = cpuidle_current_governor->select(power)) > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check() &&
             cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
        menu_get_trace_data(&exp, &pred);
    }
    if ( !cx )
    {
        if ( pm_idle_save )
            pm_idle_save();
        else
            safe_halt();
        return;
    }

    cpufreq_dbs_timer_suspend();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( !cpu_is_haltable(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
        cx = power->safe_state;

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = get_tick();
            /* Trace cpu idle entry */
            TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
            /* Invoke C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = get_tick();
            trace_exit_reason(irq_traced);
            /* Trace cpu idle exit */
            TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                     irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
            /* Update statistics */
            acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2));
            /* Re-enable interrupts */
            local_irq_enable();
            break;
        }
        /* Fall through: C2 without a reliable local APIC timer is treated
         * like C3. */

    case ACPI_STATE_C3:
        /*
         * Before invoking C3, be aware that the TSC/APIC timer may be
         * stopped by H/W. Without careful handling of TSC/APIC stop issues,
         * deep C states can't work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = get_tick();
        /* Trace cpu idle entry */
        TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( cx->type != ACPI_STATE_C3 )
            /* nothing to be done here */;
        else if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache */
            ACPI_FLUSH_CPU_CACHE();
        }

        /* Invoke C3 */
        acpi_idle_do_entry(cx);

        if ( (cx->type == ACPI_STATE_C3) &&
             power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            c3_cpu_status.count--;
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Get end time (ticks) */
        t2 = get_tick();

        /* recovering TSC */
        cstate_restore_tsc();
        trace_exit_reason(irq_traced);
        /* Trace cpu idle exit */
        TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                 irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);

        /* Update statistics */
        acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2));
        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();

        break;

    default:
        /* Now in C0 */
        power->last_state = &power->states[0];
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    /* Now in C0 */
    power->last_state = &power->states[0];

    sched_tick_resume();
    cpufreq_dbs_timer_resume();

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}
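The C3 path above relies on a shared reference count (c3_cpu_status): bus-master arbitration is disabled only when the last online CPU enters C3, and re-enabled as soon as any CPU wakes. Below is a minimal, self-contained C sketch of that counting pattern, not Xen code; the names (NR_FAKE_CPUS, arb_disabled, fake_cpu) are illustrative only, and pthreads stand in for physical CPUs.

/*
 * Sketch of the "last in disables, first out re-enables" pattern used
 * around deep C3.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NR_FAKE_CPUS 4

static pthread_mutex_t c3_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int c3_count;   /* CPUs currently "in C3" */
static bool arb_disabled;       /* stands in for ACPI_BITREG_ARB_DISABLE */

static void *fake_cpu(void *arg)
{
    long cpu = (long)arg;

    /* --- enter C3 --- */
    pthread_mutex_lock(&c3_lock);
    if ( ++c3_count == NR_FAKE_CPUS )
    {
        /* Last CPU in: no one can service bus-master traffic, disable it. */
        arb_disabled = true;
        printf("cpu%ld: all CPUs idle, ARB_DIS set\n", cpu);
    }
    pthread_mutex_unlock(&c3_lock);

    usleep(1000);               /* pretend to sleep in C3 */

    /* --- leave C3 --- */
    pthread_mutex_lock(&c3_lock);
    /* Any CPU waking must re-enable arbitration before running normally. */
    arb_disabled = false;
    c3_count--;
    pthread_mutex_unlock(&c3_lock);

    return NULL;
}

int main(void)
{
    pthread_t t[NR_FAKE_CPUS];

    for ( long i = 0; i < NR_FAKE_CPUS; i++ )
        pthread_create(&t[i], NULL, fake_cpu, (void *)i);
    for ( int i = 0; i < NR_FAKE_CPUS; i++ )
        pthread_join(t[i], NULL);

    printf("final: count=%u arb_disabled=%d\n", c3_count, arb_disabled);
    return 0;
}

As in the real function, the re-enable on exit is unconditional rather than counted down to zero: arbitration must work again the moment any CPU is running, while the counter only matters for deciding when the last CPU has gone idle.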