/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
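/*
 * poll_idle - busy-poll the CPU until a reschedule is needed
 *
 * Spins in cpu_relax() with interrupts enabled until need_resched() is set,
 * then records the elapsed time (capped at INT_MAX microseconds) in
 * dev->last_residency. Descriptive comment added here; the other handlers
 * in this listing carry kerneldoc headers while this one did not.
 */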
static int poll_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	if (!current_set_polling_and_test()) {
		while (!need_resched())
			cpu_relax();
	}
	current_clr_polling();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}