/*
 * Generic idle loop implementation
 *
 * Runs forever on an idle CPU: each outer iteration enters nohz idle
 * mode, spins in low-power idle until a reschedule is needed, then
 * leaves nohz mode and calls into the scheduler.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			/* Order the need_resched() re-check against wakeup stores */
			rmb();

			/*
			 * A CPU marked offline never returns from
			 * arch_cpu_idle_dead() — this is the hotplug
			 * death point for the idle task.
			 */
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
				cpu_idle_poll();
			} else {
				/*
				 * Clear the polling flag before the final
				 * need_resched() check so a remote wakeup
				 * sends an IPI rather than relying on
				 * polling.
				 */
				current_clr_polling();
				if (!need_resched()) {
					stop_critical_timings();
					rcu_idle_enter();
					/*
					 * arch_cpu_idle() is expected to
					 * re-enable interrupts before
					 * returning; the WARN below fires
					 * if an arch fails to do so.
					 */
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					/* Lost the race: undo local_irq_disable() and go reschedule */
					local_irq_enable();
				}
				current_set_polling();
			}
			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		/* Preemption is disabled in the idle task; use the _disabled variant */
		schedule_preempt_disabled();
	}
}
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { ktime_t t1, t2; s64 diff; t1 = ktime_get(); local_irq_enable(); if (!current_set_polling_and_test()) { while (!need_resched()) cpu_relax(); } current_clr_polling(); t2 = ktime_get(); diff = ktime_to_us(ktime_sub(t2, t1)); if (diff > INT_MAX) diff = INT_MAX; dev->last_residency = (int) diff; return index; }