static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}
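/*
 * For reference, a sketch of the flag mask the loop above re-checks before
 * breaking out.  This mirrors the definition in arch/x86/entry/common.c of
 * the same era; the exact set of flags varies between kernel versions.  Note
 * that the mask is exactly the union of the _TIF_* bits handled in the loop
 * body, so the loop only exits once every pending piece of work is done.
 */
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY |		\
	 _TIF_PATCH_PENDING)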
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle).  This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		check_pgt_cache();
		rmb();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick_protected();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		local_irq_disable();
		arch_cpu_idle_enter();

		/*
		 * In poll mode we reenable interrupts and spin.  Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set.  That means that
	 * clearing polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	sched_ttwu_pending();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}
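/*
 * A simplified sketch of how do_idle() is reached: each CPU's idle task
 * lands in cpu_startup_entry() and never returns, calling do_idle() in an
 * endless loop.  Each do_idle() invocation ends in schedule_idle() once
 * need_resched() is set, and the idle task loops back here when the CPU
 * next goes idle.  This mirrors kernel/sched/idle.c of the same era, with
 * arch-specific setup (e.g. stack canary init) omitted; details vary by
 * kernel version.
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}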