void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
		      __u32 thread_info_flags)
{
#ifdef DEBUG_SIG
	printk("do_notify_resume flags:%x rip:%lx rsp:%lx caller:%lx pending:%lx\n",
	       thread_info_flags, regs->rip, regs->rsp,
	       __builtin_return_address(0), signal_pending(current));
#endif

	/* Pending single-step? */
	if (thread_info_flags & _TIF_SINGLESTEP) {
		regs->eflags |= TF_MASK;
		clear_thread_flag(TIF_SINGLESTEP);
	}

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs, oldset);
}
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}
static void pseries_dedicated_idle_sleep(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->donate_dedicated_cpu = 1;

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently. If we should poll for a little
	 * while, do so.
	 */
	if (__get_cpu_var(smt_snooze_delay)) {
		start_snooze = get_tb() +
			__get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->donate_dedicated_cpu = 0;
	get_lppaca()->idle = 0;
}
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
	if (ppc_md.idle_loop)
		ppc_md.idle_loop();	/* doesn't return */

	set_thread_flag(TIF_POLLING_NRFLAG);
	while (1) {
		while (!need_resched() && !cpu_should_die()) {
			ppc64_runlatch_off();

			if (ppc_md.power_save) {
				clear_thread_flag(TIF_POLLING_NRFLAG);
				/*
				 * smp_mb is so clearing of TIF_POLLING_NRFLAG
				 * is ordered w.r.t. need_resched() test.
				 */
				smp_mb();
				local_irq_disable();

				/* check again after disabling irqs */
				if (!need_resched() && !cpu_should_die())
					ppc_md.power_save();

				local_irq_enable();
				set_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();
			}
		}

		HMT_medium();
		ppc64_runlatch_on();
		if (cpu_should_die())
			cpu_die();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
				 __u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}
void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		set_bl_bit();
		if (!need_resched()) {
			local_irq_enable();
			cpu_sleep();
		} else
			local_irq_enable();

		set_thread_flag(TIF_POLLING_NRFLAG);
		clear_bl_bit();
	} else
		poll_idle();
}
static void default_idle(void)
{
	if (!hlt_counter) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		set_bl_bit();
		stop_critical_timings();

		while (!need_resched())
			cpu_sleep();

		start_critical_timings();
		clear_bl_bit();
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else
		while (!need_resched())
			cpu_relax();
}
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __cpuinit cpu_init(void)
{
	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_id);

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}
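/*
 * Illustrative sketch, not from the kernel tree: the TIF_SSBD handling
 * above is the kernel half of PR_SPEC_DISABLE_NOEXEC. A minimal user-space
 * counterpart could be a launcher that requests Speculative Store Bypass
 * mitigation for itself but lets it drop across exec. See prctl(2) for the
 * authoritative interface; launch_mitigated() is an invented name.
 */
#include <sys/prctl.h>
#include <unistd.h>

static int launch_mitigated(const char *path, char *const argv[],
			    char *const envp[])
{
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE_NOEXEC, 0, 0) < 0)
		return -1;	/* kernel lacks PR_SPEC_DISABLE_NOEXEC */

	/* arch_setup_new_exec() clears TIF_SSBD for the new image. */
	return execve(path, argv, envp);
}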
void default_idle(void)
{
	if (likely(hlt_counter)) {
		local_irq_disable();
		stop_critical_timings();
		cpu_relax();
		start_critical_timings();
		local_irq_enable();
	} else {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		local_irq_disable();
		while (!need_resched())
			cpu_sleep();
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);
	}
}
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}
static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
			     unsigned long timeout, wait_queue_head_t *queue,
			     bool check_cancel)
{
	unsigned long stop;
	long rc;
	u8 status;
	bool canceled = false;

	/* check current status */
	status = chip->ops->status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
			wait_for_tpm_stat_cond(chip, mask, check_cancel,
					       &canceled),
			timeout);
		if (rc > 0) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		do {
			tpm_msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
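/*
 * Illustrative condensation of the freezer-retry idiom above (no new API;
 * names are those of the surrounding function). Two points matter: the
 * deadline is absolute, so the remaining timeout must be recomputed on
 * every retry, and the fake signal the freezer sends to wake sleepers is
 * dropped via clear_thread_flag(TIF_SIGPENDING) instead of surfacing to
 * the caller as a spurious -ERESTARTSYS.
 */
	stop = jiffies + timeout;
again:
	timeout = stop - jiffies;
	if ((long)timeout <= 0)
		return -ETIME;
	rc = wait_event_interruptible_timeout(*queue, condition, timeout);
	if (rc == -ERESTARTSYS && freezing(current)) {
		clear_thread_flag(TIF_SIGPENDING);	/* drop the freezer's fake signal */
		goto again;
	}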
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();
	return index;
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = init_tss + get_cpu();

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}
void default_idle(void)
{
	void (*powersave)(void);

	powersave = ppc_md.power_save;

	if (!need_resched()) {
		if (powersave != NULL)
			powersave();
#ifdef CONFIG_SMP
		else {
			set_thread_flag(TIF_POLLING_NRFLAG);
			while (!need_resched() &&
			       !cpu_is_offline(smp_processor_id()))
				barrier();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		}
#endif
	}
}
void do_sigdelayed(void)
{
	struct siginfo siginfo;
	pid_t pid;
	struct task_struct *t;

	clear_thread_flag(TIF_SIGDELAYED);
	memset(&siginfo, 0, sizeof(siginfo));
	siginfo.si_signo = current_thread_info()->sigdelayed.signo;
	siginfo.si_code = current_thread_info()->sigdelayed.code;
	siginfo.si_addr = current_thread_info()->sigdelayed.addr;
	pid = current_thread_info()->sigdelayed.pid;
	t = find_task_by_pid(pid);
	if (!t)
		return;
	if (current_thread_info()->sigdelayed.start_time != start_time_ul(t))
		return;
	force_sig_info(siginfo.si_signo, &siginfo, t);
}
static int restore_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_sigregs32 user_sregs;
	int i;

	/* Always make any pending restarted system call return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
		return -EFAULT;

	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
		return -EINVAL;

	/* Loading the floating-point-control word can fail. Do that first. */
	if (restore_fp_ctl(&user_sregs.fpregs.fpc))
		return -EINVAL;

	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
	/* Check for invalid user address space control. */
	if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
		regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
			(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
	for (i = 0; i < NUM_GPRS; i++)
		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
	restore_access_regs(current->thread.acrs);

	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
	       sizeof(current->thread.fp_regs));

	restore_fp_regs(current->thread.fp_regs.fprs);
	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
	return 0;
}
void do_work_pending(struct pt_regs *regs, unsigned long thread_flags,
		     unsigned long r0, unsigned long r19)
{
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			schedule();
		} else {
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				do_signal(regs, r0, r19);
				r0 = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
}
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		set_need_resched();
	}
}
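/*
 * Why TIF_POLLING_NRFLAG pays off: a CPU that advertises it is polling
 * will notice TIF_NEED_RESCHED by itself, so the waking CPU can skip the
 * reschedule IPI (in the kernel, the waker-side check is tsk_is_polling()
 * in the scheduler). Below is a user-space analogue of the two-sided
 * handshake using C11 seq_cst atomics in place of smp_mb() -- illustrative
 * only, all names invented:
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool need_resched_flag;	/* stands in for TIF_NEED_RESCHED */
static atomic_bool polling_flag;	/* stands in for TIF_POLLING_NRFLAG */

static void idle_side(void)
{
	atomic_store(&polling_flag, true);
	while (!atomic_load(&need_resched_flag))
		;	/* spin; the kernel would cpu_relax() here */
	atomic_store(&polling_flag, false);
}

static void waker_side(void)
{
	atomic_store(&need_resched_flag, true);
	if (!atomic_load(&polling_flag))
		;	/* not polling: this is where the IPI would be sent */
	/* else: the poller will observe need_resched_flag on its own */
}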
asmlinkage void __ipipe_sync_root(void)
{
	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(irqs_disabled());

	flags = hard_local_irq_save();

	if (irq_tail_hook)
		irq_tail_hook();

	clear_thread_flag(TIF_IRQ_SYNC);

	p = ipipe_root_cpudom_ptr();
	if (__ipipe_ipending_p(p))
		__ipipe_sync_pipeline();

	hard_local_irq_restore(flags);
}
void default_idle(void)
{
	void (*powersave)(void);

	powersave = ppc_md.power_save;

	if (!need_resched()) {
		if (powersave != NULL)
			powersave();
#ifdef CONFIG_SMP
		else {
			set_thread_flag(TIF_POLLING_NRFLAG);
			while (!need_resched())
				barrier();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		}
#endif
	}
	if (need_resched())
		schedule();
}
static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
	_sigregs user_sregs;

	/* Always make any pending restarted system call return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
		return -EFAULT;

	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
		return -EINVAL;

	/* Loading the floating-point-control word can fail. Do that first. */
	if (restore_fp_ctl(&user_sregs.fpregs.fpc))
		return -EINVAL;

	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
		(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
	/* Check for invalid user address space control. */
	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
		regs->psw.mask = PSW_ASC_PRIMARY |
			(regs->psw.mask & ~PSW_MASK_ASC);
	/* Check for invalid amode */
	if (regs->psw.mask & PSW_MASK_EA)
		regs->psw.mask |= PSW_MASK_BA;
	regs->psw.addr = user_sregs.regs.psw.addr;
	memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
	restore_access_regs(current->thread.acrs);

	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
	       sizeof(current->thread.fp_regs));

	restore_fp_regs(current->thread.fp_regs.fprs);
	clear_thread_flag(TIF_SYSCALL);	/* No longer in a system call */
	return 0;
}
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
				rseq_handle_notify_resume(NULL, regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
void
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
	/* Pending single-step? */
	if (thread_info_flags & _TIF_SINGLESTEP) {
		regs->flags |= X86_EFLAGS_TF;
		clear_thread_flag(TIF_SINGLESTEP);
	}

#ifdef CONFIG_X86_MCE
	/* notify userspace of pending MCEs */
	if (thread_info_flags & _TIF_MCE_NOTIFY)
		mce_notify_user();
#endif /* CONFIG_X86_MCE */

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_HRTICK_RESCHED)
		hrtick_resched();
}
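/*
 * The _TIF_SINGLESTEP branch above is the tail end of PTRACE_SINGLESTEP:
 * the flag the tracer sets on the tracee becomes the trap flag
 * (X86_EFLAGS_TF) as the tracee returns to user mode. A minimal
 * tracer-side sketch, error handling elided; single_step() is an
 * invented helper:
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void single_step(pid_t pid)
{
	int status;

	ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);	/* sets TIF_SINGLESTEP */
	waitpid(pid, &status, 0);	/* tracee traps after one instruction */
}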
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init(void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
#ifdef CONFIG_OWL_DEBUG_IRQ_STACK
				int restart;

				owl_debug_check_irqstack(regs);
				restart = do_signal(regs, syscall);
				owl_debug_save_irqstack(regs);
#else
				int restart = do_signal(regs, syscall);
#endif
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				syscall = 0;
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}
/*
 * Decrement the user count and bring down lockd if we're the last.
 */
void
lockd_down(void)
{
	static int warned;

	mutex_lock(&nlmsvc_mutex);
	if (nlmsvc_users) {
		if (--nlmsvc_users)
			goto out;
	} else
		printk(KERN_WARNING "lockd_down: no users! pid=%d\n",
			nlmsvc_pid);

	if (!nlmsvc_pid) {
		if (warned++ == 0)
			printk(KERN_WARNING "lockd_down: no lockd running.\n");
		goto out;
	}
	warned = 0;

	kill_proc(nlmsvc_pid, SIGKILL, 1);
	/*
	 * Wait for the lockd process to exit, but since we're holding
	 * the lockd semaphore, we can't wait around forever ...
	 */
	clear_thread_flag(TIF_SIGPENDING);
	interruptible_sleep_on_timeout(&lockd_exit, HZ);
	if (nlmsvc_pid) {
		printk(KERN_WARNING
			"lockd_down: lockd failed to exit, clearing pid\n");
		nlmsvc_pid = 0;
	}
	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
out:
	mutex_unlock(&nlmsvc_mutex);
}
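/*
 * The TIF_SIGPENDING dance at the end of lockd_down() is a recurring
 * idiom, extracted here for clarity (illustrative skeleton; the wait
 * primitive and wait queue are whatever the caller already uses): hide
 * pending signals so the bounded sleep is not cut short, then have the
 * signal code recompute the flag under siglock so a real signal that
 * arrived meanwhile is not permanently swallowed.
 */
	clear_thread_flag(TIF_SIGPENDING);		/* hide pending signals */
	interruptible_sleep_on_timeout(&waitq, HZ);	/* bounded, cannot hang */

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending();				/* re-derive TIF_SIGPENDING */
	spin_unlock_irq(&current->sighand->siglock);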