void elf_set_personality(const struct elf32_hdr *x)
{
    unsigned int eflags = x->e_flags;
    unsigned int personality = PER_LINUX_32BIT;

    /*
     * APCS-26 is only valid for OABI executables
     */
    if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
        if (eflags & EF_ARM_APCS_26)
            personality = PER_LINUX;
    }

    set_personality(personality);

    /*
     * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
     * and CP1, we only enable access to the iWMMXt coprocessor if the
     * binary is EABI or softfloat (and thus, guaranteed not to use
     * FPA instructions.)
     */
    if (elf_hwcap & HWCAP_IWMMXT &&
        eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) {
        set_thread_flag(TIF_USING_IWMMXT);
    } else {
        clear_thread_flag(TIF_USING_IWMMXT);
    }
}
static void iseries_dedicated_idle(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);

    while (1) {
        tick_nohz_stop_sched_tick();
        if (!need_resched()) {
            while (!need_resched()) {
                ppc64_runlatch_off();
                HMT_low();

                if (hvlpevent_is_pending()) {
                    HMT_medium();
                    ppc64_runlatch_on();
                    process_iSeries_events();
                }
            }

            HMT_medium();
        }

        ppc64_runlatch_on();
        tick_nohz_restart_sched_tick();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
    compat_sigset_t __user *unewset;
    compat_sigset_t uset;
    size_t sigsetsize;
    sigset_t newset;

    /* XXX Don't preclude handling different sized sigset_t's. */
    sigsetsize = regs.regs[5];
    if (sigsetsize != sizeof(sigset_t))
        return -EINVAL;

    unewset = (compat_sigset_t __user *) regs.regs[4];
    if (copy_from_user(&uset, unewset, sizeof(uset)))
        return -EFAULT;
    sigset_from_compat(&newset, &uset);
    sigdelsetmask(&newset, ~_BLOCKABLE);

    current->saved_sigmask = current->blocked;
    set_current_blocked(&newset);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
static int snooze_loop(struct cpuidle_device *dev,
                       struct cpuidle_driver *drv,
                       int index)
{
    unsigned long in_purr;
    ktime_t kt_before;
    int cpu = dev->cpu;

    idle_loop_prolog(&in_purr, &kt_before);
    local_irq_enable();
    set_thread_flag(TIF_POLLING_NRFLAG);

    while ((!need_resched()) && cpu_online(cpu)) {
        ppc64_runlatch_off();
        HMT_low();
        HMT_very_low();
    }

    HMT_medium();
    clear_thread_flag(TIF_POLLING_NRFLAG);
    smp_mb();

    dev->last_residency =
        (int)idle_loop_epilog(in_purr, kt_before);
    return index;
}
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * This function may be called repeatedly with a @seccomp_mode of
 * SECCOMP_MODE_FILTER to install additional filters.  Every filter
 * successfully installed will be evaluated (in reverse order) for each system
 * call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
    long ret = -EINVAL;

    if (current->seccomp.mode &&
        current->seccomp.mode != seccomp_mode)
        goto out;

    switch (seccomp_mode) {
    case SECCOMP_MODE_STRICT:
        ret = 0;
#ifdef TIF_NOTSC
        disable_TSC();
#endif
        break;
#ifdef CONFIG_SECCOMP_FILTER
    case SECCOMP_MODE_FILTER:
        ret = seccomp_attach_user_filter(filter);
        if (ret)
            goto out;
        break;
#endif
    default:
        goto out;
    }

    current->seccomp.mode = seccomp_mode;
    set_thread_flag(TIF_SECCOMP);
out:
    return ret;
}
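From user space, the entry point above is reached via prctl(2). Below is a minimal strict-mode sketch, assuming a Linux build with seccomp enabled; filter mode would additionally need a BPF program, which is not shown here.

/* Strict-mode seccomp sketch: after the prctl() call only read(),
 * write(), _exit() and sigreturn() are permitted. */
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
    if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0) {
        perror("prctl(PR_SET_SECCOMP)");
        return 1;
    }

    /* Allowed: write() to an already-open descriptor. */
    write(STDOUT_FILENO, "sandboxed\n", 10);

    /* Any other syscall (e.g. open()) would deliver SIGKILL here. */
    _exit(0);
}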
asmlinkage int
do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
                 struct pt_regs *regs, struct switch_stack *sw)
{
    sigset_t set;

    /* XXX: Don't preclude handling different sized sigset_t's. */
    if (sigsetsize != sizeof(sigset_t))
        return -EINVAL;
    if (copy_from_user(&set, uset, sizeof(set)))
        return -EFAULT;

    sigdelsetmask(&set, ~_BLOCKABLE);
    spin_lock_irq(&current->sighand->siglock);
    current->saved_sigmask = current->blocked;
    current->blocked = set;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    /* Indicate EINTR on return from any possible signal handler,
       which will not come back through here, but via sigreturn. */
    regs->r0 = EINTR;
    regs->r19 = 1;

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
    sigset_t newset;
    sigset_t __user *unewset;
    size_t sigsetsize;

    /* XXX Don't preclude handling different sized sigset_t's. */
    sigsetsize = regs.regs[5];
    if (sigsetsize != sizeof(sigset_t))
        return -EINVAL;

    unewset = (sigset_t __user *) regs.regs[4];
    if (copy_from_user(&newset, unewset, sizeof(newset)))
        return -EFAULT;
    sigdelsetmask(&newset, ~_BLOCKABLE);

    spin_lock_irq(&current->sighand->siglock);
    current->saved_sigmask = current->blocked;
    current->blocked = newset;
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
                     unsigned long r6, unsigned long r7, unsigned long r8,
                     struct pt_regs *regs)
{
    struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
    sigset_t set;

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
        goto badframe;

    if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
        goto badframe;
    restore_sigmask(&set);
    if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
        goto badframe;

    /* do_sigaltstack expects a __user pointer and won't modify
     * what's in there anyway */
    do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]);

    set_thread_flag(TIF_RESTOREALL);
    return 0;

badframe:
#if DEBUG_SIG
    printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
           regs, uc, &uc->uc_mcontext);
#endif
    force_sig(SIGSEGV, current);
    return 0;
}
long prctl_set_seccomp(unsigned long seccomp_mode)
{
    long ret;

    /* can set it only once to be even more secure */
    ret = -EPERM;
    if (unlikely(current->seccomp.mode))
        goto out;

    ret = 0;
    switch (seccomp_mode) {
    case 1:            /* SECCOMP_MODE_STRICT */
#ifdef TIF_NOTSC
        disable_TSC();
#endif
#ifdef CONFIG_SECCOMP_FILTER
    case 2:            /* SECCOMP_MODE_FILTER */
#endif
        current->seccomp.mode = seccomp_mode;
        set_thread_flag(TIF_SECCOMP);
        break;
    default:
        ret = -EINVAL;
    }

out:
    return ret;
}
void fpsimd_flush_thread(void)
{
    preempt_disable();
    memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
    set_thread_flag(TIF_FOREIGN_FPSTATE);
    preempt_enable();
}
int check_hw_breakpoint(struct pt_regs *regs)
{
    if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
        int i;
        struct perf_event **bp = this_cpu_ptr(bp_on_reg);

        for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
            if (bp[i] && !bp[i]->attr.disabled &&
                regs->pc == bp[i]->attr.bp_addr)
                perf_bp_event(bp[i], regs);
        }
        return 0;
    } else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
        struct perf_event **bp = this_cpu_ptr(wp_on_reg);
        int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
            DEBUGCAUSE_DBNUM_SHIFT;

        if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
            if (user_mode(regs)) {
                perf_bp_event(bp[dbnum], regs);
            } else {
                set_thread_flag(TIF_DB_DISABLED);
                xtensa_wsr(0, SREG_DBREAKC + dbnum);
            }
        } else {
            WARN_ONCE(1,
                      "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
                      dbnum);
        }
        return 0;
    }
    return -ENOENT;
}
/*
 * this changes the io permissions bitmap in the current task.
 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
    struct thread_struct *t = &current->thread;
    struct physdev_set_iobitmap set_iobitmap;

    if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
        return -EINVAL;
    if (turn_on && !capable(CAP_SYS_RAWIO))
        return -EPERM;

    /*
     * If it's the first ioperm() call in this thread's lifetime, set the
     * IO bitmap up. ioperm() is much less timing critical than clone(),
     * this is why we delay this operation until now:
     */
    if (!t->io_bitmap_ptr) {
        unsigned long *bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);

        if (!bitmap)
            return -ENOMEM;

        memset(bitmap, 0xff, IO_BITMAP_BYTES);
        t->io_bitmap_ptr = bitmap;
        set_thread_flag(TIF_IO_BITMAP);

        set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
        set_iobitmap.nr_ports = IO_BITMAP_BITS;
        WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
                                      &set_iobitmap));
    }

    set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);

    return 0;
}
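The sys_ioperm() implementation above backs the user-visible ioperm(2) call. Here is a minimal user-space sketch, assuming x86 Linux with glibc's <sys/io.h>; the port 0x378 is only an illustrative legacy parallel-port address.

/* Request access to a small I/O port range, then drop it again.
 * Must run as root (or with CAP_SYS_RAWIO). */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
    /* Enable access to 4 ports starting at 0x378 (illustrative). */
    if (ioperm(0x378, 4, 1) != 0) {
        perror("ioperm");
        return 1;
    }

    unsigned char status = inb(0x379);   /* read a port we now own */
    printf("status register: 0x%02x\n", status);

    ioperm(0x378, 4, 0);                 /* revoke access again */
    return 0;
}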
/*
 * The body of the idle task.
 */
void cpu_idle(void)
{
    if (ppc_md.idle_loop)
        ppc_md.idle_loop();    /* doesn't return */

    set_thread_flag(TIF_POLLING_NRFLAG);
    while (1) {
        tick_nohz_stop_sched_tick();
        while (!need_resched() && !cpu_should_die()) {
            ppc64_runlatch_off();

            if (ppc_md.power_save) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                /*
                 * smp_mb is so clearing of TIF_POLLING_NRFLAG
                 * is ordered w.r.t. need_resched() test.
                 */
                smp_mb();
                local_irq_disable();

                /* check again after disabling irqs */
                if (!need_resched() && !cpu_should_die())
                    ppc_md.power_save();

                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
            } else {
                /*
                 * Go into low thread priority and possibly
                 * low power mode.
                 */
                HMT_low();
                HMT_very_low();
            }
        }

        HMT_medium();
        ppc64_runlatch_on();
        tick_nohz_restart_sched_tick();
        if (cpu_should_die())
            cpu_die();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
    }
}
/*
 * this changes the io permissions bitmap in the current task.
 */
asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
{
    unsigned int i, max_long, bytes, bytes_updated;
    struct thread_struct *t = &current->thread;
    struct tss_struct *tss;
    unsigned long *bitmap;

    if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
        return -EINVAL;
    if (turn_on && !capable(CAP_SYS_RAWIO))
        return -EPERM;

    /*
     * If it's the first ioperm() call in this thread's lifetime, set the
     * IO bitmap up. ioperm() is much less timing critical than clone(),
     * this is why we delay this operation until now:
     */
    if (!t->io_bitmap_ptr) {
        bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);

        if (!bitmap)
            return -ENOMEM;

        memset(bitmap, 0xff, IO_BITMAP_BYTES);
        t->io_bitmap_ptr = bitmap;
        set_thread_flag(TIF_IO_BITMAP);
    }

    /*
     * do it in the per-thread copy and in the TSS ...
     *
     * Disable preemption via get_cpu() - we must not switch away
     * because the ->io_bitmap_max value must match the bitmap
     * contents:
     */
    tss = &per_cpu(init_tss, get_cpu());

    set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);

    /*
     * Search for a (possibly new) maximum. This is simple and stupid,
     * to keep it obviously correct:
     */
    max_long = 0;
    for (i = 0; i < IO_BITMAP_LONGS; i++)
        if (t->io_bitmap_ptr[i] != ~0UL)
            max_long = i;

    bytes = (max_long + 1) * sizeof(long);
    bytes_updated = max(bytes, t->io_bitmap_max);
    t->io_bitmap_max = bytes;

    /* Update the TSS: */
    memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);

    put_cpu();

    return 0;
}
asmlinkage long compat_sys_rt_sigaction(int sig,
                                        struct sigaction32 __user *act,
                                        struct sigaction32 __user *oact,
                                        void __user *restorer,
                                        compat_size_t sigsetsize)
{
    struct k_sigaction new_ka, old_ka;
    int ret;
    compat_sigset_t set32;

    /* XXX: Don't preclude handling different sized sigset_t's. */
    if (sigsetsize != sizeof(compat_sigset_t))
        return -EINVAL;

    /* All tasks which use RT signals (effectively) use
     * new style signals.
     */
    set_thread_flag(TIF_NEWSIGNALS);

    if (act) {
        u32 u_handler, u_restorer;

        new_ka.ka_restorer = restorer;
        ret = get_user(u_handler, &act->sa_handler);
        new_ka.sa.sa_handler = compat_ptr(u_handler);
        ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
        switch (_NSIG_WORDS) {
        case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
        case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
        case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
        case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
        }
        ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
        ret |= __get_user(u_restorer, &act->sa_restorer);
        new_ka.sa.sa_restorer = compat_ptr(u_restorer);
        if (ret)
            return -EFAULT;
    }

    ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

    if (!ret && oact) {
        switch (_NSIG_WORDS) {
        case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
            set32.sig[6] = old_ka.sa.sa_mask.sig[3];
        case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
            set32.sig[4] = old_ka.sa.sa_mask.sig[2];
        case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
            set32.sig[2] = old_ka.sa.sa_mask.sig[1];
        case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
            set32.sig[0] = old_ka.sa.sa_mask.sig[0];
        }
        ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
        ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
        ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
        ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
        if (ret)
            ret = -EFAULT;
    }

    return ret;
}
void start_kernel_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
    set_fs(KERNEL_DS);
    regs->pc = pc;
    regs->r1 = usp;
    regs->pt_mode = 0;
    regs->msr &= ~MSR_UMS;
    set_thread_flag(TIF_KU);
}
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
                     unsigned long r6, unsigned long r7, unsigned long r8,
                     struct pt_regs *regs)
{
    struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
    sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    unsigned long msr;
#endif

    /* Always make any pending restarted system calls return -EINTR */
    current_thread_info()->restart_block.fn = do_no_restart_syscall;

    if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
        goto badframe;

    if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
        goto badframe;
    set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
        goto badframe;
    if (MSR_TM_ACTIVE(msr)) {
        /* We recheckpoint on return. */
        struct ucontext __user *uc_transact;

        if (__get_user(uc_transact, &uc->uc_link))
            goto badframe;
        if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
                                   &uc_transact->uc_mcontext))
            goto badframe;
    } else
    /* Fall through, for non-TM restore */
#endif
    if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
        goto badframe;

    if (restore_altstack(&uc->uc_stack))
        goto badframe;

    set_thread_flag(TIF_RESTOREALL);
    return 0;

badframe:
#if DEBUG_SIG
    printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
           regs, uc, &uc->uc_mcontext);
#endif
    if (show_unhandled_signals)
        printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                           current->comm, current->pid, "rt_sigreturn",
                           (long)uc, regs->nip, regs->link);

    force_sig(SIGSEGV, current);
    return 0;
}
void fpsimd_flush_thread(void)
{
    if (!system_supports_fpsimd())
        return;

    preempt_disable();
    memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
    fpsimd_flush_task_state(current);
    set_thread_flag(TIF_FOREIGN_FPSTATE);
    preempt_enable();
}
static int pseries_dedicated_idle(void)
{
    long oldval;
    struct paca_struct *lpaca = get_paca();
    unsigned int cpu = smp_processor_id();
    unsigned long start_snooze;
    unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

    while (1) {
        /*
         * Indicate to the HV that we are idle. Now would be
         * a good time to find other work to dispatch.
         */
        lpaca->lppaca.idle = 1;

        oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
        if (!oldval) {
            set_thread_flag(TIF_POLLING_NRFLAG);

            start_snooze = __get_tb() +
                *smt_snooze_delay * tb_ticks_per_usec;

            while (!need_resched() && !cpu_is_offline(cpu)) {
                ppc64_runlatch_off();

                /*
                 * Go into low thread priority and possibly
                 * low power mode.
                 */
                HMT_low();
                HMT_very_low();

                if (*smt_snooze_delay != 0 &&
                    __get_tb() > start_snooze) {
                    HMT_medium();
                    dedicated_idle_sleep(cpu);
                }
            }

            HMT_medium();
            clear_thread_flag(TIF_POLLING_NRFLAG);
        } else {
            set_need_resched();
        }

        lpaca->lppaca.idle = 0;
        ppc64_runlatch_on();

        schedule();

        if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
            cpu_die();
    }
}
void cpu_idle(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);

    while (1) {
        while (!need_resched())
            cpu_relax();
        schedule();
    }
}
int main()
{
    initialize_flag();

    pthread_t thread_id;
    pthread_create(&thread_id, NULL, &thread_function, NULL);

    printf("flag not set...\n");
    sleep(5);
    set_thread_flag(1);
    printf("flag already set...\n");

    pthread_join(thread_id, NULL);
    return 0;
}
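The helpers this example calls (initialize_flag(), thread_function() and its user-space set_thread_flag(), which is unrelated to the kernel macro in the other examples) are not shown above. One plausible self-contained sketch follows, assuming the flag is a mutex/condition-variable pair; the names and layout are assumptions, not the original tutorial's code.

#include <pthread.h>
#include <stdio.h>

/* Assumed implementation of the helpers used by main() above:
 * an integer flag protected by a mutex and signalled via a
 * condition variable. */
static pthread_mutex_t flag_mutex;
static pthread_cond_t  flag_cond;
static int             flag_value;

void initialize_flag(void)
{
    pthread_mutex_init(&flag_mutex, NULL);
    pthread_cond_init(&flag_cond, NULL);
    flag_value = 0;
}

void set_thread_flag(int value)
{
    pthread_mutex_lock(&flag_mutex);
    flag_value = value;
    pthread_cond_broadcast(&flag_cond);   /* wake up any waiters */
    pthread_mutex_unlock(&flag_mutex);
}

void *thread_function(void *arg)
{
    /* Block until the main thread sets the flag. */
    pthread_mutex_lock(&flag_mutex);
    while (!flag_value)
        pthread_cond_wait(&flag_cond, &flag_mutex);
    pthread_mutex_unlock(&flag_mutex);

    printf("thread_function: flag is set, doing work\n");
    return NULL;
}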
int vmadump_restore_cpu(cr_rstrt_proc_req_t *ctx, struct file *file,
                        struct pt_regs *regs)
{
    struct pt_regs regtmp;
    int r;

    r = read_kern(ctx, file, &regtmp, sizeof(regtmp));
    if (r != sizeof(regtmp))
        goto bad_read;

    /* Don't restore machine state register since this is
     * unprivileged user space stuff we're restoring. */
    if (regtmp.msr & MSR_SF) {
        regtmp.msr = MSR_USER64;
        clear_thread_flag(TIF_32BIT);
    } else {
        regtmp.msr = MSR_USER32;
        set_thread_flag(TIF_32BIT);
    }

    memcpy(regs, &regtmp, sizeof(regtmp));

    /* Floating point regs */
    r = read_kern(ctx, file, &current->thread.fpr,
                  sizeof(current->thread.fpr));
    if (r != sizeof(current->thread.fpr))
        goto bad_read;

    r = read_kern(ctx, file, &current->thread.fpscr,
                  sizeof(current->thread.fpscr));
    if (r != sizeof(current->thread.fpscr))
        goto bad_read;

#if HAVE_THREAD_VDSO_BASE
    /* unconditionally restore this */
    r = read_kern(ctx, file, &current->thread.vdso_base,
                  sizeof(current->thread.vdso_base));
    if (r != sizeof(current->thread.vdso_base))
        goto bad_read;
#endif

#ifdef CONFIG_ALTIVEC
    /* Restore Altivec */
    r = read_kern(ctx, file, &current->thread.vr,
                  sizeof(current->thread.vr));
    if (r != sizeof(current->thread.vr))
        goto bad_read;

    r = read_kern(ctx, file, &current->thread.vscr,
                  sizeof(current->thread.vscr));
    if (r != sizeof(current->thread.vscr))
        goto bad_read;
#endif

    current->thread.regs = regs;
    return 0;

bad_read:
    if (r >= 0)
        r = -EIO;
    return r;
}
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);

    /* endless idle loop with no priority at all */
    while (1) {
        while (!need_resched())
            barrier();
        schedule_preempt_disabled();
    }
}
long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
    sigset_t blocked;

    /* Save the old mask so TIF_RESTORE_SIGMASK can put it back later. */
    current->saved_sigmask = current->blocked;

    mask &= _BLOCKABLE;
    siginitset(&blocked, mask);
    set_current_blocked(&blocked);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
static void pseries_dedicated_idle_sleep(void)
{
    unsigned int cpu = smp_processor_id();
    unsigned long start_snooze;
    unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
    unsigned long in_purr, out_purr;

    /*
     * Indicate to the HV that we are idle. Now would be
     * a good time to find other work to dispatch.
     */
    get_lppaca()->idle = 1;
    get_lppaca()->cpuctls_task_attrs = 1;
    in_purr = mfspr(SPRN_PURR);

    /*
     * We come in with interrupts disabled, and need_resched()
     * has been checked recently.  If we should poll for a little
     * while, do so.
     */
    if (*smt_snooze_delay) {
        start_snooze = get_tb() +
            *smt_snooze_delay * tb_ticks_per_usec;
        local_irq_enable();
        set_thread_flag(TIF_POLLING_NRFLAG);

        while (get_tb() < start_snooze) {
            if (need_resched() || cpu_is_offline(cpu))
                goto out;
            ppc64_runlatch_off();
            HMT_low();
            HMT_very_low();
        }

        HMT_medium();
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb();
        local_irq_disable();
        if (need_resched() || cpu_is_offline(cpu))
            goto out;
    }

    cede_processor();

out:
    HMT_medium();
    get_lppaca()->cpuctls_task_attrs = 0;
    out_purr = mfspr(SPRN_PURR);
    get_lppaca()->wait_state_cycles += out_purr - in_purr;
    get_lppaca()->idle = 0;
}
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);

    /* endless idle loop with no priority at all */
    while (1) {
        while (!need_resched())
            barrier();
        preempt_enable_no_resched();
        schedule();
        preempt_disable();
        check_pgt_cache();
    }
}
/*
 * Atomically swap in the new signal mask, and wait for a signal.  Define
 * dummy arguments to be able to reach the regs argument.  (Note that this
 * arrangement relies on old_sigset_t occupying one register.)
 */
int sys_sigsuspend(old_sigset_t mask, long r11, long r12, long r13,
                   long mof, long srp, struct pt_regs *regs)
{
    mask &= _BLOCKABLE;
    spin_lock_irq(&current->sighand->siglock);
    current->saved_sigmask = current->blocked;
    siginitset(&current->blocked, mask);
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
    mask &= _BLOCKABLE;
    spin_lock_irq(&current->sighand->siglock);
    current->saved_sigmask = current->blocked;
    siginitset(&current->blocked, mask);
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
static long _sigpause_common(old_sigset_t set)
{
    set &= _BLOCKABLE;
    spin_lock_irq(&current->sighand->siglock);
    current->saved_sigmask = current->blocked;
    siginitset(&current->blocked, set);
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_thread_flag(TIF_RESTORE_SIGMASK);
    return -ERESTARTNOHAND;
}
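The sys_sigsuspend() and _sigpause_common() implementations above all serve the same user-visible semantics. Below is a minimal user-space sketch of typical sigsuspend(2) usage; the choice of SIGUSR1 is just illustrative.

/* Block SIGUSR1, then atomically unblock it and wait for it. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig)
{
    (void)sig;   /* async-signal-safe: just note that it arrived */
}

int main(void)
{
    sigset_t block, oldmask;
    struct sigaction sa;

    sa.sa_handler = handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGUSR1, &sa, NULL);

    sigemptyset(&block);
    sigaddset(&block, SIGUSR1);
    sigprocmask(SIG_BLOCK, &block, &oldmask);

    printf("waiting for SIGUSR1 (pid %d)...\n", getpid());

    /* Atomically install oldmask (SIGUSR1 unblocked) and sleep;
     * returns -1 with errno == EINTR once the handler has run. */
    sigsuspend(&oldmask);

    printf("got it\n");
    return 0;
}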
void cpu_idle(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);

    while (1) {
        /* FIXME -- EV6 and LCA45 know how to power down the CPU. */
        while (!need_resched())
            cpu_relax();
        schedule();
    }
}