/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, the request will only be sent if the task has the
 *	PF_FREEZER_NOSIG flag unset
 * Return value: 'false', if @sig_only is set and the task has
 *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 * TIF_FREEZE flag will not be set.
 */
bool freeze_task(struct task_struct *p, bool sig_only)
{
	/*
	 * We first check if the task is freezing and next if it has already
	 * been frozen to avoid the race with frozen_process() which first marks
	 * the task as frozen and next clears its TIF_FREEZE.
	 */
	if (!freezing(p)) {
		rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		if (!signal_pending(p))
			fake_signal_wake_up(p);
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
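The helpers this variant relies on are small inlines from the freezer header of the same era. The following is a rough sketch of what they look like, under the assumption that this version still uses the TIF_FREEZE/PF_FROZEN/PF_FREEZER_NOSIG scheme named in the comments above; exact definitions differ between kernel versions.

/*
 * Sketch of the freezer helpers assumed by the freeze_task() variant above
 * (TIF_FREEZE era); not authoritative, definitions vary by kernel version.
 */
static inline int freezing(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_FREEZE);
}

static inline int frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

static inline void set_freeze_flag(struct task_struct *p)
{
	set_tsk_thread_flag(p, TIF_FREEZE);
}

static inline int should_send_signal(struct task_struct *p)
{
	/* Kernel threads mark themselves PF_FREEZER_NOSIG; don't signal them. */
	return !(p->flags & PF_FREEZER_NOSIG);
}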
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	bool dead = false;

	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	write_lock_irq(&tasklist_lock);
	/*
	 * This child can be already killed. Make sure de_thread() or
	 * our sub-thread doing do_wait() didn't do release_task() yet.
	 */
	if (child->ptrace) {
		child->exit_code = data;
		dead = __ptrace_detach(current, child);
		if (!child->exit_state)
			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
	}
	write_unlock_irq(&tasklist_lock);

	if (unlikely(dead))
		release_task(child);

	return 0;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, the request will only be sent if the task has the
 *	PF_FREEZER_NOSIG flag unset
 * Return value: 'false', if @sig_only is set and the task has
 *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 * TIF_FREEZE flag will not be set.
 */
bool freeze_task(struct task_struct *p, bool sig_only)
{
	/*
	 * We first check if the task is freezing and next if it has already
	 * been frozen to avoid the race with frozen_process() which first marks
	 * the task as frozen and next clears its TIF_FREEZE.
	 */
	if (!freezing(p)) {
		smp_rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);

	return 0;
}
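The is_singlestep()/is_singleblock()/is_sysemu_singlestep() helpers used above are not part of this snippet; in kernel/ptrace.c they are plain request-matching macros along the following lines, guarded so they compile to 0 on architectures that lack the corresponding request. This is a sketch from memory, not a verified copy of any particular kernel version.

/* Sketch of the request-matching helpers assumed by ptrace_resume() above. */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif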
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
bool freeze_task(struct task_struct *p, bool sig_only)
{
	if (!freezing(p)) {
		rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		if (!signal_pending(p))
			fake_signal_wake_up(p);
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
 * flag and either sending a fake signal to it or waking it up, depending
 * on whether it has %PF_FREEZER_NOSIG set.
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
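fake_signal_wake_up(), used by every user-task branch in these freeze_task() variants, is a small helper in kernel/freezer.c. The sketch below assumes it merely takes the sighand lock and calls signal_wake_up() so the task cycles through its signal-handling path and hits try_to_freeze(), without queueing a real signal; the exact locking helper used has changed across versions.

/*
 * Sketch of the fake-signal helper: kick the task through its signal path
 * so it notices the freeze request, without delivering an actual signal.
 */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}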
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (task->state != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);

	spin_lock_irq(&task->sighand->siglock);
	if (__fatal_signal_pending(task))
		wake_up_state(task, __TASK_TRACED);
	else
		task->state = TASK_TRACED;
	spin_unlock_irq(&task->sighand->siglock);
}
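For context, ptrace_unfreeze_traced() undoes the freeze that its counterpart applies before the tracer pokes at the tracee. A simplified sketch of that counterpart is shown below; it omits the job-control checks present in the real kernel/ptrace.c and is only meant to show the __TASK_TRACED handshake that the function above reverses.

/*
 * Simplified sketch: move a TASK_TRACED tracee into __TASK_TRACED so that
 * regular signal wakeups cannot disturb it while the tracer works on it.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
		task->state = __TASK_TRACED;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}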
static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
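The public entry point that reaches this helper is kthread_unpark(). The sketch below assumes the kernel era in which to_live_kthread() was still the way to map a live task to its struct kthread; later kernels restructured this path, so treat it only as an illustration of how the helper above gets called.

/* Sketch of the public wrapper that invokes __kthread_unpark() above. */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}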
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @with_mm_only: if set, the request will only be sent if the task has its
 *	own mm
 * Return value: 0, if @with_mm_only is set and the task has no mm of its
 *	own or the task is frozen, 1, otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has its own mm (ie. it is a user land task).  If @with_mm_only
 * is set and the task has no mm of its own (ie. it is a kernel thread),
 * its TIF_FREEZE flag should not be set.
 *
 * The task_lock() is necessary to prevent races with exit_mm() or
 * use_mm()/unuse_mm() from occurring.
 */
static int freeze_task(struct task_struct *p, int with_mm_only)
{
	int ret = 1;

	task_lock(p);
	if (freezing(p)) {
		if (has_mm(p)) {
			if (!signal_pending(p))
				fake_signal_wake_up(p, 0);
		} else {
			if (with_mm_only)
				ret = 0;
			else
				wake_up_state(p, TASK_INTERRUPTIBLE);
		}
	} else {
		rmb();
		if (frozen(p)) {
			ret = 0;
		} else {
			if (has_mm(p)) {
				set_freeze_flag(p);
				send_fake_signal(p);
			} else {
				if (with_mm_only) {
					ret = 0;
				} else {
					set_freeze_flag(p);
					wake_up_state(p, TASK_INTERRUPTIBLE);
				}
			}
		}
	}
	task_unlock(p);

	return ret;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
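Several of the comments above refer to try_to_freeze_tasks(), the loop in kernel/power/process.c that drives freeze_task() over every task during suspend. The following is a hypothetical, condensed sketch of that caller (the name is suffixed _sketch to make clear it is not the real function): timeout handling, abort checks, and the statistics reporting of the real loop are omitted, and the iteration helpers shown are only what newer kernels use.

/*
 * Condensed, hypothetical sketch of the freeze_task() caller referenced in
 * the comments above; retries until every freezable task has frozen.
 */
static int try_to_freeze_tasks_sketch(void)
{
	struct task_struct *g, *p;
	unsigned int todo;

	do {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;
			/*
			 * Tasks inside a freezer_should_skip() section will
			 * freeze themselves on exit; don't count them as
			 * outstanding work.
			 */
			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

		if (todo)
			msleep(10);	/* give woken tasks time to enter the refrigerator */
	} while (todo);

	return 0;
}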