/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, the request will only be sent if the task has the
 *	PF_FREEZER_NOSIG flag unset
 * Return value: 'false', if @sig_only is set and the task has
 *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 * TIF_FREEZE flag will not be set.
 */
bool freeze_task(struct task_struct *p, bool sig_only)
{
	/*
	 * We first check if the task is freezing and next if it has already
	 * been frozen to avoid the race with frozen_process() which first marks
	 * the task as frozen and next clears its TIF_FREEZE.
	 */
	if (!freezing(p)) {
		/*
		 * The read barrier orders the frozen(p) load after the
		 * freezing(p) load, pairing with the store ordering in
		 * frozen_process() described above.
		 */
		smp_rmb();
		if (frozen(p))
			return false;

		/* Never set TIF_FREEZE on a nosig task when @sig_only. */
		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
/** * freeze_task - send a freeze request to given task * @p: task to send the request to * @sig_only: if set, the request will only be sent if the task has the * PF_FREEZER_NOSIG flag unset * Return value: 'false', if @sig_only is set and the task has * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise * * The freeze request is sent by setting the tasks's TIF_FREEZE flag and * either sending a fake signal to it or waking it up, depending on whether * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its * TIF_FREEZE flag will not be set. */ bool freeze_task(struct task_struct *p, bool sig_only) { /* * We first check if the task is freezing and next if it has already * been frozen to avoid the race with frozen_process() which first marks * the task as frozen and next clears its TIF_FREEZE. */ if (!freezing(p)) { rmb(); if (frozen(p)) return false; if (!sig_only || should_send_signal(p)) set_freeze_flag(p); else return false; } if (should_send_signal(p)) { if (!signal_pending(p)) fake_signal_wake_up(p); } else if (sig_only) { return false; } else { wake_up_state(p, TASK_INTERRUPTIBLE); } return true; }
/** * freeze_task - send a freeze request to given task * @p: task to send the request to * * If @p is freezing, the freeze request is sent either by sending a fake * signal (if it's not a kernel thread) or waking it up (if it's a kernel * thread). * * RETURNS: * %false, if @p is not freezing or already frozen; %true, otherwise */ bool freeze_task(struct task_struct *p) { unsigned long flags; /* * This check can race with freezer_do_not_count, but worst case that * will result in an extra wakeup being sent to the task. It does not * race with freezer_count(), the barriers in freezer_count() and * freezer_should_skip() ensure that either freezer_count() sees * freezing == true in try_to_freeze() and freezes, or * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task * normally. */ if (freezer_should_skip(p)) return false; spin_lock_irqsave(&freezer_lock, flags); if (!freezing(p) || frozen(p)) { spin_unlock_irqrestore(&freezer_lock, flags); return false; } if (!(p->flags & PF_KTHREAD)) fake_signal_wake_up(p); else wake_up_state(p, TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&freezer_lock, flags); return true; }
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, only send the request when @p does not have
 *	PF_FREEZER_NOSIG set (i.e. it is not a typical kernel thread)
 *
 * Sets TIF_FREEZE on @p and then either sends it a fake signal or wakes
 * it up, depending on should_send_signal(@p).
 *
 * Return: false if @sig_only is set and @p must not be signalled, or if
 * @p is already frozen; true otherwise.
 */
bool freeze_task(struct task_struct *p, bool sig_only)
{
	/*
	 * Check freezing(p) before frozen(p): frozen_process() first marks
	 * the task frozen and then clears TIF_FREEZE, so this order avoids
	 * racing with it.
	 */
	if (!freezing(p)) {
		/*
		 * Only inter-CPU ordering against frozen_process() is needed,
		 * so smp_rmb() (a compiler barrier on UP) suffices; a
		 * mandatory rmb() would be a needless hardware barrier.
		 */
		smp_rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		/* Skip the wakeup if a signal is already pending. */
		if (!signal_pending(p))
			fake_signal_wake_up(p);
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
/** * freeze_task - send a freeze request to given task * @p: task to send the request to * * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE * flag and either sending a fake signal to it or waking it up, depending * on whether it has %PF_FREEZER_NOSIG set. * * RETURNS: * %false, if @p is not freezing or already frozen; %true, otherwise */ bool freeze_task(struct task_struct *p) { unsigned long flags; spin_lock_irqsave(&freezer_lock, flags); if (!freezing(p) || frozen(p)) { spin_unlock_irqrestore(&freezer_lock, flags); return false; } if (!(p->flags & PF_KTHREAD)) { fake_signal_wake_up(p); /* * fake_signal_wake_up() goes through p's scheduler * lock and guarantees that TASK_STOPPED/TRACED -> * TASK_RUNNING transition can't race with task state * testing in try_to_freeze_tasks(). */ } else { wake_up_state(p, TASK_INTERRUPTIBLE); } spin_unlock_irqrestore(&freezer_lock, flags); return true; }
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @with_mm_only: if set, the request will only be sent if the task has its
 *	own mm
 * Return value: 0, if @with_mm_only is set and the task has no mm of its
 *	own or the task is frozen, 1, otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has its own mm (ie. it is a user land task).  If @with_mm_only
 * is set and the task has no mm of its own (ie. it is a kernel thread),
 * its TIF_FREEZE flag should not be set.
 *
 * The task_lock() is necessary to prevent races with exit_mm() or
 * use_mm()/unuse_mm() from occurring.
 */
static int freeze_task(struct task_struct *p, int with_mm_only)
{
	int ret = 1;

	task_lock(p);
	if (freezing(p)) {
		/* TIF_FREEZE is already set; just (re-)deliver the wakeup. */
		if (has_mm(p)) {
			/* User task: nudge it via a fake signal unless one is
			 * already pending. */
			if (!signal_pending(p))
				fake_signal_wake_up(p);
		} else {
			if (with_mm_only)
				ret = 0;
			else
				wake_up_state(p, TASK_INTERRUPTIBLE);
		}
	} else {
		/*
		 * Order this frozen(p) read after the freezing(p) read above,
		 * against the writer that marks the task frozen and then
		 * clears TIF_FREEZE.
		 */
		rmb();
		if (frozen(p)) {
			/* Already frozen: nothing to do, report failure. */
			ret = 0;
		} else {
			if (has_mm(p)) {
				set_freeze_flag(p);
				fake_signal_wake_up(p);
			} else {
				if (with_mm_only) {
					/* Kernel thread and caller asked for
					 * mm-owning tasks only. */
					ret = 0;
				} else {
					set_freeze_flag(p);
					wake_up_state(p, TASK_INTERRUPTIBLE);
				}
			}
		}
	}
	task_unlock(p);

	return ret;
}
/** * freeze_task - send a freeze request to given task * @p: task to send the request to * * If @p is freezing, the freeze request is sent either by sending a fake * signal (if it's not a kernel thread) or waking it up (if it's a kernel * thread). * * RETURNS: * %false, if @p is not freezing or already frozen; %true, otherwise */ bool freeze_task(struct task_struct *p) { unsigned long flags; spin_lock_irqsave(&freezer_lock, flags); if (!freezing(p) || frozen(p)) { spin_unlock_irqrestore(&freezer_lock, flags); return false; } if (!(p->flags & PF_KTHREAD)) fake_signal_wake_up(p); else wake_up_state(p, TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&freezer_lock, flags); return true; }
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task. It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	/* freezer_lock serializes the freezing/frozen state checks below. */
	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
/*
 * Deliver the freezer's fake wakeup to @p.
 *
 * A stopped task would ignore a bare wakeup, so force a SIGSTOP first to
 * make it leave TASK_STOPPED, then wake it via fake_signal_wake_up().
 * NOTE(review): p->state is read twice without synchronization visible
 * here; presumably the caller holds a lock that keeps it stable — confirm
 * against the call site.  The second argument to fake_signal_wake_up()
 * appears to tell it whether @p was stopped — verify its declaration.
 */
static void send_fake_signal(struct task_struct *p)
{
	if (p->state == TASK_STOPPED)
		force_sig_specific(SIGSTOP, p);

	fake_signal_wake_up(p, p->state == TASK_STOPPED);
}