/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZER_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZER_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
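As a rough illustration of how the signature-less freeze_task() above is consumed, the sketch below shows a minimal polling loop in the spirit of the try_to_freeze_tasks() variants that follow. It is a simplified reconstruction, not kernel source: the wrapper name freeze_remaining_tasks() and the fixed 5-second budget are hypothetical, and workqueue freezing, wakeup-source checks, and failure reporting are deliberately omitted.

/*
 * Minimal sketch (hypothetical helper, not kernel source): repeatedly ask
 * every other task to freeze via freeze_task() and poll until nothing is
 * left to freeze or an arbitrary time budget expires.  Mirrors the core
 * loop of try_to_freeze_tasks() without workqueue or wakeup handling.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int freeze_remaining_tasks(void)
{
	struct task_struct *g, *p;
	unsigned long end_time = jiffies + msecs_to_jiffies(5000);	/* arbitrary budget */
	unsigned int todo;

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			/* Skip ourselves and tasks that are not freezing or already frozen. */
			if (p == current || !freeze_task(p))
				continue;
			/* Tasks inside freezer_do_not_count() sections are "frozen enough". */
			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!todo)
			return 0;		/* everyone reached the refrigerator */
		if (time_after(jiffies, end_time))
			return -EBUSY;		/* give up; caller is expected to thaw */

		msleep(10);			/* let the stragglers hit try_to_freeze() */
	}
}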
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = 1;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
			       "(%d tasks refusing to freeze):\n",
			       elapsed_csecs / 100, elapsed_csecs % 100, todo);
		}
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
			    elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p) &&
				    freezing(p) && !frozen(p) &&
				    elapsed_csecs > 100)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler
			 * lock, it's guaranteed that TASK_STOPPED/TRACED ->
			 * TASK_RUNNING transition can't race with task state
			 * testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p) &&
				    freezing(p) && !frozen(p) &&
				    elapsed_csecs > 100)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
#ifndef CONFIG_UML
			pm_get_active_wakeup_sources(suspend_abort,
						     MAX_SUSPEND_ABORT_LEN);
#endif
			log_suspend_abort_reason(suspend_abort);
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (wakeup) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
		       elapsed_msecs / 1000, elapsed_msecs % 1000);
	} else if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p != current && !freezer_should_skip(p) &&
			    freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p, *q;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			cpu_relax();
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler
			 * lock, it's guaranteed that TASK_STOPPED/TRACED ->
			 * TASK_RUNNING transition can't race with task state
			 * testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p)) {
				todo++;
				q = p;
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = 1;
			break;
		}
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
		       "(%d tasks refusing to freeze):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_csecs / 100, elapsed_csecs % 100, todo);
		if (!wakeup)
			show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
			    elapsed_csecs > 100)
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_msecs / 1000, elapsed_msecs % 1000,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
#ifdef CONFIG_MSM_WATCHDOG
			msm_watchdog_suspend(NULL);
#endif
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p) &&
				    freezing(p) && !frozen(p) &&
				    elapsed_msecs > 1000)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
#ifdef CONFIG_MSM_WATCHDOG
			msm_watchdog_resume(NULL);
#endif
		}
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_csecs / 100, elapsed_csecs % 100,
		       todo - wq_busy, wq_busy);

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (!wakeup && !freezer_should_skip(p) &&
			    p != current && freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
static int try_to_freeze_tasks(int freeze_user_space)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	s64 elapsed_csecs64;
	unsigned int elapsed_csecs;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (p->state == TASK_TRACED && frozen(p->parent)) {
				cancel_freezing(p);
				continue;
			}

			if (!freeze_task(p, freeze_user_space))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
		       "(%d tasks refusing to freeze):\n",
		       elapsed_csecs / 100, elapsed_csecs % 100, todo);
		show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p))
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	struct task_struct *t = NULL;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler
			 * lock, it's guaranteed that TASK_STOPPED/TRACED ->
			 * TASK_RUNNING transition can't race with task state
			 * testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p)) {
				todo++;
				t = p;
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p) &&
				    freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
#ifdef CONFIG_SEC_PM_DEBUG
	struct task_struct *q = NULL;	/* last task seen still refusing to freeze */
#endif
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p)) {
				todo++;
#ifdef CONFIG_SEC_PM_DEBUG
				q = p;
#endif
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

#ifdef CONFIG_SEC_PM_DEBUG
		if (wakeup) {
			printk(KERN_ERR "Freezing of %s aborted (%d) (%s)\n",
			       user_only ? "user space " : "tasks ",
			       q ? q->pid : 0, q ? q->comm : "NONE");
		}
#endif

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p) &&
				    freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;
#ifdef CONFIG_SHSYS_CUST
	struct timespec tu;
#endif

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!sig_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!sig_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
#ifdef CONFIG_SHSYS_CUST
		tu.tv_sec = 0;
		tu.tv_nsec = 10000000;
		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
#else
		msleep(10);
#endif
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}
		thaw_workqueues();

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
			    elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {