static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();

		add_wait_queue(&vb->config_change, &wait);
		for (;;) {
			if ((diff = towards_target(vb)) != 0 ||
			    vb->need_stats_update ||
			    kthread_should_stop() ||
			    freezing(current))
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
		}
		remove_wait_queue(&vb->config_change, &wait);

		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);

		/*
		 * For large balloon changes, we could spend a lot of time
		 * and always have work to do.  Be nice if preempt disabled.
		 */
		cond_resched();
	}
	return 0;
}
/*
 * wait_for_stat - wait for a TPM_STS value
 * @param: chip, the tpm chip description
 * @param: mask, the value mask to wait for
 * @param: timeout, the timeout
 * @param: queue, the wait queue.
 * @param: check_cancel, whether the command can be cancelled
 * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
 */
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue, bool check_cancel)
{
	struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
	unsigned long stop;
	int ret = 0;
	bool canceled = false;
	bool condition;
	u32 cur_intrs;
	u8 status;

	/* check current status */
	status = st33zp24_status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
		cur_intrs = tpm_dev->intrs;
		clear_interruption(tpm_dev);
		enable_irq(tpm_dev->irq);

		do {
			if (ret == -ERESTARTSYS && freezing(current))
				clear_thread_flag(TIF_SIGPENDING);

			timeout = stop - jiffies;
			if ((long) timeout <= 0)
				return -1;

			ret = wait_event_interruptible_timeout(*queue,
						cur_intrs != tpm_dev->intrs,
						timeout);

			clear_interruption(tpm_dev);
			condition = wait_for_tpm_stat_cond(chip, mask,
						check_cancel, &canceled);
			if (ret >= 0 && condition) {
				if (canceled)
					return -ECANCELED;
				return 0;
			}
		} while (ret == -ERESTARTSYS && freezing(current));

		disable_irq_nosync(tpm_dev->irq);
	} else {
		do {
			msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}

	return -ETIME;
} /* wait_for_stat() */
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
 * flag and either sending a fake signal to it or waking it up, depending
 * on whether it has %PF_FREEZER_NOSIG set.
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
bool freeze_task(struct task_struct *p, bool sig_only)
{
	if (!freezing(p)) {
		rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		if (!signal_pending(p))
			fake_signal_wake_up(p);
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, the request will only be sent if the task has the
 *	PF_FREEZER_NOSIG flag unset
 * Return value: 'false', if @sig_only is set and the task has
 *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 * TIF_FREEZE flag will not be set.
 */
bool freeze_task(struct task_struct *p, bool sig_only)
{
	/*
	 * We first check if the task is freezing and next if it has already
	 * been frozen to avoid the race with frozen_process() which first
	 * marks the task as frozen and next clears its TIF_FREEZE.
	 */
	if (!freezing(p)) {
		rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		if (!signal_pending(p))
			fake_signal_wake_up(p);
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
bool __refrigerator(bool check_kthr_stop)
{
	bool was_frozen = false;
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	set_current_state(save);

	return was_frozen;
}
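For context, a minimal sketch of how callers usually reach __refrigerator(): a try_to_freeze()-style wrapper first checks freezing(current) and only blocks when the freezer has asked. The wrapper name and exact body below are an assumption for illustration, not copied from any one kernel version.

/* Hypothetical wrapper sketching the usual path into __refrigerator(). */
static inline bool try_to_freeze_sketch(void)
{
	might_sleep();
	if (likely(!freezing(current)))		/* fast path: nobody asked us to freeze */
		return false;
	return __refrigerator(false);		/* block here until the freezer releases us */
}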
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, the request will only be sent if the task has the
 *	PF_FREEZER_NOSIG flag unset
 * Return value: 'false', if @sig_only is set and the task has
 *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 *
 * The freeze request is sent by setting the task's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 * TIF_FREEZE flag will not be set.
 */
bool freeze_task(struct task_struct *p, bool sig_only)
{
	/*
	 * We first check if the task is freezing and next if it has already
	 * been frozen to avoid the race with frozen_process() which first
	 * marks the task as frozen and next clears its TIF_FREEZE.
	 */
	if (!freezing(p)) {
		smp_rmb();
		if (frozen(p))
			return false;

		if (!sig_only || should_send_signal(p))
			set_freeze_flag(p);
		else
			return false;
	}

	if (should_send_signal(p)) {
		fake_signal_wake_up(p);
		/*
		 * fake_signal_wake_up() goes through p's scheduler
		 * lock and guarantees that TASK_STOPPED/TRACED ->
		 * TASK_RUNNING transition can't race with task state
		 * testing in try_to_freeze_tasks().
		 */
	} else if (sig_only) {
		return false;
	} else {
		wake_up_state(p, TASK_INTERRUPTIBLE);
	}

	return true;
}
/**
 * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep
 * @restart: ptr to restart block
 *
 * Handles restarted clock_nanosleep calls
 */
static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
{
	enum alarmtimer_type type = restart->nanosleep.clockid;
	ktime_t exp;
	struct timespec __user *rmtp;
	struct alarm alarm;
	int ret = 0;

	exp.tv64 = restart->nanosleep.expires;
	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);

	if (alarmtimer_do_nsleep(&alarm, exp))
		goto out;

	if (freezing(current))
		alarmtimer_freezerset(exp, type);

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(exp, type, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	return ret;
}
/* Trigger work thread */
static void max3107_dowork(struct max3107_port *s)
{
	if (!work_pending(&s->work) && !freezing(current) && !s->suspended)
		queue_work(s->workqueue, &s->work);
	else
		dev_warn(&s->spi->dev, "interrupt isn't serviced normally!\n");
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
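The comment above refers to the freezer_do_not_count()/freezer_count() pair, which lets a task opt out of freezer accounting around a long sleep. A minimal usage sketch, assuming those two helpers as described; the function name and the completion it waits on are hypothetical.

/* Hypothetical sleep that the freezer is allowed to skip over. */
static void wait_for_event_sketch(struct completion *done)
{
	freezer_do_not_count();		/* freezer may treat us as already frozen while we sleep */
	wait_for_completion(done);
	freezer_count();		/* rejoin freezer accounting; freezes here if a freeze is pending */
}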
static int monitor_task(void *arg)
{
	struct thermostat *th = arg;

	while (!kthread_should_stop()) {
		if (unlikely(freezing(current)))
			refrigerator();

		msleep_interruptible(2000);

#ifdef DEBUG
		DumpTachoRegisters();
#endif

		read_sensors(th);
		update_fan_speed(th);

#ifdef DEBUG
		/* Be careful with the stats displayed.  The Fan Counter value
		 * depends on what value is written in the register during the
		 * read sensors call.  If it's in temperature read setting, the
		 * fan counter and hence the rpm will be WRONG. */
		display_stats(th);
#endif
	}

	return 0;
}
/*
 * wait_for_stat - wait for a TPM_STS value
 * @param: chip, the tpm chip description
 * @param: mask, the value mask to wait for
 * @param: timeout, the timeout
 * @param: queue, the wait queue.
 * @param: check_cancel, whether the command can be cancelled
 * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
 */
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
			 wait_queue_head_t *queue, bool check_cancel)
{
	unsigned long stop;
	int ret;
	bool canceled = false;
	bool condition;
	u32 cur_intrs;
	u8 interrupt, status;
	struct tpm_stm_dev *tpm_dev;

	tpm_dev = (struct tpm_stm_dev *)TPM_VPRIV(chip);

	/* check current status */
	status = tpm_stm_i2c_status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->vendor.irq) {
		cur_intrs = tpm_dev->intrs;
		interrupt = clear_interruption(tpm_dev);
		enable_irq(chip->vendor.irq);

again:
		timeout = stop - jiffies;
		if ((long) timeout <= 0)
			return -1;

		ret = wait_event_interruptible_timeout(*queue,
					cur_intrs != tpm_dev->intrs, timeout);

		interrupt |= clear_interruption(tpm_dev);
		status = interrupt_to_status(interrupt);
		condition = wait_for_tpm_stat_cond(chip, mask,
						   check_cancel, &canceled);

		if (ret >= 0 && condition) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		if (ret == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
		disable_irq_nosync(chip->vendor.irq);
	} else {
		do {
			msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}

	return -ETIME;
} /* wait_for_stat() */
/* Refrigerator is place where frozen processes are stored :-). */
void refrigerator(void)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	long save;

	task_lock(current);
	if (freezing(current)) {
		frozen_process();
		task_unlock(current);
	} else {
		task_unlock(current);
		return;
	}
	save = current->state;
	pr_debug("%s entered refrigerator\n", current->comm);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* We sent fake signal, clean it up */
	spin_unlock_irq(&current->sighand->siglock);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!frozen(current))
			break;
		schedule();
	}
	pr_debug("%s left refrigerator\n", current->comm);
	__set_current_state(save);
}
static int balloon(void *_vballoon)
{
	struct virtio_balloon *vb = _vballoon;

	set_freezable();
	while (!kthread_should_stop()) {
		s64 diff;

		try_to_freeze();
		wait_event_interruptible(vb->config_change,
					 (diff = towards_target(vb)) != 0 ||
					 vb->need_stats_update ||
					 kthread_should_stop() ||
					 freezing(current));
		if (vb->need_stats_update)
			stats_handle_request(vb);
		if (diff > 0)
			fill_balloon(vb, diff);
		else if (diff < 0)
			leak_balloon(vb, -diff);
		update_balloon_size(vb);

		/*
		 * For large balloon changes, we could spend a lot of time
		 * and always have work to do.  Be nice if preempt disabled.
		 */
		cond_resched();
	}
	return 0;
}
/* 0 = success, else # of processes that we failed to stop */
int freeze_processes(void)
{
	int todo;
	unsigned long start_time;
	struct task_struct *g, *p;
	unsigned long flags;

	printk("Stopping tasks: ");
	start_time = jiffies;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (!freezeable(p))
				continue;
			if (frozen(p))
				continue;

			freeze(p);
			spin_lock_irqsave(&p->sighand->siglock, flags);
			signal_wake_up(p, 0);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (todo && time_after(jiffies, start_time + TIMEOUT)) {
			printk("\n");
			printk(KERN_ERR " stopping tasks failed (%d tasks remaining)\n", todo);
			break;
		}
	} while (todo);

	/* This does not unfreeze processes that are already frozen
	 * (we have slightly ugly calling convention in that respect,
	 * and caller must call thaw_processes() if something fails),
	 * but it cleans up leftover PF_FREEZE requests.
	 */
	if (todo) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p)
			if (freezing(p)) {
				pr_debug("  clean up: %s\n", p->comm);
				p->flags &= ~PF_FREEZE;
				spin_lock_irqsave(&p->sighand->siglock, flags);
				recalc_sigpending_tsk(p);
				spin_unlock_irqrestore(&p->sighand->siglock, flags);
			}
		while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		return todo;
	}

	printk("|\n");
	BUG_ON(in_atomic());
	return 0;
}
static inline void freeze_process(struct task_struct *p)
{
	unsigned long flags;

	if (!freezing(p)) {
		freeze(p);
		spin_lock_irqsave(&p->sighand->siglock, flags);
		signal_wake_up(p, 0);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
}
void cancel_freezing(struct task_struct *p)
{
	unsigned long flags;

	if (freezing(p)) {
		pr_debug("  clean up: %s\n", p->comm);
		clear_freeze_flag(p);
		spin_lock_irqsave(&p->sighand->siglock, flags);
		recalc_sigpending_and_wake(p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
}
/**
 * alarm_timer_nsleep - alarmtimer nanosleep
 * @which_clock: clockid
 * @flags: determines abstime or relative
 * @tsreq: requested sleep time (abs or rel)
 * @rmtp: remaining sleep time saved
 *
 * Handles clock_nanosleep calls against _ALARM clockids
 */
static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *tsreq,
			      struct timespec __user *rmtp)
{
	enum alarmtimer_type type = clock2alarm(which_clock);
	struct alarm alarm;
	ktime_t exp;
	int ret = 0;
	struct restart_block *restart;

	if (!alarmtimer_get_rtcdev())
		return -ENOTSUPP;

	if (!capable(CAP_WAKE_ALARM))
		return -EPERM;

	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);

	exp = timespec_to_ktime(*tsreq);
	/* Convert (if necessary) to absolute time */
	if (flags != TIMER_ABSTIME) {
		ktime_t now = alarm_bases[type].gettime();
		exp = ktime_add(now, exp);
	}

	if (alarmtimer_do_nsleep(&alarm, exp))
		goto out;

	if (freezing(current))
		alarmtimer_freezerset(exp, type);

	/* abs timers don't set remaining time or restart */
	if (flags == TIMER_ABSTIME) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(exp, type, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = alarm_timer_nsleep_restart;
	restart->nanosleep.clockid = type;
	restart->nanosleep.expires = exp.tv64;
	restart->nanosleep.rmtp = rmtp;
	ret = -ERESTART_RESTARTBLOCK;
out:
	return ret;
}
static void hkey_poll_stop(void)
{
	if (hkey_poll_task) {
		if (frozen(hkey_poll_task) ||
		    freezing(hkey_poll_task))
			thaw_process(hkey_poll_task);

		kthread_stop(hkey_poll_task);
		hkey_poll_task = NULL;
		mutex_lock(&hkey_poll_mutex);
		/* at this point, the thread did exit */
		mutex_unlock(&hkey_poll_mutex);
	}
}
/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
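A minimal usage sketch of the API documented above, assuming a typical freezable kthread main loop; my_thread_fn and the work placeholder are hypothetical, only set_freezable(), kthread_freezable_should_stop() and schedule_timeout_interruptible() are taken as given.

/* Hypothetical freezable kthread built around kthread_freezable_should_stop(). */
static int my_thread_fn(void *data)
{
	bool was_frozen;

	set_freezable();	/* opt this kthread into the freezer */
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("resumed after freeze\n");
		/* ... do one unit of work here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}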
int gfs2_recoverd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t;

	while (!kthread_should_stop()) {
		gfs2_check_journals(sdp);
		t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
		if (freezing(current))
			refrigerator();
		schedule_timeout_interruptible(t);
	}

	return 0;
}
static inline void freeze_process(struct task_struct *p)
{
	unsigned long flags;

	if (!freezing(p)) {
		rmb();
		if (!frozen(p)) {
			if (p->state == TASK_STOPPED)
				force_sig_specific(SIGSTOP, p);

			freeze(p);
			spin_lock_irqsave(&p->sighand->siglock, flags);
			signal_wake_up(p, p->state == TASK_STOPPED);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
		}
	}
}
static void hkey_poll_stop(void)
{
	if (hkey_poll_task) {
		if (frozen(hkey_poll_task) ||
		    freezing(hkey_poll_task))
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
			thaw_process(hkey_poll_task);
#else
			wake_up_process(hkey_poll_task);
#endif

		kthread_stop(hkey_poll_task);
		hkey_poll_task = NULL;
		mutex_lock(&hkey_poll_mutex);
		/* at this point, the thread did exit */
		mutex_unlock(&hkey_poll_mutex);
	}
}
int gfs2_glockd(void *data)
{
	struct gfs2_sbd *sdp = data;

	while (!kthread_should_stop()) {
		while (atomic_read(&sdp->sd_reclaim_count))
			gfs2_reclaim_glock(sdp);

		wait_event_interruptible(sdp->sd_reclaim_wq,
					 (atomic_read(&sdp->sd_reclaim_count) ||
					  kthread_should_stop()));

		if (freezing(current))
			refrigerator();
	}

	return 0;
}
int testcase(int check_kthr_stop)
{
	int was_frozen = 0;

	for (;;) {
		if (!freezing(cur) ||
		    (check_kthr_stop && blah()))
			cur->flags &= ~PF_FROZEN;

		if (!(cur->flags & PF_FROZEN))
			break;
		was_frozen = 1;
	}

	return was_frozen;
}
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t;
	int error;

	while (!kthread_should_stop()) {
		/* Update the master statfs file */

		t = sdp->sd_statfs_sync_time +
		    gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;

		if (time_after_eq(jiffies, t)) {
			error = gfs2_statfs_sync(sdp);
			if (error &&
			    error != -EROFS &&
			    !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
				fs_err(sdp, "quotad: (1) error=%d\n", error);
			sdp->sd_statfs_sync_time = jiffies;
		}

		/* Update quota file */

		t = sdp->sd_quota_sync_time +
		    gfs2_tune_get(sdp, gt_quota_quantum) * HZ;

		if (time_after_eq(jiffies, t)) {
			error = gfs2_quota_sync(sdp);
			if (error &&
			    error != -EROFS &&
			    !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
				fs_err(sdp, "quotad: (2) error=%d\n", error);
			sdp->sd_quota_sync_time = jiffies;
		}

		gfs2_quota_scan(sdp);

		t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ;
		if (freezing(current))
			refrigerator();
		schedule_timeout_interruptible(t);
	}

	return 0;
}
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
			     unsigned long timeout, wait_queue_head_t *queue,
			     bool check_cancel)
{
	unsigned long stop;
	long rc;
	u8 status;
	bool canceled = false;

	/* check current status */
	status = chip->ops->status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
			wait_for_tpm_stat_cond(chip, mask, check_cancel,
					       &canceled),
			timeout);
		if (rc > 0) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		do {
			tpm_msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
static void bu92747_irda_work(struct work_struct *w)
{
	struct bu92747_port *s = container_of(w, struct bu92747_port, work);
	struct circ_buf *xmit = &s->port.state->xmit;

	dev_dbg(s->dev, "%s\n", __func__);
	BU92747_IRDA_DBG("line %d, enter %s \n", __LINE__, __FUNCTION__);

	if (!s->force_end_work && !freezing(current)) {
		//BU92725GUW_dump_register();
		if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
			if (s->tx_empty)
				bu92747_irda_do_tx(s);
			else
				bu92747_irda_dowork(s);
		}
	}
}
/**
 * Cache management thread.
 * The thread loads and preprocesses static Web content using inotify (TODO).
 */
static int tfw_cache_mgr(void *arg)
{
	do {
		/*
		 * TODO wait while the thread is propagating disk Web data
		 * to the cache when the server starts.
		 */
		if (!freezing(current)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		} else {
			try_to_freeze();
		}
	} while (!kthread_should_stop());

	return 0;
}