/* afs_osi_TimedSleep
 *
 * Arguments:
 * event - event to sleep on
 * ams --- max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout, EINTR if signalled, and EAGAIN if it might
 * have raced.
 */
int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    /* round up so a sub-tick request still sleeps at least one tick */
    long ticks = (ams * HZ / 1000) + 1;
    struct afs_event *evp;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    /* pre-wait-queue-macro kernels: build the entry by hand */
    struct wait_queue wait = { current, NULL };
#endif

    /* Find the event record for this address, creating it on first use. */
    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    add_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_INTERRUPTIBLE);
    /* always sleep TASK_INTERRUPTIBLE to keep load average
     * from artificially increasing. */
    AFS_GUNLOCK();

    if (schedule_timeout(ticks)) {
	/* A nonzero return only means we woke before the timeout
	 * expired -- either the event fired or a signal arrived.
	 * Report EINTR only when the caller asked for an interruptible
	 * sleep AND a signal really is pending; an early wakeup from
	 * the event side is not an interruption. */
	if (aintok && signal_pending(current))
	    code = EINTR;
    }

#ifdef CONFIG_PM
    /* Cooperate with suspend; the "please freeze" flag moved around
     * between kernel versions, hence the #ifdef ladder. */
    if (
#ifdef PF_FREEZE
	current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	!current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
	test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
	test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	)
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	refrigerator(PF_FREEZE);
#else
	refrigerator();
#endif
#endif

    AFS_GLOCK();
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);		/* drop our reference on the event record */

    return code;
}
/* afs_osi_SleepSig
 *
 * Waits for an event to be notified, returning early if a signal
 * is received. Returns EINTR if signaled, and 0 otherwise.
 */
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, retval;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    /* older kernels without the macro: build the entry by hand */
    struct wait_queue wait = { current, NULL };
#endif

    /* Find (or lazily create) the event record keyed by this address. */
    evp = afs_getevent(event);
    if (!evp) {
	afs_addevent(event);
	evp = afs_getevent(event);
    }

    /* Snapshot the sequence number: the wakeup side bumps evp->seq, so
     * a changed value means the event has fired while we slept. */
    seq = evp->seq;
    retval = 0;

    add_wait_queue(&evp->cond, &wait);
    while (seq == evp->seq) {
	set_current_state(TASK_INTERRUPTIBLE);
	AFS_ASSERT_GLOCK();
	AFS_GUNLOCK();		/* drop the AFS global lock while asleep */
	schedule();
#ifdef CONFIG_PM
	/* Suspend support: enter the refrigerator when a freeze is
	 * requested (the flag's location varies by kernel version). */
	if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
	    test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
	    test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	    refrigerator(PF_FREEZE);
#else
	    refrigerator();
#endif
#endif
	AFS_GLOCK();
	if (signal_pending(current)) {
	    retval = EINTR;
	    break;
	}
    }
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);		/* release our hold on the event record */
    return retval;
}
/*
 * syncd - per-mount XFS background sync daemon.
 *
 * Periodically (every xfs_syncd_interval ticks) pushes dirty data via
 * VFS_SYNC until the filesystem is unmounted.  Startup and teardown are
 * handshaked with the mount/unmount path through vfs_wait_sync_task.
 */
STATIC int
syncd(void *arg)
{
	vfs_t *vfsp = (vfs_t *) arg;
	int error;

	daemonize("xfs_syncd");

	/* Publish ourselves; the write barrier orders the task store
	 * before the wake_up so the waiter sees a valid pointer. */
	vfsp->vfs_sync_task = current;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	for (;;) {
		/* state must be set before schedule_timeout() so a wakeup
		 * between the two is not lost */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(xfs_syncd_interval);
		/* swsusp */
		if (current->flags & PF_FREEZE)
			refrigerator(PF_IOTHREAD);
		if (vfsp->vfs_flag & VFS_UMOUNT)
			break;		/* unmount in progress: shut down */
		if (vfsp->vfs_flag & VFS_RDONLY)
			continue;	/* nothing to push when read-only */
		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
	}

	/* Deregister and wake the unmount path waiting for our exit. */
	vfsp->vfs_sync_task = NULL;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	return 0;
}
/*
 * Kernel thread: poll the thermostat roughly every two seconds,
 * re-reading the sensors and adjusting the fan speed, and honouring
 * the freezer during suspend.  Runs until kthread_stop().
 */
static int monitor_task(void *arg)
{
	struct thermostat *state = arg;

	for (;;) {
		if (kthread_should_stop())
			break;

		if (unlikely(freezing(current)))
			refrigerator();

		msleep_interruptible(2000);
#ifdef DEBUG
		DumpTachoRegisters();
#endif
		read_sensors(state);
		update_fan_speed(state);
#ifdef DEBUG
		/* NB: the fan-counter (and hence rpm) figure is only valid
		 * if the preceding read_sensors() call left the register in
		 * the matching mode; in temperature-read mode it is junk. */
		display_stats(state);
#endif
	}

	return 0;
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 *
 * Returns 1 if a signal was delivered (or we came from kernel mode),
 * 0 otherwise.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;

#ifdef CONFIG_PREEMPT_RT
	/*
	 * Fully-preemptible kernel does not need interrupts disabled:
	 */
	local_irq_enable();
	preempt_check_resched();
#endif

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	/* Suspend support: park in the refrigerator, deliver nothing,
	 * but still fall through to the syscall-restart logic below. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee! Actually deliver the signal. */
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}

no_signal:
	/* Did we come from a system call? */
	if (regs->tra >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->regs[0] == -ERESTARTNOHAND ||
		    regs->regs[0] == -ERESTARTSYS ||
		    regs->regs[0] == -ERESTARTNOINTR ||
		    regs->regs[0] == -ERESTART_RESTARTBLOCK) {
			/* NOTE(review): ERESTART_RESTARTBLOCK is restarted
			 * here without loading __NR_restart_syscall, unlike
			 * the other ports in this file -- confirm that is
			 * intentional for this architecture. */
			regs->pc -= 2;	/* back up over the trap instruction */
		}
	}
	return 0;
}
/**
 * gfs2_recoverd - kernel thread that looks for journals needing recovery
 * @data: the GFS2 superblock
 *
 * Scans the journals, then sleeps for gt_recoverd_secs (entering the
 * refrigerator first when a suspend is in progress).  Runs until
 * kthread_stop().
 */
int gfs2_recoverd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long timeout;

	for (;;) {
		if (kthread_should_stop())
			break;

		gfs2_check_journals(sdp);
		timeout = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
		if (freezing(current))
			refrigerator();
		schedule_timeout_interruptible(timeout);
	}

	return 0;
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Returns 1 if a signal was delivered (or we came from kernel mode),
 * 0 otherwise.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	/* Suspend support: park in the refrigerator; skip delivery but
	 * still perform the syscall-restart fixups below. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &info, &ka, oldset, regs);
		return 1;
	}

no_signal:
	/* Did we come from a system call? */
	if (regs->syscallno >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->gr8 == -ERESTARTNOHAND ||
		    regs->gr8 == -ERESTARTSYS ||
		    regs->gr8 == -ERESTARTNOINTR) {
			/* restore the clobbered argument register and
			 * re-execute the 4-byte trap instruction */
			regs->gr8 = regs->orig_gr8;
			regs->pc -= 4;
		}
		if (regs->gr8 == -ERESTART_RESTARTBLOCK){
			/* restart via sys_restart_syscall() instead */
			regs->gr8 = __NR_restart_syscall;
			regs->pc -= 4;
		}
	}

	return 0;

} /* end do_signal() */
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 * (NOTE(review): the CCR & 0x10 test appears to be this port's
	 * "not user mode" check -- confirm against the CPU manual.)
	 */
	if ((regs->ccr & 0x10))
		return 1;

	/* Suspend support: park in the refrigerator, deliver nothing,
	 * but still run the syscall-restart logic below. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	/* record the kernel-mode register frame for later use */
	current->thread.esp0 = (unsigned long) regs;

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee! Actually deliver the signal. */
		handle_signal(signr, &info, &ka, oldset, regs);
		return 1;
	}
no_signal:
	/* Did we come from a system call? */
	if (regs->orig_er0 >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->er0 == -ERESTARTNOHAND ||
		    regs->er0 == -ERESTARTSYS ||
		    regs->er0 == -ERESTARTNOINTR) {
			/* restore the original syscall number and back up
			 * over the 2-byte trap instruction */
			regs->er0 = regs->orig_er0;
			regs->pc -= 2;
		}
		if (regs->er0 == -ERESTART_RESTARTBLOCK){
			/* restart via sys_restart_syscall() */
			regs->er0 = __NR_restart_syscall;
			regs->pc -= 2;
		}
	}
	return 0;
}
/**
 * gfs2_glockd - kernel thread that reclaims unused glocks
 * @data: the GFS2 superblock
 *
 * Drains sd_reclaim_count, then sleeps on sd_reclaim_wq until more
 * work arrives or the thread is asked to stop.
 */
int gfs2_glockd(void *data)
{
	struct gfs2_sbd *sdp = data;

	for (;;) {
		if (kthread_should_stop())
			break;

		/* Drain the reclaim list before sleeping again. */
		while (atomic_read(&sdp->sd_reclaim_count))
			gfs2_reclaim_glock(sdp);

		wait_event_interruptible(sdp->sd_reclaim_wq,
					 (atomic_read(&sdp->sd_reclaim_count) ||
					  kthread_should_stop()));
		if (freezing(current))
			refrigerator();
	}

	return 0;
}
int gfs2_quotad(void *data) { struct gfs2_sbd *sdp = data; unsigned long t; int error; while (!kthread_should_stop()) { /* Update the master statfs file */ t = sdp->sd_statfs_sync_time + gfs2_tune_get(sdp, gt_statfs_quantum) * HZ; if (time_after_eq(jiffies, t)) { error = gfs2_statfs_sync(sdp); if (error && error != -EROFS && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) fs_err(sdp, "quotad: (1) error=%d\n", error); sdp->sd_statfs_sync_time = jiffies; } /* Update quota file */ t = sdp->sd_quota_sync_time + gfs2_tune_get(sdp, gt_quota_quantum) * HZ; if (time_after_eq(jiffies, t)) { error = gfs2_quota_sync(sdp); if (error && error != -EROFS && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) fs_err(sdp, "quotad: (2) error=%d\n", error); sdp->sd_quota_sync_time = jiffies; } gfs2_quota_scan(sdp); t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ; if (freezing(current)) refrigerator(); schedule_timeout_interruptible(t); } return 0; }
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 *
 * Returns 1 if a signal was delivered (or we came from kernel mode),
 * 0 otherwise.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	/* Suspend support: park in the refrigerator; no delivery, but
	 * still run the syscall-restart logic below. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, regs, NULL);
	if (signr > 0) {
		/* Whee! Actually deliver the signal. */
		handle_signal(signr, &info, oldset, regs);
		return 1;
	}

no_signal:
	/* Did we come from a system call? */
	if (regs->tra >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->regs[0] == -ERESTARTNOHAND ||
		    regs->regs[0] == -ERESTARTSYS ||
		    regs->regs[0] == -ERESTARTNOINTR ||
		    regs->regs[0] == -ERESTART_RESTARTBLOCK) {
			/* NOTE(review): ERESTART_RESTARTBLOCK is restarted
			 * without loading __NR_restart_syscall, unlike the
			 * other ports in this file -- confirm intentional. */
			regs->pc -= 2;	/* back up over the trap instruction */
		}
	}
	return 0;
}
static int rpc_kernel_thread(void *p) { char readbuf[sizeof(RPC_STRUCT)+2*sizeof(unsigned long)]; RPC_KERN_Dev *dev; RPC_STRUCT *rpc; unsigned long *tmp; char name[20]; sprintf(name, "rpc-%d", (int)p); daemonize(name); dev = &rpc_kern_devices[(int)p]; while (1) { if (current->flags & PF_FREEZE) refrigerator(PF_FREEZE); // printk(" #@# wait %s %x %x \n", current->comm, dev, dev->waitQueue); if (wait_event_interruptible(dev->waitQueue, dev->ringIn != dev->ringOut) == -ERESTARTSYS) { printk("%s got signal...\n", current->comm); continue; } // printk(" #@# wakeup %s \n", current->comm); // read the reply data... if (rpc_kern_read(((int)p)/RPC_NR_PAIR, readbuf, sizeof(readbuf)) != sizeof(readbuf)) { printk("ERROR in read kernel RPC...\n"); continue; } // parse the reply data... rpc = (RPC_STRUCT *)readbuf; tmp = (unsigned long *)(readbuf+sizeof(RPC_STRUCT)); *((unsigned long *)ntohl(rpc->mycontext)) = ntohl(*(tmp+1)); *((unsigned long *)ntohl(*tmp)) = 1; // ack the sync... } return 0; }
static int hub_thread(void *__hub) { /* * This thread doesn't need any user-level access, * so get rid of all our resources */ daemonize("khubd"); allow_signal(SIGKILL); /* Send me a signal to get me die (for debugging) */ do { hub_events(); wait_event_interruptible(khubd_wait, !list_empty(&hub_event_list)); if (current->flags & PF_FREEZE) refrigerator(PF_IOTHREAD); } while (!signal_pending(current)); // dbg("hub_thread exiting"); complete_and_exit(&khubd_exited, 0); }
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 *
 * Returns 1 if a signal was delivered, 0 otherwise.
 */
static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
{
	siginfo_t info;
	int signr;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 0;

	/* Suspend support: park in the refrigerator; skip delivery but
	 * still perform syscall restarting below. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	/* remove any ptrace single-step breakpoint before delivery */
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_cancel_bpt(current);

	signr = get_signal_to_deliver(&info, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &info, oldset, regs, syscall);
		/* re-arm the single-step breakpoint we cancelled above */
		if (current->ptrace & PT_SINGLESTEP)
			ptrace_set_bpt(current);
		return 1;
	}

no_signal:
	/*
	 * No signal to deliver to the process - restart the syscall.
	 */
	if (syscall) {
		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
			if (thumb_mode(regs)) {
				/* Thumb: reload r7 with the restart syscall
				 * number and re-execute the 2-byte swi */
				regs->ARM_r7 = __NR_restart_syscall;
				regs->ARM_pc -= 2;
			} else {
				/* ARM: push a 3-word trampoline onto the user
				 * stack -- saved pc, "swi __NR_restart_syscall"
				 * and "ldr pc, [sp], #12" to pop it again --
				 * then point pc at the swi.
				 * NOTE(review): the put_user() return values
				 * are ignored here -- confirm a fault on the
				 * user stack is acceptable to swallow. */
				u32 __user *usp;

				regs->ARM_sp -= 12;
				usp = (u32 __user *)regs->ARM_sp;

				put_user(regs->ARM_pc, &usp[0]);
				/* swi __NR_restart_syscall */
				put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
				/* ldr pc, [sp], #12 */
				put_user(0xe49df00c, &usp[2]);

				/* make the trampoline visible to the I-side */
				flush_icache_range((unsigned long)usp,
						   (unsigned long)(usp + 3));

				regs->ARM_pc = regs->ARM_sp + 4;
			}
		}
		if (regs->ARM_r0 == -ERESTARTNOHAND ||
		    regs->ARM_r0 == -ERESTARTSYS ||
		    regs->ARM_r0 == -ERESTARTNOINTR) {
			restart_syscall(regs);
		}
	}

	/* re-arm the single-step breakpoint on the no-signal path too */
	if (current->ptrace & PT_SINGLESTEP)
		ptrace_set_bpt(current);
	return 0;
}
/*
 * handle_signal - build the user-space stack frame for one signal and
 * fix up the syscall-restart bookkeeping held in the register file.
 */
static inline void handle_signal(unsigned long sig, siginfo_t *info,
	struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
	/* regs->regs[0] non-zero means we interrupted a syscall; decide
	 * whether it returns EINTR or is restarted under the handler. */
	switch(regs->regs[0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		regs->regs[2] = EINTR;
		break;
	case ERESTARTSYS:
		if(!(ka->sa.sa_flags & SA_RESTART)) {
			regs->regs[2] = EINTR;
			break;
		}
	/* fallthrough */
	case ERESTARTNOINTR:		/* Userland will reload $v0.  */
		regs->regs[7] = regs->regs[26];
		regs->cp0_epc -= 8;
	}

	regs->regs[0] = 0;		/* Don't deal with this again.  */

	/* Choose the frame format: rt frames when SA_SIGINFO (always,
	 * on kernels without traditional signals), with the N32 variant
	 * for N32-ABI tasks. */
#ifdef CONFIG_TRAD_SIGNALS
	if (ka->sa.sa_flags & SA_SIGINFO) {
#else
	if (1) {
#endif
#ifdef CONFIG_MIPS32_N32
		if ((current->thread.mflags & MF_ABI_MASK) == MF_N32)
			setup_rt_frame_n32 (ka, regs, sig, oldset, info);
		else
#endif
			setup_rt_frame(ka, regs, sig, oldset, info);
	}
#ifdef CONFIG_TRAD_SIGNALS
	else
		setup_frame(ka, regs, sig, oldset);
#endif

	/* Unless SA_NODEFER, block the delivered signal (plus sa_mask)
	 * while its handler runs. */
	if (!(ka->sa.sa_flags & SA_NODEFER)) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
}

extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);

/*
 * do_signal - deliver one pending signal, or arrange for the
 * interrupted syscall to be restarted when none is pending.
 * Returns 1 if a signal was delivered (or we came from kernel mode).
 */
asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr;

#ifdef CONFIG_BINFMT_ELF32
	/* o32-ABI tasks use the dedicated 32-bit delivery path. */
	if ((current->thread.mflags & MF_ABI_MASK) == MF_O32) {
		return do_signal32(oldset, regs);
	}
#endif

	/*
	 * We want the common case to go fast, which is why we may in certain
	 * cases get here from kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	/* Suspend support: park in the refrigerator; skip delivery but
	 * still run the syscall-restart fixups. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		handle_signal(signr, &info, &ka, oldset, regs);
		return 1;
	}

no_signal:
	/*
	 * Who's code doesn't conform to the restartable syscall convention
	 * dies here!!!  The li instruction, a single machine instruction,
	 * must directly be followed by the syscall instruction.
	 */
	if (regs->regs[0]) {
		if (regs->regs[2] == ERESTARTNOHAND ||
		    regs->regs[2] == ERESTARTSYS ||
		    regs->regs[2] == ERESTARTNOINTR) {
			/* back up over both the li and the syscall (8 bytes) */
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 8;
		}
		if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
			/* redirect to sys_restart_syscall; re-execute only
			 * the syscall instruction itself (4 bytes) */
			regs->regs[2] = __NR_restart_syscall;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
		}
	}
	return 0;
}

extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);

/*
 * notification of userspace execution resumption
 * - triggered by current->work.notify_resume
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
	__u32 thread_info_flags)
{
	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING) {
#ifdef CONFIG_BINFMT_ELF32
		/* o32 tasks: dedicated 32-bit path */
		if (likely((current->thread.mflags & MF_ABI_MASK) == MF_O32)) {
			do_signal32(oldset, regs);
			return;
		}
#endif
#ifdef CONFIG_BINFMT_IRIX
		/* IRIX personality: IRIX-format frames */
		if (unlikely(current->personality != PER_LINUX)) {
			do_irix_signal(oldset, regs);
			return;
		}
#endif
		do_signal(oldset, regs);
	}
}
/*
 * jffs2_garbage_collect_thread - background GC daemon for one mount.
 *
 * Sleeps until thread_should_wake() reports work, cooperates with the
 * freezer, services the control signals sent by put_super and friends
 * (SIGSTOP/SIGKILL/SIGHUP), then runs one garbage-collection pass per
 * loop.  Never returns normally: exits via complete_and_exit() on
 * SIGKILL or when there is no space left for GC.
 */
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	daemonize("jffs2_gcd_mtd%d", c->mtd->index);
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	/* publish ourselves and release the creator's handshake sem */
	c->gc_task = current;
	up(&c->gc_thread_start);

	set_user_nice(current, 10);

	for (;;) {
		allow_signal(SIGHUP);

		if (!thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
			/* Yes, there's a race here; we checked
			   thread_should_wake() before setting current->state
			   to TASK_INTERRUPTIBLE. But it doesn't matter - We
			   don't care if we miss a wakeup, because the GC
			   thread is only an optimisation anyway. */
			schedule();
		}

		if (current->flags & PF_FREEZE) {
			refrigerator(0);
			/* refrigerator() should recalc sigpending for us
			   but doesn't. No matter - allow_signal() will. */
			continue;
		}

		cond_resched();

		/* Put_super will send a SIGKILL and then wait on the sem.  */
		while (signal_pending(current)) {
			siginfo_t info;
			unsigned long signr;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
			die:
				/* unhook ourselves under the lock, then
				 * terminate the thread */
				spin_lock(&c->erase_completion_lock);
				c->gc_task = NULL;
				spin_unlock(&c->erase_completion_lock);
				complete_and_exit(&c->gc_thread_exit, 0);

			case SIGHUP:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
				break;
			default:
				D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}
}
/*
 * FIXME: use linux/kthread.h
 */
/*
 * dvb_frontend_thread - per-frontend tuning and monitoring thread.
 *
 * State machine driven by fe->state (FESTATE_*): idles until tuning
 * parameters arrive, polls lock status, and runs the fast/slow zigzag
 * search when lock is lost.  Exits when dvb_frontend_is_exiting()
 * reports shutdown, optionally powering the frontend down first.
 */
static int dvb_frontend_thread (void *data)
{
	struct dvb_frontend *fe = (struct dvb_frontend *) data;
	unsigned long timeout;
	char name [15];
	int quality = 0, delay = 3*HZ;
	fe_status_t s;
	int check_wrapped = 0;

	dprintk ("%s\n", __FUNCTION__);

	snprintf (name, sizeof(name), "kdvb-fe-%i", fe->dvb->num);

	lock_kernel ();
	daemonize (name);
	/* this thread handles no signals */
	sigfillset (&current->blocked);
	unlock_kernel ();

	fe->status = 0;
	dvb_frontend_init (fe);
	fe->wakeup = 0;

	while (1) {
		up (&fe->sem);      /* is locked when we enter the thread... */

		timeout = wait_event_interruptible_timeout(fe->wait_queue,
							   dvb_frontend_should_wakeup(fe),
							   delay);
		if (0 != dvb_frontend_is_exiting (fe)) {
			/* got signal or quitting */
			break;
		}

		if (current->flags & PF_FREEZE)
			refrigerator(PF_FREEZE);

		if (down_interruptible (&fe->sem))
			break;

		/* if we've got no parameters, just keep idling */
		if (fe->state & FESTATE_IDLE) {
			delay = 3*HZ;
			quality = 0;
			continue;
		}

retune:
		/* get the frontend status */
		if (fe->state & FESTATE_RETUNE) {
			s = 0;
		} else {
			/* NOTE(review): if fe->ops->read_status is NULL, s is
			 * compared below without ever being written -- verify
			 * all registered frontends provide read_status. */
			if (fe->ops->read_status)
				fe->ops->read_status(fe, &s);
			if (s != fe->status) {
				dvb_frontend_add_event (fe, s);
				fe->status = s;
			}
		}
		/* if we're not tuned, and we have a lock, move to the TUNED state */
		if ((fe->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
			fe->state = FESTATE_TUNED;

			/* if we're tuned, then we have determined the correct inversion */
			if ((!(fe->ops->info.caps & FE_CAN_INVERSION_AUTO)) &&
			    (fe->parameters.inversion == INVERSION_AUTO)) {
				fe->parameters.inversion = fe->inversion;
			}
			continue;
		}

		/* if we are tuned already, check we're still locked */
		if (fe->state & FESTATE_TUNED) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);

			/* we're tuned, and the lock is still good... */
			if (s & FE_HAS_LOCK)
				continue;
			else {
				/* if we _WERE_ tuned, but now don't have a lock,
				 * need to zigzag */
				fe->state = FESTATE_ZIGZAG_FAST;
				fe->started_auto_step = fe->auto_step;
				check_wrapped = 0;
			}
		}

		/* don't actually do anything if we're in the LOSTLOCK state,
		 * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */
		if ((fe->state & FESTATE_LOSTLOCK) &&
		    (fe->ops->info.caps & FE_CAN_RECOVER) && (fe->max_drift == 0)) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
			continue;
		}

		/* don't do anything if we're in the DISEQC state, since this
		 * might be someone with a motorized dish controlled by DISEQC.
		 * If its actually a re-tune, there will be a SET_FRONTEND soon enough. */
		if (fe->state & FESTATE_DISEQC) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);
			continue;
		}

		/* if we're in the RETUNE state, set everything up for a brand
		 * new scan, keeping the current inversion setting, as the next
		 * tune is _very_ likely to require the same */
		if (fe->state & FESTATE_RETUNE) {
			fe->lnb_drift = 0;
			fe->auto_step = 0;
			fe->auto_sub_step = 0;
			fe->started_auto_step = 0;
			check_wrapped = 0;
		}

		/* fast zigzag. */
		if ((fe->state & FESTATE_SEARCHING_FAST) || (fe->state & FESTATE_RETUNE)) {
			delay = fe->min_delay;

			/* peform a tune */
			if (dvb_frontend_autotune(fe, check_wrapped)) {
				/* OK, if we've run out of trials at the fast speed.
				 * Drop back to slow for the _next_ attempt */
				fe->state = FESTATE_SEARCHING_SLOW;
				fe->started_auto_step = fe->auto_step;
				continue;
			}
			check_wrapped = 1;

			/* if we've just retuned, enter the ZIGZAG_FAST state.
			 * This ensures we cannot return from an
			 * FE_SET_FRONTEND ioctl before the first frontend tune
			 * occurs */
			if (fe->state & FESTATE_RETUNE) {
				fe->state = FESTATE_TUNING_FAST;
				goto retune;
			}
		}

		/* slow zigzag */
		if (fe->state & FESTATE_SEARCHING_SLOW) {
			update_delay(&quality, &delay, fe->min_delay, s & FE_HAS_LOCK);

			/* Note: don't bother checking for wrapping; we stay in this
			 * state until we get a lock */
			dvb_frontend_autotune(fe, 0);
		}
	}

	/* optional power-down before the thread goes away */
	if (dvb_shutdown_timeout) {
		if (dvb_powerdown_on_sleep)
			if (fe->ops->set_voltage)
				fe->ops->set_voltage(fe, SEC_VOLTAGE_OFF);
		if (fe->ops->sleep)
			fe->ops->sleep(fe);
	}

	/* clear the pid (ordered before the wakeup) so waiters see exit */
	fe->thread_pid = 0;
	mb();
	dvb_frontend_wakeup(fe);
	return 0;
}
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Returns 1 if a signal was delivered (or we came from kernel mode),
 * 0 otherwise.
 */
int do_signal(struct pt_regs *regs, sigset_t *oldset)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
	unsigned short inst;

	/*
	 * We want the common case to go fast, which
	 * is why we may in certain cases get here from
	 * kernel mode. Just return without doing anything
	 * if so.
	 */
	if (!user_mode(regs))
		return 1;

	/* Suspend support: park in the refrigerator, no delivery, but
	 * still run the syscall-restart logic below. */
	if (current->flags & PF_FREEZE) {
		refrigerator(0);
		goto no_signal;
	}

	if (!oldset)
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Reenable any watchpoints before delivering the
		 * signal to user space. The processor register will
		 * have been cleared if the watchpoint triggered
		 * inside the kernel.
		 */

		/* Whee! Actually deliver the signal. */
		handle_signal(signr, &ka, &info, oldset, regs);
		return 1;
	}

no_signal:
	/* Did we come from a system call? */
	if (regs->syscall_nr >= 0) {
		/* Restart the system call - no handlers present */
		if (regs->r0 == -ERESTARTNOHAND ||
		    regs->r0 == -ERESTARTSYS ||
		    regs->r0 == -ERESTARTNOINTR) {
			regs->r0 = regs->orig_r0;
			/* syscalls enter via either a 2-byte trap or a
			 * 4-byte instruction: peek at the opcode to know
			 * how far to back up bpc */
			inst = *(unsigned short *)(regs->bpc - 2);
			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
				regs->bpc -= 2;
			else
				regs->bpc -= 4;
		}
		if (regs->r0 == -ERESTART_RESTARTBLOCK){
			/* restart via sys_restart_syscall() */
			regs->r0 = regs->orig_r0;
			regs->r7 = __NR_restart_syscall;
			inst = *(unsigned short *)(regs->bpc - 2);
			if ((inst & 0xfff0) == 0x10f0)	/* trap ? */
				regs->bpc -= 2;
			else
				regs->bpc -= 4;
		}
	}
	return 0;
}
/* CV_WAIT and CV_TIMEDWAIT sleep until the specified event occurs, or, in the
 * case of CV_TIMEDWAIT, until the specified timeout occurs.
 * - NOTE: that on Linux, there are circumstances in which TASK_INTERRUPTIBLE
 *   can wake up, even if all signals are blocked
 * - TODO: handle signals correctly by passing an indication back to the
 *   caller that the wait has been interrupted and the stack should be cleaned
 *   up preparatory to signal delivery
 *
 * Returns EINTR when sigok is set and a signal is pending on wakeup,
 * 0 otherwise.
 */
int
afs_cv_wait(afs_kcondvar_t * cv, afs_kmutex_t * l, int sigok)
{
    int seq, isAFSGlocked = ISAFS_GLOCK();
    sigset_t saved_set;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    /* older kernels without the macro: build the entry by hand */
    struct wait_queue wait = { current, NULL };
#endif

    sigemptyset(&saved_set);
    /* The signalling side bumps cv->seq; a changed value while we sleep
     * means the condition has been broadcast/signalled. */
    seq = cv->seq;

    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&cv->waitq, &wait);

    if (isAFSGlocked)
	AFS_GUNLOCK();
    MUTEX_EXIT(l);

    if (!sigok) {
	/* Caller wants an uninterruptible wait: block all signals for
	 * the duration, restoring the original mask afterwards. */
	SIG_LOCK(current);
	saved_set = current->blocked;
	sigfillset(&current->blocked);
	RECALC_SIGPENDING(current);
	SIG_UNLOCK(current);
    }

    while(seq == cv->seq) {
	schedule();
#ifdef AFS_LINUX26_ENV
#ifdef CONFIG_PM
	/* Suspend support: enter the refrigerator when a freeze was
	 * requested (the flag's location varies by kernel version). */
	if (
#ifdef PF_FREEZE
	    current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
	    !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
	    test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
	    test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
	    )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
	    refrigerator(PF_FREEZE);
#else
	    refrigerator();
#endif
	/* re-arm sleep state before re-testing the sequence number */
	set_current_state(TASK_INTERRUPTIBLE);
#endif
#endif
    }

    remove_wait_queue(&cv->waitq, &wait);
    set_current_state(TASK_RUNNING);

    if (!sigok) {
	/* restore the caller's signal mask */
	SIG_LOCK(current);
	current->blocked = saved_set;
	RECALC_SIGPENDING(current);
	SIG_UNLOCK(current);
    }

    if (isAFSGlocked)
	AFS_GLOCK();
    MUTEX_ENTER(l);

    return (sigok && signal_pending(current)) ? EINTR : 0;
}
/* * FIXME: use linux/kthread.h */ static int dvb_frontend_thread(void *data) { struct dvb_frontend *fe = data; struct dvb_frontend_private *fepriv = fe->frontend_priv; unsigned long timeout; char name [15]; int quality = 0, delay = 3*HZ; fe_status_t s; int check_wrapped = 0; dprintk("%s\n", __FUNCTION__); snprintf (name, sizeof(name), "kdvb-fe-%i", fe->dvb->num); lock_kernel(); daemonize(name); sigfillset(¤t->blocked); unlock_kernel(); fepriv->status = 0; dvb_frontend_init(fe); fepriv->wakeup = 0; while (1) { up(&fepriv->sem); /* is locked when we enter the thread... */ timeout = wait_event_interruptible_timeout(fepriv->wait_queue, dvb_frontend_should_wakeup(fe), delay); if (0 != dvb_frontend_is_exiting(fe)) { /* got signal or quitting */ break; } if (current->flags & PF_FREEZE) refrigerator(PF_FREEZE); if (down_interruptible(&fepriv->sem)) break; /* if we've got no parameters, just keep idling */ if (fepriv->state & FESTATE_IDLE) { delay = 3*HZ; quality = 0; continue; } /* get the frontend status */ if (fepriv->state & FESTATE_RETUNE) { s = 0; } else { if (fe->ops->read_status) fe->ops->read_status(fe, &s); if (s != fepriv->status) { dvb_frontend_add_event(fe, s); fepriv->status = s; } } /* if we're not tuned, and we have a lock, move to the TUNED state */ if ((fepriv->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) { update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK); fepriv->state = FESTATE_TUNED; /* if we're tuned, then we have determined the correct inversion */ if ((!(fe->ops->info.caps & FE_CAN_INVERSION_AUTO)) && (fepriv->parameters.inversion == INVERSION_AUTO)) { fepriv->parameters.inversion = fepriv->inversion; } continue; } /* if we are tuned already, check we're still locked */ if (fepriv->state & FESTATE_TUNED) { update_delay(&quality, &delay, fepriv->min_delay, s & FE_HAS_LOCK); /* we're tuned, and the lock is still good... */ if (s & FE_HAS_LOCK){ delay = HZ >> 1; /* kevin_add for speed up update speed */ continue; } else {