/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_should_stop()) {
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			if (err != preverr) {
				printk(KERN_WARNING "NFS: %s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_uninterruptible(HZ);
			continue;
		}
		preverr = err;
		svc_process(rqstp);
	}
	return 0;
}
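For context, a minimal sketch of how a service thread like nfs4_callback_svc is typically started and stopped with the kthread API. This is an illustration, not code from the source: cb_task, start_callback_thread() and stop_callback_thread() are hypothetical names, and the real setup around svc_prepare_thread() differs across kernel versions.

static struct task_struct *cb_task;	/* hypothetical thread handle */

static int start_callback_thread(struct svc_rqst *rqstp)
{
	/* kthread_run() creates and immediately wakes the thread */
	cb_task = kthread_run(nfs4_callback_svc, rqstp, "nfsv4-svc");
	if (IS_ERR(cb_task))
		return PTR_ERR(cb_task);
	return 0;
}

static void stop_callback_thread(void)
{
	/* kthread_stop() makes kthread_should_stop() return true in the
	 * loop above, then waits for the thread function to return. */
	kthread_stop(cb_task);
}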
/*
 * This is the lockd kernel thread
 */
static int
lockd(void *vrqstp)
{
	int err = 0;
	struct svc_rqst *rqstp = vrqstp;
	struct net *net = &init_net;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));
		svc_process(rqstp);
	}
	flush_signals(current);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	cancel_delayed_work_sync(&ln->grace_period_end);
	locks_end_grace(&ln->lockd_manager);
	return 0;
}
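The net_generic(net, lockd_net_id) lookup above relies on lockd having registered a per-network-namespace state area. A sketch of that registration pattern follows; it is abbreviated, and the init/exit helpers are assumed to set up and tear down the per-net grace-period state.

static __net_init int lockd_init_net(struct net *net);		/* assumed helper */
static __net_exit void lockd_exit_net(struct net *net);	/* assumed helper */

static struct pernet_operations lockd_net_ops = {
	.init = lockd_init_net,
	.exit = lockd_exit_net,
	.id = &lockd_net_id,		/* filled in at registration time */
	.size = sizeof(struct lockd_net),
};

/* registered once at module load: register_pernet_subsys(&lockd_net_ops); */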
/*
 * This is the callback kernel thread.
 */
static void
nfs_callback_svc(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	int err;

	__module_get(THIS_MODULE);
	lock_kernel();

	nfs_callback_info.pid = current->pid;
	daemonize("nfsv4-svc");
	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	complete(&nfs_callback_info.started);

	for(;;) {
		if (signalled()) {
			if (nfs_callback_info.users == 0)
				break;
			flush_signals(current);
		}
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
					"%s: terminating on error %d\n",
					__FUNCTION__, -err);
			break;
		}
		dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
				NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
		svc_process(serv, rqstp);
	}

	flush_signals(current);
	svc_exit_thread(rqstp);
	nfs_callback_info.pid = 0;
	complete(&nfs_callback_info.stopped);
	unlock_kernel();
	module_put_and_exit(0);
}
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_should_stop()) {
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
	return 0;
}
/*
 * This is the callback kernel thread.
 */
static int
nfs_callback_svc(void *vrqstp)
{
	int err, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	/*
	 * FIXME: do we really need to run this under the BKL? If so, please
	 * add a comment about what it's intended to protect.
	 */
	lock_kernel();
	while (!kthread_should_stop()) {
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_uninterruptible(HZ);
			continue;
		}
		preverr = err;
		svc_process(rqstp);
	}
	unlock_kernel();
	return 0;
}
/*
 * This is the NFSv4 callback kernel thread.
 */
static int
nfs4_callback_svc(void *vrqstp)
{
	int err;
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {

		if (signal_pending(current))
			flush_signals(current);
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		svc_process(rqstp);
	}
	svc_exit_thread(rqstp);
	module_put_and_exit(0);
	return 0;
}
/*
 * This is the lockd kernel thread
 */
static int
lockd(void *vrqstp)
{
	int err = 0, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * FIXME: it would be nice if lockd didn't spend its entire life
	 * running under the BKL. At the very least, it would be good to
	 * have someone clarify what it's intended to protect here. I've
	 * seen some handwavy posts about posix locking needing to be
	 * done under the BKL, but it's far from clear.
	 */
	lock_kernel();

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_interruptible(HZ);
			continue;
		}
		preverr = err;

		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}
	flush_signals(current);
	cancel_delayed_work_sync(&grace_period_end);
	locks_end_grace(&lockd_manager);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	unlock_kernel();
	return 0;
}
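A sketch, as an assumption rather than code from the source, of how a controlling context could use the SIGKILL convention that allow_signal(SIGKILL) establishes above: the signal interrupts svc_recv(), and the signalled() branch of the loop then flushes signals and restarts the grace period. lockd_drop_locks() is a hypothetical name.

static void lockd_drop_locks(struct task_struct *lockd_task)
{
	/* send_sig() wakes the thread out of svc_recv() with -EINTR;
	 * the signalled() branch above then runs restart_grace(). */
	send_sig(SIGKILL, lockd_task, 0);
}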
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	/* lockd_up is waiting for us to startup, so will
	 * be holding a reference to this module, so it
	 * is safe to just claim another reference
	 */
	__module_get(THIS_MODULE);
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	nlmsvc_serv = rqstp->rq_server;
	complete(&lockd_start_done);

	daemonize("lockd");
	set_freezable();

	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		char buf[RPC_MAX_ADDRBUFLEN];

		if (signalled()) {
			flush_signals(current);
			if (nlmsvc_ops) {
				nlmsvc_invalidate_all();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period) {
			timeout = nlmsvc_retry_blocked();
		} else if (time_before(grace_period_expire, jiffies))
			clear_grace_period();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %s\n",
			svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}

	flush_signals(current);

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_invalidate_all();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
		nlmsvc_serv = NULL;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
/*
 * This is the lockd kernel thread
 */
static void
lockd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	MOD_INC_USE_COUNT;
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	up(&lockd_start);

	daemonize();
	reparent_to_init();
	sprintf(current->comm, "lockd");

	/* Process request with signals blocked. */
	spin_lock_irq(&current->sighand->siglock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;

		if (signalled()) {
			spin_lock_irq(&current->sighand->siglock);
			flush_signals(current);
			spin_unlock_irq(&current->sighand->siglock);
			if (nlmsvc_ops) {
				nlmsvc_ops->detach();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period)
			timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(serv, rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		/*
		 * Look up the NFS client handle. The handle is needed for
		 * all but the GRANTED callback RPCs.
		 */
		rqstp->rq_client = NULL;
		if (nlmsvc_ops) {
			nlmsvc_ops->exp_readlock();
			rqstp->rq_client =
				nlmsvc_ops->exp_getclient(&rqstp->rq_addr);
		}

		if (nlmsvc_grace_period &&
		    time_before(grace_period_expire, jiffies))
			nlmsvc_grace_period = 0;
		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		if (nlmsvc_ops)
			nlmsvc_ops->exp_unlock();
	}

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_ops->detach();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	MOD_DEC_USE_COUNT;
}
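For comparison with the later variants above, a sketch of how newer kernels express the open-coded signal setup in this version (siginitsetinv() under siglock plus recalc_sigpending()); simplified, assuming only SIGKILL needs to stay deliverable, and lockd_allow_shutdown_signal() is a hypothetical name.

static void lockd_allow_shutdown_signal(void)
{
	/* allow_signal() keeps SIGKILL deliverable to this kernel
	 * thread, replacing the siglock/siginitsetinv()/
	 * recalc_sigpending() sequence above. */
	allow_signal(SIGKILL);
}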
/*
 * This is the lockd kernel thread
 */
static int
lockd(void *vrqstp)
{
	int err = 0, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_interruptible(HZ);
			continue;
		}
		preverr = err;

		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}
	flush_signals(current);
	cancel_delayed_work_sync(&grace_period_end);
	locks_end_grace(&lockd_manager);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	return 0;
}
/*
 * This is the lockd kernel thread
 */
static int
lockd(void *vrqstp)
{
	int err = 0, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_interruptible(HZ);
			continue;
		}
		preverr = err;

		dprintk("lockd: request from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}
	flush_signals(current);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	return 0;
}
/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct fs_struct *fsp;
	int		err;
	struct nfsd_list me;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");
	current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	lockd_up();				/* start lockd */

	me.task = current;
	list_add(&me.list, &nfsd_list);

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;

	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(serv, rqstp, 60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked.  */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}

	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		err = signo;
	}

	lock_kernel();

	/* Release lockd */
	lockd_down();

	/* Check if this is last thread */
	if (serv->sv_nrthreads == 1) {
		printk(KERN_WARNING "nfsd: last server has exited\n");
		if (err != SIG_NOCLEAN) {
			printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
			nfsd_export_flush();
		}
		nfsd_serv = NULL;
		nfsd_racache_shutdown();	/* release read-ahead cache */
		nfs4_state_shutdown();
	}
	list_del(&me.list);
	nfsdstats.th_cnt--;

out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	module_put_and_exit(0);
}