/*
 * Wait briefly (NLMCLNT_GRACE_WAIT) for the server's grace period to
 * pass.
 *
 * Returns 0 if the full timeout elapsed without a signal becoming
 * pending, -EINTR if a signal was pending either before or after the
 * sleep.  The prepare_to_wait/finish_wait pairing must bracket the
 * signal checks exactly as written to avoid missed wakeups.
 */
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		/* Only report success if no signal arrived while asleep. */
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}
/*
 * Allocate an smb_request for @server with a buffer of @bufsize bytes.
 *
 * Bumps server->nr_requests and attempts the allocation only while at
 * or below MAX_REQUEST_HARD.  The disabled (#if 0) section shows the
 * intended NFS-style back-off (free pages / honour signals / yield);
 * until callers are converted, the #else path breaks out after a
 * single attempt, so this can return NULL.
 */
struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
{
	struct smb_request *req = NULL;

	for (;;) {
		atomic_inc(&server->nr_requests);
		if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
			req = smb_do_alloc_request(server, bufsize);
			if (req != NULL)
				break;
		}

#if 0
		/*
		 * Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (nfs_try_to_free_pages(server))
			continue;
		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		current->policy = SCHED_YIELD;
		schedule();
#else
		/* FIXME: we want something like nfs does above, but that
		   requires changes to all callers and can wait. */
		break;
#endif
	}
	return req;
}
/*
 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
 * (Note: buffer memory is freed in rpc_task_release).
 */
static void call_allocate(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	unsigned int bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
			task->tk_pid, task->tk_status);
	/* Once a buffer is in place, the next FSM step is encoding. */
	task->tk_action = call_encode;
	/* A buffer left over from an earlier pass is reused as-is. */
	if (task->tk_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;

	/* Allocation is doubled (bufsiz << 1) -- presumably one half for
	 * the call and one for the reply; confirm against rpc_malloc. */
	if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	/* Async tasks, and sync tasks without a pending signal on an
	 * interruptible client, back off and retry from call_reserve.
	 *
	 * NOTE(review): this snippet ends here mid-function -- the
	 * signal/exit path that should follow the if-block is not
	 * visible in this view.
	 */
	if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}
/*
 * Allocate memory for RPC purpose.
 *
 * This is yet another tricky issue: For sync requests issued by
 * a user process, we want to make kmalloc sleep if there isn't
 * enough memory. Async requests should not sleep too excessively
 * because that will block rpciod (but that's not dramatic when
 * it's starved of memory anyway). Finally, swapout requests should
 * never sleep at all, and should not trigger another swap_out
 * request through kmalloc which would just increase memory contention.
 *
 * I hope the following gets it right, which gives async requests
 * a slight advantage over sync requests (good for writeback, debatable
 * for readahead):
 *
 * sync user requests:	GFP_KERNEL
 * async requests:	GFP_RPC		(== GFP_NFS)
 * swap requests:	GFP_ATOMIC	(or new GFP_SWAPPER)
 */
void *
rpc_allocate(unsigned int flags, unsigned int size)
{
	u32	*buffer;
	int	gfp;

	/* Pick the allocation class per the policy described above. */
	if (flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else if (flags & RPC_TASK_ASYNC)
		gfp = GFP_RPC;
	else
		gfp = GFP_KERNEL;

	do {
		if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
			dprintk("RPC: allocated buffer %p\n", buffer);
			return buffer;
		}
		/* Swap requests get one shot at the static emergency
		 * buffer; swap_buffer_used guards against handing it
		 * out twice. */
		if ((flags & RPC_TASK_SWAPPER) && !swap_buffer_used++) {
			dprintk("RPC: used last-ditch swap buffer\n");
			return swap_buffer;
		}
		/* Async requests must not block rpciod: give up now. */
		if (flags & RPC_TASK_ASYNC)
			return NULL;
		/* Sync request: nap briefly, then retry until signalled. */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ>>4);
	} while (!signalled());

	return NULL;
}
/*
 * Issue a synchronous RPC to this net namespace's gss-proxy client and
 * map transport-level failures onto the error codes callers expect:
 * -EINVAL for protocol mismatch, -EAGAIN for transient connection
 * trouble, -EINTR when -ERESTARTSYS coincides with a pending signal.
 */
static int gssp_call(struct net *net, struct rpc_message *msg)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *gssp_clnt;
	int ret;

	gssp_clnt = get_gssp_clnt(sn);
	if (gssp_clnt == NULL)
		return -EIO;

	ret = rpc_call_sync(gssp_clnt, msg, 0);
	if (ret >= 0)
		goto out;

	dprintk("gssp: rpc_call returned error %d\n", -ret);

	if (ret == -EPROTONOSUPPORT) {
		/* Protocol mismatch with the proxy. */
		ret = -EINVAL;
	} else if (ret == -ECONNREFUSED || ret == -ETIMEDOUT ||
		   ret == -ENOTCONN) {
		/* Transient connection trouble: worth retrying. */
		ret = -EAGAIN;
	} else if (ret == -ERESTARTSYS && signalled()) {
		/* Interrupted by a signal. */
		ret = -EINTR;
	}
out:
	rpc_release_client(gssp_clnt);
	return ret;
}
/* * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. * (Note: buffer memory is freed in rpc_task_release). */ static void call_allocate(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; unsigned int bufsiz; dprintk("RPC: %4d call_allocate (status %d)\n", task->tk_pid, task->tk_status); task->tk_action = call_encode; if (task->tk_buffer) return; /* FIXME: compute buffer requirements more exactly using * auth->au_wslack */ bufsiz = rpcproc_bufsiz(clnt, task->tk_proc) + RPC_SLACK_SPACE; if ((task->tk_buffer = rpc_malloc(task, bufsiz)) != NULL) return; printk("RPC: buffer allocation failed for task %p\n", task); if (!signalled()) { xprt_release(task); task->tk_action = call_reserve; rpc_delay(task, HZ); return; } rpc_exit(task, -ERESTARTSYS); }
// Reports whether the child terminated on a signal and left a core
// dump behind.  Always false on platforms without WCOREDUMP.
bool K3Process::coreDumped() const
{
#ifdef WCOREDUMP
	if (!signalled())
		return false;
	return WCOREDUMP(status) != 0;
#else
	return false;
#endif
}
/*
 * Wait while server is in grace period
 *
 * Sleep interruptibly on the host's grace waitqueue -- indefinitely
 * while the host is reclaiming, otherwise bounded to 10 seconds --
 * and report -ERESTARTSYS if a signal cut the sleep short.
 */
static inline int
nlmclnt_grace_wait(struct nlm_host *host)
{
	if (host->h_reclaiming)
		interruptible_sleep_on(&host->h_gracewait);
	else
		interruptible_sleep_on_timeout(&host->h_gracewait, 10*HZ);

	if (signalled())
		return -ERESTARTSYS;
	return 0;
}
/*
 * This is the lockd kernel thread
 *
 * Main service loop for NLM requests: dispatch incoming lockd RPCs
 * until kthread_stop().  SIGKILL -- the only signal allowed through --
 * makes lockd drop its state and restart the grace period via
 * restart_grace().
 */
static int
lockd(void *vrqstp)
{
	int		err = 0;
	struct svc_rqst *rqstp = vrqstp;
	struct net *net = &init_net;
	struct lockd_net *ln = net_generic(net, lockd_net_id);

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		dprintk("lockd: request from %s\n",
			svc_print_addr(rqstp, buf, sizeof(buf)));
		svc_process(rqstp);
	}
	flush_signals(current);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	cancel_delayed_work_sync(&ln->grace_period_end);
	locks_end_grace(&ln->lockd_manager);
	return 0;
}
/*
 * Block on a lock
 *
 * Queue a nlm_wait entry on the global nlm_blocked list, sleep up to
 * 30s waiting for the server's GRANTED callback to fill in
 * block.b_status, then unlink the entry again.  On interruption the
 * pending request is cancelled -- unless the server's nsm state
 * changed while we slept (i.e. it rebooted), in which case there is
 * nothing left to cancel.
 */
int
nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
{
	struct nlm_wait	block, **head;
	int		err;
	u32		pstate;

	block.b_host   = host;
	block.b_lock   = fl;
	init_waitqueue_head(&block.b_wait);
	block.b_status = NLM_LCK_BLOCKED;
	block.b_next   = nlm_blocked;
	/* Fix: "&block;" had been mangled into the HTML entity
	 * character U+2588 in this copy; restore the address-of. */
	nlm_blocked    = &block;

	/* Remember pseudo nsm state */
	pstate = host->h_state;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	sleep_on_timeout(&block.b_wait, 30*HZ);

	/* Unlink our wait entry from the nlm_blocked list. */
	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
		if (*head == &block) {
			*head = block.b_next;
			break;
		}
	}

	if (!signalled()) {
		*statp = block.b_status;
		return 0;
	}

	/* Okay, we were interrupted. Cancel the pending request
	 * unless the server has rebooted.
	 */
	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
		printk(KERN_NOTICE
			"lockd: CANCEL call failed (errno %d)\n", -err);

	return -ERESTARTSYS;
}
/* A wrapper to handle the EJUKEBOX error message */
static int
nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	sigset_t oldset;
	int result;

	/* Mask out signals the client does not want interrupting us. */
	rpc_clnt_sigmask(clnt, &oldset);
	for (;;) {
		result = rpc_call_sync(clnt, msg, flags);
		if (result != -EJUKEBOX)
			break;
		/* Server busy ("jukebox"): back off before retrying;
		 * a signal arriving during the nap ends the loop with
		 * -ERESTARTSYS. */
		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
		result = -ERESTARTSYS;
		if (signalled())
			break;
	}
	rpc_clnt_sigunmask(clnt, &oldset);
	return result;
}
/* * Allocate an NLM RPC call struct */ struct nlm_rqst * nlmclnt_alloc_call(void) { struct nlm_rqst *call; while (!signalled()) { call = (struct nlm_rqst *) rpc_allocate(RPC_TASK_ASYNC, sizeof(struct nlm_rqst)); if (call) return call; printk("nlmclnt_alloc_call: failed, waiting for memory\n"); current->state = TASK_INTERRUPTIBLE; schedule_timeout(5*HZ); } return NULL; }
void rpciod_down(void) { unsigned long flags; MOD_INC_USE_COUNT; down(&rpciod_sema); dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users); if (rpciod_users) { if (--rpciod_users) goto out; } else printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid); if (!rpciod_pid) { dprintk("rpciod_down: Nothing to do!\n"); goto out; } kill_proc(rpciod_pid, SIGKILL, 1); /* * Usually rpciod will exit very quickly, so we * wait briefly before checking the process id. */ current->sigpending = 0; current->state = TASK_INTERRUPTIBLE; schedule_timeout(1); /* * Display a message if we're going to wait longer. */ while (rpciod_pid) { dprintk("rpciod_down: waiting for pid %d to exit\n", rpciod_pid); if (signalled()) { dprintk("rpciod_down: caught signal\n"); break; } interruptible_sleep_on(&rpciod_killer); } spin_lock_irqsave(¤t->sigmask_lock, flags); recalc_sigpending(current); spin_unlock_irqrestore(¤t->sigmask_lock, flags); out: up(&rpciod_sema); MOD_DEC_USE_COUNT; }
/*
 * This is the callback kernel thread.
 *
 * Daemonized service loop for the NFSv4 callback channel: receive
 * requests and hand them to svc_process() until an unexpected error
 * occurs, or a signal (only SIGKILL is allowed through) arrives while
 * no users remain.
 */
static void nfs_callback_svc(struct svc_rqst *rqstp)
{
	struct svc_serv *serv = rqstp->rq_server;
	int err;

	__module_get(THIS_MODULE);
	lock_kernel();

	nfs_callback_info.pid = current->pid;
	daemonize("nfsv4-svc");
	/* Process request with signals blocked, but allow SIGKILL. */
	allow_signal(SIGKILL);

	complete(&nfs_callback_info.started);

	for(;;) {
		if (signalled()) {
			/* Only exit once the last user is gone;
			 * otherwise discard the signal and carry on. */
			if (nfs_callback_info.users == 0)
				break;
			flush_signals(current);
		}
		/*
		 * Listen for a request on the socket
		 */
		err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
				"%s: terminating on error %d\n",
				__FUNCTION__, -err);
			break;
		}
		dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
			NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
		svc_process(serv, rqstp);
	}

	flush_signals(current);
	svc_exit_thread(rqstp);

	nfs_callback_info.pid = 0;
	complete(&nfs_callback_info.stopped);
	unlock_kernel();
	module_put_and_exit(0);
}
/* * Allocate an NLM RPC call struct */ struct nlm_rqst * nlmclnt_alloc_call(void) { struct nlm_rqst *call; while (!signalled()) { call = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL); if (call) { memset(call, 0, sizeof(*call)); locks_init_lock(&call->a_args.lock.fl); locks_init_lock(&call->a_res.lock.fl); return call; } printk("nlmclnt_alloc_call: failed, waiting for memory\n"); current->state = TASK_INTERRUPTIBLE; schedule_timeout(5*HZ); } return NULL; }
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to attach to the request
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page		*req;

	/* Deal with hard limits. */
	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		/* Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	atomic_set(&req->wb_count, 1);
	req->wb_context = get_nfs_open_context(ctx);

	return req;
}
/*
 * This is the lockd kernel thread
 *
 * Service loop for NLM requests; runs until kthread_stop().  A
 * pending signal (only SIGKILL is allowed through) triggers
 * restart_grace().  Repeated svc_recv() errors are rate-limited via
 * preverr so each distinct errno is reported only once per streak,
 * with a one-second back-off between attempts.
 */
static int
lockd(void *vrqstp)
{
	int		err = 0, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR) {
			preverr = err;
			continue;
		}
		if (err < 0) {
			/* Report each distinct error only once per streak. */
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_interruptible(HZ);
			continue;
		}
		preverr = err;

		dprintk("lockd: request from %s\n",
			svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}
	flush_signals(current);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	return 0;
}
/*
 * Generic NLM call
 *
 * Bind (or rebind) an RPC client for the host and issue procedure
 * @proc synchronously, retrying for as long as the server reports
 * being in its grace period.  Transport errors are mapped onto
 * -EINVAL / -EAGAIN / -EINTR for the caller.
 */
int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct file	*filp = argp->lock.fl.fl_file;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	int		status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	if (filp)
		msg.rpc_cred = nfs_file_cred(filp);

	do {
		/* Host is reclaiming: wait for the grace period unless
		 * this request is itself a reclaim. */
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}
/*
 * Generic NLM call
 *
 * Older variant: sleeps on h_gracewait while the host is reclaiming,
 * issues the call with rpc_call(), and retries every 15 seconds while
 * the server stays in its grace period.  A pending signal ends the
 * retry loop with -EINTR.
 */
int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	int		status;

	dprintk("lockd: call procedure %s on %s\n",
			nlm_procname(proc), host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim) {
			interruptible_sleep_on(&host->h_gracewait);
			continue;
		}

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call(clnt, proc, argp, resp, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			/* Blocking requests rebind and retry; anything
			 * else gives up with the mapped status. */
			if (req->a_args.block)
				nlm_rebind_host(host);
			else
				break;
		} else if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

		/* Back off a little and try again */
		interruptible_sleep_on_timeout(&host->h_gracewait, 15*HZ);

		/* When the lock requested by F_SETLKW isn't available,
		   we will wait until the request can be satisfied. If
		   a signal is received during wait, we should return
		   -EINTR. */
		if (signalled ()) {
			status = -EINTR;
			break;
		}
	} while (1);

	return status;
}
/*
 * Receive the next request on any socket.
 *
 * Returns the request length on success, -EINTR if a signal was
 * pending before or during the wait, or -EAGAIN on timeout or on an
 * empty/incomplete read so the caller simply retries.
 */
int
svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk;
	int			len;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	/* Sanity: a thread entering here must not still own a socket
	 * or be sitting on its own wait queue. */
	if (rqstp->rq_sock)
		printk(KERN_ERR "svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR "svc_recv: service %p, wait queue active!\n",
			rqstp);

	/* Initialize the buffers */
	rqstp->rq_argbuf = rqstp->rq_defbuf;
	rqstp->rq_resbuf = rqstp->rq_defbuf;

	if (signalled())
		return -EINTR;

	spin_lock_bh(&serv->sv_lock);
	if ((svsk = svc_sock_dequeue(serv)) != NULL) {
		/* A socket already has data pending: take it. */
		rqstp->rq_sock = svsk;
		svsk->sk_inuse++;
	} else {
		/* No data pending. Go to sleep */
		svc_serv_enqueue(serv, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&serv->sv_lock);

		schedule_timeout(timeout);

		spin_lock_bh(&serv->sv_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			/* Nothing arrived: report whether it was a
			 * signal or just a timeout. */
			svc_serv_dequeue(serv, rqstp);
			spin_unlock_bh(&serv->sv_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: server %p, socket %p, inuse=%d\n",
		rqstp, svsk, svsk->sk_inuse);
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		svc_sock_release(rqstp);
		return -EAGAIN;
	}

	/* Requests from privileged ports (< 1024) count as "secure". */
	rqstp->rq_secure  = ntohs(rqstp->rq_addr.sin_port) < 1024;
	rqstp->rq_userset = 0;
	rqstp->rq_verfed  = 0;

	/* Pull the XID from the request and echo it into the reply. */
	svc_getlong(&rqstp->rq_argbuf, rqstp->rq_xid);
	svc_putlong(&rqstp->rq_resbuf, rqstp->rq_xid);

	/* Assume that the reply consists of a single buffer. */
	rqstp->rq_resbuf.nriov = 1;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}
/*
 * bolo2slack entry point: parse CLI options, optionally daemonize,
 * compile the --match PCRE, set up curl and a 0MQ SUB socket, then
 * relay TRANSITION PDUs to Slack via s_notify() until signalled().
 * Requires --channel and --webhook; exits 3 on setup failures.
 *
 * NOTE(review): two string literals below (after "-A, --avatar " and
 * after "failed to ") appear to be split across raw line breaks --
 * likely an artifact of how this copy was extracted; verify against
 * the upstream source before building.  Preserved byte-for-byte here.
 */
int main(int argc, char **argv) { OPTIONS.verbose = 0; OPTIONS.endpoint = strdup("tcp://127.0.0.1:2997"); OPTIONS.daemonize = 1; OPTIONS.pidfile = strdup("/var/run/" ME ".pid"); OPTIONS.user = strdup("root"); OPTIONS.group = strdup("root"); OPTIONS.match = strdup("."); OPTIONS.avatar = strdup(":robot_face:"); OPTIONS.username = strdup("bolo"); struct option long_opts[] = { { "help", no_argument, NULL, 'h' }, { "version", no_argument, NULL, 'V' }, { "verbose", no_argument, NULL, 'v' }, { "endpoint", required_argument, NULL, 'e' }, { "foreground", no_argument, NULL, 'F' }, { "pidfile", required_argument, NULL, 'p' }, { "user", required_argument, NULL, 'u' }, { "group", required_argument, NULL, 'g' }, { "match", required_argument, NULL, 'm' }, { "webhook", required_argument, NULL, 'U' }, { "channel", required_argument, NULL, 'C' }, { "botname", required_argument, NULL, 'N' }, { "avatar", required_argument, NULL, 'A' }, { 0, 0, 0, 0 }, }; for (;;) { int idx = 1; int c = getopt_long(argc, argv, "h?Vv+e:Fp:u:g:m:U:C:N:A:", long_opts, &idx); if (c == -1) break; switch (c) { case 'h': case '?': printf(ME " v%s\n", BOLO_VERSION); printf("Usage: " ME " [-h?FVv] [-e tcp://host:port]\n" " [-l level]\n" " [-u user] [-g group] [-p /path/to/pidfile]\n\n"); printf("Options:\n"); printf(" -?, -h show this help screen\n"); printf(" -F, --foreground don't daemonize, stay in the foreground\n"); printf(" -V, --version show version information and exit\n"); printf(" -v, --verbose turn on debugging, to standard error\n"); printf(" -e, --endpoint bolo broadcast endpoint to connect to\n"); printf(" -u, --user user to run as (if daemonized)\n"); printf(" -g, --group group to run as (if daemonized)\n"); printf(" -p, --pidfile where to store the pidfile (if daemonized)\n"); printf(" -U, --webhook Slack webhook URL for integration\n"); printf(" -C, --channel channel (#channel or @user) to notify\n"); printf(" -N, --botname name to use for the notification robot\n"); printf(" -A, --avatar 
avatar image to use (either :emoji: or a URL)\n"); exit(0); case 'V': logger(LOG_DEBUG, "handling -V/--version"); printf(ME " v%s\n" "Copyright (c) 2016 The Bolo Authors. All Rights Reserved.\n", BOLO_VERSION); exit(0); case 'v': OPTIONS.verbose++; break; case 'e': free(OPTIONS.endpoint); OPTIONS.endpoint = strdup(optarg); break; case 'F': OPTIONS.daemonize = 0; break; case 'p': free(OPTIONS.pidfile); OPTIONS.pidfile = strdup(optarg); break; case 'u': free(OPTIONS.user); OPTIONS.user = strdup(optarg); break; case 'g': free(OPTIONS.group); OPTIONS.group = strdup(optarg); break; case 'm': free(OPTIONS.match); OPTIONS.match = strdup(optarg); break; case 'U': free(OPTIONS.webhook); OPTIONS.webhook = strdup(optarg); break; case 'C': free(OPTIONS.channel); OPTIONS.channel = strdup(optarg); break; case 'N': free(OPTIONS.username); OPTIONS.username = strdup(optarg); break; case 'A': free(OPTIONS.avatar); OPTIONS.avatar = strdup(optarg); break; default: fprintf(stderr, "unhandled option flag %#02x\n", c); return 1; } } if (!OPTIONS.channel) { fprintf(stderr, "Missing required --channel flag.\n"); return 1; } if (!OPTIONS.webhook) { fprintf(stderr, "Missing required --webhook flag.\n"); return 1; } if (OPTIONS.daemonize) { log_open(ME, "daemon"); log_level(LOG_ERR + OPTIONS.verbose, NULL); mode_t um = umask(0); if (daemonize(OPTIONS.pidfile, OPTIONS.user, OPTIONS.group) != 0) { fprintf(stderr, "daemonization failed: (%i) %s\n", errno, strerror(errno)); return 3; } umask(um); } else { log_open(ME, "console"); log_level(LOG_INFO + OPTIONS.verbose, NULL); } logger(LOG_NOTICE, "starting up"); const char *re_err; int re_off; OPTIONS.re = pcre_compile(OPTIONS.match, 0, &re_err, &re_off, NULL); if (!OPTIONS.re) { fprintf(stderr, "Bad --match pattern (%s): %s\n", OPTIONS.match, re_err); exit(1); } OPTIONS.re_extra = pcre_study(OPTIONS.re, 0, &re_err); logger(LOG_DEBUG, "initializing curl subsystem"); OPTIONS.curl = curl_easy_init(); if (!OPTIONS.curl) { logger(LOG_ERR, "failed to 
initialize curl subsystem"); return 3; } logger(LOG_DEBUG, "allocating 0MQ context"); void *zmq = zmq_ctx_new(); if (!zmq) { logger(LOG_ERR, "failed to initialize 0MQ context"); return 3; } logger(LOG_DEBUG, "allocating 0MQ SUB socket to talk to %s", OPTIONS.endpoint); void *z = zmq_socket(zmq, ZMQ_SUB); if (!z) { logger(LOG_ERR, "failed to create a SUB socket"); return 3; } logger(LOG_DEBUG, "setting subscriber filter"); if (zmq_setsockopt(z, ZMQ_SUBSCRIBE, "", 0) != 0) { logger(LOG_ERR, "failed to set subscriber filter"); return 3; } logger(LOG_DEBUG, "connecting to %s", OPTIONS.endpoint); if (vzmq_connect(z, OPTIONS.endpoint) != 0) { logger(LOG_ERR, "failed to connect to %s", OPTIONS.endpoint); return 3; } pdu_t *p; logger(LOG_INFO, "waiting for a PDU from %s", OPTIONS.endpoint); signal_handlers(); while (!signalled()) { while ((p = pdu_recv(z))) { logger(LOG_INFO, "received a [%s] PDU of %i frames", pdu_type(p), pdu_size(p)); if (strcmp(pdu_type(p), "TRANSITION") == 0 && pdu_size(p) == 6) { s_notify(p); } pdu_free(p); logger(LOG_INFO, "waiting for a PDU from %s", OPTIONS.endpoint); } } logger(LOG_INFO, "shutting down"); vzmq_shutdown(z, 0); zmq_ctx_destroy(zmq); return 0; }
/*
 * The RPC scheduler / finite state machine for one task: repeatedly
 * run any pending callback, then the next tk_action step.  Sync tasks
 * sleep here between steps (under save_flags/cli protection of the
 * run-state check); async tasks return to rpciod instead of sleeping.
 * A signal on a sync task exits the task with -ERESTARTSYS, after one
 * more loop pass so queue-cleanup callbacks still run.  Preserved
 * byte-for-byte below: the cli()-bracketed section is too
 * order-sensitive to reformat safely.
 */
/* * This is the RPC `scheduler' (or rather, the finite state machine). */ static int __rpc_execute(struct rpc_task *task) { unsigned long oldflags; int status = 0; dprintk("RPC: %4d rpc_execute flgs %x\n", task->tk_pid, task->tk_flags); if (!RPC_IS_RUNNING(task)) { printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n"); return 0; } while (1) { /* * Execute any pending callback. */ if (task->tk_flags & RPC_TASK_CALLBACK) { /* Define a callback save pointer */ void (*save_callback)(struct rpc_task *); task->tk_flags &= ~RPC_TASK_CALLBACK; /* * If a callback exists, save it, reset it, * call it. * The save is needed to stop from resetting * another callback set within the callback handler * - Dave */ if (task->tk_callback) { save_callback=task->tk_callback; task->tk_callback=NULL; save_callback(task); } } /* * No handler for next step means exit. */ if (!task->tk_action) break; /* * Perform the next FSM step. * tk_action may be NULL when the task has been killed * by someone else. */ if (RPC_IS_RUNNING(task) && task->tk_action) task->tk_action(task); /* * Check whether task is sleeping. * Note that if the task may go to sleep in tk_action, * and the RPC reply arrives before we get here, it will * have state RUNNING, but will still be on schedq. */ save_flags(oldflags); cli(); if (RPC_IS_RUNNING(task)) { if (task->tk_rpcwait == &schedq) rpc_remove_wait_queue(task); } else while (!RPC_IS_RUNNING(task)) { if (RPC_IS_ASYNC(task)) { restore_flags(oldflags); return 0; } /* sync task: sleep here */ dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); if (current->pid == rpciod_pid) printk(KERN_ERR "RPC: rpciod waiting on sync task!\n"); sleep_on(&task->tk_wait); /* * When the task received a signal, remove from * any queues etc, and make runnable again. */ if (signalled()) __rpc_wake_up(task); dprintk("RPC: %4d sync task resuming\n", task->tk_pid); } restore_flags(oldflags); /* * When a sync task receives a signal, it exits with * -ERESTARTSYS. 
In order to catch any callbacks that * clean up after sleeping on some queue, we don't * break the loop here, but go around once more. */ if (!RPC_IS_ASYNC(task) && signalled()) { dprintk("RPC: %4d got signal\n", task->tk_pid); rpc_exit(task, -ERESTARTSYS); } } dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status); if (task->tk_exit) { status = task->tk_status; task->tk_exit(task); } return status; }
/* * This is the lockd kernel thread */ static void lockd(struct svc_rqst *rqstp) { struct svc_serv *serv = rqstp->rq_server; int err = 0; unsigned long grace_period_expire; /* Lock module and set up kernel thread */ MOD_INC_USE_COUNT; lock_kernel(); /* * Let our maker know we're running. */ nlmsvc_pid = current->pid; up(&lockd_start); daemonize(); reparent_to_init(); sprintf(current->comm, "lockd"); /* Process request with signals blocked. */ spin_lock_irq(¤t->sighand->siglock); siginitsetinv(¤t->blocked, sigmask(SIGKILL)); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); /* kick rpciod */ rpciod_up(); dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); if (!nlm_timeout) nlm_timeout = LOCKD_DFLT_TIMEO; nlmsvc_timeout = nlm_timeout * HZ; grace_period_expire = set_grace_period(); /* * The main request loop. We don't terminate until the last * NFS mount or NFS daemon has gone away, and we've been sent a * signal, or else another process has taken over our job. */ while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) { long timeout = MAX_SCHEDULE_TIMEOUT; if (signalled()) { spin_lock_irq(¤t->sighand->siglock); flush_signals(current); spin_unlock_irq(¤t->sighand->siglock); if (nlmsvc_ops) { nlmsvc_ops->detach(); grace_period_expire = set_grace_period(); } } /* * Retry any blocked locks that have been notified by * the VFS. Don't do this during grace period. * (Theoretically, there shouldn't even be blocked locks * during grace period). */ if (!nlmsvc_grace_period) timeout = nlmsvc_retry_blocked(); /* * Find a socket with data available and call its * recvfrom routine. */ err = svc_recv(serv, rqstp, timeout); if (err == -EAGAIN || err == -EINTR) continue; if (err < 0) { printk(KERN_WARNING "lockd: terminating on error %d\n", -err); break; } dprintk("lockd: request from %08x\n", (unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr)); /* * Look up the NFS client handle. The handle is needed for * all but the GRANTED callback RPCs. 
*/ rqstp->rq_client = NULL; if (nlmsvc_ops) { nlmsvc_ops->exp_readlock(); rqstp->rq_client = nlmsvc_ops->exp_getclient(&rqstp->rq_addr); } if (nlmsvc_grace_period && time_before(grace_period_expire, jiffies)) nlmsvc_grace_period = 0; svc_process(serv, rqstp); /* Unlock export hash tables */ if (nlmsvc_ops) nlmsvc_ops->exp_unlock(); } /* * Check whether there's a new lockd process before * shutting down the hosts and clearing the slot. */ if (!nlmsvc_pid || current->pid == nlmsvc_pid) { if (nlmsvc_ops) nlmsvc_ops->detach(); nlm_shutdown_hosts(); nlmsvc_pid = 0; } else printk(KERN_DEBUG "lockd: new process, skipping host shutdown\n"); wake_up(&lockd_exit); /* Exit the RPC thread */ svc_exit_thread(rqstp); /* release rpciod */ rpciod_down(); /* Release module */ MOD_DEC_USE_COUNT; }
/* * This is the rpciod kernel thread */ static int rpciod(void *ptr) { struct wait_queue **assassin = (struct wait_queue **) ptr; unsigned long oldflags; int rounds = 0; MOD_INC_USE_COUNT; lock_kernel(); /* * Let our maker know we're running ... */ rpciod_pid = current->pid; up(&rpciod_running); exit_files(current); exit_mm(current); spin_lock_irq(¤t->sigmask_lock); siginitsetinv(¤t->blocked, sigmask(SIGKILL)); recalc_sigpending(current); spin_unlock_irq(¤t->sigmask_lock); current->session = 1; current->pgrp = 1; sprintf(current->comm, "rpciod"); dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid); while (rpciod_users) { if (signalled()) { rpciod_killall(); flush_signals(current); } __rpc_schedule(); if (++rounds >= 64) { /* safeguard */ schedule(); rounds = 0; } save_flags(oldflags); cli(); dprintk("RPC: rpciod running checking dispatch\n"); rpciod_tcp_dispatcher(); if (!schedq.task) { dprintk("RPC: rpciod back to sleep\n"); interruptible_sleep_on(&rpciod_idle); dprintk("RPC: switch to rpciod\n"); rpciod_tcp_dispatcher(); rounds = 0; } restore_flags(oldflags); } dprintk("RPC: rpciod shutdown commences\n"); if (all_tasks) { printk(KERN_ERR "rpciod: active tasks at shutdown?!\n"); rpciod_killall(); } rpciod_pid = 0; wake_up(assassin); dprintk("RPC: rpciod exiting\n"); MOD_DEC_USE_COUNT; return 0; }
int main(int argc, char **argv) { OPTIONS.verbose = 0; OPTIONS.endpoint = strdup("tcp://127.0.0.1:2997"); OPTIONS.daemonize = 1; OPTIONS.pidfile = strdup("/var/run/bolo2redis.pid"); OPTIONS.user = strdup("root"); OPTIONS.group = strdup("root"); OPTIONS.redis_host = strdup("127.0.0.1"); OPTIONS.redis_port = 6379; struct option long_opts[] = { { "help", no_argument, NULL, 'h' }, { "version", no_argument, NULL, 'V' }, { "verbose", no_argument, NULL, 'v' }, { "endpoint", required_argument, NULL, 'e' }, { "foreground", no_argument, NULL, 'F' }, { "pidfile", required_argument, NULL, 'p' }, { "user", required_argument, NULL, 'u' }, { "group", required_argument, NULL, 'g' }, { "host", required_argument, NULL, 'H' }, { "port", required_argument, NULL, 'P' }, { 0, 0, 0, 0 }, }; for (;;) { int idx = 1; int c = getopt_long(argc, argv, "h?Vv+e:Fp:u:g:H:P:", long_opts, &idx); if (c == -1) break; switch (c) { case 'h': case '?': printf("bolo2redis v%s\n", BOLO_VERSION); printf("Usage: bolo2redis [-h?FVv] [-e tcp://host:port]\n" " [-H redis.host.or.ip] [-P port]\n" " [-u user] [-g group] [-p /path/to/pidfile]\n\n"); printf("Options:\n"); printf(" -?, -h show this help screen\n"); printf(" -F, --foreground don't daemonize, stay in the foreground\n"); printf(" -V, --version show version information and exit\n"); printf(" -v, --verbose turn on debugging, to standard error\n"); printf(" -e, --endpoint bolo broadcast endpoint to connect to\n"); printf(" -H, --host name or address of redis server\n"); printf(" -P, --port what port redis is running on\n"); printf(" -u, --user user to run as (if daemonized)\n"); printf(" -g, --group group to run as (if daemonized)\n"); printf(" -p, --pidfile where to store the pidfile (if daemonized)\n"); exit(0); case 'V': logger(LOG_DEBUG, "handling -V/--version"); printf("bolo2redis v%s\n" "Copyright (c) 2016 The Bolo Authors. 
All Rights Reserved.\n", BOLO_VERSION); exit(0); case 'v': OPTIONS.verbose++; break; case 'e': free(OPTIONS.endpoint); OPTIONS.endpoint = strdup(optarg); break; case 'F': OPTIONS.daemonize = 0; break; case 'p': free(OPTIONS.pidfile); OPTIONS.pidfile = strdup(optarg); break; case 'u': free(OPTIONS.user); OPTIONS.user = strdup(optarg); break; case 'g': free(OPTIONS.group); OPTIONS.group = strdup(optarg); break; case 'H': free(OPTIONS.redis_host); OPTIONS.redis_host = strdup(optarg); break; case 'P': OPTIONS.redis_port = atoi(optarg); break; default: fprintf(stderr, "unhandled option flag %#02x\n", c); return 1; } } if (OPTIONS.daemonize) { log_open("bolo2redis", "daemon"); log_level(LOG_ERR + OPTIONS.verbose, NULL); mode_t um = umask(0); if (daemonize(OPTIONS.pidfile, OPTIONS.user, OPTIONS.group) != 0) { fprintf(stderr, "daemonization failed: (%i) %s\n", errno, strerror(errno)); return 3; } umask(um); } else { log_open("bolo2redis", "console"); log_level(LOG_INFO + OPTIONS.verbose, NULL); } logger(LOG_NOTICE, "starting up"); logger(LOG_DEBUG, "allocating 0MQ context"); void *zmq = zmq_ctx_new(); if (!zmq) { logger(LOG_ERR, "failed to initialize 0MQ context"); return 3; } logger(LOG_DEBUG, "allocating 0MQ SUB socket to talk to %s", OPTIONS.endpoint); void *z = zmq_socket(zmq, ZMQ_SUB); if (!z) { logger(LOG_ERR, "failed to create a SUB socket"); return 3; } logger(LOG_DEBUG, "setting subscriber filter"); if (zmq_setsockopt(z, ZMQ_SUBSCRIBE, "", 0) != 0) { logger(LOG_ERR, "failed to set subscriber filter"); return 3; } logger(LOG_DEBUG, "connecting to %s", OPTIONS.endpoint); if (vzmq_connect(z, OPTIONS.endpoint) != 0) { logger(LOG_ERR, "failed to connect to %s", OPTIONS.endpoint); return 3; } logger(LOG_INFO, "connecting to redis at %s:%i", OPTIONS.redis_host, OPTIONS.redis_port); redisContext *redis = redisConnect(OPTIONS.redis_host, OPTIONS.redis_port); if (redis != NULL && redis->err) { logger(LOG_ERR, "failed to connect to redis running at %s:%i: %s", 
OPTIONS.redis_host, OPTIONS.redis_port, redis->err); return 3; } pdu_t *p; logger(LOG_INFO, "waiting for a PDU from %s", OPTIONS.endpoint); signal_handlers(); while (!signalled()) { while ((p = pdu_recv(z))) { logger(LOG_INFO, "received a [%s] PDU of %i frames", pdu_type(p), pdu_size(p)); if (strcmp(pdu_type(p), "SET.KEYS") == 0 && pdu_size(p) % 2 == 1 ) { int i = 1; while (i < pdu_size(p)) { char *k = pdu_string(p, i++); char *v = pdu_string(p, i++); logger(LOG_DEBUG, "setting key `%s' = '%s'", k, v); redisReply *reply = redisCommand(redis, "SET %s %s", k, v); if (reply->type == REDIS_REPLY_ERROR) { logger(LOG_ERR, "received error from redis: %s", reply->str); } freeReplyObject(reply); free(k); free(v); } } pdu_free(p); logger(LOG_INFO, "waiting for a PDU from %s", OPTIONS.endpoint); } } logger(LOG_INFO, "shutting down"); vzmq_shutdown(z, 0); zmq_ctx_destroy(zmq); return 0; }
/*
 * This is the lockd kernel thread.
 *
 * Runs as an RPC service thread: receives NLM requests on rqstp's
 * sockets and dispatches them through svc_process() until either
 * (a) the last lockd user has gone away AND we've been signalled, or
 * (b) another process has taken over the job (nlmsvc_pid no longer
 * matches our pid).  Never returns normally -- exits the thread via
 * module_put_and_exit().
 */
static void
lockd(struct svc_rqst *rqstp)
{
	int err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	/* lockd_up is waiting for us to startup, so will
	 * be holding a reference to this module, so it
	 * is safe to just claim another reference
	 */
	__module_get(THIS_MODULE);
	lock_kernel();

	/*
	 * Let our maker know we're running.
	 */
	nlmsvc_pid = current->pid;
	nlmsvc_serv = rqstp->rq_server;
	complete(&lockd_start_done);

	daemonize("lockd");
	set_freezable();

	/* Process request with signals blocked, but allow SIGKILL.  */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/* nlm_timeout is a module parameter; fall back to the default
	 * and convert to jiffies for the RPC layer. */
	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		char buf[RPC_MAX_ADDRBUFLEN];

		if (signalled()) {
			/* SIGKILL received: drop all locks held for
			 * clients and start a fresh grace period. */
			flush_signals(current);
			if (nlmsvc_ops) {
				nlmsvc_invalidate_all();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period) {
			timeout = nlmsvc_retry_blocked();
		} else if (time_before(grace_period_expire, jiffies))
			clear_grace_period();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		/* -EAGAIN/-EINTR are routine (timeout/signal): just loop. */
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %s\n",
			svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}

	flush_signals(current);

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_invalidate_all();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
		nlmsvc_serv = NULL;
	} else
		printk(KERN_DEBUG "lockd: new process, skipping host shutdown\n");
	/* Wake anyone blocked in lockd_down() waiting for us to exit. */
	wake_up(&lockd_exit);

	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
/*
 * segment_location() -- print, as a table of PG_PER_LINE columns, the
 * node returned by get_node() for each page of the named segment that
 * falls within 'range' (offset/length rounded to page boundaries).
 *
 * The length is silently truncated to the end of the segment; an
 * interactive interrupt (signalled(gcp)) stops the listing early.
 * Returns SEG_OK on success, SEG_ERR on any error.
 *
 * NOTE(review): the fprintf below formats SEG_OFFSET(...) with %x and
 * 'offset' (an off_t) with %ld -- assumes those expand to int/long
 * respectively; TODO confirm against the macro definitions.
 */
int segment_location(char *name, range_t * range)
{
	glctx_t *gcp = &glctx;
	segment_t *segp;
	char *apage, *end;
	off_t offset;
	size_t length, maxlength;
	int pgid, i;
	bool need_nl;

	segp = segment_get(name);
	if (segp == NULL) {
		fprintf(stderr, "%s: no such segment: %s\n", gcp->program_name, name);
		return SEG_ERR;
	}
	if (segp->seg_start == MAP_FAILED) {
		/* segment exists but has not been mmap()ed yet */
		fprintf(stderr, "%s: segment %s not mapped\n", gcp->program_name, name);
		return SEG_ERR;
	}

	/* Align the requested start down to a page boundary and validate. */
	offset = round_down_to_pagesize(range->offset);
	if (offset >= segp->seg_length) {
		fprintf(stderr, "%s: offset %ld is past end of segment %s\n",
			gcp->program_name, offset, name);
		return SEG_ERR;
	}

	apage = segp->seg_start + offset;
	maxlength = segp->seg_length - offset;

	length = range->length;
	if (length)
		length = round_up_to_pagesize(length);
	/*
	 * note: we silently truncate to max length [end of segment]
	 */
	if (length == 0 || length > maxlength)
		length = maxlength;

	end = apage + length;
	pgid = offset / gcp->pagesize;	/* page index of first page shown */

	show_one_segment(segp, false);	/* show mapping, no header */

	/* Column header: one "+NN" label per column. */
	printf("page offset ");
	for (i = 0; i < PG_PER_LINE; ++i)
		printf(" +%02d", i);
	printf("\n");

	if (pgid & PPL_MASK) {
		/*
		 * start partial line: pad with blanks up to the column
		 * that corresponds to pgid's position within the row.
		 */
		int pgid2 = pgid & ~PPL_MASK;
		printf("%12x: ", pgid2);
		while (pgid2 < pgid) {
			printf(" ");
			++pgid2;
		}
		need_nl = true;
	} else
		need_nl = false;

	/* Walk the range one page at a time, printing each page's node. */
	for (; apage < end; apage += gcp->pagesize, ++pgid) {
		int node;

		node = get_node(apage);
		if (node < 0) {
			fprintf(stderr, "\n%s: "
				"failed to get node for segment %s, offset 0x%x\n",
				gcp->program_name, name, SEG_OFFSET(segp, apage));
			return SEG_ERR;
		}

		if ((pgid & PPL_MASK) == 0) {
			/* row boundary: terminate previous row, label new one */
			if (need_nl)
				printf("\n");
			printf("%12x: ", pgid);	/* start a new line */
			need_nl = true;
		}
		printf(" %3d", node);

		/* Bail out early if the user interrupted us. */
		if (signalled(gcp)) {
			reset_signal();
			break;
		}
	}
	printf("\n");
	return SEG_OK;
}
/*
 * This is the lockd kernel thread.
 *
 * kthread body for the NFS lock manager: services NLM requests on
 * rqstp's sockets via svc_recv()/svc_process() until kthread_stop()
 * is called, then ends the grace period and shuts down all known NLM
 * hosts.  Always returns 0 to the kthread framework.
 */
static int
lockd(void *vrqstp)
{
	int		err = 0, preverr = 0;
	struct svc_rqst *rqstp = vrqstp;

	/* try_to_freeze() is called from svc_recv() */
	set_freezable();

	/* Allow SIGKILL to tell lockd to drop all of its locks */
	allow_signal(SIGKILL);

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/*
	 * FIXME: it would be nice if lockd didn't spend its entire life
	 * running under the BKL. At the very least, it would be good to
	 * have someone clarify what it's intended to protect here. I've
	 * seen some handwavy posts about posix locking needing to be
	 * done under the BKL, but it's far from clear.
	 */
	lock_kernel();

	/* nlm_timeout is a tunable; fall back to the default and
	 * convert to jiffies for the RPC layer. */
	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away.
	 */
	while (!kthread_should_stop()) {
		long timeout = MAX_SCHEDULE_TIMEOUT;
		RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

		/* update sv_maxconn if it has changed */
		rqstp->rq_server->sv_maxconn = nlm_max_connections;

		if (signalled()) {
			/* SIGKILL: drop all locks and restart the grace
			 * period before taking any more requests. */
			flush_signals(current);
			restart_grace();
			continue;
		}

		timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR) {
			/* routine timeout/signal; remember it so a
			 * following hard error is still reported once */
			preverr = err;
			continue;
		}
		if (err < 0) {
			/* report each distinct error only once, then
			 * back off for a second before retrying */
			if (err != preverr) {
				printk(KERN_WARNING "%s: unexpected error "
					"from svc_recv (%d)\n", __func__, err);
				preverr = err;
			}
			schedule_timeout_interruptible(HZ);
			continue;
		}
		preverr = err;

		dprintk("lockd: request from %s\n",
			svc_print_addr(rqstp, buf, sizeof(buf)));

		svc_process(rqstp);
	}

	flush_signals(current);
	/* End the grace period and tear down all NLM host state. */
	cancel_delayed_work_sync(&grace_period_end);
	locks_end_grace(&lockd_manager);
	if (nlmsvc_ops)
		nlmsvc_invalidate_all();
	nlm_shutdown_hosts();
	unlock_kernel();
	return 0;
}
static int lockd(void *vrqstp) { int err = 0, preverr = 0; struct svc_rqst *rqstp = vrqstp; set_freezable(); allow_signal(SIGKILL); dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); if (!nlm_timeout) nlm_timeout = LOCKD_DFLT_TIMEO; nlmsvc_timeout = nlm_timeout * HZ; set_grace_period(); while (!kthread_should_stop()) { long timeout = MAX_SCHEDULE_TIMEOUT; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); rqstp->rq_server->sv_maxconn = nlm_max_connections; if (signalled()) { flush_signals(current); restart_grace(); continue; } timeout = nlmsvc_retry_blocked(); err = svc_recv(rqstp, timeout); if (err == -EAGAIN || err == -EINTR) { preverr = err; continue; } if (err < 0) { if (err != preverr) { printk(KERN_WARNING "%s: unexpected error " "from svc_recv (%d)\n", __func__, err); preverr = err; } schedule_timeout_interruptible(HZ); continue; } preverr = err; dprintk("lockd: request from %s\n", svc_print_addr(rqstp, buf, sizeof(buf))); svc_process(rqstp); } flush_signals(current); cancel_delayed_work_sync(&grace_period_end); locks_end_grace(&lockd_manager); if (nlmsvc_ops) nlmsvc_invalidate_all(); nlm_shutdown_hosts(); return 0; }