struct fuse_req *fuse_get_req(struct fuse_conn *fc) { struct fuse_req *req; sigset_t oldset; int intr; int err; atomic_inc(&fc->num_waiting); block_sigs(&oldset); intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked); restore_sigs(&oldset); err = -EINTR; if (intr) goto out; err = -ENOTCONN; if (!fc->connected) goto out; req = fuse_request_alloc(); err = -ENOMEM; if (!req) goto out; fuse_req_init_context(req); req->waiting = 1; return req; out: atomic_dec(&fc->num_waiting); return ERR_PTR(err); }
struct gfskdev_req * gfskdev_get_req(struct gfskdev_conn *dc) { struct gfskdev_req *req; sigset_t oldset; int intr; int err; atomic_inc(&dc->num_waiting); block_sigs(&oldset); intr = wait_event_interruptible(dc->blocked_waitq, !dc->blocked); restore_sigs(&oldset); err = -EINTR; if (intr) goto out; err = -ENOTCONN; if (!dc->connected) goto out; req = gfskdev_request_alloc(); err = -ENOMEM; if (!req) goto out; gfskdev_req_init_context(req); req->waiting = 1; return (req); out: atomic_dec(&dc->num_waiting); return (ERR_PTR(err)); }
/* * eval - Evaluate the command line that the user has just typed in * * If the user has requested a built-in command (quit, jobs, bg or fg) * then execute it immediately. Otherwise, fork a child process and * run the job in the context of the child. If the job is running in * the foreground, wait for it to terminate and then return. Note: * each child process must have a unique process group ID so that our * background children don't receive SIGINT (SIGTSTP) from the kernel * when we type ctrl-c (ctrl-z) at the keyboard. */ void eval(char *cmdline) { int bg; /* should the job run in bg or fg? */ struct cmdline_tokens tok; /* Parse command line */ bg = parseline(cmdline, &tok); if (bg == -1) return; /* parsing error */ if (tok.argv[0] == NULL) return; /* ignore empty lines */ if(!check_builtin_cmds(&tok)){ sigset_t newset,oldset; block_sigs(&newset, &oldset); pid_t pid = fork(); if(pid < 0){ unix_error("error in fork child process"); } if(pid == 0){ handle_child_process(&tok, bg, pid, &oldset); } else { handle_parent_process(&tok, bg, pid, &oldset, cmdline); } } return; }
/* Called with fc->lock held. Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	/*
	 * Phase 1: interruptible wait.  If userspace supports interrupt
	 * requests, wait with any signal able to break us out; on a
	 * signal, mark the request interrupted and (if already sent)
	 * queue an INTERRUPT request for userspace.
	 */
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	/*
	 * Phase 2: forced requests must not be given up on, so wait
	 * uninterruptibly (dropping fc->lock around the sleep);
	 * otherwise wait again with all but fatal signals blocked.
	 */
	if (req->force) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	/* Still unfinished after a fatal signal: give up on the reply */
	req->out.h.error = -EINTR;
	req->aborted = 1;

aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is being
		   copied to/from the buffers of req.  During locked
		   state, there mustn't be any filesystem operation
		   (e.g. page fault), since that could lead to
		   deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	/*
	 * Tear-down: a still-pending request can simply be unlinked and
	 * dropped; one already sent to userspace must be waited on until
	 * userspace finishes with it (its buffers may still be in use).
	 */
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}
/*
 * Reserve and allocate a request with room for @npages pages.
 *
 * The caller is counted in fc->num_waiting for the whole attempt.  If
 * allocation is currently throttled (fuse_block_alloc()), sleep on the
 * blocked_waitq — exclusively, with all but fatal signals blocked —
 * until it no longer is.  For a failed background allocation another
 * waiter is woken so the exclusive wait does not stall.  Returns the
 * initialized request or ERR_PTR(-EINTR/-ENOTCONN/-ENOMEM).
 */
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err = -EINTR;

	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		if (intr)
			goto out;
	}

	if (!fc->connected) {
		err = -ENOTCONN;
		goto out;
	}

	req = fuse_request_alloc(npages);
	if (!req) {
		/* Pass the baton to the next exclusive waiter */
		if (for_background)
			wake_up(&fc->blocked_waitq);
		err = -ENOMEM;
		goto out;
	}

	fuse_req_init_context(req);
	req->waiting = 1;
	req->background = for_background;
	return req;

out:
	/* Undo the reservation taken at entry */
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}