struct fuse_req *fuse_get_req(struct fuse_conn *fc) { struct fuse_req *req; sigset_t oldset; int intr; int err; atomic_inc(&fc->num_waiting); block_sigs(&oldset); intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked); restore_sigs(&oldset); err = -EINTR; if (intr) goto out; err = -ENOTCONN; if (!fc->connected) goto out; req = fuse_request_alloc(); err = -ENOMEM; if (!req) goto out; fuse_req_init_context(req); req->waiting = 1; return req; out: atomic_dec(&fc->num_waiting); return ERR_PTR(err); }
struct gfskdev_req * gfskdev_get_req(struct gfskdev_conn *dc) { struct gfskdev_req *req; sigset_t oldset; int intr; int err; atomic_inc(&dc->num_waiting); block_sigs(&oldset); intr = wait_event_interruptible(dc->blocked_waitq, !dc->blocked); restore_sigs(&oldset); err = -EINTR; if (intr) goto out; err = -ENOTCONN; if (!dc->connected) goto out; req = gfskdev_request_alloc(); err = -ENOMEM; if (!req) goto out; gfskdev_req_init_context(req); req->waiting = 1; return (req); out: atomic_dec(&dc->num_waiting); return (ERR_PTR(err)); }
/*
 * Wait until the request is finished (FUSE_REQ_FINISHED) or must be
 * given up on, updating its state accordingly.
 *
 * Called with fc->lock held.  Releases, and then reacquires it around
 * every sleep.
 *
 * Phases, in order:
 *  1. If the server supports interrupts, wait interruptibly; on a
 *     signal, mark the request interrupted and queue an INTERRUPT if it
 *     has already been sent to userspace.
 *  2. Wait again — uninterruptibly for forced requests, otherwise with
 *     only fatal signals allowed (block_sigs masks the rest).
 *  3. If still unfinished, fail the request with -EINTR and mark it
 *     aborted, then fall through to the teardown below.
 */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Signal arrived before completion: flag it, and tell
		   userspace if the request is already in its hands. */
		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (req->force) {
		/* Forced requests must run to completion: sleep
		   uninterruptibly outside fc->lock. */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	/* Giving up: report -EINTR to the caller and abort the request. */
	req->out.h.error = -EINTR;
	req->aborted = 1;

aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is being
		   copied to/from the buffers of req.  During locked
		   state, there mustn't be any filesystem operation
		   (e.g. page fault), since that could lead to
		   deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		/* Never picked up by userspace: unlink it and drop the
		   queue's reference. */
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		/* Already in userspace's hands: must wait for the reply
		   before the request can be torn down. */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}
/*
 * Reserve a slot and allocate a request with room for @npages pages.
 *
 * The caller is counted in fc->num_waiting while blocked; the error
 * path drops the count again.  When fuse_block_alloc() says allocation
 * must wait (presumably throttling — semantics live elsewhere; confirm
 * against its definition), sleep as an *exclusive* waiter on
 * fc->blocked_waitq so each wake-up releases exactly one waiter.
 *
 * Returns the new request with waiting/background set, or ERR_PTR:
 * -EINTR on signal, -ENOTCONN if disconnected, -ENOMEM on allocation
 * failure.
 */
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;

	atomic_inc(&fc->num_waiting);
	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		/* Only non-blocked signals wake us (block_sigs masks
		   the rest); exclusive wait consumes one wake-up. */
		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		/* We consumed an exclusive wake-up above but won't use
		   the slot: pass the wake-up on to the next background
		   waiter. */
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	req->waiting = 1;
	req->background = for_background;
	return req;

out:
	/* Undo the num_waiting reservation taken on entry. */
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}