/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
static void wait_for_partner(struct inode *inode, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(inode);
		if (signal_pending(current))
			break;
	}
}
static int wait_for_partner(struct inode *inode, unsigned int *cnt)
{
	int cur = *cnt;

	while (cur == *cnt) {
		pipe_wait(inode->i_pipe);
		if (signal_pending(current))
			break;
	}
	return cur == *cnt ? -ERESTARTSYS : 0;
}
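/*
 * Illustration (userspace, not part of the sources above): wait_for_partner()
 * is the kernel side of a blocking FIFO open(), which sleeps until the other
 * end shows up. A minimal sketch of that behavior, assuming a hypothetical
 * FIFO path "/tmp/demo_fifo":
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/demo_fifo";	/* hypothetical path */
	int fd;

	if (mkfifo(path, 0600) < 0 && errno != EEXIST) {
		perror("mkfifo");
		return 1;
	}

	/* Blocks (the kernel sleeps in wait_for_partner) until some process
	 * opens the FIFO for writing; O_NONBLOCK would return immediately
	 * instead. */
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("a writer has opened the other end\n");
	close(fd);
	return 0;
}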
static ssize_t pipe_write(struct idesc *idesc, const void *buf, size_t nbyte)
{
	struct pipe *pipe;
	const void *cbuf;
	int len;
	ssize_t res;

	assert(buf);
	assert(idesc);
	assert(idesc->idesc_ops == &idesc_pipe_ops);
	assert(idesc->idesc_amode == FS_MAY_WRITE);

	cbuf = buf;
	/* nbyte == 0 is ok to pass through */

	pipe = idesc_to_pipe(idesc);
	mutex_lock(&pipe->mutex);

	do {
		/* No data can be read at all */
		if (idesc_pipe_isclosed(&pipe->read_desc)) {
			res = -EPIPE;
			break;
		}

		/* Try to write some data */
		len = ring_buff_enqueue(pipe->buff, (void *) cbuf, nbyte);
		if (len > 0) {
			/* A nonzero amount was written, adjust pointers and
			 * notify (read end can't be closed) */
			cbuf += len;
			nbyte -= len;
			idesc_notify(&pipe->read_desc.idesc, POLLIN);
		}

		/* Nothing left to write, exit */
		if (!nbyte) {
			res = cbuf - buf;
			break;
		}

		res = pipe_wait(idesc, pipe, POLLOUT | POLLERR);
	} while (res == 0);

	mutex_unlock(&pipe->mutex);

	return res;
}
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file_inode(file)->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
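/*
 * Illustration (userspace, an assumption rather than part of the sources
 * above): link_ipipe_prep()/link_opipe_prep() appear to be the prep helpers
 * behind the pipe-to-pipe tee(2)/splice(2) paths, and their SPLICE_F_NONBLOCK
 * branches surface as EAGAIN. A minimal sketch following the classic tee
 * pattern: duplicate stdin's pipe to stdout's pipe, then consume the bytes by
 * splicing them to a file (run as e.g. `producer | ./ptee log | consumer`):
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	int fd;
	ssize_t len, slen;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <logfile>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		/* Duplicate whatever sits in the stdin pipe into the stdout
		 * pipe without consuming it; EAGAIN here mirrors the
		 * SPLICE_F_NONBLOCK branch of the prep helpers above. */
		len = tee(STDIN_FILENO, STDOUT_FILENO, INT_MAX, SPLICE_F_NONBLOCK);
		if (len < 0) {
			if (errno == EAGAIN)
				continue;	/* a real program would poll() here */
			perror("tee");
			return 1;
		}
		if (len == 0)
			break;			/* write side of stdin is gone */

		/* Consume the same bytes by splicing them into the file. */
		while (len > 0) {
			slen = splice(STDIN_FILENO, NULL, fd, NULL, len, SPLICE_F_MOVE);
			if (slen < 0) {
				perror("splice");
				return 1;
			}
			len -= slen;
		}
	}
	close(fd);
	return 0;
}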
static ssize_t pipe_read(struct idesc *idesc, void *buf, size_t nbyte)
{
	struct pipe *pipe;
	ssize_t res;

	assert(buf);
	assert(idesc);
	assert(idesc->idesc_ops == &idesc_pipe_ops);
	assert(idesc->idesc_amode == FS_MAY_READ);

	if (!nbyte) {
		return 0;
	}

	pipe = idesc_to_pipe(idesc);
	mutex_lock(&pipe->mutex);

	do {
		res = ring_buff_dequeue(pipe->buff, buf, nbyte);

		if (idesc_pipe_isclosed(&pipe->write_desc)) {
			/* Nothing to do, what's read, that's read */
			break;
		}

		if (res > 0) {
			/* Something was read, notify the write end (it can't
			 * be closed, checked already) */
			idesc_notify(&pipe->write_desc.idesc, POLLOUT);
			break;
		}

		res = pipe_wait(idesc, pipe, POLLIN | POLLERR);
	} while (res == 0);

	mutex_unlock(&pipe->mutex);

	return res;
}
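/*
 * Illustration (userspace, not part of the sources above): the
 * idesc_notify(..., POLLIN) / pipe_wait(..., POLLIN | POLLERR) pairing in the
 * Embox code above is what a poll()-based reader observes as readiness. A
 * minimal sketch using an ordinary POSIX pipe:
 */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	struct pollfd pfd;
	char buf[16];

	if (pipe(fds) < 0)
		return 1;

	if (write(fds[1], "hi", 2) != 2)	/* producer side: raises POLLIN */
		return 1;

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fds[0], buf, sizeof(buf));
		printf("readable, got %zd bytes\n", n);
	}

	close(fds[0]);
	close(fds[1]);
	return 0;
}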
static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	  unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error, atomic;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	mutex_unlock(&inode->i_mutex);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
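/*
 * Illustration (userspace, not part of the sources above): the O_NONBLOCK and
 * "no more writers" branches of pipe_read() map directly to EAGAIN and to a
 * 0-byte (EOF) read in userspace. A minimal sketch:
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char c;
	ssize_t n;

	if (pipe(fds) < 0)
		return 1;

	/* Empty pipe + O_NONBLOCK: the kernel takes the -EAGAIN branch. */
	fcntl(fds[0], F_SETFL, O_NONBLOCK);
	n = read(fds[0], &c, 1);
	printf("empty, nonblocking: n=%zd errno=%s\n", n,
	       n < 0 && errno == EAGAIN ? "EAGAIN" : "other");

	/* No writers left: read() returns 0 (EOF) instead of blocking. */
	close(fds[1]);
	n = read(fds[0], &c, 1);
	printf("writer closed: n=%zd (EOF)\n", n);

	close(fds[0]);
	return 0;
}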
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;
				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}
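/*
 * Illustration (userspace, an assumption rather than part of the sources
 * above): splicing from a regular file into a pipe is the direction that ends
 * up in splice_to_pipe(), with page-cache pages attached to the pipe's buffer
 * ring instead of being copied through userspace. A minimal sketch, assuming
 * a hypothetical "input.txt" exists:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	int in = open("input.txt", O_RDONLY);	/* hypothetical input file */
	ssize_t moved;

	if (in < 0 || pipe(fds) < 0) {
		perror("setup");
		return 1;
	}

	/* Move up to one page of file data into the pipe without a
	 * userspace copy. */
	moved = splice(in, NULL, fds[1], NULL, 4096, SPLICE_F_MOVE);
	if (moved < 0)
		perror("splice");
	else
		printf("spliced %zd bytes into the pipe\n", moved);

	close(in);
	close(fds[0]);
	close(fds[1]);
	return 0;
}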
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
	size_t total_len = iov_iter_count(to);
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	int do_wakeup;
	ssize_t ret;

	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	__pipe_lock(pipe);
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			size_t chars = buf->len;
			size_t written;
			int error;

			if (chars > total_len)
				chars = total_len;

			error = ops->confirm(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			written = copy_page_to_iter(buf->page, buf->offset, chars, to);
			if (unlikely(written < chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;

			/* Was it a packet buffer? Clean up and exit */
			if (buf->flags & PIPE_BUF_FLAG_PACKET) {
				total_len = chars;
				buf->len = 0;
			}

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (pipe->buffers - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!atomic_read(&pipe->writers))
			break;
		if (!atomic_read(&pipe->waiting_writers)) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	__pipe_unlock(pipe);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static ssize_t
pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	ssize_t size, read, ret;

	/* Seeks are not allowed on pipes. */
	ret = -ESPIPE;
	read = 0;
	if (ppos != &filp->f_pos)
		goto out_nolock;

	/* Always return 0 on null read. */
	ret = 0;
	if (count == 0)
		goto out_nolock;

	/* Get the pipe semaphore */
	ret = -ERESTARTSYS;
	if (down_interruptible(PIPE_SEM(*inode)))
		goto out_nolock;

	if (PIPE_EMPTY(*inode)) {
do_more_read:
		ret = 0;
		if (!PIPE_WRITERS(*inode))
			goto out;

		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK)
			goto out;

		for (;;) {
			PIPE_WAITING_READERS(*inode)++;
			pipe_wait(inode);
			PIPE_WAITING_READERS(*inode)--;
			ret = -ERESTARTSYS;
			if (signal_pending(current))
				goto out;
			ret = 0;
			if (!PIPE_EMPTY(*inode))
				break;
			if (!PIPE_WRITERS(*inode))
				goto out;
		}
	}

	/* Read what data is available. */
	ret = -EFAULT;
	while (count > 0 && (size = PIPE_LEN(*inode))) {
		char *pipebuf = PIPE_BASE(*inode) + PIPE_START(*inode);
		ssize_t chars = PIPE_MAX_RCHUNK(*inode);

		if (chars > count)
			chars = count;
		if (chars > size)
			chars = size;

		if (copy_to_user(buf, pipebuf, chars))
			goto out;

		read += chars;
		PIPE_START(*inode) += chars;
		PIPE_START(*inode) &= (PIPE_SIZE - 1);
		PIPE_LEN(*inode) -= chars;
		count -= chars;
		buf += chars;
	}

	/* Cache behaviour optimization */
	if (!PIPE_LEN(*inode))
		PIPE_START(*inode) = 0;

	if (count && PIPE_WAITING_WRITERS(*inode) && !(filp->f_flags & O_NONBLOCK)) {
		/*
		 * We know that we are going to sleep: signal
		 * writers synchronously that there is more
		 * room.
		 */
		wake_up_interruptible_sync(PIPE_WAIT(*inode));
		if (!PIPE_EMPTY(*inode))
			BUG();
		goto do_more_read;
	}
	/* Signal writers asynchronously that there is more room. */
	wake_up_interruptible(PIPE_WAIT(*inode));

	ret = read;
out:
	up(PIPE_SEM(*inode));
out_nolock:
	if (read)
		ret = read;

	UPDATE_ATIME(inode);
	return ret;
}
static ssize_t
pipe_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	ssize_t free, written, ret;

	/* Seeks are not allowed on pipes. */
	ret = -ESPIPE;
	written = 0;
	if (ppos != &filp->f_pos)
		goto out_nolock;

	/* Null write succeeds. */
	ret = 0;
	if (count == 0)
		goto out_nolock;

	ret = -ERESTARTSYS;
	if (down_interruptible(PIPE_SEM(*inode)))
		goto out_nolock;

	/* No readers yields SIGPIPE. */
	if (!PIPE_READERS(*inode))
		goto sigpipe;

	/* If count <= PIPE_BUF, we have to make it atomic. */
	free = (count <= PIPE_BUF ? count : 1);

	/* Wait, or check for, available space. */
	if (filp->f_flags & O_NONBLOCK) {
		ret = -EAGAIN;
		if (PIPE_FREE(*inode) < free)
			goto out;
	} else {
		while (PIPE_FREE(*inode) < free) {
			PIPE_WAITING_WRITERS(*inode)++;
			pipe_wait(inode);
			PIPE_WAITING_WRITERS(*inode)--;
			ret = -ERESTARTSYS;
			if (signal_pending(current))
				goto out;

			if (!PIPE_READERS(*inode))
				goto sigpipe;
		}
	}

	/* Copy into available space. */
	ret = -EFAULT;
	while (count > 0) {
		int space;
		char *pipebuf = PIPE_BASE(*inode) + PIPE_END(*inode);
		ssize_t chars = PIPE_MAX_WCHUNK(*inode);

		if ((space = PIPE_FREE(*inode)) != 0) {
			if (chars > count)
				chars = count;
			if (chars > space)
				chars = space;

			if (copy_from_user(pipebuf, buf, chars))
				goto out;

			written += chars;
			PIPE_LEN(*inode) += chars;
			count -= chars;
			buf += chars;
			space = PIPE_FREE(*inode);
			continue;
		}

		ret = written;
		if (filp->f_flags & O_NONBLOCK)
			break;

		do {
			/*
			 * Synchronous wake-up: it knows that this process
			 * is going to give up this CPU, so it doesn't have
			 * to do idle reschedules.
			 */
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			PIPE_WAITING_WRITERS(*inode)++;
			pipe_wait(inode);
			PIPE_WAITING_WRITERS(*inode)--;
			if (signal_pending(current))
				goto out;
			if (!PIPE_READERS(*inode))
				goto sigpipe;
		} while (!PIPE_FREE(*inode));
		ret = -EFAULT;
	}

	/* Signal readers asynchronously that there is more data. */
	wake_up_interruptible(PIPE_WAIT(*inode));

	update_mctime(inode);

out:
	up(PIPE_SEM(*inode));
out_nolock:
	if (written)
		ret = written;
	return ret;

sigpipe:
	if (written)
		goto out;
	up(PIPE_SEM(*inode));
	send_sig(SIGPIPE, current, 0);
	return -EPIPE;
}
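/*
 * Illustration (userspace, not part of the sources above): the "no readers"
 * path in pipe_write() (the sigpipe: label) is what a process sees as SIGPIPE,
 * or as -EPIPE once SIGPIPE is ignored. A minimal sketch:
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	ssize_t n;

	if (pipe(fds) < 0)
		return 1;

	/* Without this, the kernel's send_sig(SIGPIPE, ...) would kill us. */
	signal(SIGPIPE, SIG_IGN);

	close(fds[0]);			/* drop the only reader */
	n = write(fds[1], "x", 1);	/* hits the sigpipe: path above */
	printf("write after reader closed: n=%zd errno=%s\n", n,
	       n < 0 && errno == EPIPE ? "EPIPE" : "other");

	close(fds[1]);
	return 0;
}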
static ssize_t
pipe_readv(struct file *filp, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	down(PIPE_SEM(*inode));
	for (;;) {
		int size = PIPE_LEN(*inode);
		if (size) {
			char *pipebuf = PIPE_BASE(*inode) + PIPE_START(*inode);
			ssize_t chars = PIPE_MAX_RCHUNK(*inode);

			if (chars > total_len)
				chars = total_len;
			if (chars > size)
				chars = size;

			if (pipe_iov_copy_to_user(iov, pipebuf, chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;

			PIPE_START(*inode) += chars;
			PIPE_START(*inode) &= (PIPE_SIZE - 1);
			PIPE_LEN(*inode) -= chars;
			total_len -= chars;
			do_wakeup = 1;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (PIPE_LEN(*inode))	/* test for cyclic buffers */
			continue;
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
		}
		pipe_wait(inode);
	}
	up(PIPE_SEM(*inode));
	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}
	/*
	 * Hack: we turn off atime updates for -RT kernels.
	 * Who uses them on pipes anyway?
	 */
#ifndef CONFIG_PREEMPT_RT
	if (ret > 0)
		file_accessed(filp);
#endif
	return ret;
}
static ssize_t
pipe_writev(struct file *filp, const struct iovec *_iov,
	    unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	ssize_t ret;
	size_t min;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	min = total_len;
	if (min > PIPE_BUF)
		min = 1;
	down(PIPE_SEM(*inode));
	for (;;) {
		int free;

		if (!PIPE_READERS(*inode)) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		free = PIPE_FREE(*inode);
		if (free >= min) {
			/* transfer data */
			ssize_t chars = PIPE_MAX_WCHUNK(*inode);
			char *pipebuf = PIPE_BASE(*inode) + PIPE_END(*inode);
			/* Always wakeup, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 */
			do_wakeup = 1;
			if (chars > total_len)
				chars = total_len;
			if (chars > free)
				chars = free;

			if (pipe_iov_copy_from_user(pipebuf, iov, chars)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;

			PIPE_LEN(*inode) += chars;
			total_len -= chars;
			if (!total_len)
				break;
		}
		if (PIPE_FREE(*inode) && ret) {
			/* handle cyclic data buffers */
			min = 1;
			continue;
		}
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		PIPE_WAITING_WRITERS(*inode)++;
		pipe_wait(inode);
		PIPE_WAITING_WRITERS(*inode)--;
	}
	up(PIPE_SEM(*inode));
	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
	}
	/*
	 * Hack: we turn off atime updates for -RT kernels.
	 * Who uses them on pipes anyway?
	 */
#ifndef CONFIG_PREEMPT_RT
	if (ret > 0)
		inode_update_time(inode, 1);	/* mtime and ctime */
#endif
	return ret;
}
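/*
 * Illustration (userspace, not part of the sources above): the
 * "min = total_len; if (min > PIPE_BUF) min = 1;" logic in pipe_writev()
 * implements the PIPE_BUF atomicity rule: a nonblocking write of at most
 * PIPE_BUF bytes either transfers everything or fails with EAGAIN, never
 * partially. A minimal sketch:
 */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	static char chunk[PIPE_BUF];	/* zero-filled payload */
	ssize_t n;

	if (pipe(fds) < 0)
		return 1;
	fcntl(fds[1], F_SETFL, O_NONBLOCK);

	/* Fill the pipe until the kernel reports it is full. */
	while ((n = write(fds[1], chunk, sizeof(chunk))) > 0)
		;

	/* A PIPE_BUF-sized write now fails whole rather than going partial. */
	n = write(fds[1], chunk, sizeof(chunk));
	printf("full pipe, <= PIPE_BUF write: n=%zd errno=%s\n", n,
	       n < 0 && errno == EAGAIN ? "EAGAIN" : "other");

	close(fds[0]);
	close(fds[1]);
	return 0;
}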
static ssize_t
pipe_readv(struct file *filp, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *info;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	down(PIPE_SEM(*inode));
	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;
		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error;

			if (chars > total_len)
				chars = total_len;

			addr = ops->map(filp, info, buf);
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
			ops->unmap(info, buf);
			if (unlikely(error)) {
				if (!ret)
					ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
		}
		pipe_wait(inode);
	}
	up(PIPE_SEM(*inode));
	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}