/*
 * initialize and allocate VM and memory for pipe
 */
int
pipe_create(struct pipe *cpipe)
{
	int error;

	/* so pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.buffer = NULL;

	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

	error = pipespace(cpipe, PIPE_SIZE);
	if (error != 0)
		return (error);

	getnanotime(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	cpipe->pipe_pgid = NO_PID;

	return (0);
}
/*
 * Expand the size of the pipe while there is data to be read, then free
 * the old buffer once the currently buffered data has been transferred
 * to the new storage.
 * Required: PIPE_LOCK and io lock to be held by caller.
 * Returns 0 on success or if no expansion is possible.
 */
static int
expand_pipespace(struct pipe *p, int target_size)
{
	struct pipe tmp, oldpipe;
	int error;

	tmp.pipe_buffer.buffer = 0;

	if (p->pipe_buffer.size >= (unsigned) target_size) {
		return 0; /* the existing buffer is max size possible */
	}

	/* create enough space in the target */
	error = pipespace(&tmp, target_size);
	if (error != 0)
		return (error);

	oldpipe.pipe_buffer.buffer = p->pipe_buffer.buffer;
	oldpipe.pipe_buffer.size = p->pipe_buffer.size;

	memcpy(tmp.pipe_buffer.buffer, p->pipe_buffer.buffer, p->pipe_buffer.size);
	if (p->pipe_buffer.cnt > 0 && p->pipe_buffer.in <= p->pipe_buffer.out) {
		/* we are in State 3 and need extra copying for read to be consistent */
		memcpy(&tmp.pipe_buffer.buffer[p->pipe_buffer.size],
		    p->pipe_buffer.buffer, p->pipe_buffer.size);
		p->pipe_buffer.in += p->pipe_buffer.size;
	}

	p->pipe_buffer.buffer = tmp.pipe_buffer.buffer;
	p->pipe_buffer.size = tmp.pipe_buffer.size;

	pipe_free_kmem(&oldpipe);
	return 0;
}
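/*
 * Illustrative sketch only (not part of this file): a userspace mock-up of
 * the "copy twice, then shift the write index" trick that expand_pipespace()
 * uses to un-wrap a circular buffer while growing it.  The ring_t type and
 * ring_grow() are hypothetical names invented for this example, and it
 * assumes newsize is at least twice the old size.
 */
#include <stdlib.h>
#include <string.h>

typedef struct {
	char	*buf;
	size_t	 size;		/* capacity */
	size_t	 in;		/* next write offset */
	size_t	 out;		/* next read offset */
	size_t	 cnt;		/* bytes currently buffered */
} ring_t;

static int
ring_grow(ring_t *r, size_t newsize)
{
	char *nbuf;

	if (r->size >= newsize)
		return 0;			/* already large enough */
	if ((nbuf = malloc(newsize)) == NULL)
		return -1;

	/* First copy: mirror the old buffer into the start of the new one. */
	memcpy(nbuf, r->buf, r->size);

	if (r->cnt > 0 && r->in <= r->out) {
		/*
		 * Wrapped case ("State 3"): readable data occupies
		 * [out, size) followed by [0, in).  Copy the old buffer a
		 * second time right after the first copy, then push 'in'
		 * past the first copy so the readable region becomes one
		 * contiguous run starting at 'out'.
		 */
		memcpy(nbuf + r->size, r->buf, r->size);
		r->in += r->size;
	}

	free(r->buf);
	r->buf = nbuf;
	r->size = newsize;
	return 0;
}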
int
pipe_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {
		if ((error = pipelock(wpipe)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred unbusy and return, waking up any pending
	 * readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		return (error);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

retrywrite:
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			if ((error = pipelock(wpipe)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;

				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */
				error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
				    segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");
#endif
					error = uiomove(&wpipe->pipe_buffer.buffer[0],
					    size - segsize, uio);
				}

				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
#ifdef DIAGNOSTIC
						if (wpipe->pipe_buffer.in !=
						    size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
#endif
						wpipe->pipe_buffer.in = size - segsize;
					}

					wpipe->pipe_buffer.cnt += size;
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");
#endif
				}
				pipeunlock(wpipe);
			}
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, (PRIBIO + 1)|PCATCH, "pipewr", 0);
			if (error)
				break;

			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);

	/*
	 * We have something to offer, wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return (error);
}
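/*
 * Illustrative sketch only: the two-segment copy that pipe_write() performs
 * with uiomove() when a write wraps around the end of the circular buffer,
 * expressed with plain memcpy().  ring_write() is a hypothetical name and
 * reuses the ring_t mock-up defined after expand_pipespace() above; the
 * caller is assumed to have already clamped 'len' for PIPE_BUF atomicity.
 */
static size_t
ring_write(ring_t *r, const char *src, size_t len)
{
	size_t space = r->size - r->cnt;
	size_t size, segsize;

	if (len > space)
		len = space;			/* only copy what fits */
	size = len;

	/* First segment: from 'in' up to the end of the buffer. */
	segsize = r->size - r->in;
	if (segsize > size)
		segsize = size;
	memcpy(r->buf + r->in, src, segsize);

	/* Second segment: wrap to the start of the buffer if needed. */
	if (segsize < size)
		memcpy(r->buf, src + segsize, size - segsize);

	/* Advance the write index, wrapping it modulo the buffer size. */
	r->in += size;
	if (r->in >= r->size)
		r->in -= r->size;
	r->cnt += size;
	return size;
}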
/* ARGSUSED */
int
pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval)
{
	struct fileproc *rf, *wf;
	struct pipe *rpipe, *wpipe;
	lck_mtx_t *pmtx;
	int fd, error;

	if ((pmtx = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr)) == NULL)
		return (ENOMEM);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		error = ENFILE;
		goto freepipes;
	}
	/*
	 * allocate the space for the normal I/O direction up
	 * front... we'll delay the allocation for the other
	 * direction until a write actually occurs (most likely it won't)...
	 */
	error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0));
	if (error)
		goto freepipes;

	TAILQ_INIT(&rpipe->pipe_evlist);
	TAILQ_INIT(&wpipe->pipe_evlist);

	error = falloc(p, &rf, &fd, vfs_context_current());
	if (error) {
		goto freepipes;
	}
	retval[0] = fd;

	/*
	 * for now we'll create half-duplex pipes (refer returns section above).
	 * this is what we've always supported..
	 */
	rf->f_flag = FREAD;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	error = falloc(p, &wf, &fd, vfs_context_current());
	if (error) {
		fp_free(p, retval[0], rf);
		goto freepipes;
	}
	wf->f_flag = FWRITE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	/* both structures share the same mutex */
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;

	retval[1] = fd;
#if CONFIG_MACF
	/*
	 * XXXXXXXX SHOULD NOT HOLD FILE_LOCK() XXXXXXXXXXXX
	 *
	 * struct pipe represents a pipe endpoint.  The MAC label is shared
	 * between the connected endpoints.  As a result mac_pipe_label_init()
	 * and mac_pipe_label_associate() should only be called on one of the
	 * endpoints after they have been connected.
	 */
	mac_pipe_label_init(rpipe);
	mac_pipe_label_associate(kauth_cred_get(), rpipe);
	wpipe->pipe_label = rpipe->pipe_label;
#endif
	proc_fdlock_spin(p);
	procfdtbl_releasefd(p, retval[0], NULL);
	procfdtbl_releasefd(p, retval[1], NULL);
	fp_drop(p, retval[0], rf, 1);
	fp_drop(p, retval[1], wf, 1);
	proc_fdunlock(p);

	return (0);

freepipes:
	pipeclose(rpipe);
	pipeclose(wpipe);
	lck_mtx_free(pmtx, pipe_mtx_grp);
	return (error);
}
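/*
 * Userspace view of the syscall above: the classic pipe(2)/fork(2) pattern.
 * retval[0] and retval[1] set by pipe() above become fds[0] (read end) and
 * fds[1] (write end) in the calling process.  Standalone example, not part
 * of this file.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int
main(void)
{
	int fds[2];
	char buf[64];
	ssize_t n;

	if (pipe(fds) == -1) {
		perror("pipe");
		return 1;
	}

	switch (fork()) {
	case -1:
		perror("fork");
		return 1;
	case 0:					/* child: writer */
		close(fds[0]);			/* close unused read end */
		write(fds[1], "hello", 5);
		close(fds[1]);
		_exit(0);
	default:				/* parent: reader */
		close(fds[1]);			/* close unused write end */
		n = read(fds[0], buf, sizeof(buf));
		if (n > 0)
			printf("read %zd bytes: %.*s\n", n, (int)n, buf);
		close(fds[0]);
		wait(NULL);
	}
	return 0;
}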