/* ARGSUSED */
/*
 * pipe() system call.
 *
 * Creates a connected pair of half-duplex pipe endpoints and returns
 * their file descriptors to userspace: retval[0] is the read end,
 * retval[1] is the write end.  Both endpoints share a single mutex
 * (pmtx) and, under MACF, a single MAC label.
 *
 * Returns 0 on success; on failure returns an errno (ENOMEM if the
 * mutex cannot be allocated, ENFILE if the pipe structures cannot be
 * created, or whatever falloc()/pipespace() report) and tears down any
 * partially-constructed state.
 */
int
pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval)
{
	struct fileproc *rf, *wf;
	struct pipe *rpipe, *wpipe;
	lck_mtx_t *pmtx;
	int fd, error;

	/* Allocate the mutex that both endpoints will share (wired up below). */
	if ((pmtx = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr)) == NULL)
		return (ENOMEM);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		error = ENFILE;
		goto freepipes;
	}
	/*
	 * allocate the space for the normal I/O direction up
	 * front... we'll delay the allocation for the other
	 * direction until a write actually occurs (most likely it won't)...
	 */
	error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0));
	if (error)
		goto freepipes;

	TAILQ_INIT(&rpipe->pipe_evlist);
	TAILQ_INIT(&wpipe->pipe_evlist);

	/* Reserve the read-side descriptor first. */
	error = falloc(p, &rf, &fd, vfs_context_current());
	if (error) {
		goto freepipes;
	}
	retval[0] = fd;

	/*
	 * for now we'll create half-duplex pipes(refer returns section above).
	 * this is what we've always supported..
	 */
	rf->f_flag = FREAD;
	rf->f_data = (caddr_t)rpipe;
	rf->f_ops = &pipeops;

	/* Reserve the write-side descriptor. */
	error = falloc(p, &wf, &fd, vfs_context_current());
	if (error) {
		/* Release the read-side fd/fileproc reserved above. */
		fp_free(p, retval[0], rf);
		goto freepipes;
	}
	wf->f_flag = FWRITE;
	wf->f_data = (caddr_t)wpipe;
	wf->f_ops = &pipeops;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	/* both structures share the same mutex */
	rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx;

	retval[1] = fd;
#if CONFIG_MACF
	/*
	 * XXXXXXXX SHOULD NOT HOLD FILE_LOCK() XXXXXXXXXXXX
	 *
	 * struct pipe represents a pipe endpoint. The MAC label is shared
	 * between the connected endpoints. As a result mac_pipe_label_init() and
	 * mac_pipe_label_associate() should only be called on one of the endpoints
	 * after they have been connected.
	 */
	mac_pipe_label_init(rpipe);
	mac_pipe_label_associate(kauth_cred_get(), rpipe);
	wpipe->pipe_label = rpipe->pipe_label;
#endif
	/*
	 * Publish both descriptors to the process under the fd-table lock so
	 * userspace never observes a half-created pipe pair.
	 */
	proc_fdlock_spin(p);
	procfdtbl_releasefd(p, retval[0], NULL);
	procfdtbl_releasefd(p, retval[1], NULL);
	fp_drop(p, retval[0], rf, 1);
	fp_drop(p, retval[1], wf, 1);
	proc_fdunlock(p);

	return (0);

freepipes:
	/*
	 * rpipe/wpipe may still be NULL here if pipe_create() failed;
	 * presumably pipeclose() tolerates NULL -- TODO confirm.
	 */
	pipeclose(rpipe);
	pipeclose(wpipe);
	lck_mtx_free(pmtx, pipe_mtx_grp);

	return (error);
}
/*
 * Per-process iteration callout: walk p's open-file table and mark as
 * defunct every IPv4/IPv6 socket that is bound to (inp_boundifp), or
 * last sent traffic via (inp_last_outifp), the interface passed in
 * 'arg' -- or an interface whose delegate is that interface.
 *
 * Always returns PROC_RETURNED so the process iteration continues,
 * even when 'arg' is NULL or individual sockets fail to defunct
 * (failures are only logged).
 */
static int
shutdown_sockets_on_interface_proc_callout(proc_t p, void *arg)
{
	struct filedesc *fdp;
	int i;
	struct ifnet *ifp = (struct ifnet *)arg;

	if (ifp == NULL)
		return (PROC_RETURNED);

	proc_fdlock(p);
	fdp = p->p_fd;
	for (i = 0; i < fdp->fd_nfiles; i++) {
		struct fileproc *fp = fdp->fd_ofiles[i];
		struct fileglob *fg;
		struct socket *so;
		struct inpcb *inp;
		struct ifnet *inp_ifp;
		int error;

		/* Skip empty slots and fds still being set up (UF_RESERVED). */
		if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
			continue;
		}
		fg = fp->f_fglob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET)
			continue;

		so = (struct socket *)fp->f_fglob->fg_data;
		if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6)
			continue;

		inp = (struct inpcb *)so->so_pcb;

		/*
		 * Take a use-count reference on the PCB; skip it if it is
		 * already on its way out (WNT_STOPUSING).
		 */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
			continue;

		socket_lock(so, 1);

		/*
		 * Re-check under the socket lock while dropping our
		 * reference; the PCB may have started dying in between.
		 */
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			socket_unlock(so, 1);
			continue;
		}

		/* Prefer the explicitly-bound interface over the last output one. */
		if (inp->inp_boundifp != NULL) {
			inp_ifp = inp->inp_boundifp;
		} else if (inp->inp_last_outifp != NULL) {
			inp_ifp = inp->inp_last_outifp;
		} else {
			socket_unlock(so, 1);
			continue;
		}

		/* Match the target interface directly or via delegation. */
		if (inp_ifp != ifp && inp_ifp->if_delegated.ifp != ifp) {
			socket_unlock(so, 1);
			continue;
		}
		error = sosetdefunct(p, so, 0, TRUE);
		if (error != 0) {
			log(LOG_ERR, "%s: sosetdefunct() error %d",
			    __func__, error);
		} else {
			error = sodefunct(p, so, 0);
			if (error != 0) {
				log(LOG_ERR, "%s: sodefunct() error %d",
				    __func__, error);
			}
		}

		socket_unlock(so, 1);
	}
	proc_fdunlock(p);

	return (PROC_RETURNED);
}