int
sys_socketpair(struct lwp *l, const struct sys_socketpair_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)		domain;
		syscallarg(int)		type;
		syscallarg(int)		protocol;
		syscallarg(int *)	rsv;
	} */
	file_t *fp1, *fp2;
	struct socket *so1, *so2;
	int fd, error, sv[2];
	proc_t *p = curproc;
	int flags = SCARG(uap, type) & SOCK_FLAGS_MASK;
	int type = SCARG(uap, type) & ~SOCK_FLAGS_MASK;
	int domain = SCARG(uap, domain);
	int proto = SCARG(uap, protocol);

	error = makesocket(l, &fp1, &fd, flags, type, domain, proto, NULL);
	if (error)
		return error;
	so1 = fp1->f_socket;
	sv[0] = fd;

	error = makesocket(l, &fp2, &fd, flags, type, domain, proto, so1);
	if (error)
		goto out;
	so2 = fp2->f_socket;
	sv[1] = fd;

	solock(so1);
	error = soconnect2(so1, so2);
	if (error == 0 && type == SOCK_DGRAM) {
		/*
		 * Datagram socket connection is asymmetric.
		 */
		error = soconnect2(so2, so1);
	}
	sounlock(so1);

	if (error == 0)
		error = copyout(sv, SCARG(uap, rsv), sizeof(sv));
	if (error == 0) {
		fd_affix(p, fp2, sv[1]);
		fd_affix(p, fp1, sv[0]);
		return 0;
	}
	fd_abort(p, fp2, sv[1]);
	(void)soclose(so2);
out:
	fd_abort(p, fp1, sv[0]);
	(void)soclose(so1);
	return error;
}
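/*
 * Illustrative userland counterpart (not part of the kernel source):
 * a minimal sketch of socketpair(2) with a SOCK_* flag OR'ed into the
 * type argument, which is exactly what the SOCK_FLAGS_MASK split in
 * sys_socketpair() above separates out again.
 */
#include <sys/socket.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	int sv[2];

	/* Both descriptors are created close-on-exec by the type flag. */
	if (socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, sv) == -1)
		err(1, "socketpair");
	(void)write(sv[0], "ping", 4);
	(void)close(sv[0]);
	(void)close(sv[1]);
	return 0;
}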
int
pipe1(struct lwp *l, register_t *retval, int flags)
{
	file_t *rf, *wf;
	struct socket *rso, *wso;
	int fd, error;
	proc_t *p;

	if (flags & ~(O_CLOEXEC|O_NONBLOCK|O_NOSIGPIPE))
		return EINVAL;
	p = curproc;
	if ((error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0, l, NULL)) != 0)
		return error;
	if ((error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0, l, rso)) != 0)
		goto free1;
	/* remember this socket pair implements a pipe */
	wso->so_state |= SS_ISAPIPE;
	rso->so_state |= SS_ISAPIPE;
	if ((error = fd_allocfile(&rf, &fd)) != 0)
		goto free2;
	retval[0] = fd;
	rf->f_flag = FREAD | flags;
	rf->f_type = DTYPE_SOCKET;
	rf->f_ops = &socketops;
	rf->f_socket = rso;
	if ((error = fd_allocfile(&wf, &fd)) != 0)
		goto free3;
	wf->f_flag = FWRITE | flags;
	wf->f_type = DTYPE_SOCKET;
	wf->f_ops = &socketops;
	wf->f_socket = wso;
	retval[1] = fd;
	solock(wso);
	error = unp_connect2(wso, rso);
	sounlock(wso);
	if (error != 0)
		goto free4;
	fd_affix(p, wf, (int)retval[1]);
	fd_affix(p, rf, (int)retval[0]);
	return (0);
free4:
	fd_abort(p, wf, (int)retval[1]);
free3:
	fd_abort(p, rf, (int)retval[0]);
free2:
	(void)soclose(wso);
free1:
	(void)soclose(rso);
	return error;
}
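/*
 * Illustrative userland counterpart (not part of the kernel source):
 * pipe2(2) reaches pipe1() above carrying the O_* flags it was given,
 * and plain pipe(2) is the flags == 0 case.  A minimal sketch:
 */
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	int fds[2];
	char buf[4];

	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == -1)
		err(1, "pipe2");
	(void)write(fds[1], "ping", 4);		/* fds[1] is the write end */
	(void)read(fds[0], buf, sizeof(buf));	/* fds[0] is the read end */
	(void)close(fds[0]);
	(void)close(fds[1]);
	return 0;
}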
static int
pty_alloc_slave(struct lwp *l, int *fd, dev_t dev, struct mount *mp)
{
	int error;
	struct file *fp;
	struct vnode *vp;

	/* Grab a file descriptor for the slave. */
	if ((error = fd_allocfile(&fp, fd)) != 0) {
		DPRINTF(("fd_allocfile %d\n", error));
		return error;
	}

	if (ptm == NULL) {
		error = EOPNOTSUPP;
		goto bad;
	}

	if ((error = (*ptm->allocvp)(mp, l, &vp, dev, 't')) != 0)
		goto bad;

	if ((error = pty_vn_open(vp, l)) != 0)
		goto bad;

	fp->f_flag = FREAD|FWRITE;
	fp->f_type = DTYPE_VNODE;
	fp->f_ops = &vnops;
	fp->f_data = vp;
	VOP_UNLOCK(vp);
	fd_affix(curproc, fp, *fd);
	return 0;
bad:
	fd_abort(curproc, fp, *fd);
	return error;
}
/*
 * kqueue(2) system call.
 */
static int
kqueue1(struct lwp *l, int flags, register_t *retval)
{
	struct kqueue *kq;
	file_t *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;
	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
	cv_init(&kq->kq_cv, "kqueue");
	selinit(&kq->kq_sel);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = kq;
	*retval = fd;
	kq->kq_fdp = curlwp->l_fd;
	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
	fd_affix(curproc, fp, fd);
	return error;
}
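/*
 * Illustrative userland counterpart (not part of the kernel source):
 * a minimal kqueue(2) consumer.  The descriptor returned is the
 * DTYPE_KQUEUE file installed by kqueue1() above; flags such as
 * O_CLOEXEC arrive via the same kqueue1() path and are applied through
 * fd_set_exclose().
 */
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	struct kevent kev;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	/* Register interest in stdin becoming readable. */
	EV_SET(&kev, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, 0);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent register");
	/* Block until the event fires. */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
		err(1, "kevent wait");
	(void)close(kq);
	return 0;
}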
int
do_sys_accept(struct lwp *l, int sock, struct mbuf **name,
    register_t *new_sock, const sigset_t *mask, int flags, int clrflags)
{
	file_t *fp, *fp2;
	struct mbuf *nam;
	int error, fd;
	struct socket *so, *so2;
	short wakeup_state = 0;

	if ((fp = fd_getfile(sock)) == NULL)
		return EBADF;
	if (fp->f_type != DTYPE_SOCKET) {
		fd_putfile(sock);
		return ENOTSOCK;
	}
	if ((error = fd_allocfile(&fp2, &fd)) != 0) {
		fd_putfile(sock);
		return error;
	}
	nam = m_get(M_WAIT, MT_SONAME);
	*new_sock = fd;
	so = fp->f_socket;
	solock(so);

	if (__predict_false(mask))
		sigsuspendsetup(l, mask);

	if (!(so->so_proto->pr_flags & PR_LISTEN)) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto bad;
	}
	if ((so->so_state & SS_NBIO) && so->so_qlen == 0) {
		error = EWOULDBLOCK;
		goto bad;
	}
	while (so->so_qlen == 0 && so->so_error == 0) {
		if (so->so_state & SS_CANTRCVMORE) {
			so->so_error = ECONNABORTED;
			break;
		}
		if (wakeup_state & SS_RESTARTSYS) {
			error = ERESTART;
			goto bad;
		}
		error = sowait(so, true, 0);
		if (error) {
			goto bad;
		}
		wakeup_state = so->so_state;
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto bad;
	}
	/* connection has been removed from the listen queue */
	KNOTE(&so->so_rcv.sb_sel.sel_klist, NOTE_SUBMIT);
	so2 = TAILQ_FIRST(&so->so_q);
	if (soqremque(so2, 1) == 0)
		panic("accept");
	fp2->f_type = DTYPE_SOCKET;
	fp2->f_flag = (fp->f_flag & ~clrflags) |
	    ((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0) |
	    ((flags & SOCK_NOSIGPIPE) ? FNOSIGPIPE : 0);
	fp2->f_ops = &socketops;
	fp2->f_socket = so2;
	if (fp2->f_flag & FNONBLOCK)
		so2->so_state |= SS_NBIO;
	else
		so2->so_state &= ~SS_NBIO;
	error = soaccept(so2, nam);
	so2->so_cred = kauth_cred_dup(so->so_cred);
	sounlock(so);
	if (error) {
		/* an error occurred, free the file descriptor and mbuf */
		m_freem(nam);
		mutex_enter(&fp2->f_lock);
		fp2->f_count++;
		mutex_exit(&fp2->f_lock);
		closef(fp2);
		fd_abort(curproc, NULL, fd);
	} else {
		fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0);
		fd_affix(curproc, fp2, fd);
		*name = nam;
	}
	fd_putfile(sock);
	if (__predict_false(mask))
		sigsuspendteardown(l);
	return error;
bad:
	sounlock(so);
	m_freem(nam);
	fd_putfile(sock);
	fd_abort(curproc, fp2, fd);
	if (__predict_false(mask))
		sigsuspendteardown(l);
	return error;
}
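/*
 * Illustrative userland counterpart (not part of the kernel source):
 * a minimal accept on a listening AF_LOCAL socket.  Plain accept(2)
 * reaches do_sys_accept() above with a NULL mask and no flags; on
 * NetBSD the paccept(2) variant is what supplies the signal mask and
 * the SOCK_NONBLOCK/SOCK_NOSIGPIPE/SOCK_CLOEXEC flags handled there.
 */
#include <sys/socket.h>
#include <sys/un.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	struct sockaddr_un sun;
	int s, c;

	if ((s = socket(AF_LOCAL, SOCK_STREAM, 0)) == -1)
		err(1, "socket");
	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_LOCAL;
	strlcpy(sun.sun_path, "/tmp/demo.sock", sizeof(sun.sun_path));
	if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) == -1)
		err(1, "bind");
	if (listen(s, 5) == -1)
		err(1, "listen");
	/* Each successful accept installs a new DTYPE_SOCKET descriptor. */
	if ((c = accept(s, NULL, NULL)) == -1)
		err(1, "accept");
	(void)close(c);
	(void)close(s);
	return 0;
}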
static int
pty_alloc_master(struct lwp *l, int *fd, dev_t *dev, struct mount *mp)
{
	int error;
	struct file *fp;
	struct vnode *vp;
	int md;

	if ((error = fd_allocfile(&fp, fd)) != 0) {
		DPRINTF(("fd_allocfile %d\n", error));
		return error;
	}
retry:
	/* Find and open a free master pty. */
	*dev = pty_getfree();
	md = minor(*dev);
	if ((error = pty_check(md)) != 0) {
		DPRINTF(("pty_check %d\n", error));
		goto bad;
	}
	if (ptm == NULL) {
		DPRINTF(("no ptm\n"));
		error = EOPNOTSUPP;
		goto bad;
	}
	/*
	 * XXX Since PTYFS now supports multiple instances, if more than
	 * one PTYFS is mounted we must check ptyfs_used_tbl here to find
	 * out whether the ptyfsnode belongs to the appropriate mount, and
	 * skip the node if not: the pty could have been released while
	 * ptyfs_reclaim has not yet had a chance to release the
	 * corresponding node of the other mount point.
	 *
	 * It is important that ptyfs_used_tbl holds only one mount
	 * point's ptyfsnode per device; otherwise every entry would have
	 * access to the device, which is a security problem, and vnode
	 * and memory usage would be inefficient.  You can test this by
	 * changing a_recycle from true to false in ptyfs_inactive.
	 */
	if ((error = (*ptm->allocvp)(mp, l, &vp, *dev, 'p')) != 0) {
		DPRINTF(("pty_allocvp %d\n", error));
		goto bad;
	}

	if ((error = pty_vn_open(vp, l)) != 0) {
		DPRINTF(("pty_vn_open %d\n", error));
		/*
		 * Check if the master open failed because we lost
		 * the race to grab it.
		 */
		if (error != EIO)
			goto bad;
		error = !pty_isfree(md, 1);
		DPRINTF(("pty_isfree %d\n", error));
		if (error)
			goto retry;
		else
			goto bad;
	}
	fp->f_flag = FREAD|FWRITE;
	fp->f_type = DTYPE_VNODE;
	fp->f_ops = &vnops;
	fp->f_data = vp;
	VOP_UNLOCK(vp);
	fd_affix(curproc, fp, *fd);
	return 0;
bad:
	fd_abort(curproc, fp, *fd);
	return error;
}
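/*
 * Illustrative userland counterpart (not part of the kernel source):
 * the standard posix_openpt(3)/grantpt(3)/unlockpt(3)/ptsname(3)
 * sequence.  On NetBSD the master side is allocated along the ptm
 * allocvp path in pty_alloc_master() above and the slave side in
 * pty_alloc_slave(); the exact mapping depends on whether the ptm
 * device and ptyfs are in use, so treat this only as a sketch of the
 * descriptors those functions end up installing.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	int master, slave;
	char *name;

	if ((master = posix_openpt(O_RDWR | O_NOCTTY)) == -1)
		err(1, "posix_openpt");
	if (grantpt(master) == -1 || unlockpt(master) == -1)
		err(1, "grantpt/unlockpt");
	if ((name = ptsname(master)) == NULL)
		err(1, "ptsname");
	if ((slave = open(name, O_RDWR | O_NOCTTY)) == -1)
		err(1, "open slave");
	(void)close(slave);
	(void)close(master);
	return 0;
}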