/*
 * Outer subroutine:
 * Connect from a socket to a specified address.
 * Both address and port must be specified in argument sin.
 * If we don't have a local address for this socket yet,
 * then pick one.
 *
 * Returns 0 on success; EADDRINUSE if the 4-tuple is already taken,
 * or an error from in6_pcbladdr()/in6_pcbbind().
 * Called with the socket lock held; the lock is temporarily dropped
 * around the PCB hash lookup and may be dropped again to take the
 * pcbinfo rwlock in the correct order.
 */
int
in6_pcbconnect(
	struct inpcb *inp,
	struct sockaddr *nam,
	struct proc *p)
{
	struct in6_addr addr6;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
	struct inpcb *pcb;
	int error;
	unsigned int outif = 0;

	/*
	 * Call inner routine, to assign local interface address.
	 * in6_pcbladdr() may automatically fill in sin6_scope_id.
	 */
	if ((error = in6_pcbladdr(inp, nam, &addr6, &outif)) != 0)
		return(error);

	/*
	 * Drop the socket lock across the hash lookup, then retake it.
	 * If the chosen local address is still unspecified, probe with
	 * the candidate address picked by in6_pcbladdr() above.
	 */
	socket_unlock(inp->inp_socket, 0);
	pcb = in6_pcblookup_hash(inp->inp_pcbinfo, &sin6->sin6_addr,
	    sin6->sin6_port,
	    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) ? &addr6 : &inp->in6p_laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(inp->inp_socket, 0);
	if (pcb != NULL) {
		/* 4-tuple already in use; release the lookup reference. */
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return (EADDRINUSE);
	}
	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
		if (inp->inp_lport == 0) {
			/* No local port yet either: bind to an ephemeral one. */
			error = in6_pcbbind(inp, (struct sockaddr *)0, p);
			if (error)
				return (error);
		}
		inp->in6p_laddr = addr6;
		inp->in6p_last_outif = outif;
	}
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	/* Commit the foreign address/port under the pcbinfo write lock. */
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	/* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
	inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
	if (inp->in6p_flags & IN6P_AUTOFLOWLABEL)
		inp->in6p_flowinfo |=
		    (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);

	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	return (0);
}
/*
 * Vnode op for read.
 *
 * Reads from the fifo's internal read socket via soreceive().  To honor
 * fifo conformance, a read with no writers present must not block: the
 * receive buffer is probed first and, if empty, soreceive() is skipped
 * and EOF (zero) is returned instead.
 */
int
fifo_read(struct vnop_read_args *ap)
{
	struct uio *uio = ap->a_uio;
	struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
	user_ssize_t startresid;
	int error;
	int rflags;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("fifo_read mode");
#endif
	if (uio_resid(uio) == 0)
		return (0);

	/* Non-blocking open translates into a non-blocking receive. */
	rflags = (ap->a_ioflag & IO_NDELAY) ? MSG_NBIO : 0;

	startresid = uio_resid(uio);

	/* fifo conformance - if we have a reader open on the fifo but no
	 * writers then we need to make sure we do not block. We do that by
	 * checking the receive buffer and if empty set error to EWOULDBLOCK.
	 * If error is set to EWOULDBLOCK we skip the call into soreceive
	 */
	error = 0;
	if (ap->a_vp->v_fifoinfo->fi_writers < 1) {
		socket_lock(rso, 1);
		error = (rso->so_rcv.sb_cc == 0) ? EWOULDBLOCK : 0;
		socket_unlock(rso, 1);
	}

	/* skip soreceive to avoid blocking when we have no writers */
	if (error != EWOULDBLOCK) {
		error = soreceive(rso, (struct sockaddr **)0, uio,
		    (struct mbuf **)0, (struct mbuf **)0, &rflags);
		if (error == 0)
			lock_vnode_and_post(ap->a_vp, 0);
	} else {
		/* clear EWOULDBLOCK and return EOF (zero) */
		error = 0;
	}
	/*
	 * Clear EOF indication after first such return.
	 * (Nothing was transferred, so drop SS_CANTRCVMORE so that a
	 * later writer can make subsequent reads succeed.)
	 */
	if (uio_resid(uio) == startresid) {
		socket_lock(rso, 1);
		rso->so_state &= ~SS_CANTRCVMORE;
		socket_unlock(rso, 1);
	}
	return (error);
}
//============================================================================== // Send // - Sends packet to a specific connection. //============================================================================== void ConnectionHandler::send( boost::shared_ptr<Packet> _packet ) { // Get connection boost::mutex::scoped_lock connection_lock( connection_mutex ); boost::shared_ptr<Connection> connection = this->getConnectionByID( _packet->id() ); if( connection.get() == NULL ) return; // Update timers connection->ack_timer.expires_from_now( boost::posix_time::milliseconds(SEND_ACK_MS) ); connection->ack_timer.async_wait( boost::bind(&ConnectionHandler::requestAck, this, connection, boost::asio::placeholders::error) ); if( m_packet_handler->getPacketSetting(_packet->msg())->reliable && !m_network_handler->isServer() ) { connection->sync_timer.expires_from_now( boost::posix_time::milliseconds(CLOCK_SYNC_MS) ); connection->sync_timer.async_wait( boost::bind(&ConnectionHandler::requestAck, this, connection, boost::asio::placeholders::error) ); } // Send packet boost::mutex::scoped_lock socket_lock( m_socket_mutex ); m_socket->async_send_to( boost::asio::buffer(_packet->data(), _packet->bytes()), connection->endpoint, boost::bind(&ConnectionHandler::sent, this, _packet, boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred) ); }
//============================================================================== // Listen // - Listens for incoming connections asynchronous. // - Runs receive() when data is received. //============================================================================== void ConnectionHandler::listen() { boost::mutex::scoped_lock socket_lock( m_socket_mutex ); m_socket->async_receive_from( buffer(m_receive_buffer), m_receive_endpoint, boost::bind(&ConnectionHandler::receive, this, boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred) ); }
/*
 * select/poll entry point for sockets.
 *
 * which selects the condition to test: FREAD (readable), FWRITE
 * (writeable), or 0 (exceptional: OOB data pending/at mark).
 * Returns 1 if the condition is already satisfied; otherwise records
 * the caller with selrecord() for later wakeup and returns 0.
 */
int
soo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
{
	struct socket *so = (struct socket *)fp->f_fglob->fg_data;
	int retnum = 0;
	proc_t procp;

	/* -1 is used as a sentinel for an invalidated socket pointer. */
	if (so == NULL || so == (struct socket *)-1)
		return (0);

	procp = vfs_context_proc(ctx);

#if CONFIG_MACF_SOCKET
	/* MAC framework veto: report "not ready" if access is denied. */
	if (mac_socket_check_select(vfs_context_ucred(ctx), so, which) != 0)
		return (0);
#endif /* CONFIG_MACF_SOCKET */

	socket_lock(so, 1);
	switch (which) {
	case FREAD:
		/* Mark the buffer as selected on; clear again if ready now. */
		so->so_rcv.sb_flags |= SB_SEL;
		if (soreadable(so)) {
			retnum = 1;
			so->so_rcv.sb_flags &= ~SB_SEL;
			goto done;
		}
		selrecord(procp, &so->so_rcv.sb_sel, wql);
		break;

	case FWRITE:
		so->so_snd.sb_flags |= SB_SEL;
		if (sowriteable(so)) {
			retnum = 1;
			so->so_snd.sb_flags &= ~SB_SEL;
			goto done;
		}
		selrecord(procp, &so->so_snd.sb_sel, wql);
		break;

	case 0:
		/* Exceptional condition: out-of-band data pending or at mark. */
		so->so_rcv.sb_flags |= SB_SEL;
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
			retnum = 1;
			so->so_rcv.sb_flags &= ~SB_SEL;
			goto done;
		}
		selrecord(procp, &so->so_rcv.sb_sel, wql);
		break;
	}

done:
	socket_unlock(so, 1);
	return (retnum);
}
/* You'd certainly better have an iocount on the vnode! */
int
fifo_freespace(struct vnode *vp, long *count)
{
	struct socket *rso = vp->v_fifoinfo->fi_readsock;

	/*
	 * Report the free space in the fifo's read-side receive buffer.
	 * The socket lock is held so sbspace() sees a consistent buffer.
	 */
	socket_lock(rso, 1);
	*count = sbspace(&rso->so_rcv);
	socket_unlock(rso, 1);

	return 0;
}
/*
 * stat/fstat entry point for sockets.
 *
 * ub points at either a struct stat or a struct stat64, selected by
 * isstat64; the two branches below fill in the same logical fields.
 * Read permission is reported while more data can arrive (or is already
 * buffered); write permission while the socket can still send.
 * The protocol's pru_sense hook gets the last word before returning.
 */
int
soo_stat(struct socket *so, void *ub, int isstat64)
{
	int ret;
	/* warning avoidance ; protected by isstat64 */
	struct stat *sb = (struct stat *)0;
	/* warning avoidance ; protected by isstat64 */
	struct stat64 *sb64 = (struct stat64 *)0;

#if CONFIG_MACF_SOCKET
	ret = mac_socket_check_stat(kauth_cred_get(), so);
	if (ret)
		return (ret);
#endif

	if (isstat64 != 0) {
		sb64 = (struct stat64 *)ub;
		bzero((caddr_t)sb64, sizeof (*sb64));
	} else {
		sb = (struct stat *)ub;
		bzero((caddr_t)sb, sizeof (*sb));
	}

	socket_lock(so, 1);
	if (isstat64 != 0) {
		sb64->st_mode = S_IFSOCK;
		if ((so->so_state & SS_CANTRCVMORE) == 0 ||
		    so->so_rcv.sb_cc != 0)
			sb64->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
		if ((so->so_state & SS_CANTSENDMORE) == 0)
			sb64->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
		/* Size is the readable payload: buffered bytes minus control. */
		sb64->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
		sb64->st_uid = kauth_cred_getuid(so->so_cred);
		sb64->st_gid = kauth_cred_getgid(so->so_cred);
	} else {
		sb->st_mode = S_IFSOCK;
		if ((so->so_state & SS_CANTRCVMORE) == 0 ||
		    so->so_rcv.sb_cc != 0)
			sb->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
		if ((so->so_state & SS_CANTSENDMORE) == 0)
			sb->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
		sb->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
		sb->st_uid = kauth_cred_getuid(so->so_cred);
		sb->st_gid = kauth_cred_getgid(so->so_cred);
	}

	/* Let the protocol layer fill in / override fields. */
	ret = (*so->so_proto->pr_usrreqs->pru_sense)(so, ub, isstat64);
	socket_unlock(so, 1);
	return (ret);
}
/*
 * Function: dhcp_timeout
 * Purpose:
 *   Wakeup the process waiting for something on a socket.
 */
static void
dhcp_timeout(void * arg)
{
	struct socket * * so_holder;
	struct socket * so;

	so_holder = (struct socket * *)arg;
	so = *so_holder;
	dprintf(("dhcp: timeout\n"));

	/* Clear the holder first: the waiter treats NULL as "timer fired". */
	*so_holder = NULL;

	/* Wake whoever is sleeping on the socket's receive buffer. */
	socket_lock(so, 1);
	sowakeup(so, &so->so_rcv);
	socket_unlock(so, 1);
	return;
}
/*
 * Count the PCBs marked "opportunistic" whose last output interface
 * matches ifindex.  If INPCB_OPPORTUNISTIC_SETCMD is set in flags,
 * additionally toggle suspend/resume on each matching connected socket:
 * INPCB_OPPORTUNISTIC_THROTTLEON suspends, otherwise resumes.
 * Walks the PCB list under the pcbinfo shared lock; returns the count.
 */
__private_extern__ uint32_t
inpcb_count_opportunistic(unsigned int ifindex, struct inpcbinfo *pcbinfo,
    u_int32_t flags)
{
	uint32_t opportunistic = 0;
	struct inpcb *inp;
	inp_gen_t gencnt;

	lck_rw_lock_shared(pcbinfo->ipi_lock);
	/* Snapshot the generation count to skip PCBs created mid-walk. */
	gencnt = pcbinfo->ipi_gencnt;
	for (inp = LIST_FIRST(pcbinfo->ipi_listhead);
	    inp != NULL; inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD &&
		    inp->inp_socket != NULL &&
		    so_get_opportunistic(inp->inp_socket) &&
		    inp->inp_last_outifp != NULL &&
		    ifindex == inp->inp_last_outifp->if_index) {
			opportunistic++;
			struct socket *so = inp->inp_socket;
			/*
			 * NOTE(review): so_state is tested before the socket
			 * lock is taken below — confirm that's acceptable for
			 * this check.
			 */
			if ((flags & INPCB_OPPORTUNISTIC_SETCMD) &&
			    (so->so_state & SS_ISCONNECTED)) {
				socket_lock(so, 1);
				if (flags & INPCB_OPPORTUNISTIC_THROTTLEON) {
					so->so_flags |= SOF_SUSPENDED;
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_SUSPEND));
				} else {
					so->so_flags &= ~(SOF_SUSPENDED);
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_RESUME));
				}
				SOTHROTTLELOG(("throttle[%d]: so 0x%llx "
				    "[%d,%d] %s\n", so->last_pid,
				    (uint64_t)VM_KERNEL_ADDRPERM(so),
				    SOCK_DOM(so), SOCK_TYPE(so),
				    (so->so_flags & SOF_SUSPENDED) ?
				    "SUSPENDED" : "RESUMED"));
				socket_unlock(so, 1);
			}
		}
	}

	lck_rw_done(pcbinfo->ipi_lock);

	return (opportunistic);
}
/*
 * Disconnect an IPv6 PCB: clear the foreign address/port and flow label,
 * then rehash.  Takes the pcbinfo write lock, dropping the socket lock
 * if necessary to respect lock ordering.  If the socket has no remaining
 * file descriptor reference (SS_NOFDREF), the PCB is detached as well.
 */
void
in6_pcbdisconnect(
	struct inpcb *inp)
{
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		/* lock inversion issue, mostly with udp multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	bzero((caddr_t)&inp->in6p_faddr, sizeof(inp->in6p_faddr));
	inp->inp_fport = 0;
	/* clear flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
	inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	if (inp->inp_socket->so_state & SS_NOFDREF)
		in6_pcbdetach(inp);
}
/*
 * Drain a socket-backed file: mark the socket as draining and wake
 * every sleeper (timeout waiters plus read/write waiters).
 * Always returns 0.
 */
static int
soo_drain(struct fileproc *fp, __unused vfs_context_t ctx)
{
	struct socket *so = (struct socket *)fp->f_fglob->fg_data;

	if (so == NULL)
		return (0);

	socket_lock(so, 1);
	so->so_state |= SS_DRAINING;
	wakeup((caddr_t)&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
	socket_unlock(so, 1);

	return (0);
}
/*
 * Wakeup processes waiting on a socket buffer.
 * Do asynchronous notification via SIGIO
 * if the socket has the SS_ASYNC flag set.
 *
 * NOTE: in this build only the select wakeup and SB_WAIT wakeup are
 * active; the SIGIO / knote / upcall paths below are intentionally
 * disabled under #if 0.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	/* Clear the select flag and wake select()/poll() waiters. */
	sb->sb_flags &= ~SB_SEL;
	selwakeup(&sb->sb_sel);
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup((caddr_t)&sb->sb_cc);
	}
#if 0
	/* Intentionally commented */
	if (so->so_state & SS_ASYNC) {
		if (so->so_pgid < 0)
			gsignal(-so->so_pgid, SIGIO);
		else if (so->so_pgid > 0)
			proc_signal(so->so_pgid, SIGIO);
	}
	if (sb->sb_flags & SB_KNOTE) {
		KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
	}
	if (sb->sb_flags & SB_UPCALL) {
		void (*so_upcall)(struct socket *, caddr_t, int);
		caddr_t so_upcallarg;

		so_upcall = so->so_upcall;
		so_upcallarg = so->so_upcallarg;
		/* Let close know that we're about to do an upcall */
		so->so_flags |= SOF_UPCALLINUSE;
		socket_unlock(so, 0);
		(*so_upcall)(so, so_upcallarg, M_DONTWAIT);
		socket_lock(so, 0);
		so->so_flags &= ~SOF_UPCALLINUSE;
		/* Tell close that it's safe to proceed */
		if (so->so_flags & SOF_CLOSEWAIT)
			wakeup((caddr_t)&so->so_upcall);
	}
#endif
}
/*
 * Detach the raw connection block and discard
 * socket resources.
 *
 * Severs the socket<->rawcb link, removes the rawcb from the global
 * list (taking raw_mtx, dropping the socket lock if needed to avoid
 * inversion), and frees the rawcb itself.
 */
void
raw_detach(struct rawcb *rp)
{
	struct socket *so = rp->rcb_socket;

	/* Break the socket's back-pointer and let sofree() reclaim it. */
	so->so_pcb = 0;
	so->so_flags |= SOF_PCBCLEARING;
	sofree(so);
	if (!lck_mtx_try_lock(raw_mtx)) {
		/* Avoid lock inversion: drop socket lock, then take raw_mtx. */
		socket_unlock(so, 0);
		lck_mtx_lock(raw_mtx);
		socket_lock(so, 0);
	}
	LIST_REMOVE(rp, list);
	lck_mtx_unlock(raw_mtx);
#ifdef notdef
	if (rp->rcb_laddr)
		m_freem(dtom(rp->rcb_laddr));
	rp->rcb_laddr = 0;
#endif
	rp->rcb_socket = NULL;
	FREE((caddr_t)(rp), M_PCB);
}
int server_mode_add_socket_with_lock(struct wldbg *wldbg, const char *name) { int sock; char *path; path = get_socket_path(name); if (!path) return -1; wldbg->server_mode.wldbg_socket_path = path; sock = server_mode_add_socket(wldbg, path); if (sock < 0) return -1; dbg("Taking lock on socket: %s\n", path); if (socket_lock(wldbg, path) < 0) { fprintf(stderr, "Failed locking socket\n"); close(sock); return -1; } return sock; }
/*
 * Bind an IPv6 PCB to a local address and/or port.
 *
 * nam, if non-NULL, supplies the requested sockaddr_in6; a NULL nam (or
 * zero port) means "pick an ephemeral port" via in6_pcbsetport().
 * Performs the full duplicate-binding policy: reserved-port privilege
 * check, per-uid SOF_REUSESHAREUID checks, SO_REUSEADDR/SO_REUSEPORT
 * conflict checks, and — for v4-mapped wildcard binds — the equivalent
 * IPv4 lookups.  The socket lock is dropped (a reference is kept) while
 * the pcbinfo write lock is held for the lookups, then retaken.
 * Returns 0 on success or an errno (EINVAL, EAFNOSUPPORT, EACCES,
 * EADDRNOTAVAIL, EADDRINUSE, EAGAIN, ...).
 */
int
in6_pcbbind(
	struct inpcb *inp,
	struct sockaddr *nam,
	struct proc *p)
{
	struct socket *so = inp->inp_socket;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)NULL;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);

	if (!in6_ifaddrs) /* XXX broken! */
		return (EADDRNOTAVAIL);
	/* Already bound? */
	if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		return(EINVAL);
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = 1;
	socket_unlock(so, 0); /* keep reference */
	lck_rw_lock_exclusive(pcbinfo->mtx);
	if (nam) {
		sin6 = (struct sockaddr_in6 *)nam;
		if (nam->sa_len != sizeof(*sin6)) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return(EINVAL);
		}
		/*
		 * family check.
		 */
		if (nam->sa_family != AF_INET6) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return(EAFNOSUPPORT);
		}
		/* KAME hack: embed scopeid */
		if (in6_embedscope(&sin6->sin6_addr, sin6, inp, NULL) != 0) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return EINVAL;
		}
		/* this must be cleared for ifa_ifwithaddr() */
		sin6->sin6_scope_id = 0;

		lport = sin6->sin6_port;
		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			struct ifaddr *ia = NULL;

			sin6->sin6_port = 0; /* yech... */
			if ((ia = ifa_ifwithaddr((struct sockaddr *)sin6)) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRNOTAVAIL);
			}
			/*
			 * XXX: bind to an anycast address might accidentally
			 * cause sending a packet with anycast source address.
			 * We should allow to bind to a deprecated address, since
			 * the application dare to use it.
			 */
			if (ia &&
			    ((struct in6_ifaddr *)ia)->ia6_flags &
			    (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|IN6_IFF_DETACHED)) {
				ifafree(ia);
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRNOTAVAIL);
			}
			ifafree(ia);
			ia = NULL;
		}
		if (lport) {
			struct inpcb *t;

			/* GROSS */
			/* Reserved ports require privilege (SS_PRIV). */
			if (ntohs(lport) < IPV6PORT_RESERVED && p &&
			    ((so->so_state & SS_PRIV) == 0)) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EACCES);
			}
			/*
			 * Non-root, non-multicast: refuse to bind over a
			 * different uid's binding unless both sides opted
			 * into sharing.
			 */
			if (so->so_uid &&
			    !IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				t = in6_pcblookup_local_and_cleanup(pcbinfo,
				    &sin6->sin6_addr, lport,
				    INPLOOKUP_WILDCARD);
				if (t &&
				    (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
				     !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) ||
				     (t->inp_socket->so_options & SO_REUSEPORT)
				     == 0) &&
				    (so->so_uid != t->inp_socket->so_uid) &&
				    ((t->inp_socket->so_flags &
				      SOF_REUSESHAREUID) == 0)) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
				/* Wildcard v6 bind also collides with v4. */
				if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
				    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
					struct sockaddr_in sin;

					in6_sin6_2_sin(&sin, sin6);
					t = in_pcblookup_local_and_cleanup(
					    pcbinfo, sin.sin_addr, lport,
					    INPLOOKUP_WILDCARD);
					if (t &&
					    (t->inp_socket->so_options &
					     SO_REUSEPORT) == 0 &&
					    (so->so_uid !=
					     t->inp_socket->so_uid) &&
					    (ntohl(t->inp_laddr.s_addr) !=
					     INADDR_ANY ||
					     INP_SOCKAF(so) ==
					     INP_SOCKAF(t->inp_socket))) {
						lck_rw_done(pcbinfo->mtx);
						socket_lock(so, 0);
						return (EADDRINUSE);
					}
				}
			}
			/* General SO_REUSEADDR/SO_REUSEPORT conflict check. */
			t = in6_pcblookup_local_and_cleanup(pcbinfo,
			    &sin6->sin6_addr, lport, wild);
			if (t && (reuseport & t->inp_socket->so_options) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRINUSE);
			}
			if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
			    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				struct sockaddr_in sin;

				in6_sin6_2_sin(&sin, sin6);
				t = in_pcblookup_local_and_cleanup(pcbinfo,
				    sin.sin_addr, lport, wild);
				if (t &&
				    (reuseport & t->inp_socket->so_options)
				    == 0 &&
				    (ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
				     INP_SOCKAF(so) ==
				     INP_SOCKAF(t->inp_socket))) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
			}
		}
		inp->in6p_laddr = sin6->sin6_addr;
	}
	socket_lock(so, 0);
	if (lport == 0) {
		/* No explicit port requested: allocate an ephemeral one. */
		int e;
		if ((e = in6_pcbsetport(&inp->in6p_laddr, inp, p, 1)) != 0) {
			lck_rw_done(pcbinfo->mtx);
			return(e);
		}
	} else {
		inp->inp_lport = lport;
		if (in_pcbinshash(inp, 1) != 0) {
			/* Hash insert failed: undo the partial bind. */
			inp->in6p_laddr = in6addr_any;
			inp->inp_lport = 0;
			lck_rw_done(pcbinfo->mtx);
			return (EAGAIN);
		}
	}
	lck_rw_done(pcbinfo->mtx);
	sflt_notify(so, sock_evt_bound, NULL);
	return(0);
}
/*
 * Kernel (KPI) accept: take the next completed connection off the
 * listening socket's queue and return it in *new_sock.
 *
 * Blocks (interruptibly) unless MSG_DONTWAIT or SS_NBIO is set, in
 * which case EWOULDBLOCK is returned when the queue is empty.  If a
 * socket filter is attached to the new socket, the listener's lock is
 * temporarily dropped (reference kept) around soacceptfilter(); on
 * filter failure the new socket has been closed by the callee.  If
 * callback is non-NULL it is installed on the new socket, and the
 * peer address is copied into from/fromlen if supplied.
 * Returns 0 or an errno.
 */
errno_t
sock_accept(socket_t sock, struct sockaddr *from, int fromlen, int flags,
    sock_upcall callback, void *cookie, socket_t *new_sock)
{
	struct sockaddr *sa;
	struct socket *new_so;
	lck_mtx_t *mutex_held;
	int dosocklock;
	errno_t error = 0;

	if (sock == NULL || new_sock == NULL)
		return (EINVAL);

	socket_lock(sock, 1);
	/* Must be a listening socket. */
	if ((sock->so_options & SO_ACCEPTCONN) == 0) {
		socket_unlock(sock, 1);
		return (EINVAL);
	}
	/* MSG_DONTWAIT is the only flag supported. */
	if ((flags & ~(MSG_DONTWAIT)) != 0) {
		socket_unlock(sock, 1);
		return (ENOTSUP);
	}
	if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) &&
	    sock->so_comp.tqh_first == NULL) {
		socket_unlock(sock, 1);
		return (EWOULDBLOCK);
	}

	/*
	 * Figure out which mutex protects the socket so msleep() can
	 * drop/retake the right one: per-socket lock if the protocol
	 * provides one, otherwise the domain mutex.
	 */
	if (sock->so_proto->pr_getlock != NULL) {
		mutex_held = (*sock->so_proto->pr_getlock)(sock, 0);
		dosocklock = 1;
	} else {
		mutex_held = sock->so_proto->pr_domain->dom_mtx;
		dosocklock = 0;
	}

	/* Wait for a completed connection (or an error). */
	while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
		if (sock->so_state & SS_CANTRCVMORE) {
			sock->so_error = ECONNABORTED;
			break;
		}
		error = msleep((caddr_t)&sock->so_timeo, mutex_held,
		    PSOCK | PCATCH, "sock_accept", NULL);
		if (error != 0) {
			socket_unlock(sock, 1);
			return (error);
		}
	}
	if (sock->so_error != 0) {
		error = sock->so_error;
		sock->so_error = 0;
		socket_unlock(sock, 1);
		return (error);
	}

	new_so = TAILQ_FIRST(&sock->so_comp);
	TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
	sock->so_qlen--;
	/*
	 * Pass the pre-accepted socket to any interested socket filter(s).
	 * Upon failure, the socket would have been closed by the callee.
	 */
	if (new_so->so_filt != NULL) {
		/*
		 * Temporarily drop the listening socket's lock before we
		 * hand off control over to the socket filter(s), but keep
		 * a reference so that it won't go away. We'll grab it
		 * again once we're done with the filter(s).
		 */
		socket_unlock(sock, 0);
		if ((error = soacceptfilter(new_so)) != 0) {
			/* Drop reference on listening socket */
			sodereference(sock);
			return (error);
		}
		socket_lock(sock, 0);
	}

	if (dosocklock) {
		lck_mtx_assert(new_so->so_proto->pr_getlock(new_so, 0),
		    LCK_MTX_ASSERT_NOTOWNED);
		socket_lock(new_so, 1);
	}

	/* Detach the new socket from the listener and finish the accept. */
	new_so->so_state &= ~SS_COMP;
	new_so->so_head = NULL;
	(void) soacceptlock(new_so, &sa, 0);

	socket_unlock(sock, 1); /* release the head */

	/* see comments in sock_setupcall() */
	if (callback != NULL) {
		sock_setupcalls_common(new_so, callback, cookie, NULL, NULL);
	}

	/* Copy out the peer address, truncated to the caller's buffer. */
	if (sa != NULL && from != NULL) {
		if (fromlen > sa->sa_len)
			fromlen = sa->sa_len;
		memcpy(from, sa, fromlen);
	}
	if (sa != NULL)
		FREE(sa, M_SONAME);

	/*
	 * If the socket has been marked as inactive by sosetdefunct(),
	 * disallow further operations on it.
	 */
	if (new_so->so_flags & SOF_DEFUNCT) {
		(void) sodefunct(current_proc(), new_so,
		    SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL);
	}
	*new_sock = new_so;
	if (dosocklock)
		socket_unlock(new_so, 1);
	return (error);
}
/*
 * Vnode op for opening a fifo.
 *
 * The first opener creates the backing socket pair (read side + write
 * side, connected with soconnect2()); concurrent openers wait on
 * FIFO_INCREATE/FIFO_CREATEWAIT until creation completes.  Reader and
 * writer counts are then updated and the usual fifo open semantics
 * applied: a blocking reader waits for a writer, a blocking writer
 * waits for a reader, and a non-blocking writer with no readers gets
 * ENXIO.  Returns 0 or an errno.
 */
/* ARGSUSED */
int
fifo_open(struct vnop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct fifoinfo *fip;
	struct socket *rso, *wso;
	int error;

	vnode_lock(vp);
retry:
	fip = vp->v_fifoinfo;
	if (fip == (struct fifoinfo *)0)
		panic("fifo_open with no fifoinfo");

	if ((fip->fi_flags & FIFO_CREATED) == 0) {
		if (fip->fi_flags & FIFO_INCREATE) {
			/* Another opener is creating the sockets: wait. */
			fip->fi_flags |= FIFO_CREATEWAIT;
			error = msleep(&fip->fi_flags, &vp->v_lock,
			    PRIBIO | PCATCH, "fifocreatewait", NULL);
			if (error) {
				vnode_unlock(vp);
				return(error);
			}
			goto retry;
		} else {
			fip->fi_flags |= FIFO_INCREATE;
			vnode_unlock(vp);
			if ( (error = socreate(AF_LOCAL, &rso, SOCK_STREAM,
			    0)) ) {
				goto bad1;
			}
			if ( (error = socreate(AF_LOCAL, &wso, SOCK_STREAM,
			    0)) ) {
				(void)soclose(rso);
				goto bad1;
			}
			if ( (error = soconnect2(wso, rso)) ) {
				(void)soclose(wso);
				(void)soclose(rso);
				goto bad1;
			}
			fip->fi_readers = fip->fi_writers = 0;
			/* Lock ordering between wso and rso does not matter
			 * here because they are just created and no one has
			 * a reference to them
			 */
			socket_lock(wso, 1);
			/* Write side: only sends; start closed-for-send. */
			wso->so_state |= SS_CANTRCVMORE;
			wso->so_snd.sb_lowat = PIPE_BUF;
			socket_unlock(wso, 1);
			socket_lock(rso, 1);
			/* Read side: only receives. */
			rso->so_state |= SS_CANTSENDMORE;
			socket_unlock(rso, 1);
			vnode_lock(vp);
			fip->fi_readsock = rso;
			fip->fi_writesock = wso;
			fip->fi_flags |= FIFO_CREATED;
			fip->fi_flags &= ~FIFO_INCREATE;

			if ((fip->fi_flags & FIFO_CREATEWAIT)) {
				fip->fi_flags &= ~FIFO_CREATEWAIT;
				wakeup(&fip->fi_flags);
			}
			/* vnode lock is held to process further */
		}
	}

	/* vnode is locked at this point */
	/* fifo in created already */
	if (ap->a_mode & FREAD) {
		fip->fi_readers++;
		if (fip->fi_readers == 1) {
			/* First reader: re-enable sending, wake writers. */
			socket_lock(fip->fi_writesock, 1);
			fip->fi_writesock->so_state &= ~SS_CANTSENDMORE;
			socket_unlock(fip->fi_writesock, 1);
			if (fip->fi_writers > 0)
				wakeup((caddr_t)&fip->fi_writers);
		}
	}
	if (ap->a_mode & FWRITE) {
		fip->fi_writers++;
		if (fip->fi_writers == 1) {
			/* First writer: re-enable receiving, wake readers. */
			socket_lock(fip->fi_readsock, 1);
			fip->fi_readsock->so_state &= ~SS_CANTRCVMORE;
			socket_unlock(fip->fi_readsock, 1);
			if (fip->fi_readers > 0)
				wakeup((caddr_t)&fip->fi_readers);
		}
	}
	if ((ap->a_mode & FREAD) && (ap->a_mode & O_NONBLOCK) == 0) {
		if (fip->fi_writers == 0) {
			/* Blocking reader: wait until a writer opens. */
			error = msleep((caddr_t)&fip->fi_readers, &vp->v_lock,
			    PCATCH | PSOCK, "fifoor", NULL);
			if (error)
				goto bad;
			if (fip->fi_readers == 1) {
				if (fip->fi_writers > 0)
					wakeup((caddr_t)&fip->fi_writers);
			}
		}
	}
	if (ap->a_mode & FWRITE) {
		if (ap->a_mode & O_NONBLOCK) {
			if (fip->fi_readers == 0) {
				/* O_NONBLOCK write open with no readers. */
				error = ENXIO;
				goto bad;
			}
		} else {
			if (fip->fi_readers == 0) {
				/* Blocking writer: wait for a reader. */
				error = msleep((caddr_t)&fip->fi_writers,
				    &vp->v_lock, PCATCH | PSOCK, "fifoow",
				    NULL);
				if (error)
					goto bad;
				if (fip->fi_writers == 1) {
					if (fip->fi_readers > 0)
						wakeup((caddr_t)
						    &fip->fi_readers);
				}
			}
		}
	}
	vnode_unlock(vp);
	return (0);
bad:
	/* Undo the open-count changes (vnode stays locked throughout). */
	fifo_close_internal(vp, ap->a_mode, ap->a_context, 1);
	vnode_unlock(vp);
	return (error);
bad1:
	/* Socket creation failed: clear in-create state and wake waiters. */
	vnode_lock(vp);
	fip->fi_flags &= ~FIFO_INCREATE;

	if ((fip->fi_flags & FIFO_CREATEWAIT)) {
		fip->fi_flags &= ~FIFO_CREATEWAIT;
		wakeup(&fip->fi_flags);
	}
	vnode_unlock(vp);
	return (error);
}
/*
 * Close one end (or both) of a fifo.
 *
 * Decrements the reader/writer counts per fflag; when the last reader
 * or last writer departs, the opposite socket is shut down (socantsendmore
 * / socantrcvmore) so blocked peers see EOF/EPIPE.  Only when BOTH counts
 * reach zero are the backing sockets actually closed and the fifoinfo
 * reset.  `locked` says whether the caller already holds the vnode lock.
 * Returns the first soclose() error, if any.
 */
int
fifo_close_internal(vnode_t vp, int fflag, __unused vfs_context_t context,
    int locked)
{
	struct fifoinfo *fip = vp->v_fifoinfo;
	int error1, error2;
	struct socket *rso;
	struct socket *wso;

	if (!locked)
		vnode_lock(vp);

	if ((fip->fi_flags & FIFO_CREATED) == 0) {
		if (!locked)
			vnode_unlock(vp);
		return(0);

	}

	if (fflag & FREAD) {
		fip->fi_readers--;
		if (fip->fi_readers == 0){
			/* Last reader gone: writers get EPIPE/SIGPIPE. */
			socket_lock(fip->fi_writesock, 1);
			socantsendmore(fip->fi_writesock);
			socket_unlock(fip->fi_writesock, 1);
		}
	}
	if (fflag & FWRITE) {
		fip->fi_writers--;
		if (fip->fi_writers == 0) {
			/* Last writer gone: readers get EOF. */
			socket_lock(fip->fi_readsock, 1);
			socantrcvmore(fip->fi_readsock);
			socket_unlock(fip->fi_readsock, 1);
		}
	}
#if 0
	if (vnode_isinuse_locked(vp, 0, 1)) {
		if (!locked)
			vnode_unlock(vp);
		return (0);
	}
#endif

	/* Still open on one side: keep the socket pair alive. */
	if (fip->fi_writers || fip->fi_readers) {
		if (!locked)
			vnode_unlock(vp);
		return (0);
	}

	/* Both sides closed: tear down the socket pair. */
	wso = fip->fi_writesock;
	rso = fip->fi_readsock;
	fip->fi_readsock = NULL;
	fip->fi_writesock = NULL;
	fip->fi_flags &= ~FIFO_CREATED;

	if (!locked)
		vnode_unlock(vp);
	error1 = soclose(rso);
	error2 = soclose(wso);

	if (error1)
		return (error1);
	return (error2);
}
/*
 * Wait (up to wait_ticks) for a DHCP ACK matching our transaction.
 *
 * Arms dhcp_timeout() on the local `timer_arg`; the timer firing sets
 * timer_arg to NULL, which the loop below uses to detect expiry.  On a
 * matching ACK, stores the offered IP address, netmask, and router into
 * the context.  A NAK from the chosen server yields EPROTO.  Returns 0
 * on success, EWOULDBLOCK-derived 0 paths never occur here (timeout
 * returns the last receive error).
 */
static int
dhcp_get_ack(struct dhcp_context * context, int wait_ticks)
{
	int			error = 0;
	const struct in_addr * 	ip;
	int			len;
	int			n;
	struct dhcp *		reply;
	struct in_addr		server_id;
	struct socket *		timer_arg;

	timer_arg = context->so;
	reply = dhcp_context_reply(context);
	timeout((timeout_fcn_t)dhcp_timeout, &timer_arg, wait_ticks);
	while (1) {
		error = receive_packet(context->so, context->reply,
		    sizeof(context->reply), &n);
		if (error == 0) {
			dhcp_msgtype_t	msg;
			dhcpol_t	options;

			dprintf(("\ndhcp: received packet length %d\n", n));
			if (n < (int)sizeof(struct dhcp)) {
				dprintf(("dhcp: packet is too short %d < %d\n",
				    n, (int)sizeof(struct dhcp)));
				continue;
			}
			/* Match transaction id and our hardware address. */
			if (ntohl(reply->dp_xid) != context->xid
			    || bcmp(reply->dp_chaddr,
			    link_address(context->dl_p),
			    link_address_length(context->dl_p)) != 0) {
				/* not for us */
				continue;
			}
			(void)dhcpol_parse_packet(&options, reply, n);
			server_id.s_addr = 0;
			ip = (const struct in_addr *)
			    dhcpol_find(&options, dhcptag_server_identifier_e,
			    &len, NULL);
			if (ip != NULL && len >= (int)sizeof(*ip)) {
				server_id = *ip;
			}
			msg = get_dhcp_msgtype(&options);
			if (msg == dhcp_msgtype_nak_e
			    && server_id.s_addr == context->server_id.s_addr) {
				/* server NAK'd us, start over */
				dhcpol_free(&options);
				error = EPROTO;
				untimeout((timeout_fcn_t)dhcp_timeout,
				    &timer_arg);
				break;
			}
			if (msg != dhcp_msgtype_ack_e
			    || reply->dp_yiaddr.s_addr == 0
			    || reply->dp_yiaddr.s_addr == INADDR_BROADCAST) {
				/* ignore the packet */
				goto next_packet;
			}
			printf("dhcp: received ACK: server " IP_FORMAT
			    " IP address " IP_FORMAT "\n",
			    IP_LIST(&server_id), IP_LIST(&reply->dp_yiaddr));
			/* Record the lease parameters from the ACK. */
			context->iaddr = reply->dp_yiaddr;
			ip = (const struct in_addr *)
			    dhcpol_find(&options, dhcptag_subnet_mask_e,
			    &len, NULL);
			if (ip != NULL && len >= (int)sizeof(*ip)) {
				context->netmask = *ip;
			}
			ip = (const struct in_addr *)
			    dhcpol_find(&options, dhcptag_router_e, &len, NULL);
			if (ip != NULL && len >= (int)sizeof(*ip)) {
				context->router = *ip;
			}
			dhcpol_free(&options);
			untimeout((timeout_fcn_t)dhcp_timeout, &timer_arg);
			break;

		next_packet:
			dhcpol_free(&options);
		}
		else if ((error != EWOULDBLOCK)) {
			/* if some other error occurred, we're done */
			untimeout((timeout_fcn_t)dhcp_timeout, &timer_arg);
			break;
		}
		else if (timer_arg == NULL) {
			/* timed out (dhcp_timeout cleared timer_arg) */
			break;
		}
		else {
			/* wait for a packet to arrive, or a timeout to occur */
			socket_lock(context->so, 1);
			error = sbwait(&context->so->so_rcv);
			socket_unlock(context->so, 1);
		}
	}
	return (error);
}
/*
 * Wait (up to wait_ticks) for DHCP OFFER packets and pick the best one.
 *
 * Each matching OFFER is scored with rate_packet(); a better-rated offer
 * replaces the recorded address/netmask/router/server_id.  An offer
 * rated >= GOOD_RATING is accepted immediately; otherwise the first
 * offer starts a GATHER_TIME_SECS window to collect more.  The timer
 * (dhcp_timeout) signals expiry by clearing the local `timer_arg`.
 * Returns 0 on success (at least one offer gathered), else an errno.
 */
static int
dhcp_get_offer(struct dhcp_context * context, int wait_ticks)
{
	int			error = 0;
	int			gather_count = 0;
	const struct in_addr * 	ip;
	int			last_rating = 0;
	int			len;
	int			n;
	int			rating;
	struct dhcp *		reply;
	struct in_addr		server_id;
	struct socket *		timer_arg;

	timer_arg = context->so;
	reply = dhcp_context_reply(context);
	timeout((timeout_fcn_t)dhcp_timeout, &timer_arg, wait_ticks);
	while (1) {
		error = receive_packet(context->so, context->reply,
		    sizeof(context->reply), &n);
		if (error == 0) {
			dhcpol_t		options;

			dprintf(("\ndhcp: received packet length %d\n", n));
			if (n < (int)sizeof(struct dhcp)) {
				dprintf(("dhcp: packet is too short %d < %d\n",
				    n, (int)sizeof(struct dhcp)));
				continue;
			}
			/* Match xid, sanity-check yiaddr, match our hwaddr. */
			if (ntohl(reply->dp_xid) != context->xid
			    || reply->dp_yiaddr.s_addr == 0
			    || reply->dp_yiaddr.s_addr == INADDR_BROADCAST
			    || bcmp(reply->dp_chaddr,
			    link_address(context->dl_p),
			    link_address_length(context->dl_p)) != 0) {
				/* not for us */
				continue;
			}
			(void)dhcpol_parse_packet(&options, reply, n);
			if (get_dhcp_msgtype(&options)
			    != dhcp_msgtype_offer_e) {
				/* not an offer */
				goto next_packet;
			}
			ip = (const struct in_addr *)
			    dhcpol_find(&options, dhcptag_server_identifier_e,
			    &len, NULL);
			if (ip == NULL || len < (int)sizeof(*ip)) {
				/* missing/invalid server identifier */
				goto next_packet;
			}
			printf("dhcp: received OFFER: server " IP_FORMAT
			    " IP address " IP_FORMAT "\n",
			    IP_LIST(ip), IP_LIST(&reply->dp_yiaddr));
			server_id = *ip;
			rating = rate_packet(&options);
			if (rating > last_rating) {
				/* Best offer so far: record its parameters. */
				context->iaddr = reply->dp_yiaddr;
				ip = (const struct in_addr *)
				    dhcpol_find(&options,
				    dhcptag_subnet_mask_e, &len, NULL);
				if (ip != NULL && len >= (int)sizeof(*ip)) {
					context->netmask = *ip;
				}
				ip = (const struct in_addr *)
				    dhcpol_find(&options, dhcptag_router_e,
				    &len, NULL);
				if (ip != NULL && len >= (int)sizeof(*ip)) {
					context->router = *ip;
				}
				context->server_id = server_id;
			}
			if (rating >= GOOD_RATING) {
				dhcpol_free(&options);
				/* packet is good enough */
				untimeout((timeout_fcn_t)dhcp_timeout,
				    &timer_arg);
				break;
			}
			if (gather_count == 0) {
				/* First usable offer: switch to gather timer. */
				untimeout((timeout_fcn_t)dhcp_timeout,
				    &timer_arg);
				timer_arg = context->so;
				timeout((timeout_fcn_t)dhcp_timeout,
				    &timer_arg, hz * GATHER_TIME_SECS);
			}
			gather_count = 1;
		next_packet:
			dhcpol_free(&options);
		}
		else if ((error != EWOULDBLOCK)) {
			untimeout((timeout_fcn_t)dhcp_timeout, &timer_arg);
			break;
		}
		else if (timer_arg == NULL) {
			/* timed out */
			if (gather_count != 0) {
				/* We already have an offer: treat as success. */
				dprintf(("dhcp: gathering time has expired\n"));
				error = 0;
			}
			break;
		}
		else {
			/* Wait for a packet to arrive or the timer to fire. */
			socket_lock(context->so, 1);
			error = sbwait(&context->so->so_rcv);
			socket_unlock(context->so, 1);
		}
	}
	return (error);
}
static struct dhcp_context * dhcp_context_create(struct ifnet * ifp, int max_try, struct proc * procp, int * error_p) { struct dhcp_context * context = NULL; struct sockaddr_dl * dl_p; struct in_addr lo_addr; struct in_addr lo_mask; int error; struct sockaddr_in sin; /* get the hardware address from the interface */ dl_p = link_from_ifnet(ifp); if (dl_p == NULL) { printf("dhcp: can't get link address\n"); error = ENXIO; goto failed; } printf("dhcp: h/w addr "); link_print(dl_p); if (dl_p->sdl_type != IFT_ETHER) { printf("dhcp: hardware type %d not supported\n", dl_p->sdl_type); error = ENXIO; goto failed; } context = (struct dhcp_context *)kalloc(sizeof(*context)); if (context == NULL) { printf("dhcp: failed to allocate context\n"); error = ENOMEM; goto failed; } bzero(context, sizeof(*context)); /* get a socket */ error = socreate(AF_INET, &context->so, SOCK_DGRAM, 0); if (error != 0) { printf("dhcp: socreate failed %d\n", error); goto failed; } /* assign 127.0.0.1 to lo0 so that the bind will succeed */ lo_addr.s_addr = htonl(INADDR_LOOPBACK); lo_mask.s_addr = htonl(IN_CLASSA_NET); error = inet_aifaddr(context->so, "lo0", &lo_addr, &lo_mask, NULL); if (error != 0) { printf("dhcp: assigning loopback address failed %d\n", error); } /* enable reception of DHCP packets before an address is assigned */ snprintf(context->ifr.ifr_name, sizeof(context->ifr.ifr_name), "%s%d", ifp->if_name, ifp->if_unit); context->ifr.ifr_intval = 1; error = ifioctl(context->so, SIOCAUTOADDR, (caddr_t)&context->ifr, procp); if (error) { printf("dhcp: SIOCAUTOADDR failed: %d\n", error); goto failed; } dprintf(("dhcp: SIOCAUTOADDR done\n")); error = ifioctl(context->so, SIOCPROTOATTACH, (caddr_t)&context->ifr, procp); if (error) { printf("dhcp: SIOCPROTOATTACH failed: %d\n", error); goto failed; } dprintf(("dhcp: SIOCPROTOATTACH done\n")); /* bind the socket */ sin.sin_len = sizeof(sin); sin.sin_family = AF_INET; sin.sin_port = htons(IPPORT_BOOTPC); sin.sin_addr.s_addr = INADDR_ANY; error = 
sobind(context->so, (struct sockaddr *)&sin); if (error) { printf("dhcp: sobind failed, %d\n", error); goto failed; } /* make it non-blocking I/O */ socket_lock(context->so, 1); context->so->so_state |= SS_NBIO; socket_unlock(context->so, 1); /* save passed-in information */ context->max_try = max_try; context->dl_p = dl_p; context->ifp = ifp; /* get a random transaction id */ context->xid = random(); return (context); failed: dhcp_context_free(context, procp); *error_p = error; return (NULL); }
/*
 * soioctl (newer variant)
 * Dispatch an ioctl against socket 'so' under the socket lock.
 *
 * Order of operations:
 *   1. For anything that is not an interface ('i') or routing ('r')
 *      ioctl, give the socket filters first crack via sflt_ioctl(),
 *      except for the connection/association introspection ioctls
 *      which bypass filters.
 *   2. Handle the generic FIO*/SIOC* cases inline (all data is moved
 *      with bcopy because 'data' may not be naturally aligned).
 *   3. Fall through to the interface, routing, or protocol-specific
 *      pru_control handler.
 *
 * EJUSTRETURN from a lower layer means "handled, no error" and is
 * mapped to 0 before returning.
 */
__private_extern__ int
soioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p)
{
	int error = 0;
	int int_arg;

	socket_lock(so, 1);

	/* call the socket filter's ioctl handler for anything but ours */
	if (IOCGROUP(cmd) != 'i' && IOCGROUP(cmd) != 'r') {
		switch (cmd) {
		case SIOCGASSOCIDS32:
		case SIOCGASSOCIDS64:
		case SIOCGCONNIDS32:
		case SIOCGCONNIDS64:
		case SIOCGCONNINFO32:
		case SIOCGCONNINFO64:
		case SIOCSCONNORDER:
		case SIOCGCONNORDER:
			/* don't pass to filter */
			break;

		default:
			error = sflt_ioctl(so, cmd, data);
			if (error != 0)
				goto out;
			break;
		}
	}

	switch (cmd) {
	case FIONBIO:			/* int: set/clear non-blocking I/O */
		bcopy(data, &int_arg, sizeof (int_arg));
		if (int_arg)
			so->so_state |= SS_NBIO;
		else
			so->so_state &= ~SS_NBIO;
		goto out;

	case FIOASYNC:			/* int: set/clear async notification */
		bcopy(data, &int_arg, sizeof (int_arg));
		if (int_arg) {
			so->so_state |= SS_ASYNC;
			so->so_rcv.sb_flags |= SB_ASYNC;
			so->so_snd.sb_flags |= SB_ASYNC;
		} else {
			so->so_state &= ~SS_ASYNC;
			so->so_rcv.sb_flags &= ~SB_ASYNC;
			so->so_snd.sb_flags &= ~SB_ASYNC;
		}
		goto out;

	case FIONREAD:			/* int: bytes available to read */
		bcopy(&so->so_rcv.sb_cc, data, sizeof (u_int32_t));
		goto out;

	case SIOCSPGRP:			/* int: set process group for SIGIO/SIGURG */
		bcopy(data, &so->so_pgid, sizeof (pid_t));
		goto out;

	case SIOCGPGRP:			/* int: get process group */
		bcopy(&so->so_pgid, data, sizeof (pid_t));
		goto out;

	case SIOCATMARK:		/* int: at OOB mark? */
		int_arg = (so->so_state & SS_RCVATMARK) != 0;
		bcopy(&int_arg, data, sizeof (int_arg));
		goto out;

	case SIOCSETOT:			/* int; deprecated */
		error = EOPNOTSUPP;
		goto out;

	case SIOCGASSOCIDS32:		/* so_aidreq32 */
	case SIOCGASSOCIDS64:		/* so_aidreq64 */
	case SIOCGCONNIDS32:		/* so_cidreq32 */
	case SIOCGCONNIDS64:		/* so_cidreq64 */
	case SIOCGCONNINFO32:		/* so_cinforeq32 */
	case SIOCGCONNINFO64:		/* so_cinforeq64 */
	case SIOCSCONNORDER:		/* so_cordreq */
	case SIOCGCONNORDER:		/* so_cordreq */
		/* handled directly by the protocol's control entry point */
		error = (*so->so_proto->pr_usrreqs->pru_control)(so,
		    cmd, data, NULL, p);
		goto out;
	}

	/*
	 * Interface/routing/protocol specific ioctls:
	 * interface and routing ioctls should have a
	 * different entry since a socket's unnecessary
	 */
	if (IOCGROUP(cmd) == 'i') {
		error = ifioctllocked(so, cmd, data, p);
	} else {
		if (IOCGROUP(cmd) == 'r')
			error = rtioctl(cmd, data, p);
		else
			error = (*so->so_proto->pr_usrreqs->pru_control)(so,
			    cmd, data, NULL, p);
	}

out:
	socket_unlock(so, 1);

	/* EJUSTRETURN means "handled successfully" to our caller */
	if (error == EJUSTRETURN)
		error = 0;
	return (error);
}
/*
 * soioctl (older variant with SIOCSETOT support)
 * Dispatch an ioctl against socket 'so' under the socket lock.
 *
 * Filters: for non-interface/non-routing ioctls, each attached socket
 * filter with an sf_ioctl handler is invoked.  On the first such
 * filter the socket is marked in-use (sflt_use) and unlocked so the
 * filter runs without the socket lock; the lock is retaken afterwards.
 *
 * SIOCSETOT clones selected socket-level options from another socket
 * (identified by fd) onto 'so'; dropsockref records the fd whose
 * file_socket() reference must be released at 'out'.
 */
__private_extern__ int
soioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p)
{
	struct sockopt sopt;
	int error = 0;
	int dropsockref = -1;	/* fd to file_drop() at exit, -1 = none */

	socket_lock(so, 1);

	/*
	 * NOTE(review): sopt is initialized here but never read in this
	 * function — appears vestigial; confirm before removing.
	 */
	sopt.sopt_level = cmd;
	sopt.sopt_name = (int)data;
	sopt.sopt_p = p;

	/* Call the socket filter's ioctl handler for most ioctls */
	if (IOCGROUP(cmd) != 'i' && IOCGROUP(cmd) != 'r') {
		int filtered = 0;
		struct socket_filter_entry *filter;

		for (filter = so->so_filt; filter && error == 0;
		    filter = filter->sfe_next_onsocket) {
			if (filter->sfe_filter->sf_filter.sf_ioctl) {
				if (filtered == 0) {
					/* drop the socket lock for the first
					 * filter call; keep a use count so the
					 * socket can't go away */
					sflt_use(so);
					socket_unlock(so, 0);
					filtered = 1;
				}
				error = filter->sfe_filter->sf_filter.
				    sf_ioctl(filter->sfe_cookie, so, cmd, data);
			}
		}

		if (filtered) {
			socket_lock(so, 0);
			sflt_unuse(so);
		}

		if (error != 0)
			goto out;
	}

	switch (cmd) {
	case FIONBIO:
		/* set/clear non-blocking I/O */
		if (*(int *)data)
			so->so_state |= SS_NBIO;
		else
			so->so_state &= ~SS_NBIO;
		goto out;

	case FIOASYNC:
		/* set/clear async notification on both buffers */
		if (*(int *)data) {
			so->so_state |= SS_ASYNC;
			so->so_rcv.sb_flags |= SB_ASYNC;
			so->so_snd.sb_flags |= SB_ASYNC;
		} else {
			so->so_state &= ~SS_ASYNC;
			so->so_rcv.sb_flags &= ~SB_ASYNC;
			so->so_snd.sb_flags &= ~SB_ASYNC;
		}
		goto out;

	case FIONREAD:
		/* bytes available to read */
		*(int *)data = so->so_rcv.sb_cc;
		goto out;

	case SIOCSPGRP:
		so->so_pgid = *(int *)data;
		goto out;

	case SIOCGPGRP:
		*(int *)data = so->so_pgid;
		goto out;

	case SIOCATMARK:
		*(int *)data = (so->so_state&SS_RCVATMARK) != 0;
		goto out;

	case SIOCSETOT: {
		/*
		 * Set socket level options here and then call protocol
		 * specific routine.
		 */
		struct socket *cloned_so = NULL;
		int cloned_fd = *(int *)data;

		/* let's make sure it's either -1 or a valid file descriptor */
		if (cloned_fd != -1) {
			error = file_socket(cloned_fd, &cloned_so);
			if (error) {
				goto out;
			}
			dropsockref = cloned_fd;
		}

		/* Always set socket non-blocking for OT */
		so->so_state |= SS_NBIO;
		so->so_options |= SO_DONTTRUNC | SO_WANTMORE;
		so->so_flags |= SOF_NOSIGPIPE;

		if (cloned_so && so != cloned_so) {
			/* Flags options */
			so->so_options |=
			    cloned_so->so_options & ~SO_ACCEPTCONN;

			/* SO_LINGER */
			if (so->so_options & SO_LINGER)
				so->so_linger = cloned_so->so_linger;

			/* SO_SNDBUF, SO_RCVBUF */
			if (cloned_so->so_snd.sb_hiwat > 0) {
				if (sbreserve(&so->so_snd,
				    cloned_so->so_snd.sb_hiwat) == 0) {
					error = ENOBUFS;
					goto out;
				}
			}
			if (cloned_so->so_rcv.sb_hiwat > 0) {
				if (sbreserve(&so->so_rcv,
				    cloned_so->so_rcv.sb_hiwat) == 0) {
					error = ENOBUFS;
					goto out;
				}
			}

			/* SO_SNDLOWAT, SO_RCVLOWAT — clamp to new hiwat */
			so->so_snd.sb_lowat =
			    (cloned_so->so_snd.sb_lowat >
			    so->so_snd.sb_hiwat) ?
			    so->so_snd.sb_hiwat : cloned_so->so_snd.sb_lowat;
			so->so_rcv.sb_lowat =
			    (cloned_so->so_rcv.sb_lowat >
			    so->so_rcv.sb_hiwat) ?
			    so->so_rcv.sb_hiwat : cloned_so->so_rcv.sb_lowat;

			/* SO_SNDTIMEO, SO_RCVTIMEO */
			so->so_snd.sb_timeo = cloned_so->so_snd.sb_timeo;
			so->so_rcv.sb_timeo = cloned_so->so_rcv.sb_timeo;
		}

		error = (*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
		    data, 0, p);
		/* Just ignore protocols that do not understand it */
		if (error == EOPNOTSUPP)
			error = 0;

		goto out;
	}
	}

	/*
	 * Interface/routing/protocol specific ioctls:
	 * interface and routing ioctls should have a
	 * different entry since a socket's unnecessary
	 */
	if (IOCGROUP(cmd) == 'i') {
		error = ifioctllocked(so, cmd, data, p);
	} else {
		if (IOCGROUP(cmd) == 'r')
			error = rtioctl(cmd, data, p);
		else
			error = (*so->so_proto->pr_usrreqs->pru_control)(so,
			    cmd, data, 0, p);
	}

out:
	/* release the file_socket() reference taken for SIOCSETOT */
	if (dropsockref != -1)
		file_drop(dropsockref);
	socket_unlock(so, 1);

	/* EJUSTRETURN means "handled successfully" to our caller */
	if (error == EJUSTRETURN)
		error = 0;

	return (error);
}
/*
 * checkserves
 * Sanity check that the NetInfo master at 'masteraddr' (domain tag
 * 'mastertag') actually lists this host (myaddr/mytag) as a clone:
 * i.e. that the master's /machines/ip_address=<myaddr> directory has a
 * "serves" property containing "./<mytag>".
 *
 * Steps (each over its own TCP RPC connection, protected from other
 * users of the fd set via clnt_fdset and the socket_lock/socket_unlock
 * critical section around clnt_destroy/close):
 *   1. ask the master's nibindd for the port of 'mastertag';
 *   2. connect to that netinfod and walk / -> /machines ->
 *      /machines/ip_address=<myaddr>;
 *   3. list and read the "serves" property and match "./<mytag>".
 *
 * Returns NI_OK on success, otherwise an ni_status describing the
 * first failure.  NOTE: clnt_call() results (enum clnt_stat) are
 * compared/returned as ni_status; both use 0 for success, so only the
 * success test is strictly portable — preserved as-is.
 *
 * FIX: the "master has no root directory" branch returned 'status',
 * which is necessarily NI_OK at that point (the clnt_call failure was
 * handled just above), so the error was reported as success.  It now
 * returns id_res.status, consistent with the lu_res/prop_res branches.
 */
ni_status
checkserves(unsigned long masteraddr, ni_name mastertag, unsigned long myaddr, ni_name mytag)
{
	struct sockaddr_in mastersin;
	struct in_addr myinaddr;
	char myserves[MAXPATHLEN + 16];
	ni_index where;
	int status;
	int sock;
	CLIENT *cl;
	struct timeval tv;
	nibind_getregister_res res;
	ni_id_res id_res;
	ni_lookup_res lu_res;
	ni_lookup_args childdir;
	ni_namelist_res prop_res;
	ni_prop_args pa;

	mastersin.sin_addr.s_addr = masteraddr;
	mastersin.sin_port = 0;
	mastersin.sin_family = AF_INET;
	myinaddr.s_addr = myaddr;

	/* connect to master host's nibindd */
	sock = socket_connect(&mastersin, NIBIND_PROG, NIBIND_VERS);
	if (sock < 0)
	{
		system_log(LOG_WARNING, "sanitycheck can't connect to %s/%s - %m", inet_ntoa(mastersin.sin_addr), mastertag);
		return NI_FAILED;
	}
	FD_SET(sock, &clnt_fdset); /* protect client socket */

	cl = clnttcp_create(&mastersin, NIBIND_PROG, NIBIND_VERS, &sock, 0, 0);
	if (cl == NULL)
	{
		socket_close(sock);
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		return NI_FAILED;
	}

	tv.tv_sec = READALL_TIMEOUT;
	tv.tv_usec = 0;

	/* get the ports for master's tag */
	bzero((char *)&res, sizeof(res));
	if (clnt_call(cl, NIBIND_GETREGISTER, xdr_ni_name, &mastertag, xdr_nibind_getregister_res, &res, tv) != RPC_SUCCESS)
	{
		/* call failed */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		return NI_FAILED;
	}

	/* done with nibindd — tear down this connection */
	socket_lock();
	clnt_destroy(cl);
	(void)close(sock);
	socket_unlock();
	FD_CLR(sock, &clnt_fdset); /* unprotect client socket */

	if (res.status != NI_OK)
	{
		/* no server for the master's tag */
		system_log(LOG_ERR, "tag %s: master's tag %s is not served at master's addresss", mytag, mastertag);
		return res.status;
	}

	/* connect to the master */
	mastersin.sin_port = htons(res.nibind_getregister_res_u.addrs.tcp_port);
	xdr_free(xdr_nibind_getregister_res, (void *)&res);

	sock = socket_connect(&mastersin, NI_PROG, NI_VERS);
	if (sock < 0)
	{
		system_log(LOG_WARNING, "sanitycheck can't connect to "
			"%s/%s - %m", inet_ntoa(mastersin.sin_addr), mastertag);
		return NI_FAILED;
	}
	FD_SET(sock, &clnt_fdset); /* protect client socket */

	cl = clnttcp_create(&mastersin, NI_PROG, NI_VERS, &sock, 0, 0);
	if (cl == NULL)
	{
		/* can't connect to master */
		socket_close(sock);
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		return NI_FAILED;
	}
	FD_SET(sock, &clnt_fdset); /* protect client socket (redundant with above, harmless) */

	/* get root directory */
	bzero((char *)&id_res, sizeof(id_res));
	status = clnt_call(cl, _NI_ROOT, xdr_void, NULL, xdr_ni_id_res, &id_res, tv);
	if (status != NI_OK)
	{
		/* can't get root! */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's root directory", mytag);
		return status;
	}
	if (id_res.status != NI_OK)
	{
		/* can't get root! */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no root directory", mytag);
		return id_res.status;	/* FIX: was 'status' (== NI_OK here) */
	}

	/* scratch key/value for the lookups below; 16 bytes fits the longest
	 * strings used ("ip_address", dotted-quad incl. NUL) */
	childdir.key = malloc(16);
	childdir.value = malloc(16);

	/* get machines subdirectory */
	childdir.id = id_res.ni_id_res_u.id;
	xdr_free(xdr_ni_id_res, (void *)&id_res);
	strcpy(childdir.key,"name");
	strcpy(childdir.value,"machines");
	bzero((char *)&lu_res, sizeof(lu_res));
	status = clnt_call(cl, _NI_LOOKUP, xdr_ni_lookup_args, &childdir, xdr_ni_lookup_res, &lu_res, tv);
	if (status != NI_OK)
	{
		/* can't get /machines! */
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's /machines directory", mytag);
		return status;
	}
	if (lu_res.status != NI_OK)
	{
		/* no /machines! */
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's /machines directory", mytag);
		return lu_res.status;
	}
	if (lu_res.ni_lookup_res_u.stuff.idlist.ni_idlist_len == 0)
	{
		/* no /machines! */
		xdr_free(xdr_ni_lookup_res, (void *)&lu_res);
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no /machines directory", mytag);
		return NI_NODIR;
	}

	/* get my subdirectory */
	childdir.id.nii_object = lu_res.ni_lookup_res_u.stuff.idlist.ni_idlist_val[0];
	xdr_free(xdr_ni_lookup_res, (void *)&lu_res);
	strcpy(childdir.key,"ip_address");
	strcpy(childdir.value,(char *)inet_ntoa(myinaddr));
	bzero((char *)&lu_res, sizeof(lu_res));
	status = clnt_call(cl, _NI_LOOKUP, xdr_ni_lookup_args, &childdir, xdr_ni_lookup_res, &lu_res, tv);
	if (status != NI_OK)
	{
		/* can't get /machines/ip_address=<myaddr>! */
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's /machines/ip_address=%s directory", mytag,(char *)inet_ntoa(myinaddr));
		return status;
	}
	if (lu_res.status != NI_OK)
	{
		/* no /machines/ip_address=<myaddr>! */
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's /machines/ip_address=%s directory", mytag,(char *)inet_ntoa(myinaddr));
		return lu_res.status;
	}
	if (lu_res.ni_lookup_res_u.stuff.idlist.ni_idlist_len == 0)
	{
		/* can't get /machines/ip_address=<myaddr>! */
		xdr_free(xdr_ni_lookup_res, (void *)&lu_res);
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no /machines/ip_address=%s directory", mytag,(char *)inet_ntoa(myinaddr));
		return NI_NODIR;
	}

	/* list properties */
	prop_res.ni_namelist_res_u.stuff.values.ni_namelist_val = NULL;
	childdir.id.nii_object = lu_res.ni_lookup_res_u.stuff.idlist.ni_idlist_val[0];
	xdr_free(xdr_ni_lookup_res, (void *)&lu_res);
	bzero((char *)&prop_res, sizeof(prop_res));
	status = clnt_call(cl, _NI_LISTPROPS, xdr_ni_id, &childdir.id, xdr_ni_namelist_res, &prop_res, tv);
	if (status != NI_OK)
	{
		/* can't get proplist! */
		free(childdir.key);
		free(childdir.value);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's property list for /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return status;
	}

	/* childdir.key/value are no longer needed past this point */
	free(childdir.key);
	free(childdir.value);

	if (prop_res.status != NI_OK)
	{
		/* can't get proplist! */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's property list for /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return prop_res.status;
	}
	if (prop_res.ni_namelist_res_u.stuff.values.ni_namelist_len == 0)
	{
		/* empty proplist! */
		xdr_free(xdr_ni_namelist_res, (void *)&prop_res);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no property list for /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return NI_NOPROP;
	}

	/* find "serves" property */
	where = ni_namelist_match(prop_res.ni_namelist_res_u.stuff.values, "serves");
	xdr_free(xdr_ni_namelist_res, (void *)&prop_res);
	if (where == NI_INDEX_NULL)
	{
		/* no serves property! */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no serves property for /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return NI_NOPROP;
	}

	/* fetch serves property */
	pa.id = childdir.id;
	pa.prop_index = where;
	bzero((char *)&prop_res, sizeof(prop_res));
	status = clnt_call(cl, _NI_READPROP, xdr_ni_prop_args, &pa, xdr_ni_namelist_res, &prop_res, tv);
	if (status != NI_OK)
	{
		/* can't read serves property! */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's serves property for /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return status;
	}
	if (prop_res.status != NI_OK)
	{
		/* can't read serves property! */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: can't get master's serves property for /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return prop_res.status;
	}
	if (prop_res.ni_namelist_res_u.stuff.values.ni_namelist_len == 0)
	{
		/* no values in serves property! */
		xdr_free(xdr_ni_namelist_res, (void *)&prop_res);
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no values for serves property in /machines/ip_address=%s", mytag,(char *)inet_ntoa(myinaddr));
		return NI_NOPROP;
	}

	/* find my "serves" value, e.g. "./mytag" */
	sprintf(myserves, "./%s", mytag);
	where = ni_namelist_match(prop_res.ni_namelist_res_u.stuff.values, myserves);
	xdr_free(xdr_ni_namelist_res, (void *)&prop_res);
	if (where == NI_INDEX_NULL)
	{
		/* master doesn't list us as a clone */
		socket_lock();
		clnt_destroy(cl);
		(void)close(sock);
		socket_unlock();
		FD_CLR(sock, &clnt_fdset); /* unprotect client socket */
		system_log(LOG_ERR, "tag %s: master has no serves ./%s property in /machines/ip_address=%s", mytag,mytag,(char *)inet_ntoa(myinaddr));
		return NI_NONAME;
	}

	/* success — tear down the master connection */
	socket_lock();
	clnt_destroy(cl);
	(void)close(sock);
	socket_unlock();
	FD_CLR(sock, &clnt_fdset); /* unprotect client socket */

	return NI_OK;
}
errno_t fill_socketinfo(struct socket *so, struct socket_info *si) { errno_t error = 0; int domain; short type; short protocol; socket_lock(so, 0); si->soi_kind = SOCKINFO_GENERIC; fill_common_sockinfo(so, si); if (so->so_pcb == NULL || so->so_proto == 0 || so->so_proto->pr_domain == NULL) goto out; /* * The kind of socket is determined by the triplet * {domain, type, protocol} */ domain = SOCK_DOM(so); type = SOCK_TYPE(so); protocol = SOCK_PROTO(so); switch (domain) { case PF_INET: case PF_INET6: { struct in_sockinfo *insi = &si->soi_proto.pri_in; struct inpcb *inp = (struct inpcb *)so->so_pcb; si->soi_kind = SOCKINFO_IN; insi->insi_fport = inp->inp_fport; insi->insi_lport = inp->inp_lport; insi->insi_gencnt = inp->inp_gencnt; insi->insi_flags = inp->inp_flags; insi->insi_vflag = inp->inp_vflag; insi->insi_ip_ttl = inp->inp_ip_ttl; insi->insi_faddr.ina_6 = inp->inp_dependfaddr.inp6_foreign; insi->insi_laddr.ina_6 = inp->inp_dependladdr.inp6_local; insi->insi_v4.in4_tos = inp->inp_depend4.inp4_ip_tos; insi->insi_v6.in6_hlim = 0; insi->insi_v6.in6_cksum = inp->inp_depend6.inp6_cksum; insi->insi_v6.in6_ifindex = 0; insi->insi_v6.in6_hops = inp->inp_depend6.inp6_hops; if (type == SOCK_STREAM && (protocol == 0 || protocol == IPPROTO_TCP) && inp->inp_ppcb != NULL) { struct tcp_sockinfo *tcpsi = &si->soi_proto.pri_tcp; struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb; si->soi_kind = SOCKINFO_TCP; tcpsi->tcpsi_state = tp->t_state; tcpsi->tcpsi_timer[TCPT_REXMT] = tp->t_timer[TCPT_REXMT]; tcpsi->tcpsi_timer[TCPT_PERSIST] = tp->t_timer[TCPT_PERSIST]; tcpsi->tcpsi_timer[TCPT_KEEP] = tp->t_timer[TCPT_KEEP]; tcpsi->tcpsi_timer[TCPT_2MSL] = tp->t_timer[TCPT_2MSL]; tcpsi->tcpsi_mss = tp->t_maxseg; tcpsi->tcpsi_flags = tp->t_flags; tcpsi->tcpsi_tp = (u_int64_t)VM_KERNEL_ADDRPERM(tp); } break; } case PF_UNIX: { struct unpcb *unp = (struct unpcb *)so->so_pcb; struct un_sockinfo *unsi = &si->soi_proto.pri_un; si->soi_kind = SOCKINFO_UN; unsi->unsi_conn_pcb = 
(uint64_t)VM_KERNEL_ADDRPERM(unp->unp_conn); if (unp->unp_conn) unsi->unsi_conn_so = (uint64_t) VM_KERNEL_ADDRPERM(unp->unp_conn->unp_socket); if (unp->unp_addr) { size_t addrlen = unp->unp_addr->sun_len; if (addrlen > SOCK_MAXADDRLEN) addrlen = SOCK_MAXADDRLEN; bcopy(unp->unp_addr, &unsi->unsi_addr, addrlen); } if (unp->unp_conn && unp->unp_conn->unp_addr) { size_t addrlen = unp->unp_conn->unp_addr->sun_len; if (addrlen > SOCK_MAXADDRLEN) addrlen = SOCK_MAXADDRLEN; bcopy(unp->unp_conn->unp_addr, &unsi->unsi_caddr, addrlen); } break; } case PF_NDRV: { struct ndrv_cb *ndrv_cb = (struct ndrv_cb *)so->so_pcb; struct ndrv_info *ndrvsi = &si->soi_proto.pri_ndrv; si->soi_kind = SOCKINFO_NDRV; /* TDB lock ifnet ???? */ if (ndrv_cb->nd_if != 0) { struct ifnet *ifp = ndrv_cb->nd_if; ndrvsi->ndrvsi_if_family = ifp->if_family; ndrvsi->ndrvsi_if_unit = ifp->if_unit; strlcpy(ndrvsi->ndrvsi_if_name, ifp->if_name, IFNAMSIZ); } break; } case PF_SYSTEM: if (SOCK_PROTO(so) == SYSPROTO_EVENT) { struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb; struct kern_event_info *kesi = &si->soi_proto.pri_kern_event; si->soi_kind = SOCKINFO_KERN_EVENT; kesi->kesi_vendor_code_filter = ev_pcb->evp_vendor_code_filter; kesi->kesi_class_filter = ev_pcb->evp_class_filter; kesi->kesi_subclass_filter = ev_pcb->evp_subclass_filter; } else if (SOCK_PROTO(so) == SYSPROTO_CONTROL) { struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; struct kern_ctl_info *kcsi = &si->soi_proto.pri_kern_ctl; struct kctl *kctl = kcb->kctl; si->soi_kind = SOCKINFO_KERN_CTL; if (kctl == 0) break; kcsi->kcsi_id = kctl->id; kcsi->kcsi_reg_unit = kctl->id; kcsi->kcsi_flags = kctl->flags; kcsi->kcsi_recvbufsize = kctl->recvbufsize; kcsi->kcsi_sendbufsize = kctl->sendbufsize; kcsi->kcsi_unit = kcb->unit; strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME); } break; case PF_ROUTE: case PF_PPP: default: break; } out: socket_unlock(so, 0); return (error); }
/*
 * XXX: this is borrowed from in6_pcbbind(). If possible, we should
 * share this function by all *bsd*...
 *
 * Pick an ephemeral local port for 'inp' and insert it into the pcb
 * hash.  The search range and the shared "last port tried" cursor
 * depend on INP_HIGHPORT/INP_LOWPORT (INP_LOWPORT requires the
 * reserved-port privilege).  Called with the socket lock held; when
 * 'locked' is false this routine acquires (and on every exit releases)
 * the pcbinfo lock itself.  Returns 0 on success, EAGAIN if the range
 * is exhausted or the hash insert fails, or a privilege-check error.
 */
int
in6_pcbsetport(
	__unused struct in6_addr *laddr,
	struct inpcb *inp,
	struct proc *p,
	int locked)
{
	struct socket *so = inp->inp_socket;
	u_int16_t lport = 0, first, last, *lastport;
	int count, error = 0, wild = 0;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	kauth_cred_t cred;
	if (!locked) { /* Make sure we don't run into a deadlock: 4052373 */
		/* drop the socket lock before blocking on the pcbinfo lock
		 * to preserve lock ordering */
		if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(pcbinfo->mtx);
			socket_lock(inp->inp_socket, 0);
		}
	}

	/* XXX: this is redundant when called from in6_pcbbind */
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = INPLOOKUP_WILDCARD;

	inp->inp_flags |= INP_ANONPORT;

	/* select the port range and the shared cursor for that range */
	if (inp->inp_flags & INP_HIGHPORT) {
		first = ipport_hifirstauto;	/* sysctl */
		last  = ipport_hilastauto;
		lastport = &pcbinfo->lasthi;
	} else if (inp->inp_flags & INP_LOWPORT) {
		/* reserved ports require privilege */
		cred = kauth_cred_proc_ref(p);
		error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
		kauth_cred_unref(&cred);
		if (error != 0) {
			if (!locked)
				lck_rw_done(pcbinfo->mtx);
			return error;
		}
		first = ipport_lowfirstauto;	/* 1023 */
		last  = ipport_lowlastauto;	/* 600 */
		lastport = &pcbinfo->lastlow;
	} else {
		first = ipport_firstauto;	/* sysctl */
		last  = ipport_lastauto;
		lastport = &pcbinfo->lastport;
	}
	/*
	 * Simple check to ensure all ports are not used up causing
	 * a deadlock here.
	 *
	 * We split the two cases (up and down) so that the direction
	 * is not being tested on each round of the loop.
	 */
	if (first > last) {
		/*
		 * counting down
		 */
		count = first - last;
		do {
			if (count-- < 0) {	/* completely used? */
				/*
				 * Undo any address bind that may have
				 * occurred above.
				 */
				inp->in6p_laddr = in6addr_any;
				inp->in6p_last_outif = 0;
				if (!locked)
					lck_rw_done(pcbinfo->mtx);
				return (EAGAIN);
			}
			--*lastport;
			/* wrap the cursor back into [last, first] */
			if (*lastport > first || *lastport < last)
				*lastport = first;
			lport = htons(*lastport);
		} while (in6_pcblookup_local(pcbinfo, &inp->in6p_laddr, lport,
		    wild));
	} else {
		/*
		 * counting up
		 */
		count = last - first;
		do {
			if (count-- < 0) {	/* completely used? */
				/*
				 * Undo any address bind that may have
				 * occurred above.
				 */
				inp->in6p_laddr = in6addr_any;
				inp->in6p_last_outif = 0;
				if (!locked)
					lck_rw_done(pcbinfo->mtx);
				return (EAGAIN);
			}
			++*lastport;
			/* wrap the cursor back into [first, last] */
			if (*lastport < first || *lastport > last)
				*lastport = first;
			lport = htons(*lastport);
		} while (in6_pcblookup_local(pcbinfo, &inp->in6p_laddr, lport,
		    wild));
	}
	inp->inp_lport = lport;
	if (in_pcbinshash(inp, 1) != 0) {
		/* hash insert failed: undo address and port assignment */
		inp->in6p_laddr = in6addr_any;
		inp->inp_lport = 0;
		inp->in6p_last_outif = 0;
		if (!locked)
			lck_rw_done(pcbinfo->mtx);
		return (EAGAIN);
	}
	if (!locked)
		lck_rw_done(pcbinfo->mtx);
	return(0);
}
/*
 * shutdown_sockets_on_interface_proc_callout
 * Per-process callout (arg is the ifnet being shut down): walk the
 * process's file table and defunct every PF_INET/PF_INET6 socket
 * whose inpcb is bound to, last sent over, or delegated to 'ifp'.
 *
 * Runs with the proc fd lock held for the whole scan.  For each
 * candidate socket the inpcb is pinned with WNT_ACQUIRE before taking
 * the socket lock, then released with WNT_RELEASE under that lock; a
 * WNT_STOPUSING result at either step means the pcb is going away and
 * the socket is skipped.  Always returns PROC_RETURNED to continue
 * the process iteration.
 */
static int
shutdown_sockets_on_interface_proc_callout(proc_t p, void *arg)
{
	struct filedesc *fdp;
	int i;
	struct ifnet *ifp = (struct ifnet *)arg;

	if (ifp == NULL)
		return (PROC_RETURNED);

	proc_fdlock(p);
	fdp = p->p_fd;
	for (i = 0; i < fdp->fd_nfiles; i++) {
		struct fileproc *fp = fdp->fd_ofiles[i];
		struct fileglob *fg;
		struct socket *so;
		struct inpcb *inp;
		struct ifnet *inp_ifp;
		int error;

		/* skip empty and reserved (mid-open/close) slots */
		if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
			continue;
		}

		fg = fp->f_fglob;
		if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET)
			continue;

		so = (struct socket *)fp->f_fglob->fg_data;
		if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6)
			continue;

		inp = (struct inpcb *)so->so_pcb;

		/* pin the pcb so it can't be freed before we lock the socket */
		if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING)
			continue;

		socket_lock(so, 1);

		/* drop the pin; bail if the pcb is being torn down */
		if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) {
			socket_unlock(so, 1);
			continue;
		}

		/* prefer the explicitly bound interface, else the last
		 * output interface */
		if (inp->inp_boundifp != NULL) {
			inp_ifp = inp->inp_boundifp;
		} else if (inp->inp_last_outifp != NULL) {
			inp_ifp = inp->inp_last_outifp;
		} else {
			socket_unlock(so, 1);
			continue;
		}

		/* match the target interface directly or via delegation */
		if (inp_ifp != ifp && inp_ifp->if_delegated.ifp != ifp) {
			socket_unlock(so, 1);
			continue;
		}
		error = sosetdefunct(p, so, 0, TRUE);
		if (error != 0) {
			log(LOG_ERR, "%s: sosetdefunct() error %d",
			    __func__, error);
		} else {
			error = sodefunct(p, so, 0);
			if (error != 0) {
				log(LOG_ERR, "%s: sodefunct() error %d",
				    __func__, error);
			}
		}

		socket_unlock(so, 1);
	}
	proc_fdunlock(p);

	return (PROC_RETURNED);
}