/*
 * Routine:	lck_rw_sleep_deadline
 *
 * Drop the RW lock 'lck', wait for 'event' (or until 'deadline'), then
 * re-take the lock in the mode selected by 'lck_sleep_action' — unless
 * LCK_SLEEP_UNLOCK asks for it to be left dropped.  Returns the wait
 * result from assert_wait_deadline()/thread_block().
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	wresult;
	lck_rw_type_t	held_type;

	/* Reject any bits outside the defined sleep-action set. */
	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	wresult = assert_wait_deadline(event, interruptible, deadline);
	if (wresult != THREAD_WAITING) {
		/* Wait was not armed; honor an unlock request and bail. */
		if (lck_sleep_action & LCK_SLEEP_UNLOCK)
			(void)lck_rw_done(lck);
		return wresult;
	}

	/* Release the lock, remembering how it was held, and block. */
	held_type = lck_rw_done(lck);
	wresult = thread_block(THREAD_CONTINUE_NULL);

	/* Caller wants the lock left dropped. */
	if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		return wresult;

	/* Re-acquire in the requested mode, defaulting to the prior mode. */
	if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
		lck_rw_lock_exclusive(lck);
	else if (lck_sleep_action & LCK_SLEEP_SHARED)
		lck_rw_lock_shared(lck);
	else
		lck_rw_lock(lck, held_type);

	return wresult;
}
/*
 * Routine:	lck_rw_sleep
 *
 * Atomically drop the RW lock 'lck' and sleep on 'event', then re-take
 * the lock according to 'lck_sleep_action' (unless LCK_SLEEP_UNLOCK asks
 * for it to be left dropped).  With LCK_SLEEP_PROMOTED_PRI, the thread's
 * rwlock_count is held elevated across the sleep so any RW-lock priority
 * promotion persists while blocked.  Returns the wait result from
 * assert_wait()/thread_block().
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;
	thread_t	thread = current_thread();

	/* Reject any bits outside the defined sleep-action set. */
	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/*
		 * Although we are dropping the RW lock, the intent in most cases
		 * is that this thread remains as an observer, since it may hold
		 * some secondary resource, but must yield to avoid deadlock. In
		 * this situation, make sure that the thread is boosted to the
		 * RW lock ceiling while blocked, so that it can re-acquire the
		 * RW lock at that priority.
		 */
		thread->rwlock_count++;
	}

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		/* Remember the held mode so it can be re-taken the same way. */
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			/* No explicit mode requested: restore the prior mode. */
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	} else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		/* Wait was not armed, but the caller wanted the lock dropped. */
		(void)lck_rw_done(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/* Old value 1 means this balanced the last outstanding hold. */
		if ((thread->rwlock_count-- == 1 /* field now 0 */) &&
		    (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */

			/* Only if the caller wanted the lck_rw_t returned unlocked should we drop to 0 */
			assert(lck_sleep_action & LCK_SLEEP_UNLOCK);

			lck_rw_clear_promotion(thread);
		}
	}

	return res;
}
/*
 * Routine:	lck_rw_sleep_deadline
 *
 * Deadline-bounded variant of lck_rw_sleep(): drop 'lck', wait on
 * 'event' until 'deadline', then re-take the lock per 'lck_sleep_action'.
 * With LCK_SLEEP_PROMOTED_PRI the thread's rwlock_count is held elevated
 * across the sleep so any RW-lock priority promotion persists.
 *
 * NOTE(review): an earlier variant of this routine (without the
 * PROMOTED_PRI handling) also appears in this file — likely duplicated
 * chunks from different revisions; confirm only one is compiled.
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;
	thread_t	thread = current_thread();

	/* Reject any bits outside the defined sleep-action set. */
	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/* Keep any RW-lock priority promotion alive while blocked. */
		thread->rwlock_count++;
	}

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		/* Remember the held mode so it can be re-taken the same way. */
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	} else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		/* Wait was not armed, but the caller wanted the lock dropped. */
		(void)lck_rw_done(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/* Old value 1 means this balanced the last outstanding hold. */
		if ((thread->rwlock_count-- == 1 /* field now 0 */) &&
		    (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */

			/* Only if the caller wanted the lck_rw_t returned unlocked should we drop to 0 */
			assert(lck_sleep_action & LCK_SLEEP_UNLOCK);

			lck_rw_clear_promotion(thread);
		}
	}

	return res;
}
/*
 * Scan the PCB list for any live PCB whose local address matches the
 * given interface address (and whose last outbound interface is the
 * address's interface).  Returns 1 on a match, 0 otherwise.
 */
__private_extern__ uint32_t
inpcb_find_anypcb_byaddr(struct ifaddr *ifa, struct inpcbinfo *pcbinfo)
{
	struct inpcb *pcb;
	inp_gen_t gen = pcbinfo->ipi_gencnt;
	uint32_t found = 0;

	/* Only IPv4/IPv6 interface addresses can match a PCB. */
	if ((ifa->ifa_addr->sa_family != AF_INET) &&
	    (ifa->ifa_addr->sa_family != AF_INET6))
		return (0);

	lck_rw_lock_shared(pcbinfo->ipi_lock);
	for (pcb = LIST_FIRST(pcbinfo->ipi_listhead); pcb != NULL;
	    pcb = LIST_NEXT(pcb, inp_list)) {
		struct socket *so;
		int af;

		/* Skip PCBs created after our snapshot, dead, or socketless. */
		if (pcb->inp_gencnt > gen ||
		    pcb->inp_state == INPCB_STATE_DEAD ||
		    pcb->inp_socket == NULL)
			continue;

		so = pcb->inp_socket;
		af = SOCK_DOM(so);
		if (af != ifa->ifa_addr->sa_family)
			continue;
		if (pcb->inp_last_outifp != ifa->ifa_ifp)
			continue;

		if (af == AF_INET &&
		    pcb->inp_laddr.s_addr ==
		    (satosin(ifa->ifa_addr))->sin_addr.s_addr) {
			found = 1;
			break;
		}
		if (af == AF_INET6 &&
		    IN6_ARE_ADDR_EQUAL(IFA_IN6(ifa), &pcb->in6p_laddr)) {
			found = 1;
			break;
		}
	}
	lck_rw_done(pcbinfo->ipi_lock);
	return (found);
}
/*
 * Outer subroutine:
 * Connect from a socket to a specified address.
 * Both address and port must be specified in argument sin.
 * If don't have a local address for this socket yet,
 * then pick one.
 *
 * Returns 0 on success; EADDRINUSE if an identical 5-tuple already
 * exists; otherwise the error from in6_pcbladdr()/in6_pcbbind().
 */
int
in6_pcbconnect(
	struct inpcb *inp,
	struct sockaddr *nam,
	struct proc *p)
{
	struct in6_addr addr6;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
	struct inpcb *pcb;
	int error;
	unsigned int outif = 0;

	/*
	 * Call inner routine, to assign local interface address.
	 * in6_pcbladdr() may automatically fill in sin6_scope_id.
	 */
	if ((error = in6_pcbladdr(inp, nam, &addr6, &outif)) != 0)
		return(error);

	/*
	 * Drop the socket lock around the duplicate-connection lookup;
	 * NOTE(review): presumably in6_pcblookup_hash() takes the pcbinfo
	 * lock internally — confirm against its definition.
	 */
	socket_unlock(inp->inp_socket, 0);
	pcb = in6_pcblookup_hash(inp->inp_pcbinfo, &sin6->sin6_addr,
	    sin6->sin6_port,
	    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
	    ? &addr6 : &inp->in6p_laddr,
	    inp->inp_lport, 0, NULL);
	socket_lock(inp->inp_socket, 0);
	if (pcb != NULL) {
		/* 5-tuple already in use: release the lookup reference. */
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return (EADDRINUSE);
	}
	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
		if (inp->inp_lport == 0) {
			/* No local port yet: bind to an ephemeral one. */
			error = in6_pcbbind(inp, (struct sockaddr *)0, p);
			if (error)
				return (error);
		}
		inp->in6p_laddr = addr6;
		inp->in6p_last_outif = outif;
	}
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		/*lock inversion issue, mostly with udp multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	/* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
	inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
	if (inp->in6p_flags & IN6P_AUTOFLOWLABEL)
		inp->in6p_flowinfo |=
		    (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);
	/* Foreign address/port changed: move the PCB to its new hash chain. */
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	return (0);
}
/* * Default hop limit selection. The precedence is as follows: * 1. Hoplimit value specified via ioctl. * 2. (If the outgoing interface is detected) the current * hop limit of the interface specified by router advertisement. * 3. The system default hoplimit. */ int in6_selecthlim( struct in6pcb *in6p, struct ifnet *ifp) { if (in6p && in6p->in6p_hops >= 0) { return(in6p->in6p_hops); } else { lck_rw_lock_shared(nd_if_rwlock); if (ifp && ifp->if_index < nd_ifinfo_indexlim) { u_int8_t chlim = nd_ifinfo[ifp->if_index].chlim; lck_rw_done(nd_if_rwlock); return (chlim); } else { lck_rw_done(nd_if_rwlock); return(ip6_defhlim); } } }
/*
 * Release an RW lock that the caller asserts is held in exclusive mode.
 * lck_rw_done() reports the mode the lock was actually held in; a
 * mismatch indicates a lock-discipline bug, so panic.
 */
void
lck_rw_unlock_exclusive(lck_rw_t *lck)
{
	lck_rw_type_t	held_mode;

	held_mode = lck_rw_done(lck);
	if (held_mode != LCK_RW_TYPE_EXCLUSIVE)
		panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n",
		    held_mode);
}
/*
 * Release an RW lock that the caller asserts is held in shared mode.
 * lck_rw_done() reports the mode the lock was actually held in; a
 * mismatch indicates a lock-discipline bug, so panic.
 *
 * Fix: the panic message previously identified itself as
 * "lck_rw_unlock()", obscuring which routine tripped it; name this
 * routine explicitly, matching lck_rw_unlock_exclusive().
 */
void
lck_rw_unlock_shared(lck_rw_t *lck)
{
	lck_rw_type_t	ret;

	ret = lck_rw_done(lck);

	if (ret != LCK_RW_TYPE_SHARED)
		panic("lck_rw_unlock_shared(): lock held in mode: %d\n", ret);
}
/*
 * Count the live, opportunistic PCBs whose last outbound interface is
 * 'ifindex'.  If INPCB_OPPORTUNISTIC_SETCMD is set in 'flags', also
 * suspend (INPCB_OPPORTUNISTIC_THROTTLEON) or resume each matching
 * connected socket and post the corresponding socket event.
 * Returns the number of matching PCBs.
 */
__private_extern__ uint32_t
inpcb_count_opportunistic(unsigned int ifindex, struct inpcbinfo *pcbinfo,
    u_int32_t flags)
{
	uint32_t opportunistic = 0;
	struct inpcb *inp;
	inp_gen_t gencnt;

	lck_rw_lock_shared(pcbinfo->ipi_lock);
	/* Snapshot the generation count: ignore PCBs added after this. */
	gencnt = pcbinfo->ipi_gencnt;
	for (inp = LIST_FIRST(pcbinfo->ipi_listhead);
	    inp != NULL; inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD &&
		    inp->inp_socket != NULL &&
		    so_get_opportunistic(inp->inp_socket) &&
		    inp->inp_last_outifp != NULL &&
		    ifindex == inp->inp_last_outifp->if_index) {
			opportunistic++;
			struct socket *so = inp->inp_socket;
			if ((flags & INPCB_OPPORTUNISTIC_SETCMD) &&
			    (so->so_state & SS_ISCONNECTED)) {
				/* Per-socket lock taken inside the shared rw lock. */
				socket_lock(so, 1);
				if (flags & INPCB_OPPORTUNISTIC_THROTTLEON) {
					so->so_flags |= SOF_SUSPENDED;
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_SUSPEND));
				} else {
					so->so_flags &= ~(SOF_SUSPENDED);
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_RESUME));
				}
				SOTHROTTLELOG(("throttle[%d]: so 0x%llx "
				    "[%d,%d] %s\n", so->last_pid,
				    (uint64_t)VM_KERNEL_ADDRPERM(so),
				    SOCK_DOM(so), SOCK_TYPE(so),
				    (so->so_flags & SOF_SUSPENDED) ?
				    "SUSPENDED" : "RESUMED"));
				socket_unlock(so, 1);
			}
		}
	}

	lck_rw_done(pcbinfo->ipi_lock);

	return (opportunistic);
}
/*
 * Mark, in 'bitfield' (one bit per port number), every local port that
 * is in use by a live PCB whose last outbound interface matches
 * 'ifindex' (0 matches any interface, as does a PCB with no recorded
 * outbound interface).
 */
__private_extern__ void
inpcb_get_ports_used(unsigned int ifindex, uint8_t *bitfield,
    struct inpcbinfo *pcbinfo)
{
	struct inpcb *pcb;
	inp_gen_t gen;

	lck_rw_lock_shared(pcbinfo->mtx);
	/* Snapshot the generation count: ignore PCBs added after this. */
	gen = pcbinfo->ipi_gencnt;
	for (pcb = LIST_FIRST(pcbinfo->listhead); pcb != NULL;
	    pcb = LIST_NEXT(pcb, inp_list)) {
		uint16_t lport;

		if (pcb->inp_gencnt > gen ||
		    pcb->inp_state == INPCB_STATE_DEAD)
			continue;
		/* Interface filter: a specific ifindex must match. */
		if (ifindex != 0 && pcb->inp_last_outifp != NULL &&
		    ifindex != pcb->inp_last_outifp->if_index)
			continue;
		lport = ntohs(pcb->inp_lport);
		bitfield[lport >> 3] |= 1 << (lport & 0x7);
	}
	lck_rw_done(pcbinfo->mtx);
}
static int pflog_clone_destroy(struct ifnet *ifp) { struct pflog_softc *pflogif = ifp->if_softc; lck_rw_lock_shared(pf_perim_lock); lck_mtx_lock(pf_lock); pflogifs[pflogif->sc_unit] = NULL; LIST_REMOVE(pflogif, sc_list); lck_mtx_unlock(pf_lock); lck_rw_done(pf_perim_lock); /* bpfdetach() is taken care of as part of interface detach */ (void) ifnet_detach(ifp); return 0; }
/*
 * Disconnect an IPv6 PCB: clear the foreign address/port and flow label,
 * rehash the PCB, and detach it if the socket no longer has any file
 * descriptor references.
 */
void
in6_pcbdisconnect(
	struct inpcb *inp)
{
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		/*lock inversion issue, mostly with udp multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	bzero((caddr_t)&inp->in6p_faddr, sizeof(inp->in6p_faddr));
	inp->inp_fport = 0;
	/* clear flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
	inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
	/* Foreign address changed: move the PCB to its new hash chain. */
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	if (inp->inp_socket->so_state & SS_NOFDREF)
		in6_pcbdetach(inp);
}
static int pflog_remove(struct ifnet *ifp) { int error = 0; struct pflog_softc *pflogif = NULL; lck_rw_lock_shared(pf_perim_lock); lck_mtx_lock(pf_lock); pflogif = ifp->if_softc; if (pflogif == NULL || (pflogif->sc_flags & IFPFLF_DETACHING) != 0) { error = EINVAL; goto done; } pflogif->sc_flags |= IFPFLF_DETACHING; LIST_REMOVE(pflogif, sc_list); done: lck_mtx_unlock(pf_lock); lck_rw_done(pf_perim_lock); return error; }
/*
 * Bind an IPv6 PCB to the local address/port in 'nam' (or pick an
 * ephemeral port if none is given).  Performs privilege and
 * address/port-collision checks under the pcbinfo lock; every error
 * path drops that lock and re-takes the socket lock before returning.
 */
int
in6_pcbbind(
	struct inpcb *inp,
	struct sockaddr *nam,
	struct proc *p)
{
	struct socket *so = inp->inp_socket;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)NULL;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);

	if (!in6_ifaddrs) /* XXX broken! */
		return (EADDRNOTAVAIL);
	/* Reject a double bind: already have a port or local address. */
	if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		return(EINVAL);
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = 1;
	socket_unlock(so, 0); /* keep reference */
	lck_rw_lock_exclusive(pcbinfo->mtx);
	if (nam) {
		sin6 = (struct sockaddr_in6 *)nam;
		if (nam->sa_len != sizeof(*sin6)) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return(EINVAL);
		}
		/*
		 * family check.
		 */
		if (nam->sa_family != AF_INET6) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return(EAFNOSUPPORT);
		}

		/* KAME hack: embed scopeid */
		if (in6_embedscope(&sin6->sin6_addr, sin6, inp, NULL) != 0) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return EINVAL;
		}
		/* this must be cleared for ifa_ifwithaddr() */
		sin6->sin6_scope_id = 0;

		lport = sin6->sin6_port;
		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow compepte duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			/* A specific unicast address must exist locally. */
			struct ifaddr *ia = NULL;

			sin6->sin6_port = 0;	/* yech... */
			if ((ia = ifa_ifwithaddr((struct sockaddr *)sin6)) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRNOTAVAIL);
			}

			/*
			 * XXX: bind to an anycast address might accidentally
			 * cause sending a packet with anycast source address.
			 * We should allow to bind to a deprecated address, since
			 * the application dare to use it.
			 */
			if (ia &&
			    ((struct in6_ifaddr *)ia)->ia6_flags &
			    (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|IN6_IFF_DETACHED)) {
				ifafree(ia);
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRNOTAVAIL);
			}
			ifafree(ia);
			ia = NULL;
		}
		if (lport) {
			struct inpcb *t;

			/* GROSS */
			/* Reserved ports require privilege. */
			if (ntohs(lport) < IPV6PORT_RESERVED && p &&
			    ((so->so_state & SS_PRIV) == 0)) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EACCES);
			}

			/* Cross-uid reuse checks for non-multicast binds. */
			if (so->so_uid &&
			    !IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				t = in6_pcblookup_local_and_cleanup(pcbinfo,
				    &sin6->sin6_addr, lport,
				    INPLOOKUP_WILDCARD);
				if (t &&
				    (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
				     !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) ||
				     (t->inp_socket->so_options & SO_REUSEPORT) == 0) &&
				     (so->so_uid != t->inp_socket->so_uid) &&
				     ((t->inp_socket->so_flags & SOF_REUSESHAREUID) == 0)) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
				/* A v6-any bind also collides with v4 binds. */
				if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
				    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
					struct sockaddr_in sin;

					in6_sin6_2_sin(&sin, sin6);
					t = in_pcblookup_local_and_cleanup(
					    pcbinfo, sin.sin_addr, lport,
					    INPLOOKUP_WILDCARD);
					if (t &&
					    (t->inp_socket->so_options & SO_REUSEPORT) == 0 &&
					    (so->so_uid != t->inp_socket->so_uid) &&
					    (ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
					     INP_SOCKAF(so) == INP_SOCKAF(t->inp_socket))) {
						lck_rw_done(pcbinfo->mtx);
						socket_lock(so, 0);
						return (EADDRINUSE);
					}
				}
			}
			/* General collision check honoring reuse options. */
			t = in6_pcblookup_local_and_cleanup(pcbinfo,
			    &sin6->sin6_addr, lport, wild);
			if (t && (reuseport & t->inp_socket->so_options) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRINUSE);
			}
			if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
			    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				struct sockaddr_in sin;

				in6_sin6_2_sin(&sin, sin6);
				t = in_pcblookup_local_and_cleanup(pcbinfo,
				    sin.sin_addr, lport, wild);
				if (t && (reuseport & t->inp_socket->so_options) == 0 &&
				    (ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
				     INP_SOCKAF(so) == INP_SOCKAF(t->inp_socket))) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
			}
		}
		inp->in6p_laddr = sin6->sin6_addr;
	}
	socket_lock(so, 0);
	if (lport == 0) {
		/* No port requested: pick one (pcbinfo lock already held). */
		int e;
		if ((e = in6_pcbsetport(&inp->in6p_laddr, inp, p, 1)) != 0) {
			lck_rw_done(pcbinfo->mtx);
			return(e);
		}
	} else {
		inp->inp_lport = lport;
		if (in_pcbinshash(inp, 1) != 0) {
			/* Hash insertion failed: undo the bind. */
			inp->in6p_laddr = in6addr_any;
			inp->inp_lport = 0;
			lck_rw_done(pcbinfo->mtx);
			return (EAGAIN);
		}
	}
	lck_rw_done(pcbinfo->mtx);
	sflt_notify(so, sock_evt_bound, NULL);
	return(0);
}
/*
 * Create a pflog clone interface for 'unit': allocate the softc,
 * allocate and attach an ifnet, attach bpf, and finally publish the
 * interface in pflogifs[] under the pf locks.  IFPFLF_DETACHING is set
 * during construction and only cleared once the interface is published.
 * Returns 0 or an errno.
 */
static int
pflog_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params)
{
	struct pflog_softc *pflogif;
	struct ifnet_init_eparams pf_init;
	int error = 0;

	if (unit >= PFLOGIFS_MAX) {
		/* Either the interface cloner or our initializer is broken */
		panic("%s: unit (%d) exceeds max (%d)", __func__, unit,
		    PFLOGIFS_MAX);
		/* NOTREACHED */
	}

	if ((pflogif = if_clone_softc_allocate(&pflog_cloner)) == NULL) {
		error = ENOMEM;
		goto done;
	}

	/* Describe the new interface to ifnet_allocate_extended(). */
	bzero(&pf_init, sizeof (pf_init));
	pf_init.ver = IFNET_INIT_CURRENT_VERSION;
	pf_init.len = sizeof (pf_init);
	pf_init.flags = IFNET_INIT_LEGACY;
	pf_init.name = ifc->ifc_name;
	pf_init.unit = unit;
	pf_init.type = IFT_PFLOG;
	pf_init.family = IFNET_FAMILY_LOOPBACK;
	pf_init.output = pflogoutput;
	pf_init.demux = pflogdemux;
	pf_init.add_proto = pflogaddproto;
	pf_init.del_proto = pflogdelproto;
	pf_init.softc = pflogif;
	pf_init.ioctl = pflogioctl;
	pf_init.detach = pflogfree;

	bzero(pflogif, sizeof (*pflogif));
	pflogif->sc_unit = unit;
	/* Not visible to pf until construction completes below. */
	pflogif->sc_flags |= IFPFLF_DETACHING;

	error = ifnet_allocate_extended(&pf_init, &pflogif->sc_if);
	if (error != 0) {
		printf("%s: ifnet_allocate failed - %d\n", __func__, error);
		if_clone_softc_deallocate(&pflog_cloner, pflogif);
		goto done;
	}

	ifnet_set_mtu(pflogif->sc_if, PFLOGMTU);
	ifnet_set_flags(pflogif->sc_if, IFF_UP, IFF_UP);

	error = ifnet_attach(pflogif->sc_if, NULL);
	if (error != 0) {
		printf("%s: ifnet_attach failed - %d\n", __func__, error);
		ifnet_release(pflogif->sc_if);
		if_clone_softc_deallocate(&pflog_cloner, pflogif);
		goto done;
	}

#if NBPFILTER > 0
	bpfattach(pflogif->sc_if, DLT_PFLOG, PFLOG_HDRLEN);
#endif

	/* Publish the fully-constructed interface to pf. */
	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	LIST_INSERT_HEAD(&pflogif_list, pflogif, sc_list);
	pflogifs[unit] = pflogif->sc_if;
	pflogif->sc_flags &= ~IFPFLF_DETACHING;
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

done:
	return (error);
}
/*
 * Unlock a webdavnode
 *
 * Fix: clear the node's lock-state bookkeeping *before* dropping the
 * rwlock.  Writing pt_lockState after lck_rw_done() races with another
 * thread that acquires the lock (and sets pt_lockState) in the window
 * between the release and the store, which would then be clobbered.
 */
__private_extern__ void webdav_unlock(struct webdavnode *pt)
{
	pt->pt_lockState = 0;
	lck_rw_done(&pt->pt_rwlock);
}
/*
 * Select a source IPv6 address for a packet destined to 'dstsock',
 * honoring (in order) an explicitly requested source in the packet
 * options, an address already bound to the socket, and finally a
 * rule-based scan of all configured IPv6 addresses.  On success the
 * chosen address is copied into *src_storage (or the bound address is
 * returned directly) and, if 'ifpp' is non-NULL, the outgoing interface
 * (with a reference from in6_selectif()) is returned through it.  On
 * failure returns NULL with *errorp set.
 *
 * NOTE(review): REPLACE()/NEXTSRC()/BREAK() are macros defined elsewhere;
 * given the replace:/next:/out: labels below, they presumably jump to
 * those labels — confirm against their definitions.
 */
struct in6_addr *
in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
    struct inpcb *inp, struct route_in6 *ro,
    struct ifnet **ifpp, struct in6_addr *src_storage, unsigned int ifscope,
    int *errorp)
{
	struct in6_addr dst;
	struct ifnet *ifp = NULL;
	struct in6_ifaddr *ia = NULL, *ia_best = NULL;
	struct in6_pktinfo *pi = NULL;
	int dst_scope = -1, best_scope = -1, best_matchlen = -1;
	struct in6_addrpolicy *dst_policy = NULL, *best_policy = NULL;
	u_int32_t odstzone;
	int prefer_tempaddr;
	struct ip6_moptions *mopts;
	struct timeval timenow;
	unsigned int nocell;
	boolean_t islocal = FALSE;

	getmicrotime(&timenow);

	dst = dstsock->sin6_addr; /* make a copy for local operation */
	*errorp = 0;
	if (ifpp != NULL)
		*ifpp = NULL;

	if (inp != NULL) {
		mopts = inp->in6p_moptions;
		/* Socket may be barred from cellular interfaces. */
		nocell = (inp->inp_flags & INP_NO_IFT_CELLULAR) ? 1 : 0;
	} else {
		mopts = NULL;
		nocell = 0;
	}

	/*
	 * If the source address is explicitly specified by the caller,
	 * check if the requested source address is indeed a unicast address
	 * assigned to the node, and can be used as the packet's source
	 * address.  If everything is okay, use the address as source.
	 */
	if (opts && (pi = opts->ip6po_pktinfo) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&pi->ipi6_addr)) {
		struct sockaddr_in6 srcsock;
		struct in6_ifaddr *ia6;

		/* get the outgoing interface */
		if ((*errorp = in6_selectif(dstsock, opts, mopts, ro, ifscope,
		    nocell, &ifp)) != 0) {
			return (NULL);
		}

		/*
		 * determine the appropriate zone id of the source based on
		 * the zone of the destination and the outgoing interface.
		 * If the specified address is ambiguous wrt the scope zone,
		 * the interface must be specified; otherwise, ifa_ifwithaddr()
		 * will fail matching the address.
		 */
		bzero(&srcsock, sizeof(srcsock));
		srcsock.sin6_family = AF_INET6;
		srcsock.sin6_len = sizeof(srcsock);
		srcsock.sin6_addr = pi->ipi6_addr;
		if (ifp) {
			*errorp = in6_setscope(&srcsock.sin6_addr, ifp, NULL);
			if (*errorp != 0) {
				ifnet_release(ifp);
				return (NULL);
			}
		}
		ia6 = (struct in6_ifaddr *)ifa_ifwithaddr(
		    (struct sockaddr *)(&srcsock));
		if (ia6 == NULL) {
			*errorp = EADDRNOTAVAIL;
			if (ifp != NULL)
				ifnet_release(ifp);
			return (NULL);
		}
		IFA_LOCK_SPIN(&ia6->ia_ifa);
		/* Reject anycast/not-ready addresses and forbidden cellular. */
		if ((ia6->ia6_flags & (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) ||
		    (nocell &&
		    (ia6->ia_ifa.ifa_ifp->if_type == IFT_CELLULAR))) {
			IFA_UNLOCK(&ia6->ia_ifa);
			IFA_REMREF(&ia6->ia_ifa);
			*errorp = EADDRNOTAVAIL;
			if (ifp != NULL)
				ifnet_release(ifp);
			return (NULL);
		}

		*src_storage = satosin6(&ia6->ia_addr)->sin6_addr;
		IFA_UNLOCK(&ia6->ia_ifa);
		IFA_REMREF(&ia6->ia_ifa);
		if (ifpp != NULL) {
			/* if ifp is non-NULL, refcnt held in in6_selectif() */
			*ifpp = ifp;
		} else if (ifp != NULL) {
			ifnet_release(ifp);
		}
		return (src_storage);
	}

	/*
	 * Otherwise, if the socket has already bound the source, just use it.
	 */
	if (inp != NULL && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		return (&inp->in6p_laddr);

	/*
	 * If the address is not specified, choose the best one based on
	 * the outgoing interface and the destination address.
	 */

	/* get the outgoing interface */
	if ((*errorp = in6_selectif(dstsock, opts, mopts, ro, ifscope,
	    nocell, &ifp)) != 0)
		return (NULL);

#ifdef DIAGNOSTIC
	if (ifp == NULL) /* this should not happen */
		panic("in6_selectsrc: NULL ifp");
#endif
	*errorp = in6_setscope(&dst, ifp, &odstzone);
	if (*errorp != 0) {
		if (ifp != NULL)
			ifnet_release(ifp);
		return (NULL);
	}
	lck_rw_lock_shared(&in6_ifaddr_rwlock);

	/* Rule-based candidate scan over all configured IPv6 addresses. */
	for (ia = in6_ifaddrs; ia; ia = ia->ia_next) {
		int new_scope = -1, new_matchlen = -1;
		struct in6_addrpolicy *new_policy = NULL;
		u_int32_t srczone, osrczone, dstzone;
		struct in6_addr src;
		struct ifnet *ifp1 = ia->ia_ifp;

		IFA_LOCK(&ia->ia_ifa);
		/*
		 * We'll never take an address that breaks the scope zone
		 * of the destination.  We also skip an address if its zone
		 * does not contain the outgoing interface.
		 * XXX: we should probably use sin6_scope_id here.
		 */
		if (in6_setscope(&dst, ifp1, &dstzone) ||
		    odstzone != dstzone)
			goto next;

		src = ia->ia_addr.sin6_addr;
		if (in6_setscope(&src, ifp, &osrczone) ||
		    in6_setscope(&src, ifp1, &srczone) ||
		    osrczone != srczone)
			goto next;

		/* avoid unusable addresses */
		if ((ia->ia6_flags &
		    (IN6_IFF_NOTREADY | IN6_IFF_ANYCAST | IN6_IFF_DETACHED)))
			goto next;

		if (!ip6_use_deprecated && IFA6_IS_DEPRECATED(ia))
			goto next;

		/* Rule 1: Prefer same address */
		if (IN6_ARE_ADDR_EQUAL(&dst, &ia->ia_addr.sin6_addr))
			BREAK(1); /* there should be no better candidate */

		if (ia_best == NULL)
			REPLACE(0);

		/* Rule 2: Prefer appropriate scope */
		if (dst_scope < 0)
			dst_scope = in6_addrscope(&dst);
		new_scope = in6_addrscope(&ia->ia_addr.sin6_addr);
		if (IN6_ARE_SCOPE_CMP(best_scope, new_scope) < 0) {
			if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0)
				REPLACE(2);
			NEXTSRC(2);
		} else if (IN6_ARE_SCOPE_CMP(new_scope, best_scope) < 0) {
			if (IN6_ARE_SCOPE_CMP(new_scope, dst_scope) < 0)
				NEXTSRC(2);
			REPLACE(2);
		}

		/*
		 * Rule 3: Avoid deprecated addresses.  Note that the case of
		 * !ip6_use_deprecated is already rejected above.
		 */
		if (!IFA6_IS_DEPRECATED(ia_best) && IFA6_IS_DEPRECATED(ia))
			NEXTSRC(3);
		if (IFA6_IS_DEPRECATED(ia_best) && !IFA6_IS_DEPRECATED(ia))
			REPLACE(3);

		/* Rule 4: Prefer home addresses */
		/*
		 * XXX: This is a TODO.  We should probably merge the MIP6
		 * case above.
		 */

		/* Rule 5: Prefer outgoing interface */
		if (ia_best->ia_ifp == ifp && ia->ia_ifp != ifp)
			NEXTSRC(5);
		if (ia_best->ia_ifp != ifp && ia->ia_ifp == ifp)
			REPLACE(5);

		/*
		 * Rule 6: Prefer matching label
		 * Note that best_policy should be non-NULL here.
		 */
		if (dst_policy == NULL)
			dst_policy = in6_addrsel_lookup_policy(dstsock);
		if (dst_policy->label != ADDR_LABEL_NOTAPP) {
			new_policy = in6_addrsel_lookup_policy(&ia->ia_addr);
			if (dst_policy->label == best_policy->label &&
			    dst_policy->label != new_policy->label)
				NEXTSRC(6);
			if (dst_policy->label != best_policy->label &&
			    dst_policy->label == new_policy->label)
				REPLACE(6);
		}

		/*
		 * Rule 7: Prefer public addresses.
		 * We allow users to reverse the logic by configuring
		 * a sysctl variable, so that privacy conscious users can
		 * always prefer temporary addresses.
		 * Don't use temporary addresses for local destinations or
		 * for multicast addresses unless we were passed in an option.
		 */
		if (IN6_IS_ADDR_MULTICAST(&dst) ||
		    in6_matchlen(&ia_best->ia_addr.sin6_addr, &dst) >=
		    in6_mask2len(&ia_best->ia_prefixmask.sin6_addr, NULL))
			islocal = TRUE;
		if (opts == NULL ||
		    opts->ip6po_prefer_tempaddr == IP6PO_TEMPADDR_SYSTEM) {
			prefer_tempaddr = islocal ? 0 : ip6_prefer_tempaddr;
		} else if (opts->ip6po_prefer_tempaddr ==
		    IP6PO_TEMPADDR_NOTPREFER) {
			prefer_tempaddr = 0;
		} else
			prefer_tempaddr = 1;
		if (!(ia_best->ia6_flags & IN6_IFF_TEMPORARY) &&
		    (ia->ia6_flags & IN6_IFF_TEMPORARY)) {
			if (prefer_tempaddr)
				REPLACE(7);
			else
				NEXTSRC(7);
		}
		if ((ia_best->ia6_flags & IN6_IFF_TEMPORARY) &&
		    !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
			if (prefer_tempaddr)
				NEXTSRC(7);
			else
				REPLACE(7);
		}

		/*
		 * Rule 8: prefer addresses on alive interfaces.
		 * This is a KAME specific rule.
		 */
		if ((ia_best->ia_ifp->if_flags & IFF_UP) &&
		    !(ia->ia_ifp->if_flags & IFF_UP))
			NEXTSRC(8);
		if (!(ia_best->ia_ifp->if_flags & IFF_UP) &&
		    (ia->ia_ifp->if_flags & IFF_UP))
			REPLACE(8);

		/*
		 * Rule 14: Use longest matching prefix.
		 * Note: in the address selection draft, this rule is
		 * documented as "Rule 8".  However, since it is also
		 * documented that this rule can be overridden, we assign
		 * a large number so that it is easy to assign smaller numbers
		 * to more preferred rules.
		 */
		new_matchlen = in6_matchlen(&ia->ia_addr.sin6_addr, &dst);
		if (best_matchlen < new_matchlen)
			REPLACE(14);
		if (new_matchlen < best_matchlen)
			NEXTSRC(14);

		/* Rule 15 is reserved. */

		/*
		 * Last resort: just keep the current candidate.
		 * Or, do we need more rules?
		 */
		IFA_UNLOCK(&ia->ia_ifa);
		continue;

replace:
		/* New best candidate: cache its scope/policy/matchlen. */
		best_scope = (new_scope >= 0 ? new_scope :
		    in6_addrscope(&ia->ia_addr.sin6_addr));
		best_policy = (new_policy ? new_policy :
		    in6_addrsel_lookup_policy(&ia->ia_addr));
		best_matchlen = (new_matchlen >= 0 ? new_matchlen :
		    in6_matchlen(&ia->ia_addr.sin6_addr, &dst));
		IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for ia_best */
		IFA_UNLOCK(&ia->ia_ifa);
		if (ia_best != NULL)
			IFA_REMREF(&ia_best->ia_ifa);
		ia_best = ia;
		continue;

next:
		IFA_UNLOCK(&ia->ia_ifa);
		continue;

out:
		/* Definitive winner (Rule 1): stop scanning. */
		IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for ia_best */
		IFA_UNLOCK(&ia->ia_ifa);
		if (ia_best != NULL)
			IFA_REMREF(&ia_best->ia_ifa);
		ia_best = ia;
		break;
	}

	lck_rw_done(&in6_ifaddr_rwlock);

	/* Enforce the no-cellular restriction on the winner, if any. */
	if (nocell && ia_best != NULL &&
	    (ia_best->ia_ifa.ifa_ifp->if_type == IFT_CELLULAR)) {
		IFA_REMREF(&ia_best->ia_ifa);
		ia_best = NULL;
	}

	if ( (ia = ia_best) == NULL) {
		*errorp = EADDRNOTAVAIL;
		if (ifp != NULL)
			ifnet_release(ifp);
		return (NULL);
	}

	IFA_LOCK_SPIN(&ia->ia_ifa);
	*src_storage = satosin6(&ia->ia_addr)->sin6_addr;
	IFA_UNLOCK(&ia->ia_ifa);
	IFA_REMREF(&ia->ia_ifa);
	if (ifpp != NULL) {
		/* if ifp is non-NULL, refcnt held in in6_selectif() */
		*ifpp = ifp;
	} else if (ifp != NULL) {
		ifnet_release(ifp);
	}
	return (src_storage);
}
/*
 * Exported wrapper: release an RW lock held in either shared or
 * exclusive mode, discarding the mode that lck_rw_done() reports.
 */
void
lock_done_EXT(
	lck_rw_t	*lock)
{
	(void) lck_rw_done(lock);
}
/*
 * XXX: this is borrowed from in6_pcbbind(). If possible, we should
 * share this function by all *bsd*...
 *
 * Assign an ephemeral local port to 'inp', scanning the configured
 * port range (high/low/default per INP_HIGHPORT/INP_LOWPORT) for one
 * not already in use, then insert the PCB into the hash.  If 'locked'
 * is 0, the pcbinfo lock is taken here (and dropped on every exit);
 * otherwise the caller already holds it.  Returns 0, EACCES (low-port
 * privilege check failed), or EAGAIN (range exhausted / hash insert
 * failed).
 */
int
in6_pcbsetport(
	__unused struct in6_addr *laddr,
	struct inpcb *inp,
	struct proc *p,
	int locked)
{
	struct socket *so = inp->inp_socket;
	u_int16_t lport = 0, first, last, *lastport;
	int count, error = 0, wild = 0;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	kauth_cred_t cred;
	if (!locked) { /* Make sure we don't run into a deadlock: 4052373 */
		if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(pcbinfo->mtx);
			socket_lock(inp->inp_socket, 0);
		}
	}

	/* XXX: this is redundant when called from in6_pcbbind */
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = INPLOOKUP_WILDCARD;

	inp->inp_flags |= INP_ANONPORT;

	/* Pick the scan range and its shared last-allocated cursor. */
	if (inp->inp_flags & INP_HIGHPORT) {
		first = ipport_hifirstauto;	/* sysctl */
		last  = ipport_hilastauto;
		lastport = &pcbinfo->lasthi;
	} else if (inp->inp_flags & INP_LOWPORT) {
		/* Low ports require the reserved-port privilege. */
		cred = kauth_cred_proc_ref(p);
		error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
		kauth_cred_unref(&cred);
		if (error != 0) {
			if (!locked)
				lck_rw_done(pcbinfo->mtx);
			return error;
		}
		first = ipport_lowfirstauto;	/* 1023 */
		last  = ipport_lowlastauto;	/* 600 */
		lastport = &pcbinfo->lastlow;
	} else {
		first = ipport_firstauto;	/* sysctl */
		last  = ipport_lastauto;
		lastport = &pcbinfo->lastport;
	}
	/*
	 * Simple check to ensure all ports are not used up causing
	 * a deadlock here.
	 *
	 * We split the two cases (up and down) so that the direction
	 * is not being tested on each round of the loop.
	 */
	if (first > last) {
		/*
		 * counting down
		 */
		count = first - last;

		do {
			if (count-- < 0) {	/* completely used? */
				/*
				 * Undo any address bind that may have
				 * occurred above.
				 */
				inp->in6p_laddr = in6addr_any;
				inp->in6p_last_outif = 0;
				if (!locked)
					lck_rw_done(pcbinfo->mtx);
				return (EAGAIN);
			}
			--*lastport;
			/* Wrap the cursor back into [last, first]. */
			if (*lastport > first || *lastport < last)
				*lastport = first;
			lport = htons(*lastport);
		} while (in6_pcblookup_local(pcbinfo, &inp->in6p_laddr, lport,
		    wild));
	} else {
		/*
		 * counting up
		 */
		count = last - first;

		do {
			if (count-- < 0) {	/* completely used? */
				/*
				 * Undo any address bind that may have
				 * occurred above.
				 */
				inp->in6p_laddr = in6addr_any;
				inp->in6p_last_outif = 0;
				if (!locked)
					lck_rw_done(pcbinfo->mtx);
				return (EAGAIN);
			}
			++*lastport;
			/* Wrap the cursor back into [first, last]. */
			if (*lastport < first || *lastport > last)
				*lastport = first;
			lport = htons(*lastport);
		} while (in6_pcblookup_local(pcbinfo, &inp->in6p_laddr, lport,
		    wild));
	}

	inp->inp_lport = lport;
	if (in_pcbinshash(inp, 1) != 0) {
		/* Hash insertion failed: undo the bind entirely. */
		inp->in6p_laddr = in6addr_any;
		inp->inp_lport = 0;
		inp->in6p_last_outif = 0;
		if (!locked)
			lck_rw_done(pcbinfo->mtx);
		return (EAGAIN);
	}

	if (!locked)
		lck_rw_done(pcbinfo->mtx);
	return(0);
}
/*
 * sysctl helper: copy out the PCB list for 'proto' as a sequence of
 * xinpgen / per-PCB records (xinpcb_n + xsocket_n + two xsockbuf_n +
 * xsockstat_n, plus xtcpcb_n for TCP) followed by a trailing xinpgen
 * so userland can detect concurrent changes.  A NULL oldptr is a size
 * probe; a non-NULL newptr is rejected with EPERM.
 */
__private_extern__ int
get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo)
{
	int error = 0;
	int i, n;
	struct inpcb *inp, **inp_list = NULL;
	inp_gen_t gencnt;
	struct xinpgen xig;
	void *buf = NULL;
	/* Per-PCB record size: each sub-record is 64-bit aligned. */
	size_t item_size = ROUNDUP64(sizeof (struct xinpcb_n)) +
	    ROUNDUP64(sizeof (struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
	    ROUNDUP64(sizeof (struct xsockstat_n));

	if (proto == IPPROTO_TCP)
		item_size += ROUNDUP64(sizeof (struct xtcpcb_n));

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		/* Size probe: report a generous estimate (n + n/8 slack). */
		n = pcbinfo->ipi_count;
		req->oldidx = 2 * (sizeof (xig)) + (n + n/8) * item_size;
		goto done;
	}

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = pcbinfo->ipi_gencnt;
	n = pcbinfo->ipi_count;

	bzero(&xig, sizeof (xig));
	xig.xig_len = sizeof (xig);
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof (xig));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK);
	if (buf == NULL) {
		error = ENOMEM;
		goto done;
	}

	inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK);
	if (inp_list == NULL) {
		error = ENOMEM;
		goto done;
	}

	/* First pass: collect the live PCBs within our snapshot. */
	for (inp = pcbinfo->ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	/* Second pass: serialize each collected PCB into 'buf' and copy out. */
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb_n *xi = (struct xinpcb_n *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xi, sizeof (*xi));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof (*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));

			bzero(buf, item_size);

			inpcb_to_xinpcb_n(inp, xi);
			sotoxsocket_n(inp->inp_socket, xso);
			sbtoxsockbuf_n(inp->inp_socket ?
			    &inp->inp_socket->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(inp->inp_socket ?
			    &inp->inp_socket->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(inp->inp_socket, xsostats);
			if (proto == IPPROTO_TCP) {
				struct xtcpcb_n *xt = (struct xtcpcb_n *)
				    ADVANCE64(xsostats, sizeof (*xsostats));

				/*
				 * inp->inp_ppcb, can only be NULL on
				 * an initialization race window.
				 * No need to lock.
				 */
				if (inp->inp_ppcb == NULL)
					continue;

				tcpcb_to_xtcpcb_n((struct tcpcb *)
				    inp->inp_ppcb, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof (xig));
		xig.xig_len = sizeof (xig);
		xig.xig_gen = pcbinfo->ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = pcbinfo->ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof (xig));
	}
done:
	lck_rw_done(pcbinfo->ipi_lock);
	if (inp_list != NULL)
		FREE(inp_list, M_TEMP);
	if (buf != NULL)
		FREE(buf, M_TEMP);
	return (error);
}
/*
 * Legacy lock_t interface: release an RW lock held in either mode.
 * The previously-held mode returned by lck_rw_done() is discarded.
 */
void
lock_done(
	register lock_t * l)
{
	(void) lck_rw_done(l);
}
/*
 * Mark, in 'bitfield' (one bit per port via bit_set()), every local
 * port in use by a live, connected-capable PCB that matches the
 * interface/protocol filters and the INPCB_GET_PORTS_USED_* flags.
 * TCP PCBs in fully closed or TIME_WAIT states are skipped; with
 * ACTIVEONLY, closing states with no unacknowledged outgoing data are
 * skipped as well.
 */
__private_extern__ void
inpcb_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
    bitstr_t *bitfield, struct inpcbinfo *pcbinfo)
{
	struct inpcb *inp;
	struct socket *so;
	inp_gen_t gencnt;
	bool iswildcard, wildcardok, nowakeok;
	bool recvanyifonly, extbgidleok;
	bool activeonly;

	/* Decode the caller's filter flags once, up front. */
	wildcardok = ((flags & INPCB_GET_PORTS_USED_WILDCARDOK) != 0);
	nowakeok = ((flags & INPCB_GET_PORTS_USED_NOWAKEUPOK) != 0);
	recvanyifonly = ((flags & INPCB_GET_PORTS_USED_RECVANYIFONLY) != 0);
	extbgidleok = ((flags & INPCB_GET_PORTS_USED_EXTBGIDLEONLY) != 0);
	activeonly = ((flags & INPCB_GET_PORTS_USED_ACTIVEONLY) != 0);

	lck_rw_lock_shared(pcbinfo->ipi_lock);
	/* Snapshot the generation count: ignore PCBs added after this. */
	gencnt = pcbinfo->ipi_gencnt;

	for (inp = LIST_FIRST(pcbinfo->ipi_listhead); inp;
	    inp = LIST_NEXT(inp, inp_list)) {
		uint16_t port;

		if (inp->inp_gencnt > gencnt ||
		    inp->inp_state == INPCB_STATE_DEAD ||
		    inp->inp_wantcnt == WNT_STOPUSING)
			continue;

		if ((so = inp->inp_socket) == NULL ||
		    (so->so_state & SS_DEFUNCT) ||
		    (so->so_state & SS_ISDISCONNECTED))
			continue;

		/* Protocol filter: PF_UNSPEC matches both families. */
		if (!(protocol == PF_UNSPEC ||
		    (protocol == PF_INET && (inp->inp_vflag & INP_IPV4)) ||
		    (protocol == PF_INET6 && (inp->inp_vflag & INP_IPV6))))
			continue;

		iswildcard = (((inp->inp_vflag & INP_IPV4) &&
		    inp->inp_laddr.s_addr == INADDR_ANY) ||
		    ((inp->inp_vflag & INP_IPV6) &&
		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)));

		if (!wildcardok && iswildcard)
			continue;

		if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
		    !nowakeok)
			continue;

		if (!(inp->inp_flags & INP_RECV_ANYIF) &&
		    recvanyifonly)
			continue;

		if (!(so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) &&
		    extbgidleok)
			continue;

		/* Non-wildcard PCBs must match the requested interface. */
		if (!iswildcard &&
		    !(ifindex == 0 || inp->inp_last_outifp == NULL ||
		    ifindex == inp->inp_last_outifp->if_index))
			continue;

		if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP &&
		    so->so_state & SS_CANTRCVMORE)
			continue;

		if (SOCK_PROTO(inp->inp_socket) == IPPROTO_TCP) {
			struct tcpcb *tp = sototcpcb(inp->inp_socket);

			/*
			 * Workaround race where inp_ppcb is NULL during
			 * socket initialization
			 */
			if (tp == NULL)
				continue;

			switch (tp->t_state) {
			case TCPS_CLOSED:
				continue;
				/* NOT REACHED */
			case TCPS_LISTEN:
			case TCPS_SYN_SENT:
			case TCPS_SYN_RECEIVED:
			case TCPS_ESTABLISHED:
			case TCPS_FIN_WAIT_1:
				/*
				 * Note: FIN_WAIT_1 is an active state
				 * because we need our FIN to be
				 * acknowledged
				 */
				break;
			case TCPS_CLOSE_WAIT:
			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
			case TCPS_FIN_WAIT_2:
				/*
				 * In the closing states, the connection
				 * is not idle when there is outgoing
				 * data having to be acknowledged
				 */
				if (activeonly && so->so_snd.sb_cc == 0)
					continue;
				break;
			case TCPS_TIME_WAIT:
				continue;
				/* NOT REACHED */
			}
		}
		/*
		 * Final safeguard to exclude unspecified local port
		 */
		port = ntohs(inp->inp_lport);
		if (port == 0)
			continue;
		bit_set(bitfield, port);
	}
	lck_rw_done(pcbinfo->ipi_lock);
}