static int
lpx_datagram_user_disconnect(struct socket *so)
{
	// int s;
	struct lpxpcb *lpxp = sotolpxpcb(so);

	if (lpx_nullhost(lpxp->lpxp_faddr))
		return (ENOTCONN);

	// s = splnet();
	Lpx_PCB_disconnect(lpxp);
	// splx(s);
	soisdisconnected(so);

	/*
	 * Drop the domain mutex before sock_retain(), which acquires
	 * the socket lock itself.
	 */
	lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);

	/* Hold a retain count; lpx_datagram_user_detach() will release it. */
	sock_retain((socket_t)so);

	lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);

	lpxp->lpxp_flags |= LPXP_NEEDRELEASE;

	return (0);
}
static int
lpx_datagram_user_detach(struct socket *so)
{
	// int s;
	struct lpxpcb *lpxp = sotolpxpcb(so);

	if (lpxp == NULL)
		return (ENOTCONN);

	// s = splnet();
	Lpx_PCB_detach(lpxp);
	// sofree(so);
	so->so_flags |= SOF_PCBCLEARING;
	Lpx_PCB_dispense(lpxp);

	if (lpxp->lpxp_flags & LPXP_NEEDRELEASE) {
		/*
		 * Drop the domain mutex before sock_release(), which
		 * acquires the socket lock itself.
		 */
		lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_OWNED);
		lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx);

		/* Balance the sock_retain() taken in lpx_datagram_user_disconnect(). */
		sock_release((socket_t)so);

		lck_mtx_assert(so->so_proto->pr_domain->dom_mtx, LCK_MTX_ASSERT_NOTOWNED);
		lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
	}
	// splx(s);

	return (0);
}
void
lpx_stream_fasttimo(void)
{
	register struct lpxpcb *lpxp;
	register struct stream_pcb *cb;
	// int s = splnet();

	// DEBUG_PRINT(DEBUG_MASK_TIMER_INFO, ("smp_fasttimo\n"));

	lck_mtx_assert(lpxdomain.dom_mtx, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(lpxdomain.dom_mtx);

	for (lpxp = lpx_stream_pcb.lpxp_next; lpxp != &lpx_stream_pcb;
	    lpxp = lpxp->lpxp_next) {
		if ((cb = lpxpcbtostreampcb(lpxp)) != NULL &&
		    (cb->s_flags & SF_DELACK)) {
			cb->s_flags &= ~SF_DELACK;
			cb->s_flags |= SF_ACKNOW;
			lpx_stream_stat.smps_delack++;
			lpx_stream_output(cb, (struct mbuf *)NULL);
		}
	}

	lck_mtx_assert(lpxdomain.dom_mtx, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(lpxdomain.dom_mtx);
	// splx(s);
}
/* setup interrupt sample buffers */
int
kperf_init(void)
{
	static lck_grp_attr_t lck_grp_attr;
	unsigned ncpus = 0;
	int err;

	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	if (kperf_initted) {
		return 0;
	}

	lck_grp_attr_setdefault(&lck_grp_attr);
	lck_grp_init(&kperf_lck_grp, "kperf", &lck_grp_attr);

	ncpus = machine_info.logical_cpu_max;

	/* create buffers to remember which threads don't need to be sampled by PET */
	kperf_thread_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_thread_on_cpus),
	    VM_KERN_MEMORY_DIAG);
	if (kperf_thread_on_cpus == NULL) {
		err = ENOMEM;
		goto error;
	}
	bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));

	/* create the interrupt buffers */
	intr_samplec = ncpus;
	intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev),
	    VM_KERN_MEMORY_DIAG);
	if (intr_samplev == NULL) {
		err = ENOMEM;
		goto error;
	}
	bzero(intr_samplev, ncpus * sizeof(*intr_samplev));

	/* create kdebug trigger filter buffers */
	if ((err = kperf_kdebug_init())) {
		goto error;
	}

	kperf_initted = TRUE;

	return 0;

error:
	if (intr_samplev) {
		kfree(intr_samplev, ncpus * sizeof(*intr_samplev));
		intr_samplev = NULL;
		intr_samplec = 0;
	}

	if (kperf_thread_on_cpus) {
		kfree(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
		kperf_thread_on_cpus = NULL;
	}

	return err;
}
void
concat_domain(struct domain *dp)
{
	lck_mtx_assert(domain_proto_mtx, LCK_MTX_ASSERT_OWNED);
	dp->dom_next = domains;
	domains = dp;
}
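/*
 * Hedged sketch (not from the source): concat_domain() asserts that the
 * caller already holds domain_proto_mtx, so a hypothetical registration
 * path would bracket it like this. "my_domain" and "my_domain_register"
 * are illustrative only.
 */
extern struct domain my_domain;	/* assumed to be defined elsewhere */

static void
my_domain_register(void)
{
	lck_mtx_lock(domain_proto_mtx);
	concat_domain(&my_domain);
	lck_mtx_unlock(domain_proto_mtx);
}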
/*
 * Free an arp entry.
 */
static void
arptfree(struct llinfo_arp *la)
{
	struct rtentry *rt = la->la_rt;
	struct sockaddr_dl *sdl;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (rt->rt_refcnt > 0 && (sdl = SDL(rt->rt_gateway)) &&
	    sdl->sdl_family == AF_LINK) {
		sdl->sdl_alen = 0;
		la->la_asked = 0;
		rt->rt_flags &= ~RTF_REJECT;
		RT_UNLOCK(rt);
	} else {
		/*
		 * Safe to drop rt_lock and use rt_key, since holding
		 * rnh_lock here prevents another thread from calling
		 * rt_setgate() on this route.
		 */
		RT_UNLOCK(rt);
		rtrequest_locked(RTM_DELETE, rt_key(rt), NULL, rt_mask(rt),
		    0, NULL);
	}
}
static void
ktrace_set_owning_proc(proc_t p)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
	assert(p);

	if (ktrace_state != KTRACE_STATE_FG) {
		if (proc_uniqueid(p) == ktrace_bg_unique_id) {
			ktrace_state = KTRACE_STATE_BG;
		} else {
			if (ktrace_state == KTRACE_STATE_BG) {
				if (ktrace_active_mask & KTRACE_KPERF) {
					kperf_reset();
				}
				if (ktrace_active_mask & KTRACE_KDEBUG) {
					kdebug_reset();
				}
				ktrace_active_mask = 0;
			}
			ktrace_state = KTRACE_STATE_FG;
			should_notify_on_init = FALSE;
		}
	}

	ktrace_owning_unique_id = proc_uniqueid(p);
	ktrace_owning_pid = proc_pid(p);
	strlcpy(ktrace_last_owner_execname, proc_name_address(p),
	    sizeof(ktrace_last_owner_execname));
}
/* If an owning process has exited, reset the ownership. */
static void
ktrace_ownership_maintenance(void)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	/* do nothing if ktrace is not owned */
	if (ktrace_owning_unique_id == 0) {
		return;
	}

	/* reset ownership if process cannot be found */
	proc_t owning_proc = proc_find(ktrace_owning_pid);
	if (owning_proc != NULL) {
		/* make sure the pid was not recycled */
		if (proc_uniqueid(owning_proc) != ktrace_owning_unique_id) {
			ktrace_release_ownership();
		}
		proc_rele(owning_proc);
	} else {
		ktrace_release_ownership();
	}
}
int
cs_allow_invalid(struct proc *p)
{
#if MACH_ASSERT
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
#endif
#if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE
	/*
	 * There needs to be a MAC policy to implement this hook, or else the
	 * kill bits will be cleared here every time. If we have
	 * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy
	 * implementing the hook.
	 */
	if (0 != mac_proc_check_run_cs_invalid(p)) {
		if (cs_debug)
			printf("CODE SIGNING: cs_allow_invalid() "
			    "not allowed: pid %d\n", p->p_pid);
		return 0;
	}
	if (cs_debug)
		printf("CODE SIGNING: cs_allow_invalid() "
		    "allowed: pid %d\n", p->p_pid);
	proc_lock(p);
	p->p_csflags &= ~(CS_KILL | CS_HARD);
	proc_unlock(p);
	vm_map_switch_protect(get_task_map(p->task), FALSE);
#endif
	return (p->p_csflags & (CS_KILL | CS_HARD)) == 0;
}
int
altq_priq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct ifclassq *ifq = NULL;
	struct priq_if *pif;
	struct priq_classstats stats;
	int error = 0;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((unsigned)*nbytes < sizeof (stats))
		return (EINVAL);

	if ((pif = altq_lookup(a->ifname, ALTQT_PRIQ)) == NULL)
		return (EBADF);

	ifq = pif->pif_ifq;
	IFCQ_LOCK_ASSERT_HELD(ifq);	/* lock held by altq_lookup */
	error = priq_get_class_stats(pif, a->qid, &stats);
	IFCQ_UNLOCK(ifq);
	if (error != 0)
		return (error);

	if ((error = copyout((caddr_t)&stats,
	    (user_addr_t)(uintptr_t)ubuf, sizeof (stats))) != 0)
		return (error);

	*nbytes = sizeof (stats);

	return (0);
}
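/*
 * Hedged sketch (not from the source): *nbytes is an in/out parameter --
 * the caller passes the buffer size and reads back how many bytes were
 * filled in. The wrapper name "my_priq_stats" and the pf_lock bracketing
 * here are illustrative assumptions.
 */
static int
my_priq_stats(struct pf_altq *a, void *ubuf, int buflen)
{
	int nbytes = buflen;
	int error;

	lck_mtx_lock(pf_lock);
	error = altq_priq_getqstats(a, ubuf, &nbytes);
	lck_mtx_unlock(pf_lock);

	return (error);
}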
/* -----------------------------------------------------------------------------
called from l2tp_rfc when xmit is full
----------------------------------------------------------------------------- */
void l2tp_wan_xmit_full(struct ppp_link *link)
{
	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	link->lk_flags |= SC_XMIT_FULL;
}
/* -----------------------------------------------------------------------------
called from l2tp_rfc when data are present
----------------------------------------------------------------------------- */
int l2tp_input(void *data, mbuf_t m, struct sockaddr *from, int more)
{
	struct socket *so = (struct socket *)data;
	int err;

	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	if (so->so_tpcb) {
		// we are hooked to ppp
		return l2tp_wan_input(ALIGNED_CAST(struct ppp_link *)so->so_tpcb, m);	// Wcast-align fix - we malloc so->so_tpcb
	}

	if (m) {
		if (from == 0) {
			// no from address, just free the buffer
			mbuf_freem(m);
			return 1;
		}

		if (sbappendaddr(&so->so_rcv, from, (struct mbuf *)m, 0, &err) == 0) {
			//IOLog("l2tp_input no space, so = %p\n", so);
			return 1;
		}
	}

	if (!more)
		sorwakeup(so);

	return 0;
}
/* -----------------------------------------------------------------------------
----------------------------------------------------------------------------- */
void l2tp_event(void *data, u_int32_t event, void *msg)
{
	struct socket *so = (struct socket *)data;

	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	if (so->so_tpcb) {
		switch (event) {
		case L2TP_EVT_XMIT_FULL:
			l2tp_wan_xmit_full(ALIGNED_CAST(struct ppp_link *)so->so_tpcb);	// Wcast-align fix - we malloc so->so_tpcb
			break;
		case L2TP_EVT_XMIT_OK:
			l2tp_wan_xmit_ok(ALIGNED_CAST(struct ppp_link *)so->so_tpcb);
			break;
		case L2TP_EVT_INPUTERROR:
			l2tp_wan_input_error(ALIGNED_CAST(struct ppp_link *)so->so_tpcb);
			break;
		}
	} else {
		switch (event) {
		case L2TP_EVT_RELIABLE_FAILED:
			/* wake up the client with no data */
			socantrcvmore(so);
			break;
		}
	}
}
int
ktrace_set_owning_pid(int pid)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	/* allow user space to successfully unset owning pid */
	if (pid == -1) {
		ktrace_set_invalid_owning_pid();
		return 0;
	}

	/* use ktrace_reset or ktrace_release_ownership, not this */
	if (pid == 0) {
		ktrace_set_invalid_owning_pid();
		return EINVAL;
	}

	proc_t p = proc_find(pid);
	if (!p) {
		ktrace_set_invalid_owning_pid();
		return ESRCH;
	}

	ktrace_keep_ownership_on_reset = TRUE;
	ktrace_set_owning_proc(p);

	proc_rele(p);
	return 0;
}
static void
ktrace_release_ownership(void)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	ktrace_owning_unique_id = 0;
	ktrace_owning_pid = 0;
}
int
ktrace_get_owning_pid(void)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	ktrace_ownership_maintenance();
	return ktrace_owning_pid;
}
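/*
 * Hedged sketch (not from the source): every ktrace ownership routine
 * above asserts that ktrace_lock is held, so a hypothetical caller
 * brackets the calls like this. "my_query_owner" is illustrative only.
 */
static int
my_query_owner(void)
{
	int pid;

	lck_mtx_lock(ktrace_lock);
	pid = ktrace_get_owning_pid();
	lck_mtx_unlock(ktrace_lock);

	return pid;
}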
/*
 * Get rid of old routes. When draining, this deletes everything, even when
 * the timeout is not expired yet. This also applies if the route is dynamic
 * and there is a sufficiently large number of such routes (more than half
 * of the maximum). When updating, this makes sure that nothing has a
 * timeout longer than the current value of rtq_reallyold.
 */
static int
in6_rtqkill(struct radix_node *rn, void *rock)
{
	struct rtqk_arg *ap = rock;
	struct rtentry *rt = (struct rtentry *)rn;
	int err;
	struct timeval timenow;

	getmicrotime(&timenow);
	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	RT_LOCK(rt);
	if (rt->rt_flags & RTPRF_OURS) {
		ap->found++;

		if (ap->draining || rt->rt_rmx.rmx_expire <= timenow.tv_sec ||
		    ((rt->rt_flags & RTF_DYNAMIC) != 0 &&
		    ip6_maxdynroutes >= 0 &&
		    in6dynroutes > ip6_maxdynroutes / 2)) {
			if (rt->rt_refcnt > 0)
				panic("rtqkill route really not free");

			/*
			 * Delete this route since we're done with it;
			 * the route may be freed afterwards, so we
			 * can no longer refer to 'rt' upon returning
			 * from rtrequest(). Safe to drop rt_lock and
			 * use rt_key, rt_gateway, since holding rnh_lock
			 * here prevents another thread from calling
			 * rt_setgate() on this route.
			 */
			RT_UNLOCK(rt);
			err = rtrequest_locked(RTM_DELETE, rt_key(rt),
			    rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0);
			if (err) {
				log(LOG_WARNING, "in6_rtqkill: error %d", err);
			} else {
				ap->killed++;
			}
		} else {
			if (ap->updating &&
			    (rt->rt_rmx.rmx_expire - timenow.tv_sec >
			    rtq_reallyold)) {
				rt->rt_rmx.rmx_expire = timenow.tv_sec +
				    rtq_reallyold;
			}
			ap->nextstop = lmin(ap->nextstop,
			    rt->rt_rmx.rmx_expire);
			RT_UNLOCK(rt);
		}
	} else {
		RT_UNLOCK(rt);
	}

	return 0;
}
/* -----------------------------------------------------------------------------
detach L2TP interface from dlil layer
----------------------------------------------------------------------------- */
void l2tp_wan_detach(struct ppp_link *link)
{
	struct l2tp_wan *wan = (struct l2tp_wan *)link;

	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	ppp_link_detach(link);
	TAILQ_REMOVE(&l2tp_wan_head, wan, next);
	FREE(wan, M_TEMP);
}
struct protosw *
pffindproto(int family, int protocol, int type)
{
	register struct protosw *pr;

	lck_mtx_assert(domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(domain_proto_mtx);
	pr = pffindproto_locked(family, protocol, type);
	lck_mtx_unlock(domain_proto_mtx);

	return (pr);
}
struct domain *
pffinddomain(int pf)
{
	struct domain *dp;

	lck_mtx_assert(domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(domain_proto_mtx);
	dp = pffinddomain_locked(pf);
	lck_mtx_unlock(domain_proto_mtx);

	return (dp);
}
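/*
 * Hedged sketch (not from the source): both lookups above are thin
 * lock-taking wrappers, which is why they assert the mutex is NOT held
 * on entry. Code already running under domain_proto_mtx would call the
 * _locked variants directly; "my_lookup_locked" is illustrative only.
 */
static struct protosw *
my_lookup_locked(int family, int protocol, int type)
{
	lck_mtx_assert(domain_proto_mtx, LCK_MTX_ASSERT_OWNED);
	return (pffindproto_locked(family, protocol, type));
}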
int
ktrace_read_check(void)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	if (proc_uniqueid(current_proc()) == ktrace_owning_unique_id) {
		return 0;
	}

	return kauth_cred_issuser(kauth_cred_get()) ? 0 : EPERM;
}
/* -----------------------------------------------------------------------------
attach pppoe interface to dlil layer
----------------------------------------------------------------------------- */
int pppoe_wan_attach(void *rfc, struct ppp_link **link)
{
	int ret;
	struct pppoe_wan *wan;
	struct ppp_link *lk;
	u_short unit;

	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	// Note : we allocate/find number/insert in queue in that specific order
	// because of funnels and race condition issues

	MALLOC(wan, struct pppoe_wan *, sizeof(struct pppoe_wan), M_TEMP, M_WAITOK);
	if (!wan)
		return ENOMEM;

	if (pppoe_wan_findfreeunit(&unit)) {
		FREE(wan, M_TEMP);
		return ENOMEM;
	}

	bzero(wan, sizeof(struct pppoe_wan));

	TAILQ_INSERT_TAIL(&pppoe_wan_head, wan, next);

	lk = (struct ppp_link *)wan;

	// it's time now to register our brand new link
	lk->lk_name = (u_char *)PPPOE_NAME;
	lk->lk_mtu = PPPOE_MTU;
	lk->lk_mru = PPPOE_MTU;
	lk->lk_type = PPP_TYPE_PPPoE;
	lk->lk_hdrlen = 14;	// ethernet header len
	//ld->lk_if.link_lk_baudrate = tp->t_ospeed;
	lk->lk_ioctl = pppoe_wan_ioctl;
	lk->lk_output = pppoe_wan_output;
	lk->lk_unit = unit;
	lk->lk_support = PPP_LINK_DEL_AC;
	wan->rfc = rfc;

	ret = ppp_link_attach((struct ppp_link *)wan);
	if (ret) {
		IOLog("pppoe_wan_attach, error = %d, (ld = %p)\n", ret, wan);
		TAILQ_REMOVE(&pppoe_wan_head, wan, next);
		FREE(wan, M_TEMP);
		return ret;
	}

	//IOLog("pppoe_wan_attach, link index = %d, (ld = %p)\n", lk->lk_index, lk);

	*link = lk;
	return 0;
}
/*
 * On last reference drop, mark the route as belonging to us so that it
 * can be timed out.
 */
static void
in6_clsroute(struct radix_node *rn, __unused struct radix_node_head *head)
{
	struct rtentry *rt = (struct rtentry *)rn;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
	RT_LOCK_ASSERT_HELD(rt);

	if (!(rt->rt_flags & RTF_UP))
		return;		/* prophylactic measures */

	if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST)
		return;

	if (rt->rt_flags & RTPRF_OURS)
		return;

	if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC)))
		return;

	/*
	 * Delete the route immediately if RTF_DELCLONE is set or
	 * if route caching is disabled (rtq_reallyold set to 0).
	 * Otherwise, let it expire and be deleted by in6_rtqkill().
	 */
	if ((rt->rt_flags & RTF_DELCLONE) || rtq_reallyold == 0) {
		/*
		 * Delete the route from the radix tree but since we are
		 * called when the route's reference count is 0, don't
		 * deallocate it until we return from this routine by
		 * telling rtrequest that we're interested in it.
		 * Safe to drop rt_lock and use rt_key, rt_gateway,
		 * since holding rnh_lock here prevents another thread
		 * from calling rt_setgate() on this route.
		 */
		RT_UNLOCK(rt);
		if (rtrequest_locked(RTM_DELETE, rt_key(rt),
		    rt->rt_gateway, rt_mask(rt), rt->rt_flags, &rt) == 0) {
			/* Now let the caller free it */
			RT_LOCK(rt);
			RT_REMREF_LOCKED(rt);
		} else {
			RT_LOCK(rt);
		}
	} else {
		struct timeval timenow;

		getmicrotime(&timenow);
		rt->rt_flags |= RTPRF_OURS;
		rt->rt_rmx.rmx_expire = timenow.tv_sec + rtq_reallyold;
	}
}
struct protosw *
pffindprotonotype(int family, int protocol)
{
	register struct protosw *pr;

	if (protocol == 0) {
		return (NULL);
	}

	lck_mtx_assert(domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(domain_proto_mtx);
	pr = pffindprotonotype_locked(family, protocol, 0);
	lck_mtx_unlock(domain_proto_mtx);

	return (pr);
}
/* -----------------------------------------------------------------------------
called from l2tp_rfc when data are present
----------------------------------------------------------------------------- */
int l2tp_wan_input(struct ppp_link *link, mbuf_t m)
{
	struct timespec tv;

	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	link->lk_ipackets++;
	link->lk_ibytes += mbuf_pkthdr_len(m);
	nanouptime(&tv);
	link->lk_last_recv = tv.tv_sec;

	ppp_link_input(link, m);
	return 0;
}
int
altq_priq_remove(struct pf_altq *a)
{
	struct priq_if *pif;

	lck_mtx_assert(pf_lock, LCK_MTX_ASSERT_OWNED);

	if ((pif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	return (priq_destroy(pif));
}
int
esp_aes_schedule(
	__unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{
	lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);

	aes_ctx *ctx = (aes_ctx *)sav->sched;

	aes_decrypt_key((const unsigned char *)_KEYBUF(sav->key_enc),
	    _KEYLEN(sav->key_enc), &ctx->decrypt);
	aes_encrypt_key((const unsigned char *)_KEYBUF(sav->key_enc),
	    _KEYLEN(sav->key_enc), &ctx->encrypt);

	return 0;
}
/* -----------------------------------------------------------------------------
Process an ioctl request to the ppp interface
----------------------------------------------------------------------------- */
int l2tp_wan_ioctl(struct ppp_link *link, u_long cmd, void *data)
{
	//struct l2tp_wan *wan = (struct l2tp_wan *)link;
	int error = 0;

	lck_mtx_assert(ppp_domain_mutex, LCK_MTX_ASSERT_OWNED);

	//LOGDBG(ifp, ("l2tp_wan_ioctl, cmd = 0x%x\n", cmd));

	switch (cmd) {
	default:
		error = ENOTSUP;
	}

	return error;
}
static int
flowadv_thread_cont(int err)
{
#pragma unused(err)
	for (;;) {
		lck_mtx_assert(&fadv_lock, LCK_MTX_ASSERT_OWNED);
		while (STAILQ_EMPTY(&fadv_list)) {
			VERIFY(!fadv_active);
			(void) msleep0(&fadv_list, &fadv_lock, (PSOCK | PSPIN),
			    "flowadv_cont", 0, flowadv_thread_cont);
			/* NOTREACHED */
		}

		fadv_active = 1;
		for (;;) {
			struct flowadv_fcentry *fce;

			VERIFY(!STAILQ_EMPTY(&fadv_list));
			fce = STAILQ_FIRST(&fadv_list);
			STAILQ_REMOVE(&fadv_list, fce,
			    flowadv_fcentry, fce_link);
			STAILQ_NEXT(fce, fce_link) = NULL;

			lck_mtx_unlock(&fadv_lock);
			switch (fce->fce_flowsrc) {
			case FLOWSRC_INPCB:
				inp_flowadv(fce->fce_flowid);
				break;

			case FLOWSRC_IFNET:
				ifnet_flowadv(fce->fce_flowid);
				break;

			case FLOWSRC_PF:
			default:
				break;
			}
			flowadv_free_entry(fce);
			lck_mtx_lock_spin(&fadv_lock);

			/* if there's no pending request, we're done */
			if (STAILQ_EMPTY(&fadv_list))
				break;
		}
		fadv_active = 0;
	}
}
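/*
 * Hedged sketch (not from the source): flowadv_thread_cont() is written
 * as an msleep0() continuation -- the blocking call never returns, and
 * the function is re-entered from the top on wakeup. A thread entry
 * point would therefore call it once with the lock held; the name
 * "flowadv_thread_func" is an assumption.
 */
static void
flowadv_thread_func(void *v, wait_result_t w)
{
#pragma unused(v, w)
	lck_mtx_lock(&fadv_lock);
	(void) flowadv_thread_cont(0);
	/* NOTREACHED */
}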
/*
 * Send any deferred ICMP param problem error messages; caller must not be
 * holding ip6qlock and is expected to have saved the per-packet parameter
 * value via frag6_save_context().
 */
static void
frag6_icmp6_paramprob_error(struct fq6_head *diq6)
{
	lck_mtx_assert(&ip6qlock, LCK_MTX_ASSERT_NOTOWNED);

	if (!MBUFQ_EMPTY(diq6)) {
		struct mbuf *merr, *merr_tmp;
		int param;
		MBUFQ_FOREACH_SAFE(merr, diq6, merr_tmp) {
			MBUFQ_REMOVE(diq6, merr);
			MBUFQ_NEXT(merr) = NULL;
			param = frag6_restore_context(merr);
			frag6_scrub_context(merr);
			icmp6_error(merr, ICMP6_PARAM_PROB,
			    ICMP6_PARAMPROB_HEADER, param);
		}
	}
}
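/*
 * Hedged sketch (not from the source): the expected calling pattern is
 * to collect the offending fragments into a local queue while ip6qlock
 * is held, drop the lock, and only then emit the ICMP errors.
 * "my_frag6_drain" and the dequeue step are illustrative only.
 */
static void
my_frag6_drain(void)
{
	struct fq6_head diq6;

	MBUFQ_INIT(&diq6);

	lck_mtx_lock(&ip6qlock);
	/* ... move mbufs carrying saved contexts onto diq6 ... */
	lck_mtx_unlock(&ip6qlock);

	frag6_icmp6_paramprob_error(&diq6);	/* must run unlocked */
}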