static void
bcmrng_get(struct bcm2835rng_softc *sc)
{
	uint32_t status, cnt;
	uint32_t buf[RNG_DATA_MAX];	/* 1k on the stack */

	mutex_spin_enter(&sc->sc_intr_lock);
	while (sc->sc_bytes_wanted) {
		status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, RNG_STATUS);
		cnt = __SHIFTOUT(status, RNG_STATUS_CNT);
		KASSERT(cnt < RNG_DATA_MAX);
		if (cnt == 0)
			continue;	/* XXX Busy-waiting seems wrong... */
		bus_space_read_multi_4(sc->sc_iot, sc->sc_ioh, RNG_DATA,
		    buf, cnt);

		/*
		 * This lock dance is necessary because rnd_add_data
		 * may call bcmrng_get_cb which takes the intr lock.
		 */
		mutex_spin_exit(&sc->sc_intr_lock);
		mutex_spin_enter(&sc->sc_rnd_lock);
		rnd_add_data(&sc->sc_rndsource, buf, (cnt * 4),
		    (cnt * 4 * NBBY));
		mutex_spin_exit(&sc->sc_rnd_lock);
		mutex_spin_enter(&sc->sc_intr_lock);
		sc->sc_bytes_wanted -= MIN(sc->sc_bytes_wanted, (cnt * 4));
	}
	explicit_memset(buf, 0, sizeof(buf));
	mutex_spin_exit(&sc->sc_intr_lock);
}

void
arch_phys_wc_del(int id)
{
#if defined(MTRR)
	struct mtrr *mtrr;
	int n;
	int ret __diagused;

	KASSERT(0 <= id);

	mutex_spin_enter(&linux_writecomb.lock);
	mtrr = idr_find(&linux_writecomb.idr, id);
	idr_remove(&linux_writecomb.idr, id);
	mutex_spin_exit(&linux_writecomb.lock);

	if (mtrr != NULL) {
		mtrr->type = 0;
		mtrr->flags = 0;
		/* XXX errno NetBSD->Linux */
		ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
		KASSERT(ret == 0);
		KASSERT(n == 1);
		kmem_free(mtrr, sizeof(*mtrr));
	}
#endif
}

void
adjtime1(const struct timeval *delta, struct timeval *olddelta, struct proc *p)
{
	extern int64_t time_adjtime;	/* in kern_ntptime.c */

	if (olddelta) {
		mutex_spin_enter(&timecounter_lock);
		olddelta->tv_sec = time_adjtime / 1000000;
		olddelta->tv_usec = time_adjtime % 1000000;
		if (olddelta->tv_usec < 0) {
			olddelta->tv_usec += 1000000;
			olddelta->tv_sec--;
		}
		mutex_spin_exit(&timecounter_lock);
	}

	if (delta) {
		mutex_spin_enter(&timecounter_lock);
		time_adjtime = delta->tv_sec * 1000000 + delta->tv_usec;
		if (time_adjtime) {
			/* We need to save the system time during shutdown */
			time_adjusted |= 1;
		}
		mutex_spin_exit(&timecounter_lock);
	}
}

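For context, adjtime1() above is the kernel backend of the adjtime(2) system call. A minimal userland invocation is sketched below; the half-second slew value is arbitrary.

#include <sys/time.h>
#include <err.h>

int
main(void)
{
	/* Slew the clock forward by 0.5 s; discard the old remaining delta. */
	struct timeval delta = { .tv_sec = 0, .tv_usec = 500000 };

	if (adjtime(&delta, NULL) == -1)
		err(1, "adjtime");
	return 0;
}
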
void
rndsinks_distribute(void)
{
	uint8_t buffer[RNDSINK_MAX_BYTES];
	struct rndsink *rndsink;

	explicit_memset(buffer, 0, sizeof(buffer));	/* paranoia */

	mutex_spin_enter(&rndsinks_lock);
	while ((rndsink = TAILQ_FIRST(&rndsinks)) != NULL) {
		KASSERT(rndsink->rsink_state == RNDSINK_QUEUED);

		/* Bail if we can't get some entropy for this rndsink. */
		if (!rndpool_maybe_extract(buffer, rndsink->rsink_bytes))
			break;

		/*
		 * Got some entropy.  Take the sink off the queue and
		 * feed the entropy to the callback, with rndsinks_lock
		 * dropped.  While running the callback, lock out
		 * rndsink_destroy by marking the sink in flight.
		 */
		TAILQ_REMOVE(&rndsinks, rndsink, rsink_entry);
		rndsink->rsink_state = RNDSINK_IN_FLIGHT;
		mutex_spin_exit(&rndsinks_lock);

		(*rndsink->rsink_callback)(rndsink->rsink_arg, buffer,
		    rndsink->rsink_bytes);
		explicit_memset(buffer, 0, rndsink->rsink_bytes);

		mutex_spin_enter(&rndsinks_lock);

		/*
		 * If, while the callback was running, anyone requested
		 * it be queued up again, do so now.  Otherwise, idle.
		 * Either way, it is now safe to destroy, so wake the
		 * pending rndsink_destroy, if there is one.
		 */
		if (rndsink->rsink_state == RNDSINK_REQUEUED) {
			TAILQ_INSERT_TAIL(&rndsinks, rndsink, rsink_entry);
			rndsink->rsink_state = RNDSINK_QUEUED;
		} else {
			KASSERT(rndsink->rsink_state == RNDSINK_IN_FLIGHT);
			rndsink->rsink_state = RNDSINK_IDLE;
		}
		cv_broadcast(&rndsink->rsink_cv);
	}
	mutex_spin_exit(&rndsinks_lock);

	explicit_memset(buffer, 0, sizeof(buffer));	/* paranoia */
}

/*
 * If we have as much entropy as is requested, fill the buffer with it
 * and return true.  Otherwise, leave the buffer alone and return
 * false.
 */
static bool
rndpool_maybe_extract(void *buffer, size_t bytes)
{
	bool ok;

	KASSERT(bytes <= RNDSINK_MAX_BYTES);
	CTASSERT(RND_ENTROPY_THRESHOLD <= 0xffffffffUL);
	CTASSERT(RNDSINK_MAX_BYTES <= (0xffffffffUL - RND_ENTROPY_THRESHOLD));
	CTASSERT((RNDSINK_MAX_BYTES + RND_ENTROPY_THRESHOLD) <=
	    (0xffffffffUL / NBBY));

	const uint32_t bits_needed = ((bytes + RND_ENTROPY_THRESHOLD) * NBBY);

	mutex_spin_enter(&rndpool_mtx);
	if (bits_needed <= rndpool_get_entropy_count(&rnd_pool)) {
		const uint32_t extracted __unused =
		    rndpool_extract_data(&rnd_pool, buffer, bytes,
			RND_EXTRACT_GOOD);

		KASSERT(extracted == bytes);
		ok = true;
	} else {
		ok = false;
		rnd_getmore(howmany(bits_needed -
			rndpool_get_entropy_count(&rnd_pool), NBBY));
	}
	mutex_spin_exit(&rndpool_mtx);

	return ok;
}

static u_int
iomd_timecounter0_get(struct timecounter *tc)
{
	int s;
	u_int tm;

	/*
	 * Latch the current value of the timer and then read it.
	 * This guarantees an atomic reading of the time.
	 */
	s = splhigh();
	bus_space_write_1(clock_sc->sc_iot, clock_sc->sc_ioh,
	    IOMD_T0LATCH, 0);

	tm = bus_space_read_1(clock_sc->sc_iot, clock_sc->sc_ioh,
	    IOMD_T0LOW);
	tm += (bus_space_read_1(clock_sc->sc_iot, clock_sc->sc_ioh,
	    IOMD_T0HIGH) << 8);
	splx(s);

	mutex_spin_enter(&tmr_lock);
	tm = timer0_count - tm;

	if (timer0_count &&
	    (tm < timer0_lastcount ||
	     (!timer0_ticked && false /* XXX: clkintr_pending */))) {
		timer0_ticked = 1;
		timer0_offset += timer0_count;
	}

	timer0_lastcount = tm;
	tm += timer0_offset;
	mutex_spin_exit(&tmr_lock);

	return tm;
}

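A get-timecount routine like the one above only becomes useful once it is hooked into a struct timecounter and registered with tc_init(9). The following is a minimal sketch of that registration; the mask, frequency, and quality values are placeholders rather than the IOMD timer's real parameters.

/* Hypothetical registration of the counter above; values are placeholders. */
static struct timecounter iomd_timecounter = {
	.tc_get_timecount = iomd_timecounter0_get,
	.tc_counter_mask = 0xffff,	/* 16-bit counter (assumed) */
	.tc_frequency = 2000000,	/* counts per second (placeholder) */
	.tc_name = "iomd_timer0",
	.tc_quality = 100,
};

static void
iomd_timecounter_attach(void)
{
	tc_init(&iomd_timecounter);
}
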
/*
 * callout_hardclock:
 *
 *	Called from hardclock() once every tick.  We schedule a soft
 *	interrupt if there is work to be done.
 */
void
callout_hardclock(void)
{
	struct callout_cpu *cc;
	int needsoftclock, ticks;

	cc = curcpu()->ci_data.cpu_callout;
	mutex_spin_enter(cc->cc_lock);

	ticks = ++cc->cc_ticks;

	MOVEBUCKET(cc, 0, ticks);
	if (MASKWHEEL(0, ticks) == 0) {
		MOVEBUCKET(cc, 1, ticks);
		if (MASKWHEEL(1, ticks) == 0) {
			MOVEBUCKET(cc, 2, ticks);
			if (MASKWHEEL(2, ticks) == 0)
				MOVEBUCKET(cc, 3, ticks);
		}
	}

	needsoftclock = !CIRCQ_EMPTY(&cc->cc_todo);
	mutex_spin_exit(cc->cc_lock);

	if (needsoftclock)
		softint_schedule(callout_sih);
}

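The timer wheel serviced above is driven on behalf of callout(9) consumers. A minimal, hypothetical consumer that re-arms itself once per second might look like the sketch below; the foo_* names are illustrative only.

#include <sys/callout.h>
#include <sys/kernel.h>		/* for hz */

/* Hypothetical periodic callout consumer. */
struct foo_softc {
	callout_t sc_tick_ch;
};

static void
foo_tick(void *arg)
{
	struct foo_softc *sc = arg;

	/* ... periodic work, runs from the softclock interrupt ... */

	callout_schedule(&sc->sc_tick_ch, hz);	/* re-arm one second out */
}

static void
foo_attach(struct foo_softc *sc)
{
	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_tick_ch, foo_tick, sc);
	callout_schedule(&sc->sc_tick_ch, hz);
}
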
static int
tap_dev_poll(int unit, int events, struct lwp *l)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);
	int revents = 0;

	if (sc == NULL)
		return POLLERR;

	if (events & (POLLIN|POLLRDNORM)) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
		IFQ_POLL(&ifp->if_snd, m);

		if (m != NULL)
			revents |= events & (POLLIN|POLLRDNORM);
		else {
			mutex_spin_enter(&sc->sc_kqlock);
			selrecord(l, &sc->sc_rsel);
			mutex_spin_exit(&sc->sc_kqlock);
		}
		splx(s);
	}
	revents |= events & (POLLOUT|POLLWRNORM);

	return (revents);
}

/*
 * data contains amount of time to sleep, in milliseconds
 */
static int
filt_timerattach(struct knote *kn)
{
	callout_t *calloutp;
	struct kqueue *kq;
	int tticks;

	tticks = mstohz(kn->kn_sdata);

	/* if the supplied value is under our resolution, use 1 tick */
	if (tticks == 0) {
		if (kn->kn_sdata == 0)
			return EINVAL;
		tticks = 1;
	}

	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
		atomic_dec_uint(&kq_ncallouts);
		return ENOMEM;
	}
	callout_init(calloutp, CALLOUT_MPSAFE);

	kq = kn->kn_kq;
	mutex_spin_enter(&kq->kq_lock);
	kn->kn_flags |= EV_CLEAR;		/* automatically set */
	kn->kn_hook = calloutp;
	mutex_spin_exit(&kq->kq_lock);

	callout_reset(calloutp, tticks, filt_timerexpire, kn);

	return (0);
}

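For context, the attach routine above runs when a process registers an EVFILT_TIMER filter. A minimal userland registration might look like this sketch; the 500 ms period and the identifier 1 are arbitrary.

#include <sys/event.h>
#include <err.h>

int
main(void)
{
	struct kevent kev, ev;
	int kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");

	/* One timer firing every 500 ms (the data field is in milliseconds). */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent: register");

	/* Block until the timer fires once. */
	if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
		err(1, "kevent: wait");
	return 0;
}
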
int
pcppi_detach(device_t self, int flags)
{
	int rc;
	struct pcppi_softc *sc = device_private(self);

#if NATTIMER > 0
	pcppi_detach_speaker(sc);
#endif

	if ((rc = config_detach_children(sc->sc_dv, flags)) != 0)
		return rc;

	pmf_device_deregister(self);

#if NPCKBD > 0
	pckbd_unhook_bell(pcppi_pckbd_bell, sc);
#endif
	mutex_spin_enter(&tty_lock);
	pcppi_bell_stop(sc);
	mutex_spin_exit(&tty_lock);

	callout_halt(&sc->sc_bell_ch, NULL);
	callout_destroy(&sc->sc_bell_ch);
	cv_destroy(&sc->sc_slp);

	bus_space_unmap(sc->sc_iot, sc->sc_ppi_ioh, sc->sc_size);

	return 0;
}

/* ARGSUSED */
int
gtmpsc_intr(void *arg)
{
	struct gt_softc *gt = (struct gt_softc *)arg;
	struct gtmpsc_softc *sc;
	uint32_t icause;
	int i;

	icause = gt_sdma_icause(gt->sc_dev, sdma_imask);

	for (i = 0; i < GTMPSC_NCHAN; i++) {
		sc = device_lookup_private(&gtmpsc_cd, i);
		if (sc == NULL)
			continue;
		mutex_spin_enter(&sc->sc_lock);
		if (icause & SDMA_INTR_RXBUF(sc->sc_unit)) {
			gtmpsc_intr_rx(sc);
			icause &= ~SDMA_INTR_RXBUF(sc->sc_unit);
		}
		if (icause & SDMA_INTR_TXBUF(sc->sc_unit)) {
			gtmpsc_intr_tx(sc);
			icause &= ~SDMA_INTR_TXBUF(sc->sc_unit);
		}
		mutex_spin_exit(&sc->sc_lock);
	}

	return 1;
}

static bool
yds_resume(device_t dv, const pmf_qual_t *qual)
{
	struct yds_softc *sc = device_private(dv);
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;
	pcireg_t reg;

	/* Disable legacy mode */
	mutex_enter(&sc->sc_lock);
	mutex_spin_enter(&sc->sc_intr_lock);
	reg = pci_conf_read(pc, tag, YDS_PCI_LEGACY);
	pci_conf_write(pc, tag, YDS_PCI_LEGACY, reg & YDS_PCI_LEGACY_LAD);

	/* Enable the device. */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	reg |= (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE);
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);

	if (yds_init(sc)) {
		aprint_error_dev(dv, "reinitialize failed\n");
		mutex_spin_exit(&sc->sc_intr_lock);
		mutex_exit(&sc->sc_lock);
		return false;
	}

	pci_conf_write(pc, tag, YDS_PCI_DSCTRL, sc->sc_dsctrl);
	mutex_spin_exit(&sc->sc_intr_lock);
	sc->sc_codec[0].codec_if->vtbl->restore_ports(sc->sc_codec[0].codec_if);
	mutex_exit(&sc->sc_lock);

	return true;
}

/*
 * Independent profiling "tick" in case we're using a separate
 * clock or profiling event source.  Currently, that's just
 * performance counters--hence the wrapper.
 */
void
proftick(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct lwp *l;
	struct proc *p;

	l = curcpu()->ci_data.cpu_onproc;
	p = (l ? l->l_proc : NULL);
	if (CLKF_USERMODE(frame)) {
		mutex_spin_enter(&p->p_stmutex);
		if (p->p_stflag & PST_PROFIL)
			addupc_intr(l, CLKF_PC(frame));
		mutex_spin_exit(&p->p_stmutex);
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p != NULL && (p->p_stflag & PST_PROFIL) != 0)
			addupc_intr(l, LWP_PC(l));
#endif
	}
}

void
ucom_status_change(struct ucom_softc *sc)
{
	struct tty *tp = sc->sc_tty;
	u_char old_msr;

	if (sc->sc_methods->ucom_get_status != NULL) {
		old_msr = sc->sc_msr;
		sc->sc_methods->ucom_get_status(sc->sc_parent, sc->sc_portno,
		    &sc->sc_lsr, &sc->sc_msr);
		if (ISSET((sc->sc_msr ^ old_msr), UMSR_DCD)) {
			mutex_spin_enter(&timecounter_lock);
			pps_capture(&sc->sc_pps_state);
			pps_event(&sc->sc_pps_state,
			    (sc->sc_msr & UMSR_DCD) ?
			    PPS_CAPTUREASSERT : PPS_CAPTURECLEAR);
			mutex_spin_exit(&timecounter_lock);

			(*tp->t_linesw->l_modem)(tp,
			    ISSET(sc->sc_msr, UMSR_DCD));
		}
	} else {
		sc->sc_lsr = 0;
		/* Assume DCD is present, if we have no chance to check it. */
		sc->sc_msr = UMSR_DCD;
	}
}

static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc = device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return (ENXIO);

	KERNEL_LOCK(1, NULL);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	kn->kn_hook = sc;
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
	return (0);
}

/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause to not flood.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}

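To show how the write side above pairs with the lockless read side, here is a minimal pserialize(9) consumer sketch. All frobtab_* names are illustrative; only the pserialize, mutex, and kmem calls are real APIs, and initialization (mutex_init, pserialize_create) is assumed to happen elsewhere.

#include <sys/pserialize.h>
#include <sys/mutex.h>
#include <sys/kmem.h>

/* Hypothetical data published to lockless readers. */
struct frobtab {
	int	ft_value;
};

static struct frobtab *frobtab_ptr;	/* current version, read locklessly */
static kmutex_t frobtab_lock;		/* serializes updaters */
static pserialize_t frobtab_psz;	/* from pserialize_create() */

static int
frobtab_read(void)
{
	int s, v;

	s = pserialize_read_enter();	/* marks a passive read section */
	v = frobtab_ptr->ft_value;
	pserialize_read_exit(s);
	return v;
}

static void
frobtab_replace(struct frobtab *new)
{
	struct frobtab *old;

	mutex_enter(&frobtab_lock);
	old = frobtab_ptr;
	frobtab_ptr = new;		 /* publish the new version */
	pserialize_perform(frobtab_psz); /* wait out readers of `old' */
	mutex_exit(&frobtab_lock);

	kmem_free(old, sizeof(*old));
}
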
STATIC void
gtmpsc_common_putc(struct gtmpsc_softc *sc, int c)
{
	gtmpsc_polltx_t *vtxp;
	int ix;
	const int nc = 1;

	/* Get a DMA descriptor */
	if (!cold)
		mutex_spin_enter(&sc->sc_lock);
	ix = sc->sc_nexttx;
	sc->sc_nexttx = (ix + 1) % GTMPSC_NTXDESC;
	if (sc->sc_nexttx == sc->sc_lasttx) {
		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
	}
	if (!cold)
		mutex_spin_exit(&sc->sc_lock);

	vtxp = &sc->sc_poll_sdmapage->tx[ix];
	vtxp->txbuf[0] = c;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
	    ix * sizeof(gtmpsc_polltx_t) + sizeof(sdma_desc_t),
	    sizeof(vtxp->txbuf), BUS_DMASYNC_PREWRITE);

	vtxp->txdesc.sdma_cnt = (nc << SDMA_TX_CNT_BCNT_SHIFT) | nc;
	vtxp->txdesc.sdma_csr = SDMA_CSR_TX_L | SDMA_CSR_TX_F | SDMA_CSR_TX_OWN;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdma_map,
	    ix * sizeof(gtmpsc_polltx_t), sizeof(sdma_desc_t),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (!cold)
		mutex_spin_enter(&sc->sc_lock);
	/*
	 * now kick some SDMA
	 */
	GT_SDMA_WRITE(sc, SDMA_SDCM, SDMA_SDCM_TXD);

	while (sc->sc_lasttx != sc->sc_nexttx) {
		gtmpsc_common_putc_wait_complete(sc, sc->sc_lasttx);
		sc->sc_lasttx = (sc->sc_lasttx + 1) % GTMPSC_NTXDESC;
	}
	if (!cold)
		mutex_spin_exit(&sc->sc_lock);
}

void
pcppi_bell(pcppi_tag_t self, int pitch, int period, int slp)
{

	mutex_spin_enter(&tty_lock);
	pcppi_bell_locked(self, pitch, period, slp);
	mutex_spin_exit(&tty_lock);
}

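A consumer (a keyboard bell hook, for instance) would invoke the wrapper above along the lines of the sketch below. The values are arbitrary, the interpretation of pitch as Hz and period as clock ticks is an assumption, and the final 0 simply requests no sleep flags.

/* Hypothetical bell consumer; `pcppi_cookie' was obtained at attach time. */
static void
foo_beep(pcppi_tag_t pcppi_cookie)
{
	pcppi_bell(pcppi_cookie, 1500, hz / 8, 0);
}
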
/*
 * pserialize_switchpoint:
 *
 *	Monitor system context switch activity.  Called from machine
 *	independent code after mi_switch() returns.
 */
void
pserialize_switchpoint(void)
{
	pserialize_t psz, next;
	cpuid_t cid;

	/*
	 * If no updates pending, bail out.  No need to lock in order to
	 * test psz_work_todo; the only ill effect of missing an update
	 * would be to delay LWPs waiting in pserialize_perform().  That
	 * will not happen because updates are on the queue before an
	 * xcall is generated (serialization) to tickle every CPU.
	 */
	if (__predict_true(psz_work_todo == 0)) {
		return;
	}
	mutex_spin_enter(&psz_lock);
	cid = cpu_index(curcpu());

	/*
	 * At first, scan through the second queue and update each request,
	 * if passed all processors, then transfer to the third queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
	}

	/*
	 * Scan through the first queue and update each request,
	 * if passed all processors, then move to the second queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
	}

	/*
	 * Process the third queue: entries have been seen twice on every
	 * processor, remove from the queue and notify the updating thread.
	 */
	while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
		TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
		kcpuset_zero(psz->psz_target);
		psz_work_todo--;
	}
	mutex_spin_exit(&psz_lock);
}

static void
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
	mutex_spin_enter(&uvm_fpageqlock);
	uvm_kick_pdaemon();
	mutex_spin_exit(&uvm_fpageqlock);
#endif
}

static void
auich_close(void *addr)
{
	struct auich_softc *sc;

	sc = (struct auich_softc *)addr;

	/* Called with sc_intr_lock held; drop it across the codec call. */
	mutex_spin_exit(&sc->sc_intr_lock);
	sc->codec_if->vtbl->unlock(sc->codec_if);
	mutex_spin_enter(&sc->sc_intr_lock);
}

/*
 * used by kbd_sun_start_tx();
 */
void
sunkbd_write_data(struct kbd_sun_softc *k, int c)
{
	struct tty *tp = k->k_priv;

	mutex_spin_enter(&tty_lock);
	ttyoutput(c, tp);
	ttstart(tp);
	mutex_spin_exit(&tty_lock);
}

static int
fms_intr(void *arg)
{
	struct fms_softc *sc = arg;
#if NMPU > 0
	struct mpu_softc *sc_mpu = device_private(sc->sc_mpu_dev);
#endif
	uint16_t istat;

	mutex_spin_enter(&sc->sc_intr_lock);

	istat = bus_space_read_2(sc->sc_iot, sc->sc_ioh, FM_INTSTATUS);

	if (istat & FM_INTSTATUS_PLAY) {
		if ((sc->sc_play_nextblk += sc->sc_play_blksize) >=
		    sc->sc_play_end)
			sc->sc_play_nextblk = sc->sc_play_start;

		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    sc->sc_play_flip++ & 1 ?
		    FM_PLAY_DMABUF2 : FM_PLAY_DMABUF1, sc->sc_play_nextblk);

		if (sc->sc_pintr)
			sc->sc_pintr(sc->sc_parg);
		else
			printf("unexpected play intr\n");
	}

	if (istat & FM_INTSTATUS_REC) {
		if ((sc->sc_rec_nextblk += sc->sc_rec_blksize) >=
		    sc->sc_rec_end)
			sc->sc_rec_nextblk = sc->sc_rec_start;

		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    sc->sc_rec_flip++ & 1 ?
		    FM_REC_DMABUF2 : FM_REC_DMABUF1, sc->sc_rec_nextblk);

		if (sc->sc_rintr)
			sc->sc_rintr(sc->sc_rarg);
		else
			printf("unexpected rec intr\n");
	}

#if NMPU > 0
	if (istat & FM_INTSTATUS_MPU)
		mpu_intr(sc_mpu);
#endif

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_INTSTATUS,
	    istat & (FM_INTSTATUS_PLAY | FM_INTSTATUS_REC));

	mutex_spin_exit(&sc->sc_intr_lock);

	return 1;
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_data;

	mutex_spin_enter(&kq->kq_lock);
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&kq->kq_lock);
}

static int
auich_open(void *addr, int flags)
{
	struct auich_softc *sc;

	sc = (struct auich_softc *)addr;

	/* Called with sc_intr_lock held; drop it across the codec call. */
	mutex_spin_exit(&sc->sc_intr_lock);
	sc->codec_if->vtbl->lock(sc->codec_if);
	mutex_spin_enter(&sc->sc_intr_lock);

	return 0;
}

static void
pcppi_bell_callout(void *arg)
{
	struct pcppi_softc *sc = arg;

	mutex_spin_enter(&tty_lock);
	if (sc->sc_timeout != 0) {
		pcppi_bell_stop(sc);
	}
	mutex_spin_exit(&tty_lock);
}

static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	KERNEL_LOCK(1, NULL);
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
}

void
rndsink_schedule(struct rndsink *rndsink)
{

	/* Optimistically check without the lock whether we're queued. */
	if ((rndsink->rsink_state != RNDSINK_QUEUED) &&
	    (rndsink->rsink_state != RNDSINK_REQUEUED)) {
		mutex_spin_enter(&rndsinks_lock);
		rndsinks_enqueue(rndsink);
		mutex_spin_exit(&rndsinks_lock);
	}
}

/*
 * ntp_gettime() - NTP user application interface
 */
void
ntp_gettime(struct ntptimeval *ntv)
{

	mutex_spin_enter(&timecounter_lock);
	nanotime(&ntv->time);
	ntv->maxerror = time_maxerror;
	ntv->esterror = time_esterror;
	ntv->tai = time_tai;
	ntv->time_state = time_state;
	mutex_spin_exit(&timecounter_lock);
}

RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();

    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE) {
        mutex_spin_enter(&pThis->pSpinLock);
    } else {
        mutex_enter(&pThis->pSpinLock);
    }
}