/*
 * taskq_next_work: dequeue the next task for a taskq worker thread.
 *
 * Sleeps (via tqsleep) while the worklist is empty and the queue is
 * still running.  Returns 0 once the taskq leaves TQ_S_RUNNING (worker
 * should exit), or 1 with *work filled in.
 */
int
taskq_next_work(struct taskq *tq, struct task *work, sleepfn tqsleep)
{
	struct task *next;

	mtx_enter(&tq->tq_mtx);
	while ((next = TAILQ_FIRST(&tq->tq_worklist)) == NULL) {
		if (tq->tq_state != TQ_S_RUNNING) {
			/* queue is being destroyed; tell caller to bail */
			mtx_leave(&tq->tq_mtx);
			return (0);
		}
		/* wait for taskq_add()/taskq_destroy() to wake us */
		tqsleep(tq, &tq->tq_mtx, PWAIT, "bored", 0);
	}

	TAILQ_REMOVE(&tq->tq_worklist, next, t_entry);
	CLR(next->t_flags, TASK_ONQUEUE);

	*work = *next; /* copy to caller to avoid races */

	/* if more work is pending, kick one more worker before unlocking */
	next = TAILQ_FIRST(&tq->tq_worklist);
	mtx_leave(&tq->tq_mtx);

	if (next != NULL)
		wakeup_one(tq);

	return (1);
}
/*
 * softintr_dispatch:
 *
 *	Process pending software interrupts.
 */
void
softintr_dispatch(int which)
{
	struct i386_soft_intr *si = &i386_soft_intrs[which];
	struct i386_soft_intrhand *sih;
	void *arg;
	void (*fn)(void *);

	for (;;) {
		mtx_enter(&si->softintr_lock);
		sih = TAILQ_FIRST(&si->softintr_q);
		if (sih == NULL) {
			/* queue drained; done */
			mtx_leave(&si->softintr_lock);
			break;
		}
		TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
		sih->sih_pending = 0;

		uvmexp.softs++;

		/*
		 * Copy the handler and its argument so the callback can
		 * run without holding softintr_lock (the handler may
		 * re-schedule itself or sleep on other locks).
		 */
		arg = sih->sih_arg;
		fn = sih->sih_fn;
		mtx_leave(&si->softintr_lock);

		(*fn)(arg);
	}
}
/*
 * taskq_destroy: tear down a taskq.
 *
 * If the creating thread has not started yet (TQ_S_CREATED), only flag
 * the queue as destroyed and let taskq_create_thread free it.  Otherwise
 * wake all workers, wait for them to drain out, and free the taskq.
 */
void
taskq_destroy(struct taskq *tq)
{
	mtx_enter(&tq->tq_mtx);
	switch (tq->tq_state) {
	case TQ_S_CREATED:
		/* tq is still referenced by taskq_create_thread */
		tq->tq_state = TQ_S_DESTROYED;
		mtx_leave(&tq->tq_mtx);
		return;

	case TQ_S_RUNNING:
		tq->tq_state = TQ_S_DESTROYED;
		break;

	default:
		panic("unexpected %s tq state %u", tq->tq_name, tq->tq_state);
	}

	/* wake workers so they notice TQ_S_DESTROYED, then wait them out */
	while (tq->tq_running > 0) {
		wakeup(tq);
		msleep(&tq->tq_running, &tq->tq_mtx, PWAIT, "tqdestroy", 0);
	}
	mtx_leave(&tq->tq_mtx);

	free(tq, M_DEVBUF, sizeof(*tq));
}
void mvmdio_miibus_writereg(struct device *dev, int phy, int reg, int val) { struct mvmdio_softc *sc = (struct mvmdio_softc *) dev; uint32_t smi; int i; mtx_enter(&sc->sc_mtx); for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { DELAY(1); if (!(MVNETA_READ(sc, 0) & MVNETA_SMI_BUSY)) break; } if (i == MVNETA_PHY_TIMEOUT) { printf("%s: SMI busy timeout\n", sc->sc_dev.dv_xname); mtx_leave(&sc->sc_mtx); return; } smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK); MVNETA_WRITE(sc, 0, smi); for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { DELAY(1); if (!(MVNETA_READ(sc, 0) & MVNETA_SMI_BUSY)) break; } mtx_leave(&sc->sc_mtx); if (i == MVNETA_PHY_TIMEOUT) printf("%s: phy write timed out\n", sc->sc_dev.dv_xname); }
/**
 * drm_vblank_off - disable vblank events on a CRTC
 * @dev: DRM device
 * @crtc: CRTC in question
 *
 * Disables the vblank interrupt for @crtc, saving the counter, wakes any
 * waiters, and delivers all still-queued vblank events for this CRTC so
 * userland is not left blocked.  Takes vbl_lock and event_lock itself;
 * the caller must hold neither.
 */
void
drm_vblank_off(struct drm_device *dev, int crtc)
{
	struct drmevlist *list;
	struct drm_pending_event *ev, *tmp;
	struct drm_pending_vblank_event *vev;
	struct timeval now;
	unsigned int seq;

	mtx_enter(&dev->vbl_lock);
	vblank_disable_and_save(dev, crtc);
	wakeup(&dev->vbl_queue[crtc]);

	list = &dev->vbl_events;
	/* Send any queued vblank events, lest the natives grow disquiet */
	seq = drm_vblank_count_and_time(dev, crtc, &now);
	mtx_enter(&dev->event_lock);
	/* tmp caches the next link so removal inside the loop is safe */
	for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list); ev = tmp) {
		tmp = TAILQ_NEXT(ev, link);
		vev = (struct drm_pending_vblank_event *)ev;
		if (vev->pipe != crtc)
			continue;
		DRM_DEBUG("Sending premature vblank event on disable: \
		    wanted %d, current %d\n", vev->event.sequence, seq);
		TAILQ_REMOVE(list, ev, link);
		drm_vblank_put(dev, vev->pipe);
		send_vblank_event(dev, vev, seq, &now);
	}
	mtx_leave(&dev->event_lock);
	mtx_leave(&dev->vbl_lock);
}
/*
 * softintr_dispatch:
 *
 *	Process pending software interrupts.
 */
void
softintr_dispatch(int which)
{
	struct i386_soft_intr *si = &i386_soft_intrs[which];
	struct i386_soft_intrhand *sih;

	/* handlers in this variant run under the big kernel lock */
	KERNEL_LOCK();
	for (;;) {
		mtx_enter(&si->softintr_lock);
		sih = TAILQ_FIRST(&si->softintr_q);
		if (sih == NULL) {
			/* queue drained; done */
			mtx_leave(&si->softintr_lock);
			break;
		}
		TAILQ_REMOVE(&si->softintr_q, sih, sih_q);
		sih->sih_pending = 0;

		uvmexp.softs++;

		/*
		 * softintr_lock is dropped before calling the handler;
		 * NOTE(review): sih is dereferenced after the drop, which
		 * presumably relies on handlers never being freed while
		 * dispatch can still run — confirm against softintr_disestablish.
		 */
		mtx_leave(&si->softintr_lock);
		(*sih->sih_fn)(sih->sih_arg);
	}
	KERNEL_UNLOCK();
}
/*
 * drm_queue_vblank_event: queue a vblank event for delivery to a file.
 *
 * Allocates a pending event, charges it against the file's event_space,
 * and either delivers it immediately (if the requested sequence has
 * already passed) or queues it on the CRTC's vblank event list.
 * Returns 0 on success or ENOMEM.
 */
int
drm_queue_vblank_event(struct drm_device *dev, int crtc,
    union drm_wait_vblank *vblwait, struct drm_file *file_priv)
{
	struct drm_pending_vblank_event *vev;
	struct timeval now;
	u_int seq;

	vev = drm_calloc(1, sizeof(*vev));
	if (vev == NULL)
		return (ENOMEM);

	vev->event.base.type = DRM_EVENT_VBLANK;
	vev->event.base.length = sizeof(vev->event);
	vev->event.user_data = vblwait->request.signal;
	vev->base.event = &vev->event.base;
	vev->base.file_priv = file_priv;
	vev->base.destroy = (void (*) (struct drm_pending_event *))drm_free;

	microtime(&now);
	mtx_enter(&dev->event_lock);
	/* refuse to overrun the per-file event buffer quota */
	if (file_priv->event_space < sizeof(vev->event)) {
		mtx_leave(&dev->event_lock);
		drm_free(vev);
		return (ENOMEM);
	}
	seq = drm_vblank_count(dev, crtc);
	file_priv->event_space -= sizeof(vev->event);

	DPRINTF("%s: queueing event %d on crtc %d\n", __func__, seq, crtc);

	/*
	 * (seq - requested) <= 1<<23 under unsigned wraparound means the
	 * requested sequence is in the past; with NEXTONMISS retarget the
	 * request to the next upcoming vblank instead.
	 */
	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
		vblwait->reply.sequence = vblwait->request.sequence;
	}

	vev->event.sequence = vblwait->request.sequence;
	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
		/* already passed: deliver to the file immediately */
		vev->event.tv_sec = now.tv_sec;
		vev->event.tv_usec = now.tv_usec;
		DPRINTF("%s: already passed, dequeuing: crtc %d, value %d\n",
		    __func__, crtc, seq);
		drm_vblank_put(dev, crtc);
		TAILQ_INSERT_TAIL(&file_priv->evlist, &vev->base, link);
		wakeup(&file_priv->evlist);
		selwakeup(&file_priv->rsel);
	} else {
		/* still in the future: park it on the CRTC's event list */
		TAILQ_INSERT_TAIL(&dev->vblank->vb_crtcs[crtc].vbl_events,
		    &vev->base, link);
	}
	mtx_leave(&dev->event_lock);

	return (0);
}
/*
 * midiread: read incoming MIDI bytes from the device's input buffer.
 *
 * Blocks until data is available unless IO_NDELAY is set.  Returns 0 on
 * success or an errno (ENXIO, EWOULDBLOCK, EIO, or an msleep/uiomove
 * error).
 */
int
midiread(dev_t dev, struct uio *uio, int ioflag)
{
	struct midi_softc *sc;
	struct midi_buffer *mb;
	size_t count;
	int error;

	sc = (struct midi_softc *)device_lookup(&midi_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;
	if (!(sc->flags & FREAD)) {
		error = ENXIO;
		goto done;
	}
	mb = &sc->inbuf;

	/* if there is no data then sleep (unless IO_NDELAY flag is set) */
	error = 0;
	mtx_enter(&audio_lock);
	while (MIDIBUF_ISEMPTY(mb)) {
		if (ioflag & IO_NDELAY) {
			mtx_leave(&audio_lock);
			error = EWOULDBLOCK;
			goto done;
		}
		sc->rchan = 1;
		error = msleep(&sc->rchan, &audio_lock, PWAIT | PCATCH,
		    "mid_rd", 0);
		/* device may have detached while we slept */
		if (!(sc->dev.dv_flags & DVF_ACTIVE))
			error = EIO;
		if (error) {
			mtx_leave(&audio_lock);
			goto done;
		}
	}

	/* at this stage, there is at least 1 byte */
	while (uio->uio_resid > 0 && mb->used > 0) {
		/* copy at most up to the end of the circular buffer */
		count = MIDIBUF_SIZE - mb->start;
		if (count > mb->used)
			count = mb->used;
		if (count > uio->uio_resid)
			count = uio->uio_resid;
		/*
		 * audio_lock is dropped around uiomove (it may fault/sleep);
		 * presumably only the interrupt side appends and only this
		 * reader consumes, so mb->start stays valid — the bytes are
		 * removed only after the copy succeeds.
		 */
		mtx_leave(&audio_lock);
		error = uiomove(mb->data + mb->start, count, uio);
		if (error)
			goto done;
		mtx_enter(&audio_lock);
		MIDIBUF_REMOVE(mb, count);
	}
	mtx_leave(&audio_lock);
done:
	device_unref(&sc->dev);
	return error;
}
/*
 * db_stopcpu: ask another CPU to stop for the debugger.
 *
 * Marks the target CPU as "should stop" under ddb_mp_mutex and sends it
 * a DDB IPI.  Does nothing for the current CPU, a missing CPU slot, or a
 * CPU that is already stopped.
 */
void
db_stopcpu(int cpu)
{
	mtx_enter(&ddb_mp_mutex);
	if (cpu == cpu_number() || cpu_info[cpu] == NULL ||
	    cpu_info[cpu]->ci_ddb_paused == CI_DDB_STOPPED) {
		/* nothing to do for this CPU */
		mtx_leave(&ddb_mp_mutex);
		return;
	}
	cpu_info[cpu]->ci_ddb_paused = CI_DDB_SHOULDSTOP;
	mtx_leave(&ddb_mp_mutex);
	x86_send_ipi(cpu_info[cpu], X86_IPI_DDB);
}
/*
 * vdsp_read: drain queued descriptor messages from the receive ring.
 *
 * Processes entries from sc_desc_tail up to sc_desc_head, dropping
 * sc_desc_mtx around each vdsp_read_desc call (which may sleep) and
 * advancing the tail index, with wraparound, only after the entry has
 * been handled.
 */
void
vdsp_read(void *arg1)
{
	struct vdsp_softc *sc = arg1;

	mtx_enter(&sc->sc_desc_mtx);
	while (sc->sc_desc_tail != sc->sc_desc_head) {
		/* unlock while handling the entry; tail not yet advanced */
		mtx_leave(&sc->sc_desc_mtx);
		vdsp_read_desc(sc, sc->sc_desc_msg[sc->sc_desc_tail]);
		mtx_enter(&sc->sc_desc_mtx);
		sc->sc_desc_tail++;
		/* ring index wraps at VDSK_RX_ENTRIES (power of two) */
		sc->sc_desc_tail &= (VDSK_RX_ENTRIES - 1);
	}
	mtx_leave(&sc->sc_desc_mtx);
}
/*
 * tmpfs_mem_incr: try to reserve sz bytes against the mount's memory
 * limit.  Returns 1 and charges the usage on success, 0 if the
 * reservation would reach or exceed the limit.
 */
int
tmpfs_mem_incr(struct tmpfs_mount *mp, size_t sz)
{
	uint64_t limit;
	int ok;

	mtx_enter(&mp->tm_acc_lock);
	limit = tmpfs_bytes_max(mp);
	ok = (mp->tm_bytes_used + sz < limit);
	if (ok)
		mp->tm_bytes_used += sz;
	mtx_leave(&mp->tm_acc_lock);
	return ok;
}
/*
 * ttm_write_lock_downgrade: convert a held write lock into a read lock
 * (rw becomes 1, i.e. a single reader) and wake anyone queued on it.
 */
void
ttm_write_lock_downgrade(struct ttm_lock *lock)
{
	struct mtx *mtx = &lock->lock;

	mtx_enter(mtx);
	lock->rw = 1;
	wakeup(&lock->queue);
	mtx_leave(mtx);
}
/*
 * ttm_write_unlock: release a write lock (rw back to 0) and wake any
 * threads queued waiting for the lock.
 */
void
ttm_write_unlock(struct ttm_lock *lock)
{
	struct mtx *mtx = &lock->lock;

	mtx_enter(mtx);
	lock->rw = 0;
	wakeup(&lock->queue);
	mtx_leave(mtx);
}
/*
 * smp_rendezvous_cpus: run action_func(arg) on every CPU in `map'.
 *
 * Publishes the rendezvous state under smp_ipi_mtx, IPIs the other CPUs
 * in the map, participates itself if included, and spins until all
 * mapped CPUs have checked in.  Single-CPU systems short-circuit to a
 * direct call.
 */
void
smp_rendezvous_cpus(unsigned long map,
    void (* action_func)(void *), void *arg)
{
	unsigned int cpumask = 1 << cpu_number();

	if (ncpus == 1) {
		if (action_func != NULL)
			action_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	mtx_enter(&smp_ipi_mtx);

	/* set static function pointers */
	smp_rv_map = map;
	smp_rv_action_func = action_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* signal other processors, which will enter the IPI with interrupts off */
	mips64_multicast_ipi(map & ~cpumask, MIPS64_IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (map & cpumask)
		smp_rendezvous_action();

	/* spin until every mapped CPU has completed the action */
	while (smp_rv_waiters[1] != smp_rv_map)
		;

	/* release lock */
	mtx_leave(&smp_ipi_mtx);
}
/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 *
 * Calls the driver's \c irq_uninstall() function, and stops the irq.
 * Returns EINVAL if the irq was not enabled, 0 otherwise.
 */
int
drm_irq_uninstall(struct drm_device *dev)
{
	int i;

	DRM_LOCK();
	if (!dev->irq_enabled) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	dev->irq_enabled = 0;
	DRM_UNLOCK();

	/*
	 * Ick. we're about to turn of vblanks, so make sure anyone waiting
	 * on them gets woken up. Also make sure we update state correctly
	 * so that we can continue refcounting correctly.
	 */
	if (dev->num_crtcs) {
		mtx_enter(&dev->vbl_lock);
		for (i = 0; i < dev->num_crtcs; i++) {
			wakeup(&dev->vbl_queue[i]);
			dev->vblank_enabled[i] = 0;
			/* snapshot the hw counter so refcounting stays sane */
			dev->last_vblank[i] =
			    dev->driver->get_vblank_counter(dev, i);
		}
		mtx_leave(&dev->vbl_lock);
	}

	DRM_DEBUG("irq=%d\n", dev->irq);

	dev->driver->irq_uninstall(dev);

	return (0);
}
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; mtx_enter(&rman->lock); if (drm_mm_clean(mm)) { drm_mm_takedown(mm); mtx_leave(&rman->lock); kfree(rman); man->priv = NULL; return 0; } mtx_leave(&rman->lock); return -EBUSY; }
/*
 * midipoll: poll(2) backend for the midi device.
 *
 * Reports readability when the input buffer has data and writability
 * when the output buffer has room; otherwise records the selector for
 * later wakeup.  Returns the ready-event mask, or POLLERR if the unit
 * does not exist.
 */
int
midipoll(dev_t dev, int events, struct proc *p)
{
	struct midi_softc *sc;
	int ready;

	sc = (struct midi_softc *)device_lookup(&midi_cd, minor(dev));
	if (sc == NULL)
		return POLLERR;

	ready = 0;
	mtx_enter(&audio_lock);
	if ((events & (POLLIN | POLLRDNORM)) && !MIDIBUF_ISEMPTY(&sc->inbuf))
		ready |= events & (POLLIN | POLLRDNORM);
	if ((events & (POLLOUT | POLLWRNORM)) && !MIDIBUF_ISFULL(&sc->outbuf))
		ready |= events & (POLLOUT | POLLWRNORM);
	if (ready == 0) {
		/* nothing ready yet: register for a wakeup */
		if (events & (POLLIN | POLLRDNORM))
			selrecord(p, &sc->rsel);
		if (events & (POLLOUT | POLLWRNORM))
			selrecord(p, &sc->wsel);
	}
	mtx_leave(&audio_lock);
	device_unref(&sc->dev);
	return (ready);
}
struct mbuf * m_free(struct mbuf *m) { struct mbuf *n; if (m == NULL) return (NULL); mtx_enter(&mbstatmtx); mbstat.m_mtypes[m->m_type]--; mtx_leave(&mbstatmtx); n = m->m_next; if (m->m_flags & M_ZEROIZE) { m_zero(m); /* propagate M_ZEROIZE to the next mbuf in the chain */ if (n) n->m_flags |= M_ZEROIZE; } if (m->m_flags & M_PKTHDR) m_tag_delete_chain(m); if (m->m_flags & M_EXT) m_extfree(m); pool_put(&mbpool, m); return (n); }
/*
 * neo_trigger_output: start DMA playback.
 *
 * Programs the playback buffer window, current pointer and watermark
 * registers from the supplied ring [start, end), arms the per-block
 * interrupt callback, and enables free-running playback.  Always
 * returns 0.
 */
int
neo_trigger_output(void *addr, void *start, void *end, int blksize,
    void (*intr)(void *), void *arg, struct audio_params *param)
{
	struct neo_softc *sc = addr;
	int ssz;

	mtx_enter(&audio_lock);
	sc->pintr = intr;
	sc->parg = arg;

	/*
	 * Bytes per sample frame; NOTE(review): this path multiplies
	 * precision by param->factor while neo_trigger_input checks
	 * precision alone — confirm factor semantics against the
	 * audio_params definition.
	 */
	ssz = (param->precision * param->factor == 16) ? 2 : 1;
	if (param->channels == 2)
		ssz <<= 1;

	sc->pbufsize = ((char *)end - (char *)start);
	sc->pblksize = blksize;
	sc->pwmark = blksize;

	/* program the hardware playback window and watermark */
	nm_wr(sc, NM_PBUFFER_START, sc->pbuf, 4);
	nm_wr(sc, NM_PBUFFER_END, sc->pbuf + sc->pbufsize - ssz, 4);
	nm_wr(sc, NM_PBUFFER_CURRP, sc->pbuf, 4);
	nm_wr(sc, NM_PBUFFER_WMARK, sc->pbuf + sc->pwmark, 4);

	/* go: free-running playback, unmuted */
	nm_wr(sc, NM_PLAYBACK_ENABLE_REG,
	    NM_PLAYBACK_FREERUN | NM_PLAYBACK_ENABLE_FLAG, 1);
	nm_wr(sc, NM_AUDIO_MUTE_REG, 0, 2);
	mtx_leave(&audio_lock);

	return (0);
}
/*
 * drm_handle_vblank_events: deliver due vblank events for a CRTC.
 *
 * Walks the CRTC's pending event list and, for each event whose
 * requested sequence has been reached (unsigned wraparound compare),
 * stamps it with the current sequence and time, moves it to the owning
 * file's event list, and wakes/selwakes the file.
 */
void
drm_handle_vblank_events(struct drm_device *dev, int crtc)
{
	struct drmevlist *list;
	struct drm_pending_event *ev, *tmp;
	struct drm_pending_vblank_event *vev;
	struct timeval now;
	u_int seq;

	list = &dev->vblank->vb_crtcs[crtc].vbl_events;
	microtime(&now);
	seq = drm_vblank_count(dev, crtc);

	mtx_enter(&dev->event_lock);
	/* tmp caches the next link so removal inside the loop is safe */
	for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list); ev = tmp) {
		tmp = TAILQ_NEXT(ev, link);

		vev = (struct drm_pending_vblank_event *)ev;
		/* (seq - wanted) > 1<<23 means wanted is still in the future */
		if ((seq - vev->event.sequence) > (1 << 23))
			continue;

		DPRINTF("%s: got vblank event on crtc %d, value %d\n",
		    __func__, crtc, seq);

		vev->event.sequence = seq;
		vev->event.tv_sec = now.tv_sec;
		vev->event.tv_usec = now.tv_usec;
		drm_vblank_put(dev, crtc);
		TAILQ_REMOVE(list, ev, link);
		TAILQ_INSERT_TAIL(&ev->file_priv->evlist, ev, link);
		wakeup(&ev->file_priv->evlist);
		selwakeup(&ev->file_priv->rsel);
	}
	mtx_leave(&dev->event_lock);
}
/*
 * tmpfs_mntmem_adjust: change the mount's memory limit.
 *
 * Rejects a limit below the bytes already in use with EINVAL; otherwise
 * installs the new limit and returns 0.
 */
int
tmpfs_mntmem_adjust(struct tmpfs_mount *mp, uint64_t memlimit)
{
	int error = 0;

	mtx_enter(&mp->tm_acc_lock);
	if (memlimit < mp->tm_bytes_used)
		error = EINVAL;
	else
		mp->tm_mem_limit = memlimit;
	mtx_leave(&mp->tm_acc_lock);
	return error;
}
/*
 * buf_free_pages: release the wired pages backing a buffer.
 *
 * Detaches the buffer from its uvm object, unwires and frees every page
 * in [b_poffs, b_poffs + b_bufsize), and updates the buffer-cache page
 * statistics.  Must be called with b_data already unmapped.
 */
void
buf_free_pages(struct buf *bp)
{
	struct uvm_object *uobj = bp->b_pobj;
	struct vm_page *pg;
	voff_t off, i;
	int s;

	KASSERT(bp->b_data == NULL);
	KASSERT(uobj != NULL);

	s = splbio();

	off = bp->b_poffs;
	bp->b_pobj = NULL;
	bp->b_poffs = 0;

	mtx_enter(&uobj->vmobjlock);
	for (i = 0; i < atop(bp->b_bufsize); i++) {
		pg = uvm_pagelookup(uobj, off + ptoa(i));
		KASSERT(pg != NULL);
		KASSERT(pg->wire_count == 1);
		pg->wire_count = 0;
		/* Never on a pageq, no pageqlock needed.  */
		uvm_pagefree(pg);
		bcstats.numbufpages--;
	}
	mtx_leave(&uobj->vmobjlock);
	splx(s);
}
/*
 * midikqfilter: kqueue(2) attach for the midi device.
 *
 * Registers the knote on the read or write selinfo list depending on
 * the filter; returns EINVAL for any other filter and ENXIO for a
 * nonexistent unit.
 */
int
midikqfilter(dev_t dev, struct knote *kn)
{
	struct midi_softc *sc;
	struct klist *list;
	int error = 0;

	sc = (struct midi_softc *)device_lookup(&midi_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		list = &sc->rsel.si_note;
		kn->kn_fop = &midiread_filtops;
		break;
	case EVFILT_WRITE:
		list = &sc->wsel.si_note;
		kn->kn_fop = &midiwrite_filtops;
		break;
	default:
		error = EINVAL;
		goto done;
	}
	kn->kn_hook = (void *)sc;

	mtx_enter(&audio_lock);
	SLIST_INSERT_HEAD(list, kn, kn_selnext);
	mtx_leave(&audio_lock);
done:
	device_unref(&sc->dev);
	return error;
}
/*
 * ttm_suspend_unlock: drop the suspend flag and wake anyone queued
 * waiting on the lock.
 */
void
ttm_suspend_unlock(struct ttm_lock *lock)
{
	struct mtx *mtx = &lock->lock;

	mtx_enter(mtx);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wakeup(&lock->queue);
	mtx_leave(mtx);
}
/*
 * ttm_read_unlock: drop one reader reference; wake queued waiters when
 * the last reader leaves (rw reaches 0).
 */
void
ttm_read_unlock(struct ttm_lock *lock)
{
	struct mtx *mtx = &lock->lock;

	mtx_enter(mtx);
	lock->rw--;
	if (lock->rw == 0)
		wakeup(&lock->queue);
	mtx_leave(mtx);
}
/*
 * neo_trigger_input: start DMA recording.
 *
 * Programs the record buffer window, current pointer and watermark
 * registers from the supplied ring [start, end), arms the per-block
 * interrupt callback, and enables free-running recording.  Always
 * returns 0.
 */
int
neo_trigger_input(void *addr, void *start, void *end, int blksize,
    void (*intr)(void *), void *arg, struct audio_params *param)
{
	struct neo_softc *sc = addr;
	int ssz;

	mtx_enter(&audio_lock);
	sc->rintr = intr;
	sc->rarg = arg;

	/*
	 * Bytes per sample frame; NOTE(review): this checks precision
	 * alone while neo_trigger_output multiplies by param->factor —
	 * confirm whether the asymmetry is intentional.
	 */
	ssz = (param->precision == 16) ? 2 : 1;
	if (param->channels == 2)
		ssz <<= 1;

	sc->rbufsize = ((char *)end - (char *)start);
	sc->rblksize = blksize;
	sc->rwmark = blksize;

	/* program the hardware record window and watermark */
	nm_wr(sc, NM_RBUFFER_START, sc->rbuf, 4);
	nm_wr(sc, NM_RBUFFER_END, sc->rbuf + sc->rbufsize, 4);
	nm_wr(sc, NM_RBUFFER_CURRP, sc->rbuf, 4);
	nm_wr(sc, NM_RBUFFER_WMARK, sc->rbuf + sc->rwmark, 4);

	/* go: free-running record */
	nm_wr(sc, NM_RECORD_ENABLE_REG,
	    NM_RECORD_FREERUN | NM_RECORD_ENABLE_FLAG, 1);
	mtx_leave(&audio_lock);

	return (0);
}
/*
 * drm_vblank_get: take a reference on a CRTC's vblank interrupt.
 *
 * The first reference on a disabled CRTC enables its vblank interrupt
 * via the driver and resynchronizes the counter; on driver failure the
 * reference is dropped again.  Returns 0 on success, EINVAL when the
 * device irq is off, or the driver's enable_vblank error.
 */
int
drm_vblank_get(struct drm_device *dev, int crtc)
{
	struct drm_vblank_info *vbl = dev->vblank;
	int ret = 0;

	if (dev->irq_enabled == 0)
		return (EINVAL);

	mtx_enter(&vbl->vb_lock);
	DPRINTF("%s: %d refs = %d\n", __func__, crtc,
	    vbl->vb_crtcs[crtc].vbl_refs);
	vbl->vb_crtcs[crtc].vbl_refs++;
	/* first reference while disabled: turn the interrupt on */
	if (vbl->vb_crtcs[crtc].vbl_refs == 1 &&
	    vbl->vb_crtcs[crtc].vbl_enabled == 0) {
		if ((ret = dev->driver->enable_vblank(dev, crtc)) == 0) {
			vbl->vb_crtcs[crtc].vbl_enabled = 1;
			drm_update_vblank_count(dev, crtc);
		} else {
			/* enable failed: undo the reference we took */
			vbl->vb_crtcs[crtc].vbl_refs--;
		}
	}
	mtx_leave(&vbl->vb_lock);

	return (ret);
}
/*
 * midi_timeout: timeout(9) callback; runs the output-interrupt handler
 * under audio_lock, as the real interrupt path would.
 */
void
midi_timeout(void *addr)
{
	mtx_enter(&audio_lock);
	midi_ointr(addr);
	mtx_leave(&audio_lock);
}
/*
 * yds_halt_output: stop DMA playback.
 *
 * Clears the playback interrupt callback, flags every play slot control
 * bank as stopped, and syncs the control data and play ring buffer DMA
 * maps.  Always returns 0.
 */
int
yds_halt_output(void *addr)
{
	struct yds_softc *sc = addr;

	DPRINTF(("yds: yds_halt_output\n"));
	mtx_enter(&audio_lock);
	if (sc->sc_play.intr) {
		sc->sc_play.intr = 0;
		/* Sync play slot control data */
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map,
		    sc->pbankoff,
		    sizeof(struct play_slot_ctrl_bank)*
			(*sc->ptbl)*N_PLAY_SLOT_CTRL_BANK,
		    BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);
		/* Stop the play slot operation */
		sc->pbankp[0]->status =
		    sc->pbankp[1]->status =
		    sc->pbankp[2]->status =
		    sc->pbankp[3]->status = 1;
		/* Sync ring buffer */
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_play.dma->map,
		    0, sc->sc_play.length, BUS_DMASYNC_POSTWRITE);
	}
	mtx_leave(&audio_lock);

	return 0;
}
int yds_halt_input(void *addr) { struct yds_softc *sc = addr; DPRINTF(("yds: yds_halt_input\n")); mtx_enter(&audio_lock); if (sc->sc_rec.intr) { /* Stop the rec slot operation */ YWRITE4(sc, YDS_MAPOF_REC, 0); sc->sc_rec.intr = 0; /* Sync rec slot control data */ bus_dmamap_sync(sc->sc_dmatag, sc->sc_ctrldata.map, sc->rbankoff, sizeof(struct rec_slot_ctrl_bank)* N_REC_SLOT_CTRL*N_REC_SLOT_CTRL_BANK, BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD); /* Sync ring buffer */ bus_dmamap_sync(sc->sc_dmatag, sc->sc_rec.dma->map, 0, sc->sc_rec.length, BUS_DMASYNC_POSTREAD); } sc->sc_rec.intr = NULL; mtx_leave(&audio_lock); return 0; }