/*
 * Sleep on 'wchan' using the ppbus lock; a thin convenience wrapper
 * around mtx_sleep() for ppbus clients.  Returns the mtx_sleep()
 * result (0 on normal wakeup, EWOULDBLOCK on timeout, etc.).
 */
int
ppb_sleep(device_t bus, void *wchan, int priority, const char *wmesg, int timo)
{
	struct ppb_data *sc = DEVTOSOFTC(bus);

	return (mtx_sleep(wchan, sc->ppc_lock, priority, wmesg, timo));
}
/*
 * ppb_poll_bus()
 *
 * Poll the parallel port status register until (status & mask) == status,
 * the caller-supplied timeout expires, or an interrupted sleep aborts the
 * wait.
 *
 * 'max' is a delay in 10-millisecond units; PPB_FOREVER means no limit.
 * 'how' selects busy-polling (PPB_POLL) versus sleeping between checks,
 * and PPB_NOINTR makes the sleep non-interruptible (no PCATCH).
 *
 * Returns 0 when the status matched, EWOULDBLOCK on timeout, or the
 * error from mtx_sleep() (e.g. EINTR) otherwise.
 */
int
ppb_poll_bus(device_t bus, int max, char mask, char status, int how)
{
	struct ppb_data *ppb = DEVTOSOFTC(bus);
	int i, j, error;
	char r;

	ppb_assert_locked(bus);

	/* try at least up to 10ms: 10000 iterations of DELAY(1) per pass */
	for (j = 0; j < ((how & PPB_POLL) ? max : 1); j++) {
		for (i = 0; i < 10000; i++) {
			r = ppb_rstr(bus);
			DELAY(1);
			if ((r & mask) == status)
				return (0);
		}
	}

	/* Not busy-polling: sleep in 10ms slices between status checks. */
	if (!(how & PPB_POLL)) {
		for (i = 0; max == PPB_FOREVER || i < max-1; i++) {
			if ((ppb_rstr(bus) & mask) == status)
				return (0);

			/* wait 10 ms */
			error = mtx_sleep((caddr_t)bus, ppb->ppc_lock,
			    PPBPRI | (how == PPB_NOINTR ? 0 : PCATCH),
			    "ppbpoll", hz/100);
			/* Any result other than a timeout ends the wait. */
			if (error != EWOULDBLOCK)
				return (error);
		}
	}

	return (EWOULDBLOCK);
}
/*
 * Claim the free hypercall message from the context, sleeping until one
 * becomes available.  Returns NULL if 'dtor_flag' is set on the context,
 * i.e. the context is being destroyed while we were waiting.
 */
static struct vmbus_msghc *
vmbus_msghc_get1(struct vmbus_msghc_ctx *mhc, uint32_t dtor_flag)
{
	struct vmbus_msghc *msg = NULL;

	mtx_lock(&mhc->mhc_free_lock);

	/* Block until a message is freed or destruction begins. */
	while ((mhc->mhc_flags & dtor_flag) == 0 && mhc->mhc_free == NULL)
		mtx_sleep(&mhc->mhc_free, &mhc->mhc_free_lock, 0, "gmsghc", 0);

	if ((mhc->mhc_flags & dtor_flag) == 0) {
		/* Take ownership of the free message slot. */
		msg = mhc->mhc_free;
		KASSERT(msg != NULL, ("no free hypercall msg"));
		KASSERT(msg->mh_resp == NULL,
		    ("hypercall msg has pending response"));
		mhc->mhc_free = NULL;
	}

	mtx_unlock(&mhc->mhc_free_lock);

	return msg;
}
/*
 * Acquire exclusive ownership of the GPIO bus for 'child'.  With
 * GPIOBUS_DONTWAIT the call fails with EWOULDBLOCK if the bus is busy;
 * otherwise it sleeps until the current owner releases the bus.
 * Panics if 'child' already owns the bus (recursive acquisition).
 */
static int
gpiobus_acquire_bus(device_t busdev, device_t child, int how)
{
	struct gpiobus_softc *sc = device_get_softc(busdev);

	GPIOBUS_ASSERT_UNLOCKED(sc);
	GPIOBUS_LOCK(sc);
	while (sc->sc_owner != NULL) {
		if (sc->sc_owner == child)
			panic("%s: %s still owns the bus.",
			    device_get_nameunit(busdev),
			    device_get_nameunit(child));
		if (how == GPIOBUS_DONTWAIT) {
			GPIOBUS_UNLOCK(sc);
			return (EWOULDBLOCK);
		}
		/* Wait for the owner to wake us on release. */
		mtx_sleep(sc, &sc->sc_mtx, 0, "gpiobuswait", 0);
	}
	sc->sc_owner = child;
	GPIOBUS_UNLOCK(sc);

	return (0);
}
/*
 * Wait for the command described by 'qcb' to complete.
 *
 * With interrupts enabled we sleep on the qcb and rely on the interrupt
 * handler to wake us; otherwise (or when dumping) we busy-poll the
 * controller's completion FIFO.  Returns 0 on completion or ETIMEDOUT
 * after roughly 5 seconds either way.
 */
static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	/* During a crash dump the lock discipline is relaxed. */
	if (!dumping)
		mtx_assert(&ida->lock, MA_OWNED);

	if (ida->flags & IDA_INTERRUPTS) {
		/* Interrupt path: wakeup is expected on 'qcb'. */
		if (mtx_sleep(qcb, &ida->lock, PRIBIO, "idacmd", 5 * hz)) {
			qcb->state = QCB_TIMEDOUT;
			return (ETIMEDOUT);
		}
		return (0);
	}

again:
	delay = 5 * 1000 * 100;	/* 5 sec delay: 500000 polls of 10us each */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0) {
			qcb->state = QCB_TIMEDOUT;
			return (ETIMEDOUT);
		}
		DELAY(10);
	}

	/*
	 * Mask off the low two bits of the completion address before
	 * mapping it back to a qcb (presumably hardware status/tag bits
	 * -- TODO confirm against the controller spec).
	 */
	qcb_done = idahwqcbptov(ida, completed & ~3);
	/* A different command completed first: retire it via the loop. */
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}
/*
 * Detach the cyapa device: stop the polling thread first, then tear
 * down the character device, kqueue/select state, and the softc mutex.
 */
static int
cyapa_detach(device_t dev)
{
	struct cyapa_softc *sc;

	sc = device_get_softc(dev);

	/* Cleanup poller thread */
	cyapa_lock(sc);
	while (sc->poll_thread_running) {
		/*
		 * Signal the poller to exit and sleep up to 1s per pass.
		 * The poller is expected to clear poll_thread_running and
		 * wake us on &sc->detaching -- TODO confirm against the
		 * poller loop (not visible in this chunk).
		 */
		sc->detaching = 1;
		mtx_sleep(&sc->detaching, &sc->mutex, PCATCH, "cyapadet", hz);
	}
	cyapa_unlock(sc);

	destroy_dev(sc->devnode);

	/* Drain and destroy select/kqueue state before killing the mutex. */
	knlist_clear(&sc->selinfo.si_note, 0);
	seldrain(&sc->selinfo);
	knlist_destroy(&sc->selinfo.si_note);

	mtx_destroy(&sc->mutex);

	return (0);
}
/*
 * Wait up to 25ms for the requested status using a 25uS polling loop.
 *
 * Returns 0 once any bit in 'status' is observed (or one of the shims
 * below fires), SMB_ETIMEOUT otherwise.  The 25ms window is a retry
 * timer ('count'/'limit' in ticks, hz/40 == 25ms) that is restarted
 * whenever forward progress is seen on the TX fifo.
 */
static int
wait_status(ig4iic_softc_t *sc, uint32_t status)
{
	uint32_t v;
	int error;
	int txlvl = -1;
	int count;
	int limit;

	error = SMB_ETIMEOUT;
	count = ticks;
	limit = hz / 40;

	while (ticks - count <= limit) {
		/*
		 * Check requested status
		 */
		v = reg_read(sc, IG4_REG_I2C_STA);
		if (v & status) {
			error = 0;
			break;
		}

		/*
		 * Shim RX_NOTEMPTY: treat as satisfied if the data was
		 * already read by the interrupt code (ring indices differ).
		 */
		if (status & IG4_STATUS_RX_NOTEMPTY) {
			if (sc->rpos != sc->rnext) {
				error = 0;
				break;
			}
		}

		/*
		 * Shim TX_EMPTY by resetting the retry timer if we
		 * see a change in the transmit fifo level.
		 */
		if (status & IG4_STATUS_TX_EMPTY) {
			v = reg_read(sc, IG4_REG_TXFLR) & IG4_FIFOLVL_MASK;
			if (txlvl != v) {
				txlvl = v;
				count = ticks;
			}
		}

		/*
		 * The interrupt will wake us up if we are waiting for
		 * read data, otherwise poll with a 25us delay.
		 */
		if (status & IG4_STATUS_RX_NOTEMPTY) {
			mtx_sleep(sc, &sc->mtx, 0, "i2cwait",
			    (hz + 99) / 100);
		} else {
			DELAY(25);
		}
	}
	return error;
}
/*
 * Wait for the response of the active transaction 'xact' and return it
 * (with its length in *resp_len).  When sleeping is not allowed the
 * wait degrades to a 1ms polling loop with the lock dropped.  The wait
 * also ends if the context enters destruction.
 */
static const void *
vmbus_xact_wait1(struct vmbus_xact *xact, size_t *resp_len, bool can_sleep)
{
	struct vmbus_xact_ctx *ctx = xact->x_ctx;
	const void *resp;

	mtx_lock(&ctx->xc_lock);

	KASSERT(ctx->xc_active == xact, ("xact mismatch"));
	for (;;) {
		if (xact->x_resp != NULL ||
		    (ctx->xc_flags & VMBUS_XACT_CTXF_DESTROY) != 0)
			break;
		if (!can_sleep) {
			/* Poll in 1ms steps with the lock released. */
			mtx_unlock(&ctx->xc_lock);
			DELAY(1000);
			mtx_lock(&ctx->xc_lock);
		} else {
			mtx_sleep(&ctx->xc_active, &ctx->xc_lock, 0,
			    "wxact", 0);
		}
	}
	resp = vmbus_xact_return(xact, resp_len);

	mtx_unlock(&ctx->xc_lock);

	return (resp);
}
/*
 * System-shutdown hook: flush and shut down every registered ALQ, then
 * terminate the ald_daemon.  'arg' and 'howto' are unused here.
 */
static void
ald_shutdown(void *arg, int howto)
{
	struct alq *alq;

	ALD_LOCK();

	/* Ensure no new queues can be created. */
	ald_shutingdown = 1;

	/* Shutdown all ALQs prior to terminating the ald_daemon. */
	while ((alq = BSD_LIST_FIRST(&ald_queues)) != NULL) {
		BSD_LIST_REMOVE(alq, aq_link);
		/* alq_shutdown() may sleep; drop the list lock around it. */
		ALD_UNLOCK();
		alq_shutdown(alq);
		ALD_LOCK();
	}

	/* At this point, all ALQs are flushed and shutdown. */

	/*
	 * Wake ald_daemon so that it exits. It won't be able to do
	 * anything until we mtx_sleep because we hold the ald_mtx.
	 */
	wakeup(&ald_active);

	/* Wait for ald_daemon to exit. */
	mtx_sleep(ald_proc, &ald_mtx, PWAIT, "aldslp", 0);

	ALD_UNLOCK();
}
/*
 * Participate in (and possibly complete) the current VCPU rendezvous.
 *
 * 'vcpuid' is the calling vcpu, or -1 for a caller that is not a vcpu
 * and only helps drive the rendezvous to completion.  The caller's
 * rendezvous function is invoked at most once per vcpu; the last vcpu
 * to finish clears the rendezvous and wakes all waiters.
 */
static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		/* Run the rendezvous function if this vcpu hasn't yet. */
		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		/* All requested vcpus done: tear down and wake waiters. */
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid,
		    "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}
/*
 * Block until the completion object has been signalled at least once,
 * then consume one completion count.
 */
static void
wait_for_completion(struct completion *c)
{
	mtx_lock(&c->lock);
	for (;;) {
		if (c->done != 0)
			break;
		mtx_sleep(c, &c->lock, 0, "hvwfc", 0);
	}
	c->done--;
	mtx_unlock(&c->lock);
}
/*
 * Clear previous configuration of the PL by asserting PROG_B.
 *
 * Performs the documented PROG_B/INIT_B handshake: pulse PROG_B and
 * wait for the corresponding INIT_B transitions, then sleep (woken by
 * the INIT_B positive-edge interrupt -- TODO confirm the ISR does
 * wakeup(sc)) for the FPGA's internal initialization.  Returns 0 on
 * success, EIO if a transition times out, or the mtx_sleep() error.
 */
static int
zy7_devcfg_reset_pl(struct zy7_devcfg_softc *sc)
{
	uint32_t devcfg_ctl;
	int tries, err;

	DEVCFG_SC_ASSERT_LOCKED(sc);

	devcfg_ctl = RD4(sc, ZY7_DEVCFG_CTRL);

	/* Deassert PROG_B (active low). */
	devcfg_ctl |= ZY7_DEVCFG_CTRL_PCFG_PROG_B;
	WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl);

	/* Wait for INIT_B deasserted (active low), up to 100 x 5us. */
	tries = 0;
	while ((RD4(sc, ZY7_DEVCFG_STATUS) &
	    ZY7_DEVCFG_STATUS_PCFG_INIT) == 0) {
		if (++tries >= 100)
			return (EIO);
		DELAY(5);
	}

	/* Reassert PROG_B. */
	devcfg_ctl &= ~ZY7_DEVCFG_CTRL_PCFG_PROG_B;
	WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl);

	/* Wait for INIT_B asserted. */
	tries = 0;
	while ((RD4(sc, ZY7_DEVCFG_STATUS) &
	    ZY7_DEVCFG_STATUS_PCFG_INIT) != 0) {
		if (++tries >= 100)
			return (EIO);
		DELAY(5);
	}

	/* Clear sticky bits and set up INIT_B positive edge interrupt. */
	WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
	WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_PCFG_INIT_PE);

	/* Deassert PROG_B again. */
	devcfg_ctl |= ZY7_DEVCFG_CTRL_PCFG_PROG_B;
	WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl);

	/*
	 * Wait for INIT_B deasserted indicating FPGA internal
	 * initialization is complete.  This takes much longer than the
	 * previous waits for INIT_B transition (on the order of 700us),
	 * hence the interrupt-driven sleep with a 1 second timeout.
	 */
	err = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "zy7in", hz);
	if (err != 0)
		return (err);

	/* Clear sticky DONE bit in interrupt status. */
	WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);

	return (0);
}
/*
 * Main loop of a socket AIO worker kproc.
 *
 * Drains tasks from the global soaio_jobs queue, switching back to the
 * daemon's own vmspace between jobs (jobs may leave the kproc in a
 * different vmspace).  Idle workers above the target pool size exit
 * after 'soaio_lifetime' ticks without work.  'arg' carries the unit
 * number allocated for this kproc.
 */
static void
soaio_kproc_loop(void *arg)
{
	struct proc *p;
	struct vmspace *myvm;
	struct task *task;
	int error, id, pending;

	id = (intptr_t)arg;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = curproc;
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&soaio_jobs_lock);
	MPASS(soaio_starting > 0);
	soaio_starting--;
	for (;;) {
		/* Run queued tasks, dropping the lock around each one. */
		while (!STAILQ_EMPTY(&soaio_jobs)) {
			task = STAILQ_FIRST(&soaio_jobs);
			STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
			soaio_queued--;
			pending = task->ta_pending;
			task->ta_pending = 0;
			mtx_unlock(&soaio_jobs_lock);

			task->ta_func(task->ta_context, pending);

			mtx_lock(&soaio_jobs_lock);
		}
		MPASS(soaio_queued == 0);

		/* Restore our own vmspace before idling. */
		if (p->p_vmspace != myvm) {
			mtx_unlock(&soaio_jobs_lock);
			vmspace_switch_aio(myvm);
			mtx_lock(&soaio_jobs_lock);

			/*
			 * Jobs may have queued while the lock was
			 * dropped; re-check before sleeping.
			 */
			continue;
		}

		soaio_idle++;
		error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
		    soaio_lifetime);
		soaio_idle--;

		/* Idle timeout with a surplus of workers: retire. */
		if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
		    soaio_num_procs > soaio_target_procs)
			break;
	}
	soaio_num_procs--;
	mtx_unlock(&soaio_jobs_lock);
	free_unr(soaio_kproc_unr, id);
	kproc_exit(0);
}
/*
 * Delay for 'usec' microseconds.  Sub-millisecond delays busy-spin;
 * anything longer sleeps on the softc lock (rounded down to whole
 * milliseconds by msecs_to_ticks()).
 */
void
rtwn_pci_delay(struct rtwn_softc *sc, int usec)
{
	if (usec >= 1000) {
		(void) mtx_sleep(sc, &sc->sc_mtx, 0, "rtwn_pci",
		    msecs_to_ticks(usec / 1000));
	} else {
		DELAY(usec);
	}
}
/*
 * Block until the adapter has no outstanding commands.
 *
 * Registers an idle callback (once) with the virtual bus layer and
 * sleeps until woken; the callback path is expected to wakeup(pAdapter)
 * -- TODO confirm against the VBus idle machinery (not visible here).
 *
 * NOTE(review): returns with pAdapter->lock still held; the caller is
 * apparently responsible for releasing it -- verify against callers.
 */
void
lock_driver_idle(IAL_ADAPTER_T *pAdapter)
{
	_VBUS_INST(&pAdapter->VBus)

	mtx_lock(&pAdapter->lock);
	while (pAdapter->outstandingCommands) {
		KdPrint(("outstandingCommands is %d, wait..\n",
		    pAdapter->outstandingCommands));
		/* Register the idle callback only once. */
		if (!mWaitingForIdle(_VBUS_P0))
			CallWhenIdle(_VBUS_P nothing, 0);
		mtx_sleep(pAdapter, &pAdapter->lock, 0, "hptidle", 0);
	}
	CheckIdleCall(_VBUS_P0);
}
/*
 * Sleep waiting for the I2C bus, honoring the caller's wait policy:
 * IIC_WAIT|IIC_INTR sleeps interruptibly (PCATCH), IIC_WAIT|IIC_NOINTR
 * sleeps uninterruptibly, and anything else fails immediately with
 * EWOULDBLOCK.  Returns the mtx_sleep() result.
 */
static int
iicbus_poll(struct iicbus_softc *sc, int how)
{
	int pri;

	IICBUS_ASSERT_LOCKED(sc);
	if (how == (IIC_WAIT | IIC_INTR))
		pri = IICPRI | PCATCH;
	else if (how == (IIC_WAIT | IIC_NOINTR))
		pri = IICPRI;
	else
		return (EWOULDBLOCK);

	return (mtx_sleep(sc, &sc->lock, pri, "iicreq", 0));
}
/*
 * Stop the logging subsystem: raise the shutdown flag, wake the logger,
 * wait for it to acknowledge (by clearing the flag), then destroy the
 * log lock.
 */
void
khttpd_log_stop(void)
{
	mtx_lock(&khttpd_log_lock);
	khttpd_log_shutdown = TRUE;
	wakeup(&khttpd_busy_logs);
	/* The flag was just set, so we always sleep at least once. */
	do {
		mtx_sleep(&khttpd_log_shutdown, &khttpd_log_lock, 0,
		    "logstop", 0);
	} while (khttpd_log_shutdown);
	mtx_unlock(&khttpd_log_lock);

	mtx_destroy(&khttpd_log_lock);
}
/*
 * Taskqueue handler that kicks off a scan.
 *
 * Validates that the scan is still wanted, optionally puts a running
 * STA vap into power-save first (so the AP buffers frames during the
 * off-channel period), then hands off to the driver and the per-channel
 * scan task.  NOTE(review): the early-return paths leave via
 * scan_done(), which presumably drops the com lock taken here --
 * confirm against scan_done().
 */
static void
scan_start(void *arg, int pending)
{
#define	ISCAN_REP	(ISCAN_MINDWELL | ISCAN_DISCARD)
	struct ieee80211_scan_state *ss = (struct ieee80211_scan_state *) arg;
	struct scan_state *ss_priv = SCAN_PRIVATE(ss);
	struct ieee80211vap *vap = ss->ss_vap;
	struct ieee80211com *ic = ss->ss_ic;

	IEEE80211_LOCK(ic);
	if (vap == NULL || (ic->ic_flags & IEEE80211_F_SCAN) == 0 ||
	    (ss_priv->ss_iflags & ISCAN_ABORT)) {
		/* Cancelled before we started */
		scan_done(ss, 0);
		return;
	}

	if (ss->ss_next == ss->ss_last) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
		    "%s: no channels to scan\n", __func__);
		scan_done(ss, 1);
		return;
	}

	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN) {
		if ((vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT) == 0) {
			/* Enable station power save mode */
			vap->iv_sta_ps(vap, 1);
			/* Wait until null data frame will be ACK'ed */
			mtx_sleep(vap, IEEE80211_LOCK_OBJ(ic), PCATCH,
			    "sta_ps", msecs_to_ticks(10));
			/* The scan may have been aborted while we slept. */
			if (ss_priv->ss_iflags & ISCAN_ABORT) {
				scan_done(ss, 0);
				return;
			}
		}
	}

	ss_priv->ss_scanend = ticks + ss_priv->ss_duration;

	/* XXX scan state can change! Re-validate scan state! */

	IEEE80211_UNLOCK(ic);

	ic->ic_scan_start(ic);	/* notify driver */

	scan_curchan_task(ss, 0);
}
/*
 * Execute a sequence of I2C messages on the JZ SMB controller.
 *
 * Serializes against other transfers via sc->busy, resets the
 * controller whenever the slave address changes, and always releases
 * the bus (stop condition, controller disable, wakeup of waiters) on
 * the way out.  Returns 0 on success or the first message's error.
 */
static int
jzsmb_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
{
	struct jzsmb_softc *sc;
	uint32_t n;
	uint16_t con;
	int error;

	sc = device_get_softc(dev);

	SMB_LOCK(sc);
	/* Wait for any in-progress transfer to finish. */
	while (sc->busy)
		mtx_sleep(sc, &sc->mtx, 0, "i2cbuswait", 0);
	sc->busy = 1;
	sc->status = 0;

	/*
	 * BUG FIX: 'error' was returned uninitialized when nmsgs == 0.
	 * Default to success so an empty transfer is a harmless no-op.
	 */
	error = 0;

	for (n = 0; n < nmsgs; n++) {
		/* Set target address; reset the controller on a change. */
		if (n == 0 || msgs[n].slave != msgs[n - 1].slave)
			jzsmb_reset_locked(dev, msgs[n].slave);

		/* Set read or write */
		if ((msgs[n].flags & IIC_M_RD) != 0)
			error = jzsmb_transfer_read(dev, &msgs[n]);
		else
			error = jzsmb_transfer_write(dev, &msgs[n],
			    n < nmsgs - 1);
		if (error != 0)
			goto done;
	}

done:
	/* Send stop if necessary */
	con = SMB_READ(sc, SMBCON);
	con &= ~SMBCON_STPHLD;
	SMB_WRITE(sc, SMBCON, con);

	/* Disable SMB */
	jzsmb_enable(sc, 0);

	sc->msg = NULL;
	sc->busy = 0;
	wakeup(sc);
	SMB_UNLOCK(sc);

	return (error);
}
/*
 * Allocate or resize buffers.
 *
 * Clamps the requested size to [BPF_MINBUFSIZE, bpf_maxbufsize] and
 * writes the clamped value back through 'i'.  Fails with EINVAL once
 * the descriptor is attached to an interface.  New buffers are
 * allocated before taking the descriptor lock since M_WAITOK may sleep.
 */
int
bpf_buffer_ioctl_sblen(struct bpf_d *d, u_int *i)
{
	u_int size;
	caddr_t fbuf, sbuf;

	size = *i;
	if (size > bpf_maxbufsize)
		*i = size = bpf_maxbufsize;
	else if (size < BPF_MINBUFSIZE)
		*i = size = BPF_MINBUFSIZE;

	/* Allocate buffers immediately */
	fbuf = (caddr_t)malloc(size, M_BPF, M_WAITOK);
	sbuf = (caddr_t)malloc(size, M_BPF, M_WAITOK);

	BPFD_LOCK(d);
	if (d->bd_bif != NULL) {
		/* Interface already attached, unable to change buffers */
		BPFD_UNLOCK(d);
		free(fbuf, M_BPF);
		free(sbuf, M_BPF);
		return (EINVAL);
	}

	/* Wait until any reader holding the hold buffer is finished. */
	while (d->bd_hbuf_in_use)
		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
		    PRINET, "bd_hbuf", 0);

	/* Free old buffers if set */
	if (d->bd_fbuf != NULL)
		free(d->bd_fbuf, M_BPF);
	if (d->bd_sbuf != NULL)
		free(d->bd_sbuf, M_BPF);

	/* Fill in new data */
	d->bd_bufsize = size;
	d->bd_fbuf = fbuf;
	d->bd_sbuf = sbuf;
	d->bd_hbuf = NULL;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	BPFD_UNLOCK(d);
	return (0);
}
/*
 * Delete an ACPI mutex.
 *
 * If threads are still waiting on the mutex, flag it as being reset,
 * wake them, and wait for them to drain before destroying the lock.
 * If the drain is interrupted (EINTR) the mutex is left intact and the
 * function returns without freeing it.
 */
void
AcpiOsDeleteMutex(ACPI_MUTEX Handle)
{
	struct acpi_mutex *am = (struct acpi_mutex *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (am == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
		return_VOID;
	}

	mtx_lock(&am->am_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));

	if (am->am_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, owner %p\n", am->am_name, am->am_owner));
		am->am_reset = 1;
		/* Wake all waiters so they can observe the reset flag. */
		wakeup(am);
		while (am->am_waiters > 0) {
			/* 1 second per pass; abort entirely on a signal. */
			if (mtx_sleep(&am->am_reset, &am->am_lock,
			    PCATCH, "acmrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    am->am_name, am->am_waiters));
				mtx_unlock(&am->am_lock);
				return_VOID;
			}
			if (ACPIMTX_AVAIL(am))
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, waiters %d\n",
				    am->am_name, am->am_waiters));
			else
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "wait %s, owner %p, waiters %d\n",
				    am->am_name, am->am_owner,
				    am->am_waiters));
		}
	}

	mtx_unlock(&am->am_lock);

	mtx_destroy(&am->am_lock);
	free(am, M_ACPISEM);
}
/*
 * Block until the active hypercall message 'mh' receives its response,
 * then clear the active slot and return the response.
 */
const struct vmbus_message *
vmbus_msghc_wait_result(struct vmbus_softc *sc, struct vmbus_msghc *mh)
{
	struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;

	mtx_lock(&mhc->mhc_active_lock);

	KASSERT(mhc->mhc_active == mh, ("msghc mismatch"));
	for (;;) {
		if (mh->mh_resp != NULL)
			break;
		mtx_sleep(&mhc->mhc_active, &mhc->mhc_active_lock, 0,
		    "wmsghc", 0);
	}
	mhc->mhc_active = NULL;

	mtx_unlock(&mhc->mhc_active_lock);

	return mh->mh_resp;
}
/*
 * Read the real-time clock from the CUDA microcontroller.
 *
 * Issues a CMD_READ_RTC pseudo command and waits (up to 100 ticks) for
 * the reply handler to fill in sc->sc_rtc.  Returns 0 on success or
 * EIO if no reply arrived before the timeout.
 */
static int
cuda_gettime(device_t dev, struct timespec *ts)
{
	struct cuda_softc *sc = device_get_softc(dev);
	uint8_t cmd[] = {CUDA_PSEUDO, CMD_READ_RTC};

	mtx_lock(&sc->sc_mutex);
	sc->sc_rtc = -1;
	cuda_send(sc, 1, 2, cmd);
	if (sc->sc_rtc == -1)
		mtx_sleep(&sc->sc_rtc, &sc->sc_mutex, 0, "rtc", 100);

	/*
	 * BUG FIX: previously a timed-out sleep fell through and
	 * returned success with a garbage time (-1 - DIFF19041970).
	 * Report an I/O error instead so callers can fall back.
	 */
	if (sc->sc_rtc == -1) {
		mtx_unlock(&sc->sc_mutex);
		return (EIO);
	}

	ts->tv_sec = sc->sc_rtc - DIFF19041970;
	ts->tv_nsec = 0;
	mtx_unlock(&sc->sc_mutex);

	return (0);
}
/*
 * Delete an ACPI semaphore.
 *
 * If threads are still waiting, flag the semaphore as being reset,
 * broadcast on its condvar, and wait for the waiters to drain before
 * destroying the lock and condvar.  An interrupted drain (EINTR)
 * aborts with AE_ERROR and leaves the semaphore allocated.
 */
ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
	struct acpi_sema *as = (struct acpi_sema *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (as == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);

	mtx_lock(&as->as_lock);

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));

	if (as->as_waiters > 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "reset %s, units %u, waiters %d\n",
		    as->as_name, as->as_units, as->as_waiters));
		as->as_reset = 1;
		/* Wake all waiters so they can observe the reset flag. */
		cv_broadcast(&as->as_cv);
		while (as->as_waiters > 0) {
			/* 1 second per pass; abort entirely on a signal. */
			if (mtx_sleep(&as->as_reset, &as->as_lock,
			    PCATCH, "acsrst", hz) == EINTR) {
				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				    "failed to reset %s, waiters %d\n",
				    as->as_name, as->as_waiters));
				mtx_unlock(&as->as_lock);
				return_ACPI_STATUS (AE_ERROR);
			}
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "wait %s, units %u, waiters %d\n",
			    as->as_name, as->as_units, as->as_waiters));
		}
	}

	mtx_unlock(&as->as_lock);

	mtx_destroy(&as->as_lock);
	cv_destroy(&as->as_cv);
	free(as, M_ACPISEM);

	return_ACPI_STATUS (AE_OK);
}
static void if_pcap_send(void *arg) { struct mbuf *m; struct if_pcap_softc *sc = (struct if_pcap_softc *)arg; struct ifnet *ifp = sc->ifp; uint8_t copybuf[2048]; uint8_t *pkt; unsigned int pktlen; if (sc->uif->cpu >= 0) sched_bind(sc->tx_thread, sc->uif->cpu); while (1) { mtx_lock(&sc->tx_lock); while (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; mtx_sleep(&ifp->if_drv_flags, &sc->tx_lock, 0, "wtxlk", 0); } mtx_unlock(&sc->tx_lock); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); pktlen = m_length(m, NULL); ifp->if_opackets++; if (!sc->isfile && (pktlen <= sizeof(copybuf))) { if (NULL == m->m_next) { /* all in one piece - avoid copy */ pkt = mtod(m, uint8_t *); ifp->if_ozcopies++; } else { pkt = copybuf; m_copydata(m, 0, pktlen, pkt); ifp->if_ocopies++; } if (0 != if_pcap_sendpacket(sc->pcap_host_ctx, pkt, pktlen)) ifp->if_oerrors++; } else { if (sc->isfile)
/*
 * Reinitialize the controller after a reset.
 *
 * Waits for the controller to report ready, sleeping 5 seconds per
 * probe; after TWS_RESET_TIMEOUT worth of waiting a soft reset is
 * asserted and one retry is made.  On success the init-connect
 * sequence, OBFL queue, and interrupts are restored and a waiter is
 * woken.
 */
static void
tws_reinit(void *arg)
{
	struct tws_softc *sc = (struct tws_softc *)arg;
	int timeout_val=0;
	int try=2;
	int done=0;

	// device_printf(sc->tws_dev,  "Waiting for Controller Ready\n");
	while ( !done && try ) {
		if ( tws_ctlr_ready(sc) ) {
			done = 1;
			break;
		} else {
			/* Account 5s per probe; soft-reset on timeout. */
			timeout_val += 5;
			if ( timeout_val >= TWS_RESET_TIMEOUT ) {
				timeout_val = 0;
				if ( try )
					tws_assert_soft_reset(sc);
				try--;
			}
			mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
		}
	}

	if (!done) {
		device_printf(sc->tws_dev,
		    "FAILED to get Controller Ready!\n");
		return;
	}

	sc->obfl_q_overrun = false;
	// device_printf(sc->tws_dev,  "Sending initConnect\n");
	if ( tws_init_connect(sc, tws_queue_depth) ) {
		TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
	}
	tws_init_obfl_q(sc);
	tws_turn_on_interrupts(sc);
	wakeup_one(sc);
}
/*
 * Enable or disable the controller and wait for the controller to
 * acknowledge the state change.  Polls ENABLE_STATUS up to 100 times,
 * sleeping one tick between probes.  Returns 0 on acknowledgement or
 * SMB_ETIMEOUT if the controller never reflects the requested state.
 */
static int
set_controller(ig4iic_softc_t *sc, uint32_t ctl)
{
	uint32_t status;
	int retry;

	reg_write(sc, IG4_REG_I2C_EN, ctl);

	for (retry = 100; retry > 0; --retry) {
		status = reg_read(sc, IG4_REG_ENABLE_STATUS);
		/* Done once the ENABLE bit matches the requested value. */
		if (((status ^ ctl) & IG4_I2C_ENABLE) == 0)
			return (0);
		mtx_sleep(sc, &sc->io_lock, 0, "i2cslv", 1);
	}
	return (SMB_ETIMEOUT);
}
/*
 * Stop accepting new entries for 'log' and wait for its queue to drain.
 * The log is moved to the head of the busy list so the logger services
 * it first; the logger is expected to wake us per drained batch.
 */
static void
khttpd_log_choke(struct khttpd_log *log)
{
	mtx_lock(&khttpd_log_lock);

	log->choking = TRUE;

	if (mbufq_len(&log->queue) > 0) {
		/* Prioritize this log for the logger thread. */
		TAILQ_REMOVE(&khttpd_busy_logs, log, link);
		TAILQ_INSERT_HEAD(&khttpd_busy_logs, log, link);

		/* Queue is known non-empty, so sleep at least once. */
		do {
			log->draining = TRUE;
			mtx_sleep(log, &khttpd_log_lock, 0, "drain", 0);
		} while (mbufq_len(&log->queue) > 0);
	}

	mtx_unlock(&khttpd_log_lock);
}
static void kthrdlk_done(void) { int ret; DPRINTF(("sending QUIT signal to the thrdlk threads\n")); /* wait kernel threads end */ mtx_lock(&test_global_lock); QUIT = 1; while (test_thrcnt != 0) { ret = mtx_sleep(&global_condvar, &test_global_lock, 0, "waiting thrs end", 30 * hz); if (ret == EWOULDBLOCK) { panic("some threads not die! remaining: %d", test_thrcnt); break; } } if (test_thrcnt == 0) DPRINTF(("All test_pause threads die\n")); mtx_destroy(&test_global_lock); }
static void ic_alloc_buffers(struct ic_softc *sc, int mtu) { char *obuf, *ifbuf; obuf = malloc(mtu + ICHDRLEN, M_DEVBUF, M_WAITOK); ifbuf = malloc(mtu + ICHDRLEN, M_DEVBUF, M_WAITOK); mtx_lock(&sc->ic_lock); while (sc->ic_flags & IC_BUFFERS_BUSY) { sc->ic_flags |= IC_BUFFER_WAITER; mtx_sleep(sc, &sc->ic_lock, 0, "icalloc", 0); sc->ic_flags &= ~IC_BUFFER_WAITER; } free(sc->ic_obuf, M_DEVBUF); free(sc->ic_ifbuf, M_DEVBUF); sc->ic_obuf = obuf; sc->ic_ifbuf = ifbuf; sc->ic_ifp->if_mtu = mtu; mtx_unlock(&sc->ic_lock); }