int
__thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
    const struct timespec *abstime)
{
        struct _umtx_time *tm_p, timeout;
        size_t tm_size;
        uint32_t owner;
        int ret;

        if (abstime == NULL) {
                tm_p = NULL;
                tm_size = 0;
        } else {
                timeout._clockid = CLOCK_REALTIME;
                timeout._flags = UMTX_ABSTIME;
                timeout._timeout = *abstime;
                tm_p = &timeout;
                tm_size = sizeof(timeout);
        }

        for (;;) {
                if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
                    UMUTEX_PRIO_INHERIT)) == 0) {
                        /* wait in kernel */
                        ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
                            (void *)tm_size, __DECONST(void *, tm_p));

                        /* now try to lock it */
                        owner = mtx->m_owner;
                        if ((owner & ~UMUTEX_CONTESTED) == 0 &&
                            atomic_cmpset_acq_32(&mtx->m_owner, owner,
                            id | owner))
                                return (0);
                } else {
                        /*
                         * The excerpt was truncated here; the tail below is
                         * a reconstruction consistent with the untimed
                         * __thr_umutex_lock() variant: priority-aware
                         * mutexes are locked in the kernel, and the loop
                         * ends on success or timeout.
                         */
                        ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
                            (void *)tm_size, __DECONST(void *, tm_p));
                        if (ret == 0)
                                break;
                }
                if (ret == ETIMEDOUT)
                        break;
        }
        return (ret);
}
int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
        uint32_t owner;

        if (!_thr_is_smp)
                return __thr_umutex_lock(mtx, id);

        if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
                for (;;) {
                        int count = SPINLOOPS;

                        while (count--) {
                                owner = mtx->m_owner;
                                if ((owner & ~UMUTEX_CONTESTED) == 0) {
                                        if (atomic_cmpset_acq_32(
                                            &mtx->m_owner,
                                            owner, id | owner)) {
                                                return (0);
                                        }
                                }
                                CPU_SPINWAIT;
                        }

                        /* wait in kernel */
                        _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
                }
        }

        return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
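The spin path above only works because the CAS preserves the contested bit
while installing the owner id; otherwise a waiter sleeping in the kernel
would never be woken on unlock. A minimal, self-contained sketch of that
invariant in portable C11 atomics (CONTESTED, try_acquire, and the values
in main are illustrative stand-ins, not libthr's definitions):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CONTESTED 0x80000000u   /* illustrative stand-in for UMUTEX_CONTESTED */

static int
try_acquire(_Atomic uint32_t *lockw, uint32_t id)
{
        uint32_t owner;

        owner = atomic_load_explicit(lockw, memory_order_relaxed);
        if ((owner & ~CONTESTED) != 0)
                return (0);     /* held by another thread */
        /* Install our id, keeping the contested bit intact. */
        return (atomic_compare_exchange_strong_explicit(lockw, &owner,
            id | owner, memory_order_acquire, memory_order_relaxed));
}

int
main(void)
{
        _Atomic uint32_t lockw = CONTESTED;     /* free, but has waiters */

        if (try_acquire(&lockw, 42))
                printf("lock word: %#x\n", (unsigned)atomic_load(&lockw));
        return (0);
}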
/*
 * Take a per-thread reference on a dynamically loaded syscall entry;
 * fails with ENOSYS once the entry is draining or already gone.
 */
int
syscall_thread_enter(struct thread *td, struct sysent *se)
{
        u_int32_t cnt, oldcnt;

        do {
                oldcnt = se->sy_thrcnt;
                if ((oldcnt & SY_THR_STATIC) != 0)
                        return (0);
                if ((oldcnt & (SY_THR_DRAINING | SY_THR_ABSENT)) != 0)
                        return (ENOSYS);
                cnt = oldcnt + SY_THR_INCR;
        } while (atomic_cmpset_acq_32(&se->sy_thrcnt, oldcnt, cnt) == 0);
        return (0);
}
/*
 * Mark the entry as draining so no new threads can enter, then wait
 * until the last in-flight thread leaves before marking it absent.
 */
static void
syscall_thread_drain(struct sysent *se)
{
        u_int32_t cnt, oldcnt;

        do {
                oldcnt = se->sy_thrcnt;
                KASSERT((oldcnt & SY_THR_STATIC) == 0,
                    ("drain on static syscall"));
                cnt = oldcnt | SY_THR_DRAINING;
        } while (atomic_cmpset_acq_32(&se->sy_thrcnt, oldcnt, cnt) == 0);
        while (atomic_cmpset_32(&se->sy_thrcnt, SY_THR_DRAINING,
            SY_THR_ABSENT) == 0)
                pause("scdrn", hz / 2);
}
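Taken together, syscall_thread_enter() and syscall_thread_drain() form a
reference-count handshake: new entries are refused once SY_THR_DRAINING is
set, and the drain completes only when every in-flight thread has left. A
hedged sketch of the same protocol in C11 atomics (proto_enter, proto_exit,
and the THR_* values are invented for illustration; in the kernel the
decrement side is syscall_thread_exit()):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define THR_DRAINING    0x1u
#define THR_ABSENT      0x2u
#define THR_INCR        0x4u    /* thread count kept in the upper bits */

static _Atomic uint32_t thrcnt;

/* Mirrors syscall_thread_enter(): take a reference unless draining. */
static int
proto_enter(void)
{
        uint32_t oldcnt, cnt;

        do {
                oldcnt = atomic_load(&thrcnt);
                if ((oldcnt & (THR_DRAINING | THR_ABSENT)) != 0)
                        return (-1);    /* the kernel returns ENOSYS */
                cnt = oldcnt + THR_INCR;
        } while (!atomic_compare_exchange_weak_explicit(&thrcnt, &oldcnt,
            cnt, memory_order_acquire, memory_order_relaxed));
        return (0);
}

/* Mirrors syscall_thread_exit(): drop the reference. */
static void
proto_exit(void)
{
        atomic_fetch_sub_explicit(&thrcnt, THR_INCR, memory_order_release);
}

int
main(void)
{
        proto_enter();
        atomic_fetch_or(&thrcnt, THR_DRAINING);         /* drain begins */
        printf("late enter refused: %d\n", proto_enter() == -1);
        proto_exit();
        /* The word is exactly THR_DRAINING once the count hits zero. */
        printf("drained: %d\n", atomic_load(&thrcnt) == THR_DRAINING);
        return (0);
}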
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
        uint32_t owner;

        if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
                for (;;) {
                        /* wait in kernel */
                        _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

                        owner = mtx->m_owner;
                        if ((owner & ~UMUTEX_CONTESTED) == 0 &&
                            atomic_cmpset_acq_32(&mtx->m_owner, owner,
                            id | owner))
                                return (0);
                }
        }

        return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
t_Handle
bman_portal_setup(struct bman_softc *bsc)
{
        struct dpaa_portals_softc *sc;
        t_BmPortalParam bpp;
        t_Handle portal;
        unsigned int cpu, p;

        /* Return NULL if we are not ready yet or are detaching. */
        if (bp_sc == NULL)
                return (NULL);
        sc = bp_sc;

        sched_pin();
        portal = NULL;
        cpu = PCPU_GET(cpuid);

        /* Claim the per-CPU portal slot (0 -> -1) if it is free. */
        while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
            0, -1) == 0) {
                p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

                /* Return if the portal is already initialized. */
                if (p != 0 && p != -1) {
                        sched_unpin();
                        return ((t_Handle)p);
                }

                /* Not initialized and "owned" by another thread. */
                thread_lock(curthread);
                mi_switch(SW_VOL, NULL);
                thread_unlock(curthread);
        }

        /* Map portal registers. */
        dpaa_portal_map_registers(sc);

        /* Configure and initialize the portal. */
        bpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
        bpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
        bpp.h_Bm = bsc->sc_bh;
        bpp.swPortalId = cpu;
        bpp.irq = (int)sc->sc_dp[cpu].dp_ires;

        portal = BM_PORTAL_Config(&bpp);
        if (portal == NULL)
                goto err;

        if (BM_PORTAL_Init(portal) != E_OK)
                goto err;

        /* Publish the initialized handle for other threads on this CPU. */
        atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
            (uint32_t)portal);

        sched_unpin();
        return (portal);

err:
        if (portal != NULL)
                BM_PORTAL_Free(portal);
        /* Release the slot so a later caller can retry initialization. */
        atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
        sched_unpin();
        return (NULL);
}
t_Handle
qman_portal_setup(struct qman_softc *qsc)
{
        struct dpaa_portals_softc *sc;
        t_QmPortalParam qpp;
        unsigned int cpu, p;
        t_Handle portal;

        /* Return NULL if we are not ready yet or are detaching. */
        if (qp_sc == NULL)
                return (NULL);
        sc = qp_sc;

        sched_pin();
        portal = NULL;
        cpu = PCPU_GET(cpuid);

        /* Claim the per-CPU portal slot (0 -> -1) if it is free. */
        while (atomic_cmpset_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
            0, -1) == 0) {
                p = atomic_load_acq_32((uint32_t *)&sc->sc_dp[cpu].dp_ph);

                /* Return if the portal is already initialized. */
                if (p != 0 && p != -1) {
                        sched_unpin();
                        return ((t_Handle)p);
                }

                /* Not initialized and "owned" by another thread. */
                thread_lock(curthread);
                mi_switch(SW_VOL, NULL);
                thread_unlock(curthread);
        }

        /* Map portal registers. */
        dpaa_portal_map_registers(sc);

        /* Configure and initialize the portal. */
        qpp.ceBaseAddress = rman_get_bushandle(sc->sc_rres[0]);
        qpp.ciBaseAddress = rman_get_bushandle(sc->sc_rres[1]);
        qpp.h_Qm = qsc->sc_qh;
        qpp.swPortalId = cpu;
        qpp.irq = (int)sc->sc_dp[cpu].dp_ires;
        qpp.fdLiodnOffset = 0;
        qpp.f_DfltFrame = qman_received_frame_callback;
        qpp.f_RejectedFrame = qman_rejected_frame_callback;
        qpp.h_App = qsc;

        portal = QM_PORTAL_Config(&qpp);
        if (portal == NULL)
                goto err;

        if (QM_PORTAL_Init(portal) != E_OK)
                goto err;

        if (QM_PORTAL_AddPoolChannel(portal, QMAN_COMMON_POOL_CHANNEL) != E_OK)
                goto err;

        /* Publish the initialized handle for other threads on this CPU. */
        atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph,
            (uint32_t)portal);

        sched_unpin();
        return (portal);

err:
        if (portal != NULL)
                QM_PORTAL_Free(portal);
        /* Release the slot so a later caller can retry initialization. */
        atomic_store_rel_32((uint32_t *)&sc->sc_dp[cpu].dp_ph, 0);
        sched_unpin();
        return (NULL);
}
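Both portal-setup routines use the same lock-free once-initialization
idiom: CAS the per-CPU slot from 0 to a -1 sentinel to claim the right to
initialize, publish the finished handle with a release store, and let
losers either reuse a published handle or yield until the winner is done.
A standalone sketch of that idiom with C11 atomics (BUSY, slot,
expensive_init, and get_resource are hypothetical names; sched_yield()
stands in for the voluntary mi_switch() above):

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BUSY    ((uintptr_t)-1)         /* sentinel: init in progress */

static _Atomic uintptr_t slot;          /* 0 = empty, BUSY = claimed */

static uintptr_t
expensive_init(void)
{
        static int resource = 123;      /* stands in for the portal */
        return ((uintptr_t)&resource);
}

static uintptr_t
get_resource(void)
{
        uintptr_t expected, p;

        for (;;) {
                expected = 0;
                /* Claim the slot (0 -> BUSY); the winner initializes. */
                if (atomic_compare_exchange_strong_explicit(&slot,
                    &expected, BUSY, memory_order_acquire,
                    memory_order_acquire))
                        break;
                p = atomic_load_explicit(&slot, memory_order_acquire);
                if (p != 0 && p != BUSY)
                        return (p);     /* already initialized */
                sched_yield();          /* another thread is initializing */
        }
        p = expensive_init();
        /* Publish the handle; later acquire loads observe a full init. */
        atomic_store_explicit(&slot, p, memory_order_release);
        return (p);
}

int
main(void)
{
        printf("resource at %#lx\n", (unsigned long)get_resource());
        return (0);
}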