/**
 * Destroys a Darwin ring-0 IPRT mutex semaphore.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_PARAMETER for a NIL handle,
 *          VERR_INVALID_HANDLE for a bad pointer/magic or a lost destruction
 *          race.
 * @param   hMutexSem   The mutex semaphore handle to destroy.
 */
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    /* NOTE(review): NIL yields VERR_INVALID_PARAMETER here, while the event
       destroy path treats NIL as a no-op success — confirm this asymmetry is
       intentional. */
    if (pThis == NIL_RTSEMMUTEX)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis), VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Kill it, wake up all waiting threads and release the reference.
     */
    /* Atomically invalidate the magic; a concurrent destroy loses the
       compare-exchange and is rejected with VERR_INVALID_HANDLE. */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, ~RTSEMMUTEX_MAGIC, RTSEMMUTEX_MAGIC), VERR_INVALID_HANDLE);
    lck_spin_lock(pThis->pSpinlock);

    /* FALSE = wake every waiter; THREAD_RESTART makes them re-check state and
       observe the now-dead magic. */
    if (pThis->cWaiters > 0)
        thread_wakeup_prim((event_t)pThis, FALSE /* one_thread */, THREAD_RESTART);

    /* Drop the destroy reference.  On the last reference the free routine
       takes over (presumably releasing the spinlock and the memory itself —
       its body is not visible here); otherwise just unlock and let the last
       waiter free it. */
    if (ASMAtomicDecU32(&pThis->cRefs) == 0)
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);

    IPRT_DARWIN_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Destroys a Darwin ring-0 IPRT multiple-release event semaphore.
 *
 * @returns VINF_SUCCESS on success (NIL is tolerated as a no-op),
 *          VERR_INVALID_HANDLE for a bad pointer or magic.
 * @param   hEventMultiSem  The event semaphore handle to destroy.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);
    RT_ASSERT_INTS_ON();

    lck_spin_lock(pThis->pSpinlock);

    /* Invalidate the magic under the lock so waiters that wake up below see a
       dead handle, then clear the state bits while keeping the generation
       counter intact. */
    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC); /* make the handle invalid */
    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIDARWIN_GEN_MASK);
    if (pThis->fHaveBlockedThreads)
    {
        /* abort waiting threads. */
        /* FALSE = wake all; THREAD_RESTART signals an aborted (not signalled)
           wakeup. */
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_RESTART);
    }

    lck_spin_unlock(pThis->pSpinlock);

    /* Drop the creator's reference; the last reference holder frees the
       structure. */
    rtR0SemEventMultiDarwinRelease(pThis);
    return VINF_SUCCESS;
}
/**
 * Signals a Darwin ring-0 IPRT multiple-release event semaphore, releasing
 * all current and future waiters until the event is reset.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_HANDLE for a bad pointer or
 *          magic.
 * @param   hEventMultiSem  The event semaphore handle to signal.
 */
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic), VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    /* Hold a reference across the operation so a concurrent destroy cannot
       free the structure under us. */
    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Set the signal and increment the generation counter.
     */
    /* Bumping the generation lets waiters distinguish this signal from an
       earlier one they may already have consumed. */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT;
    fNew |= RTSEMEVENTMULTIDARWIN_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    /*
     * Wake up all sleeping threads.
     */
    if (pThis->fHaveBlockedThreads)
    {
        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
        /* FALSE = wake all; THREAD_AWAKENED marks a normal (signalled)
           wakeup. */
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Releases a Darwin ring-0 IPRT mutex semaphore held by the calling thread.
 *
 * Decrements the recursion count; when it reaches zero the ownership is
 * cleared and, if anyone is waiting, a single waiter is woken up.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_HANDLE for a bad pointer or
 *          magic, VERR_NOT_OWNER if the calling thread does not own the
 *          mutex.
 * @param   hMutexSem   The mutex semaphore handle to release.
 */
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Take the lock and do the job.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    int rc = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        if (--pThis->cRecursions == 0)
        {
            pThis->hNativeOwner = NIL_RTNATIVETHREAD;
            /* TRUE = wake exactly one waiter; it will re-contend for
               ownership. */
            if (pThis->cWaiters > 0)
                thread_wakeup_prim((event_t)pThis, TRUE /* one_thread */, THREAD_AWAKENED);
        }
    }
    else
        rc = VERR_NOT_OWNER;

    lck_spin_unlock(pThis->pSpinlock);

    AssertRC(rc);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    /* Bug fix: propagate rc instead of unconditionally returning
       VINF_SUCCESS, so a non-owner caller actually sees VERR_NOT_OWNER
       (matches the documented contract and the AssertRC above). */
    return rc;
}
/**
 * Wakes thread(s) sleeping on @a event in association with an
 * IORecursiveLock.
 *
 * The lock argument itself is ignored; the wakeup keys purely off the event
 * address.  THREAD_AWAKENED reports a normal (non-aborted) wakeup to the
 * sleeper(s).
 */
void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    event_t const ev = reinterpret_cast<event_t>(event);
    thread_wakeup_prim(ev, oneThread, THREAD_AWAKENED);
}