/**
 * Internal worker for the sleep scenario.
 *
 * Called owning the spinlock, returns without it.
 *
 * @returns IPRT status code.
 * @param   pThis           The mutex instance.
 * @param   cMillies        The timeout.
 * @param   fInterruptible  Whether it's interruptible
 *                          (RTSemMutexRequestNoResume) or not
 *                          (RTSemMutexRequest).
 * @param   hNativeSelf     The thread handle of the caller.
 */
static int rtR0SemMutexDarwinRequestSleep(PRTSEMMUTEXINTERNAL pThis, RTMSINTERVAL cMillies,
                                          wait_interrupt_t fInterruptible, RTNATIVETHREAD hNativeSelf)
{
    /*
     * Grab a reference and indicate that we're waiting.
     *
     * The reference keeps the instance alive across the sleep even if it is
     * destroyed concurrently; cWaiters is only modified while holding the
     * spinlock, so plain increment/decrement suffices for it.
     */
    pThis->cWaiters++;
    ASMAtomicIncU32(&pThis->cRefs);

    /*
     * Go to sleep, use the address of the mutex instance as sleep/blocking/event id.
     *
     * NOTE(review): lck_spin_sleep/lck_spin_sleep_deadline are expected to drop
     * pThis->pSpinlock while blocked and reacquire it before returning, matching
     * the "called owning the spinlock" contract above — confirm against the XNU
     * locking documentation.
     */
    wait_result_t rcWait;
    if (cMillies == RT_INDEFINITE_WAIT)
        rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
    else
    {
        /* Convert the millisecond timeout to an absolute mach-time deadline. */
        uint64_t u64AbsTime;
        nanoseconds_to_absolutetime(cMillies * UINT64_C(1000000), &u64AbsTime);
        u64AbsTime += mach_absolute_time();

        rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                         (event_t)pThis, fInterruptible, u64AbsTime);
    }

    /*
     * Translate the rc.
     */
    int rc;
    switch (rcWait)
    {
        case THREAD_AWAKENED:
            /* Woken up by a signaller; claim ownership unless the mutex was
               destroyed while we slept (magic changed). */
            if (RT_LIKELY(pThis->u32Magic == RTSEMMUTEX_MAGIC))
            {
                if (RT_LIKELY(   pThis->cRecursions == 0
                              && pThis->hNativeOwner == NIL_RTNATIVETHREAD))
                {
                    pThis->cRecursions = 1;
                    pThis->hNativeOwner = hNativeSelf;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /* We were woken but somebody else owns the mutex — this
                       should not happen; the asserts below are meant to fire. */
                    Assert(pThis->cRecursions == 0);
                    Assert(pThis->hNativeOwner == NIL_RTNATIVETHREAD);
                    rc = VERR_INTERNAL_ERROR_3;
                }
            }
            else
                rc = VERR_SEM_DESTROYED;
            break;

        case THREAD_TIMED_OUT:
            /* Only possible on the deadline path. */
            Assert(cMillies != RT_INDEFINITE_WAIT);
            rc = VERR_TIMEOUT;
            break;

        case THREAD_INTERRUPTED:
            /* Only possible when the caller asked for an interruptible wait. */
            Assert(fInterruptible);
            rc = VERR_INTERRUPTED;
            break;

        case THREAD_RESTART:
            /* The mutex was destroyed while we slept (magic is inverted on destruction). */
            Assert(pThis->u32Magic == ~RTSEMMUTEX_MAGIC);
            rc = VERR_SEM_DESTROYED;
            break;

        default:
            AssertMsgFailed(("rcWait=%d\n", rcWait));
            rc = VERR_GENERAL_FAILURE;
            break;
    }

    /*
     * Dereference it and quit the lock.
     *
     * If we hold the last reference the destroyer has already gone; free the
     * instance (which takes care of the spinlock), otherwise just drop the lock.
     */
    Assert(pThis->cWaiters > 0);
    pThis->cWaiters--;
    Assert(pThis->cRefs > 0);
    if (RT_UNLIKELY(ASMAtomicDecU32(&pThis->cRefs) == 0))
        rtSemMutexDarwinFree(pThis);
    else
        lck_spin_unlock(pThis->pSpinlock);
    return rc;
}
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis       The event semaphore.
 * @param   fFlags      See RTSemEventMultiWaitEx.
 * @param   uTimeout    See RTSemEventMultiWaitEx.
 * @param   pSrcPos     The source code position of the wait.  Not used by this
 *                      implementation (presumably for lock validation builds).
 */
static int rtR0SemEventMultiDarwinWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    if (uTimeout != 0 || (fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        RT_ASSERT_PREEMPTIBLE();

    /* Keep the instance alive while we use it, then take the spinlock that
       guards the state. */
    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Is the event already signalled or do we have to wait?
     *
     * fOrgStateAndGen snapshots both the signalled bit and the generation
     * counter; a later change of either means the event was signalled (or
     * reset+signalled) after we started waiting.
     */
    int rc;
    uint32_t const fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    if (fOrgStateAndGen & RTSEMEVENTMULTIDARWIN_STATE_MASK)
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait. So, we'll need to convert the timeout and figure
         * out if it's indefinite or not.
         */
        uint64_t uNsAbsTimeout = 1; /* Dummy; overwritten before use on every path that reads it. */
        if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        {
            /* Normalize milliseconds to nanoseconds, saturating to UINT64_MAX
               (which is treated as an indefinite wait below). */
            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
                uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                         ? uTimeout * UINT32_C(1000000)
                         : UINT64_MAX;
            if (uTimeout == UINT64_MAX)
                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
            else
            {
                uint64_t u64Now;
                if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                {
                    if (uTimeout != 0)
                    {
                        /* Relative: convert to an absolute nanosecond deadline;
                           overflow also degrades to an indefinite wait. */
                        u64Now = RTTimeSystemNanoTS();
                        uNsAbsTimeout = u64Now + uTimeout;
                        if (uNsAbsTimeout < u64Now) /* overflow */
                            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
                    }
                }
                else
                {
                    /* Absolute: keep the deadline and derive the remaining
                       relative time (0 if already expired → poll). */
                    uNsAbsTimeout = uTimeout;
                    u64Now        = RTTimeSystemNanoTS();
                    uTimeout      = u64Now < uTimeout ? uTimeout - u64Now : 0;
                }
            }
        }

        if (   !(fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
            && uTimeout == 0)
        {
            /*
             * Poll call, we already checked the condition above so no need to
             * wait for anything.
             */
            rc = VERR_TIMEOUT;
        }
        else
        {
            for (;;)
            {
                /*
                 * Do the actual waiting.
                 */
                ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, true);

                wait_interrupt_t fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE ? THREAD_ABORTSAFE : THREAD_UNINT;
                wait_result_t    rcWait;
                if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
                    rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
                else
                {
                    /* NOTE(review): uNsAbsTimeout is an absolute RTTimeSystemNanoTS
                       value; feeding it to nanoseconds_to_absolutetime assumes the
                       nanotime source is mach_absolute_time based — confirm. */
                    uint64_t u64AbsTime;
                    nanoseconds_to_absolutetime(uNsAbsTimeout, &u64AbsTime);
                    rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                                     (event_t)pThis, fInterruptible, u64AbsTime);
                }

                /*
                 * Deal with the wait result.
                 */
                if (RT_LIKELY(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC))
                {
                    switch (rcWait)
                    {
                        case THREAD_AWAKENED:
                            /* Success only if the state/generation actually changed;
                               otherwise it was a spurious wakeup. */
                            if (RT_LIKELY(ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen))
                                rc = VINF_SUCCESS;
                            else if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
                                rc = VERR_INTERRUPTED;
                            else
                                continue; /* Seen this happen after fork/exec/something. */
                            break;

                        case THREAD_TIMED_OUT:
                            Assert(!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE));
                            rc = VERR_TIMEOUT;
                            break;

                        case THREAD_INTERRUPTED:
                            Assert(fInterruptible != THREAD_UNINT);
                            rc = VERR_INTERRUPTED;
                            break;

                        case THREAD_RESTART:
                            /* Destroyed while we slept (magic is inverted on destruction). */
                            AssertMsg(pThis->u32Magic == ~RTSEMEVENTMULTI_MAGIC, ("%#x\n", pThis->u32Magic));
                            rc = VERR_SEM_DESTROYED;
                            break;

                        default:
                            AssertMsgFailed(("rcWait=%d\n", rcWait));
                            rc = VERR_INTERNAL_ERROR_3;
                            break;
                    }
                }
                else
                    rc = VERR_SEM_DESTROYED;
                break;
            }
        }
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);
    return rc;
}