/**
 * Destroys a multiple-release event semaphore (OS/2 ring-0).
 *
 * The handle is invalidated under the spinlock first; then, depending on the
 * semaphore's state, the final free is either done here or deferred to the
 * last thread still waking up from a wait.
 *
 * @returns VINF_SUCCESS (also for a NIL handle, which is a no-op).
 * @retval  VERR_INVALID_HANDLE if the pointer or magic is bad.
 * @param   hEventMultiSem  The multiple-release event semaphore to destroy.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);
    ASMAtomicIncU32(&pThis->u32Magic); /* make the handle invalid */
    if (pThis->cWaiters > 0)
    {
        /* Abort the waiting threads: move them all to the waking state and
           wake them with VERR_SEM_DESTROYED as the wakeup data. The last man
           to wake up cleans up (frees spinlock + structure) — see the wait
           worker's NO_ERROR case. */
        ASMAtomicXchgU32(&pThis->cWaking, pThis->cWaking + pThis->cWaiters);
        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_BOOST, &cThreads, (ULONG)VERR_SEM_DESTROYED);
        KernReleaseSpinLock(&pThis->Spinlock);
    }
    else if (pThis->cWaking)
        /* the last waking thread is gonna do the cleanup */
        KernReleaseSpinLock(&pThis->Spinlock);
    else
    {
        /* Nobody is waiting or waking: safe to free everything right here. */
        KernReleaseSpinLock(&pThis->Spinlock);
        KernFreeSpinLock(&pThis->Spinlock);
        RTMemFree(pThis);
    }

    return VINF_SUCCESS;
}
/**
 * Signals a single-release event semaphore (OS/2 ring-0).
 *
 * Wakes exactly one waiting thread if there is one; otherwise records the
 * signal in fSignaled so the next waiter returns immediately.
 *
 * @returns VINF_SUCCESS.
 * @retval  VERR_INVALID_HANDLE if the pointer or magic is bad.
 * @param   hEventSem   The event semaphore to signal.
 */
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC,
                    ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);

    if (pThis->cWaiters > 0)
    {
        /* Move one thread from the waiting to the waking state before the
           wakeup call, so the woken thread finds consistent counters. */
        ASMAtomicDecU32(&pThis->cWaiters);
        ASMAtomicIncU32(&pThis->cWaking);

        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_ONE, &cThreads, VINF_SUCCESS);
        if (RT_UNLIKELY(!cThreads))
        {
            /* shouldn't ever happen on OS/2 — roll the counters back and
               fall back to latching the signal instead. */
            ASMAtomicXchgU8(&pThis->fSignaled, true);
            ASMAtomicDecU32(&pThis->cWaking);
            ASMAtomicIncU32(&pThis->cWaiters);
        }
    }
    else
        /* No waiters: latch the signal for the next RTSemEventWait* call. */
        ASMAtomicXchgU8(&pThis->fSignaled, true);

    KernReleaseSpinLock(&pThis->Spinlock);
    return VINF_SUCCESS;
}
/**
 * Acquires a spinlock (OS/2 ring-0), delegating to the kernel KEE spinlock.
 *
 * @param   Spinlock    Handle of the spinlock to acquire.
 */
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pSpinlockInt = (PRTSPINLOCKINTERNAL)Spinlock;

    /* Sanity-check the handle before touching it. */
    AssertPtr(pSpinlockInt);
    Assert(pSpinlockInt->u32Magic == RTSPINLOCK_MAGIC);

    KernAcquireSpinLock(&pSpinlockInt->Spinlock);
    Assert(!ASMIntAreEnabled()); /** @todo verify that interrupts are disabled. */
}
/**
 * Resets a multiple-release event semaphore to the non-signaled state
 * (OS/2 ring-0).
 *
 * @returns VINF_SUCCESS.
 * @retval  VERR_INVALID_HANDLE if the pointer or magic is bad.
 * @param   hEventMultiSem  The multiple-release event semaphore to reset.
 */
RTDECL(int) RTSemEventMultiReset(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pEventMultiInt = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;

    /* Validate the handle. */
    AssertPtrReturn(pEventMultiInt, VERR_INVALID_HANDLE);
    AssertMsgReturn(pEventMultiInt->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pEventMultiInt, pEventMultiInt->u32Magic),
                    VERR_INVALID_HANDLE);

    /* Clear the signaled flag under the spinlock. */
    KernAcquireSpinLock(&pEventMultiInt->Spinlock);
    ASMAtomicXchgU8(&pEventMultiInt->fSignaled, false);
    KernReleaseSpinLock(&pEventMultiInt->Spinlock);

    return VINF_SUCCESS;
}
/**
 * Signals a multiple-release event semaphore (OS/2 ring-0).
 *
 * Sets the signaled flag and wakes all current waiters.
 *
 * @returns VINF_SUCCESS.
 * @retval  VERR_INVALID_HANDLE if the pointer or magic is bad.
 * @param   hEventMultiSem  The multiple-release event semaphore to signal.
 */
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);

    /* Latch the signal first so late-arriving waiters see it. */
    ASMAtomicXchgU8(&pThis->fSignaled, true);
    if (pThis->cWaiters > 0)
    {
        /* Move all waiters to the waking state before the wakeup call; the
           woken threads decrement cWaking themselves in the wait worker. */
        ASMAtomicXchgU32(&pThis->cWaking, pThis->cWaking + pThis->cWaiters);
        ASMAtomicXchgU32(&pThis->cWaiters, 0);

        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA, &cThreads, VINF_SUCCESS);
    }

    KernReleaseSpinLock(&pThis->Spinlock);
    return VINF_SUCCESS;
}
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis       The event semaphore.
 * @param   fFlags      See RTSemEventMultiWaitEx.
 * @param   uTimeout    See RTSemEventMultiWaitEx.
 * @param   pSrcPos     The source code position of the wait (not used in
 *                      this implementation).
 */
static int rtR0SemEventMultiOs2Wait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                    PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate and convert the input.
     */
    if (!pThis)
        return VERR_INVALID_HANDLE;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /* Convert the IPRT timeout/flags into what KernBlock expects. */
    ULONG cMsTimeout = rtR0SemWaitOs2ConvertTimeout(fFlags, uTimeout);
    ULONG fBlock = BLOCK_SPINLOCK;
    if (!(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE))
        fBlock |= BLOCK_UNINTERRUPTABLE;

    /*
     * Do the job.
     */
    KernAcquireSpinLock(&pThis->Spinlock);

    int rc;
    if (pThis->fSignaled)
        /* Already signaled: return without blocking. */
        rc = VINF_SUCCESS;
    else
    {
        ASMAtomicIncU32(&pThis->cWaiters);

        /* KernBlock releases the spinlock while blocked and reacquires it
           before returning; ulData carries the waker's status code. */
        ULONG ulData = (ULONG)VERR_INTERNAL_ERROR;
        rc = KernBlock((ULONG)pThis, cMsTimeout, fBlock, &pThis->Spinlock, &ulData);
        switch (rc)
        {
            case NO_ERROR:
                rc = (int)ulData;
                Assert(rc == VINF_SUCCESS || rc == VERR_SEM_DESTROYED);
                Assert(pThis->cWaking > 0);
                if (    !ASMAtomicDecU32(&pThis->cWaking)
                    &&  pThis->u32Magic != RTSEMEVENTMULTI_MAGIC)
                {
                    /* The event was destroyed (ulData == VINF_SUCCESS if it
                       was after we awoke); being the last waking thread, we
                       do the deferred cleanup here. */
                    KernReleaseSpinLock(&pThis->Spinlock);
                    KernFreeSpinLock(&pThis->Spinlock);
                    RTMemFree(pThis);
                    return VINF_SUCCESS;
                }
                rc = VINF_SUCCESS;
                break;

            case ERROR_TIMEOUT:
                Assert(cMsTimeout != SEM_INDEFINITE_WAIT);
                ASMAtomicDecU32(&pThis->cWaiters); /* we never reached the waking state */
                rc = VERR_TIMEOUT;
                break;

            case ERROR_INTERRUPT:
                Assert(fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE);
                ASMAtomicDecU32(&pThis->cWaiters); /* we never reached the waking state */
                rc = VERR_INTERRUPTED;
                break;

            default:
                AssertMsgFailed(("rc=%d\n", rc));
                rc = VERR_GENERAL_FAILURE;
                break;
        }
    }

    KernReleaseSpinLock(&pThis->Spinlock);
    return rc;
}