/**
 * Destroys an event semaphore (Linux ring-0 implementation).
 *
 * @returns IPRT status code; VINF_SUCCESS for a NIL handle too.
 * @param   hEventSem   The event semaphore handle.  NIL_RTSEMEVENT is quietly
 *                      accepted.
 */
RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTSEMEVENTINTERNAL pThis = hEventSem;
    if (pThis == NIL_RTSEMEVENT)
        return VINF_SUCCESS;
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC,
                    ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);

    /*
     * Invalidate it and signal the object just in case.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENT_MAGIC); /* makes future validations fail */
    ASMAtomicWriteU32(&pThis->fState, 0);
    Assert(!waitqueue_active(&pThis->Head)); /* nobody should be waiting at destruction time */
    wake_up_all(&pThis->Head);              /* ... but wake them anyway so they can bail out */
    rtR0SemEventLnxRelease(pThis);          /* drop the caller's reference */

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Records an invlpg instruction for replaying upon REM entry.
 *
 * @param   pVM         Pointer to the VM.
 * @param   GCPtrPage   The guest page address the invlpg instruction operated
 *                      on (i.e. the page to invalidate in the recompiler TLB).
 */
VMMDECL(void) REMNotifyInvalidatePage(PVM pVM, RTGCPTR GCPtrPage)
{
    /*
     * Try take the REM lock and push the address onto the array.
     */
    if (    pVM->rem.s.cInvalidatedPages < RT_ELEMENTS(pVM->rem.s.aGCPtrInvalidatedPages)
        &&  EMRemTryLock(pVM) == VINF_SUCCESS)
    {
        uint32_t iPage = pVM->rem.s.cInvalidatedPages;
        if (iPage < RT_ELEMENTS(pVM->rem.s.aGCPtrInvalidatedPages)) /* re-check under the lock */
        {
            ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, iPage + 1);
            pVM->rem.s.aGCPtrInvalidatedPages[iPage] = GCPtrPage;

            EMRemUnlock(pVM);
            return;
        }

        /* The array filled up while we were acquiring the lock; fall back to a
           full TLB flush request and reset the array. */
        CPUMSetChangedFlags(VMMGetCpu(pVM), CPUM_CHANGED_GLOBAL_TLB_FLUSH); /** @todo this array should be per-cpu technically speaking. */
        ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, 0); /** @todo leave this alone? Optimize this code? */

        EMRemUnlock(pVM);
    }
    else
    {
        /* Fallback: Simply tell the recompiler to flush its TLB. */
        CPUMSetChangedFlags(VMMGetCpu(pVM), CPUM_CHANGED_GLOBAL_TLB_FLUSH);
        ASMAtomicWriteU32(&pVM->rem.s.cInvalidatedPages, 0); /** @todo leave this alone?! Optimize this code? */
    }

    return;
}
/**
 * Creates a new network adapter in the first free slot of g_aAdapters.
 *
 * A slot is claimed by atomically moving its state from Invalid to
 * Transitional, which keeps concurrent creators out until the slot is either
 * Active (success) or back to Invalid (failure).
 *
 * @returns VBox status code: VERR_OUT_OF_RESOURCES when every slot is busy,
 *          VERR_INVALID_PARAMETER when no unit number could be derived,
 *          otherwise the status of vboxNetAdpOsCreate.
 * @param   ppNew       Where to store the pointer to the created adapter on
 *                      success.
 * @param   pcszName    Optional adapter name; NULL means use the next
 *                      available unit number and an empty name.
 */
int vboxNetAdpCreate(PVBOXNETADP *ppNew, const char *pcszName)
{
    int rc;
    unsigned i;
    for (i = 0; i < RT_ELEMENTS(g_aAdapters); i++)
    {
        PVBOXNETADP pThis = &g_aAdapters[i];

        /* Claim the slot: Invalid -> Transitional. */
        if (ASMAtomicCmpXchgU32((uint32_t volatile *)&pThis->enmState,
                                kVBoxNetAdpState_Transitional,
                                kVBoxNetAdpState_Invalid))
        {
            RTMAC Mac;
            /* Found an empty slot -- use it. */
            Log(("vboxNetAdpCreate: found empty slot: %d\n", i));
            if (pcszName)
            {
                Log(("vboxNetAdpCreate: using name: %s\n", pcszName));
                pThis->iUnit = vboxNetAdpGetUnitByName(pcszName);
                strncpy(pThis->szName, pcszName, sizeof(pThis->szName));
                pThis->szName[sizeof(pThis->szName) - 1] = '\0'; /* strncpy does not guarantee termination */
            }
            else
            {
                pThis->iUnit = vboxNetAdpGetNextAvailableUnit();
                pThis->szName[0] = '\0';
            }
            if (pThis->iUnit < 0)
                rc = VERR_INVALID_PARAMETER;
            else
            {
                vboxNetAdpComposeMACAddress(pThis, &Mac);
                rc = vboxNetAdpOsCreate(pThis, &Mac);
                Log(("vboxNetAdpCreate: pThis=%p pThis->iUnit=%d, pThis->szName=%s\n",
                     pThis, pThis->iUnit, pThis->szName));
            }
            if (RT_SUCCESS(rc))
            {
                *ppNew = pThis;
                ASMAtomicWriteU32((uint32_t volatile *)&pThis->enmState, kVBoxNetAdpState_Active);
                Log2(("VBoxNetAdpCreate: Created %s\n", g_aAdapters[i].szName));
            }
            else
            {
                /* Creation failed -- return the slot to the free pool. */
                ASMAtomicWriteU32((uint32_t volatile *)&pThis->enmState, kVBoxNetAdpState_Invalid);
                Log(("vboxNetAdpCreate: vboxNetAdpOsCreate failed with '%Rrc'.\n", rc));
            }
            /* Diagnostic dump of all slots; reuses 'i', which is fine since we
               return immediately afterwards. */
            for (i = 0; i < RT_ELEMENTS(g_aAdapters); i++)
                Log2(("VBoxNetAdpCreate: Scanning entry: state=%d unit=%d name=%s\n",
                      g_aAdapters[i].enmState, g_aAdapters[i].iUnit, g_aAdapters[i].szName));
            return rc;
        }
    }
    Log(("vboxNetAdpCreate: no empty slots!\n"));
    /* All slots in adapter array are busy. */
    return VERR_OUT_OF_RESOURCES;
}
/**
 * Releases a spinlock (NT ring-0 implementation).
 *
 * For interrupt-safe spinlocks this also restores the interrupt flag state
 * that the acquire path stashed in pThis->fIntSaved.
 *
 * @param   Spinlock    The spinlock handle to release.
 */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC,
              ("magic=%#x\n", pThis->u32Magic));
    KIRQL SavedIrql = pThis->SavedIrql; /* read before releasing -- the lock protects it */
    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        /* Grab and clear the saved interrupt flags before dropping the lock,
           so the slot is clean for the next owner. */
        RTCCUINTREG fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#ifndef RTSPINLOCK_NT_HACK_NOIRQ
        KeReleaseSpinLock(&pThis->Spinlock, SavedIrql);
        ASMSetFlags(fIntSaved);
#else
        /* NOIRQ hack: the lock is a hand-rolled u32Hack word instead of a real
           KSPIN_LOCK; free it, restore EFLAGS, then lower the IRQL manually. */
        Assert(pThis->u32Hack == RTSPINLOCK_NT_HACK_NOIRQ_TAKEN);
        ASMAtomicWriteU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_FREE);
        ASMSetFlags(fIntSaved);
        if (SavedIrql < DISPATCH_LEVEL)
            KeLowerIrql(SavedIrql);
#endif
    }
    else
        KeReleaseSpinLock(&pThis->Spinlock, SavedIrql);
}
/**
 * Signals a multiple-release event semaphore (Solaris ring-0 implementation),
 * waking up all waiters.
 *
 * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
 * @param   hEventMultiSem  The multiple-release event semaphore handle.
 */
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    RT_ASSERT_PREEMPT_CPUID_VAR();

    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();
    rtR0SemEventMultiSolRetain(pThis); /* keep the object alive while we signal */
    rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);

    /*
     * Do the job: advance the generation counter and set the signalled state
     * bits in the combined fStateAndGen word, then wake everyone.
     */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTISOL_GEN_SHIFT; /* new generation */
    fNew |= RTSEMEVENTMULTISOL_STATE_MASK;     /* signalled */
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    cv_broadcast(&pThis->Cnd);

    mutex_exit(&pThis->Mtx);

    rtR0SemEventMultiSolRelease(pThis);
    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Destroys a spin mutex.
 *
 * @returns IPRT status code from destroying the inner event semaphore;
 *          VINF_SUCCESS for a NIL handle.
 * @param   hSpinMtx    The spin mutex handle.  NIL_RTSEMSPINMUTEX is quietly
 *                      accepted.
 */
RTDECL(int) RTSemSpinMutexDestroy(RTSEMSPINMUTEX hSpinMtx)
{
    /* A NIL handle is a no-op. */
    if (hSpinMtx == NIL_RTSEMSPINMUTEX)
        return VINF_SUCCESS;

    RTSEMSPINMUTEXINTERNAL *pMutex = hSpinMtx;
    RTSEMSPINMUTEX_VALIDATE_RETURN(pMutex);

    /* No destruction races allowed! */
    AssertMsg(   pMutex->cLockers  == 0
              && pMutex->hOwner    == NIL_RTNATIVETHREAD,
              ("pThis=%p cLockers=%d hOwner=%p\n", pMutex, pMutex->cLockers, pMutex->hOwner));

    /*
     * Mark the structure dead, detach and destroy the event semaphore,
     * then free the structure itself.
     */
    ASMAtomicWriteU32(&pMutex->u32Magic, RTSEMSPINMUTEX_MAGIC_DEAD);
    RTSEMEVENT hEvent = pMutex->hEventSem;
    pMutex->hEventSem = NIL_RTSEMEVENT;
    int rc = RTSemEventDestroy(hEvent);
    AssertRC(rc);

    RTMemFree(pMutex);
    return rc;
}
/**
 * Signals a multiple-release event semaphore (Darwin ring-0 implementation),
 * waking up all waiters.
 *
 * @returns VINF_SUCCESS or VERR_INVALID_HANDLE.
 * @param   hEventMultiSem  The multiple-release event semaphore handle.
 */
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis); /* keep the object alive while we signal */
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Set the signal and increment the generation counter.
     */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT; /* new generation */
    fNew |= RTSEMEVENTMULTIDARWIN_STATE_MASK;     /* signalled */
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    /*
     * Wake up all sleeping threads.
     */
    if (pThis->fHaveBlockedThreads)
    {
        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
/**
 * Destroys a multiple-release event semaphore (Darwin ring-0 implementation).
 *
 * @returns VINF_SUCCESS (also for a NIL handle) or VERR_INVALID_HANDLE.
 * @param   hEventMultiSem  The handle.  NIL_RTSEMEVENTMULTI is quietly accepted.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    Assert(pThis->cRefs > 0);
    RT_ASSERT_INTS_ON();

    lck_spin_lock(pThis->pSpinlock);

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTSEMEVENTMULTI_MAGIC); /* make the handle invalid */
    /* Keep only the generation bits, clearing the signalled state. */
    ASMAtomicAndU32(&pThis->fStateAndGen, RTSEMEVENTMULTIDARWIN_GEN_MASK);
    if (pThis->fHaveBlockedThreads)
    {
        /* abort waiting threads. */
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_RESTART);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis); /* drop the caller's reference */

    return VINF_SUCCESS;
}
/**
 * Destroys a timer (Solaris ring-0, cyclic based).
 *
 * @returns VINF_SUCCESS (also for NULL) or VERR_INVALID_CONTEXT when called
 *          from the timer callback itself.
 * @param   pTimer  The timer to destroy.  NULL is quietly accepted.
 */
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    if (pTimer == NULL)
        return VINF_SUCCESS;
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * It is not possible to destroy a timer from it's callback function.
     * Cyclic makes that impossible (or at least extremely risky).
     */
    AssertReturn(!rtTimerSolIsCallingFromTimerProc(pTimer), VERR_INVALID_CONTEXT);

    /*
     * Invalidate the handle, make sure it's stopped and free the associated resources.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);

    if (   !pTimer->fSuspended
        || pTimer->hCyclicId != CYCLIC_NONE)        /* 2nd check shouldn't happen */
        rtTimerSolStopIt(pTimer);

    rtTimerSolRelease(pTimer); /* drop the caller's reference */
    return VINF_SUCCESS;
}
/**
 * Final cleanup once the timer's reference counter has reached zero.
 *
 * @returns 0 (the new reference counter value).
 * @param   pThis   The timer to free.
 */
static uint32_t rtTimeSolReleaseCleanup(PRTTIMER pThis)
{
    /* The cyclic must already have been torn down at this point. */
    Assert(pThis->hCyclicId == CYCLIC_NONE);

    /* Kill the magic before handing the memory back. */
    ASMAtomicWriteU32(&pThis->u32Magic, ~RTTIMER_MAGIC);
    RTMemFree(pThis);

    return 0;
}
/**
 * Destroys a timer (NT ring-0 implementation).
 *
 * @returns VINF_SUCCESS (also for NULL), VERR_INVALID_HANDLE, or
 *          VERR_INVALID_CONTEXT when not at PASSIVE_LEVEL.
 * @param   pTimer  The timer to destroy.  NULL is quietly accepted.
 */
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* It's ok to pass NULL pointer. */
    if (pTimer == /*NIL_RTTIMER*/ NULL)
        return VINF_SUCCESS;
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    /*
     * We do not support destroying a timer from the callback because it is
     * not 101% safe since we cannot flush DPCs. Solaris has the same restriction.
     */
    AssertReturn(KeGetCurrentIrql() == PASSIVE_LEVEL, VERR_INVALID_CONTEXT);

    /*
     * Invalidate the timer, stop it if it's running and finally
     * free up the memory.
     */
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        rtTimerNtStopWorker(pTimer);

    /*
     * Flush DPCs to be on the safe side.
     */
    if (g_pfnrtNtKeFlushQueuedDpcs) /* API is resolved dynamically; not present on all NT versions */
        g_pfnrtNtKeFlushQueuedDpcs();

    RTMemFree(pTimer);
    return VINF_SUCCESS;
}
/**
 * Destroys a mutex semaphore (Linux, futex based).
 *
 * @returns VINF_SUCCESS (also for NIL) or VERR_INVALID_HANDLE.
 * @param   hMutexSem   The mutex semaphore handle.  NIL_RTSEMMUTEX is quietly
 *                      accepted.
 */
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    if (hMutexSem == NIL_RTSEMMUTEX)
        return VINF_SUCCESS;
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC,
                    ("hMutexSem=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    /*
     * Invalidate the semaphore and wake up anyone waiting on it.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD);
    if (ASMAtomicXchgS32(&pThis->iState, 0) > 0)
    {
        sys_futex(&pThis->iState, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
        usleep(1000); /* give the woken waiters a chance to see the dead magic before we free */
    }
    pThis->Owner = (pthread_t)~0;
    pThis->cNestings = 0;
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclDelete(&pThis->ValidatorRec);
#endif

    /*
     * Free the semaphore memory and be gone.
     */
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
/**
 * Signals an event semaphore (Linux, futex based), waking up at most one
 * waiter.
 *
 * @returns IPRT status code.
 * @param   hEventSem   The event semaphore handle.
 */
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->iMagic == RTSEMEVENT_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMEVENT_STRICT
    if (pThis->fEverHadSignallers)
    {
        int rc9 = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    ASMAtomicWriteU32(&pThis->fSignalled, 1);
    if (ASMAtomicReadS32(&pThis->cWaiters) < 1)
        return VINF_SUCCESS; /* nobody blocked in the futex; the flag alone suffices */

    /* somebody is waiting, try wake up one of them. */
    long cWoken = sys_futex(&pThis->fSignalled, FUTEX_WAKE, 1, NULL, NULL, 0);
    if (RT_LIKELY(cWoken >= 0))
        return VINF_SUCCESS;

    /* The futex call failed; check whether the semaphore was destroyed under us. */
    if (RT_UNLIKELY(pThis->iMagic != RTSEMEVENT_MAGIC))
        return VERR_SEM_DESTROYED;

    return VERR_INVALID_PARAMETER;
}
/**
 * Destroys a multiple-release event semaphore (Linux, futex based).
 *
 * @returns VINF_SUCCESS (also for NIL) or VERR_INVALID_HANDLE.
 * @param   hEventMultiSem  The handle.  NIL_RTSEMEVENTMULTI is quietly
 *                          accepted.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTMULTIINTERNAL *pThis = hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Invalidate the semaphore and wake up anyone waiting on it.
     * Note: MAGIC + 1 (not ~MAGIC) is the dead-magic convention used here.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMEVENTMULTI_MAGIC + 1);
    /* -1 marks destruction; a previous state of 1 indicates blocked waiters
       (see the wait code), so wake them all and give them a moment to notice
       the dead magic before the memory goes away. */
    if (ASMAtomicXchgS32(&pThis->iState, -1) == 1)
    {
        sys_futex(&pThis->iState, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
        usleep(1000);
    }

    /*
     * Free the semaphore memory and be gone.
     */
#ifdef RTSEMEVENTMULTI_STRICT
    RTLockValidatorRecSharedDelete(&pThis->Signallers);
#endif
    RTMemFree(pThis);
    return VINF_SUCCESS;
}
/**
 * Destroys a mutex semaphore (POSIX, pthread based).
 *
 * @returns VINF_SUCCESS (also for NIL), VERR_INVALID_HANDLE, or the converted
 *          pthread_mutex_destroy error (e.g. for a still-locked mutex the
 *          structure is left intact and an error is returned).
 * @param   hMutexSem   The mutex semaphore handle.  NIL_RTSEMMUTEX is quietly
 *                      accepted.
 */
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    if (hMutexSem == NIL_RTSEMMUTEX)
        return VINF_SUCCESS;
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Try destroy it.
     */
    int rc = pthread_mutex_destroy(&pThis->Mutex);
    if (rc)
    {
        AssertMsgFailed(("Failed to destroy mutex sem %p, rc=%d.\n", hMutexSem, rc));
        return RTErrConvertFromErrno(rc);
    }

    /*
     * Free the memory and be gone.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD);
    pThis->Owner    = (pthread_t)-1;
    pThis->cNesting = UINT32_MAX;
#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclDelete(&pThis->ValidatorRec);
#endif
    RTMemTmpFree(pThis);

    return VINF_SUCCESS;
}
/**
 * Lazily initializes and returns the global display-miniport logger.
 *
 * @returns Pointer to the logger on success; NULL when initialization failed
 *          or is still in progress on another thread.
 */
static PVBOXDISPMPLOGGER vboxDispMpLoggerGet()
{
    /* Race for the UNINITIALIZED -> INITIALIZING transition; only one thread wins. */
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&g_VBoxDispMpLogger.enmState, VBOXDISPMPLOGGER_STATE_INITIALIZING, VBOXDISPMPLOGGER_STATE_UNINITIALIZED))
    {
        HRESULT hr = vboxDispKmtCallbacksInit(&g_VBoxDispMpLogger.KmtCallbacks);
        if (hr == S_OK)
        {
            /* we are on Vista+
             * check if we can Open Adapter, i.e. WDDM driver is installed */
            VBOXDISPKMT_ADAPTER Adapter;
            hr = vboxDispKmtOpenAdapter(&g_VBoxDispMpLogger.KmtCallbacks, &Adapter);
            if (hr == S_OK)
            {
                ASMAtomicWriteU32((volatile uint32_t *)&g_VBoxDispMpLogger.enmState, VBOXDISPMPLOGGER_STATE_INITIALIZED);
                vboxDispKmtCloseAdapter(&Adapter);
                return &g_VBoxDispMpLogger;
            }
            vboxDispKmtCallbacksTerm(&g_VBoxDispMpLogger.KmtCallbacks);
        }
        /* NOTE(review): on failure the state is left at INITIALIZING, so later
           calls return NULL and never retry -- confirm this is intentional. */
    }
    else if (ASMAtomicReadU32((volatile uint32_t *)&g_VBoxDispMpLogger.enmState) == VBOXDISPMPLOGGER_STATE_INITIALIZED)
    {
        return &g_VBoxDispMpLogger;
    }
    /* Failed, or another thread is still initializing. */
    return NULL;
}
/**
 * Destroys a module after the reference count has reached zero.
 *
 * @param   pDbgMod     The module instance.
 */
static void rtDbgModDestroy(PRTDBGMODINT pDbgMod)
{
    /*
     * Close the debug info interpreter first, then the image interpreter.
     */
    RTCritSectEnter(&pDbgMod->CritSect); /* paranoia */

    if (pDbgMod->pDbgVt)
    {
        pDbgMod->pDbgVt->pfnClose(pDbgMod);
        pDbgMod->pDbgVt = NULL;
        pDbgMod->pvDbgPriv = NULL;
    }

    if (pDbgMod->pImgVt)
    {
        pDbgMod->pImgVt->pfnClose(pDbgMod);
        pDbgMod->pImgVt = NULL;
        pDbgMod->pvImgPriv = NULL;
    }

    /*
     * Free the resources.
     */
    ASMAtomicWriteU32(&pDbgMod->u32Magic, ~RTDBGMOD_MAGIC); /* invalidate before freeing */
    RTStrCacheRelease(g_hDbgModStrCache, pDbgMod->pszName);
    RTStrCacheRelease(g_hDbgModStrCache, pDbgMod->pszImgFile);
    RTStrCacheRelease(g_hDbgModStrCache, pDbgMod->pszDbgFile);
    RTCritSectLeave(&pDbgMod->CritSect); /* paranoia */
    RTCritSectDelete(&pDbgMod->CritSect);
    RTMemFree(pDbgMod);
}
/**
 * Destroys an event semaphore (POSIX, condvar + mutex based).
 *
 * Retries the pthread destroy calls for up to ~30ms while they report EBUSY,
 * broadcasting the condition variable to chase out lingering waiters.
 *
 * @returns VINF_SUCCESS (also for NIL) or a converted errno on failure.
 * @param   hEventSem   The event semaphore handle.  NIL_RTSEMEVENT is quietly
 *                      accepted.
 */
RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
{
    /*
     * Validate handle.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    if (pThis == NIL_RTSEMEVENT)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    uint32_t u32 = pThis->u32State;
    AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE);

    /*
     * Abort all waiters forcing them to return failure.
     */
    int rc;
    for (int i = 30; i > 0; i--) /* rc is always assigned: at least one iteration runs */
    {
        ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_UNINITIALIZED);
        rc = pthread_cond_destroy(&pThis->Cond);
        if (rc != EBUSY)
            break;
        /* Still busy: kick the waiters and give them a moment to leave. */
        pthread_cond_broadcast(&pThis->Cond);
        usleep(1000);
    }
    if (rc)
    {
        AssertMsgFailed(("Failed to destroy event sem %p, rc=%d.\n", pThis, rc));
        return RTErrConvertFromErrno(rc);
    }

    /*
     * Destroy the semaphore
     * If it's busy we'll wait a bit to give the threads a chance to be scheduled.
     */
    for (int i = 30; i > 0; i--)
    {
        rc = pthread_mutex_destroy(&pThis->Mutex);
        if (rc != EBUSY)
            break;
        usleep(1000);
    }
    if (rc)
    {
        AssertMsgFailed(("Failed to destroy event sem %p, rc=%d. (mutex)\n", pThis, rc));
        return RTErrConvertFromErrno(rc);
    }

    /*
     * Free the semaphore memory and be gone.
     */
#ifdef RTSEMEVENT_STRICT
    RTLockValidatorRecSharedDelete(&pThis->Signallers);
#endif
    if (!(pThis->fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK))
        RTMemFree(pThis);
    else
        rtMemBaseFree(pThis); /* allocated via the bootstrap allocator */
    return VINF_SUCCESS;
}
/**
 * Deletes a critical section.
 *
 * @returns The status of destroying the inner event semaphore.
 * @param   pCritSect   The critical section to delete.  Must be unowned and
 *                      have no waiters (asserted).
 */
RTDECL(int) RTCritSectDelete(PRTCRITSECT pCritSect)
{
    /*
     * Assert free waiters and so on.
     */
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->cNestings == 0);
    Assert(pCritSect->cLockers == -1);
    Assert(pCritSect->NativeThreadOwner == NIL_RTNATIVETHREAD);

    /*
     * Invalidate the structure and free the mutex.
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicWriteU32(&pCritSect->u32Magic, ~RTCRITSECT_MAGIC); /* fail anyone entering from now on */
    pCritSect->fFlags           = 0;
    pCritSect->cNestings        = 0;
    pCritSect->NativeThreadOwner= NIL_RTNATIVETHREAD;
    RTSEMEVENT EventSem = pCritSect->EventSem;
    pCritSect->EventSem         = NIL_RTSEMEVENT;
    /* Signal once per potential waiter (cLockers + 1 iterations), then restore -1. */
    while (pCritSect->cLockers-- >= 0)
        RTSemEventSignal(EventSem);
    ASMAtomicWriteS32(&pCritSect->cLockers, -1);
    int rc = RTSemEventDestroy(EventSem);
    AssertRC(rc);

    RTLockValidatorRecExclDestroy(&pCritSect->pValidatorRec);

    return rc;
}
/**
 * Mac OS X: Really ugly hack to bypass a set-uid check in AppKit.
 *
 * This will modify the issetugid() function to always return zero.  This must
 * be done _before_ AppKit is initialized, otherwise it will refuse to play ball
 * with us as it distrusts set-uid processes since Snow Leopard.  We, however,
 * have carefully dropped all root privileges at this point and there should be
 * no reason for any security concern here.
 */
static void HideSetUidRootFromAppKit()
{
    /* Find issetguid() and make it always return 0 by modifying the code: */
    void *pvAddr = dlsym(RTLD_DEFAULT, "issetugid");
    /* Make two pages writable starting at the page containing the symbol, so
       the 4 patch bytes are covered even near a page boundary.  A NULL pvAddr
       yields an mprotect failure and thus no patching. */
    int rc = mprotect((void *)((uintptr_t)pvAddr & ~(uintptr_t)0xfff), 0x2000, PROT_WRITE | PROT_READ | PROT_EXEC);
    if (!rc)
        /* x86/amd64 encoding only: xor eax, eax; ret; int3 */
        ASMAtomicWriteU32((volatile uint32_t *)pvAddr, 0xccc3c031);
    /* NOTE(review): failures (dlsym or mprotect) are silently ignored --
       presumably acceptable since this is a best-effort hack; confirm. */
}
/**
 * Signals an event semaphore (POSIX, condvar + mutex based), waking up at
 * most one waiter.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_SEM_DESTROYED when the
 *          semaphore was destroyed while we held the mutex, or a converted
 *          errno from the pthread calls.
 * @param   hEventSem   The event semaphore handle.
 */
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTINTERNAL *pThis = hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    uint32_t u32 = pThis->u32State;
    AssertReturn(u32 == EVENT_STATE_NOT_SIGNALED || u32 == EVENT_STATE_SIGNALED, VERR_INVALID_HANDLE);

#ifdef RTSEMEVENT_STRICT
    if (pThis->fEverHadSignallers)
    {
        int rc9 = RTLockValidatorRecSharedCheckSignaller(&pThis->Signallers, NIL_RTTHREAD);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Lock the mutex semaphore.
     */
    int rc = pthread_mutex_lock(&pThis->Mutex);
    if (rc)
    {
        AssertMsgFailed(("Failed to lock event sem %p, rc=%d.\n", hEventSem, rc));
        return RTErrConvertFromErrno(rc);
    }

    /*
     * Check the state.
     */
    if (pThis->u32State == EVENT_STATE_NOT_SIGNALED)
    {
        ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_SIGNALED);
        rc = pthread_cond_signal(&pThis->Cond);
        AssertMsg(!rc, ("Failed to signal event sem %p, rc=%d.\n", hEventSem, rc));
    }
    else if (pThis->u32State == EVENT_STATE_SIGNALED)
    {
        rc = pthread_cond_signal(&pThis->Cond); /* give'm another kick... */
        AssertMsg(!rc, ("Failed to signal event sem %p, rc=%d. (2)\n", hEventSem, rc));
    }
    else
    {
        /* Destroyed while we held the mutex.  Return the IPRT status directly;
           it must NOT be pushed through RTErrConvertFromErrno() like the
           pthread errno values below (previous versions did exactly that and
           returned a bogus status to the caller). */
        pthread_mutex_unlock(&pThis->Mutex);
        return VERR_SEM_DESTROYED;
    }

    /*
     * Release the mutex and return.
     */
    int rc2 = pthread_mutex_unlock(&pThis->Mutex);
    AssertMsg(!rc2, ("Failed to unlock event sem %p, rc=%d.\n", hEventSem, rc));
    if (rc)
        return RTErrConvertFromErrno(rc);
    if (rc2)
        return RTErrConvertFromErrno(rc2);

    return VINF_SUCCESS;
}
/**
 * Posts a blocking event to the async I/O manager and waits until the manager
 * thread has processed it.
 *
 * @returns IPRT status code from the event-semaphore wait.
 * @param   pAioMgr     The async I/O manager.
 * @param   enmEvent    The blocking event to post.
 */
static int pdmacFileAioMgrWaitForBlockingEvent(PPDMACEPFILEMGR pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT enmEvent)
{
    /* Publish the event before raising the pending flag the manager polls. */
    ASMAtomicWriteU32((volatile uint32_t *)&pAioMgr->enmBlockingEvent, enmEvent);
    Assert(!pAioMgr->fBlockingEventPending); /* only one blocking event at a time */
    ASMAtomicXchgBool(&pAioMgr->fBlockingEventPending, true);

    /* Wakeup the async I/O manager */
    pdmacFileAioMgrWakeup(pAioMgr);

    /* Wait for completion. */
    int rc = RTSemEventWait(pAioMgr->EventSemBlock, RT_INDEFINITE_WAIT);
    AssertRC(rc);

    /* Clear the event slot for the next caller. */
    ASMAtomicXchgBool(&pAioMgr->fBlockingEventPending, false);
    ASMAtomicWriteU32((volatile uint32_t *)&pAioMgr->enmBlockingEvent, PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID);
    return rc;
}
/**
 * Releases ownership of a mutex semaphore (POSIX, pthread based).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_NOT_OWNER, or a converted
 *          errno from pthread_mutex_unlock.
 * @param   hMutexSem   The mutex semaphore handle.
 */
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMMUTEX_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, pThis->cNesting == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Check if nested.
     */
    pthread_t Self = pthread_self();
    if (RT_UNLIKELY(    pThis->Owner != Self
                    ||  pThis->cNesting == 0))
    {
        AssertMsgFailed(("Not owner of mutex %p!! Self=%08x Owner=%08x cNesting=%d\n",
                         pThis, Self, pThis->Owner, pThis->cNesting));
        return VERR_NOT_OWNER;
    }

    /*
     * If nested we'll just pop a nesting.
     */
    if (pThis->cNesting > 1)
    {
        ASMAtomicDecU32(&pThis->cNesting);
        return VINF_SUCCESS;
    }

    /*
     * Clear the state. (cNesting == 1)
     */
    pThis->Owner = (pthread_t)-1;
    ASMAtomicWriteU32(&pThis->cNesting, 0);

    /*
     * Unlock mutex semaphore.
     */
    int rc = pthread_mutex_unlock(&pThis->Mutex);
    if (RT_UNLIKELY(rc))
    {
        AssertMsgFailed(("Failed to unlock mutex sem %p, rc=%d.\n", hMutexSem, rc));
        /* Note: the pointless NOREF(rc) that used to sit here was dropped --
           rc is clearly referenced by the return below. */
        return RTErrConvertFromErrno(rc);
    }

    return VINF_SUCCESS;
}
/**
 * Destroys an event semaphore (BSD ring-0 implementation).
 *
 * @returns IPRT status code; VINF_SUCCESS for a NIL handle too.
 * @param   hEventSem   The event semaphore handle.  NIL_RTSEMEVENT is quietly
 *                      accepted.
 */
RTDECL(int) RTSemEventDestroy(RTSEMEVENT hEventSem)
{
    /*
     * Validate the handle; NIL is a no-op.
     */
    PRTSEMEVENTINTERNAL pEvent = hEventSem;
    if (pEvent == NIL_RTSEMEVENT)
        return VINF_SUCCESS;
    AssertMsgReturn(pEvent->u32Magic == RTSEMEVENT_MAGIC,
                    ("pThis->u32Magic=%RX32 pThis=%p\n", pEvent->u32Magic, pEvent),
                    VERR_INVALID_HANDLE);
    Assert(pEvent->cRefs > 0);

    /*
     * Mark the semaphore dead, clear the signalled state, wake up everybody
     * still waiting on it, and drop the caller's reference.
     */
    ASMAtomicWriteU32(&pEvent->u32Magic, ~RTSEMEVENT_MAGIC);
    ASMAtomicWriteU32(&pEvent->fState, 0);
    rtR0SemBsdBroadcast(pEvent);
    rtR0SemEventBsdRelease(pEvent);

    return VINF_SUCCESS;
}
/**
 * Releases ownership of a mutex semaphore (Linux, futex based).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE or VERR_NOT_OWNER.
 * @param   hMutexSem   The mutex semaphore handle.
 */
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate input.
     */
    struct RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMMUTEX_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, pThis->cNestings == 1);
    if (RT_FAILURE(rc9))
        return rc9;
#endif

    /*
     * Check if nested.
     */
    pthread_t Self = pthread_self();
    if (RT_UNLIKELY(    pThis->Owner != Self
                    ||  pThis->cNestings == 0))
    {
        AssertMsgFailed(("Not owner of mutex %p!! Self=%08x Owner=%08x cNestings=%d\n",
                         pThis, Self, pThis->Owner, pThis->cNestings));
        return VERR_NOT_OWNER;
    }

    /*
     * If nested we'll just pop a nesting.
     */
    if (pThis->cNestings > 1)
    {
        ASMAtomicDecU32(&pThis->cNestings);
        return VINF_SUCCESS;
    }

    /*
     * Clear the state. (cNestings == 1)
     */
    pThis->Owner = (pthread_t)~0;
    ASMAtomicWriteU32(&pThis->cNestings, 0);

    /*
     * Release the mutex.
     */
    int32_t iNew = ASMAtomicDecS32(&pThis->iState);
    if (RT_UNLIKELY(iNew != 0))
    {
        /* somebody is waiting, try wake up one of them. */
        /* (iState didn't drop to 0, which per the futex protocol here means
           contention was recorded; reset to free and wake one waiter.) */
        ASMAtomicXchgS32(&pThis->iState, 0);
        (void)sys_futex(&pThis->iState, FUTEX_WAKE, 1, NULL, NULL, 0);
    }

    return VINF_SUCCESS;
}
/**
 * Destroys a fast mutex (BSD ring-0, sx-lock based).
 *
 * @returns VINF_SUCCESS (also for NIL) or VERR_INVALID_HANDLE.
 * @param   hFastMtx    The fast mutex handle.  NIL_RTSEMFASTMUTEX is quietly
 *                      accepted.
 */
RTDECL(int) RTSemFastMutexDestroy(RTSEMFASTMUTEX hFastMtx)
{
    /* NIL is fine; anything else must carry the live magic. */
    PRTSEMFASTMUTEXINTERNAL pMtx = hFastMtx;
    if (pMtx == NIL_RTSEMFASTMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pMtx, VERR_INVALID_HANDLE);
    AssertMsgReturn(pMtx->u32Magic == RTSEMFASTMUTEX_MAGIC,
                    ("%p: u32Magic=%RX32\n", pMtx, pMtx->u32Magic),
                    VERR_INVALID_HANDLE);

    /* Kill the magic, tear down the sx lock, and free the structure. */
    ASMAtomicWriteU32(&pMtx->u32Magic, RTSEMFASTMUTEX_MAGIC_DEAD);
    sx_destroy(&pMtx->SxLock);
    RTMemFree(pMtx);

    return VINF_SUCCESS;
}
/**
 * Deletes one critical section.
 *
 * @returns Return code from RTCritSectDelete.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pUVM        Pointer to the user-mode VM structure (holds the
 *                      critical section list and ListCritSect).
 * @param   pCritSect   The critical section.
 * @param   pPrev       The previous critical section in the list.
 * @param   fFinal      Set if this is the final call and statistics shouldn't be deregistered.
 *
 * @remarks Caller must have entered the ListCritSect.
 */
static int pdmR3CritSectDeleteOne(PVM pVM, PUVM pUVM, PPDMCRITSECTINT pCritSect, PPDMCRITSECTINT pPrev, bool fFinal)
{
    /*
     * Assert free waiters and so on (c&p from RTCritSectDelete).
     */
    Assert(pCritSect->Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->Core.cNestings == 0);
    Assert(pCritSect->Core.cLockers == -1);
    Assert(pCritSect->Core.NativeThreadOwner == NIL_RTNATIVETHREAD);
    Assert(RTCritSectIsOwner(&pUVM->pdm.s.ListCritSect));

    /*
     * Unlink it.
     */
    if (pPrev)
        pPrev->pNext = pCritSect->pNext;
    else
        pUVM->pdm.s.pCritSects = pCritSect->pNext;

    /*
     * Delete it (parts taken from RTCritSectDelete).
     * In case someone is waiting we'll signal the semaphore cLockers + 1 times.
     */
    ASMAtomicWriteU32(&pCritSect->Core.u32Magic, 0); /* fail anyone entering from now on */
    SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->Core.EventSem;
    pCritSect->Core.EventSem = NIL_RTSEMEVENT;
    while (pCritSect->Core.cLockers-- >= 0)
        SUPSemEventSignal(pVM->pSession, hEvent);
    ASMAtomicWriteS32(&pCritSect->Core.cLockers, -1);
    int rc = SUPSemEventClose(pVM->pSession, hEvent);
    AssertRC(rc);
    RTLockValidatorRecExclDestroy(&pCritSect->Core.pValidatorRec);
    pCritSect->pNext   = NULL;
    pCritSect->pvKey   = NULL;
    pCritSect->pVMR3   = NULL;
    pCritSect->pVMR0   = NIL_RTR0PTR;
    pCritSect->pVMRC   = NIL_RTRCPTR;
    RTStrFree((char *)pCritSect->pszName);
    pCritSect->pszName = NULL;
    if (!fFinal)
    {
        /* Deregister the per-critsect statistics counters. */
        STAMR3Deregister(pVM, &pCritSect->StatContentionRZLock);
        STAMR3Deregister(pVM, &pCritSect->StatContentionRZUnlock);
        STAMR3Deregister(pVM, &pCritSect->StatContentionR3);
#ifdef VBOX_WITH_STATISTICS
        STAMR3Deregister(pVM, &pCritSect->StatLocked);
#endif
    }
    return rc;
}
/**
 * Releases ownership of a mutex semaphore (Windows implementation).
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE, VERR_NOT_OWNER, or a converted
 *          Win32 error from ReleaseMutex.
 * @param   hMutexSem   The mutex semaphore handle.
 */
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check ownership and recursions.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (RT_UNLIKELY(hNativeOwner != hNativeSelf))
    {
        AssertMsgFailed(("Not owner of mutex %p!! hNativeSelf=%RTntrd Owner=%RTntrd cRecursions=%d\n",
                         pThis, hNativeSelf, hNativeOwner, pThis->cRecursions));
        return VERR_NOT_OWNER;
    }
    if (pThis->cRecursions > 1)
    {
        /* Recursive hold: just pop one level, the OS mutex stays owned. */
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorRec);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Unlock mutex semaphore.
     */
#ifdef RTSEMMUTEX_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false);
    if (RT_FAILURE(rc9))
        return rc9;
#endif
    /* Clear bookkeeping before the OS-level release. */
    ASMAtomicWriteU32(&pThis->cRecursions, 0);
    ASMAtomicWriteHandle(&pThis->hNativeOwner, NIL_RTNATIVETHREAD);

    if (ReleaseMutex(pThis->hMtx))
        return VINF_SUCCESS;

    int rc = RTErrConvertFromWin32(GetLastError());
    AssertMsgFailed(("%p/%p, rc=%Rrc lasterr=%d\n", pThis, pThis->hMtx, rc, GetLastError()));
    return rc;
}
/**
 * Terminates the global display-miniport logger.
 *
 * @returns S_OK when terminated (or already uninitialized),
 *          VERR_NOT_SUPPORTED otherwise.
 *          NOTE(review): this mixes a COM status (S_OK) with an IPRT status
 *          (VERR_NOT_SUPPORTED) in one int return -- callers must not treat
 *          the value uniformly as either convention; confirm intent.
 */
VBOXDISPMPLOGGER_DECL(int) VBoxDispMpLoggerTerm(void)
{
    /* Race for the INITIALIZED -> UNINITIALIZING transition; only one thread wins. */
    if (ASMAtomicCmpXchgU32((volatile uint32_t *)&g_VBoxDispMpLogger.enmState, VBOXDISPMPLOGGER_STATE_UNINITIALIZING, VBOXDISPMPLOGGER_STATE_INITIALIZED))
    {
        vboxDispKmtCallbacksTerm(&g_VBoxDispMpLogger.KmtCallbacks);
        ASMAtomicWriteU32((volatile uint32_t *)&g_VBoxDispMpLogger.enmState, VBOXDISPMPLOGGER_STATE_UNINITIALIZED);
        return S_OK;
    }
    else if (ASMAtomicReadU32((volatile uint32_t *)&g_VBoxDispMpLogger.enmState) == VBOXDISPMPLOGGER_STATE_UNINITIALIZED)
    {
        /* Never initialized (or already torn down) -- nothing to do. */
        return S_OK;
    }
    return VERR_NOT_SUPPORTED;
}
/**
 * Destroys a timer.
 *
 * @returns VINF_SUCCESS; also for a NULL timer, which is a no-op.
 * @param   pTimer  The timer to destroy.  NULL is quietly accepted.
 */
RTDECL(int) RTTimerDestroy(PRTTIMER pTimer)
{
    /* NULL is a documented no-op. */
    if (!pTimer)
        return VINF_SUCCESS;
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    /*
     * Stop the timer first, then invalidate the handle and release the
     * memory backing it.
     */
    RTTimerStop(pTimer);
    ASMAtomicWriteU32(&pTimer->u32Magic, ~RTTIMER_MAGIC);
    RTMemFree(pTimer);

    return VINF_SUCCESS;
}