/**
 * Timer callback function for the non-omni timers.
 *
 * @param   pDpc                Pointer to the DPC.
 * @param   pvUser              Pointer to our internal timer structure.
 * @param   SystemArgument1     Some system argument.
 * @param   SystemArgument2     Some system argument.
 */
static void _stdcall rtTimerNtSimpleCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMER pTimer = (PRTTIMER)pvUser;
    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtSimpleCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            ASMAtomicWriteBool(&pTimer->fSuspended, true);
        uint64_t iTick = ++pTimer->aSubTimers[0].iTick;
        if (pTimer->u64NanoInterval)
            rtTimerNtRearmInternval(pTimer, iTick, &pTimer->aSubTimers[0].NtDpc);
        pTimer->pfnTimer(pTimer, pTimer->pvUser, iTick);

        ASMAtomicWriteHandle(&pTimer->aSubTimers[0].hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
/**
 * The slave DPC callback for an omni timer.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniSlaveCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniSlaveCallback: iCpuSelf=%d pSubTimer=%p / %d\n", iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, RTThreadNativeSelf());

        if (!pTimer->u64NanoInterval)
            if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0)
                ASMAtomicWriteBool(&pTimer->fSuspended, true);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);

        ASMAtomicWriteHandle(&pSubTimer->hActiveThread, NIL_RTNATIVETHREAD);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
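/*
 * Illustrative sketch (not IPRT code): for a one-shot omni timer, each per-CPU
 * slave above decrements cOmniSuspendCountDown, and only the slave that brings
 * it to zero marks the whole timer suspended. Below is a standalone C11
 * approximation with invented names (g_cSuspendCountDown, slaveCallback);
 * plain threads stand in for the per-CPU DPCs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

#define NUM_CPUS 4

static atomic_int  g_cSuspendCountDown = NUM_CPUS;
static atomic_bool g_fSuspended        = false;

static int slaveCallback(void *pvUser)
{
    int iCpu = *(int *)pvUser;
    /* Mirrors: if (ASMAtomicDecS32(&pTimer->cOmniSuspendCountDown) <= 0) */
    if (atomic_fetch_sub(&g_cSuspendCountDown, 1) - 1 <= 0)
        atomic_store(&g_fSuspended, true);
    printf("cpu %d fired\n", iCpu);
    return 0;
}

int main(void)
{
    thrd_t ahThreads[NUM_CPUS];
    int    aiCpu[NUM_CPUS];
    for (int i = 0; i < NUM_CPUS; i++)
    {
        aiCpu[i] = i;
        thrd_create(&ahThreads[i], slaveCallback, &aiCpu[i]);
    }
    for (int i = 0; i < NUM_CPUS; i++)
        thrd_join(ahThreads[i], NULL);
    /* Exactly one thread observed the countdown reaching zero. */
    printf("suspended=%d\n", (int)atomic_load(&g_fSuspended));
    return 0;
}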
/**
 * Set fullscreen mode on or off.
 *
 * @param   fFullscreen     Whether to enable fullscreen mode.
 * @remarks Must be called from the SDL thread!
 */
void SDLFramebuffer::setFullscreen(bool fFullscreen)
{
    AssertMsg(mSdlNativeThread == RTThreadNativeSelf(), ("Wrong thread! SDL is not threadsafe!\n"));
    LogFlow(("SDLFramebuffer::setFullscreen: fullscreen: %d\n", fFullscreen));
    mfFullscreen = fFullscreen;
    resize();
}
Display* stubGetWindowDisplay(WindowInfo *pWindow)
{
#if defined(CR_NEWWINTRACK)
    if (   NIL_RTTHREAD != stub.hSyncThread
        && RTThreadNativeSelf() == RTThreadGetNative(stub.hSyncThread))
    {
        if (pWindow && pWindow->dpy && !pWindow->syncDpy)
        {
            crDebug("going to XOpenDisplay(%s)", pWindow->dpyName);
            pWindow->syncDpy = XOpenDisplay(pWindow->dpyName);
            if (!pWindow->syncDpy)
            {
                crWarning("Failed to open display %s", pWindow->dpyName);
            }
            return pWindow->syncDpy;
        }
        else
        {
            return pWindow ? pWindow->syncDpy : NULL;
        }
    }
    else
#endif
    {
        return pWindow ? pWindow->dpy : NULL;
    }
}
RTDECL(int) RTThreadCtxHookEnable(RTTHREADCTXHOOK hCtxHook)
{
    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(pThis->hOwner == RTThreadNativeSelf());
    Assert(!pThis->fEnabled);
    if (!pThis->fEnabled)
    {
        IPRT_LINUX_SAVE_EFL_AC();
        Assert(pThis->PreemptOps.sched_out == rtThreadCtxHooksLnxSchedOut);
        Assert(pThis->PreemptOps.sched_in == rtThreadCtxHooksLnxSchedIn);

        /*
         * Register the callback.
         */
        preempt_disable();
        pThis->fEnabled = true;
        preempt_notifier_register(&pThis->LnxPreemptNotifier);
        preempt_enable();

        IPRT_LINUX_RESTORE_EFL_AC();
    }

    return VINF_SUCCESS;
}
RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    uint32_t                cLockers;
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Get the saved state and try release the semaphore.
     */
    State = pThis->SavedState;
    ASMCompilerBarrier();
    ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
    AssertMsgReturn(fRc, ("hOwner=%p hSelf=%p cLockers=%d\n", pThis->hOwner, hSelf, pThis->cLockers),
                    VERR_NOT_OWNER);

    cLockers = ASMAtomicDecS32(&pThis->cLockers);
    rtSemSpinMutexLeave(&State);
    if (cLockers > 0)
    {
        int rc = RTSemEventSignal(pThis->hEventSem);
        AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
    }
    return VINF_SUCCESS;
}
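/*
 * Illustrative sketch (not IPRT code): the ownership check in the release path
 * above is a single compare-exchange on the owner handle, so a non-owner can
 * never "release" the semaphore by accident. Standalone C11 approximation
 * with an invented demoRelease helper; 0 plays the role of NIL_RTNATIVETHREAD.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_uintptr_t g_hOwner;   /* owner handle, 0 == nobody */

static bool demoRelease(uintptr_t hSelf)
{
    uintptr_t hExpected = hSelf;
    /* Mirrors ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc):
       succeeds only if the caller is the current owner. */
    return atomic_compare_exchange_strong(&g_hOwner, &hExpected, (uintptr_t)0);
}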
RTDECL(int) RTThreadCtxHookDisable(RTTHREADCTXHOOK hCtxHook)
{
    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    if (pThis != NIL_RTTHREADCTXHOOK)
    {
        AssertPtr(pThis);
        AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                        VERR_INVALID_HANDLE);
        Assert(pThis->hOwner == RTThreadNativeSelf());

        /*
         * Deregister the callback.
         */
        if (pThis->fEnabled)
        {
            IPRT_LINUX_SAVE_EFL_AC();
            rtThreadCtxHookDisable(pThis);
            IPRT_LINUX_RESTORE_EFL_AC();
        }
    }
    return VINF_SUCCESS;
}
/**
 * Adopts the calling thread.
 * No locks are taken or released by this function.
 *
 * @returns IPRT status code.
 * @param   enmType     The thread type.
 * @param   fFlags      The thread flags. RTTHREADFLAGS_WAITABLE is not allowed.
 * @param   fIntFlags   The internal thread flags.
 * @param   pszName     The thread name.
 */
static int rtThreadAdopt(RTTHREADTYPE enmType, unsigned fFlags, uint32_t fIntFlags, const char *pszName)
{
    int rc;
    PRTTHREADINT pThread;
    Assert(!(fFlags & RTTHREADFLAGS_WAITABLE));
    fFlags &= ~RTTHREADFLAGS_WAITABLE;

    /*
     * Allocate and insert the thread.
     * (It is vital that rtThreadNativeAdopt updates the TLS before
     * we try inserting the thread because of locking.)
     */
    rc = VERR_NO_MEMORY;
    pThread = rtThreadAlloc(enmType, fFlags, RTTHREADINT_FLAGS_ALIEN | fIntFlags, pszName);
    if (pThread)
    {
        RTNATIVETHREAD NativeThread = RTThreadNativeSelf();
        rc = rtThreadNativeAdopt(pThread);
        if (RT_SUCCESS(rc))
        {
            rtThreadInsert(pThread, NativeThread);
            rtThreadSetState(pThread, RTTHREADSTATE_RUNNING);
            rtThreadRelease(pThread);
        }
    }
    return rc;
}
RTDECL(int) RTThreadCtxHooksRegister(RTTHREADCTX hThreadCtx, PFNRTTHREADCTXHOOK pfnThreadCtxHook, void *pvUser)
{
    /*
     * Validate input.
     */
    PRTTHREADCTXINT pThis = hThreadCtx;
    if (pThis == NIL_RTTHREADCTX)
        return VERR_INVALID_HANDLE;
    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(pThis->hOwner == RTThreadNativeSelf());
    Assert(!pThis->hPreemptOps.sched_out);
    Assert(!pThis->hPreemptOps.sched_in);

    /*
     * Register the callback.
     */
    pThis->hPreemptOps.sched_out = rtThreadCtxHooksLnxSchedOut;
    pThis->hPreemptOps.sched_in  = rtThreadCtxHooksLnxSchedIn;
    pThis->pvUser                = pvUser;
    pThis->pfnThreadCtxHook      = pfnThreadCtxHook;
    pThis->fRegistered           = true;
    preempt_notifier_register(&pThis->hPreemptNotifier);
    return VINF_SUCCESS;
}
/**
 * Update the specified framebuffer area.
 *
 * @remarks Must be called from the SDL thread on Linux! The update region
 *          is given in whole-framebuffer coordinates, including the y offset!
 * @param   x   left column
 * @param   y   top row
 * @param   w   width in pixels
 * @param   h   height in pixels
 */
void SDLFramebuffer::update(int x, int y, int w, int h)
{
#ifdef VBOXBFE_WITH_X11
    AssertMsg(mSdlNativeThread == RTThreadNativeSelf(), ("Wrong thread! SDL is not threadsafe!\n"));
#endif
    Assert(mScreen);

    uint32_t safeY = y;
    uint32_t safeH = h;
#ifdef VBOX_SECURELABEL
    /*
     * Cut down the update area to the untrusted portion
     */
    if (safeY < mLabelHeight)
        safeY = mLabelHeight;
    if ((safeH + mLabelHeight) > (mHeight + mTopOffset))
        safeH = mHeight + mTopOffset - mLabelHeight;
#endif
    SDL_UpdateRect(mScreen, x, safeY, w, safeH);
#ifdef VBOX_SECURELABEL
    paintSecureLabel(x, y, w, h, false);
#endif
}
VBoxDbgConsoleOutput::VBoxDbgConsoleOutput(QWidget *pParent/* = NULL*/, const char *pszName/* = NULL*/)
    : QTextEdit(pParent), m_uCurLine(0), m_uCurPos(0), m_hGUIThread(RTThreadNativeSelf())
{
    setReadOnly(true);
    setUndoRedoEnabled(false);
    setOverwriteMode(false);
    setPlainText("");
    setTextInteractionFlags(Qt::TextBrowserInteraction);
    setAutoFormatting(QTextEdit::AutoAll);
    setTabChangesFocus(true);
    setAcceptRichText(false);

    /*
     * Font.
     * Create actions for font menu items.
     */
    m_pCourierFontAction = new QAction(tr("Courier"), this);
    m_pCourierFontAction->setCheckable(true);
    m_pCourierFontAction->setShortcut(Qt::ControlModifier + Qt::Key_D);
    connect(m_pCourierFontAction, SIGNAL(triggered()), this, SLOT(setFontCourier()));

    m_pMonospaceFontAction = new QAction(tr("Monospace"), this);
    m_pMonospaceFontAction->setCheckable(true);
    m_pMonospaceFontAction->setShortcut(Qt::ControlModifier + Qt::Key_M);
    connect(m_pMonospaceFontAction, SIGNAL(triggered()), this, SLOT(setFontMonospace()));

    /* Create action group for grouping of exclusive font menu items. */
    QActionGroup *pActionFontGroup = new QActionGroup(this);
    pActionFontGroup->addAction(m_pCourierFontAction);
    pActionFontGroup->addAction(m_pMonospaceFontAction);
    pActionFontGroup->setExclusive(true);

    /*
     * Color scheme.
     * Create actions for color-scheme menu items.
     */
    m_pGreenOnBlackAction = new QAction(tr("Green On Black"), this);
    m_pGreenOnBlackAction->setCheckable(true);
    m_pGreenOnBlackAction->setShortcut(Qt::ControlModifier + Qt::Key_1);
    connect(m_pGreenOnBlackAction, SIGNAL(triggered()), this, SLOT(setColorGreenOnBlack()));

    m_pBlackOnWhiteAction = new QAction(tr("Black On White"), this);
    m_pBlackOnWhiteAction->setCheckable(true);
    m_pBlackOnWhiteAction->setShortcut(Qt::ControlModifier + Qt::Key_2);
    connect(m_pBlackOnWhiteAction, SIGNAL(triggered()), this, SLOT(setColorBlackOnWhite()));

    /* Create action group for grouping of exclusive color-scheme menu items. */
    QActionGroup *pActionColorGroup = new QActionGroup(this);
    pActionColorGroup->addAction(m_pGreenOnBlackAction);
    pActionColorGroup->addAction(m_pBlackOnWhiteAction);
    pActionColorGroup->setExclusive(true);

    /*
     * Set the defaults (which syncs with the menu item checked state).
     */
    setFontCourier();
    setColorGreenOnBlack();

    NOREF(pszName);
}
/**
 * Internal worker.
 */
DECLINLINE(int) rtSemMutexSolRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, bool fInterruptible)
{
    PRTSEMMUTEXINTERNAL pThis = hMutexSem;
    int                 rc    = VERR_GENERAL_FAILURE;

    /*
     * Validate.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(pThis->cRefs >= 1);

    /*
     * Lock it and check if it's a recursion.
     */
    mutex_enter(&pThis->Mtx);
    if (pThis->hOwnerThread == RTThreadNativeSelf())
    {
        pThis->cRecursions++;
        Assert(pThis->cRecursions > 1);
        Assert(pThis->cRecursions < 256);
        rc = VINF_SUCCESS;
    }
    /*
     * Not a recursion, claim the unowned mutex if there are no waiters.
     */
    else if (   pThis->hOwnerThread == NIL_RTNATIVETHREAD
             && pThis->cWaiters == 0)
    {
        pThis->cRecursions  = 1;
        pThis->hOwnerThread = RTThreadNativeSelf();
        rc = VINF_SUCCESS;
    }
    /*
     * A polling call?
     */
    else if (cMillies == 0)
        rc = VERR_TIMEOUT;
    /*
     * No, we really need to get to sleep.
     */
    else
        rc = rtSemMutexSolRequestSleep(pThis, cMillies, fInterruptible);

    mutex_exit(&pThis->Mtx);
    return rc;
}
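/*
 * Illustrative sketch (not IPRT/Solaris code): the request path above keeps a
 * recursion count next to the owner handle, so the owner re-enters cheaply
 * while everyone else either claims the free mutex or sleeps. Standalone
 * pthread approximation with invented demoRecMutex* names; initialize Mtx and
 * Cond with pthread_mutex_init()/pthread_cond_init() before use.
 */
#include <pthread.h>
#include <stdint.h>

typedef struct DEMORECMUTEX
{
    pthread_mutex_t Mtx;            /* protects the fields below (plays the kmutex role) */
    pthread_cond_t  Cond;           /* wakes sleepers on full release */
    pthread_t       hOwnerThread;   /* valid while cRecursions > 0 */
    uint32_t        cRecursions;    /* 0 == unowned */
} DEMORECMUTEX;

static void demoRecMutexRequest(DEMORECMUTEX *pThis)
{
    pthread_mutex_lock(&pThis->Mtx);
    if (pThis->cRecursions > 0 && pthread_equal(pThis->hOwnerThread, pthread_self()))
        pThis->cRecursions++;                       /* recursion: the owner re-enters */
    else
    {
        while (pThis->cRecursions > 0)              /* owned by another thread: sleep */
            pthread_cond_wait(&pThis->Cond, &pThis->Mtx);
        pThis->hOwnerThread = pthread_self();       /* claim the unowned mutex */
        pThis->cRecursions  = 1;
    }
    pthread_mutex_unlock(&pThis->Mtx);
}

static void demoRecMutexRelease(DEMORECMUTEX *pThis)
{
    pthread_mutex_lock(&pThis->Mtx);
    if (--pThis->cRecursions == 0)
        pthread_cond_signal(&pThis->Cond);          /* fully released: wake one waiter */
    pthread_mutex_unlock(&pThis->Mtx);
}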
void VBoxDbgConsoleInput::setLineEdit(QLineEdit *pEdit)
{
    Assert(m_hGUIThread == RTThreadNativeSelf());
    QComboBox::setLineEdit(pEdit);
    if (lineEdit() == pEdit && pEdit)
        connect(pEdit, SIGNAL(returnPressed()), this, SLOT(returnPressed()));
}
VBoxDbgConsoleOutput::~VBoxDbgConsoleOutput()
{
    Assert(m_hGUIThread == RTThreadNativeSelf());
    if (m_pVirtualBox)
    {
        m_pVirtualBox->Release();
        m_pVirtualBox = NULL;
    }
}
/**
 * Native thread main function.
 *
 * @param   pvThreadInt     The thread structure.
 */
static void rtThreadNativeMain(void *pvThreadInt)
{
    PRTTHREADINT pThreadInt = (PRTTHREADINT)pvThreadInt;

    AssertCompile(sizeof(kt_did_t) == sizeof(pThreadInt->tid));
    uint64_t *pu64ThrId = SOL_THREAD_ID_PTR;
    pThreadInt->tid = *pu64ThrId;

    rtThreadMain(pThreadInt, RTThreadNativeSelf(), &pThreadInt->szName[0]);
    thread_exit();
}
static int rtCritSectRwEnterShared(PRTCRITSECTRW pThis, PCRTLOCKVALSRCPOS pSrcPos, bool fTryOnly)
{
    /*
     * Validate input.
     */
    AssertPtr(pThis);
    AssertReturn(pThis->u32Magic == RTCRITSECTRW_MAGIC, VERR_SEM_DESTROYED);
#ifdef IN_RING0
    Assert(pThis->fFlags & RTCRITSECT_FLAGS_RING0);
#else
    Assert(!(pThis->fFlags & RTCRITSECT_FLAGS_RING0));
#endif

#ifdef RTCRITSECTRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (!fTryOnly)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(pThis->pValidatorWrite, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(pThis->pValidatorRead, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTCSRW_DIR_MASK) == (RTCSRW_DIR_READ << RTCSRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTCSRW_CNT_RD_MASK) >> RTCSRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTCSRW_CNT_MASK / 2);
            u64State &= ~RTCSRW_CNT_RD_MASK;
            u64State |= c << RTCSRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTCRITSECTRW_STRICT
                RTLockValidatorRecSharedAddOwner(pThis->pValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTCSRW_CNT_RD_MASK | RTCSRW_CNT_WR_MASK)) == 0
/**
 * Returns the VMCPU of the calling EMT.
 *
 * @returns The VMCPU pointer. NULL if not an EMT.
 *
 * @param   pVM     Pointer to the VM.
 * @internal
 */
VMMDECL(PVMCPU) VMMGetCpu(PVM pVM)
{
#ifdef IN_RING3
    VMCPUID idCpu = VMR3GetVMCPUId(pVM);
    if (idCpu == NIL_VMCPUID)
        return NULL;
    Assert(idCpu < pVM->cCpus);
    return &pVM->aCpus[idCpu];

#elif defined(IN_RING0)
    if (pVM->cCpus == 1)
        return &pVM->aCpus[0];

    /*
     * Search first by host cpu id (most common case)
     * and then by native thread id (page fusion case).
     */
    if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
    {
        /** @todo r=ramshankar: This doesn't buy us anything in terms of performance
         *        leaving it here for hysterical raisins and as a reference if we
         *        implemented a hashing approach in the future. */
        RTCPUID idHostCpu = RTMpCpuId();

        /** @todo optimize for large number of VCPUs when that becomes more common. */
        for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        {
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            if (pVCpu->idHostCpu == idHostCpu)
                return pVCpu;
        }
    }

    /* RTThreadNativeSelf had better be cheap. */
    RTNATIVETHREAD hThread = RTThreadNativeSelf();

    /** @todo optimize for large number of VCPUs when that becomes more common.
     *        Use a map like GIP does that's indexed by the host CPU index. */
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];
        if (pVCpu->hNativeThreadR0 == hThread)
            return pVCpu;
    }
    return NULL;

#else /* RC: Always EMT(0) */
    return &pVM->aCpus[0];
#endif /* IN_RING0 */
}
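/*
 * Illustrative sketch (invented stand-in types, not VMM code): the ring-0
 * lookup above boils down to two linear scans over the per-VCPU table, first
 * by host CPU id, then by native thread handle.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct DEMOVCPU
{
    uint32_t  idHostCpu;        /* host CPU the EMT last ran on */
    uintptr_t hNativeThread;    /* native handle of the EMT */
} DEMOVCPU;

static DEMOVCPU *demoGetCpu(DEMOVCPU *paCpus, uint32_t cCpus, uint32_t idHostCpu, uintptr_t hThreadSelf)
{
    /* Most common case: match the host CPU we are executing on. */
    for (uint32_t i = 0; i < cCpus; i++)
        if (paCpus[i].idHostCpu == idHostCpu)
            return &paCpus[i];

    /* Fallback: match by native thread handle. */
    for (uint32_t i = 0; i < cCpus; i++)
        if (paCpus[i].hNativeThread == hThreadSelf)
            return &paCpus[i];

    return NULL;    /* the caller is not an EMT */
}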
static int rtSemRWRequestRead(RTSEMRW hRWSem, RTMSINTERVAL cMillies, bool fInterruptible, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    RTSEMRWINTERNAL *pThis = hRWSem;
    if (pThis == NIL_RTSEMRW)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMRW_MAGIC, VERR_INVALID_HANDLE);

#ifdef RTSEMRW_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
    if (cMillies > 0)
    {
        int            rc9;
        RTNATIVETHREAD hNativeWriter;
        ASMAtomicUoReadHandle(&pThis->hNativeWriter, &hNativeWriter);
        if (hNativeWriter != NIL_RTTHREAD && hNativeWriter == RTThreadNativeSelf())
            rc9 = RTLockValidatorRecExclCheckOrder(&pThis->ValidatorWrite, hThreadSelf, pSrcPos, cMillies);
        else
            rc9 = RTLockValidatorRecSharedCheckOrder(&pThis->ValidatorRead, hThreadSelf, pSrcPos, cMillies);
        if (RT_FAILURE(rc9))
            return rc9;
    }
#endif

    /*
     * Get cracking...
     */
    uint64_t u64State    = ASMAtomicReadU64(&pThis->u64State);
    uint64_t u64OldState = u64State;

    for (;;)
    {
        if ((u64State & RTSEMRW_DIR_MASK) == (RTSEMRW_DIR_READ << RTSEMRW_DIR_SHIFT))
        {
            /* It flows in the right direction, try follow it before it changes. */
            uint64_t c = (u64State & RTSEMRW_CNT_RD_MASK) >> RTSEMRW_CNT_RD_SHIFT;
            c++;
            Assert(c < RTSEMRW_CNT_MASK / 2);
            u64State &= ~RTSEMRW_CNT_RD_MASK;
            u64State |= c << RTSEMRW_CNT_RD_SHIFT;
            if (ASMAtomicCmpXchgU64(&pThis->u64State, u64State, u64OldState))
            {
#ifdef RTSEMRW_STRICT
                RTLockValidatorRecSharedAddOwner(&pThis->ValidatorRead, hThreadSelf, pSrcPos);
#endif
                break;
            }
        }
        else if ((u64State & (RTSEMRW_CNT_RD_MASK | RTSEMRW_CNT_WR_MASK)) == 0
void VBoxDbgConsoleInput::returnPressed()
{
    Assert(m_hGUIThread == RTThreadNativeSelf());

    QString strCommand = currentText();
    /** @todo trim whitespace? */
    if (strCommand.isEmpty())
        return;

    /* deal with the current command. */
    emit commandSubmitted(strCommand);

    /*
     * Add current command to history.
     */
    bool fNeedsAppending = true;

    /* invariant: empty line at the end */
    int iLastItem = count() - 1;
    Assert(itemText(iLastItem).isEmpty());

    /* have previous command? check duplicate. */
    if (iLastItem > 0)
    {
        const QString strPrevCommand(itemText(iLastItem - 1));
        if (strCommand == strPrevCommand)
            fNeedsAppending = false;
    }

    if (fNeedsAppending)
    {
        /* history full? drop the oldest command. */
        if (count() == maxCount())
        {
            removeItem(0);
            --iLastItem;
        }

        /* insert before the empty line. */
        insertItem(iLastItem, strCommand);
    }

    /* invariant: empty line at the end */
    int iNewLastItem = count() - 1;
    Assert(itemText(iNewLastItem).isEmpty());

    /* select empty line to present "new" command line to the user */
    setCurrentIndex(iNewLastItem);
}
DECL_FORCE_INLINE(int) rtCritSectTryEnter(PRTCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect);
    Assert(pCritSect->u32Magic == RTCRITSECT_MAGIC);
    /*AssertReturn(pCritSect->u32Magic == RTCRITSECT_MAGIC, VERR_SEM_DESTROYED);*/

    /*
     * Return straight away if NOP.
     */
    if (pCritSect->fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    /*
     * Try take the lock. (cLockers is -1 if it's free)
     */
    RTNATIVETHREAD NativeThreadSelf = RTThreadNativeSelf();
    if (!ASMAtomicCmpXchgS32(&pCritSect->cLockers, 0, -1))
    {
        /*
         * Somebody is owning it (or will be soon). Perhaps it's us?
         */
        if (pCritSect->NativeThreadOwner == NativeThreadSelf)
        {
            if (!(pCritSect->fFlags & RTCRITSECT_FLAGS_NO_NESTING))
            {
#ifdef RTCRITSECT_STRICT
                int rc9 = RTLockValidatorRecExclRecursion(pCritSect->pValidatorRec, pSrcPos);
                if (RT_FAILURE(rc9))
                    return rc9;
#endif
                ASMAtomicIncS32(&pCritSect->cLockers);
                pCritSect->cNestings++;
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("Nested entry of critsect %p\n", pCritSect));
            return VERR_SEM_NESTED;
        }
        return VERR_SEM_BUSY;
    }

    /*
     * First time
     */
    pCritSect->cNestings = 1;
    ASMAtomicWriteHandle(&pCritSect->NativeThreadOwner, NativeThreadSelf);
#ifdef RTCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
#endif

    return VINF_SUCCESS;
}
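/*
 * Illustrative sketch (not IPRT code): the try-enter fast path above hinges on
 * cLockers being -1 when the section is free, so a single compare-exchange
 * both tests and claims it. Standalone C11 approximation with an invented
 * DEMOCRITSECT type; validation and the lock validator are omitted.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct DEMOCRITSECT
{
    atomic_int cLockers;    /* -1 free, 0 single owner, >0 owner plus nestings/waiters */
    uintptr_t  uOwner;      /* native handle of the current owner */
    int        cNestings;   /* recursion depth of the owner */
} DEMOCRITSECT;

static bool demoCritSectTryEnter(DEMOCRITSECT *pCritSect, uintptr_t uSelf)
{
    /* Free? CmpXchg(-1 -> 0) claims it for us. */
    int iFree = -1;
    if (atomic_compare_exchange_strong(&pCritSect->cLockers, &iFree, 0))
    {
        pCritSect->uOwner    = uSelf;
        pCritSect->cNestings = 1;
        return true;
    }

    /* Taken. Nested entry by the current owner? */
    if (pCritSect->uOwner == uSelf)
    {
        atomic_fetch_add(&pCritSect->cLockers, 1);
        pCritSect->cNestings++;
        return true;
    }

    return false;   /* busy, owned by somebody else */
}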
VBoxDbgBase::VBoxDbgBase(VBoxDbgGui *a_pDbgGui)
    : m_pDbgGui(a_pDbgGui), m_pVM(NULL), m_hGUIThread(RTThreadNativeSelf())
{
    /*
     * Register
     */
    PVM pVM = a_pDbgGui->getVMHandle();
    if (pVM)
    {
        m_pVM = pVM;
        int rc = VMR3AtStateRegister(pVM, atStateChange, this);
        AssertRC(rc);
    }
}
RTDECL(int) RTSemMutexRelease(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Check ownership and recursions.
     */
    RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
    RTNATIVETHREAD hNativeOwner;
    ASMAtomicReadHandle(&pThis->hNativeOwner, &hNativeOwner);
    if (RT_UNLIKELY(hNativeOwner != hNativeSelf))
    {
        AssertMsgFailed(("Not owner of mutex %p!! hNativeSelf=%RTntrd Owner=%RTntrd cRecursions=%d\n",
                         pThis, hNativeSelf, hNativeOwner, pThis->cRecursions));
        return VERR_NOT_OWNER;
    }
    if (pThis->cRecursions > 1)
    {
#ifdef RTSEMMUTEX_STRICT
        int rc9 = RTLockValidatorRecExclUnwind(&pThis->ValidatorRec);
        if (RT_FAILURE(rc9))
            return rc9;
#endif
        ASMAtomicDecU32(&pThis->cRecursions);
        return VINF_SUCCESS;
    }

    /*
     * Unlock mutex semaphore.
     */
#ifdef RTSEMMUTEX_STRICT
    int rc9 = RTLockValidatorRecExclReleaseOwner(&pThis->ValidatorRec, false);
    if (RT_FAILURE(rc9))
        return rc9;
#endif
    ASMAtomicWriteU32(&pThis->cRecursions, 0);
    ASMAtomicWriteHandle(&pThis->hNativeOwner, NIL_RTNATIVETHREAD);

    if (ReleaseMutex(pThis->hMtx))
        return VINF_SUCCESS;

    int rc = RTErrConvertFromWin32(GetLastError());
    AssertMsgFailed(("%p/%p, rc=%Rrc lasterr=%d\n", pThis, pThis->hMtx, rc, GetLastError()));
    return rc;
}
/**
 * Gets the ring-3 native thread handle of the calling thread.
 *
 * @returns native thread handle (ring-3).
 * @param   pCritSect   The critical section. This is used in R0 and RC.
 */
DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    NOREF(pCritSect);
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
#else
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
                    NIL_RTNATIVETHREAD);
    PVM             pVM         = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    PVMCPU          pVCpu       = VMMGetCpu(pVM);
    AssertPtr(pVCpu);
    RTNATIVETHREAD  hNativeSelf = pVCpu->hNativeThread;
    Assert(hNativeSelf != NIL_RTNATIVETHREAD);
#endif
    return hNativeSelf;
}
/**
 * Internal worker for RTSemMutexRequest and RTSemMutexRequestNoResume
 *
 * @returns IPRT status code.
 * @param   hMutexSem           The mutex handle.
 * @param   cMillies            The timeout.
 * @param   fInterruptible      Whether it's interruptible
 *                              (RTSemMutexRequestNoResume) or not
 *                              (RTSemMutexRequest).
 */
DECLINLINE(int) rtR0SemMutexDarwinRequest(RTSEMMUTEX hMutexSem, RTMSINTERVAL cMillies, wait_interrupt_t fInterruptible)
{
    /*
     * Validate input.
     */
    PRTSEMMUTEXINTERNAL pThis = (PRTSEMMUTEXINTERNAL)hMutexSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPTIBLE();
    IPRT_DARWIN_SAVE_EFL_AC();

    /*
     * Grab the lock and check out the state.
     */
    RTNATIVETHREAD  hNativeSelf = RTThreadNativeSelf();
    int             rc          = VINF_SUCCESS;
    lck_spin_lock(pThis->pSpinlock);

    /* Recursive call? */
    if (pThis->hNativeOwner == hNativeSelf)
    {
        Assert(pThis->cRecursions > 0);
        Assert(pThis->cRecursions < 256);
        pThis->cRecursions++;
    }

    /* Is it free and nobody ahead of us in the queue? */
    else if (   pThis->hNativeOwner == NIL_RTNATIVETHREAD
             && pThis->cWaiters == 0)
    {
        pThis->hNativeOwner = hNativeSelf;
        pThis->cRecursions  = 1;
    }

    /* Polling call? */
    else if (cMillies == 0)
        rc = VERR_TIMEOUT;

    /* Yawn, time for a nap... */
    else
    {
        rc = rtR0SemMutexDarwinRequestSleep(pThis, cMillies, fInterruptible, hNativeSelf);
        IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
        return rc;
    }

    lck_spin_unlock(pThis->pSpinlock);
    IPRT_DARWIN_RESTORE_EFL_ONLY_AC();
    return rc;
}
/**
 * Adopts a non-IPRT thread.
 *
 * @returns IPRT status code.
 * @param   enmType     The thread type.
 * @param   fFlags      The thread flags. RTTHREADFLAGS_WAITABLE is not currently allowed.
 * @param   pszName     The thread name. Optional.
 * @param   pThread     Where to store the thread handle. Optional.
 */
RTDECL(int) RTThreadAdopt(RTTHREADTYPE enmType, unsigned fFlags, const char *pszName, PRTTHREAD pThread)
{
    int      rc;
    RTTHREAD Thread;

    AssertReturn(!(fFlags & RTTHREADFLAGS_WAITABLE), VERR_INVALID_PARAMETER);
    AssertReturn(!pszName || VALID_PTR(pszName), VERR_INVALID_POINTER);
    AssertReturn(!pThread || VALID_PTR(pThread), VERR_INVALID_POINTER);

    rc = VINF_SUCCESS;
    Thread = RTThreadSelf();
    if (Thread == NIL_RTTHREAD)
    {
        /* generate a name if none was given. */
        char szName[RTTHREAD_NAME_LEN];
        if (!pszName || !*pszName)
        {
            static uint32_t s_i32AlienId = 0;
            uint32_t i32Id = ASMAtomicIncU32(&s_i32AlienId);
            RTStrPrintf(szName, sizeof(szName), "ALIEN-%RX32", i32Id);
            pszName = szName;
        }

        /* try adopt it */
        rc = rtThreadAdopt(enmType, fFlags, 0, pszName);
        Thread = RTThreadSelf();
        Log(("RTThreadAdopt: %RTthrd %RTnthrd '%s' enmType=%d fFlags=%#x rc=%Rrc\n",
             Thread, RTThreadNativeSelf(), pszName, enmType, fFlags, rc));
    }
    else
        Log(("RTThreadAdopt: %RTthrd %RTnthrd '%s' enmType=%d fFlags=%#x - already adopted!\n",
             Thread, RTThreadNativeSelf(), pszName, enmType, fFlags));

    if (pThread)
        *pThread = Thread;
    return rc;
}
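/*
 * Hedged usage sketch: a callback arriving on a thread IPRT did not create can
 * adopt itself so the IPRT thread APIs work. someForeignCallback is an
 * invented example name; the RTThreadAdopt signature and the flag rule come
 * from the function above.
 */
#include <iprt/err.h>
#include <iprt/thread.h>

static void someForeignCallback(void)
{
    RTTHREAD hThread;
    /* RTTHREADFLAGS_WAITABLE is rejected, so pass 0 for fFlags; a NULL name
       gets an auto-generated "ALIEN-%RX32" name. */
    int rc = RTThreadAdopt(RTTHREADTYPE_DEFAULT, 0 /*fFlags*/, NULL /*pszName*/, &hThread);
    if (RT_SUCCESS(rc))
    {
        /* IPRT thread APIs may now be used from this thread. */
    }
}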
RTDECL(int) RTThreadCtxHookDestroy(RTTHREADCTXHOOK hCtxHook)
{
    IPRT_LINUX_SAVE_EFL_AC();

    /*
     * Validate input.
     */
    PRTTHREADCTXHOOKINT pThis = hCtxHook;
    if (pThis == NIL_RTTHREADCTXHOOK)
        return VINF_SUCCESS;
    AssertPtr(pThis);
    AssertMsgReturn(pThis->u32Magic == RTTHREADCTXHOOKINT_MAGIC, ("pThis->u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);
    Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    Assert(!pThis->fEnabled || pThis->hOwner == RTThreadNativeSelf());

    /*
     * If there's still a registered thread-context hook, deregister it now before destroying the object.
     */
    if (pThis->fEnabled)
    {
        Assert(pThis->hOwner == RTThreadNativeSelf());
        rtThreadCtxHookDisable(pThis);
        Assert(!pThis->fEnabled); /* paranoia */
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    preempt_notifier_dec();
#endif

    ASMAtomicWriteU32(&pThis->u32Magic, ~RTTHREADCTXHOOKINT_MAGIC);
    RTMemFree(pThis);

    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
VBoxDbgConsoleInput::VBoxDbgConsoleInput(QWidget *pParent/* = NULL*/, const char *pszName/* = NULL*/)
    : QComboBox(pParent), m_hGUIThread(RTThreadNativeSelf())
{
    addItem(""); /* invariant: empty command line is the last item */

    setEditable(true);
    setInsertPolicy(NoInsert);
    setAutoCompletion(false);
    setMaxCount(50);
    const QLineEdit *pEdit = lineEdit();
    if (pEdit)
        connect(pEdit, SIGNAL(returnPressed()), this, SLOT(returnPressed()));

    NOREF(pszName);
}
SDLFramebuffer::~SDLFramebuffer()
{
    LogFlow(("SDLFramebuffer::~SDLFramebuffer\n"));
    RTCritSectDelete(&mUpdateLock);
    AssertMsg(mSdlNativeThread == RTThreadNativeSelf(), ("Wrong thread! SDL is not threadsafe!\n"));
    SDL_QuitSubSystem(SDL_INIT_VIDEO);
#ifdef VBOX_SECURELABEL
    if (mLabelFont)
        TTF_CloseFont(mLabelFont);
    TTF_Quit();
#endif
    mScreen = NULL;
}
VBoxDbgBase::VBoxDbgBase(VBoxDbgGui *a_pDbgGui)
    : m_pDbgGui(a_pDbgGui), m_pUVM(NULL), m_hGUIThread(RTThreadNativeSelf())
{
    /*
     * Register
     */
    m_pUVM = a_pDbgGui->getUvmHandle();
    if (m_pUVM)
    {
        VMR3RetainUVM(m_pUVM);

        int rc = VMR3AtStateRegister(m_pUVM, atStateChange, this);
        AssertRC(rc);
    }
}
/**
 * Yield the critical section if someone is waiting on it.
 *
 * When yielding, we'll leave the critical section and try to make sure the
 * other waiting threads get a chance of entering before we reclaim it.
 *
 * @retval  true if yielded.
 * @retval  false if not yielded.
 * @param   pCritSect           The critical section.
 */
VMMR3DECL(bool) PDMR3CritSectYield(PPDMCRITSECT pCritSect)
{
    AssertPtrReturn(pCritSect, false);
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == RTThreadNativeSelf());
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));

    /* No recursion allowed here. */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    AssertReturn(cNestings == 1, false);

    int32_t const cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
    if (cLockers < cNestings)
        return false;

#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS const SrcPos = pCritSect->s.Core.pValidatorRec->SrcPos;
#endif
    PDMCritSectLeave(pCritSect);

    /*
     * If we're lucky, then one of the waiters has entered the lock already.
     * We spin a little bit in hope for this to happen so we can avoid the
     * yield detour.
     */
    if (ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0)
    {
        int cLoops = 20;
        while (   cLoops > 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers)  >= 0)
        {
            ASMNopPause();
            cLoops--;
        }
        if (cLoops == 0)
            RTThreadYield();
    }

#ifdef PDMCRITSECT_STRICT
    int rc = PDMCritSectEnterDebug(pCritSect, VERR_IGNORED,
                                   SrcPos.uId, SrcPos.pszFile, SrcPos.uLine, SrcPos.pszFunction);
#else
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
#endif
    AssertLogRelRC(rc);
    return true;
}