RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC, ("magic=%#x\n", pThis->u32Magic));

    KIRQL SavedIrql;
    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#ifndef RTSPINLOCK_NT_HACK_NOIRQ
        RTCCUINTREG fIntSaved = ASMGetFlags();
        ASMIntDisable();
        KeAcquireSpinLock(&pThis->Spinlock, &SavedIrql);
        pThis->fIntSaved = fIntSaved; /* Stash EFLAGS.IF so release can restore it. */
#else
        /* Raise to DISPATCH_LEVEL first so the IRQL reported while we hold the lock is sane. */
        SavedIrql = KeGetCurrentIrql();
        if (SavedIrql < DISPATCH_LEVEL)
        {
            KeRaiseIrql(DISPATCH_LEVEL, &SavedIrql);
            Assert(SavedIrql < DISPATCH_LEVEL);
        }
        RTCCUINTREG fIntSaved = ASMGetFlags();
        ASMIntDisable();

        if (!ASMAtomicCmpXchgU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_TAKEN, RTSPINLOCK_NT_HACK_NOIRQ_FREE))
        {
            while (!ASMAtomicCmpXchgU32(&pThis->u32Hack, RTSPINLOCK_NT_HACK_NOIRQ_TAKEN, RTSPINLOCK_NT_HACK_NOIRQ_FREE))
                ASMNopPause();
        }

        pThis->fIntSaved = fIntSaved;
#endif
    }
    else
        KeAcquireSpinLock(&pThis->Spinlock, &SavedIrql);
    pThis->SavedIrql = SavedIrql;
}
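/*
 * For context, a sketch of the matching NT release path: it must restore both
 * the saved IRQL and, for interrupt-safe locks, the saved EFLAGS.IF stashed in
 * fIntSaved above.  This is an illustration of how SavedIrql/fIntSaved get
 * consumed, not necessarily the exact shipping implementation; the
 * RTSPINLOCK_NT_HACK_NOIRQ counterpart is omitted.
 */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_MAGIC, ("magic=%#x\n", pThis->u32Magic));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        /* Fetch the saved flags before dropping the lock. */
        RTCCUINTREG fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
        KeReleaseSpinLock(&pThis->Spinlock, pThis->SavedIrql);
        ASMSetFlags(fIntSaved);
    }
    else
        KeReleaseSpinLock(&pThis->Spinlock, pThis->SavedIrql);
}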
VMMRCDECL(int) VMMRCTestTestWriteMsr(PVM pVM, uint32_t uMsr, uint32_t u32ValueLow, uint32_t u32ValueHi,
                                     uint64_t *puValueBefore, uint64_t *puValueAfter)
{
    AssertPtrReturn(puValueBefore, VERR_INVALID_POINTER);
    AssertPtrReturn(puValueAfter, VERR_INVALID_POINTER);
    ASMIntDisable();
    RT_NOREF_PV(pVM);

    int      rc           = VINF_SUCCESS;
    uint64_t uValueBefore = UINT64_MAX;
    uint64_t uValueAfter  = UINT64_MAX;
    if (vmmRCSafeMsrRead(uMsr, &uValueBefore))
    {
        if (!vmmRCSafeMsrWrite(uMsr, RT_MAKE_U64(u32ValueLow, u32ValueHi)))
            rc = VERR_WRITE_PROTECT;
        if (!vmmRCSafeMsrRead(uMsr, &uValueAfter) && RT_SUCCESS(rc))
            rc = VERR_READ_ERROR;
        vmmRCSafeMsrWrite(uMsr, uValueBefore); /* Restore the original value. */
    }
    else
        rc = VERR_ACCESS_DENIED;

    *puValueBefore = uValueBefore;
    *puValueAfter  = uValueAfter;
    return rc;
}
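/*
 * Illustrative caller sketch (hypothetical helper and call site, not from the
 * sources): probing whether a given MSR accepts writes and what value sticks.
 * IA32_PAT (0x277) is used purely as an example MSR index.
 */
static void vmmRCExampleProbePatMsr(PVM pVM)
{
    uint64_t uBefore = 0;
    uint64_t uAfter  = 0;
    int rc = VMMRCTestTestWriteMsr(pVM, 0x277 /* IA32_PAT */,
                                   UINT32_C(0x00070406), UINT32_C(0x00070406),
                                   &uBefore, &uAfter);
    if (rc == VINF_SUCCESS)
        Log(("MSR is writable: %#RX64 -> %#RX64\n", uBefore, uAfter));
    else if (rc == VERR_WRITE_PROTECT)
        Log(("MSR is readable but rejects writes\n"));
    else
        Log(("MSR is not accessible: %Rrc\n", rc));
}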
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = ASMGetFlags();
        ASMIntDisable();
        lck_spin_lock(pThis->pSpinLock);
        pThis->fIntSaved = fIntSaved;
    }
    else
        lck_spin_lock(pThis->pSpinLock);
}
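/*
 * Sketch of the Darwin release counterpart: drop the lck_spin lock, then
 * restore the interrupt flag stashed in fIntSaved above.  An illustration of
 * the save/restore pairing, not necessarily the exact shipping code.
 */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
        lck_spin_unlock(pThis->pSpinLock);
        ASMSetFlags(fIntSaved);
    }
    else
        lck_spin_unlock(pThis->pSpinLock);
}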
VMMRCDECL(int) VMMRCTestReadMsrs(PVM pVM, uint32_t uMsr, uint32_t cMsrs, PVMMTESTMSRENTRY paResults)
{
    AssertReturn(cMsrs <= 16384, VERR_INVALID_PARAMETER);
    AssertPtrReturn(paResults, VERR_INVALID_POINTER);
    ASMIntEnable(); /* Run with interrupts enabled, so we can query more MSRs in one block. */
    RT_NOREF_PV(pVM);

    for (uint32_t i = 0; i < cMsrs; i++, uMsr++)
    {
        if (vmmRCSafeMsrRead(uMsr, &paResults[i].uValue))
            paResults[i].uMsr = uMsr;
        else
            paResults[i].uMsr = UINT64_MAX;
    }

    ASMIntDisable();
    return VINF_SUCCESS;
}
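/*
 * Illustrative caller sketch (hypothetical helper and buffer size): scanning a
 * low MSR range and logging the entries that answered.  Entries whose uMsr is
 * UINT64_MAX faulted on read and are skipped.
 */
static void vmmRCExampleDumpLowMsrs(PVM pVM)
{
    VMMTESTMSRENTRY aResults[256];
    int rc = VMMRCTestReadMsrs(pVM, 0 /* first MSR */, RT_ELEMENTS(aResults), aResults);
    if (RT_SUCCESS(rc))
        for (uint32_t i = 0; i < RT_ELEMENTS(aResults); i++)
            if (aResults[i].uMsr != UINT64_MAX)
                Log(("MSR %#06RX64 = %#018RX64\n", aResults[i].uMsr, aResults[i].uValue));
}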
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    RT_ASSERT_PREEMPT_CPUID_VAR();
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = ASMIntDisableFlags();
#endif
        mutex_enter(&pThis->Mtx);

        /*
         * Solaris 10 doesn't preserve the interrupt flag, but since we're at PIL_MAX we should be
         * fine and not get interrupts while the lock is held.  Re-disable interrupts to not upset
         * assertions & assumptions callers might have.
         */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMIntDisable();
        Assert(!ASMIntAreEnabled());
        pThis->fIntSaved = fIntSaved; /* fIntSaved only exists on x86/amd64, so keep this inside the guard. */
#endif
    }
    else
    {
#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        bool fIntsOn = ASMIntAreEnabled();
#endif

        mutex_enter(&pThis->Mtx);

#if defined(RT_STRICT) && (defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86))
        AssertMsg(fIntsOn == ASMIntAreEnabled(), ("fIntsOn=%RTbool\n", fIntsOn));
#endif
    }

    RT_ASSERT_PREEMPT_CPUID_SPIN_ACQUIRED(pThis);
}
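/*
 * Sketch of the Solaris release counterpart: exit the mutex (entered at high
 * PIL by the acquire path), then restore the saved interrupt flag on
 * x86/amd64.  Illustration only; the preemption-assertion macros of the real
 * implementation are trimmed down here.
 */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#endif
        mutex_exit(&pThis->Mtx);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else
        mutex_exit(&pThis->Mtx);
}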
RTDECL(void) RTSpinlockAcquire(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = ASMGetFlags();
#endif

#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                    pThis->fIntSaved = fIntSaved;
# endif
                    return;
                }
                ASMNopPause();
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            RTThreadYield();
        }
#else
        for (;;)
        {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMIntDisable();
#endif
            if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            {
# if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
                pThis->fIntSaved = fIntSaved;
# endif
                return;
            }
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
            ASMSetFlags(fIntSaved);
#endif
            ASMNopPause();
        }
#endif
    }
    else
    {
#if RT_CFG_SPINLOCK_GENERIC_DO_SLEEP
        for (;;)
        {
            for (int c = RT_CFG_SPINLOCK_GENERIC_DO_SLEEP; c > 0; c--)
            {
                if (ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
                    return;
                ASMNopPause();
            }
            RTThreadYield();
        }
#else
        while (!ASMAtomicCmpXchgU32(&pThis->fLocked, 1, 0))
            ASMNopPause();
#endif
    }
}
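/*
 * Sketch of the generic release counterpart: clear fLocked with a
 * compare-exchange (which catches double-release in strict builds) and restore
 * the interrupt flag saved by the interrupt-safe acquire path.  Illustration
 * only, not necessarily the exact shipping code.
 */
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertMsg(pThis && pThis->u32Magic == RTSPINLOCK_GEN_MAGIC,
              ("pThis=%p u32Magic=%08x\n", pThis, pThis ? (int)pThis->u32Magic : 0));

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE)
    {
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        uint32_t fIntSaved = pThis->fIntSaved;
        pThis->fIntSaved = 0;
#endif
        if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
            AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
        ASMSetFlags(fIntSaved);
#endif
    }
    else if (!ASMAtomicCmpXchgU32(&pThis->fLocked, 0, 1))
        AssertMsgFailed(("Spinlock %p was not locked!\n", pThis));
}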
/**
 * The GC entry point.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   uOperation  Which operation to execute (VMMGCOPERATION).
 * @param   uArg        Argument to that operation.
 */
VMMRCDECL(int) VMMGCEntry(PVM pVM, unsigned uOperation, unsigned uArg, ...)
{
    /* todo */
    switch (uOperation)
    {
        /*
         * Init RC modules.
         */
        case VMMGC_DO_VMMGC_INIT:
        {
            /*
             * Validate the svn revision (uArg).
             */
            if (uArg != VMMGetSvnRev())
                return VERR_VMM_RC_VERSION_MISMATCH;

            /*
             * Initialize the runtime.
             * (The program timestamp is found in the ellipsis.)
             */
            va_list va;
            va_start(va, uArg);
            uint64_t u64TS = va_arg(va, uint64_t);
            va_end(va);

            int rc = RTRCInit(u64TS);
            Log(("VMMGCEntry: VMMGC_DO_VMMGC_INIT - uArg=%u (svn revision) u64TS=%RX64; rc=%Rrc\n", uArg, u64TS, rc));
            AssertRCReturn(rc, rc);

            rc = PGMRegisterStringFormatTypes();
            AssertRCReturn(rc, rc);

            rc = PGMRCDynMapInit(pVM);
            AssertRCReturn(rc, rc);
            return VINF_SUCCESS;
        }

        /*
         * Testcase which is used to test interrupt forwarding.
         * It spins for a while with interrupts enabled.
         */
        case VMMGC_DO_TESTCASE_HYPER_INTERRUPT:
        {
            uint32_t volatile i = 0;
            ASMIntEnable();
            while (i < _2G32)
                i++;
            ASMIntDisable();
            return 0;
        }

        /*
         * Testcase which simply returns, this is used for
         * profiling of the switcher.
         */
        case VMMGC_DO_TESTCASE_NOP:
            return 0;

        /*
         * Testcase executes a privileged instruction to force a world switch. (in both SVM & VMX)
         */
        case VMMGC_DO_TESTCASE_HWACCM_NOP:
            ASMRdMsr_Low(MSR_IA32_SYSENTER_CS);
            return 0;

        /*
         * Delay for ~100us: CpuHz / 10000 TSC ticks amounts to 100us worth of ticks.
         */
        case VMMGC_DO_TESTCASE_INTERRUPT_MASKING:
        {
            uint64_t u64MaxTicks = (SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage) != ~(uint64_t)0
                                    ? SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage)
                                    : _2G)
                                 / 10000;
            uint64_t u64StartTSC = ASMReadTSC();
            uint64_t u64TicksNow;
            uint32_t volatile i = 0;

            do
            {
                /* waste some time and protect against getting stuck. */
                for (uint32_t volatile j = 0; j < 1000; j++, i++)
                    if (i > _2G32)
                        return VERR_GENERAL_FAILURE;

                /* check if we're done. */
                u64TicksNow = ASMReadTSC() - u64StartTSC;
            } while (u64TicksNow < u64MaxTicks);

            return VINF_SUCCESS;
        }

        /*
         * Trap testcases and unknown operations.
         */
        default:
            if (    uOperation >= VMMGC_DO_TESTCASE_TRAP_FIRST
                &&  uOperation <  VMMGC_DO_TESTCASE_TRAP_LAST)
                return vmmGCTest(pVM, uOperation, uArg);
            return VERR_INVALID_PARAMETER;
    }
}
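/*
 * Illustrative (hypothetical) call showing how the ellipsis argument pairs
 * with the va_arg() extraction above: the svn revision goes in uArg so the
 * version check can reject mismatched modules, and the program timestamp
 * follows as a uint64_t.  In reality this entry point is reached via the
 * context switcher rather than a direct C call; the helper name and timestamp
 * value below are made up.
 */
static int vmmRCExampleInit(PVM pVM)
{
    uint64_t u64ProgramTS = UINT64_C(0x0123456789abcdef); /* made-up timestamp */
    return VMMGCEntry(pVM, VMMGC_DO_VMMGC_INIT, VMMGetSvnRev(), u64ProgramTS);
}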