/**
 * Worker (re)selection callback.
 *
 * Invoked the first time somebody asks for the time and again whenever the
 * GIP is mapped or unmapped.
 *
 * @returns Nanosecond timestamp produced by the freshly selected worker.
 * @param   pData   Pointer to the RTTimeNanoTS state data.
 */
static DECLCALLBACK(uint64_t) rtTimeNanoTSInternalRediscover(PRTTIMENANOTSDATA pData)
{
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
    uint32_t           idxWorker;

    /* A usable GIP must be mapped, carry the right magic, and be in a TSC
       mode this code knows how to handle; anything else means fallback. */
    if (   pGip
        && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
        && (   pGip->u32Mode == SUPGIPMODE_SYNC_TSC
            || pGip->u32Mode == SUPGIPMODE_ASYNC_TSC))
    {
        /* Prefer the LFENCE-serialized workers when SSE2 is available,
           otherwise fall back to the CPUID-serialized ones. */
        if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2)
        {
            if (pGip->u32Mode == SUPGIPMODE_SYNC_TSC)
                idxWorker = RTTIMENANO_WORKER_SYNC_LFENCE;
            else
                idxWorker = RTTIMENANO_WORKER_ASYNC_LFENCE;
        }
        else
        {
            if (pGip->u32Mode == SUPGIPMODE_SYNC_TSC)
                idxWorker = RTTIMENANO_WORKER_SYNC_CPUID;
            else
                idxWorker = RTTIMENANO_WORKER_ASYNC_CPUID;
        }
    }
    else
        idxWorker = RTTIMENANO_WORKER_FALLBACK;

    /* Publish the selection and service the current request with it. */
    ASMAtomicXchgU32((uint32_t volatile *)&g_iWorker, idxWorker);
    return g_apfnWorkers[idxWorker](pData);
}
/**
 * Dispatches an interrupt that arrived while we were in the guest context.
 *
 * The handler is looked up in the host IDT and invoked directly (via
 * trpmR0DispatchHostInterrupt), except on configurations where that is
 * known to be unsafe, in which case the simple software-int path
 * (trpmR0DispatchHostInterruptSimple) is used instead.
 *
 * @param   pVM     The VM handle.
 * @remark  Must be called with interrupts disabled.
 */
VMMR0DECL(void) TRPMR0DispatchHostInterrupt(PVM pVM)
{
    /*
     * Get the active interrupt vector number and clear it so it cannot be
     * dispatched twice.
     */
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    RTUINT uActiveVector = pVCpu->trpm.s.uActiveVector;
    pVCpu->trpm.s.uActiveVector = ~0; /* ~0 marks "no vector pending". */
    AssertMsgReturnVoid(uActiveVector < 256, ("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n", uActiveVector));

#if HC_ARCH_BITS == 64 && defined(RT_OS_DARWIN)
    /*
     * Do it the simple and safe way.
     *
     * This is a workaround for an optimization bug in the code below
     * or a gcc 4.2 on mac (snow leopard seed 314).
     */
    trpmR0DispatchHostInterruptSimple(uActiveVector);

#else /* The complicated way: */

# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    /*
     * Check if we're in long mode or not; the 32-bit dispatch code below
     * cannot be used from a 64-bit (LMA) host kernel.
     */
    if (    (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
        &&  (ASMRdMsr(MSR_K6_EFER) & MSR_K6_EFER_LMA))
    {
        trpmR0DispatchHostInterruptSimple(uActiveVector);
        return;
    }
# endif

    /*
     * Get the handler pointer (16:32 ptr) / (16:48 ptr) out of the host IDT.
     */
    RTIDTR Idtr;
    ASMGetIDTR(&Idtr);
# if HC_ARCH_BITS == 32
    PVBOXIDTE   pIdte = &((PVBOXIDTE)Idtr.pIdt)[uActiveVector];
# else
    PVBOXIDTE64 pIdte = &((PVBOXIDTE64)Idtr.pIdt)[uActiveVector];
# endif
    AssertMsgReturnVoid(pIdte->Gen.u1Present, ("The IDT entry (%d) is not present!\n", uActiveVector));
    AssertMsgReturnVoid(    pIdte->Gen.u3Type1 == VBOX_IDTE_TYPE1
                        ||  pIdte->Gen.u5Type2 == VBOX_IDTE_TYPE2_INT_32,
                        ("The IDT entry (%d) is not 32-bit int gate! \ntype1=%#x type2=%#x\n",
                         uActiveVector, pIdte->Gen.u3Type1, pIdte->Gen.u5Type2));
# if HC_ARCH_BITS == 32
    RTFAR32   pfnHandler;
    pfnHandler.off = VBOXIDTE_OFFSET(*pIdte);
    pfnHandler.sel = pIdte->Gen.u16SegSel;
    /* uRSP is unused on 32-bit hosts; pass the all-ones sentinel. */
    const RTR0UINTREG uRSP = ~(RTR0UINTREG)0;
# else /* 64-bit: */
    RTFAR64   pfnHandler;
    pfnHandler.off = VBOXIDTE64_OFFSET(*pIdte);
    pfnHandler.sel = pIdte->Gen.u16SegSel;
    /* NOTE(review): uRSP is the all-ones sentinel here too — presumably the
       asm dispatcher treats it as "keep current stack"; confirm there. */
    const RTR0UINTREG uRSP = ~(RTR0UINTREG)0;
    if (pIdte->Gen.u3Ist)
    {
        /* IST-based handlers need a stack switch we cannot replicate here,
           so take the simple software-interrupt route instead. */
        trpmR0DispatchHostInterruptSimple(uActiveVector);
        return;
    }
# endif

    /*
     * Dispatch it.
     */
    trpmR0DispatchHostInterrupt(pfnHandler.off, pfnHandler.sel, uRSP);
#endif
}
/**
 * Called the first time somebody asks for the time or when the GIP
 * is mapped/unmapped.
 *
 * Selects the best RTTimeNanoTS worker for the current GIP mode and CPU
 * capabilities, publishes the choice atomically, and services the current
 * request with it.  In raw-mode context (IN_RC) the selection is published
 * as an index into g_apfnWorkers; elsewhere it is a direct function pointer
 * in g_pfnWorker.
 *
 * @returns Nanosecond timestamp from the selected worker.
 * @param   pData   Pointer to the RTTimeNanoTS state data.
 */
static DECLCALLBACK(uint64_t) rtTimeNanoTSInternalRediscover(PRTTIMENANOTSDATA pData)
{
    PSUPGLOBALINFOPAGE      pGip = g_pSUPGlobalInfoPage;
# ifdef IN_RC
    uint32_t                iWorker;       /* Index into g_apfnWorkers. */
# else
    PFNTIMENANOTSINTERNAL   pfnWorker;     /* Direct worker pointer. */
# endif

    /* A usable GIP must be mapped, have the right magic, and be in a TSC
       mode this code understands; otherwise use the fallback worker. */
    if (   pGip
        && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
        && (   pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC
            || pGip->u32Mode == SUPGIPMODE_SYNC_TSC
            || pGip->u32Mode == SUPGIPMODE_ASYNC_TSC))
    {
        if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2)
        {
            /*
             * SSE2 present: use the LFENCE-serialized workers.
             */
# ifdef IN_RC
            iWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                    ? RTTIMENANO_WORKER_LFENCE_ASYNC
                    : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                    ? RTTIMENANO_WORKER_LFENCE_SYNC_INVAR_NO_DELTA
                    : RTTIMENANO_WORKER_LFENCE_SYNC_INVAR_WITH_DELTA;
# elif defined(IN_RING0)
            pfnWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                      ? RTTimeNanoTSLFenceAsync
                      : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                      ? RTTimeNanoTSLFenceSyncInvarNoDelta
                      : RTTimeNanoTSLFenceSyncInvarWithDelta;
# else
            /* Ring-3: also pick how to identify the current CPU
               (IDTR limit trick, RDTSCP aux word, or APIC ID). */
            if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLFenceAsyncUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLFenceAsyncUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? RTTimeNanoTSLFenceAsyncUseApicId
                          : rtTimeNanoTSInternalFallback;
            else
                /* Sync/invariant: the delta-free worker is used whenever the
                   per-CPU TSC deltas are small enough to ignore. */
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                          ? RTTimeNanoTSLFenceSyncInvarNoDelta
                          : RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                          ? RTTimeNanoTSLFenceSyncInvarNoDelta
                          : RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                          ? RTTimeNanoTSLFenceSyncInvarNoDelta
                          : RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId
                          : rtTimeNanoTSInternalFallback;
# endif
        }
        else
        {
            /*
             * No SSE2: use the legacy (CPUID-serialized) workers.
             * Mirrors the LFENCE selection above.
             */
# ifdef IN_RC
            iWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                    ? RTTIMENANO_WORKER_LEGACY_ASYNC
                    : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                    ? RTTIMENANO_WORKER_LEGACY_SYNC_INVAR_NO_DELTA
                    : RTTIMENANO_WORKER_LEGACY_SYNC_INVAR_WITH_DELTA;
# elif defined(IN_RING0)
            pfnWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                      ? RTTimeNanoTSLegacyAsync
                      : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                      ? RTTimeNanoTSLegacySyncInvarNoDelta
                      : RTTimeNanoTSLegacySyncInvarWithDelta;
# else
            /* NOTE(review): the legacy path probes RDTSCP before the IDTR
               limit trick, the reverse of the LFENCE path above — presumably
               deliberate; confirm before unifying. */
            if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLegacyAsyncUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLegacyAsyncUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? RTTimeNanoTSLegacyAsyncUseApicId
                          : rtTimeNanoTSInternalFallback;
            else
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                          ? RTTimeNanoTSLegacySyncInvarNoDelta
                          : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                          ? RTTimeNanoTSLegacySyncInvarNoDelta
                          : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                          ? RTTimeNanoTSLegacySyncInvarNoDelta
                          : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId
                          : rtTimeNanoTSInternalFallback;
# endif
        }
    }
    else
        /* No usable GIP. */
# ifdef IN_RC
        iWorker = RTTIMENANO_WORKER_FALLBACK;
# else
        pfnWorker = rtTimeNanoTSInternalFallback;
# endif

    /*
     * Publish the selection and service the current request with it.
     */
# ifdef IN_RC
    ASMAtomicWriteU32((uint32_t volatile *)&g_iWorker, iWorker);
    return g_apfnWorkers[iWorker](pData);
# else
    ASMAtomicWritePtr((void * volatile *)&g_pfnWorker, (void *)(uintptr_t)pfnWorker);
    return pfnWorker(pData);
# endif
}