/**
 * Solaris RTMpOnPair: execute @a pfnWorker on both given CPUs via a CPU
 * cross call, with preemption disabled on the calling CPU for the duration.
 *
 * @returns VINF_SUCCESS when both CPUs ran the worker,
 *          VERR_NOT_ALL_CPUS_SHOWED when only one did,
 *          VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND for bad targets,
 *          VERR_CPU_IPE_1 on an impossible hit count.
 * @param   idCpu1      First target CPU (must differ from @a idCpu2).
 * @param   idCpu2      Second target CPU.
 * @param   fFlags      RTMPON_F_XXX flags.
 * @param   pfnWorker   Worker callback to run on each CPU.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;
    RTSOLCPUSET CpuSet;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    /* Bug fix: reject only bits *outside* the valid mask.  The original check
       was missing the '~', so it failed on every valid flag (e.g.
       RTMPON_F_CONCURRENT_EXEC) while letting undefined bits through. */
    AssertReturn(!(fFlags & ~RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu1;
    Args.idCpu2    = idCpu2;
    Args.cHits     = 0;

    /* Build the two-CPU target set for the cross call. */
    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
        CpuSet.auCpus[i] = 0;
    BT_SET(CpuSet.auCpus, idCpu1);
    BT_SET(CpuSet.auCpus, idCpu2);

    /*
     * Check that both CPUs are online before doing the broadcast call.
     */
    RTThreadPreemptDisable(&PreemptState);
    if (   RTMpIsCpuOnline(idCpu1)
        && RTMpIsCpuOnline(idCpu2))
    {
        rtMpSolCrossCall(&CpuSet, rtMpSolOnPairCpuWrapper, &Args);

        Assert(Args.cHits <= 2);
        if (Args.cHits == 2)
            rc = VINF_SUCCESS;
        else if (Args.cHits == 1)
            rc = VERR_NOT_ALL_CPUS_SHOWED;
        else if (Args.cHits == 0)
            rc = VERR_CPU_OFFLINE;
        else
            rc = VERR_CPU_IPE_1;
    }
    /*
     * A CPU must be present to be considered just offline.
     */
    else if (   RTMpIsCpuPresent(idCpu1)
             && RTMpIsCpuPresent(idCpu2))
        rc = VERR_CPU_OFFLINE;
    else
        rc = VERR_CPU_NOT_FOUND;
    RTThreadPreemptRestore(&PreemptState);
    return rc;
}
/**
 * Linux RTMpPokeCpu: send a fire-and-forget IPI to @a idCpu via
 * smp_call_function_single.
 *
 * @returns VINF_SUCCESS on success, VERR_CPU_NOT_FOUND / VERR_CPU_OFFLINE for
 *          bad targets, VERR_NOT_SUPPORTED on kernels older than 2.6.19
 *          (no single-CPU call API there).
 * @param   idCpu   The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    int rc;
    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_OFFLINE;

    /* wait=0: don't block for the callback to finish on the target CPU. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    /* Kernels before 2.6.27 take an extra 'retry' argument. */
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
# else /* older kernels */
# error oops
# endif /* older kernels */
    Assert(rc == 0);
    return VINF_SUCCESS;
#else  /* older kernels */
    /* no unicast here? */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
/**
 * FreeBSD RTMpOnSpecific: run @a pfnWorker on the given CPU using
 * smp_rendezvous(_cpus).
 *
 * @returns VINF_SUCCESS if the worker ran on the target CPU (cHits == 1),
 *          VERR_CPU_NOT_FOUND otherwise.
 * @param   idCpu       Target CPU; must be online (smp_rendezvous_cpus
 *                      panics when no CPU in the mask rendezvouses).
 * @param   pfnWorker   Worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
#if __FreeBSD_version >= 900000
    cpuset_t    Mask;   /* FreeBSD 9+: arbitrary-width CPU set. */
#elif  __FreeBSD_version >= 700000
    cpumask_t   Mask;   /* FreeBSD 7/8: plain bit mask. */
#endif
    RTMPARGS    Args;

    /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_NOT_FOUND;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;      /* Incremented by the wrapper when it runs on idCpu. */

#if __FreeBSD_version >= 700000
# if __FreeBSD_version >= 900000
    CPU_SETOF(idCpu, &Mask);
# else
    Mask = (cpumask_t)1 << idCpu;
# endif
    smp_rendezvous_cpus(Mask, NULL, rtmpOnSpecificFreeBSDWrapper, smp_no_rendezvous_barrier, &Args);
#else
    /* Pre-7.0: no targeted rendezvous; run on all CPUs, wrapper filters by id. */
    smp_rendezvous(NULL, rtmpOnSpecificFreeBSDWrapper, NULL, &Args);
#endif

    return Args.cHits == 1
         ? VINF_SUCCESS
         : VERR_CPU_NOT_FOUND;
}
/**
 * FreeBSD RTMpGetMaxFrequency: query the highest frequency level of the CPU.
 *
 * Reads the 'dev.cpu.0.freq_levels' sysctl, whose first entry is the highest
 * supported level in the form "freq/power".
 *
 * @returns Maximum frequency (in the unit reported by the sysctl, MHz),
 *          0 on failure or if the CPU is offline.
 * @param   idCpu   The CPU in question (all CPUs share CPU 0's levels entry).
 */
RTDECL(uint32_t) RTMpGetMaxFrequency(RTCPUID idCpu)
{
    char szFreqLevels[20]; /* Should be enough to get the highest level which is always the first. */
    size_t cbFreqLevels = sizeof(szFreqLevels);

    if (!RTMpIsCpuOnline(idCpu))
        return 0;

    memset(szFreqLevels, 0, sizeof(szFreqLevels));

    /*
     * CPU 0 has the freq levels entry. ENOMEM is ok as we don't need all supported
     * levels but only the first one.
     *
     * Fix: the last argument of sysctlbyname() is 'size_t newlen', not a
     * pointer, so pass 0 instead of NULL (the original passed NULL, which is a
     * pointer constant and a constraint violation for an integer parameter).
     */
    int rc = sysctlbyname("dev.cpu.0.freq_levels", szFreqLevels, &cbFreqLevels, NULL, 0);
    if (   (rc && (errno != ENOMEM))
        || (cbFreqLevels == 0))
        return 0;

    /* Clear everything starting from the '/' so only the first frequency remains. */
    unsigned i = 0;
    do
    {
        if (szFreqLevels[i] == '/')
        {
            memset(&szFreqLevels[i], 0, sizeof(szFreqLevels) - i);
            break;
        }
        i++;
    } while (i < sizeof(szFreqLevels));

    /* Returns 0 on failure. */
    return RTStrToUInt32(szFreqLevels);
}
/**
 * Linux RTMpGetOnlineCoreCount: count distinct physical cores among the
 * online CPUs by collecting unique (core_id, physical_package_id) pairs
 * from sysfs topology files.
 *
 * @returns Number of online cores (at least 1 is expected).
 */
RTDECL(RTCPUID) RTMpGetOnlineCoreCount(void)
{
    RTCPUID const cMaxCpus    = rtMpLinuxMaxCpus();
    uint32_t     *pauCoreIds  = (uint32_t *)alloca(sizeof(pauCoreIds[0]) * (cMaxCpus + 1));
    uint32_t     *pauPckgIds  = (uint32_t *)alloca(sizeof(pauPckgIds[0]) * (cMaxCpus + 1));
    uint32_t      cUniqueCores = 0;

    for (RTCPUID idCpu = 0; idCpu < cMaxCpus; idCpu++)
    {
        if (!RTMpIsCpuOnline(idCpu))
            continue;

        uint32_t const idCore = (uint32_t)RTLinuxSysFsReadIntFile(0, "devices/system/cpu/cpu%d/topology/core_id",
                                                                  (int)idCpu);
        uint32_t const idPckg = (uint32_t)RTLinuxSysFsReadIntFile(0, "devices/system/cpu/cpu%d/topology/physical_package_id",
                                                                  (int)idCpu);

        /* Linear scan of the pairs recorded so far. */
        bool fSeen = false;
        for (uint32_t iEntry = 0; iEntry < cUniqueCores && !fSeen; iEntry++)
            fSeen = pauCoreIds[iEntry] == idCore
                 && pauPckgIds[iEntry] == idPckg;

        if (!fSeen)
        {
            pauCoreIds[cUniqueCores] = idCore;
            pauPckgIds[cUniqueCores] = idPckg;
            cUniqueCores++;
        }
    }

    Assert(cUniqueCores > 0);
    return cUniqueCores;
}
/**
 * NT RTMpIsCpuPossible: report whether @a idCpu is a possible CPU ID.
 *
 * @returns true if the CPU is currently online; on this platform we cannot
 *          easily tell possible-but-offline CPUs apart, so online is used as
 *          an approximation.
 * @param   idCpu   The CPU identifier to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
    /* Cannot easily distinguish between online and offline cpus. */
    /** @todo online/present cpu stuff must be corrected for proper W2K8 support
     *        (KeQueryMaximumProcessorCount). */
    return RTMpIsCpuOnline(idCpu);
}
/**
 * Darwin RTMpGetMaxFrequency: report the maximum CPU frequency in MHz.
 *
 * @returns Max frequency in MHz, 0 on failure or if the CPU is offline.
 * @param   idCpu   The CPU in question (frequency is system wide on darwin).
 */
RTDECL(uint32_t) RTMpGetMaxFrequency(RTCPUID idCpu)
{
    if (!RTMpIsCpuOnline(idCpu))
        return 0;

    /*
     * Try the 'hw.cpufrequency_max' one.  It reports Hz; round up to MHz.
     */
    uint64_t CpuFrequencyMax = 0;
    size_t cb = sizeof(CpuFrequencyMax);
    int rc = sysctlbyname("hw.cpufrequency_max", &CpuFrequencyMax, &cb, NULL, 0);
    if (!rc)
        return (CpuFrequencyMax + 999999) / 1000000;

    /*
     * Use the deprecated one.  HW_CPU_FREQ also reports Hz, so convert to MHz
     * the same way as above.  (Fix: the original returned the raw Hz value
     * from a variable misnamed 'cCpus' - an apparent copy/paste from a CPU
     * count query - which was inconsistent with the MHz contract of the
     * primary path.)
     */
    int aiMib[2];
    aiMib[0] = CTL_HW;
    aiMib[1] = HW_CPU_FREQ;
    int iFreqHz = -1;
    cb = sizeof(iFreqHz);
    rc = sysctl(aiMib, RT_ELEMENTS(aiMib), &iFreqHz, &cb, NULL, 0);
    if (rc != -1 && iFreqHz >= 1)
        return ((uint32_t)iFreqHz + 999999) / 1000000;

    AssertFailed();
    return 0;
}
/**
 * Solaris RTMpOnSpecific: run @a pfnWorker on the given CPU via a cross call,
 * with preemption disabled on the calling CPU for the duration.
 *
 * @returns VINF_SUCCESS if the worker ran exactly once on the target CPU,
 *          VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND for bad targets.
 * @param   idCpu       Target CPU.
 * @param   pfnWorker   Worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;
    RT_ASSERT_INTS_ON();   /* Interrupts must be enabled when calling this. */

    if (idCpu >= ncpus)
        return VERR_CPU_NOT_FOUND;

    if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
        return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;   /* Set to 1 by the wrapper when it runs on idCpu. */

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /* Single-CPU target set for the cross call. */
    RTSOLCPUSET CpuSet;
    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
        CpuSet.auCpus[i] = 0;
    BT_SET(CpuSet.auCpus, idCpu);

    rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);

    RTThreadPreemptRestore(&PreemptState);

    Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);

    return ASMAtomicUoReadU32(&Args.cHits) == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND;
}
/**
 * FreeBSD RTTimerStart: arm the timer to fire @a u64First nanoseconds from now.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE for a bad timer,
 *          VERR_TIMER_ACTIVE if already running, VERR_CPU_OFFLINE if the timer
 *          is bound to an offline CPU.
 * @param   pTimer      The timer handle.
 * @param   u64First    Nanoseconds until the first tick (relative on input).
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    struct timeval tv;

    if (!rtTimerIsValid(pTimer))
        return VERR_INVALID_HANDLE;
    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Calc when it should start firing.
     */
    u64First += RTTimeNanoTS();   /* u64First is now an absolute deadline. */

    pTimer->fSuspended = false;
    pTimer->iTick = 0;
    pTimer->u64StartTS = u64First;
    pTimer->u64NextTS = u64First;

    /* NOTE(review): the absolute timestamp (not the relative delay) is
       converted for tvtohz here - looks suspicious, but the callback
       reschedules itself from u64NextTS; confirm against upstream before
       changing. */
    tv.tv_sec  =  u64First / 1000000000;
    tv.tv_usec = (u64First % 1000000000) / 1000;
    callout_reset(&pTimer->Callout, tvtohz(&tv), rtTimerFreeBSDCallback, pTimer);

    return VINF_SUCCESS;
}
/**
 * NT RTMpOnPair: execute @a pfnWorker on the two given CPUs, using either a
 * broadcast IPI (concurrent) or DPCs.
 *
 * @returns VINF_SUCCESS when both CPUs ran the worker,
 *          VERR_NOT_ALL_CPUS_SHOWED / VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND /
 *          VERR_CPU_IPE_1 otherwise; VERR_NOT_SUPPORTED when concurrent
 *          execution is requested but KeIpiGenericCall is unavailable.
 * @param   idCpu1      First target CPU (must differ from @a idCpu2).
 * @param   idCpu2      Second target CPU.
 * @param   fFlags      RTMPON_F_XXX flags.
 * @param   pfnWorker   Worker callback to run on each CPU.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    /* Bug fix: reject only bits *outside* the valid mask.  The original check
       lacked the '~', which made it fail for RTMPON_F_CONCURRENT_EXEC and
       turned the flag handling below into dead code. */
    AssertReturn(!(fFlags & ~RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
    if ((fFlags & RTMPON_F_CONCURRENT_EXEC) && !g_pfnrtKeIpiGenericCall)
        return VERR_NOT_SUPPORTED;

    /*
     * Check that both CPUs are online before doing the broadcast call.
     */
    if (   RTMpIsCpuOnline(idCpu1)
        && RTMpIsCpuOnline(idCpu2))
    {
        /*
         * The broadcast IPI isn't quite as bad as it could have been, because
         * it looks like windows doesn't synchronize CPUs on the way out, they
         * seems to get back to normal work while the pair is still busy.
         */
        uint32_t cHits = 0;
        if (g_pfnrtKeIpiGenericCall)
            rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnPairBroadcastIpiWrapper,
                                           idCpu1, idCpu2, &cHits);
        else
            rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_PAIR, idCpu1, idCpu2, &cHits);
        if (RT_SUCCESS(rc))
        {
            Assert(cHits <= 2);
            if (cHits == 2)
                rc = VINF_SUCCESS;
            else if (cHits == 1)
                rc = VERR_NOT_ALL_CPUS_SHOWED;
            else if (cHits == 0)
                rc = VERR_CPU_OFFLINE;
            else
                rc = VERR_CPU_IPE_1;
        }
    }
    /*
     * A CPU must be present to be considered just offline.
     */
    else if (   RTMpIsCpuPresent(idCpu1)
             && RTMpIsCpuPresent(idCpu2))
        rc = VERR_CPU_OFFLINE;
    else
        rc = VERR_CPU_NOT_FOUND;
    return rc;
}
/**
 * Linux RTMpGetOnlineSet: fill @a pSet with the IDs of all online CPUs.
 *
 * @returns pSet.
 * @param   pSet    The set to populate (cleared first).
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCPUID const cMaxCpus = rtMpLinuxMaxCpus();

    RTCpuSetEmpty(pSet);
    for (RTCPUID iCpu = 0; iCpu < cMaxCpus; iCpu++)
    {
        if (RTMpIsCpuOnline(iCpu))
            RTCpuSetAdd(pSet, iCpu);
    }
    return pSet;
}
/**
 * RTMpGetOnlineSet: collect the IDs of all online CPUs into @a pSet.
 *
 * @returns pSet.
 * @param   pSet    The set to populate (cleared first).
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCPUID const cTotal = RTMpGetCount();

    RTCpuSetEmpty(pSet);
    for (RTCPUID iCpu = 0; iCpu < cTotal; iCpu++)
    {
        if (RTMpIsCpuOnline(iCpu))
            RTCpuSetAdd(pSet, iCpu);
    }
    return pSet;
}
/**
 * NT RTMpPokeCpu: dispatch a poke request to the installed worker.
 *
 * @returns Worker status on success, VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND for
 *          bad targets.
 * @param   idCpu   The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    if (RTMpIsCpuOnline(idCpu))
    {
        /* Calls rtMpSendIpiFallback, rtMpSendIpiWin7AndLater or rtMpSendIpiVista. */
        return g_pfnrtMpPokeCpuWorker(idCpu);
    }
    return RTMpIsCpuPossible(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;
}
/**
 * NT RTMpOnSpecific: run @a pfnWorker on the given CPU via the common
 * rtMpCall dispatcher.
 *
 * @returns rtMpCall status, or VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND for bad
 *          targets.
 * @param   idCpu       Target CPU.
 * @param   pfnWorker   Worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    if (RTMpIsCpuOnline(idCpu))
        return rtMpCall(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu);
    return RTMpIsCpuPossible(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;
}
/**
 * RTMpGetDescription: fetch a human-readable CPU description via CPUID.
 *
 * @returns VINF_SUCCESS, VERR_BUFFER_OVERFLOW if @a cbBuf is too small,
 *          VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND for bad @a idCpu.
 * @param   idCpu   CPU to describe, or NIL_RTCPUID for "any".
 * @param   pszBuf  Output buffer for the description string.
 * @param   cbBuf   Size of the output buffer in bytes.
 */
RTDECL(int) RTMpGetDescription(RTCPUID idCpu, char *pszBuf, size_t cbBuf)
{
    /*
     * Check that the specified cpu is valid & online.
     */
    if (idCpu != NIL_RTCPUID && !RTMpIsCpuOnline(idCpu))
        return RTMpIsCpuPossible(idCpu)
             ? VERR_CPU_OFFLINE
             : VERR_CPU_NOT_FOUND;

    /*
     * Construct the description string in a temporary buffer.
     * 3 leaves x 4 registers x 4 chars each, plus the terminator.
     */
    char szString[4*4*3+1];
    RT_ZERO(szString);
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
    if (!ASMHasCpuId())
        return rtMpGetDescriptionUnknown(pszBuf, cbBuf);

    uint32_t uMax;
    uint32_t uEBX, uECX, uEDX;
    ASMCpuId(0x80000000, &uMax, &uEBX, &uECX, &uEDX);
    if (uMax >= 0x80000002)
    {
        /* Brand string leaves 0x80000002..4: 16 chars per leaf. */
        ASMCpuId(0x80000002,     &szString[0  + 0],     &szString[0  + 4],     &szString[0  + 8],     &szString[0  + 12]);
        if (uMax >= 0x80000003)
            ASMCpuId(0x80000003, &szString[16 + 0],     &szString[16 + 4],     &szString[16 + 8],     &szString[16 + 12]);
        if (uMax >= 0x80000004)
            ASMCpuId(0x80000004, &szString[32 + 0],     &szString[32 + 4],     &szString[32 + 8],     &szString[32 + 12]);
    }
    else
    {
        /* Fall back on the 12-char vendor string (leaf 0: EBX,EDX,ECX order). */
        ASMCpuId(0x00000000, &uMax, &uEBX, &uECX, &uEDX);
        ((uint32_t *)&szString[0])[0] = uEBX;
        ((uint32_t *)&szString[0])[1] = uEDX;
        ((uint32_t *)&szString[0])[2] = uECX;
    }
#else
# error "PORTME or use RTMpGetDescription-generic-stub.cpp."
#endif

    /*
     * Copy it out into the buffer supplied by the caller.
     */
    char  *pszSrc = RTStrStrip(szString);
    size_t cchSrc = strlen(pszSrc);
    if (cchSrc >= cbBuf)
        return VERR_BUFFER_OVERFLOW;
    memcpy(pszBuf, pszSrc, cchSrc + 1);
    return VINF_SUCCESS;
}
/**
 * Darwin RTMpGetOnlineSet: fill @a pSet with the IDs of all online CPUs.
 *
 * @returns pSet.
 * @param   pSet    The set to populate (cleared first).
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
#if 0
    /* Disabled alternative: treat all configured CPUs as online. */
    return RTMpGetSet(pSet);
#else
    RTCpuSetEmpty(pSet);
    RTCPUID cMax = rtMpDarwinMaxCpus();
    for (RTCPUID idCpu = 0; idCpu < cMax; idCpu++)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
#endif
}
/**
 * RTMpGetOnlineSet: fill @a pSet with the IDs of all online CPUs, probing
 * every ID from 0 up to RTMpGetMaxCpuId().
 *
 * @returns pSet.
 * @param   pSet    The set to populate (cleared first).
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCPUID const idMax = RTMpGetMaxCpuId();

    RTCpuSetEmpty(pSet);
    for (RTCPUID idCpu = 0; idCpu <= idMax; idCpu++)
    {
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    }
    return pSet;
}
/**
 * FreeBSD RTMpGetCurFrequency: query the current CPU frequency via the
 * 'dev.cpu.0.freq' sysctl.
 *
 * @returns Current frequency (unit as reported by the sysctl, MHz), 0 on
 *          failure or if the CPU is offline.
 * @param   idCpu   The CPU in question (all CPUs share a common frequency).
 */
RTDECL(uint32_t) RTMpGetCurFrequency(RTCPUID idCpu)
{
    int uFreqCurr = 0;
    size_t cbParameter = sizeof(uFreqCurr);

    if (!RTMpIsCpuOnline(idCpu))
        return 0;

    /* CPU's have a common frequency. */
    /* Fix: the last argument of sysctlbyname() is 'size_t newlen', not a
       pointer, so pass 0 instead of NULL. */
    int rc = sysctlbyname("dev.cpu.0.freq", &uFreqCurr, &cbParameter, NULL, 0);
    if (rc)
        return 0;

    return (uint32_t)uFreqCurr;
}
/**
 * NT RTTimerStart (full variant): arm the NT timer to fire @a u64First
 * nanoseconds from now, supporting omni timers and manual re-arm mode.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE for a bad timer,
 *          VERR_TIMER_ACTIVE if already running, VERR_CPU_OFFLINE if bound to
 *          an offline CPU.
 * @param   pTimer      The timer handle.
 * @param   u64First    Nanoseconds until the first tick.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.  The master DPC is the one for the bound CPU when this
     * is an omni timer, otherwise sub-timer 0.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

#ifndef RTR0TIMER_NT_MANUAL_RE_ARM
    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;               /* Clamp on 32-bit overflow. */
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;                     /* Sub-millisecond periods round up to 1 ms. */
#endif

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time (100ns units). */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;              /* Zero means "now"; use the minimum delay. */

    /* Reset per-CPU tick counters before (re)arming. */
    unsigned cSubTimers = pTimer->fOmniTimer ? pTimer->cSubTimers : 1;
    for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        pTimer->aSubTimers[iCpu].iTick = 0;
    ASMAtomicWriteS32(&pTimer->cOmniSuspendCountDown, 0);
    ASMAtomicWriteBool(&pTimer->fSuspended, false);
#ifdef RTR0TIMER_NT_MANUAL_RE_ARM
    /* Manual re-arm: one-shot NT timer, rescheduled from the DPC. */
    pTimer->uNtStartTime = rtTimerNtQueryInterruptTime() + u64First / 100;
    KeSetTimerEx(&pTimer->NtTimer, DueTime, 0, pMasterDpc);
#else
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
#endif
    return VINF_SUCCESS;
}
/**
 * Gets the next online CPU.
 *
 * Advances past the remaining threads of the current chip (groups of
 * g_cThreadsToSkip), then skips any offline CPUs.
 *
 * @returns Next CPU index or RTCPUSET_MAX_CPUS.
 * @param   iCurCpu     The current CPU (index).
 */
static int getNextCpu(unsigned iCurCpu)
{
    /* Jump to the first thread of the next chip. */
    unsigned iNext = iCurCpu - iCurCpu % g_cThreadsToSkip + g_cThreadsToSkip;

    /* Skip offline cpus. */
    while (   iNext < RTCPUSET_MAX_CPUS
           && !RTMpIsCpuOnline(iNext))
        iNext++;

    /* Make sure we're within bounds (in case of bad input). */
    return iNext > RTCPUSET_MAX_CPUS ? RTCPUSET_MAX_CPUS : (int)iNext;
}
/**
 * Wrapper between the native solaris per-cpu callback and PFNRTMPWORKER
 * for the RTMpOnAll API.
 *
 * @returns 0 (cross-call convention; the value is not used).
 * @param   uArg        Pointer to the RTMPARGS package.
 * @param   uIgnored1   Ignored.
 * @param   uIgnored2   Ignored.
 */
static int rtMpSolOnAllCpuWrapper(void *uArg, void *uIgnored1, void *uIgnored2)
{
    PRTMPARGS pArgs = (PRTMPARGS)(uArg);

    /*
     * Solaris CPU cross calls execute on offline CPUs too. Check our CPU cache
     * set and ignore if it's offline.
     */
    if (!RTMpIsCpuOnline(RTMpCpuId()))
        return 0;

    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);

    NOREF(uIgnored1);
    NOREF(uIgnored2);
    return 0;
}
/**
 * Linux RTMpOnSpecific (legacy variant): run @a pfnWorker on the given CPU,
 * either directly (if we're on it) or via smp_call_function(_single).
 *
 * @returns VINF_SUCCESS, VERR_CPU_NOT_FOUND / VERR_CPU_OFFLINE for bad
 *          targets.
 * @param   idCpu       Target CPU.
 * @param   pfnWorker   Worker callback.
 * @param   pvUser1     First user argument for the worker.
 * @param   pvUser2     Second user argument for the worker.
 */
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = idCpu;
    Args.cHits = 0;     /* Set by the wrapper when it actually runs. */

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;

    /* preempt_disable may be a macro on some kernels; guard accordingly. */
# ifdef preempt_disable
    preempt_disable();
# endif
    if (idCpu != RTMpCpuId())
    {
        if (RTMpIsCpuOnline(idCpu))
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            /* Pre-2.6.27 takes an extra 'retry' argument. */
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
            /* No single-CPU API: broadcast and let the wrapper filter by id. */
            rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
            Assert(rc == 0);
            rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
    {
        /* We're already on the target CPU; invoke the worker directly. */
        rtmpLinuxWrapper(&Args);
        rc = VINF_SUCCESS;
    }
# ifdef preempt_enable
    preempt_enable();
# endif

    NOREF(rc);
    return rc;
}
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2) { IPRT_LINUX_SAVE_EFL_AC(); int rc; RTMPARGS Args; RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; Args.pfnWorker = pfnWorker; Args.pvUser1 = pvUser1; Args.pvUser2 = pvUser2; Args.idCpu = idCpu; Args.cHits = 0; if (!RTMpIsCpuPossible(idCpu)) return VERR_CPU_NOT_FOUND; RTThreadPreemptDisable(&PreemptState); if (idCpu != RTMpCpuId()) { if (RTMpIsCpuOnline(idCpu)) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */); #else /* older kernels */ rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */); #endif /* older kernels */ Assert(rc == 0); rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE; } else rc = VERR_CPU_OFFLINE; } else { rtmpLinuxWrapper(&Args); rc = VINF_SUCCESS; } RTThreadPreemptRestore(&PreemptState);; NOREF(rc); IPRT_LINUX_RESTORE_EFL_AC(); return rc; }
/**
 * Writes an MSR on the specified CPU using the kernel's fault-safe helpers.
 *
 * @returns VINF_SUCCESS on success, VERR_ACCESS_DENIED if the write faulted,
 *          VERR_CPU_OFFLINE if @a idCpu is offline, VERR_NOT_SUPPORTED when
 *          the kernel lacks the safe MSR API.
 * @param   uMsr    The MSR to write.
 * @param   idCpu   Target CPU, or NIL_RTCPUID for the current one.
 * @param   uValue  The 64-bit value to write.
 */
int VBOXCALL supdrvOSMsrProberWrite(uint32_t uMsr, RTCPUID idCpu, uint64_t uValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    int rc;

    if (idCpu == NIL_RTCPUID)
        rc = wrmsr_safe(uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue));
    else if (RTMpIsCpuOnline(idCpu))
        rc = wrmsr_safe_on_cpu(idCpu, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue));
    else
        return VERR_CPU_OFFLINE;
    if (rc == 0)
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}
/**
 * RTMpGetOnlineSet: fill @a pSet with the IDs of all online CPUs.
 *
 * On non-SMP builds only the current CPU can exist, so the set contains just
 * that one.
 *
 * @returns pSet.
 * @param   pSet    The set to populate (cleared first).
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
#ifdef CONFIG_SMP
    RTCPUID idCpu;

    RTCpuSetEmpty(pSet);
    idCpu = RTMpGetMaxCpuId();
    /* Iterate from the max ID down to 0 (inclusive). */
    do
    {
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    } while (idCpu-- > 0);
#else
    RTCpuSetEmpty(pSet);
    RTCpuSetAdd(pSet, RTMpCpuId());
#endif
    return pSet;
}
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2) { RTMPARGS Args; /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */ if (!RTMpIsCpuOnline(idCpu)) return VERR_CPU_NOT_FOUND; Args.pfnWorker = pfnWorker; Args.pvUser1 = pvUser1; Args.pvUser2 = pvUser2; Args.idCpu = idCpu; Args.cHits = 0; // XXX: is _sync needed ? call_all_cpus_sync(rtmpOnSpecificHaikuWrapper, &Args); return Args.cHits == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND; }
/**
 * FreeBSD RTMpPokeCpu: interrupt the given CPU by rendezvousing with it using
 * an empty callback.
 *
 * @returns VINF_SUCCESS, or VERR_CPU_NOT_FOUND if the CPU is not online.
 * @param   idCpu   The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if __FreeBSD_version >= 900000
    cpuset_t    Mask;   /* FreeBSD 9+: arbitrary-width CPU set. */
#elif  __FreeBSD_version >= 700000
    cpumask_t   Mask;   /* FreeBSD 7/8: plain bit mask. */
#endif

    /* Will panic if no rendezvousing cpus, so make sure the cpu is online. */
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_NOT_FOUND;

# if __FreeBSD_version >= 900000
    CPU_SETOF(idCpu, &Mask);
# else
    Mask = (cpumask_t)1 << idCpu;
# endif
    smp_rendezvous_cpus(Mask, NULL, rtmpFreeBSDPokeCallback, smp_no_rendezvous_barrier, NULL);

    return VINF_SUCCESS;
}
/**
 * NT RTMpPokeCpu: interrupt the given CPU, preferring a direct IPI and
 * falling back on a high-importance DPC targeted at that CPU.
 *
 * @returns VINF_SUCCESS on success, VERR_ACCESS_DENIED when the fallback DPC
 *          was already queued, VERR_CPU_OFFLINE / VERR_CPU_NOT_FOUND for bad
 *          targets.
 * @param   idCpu   The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
              ? VERR_CPU_NOT_FOUND
              : VERR_CPU_OFFLINE;

    /* Preferred path: send the IPI directly. */
    int rc = g_pfnrtSendIpi(idCpu);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Fallback. */
    /* NOTE(review): this lazy init is not protected against concurrent first
       callers - confirm callers serialize, or that double init is benign. */
    if (!fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(aPokeDpcs); i++)
        {
            KeInitializeDpc(&aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&aPokeDpcs[i], (int)i);
        }
        fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN bRet = KeInsertQueueDpc(&aPokeDpcs[idCpu], 0, 0);

    KeLowerIrql(oldIrql);
    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
/**
 * Reads an MSR on the specified CPU using the kernel's fault-safe helpers.
 *
 * @returns VINF_SUCCESS on success, VERR_ACCESS_DENIED if the read faulted,
 *          VERR_CPU_OFFLINE if @a idCpu is offline, VERR_NOT_SUPPORTED when
 *          the kernel lacks the safe MSR API.
 * @param   uMsr        The MSR to read.
 * @param   idCpu       Target CPU, or NIL_RTCPUID for the current one.
 * @param   puValue     Where to store the 64-bit value read.
 */
int VBOXCALL supdrvOSMsrProberRead(uint32_t uMsr, RTCPUID idCpu, uint64_t *puValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    uint32_t u32Low, u32High;
    int rc;

    if (idCpu == NIL_RTCPUID)
        rc = rdmsr_safe(uMsr, &u32Low, &u32High);
    else if (RTMpIsCpuOnline(idCpu))
        rc = rdmsr_safe_on_cpu(idCpu, uMsr, &u32Low, &u32High);
    else
        return VERR_CPU_OFFLINE;
    if (rc == 0)
    {
        /* Combine the two halves into the caller's 64-bit result. */
        *puValue = RT_MAKE_U64(u32Low, u32High);
        return VINF_SUCCESS;
    }
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}
/**
 * NT RTTimerStart (simple variant): arm the NT timer to fire @a u64First
 * nanoseconds from now.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_HANDLE for a bad timer,
 *          VERR_TIMER_ACTIVE if already running, VERR_CPU_OFFLINE if bound to
 *          an offline CPU.
 * @param   pTimer      The timer handle.
 * @param   u64First    Nanoseconds until the first tick.
 */
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    /*
     * Validate.
     */
    AssertPtrReturn(pTimer, VERR_INVALID_HANDLE);
    AssertReturn(pTimer->u32Magic == RTTIMER_MAGIC, VERR_INVALID_HANDLE);

    if (!ASMAtomicUoReadBool(&pTimer->fSuspended))
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Start the timer.  The master DPC is the one for the bound CPU when this
     * is an omni timer, otherwise sub-timer 0.
     */
    PKDPC pMasterDpc = pTimer->fOmniTimer
                     ? &pTimer->aSubTimers[RTMpCpuIdToSetIndex(pTimer->idCpu)].NtDpc
                     : &pTimer->aSubTimers[0].NtDpc;

    uint64_t u64Interval = pTimer->u64NanoInterval / 1000000; /* This is ms, believe it or not. */
    ULONG ulInterval = (ULONG)u64Interval;
    if (ulInterval != u64Interval)
        ulInterval = MAXLONG;               /* Clamp on 32-bit overflow. */
    else if (!ulInterval && pTimer->u64NanoInterval)
        ulInterval = 1;                     /* Sub-millisecond periods round up to 1 ms. */

    LARGE_INTEGER DueTime;
    DueTime.QuadPart = -(int64_t)(u64First / 100); /* Relative, NT time (100ns units). */
    /* Bug fix: the condition was inverted ('if (DueTime.QuadPart)'), which
       clobbered every non-zero due time to the minimum delay of -1.  Only a
       zero due time ("fire now") needs bumping to -1. */
    if (!DueTime.QuadPart)
        DueTime.QuadPart = -1;

    ASMAtomicWriteBool(&pTimer->fSuspended, false);
    KeSetTimerEx(&pTimer->NtTimer, DueTime, ulInterval, pMasterDpc);
    return VINF_SUCCESS;
}