int CollectorWin::getHostCpuMHz(ULONG *mhz)
{
    uint64_t uTotalMhz   = 0;
    RTCPUID  nProcessors = RTMpGetCount();
    PPROCESSOR_POWER_INFORMATION ppi = (PPROCESSOR_POWER_INFORMATION)
        RTMemAllocZ(nProcessors * sizeof(PROCESSOR_POWER_INFORMATION));

    if (!ppi)
        return VERR_NO_MEMORY;

    LONG ns = CallNtPowerInformation(ProcessorInformation, NULL, 0, ppi,
                                     nProcessors * sizeof(PROCESSOR_POWER_INFORMATION));
    if (ns)
    {
        Log(("CallNtPowerInformation() -> %x\n", ns));
        RTMemFree(ppi);
        return VERR_INTERNAL_ERROR;
    }

    /* Compute an average over all CPUs */
    for (unsigned i = 0; i < nProcessors; i++)
        uTotalMhz += ppi[i].CurrentMhz;
    *mhz = (ULONG)(uTotalMhz / nProcessors);

    RTMemFree(ppi);
    LogFlowThisFunc(("mhz=%u\n", *mhz));
    LogFlowThisFuncLeave();
    return VINF_SUCCESS;
}
/**
 * Determines the NT kernel version information.
 *
 * @param   pOsVerInfo          Where to return the version information.
 *
 * @remarks pOsVerInfo->fSmp is only definitive if @c true.
 * @remarks pOsVerInfo->uCsdNo is set to MY_NIL_CSD if it cannot be determined.
 */
static void rtR0NtGetOsVersionInfo(PRTNTSDBOSVER pOsVerInfo)
{
    ULONG ulMajorVersion = 0;
    ULONG ulMinorVersion = 0;
    ULONG ulBuildNumber  = 0;

    pOsVerInfo->fChecked  = PsGetVersion(&ulMajorVersion, &ulMinorVersion, &ulBuildNumber, NULL) == TRUE;
    pOsVerInfo->uMajorVer = (uint8_t)ulMajorVersion;
    pOsVerInfo->uMinorVer = (uint8_t)ulMinorVersion;
    pOsVerInfo->uBuildNo  = ulBuildNumber;

#define MY_NIL_CSD 0x3f
    pOsVerInfo->uCsdNo    = MY_NIL_CSD;

    if (g_pfnrtRtlGetVersion)
    {
        RTL_OSVERSIONINFOEXW VerInfo;
        RT_ZERO(VerInfo);
        VerInfo.dwOSVersionInfoSize = sizeof(VerInfo);

        NTSTATUS rcNt = g_pfnrtRtlGetVersion(&VerInfo);
        if (NT_SUCCESS(rcNt))
            pOsVerInfo->uCsdNo = VerInfo.wServicePackMajor;
    }

    /* Note! We cannot quite say whether the kernel is MP or UNI, so fSmp is
             redefined to indicate that it must be MP. */
    pOsVerInfo->fSmp = RTMpGetCount() > 1
                    || ulMajorVersion >= 6; /* Vista and later have no UNI kernel AFAIK. */
}
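A minimal user-mode sketch of the same idea, assuming only the documented RtlGetVersion export of ntdll.dll and resolving it dynamically; printNtVersion and the reduced error handling are illustrative, not part of the code above.

#include <windows.h>
#include <stdio.h>

/* RtlGetVersion returns an NTSTATUS, which is a LONG; >= 0 means success. */
typedef LONG (WINAPI *PFNRTLGETVERSION)(PRTL_OSVERSIONINFOW);

static void printNtVersion(void)
{
    HMODULE hNtDll = GetModuleHandleW(L"ntdll.dll");
    PFNRTLGETVERSION pfnRtlGetVersion = hNtDll
        ? (PFNRTLGETVERSION)GetProcAddress(hNtDll, "RtlGetVersion") : NULL;
    if (pfnRtlGetVersion)
    {
        RTL_OSVERSIONINFOW VerInfo = { sizeof(VerInfo) };
        if (pfnRtlGetVersion(&VerInfo) >= 0) /* NT_SUCCESS */
            printf("NT %lu.%lu build %lu\n",
                   VerInfo.dwMajorVersion, VerInfo.dwMinorVersion, VerInfo.dwBuildNumber);
    }
}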
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    int idCpu = RTMpGetCount();
    while (idCpu-- > 0)
        RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCPUID iCpu = RTMpGetCount();
    RTCpuSetEmpty(pSet);
    while (iCpu-- > 0)
        RTCpuSetAddByIndex(pSet, iCpu);
    return pSet;
}
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID cCpus = RTMpGetCount();
    for (RTCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
int CollectorLinux::_getRawHostCpuLoad()
{
    int rc = VINF_SUCCESS;
    long long unsigned uUser, uNice, uKernel, uIdle, uIowait, uIrq, uSoftirq;
    FILE *f = fopen("/proc/stat", "r");

    if (f)
    {
        char szBuf[128];
        if (fgets(szBuf, sizeof(szBuf), f))
        {
            if (sscanf(szBuf, "cpu %llu %llu %llu %llu %llu %llu %llu",
                       &uUser, &uNice, &uKernel, &uIdle, &uIowait, &uIrq, &uSoftirq) == 7)
            {
                mUser   = uUser + uNice;
                mKernel = uKernel + uIrq + uSoftirq;
                mIdle   = uIdle + uIowait;
            }
            /* Try to get single CPU stats. */
            if (fgets(szBuf, sizeof(szBuf), f))
            {
                if (sscanf(szBuf, "cpu0 %llu %llu %llu %llu %llu %llu %llu",
                           &uUser, &uNice, &uKernel, &uIdle, &uIowait, &uIrq, &uSoftirq) == 7)
                {
                    mSingleUser   = uUser + uNice;
                    mSingleKernel = uKernel + uIrq + uSoftirq;
                    mSingleIdle   = uIdle + uIowait;
                }
                else
                {
                    /* Assume that this is not an SMP system. */
                    Assert(RTMpGetCount() == 1);
                    mSingleUser   = mUser;
                    mSingleKernel = mKernel;
                    mSingleIdle   = mIdle;
                }
            }
            else
                rc = VERR_FILE_IO_ERROR;
        }
        else
            rc = VERR_FILE_IO_ERROR;
        fclose(f);
    }
    else
        rc = VERR_ACCESS_DENIED;

    return rc;
}
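The counters stored above are cumulative, so a load percentage only falls out of the difference between two samples. A minimal sketch of that arithmetic, with a hypothetical RawCpuSample struct whose fields mirror mUser/mKernel/mIdle:

/* Hypothetical raw sample as produced by a /proc/stat pass like the one above. */
struct RawCpuSample
{
    unsigned long long user;    /* user + nice            */
    unsigned long long kernel;  /* system + irq + softirq */
    unsigned long long idle;    /* idle + iowait          */
};

/* Percent of non-idle time between two samples; returns 0 if no time elapsed. */
static unsigned cpuLoadPercent(const struct RawCpuSample *pOld, const struct RawCpuSample *pNew)
{
    unsigned long long busy  = (pNew->user - pOld->user) + (pNew->kernel - pOld->kernel);
    unsigned long long total = busy + (pNew->idle - pOld->idle);
    return total ? (unsigned)(busy * 100 / total) : 0;
}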
/**
 * Run-once function that initializes the kstats we need here.
 *
 * @returns IPRT status code.
 * @param   pvUser1     Unused.
 * @param   pvUser2     Unused.
 */
static DECLCALLBACK(int) rtMpSolarisOnce(void *pvUser1, void *pvUser2)
{
    int rc = VINF_SUCCESS;
    NOREF(pvUser1);
    NOREF(pvUser2);

    /*
     * Open kstat and find the cpu_info entries for each of the CPUs.
     */
    g_pKsCtl = kstat_open();
    if (g_pKsCtl)
    {
        g_capCpuInfo = RTMpGetCount();
        g_papCpuInfo = (kstat_t **)RTMemAllocZ(g_capCpuInfo * sizeof(kstat_t *));
        if (g_papCpuInfo)
        {
            rc = RTCritSectInit(&g_MpSolarisCritSect);
            if (RT_SUCCESS(rc))
            {
                RTCPUID i = 0;
                for (kstat_t *pKsp = g_pKsCtl->kc_chain; pKsp != NULL; pKsp = pKsp->ks_next)
                {
                    if (!strcmp(pKsp->ks_module, "cpu_info"))
                    {
                        AssertBreak(i < g_capCpuInfo);
                        g_papCpuInfo[i++] = pKsp;
                        /** @todo ks_instance == cpu_id (/usr/src/uts/common/os/cpu.c)? Check this and fix it ASAP. */
                    }
                }
                return VINF_SUCCESS;
            }

            /* bail out, we failed. */
            RTMemFree(g_papCpuInfo);
        }
        else
            rc = VERR_NO_MEMORY;
        kstat_close(g_pKsCtl);
        g_pKsCtl = NULL;
    }
    else
    {
        rc = RTErrConvertFromErrno(errno);
        if (RT_SUCCESS(rc))
            rc = VERR_INTERNAL_ERROR;
        Log(("kstat_open() -> %d (%Rrc)\n", errno, rc));
    }

    return rc;
}
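For comparison, a user-land sketch of walking the same cpu_info kstats via libkstat; it assumes the cpu_info instances expose a clock_MHz named statistic (true on typical Solaris releases) and printCpuInfoClocks is an illustrative name, not part of the code above.

#include <kstat.h>
#include <stdio.h>
#include <string.h>

/* Print the clock_MHz statistic of every cpu_info kstat instance. */
static void printCpuInfoClocks(void)
{
    kstat_ctl_t *pKc = kstat_open();
    if (!pKc)
        return;
    for (kstat_t *pKsp = pKc->kc_chain; pKsp != NULL; pKsp = pKsp->ks_next)
        if (!strcmp(pKsp->ks_module, "cpu_info") && kstat_read(pKc, pKsp, NULL) != -1)
        {
            kstat_named_t *pStat = (kstat_named_t *)kstat_data_lookup(pKsp, "clock_MHz");
            if (pStat)
            {
                /* The data type of clock_MHz varies between releases. */
                long long llMHz = pStat->data_type == KSTAT_DATA_INT32  ? pStat->value.i32
                                : pStat->data_type == KSTAT_DATA_UINT32 ? pStat->value.ui32
                                : (long long)pStat->value.i64;
                printf("cpu_info instance %d: %lld MHz\n", pKsp->ks_instance, llMHz);
            }
        }
    kstat_close(pKc);
}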
RTDECL(PRTCPUSET) RTMpGetPresentSet(PRTCPUSET pSet)
{
#ifdef RT_STRICT
    RTCPUID cCpusPresent = 0;
#endif
    RTCpuSetEmpty(pSet);
    RTCPUID cCpus = RTMpGetCount();
    for (RTCPUID idCpu = 0; idCpu < cCpus; idCpu++)
        if (RTMpIsCpuPresent(idCpu))
        {
            RTCpuSetAdd(pSet, idCpu);
#ifdef RT_STRICT
            cCpusPresent++;
#endif
        }
    Assert(cCpusPresent == RTMpGetPresentCount());
    return pSet;
}
DECLHIDDEN(int) rtR0MpNotificationNativeInit(void)
{
    if (ASMAtomicReadBool(&g_fSolCpuWatch) == true)
        return VERR_WRONG_ORDER;

    /*
     * Register the callback, building the online cpu set as we do so.
     */
    RTCpuSetEmpty(&g_rtMpSolCpuSet);
    mutex_enter(&cpu_lock);
    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);

    for (int i = 0; i < (int)RTMpGetCount(); ++i)
        if (cpu_is_online(cpu[i]))
            rtMpNotificationCpuEvent(CPU_ON, i, NULL /* pvArg */);

    ASMAtomicWriteBool(&g_fSolCpuWatch, true);
    mutex_exit(&cpu_lock);
    return VINF_SUCCESS;
}
RTDECL(uint32_t) RTMpGetArraySize(void)
{
    /*
     * Cache the result here.  The whole point of this function is that it
     * will always return the same value, so that should be safe.
     *
     * Note! Because RTCPUSET may be too small to represent all the CPUs, we
     *       check with RTMpGetCount() as well.
     */
    static uint32_t s_cMaxCpus = 0;
    uint32_t cCpus = s_cMaxCpus;
    if (RT_UNLIKELY(cCpus == 0))
    {
        RTCPUSET    CpuSet;
        uint32_t    cCpus1 = RTCpuLastIndex(RTMpGetSet(&CpuSet)) + 1;
        uint32_t    cCpus2 = RTMpGetCount();
        cCpus = RT_MAX(cCpus1, cCpus2);
        ASMAtomicCmpXchgU32(&s_cMaxCpus, cCpus, 0);
        return cCpus;
    }
    return s_cMaxCpus;
}
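A minimal caller sketch of why a stable array size matters: per-CPU slots are indexed by set index rather than CPU id. The countersInit/countersBumpCurrentCpu helpers and the counter table are hypothetical; only the IPRT calls are real.

#include <iprt/mp.h>
#include <iprt/mem.h>
#include <iprt/errcore.h>

/* Hypothetical per-CPU counter table sized once with RTMpGetArraySize(). */
static uint64_t *g_pauCounters = NULL;

static int countersInit(void)
{
    uint32_t cSlots = RTMpGetArraySize();
    g_pauCounters = (uint64_t *)RTMemAllocZ(cSlots * sizeof(uint64_t));
    return g_pauCounters ? VINF_SUCCESS : VERR_NO_MEMORY;
}

static void countersBumpCurrentCpu(void)
{
    /* Translate the current CPU id into a set index before indexing the array. */
    int iCpu = RTMpCpuIdToSetIndex(RTMpCpuId());
    if (iCpu >= 0 && (uint32_t)iCpu < RTMpGetArraySize())
        g_pauCounters[iCpu]++;
}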
RTDECL(RTCPUID) RTMpGetPresentCount(void)
{
    return RTMpGetCount();
}
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    RT_ASSERT_PREEMPTIBLE();
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;

    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /* One-shot omni timers are not supported by the cyclic system. */
    if (   (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
        && u64NanoInterval == 0)
        return VERR_NOT_SUPPORTED;

    /*
     * Allocate and initialize the timer handle.  The omni variant has a
     * variable sized array of tick counts, thus the size calculation.
     */
    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(  (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL
                                            ? RT_UOFFSETOF_DYN(RTTIMER, u.Omni.aPerCpu[RTMpGetCount()])
                                            : sizeof(RTTIMER));
    if (!pTimer)
        return VERR_NO_MEMORY;

    pTimer->u32Magic            = RTTIMER_MAGIC;
    pTimer->cRefs               = 1;
    pTimer->fSuspended          = true;
    pTimer->fSuspendedFromTimer = false;
    pTimer->fIntervalChanged    = false;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        pTimer->fAllCpus     = true;
        pTimer->fSpecificCpu = false;
        pTimer->iCpu         = UINT32_MAX;
    }
    else if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
    {
        pTimer->fAllCpus     = false;
        pTimer->fSpecificCpu = true;
        pTimer->iCpu         = fFlags & RTTIMER_FLAGS_CPU_MASK; /* ASSUMES: index == cpuid */
    }
    else
    {
        pTimer->fAllCpus     = false;
        pTimer->fSpecificCpu = false;
        pTimer->iCpu         = UINT32_MAX;
    }
    pTimer->cNsInterval = u64NanoInterval;
    pTimer->pfnTimer    = pfnTimer;
    pTimer->pvUser      = pvUser;
    pTimer->hCyclicId   = CYCLIC_NONE;

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
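A minimal caller sketch using the public RTTimer API (RTTimerCreateEx/RTTimerStart/RTTimerDestroy) with the RTTIMER_FLAGS_CPU_ALL flag validated above; exampleTimerCallback and exampleStartOmniTimer are illustrative names and the 10ms interval is arbitrary.

#include <iprt/timer.h>
#include <iprt/errcore.h>

/* Hypothetical callback; invoked on every CPU for an RTTIMER_FLAGS_CPU_ALL timer. */
static DECLCALLBACK(void) exampleTimerCallback(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
{
    NOREF(pTimer); NOREF(pvUser); NOREF(iTick);
}

static int exampleStartOmniTimer(PRTTIMER *ppTimer)
{
    /* 10ms periodic omni timer; a one-shot (interval 0) omni timer would be rejected above. */
    int rc = RTTimerCreateEx(ppTimer, 10000000 /* ns */, RTTIMER_FLAGS_CPU_ALL,
                             exampleTimerCallback, NULL /* pvUser */);
    if (RT_SUCCESS(rc))
    {
        rc = RTTimerStart(*ppTimer, 0 /* fire as soon as possible */);
        if (RT_FAILURE(rc))
        {
            RTTimerDestroy(*ppTimer);
            *ppTimer = NULL;
        }
    }
    return rc;
}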
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
    return idCpu != NIL_RTCPUID
        && idCpu < (RTCPUID)RTMpGetCount();
}
RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
    return RTMpGetCount() - 1;
}
RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
    /** @todo darwin R0 MP */
    return RTMpGetCount();
}
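These generic fallbacks all assume a dense CPU id space running from 0 to RTMpGetCount() - 1. A small sketch of the iteration pattern that assumption enables (exampleCountOnlineCpus is illustrative; only IPRT calls shown above are used):

#include <iprt/mp.h>

/* Count online CPUs by walking the dense id range the fallbacks above assume. */
static RTCPUID exampleCountOnlineCpus(void)
{
    RTCPUID cOnline = 0;
    RTCPUID idMax   = RTMpGetMaxCpuId();
    for (RTCPUID idCpu = 0; idCpu <= idMax; idCpu++)
        if (RTMpIsCpuPossible(idCpu) && RTMpIsCpuOnline(idCpu))
            cOnline++;
    return cOnline;
}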
int main()
{
    RTTEST hTest;
    RTEXITCODE rcExit = RTTestInitAndCreate("tstRTMp-1", &hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(hTest);

    /*
     * Present and possible CPUs.
     */
    RTCPUID cCpus = RTMpGetCount();
    if (cCpus > 0)
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetCount -> %u\n", cCpus);
    else
    {
        RTTestIFailed("RTMpGetCount returned zero");
        cCpus = 1;
    }

    RTCPUID cCoreCpus = RTMpGetCoreCount();
    if (cCoreCpus > 0)
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetCoreCount -> %d\n", (int)cCoreCpus);
    else
    {
        RTTestIFailed("RTMpGetCoreCount returned zero");
        cCoreCpus = 1;
    }
    RTTESTI_CHECK(cCoreCpus <= cCpus);

    RTCPUSET Set;
    PRTCPUSET pSet = RTMpGetSet(&Set);
    RTTESTI_CHECK(pSet == &Set);
    if (pSet == &Set)
    {
        RTTESTI_CHECK((RTCPUID)RTCpuSetCount(&Set) == cCpus);

        RTTestIPrintf(RTTESTLVL_ALWAYS, "Possible CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
        {
            RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
            if (RTCpuSetIsMemberByIndex(&Set, iCpu))
            {
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz", iCpu, (int)idCpu,
                              RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu));
                if (RTMpIsCpuPresent(idCpu))
                    RTTestIPrintf(RTTESTLVL_ALWAYS, RTMpIsCpuOnline(idCpu) ? " online\n" : " offline\n");
                else
                {
                    if (!RTMpIsCpuOnline(idCpu))
                        RTTestIPrintf(RTTESTLVL_ALWAYS, " absent\n");
                    else
                    {
                        RTTestIPrintf(RTTESTLVL_ALWAYS, " online but absent!\n");
                        RTTestIFailed("Cpu with index %d is reported as absent by RTMpIsCpuPresent while RTMpIsCpuOnline returns true!\n", iCpu);
                    }
                }
                if (!RTMpIsCpuPossible(idCpu))
                    RTTestIFailed("Cpu with index %d is returned by RTCpuSet but not RTMpIsCpuPossible!\n", iCpu);
            }
            else if (RTMpIsCpuPossible(idCpu))
                RTTestIFailed("Cpu with index %d is returned by RTMpIsCpuPossible but not RTCpuSet!\n", iCpu);
            else if (RTMpGetCurFrequency(idCpu) != 0)
                RTTestIFailed("RTMpGetCurFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu);
            else if (RTMpGetMaxFrequency(idCpu) != 0)
                RTTestIFailed("RTMpGetMaxFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu);
        }
    }
    else
    {
        RTCpuSetEmpty(&Set);
        RTCpuSetAdd(&Set, RTMpCpuIdFromSetIndex(0));
    }

    /*
     * Online CPUs.
     */
    RTCPUID cCpusOnline = RTMpGetOnlineCount();
    if (cCpusOnline > 0)
    {
        if (cCpusOnline <= cCpus)
            RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetOnlineCount -> %d\n", (int)cCpusOnline);
        else
        {
            RTTestIFailed("RTMpGetOnlineCount -> %d, expected <= %d\n", (int)cCpusOnline, (int)cCpus);
            cCpusOnline = cCpus;
        }
    }
    else
    {
        RTTestIFailed("RTMpGetOnlineCount -> %d\n", (int)cCpusOnline);
        cCpusOnline = 1;
    }

    RTCPUID cCoresOnline = RTMpGetOnlineCoreCount();
    if (cCoresOnline > 0)
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetOnlineCoreCount -> %d\n", (int)cCoresOnline);
    else
    {
        RTTestIFailed("RTMpGetOnlineCoreCount -> %d, expected <= %d\n", (int)cCoresOnline, (int)cCpusOnline);
        cCoresOnline = 1;
    }
    RTTESTI_CHECK(cCoresOnline <= cCpusOnline);

    RTCPUSET SetOnline;
    pSet = RTMpGetOnlineSet(&SetOnline);
    if (pSet == &SetOnline)
    {
        if (RTCpuSetCount(&SetOnline) <= 0)
            RTTestIFailed("RTMpGetOnlineSet returned an empty set!\n");
        else if ((RTCPUID)RTCpuSetCount(&SetOnline) > cCpus)
            RTTestIFailed("RTMpGetOnlineSet returned a too high value; %d, expected <= %d\n",
                          RTCpuSetCount(&SetOnline), cCpus);
        RTTestIPrintf(RTTESTLVL_ALWAYS, "Online CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (RTCpuSetIsMemberByIndex(&SetOnline, iCpu))
            {
                RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu,
                              RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu),
                              RTMpIsCpuOnline(idCpu) ? "online" : "offline");
                if (!RTCpuSetIsMemberByIndex(&Set, iCpu))
                    RTTestIFailed("online cpu with index %2d is not a member of the possible cpu set!\n", iCpu);
            }

        /* There isn't any sane way of testing RTMpIsCpuOnline really... :-/ */
    }
    else
        RTTestIFailed("RTMpGetOnlineSet -> %p, expected %p\n", pSet, &SetOnline);

    /*
     * Present CPUs.
     */
    RTCPUID cCpusPresent = RTMpGetPresentCount();
    if (cCpusPresent > 0)
    {
        if (   cCpusPresent <= cCpus
            && cCpusPresent >= cCpusOnline)
            RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetPresentCount -> %d\n", (int)cCpusPresent);
        else
            RTTestIFailed("RTMpGetPresentCount -> %d, expected <= %d and >= %d\n",
                          (int)cCpusPresent, (int)cCpus, (int)cCpusOnline);
    }
    else
    {
        RTTestIFailed("RTMpGetPresentCount -> %d\n", (int)cCpusPresent);
        cCpusPresent = 1;
    }

    RTCPUSET SetPresent;
    pSet = RTMpGetPresentSet(&SetPresent);
    if (pSet == &SetPresent)
    {
        if (RTCpuSetCount(&SetPresent) <= 0)
            RTTestIFailed("RTMpGetPresentSet returned an empty set!\n");
        else if ((RTCPUID)RTCpuSetCount(&SetPresent) != cCpusPresent)
            RTTestIFailed("RTMpGetPresentSet returned a bad value; %d, expected = %d\n",
                          RTCpuSetCount(&SetPresent), cCpusPresent);
        RTTestIPrintf(RTTESTLVL_ALWAYS, "Present CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (RTCpuSetIsMemberByIndex(&SetPresent, iCpu))
            {
                RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu,
                              RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu),
                              RTMpIsCpuPresent(idCpu) ? "present" : "absent");
                if (!RTCpuSetIsMemberByIndex(&Set, iCpu))
                    RTTestIFailed("present cpu with index %2d is not a member of the possible cpu set!\n", iCpu);
            }

        /* There isn't any sane way of testing RTMpIsCpuPresent really... :-/ */
    }
    else
        RTTestIFailed("RTMpGetPresentSet -> %p, expected %p\n", pSet, &SetPresent);

    /* Find an online cpu for the next test. */
    RTCPUID idCpuOnline;
    for (idCpuOnline = 0; idCpuOnline < RTCPUSET_MAX_CPUS; idCpuOnline++)
        if (RTMpIsCpuOnline(idCpuOnline))
            break;

    /*
     * Quick test of RTMpGetDescription.
     */
    char szBuf[64];
    int rc = RTMpGetDescription(idCpuOnline, &szBuf[0], sizeof(szBuf));
    if (RT_SUCCESS(rc))
    {
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetDescription -> '%s'\n", szBuf);

        size_t cch = strlen(szBuf);
        rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch);
        if (rc != VERR_BUFFER_OVERFLOW)
            RTTestIFailed("RTMpGetDescription -> %Rrc, expected VERR_BUFFER_OVERFLOW\n", rc);

        rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch + 1);
        if (RT_FAILURE(rc))
            RTTestIFailed("RTMpGetDescription -> %Rrc, expected VINF_SUCCESS\n", rc);
    }
    else
        RTTestIFailed("RTMpGetDescription -> %Rrc\n", rc);

    return RTTestSummaryAndDestroy(hTest);
}
int main()
{
    RTR3InitExeNoArguments(0);
    RTPrintf("tstMp-1: TESTING...\n");

    /*
     * Present and possible CPUs.
     */
    RTCPUID cCpus = RTMpGetCount();
    if (cCpus > 0)
        RTPrintf("tstMp-1: RTMpGetCount -> %d\n", (int)cCpus);
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetCount -> %d\n", (int)cCpus);
        g_cErrors++;
        cCpus = 1;
    }

    RTCPUSET Set;
    PRTCPUSET pSet = RTMpGetSet(&Set);
    if (pSet == &Set)
    {
        if ((RTCPUID)RTCpuSetCount(&Set) != cCpus)
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetSet returned a set with a different cpu count; %d, expected %d\n",
                     RTCpuSetCount(&Set), cCpus);
            g_cErrors++;
        }
        RTPrintf("tstMp-1: Possible CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
        {
            RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
            if (RTCpuSetIsMemberByIndex(&Set, iCpu))
            {
                RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz", iCpu, (int)idCpu,
                         RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu));
                if (RTMpIsCpuPresent(idCpu))
                    RTPrintf(RTMpIsCpuOnline(idCpu) ? " online\n" : " offline\n");
                else
                {
                    if (!RTMpIsCpuOnline(idCpu))
                        RTPrintf(" absent\n");
                    else
                    {
                        RTPrintf(" online but absent!\n");
                        RTPrintf("tstMp-1: FAILURE: Cpu with index %d is reported as absent by RTMpIsCpuPresent while RTMpIsCpuOnline returns true!\n", iCpu);
                        g_cErrors++;
                    }
                }
                if (!RTMpIsCpuPossible(idCpu))
                {
                    RTPrintf("tstMp-1: FAILURE: Cpu with index %d is returned by RTCpuSet but not RTMpIsCpuPossible!\n", iCpu);
                    g_cErrors++;
                }
            }
            else if (RTMpIsCpuPossible(idCpu))
            {
                RTPrintf("tstMp-1: FAILURE: Cpu with index %d is returned by RTMpIsCpuPossible but not RTCpuSet!\n", iCpu);
                g_cErrors++;
            }
            else if (RTMpGetCurFrequency(idCpu) != 0)
            {
                RTPrintf("tstMp-1: FAILURE: RTMpGetCurFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu);
                g_cErrors++;
            }
            else if (RTMpGetMaxFrequency(idCpu) != 0)
            {
                RTPrintf("tstMp-1: FAILURE: RTMpGetMaxFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu);
                g_cErrors++;
            }
        }
    }
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetSet -> %p, expected %p\n", pSet, &Set);
        g_cErrors++;
        RTCpuSetEmpty(&Set);
        RTCpuSetAdd(&Set, RTMpCpuIdFromSetIndex(0));
    }

    /*
     * Online CPUs.
     */
    RTCPUID cCpusOnline = RTMpGetOnlineCount();
    if (cCpusOnline > 0)
    {
        if (cCpusOnline <= cCpus)
            RTPrintf("tstMp-1: RTMpGetOnlineCount -> %d\n", (int)cCpusOnline);
        else
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineCount -> %d, expected <= %d\n", (int)cCpusOnline, (int)cCpus);
            g_cErrors++;
            cCpusOnline = cCpus;
        }
    }
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineCount -> %d\n", (int)cCpusOnline);
        g_cErrors++;
        cCpusOnline = 1;
    }

    RTCPUSET SetOnline;
    pSet = RTMpGetOnlineSet(&SetOnline);
    if (pSet == &SetOnline)
    {
        if (RTCpuSetCount(&SetOnline) <= 0)
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet returned an empty set!\n");
            g_cErrors++;
        }
        else if ((RTCPUID)RTCpuSetCount(&SetOnline) > cCpus)
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet returned a too high value; %d, expected <= %d\n",
                     RTCpuSetCount(&SetOnline), cCpus);
            g_cErrors++;
        }
        RTPrintf("tstMp-1: Online CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (RTCpuSetIsMemberByIndex(&SetOnline, iCpu))
            {
                RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
                RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu,
                         RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu),
                         RTMpIsCpuOnline(idCpu) ? "online" : "offline");
                if (!RTCpuSetIsMemberByIndex(&Set, iCpu))
                {
                    RTPrintf("tstMp-1: FAILURE: online cpu with index %2d is not a member of the possible cpu set!\n", iCpu);
                    g_cErrors++;
                }
            }

        /* There isn't any sane way of testing RTMpIsCpuOnline really... :-/ */
    }
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet -> %p, expected %p\n", pSet, &SetOnline);
        g_cErrors++;
    }

    /*
     * Present CPUs.
     */
    RTCPUID cCpusPresent = RTMpGetPresentCount();
    if (cCpusPresent > 0)
    {
        if (   cCpusPresent <= cCpus
            && cCpusPresent >= cCpusOnline)
            RTPrintf("tstMp-1: RTMpGetPresentCount -> %d\n", (int)cCpusPresent);
        else
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetPresentCount -> %d, expected <= %d and >= %d\n",
                     (int)cCpusPresent, (int)cCpus, (int)cCpusOnline);
            g_cErrors++;
        }
    }
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetPresentCount -> %d\n", (int)cCpusPresent);
        g_cErrors++;
        cCpusPresent = 1;
    }

    RTCPUSET SetPresent;
    pSet = RTMpGetPresentSet(&SetPresent);
    if (pSet == &SetPresent)
    {
        if (RTCpuSetCount(&SetPresent) <= 0)
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet returned an empty set!\n");
            g_cErrors++;
        }
        else if ((RTCPUID)RTCpuSetCount(&SetPresent) != cCpusPresent)
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet returned a bad value; %d, expected = %d\n",
                     RTCpuSetCount(&SetPresent), cCpusPresent);
            g_cErrors++;
        }
        RTPrintf("tstMp-1: Present CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (RTCpuSetIsMemberByIndex(&SetPresent, iCpu))
            {
                RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
                RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu,
                         RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu),
                         RTMpIsCpuPresent(idCpu) ? "present" : "absent");
                if (!RTCpuSetIsMemberByIndex(&Set, iCpu))
                {
                    RTPrintf("tstMp-1: FAILURE: present cpu with index %2d is not a member of the possible cpu set!\n", iCpu);
                    g_cErrors++;
                }
            }

        /* There isn't any sane way of testing RTMpIsCpuPresent really... :-/ */
    }
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet -> %p, expected %p\n", pSet, &SetPresent);
        g_cErrors++;
    }

    /* Find an online cpu for the next test. */
    RTCPUID idCpuOnline;
    for (idCpuOnline = 0; idCpuOnline < RTCPUSET_MAX_CPUS; idCpuOnline++)
        if (RTMpIsCpuOnline(idCpuOnline))
            break;

    /*
     * Quick test of RTMpGetDescription.
     */
    char szBuf[64];
    int rc = RTMpGetDescription(idCpuOnline, &szBuf[0], sizeof(szBuf));
    if (RT_SUCCESS(rc))
    {
        RTPrintf("tstMp-1: RTMpGetDescription -> '%s'\n", szBuf);

        size_t cch = strlen(szBuf);
        rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch);
        if (rc != VERR_BUFFER_OVERFLOW)
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc, expected VERR_BUFFER_OVERFLOW\n", rc);
            g_cErrors++;
        }
        rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch + 1);
        if (RT_FAILURE(rc))
        {
            RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc, expected VINF_SUCCESS\n", rc);
            g_cErrors++;
        }
    }
    else
    {
        RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc\n", rc);
        g_cErrors++;
    }

    if (!g_cErrors)
        RTPrintf("tstMp-1: SUCCESS\n");
    else
        RTPrintf("tstMp-1: FAILURE - %d errors\n", g_cErrors);
    return !!g_cErrors;
}
RTDECL(RTCPUID) RTMpGetCoreCount(void)
{
    /*
     * Resolve the API dynamically (one try) as it requires XP w/ sp3 or later.
     */
    typedef BOOL (WINAPI *PFNGETLOGICALPROCINFO)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
    static PFNGETLOGICALPROCINFO s_pfnGetLogicalProcInfo = (PFNGETLOGICALPROCINFO)~(uintptr_t)0;
    if (s_pfnGetLogicalProcInfo == (PFNGETLOGICALPROCINFO)~(uintptr_t)0)
        s_pfnGetLogicalProcInfo = (PFNGETLOGICALPROCINFO)RTLdrGetSystemSymbol("kernel32.dll", "GetLogicalProcessorInformation");

    /*
     * Sadly, on XP and Server 2003, even if the API is present, it does not tell us
     * how many physical cores there are (any package will look like a single core).
     * That is worse than not using the API at all, so just skip it unless it's Vista+.
     */
    bool fIsVistaOrLater = false;
    OSVERSIONINFOEX OSInfoEx = { 0 };
    OSInfoEx.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
    if (   GetVersionEx((LPOSVERSIONINFO)&OSInfoEx)
        && (OSInfoEx.dwPlatformId == VER_PLATFORM_WIN32_NT)
        && (OSInfoEx.dwMajorVersion >= 6))
        fIsVistaOrLater = true;

    if (s_pfnGetLogicalProcInfo && fIsVistaOrLater)
    {
        /*
         * Query the information.  This unfortunately requires a buffer, so we
         * start with a guess and let Windows advise us if it's too small.
         */
        DWORD                                   cbSysProcInfo = _4K;
        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION   paSysInfo = NULL;
        BOOL                                    fRc = FALSE;
        do
        {
            cbSysProcInfo = RT_ALIGN_32(cbSysProcInfo, 256);
            void *pv = RTMemRealloc(paSysInfo, cbSysProcInfo);
            if (!pv)
                break;
            paSysInfo = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)pv;
            fRc = s_pfnGetLogicalProcInfo(paSysInfo, &cbSysProcInfo);
        } while (!fRc && GetLastError() == ERROR_INSUFFICIENT_BUFFER);
        if (fRc)
        {
            /*
             * Parse the result.
             */
            uint32_t cCores = 0;
            uint32_t i      = cbSysProcInfo / sizeof(paSysInfo[0]);
            while (i-- > 0)
                if (paSysInfo[i].Relationship == RelationProcessorCore)
                    cCores++;

            RTMemFree(paSysInfo);
            Assert(cCores > 0);
            return cCores;
        }

        RTMemFree(paSysInfo);
    }

    /* If we don't have the necessary API or if it failed, return the same
       value as the generic implementation. */
    return RTMpGetCount();
}
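A plain-Win32 sketch of the same grow-on-ERROR_INSUFFICIENT_BUFFER pattern without IPRT, assuming only the documented GetLogicalProcessorInformation API and the C runtime; countPhysicalCores is an illustrative name.

#include <windows.h>
#include <stdlib.h>

/* Returns the physical core count, or 0 on failure. */
static DWORD countPhysicalCores(void)
{
    DWORD cb = 4096;
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *paInfo = NULL;
    BOOL fOk = FALSE;
    do
    {
        void *pv = realloc(paInfo, cb);
        if (!pv)
            break;
        paInfo = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION *)pv;
        fOk = GetLogicalProcessorInformation(paInfo, &cb); /* on failure cb receives the required size */
    } while (!fOk && GetLastError() == ERROR_INSUFFICIENT_BUFFER);

    DWORD cCores = 0;
    if (fOk)
        for (DWORD i = 0; i < cb / sizeof(paInfo[0]); i++)
            if (paInfo[i].Relationship == RelationProcessorCore)
                cCores++;
    free(paInfo);
    return cCores;
}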
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    RTTIMER_ASSERT_VALID_RET(pTimer);
    RT_ASSERT_INTS_ON();

    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;

    /* One-shot timers are not supported by the cyclic system. */
    if (pTimer->interval == 0)
        return VERR_NOT_SUPPORTED;

    pTimer->fSuspended = false;
    if (pTimer->fAllCpu)
    {
        PRTR0OMNITIMERSOL pOmniTimer = RTMemAllocZ(sizeof(RTR0OMNITIMERSOL));
        if (RT_UNLIKELY(!pOmniTimer))
            return VERR_NO_MEMORY;

        pOmniTimer->au64Ticks = RTMemAllocZ(RTMpGetCount() * sizeof(uint64_t));
        if (RT_UNLIKELY(!pOmniTimer->au64Ticks))
        {
            RTMemFree(pOmniTimer);
            return VERR_NO_MEMORY;
        }

        /*
         * Setup omni (all CPU) timer.  The Omni-CPU online event will fire
         * and from there we setup periodic timers per CPU.
         */
        pTimer->pOmniTimer  = pOmniTimer;
        pOmniTimer->u64When = pTimer->interval + RTTimeNanoTS();

        cyc_omni_handler_t hOmni;
        hOmni.cyo_online  = rtTimerSolOmniCpuOnline;
        hOmni.cyo_offline = NULL;
        hOmni.cyo_arg     = pTimer;

        mutex_enter(&cpu_lock);
        pTimer->hCyclicId = cyclic_add_omni(&hOmni);
        mutex_exit(&cpu_lock);
    }
    else
    {
        int iCpu = SOL_TIMER_ANY_CPU;
        if (pTimer->fSpecificCpu)
        {
            iCpu = pTimer->iCpu;
            if (!RTMpIsCpuOnline(iCpu)) /* ASSUMES: index == cpuid */
                return VERR_CPU_OFFLINE;
        }

        PRTR0SINGLETIMERSOL pSingleTimer = RTMemAllocZ(sizeof(RTR0SINGLETIMERSOL));
        if (RT_UNLIKELY(!pSingleTimer))
            return VERR_NO_MEMORY;

        pTimer->pSingleTimer = pSingleTimer;
        pSingleTimer->hHandler.cyh_func  = rtTimerSolCallbackWrapper;
        pSingleTimer->hHandler.cyh_arg   = pTimer;
        pSingleTimer->hHandler.cyh_level = CY_LOCK_LEVEL;

        mutex_enter(&cpu_lock);
        if (iCpu != SOL_TIMER_ANY_CPU && !cpu_is_online(cpu[iCpu]))
        {
            mutex_exit(&cpu_lock);
            RTMemFree(pSingleTimer);
            pTimer->pSingleTimer = NULL;
            return VERR_CPU_OFFLINE;
        }

        pSingleTimer->hFireTime.cyt_when = u64First + RTTimeNanoTS();
        if (pTimer->interval == 0)
        {
            /** @todo use gethrtime_max instead of LLONG_MAX? */
            AssertCompileSize(pSingleTimer->hFireTime.cyt_interval, sizeof(long long));
            pSingleTimer->hFireTime.cyt_interval = LLONG_MAX - pSingleTimer->hFireTime.cyt_when;
        }
        else
            pSingleTimer->hFireTime.cyt_interval = pTimer->interval;

        pTimer->hCyclicId = cyclic_add(&pSingleTimer->hHandler, &pSingleTimer->hFireTime);
        if (iCpu != SOL_TIMER_ANY_CPU)
            cyclic_bind(pTimer->hCyclicId, cpu[iCpu], NULL /* cpupart */);

        mutex_exit(&cpu_lock);
    }

    return VINF_SUCCESS;
}