/**
 * The native NT processor change callback.
 *
 * @param   pvUser              The user argument given at registration, not used (NULL).
 * @param   pChangeContext      The processor change notification context.
 * @param   pOperationStatus    Where to return the operation status, optional
 *                              (may be NULL).
 */
static VOID __stdcall rtMpNotificationNtCallback(PVOID pvUser, PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeContext,
                                                 PNTSTATUS pOperationStatus)
{
    NOREF(pvUser);
    AssertPtr(pChangeContext);
    AssertPtrNull(pOperationStatus);

    RTCPUID idCpu = pChangeContext->NtNumber;
    switch (pChangeContext->State)
    {
        /* Nothing to do for add-start or add-failure; we only act on completion. */
        case KeProcessorAddStartNotify:
        case KeProcessorAddFailureNotify:
            break;

        case KeProcessorAddCompleteNotify:
            /* Update the active CPU set before doing callback round. */
            RTCpuSetAdd(&g_rtMpNtCpuSet, idCpu);
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

        //case KeProcessorDelCompleteNotify:
        //    rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
        //    break;

        default:
            AssertMsgFailed(("Unexpected state=%d idCpu=%d\n", pChangeContext->State, (int)idCpu));
            break;
    }

    /* AssertPtrNull above documents that pOperationStatus may be NULL, so
       guard the write instead of dereferencing it unconditionally. */
    if (pOperationStatus)
        *pOperationStatus = STATUS_SUCCESS;
}
/**
 * Solaris callback function for Mp event notification.
 *
 * @returns Solaris error code (always 0 here).
 * @param   CpuState    The current event/state of the CPU.
 * @param   iCpu        Which CPU is this event for.
 * @param   pvArg       Ignored.
 *
 * @remarks This function assumes index == RTCPUID.
 *          We may -not- be firing on the CPU going online/offline and called
 *          with preemption enabled.
 */
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
    NOREF(pvArg);

    /*
     * Update the CPU set first regardless of which CPU we are running on;
     * this is purely atomic accounting.  Anything other than on/off is
     * ignored entirely.
     */
    RTMPEVENT enmMpEvent;
    switch (CpuState)
    {
        case CPU_ON:
            enmMpEvent = RTMPEVENT_ONLINE;
            RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
            break;

        case CPU_OFF:
            enmMpEvent = RTMPEVENT_OFFLINE;
            RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
            break;

        default:
            return 0; /* Not an event we care about. */
    }

    rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    return 0;
}
/**
 * Gets the set of all possible CPUs (assumed to be ids 0..count-1).
 *
 * @returns pSet.
 * @param   pSet    Where to store the CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    int const cCpus = RTMpGetCount();
    for (int idCpu = 0; idCpu < cCpus; idCpu++)
        RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Gets the set of online CPUs by probing ids 0..count-1.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID idCpu = RTMpGetCount();
    while (idCpu-- > 0)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Gets the set of online CPUs, probing every possible Linux CPU id.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID idCpu = rtMpLinuxMaxCpus();
    while (idCpu-- > 0)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Gets the set of online CPUs.
 *
 * On SMP kernels every id up to and including RTMpGetMaxCpuId() is probed;
 * on UP kernels the set trivially contains the one and only CPU.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
#ifdef CONFIG_SMP
    RTCPUID const idMax = RTMpGetMaxCpuId(); /* inclusive */
    for (RTCPUID idCpu = 0; idCpu <= idMax; idCpu++)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
#else
    RTCpuSetAdd(pSet, RTMpCpuId());
#endif
    return pSet;
}
/**
 * Gets the set of possible CPUs, probing every FreeBSD CPU id.
 *
 * @returns pSet.
 * @param   pSet    Where to store the CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID idCpu = rtMpFreeBsdMaxCpus();
    while (idCpu-- > 0)
        if (RTMpIsCpuPossible(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Solaris callback function for Mp event notification.
 *
 * @returns Solaris error code (always 0 here).
 * @param   CpuState    The current event/state of the CPU.
 * @param   iCpu        Which CPU is this event for.
 * @param   pvArg       Ignored.
 *
 * @remarks This function assumes index == RTCPUID.
 */
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
    RTMPEVENT enmMpEvent;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /*
     * Update our CPU set structures first regardless of whether we've been
     * scheduled on the right CPU or not, this is just atomic accounting.
     */
    if (CpuState == CPU_ON)
    {
        enmMpEvent = RTMPEVENT_ONLINE;
        RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
    }
    else if (CpuState == CPU_OFF)
    {
        enmMpEvent = RTMPEVENT_OFFLINE;
        RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
    }
    else
    {
        /* Bugfix: the original returned here with preemption still disabled,
           leaking the disabled-preemption state for uninteresting events. */
        RTThreadPreemptRestore(&PreemptState);
        return 0;
    }

    /*
     * Since we don't absolutely need to do CPU bound code in any of the CPU offline
     * notification hooks, run it on the current CPU. Scheduling a callback to execute
     * on the CPU going offline at this point is too late and will not work reliably.
     */
    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
    if (   fRunningOnTargetCpu
        || enmMpEvent == RTMPEVENT_OFFLINE)
        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    else
    {
        /*
         * We're not on the target CPU, schedule (synchronous) the event notification
         * callback to run on the target CPU i.e. the CPU that was online'd.
         */
        RTMPARGS Args;
        RT_ZERO(Args);
        Args.pvUser1 = &enmMpEvent;
        Args.pvUser2 = NULL;
        Args.idCpu   = iCpu;
        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
    }

    RTThreadPreemptRestore(&PreemptState);
    NOREF(pvArg);
    return 0;
}
/**
 * Gets the set of online CPUs, probing every possible Darwin CPU id.
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    /* Note: a dead '#if 0 return RTMpGetSet(pSet);' alternative was removed. */
    RTCpuSetEmpty(pSet);
    RTCPUID cMax = rtMpDarwinMaxCpus();
    for (RTCPUID idCpu = 0; idCpu < cMax; idCpu++)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Gets the set of online CPUs, probing every id up to and including
 * RTMpGetMaxCpuId().
 *
 * @returns pSet.
 * @param   pSet    Where to store the online CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID const idMax = RTMpGetMaxCpuId(); /* inclusive */
    for (RTCPUID idCpu = 0; idCpu <= idMax; idCpu++)
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Gets the set of possible CPUs, probing every id up to and including
 * RTMpGetMaxCpuId().
 *
 * @returns pSet.
 * @param   pSet    Where to store the CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCpuSetEmpty(pSet);
    RTCPUID const idMax = RTMpGetMaxCpuId(); /* it's inclusive */
    for (RTCPUID idCpu = 0; idCpu <= idMax; idCpu++)
        if (RTMpIsCpuPossible(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Gets the set of possible CPUs, probing every possible Darwin CPU id.
 *
 * @returns pSet.
 * @param   pSet    Where to store the CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    /* Note: a dead '#if 0' alternative (which also ignored pSet and returned
       a set by value) was removed. */
    RTCpuSetEmpty(pSet);
    RTCPUID cMax = rtMpDarwinMaxCpus();
    for (RTCPUID idCpu = 0; idCpu < cMax; idCpu++)
        if (RTMpIsCpuPossible(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    return pSet;
}
/**
 * Queries the current thread's CPU affinity (Solaris).
 *
 * @returns IPRT status code.
 * @param   pCpuSet     Where to return the affinity set.  On no binding the
 *                      whole present set is returned, otherwise just the one
 *                      CPU the LWP is bound to.
 */
RTR3DECL(int) RTThreadGetAffinity(PRTCPUSET pCpuSet)
{
    processorid_t iOldCpu;
    if (processor_bind(P_LWPID, P_MYID, PBIND_QUERY, &iOldCpu) != 0)
        return RTErrConvertFromErrno(errno);

    if (iOldCpu == PBIND_NONE)
    {
        /* Not bound - the thread may run on any present CPU. */
        RTMpGetPresentSet(pCpuSet);
        return VINF_SUCCESS;
    }

    /* Bound to exactly one CPU. */
    RTCpuSetEmpty(pCpuSet);
    return RTCpuSetAdd(pCpuSet, iOldCpu) == 0 ? VINF_SUCCESS : VERR_INTERNAL_ERROR_5;
}
/**
 * Gets the set of present CPUs by probing ids 0..count-1.
 *
 * @returns pSet.
 * @param   pSet    Where to store the present CPU set.
 */
RTDECL(PRTCPUSET) RTMpGetPresentSet(PRTCPUSET pSet)
{
#ifdef RT_STRICT
    RTCPUID cCpusPresent = 0; /* strict builds cross-check the count below */
#endif
    RTCpuSetEmpty(pSet);
    RTCPUID idCpu = RTMpGetCount();
    while (idCpu-- > 0)
        if (RTMpIsCpuPresent(idCpu))
        {
            RTCpuSetAdd(pSet, idCpu);
#ifdef RT_STRICT
            cCpusPresent++;
#endif
        }
    Assert(cCpusPresent == RTMpGetPresentCount());
    return pSet;
}
/**
 * Test driver for the RTMp APIs (tstRTMp-1).
 *
 * Exercises the count/set/online/present queries for cross-consistency and
 * does a quick RTMpGetDescription buffer-size check.  No functional changes
 * here, only formatting and comments.
 */
int main()
{
    RTTEST hTest;
    RTEXITCODE rcExit = RTTestInitAndCreate("tstRTMp-1", &hTest);
    if (rcExit != RTEXITCODE_SUCCESS)
        return rcExit;
    RTTestBanner(hTest);

    /*
     * Present and possible CPUs.
     */
    RTCPUID cCpus = RTMpGetCount();
    if (cCpus > 0)
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetCount -> %u\n", cCpus);
    else
    {
        RTTestIFailed("RTMpGetCount returned zero");
        cCpus = 1; /* keep the subsequent checks meaningful */
    }

    RTCPUID cCoreCpus = RTMpGetCoreCount();
    if (cCoreCpus > 0)
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetCoreCount -> %d\n", (int)cCoreCpus);
    else
    {
        RTTestIFailed("RTMpGetCoreCount returned zero");
        cCoreCpus = 1;
    }
    RTTESTI_CHECK(cCoreCpus <= cCpus);

    RTCPUSET Set;
    PRTCPUSET pSet = RTMpGetSet(&Set);
    RTTESTI_CHECK(pSet == &Set);
    if (pSet == &Set)
    {
        RTTESTI_CHECK((RTCPUID)RTCpuSetCount(&Set) == cCpus);

        /* Walk every set index and cross-check membership against the
           RTMpIsCpu{Possible,Present,Online} predicates. */
        RTTestIPrintf(RTTESTLVL_ALWAYS, "Possible CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
        {
            RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
            if (RTCpuSetIsMemberByIndex(&Set, iCpu))
            {
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz", iCpu, (int)idCpu,
                              RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu));
                if (RTMpIsCpuPresent(idCpu))
                    RTTestIPrintf(RTTESTLVL_ALWAYS, RTMpIsCpuOnline(idCpu) ? " online\n" : " offline\n");
                else
                {
                    if (!RTMpIsCpuOnline(idCpu))
                        RTTestIPrintf(RTTESTLVL_ALWAYS, " absent\n");
                    else
                    {
                        /* Online but not present is a contradiction. */
                        RTTestIPrintf(RTTESTLVL_ALWAYS, " online but absent!\n");
                        RTTestIFailed("Cpu with index %d is report as !RTIsCpuPresent while RTIsCpuOnline returns true!\n", iCpu);
                    }
                }
                if (!RTMpIsCpuPossible(idCpu))
                    RTTestIFailed("Cpu with index %d is returned by RTCpuSet but not RTMpIsCpuPossible!\n", iCpu);
            }
            else if (RTMpIsCpuPossible(idCpu))
                RTTestIFailed("Cpu with index %d is returned by RTMpIsCpuPossible but not RTCpuSet!\n", iCpu);
            else if (RTMpGetCurFrequency(idCpu) != 0)
                RTTestIFailed("RTMpGetCurFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu);
            else if (RTMpGetMaxFrequency(idCpu) != 0)
                RTTestIFailed("RTMpGetMaxFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu);
        }
    }
    else
    {
        /* RTMpGetSet failed; fake a one-CPU set so later checks can run. */
        RTCpuSetEmpty(&Set);
        RTCpuSetAdd(&Set, RTMpCpuIdFromSetIndex(0));
    }

    /*
     * Online CPUs.
     */
    RTCPUID cCpusOnline = RTMpGetOnlineCount();
    if (cCpusOnline > 0)
    {
        if (cCpusOnline <= cCpus)
            RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetOnlineCount -> %d\n", (int)cCpusOnline);
        else
        {
            RTTestIFailed("RTMpGetOnlineCount -> %d, expected <= %d\n", (int)cCpusOnline, (int)cCpus);
            cCpusOnline = cCpus;
        }
    }
    else
    {
        RTTestIFailed("RTMpGetOnlineCount -> %d\n", (int)cCpusOnline);
        cCpusOnline = 1;
    }

    RTCPUID cCoresOnline = RTMpGetOnlineCoreCount();
    if (cCoresOnline > 0)
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetOnlineCoreCount -> %d\n", (int)cCoresOnline);
    else
    {
        RTTestIFailed("RTMpGetOnlineCoreCount -> %d, expected <= %d\n", (int)cCoresOnline, (int)cCpusOnline);
        cCoresOnline = 1;
    }
    RTTESTI_CHECK(cCoresOnline <= cCpusOnline);

    RTCPUSET SetOnline;
    pSet = RTMpGetOnlineSet(&SetOnline);
    if (pSet == &SetOnline)
    {
        if (RTCpuSetCount(&SetOnline) <= 0)
            RTTestIFailed("RTMpGetOnlineSet returned an empty set!\n");
        else if ((RTCPUID)RTCpuSetCount(&SetOnline) > cCpus)
            RTTestIFailed("RTMpGetOnlineSet returned a too high value; %d, expected <= %d\n",
                          RTCpuSetCount(&SetOnline), cCpus);
        /* Every online CPU must also be in the possible set. */
        RTTestIPrintf(RTTESTLVL_ALWAYS, "Online CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (RTCpuSetIsMemberByIndex(&SetOnline, iCpu))
            {
                RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu,
                              RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu),
                              RTMpIsCpuOnline(idCpu) ? "online" : "offline");
                if (!RTCpuSetIsMemberByIndex(&Set, iCpu))
                    RTTestIFailed("online cpu with index %2d is not a member of the possible cpu set!\n", iCpu);
            }
        /* There isn't any sane way of testing RTMpIsCpuOnline really... :-/ */
    }
    else
        RTTestIFailed("RTMpGetOnlineSet -> %p, expected %p\n", pSet, &Set);

    /*
     * Present CPUs.
     */
    RTCPUID cCpusPresent = RTMpGetPresentCount();
    if (cCpusPresent > 0)
    {
        /* present count must lie between online count and total count */
        if (   cCpusPresent <= cCpus
            && cCpusPresent >= cCpusOnline)
            RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetPresentCount -> %d\n", (int)cCpusPresent);
        else
            RTTestIFailed("RTMpGetPresentCount -> %d, expected <= %d and >= %d\n",
                          (int)cCpusPresent, (int)cCpus, (int)cCpusOnline);
    }
    else
    {
        RTTestIFailed("RTMpGetPresentCount -> %d\n", (int)cCpusPresent);
        cCpusPresent = 1;
    }

    RTCPUSET SetPresent;
    pSet = RTMpGetPresentSet(&SetPresent);
    if (pSet == &SetPresent)
    {
        if (RTCpuSetCount(&SetPresent) <= 0)
            RTTestIFailed("RTMpGetPresentSet returned an empty set!\n");
        else if ((RTCPUID)RTCpuSetCount(&SetPresent) != cCpusPresent)
            RTTestIFailed("RTMpGetPresentSet returned a bad value; %d, expected = %d\n",
                          RTCpuSetCount(&SetPresent), cCpusPresent);
        /* Every present CPU must also be in the possible set. */
        RTTestIPrintf(RTTESTLVL_ALWAYS, "Present CPU mask:\n");
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (RTCpuSetIsMemberByIndex(&SetPresent, iCpu))
            {
                RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu);
                RTTestIPrintf(RTTESTLVL_ALWAYS, "%2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu,
                              RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu),
                              RTMpIsCpuPresent(idCpu) ? "present" : "absent");
                if (!RTCpuSetIsMemberByIndex(&Set, iCpu))
                    RTTestIFailed("online cpu with index %2d is not a member of the possible cpu set!\n", iCpu);
            }
        /* There isn't any sane way of testing RTMpIsCpuPresent really... :-/ */
    }
    else
        RTTestIFailed("RTMpGetPresentSet -> %p, expected %p\n", pSet, &Set);

    /* Find an online cpu for the next test.
       NOTE(review): this assumes CPU id == set index — TODO confirm on
       platforms with sparse CPU ids. */
    RTCPUID idCpuOnline;
    for (idCpuOnline = 0; idCpuOnline < RTCPUSET_MAX_CPUS; idCpuOnline++)
        if (RTMpIsCpuOnline(idCpuOnline))
            break;

    /*
     * Quick test of RTMpGetDescription.
     */
    char szBuf[64];
    int rc = RTMpGetDescription(idCpuOnline, &szBuf[0], sizeof(szBuf));
    if (RT_SUCCESS(rc))
    {
        RTTestIPrintf(RTTESTLVL_ALWAYS, "RTMpGetDescription -> '%s'\n", szBuf);

        /* A buffer one byte too small must overflow; an exact fit must work. */
        size_t cch = strlen(szBuf);
        rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch);
        if (rc != VERR_BUFFER_OVERFLOW)
            RTTestIFailed("RTMpGetDescription -> %Rrc, expected VERR_BUFFER_OVERFLOW\n", rc);
        rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch + 1);
        if (RT_FAILURE(rc))
            RTTestIFailed("RTMpGetDescription -> %Rrc, expected VINF_SUCCESS\n", rc);
    }
    else
        RTTestIFailed("RTMpGetDescription -> %Rrc\n", rc);

    return RTTestSummaryAndDestroy(hTest);
}
int main() { RTR3InitExeNoArguments(0); RTPrintf("tstMp-1: TESTING...\n"); /* * Present and possible CPUs. */ RTCPUID cCpus = RTMpGetCount(); if (cCpus > 0) RTPrintf("tstMp-1: RTMpGetCount -> %d\n", (int)cCpus); else { RTPrintf("tstMp-1: FAILURE: RTMpGetCount -> %d\n", (int)cCpus); g_cErrors++; cCpus = 1; } RTCPUSET Set; PRTCPUSET pSet = RTMpGetSet(&Set); if (pSet == &Set) { if ((RTCPUID)RTCpuSetCount(&Set) != cCpus) { RTPrintf("tstMp-1: FAILURE: RTMpGetSet returned a set with a different cpu count; %d, expected %d\n", RTCpuSetCount(&Set), cCpus); g_cErrors++; } RTPrintf("tstMp-1: Possible CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); if (RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu)); if (RTMpIsCpuPresent(idCpu)) RTPrintf(RTMpIsCpuOnline(idCpu) ? " online\n" : " offline\n"); else { if (!RTMpIsCpuOnline(idCpu)) RTPrintf(" absent\n"); else { RTPrintf(" online but absent!\n"); RTPrintf("tstMp-1: FAILURE: Cpu with index %d is report as !RTIsCpuPresent while RTIsCpuOnline returns true!\n", iCpu); g_cErrors++; } } if (!RTMpIsCpuPossible(idCpu)) { RTPrintf("tstMp-1: FAILURE: Cpu with index %d is returned by RTCpuSet but not RTMpIsCpuPossible!\n", iCpu); g_cErrors++; } } else if (RTMpIsCpuPossible(idCpu)) { RTPrintf("tstMp-1: FAILURE: Cpu with index %d is returned by RTMpIsCpuPossible but not RTCpuSet!\n", iCpu); g_cErrors++; } else if (RTMpGetCurFrequency(idCpu) != 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetCurFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu); g_cErrors++; } else if (RTMpGetMaxFrequency(idCpu) != 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetMaxFrequency(%d[idx=%d]) didn't return 0 as it should\n", (int)idCpu, iCpu); g_cErrors++; } } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetSet -> %p, expected %p\n", pSet, &Set); g_cErrors++; RTCpuSetEmpty(&Set); 
RTCpuSetAdd(&Set, RTMpCpuIdFromSetIndex(0)); } /* * Online CPUs. */ RTCPUID cCpusOnline = RTMpGetOnlineCount(); if (cCpusOnline > 0) { if (cCpusOnline <= cCpus) RTPrintf("tstMp-1: RTMpGetOnlineCount -> %d\n", (int)cCpusOnline); else { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineCount -> %d, expected <= %d\n", (int)cCpusOnline, (int)cCpus); g_cErrors++; cCpusOnline = cCpus; } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineCount -> %d\n", (int)cCpusOnline); g_cErrors++; cCpusOnline = 1; } RTCPUSET SetOnline; pSet = RTMpGetOnlineSet(&SetOnline); if (pSet == &SetOnline) { if (RTCpuSetCount(&SetOnline) <= 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet returned an empty set!\n"); g_cErrors++; } else if ((RTCPUID)RTCpuSetCount(&SetOnline) > cCpus) { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet returned a too high value; %d, expected <= %d\n", RTCpuSetCount(&SetOnline), cCpus); g_cErrors++; } RTPrintf("tstMp-1: Online CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) if (RTCpuSetIsMemberByIndex(&SetOnline, iCpu)) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu), RTMpIsCpuOnline(idCpu) ? "online" : "offline"); if (!RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTPrintf("tstMp-1: FAILURE: online cpu with index %2d is not a member of the possible cpu set!\n", iCpu); g_cErrors++; } } /* There isn't any sane way of testing RTMpIsCpuOnline really... :-/ */ } else { RTPrintf("tstMp-1: FAILURE: RTMpGetOnlineSet -> %p, expected %p\n", pSet, &Set); g_cErrors++; } /* * Present CPUs. 
*/ RTCPUID cCpusPresent = RTMpGetPresentCount(); if (cCpusPresent > 0) { if ( cCpusPresent <= cCpus && cCpusPresent >= cCpusOnline) RTPrintf("tstMp-1: RTMpGetPresentCount -> %d\n", (int)cCpusPresent); else { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentCount -> %d, expected <= %d and >= %d\n", (int)cCpusPresent, (int)cCpus, (int)cCpusOnline); g_cErrors++; } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentCount -> %d\n", (int)cCpusPresent); g_cErrors++; cCpusPresent = 1; } RTCPUSET SetPresent; pSet = RTMpGetPresentSet(&SetPresent); if (pSet == &SetPresent) { if (RTCpuSetCount(&SetPresent) <= 0) { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet returned an empty set!\n"); g_cErrors++; } else if ((RTCPUID)RTCpuSetCount(&SetPresent) != cCpusPresent) { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet returned a bad value; %d, expected = %d\n", RTCpuSetCount(&SetPresent), cCpusPresent); g_cErrors++; } RTPrintf("tstMp-1: Present CPU mask:\n"); for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) if (RTCpuSetIsMemberByIndex(&SetPresent, iCpu)) { RTCPUID idCpu = RTMpCpuIdFromSetIndex(iCpu); RTPrintf("tstMp-1: %2d - id %d: %u/%u MHz %s\n", iCpu, (int)idCpu, RTMpGetCurFrequency(idCpu), RTMpGetMaxFrequency(idCpu), RTMpIsCpuPresent(idCpu) ? "present" : "absent"); if (!RTCpuSetIsMemberByIndex(&Set, iCpu)) { RTPrintf("tstMp-1: FAILURE: online cpu with index %2d is not a member of the possible cpu set!\n", iCpu); g_cErrors++; } } /* There isn't any sane way of testing RTMpIsCpuPresent really... :-/ */ } else { RTPrintf("tstMp-1: FAILURE: RTMpGetPresentSet -> %p, expected %p\n", pSet, &Set); g_cErrors++; } /* Find an online cpu for the next test. */ RTCPUID idCpuOnline; for (idCpuOnline = 0; idCpuOnline < RTCPUSET_MAX_CPUS; idCpuOnline++) if (RTMpIsCpuOnline(idCpuOnline)) break; /* * Quick test of RTMpGetDescription. 
*/ char szBuf[64]; int rc = RTMpGetDescription(idCpuOnline, &szBuf[0], sizeof(szBuf)); if (RT_SUCCESS(rc)) { RTPrintf("tstMp-1: RTMpGetDescription -> '%s'\n", szBuf); size_t cch = strlen(szBuf); rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch); if (rc != VERR_BUFFER_OVERFLOW) { RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc, expected VERR_BUFFER_OVERFLOW\n", rc); g_cErrors++; } rc = RTMpGetDescription(idCpuOnline, &szBuf[0], cch + 1); if (RT_FAILURE(rc)) { RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc, expected VINF_SUCCESS\n", rc); g_cErrors++; } } else { RTPrintf("tstMp-1: FAILURE: RTMpGetDescription -> %Rrc\n", rc); g_cErrors++; } if (!g_cErrors) RTPrintf("tstMp-1: SUCCESS\n", g_cErrors); else RTPrintf("tstMp-1: FAILURE - %d errors\n", g_cErrors); return !!g_cErrors; }
/**
 * The native callback.
 *
 * Two passes over the same event code: the first switch decides whether the
 * event is interesting (and keeps g_MpPendingOfflineSet up to date), the
 * second maps it to the IPRT online/offline notification.
 *
 * @returns NOTIFY_DONE.
 * @param   pNotifierBlock  Pointer to g_NotifierBlock.
 * @param   ulNativeEvent   The native event.
 * @param   pvCpu           The cpu id cast into a pointer value.
 *
 * @remarks This can fire with preemption enabled and on any CPU.
 */
static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu)
{
    bool fProcessEvent = false;
    RTCPUID idCpu      = (uintptr_t)pvCpu;
    NOREF(pNotifierBlock);

    /*
     * Note that redhat/CentOS ported _some_ of the FROZEN macros
     * back to their 2.6.18-92.1.10.el5 kernel but actually don't
     * use them. Thus we have to test for both CPU_TASKS_FROZEN and
     * the individual event variants.
     */
    switch (ulNativeEvent)
    {
        /*
         * Pick up online events or failures to go offline.
         * Ignore failure events for CPUs we didn't see go offline.
         */
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
            if (!RTCpuSetIsMember(&g_MpPendingOfflineSet, idCpu))
                break;      /* fProcessEvents = false */
            /* fall thru */
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
# ifdef CPU_DOWN_FAILED
            /* The CPU is (back) online; it is no longer pending offline. */
            RTCpuSetDel(&g_MpPendingOfflineSet, idCpu);
# endif
            fProcessEvent = true;
            break;

        /*
         * Pick the earliest possible offline event.
         * The only important thing here is that we get the event and that
         * it's exactly one.
         */
# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            fProcessEvent = true;
# else
        case CPU_DEAD:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DEAD_FROZEN)
        case CPU_DEAD_FROZEN:
#  endif
            /* Don't process CPU_DEAD notifications. */
# endif
# ifdef CPU_DOWN_FAILED
            /* Remember the CPU as pending offline so a later DOWN_FAILED
               for it is treated as a re-online event above. */
            RTCpuSetAdd(&g_MpPendingOfflineSet, idCpu);
# endif
            break;
    }

    if (!fProcessEvent)
        return NOTIFY_DONE;

    /* Second pass: dispatch the IPRT notification for the accepted event. */
    switch (ulNativeEvent)
    {
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }

    return NOTIFY_DONE;
}