/**
 * Checks whether the given CPU is currently online.
 *
 * We cannot query the CPU status recursively here, so the membership test is
 * done against the cached CPU set instead of asking the kernel directly.
 *
 * @returns true if online, false otherwise (including out-of-range IDs).
 * @param   idCpu       The identifier of the CPU to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
    return idCpu < ncpus
         ? RTCpuSetIsMember(&g_rtMpSolCpuSet, idCpu)
         : false;
}
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false otherwise (including out-of-range IDs).
 * @param   idCpu       The identifier of the CPU to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
    /* IDs beyond the system maximum can never be online. */
    if (idCpu >= MAXIMUM_PROCESSORS)
        return false;
#if 0 /* this isn't safe at all IRQLs (great work guys) */
    KAFFINITY Mask = KeQueryActiveProcessors();
    return !!(Mask & RT_BIT_64(idCpu));
#else
    /* Consult the cached CPU set instead; safe at any IRQL. */
    return RTCpuSetIsMember(&g_rtMpNtCpuSet, idCpu);
#endif
}
/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * Invokes the worker only if this CPU is still marked pending in the worker
 * set, then removes it so each CPU runs the worker at most once.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS pMpArgs     = (PRTMPARGS)pvInfo;
    PRTCPUSET pPendingSet = pMpArgs->pWorkerSet;
    RTCPUID   idThisCpu   = RTMpCpuId();

    /* Must be called with preemption disabled, otherwise RTMpCpuId() could
       be stale by the time we use it. */
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (RTCpuSetIsMember(pPendingSet, idThisCpu))
    {
        pMpArgs->pfnWorker(idThisCpu, pMpArgs->pvUser1, pMpArgs->pvUser2);
        RTCpuSetDel(pPendingSet, idThisCpu);
    }
}
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false otherwise.
 * @param   idCpu       The identifier of the CPU to check.
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
    /* Snapshot the online set and test membership. */
    RTCPUSET CpuSet;
    return RTCpuSetIsMember(RTMpGetOnlineSet(&CpuSet), idCpu);
}
/**
 * Checks whether the given CPU is possible on this system.
 *
 * @returns true if possible, false otherwise.
 * @param   idCpu       The identifier of the CPU to check.
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
    /* Snapshot the full (possible) CPU set and test membership. */
    RTCPUSET CpuSet;
    return RTCpuSetIsMember(RTMpGetSet(&CpuSet), idCpu);
}
/**
 * The native CPU hotplug notifier callback.
 *
 * Translates the kernel's hotplug events into IPRT RTMPEVENT_ONLINE /
 * RTMPEVENT_OFFLINE notifications, taking care to report exactly one
 * offline event per CPU going down and to ignore spurious failure events.
 *
 * @returns NOTIFY_DONE.
 * @param   pNotifierBlock  Pointer to g_NotifierBlock.
 * @param   ulNativeEvent   The native event.
 * @param   pvCpu           The cpu id cast into a pointer value.
 *
 * @remarks This can fire with preemption enabled and on any CPU.
 */
static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu)
{
    bool fProcessEvent = false;
    RTCPUID idCpu = (uintptr_t)pvCpu;
    NOREF(pNotifierBlock);

    /*
     * Note that redhat/CentOS ported _some_ of the FROZEN macros
     * back to their 2.6.18-92.1.10.el5 kernel but actually don't
     * use them. Thus we have to test for both CPU_TASKS_FROZEN and
     * the individual event variants.
     */
    switch (ulNativeEvent)
    {
        /*
         * Pick up online events or failures to go offline.
         * Ignore failure events for CPUs we didn't see go offline.
         */
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
            if (!RTCpuSetIsMember(&g_MpPendingOfflineSet, idCpu))
                break; /* fProcessEvent stays false */
            /* fall thru */
# endif
        case CPU_ONLINE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
#  endif
# ifdef CPU_DOWN_FAILED
            /* The CPU came (back) up; clear any pending-offline bookkeeping. */
            RTCpuSetDel(&g_MpPendingOfflineSet, idCpu);
# endif
            fProcessEvent = true;
            break;

        /*
         * Pick the earliest possible offline event.
         * The only important thing here is that we get the event and that
         * it's exactly one.
         */
# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            fProcessEvent = true;
# else
        case CPU_DEAD:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DEAD_FROZEN)
        case CPU_DEAD_FROZEN:
#  endif
            /* Don't process CPU_DEAD notifications
               (fProcessEvent stays false on this path). */
# endif
# ifdef CPU_DOWN_FAILED
            /* Remember the CPU as going offline so a later CPU_DOWN_FAILED
               for it is recognized above. */
            RTCpuSetAdd(&g_MpPendingOfflineSet, idCpu);
# endif
            break;
    }

    if (!fProcessEvent)
        return NOTIFY_DONE;

    /*
     * Dispatch the IPRT notification matching the native event.
     */
    switch (ulNativeEvent)
    {
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }

    return NOTIFY_DONE;
}