/**
 * Solaris callback function for Mp event notification.
 *
 * @returns Solaris error code.
 * @param   CpuState    The current event/state of the CPU.
 * @param   iCpu        Which CPU is this event for.
 * @param   pvArg       Ignored.
 *
 * @remarks This function assumes index == RTCPUID.
 *          We may -not- be firing on the CPU going online/offline and called
 *          with preemption enabled.
 */
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
    RTMPEVENT enmMpEvent;
    NOREF(pvArg);

    /*
     * Keep our CPU set structures in sync first, no matter which CPU we are
     * currently scheduled on - this is purely atomic accounting.
     */
    switch (CpuState)
    {
        case CPU_ON:
            enmMpEvent = RTMPEVENT_ONLINE;
            RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
            break;

        case CPU_OFF:
            enmMpEvent = RTMPEVENT_OFFLINE;
            RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
            break;

        default:
            return 0; /* Not an event we care about. */
    }

    rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    return 0;
}
/**
 * Executes @a pfnWorker on every online CPU, including the calling one.
 *
 * @returns VINF_SUCCESS (always; the smp_call_function result is only asserted).
 * @param   pfnWorker   The worker function to run on each CPU.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 *
 * @remarks Runs with preemption disabled for the whole rendezvous; the online
 *          set is snapshotted *after* disabling preemption so this CPU cannot
 *          migrate between the snapshot and the local invocation.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
    uint32_t cLoops;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Preemption must be off before taking the online-set snapshot, otherwise
       we could be rescheduled onto a CPU not in the snapshot. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion;
           completion is tracked via pWorkerSet instead (the wrapper removes
           each CPU from the set when its worker has run). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        /* The 'retry' parameter was dropped in 2.6.27. */
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
#else
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
#endif
        Assert(!rc); NOREF(rc);
    }

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

    /* Wait for all of them finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so
           update the wait set - avoids spinning forever on a CPU that will
           never clear its bit. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);
            cLoops = 64000;
        }
        ASMNopPause();
    }

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
/**
 * Solaris callback function for Mp event notification.
 *
 * @returns Solaris error code.
 * @param   CpuState    The current event/state of the CPU.
 * @param   iCpu        Which CPU is this event for.
 * @param   pvArg       Ignored.
 *
 * @remarks This function assumes index == RTCPUID.
 */
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
    RTMPEVENT enmMpEvent;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /*
     * Update our CPU set structures first regardless of whether we've been
     * scheduled on the right CPU or not, this is just atomic accounting.
     */
    if (CpuState == CPU_ON)
    {
        enmMpEvent = RTMPEVENT_ONLINE;
        RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
    }
    else if (CpuState == CPU_OFF)
    {
        enmMpEvent = RTMPEVENT_OFFLINE;
        RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
    }
    else
        return 0; /* NOTE(review): returns without restoring preemption - matches original; verify intentional. */

    /*
     * Since we don't absolutely need to do CPU bound code in any of the CPU offline
     * notification hooks, run it on the current CPU. Scheduling a callback to execute
     * on the CPU going offline at this point is too late and will not work reliably.
     */
    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
    if (   fRunningOnTargetCpu
        || enmMpEvent == RTMPEVENT_OFFLINE)
    {
        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    }
    else
    {
        /*
         * We're not on the target CPU, schedule (synchronous) the event notification callback
         * to run on the target CPU i.e. the CPU that was online'd.
         */
        RTMPARGS Args;
        RT_ZERO(Args);
        Args.pvUser1 = &enmMpEvent;
        Args.pvUser2 = NULL;
        Args.idCpu   = iCpu;
        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
    }

    RTThreadPreemptRestore(&PreemptState);
    NOREF(pvArg);
    return 0;
}
/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * Invokes the worker only if this CPU is still a member of the worker set,
 * then removes this CPU from the set to signal completion to the initiator.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID   idCpu = RTMpCpuId();
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (!RTCpuSetIsMember(pArgs->pWorkerSet, idCpu))
        return;

    pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
    /* Clearing our bit tells the initiator we are done. */
    RTCpuSetDel(pArgs->pWorkerSet, idCpu);
}
/**
 * The native callback.
 *
 * @returns NOTIFY_DONE.
 * @param   pNotifierBlock  Pointer to g_NotifierBlock.
 * @param   ulNativeEvent   The native event.
 * @param   pvCpu           The cpu id cast into a pointer value.
 *
 * @remarks This can fire with preemption enabled and on any CPU.
 */
static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu)
{
    bool fProcessEvent = false;
    RTCPUID idCpu      = (uintptr_t)pvCpu;
    NOREF(pNotifierBlock);

    /*
     * Note that redhat/CentOS ported _some_ of the FROZEN macros
     * back to their 2.6.18-92.1.10.el5 kernel but actually don't
     * use them. Thus we have to test for both CPU_TASKS_FROZEN and
     * the individual event variants.
     */
    /* First pass: decide whether this native event maps to an IPRT event and
       maintain g_MpPendingOfflineSet bookkeeping. */
    switch (ulNativeEvent)
    {
        /*
         * Pick up online events or failures to go offline.
         * Ignore failure events for CPUs we didn't see go offline.
         */
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
            if (!RTCpuSetIsMember(&g_MpPendingOfflineSet, idCpu))
                break; /* fProcessEvent = false */
            /* fall thru */
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
# ifdef CPU_DOWN_FAILED
            /* The CPU is back (or never left); it is no longer pending offline. */
            RTCpuSetDel(&g_MpPendingOfflineSet, idCpu);
# endif
            fProcessEvent = true;
            break;

        /*
         * Pick the earliest possible offline event.
         * The only important thing here is that we get the event and that
         * it's exactly one.
         */
# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            fProcessEvent = true;
# else
        case CPU_DEAD:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DEAD_FROZEN)
        case CPU_DEAD_FROZEN:
#  endif
            /* Don't process CPU_DEAD notifications.
             */
# endif
# ifdef CPU_DOWN_FAILED
            /* Remember this CPU as pending offline so a later DOWN_FAILED is recognized. */
            RTCpuSetAdd(&g_MpPendingOfflineSet, idCpu);
# endif
            break;
    }

    if (!fProcessEvent)
        return NOTIFY_DONE;

    /* Second pass: translate the native event into the IPRT notification. */
    switch (ulNativeEvent)
    {
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }

    return NOTIFY_DONE;
}