Example #1
/**
 * Notification wrapper that updates CPU states and invokes our notification
 * callbacks.
 *
 * @param idCpu             The CPU Id.
 * @param pvUser1           Pointer to the notifier_block (unused).
 * @param pvUser2           The notification event.
 * @remarks This can be invoked in interrupt context.
 */
static DECLCALLBACK(void) rtMpNotificationLinuxOnCurrentCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    unsigned long ulNativeEvent = *(unsigned long *)pvUser2;
    NOREF(pvUser1);

    AssertRelease(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    AssertReleaseMsg(idCpu == RTMpCpuId(),  /* ASSUMES iCpu == RTCPUID */
                     ("idCpu=%u RTMpCpuId=%d ApicId=%d\n", idCpu, RTMpCpuId(), ASMGetApicId() ));

    switch (ulNativeEvent)
    {
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }
}
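A minimal dispatch sketch, assuming IPRT's RTMpOnSpecific() is what pushes this wrapper onto the affected CPU; the function name below is made up for illustration and is not taken from the surrounding module:

static int rtMpNotificationLinuxDispatchExample(struct notifier_block *pNotifierBlock,
                                                unsigned long ulNativeEvent, void *pvCpu)
{
    RTCPUID idCpu = (uintptr_t)pvCpu;

    /* Run the wrapper above on the CPU the event is about; pvUser2 carries the
       native event, matching the *(unsigned long *)pvUser2 read in the wrapper. */
    int rc = RTMpOnSpecific(idCpu, rtMpNotificationLinuxOnCurrentCpu,
                            pNotifierBlock /* pvUser1, unused */, &ulNativeEvent /* pvUser2 */);
    return RT_SUCCESS(rc) ? NOTIFY_OK : NOTIFY_DONE;
}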
/**
 * Solaris callback function for Mp event notification.
 *
 * @returns Solaris error code.
 * @param    CpuState   The current event/state of the CPU.
 * @param    iCpu       Which CPU is this event for.
 * @param    pvArg      Ignored.
 *
 * @remarks This function assumes index == RTCPUID.
 *          We may not be running on the CPU that is going online/offline and
 *          we may be called with preemption enabled.
 */
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
    RTMPEVENT enmMpEvent;

    /*
     * Update our CPU set structures first regardless of whether we've been
     * scheduled on the right CPU or not; this is just atomic accounting.
     */
    if (CpuState == CPU_ON)
    {
        enmMpEvent = RTMPEVENT_ONLINE;
        RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
    }
    else if (CpuState == CPU_OFF)
    {
        enmMpEvent = RTMPEVENT_OFFLINE;
        RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
    }
    else
        return 0;

    rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    NOREF(pvArg);
    return 0;
}
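A minimal registration sketch, assuming the Solaris register_cpu_setup_func() DDI interface; the helper name is hypothetical, and the global cpu_lock must be held while (un)registering:

static void rtMpNotificationSolRegisterExample(void)
{
    mutex_enter(&cpu_lock);
    register_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
    mutex_exit(&cpu_lock);
}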
Example #3
/**
 * The native NT processor change callback.
 *
 * @param   pvUser              The callback context, ignored.
 * @param   pChangeContext      Pointer to the processor change context (CPU
 *                              number and change state).
 * @param   pOperationStatus    Where to return the operation status (may be NULL).
 */
static VOID __stdcall rtMpNotificationNtCallback(PVOID pvUser,
                                                 PKE_PROCESSOR_CHANGE_NOTIFY_CONTEXT pChangeContext,
                                                 PNTSTATUS pOperationStatus)
{
    NOREF(pvUser);
    AssertPtr(pChangeContext);
    AssertPtrNull(pOperationStatus);

    RTCPUID idCpu = pChangeContext->NtNumber;
    switch (pChangeContext->State)
    {
        case KeProcessorAddStartNotify:
        case KeProcessorAddFailureNotify:
            break;

        case KeProcessorAddCompleteNotify:
            /* Update the active CPU set before doing callback round. */
            RTCpuSetAdd(&g_rtMpNtCpuSet, idCpu);
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

        //case KeProcessorDelCompleteNotify:
        //    rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
        //    break;

        default:
           AssertMsgFailed(("Unexpected state=%d idCpu=%d\n", pChangeContext->State, (int)idCpu));
           break;
    }

    if (pOperationStatus)
        *pOperationStatus = STATUS_SUCCESS;
}
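An illustrative registration, assuming the Windows 7+ KeRegisterProcessorChangeCallback() API; the helper and the g_pvMpCpuChangeCallback global are hypothetical names used only for this sketch:

static PVOID g_pvMpCpuChangeCallback = NULL;

static NTSTATUS rtMpNotificationNtRegisterExample(void)
{
    /* Keep the returned handle so KeDeregisterProcessorChangeCallback() can undo this later. */
    g_pvMpCpuChangeCallback = KeRegisterProcessorChangeCallback(rtMpNotificationNtCallback,
                                                                NULL /* CallbackContext */,
                                                                0 /* Flags */);
    return g_pvMpCpuChangeCallback ? STATUS_SUCCESS : STATUS_UNSUCCESSFUL;
}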
Example #4
/**
 * Solaris callback function for Mp event notification.
 *
 * @param    CpuState   The current event/state of the CPU.
 *  @param    iCpu       Which CPU is this event for.
 * @param    pvArg      Ignored.
 *
 * @remarks This function assumes index == RTCPUID.
 * @returns Solaris error code.
 */
static int rtMpNotificationCpuEvent(cpu_setup_t CpuState, int iCpu, void *pvArg)
{
    RTMPEVENT enmMpEvent;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    /*
     * Update our CPU set structures first regardless of whether we've been
     * scheduled on the right CPU or not; this is just atomic accounting.
     */
    if (CpuState == CPU_ON)
    {
        enmMpEvent = RTMPEVENT_ONLINE;
        RTCpuSetAdd(&g_rtMpSolCpuSet, iCpu);
    }
    else if (CpuState == CPU_OFF)
    {
        enmMpEvent = RTMPEVENT_OFFLINE;
        RTCpuSetDel(&g_rtMpSolCpuSet, iCpu);
    }
    else
    {
        RTThreadPreemptRestore(&PreemptState);
        return 0;
    }

    /*
     * Since we don't absolutely need to do CPU bound code in any of the CPU offline
     * notification hooks, run it on the current CPU. Scheduling a callback to execute
     * on the CPU going offline at this point is too late and will not work reliably.
     */
    bool fRunningOnTargetCpu = iCpu == RTMpCpuId();
    if (   fRunningOnTargetCpu
        || enmMpEvent == RTMPEVENT_OFFLINE)
    {
        rtMpNotificationDoCallbacks(enmMpEvent, iCpu);
    }
    else
    {
        /*
         * We're not on the target CPU, so schedule the event notification callback
         * to run (synchronously) on the target CPU, i.e. the CPU that was onlined.
         */
        RTMPARGS Args;
        RT_ZERO(Args);
        Args.pvUser1 = &enmMpEvent;
        Args.pvUser2 = NULL;
        Args.idCpu   = iCpu;
        RTMpOnSpecific(iCpu, rtMpNotificationSolOnCurrentCpu, &Args, NULL /* pvIgnored1 */);
    }

    RTThreadPreemptRestore(&PreemptState);

    NOREF(pvArg);
    return 0;
}
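For completeness, a matching teardown sketch, assuming the Solaris unregister_cpu_setup_func() DDI interface; the helper name is hypothetical and, as with registration, cpu_lock must be held:

static void rtMpNotificationSolUnregisterExample(void)
{
    mutex_enter(&cpu_lock);
    unregister_cpu_setup_func(rtMpNotificationCpuEvent, NULL /* pvArg */);
    mutex_exit(&cpu_lock);
}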
Example #5
/**
 * PFNRTMPWORKER worker for executing Mp events on the target CPU.
 *
 * @param    idCpu          The current CPU Id.
 * @param    pvArg          Opaque pointer to event type (online/offline).
 * @param    pvIgnored1     Ignored.
 */
static void rtMpNotificationSolOnCurrentCpu(RTCPUID idCpu, void *pvArg, void *pvIgnored1)
{
    NOREF(pvIgnored1);
    NOREF(idCpu);

    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    AssertRelease(pArgs && pArgs->idCpu == RTMpCpuId());
    Assert(pArgs->pvUser1);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    RTMPEVENT enmMpEvent = *(RTMPEVENT *)pArgs->pvUser1;
    rtMpNotificationDoCallbacks(enmMpEvent, pArgs->idCpu);
}
/**
 * The native callback.
 *
 * @returns NOTIFY_DONE.
 * @param   pNotifierBlock  Pointer to g_NotifierBlock.
 * @param   ulNativeEvent   The native event.
 * @param   pvCpu           The cpu id cast into a pointer value.
 *
 * @remarks This can fire with preemption enabled and on any CPU.
 */
static int rtMpNotificationLinuxCallback(struct notifier_block *pNotifierBlock, unsigned long ulNativeEvent, void *pvCpu)
{
    bool fProcessEvent = false;
    RTCPUID idCpu      = (uintptr_t)pvCpu;
    NOREF(pNotifierBlock);

    /*
     * Note that redhat/CentOS ported _some_ of the FROZEN macros
     * back to their 2.6.18-92.1.10.el5 kernel but actually don't
     * use them. Thus we have to test for both CPU_TASKS_FROZEN and
     * the individual event variants.
     */
    switch (ulNativeEvent)
    {
        /*
         * Pick up online events or failures to go offline.
         * Ignore failure events for CPUs we didn't see go offline.
         */
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
            if (!RTCpuSetIsMember(&g_MpPendingOfflineSet, idCpu))
                break;      /* fProcessEvent stays false */
        /* fall thru */
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
# ifdef CPU_DOWN_FAILED
            RTCpuSetDel(&g_MpPendingOfflineSet, idCpu);
# endif
            fProcessEvent = true;
            break;

        /*
         * Pick the earliest possible offline event.
         * The only important thing here is that we get the event and that
         * it's exactly one.
         */
# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            fProcessEvent = true;
# else
        case CPU_DEAD:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DEAD_FROZEN)
        case CPU_DEAD_FROZEN:
#  endif
            /* Don't process CPU_DEAD notifications. */
# endif
# ifdef CPU_DOWN_FAILED
            RTCpuSetAdd(&g_MpPendingOfflineSet, idCpu);
# endif
            break;
    }

    if (!fProcessEvent)
        return NOTIFY_DONE;

    switch (ulNativeEvent)
    {
# ifdef CPU_DOWN_FAILED
        case CPU_DOWN_FAILED:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_FAILED_FROZEN)
        case CPU_DOWN_FAILED_FROZEN:
#  endif
# endif
        case CPU_ONLINE:
# if defined(CPU_TASKS_FROZEN) && defined(CPU_ONLINE_FROZEN)
        case CPU_ONLINE_FROZEN:
# endif
            rtMpNotificationDoCallbacks(RTMPEVENT_ONLINE, idCpu);
            break;

# ifdef CPU_DOWN_PREPARE
        case CPU_DOWN_PREPARE:
#  if defined(CPU_TASKS_FROZEN) && defined(CPU_DOWN_PREPARE_FROZEN)
        case CPU_DOWN_PREPARE_FROZEN:
#  endif
            rtMpNotificationDoCallbacks(RTMPEVENT_OFFLINE, idCpu);
            break;
# endif
    }

    return NOTIFY_DONE;
}
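An illustrative hotplug registration for older kernels that still provide register_cpu_notifier(); newer kernels replaced this with the cpuhp state machine, so treat this only as a sketch. The g_NotifierBlock name matches the one referenced in the doc comment above, while the helper name is made up:

static struct notifier_block g_NotifierBlock =
{
    .notifier_call = rtMpNotificationLinuxCallback,
    .priority      = 0,
};

static int rtMpNotificationLinuxRegisterExample(void)
{
    return register_cpu_notifier(&g_NotifierBlock);
}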