VOID
XenPci_HighSync(PXENPCI_HIGHSYNC_FUNCTION function0, PXENPCI_HIGHSYNC_FUNCTION functionN, PVOID context)
{
  ULONG ActiveProcessorCount;
  ULONG i;
  highsync_info_t *highsync_info;
  KIRQL old_irql;

  FUNCTION_ENTER();

  highsync_info = ExAllocatePoolWithTag(NonPagedPool, sizeof(highsync_info_t), XENPCI_POOL_TAG);
  if (!highsync_info)
  {
    /* allocation check added - the original dereferenced the pointer unconditionally */
    FUNCTION_EXIT();
    return;
  }
  RtlZeroMemory(highsync_info, sizeof(highsync_info_t));
  KeInitializeEvent(&highsync_info->highsync_complete_event, SynchronizationEvent, FALSE);
  highsync_info->function0 = function0;
  highsync_info->functionN = functionN;
  highsync_info->context = context;
  highsync_info->sync_level = HIGH_LEVEL;

#if (NTDDI_VERSION >= NTDDI_WINXP)
  ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
  ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif

  /* go to HIGH_LEVEL to prevent any races with DPCs on the current processor */
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  highsync_info->do_spin = TRUE;
  for (i = 0; i < ActiveProcessorCount; i++)
  {
    if (i == 0)
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunction0, highsync_info);
    else
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunctionN, highsync_info);
    KeSetTargetProcessorDpc(&highsync_info->dpcs[i], (CCHAR)i);
    KeSetImportanceDpc(&highsync_info->dpcs[i], HighImportance);
    KdPrint((__DRIVER_NAME " queuing Dpc for CPU %d\n", i));
    KeInsertQueueDpc(&highsync_info->dpcs[i], NULL, NULL);
  }
  KdPrint((__DRIVER_NAME " All DPCs queued\n"));

  KeMemoryBarrier();
  KeLowerIrql(old_irql);

  KdPrint((__DRIVER_NAME " Waiting for highsync_complete_event\n"));
  KeWaitForSingleObject(&highsync_info->highsync_complete_event, Executive, KernelMode, FALSE, NULL);
#if (NTDDI_VERSION >= NTDDI_WINXP)
  KeFlushQueuedDpcs();
#else
  {
    /* just wait 1 second until all DPCs finish - not ideal but it's only for W2K */
    LARGE_INTEGER interval;
    interval.QuadPart = -1 * 1000 * 1000 * 10; /* 1 second */
    KeDelayExecutionThread(KernelMode, FALSE, &interval);
  }
#endif
  ExFreePoolWithTag(highsync_info, XENPCI_POOL_TAG);

  FUNCTION_EXIT();
}
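/* A minimal sketch of the two rendezvous callbacks queued above; the real
 * XenPci_HighSyncCallFunction0/N bodies are not part of this example, and the
 * nr_procs_at_sync_level counter used here is an assumed field of
 * highsync_info_t. The idea: every DPC raises to sync_level, CPU 0 waits for
 * the other CPUs to check in, runs function0, then clears do_spin to release
 * the spinners, which run functionN. */
VOID XenPci_HighSyncCallFunctionN(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  highsync_info_t *highsync_info = context;
  KIRQL old_irql;
  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  InterlockedIncrement(&highsync_info->nr_procs_at_sync_level); /* check in */
  while (highsync_info->do_spin)                                /* held by CPU 0 */
    KeStallExecutionProcessor(1);
  highsync_info->functionN(highsync_info->context);
  InterlockedDecrement(&highsync_info->nr_procs_at_sync_level); /* check out */
  KeLowerIrql(old_irql);
}

VOID XenPci_HighSyncCallFunction0(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  highsync_info_t *highsync_info = context;
  LONG nr_other_procs = (LONG)KeNumberProcessors - 1;
  KIRQL old_irql;
  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  while (highsync_info->nr_procs_at_sync_level < nr_other_procs) /* wait for the others */
    KeStallExecutionProcessor(1);
  highsync_info->function0(highsync_info->context);
  highsync_info->do_spin = FALSE;                                /* release the spinners */
  KeMemoryBarrier();
  while (highsync_info->nr_procs_at_sync_level > 0)              /* wait for them to finish */
    KeStallExecutionProcessor(1);
  KeLowerIrql(old_irql);
  KeSetEvent(&highsync_info->highsync_complete_event, IO_NO_INCREMENT, FALSE);
}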
void dtrace_hook_int(UCHAR ivec, void (*InterruptHandler)(void), uintptr_t *paddr)
{
	INT_VECTOR OrgVec;
	int i;
	PRKDPC Dpc;

	cpunos = 0;
	if (paddr != NULL) {
		BackupInterrupt(ivec, &OrgVec);
#ifdef _AMD64_
		*(ULONG64 *)paddr = VEC_OFFSET_TO_ADDR(OrgVec);
#else
		*(ULONG32 *)paddr = VEC_OFFSET_TO_ADDR(OrgVec);
#endif
	}

	Dpc = (PRKDPC)ExAllocatePoolWithTag(NonPagedPool, sizeof(KDPC) * KeNumberProcessors, 'Tag1');
	if (Dpc == NULL)	/* allocation check added */
		return;

	for (i = 0; i < KeNumberProcessors; i++)
		KeInitializeDpc(&Dpc[i], hook_init, NULL);

	KeInitializeEvent(&SyncIDT, NotificationEvent, FALSE);
	for (i = 0; i < KeNumberProcessors; i++) {
		KeSetTargetProcessorDpc(&Dpc[i], (CCHAR)i);
		KeSetImportanceDpc(&Dpc[i], HighImportance);
		KeInsertQueueDpc(&Dpc[i], (PVOID)(ULONG_PTR)ivec, (PVOID)InterruptHandler);
	}
	KeWaitForSingleObject(&SyncIDT, Executive, KernelMode, FALSE, NULL);
	KeClearEvent(&SyncIDT);
	ExFreePoolWithTag(Dpc, 'Tag1');
}
VOID
SendEachProcessorDpc (
	PKDEFERRED_ROUTINE Routine,
	PVOID Context,
	PVOID SysArg1,
	PVOID SysArg2
	)

/*++

Routine Description

	This routine sends a DPC to each processor in a multiprocessor system.

Arguments

	Routine   - Deferred routine
	Context,
	SysArg1,
	SysArg2   - Parameters, see the MSDN documentation for KeInitializeDpc
	            and KeInsertQueueDpc

Return Value

	None

--*/

{
	UNICODE_STRING u;
	PKDPC Dpcs;

	RtlInitUnicodeString (&u, L"KeFlushQueuedDpcs");
	*(PVOID*)&pKeFlushQueuedDpcs = MmGetSystemRoutineAddress (&u);

	//
	// The DPC objects must remain valid until the DPCs have actually run,
	// so allocate them from nonpaged pool. (The original used a stack-local
	// KDPC that went out of scope while possibly still queued.)
	//
	Dpcs = (PKDPC) ExAllocatePoolWithTag (NonPagedPool, sizeof(KDPC) * KeNumberProcessors, 'cpDS');
	if (!Dpcs)
		return;

	for (CCHAR i = 0; i < KeNumberProcessors; i++)
	{
		KdPrint(("SendEachProcessorDpc: queueing DPC for processor [%d]\n", i));
		KeInitializeDpc (&Dpcs[i], Routine, Context);
		KeSetTargetProcessorDpc (&Dpcs[i], i);
		KeInsertQueueDpc (&Dpcs[i], SysArg1, SysArg2);
	}

	if (pKeFlushQueuedDpcs)
	{
		// Ensure that all DPCs have been delivered before freeing them.
		pKeFlushQueuedDpcs ();
		ExFreePoolWithTag (Dpcs, 'cpDS');
	}
	else
	{
		// Without KeFlushQueuedDpcs there is no safe point at which to
		// free the DPC objects, so leak them rather than free a queued DPC.
		KdPrint(("pKeFlushQueuedDpcs = NULL!!!\n"));
	}

	KdPrint(("SendEachProcessorDpc: all completed\n"));
}
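/*
 * A minimal sketch of a deferred routine suitable for SendEachProcessorDpc;
 * the routine name and the counter are hypothetical, not from the original
 * source.
 */
static volatile LONG g_DpcRuns = 0;

VOID CountingDpcRoutine (PKDPC Dpc, PVOID Context, PVOID SysArg1, PVOID SysArg2)
{
	UNREFERENCED_PARAMETER (Dpc);
	UNREFERENCED_PARAMETER (Context);
	UNREFERENCED_PARAMETER (SysArg1);
	UNREFERENCED_PARAMETER (SysArg2);

	// Runs once on every processor, at DISPATCH_LEVEL.
	InterlockedIncrement (&g_DpcRuns);
}

// Usage: SendEachProcessorDpc (CountingDpcRoutine, NULL, NULL, NULL);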
VOID MPCreateThread(VOID (*FunctionPointer)(IN PKDPC, IN PVOID, IN PVOID, IN PVOID))
{
    /*
     * Multi-Processor Consideration:
     * each processor has its own IDT.
     */
    CCHAR i;
    PKDPC pkDpc = NULL;
    KIRQL oldIrql, currentIrql;

    // This routine expects to be entered below DISPATCH_LEVEL; the DPC queued
    // for the current processor only runs once the IRQL drops below
    // DISPATCH_LEVEL again.
    currentIrql = KeGetCurrentIrql();
    if (currentIrql < DISPATCH_LEVEL)
        KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    InterlockedAnd(&allProcessorDone, 0);

    pkDpc = (PKDPC)ExAllocatePoolWithTag(NonPagedPool, KeNumberProcessors * sizeof(KDPC), (ULONG)' pni');
    if (!pkDpc)
    {
        DbgPrint("Insufficient Resource error\n");
        if (currentIrql < DISPATCH_LEVEL)
            KeLowerIrql(oldIrql);
        return;
    }

    for (i = 0; i < KeNumberProcessors; i++)
    {
        cpuNum[i] = i;
        KeInitializeDpc(&pkDpc[i], FunctionPointer, &cpuNum[i]);
        KeSetTargetProcessorDpc(&pkDpc[i], i);
        KeInsertQueueDpc(&pkDpc[i], NULL, NULL);
    }

    // Wait for the hooking initialization of all the other processors.
    while (InterlockedCompareExchange(&allProcessorDone,
                                      KeNumberProcessors - 1,
                                      KeNumberProcessors - 1) != KeNumberProcessors - 1)
    {
        YieldProcessor();   // portable CPU "pause" hint
    }

    if (currentIrql < DISPATCH_LEVEL)
        KeLowerIrql(oldIrql);

    ExFreePool(pkDpc);
    pkDpc = NULL;
}
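/*
 * A minimal sketch of a DPC body compatible with MPCreateThread above; the
 * routine name and the per-CPU work are hypothetical. Each instance runs on
 * its target processor and checks in via allProcessorDone, which
 * MPCreateThread spin-waits on.
 */
VOID HookInitDpc(IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SysArg1, IN PVOID SysArg2)
{
    CCHAR cpu = *(CCHAR *)DeferredContext;  /* &cpuNum[i], passed by MPCreateThread */

    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(SysArg1);
    UNREFERENCED_PARAMETER(SysArg2);

    DbgPrint("per-processor initialization on CPU %d\n", cpu);
    /* ... per-processor work (e.g. IDT hooking) would go here ... */

    InterlockedIncrement(&allProcessorDone);
}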
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
             ? VERR_CPU_NOT_FOUND
             : VERR_CPU_OFFLINE;

    int rc = g_pfnrtSendIpi(idCpu);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Fallback. */
    if (!fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(aPokeDpcs); i++)
        {
            KeInitializeDpc(&aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&aPokeDpcs[i], (int)i);
        }
        fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL. */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately,
     * or at least that an IPI will be sent immediately.
     * Note: this is not true on at least Vista and Windows 7. */
    BOOLEAN bRet = KeInsertQueueDpc(&aPokeDpcs[idCpu], 0, 0);
    KeLowerIrql(oldIrql);

    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
VOID
SrvSetTimer (
    IN PSRV_TIMER Timer,
    IN PLARGE_INTEGER Timeout,
    IN PKDEFERRED_ROUTINE TimeoutHandler,
    IN PVOID Context
    )

/*++

Routine Description:

    This routine starts a timer.

Arguments:

    Timer -- pointer to the timer
    Timeout -- time to wait, in the standard 100-nanosecond units
        (negative for a relative timeout)
    TimeoutHandler -- routine to call if the timer expires
    Context -- context value for the timer routine

Return Value:

    None.

--*/

{
    PRKDPC Dpc = &Timer->Dpc;

    PAGED_CODE( );

    //
    // Initialize the DPC associated with the timer, targeting it at the
    // current processor.  Reset the event that indicates that the timer
    // routine has run.  Set the timer.
    //

    KeInitializeDpc( Dpc, TimeoutHandler, Context );
    KeSetTargetProcessorDpc( Dpc, (CCHAR)KeGetCurrentProcessorNumber() );

    KeClearEvent( &Timer->Event );

    KeSetTimer( &Timer->Timer, *Timeout, Dpc );

    return;

} // SrvSetTimer
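/*
 * A minimal sketch of a timeout handler and a SrvSetTimer call; the handler
 * name and the five-second timeout are illustrative, not from the original
 * source. The handler signals Timer->Event, which SrvSetTimer clears before
 * arming the timer.
 */
VOID
MyTimeoutHandler (
    IN PKDPC Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    )
{
    PSRV_TIMER timer = (PSRV_TIMER)DeferredContext;

    UNREFERENCED_PARAMETER( Dpc );
    UNREFERENCED_PARAMETER( SystemArgument1 );
    UNREFERENCED_PARAMETER( SystemArgument2 );

    KeSetEvent( &timer->Event, 0, FALSE );  // indicate that the timer routine has run
}

// Usage:
//     LARGE_INTEGER timeout;
//     timeout.QuadPart = -5 * 10 * 1000 * 1000;    // relative five seconds
//     SrvSetTimer( timer, &timeout, MyTimeoutHandler, timer );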
int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
{
    /*
     * DPC fallback.
     */
    static KDPC s_aPokeDpcs[MAXIMUM_PROCESSORS] = {0};
    static bool s_fPokeDPCsInitialized = false;

    if (!s_fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(s_aPokeDpcs); i++)
        {
            KeInitializeDpc(&s_aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&s_aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&s_aPokeDpcs[i], (int)i);
        }
        s_fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL. */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately,
     * or at least that an IPI will be sent immediately.
     * Note: this is not true on at least Vista and Windows 7. */
    BOOLEAN bRet = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);
    KeLowerIrql(oldIrql);

    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
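/*
 * A minimal sketch of the dummy DPC routine queued by the poke helpers; the
 * real rtMpNtPokeCpuDummy is not shown in this example. Its body is empty on
 * purpose: the point of queueing it is the dispatch interrupt (or IPI) it
 * forces on the target CPU, not any work it does.
 */
static VOID rtMpNtPokeCpuDummy(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID pvArg1, IN PVOID pvArg2)
{
    NOREF(pDpc);
    NOREF(pvUser);
    NOREF(pvArg1);
    NOREF(pvArg2);
}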
VOID
NTAPI
INIT_FUNCTION
PoInitializePrcb(IN PKPRCB Prcb)
{
    /* Initialize the Power State */
    RtlZeroMemory(&Prcb->PowerState, sizeof(Prcb->PowerState));
    Prcb->PowerState.Idle0KernelTimeLimit = 0xFFFFFFFF;
    Prcb->PowerState.CurrentThrottle = 100;
    Prcb->PowerState.CurrentThrottleIndex = 0;
    Prcb->PowerState.IdleFunction = PopIdle0;

    /* Initialize the Perf DPC and Timer */
    KeInitializeDpc(&Prcb->PowerState.PerfDpc, PopPerfIdleDpc, Prcb);
    KeSetTargetProcessorDpc(&Prcb->PowerState.PerfDpc, Prcb->Number);
    KeInitializeTimerEx(&Prcb->PowerState.PerfTimer, SynchronizationTimer);
}
// Locks all other processors and returns an exclusivity pointer. This function
// should never be called before the last exclusivity is released.
_Use_decl_annotations_ EXTERN_C void *ExclGainExclusivity() {
  NT_ASSERT(InterlockedAdd(&g_ExclpNumberOfLockedProcessors, 0) == 0);
  _InterlockedAnd(&g_ExclpReleaseAllProcessors, 0);

  const auto numberOfProcessors = KeQueryActiveProcessorCount(nullptr);

  // Allocates DPCs for all processors.
  auto context = reinterpret_cast<ExclusivityContext *>(ExAllocatePoolWithTag(
      NonPagedPoolNx, sizeof(void *) + (numberOfProcessors * sizeof(KDPC)),
      EXCLP_POOL_TAG));
  if (!context) {
    return nullptr;
  }

  // Execute a lock DPC on all processors but this one.
  context->OldIrql = KeRaiseIrqlToDpcLevel();
  const auto currentCpu = KeGetCurrentProcessorNumber();
  for (auto i = 0ul; i < numberOfProcessors; i++) {
    if (i == currentCpu) {
      continue;
    }

    // Queue a lock DPC.
    KeInitializeDpc(&context->Dpcs[i], ExclpRaiseIrqlAndWaitDpc, nullptr);
    KeSetTargetProcessorDpc(&context->Dpcs[i], static_cast<CCHAR>(i));
    KeInsertQueueDpc(&context->Dpcs[i], nullptr, nullptr);
  }

  // Wait until all other processors have been halted.
  const auto needToBeLocked = numberOfProcessors - 1;
  while (_InterlockedCompareExchange(&g_ExclpNumberOfLockedProcessors,
                                     needToBeLocked, needToBeLocked) !=
         static_cast<LONG>(needToBeLocked)) {
    KeStallExecutionProcessor(10);
  }
  return context;
}
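// A minimal sketch of the lock DPC referenced above; the real
// ExclpRaiseIrqlAndWaitDpc is not shown in this example, so treat this as an
// illustration of the pattern. Each targeted processor checks in, then spins
// at DISPATCH_LEVEL until the release flag is set by the releasing side.
static void ExclpRaiseIrqlAndWaitDpc(PKDPC dpc, void *deferred_context,
                                     void *system_argument1,
                                     void *system_argument2) {
  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(deferred_context);
  UNREFERENCED_PARAMETER(system_argument1);
  UNREFERENCED_PARAMETER(system_argument2);

  // Count this processor as locked.
  InterlockedIncrement(&g_ExclpNumberOfLockedProcessors);

  // Spin until the releasing side (not shown) sets the release flag.
  while (!InterlockedAdd(&g_ExclpReleaseAllProcessors, 0)) {
    KeStallExecutionProcessor(10);
  }

  // Check out so the releasing side can observe that everyone resumed.
  InterlockedDecrement(&g_ExclpNumberOfLockedProcessors);
}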
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try to mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
             ? VERR_CPU_NOT_FOUND
             : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    int rc;
    uint32_t cHits = 0;
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL))
    {
        rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
                                       idCpu, NIL_RTCPUID, &cHits);
        if (RT_SUCCESS(rc))
        {
            if (cHits == 1)
                return VINF_SUCCESS;
            rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
        }
        return rc;
    }

#if 0
    rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
    if (RT_SUCCESS(rc))
    {
        if (cHits == 1)
            return VINF_SUCCESS;
        rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
    }
    return rc;

#else
    /*
     * Initialize the argument package and the objects within it.
     * The package is reference counted to avoid unnecessary spinning to
     * synchronize cleanup and prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and insert the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
#ifndef IPRT_TARGET_NT4
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);
#endif

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening; try to bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline. */
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;
#endif
}
/**
 * Internal worker for the RTMpOn* APIs.
 *
 * @returns IPRT status code.
 * @param   pfnWorker   The callback.
 * @param   pvUser1     User argument 1.
 * @param   pvUser2     User argument 2.
 * @param   enmCpuid    What to do / is idCpu valid.
 * @param   idCpu       Used if enmCpuid is RT_NT_CPUID_SPECIFIC or
 *                      RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   idCpu2      Used if enmCpuid is RT_NT_CPUID_PAIR, otherwise ignored.
 * @param   pcHits      Where to return the number of CPUs the worker was
 *                      invoked on. Optional.
 */
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2,
                             RT_NT_CPUID enmCpuid, RTCPUID idCpu, RTCPUID idCpu2, uint32_t *pcHits)
{
    PRTMPARGS pArgs;
    KDPC     *paExecCpuDpcs;

#if 0
    /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
     * driver verifier doesn't complain...
     */
    AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif

#ifdef IPRT_TARGET_NT4
    KAFFINITY Mask;
    /* g_pfnrtNt* are not present on NT4 anyway. */
    return VERR_NOT_SUPPORTED;
#else
    KAFFINITY Mask = KeQueryActiveProcessors();
#endif

    /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
    if (!g_pfnrtNtKeFlushQueuedDpcs)
        return VERR_NOT_SUPPORTED;

    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS * sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;

    pArgs->pfnWorker = pfnWorker;
    pArgs->pvUser1   = pvUser1;
    pArgs->pvUser2   = pvUser2;
    pArgs->idCpu     = NIL_RTCPUID;
    pArgs->idCpu2    = NIL_RTCPUID;
    pArgs->cHits     = 0;
    pArgs->cRefs     = 1;

    paExecCpuDpcs = (KDPC *)(pArgs + 1);

    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
        pArgs->idCpu = idCpu;

        KeInitializeDpc(&paExecCpuDpcs[1], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[1], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[1], (int)idCpu2);
        pArgs->idCpu2 = idCpu2;
    }
    else
    {
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
        }
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /*
     * We cannot do other than assume a 1:1 relationship between the
     * affinity mask and the process despite the warnings in the docs.
     * If someone knows a better way to get this done, please let bird know.
     */
    ASMCompilerBarrier(); /* paranoia */
    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);
    }
    else if (enmCpuid == RT_NT_CPUID_PAIR)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);

        ASMAtomicIncS32(&pArgs->cRefs);
        ret = KeInsertQueueDpc(&paExecCpuDpcs[1], 0, 0);
        Assert(ret);
    }
    else
    {
        unsigned iSelf = KeGetCurrentProcessorNumber();
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            if (    (i != iSelf)
                &&  (Mask & RT_BIT_64(i)))
            {
                ASMAtomicIncS32(&pArgs->cRefs);
                BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
                Assert(ret);
            }
        }
        if (enmCpuid != RT_NT_CPUID_OTHERS)
            pfnWorker(iSelf, pvUser1, pvUser2);
    }

    KeLowerIrql(oldIrql);

    /* Flush all DPCs and wait for completion. (can take long!) */
    /** @todo Consider changing this to an active wait using some atomic inc/dec
     *  stuff (and check for the current cpu above in the specific case). */
    /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
     *        executed. Seen pArgs being freed while some CPU was using it before
     *        cRefs was added. */
    g_pfnrtNtKeFlushQueuedDpcs();

    if (pcHits)
        *pcHits = pArgs->cHits;

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);

    return VINF_SUCCESS;
}
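/*
 * A minimal sketch of the DPC wrapper used by rtMpCallUsingDpcs; the real
 * rtmpNtDPCWrapper is not shown in this example. It invokes the worker on the
 * CPU the DPC was targeted at, counts the hit, and drops the reference taken
 * when the DPC was queued, freeing the argument packet on the last release.
 */
static VOID rtmpNtDPCWrapper(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID pvArg1, IN PVOID pvArg2)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvUser;
    NOREF(pDpc); NOREF(pvArg1); NOREF(pvArg2);

    ASMAtomicIncU32(&pArgs->cHits);
    pArgs->pfnWorker(KeGetCurrentProcessorNumber(), pArgs->pvUser1, pArgs->pvUser2);

    /* Drop the reference taken before this DPC was queued. */
    if (ASMAtomicDecS32(&pArgs->cRefs) == 0)
        ExFreePool(pArgs);
}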
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser)
{
    *ppTimer = NULL;

    /*
     * Validate flags.
     */
    if (!RTTIMER_FLAGS_ARE_VALID(fFlags))
        return VERR_INVALID_PARAMETER;
    if (    (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC)
        &&  (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL
        &&  !RTMpIsCpuPossible(RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK)))
        return VERR_CPU_NOT_FOUND;

    /*
     * Allocate the timer handler.
     */
    RTCPUID cSubTimers = 1;
    if ((fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL)
    {
        cSubTimers = RTMpGetMaxCpuId() + 1;
        Assert(cSubTimers <= RTCPUSET_MAX_CPUS); /* On Windows we have a 1:1 relationship between cpuid and set index. */
    }

    PRTTIMER pTimer = (PRTTIMER)RTMemAllocZ(RT_OFFSETOF(RTTIMER, aSubTimers[cSubTimers]));
    if (!pTimer)
        return VERR_NO_MEMORY;

    /*
     * Initialize it.
     */
    pTimer->u32Magic = RTTIMER_MAGIC;
    pTimer->fSuspended = true;
    pTimer->fSpecificCpu = (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) && (fFlags & RTTIMER_FLAGS_CPU_ALL) != RTTIMER_FLAGS_CPU_ALL;
    pTimer->fOmniTimer = (fFlags & RTTIMER_FLAGS_CPU_ALL) == RTTIMER_FLAGS_CPU_ALL;
    pTimer->idCpu = pTimer->fSpecificCpu ? RTMpCpuIdFromSetIndex(fFlags & RTTIMER_FLAGS_CPU_MASK) : NIL_RTCPUID;
    pTimer->cSubTimers = cSubTimers;
    pTimer->pfnTimer = pfnTimer;
    pTimer->pvUser = pvUser;
    pTimer->u64NanoInterval = u64NanoInterval;
    KeInitializeTimerEx(&pTimer->NtTimer, SynchronizationTimer);

    if (pTimer->fOmniTimer)
    {
        /*
         * Initialize the per-cpu "sub-timers", select the first online cpu
         * to be the master.
         * ASSUMES that no cpus will ever go offline.
         */
        pTimer->idCpu = NIL_RTCPUID;
        for (unsigned iCpu = 0; iCpu < cSubTimers; iCpu++)
        {
            pTimer->aSubTimers[iCpu].iTick = 0;
            pTimer->aSubTimers[iCpu].pParent = pTimer;

            if (    pTimer->idCpu == NIL_RTCPUID
                &&  RTMpIsCpuOnline(RTMpCpuIdFromSetIndex(iCpu)))
            {
                pTimer->idCpu = RTMpCpuIdFromSetIndex(iCpu);
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniMasterCallback, &pTimer->aSubTimers[iCpu]);
            }
            else
                KeInitializeDpc(&pTimer->aSubTimers[iCpu].NtDpc, rtTimerNtOmniSlaveCallback, &pTimer->aSubTimers[iCpu]);
            KeSetImportanceDpc(&pTimer->aSubTimers[iCpu].NtDpc, HighImportance);
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[iCpu].NtDpc, (int)RTMpCpuIdFromSetIndex(iCpu));
        }
        Assert(pTimer->idCpu != NIL_RTCPUID);
    }
    else
    {
        /*
         * Initialize the first "sub-timer", target the DPC on a specific processor
         * if requested to do so.
         */
        pTimer->aSubTimers[0].iTick = 0;
        pTimer->aSubTimers[0].pParent = pTimer;

        KeInitializeDpc(&pTimer->aSubTimers[0].NtDpc, rtTimerNtSimpleCallback, pTimer);
        KeSetImportanceDpc(&pTimer->aSubTimers[0].NtDpc, HighImportance);
        if (pTimer->fSpecificCpu)
            KeSetTargetProcessorDpc(&pTimer->aSubTimers[0].NtDpc, (int)pTimer->idCpu);
    }

    *ppTimer = pTimer;
    return VINF_SUCCESS;
}
// Called at <= DISPATCH_LEVEL
static NDIS_STATUS
XenNet_Init(
  OUT PNDIS_STATUS OpenErrorStatus,
  OUT PUINT SelectedMediumIndex,
  IN PNDIS_MEDIUM MediumArray,
  IN UINT MediumArraySize,
  IN NDIS_HANDLE MiniportAdapterHandle,
  IN NDIS_HANDLE WrapperConfigurationContext
  )
{
  NDIS_STATUS status;
  BOOLEAN medium_found = FALSE;
  struct xennet_info *xi = NULL;
  UINT nrl_length;
  PNDIS_RESOURCE_LIST nrl;
  PCM_PARTIAL_RESOURCE_DESCRIPTOR prd;
  KIRQL irq_level = 0;
  ULONG irq_vector = 0;
  ULONG irq_mode = 0;
  NDIS_HANDLE config_handle;
  NDIS_STRING config_param_name;
  PNDIS_CONFIGURATION_PARAMETER config_param;
  ULONG i;
  PUCHAR ptr;
  UCHAR type;
  PCHAR setting, value;
  ULONG length;
  //CHAR buf[128];
  PVOID network_address;
  UINT network_address_length;
  BOOLEAN qemu_hide_filter = FALSE;
  ULONG qemu_hide_flags_value = 0;

  FUNCTION_ENTER();

  KdPrint((__DRIVER_NAME " IRQL = %d\n", KeGetCurrentIrql()));

  /* deal with medium stuff */
  for (i = 0; i < MediumArraySize; i++)
  {
    if (MediumArray[i] == NdisMedium802_3)
    {
      medium_found = TRUE;
      break;
    }
  }
  if (!medium_found)
  {
    KdPrint(("NIC_MEDIA_TYPE not in MediumArray\n"));
    return NDIS_STATUS_UNSUPPORTED_MEDIA;
  }
  *SelectedMediumIndex = i;

  /* alloc memory for adapter private info */
  status = NdisAllocateMemoryWithTag((PVOID)&xi, sizeof(*xi), XENNET_POOL_TAG);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("NdisAllocateMemoryWithTag failed with 0x%x\n", status));
    status = NDIS_STATUS_RESOURCES;
    goto err;
  }
  RtlZeroMemory(xi, sizeof(*xi));
  xi->adapter_handle = MiniportAdapterHandle;
  xi->rx_target = RX_DFL_MIN_TARGET;
  xi->rx_min_target = RX_DFL_MIN_TARGET;
  xi->rx_max_target = RX_MAX_TARGET;
  xi->inactive = TRUE;

  NdisMSetAttributesEx(xi->adapter_handle, (NDIS_HANDLE)xi, 0,
    0 /* the last zero is to give the next | something to | with */
#ifdef NDIS51_MINIPORT
    |NDIS_ATTRIBUTE_USES_SAFE_BUFFER_APIS
#endif
    |NDIS_ATTRIBUTE_DESERIALIZE
    |NDIS_ATTRIBUTE_SURPRISE_REMOVE_OK,
    NdisInterfaceInternal); /* PnpBus option doesn't exist... */

  xi->multicast_list_size = 0;
  xi->current_lookahead = MIN_LOOKAHEAD_LENGTH;

  nrl_length = 0;
  NdisMQueryAdapterResources(&status, WrapperConfigurationContext, NULL, (PUINT)&nrl_length);
  KdPrint((__DRIVER_NAME " nrl_length = %d\n", nrl_length));
  status = NdisAllocateMemoryWithTag((PVOID)&nrl, nrl_length, XENNET_POOL_TAG);
  if (status != NDIS_STATUS_SUCCESS)
  {
    KdPrint((__DRIVER_NAME " Could not allocate memory for Adapter Resources 0x%x\n", status));
    return NDIS_STATUS_RESOURCES;
  }
  NdisMQueryAdapterResources(&status, WrapperConfigurationContext, nrl, (PUINT)&nrl_length);
  if (status != NDIS_STATUS_SUCCESS)
  {
    KdPrint((__DRIVER_NAME " Could not get Adapter Resources 0x%x\n", status));
    return NDIS_STATUS_RESOURCES;
  }

  xi->event_channel = 0;
  xi->config_csum = 1;
  xi->config_csum_rx_check = 1;
  xi->config_sg = 1;
  xi->config_gso = 61440;
  xi->config_page = NULL;
  xi->config_rx_interrupt_moderation = 0;

  for (i = 0; i < nrl->Count; i++)
  {
    prd = &nrl->PartialDescriptors[i];

    switch(prd->Type)
    {
    case CmResourceTypeInterrupt:
      irq_vector = prd->u.Interrupt.Vector;
      irq_level = (KIRQL)prd->u.Interrupt.Level;
      irq_mode = (prd->Flags & CM_RESOURCE_INTERRUPT_LATCHED) ? NdisInterruptLatched : NdisInterruptLevelSensitive;
      KdPrint((__DRIVER_NAME " irq_vector = %03x, irq_level = %03x, irq_mode = %s\n", irq_vector, irq_level,
        (irq_mode == NdisInterruptLatched) ? "NdisInterruptLatched" : "NdisInterruptLevelSensitive"));
      break;
    case CmResourceTypeMemory:
      if (xi->config_page)
      {
        KdPrint(("More than one memory range\n"));
        NdisFreeMemory(nrl, nrl_length, 0);
        return NDIS_STATUS_RESOURCES;
      }
      else
      {
        status = NdisMMapIoSpace(&xi->config_page, MiniportAdapterHandle, prd->u.Memory.Start, prd->u.Memory.Length);
        if (!NT_SUCCESS(status))
        {
          KdPrint(("NdisMMapIoSpace failed with 0x%x\n", status));
          NdisFreeMemory(nrl, nrl_length, 0);
          return NDIS_STATUS_RESOURCES;
        }
      }
      break;
    }
  }
  NdisFreeMemory(nrl, nrl_length, 0);
  if (!xi->config_page)
  {
    KdPrint(("No config page given\n"));
    return NDIS_STATUS_RESOURCES;
  }

  KeInitializeDpc(&xi->suspend_dpc, XenNet_SuspendResume, xi);
  KeInitializeSpinLock(&xi->resume_lock);

  KeInitializeDpc(&xi->rxtx_dpc, XenNet_RxTxDpc, xi);
  /* target the RX/TX DPC at CPU 0 and raise its importance */
  KeSetTargetProcessorDpc(&xi->rxtx_dpc, 0);
  KeSetImportanceDpc(&xi->rxtx_dpc, HighImportance);

  NdisMGetDeviceProperty(MiniportAdapterHandle, &xi->pdo, &xi->fdo, &xi->lower_do, NULL, NULL);
  xi->packet_filter = 0;

  status = IoGetDeviceProperty(xi->pdo, DevicePropertyDeviceDescription, NAME_SIZE, xi->dev_desc, &length);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("IoGetDeviceProperty failed with 0x%x\n", status));
    status = NDIS_STATUS_FAILURE;
    goto err;
  }

  ptr = xi->config_page;
  while ((type = GET_XEN_INIT_RSP(&ptr, (PVOID)&setting, (PVOID)&value, (PVOID)&value)) != XEN_INIT_TYPE_END)
  {
    switch(type)
    {
    case XEN_INIT_TYPE_VECTORS:
      KdPrint((__DRIVER_NAME " XEN_INIT_TYPE_VECTORS\n"));
      if (((PXENPCI_VECTORS)value)->length != sizeof(XENPCI_VECTORS) ||
        ((PXENPCI_VECTORS)value)->magic != XEN_DATA_MAGIC)
      {
        KdPrint((__DRIVER_NAME " vectors mismatch (magic = %08x, length = %d)\n",
          ((PXENPCI_VECTORS)value)->magic, ((PXENPCI_VECTORS)value)->length));
        FUNCTION_EXIT();
        return NDIS_STATUS_FAILURE;
      }
      else
        memcpy(&xi->vectors, value, sizeof(XENPCI_VECTORS));
      break;
    case XEN_INIT_TYPE_STATE_PTR:
      KdPrint((__DRIVER_NAME " XEN_INIT_TYPE_DEVICE_STATE - %p\n", PtrToUlong(value)));
      xi->device_state = (PXENPCI_DEVICE_STATE)value;
      break;
    case XEN_INIT_TYPE_QEMU_HIDE_FLAGS:
      qemu_hide_flags_value = PtrToUlong(value);
      break;
    case XEN_INIT_TYPE_QEMU_HIDE_FILTER:
      qemu_hide_filter = TRUE;
      break;
    default:
      KdPrint((__DRIVER_NAME " XEN_INIT_TYPE_%d\n", type));
      break;
    }
  }

  if ((qemu_hide_flags_value & QEMU_UNPLUG_ALL_IDE_DISKS) || qemu_hide_filter)
    xi->inactive = FALSE;

  xi->power_state = NdisDeviceStateD0;
  xi->power_workitem = IoAllocateWorkItem(xi->fdo);

  /* now read our configuration from the registry */
  NdisOpenConfiguration(&status, &config_handle, WrapperConfigurationContext);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not open config in registry (%08x)\n", status));
    status = NDIS_STATUS_RESOURCES;
    goto err;
  }

  NdisInitUnicodeString(&config_param_name, L"ScatterGather");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read ScatterGather value (%08x)\n", status));
    xi->config_sg = 1;
  }
  else
  {
    KdPrint(("ScatterGather = %d\n", config_param->ParameterData.IntegerData));
    xi->config_sg = config_param->ParameterData.IntegerData;
  }

  NdisInitUnicodeString(&config_param_name, L"LargeSendOffload");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read LargeSendOffload value (%08x)\n", status));
    xi->config_gso = 0;
  }
  else
  {
    KdPrint(("LargeSendOffload = %d\n", config_param->ParameterData.IntegerData));
    xi->config_gso = config_param->ParameterData.IntegerData;
    if (xi->config_gso > 61440)
    {
      xi->config_gso = 61440;
      KdPrint(("(clipped to %d)\n", xi->config_gso));
    }
  }

  NdisInitUnicodeString(&config_param_name, L"ChecksumOffload");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read ChecksumOffload value (%08x)\n", status));
    xi->config_csum = 1;
  }
  else
  {
    KdPrint(("ChecksumOffload = %d\n", config_param->ParameterData.IntegerData));
    xi->config_csum = !!config_param->ParameterData.IntegerData;
  }

  NdisInitUnicodeString(&config_param_name, L"ChecksumOffloadRxCheck");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read ChecksumOffloadRxCheck value (%08x)\n", status));
    xi->config_csum_rx_check = 1;
  }
  else
  {
    KdPrint(("ChecksumOffloadRxCheck = %d\n", config_param->ParameterData.IntegerData));
    xi->config_csum_rx_check = !!config_param->ParameterData.IntegerData;
  }

  NdisInitUnicodeString(&config_param_name, L"ChecksumOffloadDontFix");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read ChecksumOffloadDontFix value (%08x)\n", status));
    xi->config_csum_rx_dont_fix = 0;
  }
  else
  {
    KdPrint(("ChecksumOffloadDontFix = %d\n", config_param->ParameterData.IntegerData));
    xi->config_csum_rx_dont_fix = !!config_param->ParameterData.IntegerData;
  }

  NdisInitUnicodeString(&config_param_name, L"MTU");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read MTU value (%08x)\n", status));
    xi->config_mtu = 1500;
  }
  else
  {
    KdPrint(("MTU = %d\n", config_param->ParameterData.IntegerData));
    xi->config_mtu = config_param->ParameterData.IntegerData;
  }

  NdisInitUnicodeString(&config_param_name, L"RxInterruptModeration");
  NdisReadConfiguration(&status, &config_param, config_handle, &config_param_name, NdisParameterInteger);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Could not read RxInterruptModeration value (%08x)\n", status));
    xi->config_rx_interrupt_moderation = 0; /* matches the default above; the original set 1500 here, apparently copied from the MTU block */
  }
  else
  {
    KdPrint(("RxInterruptModeration = %d\n", config_param->ParameterData.IntegerData));
    xi->config_rx_interrupt_moderation = config_param->ParameterData.IntegerData;
  }

  NdisReadNetworkAddress(&status, &network_address, &network_address_length, config_handle);
  if (!NT_SUCCESS(status) || network_address_length != ETH_ALEN ||
    ((((PUCHAR)network_address)[0] & 0x03) != 0x02))
  {
    KdPrint(("Could not read NetworkAddress value (%08x) or value is invalid\n", status));
    memset(xi->curr_mac_addr, 0, ETH_ALEN);
  }
  else
  {
    memcpy(xi->curr_mac_addr, network_address, ETH_ALEN);
    KdPrint((" Set MAC address from registry to %02X:%02X:%02X:%02X:%02X:%02X\n",
      xi->curr_mac_addr[0], xi->curr_mac_addr[1], xi->curr_mac_addr[2],
      xi->curr_mac_addr[3], xi->curr_mac_addr[4], xi->curr_mac_addr[5]));
  }

  xi->config_max_pkt_size = max(xi->config_mtu + XN_HDR_SIZE, xi->config_gso + XN_HDR_SIZE);

  NdisCloseConfiguration(config_handle);

  status = XenNet_D0Entry(xi);
  if (!NT_SUCCESS(status))
  {
    KdPrint(("Failed to go to D0 (%08x)\n", status));
    goto err;
  }

  return NDIS_STATUS_SUCCESS;

err:
  if (xi)
    NdisFreeMemory(xi, 0, 0);
  *OpenErrorStatus = status;
  FUNCTION_EXIT_STATUS(status);
  return status;
}
/* KphAcquireProcessorLock
 *
 * Raises the IRQL to DISPATCH_LEVEL and prevents threads from
 * executing on other processors until the processor lock is released.
 * Blocks if the supplied processor lock is already in use.
 *
 * ProcessorLock: A processor lock structure that is present in
 * non-paged memory.
 *
 * Comments:
 * Here is how the processor lock works:
 *  1. Tries to acquire the mutex in the processor lock, and
 *     blocks until it can be obtained.
 *  2. Initializes a DPC for each processor on the computer.
 *  3. Raises the IRQL to DISPATCH_LEVEL to make sure the
 *     code is not interrupted by a context switch.
 *  4. Queues each of the previously-initialized DPCs, except if
 *     it is targeted at the current processor.
 *  5. Since DPCs run at DISPATCH_LEVEL, they have exclusive
 *     control of the processor. As each runs, they increment
 *     a counter in the processor lock. They then enter a loop.
 *  6. The routine waits for the counter to become n - 1,
 *     signaling that all (other) processors have been acquired
 *     (where n is the number of processors).
 *  7. It returns. Any code from here on will be running at
 *     DISPATCH_LEVEL and will be the only code running on the
 *     machine.
 *
 * Thread safety: Full
 * IRQL: <= APC_LEVEL
 */
BOOLEAN KphAcquireProcessorLock(
    __inout PKPH_PROCESSOR_LOCK ProcessorLock
    )
{
    ULONG i;
    ULONG numberProcessors;
    ULONG currentProcessor;

    /* Acquire the processor lock guarded lock. */
    KphAcquireGuardedLock(&ProcessorLock->Lock);

    /* Reset some state. */
    ASSERT(ProcessorLock->AcquiredProcessors == 0);
    ProcessorLock->AcquiredProcessors = 0;
    ProcessorLock->ReleaseSignal = 0; /* IMPORTANT */

    /* Get the number of processors. */
    numberProcessors = KphpCountBits(KeQueryActiveProcessors());

    /* If there's only one processor we can simply raise the IRQL and exit. */
    if (numberProcessors == 1)
    {
        dprintf("KphAcquireProcessorLock: Only one processor, raising IRQL and exiting...\n");
        KeRaiseIrql(DISPATCH_LEVEL, &ProcessorLock->OldIrql);
        ProcessorLock->Acquired = TRUE;

        return TRUE;
    }

    /* Allocate storage for the DPCs. */
    ProcessorLock->Dpcs = ExAllocatePoolWithTag(
        NonPagedPool,
        sizeof(KDPC) * numberProcessors,
        TAG_SYNC_DPC
        );

    if (!ProcessorLock->Dpcs)
    {
        dprintf("KphAcquireProcessorLock: Could not allocate storage for DPCs!\n");
        KphReleaseGuardedLock(&ProcessorLock->Lock);
        return FALSE;
    }

    /* Initialize the DPCs. */
    for (i = 0; i < numberProcessors; i++)
    {
        KeInitializeDpc(&ProcessorLock->Dpcs[i], KphpProcessorLockDpc, NULL);
        KeSetTargetProcessorDpc(&ProcessorLock->Dpcs[i], (CCHAR)i);
        KeSetImportanceDpc(&ProcessorLock->Dpcs[i], HighImportance);
    }

    /* Raise the IRQL to DISPATCH_LEVEL to prevent context switching. */
    KeRaiseIrql(DISPATCH_LEVEL, &ProcessorLock->OldIrql);

    /* Get the current processor number. */
    currentProcessor = KeGetCurrentProcessorNumber();

    /* Queue the DPCs (except on the current processor). */
    for (i = 0; i < numberProcessors; i++)
        if (i != currentProcessor)
            KeInsertQueueDpc(&ProcessorLock->Dpcs[i], ProcessorLock, NULL);

    /* Spinwait for all (other) processors to be acquired. */
    KphSpinUntilEqual(&ProcessorLock->AcquiredProcessors, numberProcessors - 1);

    dprintf("KphAcquireProcessorLock: All processors acquired.\n");
    ProcessorLock->Acquired = TRUE;

    return TRUE;
}
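/* A minimal sketch of the DPC routine referenced above; the real
 * KphpProcessorLockDpc is not shown in this example. Each instance runs at
 * DISPATCH_LEVEL on its target processor, checks in, then spins until the
 * release signal is set (step 5 in the comments above). Note that the
 * processor lock is passed as SystemArgument1 by KeInsertQueueDpc.
 */
VOID KphpProcessorLockDpc(
    __in PKDPC Dpc,
    __in_opt PVOID DeferredContext,
    __in_opt PVOID SystemArgument1,
    __in_opt PVOID SystemArgument2
    )
{
    PKPH_PROCESSOR_LOCK processorLock = (PKPH_PROCESSOR_LOCK)SystemArgument1;

    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(DeferredContext);
    UNREFERENCED_PARAMETER(SystemArgument2);

    /* Check in as an acquired processor. */
    InterlockedIncrement(&processorLock->AcquiredProcessors);

    /* Hold this processor until the lock is released. */
    while (!processorLock->ReleaseSignal)
        YieldProcessor();

    /* Check out again. */
    InterlockedDecrement(&processorLock->AcquiredProcessors);
}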