/**
 * The timer callback for an omni-timer.
 *
 * This is responsible for queueing the DPCs for the other CPUs and
 * performing the callback on the CPU on which it is called.
 *
 * @param   pDpc                The DPC object.
 * @param   pvUser              Pointer to the sub-timer.
 * @param   SystemArgument1     Some system stuff.
 * @param   SystemArgument2     Some system stuff.
 */
static void _stdcall rtTimerNtOmniMasterCallback(IN PKDPC pDpc, IN PVOID pvUser, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
{
    PRTTIMERNTSUBTIMER pSubTimer = (PRTTIMERNTSUBTIMER)pvUser;
    PRTTIMER pTimer = pSubTimer->pParent;
    int iCpuSelf = RTMpCpuIdToSetIndex(RTMpCpuId());

    AssertPtr(pTimer);
#ifdef RT_STRICT
    if (KeGetCurrentIrql() < DISPATCH_LEVEL)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: Irql=%d expected >=%d\n", KeGetCurrentIrql(), DISPATCH_LEVEL);
    if (pSubTimer - &pTimer->aSubTimers[0] != iCpuSelf)
        RTAssertMsg2Weak("rtTimerNtOmniMasterCallback: iCpuSelf=%d pSubTimer=%p / %d\n",
                         iCpuSelf, pSubTimer, pSubTimer - &pTimer->aSubTimers[0]);
#endif

    /*
     * Check that we haven't been suspended before scheduling the other DPCs
     * and doing the callout.
     */
    if (    !ASMAtomicUoReadBool(&pTimer->fSuspended)
        &&  pTimer->u32Magic == RTTIMER_MAGIC)
    {
        RTCPUSET OnlineSet;
        RTMpGetOnlineSet(&OnlineSet);
        for (int iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++)
            if (    RTCpuSetIsMemberByIndex(&OnlineSet, iCpu)
                &&  iCpuSelf != iCpu)
                KeInsertQueueDpc(&pTimer->aSubTimers[iCpu].NtDpc, 0, 0);

        pTimer->pfnTimer(pTimer, pTimer->pvUser, ++pSubTimer->iTick);
    }

    NOREF(pDpc); NOREF(SystemArgument1); NOREF(SystemArgument2);
}
VOID
XenPci_HighSync(PXENPCI_HIGHSYNC_FUNCTION function0, PXENPCI_HIGHSYNC_FUNCTION functionN, PVOID context)
{
  ULONG ActiveProcessorCount;
  ULONG i;
  highsync_info_t *highsync_info;
  KIRQL old_irql;

  UNREFERENCED_PARAMETER(context);

  FUNCTION_ENTER();

  highsync_info = ExAllocatePoolWithTag(NonPagedPool, sizeof(highsync_info_t), XENPCI_POOL_TAG);
  RtlZeroMemory(highsync_info, sizeof(highsync_info_t));
  KeInitializeEvent(&highsync_info->highsync_complete_event, SynchronizationEvent, FALSE);
  highsync_info->function0 = function0;
  highsync_info->functionN = functionN;
  highsync_info->context = context;
  highsync_info->sync_level = HIGH_LEVEL;

#if (NTDDI_VERSION >= NTDDI_WINXP)
  ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
  ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif

  /* Go to HIGH_LEVEL to prevent any races with DPCs on the current processor */
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  highsync_info->do_spin = TRUE;
  for (i = 0; i < ActiveProcessorCount; i++)
  {
    if (i == 0)
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunction0, highsync_info);
    else
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunctionN, highsync_info);
    KeSetTargetProcessorDpc(&highsync_info->dpcs[i], (CCHAR)i);
    KeSetImportanceDpc(&highsync_info->dpcs[i], HighImportance);
    KdPrint((__DRIVER_NAME " queuing Dpc for CPU %d\n", i));
    KeInsertQueueDpc(&highsync_info->dpcs[i], NULL, NULL);
  }
  KdPrint((__DRIVER_NAME " All Dpc's queued\n"));

  KeMemoryBarrier();
  KeLowerIrql(old_irql);

  KdPrint((__DRIVER_NAME " Waiting for highsync_complete_event\n"));
  KeWaitForSingleObject(&highsync_info->highsync_complete_event, Executive, KernelMode, FALSE, NULL);

#if (NTDDI_VERSION >= NTDDI_WINXP)
  KeFlushQueuedDpcs();
#else
  {
    /* just wait 1 second until all DPCs finish - not ideal but it's only for W2K */
    LARGE_INTEGER interval;
    interval.QuadPart = -1 * 1000 * 1000 * 10; /* 1 second */
    KeDelayExecutionThread(KernelMode, FALSE, &interval);
  }
#endif

  ExFreePoolWithTag(highsync_info, XENPCI_POOL_TAG);

  FUNCTION_EXIT();
}
BOOLEAN RosKmAdapter::InterruptRoutine(
    IN_ULONG        MessageNumber)
{
    MessageNumber;

#if VC4_TODO
    RosKmAdapter *pRosKmdAdapter = RosKmAdapter::Cast(MiniportDeviceContext);

    if (!m_bReadyToHandleInterrupt)
    {
        return FALSE;
    }

    // Acknowledge the interrupt

    // If the interrupt is for DMA buffer completion,
    // queue the DPC to wake up the worker thread
    KeInsertQueueDpc(&pRosKmdAdapter->m_hwDmaBufCompletionDpc, NULL, NULL);

    return TRUE;
#else
    return FALSE;
#endif
}
_IRQL_requires_same_
static DECLSPEC_NOINLINE
BOOLEAN
EvtchnInterruptHandler(
    __in      PKINTERRUPT Interrupt,
    __in_opt  PVOID       Argument
    )
{
    PXENIFACE_EVTCHN_CONTEXT Context = Argument;
    PROCESSOR_NUMBER ProcNumber;
    ULONG ProcIndex;

    UNREFERENCED_PARAMETER(Interrupt);
    ASSERT(Context != NULL);

    KeGetCurrentProcessorNumberEx(&ProcNumber);
    ProcIndex = KeGetProcessorIndexFromNumber(&ProcNumber);

    if (!KeInsertQueueDpc(&Context->Dpc, NULL, NULL)) {
        XenIfaceDebugPrint(TRACE, "NOT INSERTED: Context %p, Port %lu, FO %p, Cpu %lu\n",
                           Context, Context->LocalPort, Context->FileObject, ProcIndex);
    }

    return TRUE;
}
NTSTATUS KrnlHlprThreadedDPCQueue(_In_ KDEFERRED_ROUTINE* pDPCFn,
                                  _In_ CLASSIFY_DATA* pClassifyData,
                                  _In_opt_ INJECTION_DATA* pInjectionData, /* 0 */
                                  _In_opt_ VOID* pContext)                 /* 0 */
{
#if DBG
   DbgPrintEx(DPFLTR_IHVNETWORK_ID,
              DPFLTR_INFO_LEVEL,
              " ---> KrnlHlprThreadedDPCQueue()\n");
#endif /// DBG

   NT_ASSERT(pDPCFn);
   NT_ASSERT(pClassifyData);

   NTSTATUS  status   = STATUS_SUCCESS;
   DPC_DATA* pDPCData = 0;

#pragma warning(push)
#pragma warning(disable: 6014) /// pDPCData will be freed by caller

   status = KrnlHlprDPCDataCreate(&pDPCData,
                                  pClassifyData,
                                  pInjectionData,
                                  pContext);
   HLPR_BAIL_ON_FAILURE(status);

#pragma warning(pop)

   KeInitializeThreadedDpc(&(pDPCData->kdpc),
                           pDPCFn,
                           0);

   KeInsertQueueDpc(&(pDPCData->kdpc),
                    pDPCData,
                    0);

   HLPR_BAIL_LABEL:

#pragma warning(push)
#pragma warning(disable: 6001) /// pDPCData initialized with call to KrnlHlprDPCDataCreate

   if(status != STATUS_SUCCESS &&
      pDPCData)
      KrnlHlprDPCDataDestroy(&pDPCData);

#pragma warning(pop)

#if DBG
   DbgPrintEx(DPFLTR_IHVNETWORK_ID,
              DPFLTR_INFO_LEVEL,
              " <--- KrnlHlprThreadedDPCQueue() [status: %#x]\n",
              status);
#endif /// DBG

   return status;
}
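The threaded DPC queued above is handed a routine with the ordinary KDEFERRED_ROUTINE prototype. A minimal sketch of such a routine follows; the routine name and the work inside it are hypothetical and wdm.h declarations are assumed. The one behaviour worth remembering is that a threaded DPC normally runs at PASSIVE_LEVEL but runs at DISPATCH_LEVEL when threaded DPCs are disabled system-wide, so the routine must be safe at either IRQL.

/*
 * Hedged sketch of a deferred routine suitable for the KeInitializeThreadedDpc
 * call above. Only the KDEFERRED_ROUTINE prototype and the documented IRQL
 * behaviour are assumed; the name and the work inside are made up.
 */
KDEFERRED_ROUTINE SampleThreadedDpcRoutine;

_Use_decl_annotations_
VOID
SampleThreadedDpcRoutine(
   PKDPC Dpc,
   PVOID DeferredContext,   /* third argument of KeInitializeThreadedDpc (0 above) */
   PVOID SystemArgument1,   /* pDPCData, as passed to KeInsertQueueDpc above */
   PVOID SystemArgument2)
{
   DPC_DATA* pDPCData = (DPC_DATA*)SystemArgument1;

   UNREFERENCED_PARAMETER(Dpc);
   UNREFERENCED_PARAMETER(DeferredContext);
   UNREFERENCED_PARAMETER(SystemArgument2);

   /* A threaded DPC normally runs at PASSIVE_LEVEL, but runs at
      DISPATCH_LEVEL if threaded DPCs are disabled on the system, so only
      touch pageable data after checking the IRQL. */
   if (KeGetCurrentIrql() == PASSIVE_LEVEL)
   {
      /* ...work that may touch pageable memory... */
   }
   else
   {
      /* ...DISPATCH_LEVEL-safe work only... */
   }

   /* Ownership of pDPCData is handled elsewhere (see the warning 6014
      suppression in the helper above). */
   UNREFERENCED_PARAMETER(pDPCData);
}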
BOOLEAN
WDFEXPORT(WdfDpcEnqueue)(
    __in
    PWDF_DRIVER_GLOBALS DriverGlobals,
    __in
    WDFDPC Dpc
    )

/*++

Routine Description:

    Enqueue the DPC to run at a system determined time.

Arguments:

    WDFDPC - Handle to WDFDPC object created with WdfDpcCreate.

Returns:

    TRUE if the DPC was queued; FALSE if it was already in the queue.

--*/

{
    FxDpc* pFxDpc;

    FxObjectHandleGetPtr(GetFxDriverGlobals(DriverGlobals),
                         Dpc,
                         FX_TYPE_DPC,
                         (PVOID*)&pFxDpc);

    return KeInsertQueueDpc(pFxDpc->GetDpcPtr(), NULL, NULL);
}
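For reference, this is roughly how a KMDF driver reaches the framework call above: configure and create a WDFDPC, then enqueue it. A minimal sketch, assuming wdf.h, a hypothetical EvtMyDpc callback, and a device to parent the DPC object to.

/* Hedged sketch of typical KMDF usage that ends up in the export above.
   EvtMyDpc, CreateAndQueueDpc, and the WDFDEVICE parent are hypothetical. */
EVT_WDF_DPC EvtMyDpc;

VOID
EvtMyDpc(
    _In_ WDFDPC Dpc
    )
{
    /* Runs at DISPATCH_LEVEL; WdfDpcGetParentObject(Dpc) can recover the
       parent object if context is needed. */
    UNREFERENCED_PARAMETER(Dpc);
}

NTSTATUS
CreateAndQueueDpc(
    _In_  WDFDEVICE Device,
    _Out_ WDFDPC*   pDpc
    )
{
    WDF_DPC_CONFIG        dpcConfig;
    WDF_OBJECT_ATTRIBUTES dpcAttributes;
    NTSTATUS              status;

    WDF_DPC_CONFIG_INIT(&dpcConfig, EvtMyDpc);

    WDF_OBJECT_ATTRIBUTES_INIT(&dpcAttributes);
    dpcAttributes.ParentObject = Device;    /* DPC lifetime tied to the device */

    status = WdfDpcCreate(&dpcConfig, &dpcAttributes, pDpc);
    if (!NT_SUCCESS(status)) {
        return status;
    }

    /* Returns TRUE if the DPC was not already queued; as shown above, this
       is a thin wrapper around KeInsertQueueDpc on the embedded KDPC. */
    WdfDpcEnqueue(*pDpc);

    return STATUS_SUCCESS;
}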
void dtrace_hook_int(UCHAR ivec, void (*InterruptHandler)( void ), uintptr_t *paddr)
{
    INT_VECTOR OrgVec;
    int i;
    PRKDPC Dpc;

    cpunos = 0;

    if (paddr != 0) {
        BackupInterrupt(ivec, &OrgVec);
#ifdef _AMD64_
        *(ULONG64 *)paddr = VEC_OFFSET_TO_ADDR(OrgVec);
#else
        *(ULONG32 *)paddr = VEC_OFFSET_TO_ADDR(OrgVec);
#endif
    }

    Dpc = (PRKDPC) ExAllocatePoolWithTag(NonPagedPool, sizeof(KDPC)*KeNumberProcessors, 'Tag1');

    for (i = 0; i < KeNumberProcessors; i++) {
        KeInitializeDpc(&Dpc[i], hook_init, NULL);
    }

    KeInitializeEvent(&SyncIDT, NotificationEvent, FALSE);

    for (i = 0; i < KeNumberProcessors; i++) {
        KeSetTargetProcessorDpc(&Dpc[i], (char) i);
        KeSetImportanceDpc(&Dpc[i], HighImportance);
        KeInsertQueueDpc(&Dpc[i], (PVOID) ivec, (PVOID)InterruptHandler);
    }

    KeWaitForSingleObject(&SyncIDT, Executive, KernelMode, 0, NULL);
    KeClearEvent(&SyncIDT);
    ExFreePoolWithTag(Dpc, 'Tag1');
}
VOID
SendEachProcessorDpc(
    PKDEFERRED_ROUTINE Routine,
    PVOID Context,
    PVOID SysArg1,
    PVOID SysArg2
    )

/*++

Routine Description

    This routine sends a DPC to each processor in a multiprocessor system.

Arguments

    Routine                     Deferred routine
    Context, SysArg1, SysArg2   Parameters, see MSDN doc for KeInitializeDpc,
                                KeInsertQueueDpc

Return Value

    None

--*/

{
    UNICODE_STRING u;
    RtlInitUnicodeString (&u, L"KeFlushQueuedDpcs");
    *(PVOID*)&pKeFlushQueuedDpcs = MmGetSystemRoutineAddress (&u);

    for (CCHAR i=0; i<KeNumberProcessors; i++)
    {
        KDPC Dpc;

        KdPrint(("SendEachProcessorDpc: processor [%d] in queue\n", i));

        KeInitializeDpc (&Dpc, Routine, Context);
        KeSetTargetProcessorDpc (&Dpc, i);
        KeInsertQueueDpc (&Dpc, SysArg1, SysArg2);

        KdPrint(("SendEachProcessorDpc: processor [%d] completed its DPC\n", i));
    }

    if (pKeFlushQueuedDpcs)
    {
        // Ensure that all DPCs are delivered.
        pKeFlushQueuedDpcs ();
    }
    else
    {
        KdPrint(("pKeFlushQueuedDpcs = NULL!!!\n"));
    }

    KdPrint(("SendEachProcessorDpc: all completed\n"));
}
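The Routine parameter above (together with the Context, SysArg1, and SysArg2 values passed through KeInitializeDpc and KeInsertQueueDpc) must match the KDEFERRED_ROUTINE prototype. A minimal sketch of a per-processor deferred routine is shown below; the routine name and the completion counter are hypothetical, and ntddk.h is assumed.

/* Hedged sketch of a deferred routine matching the KDEFERRED_ROUTINE
   prototype expected by the code above. The routine name and the
   g_ProcessorsDone counter are hypothetical. */
static volatile LONG g_ProcessorsDone = 0;

KDEFERRED_ROUTINE PerProcessorDpcRoutine;

_Use_decl_annotations_
VOID
PerProcessorDpcRoutine(
    PKDPC Dpc,
    PVOID DeferredContext,   /* Context argument of KeInitializeDpc */
    PVOID SystemArgument1,   /* SysArg1 argument of KeInsertQueueDpc */
    PVOID SystemArgument2)   /* SysArg2 argument of KeInsertQueueDpc */
{
    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(DeferredContext);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);

    /* Runs at DISPATCH_LEVEL on the processor chosen by
       KeSetTargetProcessorDpc; do the per-CPU work here. */
    InterlockedIncrement(&g_ProcessorsDone);
}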
BOOLEAN NTAPI
InterruptService(PKINTERRUPT Interrupt, PVOID ServiceContext)
{
    PFDO_DEVICE_EXTENSION FdoDeviceExtension;
    PDEVICE_OBJECT DeviceObject = (PDEVICE_OBJECT) ServiceContext;
    PEHCI_HOST_CONTROLLER hcd;
    ULONG CStatus = 0;

    FdoDeviceExtension = (PFDO_DEVICE_EXTENSION) DeviceObject->DeviceExtension;
    hcd = &FdoDeviceExtension->hcd;

    /* Read device status */
    CStatus = ReadControllerStatus(hcd);

    CStatus &= (EHCI_ERROR_INT | EHCI_STS_INT | EHCI_STS_IAA | EHCI_STS_PCD | EHCI_STS_FLR | EHCI_STS_RECL);

    if ((!CStatus) || (FdoDeviceExtension->DeviceState == 0))
    {
        /* This interrupt isn't for us, or we're not ready for it */
        return FALSE;
    }

    /* Clear status */
    ClearControllerStatus(hcd, CStatus);

    if (CStatus & EHCI_STS_RECL)
    {
        DPRINT("Reclamation\n");
    }

    if (CStatus & EHCI_ERROR_INT)
    {
        DPRINT1("EHCI Status=0x%x\n", CStatus);
        /* This check was added in case the NT USB driver is still loading.
           It will cause this error condition at every device connect. */
        if (CStatus & EHCI_STS_PCD)
        {
            DPRINT1("EHCI Error: Another driver may be interfering with proper operation of this driver\n");
            DPRINT1(" Hint: Ensure that the old NT Usb Driver has been removed!\n");
            ASSERT(FALSE);
        }
    }

    if (CStatus & EHCI_STS_FATAL)
    {
        DPRINT1("EHCI: Host System Error. Possible PCI problems.\n");
        ASSERT(FALSE);
    }

    if (CStatus & EHCI_STS_HALT)
    {
        DPRINT1("EHCI: Host Controller unexpected halt.\n");
        /* FIXME: Reset the controller */
    }

    KeInsertQueueDpc(&FdoDeviceExtension->DpcObject, FdoDeviceExtension, (PVOID)CStatus);
    return TRUE;
}
static VOID
V4vVirqNotifyIsr(VOID *ctx)
{
    XENV4V_EXTENSION *pde = V4vGetDeviceExtension((DEVICE_OBJECT*)ctx);

    // Just drop out of ISR context
    KeInsertQueueDpc(&pde->virqDpc, NULL, NULL);
}
VOID MPCreateThread(VOID (*FunctionPointer)(IN PKDPC, IN PVOID, IN PVOID, IN PVOID))
{
    /*
     * Multi-Processor Consideration ::
     *
     * Each processor has its own IDT.
     */
    CCHAR i;
    long currentProcessor = 0;
    PKDPC pkDpc = NULL;
    KIRQL oldIrql, currentIrql;

    allProcessorDone = 0;

    currentIrql = KeGetCurrentIrql();

    if (currentIrql < DISPATCH_LEVEL)
        KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    InterlockedAnd(&allProcessorDone, 0);

    pkDpc = (PKDPC)ExAllocatePoolWithTag(NonPagedPool, KeNumberProcessors * sizeof(KDPC), (ULONG)' pni');
    if (!pkDpc)
    {
        DbgPrint("Insufficient Resource error\n");
        return;
    }

    currentProcessor = KeGetCurrentProcessorNumber();

    for (i = 0; i < KeNumberProcessors; i++)
    {
        cpuNum[i] = i;
        KeInitializeDpc(&pkDpc[i], FunctionPointer, &cpuNum[i]);
        KeSetTargetProcessorDpc(&pkDpc[i], i);
        KeInsertQueueDpc(&pkDpc[i], NULL, NULL);
    }

    // Wait for every processor to finish its hooking initialization.
    while (InterlockedCompareExchange(&allProcessorDone, KeNumberProcessors - 1, KeNumberProcessors - 1)
           != KeNumberProcessors - 1)
    {
        _asm pause;
    }

    if (currentIrql < DISPATCH_LEVEL)
        KeLowerIrql(oldIrql);

    if (pkDpc)
    {
        ExFreePool(pkDpc);
        pkDpc = NULL;
    }
}
VOID
RXScheduleTheReceiveIndication(
    _In_ PMP_ADAPTER Adapter,
    _In_ PRCB Rcb)
/*++

Routine Description:

    This function schedules the receive DPC on the receiving miniport.

Arguments:

    Adapter     Pointer to the adapter that is receiving frames
    Rcb         Receive control block for the received frame; when VMQ is
                enabled, its NBL's queue ID selects the queue's DPC

Return Value:

    None.

--*/
{
    //
    // Use default DPC unless VMQ is enabled, in which case you use the Queue's DPC
    //
    PMP_ADAPTER_RECEIVE_DPC AdapterDpc = Adapter->DefaultRecvDpc;

    if(VMQ_ENABLED(Adapter))
    {
        //
        // Add Rcb to owner Queue's pending List
        //
        AdapterDpc = GetRxQueueDpc(Adapter, NET_BUFFER_LIST_RECEIVE_QUEUE_ID(Rcb->Nbl));
    }
    else
    {
        UNREFERENCED_PARAMETER(Rcb);
    }

    //
    // Schedule DPC
    //
    if(AdapterDpc->WorkItemQueued)
    {
        //
        // We've queued up a receive work item to avoid a DPC watchdog timeout. Let's wait
        // for it to start rather than queue up the DPC.
        //
        DEBUGP(MP_TRACE, "[%p] Receive DPC not scheduled, receive work item is pending. Processor: %i\n",
               Adapter, AdapterDpc->ProcessorNumber);
    }
    else
    {
        KeInsertQueueDpc(&AdapterDpc->Dpc, AdapterDpc, NULL);
        DEBUGP(MP_TRACE, "[%p] Scheduled Receive DPC. Processor: %i\n",
               Adapter, AdapterDpc->ProcessorNumber);
    }
}
NTSTATUS KrnlHlprThreadedDPCQueue(_In_ KDEFERRED_ROUTINE* pDPCFn)
{
#if DBG
   DbgPrintEx(DPFLTR_IHVNETWORK_ID,
              DPFLTR_INFO_LEVEL,
              " ---> KrnlHlprThreadedDPCQueue()\n");
#endif /// DBG

   NT_ASSERT(pDPCFn);

   NTSTATUS  status   = STATUS_SUCCESS;
   DPC_DATA* pDPCData = 0;

#pragma warning(push)
#pragma warning(disable: 6014) /// pDPCData will be freed by caller

   HLPR_NEW(pDPCData, DPC_DATA, WFPSAMPLER_SYSLIB_TAG);
   HLPR_BAIL_ON_ALLOC_FAILURE(pDPCData, status);

#pragma warning(pop)

   KeInitializeThreadedDpc(&(pDPCData->kdpc),
                           pDPCFn,
                           0);

   KeInsertQueueDpc(&(pDPCData->kdpc),
                    pDPCData,
                    0);

   HLPR_BAIL_LABEL:

#pragma warning(push)
#pragma warning(disable: 6001) /// pDPCData initialized with call to HLPR_NEW

   if(status != STATUS_SUCCESS &&
      pDPCData)
      KrnlHlprDPCDataDestroy(&pDPCData);

#pragma warning(pop)

#if DBG
   DbgPrintEx(DPFLTR_IHVNETWORK_ID,
              DPFLTR_INFO_LEVEL,
              " <--- KrnlHlprThreadedDPCQueue() [status: %#x]\n",
              status);
#endif /// DBG

   return status;
}
static BOOLEAN
XenUsb_HandleEvent_DIRQL(PVOID context)
{
  PXENUSB_DEVICE_DATA xudd = context;

  //FUNCTION_ENTER();
  if (xudd->device_state == DEVICE_STATE_ACTIVE || xudd->device_state == DEVICE_STATE_DISCONNECTING)
  {
    KeInsertQueueDpc(&xudd->event_dpc, NULL, NULL);
  }
  //FUNCTION_EXIT();
  return TRUE;
}
BOOLEAN
FASTCALL
KiSignalTimer(IN PKTIMER Timer)
{
    BOOLEAN RequestInterrupt = FALSE;
    PKDPC Dpc = Timer->Dpc;
    ULONG Period = Timer->Period;
    LARGE_INTEGER Interval, SystemTime;
    DPRINT("KiSignalTimer(): Timer %p\n", Timer);

    /* Set default values */
    Timer->Header.Inserted = FALSE;
    Timer->Header.SignalState = TRUE;

    /* Check if the timer has waiters */
    if (!IsListEmpty(&Timer->Header.WaitListHead))
    {
        /* Check the type of event */
        if (Timer->Header.Type == TimerNotificationObject)
        {
            /* Unwait the thread */
            KxUnwaitThread(&Timer->Header, IO_NO_INCREMENT);
        }
        else
        {
            /* Otherwise unwait the thread and signal the timer */
            KxUnwaitThreadForEvent((PKEVENT)Timer, IO_NO_INCREMENT);
        }
    }

    /* Check if we have a period */
    if (Period)
    {
        /* Calculate the interval and insert the timer */
        Interval.QuadPart = Int32x32To64(Period, -10000);
        while (!KiInsertTreeTimer(Timer, Interval));
    }

    /* Check if we have a DPC */
    if (Dpc)
    {
        /* Insert it in the queue */
        KeQuerySystemTime(&SystemTime);
        KeInsertQueueDpc(Dpc,
                         ULongToPtr(SystemTime.LowPart),
                         ULongToPtr(SystemTime.HighPart));
        RequestInterrupt = TRUE;
    }

    /* Return whether we need to request a DPC interrupt or not */
    return RequestInterrupt;
}
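KiSignalTimer is the kernel-internal half of the timer/DPC association. From a driver's point of view, the same path is reached by initializing a KTIMER and a KDPC and passing the DPC to KeSetTimer. A minimal sketch follows, with hypothetical names and wdm.h assumed.

/* Hedged sketch of the driver-side pattern that funnels into KiSignalTimer:
   a KTIMER whose expiration queues a KDPC. All names are hypothetical. */
typedef struct _MY_TIMER_CONTEXT {
    KTIMER Timer;
    KDPC   Dpc;
} MY_TIMER_CONTEXT, *PMY_TIMER_CONTEXT;

KDEFERRED_ROUTINE MyTimerDpcRoutine;

_Use_decl_annotations_
VOID
MyTimerDpcRoutine(PKDPC Dpc, PVOID DeferredContext, PVOID SystemArgument1, PVOID SystemArgument2)
{
    /* As the routine above shows, the low and high parts of the expiration
       system time arrive as the two system arguments. */
    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(DeferredContext);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);
}

VOID
ArmOneShotTimer(PMY_TIMER_CONTEXT Ctx)
{
    LARGE_INTEGER dueTime;

    KeInitializeTimer(&Ctx->Timer);
    KeInitializeDpc(&Ctx->Dpc, MyTimerDpcRoutine, Ctx);

    dueTime.QuadPart = -10 * 1000 * 1000;   /* relative 1 second, in 100ns units */
    KeSetTimer(&Ctx->Timer, dueTime, &Ctx->Dpc);
}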
BOOLEAN NTAPI
ServiceRoutine(
    IN PKINTERRUPT Interrupt,
    IN PVOID ServiceContext)
/*
 * FUNCTION: Interrupt service routine
 * ARGUMENTS:
 *     Interrupt      = Pointer to interrupt object
 *     ServiceContext = Pointer to context information (PNDIS_MINIPORT_INTERRUPT)
 * RETURNS:
 *     TRUE if a miniport controlled device generated the interrupt
 */
{
    BOOLEAN InterruptRecognized = FALSE;
    BOOLEAN QueueMiniportHandleInterrupt = FALSE;
    PNDIS_MINIPORT_INTERRUPT NdisInterrupt = ServiceContext;
    PNDIS_MINIPORT_BLOCK NdisMiniportBlock = NdisInterrupt->Miniport;
    BOOLEAN Initializing;

    NDIS_DbgPrint(MAX_TRACE, ("Called. Interrupt (0x%X)\n", NdisInterrupt));

    /* Certain behavior differs if MiniportInitialize is executing when the interrupt is generated */
    Initializing = (NdisMiniportBlock->PnPDeviceState != NdisPnPDeviceStarted);
    NDIS_DbgPrint(MAX_TRACE, ("MiniportInitialize executing: %s\n", (Initializing ? "yes" : "no")));

    /* MiniportISR is always called for interrupts during MiniportInitialize */
    if ((Initializing) || (NdisInterrupt->IsrRequested) || (NdisInterrupt->SharedInterrupt))
    {
        NDIS_DbgPrint(MAX_TRACE, ("Calling MiniportISR\n"));
        (*NdisMiniportBlock->DriverHandle->MiniportCharacteristics.ISRHandler)(
            &InterruptRecognized,
            &QueueMiniportHandleInterrupt,
            NdisMiniportBlock->MiniportAdapterContext);
    }
    else if (NdisMiniportBlock->DriverHandle->MiniportCharacteristics.DisableInterruptHandler)
    {
        NDIS_DbgPrint(MAX_TRACE, ("Calling MiniportDisableInterrupt\n"));
        (*NdisMiniportBlock->DriverHandle->MiniportCharacteristics.DisableInterruptHandler)(
            NdisMiniportBlock->MiniportAdapterContext);
        QueueMiniportHandleInterrupt = TRUE;
        InterruptRecognized = TRUE;
    }

    /* TODO: Figure out if we should call this or not if Initializing is true. It appears
     * that calling it fixes some NICs, but documentation is contradictory on it. */
    if (QueueMiniportHandleInterrupt)
    {
        NDIS_DbgPrint(MAX_TRACE, ("Queuing DPC.\n"));
        KeInsertQueueDpc(&NdisInterrupt->InterruptDpc, NULL, NULL);
    }

    NDIS_DbgPrint(MAX_TRACE, ("Leaving.\n"));

    return InterruptRecognized;
}
NTSTATUS
NotifierEnable(
    IN  PXENVIF_NOTIFIER    Notifier
    )
{
    ASSERT(!Notifier->Enabled);
    Notifier->Enabled = TRUE;

    if (KeInsertQueueDpc(&Notifier->Dpc, NULL, NULL))
        Notifier->Dpcs++;

    return STATUS_SUCCESS;
}
static BOOLEAN
XenNet_HandleEvent(PVOID context)
{
  struct xennet_info *xi = context;
  ULONG suspend_resume_state_pdo;

  //FUNCTION_ENTER();
  suspend_resume_state_pdo = xi->device_state->suspend_resume_state_pdo;
  KeMemoryBarrier();

//  KdPrint((__DRIVER_NAME " connected = %d, inactive = %d, suspend_resume_state_pdo = %d\n",
//    xi->connected, xi->inactive, suspend_resume_state_pdo));

  if (!xi->shutting_down && suspend_resume_state_pdo != xi->device_state->suspend_resume_state_fdo)
  {
    KeInsertQueueDpc(&xi->suspend_dpc, NULL, NULL);
  }
  if (xi->connected && !xi->inactive && suspend_resume_state_pdo != SR_STATE_RESUMING)
  {
    KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
  }
  //FUNCTION_EXIT();
  return TRUE;
}
BOOLEAN
SerialPretendXoff(
    IN PVOID Context
    )

/*++

Routine Description:

    This routine is used to process the ioctl that requests the driver to act
    as if an Xoff was received. Even if the driver does not have automatic
    Xoff/Xon flow control enabled, this will still stop transmission.

    This is the OS/2 behavior and is not well specified for Windows, so we
    adopt the OS/2 behavior.

    Note: If the driver does not have automatic Xoff/Xon enabled, then the
    only way to restart transmission is for the application to request that
    we "act" as if we saw the Xon.

Arguments:

    Context - Really a pointer to the device extension.

Return Value:

    This routine always returns FALSE.

--*/

{
    PSERIAL_DEVICE_EXTENSION Extension = Context;

    Extension->TXHolding |= SERIAL_TX_XOFF;

    if ((Extension->HandFlow.FlowReplace & SERIAL_RTS_MASK) ==
        SERIAL_TRANSMIT_TOGGLE)
    {
        KeInsertQueueDpc(
            &Extension->StartTimerLowerRTSDpc,
            NULL,
            NULL
            ) ? Extension->CountOfTryingToLowerRTS++ : 0;
    }

    return FALSE;
}
void
redirect_srb(struct scsifilt *sf, PSCSI_REQUEST_BLOCK srb)
{
    struct scsifilt_srb_extension *const se = get_srb_extension(srb);
    KIRQL irql;

    se->srb = srb;

    irql = acquire_irqsafe_lock(&sf->redirect_lock);
    sf->nr_redirected_srbs_ever++;
    sf->redirect_srb_list_len++;
    InsertTailList(&sf->redirect_srb_list, &se->list);
    release_irqsafe_lock(&sf->redirect_lock, irql);

    KeInsertQueueDpc(&sf->redirect_srb_dpc, NULL, NULL);
}
VOID TransferPacketQueueRetryDpc(PTRANSFER_PACKET Pkt)
{
    KeInitializeDpc(&Pkt->RetryTimerDPC, TransferPacketRetryTimerDpc, Pkt);

    if (Pkt->RetryIn100nsUnits == 0){
        KeInsertQueueDpc(&Pkt->RetryTimerDPC, NULL, NULL);
    }
    else {
        LARGE_INTEGER timerPeriod;

        NT_ASSERT(Pkt->RetryIn100nsUnits < 100 * 1000 * 1000 * 10); // sanity check -- 100 seconds is normally too long
        timerPeriod.QuadPart = -(Pkt->RetryIn100nsUnits);
        KeInitializeTimer(&Pkt->RetryTimer);
        KeSetTimer(&Pkt->RetryTimer, timerPeriod, &Pkt->RetryTimerDPC);
    }
}
static BOOLEAN NTAPI
Isr(PKINTERRUPT Interrupt, PVOID ServiceContext)
/*
 * FUNCTION: Interrupt service routine for the controllers
 * ARGUMENTS:
 *     Interrupt: Interrupt object representing the interrupt that occurred
 *     ServiceContext: Pointer to the ControllerInfo object that caused the interrupt
 * RETURNS:
 *     TRUE in all cases (see notes)
 * NOTES:
 *     - We should always be the target of the interrupt, being an edge-triggered ISA interrupt, but
 *       this won't be the case with a level-sensitive system like PCI
 *     - Note that it probably doesn't matter if the interrupt isn't dismissed, as it's edge-triggered.
 *       It probably won't keep re-interrupting.
 *     - There are two different ways to dismiss a floppy interrupt. If the command has a result phase
 *       (see intel datasheet), you dismiss the interrupt by reading the first data byte. If it does
 *       not, you dismiss the interrupt by doing a Sense Interrupt command. Again, because it's edge-
 *       triggered, this is safe to not do here, as we can just wait for the DPC.
 *     - Either way, we don't want to do this here. The controller shouldn't interrupt again, so we'll
 *       schedule a DPC to take care of it.
 *     - This driver really cannot share interrupts, as I don't know how to conclusively say
 *       whether it was our controller that interrupted or not. I just have to assume that any time
 *       my ISR gets called, it was my board that called it. Dumb design, yes, but it goes back to
 *       the semantics of ISA buses. That, and I don't know much about ISA drivers. :-)
 *       UPDATE: The high bit of Status Register A seems to work on non-AT controllers.
 *     - Called at DIRQL
 */
{
    PCONTROLLER_INFO ControllerInfo = (PCONTROLLER_INFO)ServiceContext;

    UNREFERENCED_PARAMETER(Interrupt);

    ASSERT(ControllerInfo);

    TRACE_(FLOPPY, "ISR called\n");

    /*
     * Due to the stupidity of the drive/controller relationship on the floppy drive, only one device object
     * can have an active interrupt pending. Due to the nature of these IRPs, though, there will only ever
     * be one thread expecting an interrupt at a time, and furthermore, interrupts (outside of spurious ones)
     * won't ever happen unless a thread is expecting them. Therefore, all we have to do is signal an event
     * and we're done. Queue a DPC and leave.
     */
    KeInsertQueueDpc(&ControllerInfo->Dpc, NULL, NULL);

    return TRUE;
}
BOOLEAN
FxInterrupt::QueueDpcForIsr(
    VOID
    )
{
    BOOLEAN queued;

    //
    // Using this function is optional,
    // but the caller better have registered a handler.
    //
    ASSERT(m_EvtInterruptDpc != NULL);

    queued = KeInsertQueueDpc(&m_Dpc, this, NULL);

    return queued;
}
BOOLEAN
NotifierEvtchnCallback(
    IN  PKINTERRUPT         InterruptObject,
    IN  PVOID               Argument
    )
{
    PXENVIF_NOTIFIER Notifier = Argument;

    UNREFERENCED_PARAMETER(InterruptObject);

    ASSERT(Notifier != NULL);

    Notifier->Events++;

    if (KeInsertQueueDpc(&Notifier->Dpc, NULL, NULL))
        Notifier->Dpcs++;

    return TRUE;
}
// Locks all other processors and returns exclusivity pointer. This function
// should never be called before the last exclusivity is released.
_Use_decl_annotations_ EXTERN_C void *ExclGainExclusivity() {
  NT_ASSERT(InterlockedAdd(&g_ExclpNumberOfLockedProcessors, 0) == 0);
  _InterlockedAnd(&g_ExclpReleaseAllProcessors, 0);

  const auto numberOfProcessors = KeQueryActiveProcessorCount(nullptr);

  // Allocates DPCs for all processors.
  auto context = reinterpret_cast<ExclusivityContext *>(ExAllocatePoolWithTag(
      NonPagedPoolNx, sizeof(void *) + (numberOfProcessors * sizeof(KDPC)),
      EXCLP_POOL_TAG));
  if (!context) {
    return nullptr;
  }

  // Execute a lock DPC for all processors but this.
  context->OldIrql = KeRaiseIrqlToDpcLevel();
  const auto currentCpu = KeGetCurrentProcessorNumber();
  for (auto i = 0ul; i < numberOfProcessors; i++) {
    if (i == currentCpu) {
      continue;
    }

    // Queue a lock DPC.
    KeInitializeDpc(&context->Dpcs[i], ExclpRaiseIrqlAndWaitDpc, nullptr);
    KeSetTargetProcessorDpc(&context->Dpcs[i], static_cast<CCHAR>(i));
    KeInsertQueueDpc(&context->Dpcs[i], nullptr, nullptr);
  }

  // Wait until all other processors were halted.
  const auto needToBeLocked = numberOfProcessors - 1;
  while (_InterlockedCompareExchange(&g_ExclpNumberOfLockedProcessors,
                                     needToBeLocked, needToBeLocked) !=
         static_cast<LONG>(needToBeLocked)) {
    KeStallExecutionProcessor(10);
  }
  return context;
}
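The lock DPC routine referenced above, ExclpRaiseIrqlAndWaitDpc, is not part of this excerpt. Based on the two globals this function initializes and then waits on, a plausible shape for it is sketched below; this is an inference for illustration only, not the project's actual implementation.

/*
 * Inferred sketch (NOT the project's actual code) of what the companion lock
 * DPC plausibly does, reconstructed from the globals used by
 * ExclGainExclusivity() above. The real routine may also raise IRQL further
 * before spinning, as its name suggests.
 */
static VOID ExclpRaiseIrqlAndWaitDpcSketch(
    _In_ PKDPC Dpc, _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1, _In_opt_ PVOID SystemArgument2) {
  UNREFERENCED_PARAMETER(Dpc);
  UNREFERENCED_PARAMETER(DeferredContext);
  UNREFERENCED_PARAMETER(SystemArgument1);
  UNREFERENCED_PARAMETER(SystemArgument2);

  // Announce that this processor has been captured so the initiator's
  // busy-wait on g_ExclpNumberOfLockedProcessors can complete.
  InterlockedIncrement(&g_ExclpNumberOfLockedProcessors);

  // Spin (at DISPATCH_LEVEL or above) until the exclusivity owner sets the
  // release flag from its paired release routine.
  while (InterlockedAdd(&g_ExclpReleaseAllProcessors, 0) == 0) {
    KeStallExecutionProcessor(10);
  }

  // Report that this processor is resuming normal operation.
  InterlockedDecrement(&g_ExclpNumberOfLockedProcessors);
}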
static VOID NTAPI
i8042KbdQueuePacket(
    IN PVOID Context)
{
    PI8042_KEYBOARD_EXTENSION DeviceExtension;

    DeviceExtension = (PI8042_KEYBOARD_EXTENSION)Context;

    DeviceExtension->KeyComplete = TRUE;
    DeviceExtension->KeysInBuffer++;
    if (DeviceExtension->KeysInBuffer >
        DeviceExtension->Common.PortDeviceExtension->Settings.KeyboardDataQueueSize)
    {
        WARN_(I8042PRT, "Keyboard buffer overflow\n");
        DeviceExtension->KeysInBuffer--;
    }

    TRACE_(I8042PRT, "Irq completes key\n");
    KeInsertQueueDpc(&DeviceExtension->DpcKeyboard, NULL, NULL);
}
_IRQL_requires_same_
static DECLSPEC_NOINLINE
BOOLEAN
EvtchnInterruptHandler(
    __in      PKINTERRUPT Interrupt,
    __in_opt  PVOID       Argument
    )
{
    PXENIFACE_EVTCHN_CONTEXT Context = Argument;

    UNREFERENCED_PARAMETER(Interrupt);
    ASSERT(Context != NULL);

    if (!KeInsertQueueDpc(&Context->Dpc, NULL, NULL)) {
        XenIfaceDebugPrint(TRACE, "NOT INSERTED: Context %p, Port %lu, FO %p\n",
                           Context, Context->LocalPort, Context->FileObject);
    }

    return TRUE;
}
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
             ? VERR_CPU_NOT_FOUND
             : VERR_CPU_OFFLINE;

    int rc = g_pfnrtSendIpi(idCpu);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Fallback. */
    if (!fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(aPokeDpcs); i++)
        {
            KeInitializeDpc(&aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&aPokeDpcs[i], (int)i);
        }
        fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or
     * at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN bRet = KeInsertQueueDpc(&aPokeDpcs[idCpu], 0, 0);
    KeLowerIrql(oldIrql);

    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
int rtMpPokeCpuUsingDpc(RTCPUID idCpu)
{
    /*
     * DPC fallback.
     */
    static KDPC s_aPokeDpcs[MAXIMUM_PROCESSORS] = {0};
    static bool s_fPokeDPCsInitialized = false;

    if (!s_fPokeDPCsInitialized)
    {
        for (unsigned i = 0; i < RT_ELEMENTS(s_aPokeDpcs); i++)
        {
            KeInitializeDpc(&s_aPokeDpcs[i], rtMpNtPokeCpuDummy, NULL);
            KeSetImportanceDpc(&s_aPokeDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&s_aPokeDpcs[i], (int)i);
        }
        s_fPokeDPCsInitialized = true;
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    KeSetImportanceDpc(&s_aPokeDpcs[idCpu], HighImportance);
    KeSetTargetProcessorDpc(&s_aPokeDpcs[idCpu], (int)idCpu);

    /* Assuming here that high importance DPCs will be delivered immediately; or
     * at least an IPI will be sent immediately.
     * @note: not true on at least Vista & Windows 7
     */
    BOOLEAN bRet = KeInsertQueueDpc(&s_aPokeDpcs[idCpu], 0, 0);
    KeLowerIrql(oldIrql);

    return (bRet == TRUE) ? VINF_SUCCESS : VERR_ACCESS_DENIED /* already queued */;
}
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try to mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
             ? VERR_CPU_NOT_FOUND
             : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    int rc;
    uint32_t cHits = 0;
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL))
    {
        rc = rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper,
                                       idCpu, NIL_RTCPUID, &cHits);
        if (RT_SUCCESS(rc))
        {
            if (cHits == 1)
                return VINF_SUCCESS;
            rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
        }
        return rc;
    }

#if 0
    rc = rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu, NIL_RTCPUID, &cHits);
    if (RT_SUCCESS(rc))
    {
        if (cHits == 1)
            return VINF_SUCCESS;
        rc = cHits == 0 ? VERR_CPU_OFFLINE : VERR_CPU_IPE_1;
    }
    return rc;

#else
    /*
     * Initialize the argument package and the objects within it.
     * The package is reference counted to avoid unnecessary spinning to
     * synchronize cleanup and prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and insert the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
#ifndef IPRT_TARGET_NT4
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);
#endif

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening, try to bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n",
                        idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline. */
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;
#endif
}