VOID
NTAPI
CcScheduleReadAhead(IN PFILE_OBJECT FileObject,
                    IN PLARGE_INTEGER FileOffset,
                    IN ULONG Length)
{
    PWORK_QUEUE_WITH_READ_AHEAD WorkItem;

    DPRINT("Schedule read ahead %08x%08x:%x %wZ\n",
           FileOffset->HighPart,
           FileOffset->LowPart,
           Length,
           &FileObject->FileName);

    WorkItem = ExAllocatePool(NonPagedPool, sizeof(*WorkItem));
    if (!WorkItem) KeBugCheck(0);

    ObReferenceObject(FileObject);
    WorkItem->FileObject = FileObject;
    WorkItem->FileOffset = *FileOffset;
    WorkItem->Length = Length;

    ExInitializeWorkItem(((PWORK_QUEUE_ITEM)WorkItem),
                         (PWORKER_THREAD_ROUTINE)CcpReadAhead,
                         WorkItem);

    ExQueueWorkItem((PWORK_QUEUE_ITEM)WorkItem, DelayedWorkQueue);
    DPRINT("Done\n");
}
NTSTATUS
DeviceArrivalCompletion(PDEVICE_OBJECT DeviceObject,
                        PIRP Irp,
                        PVOID Context)
{
    PHUB_DEVICE_EXTENSION DeviceExtension;
    LONG i;
    PWORKITEMDATA WorkItemData;

    DeviceExtension = (PHUB_DEVICE_EXTENSION)((PDEVICE_OBJECT)Context)->DeviceExtension;

    for (i = 0; i < DeviceExtension->UsbExtHubInfo.NumberOfPorts; i++)
        DPRINT1("Port %x DeviceExtension->PortStatus %x\n",
                i + 1, DeviceExtension->PortStatus[i]);

    IoFreeIrp(Irp);

    WorkItemData = ExAllocatePool(NonPagedPool, sizeof(WORKITEMDATA));
    if (!WorkItemData)
    {
        DPRINT1("Failed to allocate memory\n");
        return STATUS_NO_MEMORY;
    }
    RtlZeroMemory(WorkItemData, sizeof(WORKITEMDATA));
    WorkItemData->Context = Context;

    ExInitializeWorkItem(&WorkItemData->WorkItem,
                         (PWORKER_THREAD_ROUTINE)WorkerThread,
                         (PVOID)WorkItemData);
    ExQueueWorkItem(&WorkItemData->WorkItem, DelayedWorkQueue);

    return STATUS_MORE_PROCESSING_REQUIRED;
}
VOID
FFSQueueCloseRequest(
    IN PFFS_IRP_CONTEXT IrpContext)
{
    PAGED_CODE();

    ASSERT(IrpContext);
    ASSERT((IrpContext->Identifier.Type == FFSICX) &&
           (IrpContext->Identifier.Size == sizeof(FFS_IRP_CONTEXT)));

    if (!IsFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_DELAY_CLOSE))
    {
        SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_DELAY_CLOSE);

        IrpContext->Fcb = (PFFS_FCB)IrpContext->FileObject->FsContext;
        IrpContext->Ccb = (PFFS_CCB)IrpContext->FileObject->FsContext2;
        IrpContext->FileObject = NULL;
    }

    // IsSynchronous means we can block (so we don't requeue it)
    IrpContext->IsSynchronous = TRUE;

    ExInitializeWorkItem(&IrpContext->WorkQueueItem,
                         FFSDeQueueCloseRequest,
                         IrpContext);

    ExQueueWorkItem(&IrpContext->WorkQueueItem, CriticalWorkQueue);
}
void
CTEInitEvent(
    CTEEvent *Event,
    CTEEventRtn Handler
    )

/*++

Routine Description:

    Initializes a CTE Event variable.

Arguments:

    Event   - Event variable to initialize.

    Handler - Handler routine for this event variable.

Return Value:

    None.

--*/

{
    ASSERT(Handler != NULL);

    Event->ce_handler = Handler;
    Event->ce_scheduled = 0;
    CTEInitLock(&(Event->ce_lock));
    ExInitializeWorkItem(&(Event->ce_workitem), CTEpEventHandler, Event);
}
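//
// For context, a minimal usage sketch of the CTE event abstraction above.
// This is hedged: CTEScheduleEvent and the exact handler signature are
// assumptions about the CTE port layer, which is not shown in this section;
// only CTEInitEvent itself appears above.
//
static void
MyCteHandler(CTEEvent *Event, void *Context)   /* assumed handler shape */
{
    /* Eventually runs in a system worker thread, dispatched by the
       CTEpEventHandler worker routine queued in CTEInitEvent. */
    UNREFERENCED_PARAMETER(Event);
    UNREFERENCED_PARAMETER(Context);
}

static CTEEvent MyCteEvent;

void
MyCteInit(void)
{
    CTEInitEvent(&MyCteEvent, MyCteHandler);
    CTEScheduleEvent(&MyCteEvent, NULL);   /* assumed scheduler entry point */
}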
/*
 * @implemented
 */
VOID
NTAPI
FsRtlpPostStackOverflow(IN PVOID Context,
                        IN PKEVENT Event,
                        IN PFSRTL_STACK_OVERFLOW_ROUTINE StackOverflowRoutine,
                        IN BOOLEAN IsPaging)
{
    PSTACK_OVERFLOW_WORK_ITEM WorkItem;

    /* Try to allocate a work item */
    WorkItem = ExAllocatePoolWithTag(NonPagedPool,
                                     sizeof(STACK_OVERFLOW_WORK_ITEM),
                                     'FSrs');
    if (WorkItem == NULL)
    {
        /* If we failed, and we are not a paging file, just raise an error */
        if (!IsPaging)
        {
            RtlRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
        }

        /* Otherwise, wait for the fallback work item to be available and use it */
        KeWaitForSingleObject(&StackOverflowFallbackSerialEvent,
                              Executive,
                              KernelMode,
                              FALSE,
                              NULL);
        WorkItem = &StackOverflowFallback;
    }

    /* Initialize the work item */
    WorkItem->Context = Context;
    WorkItem->Event = Event;
    WorkItem->Routine = StackOverflowRoutine;
    ExInitializeWorkItem(&WorkItem->WorkItem, FsRtlStackOverflowRead, WorkItem);

    /* And queue it in the appropriate queue (paging or not?) */
    KeInsertQueue(&FsRtlWorkerQueues[IsPaging], &WorkItem->WorkItem.List);
}
NTSTATUS
Ext2QueueRequest (IN PEXT2_IRP_CONTEXT IrpContext)
{
    ASSERT(IrpContext);

    ASSERT((IrpContext->Identifier.Type == EXT2ICX) &&
           (IrpContext->Identifier.Size == sizeof(EXT2_IRP_CONTEXT)));

    /* set the flags of "can wait" and "queued" */
    SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
    SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_REQUEUED);

    /* make sure the buffer is kept valid in system context */
    Ext2LockIrp(IrpContext, IrpContext->Irp);

    /* initialize the work item */
    ExInitializeWorkItem(&IrpContext->WorkQueueItem,
                         Ext2DeQueueRequest,
                         IrpContext);

    /* dispatch it */
    ExQueueWorkItem(&IrpContext->WorkQueueItem, CriticalWorkQueue);

    return STATUS_PENDING;
}
VOID
ExpShutdownWorkerThreads (
    VOID
    )
{
    PULONG QueueEnable;
    SHUTDOWN_WORK_ITEM ShutdownItem;

    if ((PoCleanShutdownEnabled () & PO_CLEAN_SHUTDOWN_WORKERS) == 0) {
        return;
    }

    ASSERT (KeGetCurrentThread()->Queue ==
            &ExWorkerQueue[PO_SHUTDOWN_QUEUE].WorkerQueue);

    //
    // Mark the queues as terminating.
    //

    QueueEnable = (PULONG)&ExWorkerQueue[DelayedWorkQueue].Info.QueueWorkerInfo;
    RtlInterlockedSetBitsDiscardReturn (QueueEnable, EX_WORKER_QUEUE_DISABLED);

    QueueEnable = (PULONG)&ExWorkerQueue[CriticalWorkQueue].Info.QueueWorkerInfo;
    RtlInterlockedSetBitsDiscardReturn (QueueEnable, EX_WORKER_QUEUE_DISABLED);

    //
    // Queue the shutdown work item to the delayed work queue.  After
    // all currently queued work items are complete, this will fire,
    // repeatedly taking out every worker thread in every queue until
    // they're all done.
    //

    ExInitializeWorkItem (&ShutdownItem.WorkItem,
                          &ExpShutdownWorker,
                          &ShutdownItem);

    ShutdownItem.QueueType = DelayedWorkQueue;
    ShutdownItem.PrevThread = NULL;

    KeInsertQueue (&ExWorkerQueue[DelayedWorkQueue].WorkerQueue,
                   &ShutdownItem.WorkItem.List);

    //
    // Wait for all of the workers and the balancer to exit.
    //

    if (ExpWorkerThreadBalanceManagerPtr != NULL) {
        KeWaitForSingleObject(ExpWorkerThreadBalanceManagerPtr,
                              Executive,
                              KernelMode,
                              FALSE,
                              NULL);

        ASSERT(!ShutdownItem.PrevThread);

        ObDereferenceObject(ExpWorkerThreadBalanceManagerPtr);
    }
}
NTSTATUS
FFSQueueRequest(
    IN PFFS_IRP_CONTEXT IrpContext)
{
    ASSERT(IrpContext);

    ASSERT((IrpContext->Identifier.Type == FFSICX) &&
           (IrpContext->Identifier.Size == sizeof(FFS_IRP_CONTEXT)));

    // IsSynchronous means we can block (so we don't requeue it)
    IrpContext->IsSynchronous = TRUE;

    SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_REQUEUED);

    IoMarkIrpPending(IrpContext->Irp);

    ExInitializeWorkItem(&IrpContext->WorkQueueItem,
                         FFSDeQueueRequest,
                         IrpContext);

    ExQueueWorkItem(&IrpContext->WorkQueueItem, CriticalWorkQueue);

    return STATUS_PENDING;
}
VOID
Ext2QueueCloseRequest (IN PEXT2_IRP_CONTEXT IrpContext)
{
    ASSERT(IrpContext);
    ASSERT((IrpContext->Identifier.Type == EXT2ICX) &&
           (IrpContext->Identifier.Size == sizeof(EXT2_IRP_CONTEXT)));

    if (IsFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_DELAY_CLOSE))
    {
        if (IsFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_FILE_BUSY))
        {
            Ext2Sleep(500); /* 0.5 sec */
        }
        else
        {
            Ext2Sleep(50);  /* 0.05 sec */
        }
    }
    else
    {
        SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
        SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_DELAY_CLOSE);

        IrpContext->Fcb = (PEXT2_FCB)IrpContext->FileObject->FsContext;
        IrpContext->Ccb = (PEXT2_CCB)IrpContext->FileObject->FsContext2;
    }

    ExInitializeWorkItem(&IrpContext->WorkQueueItem,
                         Ext2DeQueueCloseRequest,
                         IrpContext);

    ExQueueWorkItem(&IrpContext->WorkQueueItem, DelayedWorkQueue);
}
PIO_WORKITEM
IoAllocateWorkItem(
    PDEVICE_OBJECT DeviceObject
    )
{
    PIO_WORKITEM ioWorkItem;
    PWORK_QUEUE_ITEM exWorkItem;

    //
    // Allocate a new workitem structure.
    //

    ioWorkItem = ExAllocatePool( NonPagedPool, sizeof( IO_WORKITEM ));
    if (ioWorkItem != NULL) {

        //
        // Initialize the invariant portions of both ioWorkItem and
        // exWorkItem.
        //

#if DBG
        ioWorkItem->Size = sizeof( IO_WORKITEM );
#endif

        ioWorkItem->DeviceObject = DeviceObject;

        exWorkItem = &ioWorkItem->WorkItem;
        ExInitializeWorkItem( exWorkItem, IopProcessWorkItem, ioWorkItem );
    }

    return ioWorkItem;
}
/*
 * @implemented
 */
VOID
NTAPI
IoWriteErrorLogEntry(IN PVOID ElEntry)
{
    PERROR_LOG_ENTRY LogEntry;
    KIRQL Irql;

    /* Get the main header */
    LogEntry = (PERROR_LOG_ENTRY)((ULONG_PTR)ElEntry -
                                  sizeof(ERROR_LOG_ENTRY));

    /* Get the time stamp */
    KeQuerySystemTime(&LogEntry->TimeStamp);

    /* Acquire the lock and insert this write in the list */
    KeAcquireSpinLock(&IopLogListLock, &Irql);
    InsertHeadList(&IopErrorLogListHead, &LogEntry->ListEntry);

    /* Check if the worker is running */
    if (!IopLogWorkerRunning)
    {
#if 0
        /* It's not, initialize it and queue it */
        ExInitializeWorkItem(&IopErrorLogWorkItem,
                             IopLogWorker,
                             &IopErrorLogWorkItem);
        ExQueueWorkItem(&IopErrorLogWorkItem, DelayedWorkQueue);
        IopLogWorkerRunning = TRUE;
#endif
    }

    /* Release the lock and return */
    KeReleaseSpinLock(&IopLogListLock, Irql);
}
VOID
CmpInitializeDelayedCloseTable()

/*++

Routine Description:

    Initialize the delayed close table; allocation + LRU list initialization.

Arguments:

    None.

Return Value:

    NONE.

--*/

{
    ExInitializeWorkItem(&CmpDelayCloseWorkItem, CmpDelayCloseWorker, NULL);
    KeInitializeGuardedMutex(&CmpDelayedCloseTableLock);
    InitializeListHead(&(CmpDelayedLRUListHead));
    KeInitializeDpc(&CmpDelayCloseDpc, CmpDelayCloseDpcRoutine, NULL);
    KeInitializeTimer(&CmpDelayCloseTimer);
}
VOID
CmpDoQueueLateUnloadWorker(IN PCMHIVE CmHive)
{
    PWORK_QUEUE_ITEM WorkItem;

    CM_PAGED_CODE();

    ASSERT( CmHive->RootKcb != NULL );

    //
    // NB: The hive lock has higher precedence; we don't need the kcb lock
    // as we are only checking the refcount.
    //
    CmLockHive(CmHive);

    if( (CmHive->RootKcb->RefCount == 1) && (CmHive->UnloadWorkItem == NULL) ) {
        //
        // The only reference on the root kcb is the one that we artificially
        // created; queue a work item to late unload the hive.
        //
        WorkItem = ExAllocatePool(NonPagedPool, sizeof(WORK_QUEUE_ITEM));
        if( WorkItem != NULL ) {
            //
            // Guard against a NULL allocation; otherwise the exchange below
            // would publish NULL and we would initialize a NULL work item.
            //
            if( InterlockedCompareExchangePointer(&(CmHive->UnloadWorkItem),
                                                  WorkItem,
                                                  NULL) == NULL ) {
                ExInitializeWorkItem(CmHive->UnloadWorkItem,
                                     CmpLateUnloadHiveWorker,
                                     CmHive);
                ExQueueWorkItem(CmHive->UnloadWorkItem, DelayedWorkQueue);
            } else {
                ExFreePool(WorkItem);
            }
        }
    }

    CmUnlockHive(CmHive);
}
BOOLEAN
CmpClaimGlobalQuota(
    IN ULONG Size
    )

/*++

Routine Description:

    If CmpGlobalQuotaUsed + Size >= CmpGlobalQuotaAllowed, return FALSE.
    Otherwise, increment CmpGlobalQuotaUsed, in effect claiming the
    requested GlobalQuota.

Arguments:

    Size - number of bytes of GlobalQuota the caller wants to claim

Return Value:

    TRUE  - Claim succeeded, and has been counted in used GQ.

    FALSE - Claim failed, nothing counted in GQ.

--*/

{
    LONG available;
    PWORK_QUEUE_ITEM WorkItem;

    //
    // Compute the available space, then check whether Size < available.
    // This prevents overflows.  Note that available must be signed:
    // since quota is not enforced until logon, it is possible for the
    // available byte count to be negative.
    //
    available = (LONG)CmpGlobalQuotaAllowed - (LONG)CmpGlobalQuotaUsed;

    if ((LONG)Size < available) {
        CmpGlobalQuotaUsed += Size;

        if ((CmpGlobalQuotaUsed > CmpGlobalQuotaWarning) &&
            (!CmpQuotaWarningPopupDisplayed) &&
            (ExReadyForErrors)) {

            //
            // Queue a work item to display the popup.
            //
            WorkItem = ExAllocatePool(NonPagedPool, sizeof(WORK_QUEUE_ITEM));
            if (WorkItem != NULL) {
                CmpQuotaWarningPopupDisplayed = TRUE;
                ExInitializeWorkItem(WorkItem, CmpQuotaWarningWorker, WorkItem);
                ExQueueWorkItem(WorkItem, DelayedWorkQueue);
            }
        }
        return TRUE;
    } else {
        return FALSE;
    }
}
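//
// A quick worked example of why the comparison above must be signed
// (illustrative values only, not taken from the original source).  Suppose
// quota is over-committed before logon, with Allowed = 100 and Used = 150:
//
//   signed:    available = 100 - 150 = -50, so (LONG)Size < -50 is false
//              for any non-negative Size -- the claim is correctly rejected.
//
//   unsigned:  100u - 150u wraps to 0xFFFFFFCE (4294967246), so nearly any
//              Size would pass -- the claim would be incorrectly granted.
//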
FxThreadedEventQueue::FxThreadedEventQueue(
    __in UCHAR QueueDepth
    ) : FxEventQueue(QueueDepth)
{
    ExInitializeWorkItem(&m_EventWorkQueueItem,
                         (PWORKER_THREAD_ROUTINE) _WorkerThreadRoutine,
                         this);
}
VOID
RdrInitializeTimerPackage (
    VOID
    )
{
    ExInitializeWorkItem( &TimerWorkItem, RdrTimer, NULL );

    ExInitializeWorkItem( &CancelWorkItem, RdrCancelOutstandingRequests, NULL );

    //
    // Set the timer up for the idle timer.
    //

    TimerCounter = SCAVENGER_TIMER_GRANULARITY;

    IoInitializeTimer((PDEVICE_OBJECT)RdrDeviceObject, RdrIdleTimer, NULL);

    return;
}
VOID
CmpInitDelayDerefKCBEngine()
{
    InitializeListHead(&CmpDelayDerefKCBListHead);
    KeInitializeGuardedMutex(&CmpDelayDerefKCBLock);
    ExInitializeWorkItem(&CmpDelayDerefKCBWorkItem, CmpDelayDerefKCBWorker, NULL);
    KeInitializeDpc(&CmpDelayDerefKCBDpc, CmpDelayDerefKCBDpcRoutine, NULL);
    KeInitializeTimer(&CmpDelayDerefKCBTimer);
}
VOID
NTAPI
IopLogDpcRoutine(IN PKDPC Dpc,
                 IN PVOID DeferredContext,
                 IN PVOID SystemArgument1,
                 IN PVOID SystemArgument2)
{
    /* If we have a DPC, free it */
    if (Dpc) ExFreePool(Dpc);

    /* Initialize and queue the work item */
    ExInitializeWorkItem(&IopErrorLogWorkItem, IopLogWorker, NULL);
    ExQueueWorkItem(&IopErrorLogWorkItem, DelayedWorkQueue);
}
NTSTATUS dc_probe_mount(dev_hook *hook, PIRP irp)
{
    mount_ctx *mnt;

    if ( (mnt = mm_pool_alloc(sizeof(mount_ctx))) == NULL ) {
        return dc_release_irp(hook, irp, STATUS_INSUFFICIENT_RESOURCES);
    }
    IoMarkIrpPending(irp);

    mnt->irp  = irp;
    mnt->hook = hook;

    ExInitializeWorkItem(&mnt->wrk_item, mount_item_proc, mnt);
    ExQueueWorkItem(&mnt->wrk_item, DelayedWorkQueue);

    return STATUS_PENDING;
}
VOID
Ext2StartFloppyFlushDpc (
    PEXT2_VCB     Vcb,
    PEXT2_FCB     Fcb,
    PFILE_OBJECT  FileObject
    )
{
    LARGE_INTEGER          OneSecond;
    PEXT2_FLPFLUSH_CONTEXT Context;

    ASSERT(IsFlagOn(Vcb->Flags, VCB_FLOPPY_DISK));

    Context = Ext2AllocatePool(
                  NonPagedPool,
                  sizeof(EXT2_FLPFLUSH_CONTEXT),
                  EXT2_FLPFLUSH_MAGIC
              );

    if (!Context) {
        DEBUG(DL_ERR, ( "Ex2StartFloppy...: failed to allocate Context\n"));
        DbgBreak();
        return;
    }

    KeInitializeTimer(&Context->Timer);
    KeInitializeDpc(&Context->Dpc, Ext2FloppyFlushDpc, Context);
    ExInitializeWorkItem(&Context->Item, Ext2FloppyFlush, Context);

    Context->Vcb = Vcb;
    Context->Fcb = Fcb;
    Context->FileObject = FileObject;

    if (FileObject) {
        ObReferenceObject(FileObject);
    }

    /* negative due time is relative: fire the DPC after one second */
    OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;

    KeSetTimer(&Context->Timer, OneSecond, &Context->Dpc);
}
VOID
IopErrorLogDpc(
    IN struct _KDPC *Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    )

/*++

Routine Description:

    This routine queues a work request to the worker thread to process
    logged errors.  It is called by a timer DPC when the error log port
    cannot be connected.  The DPC structure itself is freed by this routine.

Arguments:

    Dpc - Supplies a pointer to the DPC structure.  This structure is
        freed by this routine.

    DeferredContext - Unused.

    SystemArgument1 - Unused.

    SystemArgument2 - Unused.

Return Value:

    None

--*/

{
    //
    // Free the DPC structure if there is one.
    //

    if (Dpc != NULL) {
        ExFreePool(Dpc);
    }

    ExInitializeWorkItem( &IopErrorLogWorkItem, IopErrorLogThread, NULL );
    ExQueueWorkItem( &IopErrorLogWorkItem, DelayedWorkQueue );
}
NTSTATUS
USBSTOR_QueueWorkItem(
    PIRP_CONTEXT Context,
    PIRP Irp)
{
    PERRORHANDLER_WORKITEM_DATA ErrorHandlerWorkItemData;

    //
    // Allocate Work Item Data
    //
    ErrorHandlerWorkItemData = ExAllocatePoolWithTag(NonPagedPool,
                                                     sizeof(ERRORHANDLER_WORKITEM_DATA),
                                                     USB_STOR_TAG);
    if (!ErrorHandlerWorkItemData)
    {
        //
        // no memory
        //
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // error handling started
    //
    Context->FDODeviceExtension->SrbErrorHandlingActive = TRUE;

    //
    // srb error handling finished
    //
    Context->FDODeviceExtension->TimerWorkQueueEnabled = FALSE;

    //
    // Initialize and queue the work item to handle the error
    //
    ExInitializeWorkItem(&ErrorHandlerWorkItemData->WorkQueueItem,
                         ErrorHandlerWorkItemRoutine,
                         ErrorHandlerWorkItemData);

    ErrorHandlerWorkItemData->DeviceObject = Context->FDODeviceExtension->FunctionalDeviceObject;
    ErrorHandlerWorkItemData->Context = Context;
    ErrorHandlerWorkItemData->Irp = Irp;

    DPRINT1("Queuing WorkItemRoutine\n");
    ExQueueWorkItem(&ErrorHandlerWorkItemData->WorkQueueItem, DelayedWorkQueue);

    return STATUS_MORE_PROCESSING_REQUIRED;
}
void dc_unmount_async(dev_hook *hook)
{
    mount_ctx *mnt;

    DbgMsg("dc_unmount_async at IRQL %d\n", KeGetCurrentIrql());

    if ( (mnt = mm_pool_alloc(sizeof(mount_ctx))) != NULL )
    {
        mnt->hook = hook;
        dc_reference_hook(hook);

        if (KeGetCurrentIrql() != PASSIVE_LEVEL) {
            ExInitializeWorkItem(&mnt->wrk_item, unmount_item_proc, mnt);
            ExQueueWorkItem(&mnt->wrk_item, DelayedWorkQueue);
        } else {
            unmount_item_proc(mnt);
        }
    }
}
VOID
FFSFloppyFlushDpc(
    IN PKDPC Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2)
{
    PFFS_FLPFLUSH_CONTEXT Context;

    Context = (PFFS_FLPFLUSH_CONTEXT)DeferredContext;

    FFSPrint((DBG_USER, "FFSFloppyFlushDpc is to be started...\n"));

    ExInitializeWorkItem(&Context->Item, FFSFloppyFlush, Context);
    ExQueueWorkItem(&Context->Item, CriticalWorkQueue);
}
/*
 * @implemented
 */
PIO_WORKITEM
NTAPI
IoAllocateWorkItem(IN PDEVICE_OBJECT DeviceObject)
{
    PIO_WORKITEM IoWorkItem;

    /* Allocate the work item */
    IoWorkItem = ExAllocatePoolWithTag(NonPagedPool,
                                       sizeof(IO_WORKITEM),
                                       TAG_IOWI);
    if (!IoWorkItem) return NULL;

    /* Initialize it */
    IoWorkItem->DeviceObject = DeviceObject;
    ExInitializeWorkItem(&IoWorkItem->Item, IopWorkItemCallback, IoWorkItem);

    /* Return it */
    return IoWorkItem;
}
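//
// For reference, how a driver typically consumes IoAllocateWorkItem: its
// documented companions are IoQueueWorkItem and IoFreeWorkItem.  This is a
// minimal sketch; MyDeferredWork and MyQueueDeferredWork are hypothetical
// names, not part of the sources above.
//
IO_WORKITEM_ROUTINE MyDeferredWork;

VOID
MyDeferredWork(IN PDEVICE_OBJECT DeviceObject, IN PVOID Context)
{
    /* Runs at PASSIVE_LEVEL in a system worker thread; the I/O manager
       keeps the device object referenced while the work item is pending. */
    PIO_WORKITEM WorkItem = (PIO_WORKITEM)Context;

    UNREFERENCED_PARAMETER(DeviceObject);

    /* ... perform the deferred work ... */

    IoFreeWorkItem(WorkItem);
}

VOID
MyQueueDeferredWork(IN PDEVICE_OBJECT DeviceObject)
{
    PIO_WORKITEM WorkItem = IoAllocateWorkItem(DeviceObject);
    if (WorkItem != NULL) {
        IoQueueWorkItem(WorkItem, MyDeferredWork, DelayedWorkQueue, WorkItem);
    }
}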
VOID
NTAPI
FatQueueRequest(IN PFAT_IRP_CONTEXT IrpContext,
                IN PFAT_OPERATION_HANDLER OperationHandler)
{
    /* Save the worker routine. */
    IrpContext->QueuedOperationHandler = OperationHandler;

    /* Indicate if the top level IRP was set. */
    if (IoGetTopLevelIrp() == IrpContext->Irp)
        SetFlag(IrpContext->Flags, IRPCONTEXT_TOPLEVEL);

    /* Initialize the work item. */
    ExInitializeWorkItem(&IrpContext->WorkQueueItem,
                         FatDequeueRequest,
                         IrpContext);

    ExQueueWorkItem(&IrpContext->WorkQueueItem, DelayedWorkQueue);
}
VOID
APCInjectRoutine(PKAPC pkaApc, PKNORMAL_ROUTINE*, PVOID*, PVOID*, PVOID*)
{
    WI_INJECT wiiItem;

    ExFreePool(pkaApc);

    wiiItem.pktThread  = KeGetCurrentThread();
    wiiItem.pepProcess = IoGetCurrentProcess();
    wiiItem.hProcessID = PsGetCurrentProcessId();

    KeInitializeEvent(&wiiItem.keEvent, NotificationEvent, FALSE);

    ExInitializeWorkItem(&wiiItem.qiItem, InjectorWorkItem, &wiiItem);
    ExQueueWorkItem(&wiiItem.qiItem, DelayedWorkQueue);

    // Waiting in KernelMode did not work here; waiting in UserMode does.
    KeWaitForSingleObject(&wiiItem.keEvent, Executive, UserMode, TRUE, 0);

    return;
}
/* @implemented */
KSDDKAPI
NTSTATUS
NTAPI
KsRegisterWorker(
    IN WORK_QUEUE_TYPE WorkQueueType,
    OUT PKSWORKER* Worker)
{
    PKSIWORKER KsWorker;

    if (WorkQueueType != CriticalWorkQueue &&
        WorkQueueType != DelayedWorkQueue &&
        WorkQueueType != HyperCriticalWorkQueue)
    {
        return STATUS_INVALID_PARAMETER;
    }

    /* allocate worker context */
    KsWorker = AllocateItem(NonPagedPool, sizeof(KSIWORKER));
    if (!KsWorker)
        return STATUS_INSUFFICIENT_RESOURCES;

    /* initialize the work context */
    ExInitializeWorkItem(&KsWorker->WorkItem, WorkItemRoutine, (PVOID)KsWorker);
    /* setup type */
    KsWorker->Type = WorkQueueType;
    /* initialize work item queue */
    InitializeListHead(&KsWorker->QueuedWorkItems);
    /* initialize work item lock */
    KeInitializeSpinLock(&KsWorker->Lock);
    /* initialize event */
    KeInitializeEvent(&KsWorker->Event, NotificationEvent, FALSE);

    *Worker = KsWorker;
    return STATUS_SUCCESS;
}
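//
// A minimal usage sketch for the KS worker API above, using its documented
// companions KsQueueWorkItem and KsUnregisterWorker.  MyKsWorkRoutine and
// MyQueueViaKsWorker are hypothetical names, not part of the source above,
// and the teardown comment reflects the documented behavior as I understand
// it rather than anything shown here.
//
VOID
NTAPI
MyKsWorkRoutine(IN PVOID Context)
{
    /* Invoked once the KS worker dispatches the queued item. */
    UNREFERENCED_PARAMETER(Context);
}

NTSTATUS
MyQueueViaKsWorker(VOID)
{
    PKSWORKER Worker;
    static WORK_QUEUE_ITEM WorkItem;   /* must stay valid until dispatched */
    NTSTATUS Status;

    Status = KsRegisterWorker(DelayedWorkQueue, &Worker);
    if (!NT_SUCCESS(Status))
        return Status;

    ExInitializeWorkItem(&WorkItem, MyKsWorkRoutine, NULL);
    Status = KsQueueWorkItem(Worker, &WorkItem);

    /* KsUnregisterWorker waits for outstanding items before tearing down. */
    KsUnregisterWorker(Worker);
    return Status;
}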
NTSTATUS dc_power_irp(dev_hook *hook, PIRP irp)
{
    pw_irp_ctx *pwc;

    if (KeGetCurrentIrql() == PASSIVE_LEVEL) {
        return dc_process_power_irp(hook, irp);
    }
    if ( (pwc = mm_alloc(sizeof(pw_irp_ctx), 0)) == NULL ) {
        PoStartNextPowerIrp(irp);
        return dc_release_irp(hook, irp, STATUS_INSUFFICIENT_RESOURCES);
    }
    pwc->hook = hook;
    pwc->irp  = irp;

    IoMarkIrpPending(irp);

    ExInitializeWorkItem(&pwc->wrk_item, dc_power_irp_worker, pwc);
    ExQueueWorkItem(&pwc->wrk_item, DelayedWorkQueue);

    return STATUS_PENDING;
}
VOID
NTAPI
CmpCmdInit(IN BOOLEAN SetupBoot)
{
    LARGE_INTEGER DueTime;
    PAGED_CODE();

    /* Setup the lazy DPC */
    KeInitializeDpc(&CmpLazyFlushDpc, CmpLazyFlushDpcRoutine, NULL);

    /* Setup the lazy timer */
    KeInitializeTimer(&CmpLazyFlushTimer);

    /* Setup the lazy worker */
    ExInitializeWorkItem(&CmpLazyWorkItem, CmpLazyFlushWorker, NULL);

    /* Setup the forced-lazy DPC and timer */
    KeInitializeDpc(&CmpEnableLazyFlushDpc, CmpEnableLazyFlushDpcRoutine, NULL);
    KeInitializeTimer(&CmpEnableLazyFlushTimer);

    /* Enable lazy flushing after 10 minutes */
    DueTime.QuadPart = Int32x32To64(600, -10 * 1000 * 1000);
    KeSetTimer(&CmpEnableLazyFlushTimer, DueTime, &CmpEnableLazyFlushDpc);

    /* Setup flush variables */
    CmpNoWrite = CmpMiniNTBoot;
    CmpWasSetupBoot = SetupBoot;

    /* Testing: Force Lazy Flushing */
    CmpHoldLazyFlush = FALSE;

    /* Setup the hive list */
    CmpInitializeHiveList(SetupBoot);
}
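//
// The snippets above all share one shape: allocate (or embed) a
// WORK_QUEUE_ITEM plus its context, ExInitializeWorkItem it, then
// ExQueueWorkItem it so the work runs later at PASSIVE_LEVEL in a system
// worker thread.  Below is a minimal self-contained sketch of that pattern;
// MY_WORK_CONTEXT and the routine names are hypothetical.  (On current
// Windows, ExInitializeWorkItem/ExQueueWorkItem are deprecated for drivers
// in favor of the IoAllocateWorkItem family shown earlier.)
//
typedef struct _MY_WORK_CONTEXT {
    WORK_QUEUE_ITEM WorkItem;   /* must stay valid until the worker runs */
    ULONG           Data;       /* payload consumed by the worker */
} MY_WORK_CONTEXT, *PMY_WORK_CONTEXT;

VOID
NTAPI
MyWorker(IN PVOID Parameter)
{
    /* Runs at PASSIVE_LEVEL in a system worker thread. */
    PMY_WORK_CONTEXT Ctx = (PMY_WORK_CONTEXT)Parameter;

    /* ... use Ctx->Data ... */

    /* The queuer transferred ownership of the context; free it here. */
    ExFreePool(Ctx);
}

NTSTATUS
MyQueueWork(IN ULONG Data)
{
    PMY_WORK_CONTEXT Ctx;

    /* NonPagedPool: the work item is touched at elevated IRQL internally. */
    Ctx = ExAllocatePool(NonPagedPool, sizeof(MY_WORK_CONTEXT));
    if (Ctx == NULL)
        return STATUS_INSUFFICIENT_RESOURCES;

    Ctx->Data = Data;
    ExInitializeWorkItem(&Ctx->WorkItem, MyWorker, Ctx);
    ExQueueWorkItem(&Ctx->WorkItem, DelayedWorkQueue);

    return STATUS_PENDING;
}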