/**
 * @name KeLeaveGuardedRegion
 *
 * Leaves a guarded region and delivers pending APCs if possible.
 *
 * Out-of-line exported wrapper: the real work is done by the inlined
 * KeLeaveGuardedRegion(); this symbol exists so external callers that
 * link against the export (rather than the inline) still work.
 */
VOID
NTAPI
_KeLeaveGuardedRegion(VOID)
{
    /* Use the inlined version */
    KeLeaveGuardedRegion();
}
/*
 * Releases the global driver lock and re-enables normal kernel APC
 * delivery.
 *
 * The lock word is cleared with InterlockedExchange, i.e. a full-barrier
 * atomic store visible to all processors.  The store is done BEFORE
 * leaving the guarded region, so the lock is already free by the time
 * APC delivery resumes.  NOTE(review): presumably the matching acquire
 * path calls KeEnterGuardedRegion() before taking g_lock -- the acquire
 * routine is not visible here, confirm the pairing.
 */
static void _lockRelease()
{
    /* Atomically mark the lock word as free (full memory barrier) */
    InterlockedExchange(&g_lock, UNLOCKED);

    //re-enable APCs
    KeLeaveGuardedRegion();
}
/*
 * MiFindContiguousPages
 *
 * Scans the physical memory runs for a contiguous range of SizeInPages
 * free pages in [LowestPfn, HighestPfn], optionally not crossing a
 * BoundaryPfn-aligned boundary, and claims it.
 *
 * Strategy: each run is first scanned optimistically WITHOUT the PFN
 * lock; when a candidate range is found, the PFN lock is taken and every
 * page is re-checked (it may have been grabbed in the meantime).  Only if
 * all pages are still free are they unlinked and marked in use.
 *
 * Returns the base PFN of the claimed range, or 0 on failure.
 *
 * NOTE(review): CacheType is accepted but never read in this function --
 * presumably the caller uses it when mapping the pages; confirm.
 */
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    // (BoundaryPfn is presumably a power of two -- TODO confirm caller contract)
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    /* Disable APCs so we can't be suspended mid-scan */
    KeEnterGuardedRegion();

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we
        // need.  "continue" advances to the ++i in the while() below.
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run.  Length counts the
        // current streak of consecutive free pages.
        //
        Length = 0;
        for (Pfn1 = MI_PFN_ELEMENT(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, the streak is broken -- restart it
            //
            if (MiIsPfnInUse(Pfn1))
            {
                Length = 0;
                continue;
            }

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the whole candidate range [Page,
            // Page + SizeInPages - 1] stays within one boundary-aligned chunk
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                // Pfn1/Page to the first page of the candidate range
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock -- from here on the state is stable
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Walk BACKWARDS from the last page to the first,
                        // claiming each PFN.  EndPfn here is the FIRST page
                        // of the range (we're at the last).
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION);
                            MI_SET_PROCESS2("Kernel Driver");
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            /* Poison value: no real PTE backs this page yet */
                            Pfn1->PteAddress = (PVOID)0xBAADF00D;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        // (Page still points at the LAST page of the range)
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page = Page - SizeInPages + 1;
                        ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
                        ASSERT(Page != 0);

                        /* Enable APCs and return the page */
                        KeLeaveGuardedRegion();
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart the streak
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    // NOTE(review): this path returns with the guarded region still entered --
    // presumably the caller pairs it with KeLeaveGuardedRegion(); verify,
    // otherwise this is an APC-disable leak.
    //
    return 0;
}
/*
 * DriverDeviceControlHandler
 *
 * IRP_MJ_DEVICE_CONTROL dispatch routine.  All supported IOCTLs use a
 * single buffered-I/O buffer (Irp->AssociatedIrp.SystemBuffer) for both
 * input and output; each request is size-checked before being handed to
 * the matching Monitor* routine, and the IRP is always completed here.
 *
 * fdo - target device object (logged only)
 * Irp - the device-control IRP to process and complete
 *
 * Returns the NTSTATUS the IRP was completed with (via CompleteIrp).
 *
 * Fixes vs previous revision: the first log call used `KLOG` while every
 * other call site uses `KLog` (naming inconsistency/typo), and the local
 * `method` (ControlCode & 0x3) was computed but never used.
 */
NTSTATUS DriverDeviceControlHandler(
    IN PDEVICE_OBJECT fdo,
    IN PIRP Irp
    )
{
    NTSTATUS Status = STATUS_SUCCESS;
    PIO_STACK_LOCATION IrpStack = IoGetCurrentIrpStackLocation(Irp);
    ULONG ControlCode = IrpStack->Parameters.DeviceIoControl.IoControlCode;
    ULONG ResultLength = 0;
    ULONG InputLength = IrpStack->Parameters.DeviceIoControl.InputBufferLength;
    ULONG OutputLength = IrpStack->Parameters.DeviceIoControl.OutputBufferLength;
    PVOID Buffer = Irp->AssociatedIrp.SystemBuffer;

    /* Block normal kernel APC delivery while the request is processed */
    KeEnterGuardedRegion();

    KLog(LInfo, "IoControl fdo %p, ioctl %x", fdo, ControlCode);

    /* Every IOCTL below echoes its input structure back, so the output
       buffer must be at least as large as the input buffer */
    if (OutputLength < InputLength)
    {
        KLog(LError, "invalid outputlen=%x vs inputlen=%x", OutputLength, InputLength);
        Status = STATUS_INVALID_PARAMETER;
        goto complete;
    }
    ResultLength = InputLength;

    switch( ControlCode)
    {
        case IOCTL_KMON_INIT:
        {
            PKMON_INIT initData = (PKMON_INIT)Buffer;
            if (InputLength < sizeof(KMON_INIT))
            {
                Status = STATUS_BUFFER_TOO_SMALL;
                goto complete;
            }
            KLog(LInfo, "IOCTL_KMON_INIT");
            Status = MonitorStart(initData);
            break;
        }
        case IOCTL_KMON_RELEASE:
        {
            PKMON_RELEASE releaseData = (PKMON_RELEASE)Buffer;
            if (InputLength < sizeof(KMON_RELEASE))
            {
                Status = STATUS_BUFFER_TOO_SMALL;
                goto complete;
            }
            KLog(LInfo, "IOCTL_KMON_RELEASE");
            Status = MonitorStop(releaseData);
            break;
        }
        case IOCTL_KMON_OPEN_WINSTA:
        {
            POPEN_WINSTA openWinsta = (POPEN_WINSTA)Buffer;
            if (InputLength < sizeof(OPEN_WINSTA))
            {
                Status = STATUS_BUFFER_TOO_SMALL;
                goto complete;
            }
            Status = MonitorOpenWinsta(openWinsta);
            break;
        }
        case IOCTL_KMON_OPEN_DESKTOP:
        {
            POPEN_DESKTOP openDeskop = (POPEN_DESKTOP)Buffer;
            if (InputLength < sizeof(OPEN_DESKTOP))
            {
                Status = STATUS_BUFFER_TOO_SMALL;
                goto complete;
            }
            Status = MonitorOpenDesktop(openDeskop);
            break;
        }
        case IOCTL_KMON_SCREENSHOT:
        {
            PKMON_SCREENSHOT screenShot = (PKMON_SCREENSHOT)Buffer;
            if (InputLength < sizeof(KMON_SCREENSHOT))
            {
                Status = STATUS_BUFFER_TOO_SMALL;
                goto complete;
            }
            Status = MonitorScreenshot(screenShot);
            break;
        }
        default:
            /* Unrecognized control code */
            Status = STATUS_INVALID_DEVICE_REQUEST;
            break;
    }

complete:
    KeLeaveGuardedRegion();
    KLog(LInfo, "dev=%p IoControl: %x bytes: %x, Status=%x", fdo, ControlCode, ResultLength, Status);
    return CompleteIrp(Irp, Status, ResultLength);
}
/*
 * @implemented
 *
 * Retrieves the register context of a thread.
 *
 * Thread        - thread whose context is requested
 * ThreadContext - caller buffer; ContextFlags (read first, under SEH)
 *                 selects which register groups to capture, and the
 *                 result is copied back into this buffer
 * PreviousMode  - mode of the caller; user-mode buffers are probed
 *
 * Same-thread requests run the APC kernel routine directly inside a
 * guarded region (simulating APC_LEVEL); cross-thread requests queue a
 * kernel APC to the target and wait for it to signal completion.
 */
NTSTATUS
NTAPI
PsGetContextThread(IN PETHREAD Thread,
                   IN OUT PCONTEXT ThreadContext,
                   IN KPROCESSOR_MODE PreviousMode)
{
    GET_SET_CTX_CONTEXT GetSetContext;
    ULONG Size = 0, Flags = 0;
    NTSTATUS Status;

    /* Enter SEH: ThreadContext may be an invalid user pointer */
    _SEH2_TRY
    {
        /* Set default length */
        Size = sizeof(CONTEXT);

        /* Read the flags */
        Flags = ProbeForReadUlong(&ThreadContext->ContextFlags);

#ifdef _M_IX86
        /* Check if the caller wanted extended registers */
        if ((Flags & CONTEXT_EXTENDED_REGISTERS) != CONTEXT_EXTENDED_REGISTERS)
        {
            /* Cut them out of the size (they are the trailing member) */
            Size = FIELD_OFFSET(CONTEXT, ExtendedRegisters);
        }
#endif

        /* Check if we came from user mode */
        if (PreviousMode != KernelMode)
        {
            /* Probe the context for writability before we use it */
            ProbeForWrite(ThreadContext, Size, sizeof(ULONG));
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Return the exception code */
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    /* Initialize the wait event the APC routine will signal */
    KeInitializeEvent(&GetSetContext.Event, NotificationEvent, FALSE);

    /* Set the flags and previous mode */
    GetSetContext.Context.ContextFlags = Flags;
    GetSetContext.Mode = PreviousMode;

    /* Check if we're running in the same thread */
    if (Thread == PsGetCurrentThread())
    {
        /* Setup APC parameters manually (no real APC is queued) */
        GetSetContext.Apc.SystemArgument1 = NULL;
        GetSetContext.Apc.SystemArgument2 = Thread;

        /* Enter a guarded region to simulate APC_LEVEL */
        KeEnterGuardedRegion();

        /* Manually call the APC */
        PspGetOrSetContextKernelRoutine(&GetSetContext.Apc,
                                        NULL,
                                        NULL,
                                        &GetSetContext.Apc.SystemArgument1,
                                        &GetSetContext.Apc.SystemArgument2);

        /* Leave the guarded region */
        KeLeaveGuardedRegion();

        /* We are done */
        Status = STATUS_SUCCESS;
    }
    else
    {
        /* Initialize the APC against the target thread */
        KeInitializeApc(&GetSetContext.Apc,
                        &Thread->Tcb,
                        OriginalApcEnvironment,
                        PspGetOrSetContextKernelRoutine,
                        NULL,
                        NULL,
                        KernelMode,
                        NULL);

        /* Queue it as a Get APC (SystemArgument2 == 2 means "get") */
        if (!KeInsertQueueApc(&GetSetContext.Apc, NULL, Thread, 2))
        {
            /* It was already queued, so fail */
            Status = STATUS_UNSUCCESSFUL;
        }
        else
        {
            /* Wait for the APC to complete (0 == Executive wait reason) */
            Status = KeWaitForSingleObject(&GetSetContext.Event,
                                           0,
                                           KernelMode,
                                           FALSE,
                                           NULL);
        }
    }

    /* Copy the captured context out under SEH -- user buffer may have
       been unmapped while we waited */
    _SEH2_TRY
    {
        /* Copy the context */
        RtlCopyMemory(ThreadContext, &GetSetContext.Context, Size);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Get the exception code */
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    /* Return status */
    return Status;
}