static
VOID
MmDeletePageTablePfn(PFN_NUMBER PageFrameNumber, ULONG Level)
{
    PMMPTE PageTable;
    KIRQL OldIrql;
    PMMPFN PfnEntry;
    ULONG i, NumberEntries;

    /* Check if this is a page table */
    if (Level > 0)
    {
        NumberEntries = (Level == 4) ? MiAddressToPxi(MmHighestUserAddress) + 1 : 512;

        /* Map the page table in hyperspace */
        PageTable = (PMMPTE)MmCreateHyperspaceMapping(PageFrameNumber);

        /* Loop all page table entries */
        for (i = 0; i < NumberEntries; i++)
        {
            /* Check if the entry is valid */
            if (PageTable[i].u.Hard.Valid)
            {
                /* Recursively free the page that backs it */
                MmDeletePageTablePfn(PageTable[i].u.Hard.PageFrameNumber, Level - 1);
            }
        }

        /* Delete the hyperspace mapping */
        MmDeleteHyperspaceMapping(PageTable);
    }

    /* Check if this is a legacy allocation */
    PfnEntry = MiGetPfnEntry(PageFrameNumber);
    if (MI_IS_ROS_PFN(PfnEntry))
    {
        /* Free it using the legacy API */
        MmReleasePageMemoryConsumer(MC_SYSTEM, PageFrameNumber);
    }
    else
    {
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Free it using the ARM3 API */
        MI_SET_PFN_DELETED(PfnEntry);
        MiDecrementShareCount(PfnEntry, PageFrameNumber);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
}
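/*
 * Illustrative sketch (not part of the original file, compiled out): how the
 * recursive teardown above might be started for an x64 process, beginning at
 * the top-level page directory. The helper name, and the assumption that the
 * caller already holds the PML4's page frame number, are hypothetical.
 */
#if 0
static VOID MiSketchDeleteProcessPageTables(PFN_NUMBER Pml4PageFrameNumber)
{
    /* Level 4 walks only the user-mode PXEs; every valid entry recurses one
       level down until level 0, where the backing data page itself is freed */
    MmDeletePageTablePfn(Pml4PageFrameNumber, 4);
}
#endif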
PVOID
NTAPI
MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
                       IN PFN_NUMBER HighestPfn,
                       IN PFN_NUMBER BoundaryPfn,
                       IN PFN_NUMBER SizeInPages,
                       IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page;
    PHYSICAL_ADDRESS PhysicalAddress;
    PMMPFN Pfn1, EndPfn;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    // Our last hope is to scan the free page list for contiguous pages
    Page = MiFindContiguousPages(LowestPfn,
                                 HighestPfn,
                                 BoundaryPfn,
                                 SizeInPages,
                                 CacheType);
    if (!Page) return NULL;

    // We'll just piggyback on the I/O memory mapper
    PhysicalAddress.QuadPart = Page << PAGE_SHIFT;
    BaseAddress = MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
    ASSERT(BaseAddress);

    /* Loop the PFN entries */
    Pfn1 = MiGetPfnEntry(Page);
    EndPfn = Pfn1 + SizeInPages;
    PointerPte = MiAddressToPte(BaseAddress);
    do
    {
        /* Write the PTE address */
        Pfn1->PteAddress = PointerPte;
        Pfn1->u4.PteFrame = PFN_FROM_PTE(MiAddressToPte(PointerPte++));
    } while (++Pfn1 < EndPfn);

    /* Return the address */
    return BaseAddress;
}
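/*
 * Usage sketch (compiled out): callers do not invoke MiFindContiguousMemory
 * directly; a driver reaches this free-list scan through the documented
 * MmAllocateContiguousMemorySpecifyCache API. The bounds below are invented
 * for illustration.
 */
#if 0
static PVOID MiSketchAllocateDmaBuffer(VOID)
{
    PHYSICAL_ADDRESS Lowest, Highest, Boundary;
    Lowest.QuadPart = 0;
    Highest.QuadPart = 0x00FFFFFF;  /* e.g. a legacy 24-bit DMA constraint */
    Boundary.QuadPart = 0x10000;    /* never cross a 64KB boundary */
    return MmAllocateContiguousMemorySpecifyCache(4 * PAGE_SIZE,
                                                  Lowest,
                                                  Highest,
                                                  Boundary,
                                                  MmNonCached);
}
#endif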
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapIoSpace(IN PVOID BaseAddress,
               IN SIZE_T NumberOfBytes)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;

    // Sanity check
    ASSERT(NumberOfBytes != 0);

    // Get the page count
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, NumberOfBytes);

    // Get the PTE and PFN
    PointerPte = MiAddressToPte(BaseAddress);
    Pfn = PFN_FROM_PTE(PointerPte);

    // Is this an I/O mapping?
    if (!MiGetPfnEntry(Pfn))
    {
        // Destroy the PTE
        RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));

        // Blow the TLB
        KeFlushEntireTb(TRUE, TRUE);
    }

    // Release the PTEs
    MiReleaseSystemPtes(PointerPte, PageCount, 0);
}
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    // Figure out how big the allocation is in pages
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    // Check for overflow
    if (SizeInPages == 0)
    {
        // Fail
        return NULL;
    }

    // Handle paged pool
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        // If only one page is being requested, try to grab it from the S-LIST
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        // Lock the paged pool mutex
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        // Find some empty allocation space
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            // Get the page bit count
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            // Check if there is enough paged pool expansion space left
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                // Out of memory!
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            // Check if we'll have to expand past the last PTE we have available
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                // We can only support this much then
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                // Otherwise, there is plenty of space left for this expansion
                PageTableCount = i;
            }

            // Get the template PDE we'll use to expand
            TempPde = ValidKernelPde;

            // Get the first PTE in expansion space
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            // Lock the PFN database and loop pages
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                // It should not already be valid
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                // Save it into our double-buffered system page directory
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                // Move on to the next expansion address
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            // Release the PFN database lock
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            // These pages are now available, clear their availability bits
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            // Update the next expansion location
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            // Zero out the newly available memory
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            // Now try consuming the pages again
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                // Out of memory!
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        // Update the pool hint if the request was just one page
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        // Now we can release the lock (it mainly protects the bitmap)
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        // Now figure out where this allocation starts
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        // Flush the TLB
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        // Find the first and last PTE, then loop them all
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            // Write the demand zero PTE and keep going
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        // Return the allocation address to the caller
        return BaseVa;
    }

    // If only one page is being requested, try to grab it from the S-LIST
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    // Allocations of less than 4 pages go into their individual buckets
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    // Loop through all the free page lists based on the page index
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    // Acquire the nonpaged pool lock
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        // Now loop through all the free page entries in this given list
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            // Grab the entry and see if it can handle our allocation
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                // It does, so consume the pages from here
                FreeEntry->Size -= SizeInPages;

                // The allocation will begin in this free page area
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                // However, check if it's still got space left
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i],
                                                  &FreeEntry->List,
                                                  TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                       &FreeEntry->List);

                    /* Is freed nonpaged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                // Grab the PTE for this allocation
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                // Grab the PFN NextEntry and index
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                // Now mark it as the beginning of an allocation
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                // Check if the allocation is larger than one page
                if (SizeInPages != 1)
                {
                    // Navigate to the last PFN entry and PTE
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                // Mark this PFN as the last (might be the same as the first)
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                // Release the nonpaged pool lock, and return the allocation
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            // Try the next free page entry
            NextEntry = FreeEntry->List.Flink;

            /* Is freed nonpaged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    // If we got here, we're out of space.
    // Start by releasing the lock
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    // Allocate some system PTEs
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        // Ran out of memory
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    // Acquire the pool lock now
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    // Lock the PFN database too
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    // Loop the pages
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    // This is the last page
    Pfn1->u3.e1.EndOfAllocation = 1;

    // Get the first page and mark it as such
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    // Release the PFN and nonpaged pool lock
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    // Return the address
    return MiPteToAddress(StartPte);
}
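/*
 * Context sketch (compiled out): the executive pool allocator sits above
 * MiAllocatePoolPages and calls it for page-granular requests. This
 * stripped-down caller is hypothetical and omits the block headers, tagging
 * and lookaside logic that ExAllocatePoolWithTag adds on top.
 */
#if 0
static PVOID MiSketchAllocateWholePages(SIZE_T NumberOfBytes)
{
    /* A nonpaged request is served from the S-LIST, the bucketed free
       lists, or freshly mapped expansion PTEs, as implemented above */
    return MiAllocatePoolPages(NonPagedPool, NumberOfBytes);
}
#endif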
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

        // And finally return the number of pages freed
        return NumberOfPages;
    }

    // Get the first PTE and its corresponding PFN entry. If this is also the
    // last PTE, meaning that this allocation was only for one page, push it into
    // the S-LIST instead of freeing it
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
        (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
    {
        InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
        return 1;
    }

    // Loop until we find the last PTE
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        // Keep going
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    // Sanity checks
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    // We know the MDL isn't associated to a process now
    Mdl->Process = NULL;

    // Get page and VA information
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    // Set the system address and now get the page count
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    // Loop the PTEs
    PointerPte = MiAddressToPte(Base);
    do
    {
        // Write the PFN
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    // Set the nonpaged pool flag
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    // Check if this is an I/O mapping
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
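/*
 * Usage sketch (compiled out): the documented pattern for describing a buffer
 * that is already nonpaged, so no probe-and-lock is required. The helper name
 * is hypothetical.
 */
#if 0
static PMDL MiSketchDescribeNonPagedBuffer(PVOID Buffer, ULONG Length)
{
    PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
    if (Mdl) MmBuildMdlForNonPagedPool(Mdl); /* fills the PFN array directly */
    return Mdl;
}
#endif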
/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    // Sanity checks
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    // Get page and base information
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    // Get the addresses and how many pages we span (and need to lock)
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    // Get the process
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        // Get the process
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        // No process
        CurrentProcess = NULL;
    }

    // Save the number of pages we'll have to lock, and the start address
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    // Now probe them
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        // Enter probe loop
        do
        {
            // Assume failure
            *MdlPages = LIST_HEAD;

            // Read
            *(volatile CHAR*)Address;

            // Check if this is write access (only probe for user-mode)
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                // Probe for write too
                ProbeForWriteChar(Address);
            }

            // Next address...
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            // Next page...
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        // Reset back to the original page
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        // Oops :(
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    // So how did that go?
    if (ProbeStatus != STATUS_SUCCESS)
    {
        // Fail
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    // Get the PTE and PDE
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    // Sanity check
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    // Check what kind of operation this is
    if (Operation != IoReadAccess)
    {
        // Set the write flag
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        // Remove the write flag
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    // Mark the MDL as locked *now*
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    // Check if this came from kernel mode
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        // We should not have a process
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        // In kernel mode, we don't need to check for write access
        Operation = IoReadAccess;

        // Use the PFN lock
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        // Sanity checks
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        // Track locked pages
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        // Save the process
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    // Get the last PTE
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    // Loop the pages
    do
    {
        // Assume failure and check for non-mapped pages
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            // What kind of lock were we using?
            if (UsePfnLock)
            {
                // Release PFN lock
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            // Access the page
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                // Fail
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            // What lock should we use?
            if (UsePfnLock)
            {
                // Grab the PFN lock
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        // Check if this was a write or modify
        if (Operation != IoReadAccess)
        {
            // Check if the PTE is not writable
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                // Check if it's copy on write
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    // Get the base address and allow a change for user-mode
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        // What kind of lock were we using?
                        if (UsePfnLock)
                        {
                            // Release PFN lock
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        // Access the page
                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            // Fail
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        // Re-acquire the lock
                        if (UsePfnLock)
                        {
                            // Grab the PFN lock
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        // Start over
                        continue;
                    }
                }

                // Fail, since we won't allow this
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        // Grab the PFN
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            // For I/O addresses, just remember this
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        // Write the page and move on
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif
    } while (PointerPte <= LastPte);

    // What kind of lock were we using?
    if (UsePfnLock)
    {
        // Release PFN lock
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    // Sanity check
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    // This is the failure path
    ASSERT(!NT_SUCCESS(Status));

    // What kind of lock were we using?
    if (UsePfnLock)
    {
        // Release PFN lock
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    // Pages must be locked so MmUnlock can work
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    // Raise the error
    ExRaiseStatus(Status);
}
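/*
 * Usage sketch (compiled out): the documented calling pattern. Because
 * MmProbeAndLockPages raises an exception instead of returning a status,
 * callers wrap it in SEH; the helper name is hypothetical.
 */
#if 0
static NTSTATUS MiSketchLockUserBuffer(PMDL Mdl)
{
    NTSTATUS Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
    }
    _SEH2_END;
    return Status;
}
#endif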
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    // Sanity checks
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    // Get address and page information
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    // Acquire PFN lock
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    // Loop all the MDL pages
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        // Reached the last page
        if (*Pages == LIST_HEAD) break;

        // Get the page entry
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT,
                         0x1236,
                         (ULONG_PTR)Mdl,
                         (ULONG_PTR)Pages,
                         *Pages);
        }

        // Clear it
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        // Dereference it
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        // Clear this page and move on
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    // Release the lock
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    // Remove the pages locked flag
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
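/*
 * Usage sketch (compiled out): MmFreePagesFromMdl is the counterpart of the
 * documented MmAllocatePagesForMdl; note the MDL structure itself must still
 * be freed with ExFreePool afterwards. The helper name is hypothetical.
 */
#if 0
static VOID MiSketchAllocateAndFreeMdlPages(VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;
    Low.QuadPart = 0;
    High.QuadPart = -1; /* any physical page */
    Skip.QuadPart = 0;
    Mdl = MmAllocatePagesForMdl(Low, High, Skip, 8 * PAGE_SIZE);
    if (Mdl)
    {
        MmFreePagesFromMdl(Mdl);
        ExFreePool(Mdl); /* the MDL is a separate allocation */
    }
}
#endif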
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    // Sanity checks
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    // Get the process associated and capture the flags which are volatile
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        // Unmap the pages from system space
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    // Get the page count
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    // We don't support AWE
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    // Check if the buffer is mapped I/O space
    if (Flags & MDL_IO_SPACE)
    {
        // Acquire PFN lock
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        // Loop every page
        LastPage = MdlPages + PageCount;
        do
        {
            // Last page, break out
            if (*MdlPages == LIST_HEAD) break;

            // Check if this page is in the PFN database
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        // Release the lock
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        // Check if we have a process
        if (Process)
        {
            // Handle the accounting of locked pages
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        // We're done
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    // Check if we have a process
    if (Process)
    {
        // Handle the accounting of locked pages
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    // Loop every page
    LastPage = MdlPages + PageCount;
    do
    {
        // Last page reached
        if (*MdlPages == LIST_HEAD)
        {
            // Were there no pages at all?
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                // We're already done
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            // Otherwise, stop here
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    // Reset pointer
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    // Now grab the PFN lock for the actual unlock and dereference
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    // Release the lock
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    // We're done
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
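/*
 * Usage sketch (compiled out): MmUnlockPages undoes a system-space mapping
 * made with MmGetSystemAddressForMdlSafe (see the MDL_MAPPED_TO_SYSTEM_VA
 * path above), so typical cleanup is simply unlock-then-free. The helper
 * name is hypothetical.
 */
#if 0
static VOID MiSketchReleaseLockedMdl(PMDL Mdl)
{
    PVOID SystemVa = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
    UNREFERENCED_PARAMETER(SystemVa);
    MmUnlockPages(Mdl); /* also unmaps MappedSystemVa if it was mapped */
    IoFreeMdl(Mdl);
}
#endif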
NTSTATUS
NTAPI
MmNotPresentFaultCachePage(
    _In_ PMMSUPPORT AddressSpace,
    _In_ MEMORY_AREA* MemoryArea,
    _In_ PVOID Address,
    _In_ BOOLEAN Locked,
    _Inout_ PMM_REQUIRED_RESOURCES Required)
{
    NTSTATUS Status;
    PVOID PAddress;
    ULONG Consumer;
    PMM_SECTION_SEGMENT Segment;
    LARGE_INTEGER FileOffset, TotalOffset;
    ULONG_PTR Entry;
    ULONG Attributes;
    PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
    KIRQL OldIrql;

    DPRINT("Not Present: %p %p (%p-%p)\n",
           AddressSpace,
           Address,
           MemoryArea->StartingAddress,
           MemoryArea->EndingAddress);

    /*
     * There is a window between taking the page fault and locking the
     * address space when another thread could load the page so we check
     * that.
     */
    if (MmIsPagePresent(Process, Address))
    {
        DPRINT("Done\n");
        return STATUS_SUCCESS;
    }

    PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
    TotalOffset.QuadPart = (ULONG_PTR)PAddress -
                           (ULONG_PTR)MemoryArea->StartingAddress;

    Segment = MemoryArea->Data.SectionData.Segment;

    TotalOffset.QuadPart += MemoryArea->Data.SectionData.ViewOffset.QuadPart;
    FileOffset = TotalOffset;

    //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
    Consumer = MC_CACHE;

    if (Segment->FileObject)
    {
        DPRINT("FileName %wZ\n", &Segment->FileObject->FileName);
    }

    DPRINT("Total Offset %08x%08x\n", TotalOffset.HighPart, TotalOffset.LowPart);

    /* Lock the segment */
    MmLockSectionSegment(Segment);

    /* Get the entry corresponding to the offset within the section */
    Entry = MmGetPageEntrySectionSegment(Segment, &TotalOffset);

    Attributes = PAGE_READONLY;

    if (Required->State && Required->Page[0])
    {
        DPRINT("Have file and page, set page %x in section @ %x #\n",
               Required->Page[0],
               TotalOffset.LowPart);

        if (Required->SwapEntry)
            MmSetSavedSwapEntryPage(Required->Page[0], Required->SwapEntry);

        if (Required->State & 2)
        {
            DPRINT("Set in section @ %x\n", TotalOffset.LowPart);
            Status = MmSetPageEntrySectionSegment(Segment,
                                                  &TotalOffset,
                                                  Entry = MAKE_PFN_SSE(Required->Page[0]));
            if (!NT_SUCCESS(Status))
            {
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return STATUS_MM_RESTART_OPERATION;
        }
        else
        {
            DPRINT("Set %x in address space @ %p\n", Required->Page[0], Address);
            Status = MmCreateVirtualMapping(Process,
                                            Address,
                                            Attributes,
                                            Required->Page,
                                            1);
            if (NT_SUCCESS(Status))
            {
                MmInsertRmap(Required->Page[0], Process, Address);
            }
            else
            {
                /* Drop the reference for our address space ... */
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            DPRINTC("XXX Set Event %x\n", Status);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return Status;
        }
    }
    else if (MM_IS_WAIT_PTE(Entry))
    {
        // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
        // ask the fault handler to wait until we should continue. Rather
        // than re-copy this boilerplate code everywhere, we just ask them
        // to wait.
        MmUnlockSectionSegment(Segment);
        return STATUS_SUCCESS + 1;
    }
    else if (Entry)
    {
        PFN_NUMBER Page = PFN_FROM_SSE(Entry);
        DPRINT("Take reference to page %x #\n", Page);

        if (MiGetPfnEntry(Page) == NULL)
        {
            DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
                    Page,
                    Entry,
                    Segment,
                    TotalOffset.HighPart,
                    TotalOffset.LowPart);
            KeBugCheck(CACHE_MANAGER);
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        MmReferencePage(Page);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        Status = MmCreateVirtualMapping(Process, Address, Attributes, &Page, 1);
        if (NT_SUCCESS(Status))
        {
            MmInsertRmap(Page, Process, Address);
        }
        DPRINT("XXX Set Event %x\n", Status);
        MiSetPageEvent(Process, Address);
        MmUnlockSectionSegment(Segment);
        DPRINT("Status %x\n", Status);
        return Status;
    }
    else
    {
        DPRINT("Get page into section\n");
        /*
         * If the entry is zero (and it can't change because we have
         * locked the segment) then we need to load the page.
         */
        //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
        Required->State = 2;
        Required->Context = Segment->FileObject;
        Required->Consumer = Consumer;
        Required->FileOffset = FileOffset;
        Required->Amount = PAGE_SIZE;
        Required->DoAcquisition = MiReadFilePage;

        MmSetPageEntrySectionSegment(Segment,
                                     &TotalOffset,
                                     MAKE_SWAP_SSE(MM_WAIT_ENTRY));

        MmUnlockSectionSegment(Segment);
        return STATUS_MORE_PROCESSING_REQUIRED;
    }
    ASSERT(FALSE);
    return STATUS_ACCESS_VIOLATION;
}
VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    // First, check if the memory came from initial nonpaged pool, or expansion
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        // It did, so just use the pool to free this
        ExFreePoolWithTag(BaseAddress, 'mCmM');
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    // Now get the PFN entry for this, and make sure it's the correct one
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        // This probably means you did a free on an address that was in between
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    // Now this PFN isn't the start of any allocation anymore, it's going out
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we setup in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Keep going for assertions */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    // Found it, unmark it
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    // Now compute how many pages this represents
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    // Lock the PFN database
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    // Loop all the pages
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    // Release the PFN lock
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
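/*
 * Usage sketch (compiled out): drivers reach the routine above through the
 * documented MmFreeContiguousMemory export; the buffer must originate from
 * one of the MmAllocateContiguousMemory* APIs. The helper name is
 * hypothetical.
 */
#if 0
static VOID MiSketchFreeDmaBuffer(PVOID Buffer)
{
    MmFreeContiguousMemory(Buffer);
}
#endif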
VOID
NTAPI
MmZeroPageThread(VOID)
{
    PKTHREAD Thread = KeGetCurrentThread();
    PVOID StartAddress, EndAddress;
    PVOID WaitObjects[2];
    KIRQL OldIrql;
    PVOID ZeroAddress;
    PFN_NUMBER PageIndex, FreePage;
    PMMPFN Pfn1;

    /* Get the discardable sections to free them */
    MiFindInitializationCode(&StartAddress, &EndAddress);
    if (StartAddress) MiFreeInitializationCode(StartAddress, EndAddress);
    DPRINT("Free non-cache pages: %lx\n",
           MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);

    /* Set our priority to 0 */
    Thread->BasePriority = 0;
    KeSetPriorityThread(Thread, 0);

    /* Setup the wait objects */
    WaitObjects[0] = &MmZeroingPageEvent;
//    WaitObjects[1] = &PoSystemIdleTimer; FIXME: Implement idle timer

    while (TRUE)
    {
        KeWaitForMultipleObjects(1, // 2
                                 WaitObjects,
                                 WaitAny,
                                 WrFreePage,
                                 KernelMode,
                                 FALSE,
                                 NULL,
                                 NULL);
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (TRUE)
        {
            if (!MmFreePageListHead.Total)
            {
                MmZeroingPageThreadActive = FALSE;
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                break;
            }

            PageIndex = MmFreePageListHead.Flink;
            ASSERT(PageIndex != LIST_HEAD);
            Pfn1 = MiGetPfnEntry(PageIndex);
            MI_SET_USAGE(MI_USAGE_ZERO_LOOP);
            MI_SET_PROCESS2("Kernel 0 Loop");
            FreePage = MiRemoveAnyPage(MI_GET_PAGE_COLOR(PageIndex));

            /* The first global free page should also be the first on its own list */
            if (FreePage != PageIndex)
            {
                KeBugCheckEx(PFN_LIST_CORRUPT,
                             0x8F,
                             FreePage,
                             PageIndex,
                             0);
            }

            Pfn1->u1.Flink = LIST_HEAD;
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            ZeroAddress = MiMapPagesInZeroSpace(Pfn1, 1);
            ASSERT(ZeroAddress);
            RtlZeroMemory(ZeroAddress, PAGE_SIZE);
            MiUnmapPagesInZeroSpace(ZeroAddress, 1);

            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            MiInsertPageInList(&MmZeroedPageListHead, PageIndex);
        }
    }
}
/*
 * @implemented
 */
PVOID
NTAPI
MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
             IN SIZE_T NumberOfBytes,
             IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    MMPTE TempPte;
    PMMPFN Pfn1 = NULL;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    BOOLEAN IsIoMapping;

    // Must be called with a non-zero count
    ASSERT(NumberOfBytes != 0);

    // Make sure the upper bits are 0 if this system
    // can't describe more than 4 GB of physical memory.
    // FIXME: This doesn't respect PAE, but we currently don't
    // define a PAE build flag since there is no such build.
#if !defined(_M_AMD64)
    ASSERT(PhysicalAddress.HighPart == 0);
#endif

    // Normalize and validate the caching attributes
    CacheType &= 0xFF;
    if (CacheType >= MmMaximumCacheType) return NULL;

    // Calculate page count
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(PhysicalAddress.LowPart,
                                               NumberOfBytes);

    // Compute the PFN and check if it's a known I/O mapping
    // Also translate the cache attribute
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    Pfn1 = MiGetPfnEntry(Pfn);
    IsIoMapping = (Pfn1 == NULL) ? TRUE : FALSE;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    // Now allocate system PTEs for the mapping, and get the VA
    PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
    if (!PointerPte) return NULL;
    BaseAddress = MiPteToAddress(PointerPte);

    // Check if this is uncached
    if (CacheAttribute != MiCached)
    {
        // Flush all caches
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    // Now compute the VA offset
    BaseAddress = (PVOID)((ULONG_PTR)BaseAddress +
                          BYTE_OFFSET(PhysicalAddress.LowPart));

    // Get the template and configure caching
    TempPte = ValidKernelPte;
    switch (CacheAttribute)
    {
        case MiNonCached:

            // Disable the cache
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiCached:

            // Leave defaults
            break;

        case MiWriteCombined:

            // We don't support write combining yet
            ASSERT(FALSE);
            break;

        default:

            // Should never happen
            ASSERT(FALSE);
            break;
    }

    // Sanity check and re-flush
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    ASSERT((Pfn1 == MiGetPfnEntry(Pfn)) || (Pfn1 == NULL));
    KeFlushEntireTb(TRUE, TRUE);
    KeInvalidateAllCaches();

    // Do the mapping
    do
    {
        // Write the PFN
        TempPte.u.Hard.PageFrameNumber = Pfn++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    // We're done!
    return BaseAddress;
}
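/*
 * Usage sketch (compiled out): mapping a device register block uncached and
 * unmapping it on teardown. The physical address, length, and helper name
 * are invented for illustration.
 */
#if 0
static VOID MiSketchTouchDeviceRegisters(VOID)
{
    PHYSICAL_ADDRESS RegBase;
    PVOID Va;
    RegBase.QuadPart = 0xFED00000; /* hypothetical MMIO base */
    Va = MmMapIoSpace(RegBase, 0x1000, MmNonCached);
    if (Va)
    {
        (VOID)READ_REGISTER_ULONG((PULONG)Va);
        MmUnmapIoSpace(Va, 0x1000);
    }
}
#endif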
PVOID
NTAPI
MiMapPageInHyperSpace(IN PEPROCESS Process,
                      IN PFN_NUMBER Page,
                      IN PKIRQL OldIrql)
{
    MMPTE TempPte;
    PMMPTE PointerPte;
    PFN_NUMBER Offset;

    // Never accept page 0 or non-physical pages
    ASSERT(Page != 0);
    ASSERT(MiGetPfnEntry(Page) != NULL);

    // Build the PTE
    TempPte = ValidKernelPteLocal;
    TempPte.u.Hard.PageFrameNumber = Page;

    // Pick the first hyperspace PTE
    PointerPte = MmFirstReservedMappingPte;

    // Acquire the hyperlock
    ASSERT(Process == PsGetCurrentProcess());
    KeAcquireSpinLock(&Process->HyperSpaceLock, OldIrql);

    // Now get the first free PTE
    Offset = PFN_FROM_PTE(PointerPte);
    if (!Offset)
    {
        // Reset the PTEs
        Offset = MI_HYPERSPACE_PTES;
        KeFlushProcessTb();
    }

    // Prepare the next PTE
    PointerPte->u.Hard.PageFrameNumber = Offset - 1;

    // Write the current PTE
    PointerPte += Offset;
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    // Return the address
    return MiPteToAddress(PointerPte);
}
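/*
 * Usage sketch (compiled out): hyperspace mappings are short-lived and must
 * be bracketed by the matching MiUnmapPageInHyperSpace, which releases the
 * hyperspace lock acquired above. The helper name is hypothetical, and the
 * unmap signature is assumed from its use elsewhere in the Mm code.
 */
#if 0
static VOID MiSketchZeroPageViaHyperspace(PFN_NUMBER Page)
{
    KIRQL OldIrql;
    PEPROCESS Process = PsGetCurrentProcess();
    PVOID Va = MiMapPageInHyperSpace(Process, Page, &OldIrql);
    RtlZeroMemory(Va, PAGE_SIZE);
    MiUnmapPageInHyperSpace(Process, Va, OldIrql);
}
#endif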