VOID
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
    ULONG i;
    PULONG CurrentPageDirectory = (PULONG)PDE_BASE;

    /* Loop the 2GB of address space which belongs to the kernel */
    for (i = MiGetPdeOffset(MmSystemRangeStart); i < 2048; i++)
    {
        /* Check if we have an entry for this already */
        if ((i != MiGetPdeOffset(PTE_BASE)) &&
            (i != MiGetPdeOffset(HYPER_SPACE)) &&
            (!MmGlobalKernelPageDirectory[i]) &&
            (CurrentPageDirectory[i]))
        {
            /* We don't, link it in our global page directory */
            MmGlobalKernelPageDirectory[i] = CurrentPageDirectory[i];
        }
    }
}
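For orientation, here is a minimal sketch of the index arithmetic the loop above relies on: on non-PAE x86 each PDE maps 4MB, so a PDE offset is simply the top 10 bits of the virtual address, and with the usual 2GB/2GB split the kernel half begins at index 512. The helper name, the 0x80000000 start, and the addresses below are assumptions for illustration, not definitions taken from this file.

#include <stdio.h>

#define PDE_SHIFT 22u                            /* 4MB per PDE on non-PAE x86 */

static unsigned int SketchGetPdeOffset(unsigned long Va)
{
    return (unsigned int)(Va >> PDE_SHIFT);      /* top 10 bits of the address */
}

int main(void)
{
    unsigned long SystemRangeStart = 0x80000000ul;   /* assumed 2GB/2GB split */

    printf("first kernel PDE index: %u\n", SketchGetPdeOffset(SystemRangeStart));
    printf("PTE_BASE (0xC0000000) index: %u\n", SketchGetPdeOffset(0xC0000000ul));
    return 0;
}

Compiled standalone, this prints 512 for the first kernel PDE and 768 for the PTE_BASE slot, one of the per-process entries the loop above deliberately skips.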
VOID
MiUpdateSystemPdes (
    IN PEPROCESS Process
    )

/*++

Routine Description:

    This routine updates the system PDEs, typically due to a large page
    system PTE mapping being created or destroyed.  This is rare.

    Note this is only needed for 32-bit platforms (64-bit platforms share
    a common top level system page).

Arguments:

    Process - Supplies a pointer to the process to update.

Return Value:

    None.

Environment:

    Kernel mode, expansion lock held.

    The caller acquired the expansion lock prior to clearing the update
    bit from this process.  We must update the PDEs prior to releasing
    it so that any new updates can also be rippled.

--*/

{
    ULONG i;
    ULONG PdeOffset;
    ULONG PdeEndOffset;
    MMPTE TempPte;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER TargetPageDirectoryIndex;
    PEPROCESS CurrentProcess;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE TargetPdePage;
    PMMPTE TargetAddressSpacePde;

    ASSERT (KeGetCurrentIrql () == DISPATCH_LEVEL);

    CurrentProcess = PsGetCurrentProcess ();

    //
    // Map the page directory page in hyperspace.
    // Note for PAE, this is the high 1GB virtual only.
    //

    ASSERT (Process->Pcb.DirectoryTableBase[0] != 0);
    TargetPageDirectoryIndex = Process->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    ASSERT (PsInitialSystemProcess != NULL);
    ASSERT (PsInitialSystemProcess->Pcb.DirectoryTableBase[0] != 0);
    PageDirectoryIndex =
        PsInitialSystemProcess->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = TargetPageDirectoryIndex;
    ASSERT (MiLargePageHyperPte->u.Long == 0);
    MI_WRITE_VALID_PTE (MiLargePageHyperPte, TempPte);
    TargetPdePage = MiGetVirtualAddressMappedByPte (MiLargePageHyperPte);

    //
    // Map the system process page directory as we know that's always kept
    // up to date.
    //

    PointerPte = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
                                             PageDirectoryIndex);

    //
    // Copy all system PTE ranges.
    //

    for (i = 0; i < MiPteRangeIndex; i += 1) {

        PdeOffset = MiGetPdeOffset (MiPteRanges[i].StartingVa);
        PdeEndOffset = MiGetPdeOffset (MiPteRanges[i].EndingVa);

        PointerPde = &PointerPte[PdeOffset];
        TargetAddressSpacePde = &TargetPdePage[PdeOffset];

        RtlCopyMemory (TargetAddressSpacePde,
                       PointerPde,
                       (PdeEndOffset - PdeOffset + 1) * sizeof (MMPTE));
    }

    MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);

    //
    // Just invalidate the mapping on the current processor as we cannot
    // have context switched.
    //

    MI_WRITE_ZERO_PTE (MiLargePageHyperPte);
    MI_FLUSH_CURRENT_TB_SINGLE (TargetPdePage);

    return;
}
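To make the ripple concrete, here is a minimal user-mode model of why it exists on 32-bit: every process owns a private page directory whose kernel half starts out as a copy of the system one, so a PDE that appears later for a large-page system PTE mapping has to be copied into each process's directory, which is what the per-range RtlCopyMemory above does. All of the types, names, and values below are invented for illustration.

#include <string.h>
#include <stdio.h>

#define SKETCH_PDE_PER_PAGE     1024
#define SKETCH_FIRST_KERNEL_PDE 512          /* 2GB/2GB split assumed */

typedef unsigned long SKETCH_PDE;

static SKETCH_PDE SystemDirectory[SKETCH_PDE_PER_PAGE];
static SKETCH_PDE ProcessDirectory[SKETCH_PDE_PER_PAGE];

/* copy one contiguous kernel PDE range from the system directory */
static void SketchRippleRange(unsigned int PdeOffset, unsigned int PdeEndOffset)
{
    memcpy(&ProcessDirectory[PdeOffset],
           &SystemDirectory[PdeOffset],
           (PdeEndOffset - PdeOffset + 1) * sizeof(SKETCH_PDE));
}

int main(void)
{
    /* a new large-page system PTE range appears in the system directory... */
    SystemDirectory[800] = 0x12345083ul;     /* arbitrary "valid large page" PDE */
    SystemDirectory[801] = 0x12365083ul;

    /* ...and is rippled into the stale per-process copy */
    SketchRippleRange(800, 801);
    printf("process PDE 800 = %#lx\n", ProcessDirectory[800]);
    return 0;
}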
BOOLEAN
MmCreateProcessAddressSpace (
    IN ULONG MinimumWorkingSetSize,
    IN PEPROCESS NewProcess,
    OUT PULONG_PTR DirectoryTableBase
    )

/*++

Routine Description:

    This routine creates an address space which maps the system
    portion and contains a hyper space entry.

Arguments:

    MinimumWorkingSetSize - Supplies the minimum working set size for
                            this address space.  This value is only used
                            to ensure that ample physical pages exist
                            to create this process.

    NewProcess - Supplies a pointer to the process object being created.

    DirectoryTableBase - Returns the value of the newly created address
                         space's Page Directory (PD) page and hyper space
                         page.

Return Value:

    Returns TRUE if an address space was successfully created, FALSE
    if ample physical pages do not exist.

Environment:

    Kernel mode.  APCs Disabled.

--*/

{
    LOGICAL FlushTbNeeded;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER HyperSpaceIndex;
    PFN_NUMBER PageContainingWorkingSet;
    PFN_NUMBER VadBitMapPage;
    MMPTE TempPte;
    MMPTE TempPte2;
    PEPROCESS CurrentProcess;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    ULONG Color;
    PMMPTE PointerPte;
    ULONG PdeOffset;
    PMMPTE MappingPte;
    PMMPTE PointerFillPte;
    PMMPTE CurrentAddressSpacePde;

    //
    // Charge commitment for the page directory pages, working set page
    // table page, and working set list.  If Vad bitmap lookups are enabled,
    // then charge for a page or two for that as well.
    //

    if (MiChargeCommitment (MM_PROCESS_COMMIT_CHARGE, NULL) == FALSE) {
        return FALSE;
    }

    FlushTbNeeded = FALSE;

    CurrentProcess = PsGetCurrentProcess ();

    NewProcess->NextPageColor = (USHORT) (RtlRandom (&MmProcessColorSeed));
    KeInitializeSpinLock (&NewProcess->HyperSpaceLock);

    //
    // Get the PFN lock to get physical pages.
    //

    LOCK_PFN (OldIrql);

    //
    // Check to make sure the physical pages are available.
    //

    if (MI_NONPAGEABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MinimumWorkingSetSize){

        UNLOCK_PFN (OldIrql);
        MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);

        //
        // Indicate no directory base was allocated.
        //

        return FALSE;
    }

    MM_TRACK_COMMIT (MM_DBG_COMMIT_PROCESS_CREATE, MM_PROCESS_COMMIT_CHARGE);

    MI_DECREMENT_RESIDENT_AVAILABLE (MinimumWorkingSetSize,
                                     MM_RESAVAIL_ALLOCATE_CREATE_PROCESS);

    //
    // Allocate a page directory page.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
                                       &CurrentProcess->NextPageColor);

    PageDirectoryIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Allocate the hyper space page table page.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE),
                                       &CurrentProcess->NextPageColor);

    HyperSpaceIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (HyperSpaceIndex);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Remove page(s) for the VAD bitmap.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
                                      &CurrentProcess->NextPageColor);

    VadBitMapPage = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (VadBitMapPage);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Remove a page for the working set list.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
                                      &CurrentProcess->NextPageColor);

    PageContainingWorkingSet = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageContainingWorkingSet);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    UNLOCK_PFN (OldIrql);

    if (FlushTbNeeded == TRUE) {
        MI_FLUSH_TB_FOR_CACHED_ATTRIBUTE ();
    }

    ASSERT (NewProcess->AddressSpaceInitialized == 0);

    PS_SET_BITS (&NewProcess->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE1);

    ASSERT (NewProcess->AddressSpaceInitialized == 1);

    NewProcess->Vm.MinimumWorkingSetSize = MinimumWorkingSetSize;

    NewProcess->WorkingSetPage = PageContainingWorkingSet;

    INITIALIZE_DIRECTORY_TABLE_BASE (&DirectoryTableBase[0],
                                     PageDirectoryIndex);

    INITIALIZE_DIRECTORY_TABLE_BASE (&DirectoryTableBase[1], HyperSpaceIndex);

    //
    // Initialize the page reserved for hyper space.
    //

    TempPte = ValidPdePde;
    MI_SET_GLOBAL_STATE (TempPte, 0);

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  HyperSpaceIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        PointerPte = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        PointerPte = MiMapPageInHyperSpace (CurrentProcess,
                                            HyperSpaceIndex,
                                            &OldIrql);
    }

    TempPte.u.Hard.PageFrameNumber = VadBitMapPage;
    PointerPte[MiGetPteOffset(VAD_BITMAP_SPACE)] = TempPte;

    TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
    PointerPte[MiGetPteOffset(MmWorkingSetList)] = TempPte;

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
    }

    //
    // Set the PTE address in the PFN for the page directory page.
    //

    Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);
    Pfn1->PteAddress = (PMMPTE)PDE_BASE;

    TempPte = ValidPdePde;
    TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
    MI_SET_GLOBAL_STATE (TempPte, 0);

    //
    // Add the new process to our internal list prior to filling any
    // system PDEs so if a system PDE changes (large page map or unmap)
    // it can mark this process for a subsequent update.
    //

    ASSERT (NewProcess->Pcb.DirectoryTableBase[0] == 0);

    LOCK_EXPANSION (OldIrql);

    InsertTailList (&MmProcessList, &NewProcess->MmProcessLinks);

    UNLOCK_EXPANSION (OldIrql);

    //
    // Map the page directory page in hyperspace.
    //

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  PageDirectoryIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        PointerPte = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        PointerPte = MiMapPageInHyperSpace (CurrentProcess,
                                            PageDirectoryIndex,
                                            &OldIrql);
    }

    PdeOffset = MiGetPdeOffset (MmSystemRangeStart);
    PointerFillPte = &PointerPte[PdeOffset];
    CurrentAddressSpacePde = MiGetPdeAddress (MmSystemRangeStart);

    RtlCopyMemory (PointerFillPte,
                   CurrentAddressSpacePde,
                   PAGE_SIZE - PdeOffset * sizeof (MMPTE));

    //
    // Map the working set page table page.
    //

    PdeOffset = MiGetPdeOffset (HYPER_SPACE);
    PointerPte[PdeOffset] = TempPte;

    //
    // Zero the remaining page directory range used to map the working
    // set list and its hash.
    //

    PdeOffset += 1;
    ASSERT (MiGetPdeOffset (MmHyperSpaceEnd) >= PdeOffset);

    MiZeroMemoryPte (&PointerPte[PdeOffset],
                     (MiGetPdeOffset (MmHyperSpaceEnd) - PdeOffset + 1));

    //
    // Recursively map the page directory page so it points to itself.
    //

    TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
    PointerPte[MiGetPdeOffset(PTE_BASE)] = TempPte;

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
    }

    InterlockedExchangeAddSizeT (&MmProcessCommit, MM_PROCESS_COMMIT_CHARGE);

    //
    // Up the session space reference count.
    //

    MiSessionAddProcess (NewProcess);

    return TRUE;
}
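The single most consequential write above is the self-map: the page directory's own frame is stored into the PTE_BASE slot of the directory, which is what makes every PTE and PDE of the new address space reachable at a fixed virtual address. Below is a standalone sketch of that arithmetic, assuming the conventional non-PAE x86 constants (PTE_BASE = 0xC0000000, 4KB pages, 4-byte entries); those constants and the helper names are assumptions, not definitions from this file.

#include <stdio.h>

#define SKETCH_PTE_BASE 0xC0000000ul     /* assumed self-map slot (PDE index 768) */

static unsigned long SketchPteAddress(unsigned long Va)
{
    /* one 4-byte PTE per 4KB page, laid out linearly under PTE_BASE */
    return SKETCH_PTE_BASE + ((Va >> 12) << 2);
}

static unsigned long SketchPdeAddress(unsigned long Va)
{
    /* applying the mapping twice lands inside the page directory itself */
    return SketchPteAddress(SketchPteAddress(Va));
}

int main(void)
{
    unsigned long Va = 0x00400000ul;     /* arbitrary user-mode address */

    printf("PTE of %#lx -> %#lx\n", Va, SketchPteAddress(Va));
    printf("PDE of %#lx -> %#lx\n", Va, SketchPdeAddress(Va));
    printf("PDE_BASE (PTE of PTE_BASE) -> %#lx\n",
           SketchPteAddress(SKETCH_PTE_BASE));
    return 0;
}

Applying the mapping once yields the virtual address of a VA's PTE; applying it twice lands in the page directory itself, which is why PDE_BASE falls at 0xC0300000 under these assumed constants.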
VOID
MiDeleteVirtualAddresses (
    IN PUCHAR StartingAddress,
    IN PUCHAR EndingAddress,
    IN ULONG AddressSpaceDeletion,
    IN PMMVAD Vad
    )

/*++

Routine Description:

    This routine deletes the specified virtual address range within
    the current process.

Arguments:

    StartingAddress - Supplies the first virtual address to delete.

    EndingAddress - Supplies the last address to delete.

    AddressSpaceDeletion - Supplies TRUE if the address space is being
                           deleted, FALSE otherwise.  If TRUE is specified
                           the TB is not flushed and valid addresses are
                           not removed from the working set.

    Vad - Supplies the virtual address descriptor which maps this range
          or NULL if we are not concerned about views.  From the Vad the
          range of prototype PTEs is determined and this information is
          used to uncover if the PTE refers to a prototype PTE or a
          fork PTE.

Return Value:

    None.

Environment:

    Kernel mode, called with APCs disabled, working set mutex and PFN
    lock held.  These mutexes may be released and reacquired to fault
    pages in.

--*/

{
    PUCHAR Va;
    PVOID TempVa;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE OriginalPointerPte;
    PMMPTE ProtoPte;
    PMMPTE LastProtoPte;
    PEPROCESS CurrentProcess;
    ULONG FlushTb = FALSE;
    PSUBSECTION Subsection;
    PUSHORT UsedPageTableCount;
    KIRQL OldIrql = APC_LEVEL;
    MMPTE_FLUSH_LIST FlushList;

    FlushList.Count = 0;

    MM_PFN_LOCK_ASSERT();

    CurrentProcess = PsGetCurrentProcess();

    Va = StartingAddress;
    PointerPde = MiGetPdeAddress (Va);
    PointerPte = MiGetPteAddress (Va);
    OriginalPointerPte = PointerPte;
    UsedPageTableCount =
            &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Va)];

    while (MiDoesPdeExistAndMakeValid (PointerPde, CurrentProcess, TRUE) ==
                                                                    FALSE) {

        //
        // This page directory entry is empty, go to the next one.
        //

        PointerPde += 1;
        PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
        Va = MiGetVirtualAddressMappedByPte (PointerPte);

        if (Va > EndingAddress) {

            //
            // All done, return.
            //

            return;
        }

        UsedPageTableCount += 1;
    }

    //
    // A valid PDE has been located, examine each PTE and delete them.
    //

    if ((Vad == (PMMVAD)NULL) ||
        (Vad->u.VadFlags.PrivateMemory) ||
        (Vad->FirstPrototypePte == (PMMPTE)NULL)) {
        ProtoPte = (PMMPTE)NULL;
        LastProtoPte = (PMMPTE)NULL;
    }
    else {
        ProtoPte = Vad->FirstPrototypePte;
        LastProtoPte = (PMMPTE)4;
    }

    //
    // Examine each PTE within the address range and delete it.
    //

    while (Va <= EndingAddress) {

        if (((ULONG)Va & PAGE_DIRECTORY_MASK) == 0) {

            //
            // Note, the initial address could be aligned on a 4mb boundary.
            //

            //
            // The virtual address is on a page directory (4mb) boundary,
            // check the next PDE for validity and flush PTEs for previous
            // page table page.
            //

            MiFlushPteList (&FlushList, FALSE, ZeroPte);

            //
            // If all the entries have been eliminated from the previous
            // page table page, delete the page table page itself.
            //

            if ((*UsedPageTableCount == 0) && (PointerPde->u.Long != 0)) {

                TempVa = MiGetVirtualAddressMappedByPte(PointerPde);
                MiDeletePte (PointerPde,
                             TempVa,
                             AddressSpaceDeletion,
                             CurrentProcess,
                             NULL,
                             NULL);
            }

            //
            // Release the PFN lock.  This prevents a single thread
            // from forcing other high priority threads from being
            // blocked while a large address range is deleted.  There
            // is nothing magic about the instruction within the
            // lock and unlock.
            //

            UNLOCK_PFN (OldIrql);
            PointerPde = MiGetPdeAddress (Va);
            LOCK_PFN (OldIrql);

            UsedPageTableCount =
                &MmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Va)];

            while (MiDoesPdeExistAndMakeValid (
                                PointerPde, CurrentProcess, TRUE) == FALSE) {

                //
                // This page directory entry is empty, go to the next one.
                //

                PointerPde += 1;
                PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
                Va = MiGetVirtualAddressMappedByPte (PointerPte);

                if (Va > EndingAddress) {

                    //
                    // All done, return.
                    //

                    return;
                }

                UsedPageTableCount += 1;

                if (LastProtoPte != NULL) {
                    ProtoPte = MiGetProtoPteAddress(Vad,Va);
                    Subsection = MiLocateSubsection (Vad,Va);
                    LastProtoPte =
                        &Subsection->SubsectionBase[Subsection->PtesInSubsection];
#if DBG
                    if (Vad->u.VadFlags.ImageMap != 1) {
                        if ((ProtoPte < Subsection->SubsectionBase) ||
                            (ProtoPte >= LastProtoPte)) {
                            DbgPrint ("bad proto pte %lx va %lx Vad %lx sub %lx\n",
                                      ProtoPte,Va,Vad,Subsection);
                            DbgBreakPoint();
                        }
                    }
#endif //DBG
                }
            }
        }

        //
        // The PDE is now valid, delete the PTEs.
        //

        if (PointerPte->u.Long != 0) {
#ifdef R4000
            ASSERT (PointerPte->u.Hard.Global == 0);
#endif

            //
            // One less used page table entry in this page table page.
            //

            *UsedPageTableCount -= 1;
            ASSERT (*UsedPageTableCount < PTE_PER_PAGE);

            if (IS_PTE_NOT_DEMAND_ZERO (*PointerPte)) {

                if (LastProtoPte != NULL) {

                    if (ProtoPte >= LastProtoPte) {
                        ProtoPte = MiGetProtoPteAddress(Vad,Va);
                        Subsection = MiLocateSubsection (Vad,Va);
                        LastProtoPte =
                            &Subsection->SubsectionBase[Subsection->PtesInSubsection];
                    }
#if DBG
                    if (Vad->u.VadFlags.ImageMap != 1) {
                        if ((ProtoPte < Subsection->SubsectionBase) ||
                            (ProtoPte >= LastProtoPte)) {
                            DbgPrint ("bad proto pte %lx va %lx Vad %lx sub %lx\n",
                                      ProtoPte,Va,Vad,Subsection);
                            DbgBreakPoint();
                        }
                    }
#endif //DBG
                }

                MiDeletePte (PointerPte,
                             (PVOID)Va,
                             AddressSpaceDeletion,
                             CurrentProcess,
                             ProtoPte,
                             &FlushList);
            }
            else {
                *PointerPte = ZeroPte;
            }
        }

        Va = Va + PAGE_SIZE;
        PointerPte++;
        ProtoPte++;
    }

    //
    // Flush out entries for the last page table page.
    //

    MiFlushPteList (&FlushList, FALSE, ZeroPte);

    //
    // If all the entries have been eliminated from the previous
    // page table page, delete the page table page itself.
    //

    if ((*UsedPageTableCount == 0) && (PointerPde->u.Long != 0)) {

        TempVa = MiGetVirtualAddressMappedByPte(PointerPde);
        MiDeletePte (PointerPde,
                     TempVa,
                     AddressSpaceDeletion,
                     CurrentProcess,
                     NULL,
                     NULL);
    }

    //
    // All done, return.
    //

    return;
}
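MiFlushPteList above batches translation-buffer invalidations so a large range deletion does not pay one flush per page. The sketch below is a toy, user-mode model of that batching, under the assumption that a real list falls back to a full TB flush once it overflows; the type, limit, and function names are invented for illustration, not the kernel's definitions.

#include <stdio.h>

#define SKETCH_MAX_FLUSH 16                     /* assumed batch limit */

typedef struct _SKETCH_FLUSH_LIST {
    unsigned long Count;
    void *FlushVa[SKETCH_MAX_FLUSH];
} SKETCH_FLUSH_LIST;

/* stand-ins for the single-address and whole-TB invalidation primitives */
static void SketchFlushSingleTb(void *Va)  { printf("invlpg %p\n", Va); }
static void SketchFlushEntireTb(void)      { printf("flush entire TB\n"); }

static void SketchQueueFlush(SKETCH_FLUSH_LIST *List, void *Va)
{
    if (List->Count < SKETCH_MAX_FLUSH) {
        List->FlushVa[List->Count] = Va;        /* remember the address */
    }
    List->Count += 1;                           /* overflow is still counted */
}

static void SketchProcessFlushList(SKETCH_FLUSH_LIST *List)
{
    unsigned long i;

    if (List->Count > SKETCH_MAX_FLUSH) {
        SketchFlushEntireTb();                  /* too many entries: flush it all */
    }
    else {
        for (i = 0; i < List->Count; i += 1) {
            SketchFlushSingleTb(List->FlushVa[i]);
        }
    }
    List->Count = 0;
}

int main(void)
{
    SKETCH_FLUSH_LIST FlushList = { 0 };
    unsigned long Page;

    for (Page = 0; Page < 4; Page += 1) {
        SketchQueueFlush(&FlushList, (void *)(0x10000 + Page * 0x1000));
    }
    SketchProcessFlushList(&FlushList);         /* one batch, four invalidations */
    return 0;
}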
VOID
MiUpdateSystemPdes (
    IN PEPROCESS Process
    )

/*++

Routine Description:

    This routine updates the system PDEs, typically due to a large page
    system PTE mapping being created or destroyed.  This is rare.

    Note this is only needed for 32-bit platforms (64-bit platforms share
    a common top level system page).

Arguments:

    Process - Supplies a pointer to the process to update.

Return Value:

    None.

Environment:

    Kernel mode, expansion lock held.

    The caller acquired the expansion lock prior to clearing the update
    bit from this process.  We must update the PDEs prior to releasing
    it so that any new updates can also be rippled.

--*/

{
    ULONG PdeOffset;
    ULONG PdeEndOffset;
    LOGICAL LowPtes;
    PVOID VirtualAddress;
    MMPTE TempPte;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER TargetPageDirectoryIndex;
    PEPROCESS CurrentProcess;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE TargetPdePage;
    PMMPTE TargetAddressSpacePde;
    PMMPTE PaeTop;
    ULONG i;

    ASSERT (KeGetCurrentIrql () == DISPATCH_LEVEL);

    CurrentProcess = PsGetCurrentProcess ();

    //
    // Map the page directory page in hyperspace.
    // Note for PAE, this is the high 1GB virtual only.
    //

    PaeTop = Process->PaeTop;
    ASSERT (PaeTop != NULL);
    PaeTop += 3;
    ASSERT (PaeTop->u.Hard.Valid == 1);
    TargetPageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

    PaeTop = &MiSystemPaeVa.PteEntry[PD_PER_SYSTEM - 1];
    ASSERT (PaeTop->u.Hard.Valid == 1);
    PageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = TargetPageDirectoryIndex;
    ASSERT (MiLargePageHyperPte->u.Long == 0);
    MI_WRITE_VALID_PTE (MiLargePageHyperPte, TempPte);
    TargetPdePage = MiGetVirtualAddressMappedByPte (MiLargePageHyperPte);

    LowPtes = FALSE;

    //
    // Map the system process page directory as we know that's always kept
    // up to date.
    //

    PointerPte = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
                                             PageDirectoryIndex);

    //
    // Copy all system PTE ranges that reside in the top 1GB.
    //

    for (i = 0; i < MiPteRangeIndex; i += 1) {

        VirtualAddress = MiPteRanges[i].StartingVa;

        if (VirtualAddress < (PVOID) 0xC0000000) {
            LowPtes = TRUE;
            continue;
        }

        PdeOffset = MiGetPdeOffset (VirtualAddress);
        PdeEndOffset = MiGetPdeOffset (MiPteRanges[i].EndingVa);

        PointerPde = &PointerPte[PdeOffset];
        TargetAddressSpacePde = &TargetPdePage[PdeOffset];

        RtlCopyMemory (TargetAddressSpacePde,
                       PointerPde,
                       (PdeEndOffset - PdeOffset + 1) * sizeof (MMPTE));
    }

    MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);

    //
    // Just invalidate the mapping on the current processor as we cannot
    // have context switched.
    //

    MI_WRITE_ZERO_PTE (MiLargePageHyperPte);
    MI_FLUSH_CURRENT_TB_SINGLE (TargetPdePage);

    ASSERT (MmSystemRangeStart >= (PVOID) 0x80000000);

    //
    // Copy low additional system PTE ranges (if they exist).
    //

    if (LowPtes == TRUE) {

        ASSERT (MmSystemRangeStart < (PVOID) 0xC0000000);

        //
        // Map the target process' page directory.
        //

        PaeTop = Process->PaeTop;
        ASSERT (PaeTop != NULL);
        PaeTop += 2;
        ASSERT (PaeTop->u.Hard.Valid == 1);
        TargetPageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

        TempPte.u.Hard.PageFrameNumber = TargetPageDirectoryIndex;
        ASSERT (MiLargePageHyperPte->u.Long == 0);
        MI_WRITE_VALID_PTE (MiLargePageHyperPte, TempPte);

        //
        // Map the system's page directory.
        //

        PaeTop = &MiSystemPaeVa.PteEntry[PD_PER_SYSTEM - 2];
        ASSERT (PaeTop->u.Hard.Valid == 1);
        PageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

        PdeOffset = MiGetPdeOffset (MmSystemRangeStart);
        TargetAddressSpacePde = &TargetPdePage[PdeOffset];

        PointerPte = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
                                                 PageDirectoryIndex);

        //
        // Copy all system ranges that reside in the 3rd GB.
        //

        for (i = 0; i < MiPteRangeIndex; i += 1) {

            VirtualAddress = MiPteRanges[i].StartingVa;

            if (VirtualAddress < (PVOID) 0xC0000000) {

                PdeOffset = MiGetPdeOffset (VirtualAddress);
                PdeEndOffset = MiGetPdeOffset (MiPteRanges[i].EndingVa);

                PointerPde = &PointerPte[PdeOffset];
                TargetAddressSpacePde = &TargetPdePage[PdeOffset];

                RtlCopyMemory (TargetAddressSpacePde,
                               PointerPde,
                               (PdeEndOffset - PdeOffset + 1) * sizeof (MMPTE));
            }
        }

        MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);

        //
        // Just invalidate the mapping on the current processor as we cannot
        // have context switched.
        //

        MI_WRITE_ZERO_PTE (MiLargePageHyperPte);
        MI_FLUSH_CURRENT_TB_SINGLE (TargetPdePage);
    }

    return;
}
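For reference, here is the PAE arithmetic that drives the two passes above, written as a standalone sketch: the four page directories each cover 1GB with 512 eight-byte PDEs mapping 2MB apiece, so the top gigabyte lives in directory 3 and the third gigabyte in directory 2. The helper names and example addresses below are assumptions for illustration only.

#include <stdio.h>

static unsigned int SketchPaeDirectory(unsigned long Va)
{
    return (unsigned int)(Va >> 30);             /* which 1GB page directory */
}

static unsigned int SketchPaePdeOffset(unsigned long Va)
{
    return (unsigned int)((Va >> 21) & 0x1FF);   /* 2MB PDE within that directory */
}

int main(void)
{
    unsigned long SystemPteVa = 0xC0800000ul;    /* example address in the top 1GB */
    unsigned long LowSystemVa = 0xB0000000ul;    /* example address in the 3rd GB */

    printf("%#lx -> directory %u, PDE %u\n", SystemPteVa,
           SketchPaeDirectory(SystemPteVa), SketchPaePdeOffset(SystemPteVa));
    printf("%#lx -> directory %u, PDE %u\n", LowSystemVa,
           SketchPaeDirectory(LowSystemVa), SketchPaePdeOffset(LowSystemVa));
    return 0;
}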
ULONG
MiDecommitPages (
    IN PVOID StartingAddress,
    IN PMMPTE EndingPte,
    IN PEPROCESS Process,
    IN PMMVAD_SHORT Vad
    )

/*++

Routine Description:

    This routine decommits the specified range of pages.

Arguments:

    StartingAddress - Supplies the starting address of the range.

    EndingPte - Supplies the ending PTE of the range.

    Process - Supplies the current process.

    Vad - Supplies the virtual address descriptor which describes the range.

Return Value:

    Value to reduce commitment by for the VAD.

Environment:

    Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
    held.

--*/

{
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    PVOID Va;
    ULONG PdeOffset;
    ULONG CommitReduction = 0;
    PMMPTE CommitLimitPte;
    KIRQL OldIrql;
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count = 0;
    ULONG WorkingSetIndex;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    PVOID SwapVa;
    ULONG Entry;
    MMWSLENTRY Locked;
    MMPTE PteContents;

    if (Vad->u.VadFlags.MemCommit) {
        CommitLimitPte = MiGetPteAddress (Vad->EndingVa);
    }
    else {
        CommitLimitPte = NULL;
    }

    //
    // Decommit each page by setting the PTE to be explicitly
    // decommitted.  The PTEs cannot be deleted all at once as
    // this would set the PTEs to zero which would auto-evaluate
    // as committed if referenced by another thread when a page
    // table page is being in-paged.
    //

    PointerPde = MiGetPdeAddress (StartingAddress);
    PointerPte = MiGetPteAddress (StartingAddress);
    Va = StartingAddress;
    PdeOffset = MiGetPdeOffset (Va);

    //
    // Loop through all the PDEs which map this region and ensure that
    // they exist.  If they don't exist create them by touching a
    // PTE mapped by the PDE.
    //

    //
    // Get the PFN mutex so the MiDeletePte can be called.
    //

    MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);

    while (PointerPte <= EndingPte) {

        if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {

            PdeOffset = MiGetPdeOffset (Va);
            PointerPde = MiGetPdeAddress (Va);

            if (count != 0) {
                MiProcessValidPteList (&ValidPteList[0], count);
                count = 0;
            }

            MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
        }

        //
        // The working set lock is held.  No PTEs can go from
        // invalid to valid or valid to invalid.  Transition
        // PTEs can go from transition to pagefile.
        //

        PteContents = *PointerPte;

        if (PteContents.u.Long != 0) {

            if (PointerPte->u.Long == MmDecommittedPte.u.Long) {

                //
                // This PTE is already decommitted.
                //

                CommitReduction += 1;

            }
            else {

                Process->NumberOfPrivatePages -= 1;

                if (PteContents.u.Hard.Valid == 1) {

                    //
                    // Make sure this is not a forked PTE.
                    //

                    Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

                    if (Pfn1->u3.e1.PrototypePte) {

                        LOCK_PFN (OldIrql);
                        MiDeletePte (PointerPte,
                                     Va,
                                     FALSE,
                                     Process,
                                     NULL,
                                     NULL);
                        UNLOCK_PFN (OldIrql);
                        Process->NumberOfPrivatePages += 1;
                        *PointerPte = MmDecommittedPte;
                    }
                    else {

                        //
                        // Pte is valid, process later when PFN lock is held.
                        //

                        if (count == MM_VALID_PTE_SIZE) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }
                        ValidPteList[count] = PointerPte;
                        count += 1;

                        //
                        // Remove address from working set list.
                        //

                        WorkingSetIndex = Pfn1->u1.WsIndex;

                        ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
                                Va);

                        //
                        // Check to see if this entry is locked in the
                        // working set or locked in memory.
                        //

                        Locked = MmWsle[WorkingSetIndex].u1.e1;

                        MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);

                        //
                        // Add this entry to the list of free working set
                        // entries and adjust the working set count.
                        //

                        MiReleaseWsle (WorkingSetIndex, &Process->Vm);

                        if ((Locked.LockedInWs == 1) ||
                            (Locked.LockedInMemory == 1)) {

                            //
                            // This entry is locked.
                            //

                            MmWorkingSetList->FirstDynamic -= 1;

                            if (WorkingSetIndex !=
                                        MmWorkingSetList->FirstDynamic) {

                                SwapVa =
                                    MmWsle[MmWorkingSetList->FirstDynamic].u1.VirtualAddress;
                                SwapVa = PAGE_ALIGN (SwapVa);

                                Pfn2 = MI_PFN_ELEMENT (
                                    MiGetPteAddress (SwapVa)->u.Hard.PageFrameNumber);

                                Entry = MiLocateWsle (SwapVa,
                                                      MmWorkingSetList,
                                                      Pfn2->u1.WsIndex);

                                MiSwapWslEntries (Entry,
                                                  WorkingSetIndex,
                                                  &Process->Vm);
                            }
                        }
                    }
                }
                else if (PteContents.u.Soft.Prototype) {

                    //
                    // This is a forked PTE, just delete it.
                    //

                    LOCK_PFN (OldIrql);
                    MiDeletePte (PointerPte,
                                 Va,
                                 FALSE,
                                 Process,
                                 NULL,
                                 NULL);
                    UNLOCK_PFN (OldIrql);
                    Process->NumberOfPrivatePages += 1;
                    *PointerPte = MmDecommittedPte;
                }
                else if (PteContents.u.Soft.Transition == 1) {

                    //
                    // Transition PTE, get the PFN database lock
                    // and reprocess this one.
                    //

                    LOCK_PFN (OldIrql);

                    PteContents = *PointerPte;

                    if (PteContents.u.Soft.Transition == 1) {

                        //
                        // PTE is still in transition, delete it.
                        //

                        Pfn1 = MI_PFN_ELEMENT (
                                        PteContents.u.Trans.PageFrameNumber);

                        MI_SET_PFN_DELETED (Pfn1);

                        MiDecrementShareCount (Pfn1->PteFrame);

                        //
                        // Check the reference count for the page, if the
                        // reference count is zero, move the page to the
                        // free list, if the reference count is not zero,
                        // ignore this page.  When the reference count
                        // goes to zero, it will be placed on the free list.
                        //

                        if (Pfn1->u3.e2.ReferenceCount == 0) {
                            MiUnlinkPageFromList (Pfn1);
                            MiReleasePageFileSpace (Pfn1->OriginalPte);
                            MiInsertPageInList (
                                        MmPageLocationList[FreePageList],
                                        PteContents.u.Trans.PageFrameNumber);
                        }

                        *PointerPte = MmDecommittedPte;
                    }
                    else {

                        //
                        // Page MUST be in page file format!
                        //

                        ASSERT (PteContents.u.Soft.Valid == 0);
                        ASSERT (PteContents.u.Soft.Prototype == 0);
                        ASSERT (PteContents.u.Soft.PageFileHigh != 0);

                        MiReleasePageFileSpace (PteContents);
                        *PointerPte = MmDecommittedPte;
                    }

                    UNLOCK_PFN (OldIrql);
                }
                else {

                    //
                    // Must be demand zero or paging file format.
                    //

                    if (PteContents.u.Soft.PageFileHigh != 0) {
                        LOCK_PFN (OldIrql);
                        MiReleasePageFileSpace (PteContents);
                        UNLOCK_PFN (OldIrql);
                    }
                    else {

                        //
                        // Don't subtract out the private page count for
                        // a demand zero page.
                        //

                        Process->NumberOfPrivatePages += 1;
                    }

                    *PointerPte = MmDecommittedPte;
                }
            }

        }
        else {

            //
            // The PTE is already zero.
            //

            //
            // Increment the count of non-zero page table entries for this
            // page table and the number of private pages for the process.
            //

            MmWorkingSetList->UsedPageTableEntries[PdeOffset] += 1;

            if (PointerPte > CommitLimitPte) {

                //
                // Pte is not committed.
                //

                CommitReduction += 1;
            }

            *PointerPte = MmDecommittedPte;
        }

        PointerPte += 1;
        Va = (PVOID)((ULONG)Va + PAGE_SIZE);
    }

    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }

    return CommitReduction;
}
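The commit accounting above is easy to lose among the PTE cases, so here is a toy, user-mode model of just that bookkeeping (every name and value is invented): a PTE that is already marked decommitted, or that is still zero but lies beyond the VAD's MEM_COMMIT limit, was never charged to this range and therefore adds to the commit reduction returned to the caller.

#include <stdio.h>

typedef enum {
    SketchPteZero,          /* never touched */
    SketchPteDecommitted,   /* already carries the explicit decommit mark */
    SketchPteInUse          /* valid, transition, or pagefile-backed */
} SKETCH_PTE_STATE;

int main(void)
{
    SKETCH_PTE_STATE Ptes[6] = {
        SketchPteInUse, SketchPteZero, SketchPteDecommitted,
        SketchPteZero, SketchPteInUse, SketchPteDecommitted
    };
    int CommitLimitIndex = 3;       /* PTEs past this index were never committed */
    unsigned long CommitReduction = 0;
    int i;

    for (i = 0; i < 6; i += 1) {
        if (Ptes[i] == SketchPteDecommitted) {
            CommitReduction += 1;   /* decommitting twice adds no new charge */
        }
        else if (Ptes[i] == SketchPteZero && i > CommitLimitIndex) {
            CommitReduction += 1;   /* zero PTE beyond the MEM_COMMIT limit */
        }
        /* in the real routine every PTE ends up marked decommitted */
    }

    printf("commit reduction for the VAD: %lu\n", CommitReduction);
    return 0;
}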