/*
 * @implemented
 */
VOID
NTAPI
MmUnmapIoSpace(IN PVOID BaseAddress,
               IN SIZE_T NumberOfBytes)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Get the page count
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, NumberOfBytes);

    //
    // Get the PTE and PFN
    //
    PointerPte = MiAddressToPte(BaseAddress);
    Pfn = PFN_FROM_PTE(PointerPte);

    //
    // Is this an I/O mapping?
    //
    if (!MiGetPfnEntry(Pfn))
    {
        //
        // Destroy the PTE
        //
        RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));

        //
        // Blow the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);
    }

    //
    // Release the PTEs
    //
    MiReleaseSystemPtes(PointerPte, PageCount, 0);
}
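/*
 * Usage sketch (illustration only, not part of the routine above):
 * MmUnmapIoSpace is the release half of MmMapIoSpace; a driver passes the
 * same base address and length it originally mapped. The physical register
 * block address and the register write below are hypothetical.
 */
static VOID
ExampleTouchDeviceRegisters(VOID)
{
    PHYSICAL_ADDRESS RegBase;
    PVOID Registers;

    /* Hypothetical device register block */
    RegBase.QuadPart = 0xFED00000;

    /* Map one page of registers uncached */
    Registers = MmMapIoSpace(RegBase, PAGE_SIZE, MmNonCached);
    if (Registers == NULL) return;

    /* Example device access */
    WRITE_REGISTER_ULONG((PULONG)Registers, 1);

    /* Unmap with the same base and length that were mapped */
    MmUnmapIoSpace(Registers, PAGE_SIZE);
}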
/*
 * @implemented
 */
VOID
NTAPI
MmFreeNonCachedMemory(IN PVOID BaseAddress,
                      IN SIZE_T NumberOfBytes)
{
    PMDL Mdl;
    PMMPTE PointerPte;
    PFN_COUNT PageCount;

    //
    // Sanity checks
    //
    ASSERT(NumberOfBytes != 0);
    ASSERT(PAGE_ALIGN(BaseAddress) == BaseAddress);

    //
    // Get the page count
    //
    PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);

    //
    // Get the first PTE
    //
    PointerPte = MiAddressToPte(BaseAddress);

    //
    // Remember this is where we store the shadow MDL pointer
    //
    Mdl = *(PMDL*)(--PointerPte);

    //
    // Kill the MDL (and underlying pages)
    //
    MmFreePagesFromMdl(Mdl);
    ExFreePoolWithTag(Mdl, TAG_MDL);

    //
    // Now free the system PTEs for the underlying VA
    //
    MiReleaseSystemPtes(PointerPte, PageCount + 1, SystemPteSpace);
}
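/*
 * Usage sketch (hedged illustration): MmFreeNonCachedMemory releases a buffer
 * obtained from MmAllocateNonCachedMemory, and the byte count must match the
 * original allocation. The two-page size below is an arbitrary example; the
 * shadow MDL mentioned in the routine above is what actually tracks the pages.
 */
static VOID
ExampleNonCachedBuffer(VOID)
{
    SIZE_T Length = 2 * PAGE_SIZE;   /* arbitrary example size */
    PVOID Buffer;

    Buffer = MmAllocateNonCachedMemory(Length);
    if (Buffer == NULL) return;

    /* Use the buffer */
    RtlZeroMemory(Buffer, Length);

    /* Free with the same size that was allocated */
    MmFreeNonCachedMemory(Buffer, Length);
}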
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Do the math
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}
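/*
 * Usage sketch (hedged illustration): MmUnmapLockedPages undoes a system-space
 * mapping created from an MDL, for example via MmMapLockedPagesSpecifyCache.
 * The MDL passed in is assumed to already describe locked pages (e.g. from
 * MmProbeAndLockPages or an allocation MDL).
 */
static VOID
ExampleMapAndUnmapMdl(IN PMDL Mdl)
{
    PVOID SystemVa;

    /* Map the locked pages into system address space */
    SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
                                            KernelMode,
                                            MmCached,
                                            NULL,
                                            FALSE,
                                            NormalPagePriority);
    if (SystemVa == NULL) return;

    /* ... access the buffer through SystemVa ... */

    /* Release the system PTEs backing the mapping */
    MmUnmapLockedPages(SystemVa, Mdl);
}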
BOOLEAN
NTAPI
MmCreateProcessAddressSpace(IN ULONG MinWs,
                            IN PEPROCESS Process,
                            OUT PULONG_PTR DirectoryTableBase)
{
    KIRQL OldIrql;
    PFN_NUMBER TableBasePfn, HyperPfn, HyperPdPfn, HyperPtPfn, WorkingSetPfn;
    PMMPTE SystemPte;
    MMPTE TempPte, PdePte;
    ULONG TableIndex;
    PMMPTE PageTablePointer;

    /* Make sure we don't already have a page directory setup */
    ASSERT(Process->Pcb.DirectoryTableBase[0] == 0);
    ASSERT(Process->Pcb.DirectoryTableBase[1] == 0);
    ASSERT(Process->WorkingSetPage == 0);

    /* Choose a process color */
    Process->NextPageColor = (USHORT)RtlRandom(&MmProcessColorSeed);

    /* Setup the hyperspace lock */
    KeInitializeSpinLock(&Process->HyperSpaceLock);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Get a page for the table base and one for hyper space. The PFNs for
       these pages will be initialized in MmInitializeProcessAddressSpace,
       when we are already attached to the process. */
    TableBasePfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    HyperPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    HyperPdPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    HyperPtPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    WorkingSetPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));

    /* Release PFN lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Zero pages */
    /// FIXME:
    MiZeroPhysicalPage(HyperPfn);
    MiZeroPhysicalPage(WorkingSetPfn);

    /* Set the base directory pointers */
    Process->WorkingSetPage = WorkingSetPfn;
    DirectoryTableBase[0] = TableBasePfn << PAGE_SHIFT;
    DirectoryTableBase[1] = HyperPfn << PAGE_SHIFT;

    /* Get a PTE to map the page directory */
    SystemPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(SystemPte != NULL);

    /* Get its address */
    PageTablePointer = MiPteToAddress(SystemPte);

    /* Build the PTE for the page directory and map it */
    PdePte = ValidKernelPte;
    PdePte.u.Hard.PageFrameNumber = TableBasePfn;
    *SystemPte = PdePte;

    /// architecture specific
    //MiInitializePageDirectoryForProcess(

    /* Copy the kernel mappings and zero out the rest */
    TableIndex = PXE_PER_PAGE / 2;
    RtlZeroMemory(PageTablePointer, TableIndex * sizeof(MMPTE));
    RtlCopyMemory(PageTablePointer + TableIndex,
                  MiAddressToPxe(0) + TableIndex,
                  PAGE_SIZE - TableIndex * sizeof(MMPTE));

    /* Sanity check */
    ASSERT(MiAddressToPxi(MmHyperSpaceEnd) >= TableIndex);

    /* Setup a PTE for the page directory mappings */
    TempPte = ValidKernelPte;

    /* Update the self mapping of the PML4 */
    TableIndex = MiAddressToPxi((PVOID)PXE_SELFMAP);
    TempPte.u.Hard.PageFrameNumber = TableBasePfn;
    PageTablePointer[TableIndex] = TempPte;

    /* Write the PML4 entry for hyperspace */
    TableIndex = MiAddressToPxi((PVOID)HYPER_SPACE);
    TempPte.u.Hard.PageFrameNumber = HyperPfn;
    PageTablePointer[TableIndex] = TempPte;

    /* Map the hyperspace PDPT to the system PTE */
    PdePte.u.Hard.PageFrameNumber = HyperPfn;
    *SystemPte = PdePte;
    __invlpg(PageTablePointer);

    /* Write the hyperspace entry for the first PD */
    TempPte.u.Hard.PageFrameNumber = HyperPdPfn;
    PageTablePointer[0] = TempPte;

    /* Map the hyperspace PD to the system PTE */
    PdePte.u.Hard.PageFrameNumber = HyperPdPfn;
    *SystemPte = PdePte;
    __invlpg(PageTablePointer);

    /* Write the hyperspace entry for the first PT */
    TempPte.u.Hard.PageFrameNumber = HyperPtPfn;
    PageTablePointer[0] = TempPte;

    /* Map the hyperspace PT to the system PTE */
    PdePte.u.Hard.PageFrameNumber = HyperPtPfn;
    *SystemPte = PdePte;
    __invlpg(PageTablePointer);

    /* Write the hyperspace PTE for the working set list index */
    TempPte.u.Hard.PageFrameNumber = WorkingSetPfn;
    TableIndex = MiAddressToPti(MmWorkingSetList);
    PageTablePointer[TableIndex] = TempPte;

    /// end architecture specific

    /* Release the system PTE */
    MiReleaseSystemPtes(SystemPte, 1, SystemPteSpace);

    /* Switch to phase 1 initialization */
    ASSERT(Process->AddressSpaceInitialized == 0);
    Process->AddressSpaceInitialized = 1;

    return TRUE;
}
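/*
 * Illustration (hedged sketch, helper name is hypothetical): the routine above
 * repeatedly reuses one reserved system PTE as a scratch window while walking
 * down the new hierarchy (PML4 -> PDPT -> PD -> PT). It points the PTE at the
 * next physical page, invalidates the single stale translation, and keeps
 * writing through the same virtual address. A minimal helper capturing that
 * pattern could look like this, using only names that appear above.
 */
static PVOID
MiExampleRemapScratchWindow(IN PMMPTE ScratchPte,
                            IN PFN_NUMBER NewPfn)
{
    MMPTE TempPte;

    /* Rebuild the scratch PTE so it maps the requested physical page */
    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = NewPfn;
    *ScratchPte = TempPte;

    /* Invalidate the stale translation for the window's virtual address */
    __invlpg(MiPteToAddress(ScratchPte));

    /* The same virtual address now reads/writes the new physical page */
    return MiPteToAddress(ScratchPte);
}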
BOOLEAN
MmCreateProcessAddressSpace (
    IN ULONG MinimumWorkingSetSize,
    IN PEPROCESS NewProcess,
    OUT PULONG_PTR DirectoryTableBase
    )

/*++

Routine Description:

    This routine creates an address space which maps the system
    portion and contains a hyper space entry.

Arguments:

    MinimumWorkingSetSize - Supplies the minimum working set size for
                            this address space.  This value is only used
                            to ensure that ample physical pages exist
                            to create this process.

    NewProcess - Supplies a pointer to the process object being created.

    DirectoryTableBase - Returns the value of the newly created
                         address space's Page Directory (PD) page and
                         hyper space page.

Return Value:

    Returns TRUE if an address space was successfully created, FALSE
    if ample physical pages do not exist.

Environment:

    Kernel mode.  APCs Disabled.

--*/

{
    LOGICAL FlushTbNeeded;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER HyperSpaceIndex;
    PFN_NUMBER PageContainingWorkingSet;
    PFN_NUMBER VadBitMapPage;
    MMPTE TempPte;
    MMPTE TempPte2;
    PEPROCESS CurrentProcess;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    ULONG Color;
    PMMPTE PointerPte;
    ULONG PdeOffset;
    PMMPTE MappingPte;
    PMMPTE PointerFillPte;
    PMMPTE CurrentAddressSpacePde;

    //
    // Charge commitment for the page directory pages, working set page table
    // page, and working set list.  If Vad bitmap lookups are enabled, then
    // charge for a page or two for that as well.
    //

    if (MiChargeCommitment (MM_PROCESS_COMMIT_CHARGE, NULL) == FALSE) {
        return FALSE;
    }

    FlushTbNeeded = FALSE;
    CurrentProcess = PsGetCurrentProcess ();

    NewProcess->NextPageColor = (USHORT) (RtlRandom (&MmProcessColorSeed));
    KeInitializeSpinLock (&NewProcess->HyperSpaceLock);

    //
    // Get the PFN lock to get physical pages.
    //

    LOCK_PFN (OldIrql);

    //
    // Check to make sure the physical pages are available.
    //

    if (MI_NONPAGEABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MinimumWorkingSetSize) {

        UNLOCK_PFN (OldIrql);
        MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);

        //
        // Indicate no directory base was allocated.
        //

        return FALSE;
    }

    MM_TRACK_COMMIT (MM_DBG_COMMIT_PROCESS_CREATE, MM_PROCESS_COMMIT_CHARGE);

    MI_DECREMENT_RESIDENT_AVAILABLE (MinimumWorkingSetSize,
                                     MM_RESAVAIL_ALLOCATE_CREATE_PROCESS);

    //
    // Allocate a page directory page.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
                                       &CurrentProcess->NextPageColor);

    PageDirectoryIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Allocate the hyper space page table page.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress (HYPER_SPACE),
                                       &CurrentProcess->NextPageColor);

    HyperSpaceIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (HyperSpaceIndex);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Remove page(s) for the VAD bitmap.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
                                      &CurrentProcess->NextPageColor);

    VadBitMapPage = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (VadBitMapPage);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Remove a page for the working set list.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
                                      &CurrentProcess->NextPageColor);

    PageContainingWorkingSet = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageContainingWorkingSet);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    UNLOCK_PFN (OldIrql);

    if (FlushTbNeeded == TRUE) {
        MI_FLUSH_TB_FOR_CACHED_ATTRIBUTE ();
    }

    ASSERT (NewProcess->AddressSpaceInitialized == 0);
    PS_SET_BITS (&NewProcess->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE1);
    ASSERT (NewProcess->AddressSpaceInitialized == 1);

    NewProcess->Vm.MinimumWorkingSetSize = MinimumWorkingSetSize;

    NewProcess->WorkingSetPage = PageContainingWorkingSet;

    INITIALIZE_DIRECTORY_TABLE_BASE (&DirectoryTableBase[0],
                                     PageDirectoryIndex);

    INITIALIZE_DIRECTORY_TABLE_BASE (&DirectoryTableBase[1],
                                     HyperSpaceIndex);

    //
    // Initialize the page reserved for hyper space.
    //

    TempPte = ValidPdePde;
    MI_SET_GLOBAL_STATE (TempPte, 0);

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  HyperSpaceIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        PointerPte = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        PointerPte = MiMapPageInHyperSpace (CurrentProcess,
                                            HyperSpaceIndex,
                                            &OldIrql);
    }

    TempPte.u.Hard.PageFrameNumber = VadBitMapPage;
    PointerPte[MiGetPteOffset (VAD_BITMAP_SPACE)] = TempPte;

    TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
    PointerPte[MiGetPteOffset (MmWorkingSetList)] = TempPte;

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
    }

    //
    // Set the PTE address in the PFN for the page directory page.
    //

    Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);
    Pfn1->PteAddress = (PMMPTE)PDE_BASE;

    TempPte = ValidPdePde;
    TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
    MI_SET_GLOBAL_STATE (TempPte, 0);

    //
    // Add the new process to our internal list prior to filling any
    // system PDEs so if a system PDE changes (large page map or unmap)
    // it can mark this process for a subsequent update.
    //

    ASSERT (NewProcess->Pcb.DirectoryTableBase[0] == 0);

    LOCK_EXPANSION (OldIrql);

    InsertTailList (&MmProcessList, &NewProcess->MmProcessLinks);

    UNLOCK_EXPANSION (OldIrql);

    //
    // Map the page directory page in hyperspace.
    //

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  PageDirectoryIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        PointerPte = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        PointerPte = MiMapPageInHyperSpace (CurrentProcess,
                                            PageDirectoryIndex,
                                            &OldIrql);
    }

    PdeOffset = MiGetPdeOffset (MmSystemRangeStart);
    PointerFillPte = &PointerPte[PdeOffset];
    CurrentAddressSpacePde = MiGetPdeAddress (MmSystemRangeStart);

    RtlCopyMemory (PointerFillPte,
                   CurrentAddressSpacePde,
                   PAGE_SIZE - PdeOffset * sizeof (MMPTE));

    //
    // Map the working set page table page.
    //

    PdeOffset = MiGetPdeOffset (HYPER_SPACE);
    PointerPte[PdeOffset] = TempPte;

    //
    // Zero the remaining page directory range used to map the working
    // set list and its hash.
    //

    PdeOffset += 1;

    ASSERT (MiGetPdeOffset (MmHyperSpaceEnd) >= PdeOffset);

    MiZeroMemoryPte (&PointerPte[PdeOffset],
                     (MiGetPdeOffset (MmHyperSpaceEnd) - PdeOffset + 1));

    //
    // Recursively map the page directory page so it points to itself.
    //

    TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
    PointerPte[MiGetPdeOffset (PTE_BASE)] = TempPte;

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
    }

    InterlockedExchangeAddSizeT (&MmProcessCommit, MM_PROCESS_COMMIT_CHARGE);

    //
    // Up the session space reference count.
    //

    MiSessionAddProcess (NewProcess);

    return TRUE;
}
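/*
 * Illustration (hedged sketch): the "recursively map the page directory page
 * so it points to itself" step above is what makes the classic x86 PTE_BASE /
 * PDE_BASE address arithmetic work: with the self-map in place, the PTE or PDE
 * that maps any virtual address can be found by pure arithmetic. The constants
 * and helper names below are the conventional non-PAE x86 values and are
 * assumptions for this sketch only.
 */
#define EXAMPLE_PTE_BASE 0xC0000000
#define EXAMPLE_PDE_BASE 0xC0300000

static PMMPTE
MiExampleAddressToPte (IN PVOID VirtualAddress)
{
    /* Each PTE maps one page, so the page number of the address indexes
       directly into the self-mapped PTE region. */
    return (PMMPTE)(EXAMPLE_PTE_BASE +
                    (((ULONG_PTR)VirtualAddress >> PAGE_SHIFT) * sizeof (MMPTE)));
}

static PMMPTE
MiExampleAddressToPde (IN PVOID VirtualAddress)
{
    /* Each PDE maps 4 MB on non-PAE x86, i.e. bits 31..22 of the address. */
    return (PMMPTE)(EXAMPLE_PDE_BASE +
                    (((ULONG_PTR)VirtualAddress >> 22) * sizeof (MMPTE)));
}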
LOGICAL
FASTCALL
MiCopyOnWrite (
    IN PVOID FaultingAddress,
    IN PMMPTE PointerPte
    )

/*++

Routine Description:

    This routine performs a copy on write operation for the specified
    virtual address.

Arguments:

    FaultingAddress - Supplies the virtual address which caused the fault.

    PointerPte - Supplies the pointer to the PTE which caused the page fault.

Return Value:

    Returns TRUE if the page was actually split, FALSE if not.

Environment:

    Kernel mode, APCs disabled, working set mutex held.

--*/

{
    MMPTE TempPte;
    MMPTE TempPte2;
    PMMPTE MappingPte;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER NewPageIndex;
    PVOID CopyTo;
    PVOID CopyFrom;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    PEPROCESS CurrentProcess;
    PMMCLONE_BLOCK CloneBlock;
    PMMCLONE_DESCRIPTOR CloneDescriptor;
    WSLE_NUMBER WorkingSetIndex;
    LOGICAL FakeCopyOnWrite;
    PMMWSL WorkingSetList;
    PVOID SessionSpace;
    PLIST_ENTRY NextEntry;
    PIMAGE_ENTRY_IN_SESSION Image;

    //
    // This is called from MmAccessFault, the PointerPte is valid
    // and the working set mutex ensures it cannot change state.
    //
    // Capture the PTE contents to TempPte.
    //

    TempPte = *PointerPte;
    ASSERT (TempPte.u.Hard.Valid == 1);

    PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&TempPte);
    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    //
    // Check to see if this is a prototype PTE with copy on write enabled.
    //

    FakeCopyOnWrite = FALSE;
    CurrentProcess = PsGetCurrentProcess ();
    CloneBlock = NULL;

    if (FaultingAddress >= (PVOID) MmSessionBase) {

        WorkingSetList = MmSessionSpace->Vm.VmWorkingSetList;
        ASSERT (Pfn1->u3.e1.PrototypePte == 1);
        SessionSpace = (PVOID) MmSessionSpace;

        MM_SESSION_SPACE_WS_LOCK_ASSERT ();

        if (MmSessionSpace->ImageLoadingCount != 0) {

            NextEntry = MmSessionSpace->ImageList.Flink;

            while (NextEntry != &MmSessionSpace->ImageList) {

                Image = CONTAINING_RECORD (NextEntry,
                                           IMAGE_ENTRY_IN_SESSION,
                                           Link);

                if ((FaultingAddress >= Image->Address) &&
                    (FaultingAddress <= Image->LastAddress)) {

                    if (Image->ImageLoading) {

                        ASSERT (Pfn1->u3.e1.PrototypePte == 1);

                        TempPte.u.Hard.CopyOnWrite = 0;
                        TempPte.u.Hard.Write = 1;

                        //
                        // The page is no longer copy on write, update the PTE
                        // setting both the dirty bit and the accessed bit.
                        //
                        // Even though the page's current backing is the image
                        // file, the modified writer will convert it to
                        // pagefile backing when it notices the change later.
                        //

                        MI_SET_PTE_DIRTY (TempPte);
                        MI_SET_ACCESSED_IN_PTE (&TempPte, 1);

                        MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, TempPte);

                        //
                        // The TB entry must be flushed as the valid PTE with
                        // the dirty bit clear has been fetched into the TB. If
                        // it isn't flushed, another fault is generated as the
                        // dirty bit is not set in the cached TB entry.
                        //

                        MI_FLUSH_SINGLE_TB (FaultingAddress, TRUE);

                        return FALSE;
                    }
                    break;
                }

                NextEntry = NextEntry->Flink;
            }
        }
    }
    else {
        WorkingSetList = MmWorkingSetList;
        SessionSpace = NULL;

        //
        // If a fork operation is in progress, block until the fork is
        // completed, then retry the whole operation as the state of
        // everything may have changed between when the mutexes were
        // released and reacquired.
        //

        if (CurrentProcess->ForkInProgress != NULL) {
            if (MiWaitForForkToComplete (CurrentProcess) == TRUE) {
                return FALSE;
            }
        }

        if (TempPte.u.Hard.CopyOnWrite == 0) {

            //
            // This is a fork page which is being made private in order
            // to change the protection of the page.
            // Do not make the page writable.
            //

            FakeCopyOnWrite = TRUE;
        }
    }

    WorkingSetIndex = MiLocateWsle (FaultingAddress,
                                    WorkingSetList,
                                    Pfn1->u1.WsIndex,
                                    FALSE);

    //
    // The page must be copied into a new page.
    //

    LOCK_PFN (OldIrql);

    if ((MmAvailablePages < MM_HIGH_LIMIT) &&
        (MiEnsureAvailablePageOrWait (
             SessionSpace != NULL ? HYDRA_PROCESS : CurrentProcess,
             OldIrql))) {

        //
        // A wait operation was performed to obtain an available
        // page and the working set mutex and PFN lock have
        // been released and various things may have changed for
        // the worse.  Rather than examine all the conditions again,
        // return and if things are still proper, the fault will
        // be taken again.
        //

        UNLOCK_PFN (OldIrql);
        return FALSE;
    }

    //
    // This must be a prototype PTE.  Perform the copy on write.
    //

    ASSERT (Pfn1->u3.e1.PrototypePte == 1);

    //
    // A page is being copied and made private, the global state of
    // the shared page needs to be updated at this point on certain
    // hardware.  This is done by ORing the dirty bit into the modify bit in
    // the PFN element.
    //
    // Note that a session page cannot be dirty (no POSIX-style forking is
    // supported for these drivers).
    //

    if (SessionSpace != NULL) {

        ASSERT ((TempPte.u.Hard.Valid == 1) && (TempPte.u.Hard.Write == 0));
        ASSERT (!MI_IS_PTE_DIRTY (TempPte));

        NewPageIndex = MiRemoveAnyPage (
                           MI_GET_PAGE_COLOR_FROM_SESSION (MmSessionSpace));
    }
    else {

        MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
        CloneBlock = (PMMCLONE_BLOCK) Pfn1->PteAddress;

        //
        // Get a new page with the same color as this page.
        //

        NewPageIndex = MiRemoveAnyPage (
                           MI_PAGE_COLOR_PTE_PROCESS (PageFrameIndex,
                                                      &CurrentProcess->NextPageColor));
    }

    MiInitializeCopyOnWritePfn (NewPageIndex,
                                PointerPte,
                                WorkingSetIndex,
                                WorkingSetList);

    UNLOCK_PFN (OldIrql);

    InterlockedIncrement (&KeGetCurrentPrcb ()->MmCopyOnWriteCount);

    CopyFrom = PAGE_ALIGN (FaultingAddress);

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  NewPageIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        if (Pfn1->u3.e1.CacheAttribute == MiNonCached) {
            MI_DISABLE_CACHING (TempPte2);
        }
        else if (Pfn1->u3.e1.CacheAttribute == MiWriteCombined) {
            MI_SET_PTE_WRITE_COMBINE (TempPte2);
        }

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        CopyTo = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        CopyTo = MiMapPageInHyperSpace (CurrentProcess,
                                        NewPageIndex,
                                        &OldIrql);
    }

    KeCopyPage (CopyTo, CopyFrom);

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, CopyTo, OldIrql);
    }

    if (!FakeCopyOnWrite) {

        //
        // If the page was really a copy on write page, make it
        // accessed, dirty and writable.  Also, clear the copy-on-write
        // bit in the PTE.
        //

        MI_SET_PTE_DIRTY (TempPte);
        TempPte.u.Hard.Write = 1;
        MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
        TempPte.u.Hard.CopyOnWrite = 0;
    }

    //
    // Regardless of whether the page was really a copy on write,
    // the frame field of the PTE must be updated.
    //

    TempPte.u.Hard.PageFrameNumber = NewPageIndex;

    //
    // If the modify bit is set in the PFN database for the
    // page, the data cache must be flushed.  This is due to the
    // fact that this process may have been cloned and the cache
    // still contains stale data destined for the page we are
    // going to remove.
    //

    ASSERT (TempPte.u.Hard.Valid == 1);

    MI_WRITE_VALID_PTE_NEW_PAGE (PointerPte, TempPte);

    //
    // Flush the TB entry for this page.
    //

    if (SessionSpace == NULL) {

        MI_FLUSH_SINGLE_TB (FaultingAddress, FALSE);

        //
        // Increment the number of private pages.
        //

        CurrentProcess->NumberOfPrivatePages += 1;
    }
    else {

        MI_FLUSH_SINGLE_TB (FaultingAddress, TRUE);

        ASSERT (Pfn1->u3.e1.PrototypePte == 1);
    }

    //
    // Decrement the share count for the page which was copied
    // as this PTE no longer refers to it.
    //

    LOCK_PFN (OldIrql);

    MiDecrementShareCount (Pfn1, PageFrameIndex);

    if (SessionSpace == NULL) {

        CloneDescriptor = MiLocateCloneAddress (CurrentProcess,
                                                (PVOID)CloneBlock);

        if (CloneDescriptor != NULL) {

            //
            // Decrement the reference count for the clone block,
            // note that this could release and reacquire the mutexes.
            //

            MiDecrementCloneBlockReference (CloneDescriptor,
                                            CloneBlock,
                                            CurrentProcess,
                                            NULL,
                                            OldIrql);
        }
    }

    UNLOCK_PFN (OldIrql);

    return TRUE;
}
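/*
 * Condensed illustration (hedged sketch, not the real fault handler): the core
 * copy-on-write sequence performed by MiCopyOnWrite above is to copy the
 * shared page into a freshly allocated private page, rewrite the faulting PTE
 * to point at the private frame with the write bit set and copy-on-write bit
 * clear, and flush the stale TB entry. The helper below restates that order
 * using only calls and macros that appear in the routine above; the caller is
 * assumed to have already allocated NewPageIndex and mapped it at CopyTo.
 */
static VOID
MiExampleCowSequence (IN PVOID FaultingAddress,
                      IN PMMPTE PointerPte,
                      IN PFN_NUMBER NewPageIndex,
                      IN PVOID CopyTo)
{
    MMPTE TempPte = *PointerPte;

    /* 1. Copy the shared page into the freshly mapped private page */
    KeCopyPage (CopyTo, PAGE_ALIGN (FaultingAddress));

    /* 2. Retarget the PTE: private frame, writable, dirty, no longer COW */
    TempPte.u.Hard.PageFrameNumber = NewPageIndex;
    TempPte.u.Hard.Write = 1;
    TempPte.u.Hard.CopyOnWrite = 0;
    MI_SET_PTE_DIRTY (TempPte);
    MI_WRITE_VALID_PTE_NEW_PAGE (PointerPte, TempPte);

    /* 3. Drop the stale translation so the next access sees the new frame */
    MI_FLUSH_SINGLE_TB (FaultingAddress, FALSE);
}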