NTSTATUS
MiCcPutPagesInTransition (
    IN PMI_READ_INFO MiReadInfo
    )

/*++

Routine Description:

    This routine allocates physical memory for the specified read-list and
    puts all the pages in transition (so collided faults from other threads
    for these same pages remain coherent).  I/O for any pages not already
    resident is issued here.  The caller must wait for its completion.

Arguments:

    MiReadInfo - Supplies a pointer to the read-list.

Return Value:

    STATUS_SUCCESS - all the pages were already resident, reference counts
                     have been applied and no I/O needs to be waited for.

    STATUS_ISSUE_PAGING_IO - the I/O has been issued and the caller must wait.

    Various other failure status values indicate the operation failed.

Environment:

    Kernel mode.  PASSIVE_LEVEL.

--*/

{
    NTSTATUS status;
    PMMPTE LocalPrototypePte;
    PVOID StartingVa;
    PFN_NUMBER MdlPages;
    KIRQL OldIrql;
    MMPTE PteContents;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER ResidentAvailableCharge;
    PPFN_NUMBER IoPage;
    PPFN_NUMBER ApiPage;
    PPFN_NUMBER Page;
    PPFN_NUMBER DestinationPage;
    ULONG PageColor;
    PMMPTE PointerPte;
    PMMPTE *ProtoPteArray;
    PMMPTE *EndProtoPteArray;
    PFN_NUMBER DummyPage;
    PMDL Mdl;
    PMDL FreeMdl;
    PMMPFN PfnProto;
    PMMPFN Pfn1;
    PMMPFN DummyPfn1;
    ULONG i;
    PFN_NUMBER DummyTrim;
    ULONG NumberOfPagesNeedingIo;
    MMPTE TempPte;
    PMMPTE PointerPde;
    PEPROCESS CurrentProcess;
    PMMINPAGE_SUPPORT InPageSupport;
    PKPRCB Prcb;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    MiReadInfo->DummyPagePfn = NULL;

    FreeMdl = NULL;

    CurrentProcess = PsGetCurrentProcess();

    PfnProto = NULL;
    PointerPde = NULL;

    InPageSupport = MiReadInfo->InPageSupport;

    Mdl = MI_EXTRACT_PREFETCH_MDL (InPageSupport);
    ASSERT (Mdl == MiReadInfo->IoMdl);

    IoPage = (PPFN_NUMBER)(Mdl + 1);
    ApiPage = (PPFN_NUMBER)(MiReadInfo->ApiMdl + 1);

    StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);

    MdlPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa, Mdl->ByteCount);

    if (MdlPages + 1 > MAXUSHORT) {

        //
        // The PFN ReferenceCount for the dummy page could wrap, refuse the
        // request.
        //

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    NumberOfPagesNeedingIo = 0;

    ProtoPteArray = (PMMPTE *)InPageSupport->BasePte;
    EndProtoPteArray = ProtoPteArray + MdlPages;

    ASSERT (*ProtoPteArray != NULL);

    LOCK_PFN (OldIrql);

    //
    // Ensure sufficient pages exist for the transfer plus the dummy page.
    //

    if (((SPFN_NUMBER)MdlPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) ||
        (MI_NONPAGEABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MdlPages)) {

        UNLOCK_PFN (OldIrql);

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // Charge resident available immediately as the PFN lock may get released
    // and reacquired below before all the pages have been locked down.
    // Note the dummy page is immediately charged separately.
    //

    MI_DECREMENT_RESIDENT_AVAILABLE (MdlPages, MM_RESAVAIL_ALLOCATE_BUILDMDL);

    ResidentAvailableCharge = MdlPages;

    //
    // Allocate a dummy page to map discarded pages that aren't skipped.
    //

    DummyPage = MiRemoveAnyPage (0);
    Pfn1 = MI_PFN_ELEMENT (DummyPage);

    ASSERT (Pfn1->u2.ShareCount == 0);
    ASSERT (Pfn1->u3.e2.ReferenceCount == 0);

    MiInitializePfnForOtherProcess (DummyPage, MI_PF_DUMMY_PAGE_PTE, 0);

    //
    // Give the page a containing frame so MiIdentifyPfn won't crash.
    //

    Pfn1->u4.PteFrame = PsInitialSystemProcess->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    //
    // Always bias the reference count by 1 and charge for this locked page
    // up front so the myriad increments and decrements don't get slowed
    // down with needless checking.
    //

    Pfn1->u3.e1.PrototypePte = 0;

    MI_ADD_LOCKED_PAGE_CHARGE (Pfn1);

    Pfn1->u3.e1.ReadInProgress = 1;

    MiReadInfo->DummyPagePfn = Pfn1;

    DummyPfn1 = Pfn1;

    DummyPfn1->u3.e2.ReferenceCount =
        (USHORT)(DummyPfn1->u3.e2.ReferenceCount + MdlPages);

    //
    // Properly initialize the inpage support block fields we overloaded.
    //

    InPageSupport->BasePte = *ProtoPteArray;

    //
    // Build the proper InPageSupport and MDL to describe this run.
    //

    for (; ProtoPteArray < EndProtoPteArray; ProtoPteArray += 1, IoPage += 1, ApiPage += 1) {

        //
        // Fill the MDL entry for this RLE.
        //

        PointerPte = *ProtoPteArray;

        ASSERT (PointerPte != NULL);

        //
        // The PointerPte better be inside a prototype PTE allocation
        // so that subsequent page trims update the correct PTEs.
        //

        ASSERT (((PointerPte >= (PMMPTE)MmPagedPoolStart) &&
                 (PointerPte <= (PMMPTE)MmPagedPoolEnd)) ||
                ((PointerPte >= (PMMPTE)MmSpecialPoolStart) &&
                 (PointerPte <= (PMMPTE)MmSpecialPoolEnd)));

        //
        // Check the state of this prototype PTE now that the PFN lock is held.
        // If the page is not resident, the PTE must be put in transition with
        // read in progress before the PFN lock is released.
        //

        //
        // Lock page containing prototype PTEs in memory by
        // incrementing the reference count for the page.
        // Unlock any page locked earlier containing prototype PTEs if
        // the containing page is not the same for both.
        //

        if (PfnProto != NULL) {

            if (PointerPde != MiGetPteAddress (PointerPte)) {

                ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
                MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
                PfnProto = NULL;
            }
        }

        if (PfnProto == NULL) {

            ASSERT (!MI_IS_PHYSICAL_ADDRESS (PointerPte));

            PointerPde = MiGetPteAddress (PointerPte);

            if (PointerPde->u.Hard.Valid == 0) {
                MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
            }

            PfnProto = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);

            MI_ADD_LOCKED_PAGE_CHARGE (PfnProto);

            ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
        }

recheck:

        PteContents = *PointerPte;

        // LWFIX: are zero or dzero ptes possible here ?
        ASSERT (PteContents.u.Long != 0);

        if (PteContents.u.Hard.Valid == 1) {

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e1.PrototypePte == 1);

            MI_ADD_LOCKED_PAGE_CHARGE (Pfn1);

            *ApiPage = PageFrameIndex;
            *IoPage = DummyPage;
            continue;
        }

        if ((PteContents.u.Soft.Prototype == 0) &&
            (PteContents.u.Soft.Transition == 1)) {

            //
            // The page is in transition.  If there is an inpage still in
            // progress, wait for it to complete.  Reference the PFN and
            // then march on.
            //

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e1.PrototypePte == 1);

            if (Pfn1->u4.InPageError) {

                //
                // There was an in-page read error and there are other
                // threads colliding for this page, delay to let the
                // other threads complete and then retry.
                //

                UNLOCK_PFN (OldIrql);
                KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
                LOCK_PFN (OldIrql);
                goto recheck;
            }

            if (Pfn1->u3.e1.ReadInProgress) {
                // LWFIX - start with temp\aw.c
            }

            //
            // PTE refers to a normal transition PTE.
            //

            ASSERT ((SPFN_NUMBER)MmAvailablePages >= 0);

            if (MmAvailablePages == 0) {

                //
                // This can only happen if the system is utilizing a hardware
                // compression cache.  This ensures that only a safe amount
                // of the compressed virtual cache is directly mapped so that
                // if the hardware gets into trouble, we can bail it out.
                //

                UNLOCK_PFN (OldIrql);
                KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
                LOCK_PFN (OldIrql);
                goto recheck;
            }

            //
            // The PFN reference count will be 1 already here if the
            // modified writer has begun a write of this page.  Otherwise
            // it's ordinarily 0.
            //

            MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

            *IoPage = DummyPage;
            *ApiPage = PageFrameIndex;
            continue;
        }

        // LWFIX: need to handle protos that are now pagefile (or dzero)
        // backed - prefetching it from the file here would cause us to lose
        // the contents.  Note this can happen for session-space images
        // as we back modified (ie: for relocation fixups or IAT
        // updated) portions from the pagefile.  remove the assert below too.

        ASSERT (PteContents.u.Soft.Prototype == 1);

        if ((MmAvailablePages < MM_HIGH_LIMIT) &&
            (MiEnsureAvailablePageOrWait (NULL, OldIrql))) {

            //
            // Had to wait so recheck all state.
            //

            goto recheck;
        }

        NumberOfPagesNeedingIo += 1;

        //
        // Allocate a physical page.
        //

        PageColor = MI_PAGE_COLOR_VA_PROCESS (
                        MiGetVirtualAddressMappedByPte (PointerPte),
                        &CurrentProcess->NextPageColor);

        PageFrameIndex = MiRemoveAnyPage (PageColor);

        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        ASSERT (Pfn1->u2.ShareCount == 0);
        ASSERT (PointerPte->u.Hard.Valid == 0);

        //
        // Initialize read-in-progress PFN.
        //

        MiInitializePfn (PageFrameIndex, PointerPte, 0);

        //
        // These pieces of MiInitializePfn initialization are overridden
        // here as these pages are only going into prototype
        // transition and not into any page tables.
        //

        Pfn1->u3.e1.PrototypePte = 1;
        Pfn1->u2.ShareCount -= 1;
        ASSERT (Pfn1->u2.ShareCount == 0);

        Pfn1->u3.e1.PageLocation = ZeroedPageList;

        Pfn1->u3.e2.ReferenceCount -= 1;
        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

        //
        // Initialize the I/O specific fields.
        //

        Pfn1->u1.Event = &InPageSupport->Event;
        Pfn1->u3.e1.ReadInProgress = 1;
        ASSERT (Pfn1->u4.InPageError == 0);

        //
        // Increment the PFN reference count in the control area for
        // the subsection.
        //

        MiReadInfo->ControlArea->NumberOfPfnReferences += 1;

        //
        // Put the prototype PTE into the transition state.
        //

        MI_MAKE_TRANSITION_PTE (TempPte,
                                PageFrameIndex,
                                PointerPte->u.Soft.Protection,
                                PointerPte);

        MI_WRITE_INVALID_PTE (PointerPte, TempPte);

        *IoPage = PageFrameIndex;
        *ApiPage = PageFrameIndex;
    }

    //
    // If all the pages were resident, dereference the dummy page references
    // now and notify our caller that I/O is not necessary.
    //

    if (NumberOfPagesNeedingIo == 0) {

        ASSERT (DummyPfn1->u3.e2.ReferenceCount > MdlPages);

        DummyPfn1->u3.e2.ReferenceCount =
            (USHORT)(DummyPfn1->u3.e2.ReferenceCount - MdlPages);

        //
        // Unlock page containing prototype PTEs.
        //

        if (PfnProto != NULL) {
            ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
            MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
        }

        UNLOCK_PFN (OldIrql);

        //
        // Return the upfront resident available charge as the
        // individual charges have all been made at this point.
        //

        MI_INCREMENT_RESIDENT_AVAILABLE (ResidentAvailableCharge,
                                         MM_RESAVAIL_FREE_BUILDMDL_EXCESS);

        return STATUS_SUCCESS;
    }

    //
    // Carefully trim leading dummy pages.
    //

    Page = (PPFN_NUMBER)(Mdl + 1);

    DummyTrim = 0;

    for (i = 0; i < MdlPages - 1; i += 1) {
        if (*Page == DummyPage) {
            DummyTrim += 1;
            Page += 1;
        }
        else {
            break;
        }
    }

    if (DummyTrim != 0) {

        Mdl->Size = (USHORT)(Mdl->Size - (DummyTrim * sizeof(PFN_NUMBER)));
        Mdl->ByteCount -= (ULONG)(DummyTrim * PAGE_SIZE);
        ASSERT (Mdl->ByteCount != 0);
        InPageSupport->ReadOffset.QuadPart += (DummyTrim * PAGE_SIZE);
        DummyPfn1->u3.e2.ReferenceCount =
            (USHORT)(DummyPfn1->u3.e2.ReferenceCount - DummyTrim);

        //
        // Shuffle down the PFNs in the MDL.
        // Recalculate BasePte to adjust for the shuffle.
        //

        Pfn1 = MI_PFN_ELEMENT (*Page);

        ASSERT (Pfn1->PteAddress->u.Hard.Valid == 0);
        ASSERT ((Pfn1->PteAddress->u.Soft.Prototype == 0) &&
                (Pfn1->PteAddress->u.Soft.Transition == 1));

        InPageSupport->BasePte = Pfn1->PteAddress;

        DestinationPage = (PPFN_NUMBER)(Mdl + 1);

        do {
            *DestinationPage = *Page;
            DestinationPage += 1;
            Page += 1;
            i += 1;
        } while (i < MdlPages);

        MdlPages -= DummyTrim;
    }

    //
    // Carefully trim trailing dummy pages.
    //

    ASSERT (MdlPages != 0);

    Page = (PPFN_NUMBER)(Mdl + 1) + MdlPages - 1;

    if (*Page == DummyPage) {

        ASSERT (MdlPages >= 2);

        //
        // Trim the last page specially as it may be a partial page.
        //

        Mdl->Size -= sizeof(PFN_NUMBER);
        if (BYTE_OFFSET(Mdl->ByteCount) != 0) {
            Mdl->ByteCount &= ~(PAGE_SIZE - 1);
        }
        else {
            Mdl->ByteCount -= PAGE_SIZE;
        }
        ASSERT (Mdl->ByteCount != 0);
        DummyPfn1->u3.e2.ReferenceCount -= 1;

        //
        // Now trim any other trailing pages.
        //

        Page -= 1;
        DummyTrim = 0;
        while (Page != ((PPFN_NUMBER)(Mdl + 1))) {
            if (*Page != DummyPage) {
                break;
            }
            DummyTrim += 1;
            Page -= 1;
        }

        if (DummyTrim != 0) {
            ASSERT (Mdl->Size > (USHORT)(DummyTrim * sizeof(PFN_NUMBER)));
            Mdl->Size = (USHORT)(Mdl->Size - (DummyTrim * sizeof(PFN_NUMBER)));
            Mdl->ByteCount -= (ULONG)(DummyTrim * PAGE_SIZE);
            DummyPfn1->u3.e2.ReferenceCount =
                (USHORT)(DummyPfn1->u3.e2.ReferenceCount - DummyTrim);
        }

        ASSERT (MdlPages > DummyTrim + 1);
        MdlPages -= (DummyTrim + 1);

#if DBG
        StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);

        ASSERT (MdlPages == ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
                                                           Mdl->ByteCount));
#endif
    }

    //
    // If the MDL is not already embedded in the inpage block, see if its
    // final size qualifies it - if so, embed it now.
    //

    if ((Mdl != &InPageSupport->Mdl) &&
        (Mdl->ByteCount <= (MM_MAXIMUM_READ_CLUSTER_SIZE + 1) * PAGE_SIZE)) {

#if DBG
        RtlFillMemoryUlong (&InPageSupport->Page[0],
                            (MM_MAXIMUM_READ_CLUSTER_SIZE+1) * sizeof (PFN_NUMBER),
                            0xf1f1f1f1);
#endif

        RtlCopyMemory (&InPageSupport->Mdl, Mdl, Mdl->Size);

        FreeMdl = Mdl;

        Mdl = &InPageSupport->Mdl;

        ASSERT (((ULONG_PTR)Mdl & (sizeof(QUAD) - 1)) == 0);

        InPageSupport->u1.e1.PrefetchMdlHighBits = ((ULONG_PTR)Mdl >> 3);
    }
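
    //
    // The remainder of this routine is not reproduced here.  What follows is
    // only a minimal sketch of the typical closing sequence, assumed from the
    // resident-only path above and the STATUS_ISSUE_PAGING_IO contract in the
    // routine header (not taken from the original source): drop the
    // prototype-PTE page reference, release the PFN lock, free any superseded
    // MDL, and tell the caller to issue and wait for the paging I/O.
    //

    if (PfnProto != NULL) {
        ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
        MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
    }

    UNLOCK_PFN (OldIrql);

    if (FreeMdl != NULL) {
        //
        // Free the original MDL that was superseded by the embedded copy
        // above (assumed cleanup; the original may defer this to the caller).
        //
        ExFreePool (FreeMdl);
    }

    return STATUS_ISSUE_PAGING_IO;
}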
NTSTATUS
MmAddPhysicalMemory (
    IN PPHYSICAL_ADDRESS StartAddress,
    IN OUT PLARGE_INTEGER NumberOfBytes
    )

/*++

Routine Description:

    This routine adds the specified physical address range to the system.
    This includes initializing PFN database entries and adding it to the
    freelists.

Arguments:

    StartAddress - Supplies the starting physical address.

    NumberOfBytes - Supplies a pointer to the number of bytes being added.
                    If any bytes were added (ie: STATUS_SUCCESS is being
                    returned), the actual amount is returned here.

Return Value:

    NTSTATUS.

Environment:

    Kernel mode.  PASSIVE level.  No locks held.

--*/

{
    ULONG i;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    LOGICAL Inserted;
    LOGICAL Updated;
    MMPTE TempPte;
    PMMPTE PointerPte;
    PMMPTE LastPte;
    PFN_NUMBER NumberOfPages;
    PFN_NUMBER start;
    PFN_NUMBER count;
    PFN_NUMBER StartPage;
    PFN_NUMBER EndPage;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER Page;
    PFN_NUMBER LastPage;
    PFN_COUNT PagesNeeded;
    PPHYSICAL_MEMORY_DESCRIPTOR OldPhysicalMemoryBlock;
    PPHYSICAL_MEMORY_DESCRIPTOR NewPhysicalMemoryBlock;
    PPHYSICAL_MEMORY_RUN NewRun;
    LOGICAL PfnDatabaseIsPhysical;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    ASSERT (BYTE_OFFSET(NumberOfBytes->LowPart) == 0);
    ASSERT (BYTE_OFFSET(StartAddress->LowPart) == 0);

    if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase)) {

        //
        // The system must be configured for dynamic memory addition.  This is
        // critical as only then is the database guaranteed to be non-sparse.
        //

        if (MmDynamicPfn == FALSE) {
            return STATUS_NOT_SUPPORTED;
        }

        PfnDatabaseIsPhysical = TRUE;
    }
    else {
        PfnDatabaseIsPhysical = FALSE;
    }

    StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT);
    NumberOfPages = (PFN_NUMBER)(NumberOfBytes->QuadPart >> PAGE_SHIFT);

    EndPage = StartPage + NumberOfPages;

    if (EndPage - 1 > MmHighestPossiblePhysicalPage) {

        //
        // Truncate the request into something that can be mapped by the PFN
        // database.
        //

        EndPage = MmHighestPossiblePhysicalPage + 1;
        NumberOfPages = EndPage - StartPage;
    }

    //
    // The range cannot wrap.
    //

    if (StartPage >= EndPage) {
        return STATUS_INVALID_PARAMETER_1;
    }

    ExAcquireFastMutex (&MmDynamicMemoryMutex);

    i = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
         (sizeof(PHYSICAL_MEMORY_RUN) *
          (MmPhysicalMemoryBlock->NumberOfRuns + 1)));

    NewPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool,
                                                    i,
                                                    ' mM');

    if (NewPhysicalMemoryBlock == NULL) {
        ExReleaseFastMutex (&MmDynamicMemoryMutex);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // The range cannot overlap any ranges that are already present.
    //

    start = 0;

    LOCK_PFN (OldIrql);

    do {

        count = MmPhysicalMemoryBlock->Run[start].PageCount;
        Page = MmPhysicalMemoryBlock->Run[start].BasePage;

        if (count != 0) {

            LastPage = Page + count;

            if ((StartPage < Page) && (EndPage > Page)) {
                UNLOCK_PFN (OldIrql);
                ExReleaseFastMutex (&MmDynamicMemoryMutex);
                ExFreePool (NewPhysicalMemoryBlock);
                return STATUS_CONFLICTING_ADDRESSES;
            }

            if ((StartPage >= Page) && (StartPage < LastPage)) {
                UNLOCK_PFN (OldIrql);
                ExReleaseFastMutex (&MmDynamicMemoryMutex);
                ExFreePool (NewPhysicalMemoryBlock);
                return STATUS_CONFLICTING_ADDRESSES;
            }
        }

        start += 1;

    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // Fill any gaps in the (sparse) PFN database needed for these pages,
    // unless the PFN database was physically allocated and completely
    // committed up front.
    //

    PagesNeeded = 0;

    if (PfnDatabaseIsPhysical == FALSE) {

        PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(StartPage));
        LastPte = MiGetPteAddress ((PCHAR)(MI_PFN_ELEMENT(EndPage)) - 1);

        while (PointerPte <= LastPte) {
            if (PointerPte->u.Hard.Valid == 0) {
                PagesNeeded += 1;
            }
            PointerPte += 1;
        }

        if (MmAvailablePages < PagesNeeded) {
            UNLOCK_PFN (OldIrql);
            ExReleaseFastMutex (&MmDynamicMemoryMutex);
            ExFreePool (NewPhysicalMemoryBlock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        TempPte = ValidKernelPte;

        PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(StartPage));

        while (PointerPte <= LastPte) {
            if (PointerPte->u.Hard.Valid == 0) {

                PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));

                MiInitializePfn (PageFrameIndex, PointerPte, 0);

                TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
                *PointerPte = TempPte;
            }
            PointerPte += 1;
        }

        MmResidentAvailablePages -= PagesNeeded;
    }

    //
    // If the new range is adjacent to an existing range, just merge it into
    // the old block.  Otherwise the new block must be used since a new run
    // entry is required.
    //

    NewPhysicalMemoryBlock->NumberOfRuns =
        MmPhysicalMemoryBlock->NumberOfRuns + 1;

    NewPhysicalMemoryBlock->NumberOfPages =
        MmPhysicalMemoryBlock->NumberOfPages + NumberOfPages;

    NewRun = &NewPhysicalMemoryBlock->Run[0];
    start = 0;
    Inserted = FALSE;
    Updated = FALSE;

    do {

        Page = MmPhysicalMemoryBlock->Run[start].BasePage;
        count = MmPhysicalMemoryBlock->Run[start].PageCount;

        if (Inserted == FALSE) {

            //
            // Note overlaps into adjacent ranges were already checked above.
            //

            if (StartPage == Page + count) {

                MmPhysicalMemoryBlock->Run[start].PageCount += NumberOfPages;
                OldPhysicalMemoryBlock = NewPhysicalMemoryBlock;
                MmPhysicalMemoryBlock->NumberOfPages += NumberOfPages;

                //
                // Coalesce below and above to avoid leaving zero length gaps
                // as these gaps would prevent callers from removing ranges
                // that span them.
                //

                if (start + 1 < MmPhysicalMemoryBlock->NumberOfRuns) {

                    start += 1;
                    Page = MmPhysicalMemoryBlock->Run[start].BasePage;
                    count = MmPhysicalMemoryBlock->Run[start].PageCount;

                    if (StartPage + NumberOfPages == Page) {

                        MmPhysicalMemoryBlock->Run[start - 1].PageCount +=
                            count;
                        MmPhysicalMemoryBlock->NumberOfRuns -= 1;

                        //
                        // Copy any remaining entries.
                        //

                        if (start != MmPhysicalMemoryBlock->NumberOfRuns) {
                            RtlMoveMemory (&MmPhysicalMemoryBlock->Run[start],
                                           &MmPhysicalMemoryBlock->Run[start + 1],
                                           (MmPhysicalMemoryBlock->NumberOfRuns - start) * sizeof (PHYSICAL_MEMORY_RUN));
                        }
                    }
                }
                Updated = TRUE;
                break;
            }

            if (StartPage + NumberOfPages == Page) {
                MmPhysicalMemoryBlock->Run[start].BasePage = StartPage;
                MmPhysicalMemoryBlock->Run[start].PageCount += NumberOfPages;
                OldPhysicalMemoryBlock = NewPhysicalMemoryBlock;
                MmPhysicalMemoryBlock->NumberOfPages += NumberOfPages;
                Updated = TRUE;
                break;
            }

            if (StartPage + NumberOfPages <= Page) {

                if (start + 1 < MmPhysicalMemoryBlock->NumberOfRuns) {

                    if (StartPage + NumberOfPages <=
                            MmPhysicalMemoryBlock->Run[start + 1].BasePage) {

                        //
                        // Don't insert here - the new entry really belongs
                        // (at least) one entry further down.
                        //

                        continue;
                    }
                }

                NewRun->BasePage = StartPage;
                NewRun->PageCount = NumberOfPages;
                NewRun += 1;
                Inserted = TRUE;
                Updated = TRUE;
            }
        }

        *NewRun = MmPhysicalMemoryBlock->Run[start];
        NewRun += 1;
        start += 1;

    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // If the memory block has not been updated, then the new entry must
    // be added at the very end.
    //

    if (Updated == FALSE) {

        ASSERT (Inserted == FALSE);

        NewRun->BasePage = StartPage;
        NewRun->PageCount = NumberOfPages;
        Inserted = TRUE;
    }

    //
    // Repoint the MmPhysicalMemoryBlock at the new chunk, free the old after
    // releasing the PFN lock.
    //

    if (Inserted == TRUE) {
        OldPhysicalMemoryBlock = MmPhysicalMemoryBlock;
        MmPhysicalMemoryBlock = NewPhysicalMemoryBlock;
    }

    //
    // Note that the page directory (page parent entries on Win64) must be
    // filled in at system boot so that already-created processes do not fault
    // when referencing the new PFNs.
    //

    //
    // Walk through the memory descriptors and add pages to the
    // free list in the PFN database.
    //

    PageFrameIndex = StartPage;
    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    if (EndPage - 1 > MmHighestPhysicalPage) {
        MmHighestPhysicalPage = EndPage - 1;
    }

    while (PageFrameIndex < EndPage) {

        ASSERT (Pfn1->u2.ShareCount == 0);
        ASSERT (Pfn1->u3.e2.ShortFlags == 0);
        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        ASSERT64 (Pfn1->UsedPageTableEntries == 0);
        ASSERT (Pfn1->OriginalPte.u.Long == ZeroKernelPte.u.Long);
        ASSERT (Pfn1->PteFrame == 0);
        ASSERT ((Pfn1->PteAddress == PFN_REMOVED) ||
                (Pfn1->PteAddress == (PMMPTE)(UINT_PTR)0));

        //
        // Set the PTE address to the physical page for
        // virtual address alignment checking.
        //

        Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);

        MiInsertPageInList (MmPageLocationList[FreePageList],
                            PageFrameIndex);

        PageFrameIndex += 1;
        Pfn1 += 1;
    }

    MmResidentAvailablePages += NumberOfPages;
    MmNumberOfPhysicalPages += (PFN_COUNT)NumberOfPages;

    UNLOCK_PFN (OldIrql);

    //
    // Increase all commit limits to reflect the additional memory.
    //

    ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);

    MmTotalCommitLimit += NumberOfPages;
    MmTotalCommitLimitMaximum += NumberOfPages;

    MmTotalCommittedPages += PagesNeeded;

    ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);

    ExReleaseFastMutex (&MmDynamicMemoryMutex);

    ExFreePool (OldPhysicalMemoryBlock);

    //
    // Indicate the number of bytes actually added to our caller.
    //

    NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE;

    return STATUS_SUCCESS;
}
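
//
// Illustrative sketch only (not part of the original source): how a memory
// hot-add driver might call MmAddPhysicalMemory.  The function name and the
// start/length values below are hypothetical - a real caller would take the
// range from its hot-add notification - and the routine may shrink the
// request to fit the PFN database, returning the actual amount added.
//

NTSTATUS
ExampleHotAddRange (
    VOID
    )
{
    NTSTATUS status;
    PHYSICAL_ADDRESS StartAddress;
    LARGE_INTEGER NumberOfBytes;

    //
    // Hypothetical page-aligned range: 256MB starting at 4GB.
    //

    StartAddress.QuadPart = (ULONGLONG)4 * 1024 * 1024 * 1024;
    NumberOfBytes.QuadPart = 256 * 1024 * 1024;

    status = MmAddPhysicalMemory (&StartAddress, &NumberOfBytes);

    if (NT_SUCCESS (status)) {

        //
        // NumberOfBytes.QuadPart now holds the number of bytes actually
        // added, which may be less than requested if the range was truncated.
        //
    }

    return status;
}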