Example #1
VOID
NTAPI
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                          IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    /* Get PTE pointers and loop */
    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Capture the PTE for safety */
        TempPte = *PointerPte;

        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
        TempPte.u.Hard.Valid = 0;
        TempPte.u.Soft.Prototype = 1;
        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
    } while (++PointerPte < LastPte);

    /* Flush the TLB */
    KeFlushEntireTb(TRUE, TRUE);
}
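The routine above marks freed nonpaged pool by clearing the Valid bit and setting the Prototype bit in each PTE. As a minimal sketch (not part of the source), a hypothetical helper that recognizes that encoding for a single address could look like this, reusing the MMPTE layout, MI_IS_PHYSICAL_ADDRESS and MiAddressToPte from the code above:

/* Hypothetical helper, illustration only: returns TRUE if the PTE for
 * VirtualAddress carries the "protected free pool" encoding written by
 * MiProtectFreeNonPagedPool (Valid clear, Prototype set). */
static BOOLEAN
MipIsProtectedFreePoolPte(IN PVOID VirtualAddress)
{
    PMMPTE PointerPte;
    MMPTE TempPte;

    /* Physical (non-PTE-mapped) pool is never protected this way */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;

    /* Capture the PTE and test the encoding */
    PointerPte = MiAddressToPte(VirtualAddress);
    TempPte = *PointerPte;
    return (BOOLEAN)((TempPte.u.Hard.Valid == 0) &&
                     (TempPte.u.Soft.Prototype == 1));
}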
Example #2
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the page bit count
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size  << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for this allocation's first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
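For context, a hedged usage sketch: MiAllocatePoolPages returns a page-aligned virtual address (or NULL), and freeing in this code base goes through MiFreePoolPages. The pairing below is illustrative, not taken from the source above:

/* Illustrative caller, assuming MiFreePoolPages(StartingVa) as the
 * counterpart (the routine exists in this code base; this usage is
 * a sketch, not the pool allocator's actual call site). */
static VOID
MipPoolPagesExample(VOID)
{
    PVOID Buffer;

    /* Three nonpaged pages, rounded up from the byte count */
    Buffer = MiAllocatePoolPages(NonPagedPool, 3 * PAGE_SIZE);
    if (Buffer != NULL)
    {
        RtlZeroMemory(Buffer, 3 * PAGE_SIZE);

        /* ... use the pages ... */

        MiFreePoolPages(Buffer);
    }
}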
Example #3
NTSTATUS
MiCcPutPagesInTransition (
    IN PMI_READ_INFO MiReadInfo
    )

/*++

Routine Description:

    This routine allocates physical memory for the specified read-list and
    puts all the pages in transition (so collided faults from other threads
    for these same pages remain coherent).  I/O for any pages not already
    resident is issued here.  The caller must wait for its completion.

Arguments:

    MiReadInfo - Supplies a pointer to the read-list.

Return Value:

    STATUS_SUCCESS - all the pages were already resident, reference counts
                     have been applied and no I/O needs to be waited for.

    STATUS_ISSUE_PAGING_IO - the I/O has been issued and the caller must wait.

    Various other failure status values indicate the operation failed.

Environment:

    Kernel mode. PASSIVE_LEVEL.

--*/

{
    NTSTATUS status;
    PMMPTE LocalPrototypePte;
    PVOID StartingVa;
    PFN_NUMBER MdlPages;
    KIRQL OldIrql;
    MMPTE PteContents;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER ResidentAvailableCharge;
    PPFN_NUMBER IoPage;
    PPFN_NUMBER ApiPage;
    PPFN_NUMBER Page;
    PPFN_NUMBER DestinationPage;
    ULONG PageColor;
    PMMPTE PointerPte;
    PMMPTE *ProtoPteArray;
    PMMPTE *EndProtoPteArray;
    PFN_NUMBER DummyPage;
    PMDL Mdl;
    PMDL FreeMdl;
    PMMPFN PfnProto;
    PMMPFN Pfn1;
    PMMPFN DummyPfn1;
    ULONG i;
    PFN_NUMBER DummyTrim;
    ULONG NumberOfPagesNeedingIo;
    MMPTE TempPte;
    PMMPTE PointerPde;
    PEPROCESS CurrentProcess;
    PMMINPAGE_SUPPORT InPageSupport;
    PKPRCB Prcb;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    MiReadInfo->DummyPagePfn = NULL;

    FreeMdl = NULL;
    CurrentProcess = PsGetCurrentProcess();

    PfnProto = NULL;
    PointerPde = NULL;

    InPageSupport = MiReadInfo->InPageSupport;
    
    Mdl = MI_EXTRACT_PREFETCH_MDL (InPageSupport);
    ASSERT (Mdl == MiReadInfo->IoMdl);

    IoPage = (PPFN_NUMBER)(Mdl + 1);
    ApiPage = (PPFN_NUMBER)(MiReadInfo->ApiMdl + 1);

    StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
    
    MdlPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (StartingVa,
                                               Mdl->ByteCount);

    if (MdlPages + 1 > MAXUSHORT) {

        //
        // The PFN ReferenceCount for the dummy page could wrap, so refuse
        // the request.
        //

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    NumberOfPagesNeedingIo = 0;

    ProtoPteArray = (PMMPTE *)InPageSupport->BasePte;
    EndProtoPteArray = ProtoPteArray + MdlPages;

    ASSERT (*ProtoPteArray != NULL);

    LOCK_PFN (OldIrql);

    //
    // Ensure sufficient pages exist for the transfer plus the dummy page.
    //

    if (((SPFN_NUMBER)MdlPages > (SPFN_NUMBER)(MmAvailablePages - MM_HIGH_LIMIT)) ||
        (MI_NONPAGEABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MdlPages)) {

        UNLOCK_PFN (OldIrql);

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    // Charge resident available immediately as the PFN lock may get released
    // and reacquired below before all the pages have been locked down.
    // Note the dummy page is immediately charged separately.
    //

    MI_DECREMENT_RESIDENT_AVAILABLE (MdlPages, MM_RESAVAIL_ALLOCATE_BUILDMDL);

    ResidentAvailableCharge = MdlPages;

    //
    // Allocate a dummy page to map discarded pages that aren't skipped.
    //

    DummyPage = MiRemoveAnyPage (0);
    Pfn1 = MI_PFN_ELEMENT (DummyPage);

    ASSERT (Pfn1->u2.ShareCount == 0);
    ASSERT (Pfn1->u3.e2.ReferenceCount == 0);

    MiInitializePfnForOtherProcess (DummyPage, MI_PF_DUMMY_PAGE_PTE, 0);

    //
    // Give the page a containing frame so MiIdentifyPfn won't crash.
    //

    Pfn1->u4.PteFrame = PsInitialSystemProcess->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    //
    // Always bias the reference count by 1 and charge for this locked page
    // up front so the myriad increments and decrements don't get slowed
    // down with needless checking.
    //

    Pfn1->u3.e1.PrototypePte = 0;

    MI_ADD_LOCKED_PAGE_CHARGE (Pfn1);

    Pfn1->u3.e1.ReadInProgress = 1;

    MiReadInfo->DummyPagePfn = Pfn1;

    DummyPfn1 = Pfn1;

    DummyPfn1->u3.e2.ReferenceCount =
        (USHORT)(DummyPfn1->u3.e2.ReferenceCount + MdlPages);

    //
    // Properly initialize the inpage support block fields we overloaded.
    //

    InPageSupport->BasePte = *ProtoPteArray;

    //
    // Build the proper InPageSupport and MDL to describe this run.
    //

    for (; ProtoPteArray < EndProtoPteArray; ProtoPteArray += 1, IoPage += 1, ApiPage += 1) {
    
        //
        // Fill the MDL entry for this RLE.
        //
    
        PointerPte = *ProtoPteArray;

        ASSERT (PointerPte != NULL);

        //
        // The PointerPte better be inside a prototype PTE allocation
        // so that subsequent page trims update the correct PTEs.
        //

        ASSERT (((PointerPte >= (PMMPTE)MmPagedPoolStart) &&
                (PointerPte <= (PMMPTE)MmPagedPoolEnd)) ||
                ((PointerPte >= (PMMPTE)MmSpecialPoolStart) && (PointerPte <= (PMMPTE)MmSpecialPoolEnd)));

        //
        // Check the state of this prototype PTE now that the PFN lock is held.
        // If the page is not resident, the PTE must be put in transition with
        // read in progress before the PFN lock is released.
        //

        //
        // Lock page containing prototype PTEs in memory by
        // incrementing the reference count for the page.
        // Unlock any page locked earlier containing prototype PTEs if
        // the containing page is not the same for both.
        //

        if (PfnProto != NULL) {

            if (PointerPde != MiGetPteAddress (PointerPte)) {

                ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
                MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
                PfnProto = NULL;
            }
        }

        if (PfnProto == NULL) {

            ASSERT (!MI_IS_PHYSICAL_ADDRESS (PointerPte));
   
            PointerPde = MiGetPteAddress (PointerPte);
 
            if (PointerPde->u.Hard.Valid == 0) {
                MiMakeSystemAddressValidPfn (PointerPte, OldIrql);
            }

            PfnProto = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber);
            MI_ADD_LOCKED_PAGE_CHARGE (PfnProto);
            ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
        }

recheck:
        PteContents = *PointerPte;

        // LWFIX: are zero or dzero ptes possible here ?
        ASSERT (PteContents.u.Long != 0);

        if (PteContents.u.Hard.Valid == 1) {
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e1.PrototypePte == 1);
            MI_ADD_LOCKED_PAGE_CHARGE (Pfn1);
            *ApiPage = PageFrameIndex;
            *IoPage = DummyPage;
            continue;
        }

        if ((PteContents.u.Soft.Prototype == 0) &&
            (PteContents.u.Soft.Transition == 1)) {

            //
            // The page is in transition.  If there is an inpage still in
            // progress, wait for it to complete.  Reference the PFN and
            // then march on.
            //

            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u3.e1.PrototypePte == 1);

            if (Pfn1->u4.InPageError) {

                //
                // There was an in-page read error and there are other
                // threads colliding for this page, delay to let the
                // other threads complete and then retry.
                //

                UNLOCK_PFN (OldIrql);
                KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
                LOCK_PFN (OldIrql);
                goto recheck;
            }

            if (Pfn1->u3.e1.ReadInProgress) {
                    // LWFIX - start with temp\aw.c
            }

            //
            // PTE refers to a normal transition PTE.
            //

            ASSERT ((SPFN_NUMBER)MmAvailablePages >= 0);

            if (MmAvailablePages == 0) {

                //
                // This can only happen if the system is utilizing a hardware
                // compression cache.  This ensures that only a safe amount
                // of the compressed virtual cache is directly mapped so that
                // if the hardware gets into trouble, we can bail it out.
                //

                UNLOCK_PFN (OldIrql);
                KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmHalfSecond);
                LOCK_PFN (OldIrql);
                goto recheck;
            }

            //
            // The PFN reference count will be 1 already here if the
            // modified writer has begun a write of this page.  Otherwise
            // it's ordinarily 0.
            //

            MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

            *IoPage = DummyPage;
            *ApiPage = PageFrameIndex;
            continue;
        }

        // LWFIX: need to handle protos that are now pagefile (or dzero)
        // backed - prefetching it from the file here would cause us to lose
        // the contents.  Note this can happen for session-space images
        // as we back modified (ie: for relocation fixups or IAT
        // updated) portions from the pagefile.  remove the assert below too.
        ASSERT (PteContents.u.Soft.Prototype == 1);

        if ((MmAvailablePages < MM_HIGH_LIMIT) &&
            (MiEnsureAvailablePageOrWait (NULL, OldIrql))) {

            //
            // Had to wait so recheck all state.
            //

            goto recheck;
        }

        NumberOfPagesNeedingIo += 1;

        //
        // Allocate a physical page.
        //

        PageColor = MI_PAGE_COLOR_VA_PROCESS (
                        MiGetVirtualAddressMappedByPte (PointerPte),
                        &CurrentProcess->NextPageColor);

        PageFrameIndex = MiRemoveAnyPage (PageColor);

        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        ASSERT (Pfn1->u2.ShareCount == 0);
        ASSERT (PointerPte->u.Hard.Valid == 0);

        //
        // Initialize read-in-progress PFN.
        //
    
        MiInitializePfn (PageFrameIndex, PointerPte, 0);

        //
        // These pieces of MiInitializePfn initialization are overridden
        // here as these pages are only going into prototype
        // transition and not into any page tables.
        //

        Pfn1->u3.e1.PrototypePte = 1;
        Pfn1->u2.ShareCount -= 1;
        ASSERT (Pfn1->u2.ShareCount == 0);
        Pfn1->u3.e1.PageLocation = ZeroedPageList;
        Pfn1->u3.e2.ReferenceCount -= 1;
        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

        //
        // Initialize the I/O specific fields.
        //
    
        Pfn1->u1.Event = &InPageSupport->Event;
        Pfn1->u3.e1.ReadInProgress = 1;
        ASSERT (Pfn1->u4.InPageError == 0);

        //
        // Increment the PFN reference count in the control area for
        // the subsection.
        //

        MiReadInfo->ControlArea->NumberOfPfnReferences += 1;
    
        //
        // Put the prototype PTE into the transition state.
        //

        MI_MAKE_TRANSITION_PTE (TempPte,
                                PageFrameIndex,
                                PointerPte->u.Soft.Protection,
                                PointerPte);

        MI_WRITE_INVALID_PTE (PointerPte, TempPte);

        *IoPage = PageFrameIndex;
        *ApiPage = PageFrameIndex;
    }
    
    //
    // If all the pages were resident, dereference the dummy page references
    // now and notify our caller that I/O is not necessary.
    //
    
    if (NumberOfPagesNeedingIo == 0) {
        ASSERT (DummyPfn1->u3.e2.ReferenceCount > MdlPages);
        DummyPfn1->u3.e2.ReferenceCount =
            (USHORT)(DummyPfn1->u3.e2.ReferenceCount - MdlPages);

        //
        // Unlock page containing prototype PTEs.
        //

        if (PfnProto != NULL) {
            ASSERT (PfnProto->u3.e2.ReferenceCount > 1);
            MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (PfnProto);
        }

        UNLOCK_PFN (OldIrql);

        //
        // Return the upfront resident available charge as the
        // individual charges have all been made at this point.
        //

        MI_INCREMENT_RESIDENT_AVAILABLE (ResidentAvailableCharge,
                                         MM_RESAVAIL_FREE_BUILDMDL_EXCESS);

        return STATUS_SUCCESS;
    }

    //
    // Carefully trim leading dummy pages.
    //

    Page = (PPFN_NUMBER)(Mdl + 1);

    DummyTrim = 0;
    for (i = 0; i < MdlPages - 1; i += 1) {
        if (*Page == DummyPage) {
            DummyTrim += 1;
            Page += 1;
        }
        else {
            break;
        }
    }

    if (DummyTrim != 0) {

        Mdl->Size = (USHORT)(Mdl->Size - (DummyTrim * sizeof(PFN_NUMBER)));
        Mdl->ByteCount -= (ULONG)(DummyTrim * PAGE_SIZE);
        ASSERT (Mdl->ByteCount != 0);
        InPageSupport->ReadOffset.QuadPart += (DummyTrim * PAGE_SIZE);
        DummyPfn1->u3.e2.ReferenceCount =
                (USHORT)(DummyPfn1->u3.e2.ReferenceCount - DummyTrim);

        //
        // Shuffle down the PFNs in the MDL.
        // Recalculate BasePte to adjust for the shuffle.
        //

        Pfn1 = MI_PFN_ELEMENT (*Page);

        ASSERT (Pfn1->PteAddress->u.Hard.Valid == 0);
        ASSERT ((Pfn1->PteAddress->u.Soft.Prototype == 0) &&
                 (Pfn1->PteAddress->u.Soft.Transition == 1));

        InPageSupport->BasePte = Pfn1->PteAddress;

        DestinationPage = (PPFN_NUMBER)(Mdl + 1);

        do {
            *DestinationPage = *Page;
            DestinationPage += 1;
            Page += 1;
            i += 1;
        } while (i < MdlPages);

        MdlPages -= DummyTrim;
    }

    //
    // Carefully trim trailing dummy pages.
    //

    ASSERT (MdlPages != 0);

    Page = (PPFN_NUMBER)(Mdl + 1) + MdlPages - 1;

    if (*Page == DummyPage) {

        ASSERT (MdlPages >= 2);

        //
        // Trim the last page specially as it may be a partial page.
        //

        Mdl->Size -= sizeof(PFN_NUMBER);
        if (BYTE_OFFSET(Mdl->ByteCount) != 0) {
            Mdl->ByteCount &= ~(PAGE_SIZE - 1);
        }
        else {
            Mdl->ByteCount -= PAGE_SIZE;
        }
        ASSERT (Mdl->ByteCount != 0);
        DummyPfn1->u3.e2.ReferenceCount -= 1;

        //
        // Now trim any other trailing pages.
        //

        Page -= 1;
        DummyTrim = 0;
        while (Page != ((PPFN_NUMBER)(Mdl + 1))) {
            if (*Page != DummyPage) {
                break;
            }
            DummyTrim += 1;
            Page -= 1;
        }
        if (DummyTrim != 0) {
            ASSERT (Mdl->Size > (USHORT)(DummyTrim * sizeof(PFN_NUMBER)));
            Mdl->Size = (USHORT)(Mdl->Size - (DummyTrim * sizeof(PFN_NUMBER)));
            Mdl->ByteCount -= (ULONG)(DummyTrim * PAGE_SIZE);
            DummyPfn1->u3.e2.ReferenceCount =
                (USHORT)(DummyPfn1->u3.e2.ReferenceCount - DummyTrim);
        }

        ASSERT (MdlPages > DummyTrim + 1);
        MdlPages -= (DummyTrim + 1);

#if DBG
        StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
    
        ASSERT (MdlPages == ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
                                                               Mdl->ByteCount));
#endif
    }

    //
    // If the MDL is not already embedded in the inpage block, see if its
    // final size qualifies it - if so, embed it now.
    //

    if ((Mdl != &InPageSupport->Mdl) &&
        (Mdl->ByteCount <= (MM_MAXIMUM_READ_CLUSTER_SIZE + 1) * PAGE_SIZE)){

#if DBG
        RtlFillMemoryUlong (&InPageSupport->Page[0],
                            (MM_MAXIMUM_READ_CLUSTER_SIZE+1) * sizeof (PFN_NUMBER),
                            0xf1f1f1f1);
#endif

        RtlCopyMemory (&InPageSupport->Mdl, Mdl, Mdl->Size);

        FreeMdl = Mdl;

        Mdl = &InPageSupport->Mdl;

        ASSERT (((ULONG_PTR)Mdl & (sizeof(QUAD) - 1)) == 0);
        InPageSupport->u1.e1.PrefetchMdlHighBits = ((ULONG_PTR)Mdl >> 3);
    }
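The loop above relies on the standard MDL convention that the PFN array immediately follows the MDL header, which is why IoPage starts at (PPFN_NUMBER)(Mdl + 1). A small sketch of walking that array, using the same StartVa/ByteOffset arithmetic as the routine; the helper name and DbgPrint output are illustrative:

/* Illustrative helper: walk the PFN array that follows an MDL header. */
static VOID
MipDumpMdlPfns(IN PMDL Mdl)
{
    PPFN_NUMBER PfnArray = (PPFN_NUMBER)(Mdl + 1);
    PVOID StartingVa = (PVOID)((PCHAR)Mdl->StartVa + Mdl->ByteOffset);
    PFN_NUMBER PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa,
                                                          Mdl->ByteCount);
    PFN_NUMBER i;

    for (i = 0; i < PageCount; i += 1) {
        /* Each entry is the physical frame backing one page of the buffer */
        DbgPrint("page %u -> PFN %p\n",
                 (ULONG)i,
                 (PVOID)(ULONG_PTR)PfnArray[i]);
    }
}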
Example #4
VOID
FASTCALL
MiDecrementShareCount (
    IN PMMPFN Pfn1,
    IN PFN_NUMBER PageFrameIndex
    )

/*++

Routine Description:

    This routine decrements the share count within the PFN element
    for the specified physical page.  If the share count becomes
    zero, the corresponding PTE is converted to the transition state,
    the reference count is decremented, and the valid PTE count of
    the containing page table frame is decremented.

Arguments:

    Pfn1 - Supplies the PFN database entry to decrement.

    PageFrameIndex - Supplies the physical page number whose share count
                     is to be decremented.

Return Value:

    None.

Environment:

    Must be holding the PFN database lock with APCs disabled.

--*/

{
    ULONG FreeBit;
    MMPTE TempPte;
    PMMPTE PointerPte;
    PEPROCESS Process;

    ASSERT (PageFrameIndex > 0);
    ASSERT (MI_IS_PFN (PageFrameIndex));
    ASSERT (Pfn1 == MI_PFN_ELEMENT (PageFrameIndex));

    if (Pfn1->u3.e1.PageLocation != ActiveAndValid &&
        Pfn1->u3.e1.PageLocation != StandbyPageList) {
            KeBugCheckEx (PFN_LIST_CORRUPT,
                      0x99,
                      PageFrameIndex,
                      Pfn1->u3.e1.PageLocation,
                      0);
    }

    Pfn1->u2.ShareCount -= 1;

    ASSERT (Pfn1->u2.ShareCount < 0xF000000);

    if (Pfn1->u2.ShareCount == 0) {

        if (PERFINFO_IS_GROUP_ON(PERF_MEMORY)) {
            PERFINFO_PFN_INFORMATION PerfInfoPfn;

            PerfInfoPfn.PageFrameIndex = PageFrameIndex;
            PerfInfoLogBytes(PERFINFO_LOG_TYPE_ZEROSHARECOUNT, 
                             &PerfInfoPfn, 
                             sizeof(PerfInfoPfn));
        }

        //
        // The share count is now zero, decrement the reference count
        // for the PFN element and turn the referenced PTE into
        // the transition state if it refers to a prototype PTE.
        // PTEs which are not prototype PTEs do not need to be placed
        // into transition as they are placed in transition when
        // they are removed from the working set (working set free routine).
        //

        //
        // If the PTE referenced by this PFN element is actually
        // a prototype PTE, it must be mapped into hyperspace and
        // then operated on.
        //

        if (Pfn1->u3.e1.PrototypePte == 1) {

            if (MiIsProtoAddressValid (Pfn1->PteAddress)) {
                Process = NULL;
                PointerPte = Pfn1->PteAddress;
            }
            else {

                //
                // The address is not valid in this process, map it into
                // hyperspace so it can be operated upon.
                //

                Process = PsGetCurrentProcess ();
                PointerPte = (PMMPTE) MiMapPageInHyperSpaceAtDpc(Process, Pfn1->u4.PteFrame);
                PointerPte = (PMMPTE)((PCHAR)PointerPte +
                                        MiGetByteOffset(Pfn1->PteAddress));
            }

            TempPte = *PointerPte;

            MI_MAKE_VALID_PTE_TRANSITION (TempPte,
                                          Pfn1->OriginalPte.u.Soft.Protection);
            MI_WRITE_INVALID_PTE (PointerPte, TempPte);

            if (Process != NULL) {
                MiUnmapPageInHyperSpaceFromDpc (Process, PointerPte);
            }

            //
            // There is no need to flush the translation buffer at this
            // time as we only invalidated a prototype PTE.
            //
        }

        //
        // Change the page location to inactive (from active and valid).
        //

        Pfn1->u3.e1.PageLocation = TransitionPage;

        //
        // Decrement the reference count as the share count is now zero.
        //

        MM_PFN_LOCK_ASSERT();

        ASSERT (Pfn1->u3.e2.ReferenceCount != 0);

        if (Pfn1->u3.e2.ReferenceCount == 1) {

            if (MI_IS_PFN_DELETED (Pfn1)) {

                Pfn1->u3.e2.ReferenceCount = 0;

                //
                // There is no referenced PTE for this page, delete the page
                // file space (if any), and place the page on the free list.
                //

                ASSERT (Pfn1->OriginalPte.u.Soft.Prototype == 0);

                FreeBit = GET_PAGING_FILE_OFFSET (Pfn1->OriginalPte);

                if ((FreeBit != 0) && (FreeBit != MI_PTE_LOOKUP_NEEDED)) {
                    MiReleaseConfirmedPageFileSpace (Pfn1->OriginalPte);
                }

                //
                // Temporarily mark the frame as active and valid so that
                // MiIdentifyPfn knows it is safe to walk back through the
                // containing frames for a more accurate identification.
                // Note the page will be immediately re-marked as it is
                // inserted into the freelist.
                //

                Pfn1->u3.e1.PageLocation = ActiveAndValid;

                MiInsertPageInFreeList (PageFrameIndex);
            }
            else {
                MiDecrementReferenceCount (Pfn1, PageFrameIndex);
            }
        }
        else {
            InterlockedDecrementPfn ((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
    }

    return;
}
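A hedged sketch of the caller pattern implied by the environment notes above: the PFN lock must be held, and the PFN element must match the frame number. The wrapper name is hypothetical:

/* Hypothetical wrapper, for illustration: resolve the PFN element and
 * drop one share reference under the PFN lock, per the rules in the
 * routine header above. */
static VOID
MipDropShareReference(IN PFN_NUMBER PageFrameIndex)
{
    KIRQL OldIrql;
    PMMPFN Pfn1;

    LOCK_PFN (OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
    ASSERT (Pfn1->u2.ShareCount != 0);

    /* May transition the PTE and free the page if this was the last share */
    MiDecrementShareCount (Pfn1, PageFrameIndex);

    UNLOCK_PFN (OldIrql);
}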
Example #5
VOID
MiProcessValidPteList (
    IN PMMPTE *ValidPteList,
    IN ULONG Count
)

/*++

Routine Description:

    This routine decommits the specified array of valid PTEs: each page's
    share count (and that of its page table page) is decremented, each PTE
    is rewritten as decommitted, and the affected virtual addresses are
    flushed from the TB.

Arguments:

    ValidPteList - Supplies a pointer to an array of PTEs to flush.

    Count - Supplies the number of elements in the array.

Return Value:

    None.

Environment:

    Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
    held.

--*/

{
    ULONG i;
    MMPTE_FLUSH_LIST PteFlushList;
    MMPTE PteContents;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER PageTableFrameIndex;
    KIRQL OldIrql;

    i = 0;
    PteFlushList.Count = Count;

    if (Count < MM_MAXIMUM_FLUSH_COUNT) {

        do {
            PteFlushList.FlushVa[i] =
                MiGetVirtualAddressMappedByPte (ValidPteList[i]);
            i += 1;
        } while (i != Count);
        i = 0;
    }

    LOCK_PFN (OldIrql);

    do {
        PteContents = *ValidPteList[i];
        ASSERT (PteContents.u.Hard.Valid == 1);
        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(&PteContents);
        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

        //
        // Decrement the share and valid counts of the page table
        // page which maps this PTE.
        //

        PageTableFrameIndex = Pfn1->u4.PteFrame;
        Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);

        MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);

        MI_SET_PFN_DELETED (Pfn1);

        //
        // Decrement the share count for the physical page.  As the page
        // is private it will be put on the free list.
        //

        MiDecrementShareCount (Pfn1, PageFrameIndex);

        MI_WRITE_INVALID_PTE (ValidPteList[i], MmDecommittedPte);

        i += 1;

    } while (i != Count);

    MiFlushPteList (&PteFlushList);

    UNLOCK_PFN (OldIrql);

    return;
}
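MiFlushPteList consumes the FlushVa array only when Count stayed below MM_MAXIMUM_FLUSH_COUNT; larger batches are cheaper to handle with a full TB flush. A distilled sketch of that threshold decision (the __invlpg intrinsic stands in for the source's flush primitive and is an x86 illustration only):

/* Distilled sketch: invalidate individual addresses for small batches,
 * flush the entire TB for large ones.  __invlpg is used here purely for
 * illustration; the source's MiFlushPteList uses the kernel's own
 * flush primitives. */
static VOID
MipFlushVaBatch(IN PVOID *FlushVa, IN ULONG Count)
{
    ULONG i;

    if (Count < MM_MAXIMUM_FLUSH_COUNT) {
        for (i = 0; i < Count; i += 1) {
            __invlpg(FlushVa[i]);           /* single-entry invalidation */
        }
    }
    else {
        KeFlushEntireTb(TRUE, TRUE);        /* cheaper than many invalidations */
    }
}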
Example #6
ULONG
MiDecommitPages (
    IN PVOID StartingAddress,
    IN PMMPTE EndingPte,
    IN PEPROCESS Process,
    IN PMMVAD_SHORT Vad
)

/*++

Routine Description:

    This routine decommits the specified range of pages.

Arguments:

    StartingAddress - Supplies the starting address of the range.

    EndingPte - Supplies the ending PTE of the range.

    Process - Supplies the current process.

    Vad - Supplies the virtual address descriptor which describes the range.

Return Value:

    Value to reduce commitment by for the VAD.

Environment:

    Kernel mode, APCs disabled, AddressCreation mutex held.

--*/

{
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    PVOID Va;
    ULONG CommitReduction;
    PMMPTE CommitLimitPte;
    KIRQL OldIrql;
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count;
    WSLE_NUMBER WorkingSetIndex;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    WSLE_NUMBER Entry;
    MMWSLENTRY Locked;
    MMPTE PteContents;
    PFN_NUMBER PageTableFrameIndex;
    PVOID UsedPageTableHandle;
    PETHREAD CurrentThread;

    count = 0;
    CommitReduction = 0;

    if (Vad->u.VadFlags.MemCommit) {
        CommitLimitPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));
    }
    else {
        CommitLimitPte = NULL;
    }

    //
    // Decommit each page by setting the PTE to be explicitly
    // decommitted.  The PTEs cannot be deleted all at once as
    // this would set the PTEs to zero which would auto-evaluate
    // as committed if referenced by another thread when a page
    // table page is being in-paged.
    //

    PointerPde = MiGetPdeAddress (StartingAddress);
    PointerPte = MiGetPteAddress (StartingAddress);
    Va = StartingAddress;

    //
    // Loop through all the PDEs which map this region and ensure that
    // they exist.  If they don't exist create them by touching a
    // PTE mapped by the PDE.
    //

    CurrentThread = PsGetCurrentThread ();

    LOCK_WS_UNSAFE (CurrentThread, Process);

    MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);

    while (PointerPte <= EndingPte) {

        if (MiIsPteOnPdeBoundary (PointerPte)) {

            PointerPde = MiGetPdeAddress (Va);
            if (count != 0) {
                MiProcessValidPteList (&ValidPteList[0], count);
                count = 0;
            }

            MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
        }

        //
        // The working set lock is held.  No PTEs can go from
        // invalid to valid or valid to invalid.  Transition
        // PTEs can go from transition to pagefile.
        //

        PteContents = *PointerPte;

        if (PteContents.u.Long != 0) {

            if (PointerPte->u.Long == MmDecommittedPte.u.Long) {

                //
                // This PTE is already decommitted.
                //

                CommitReduction += 1;
            }
            else {

                Process->NumberOfPrivatePages -= 1;

                if (PteContents.u.Hard.Valid == 1) {

                    //
                    // Make sure this is not a forked PTE.
                    //

                    Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

                    if (Pfn1->u3.e1.PrototypePte) {

                        //
                        // MiDeletePte may release both the working set pushlock
                        // and the PFN lock so the valid PTE list must be
                        // processed now.
                        //

                        if (count != 0) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }

                        LOCK_PFN (OldIrql);

                        MiDeletePte (PointerPte,
                                     Va,
                                     FALSE,
                                     Process,
                                     NULL,
                                     NULL,
                                     OldIrql);

                        UNLOCK_PFN (OldIrql);

                        Process->NumberOfPrivatePages += 1;
                        MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                    }
                    else {

                        //
                        // PTE is valid, process later when PFN lock is held.
                        //

                        if (count == MM_VALID_PTE_SIZE) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }
                        ValidPteList[count] = PointerPte;
                        count += 1;

                        //
                        // Remove address from working set list.
                        //

                        WorkingSetIndex = Pfn1->u1.WsIndex;

                        ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
                                Va);
                        //
                        // Check to see if this entry is locked in the
                        // working set or locked in memory.
                        //

                        Locked = MmWsle[WorkingSetIndex].u1.e1;

                        MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);

                        //
                        // Add this entry to the list of free working set
                        // entries and adjust the working set count.
                        //

                        MiReleaseWsle (WorkingSetIndex, &Process->Vm);

                        if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {

                            //
                            // This entry is locked.
                            //

                            MmWorkingSetList->FirstDynamic -= 1;

                            if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {
                                Entry = MmWorkingSetList->FirstDynamic;
                                ASSERT (MmWsle[Entry].u1.e1.Valid);

                                MiSwapWslEntries (Entry,
                                                  WorkingSetIndex,
                                                  &Process->Vm,
                                                  FALSE);
                            }
                        }
                        MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
                    }
                }
                else if (PteContents.u.Soft.Prototype) {

                    //
                    // This is a forked PTE, just delete it.
                    //
                    // MiDeletePte may release both the working set pushlock
                    // and the PFN lock so the valid PTE list must be
                    // processed now.
                    //

                    if (count != 0) {
                        MiProcessValidPteList (&ValidPteList[0], count);
                        count = 0;
                    }

                    LOCK_PFN (OldIrql);

                    MiDeletePte (PointerPte,
                                 Va,
                                 FALSE,
                                 Process,
                                 NULL,
                                 NULL,
                                 OldIrql);

                    UNLOCK_PFN (OldIrql);

                    Process->NumberOfPrivatePages += 1;
                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                }
                else if (PteContents.u.Soft.Transition == 1) {

                    //
                    // Transition PTE, get the PFN database lock
                    // and reprocess this one.
                    //

                    LOCK_PFN (OldIrql);
                    PteContents = *PointerPte;

                    if (PteContents.u.Soft.Transition == 1) {

                        //
                        // PTE is still in transition, delete it.
                        //

                        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);

                        MI_SET_PFN_DELETED (Pfn1);

                        PageTableFrameIndex = Pfn1->u4.PteFrame;
                        Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);

                        MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);

                        //
                        // Check the reference count for the page, if the
                        // reference count is zero, move the page to the
                        // free list, if the reference count is not zero,
                        // ignore this page.  When the reference count
                        // goes to zero, it will be placed on the free list.
                        //

                        if (Pfn1->u3.e2.ReferenceCount == 0) {
                            MiUnlinkPageFromList (Pfn1);
                            MiReleasePageFileSpace (Pfn1->OriginalPte);
                            MiInsertPageInFreeList (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents));
                        }

                    }
                    else {

                        //
                        // Page MUST be in page file format!
                        //

                        ASSERT (PteContents.u.Soft.Valid == 0);
                        ASSERT (PteContents.u.Soft.Prototype == 0);
                        ASSERT (PteContents.u.Soft.PageFileHigh != 0);
                        MiReleasePageFileSpace (PteContents);
                    }
                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                    UNLOCK_PFN (OldIrql);
                }
                else {

                    //
                    // Must be demand zero or paging file format.
                    //

                    if (PteContents.u.Soft.PageFileHigh != 0) {
                        LOCK_PFN (OldIrql);
                        MiReleasePageFileSpace (PteContents);
                        UNLOCK_PFN (OldIrql);
                    }
                    else {

                        //
                        // Don't subtract out the private page count for
                        // a demand zero page.
                        //

                        Process->NumberOfPrivatePages += 1;
                    }

                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                }
            }
        }
        else {

            //
            // The PTE is already zero.
            //

            //
            // Increment the count of non-zero page table entries for this
            // page table and the number of private pages for the process.
            //

            UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);

            MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

            if (PointerPte > CommitLimitPte) {

                //
                // PTE is not committed.
                //

                CommitReduction += 1;
            }
            MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
        }

        PointerPte += 1;
        Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
    }
    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }

    UNLOCK_WS_UNSAFE (CurrentThread, Process);

    return CommitReduction;
}
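The ValidPteList handling above follows a simple batching pattern: valid PTEs are collected into a fixed-size array and handed to MiProcessValidPteList whenever the array fills (or a PDE boundary forces a flush), so the PFN lock is taken once per batch instead of once per page. Distilled below with a hypothetical helper name, simplified by assuming every PTE in the range is valid:

/* Distilled batching skeleton, illustration only: collect PTEs and
 * process them MM_VALID_PTE_SIZE at a time via MiProcessValidPteList.
 * Unlike MiDecommitPages, this assumes all PTEs in the range are valid. */
static VOID
MipBatchValidPtes(IN PMMPTE PointerPte, IN PMMPTE EndingPte)
{
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count = 0;

    while (PointerPte <= EndingPte) {
        if (count == MM_VALID_PTE_SIZE) {
            MiProcessValidPteList (&ValidPteList[0], count);
            count = 0;
        }
        ValidPteList[count] = PointerPte;
        count += 1;
        PointerPte += 1;
    }

    /* Flush the remainder */
    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }
}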