Example #1
static
VOID
MmDeletePageTablePfn(PFN_NUMBER PageFrameNumber, ULONG Level)
{
    PMMPTE PageTable;
    KIRQL OldIrql;
    PMMPFN PfnEntry;
    ULONG i, NumberEntries;

    /* Check if this is a page table */
    if (Level > 0)
    {
        NumberEntries = (Level == 4) ? MiAddressToPxi(MmHighestUserAddress)+1 : 512;

        /* Map the page table in hyperspace */
        PageTable = (PMMPTE)MmCreateHyperspaceMapping(PageFrameNumber);

        /* Loop all page table entries */
        for (i = 0; i < NumberEntries; i++)
        {
            /* Check if the entry is valid */
            if (PageTable[i].u.Hard.Valid)
            {
                /* Recursively free the page that backs it */
                MmDeletePageTablePfn(PageTable[i].u.Hard.PageFrameNumber, Level - 1);
            }
        }

        /* Delete the hyperspace mapping */
        MmDeleteHyperspaceMapping(PageTable);
    }

    /* Check if this is a legacy allocation */
    PfnEntry = MiGetPfnEntry(PageFrameNumber);
    if (MI_IS_ROS_PFN(PfnEntry))
    {
        /* Free it using the legacy API */
        MmReleasePageMemoryConsumer(MC_SYSTEM, PageFrameNumber);
    }
    else
    {
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Free it using the ARM3 API */
        MI_SET_PFN_DELETED(PfnEntry);
        MiDecrementShareCount(PfnEntry, PageFrameNumber);

        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
}
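
The recursion above frees an entire page-table hierarchy from the top down. As a rough, hedged illustration of how it might be driven, the sketch below invokes it on a top-level table; the helper name is an assumption, and the root frame number would typically come from the owning process' directory table base.

static
VOID
MiSketchDeleteTopLevelPageTable(PFN_NUMBER TopLevelPfn)
{
    /* Level 4 corresponds to the PXE level used in the NumberEntries
       calculation above; the recursion then walks the PPE, PDE and PT
       levels beneath it before freeing this frame itself */
    MmDeletePageTablePfn(TopLevelPfn, 4);
}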
Example #2
VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePoolWithTag(BaseAddress, 'mCmM');
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
    // This probably means you did a free on an address in the middle of an allocation
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we setup in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Advance the PTE so the PteAddress assertion tracks each successive page */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
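
MiFreeContiguousMemory relies on the StartOfAllocation and EndOfAllocation bits that the matching allocation path is expected to stamp into the PFN entries; the do/while loop above terminates on the latter. A hedged sketch of that marking step, assuming an allocation routine that already owns a contiguous run of PageCount frames starting at PageFrameIndex (the helper name is illustrative):

static
VOID
MiSketchMarkContiguousRun(PFN_NUMBER PageFrameIndex, PFN_NUMBER PageCount)
{
    PMMPFN Pfn1 = MiGetPfnEntry(PageFrameIndex);

    /* The first frame carries the start marker that the free path validates... */
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* ...and the last frame carries the end marker the free loop scans for */
    Pfn1[PageCount - 1].u3.e1.EndOfAllocation = 1;
}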
Example #3
VOID
MiDeletePte (
    IN PMMPTE PointerPte,
    IN PVOID VirtualAddress,
    IN ULONG AddressSpaceDeletion,
    IN PEPROCESS CurrentProcess,
    IN PMMPTE PrototypePte,
    IN PMMPTE_FLUSH_LIST PteFlushList OPTIONAL
    )

/*++

Routine Description:

    This routine deletes the contents of the specified PTE.  The PTE
    can be in one of the following states:

        - active and valid
        - transition
        - in paging file
        - in prototype PTE format

Arguments:

    PointerPte - Supplies a pointer to the PTE to delete.

    VirtualAddress - Supplies the virtual address which corresponds to
                     the PTE.  This is used to locate the working set entry
                     to eliminate it.

    AddressSpaceDeletion - Supplies TRUE if the address space is being
                           deleted, FALSE otherwise.  If TRUE is specified
                           the TB is not flushed and valid addresses are
                           not removed from the working set.


    CurrentProcess - Supplies a pointer to the current process.

    PrototypePte - Supplies a pointer to the prototype PTE which currently
                   or originally mapped this page.  This is used to determine
                   if the PTE is a fork PTE and should have its reference block
                   decremented.

Return Value:

    None.

Environment:

    Kernel mode, APCs disabled, PFN lock and working set mutex held.

--*/

{
    PMMPTE PointerPde;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    MMPTE PteContents;
    ULONG WorkingSetIndex;
    ULONG Entry;
    PVOID SwapVa;
    MMWSLENTRY Locked;
    ULONG WsPfnIndex;
    PMMCLONE_BLOCK CloneBlock;
    PMMCLONE_DESCRIPTOR CloneDescriptor;

    MM_PFN_LOCK_ASSERT();

#if DBG
    if (MmDebug & MM_DBG_PTE_UPDATE) {
        DbgPrint("deleting PTE\n");
        MiFormatPte(PointerPte);
    }
#endif //DBG

    PteContents = *PointerPte;

    if (PteContents.u.Hard.Valid == 1) {

#ifdef R4000
        ASSERT (PteContents.u.Hard.Global == 0);
#endif
#ifdef _X86_
#if DBG
#if !defined(NT_UP)

        if (PteContents.u.Hard.Writable == 1) {
            ASSERT (PteContents.u.Hard.Dirty == 1);
        }
        ASSERT (PteContents.u.Hard.Accessed == 1);
#endif //NTUP
#endif //DBG
#endif //X86

        //
        // Pte is valid.  Check PFN database to see if this is a prototype PTE.
        //

        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
        WsPfnIndex = Pfn1->u1.WsIndex;

#if DBG
        if (MmDebug & MM_DBG_PTE_UPDATE) {
            MiFormatPfn(Pfn1);
        }
#endif //DBG

        CloneDescriptor = NULL;

        if (Pfn1->u3.e1.PrototypePte == 1) {

            CloneBlock = (PMMCLONE_BLOCK)Pfn1->PteAddress;

            //
            // Capture the state of the modified bit for this
            // pte.
            //

            MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);

            //
            // Decrement the share and valid counts of the page table
            // page which maps this PTE.
            //

            PointerPde = MiGetPteAddress (PointerPte);
            MiDecrementShareAndValidCount (PointerPde->u.Hard.PageFrameNumber);

            //
            // Decrement the share count for the physical page.
            //

            MiDecrementShareCount (PteContents.u.Hard.PageFrameNumber);

            //
            // Check to see if this is a fork prototype PTE and if so
            // update the clone descriptor address.
            //

            if (PointerPte <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) {

                if (PrototypePte != Pfn1->PteAddress) {

                    //
                    // Locate the clone descriptor within the clone tree.
                    //

                    CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);

#if DBG
                    if (CloneDescriptor == NULL) {
                        DbgPrint("1PrototypePte %lx Clone desc %lx pfn pte addr %lx\n",
                        PrototypePte, CloneDescriptor, Pfn1->PteAddress);
                        MiFormatPte(PointerPte);
                        ASSERT (FALSE);
                    }
#endif // DBG

                }
            }
        } else {

            //
            // This PTE is NOT a prototype PTE, delete the physical page.
            //

            //
            // Decrement the share and valid counts of the page table
            // page which maps this PTE.
            //

            MiDecrementShareAndValidCount (Pfn1->PteFrame);

            MI_SET_PFN_DELETED (Pfn1);

            //
            // Decrement the share count for the physical page.  As the page
            // is private it will be put on the free list.
            //

            MiDecrementShareCountOnly (PteContents.u.Hard.PageFrameNumber);

            //
            // Decrement the count for the number of private pages.
            //

            CurrentProcess->NumberOfPrivatePages -= 1;
        }

        //
        // Find the WSLE for this page and eliminate it.
        //

        //
        // If we are deleting the system portion of the address space, do
        // not remove WSLEs or flush translation buffers as there can be
        // no other usage of this address space.
        //

        if (AddressSpaceDeletion == FALSE) {
            WorkingSetIndex = MiLocateWsle (VirtualAddress,
                                            MmWorkingSetList,
                                            WsPfnIndex );

            ASSERT (WorkingSetIndex != WSLE_NULL_INDEX);

            //
            // Check to see if this entry is locked in the working set
            // or locked in memory.
            //

            Locked = MmWsle[WorkingSetIndex].u1.e1;

            MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);

            //
            // Add this entry to the list of free working set entries
            // and adjust the working set count.
            //

            MiReleaseWsle (WorkingSetIndex, &CurrentProcess->Vm);

            if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {

                //
                // This entry is locked.
                //

                ASSERT (WorkingSetIndex < MmWorkingSetList->FirstDynamic);
                MmWorkingSetList->FirstDynamic -= 1;

                if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {

                    Entry = MmWorkingSetList->FirstDynamic;
                    ASSERT (MmWsle[Entry].u1.e1.Valid);
                    SwapVa = MmWsle[Entry].u1.VirtualAddress;
                    SwapVa = PAGE_ALIGN (SwapVa);
                    Pfn2 = MI_PFN_ELEMENT (
                              MiGetPteAddress (SwapVa)->u.Hard.PageFrameNumber);
#if 0
                    Entry = MiLocateWsleAndParent (SwapVa,
                                                   &Parent,
                                                   MmWorkingSetList,
                                                   Pfn2->u1.WsIndex);

                    //
                    // Swap the removed entry with the last locked entry
                    // which is located at first dynamic.
                    //

                    MiSwapWslEntries (Entry,
                                      Parent,
                                      WorkingSetIndex,
                                      MmWorkingSetList);
#endif //0

                    MiSwapWslEntries (Entry,
                                      WorkingSetIndex,
                                      &CurrentProcess->Vm);
                }
            } else {
                ASSERT (WorkingSetIndex >= MmWorkingSetList->FirstDynamic);
            }

            //
            // Flush the entry out of the TB.
            //

            if (!ARGUMENT_PRESENT (PteFlushList)) {
                KeFlushSingleTb (VirtualAddress,
                                 TRUE,
                                 FALSE,
                                 (PHARDWARE_PTE)PointerPte,
                                 ZeroPte.u.Flush);
            } else {
                if (PteFlushList->Count != MM_MAXIMUM_FLUSH_COUNT) {
                    PteFlushList->FlushPte[PteFlushList->Count] = PointerPte;
                    PteFlushList->FlushVa[PteFlushList->Count] = VirtualAddress;
                    PteFlushList->Count += 1;
                }
                *PointerPte = ZeroPte;
            }

            if (CloneDescriptor != NULL) {

                //
                // Flush PTEs as this could release the PFN_LOCK.
                //

                MiFlushPteList (PteFlushList, FALSE, ZeroPte);

                //
                // Decrement the reference count for the clone block,
                // note that this could release and reacquire
                // the mutexes hence cannot be done until after the
                // working set index has been removed.
                //

                if (MiDecrementCloneBlockReference ( CloneDescriptor,
                                                     CloneBlock,
                                                     CurrentProcess )) {

                    //
                    // The working set mutex was released.  This may
                    // have removed the current page table page.
                    //

                    MiDoesPdeExistAndMakeValid (PointerPde,
                                                CurrentProcess,
                                                TRUE);
                }
            }
        }

    } else if (PteContents.u.Soft.Prototype == 1) {

        //
        // This is a prototype PTE, if it is a fork PTE clean up the
        // fork structures.
        //

        if (PteContents.u.Soft.PageFileHigh != 0xFFFFF) {

            //
            // Check to see if the prototype PTE is a fork prototype PTE.
            //

            if (PointerPte <= MiGetPteAddress(MM_HIGHEST_USER_ADDRESS)) {

                if (PrototypePte != MiPteToProto (PointerPte)) {

                    CloneBlock = (PMMCLONE_BLOCK)MiPteToProto (PointerPte);
                    CloneDescriptor = MiLocateCloneAddress ((PVOID)CloneBlock);


#if DBG
                    if (CloneDescriptor == NULL) {
                        DbgPrint("1PrototypePte %lx Clone desc %lx \n",
                        PrototypePte, CloneDescriptor);
                        MiFormatPte(PointerPte);
                        ASSERT (FALSE);
                    }
#endif //DBG

                    //
                    // Decrement the reference count for the clone block,
                    // note that this could release and reacquire
                    // the mutexes.
                    //

                    *PointerPte = ZeroPte;

                    MiFlushPteList (PteFlushList, FALSE, ZeroPte);

                    if (MiDecrementCloneBlockReference ( CloneDescriptor,
                                                         CloneBlock,
                                                         CurrentProcess )) {

                        //
                        // The working set mutex was released.  This may
                        // have removed the current page table page.
                        //

                        MiDoesPdeExistAndMakeValid (MiGetPteAddress (PointerPte),
                                                    CurrentProcess,
                                                    TRUE);
                    }
                }
            }
        }

    } else if (PteContents.u.Soft.Transition == 1) {

        //
        // This is a transition PTE. (Page is private)
        //

        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);

        MI_SET_PFN_DELETED (Pfn1);

        MiDecrementShareCount (Pfn1->PteFrame);

        //
        // Check the reference count for the page, if the reference
        // count is zero, move the page to the free list, if the reference
        // count is not zero, ignore this page.  When the reference count
        // goes to zero, it will be placed on the free list.
        //

        if (Pfn1->u3.e2.ReferenceCount == 0) {
            MiUnlinkPageFromList (Pfn1);
            MiReleasePageFileSpace (Pfn1->OriginalPte);
            MiInsertPageInList (MmPageLocationList[FreePageList],
                                PteContents.u.Trans.PageFrameNumber);
        }

        //
        // Decrement the count for the number of private pages.
        //

        CurrentProcess->NumberOfPrivatePages -= 1;

    } else {

        //
        // Must be page file space.
        //

        if (PteContents.u.Soft.PageFileHigh != 0) {

            if (MiReleasePageFileSpace (*PointerPte)) {

                //
                // Decrement the count for the number of private pages.
                //

                CurrentProcess->NumberOfPrivatePages -= 1;
            }
        }
    }

    //
    // Zero the PTE contents.
    //

    *PointerPte = ZeroPte;

    return;
}
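
When an MMPTE_FLUSH_LIST is supplied, MiDeletePte defers the TB flush to the caller, which allows several deletions to be batched into a single flush. The hedged sketch below shows one plausible caller shape under the Environment constraints above (PFN lock and working set mutex held); the helper name and loop are illustrative, and the MiFlushPteList call mirrors the three-argument form used inside MiDeletePte itself.

VOID
MiSketchDeletePteRange (
    IN PMMPTE PointerPte,
    IN PVOID Va,
    IN ULONG NumberOfPtes,
    IN PEPROCESS Process
    )
{
    MMPTE_FLUSH_LIST PteFlushList;
    ULONG i;

    PteFlushList.Count = 0;

    for (i = 0; i < NumberOfPtes; i += 1) {

        if (PointerPte->u.Long != 0) {

            //
            // MiDeletePte records the VA/PTE pair in the flush list instead
            // of flushing the TB entry immediately.
            //

            MiDeletePte (PointerPte, Va, FALSE, Process, NULL, &PteFlushList);
        }

        PointerPte += 1;
        Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
    }

    //
    // Drain whatever was accumulated in one shot.
    //

    MiFlushPteList (&PteFlushList, FALSE, ZeroPte);
}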
Example #4
NTSTATUS
MmRemovePhysicalMemory (
    IN PPHYSICAL_ADDRESS StartAddress,
    IN OUT PLARGE_INTEGER NumberOfBytes
    )

/*++

Routine Description:

    This routine attempts to remove the specified physical address range
    from the system.

Arguments:

    StartAddress  - Supplies the starting physical address.

    NumberOfBytes  - Supplies a pointer to the number of bytes being removed.

Return Value:

    NTSTATUS.

Environment:

    Kernel mode.  PASSIVE level.  No locks held.

--*/

{
    ULONG i;
    ULONG Additional;
    PFN_NUMBER Page;
    PFN_NUMBER LastPage;
    PFN_NUMBER OriginalLastPage;
    PFN_NUMBER start;
    PFN_NUMBER PagesReleased;
    PMMPFN Pfn1;
    PMMPFN StartPfn;
    PMMPFN EndPfn;
    KIRQL OldIrql;
    PFN_NUMBER StartPage;
    PFN_NUMBER EndPage;
    PFN_COUNT NumberOfPages;
    SPFN_NUMBER MaxPages;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER RemovedPages;
    LOGICAL Inserted;
    NTSTATUS Status;
    PMMPTE PointerPte;
    PMMPTE EndPte;
    PVOID VirtualAddress;
    PPHYSICAL_MEMORY_DESCRIPTOR OldPhysicalMemoryBlock;
    PPHYSICAL_MEMORY_DESCRIPTOR NewPhysicalMemoryBlock;
    PPHYSICAL_MEMORY_RUN NewRun;
    LOGICAL PfnDatabaseIsPhysical;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    ASSERT (BYTE_OFFSET(NumberOfBytes->LowPart) == 0);
    ASSERT (BYTE_OFFSET(StartAddress->LowPart) == 0);

    if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase)) {

        //
        // The system must be configured for dynamic memory addition.  This is
        // not strictly required to remove the memory, but it's better to check
        // for it now under the assumption that the administrator is probably
        // going to want to add this range of memory back in - better to give
        // the error now and refuse the removal than to refuse the addition
        // later.
        //
    
        if (MmDynamicPfn == FALSE) {
            return STATUS_NOT_SUPPORTED;
        }

        PfnDatabaseIsPhysical = TRUE;
    }
    else {
        PfnDatabaseIsPhysical = FALSE;
    }

    StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT);
    NumberOfPages = (PFN_COUNT)(NumberOfBytes->QuadPart >> PAGE_SHIFT);

    EndPage = StartPage + NumberOfPages;

    if (EndPage - 1 > MmHighestPossiblePhysicalPage) {

        //
        // Truncate the request into something that can be mapped by the PFN
        // database.
        //

        EndPage = MmHighestPossiblePhysicalPage + 1;
        NumberOfPages = (PFN_COUNT)(EndPage - StartPage);
    }

    //
    // The range cannot wrap.
    //

    if (StartPage >= EndPage) {
        return STATUS_INVALID_PARAMETER_1;
    }

    StartPfn = MI_PFN_ELEMENT (StartPage);
    EndPfn = MI_PFN_ELEMENT (EndPage);

    ExAcquireFastMutex (&MmDynamicMemoryMutex);

#if DBG
    MiDynmemData[0] += 1;
#endif

    //
    // Decrease all commit limits to reflect the removed memory.
    //

    ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);

    ASSERT (MmTotalCommitLimit <= MmTotalCommitLimitMaximum);

    if ((NumberOfPages + 100 > MmTotalCommitLimit - MmTotalCommittedPages) ||
        (MmTotalCommittedPages > MmTotalCommitLimit)) {

#if DBG
        MiDynmemData[1] += 1;
#endif
        ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);
        ExReleaseFastMutex (&MmDynamicMemoryMutex);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    MmTotalCommitLimit -= NumberOfPages;
    MmTotalCommitLimitMaximum -= NumberOfPages;

    ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);

    //
    // Check for outstanding promises that cannot be broken.
    //

    LOCK_PFN (OldIrql);

    MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 100;

    if ((SPFN_NUMBER)NumberOfPages > MaxPages) {
#if DBG
        MiDynmemData[2] += 1;
#endif
        UNLOCK_PFN (OldIrql);
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto giveup2;
    }

    MmResidentAvailablePages -= NumberOfPages;
    MmNumberOfPhysicalPages -= NumberOfPages;

    //
    // The range must be contained in a single entry.  It is permissible for
    // it to be part of a single entry, but it must not cross multiple entries.
    //

    Additional = (ULONG)-2;

    start = 0;
    do {

        Page = MmPhysicalMemoryBlock->Run[start].BasePage;
        LastPage = Page + MmPhysicalMemoryBlock->Run[start].PageCount;

        if ((StartPage >= Page) && (EndPage <= LastPage)) {
            if ((StartPage == Page) && (EndPage == LastPage)) {
                Additional = (ULONG)-1;
            }
            else if ((StartPage == Page) || (EndPage == LastPage)) {
                Additional = 0;
            }
            else {
                Additional = 1;
            }
            break;
        }

        start += 1;

    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    if (Additional == (ULONG)-2) {
#if DBG
        MiDynmemData[3] += 1;
#endif
        MmResidentAvailablePages += NumberOfPages;
        MmNumberOfPhysicalPages += NumberOfPages;
        UNLOCK_PFN (OldIrql);
        Status = STATUS_CONFLICTING_ADDRESSES;
        goto giveup2;
    }

    for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
        Pfn1->u3.e1.RemovalRequested = 1;
    }

    //
    // The free and zero lists must be pruned now before releasing the PFN
    // lock otherwise if another thread allocates the page from these lists,
    // the allocation will clear the RemovalRequested flag forever.
    //

    RemovedPages = MiRemovePhysicalPages (StartPage, EndPage);

    if (RemovedPages != NumberOfPages) {

#if DBG
retry:
#endif
    
        Pfn1 = StartPfn;
    
        InterlockedIncrement (&MiDelayPageFaults);
    
        for (i = 0; i < 5; i += 1) {
    
            UNLOCK_PFN (OldIrql);
    
            //
            // Attempt to move pages to the standby list.  Note that only the
            // pages with RemovalRequested set are moved.
            //
    
            MiTrimRemovalPagesOnly = TRUE;
    
            MiEmptyAllWorkingSets ();
    
            MiTrimRemovalPagesOnly = FALSE;
    
            MiFlushAllPages ();
    
            KeDelayExecutionThread (KernelMode, FALSE, &MmHalfSecond);
    
            LOCK_PFN (OldIrql);
    
            RemovedPages += MiRemovePhysicalPages (StartPage, EndPage);
    
            if (RemovedPages == NumberOfPages) {
                break;
            }
    
            //
            // RemovedPages doesn't include pages that were freed directly to
            // the bad page list via MiDecrementReferenceCount.  So use the above
            // check purely as an optimization - and walk here when necessary.
            //
    
            for ( ; Pfn1 < EndPfn; Pfn1 += 1) {
                if (Pfn1->u3.e1.PageLocation != BadPageList) {
                    break;
                }
            }
    
            if (Pfn1 == EndPfn) {
                RemovedPages = NumberOfPages;
                break;
            }
        }

        InterlockedDecrement (&MiDelayPageFaults);
    }

    if (RemovedPages != NumberOfPages) {
#if DBG
        MiDynmemData[4] += 1;
        if (MiShowStuckPages != 0) {

            RemovedPages = 0;
            for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
                if (Pfn1->u3.e1.PageLocation != BadPageList) {
                    RemovedPages += 1;
                }
            }

            ASSERT (RemovedPages != 0);

            DbgPrint("MmRemovePhysicalMemory : could not get %d of %d pages\n",
                RemovedPages, NumberOfPages);

            if (MiShowStuckPages & 0x2) {

                ULONG PfnsPrinted;
                ULONG EnoughShown;
                PMMPFN FirstPfn;
                PFN_COUNT PfnCount;

                PfnCount = 0;
                PfnsPrinted = 0;
                EnoughShown = 100;
    
                if (MiShowStuckPages & 0x4) {
                    EnoughShown = (ULONG)-1;
                }
    
                DbgPrint("Stuck PFN list: ");
                for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
                    if (Pfn1->u3.e1.PageLocation != BadPageList) {
                        if (PfnCount == 0) {
                            FirstPfn = Pfn1;
                        }
                        PfnCount += 1;
                    }
                    else {
                        if (PfnCount != 0) {
                            DbgPrint("%x -> %x ; ", FirstPfn - MmPfnDatabase,
                                                    (FirstPfn - MmPfnDatabase) + PfnCount - 1);
                            PfnsPrinted += 1;
                            if (PfnsPrinted == EnoughShown) {
                                break;
                            }
                            PfnCount = 0;
                        }
                    }
                }
                if (PfnCount != 0) {
                    DbgPrint("%x -> %x ; ", FirstPfn - MmPfnDatabase,
                                            (FirstPfn - MmPfnDatabase) + PfnCount - 1);
                }
                DbgPrint("\n");
            }
            if (MiShowStuckPages & 0x8) {
                DbgBreakPoint ();
            }
            if (MiShowStuckPages & 0x10) {
                goto retry;
            }
        }
#endif
        UNLOCK_PFN (OldIrql);
        Status = STATUS_NO_MEMORY;
        goto giveup;
    }

#if DBG
    for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {
        ASSERT (Pfn1->u3.e1.PageLocation == BadPageList);
    }
#endif

    //
    // All the pages in the range have been removed.  Update the physical
    // memory blocks and other associated housekeeping.
    //

    if (Additional == 0) {

        //
        // The range can be split off from an end of an existing chunk so no
        // pool growth or shrinkage is required.
        //

        NewPhysicalMemoryBlock = MmPhysicalMemoryBlock;
        OldPhysicalMemoryBlock = NULL;
    }
    else {

        //
        // The range cannot be split off from an end of an existing chunk so
        // pool growth or shrinkage is required.
        //

        UNLOCK_PFN (OldIrql);

        i = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
             (sizeof(PHYSICAL_MEMORY_RUN) * (MmPhysicalMemoryBlock->NumberOfRuns + Additional)));

        NewPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool,
                                                        i,
                                                        '  mM');

        if (NewPhysicalMemoryBlock == NULL) {
            Status = STATUS_INSUFFICIENT_RESOURCES;
#if DBG
            MiDynmemData[5] += 1;
#endif
            goto giveup;
        }

        OldPhysicalMemoryBlock = MmPhysicalMemoryBlock;
        RtlZeroMemory (NewPhysicalMemoryBlock, i);

        LOCK_PFN (OldIrql);
    }

    //
    // Remove or split the requested range from the existing memory block.
    //

    NewPhysicalMemoryBlock->NumberOfRuns = MmPhysicalMemoryBlock->NumberOfRuns + Additional;
    NewPhysicalMemoryBlock->NumberOfPages = MmPhysicalMemoryBlock->NumberOfPages - NumberOfPages;

    NewRun = &NewPhysicalMemoryBlock->Run[0];
    start = 0;
    Inserted = FALSE;

    do {

        Page = MmPhysicalMemoryBlock->Run[start].BasePage;
        LastPage = Page + MmPhysicalMemoryBlock->Run[start].PageCount;

        if (Inserted == FALSE) {

            if ((StartPage >= Page) && (EndPage <= LastPage)) {

                if ((StartPage == Page) && (EndPage == LastPage)) {
                    ASSERT (Additional == -1);
                    start += 1;
                    continue;
                }
                else if ((StartPage == Page) || (EndPage == LastPage)) {
                    ASSERT (Additional == 0);
                    if (StartPage == Page) {
                        MmPhysicalMemoryBlock->Run[start].BasePage += NumberOfPages;
                    }
                    MmPhysicalMemoryBlock->Run[start].PageCount -= NumberOfPages;
                }
                else {
                    ASSERT (Additional == 1);

                    OriginalLastPage = LastPage;

                    MmPhysicalMemoryBlock->Run[start].PageCount =
                        StartPage - MmPhysicalMemoryBlock->Run[start].BasePage;

                    *NewRun = MmPhysicalMemoryBlock->Run[start];
                    NewRun += 1;

                    NewRun->BasePage = EndPage;
                    NewRun->PageCount = OriginalLastPage - EndPage;
                    NewRun += 1;

                    start += 1;
                    continue;
                }

                Inserted = TRUE;
            }
        }

        *NewRun = MmPhysicalMemoryBlock->Run[start];
        NewRun += 1;
        start += 1;

    } while (start != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // Repoint the MmPhysicalMemoryBlock at the new chunk.
    // Free the old block after releasing the PFN lock.
    //

    MmPhysicalMemoryBlock = NewPhysicalMemoryBlock;

    if (EndPage - 1 == MmHighestPhysicalPage) {
        MmHighestPhysicalPage = StartPage - 1;
    }

    //
    // Throw away all the removed pages that are currently enqueued.
    //

    for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) {

        ASSERT (Pfn1->u3.e1.PageLocation == BadPageList);
        ASSERT (Pfn1->u3.e1.RemovalRequested == 1);

        MiUnlinkPageFromList (Pfn1);

        ASSERT (Pfn1->u1.Flink == 0);
        ASSERT (Pfn1->u2.Blink == 0);
        ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
        ASSERT64 (Pfn1->UsedPageTableEntries == 0);

        Pfn1->PteAddress = PFN_REMOVED;
        Pfn1->u3.e2.ShortFlags = 0;
        Pfn1->OriginalPte.u.Long = ZeroKernelPte.u.Long;
        Pfn1->PteFrame = 0;
    }

    //
    // Now that the removed pages have been discarded, eliminate the PFN
    // entries that mapped them.  Straddling entries left over from an
    // adjacent earlier removal are not collapsed at this point.
    //

    PagesReleased = 0;

    if (PfnDatabaseIsPhysical == FALSE) {

        VirtualAddress = (PVOID)ROUND_TO_PAGES(MI_PFN_ELEMENT(StartPage));
        PointerPte = MiGetPteAddress (VirtualAddress);
        EndPte = MiGetPteAddress (PAGE_ALIGN(MI_PFN_ELEMENT(EndPage)));

        while (PointerPte < EndPte) {
            PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
            Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
            ASSERT (Pfn1->u2.ShareCount == 1);
            ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
            Pfn1->u2.ShareCount = 0;
            MI_SET_PFN_DELETED (Pfn1);
#if DBG
            Pfn1->u3.e1.PageLocation = StandbyPageList;
#endif //DBG
            MiDecrementReferenceCount (PageFrameIndex);
    
            KeFlushSingleTb (VirtualAddress,
                             TRUE,
                             TRUE,
                             (PHARDWARE_PTE)PointerPte,
                             ZeroKernelPte.u.Flush);
    
            PagesReleased += 1;
            PointerPte += 1;
            VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
        }

        MmResidentAvailablePages += PagesReleased;
    }

#if DBG
    MiDynmemData[6] += 1;
#endif

    UNLOCK_PFN (OldIrql);

    if (PagesReleased != 0) {
        MiReturnCommitment (PagesReleased);
    }

    ExReleaseFastMutex (&MmDynamicMemoryMutex);

    if (OldPhysicalMemoryBlock != NULL) {
        ExFreePool (OldPhysicalMemoryBlock);
    }

    NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE;

    return STATUS_SUCCESS;

giveup:

    //
    // All the pages in the range were not obtained.  Back everything out.
    //

    PageFrameIndex = StartPage;
    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    LOCK_PFN (OldIrql);

    while (PageFrameIndex < EndPage) {

        ASSERT (Pfn1->u3.e1.RemovalRequested == 1);

        Pfn1->u3.e1.RemovalRequested = 0;

        if ((Pfn1->u3.e1.PageLocation == BadPageList) &&
            (Pfn1->u3.e1.ParityError == 0)) {

            MiUnlinkPageFromList (Pfn1);
            MiInsertPageInList (MmPageLocationList[FreePageList],
                                PageFrameIndex);
        }

        Pfn1 += 1;
        PageFrameIndex += 1;
    }

    MmResidentAvailablePages += NumberOfPages;
    MmNumberOfPhysicalPages += NumberOfPages;

    UNLOCK_PFN (OldIrql);

giveup2:

    ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql);
    MmTotalCommitLimit += NumberOfPages;
    MmTotalCommitLimitMaximum += NumberOfPages;
    ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql);

    ExReleaseFastMutex (&MmDynamicMemoryMutex);

    return Status;
}
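
A hedged usage sketch for MmRemovePhysicalMemory; the base address and length below are purely illustrative, and both must be page aligned per the ASSERTs at function entry. On success the routine writes back the number of bytes it actually removed.

NTSTATUS
MiSketchRemoveRange (
    VOID
    )
{
    PHYSICAL_ADDRESS StartAddress;
    LARGE_INTEGER NumberOfBytes;

    //
    // Illustrative values only: remove 64MB starting at physical 256MB.
    //

    StartAddress.QuadPart = 0x10000000;
    NumberOfBytes.QuadPart = 64 * 1024 * 1024;

    return MmRemovePhysicalMemory (&StartAddress, &NumberOfBytes);
}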
Example #5
VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT1("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)PointerPte, 0, 0x20);
        }
    }

    /* Determine whether this is an underrun- or overrun-detecting pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's non paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation, ensure that IRQL is not higher than DISPATCH_LEVEL */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 0, 0x31);
        }

        PoolType = NonPagedPool;
    }
    else
    {
        /* Paged allocation, verify the special pool marker PTE and the IRQL */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 1, 0x31);
        }

        PoolType = PagedPool;
    }

    /* Get the number of bytes the user requested by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;

    /* Check memory before the allocated user buffer in case of overruns detection */
    if (Overruns)
    {
        /* Calculate how many bytes actually fit between the pointer and the end of the page */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (Header->BlockSize != b[0])
            {
                /* Bytes mismatch */
                KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)b, Header->BlockSize, 0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Non-pageable. Get the PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Lock PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pageable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Lock PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }

    /* Mark next PTE as invalid */
    PointerPte[1].u.Long = 0; //|= 8000;

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
}
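
The PtrOffset test near the top of MmFreeSpecialPool encodes where the POOL_HEADER lives for the two special-pool flavors. A hedged sketch of that header-location arithmetic, pulled out for clarity (the helper name is illustrative):

static
PPOOL_HEADER
MiSketchSpecialPoolHeader(PVOID P)
{
    if ((ULONG_PTR)P & (PAGE_SIZE - 1))
    {
        /* Overrun detection: the pointer is not page aligned, so the header
           sits at the start of the page and the slack before the buffer is
           filled with the check pattern */
        return (PPOOL_HEADER)PAGE_ALIGN(P);
    }

    /* Underrun detection: the pointer is page aligned, so the header occupies
       the last sizeof(POOL_HEADER) bytes of the page */
    return (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
}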
Example #6
VOID
MiProcessValidPteList (
    IN PMMPTE *ValidPteList,
    IN ULONG Count
)

/*++

Routine Description:

    This routine flushes the specified range of valid PTEs.

Arguments:

    ValidPteList - Supplies a pointer to an array of PTEs to flush.

    Count - Supplies the count of the number of elements in the array.

Return Value:

    None.

Environment:

    Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
    held.

--*/

{
    ULONG i;
    MMPTE_FLUSH_LIST PteFlushList;
    MMPTE PteContents;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER PageTableFrameIndex;
    KIRQL OldIrql;

    i = 0;
    PteFlushList.Count = Count;

    if (Count < MM_MAXIMUM_FLUSH_COUNT) {

        do {
            PteFlushList.FlushVa[i] =
                MiGetVirtualAddressMappedByPte (ValidPteList[i]);
            i += 1;
        } while (i != Count);
        i = 0;
    }

    LOCK_PFN (OldIrql);

    do {
        PteContents = *ValidPteList[i];
        ASSERT (PteContents.u.Hard.Valid == 1);
        PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(&PteContents);
        Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

        //
        // Decrement the share and valid counts of the page table
        // page which maps this PTE.
        //

        PageTableFrameIndex = Pfn1->u4.PteFrame;
        Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);

        MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);

        MI_SET_PFN_DELETED (Pfn1);

        //
        // Decrement the share count for the physical page.  As the page
        // is private it will be put on the free list.
        //

        MiDecrementShareCount (Pfn1, PageFrameIndex);

        MI_WRITE_INVALID_PTE (ValidPteList[i], MmDecommittedPte);

        i += 1;

    } while (i != Count);

    MiFlushPteList (&PteFlushList);

    UNLOCK_PFN (OldIrql);

    return;
}
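
Callers of MiProcessValidPteList are expected to batch valid PTEs into a fixed-size array and drain it whenever the array fills, which is exactly the pattern MiDecommitPages follows in Example #7. A hedged sketch of that batching loop (the helper name and parameters are illustrative):

VOID
MiSketchBatchValidPtes (
    IN PMMPTE PointerPte,
    IN ULONG NumberOfPtes
    )
{
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count;
    ULONG i;

    count = 0;

    for (i = 0; i < NumberOfPtes; i += 1) {

        if (PointerPte->u.Hard.Valid == 1) {

            if (count == MM_VALID_PTE_SIZE) {
                MiProcessValidPteList (&ValidPteList[0], count);
                count = 0;
            }

            ValidPteList[count] = PointerPte;
            count += 1;
        }

        PointerPte += 1;
    }

    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }
}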
Example #7
ULONG
MiDecommitPages (
    IN PVOID StartingAddress,
    IN PMMPTE EndingPte,
    IN PEPROCESS Process,
    IN PMMVAD_SHORT Vad
)

/*++

Routine Description:

    This routine decommits the specified range of pages.

Arguments:

    StartingAddress - Supplies the starting address of the range.

    EndingPte - Supplies the ending PTE of the range.

    Process - Supplies the current process.

    Vad - Supplies the virtual address descriptor which describes the range.

Return Value:

    Value to reduce commitment by for the VAD.

Environment:

    Kernel mode, APCs disabled, AddressCreation mutex held.

--*/

{
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    PVOID Va;
    ULONG CommitReduction;
    PMMPTE CommitLimitPte;
    KIRQL OldIrql;
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count;
    WSLE_NUMBER WorkingSetIndex;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    WSLE_NUMBER Entry;
    MMWSLENTRY Locked;
    MMPTE PteContents;
    PFN_NUMBER PageTableFrameIndex;
    PVOID UsedPageTableHandle;
    PETHREAD CurrentThread;

    count = 0;
    CommitReduction = 0;

    if (Vad->u.VadFlags.MemCommit) {
        CommitLimitPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));
    }
    else {
        CommitLimitPte = NULL;
    }

    //
    // Decommit each page by setting the PTE to be explicitly
    // decommitted.  The PTEs cannot be deleted all at once as
    // this would set the PTEs to zero which would auto-evaluate
    // as committed if referenced by another thread when a page
    // table page is being in-paged.
    //

    PointerPde = MiGetPdeAddress (StartingAddress);
    PointerPte = MiGetPteAddress (StartingAddress);
    Va = StartingAddress;

    //
    // Loop through all the PDEs which map this region and ensure that
    // they exist.  If they don't exist create them by touching a
    // PTE mapped by the PDE.
    //

    CurrentThread = PsGetCurrentThread ();

    LOCK_WS_UNSAFE (CurrentThread, Process);

    MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);

    while (PointerPte <= EndingPte) {

        if (MiIsPteOnPdeBoundary (PointerPte)) {

            PointerPde = MiGetPdeAddress (Va);
            if (count != 0) {
                MiProcessValidPteList (&ValidPteList[0], count);
                count = 0;
            }

            MiMakePdeExistAndMakeValid (PointerPde, Process, MM_NOIRQL);
        }

        //
        // The working set lock is held.  No PTEs can go from
        // invalid to valid or valid to invalid.  Transition
        // PTEs can go from transition to pagefile.
        //

        PteContents = *PointerPte;

        if (PteContents.u.Long != 0) {

            if (PointerPte->u.Long == MmDecommittedPte.u.Long) {

                //
                // This PTE is already decommitted.
                //

                CommitReduction += 1;
            }
            else {

                Process->NumberOfPrivatePages -= 1;

                if (PteContents.u.Hard.Valid == 1) {

                    //
                    // Make sure this is not a forked PTE.
                    //

                    Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

                    if (Pfn1->u3.e1.PrototypePte) {

                        //
                        // MiDeletePte may release both the working set pushlock
                        // and the PFN lock so the valid PTE list must be
                        // processed now.
                        //

                        if (count != 0) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }

                        LOCK_PFN (OldIrql);

                        MiDeletePte (PointerPte,
                                     Va,
                                     FALSE,
                                     Process,
                                     NULL,
                                     NULL,
                                     OldIrql);

                        UNLOCK_PFN (OldIrql);

                        Process->NumberOfPrivatePages += 1;
                        MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                    }
                    else {

                        //
                        // PTE is valid, process later when PFN lock is held.
                        //

                        if (count == MM_VALID_PTE_SIZE) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }
                        ValidPteList[count] = PointerPte;
                        count += 1;

                        //
                        // Remove address from working set list.
                        //

                        WorkingSetIndex = Pfn1->u1.WsIndex;

                        ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
                                Va);
                        //
                        // Check to see if this entry is locked in the
                        // working set or locked in memory.
                        //

                        Locked = MmWsle[WorkingSetIndex].u1.e1;

                        MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);

                        //
                        // Add this entry to the list of free working set
                        // entries and adjust the working set count.
                        //

                        MiReleaseWsle (WorkingSetIndex, &Process->Vm);

                        if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {

                            //
                            // This entry is locked.
                            //

                            MmWorkingSetList->FirstDynamic -= 1;

                            if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {
                                Entry = MmWorkingSetList->FirstDynamic;
                                ASSERT (MmWsle[Entry].u1.e1.Valid);

                                MiSwapWslEntries (Entry,
                                                  WorkingSetIndex,
                                                  &Process->Vm,
                                                  FALSE);
                            }
                        }
                        MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
                    }
                }
                else if (PteContents.u.Soft.Prototype) {

                    //
                    // This is a forked PTE, just delete it.
                    //
                    // MiDeletePte may release both the working set pushlock
                    // and the PFN lock so the valid PTE list must be
                    // processed now.
                    //

                    if (count != 0) {
                        MiProcessValidPteList (&ValidPteList[0], count);
                        count = 0;
                    }

                    LOCK_PFN (OldIrql);

                    MiDeletePte (PointerPte,
                                 Va,
                                 FALSE,
                                 Process,
                                 NULL,
                                 NULL,
                                 OldIrql);

                    UNLOCK_PFN (OldIrql);

                    Process->NumberOfPrivatePages += 1;
                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                }
                else if (PteContents.u.Soft.Transition == 1) {

                    //
                    // Transition PTE, get the PFN database lock
                    // and reprocess this one.
                    //

                    LOCK_PFN (OldIrql);
                    PteContents = *PointerPte;

                    if (PteContents.u.Soft.Transition == 1) {

                        //
                        // PTE is still in transition, delete it.
                        //

                        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);

                        MI_SET_PFN_DELETED (Pfn1);

                        PageTableFrameIndex = Pfn1->u4.PteFrame;
                        Pfn2 = MI_PFN_ELEMENT (PageTableFrameIndex);

                        MiDecrementShareCountInline (Pfn2, PageTableFrameIndex);

                        //
                        // Check the reference count for the page, if the
                        // reference count is zero, move the page to the
                        // free list, if the reference count is not zero,
                        // ignore this page.  When the reference count
                        // goes to zero, it will be placed on the free list.
                        //

                        if (Pfn1->u3.e2.ReferenceCount == 0) {
                            MiUnlinkPageFromList (Pfn1);
                            MiReleasePageFileSpace (Pfn1->OriginalPte);
                            MiInsertPageInFreeList (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents));
                        }

                    }
                    else {

                        //
                        // Page MUST be in page file format!
                        //

                        ASSERT (PteContents.u.Soft.Valid == 0);
                        ASSERT (PteContents.u.Soft.Prototype == 0);
                        ASSERT (PteContents.u.Soft.PageFileHigh != 0);
                        MiReleasePageFileSpace (PteContents);
                    }
                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                    UNLOCK_PFN (OldIrql);
                }
                else {

                    //
                    // Must be demand zero or paging file format.
                    //

                    if (PteContents.u.Soft.PageFileHigh != 0) {
                        LOCK_PFN (OldIrql);
                        MiReleasePageFileSpace (PteContents);
                        UNLOCK_PFN (OldIrql);
                    }
                    else {

                        //
                        // Don't subtract out the private page count for
                        // a demand zero page.
                        //

                        Process->NumberOfPrivatePages += 1;
                    }

                    MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
                }
            }
        }
        else {

            //
            // The PTE is already zero.
            //

            //
            // Increment the count of non-zero page table entries for this
            // page table and the number of private pages for the process.
            //

            UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);

            MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

            if (PointerPte > CommitLimitPte) {

                //
                // PTE is not committed.
                //

                CommitReduction += 1;
            }
            MI_WRITE_INVALID_PTE (PointerPte, MmDecommittedPte);
        }

        PointerPte += 1;
        Va = (PVOID)((PCHAR)Va + PAGE_SIZE);
    }
    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }

    UNLOCK_WS_UNSAFE (CurrentThread, Process);

    return CommitReduction;
}
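
The return value is the number of pages by which the caller should reduce its commit charge for the VAD. The hedged sketch below shows one way a caller might consume it; MiReturnCommitment appears elsewhere in these examples, but the CommitCharge bookkeeping shown here is an assumption for illustration.

VOID
MiSketchDecommitAndCharge (
    IN PVOID StartingAddress,
    IN PMMPTE EndingPte,
    IN PEPROCESS Process,
    IN PMMVAD_SHORT Vad
    )
{
    ULONG CommitReduction;

    CommitReduction = MiDecommitPages (StartingAddress, EndingPte, Process, Vad);

    if (CommitReduction != 0) {

        //
        // Credit the released commitment back to the system and to the VAD
        // (field name assumed for illustration).
        //

        MiReturnCommitment (CommitReduction);
        Vad->u.VadFlags.CommitCharge -= CommitReduction;
    }
}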
Example #8
ULONG
MiDecommitPages (
    IN PVOID StartingAddress,
    IN PMMPTE EndingPte,
    IN PEPROCESS Process,
    IN PMMVAD_SHORT Vad
    )

/*++

Routine Description:

    This routine decommits the specified range of pages.

Arguments:

    StartingAddress - Supplies the starting address of the range.

    EndingPte - Supplies the ending PTE of the range.

    Process - Supplies the current process.

    Vad - Supplies the virtual address descriptor which describes the range.

Return Value:

    Value to reduce commitment by for the VAD.

Environment:

    Kernel mode, APCs disabled, WorkingSetMutex and AddressCreation mutexes
    held.

--*/

{
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    PVOID Va;
    ULONG PdeOffset;
    ULONG CommitReduction = 0;
    PMMPTE CommitLimitPte;
    KIRQL OldIrql;
    PMMPTE ValidPteList[MM_VALID_PTE_SIZE];
    ULONG count = 0;
    ULONG WorkingSetIndex;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    PVOID SwapVa;
    ULONG Entry;
    MMWSLENTRY Locked;
    MMPTE PteContents;

    if (Vad->u.VadFlags.MemCommit) {
        CommitLimitPte = MiGetPteAddress (Vad->EndingVa);
    } else {
        CommitLimitPte = NULL;
    }

    //
    // Decommit each page by setting the PTE to be explicitly
    // decommitted.  The PTEs cannot be deleted all at once as
    // this would set the PTEs to zero which would auto-evaluate
    // as committed if referenced by another thread when a page
    // table page is being in-paged.
    //

    PointerPde = MiGetPdeAddress (StartingAddress);
    PointerPte = MiGetPteAddress (StartingAddress);
    Va = StartingAddress;
    PdeOffset = MiGetPdeOffset (Va);

    //
    // Loop through all the PDEs which map this region and ensure that
    // they exist.  If they don't exist create them by touching a
    // PTE mapped by the PDE.
    //

    //
    // Get the PFN mutex so the MiDeletePte can be called.
    //

    MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);

    while (PointerPte <= EndingPte) {

        if (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0) {

            PdeOffset = MiGetPdeOffset (Va);
            PointerPde = MiGetPdeAddress (Va);
            if (count != 0) {
                MiProcessValidPteList (&ValidPteList[0], count);
                count = 0;
            }
            MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);
        }

        //
        // The working set lock is held.  No PTEs can go from
        // invalid to valid or valid to invalid.  Transition
        // PTEs can go from transition to pagefile.
        //

        PteContents = *PointerPte;

        if (PteContents.u.Long != 0) {

            if (PointerPte->u.Long == MmDecommittedPte.u.Long) {

                //
                // This PTE is already decommitted.
                //

                CommitReduction += 1;

            } else {

                Process->NumberOfPrivatePages -= 1;

                if (PteContents.u.Hard.Valid == 1) {

                    //
                    // Make sure this is not a forked PTE.
                    //

                    Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

                    if (Pfn1->u3.e1.PrototypePte) {

                        LOCK_PFN (OldIrql);
                        MiDeletePte (PointerPte,
                                     Va,
                                     FALSE,
                                     Process,
                                     NULL,
                                     NULL);
                        UNLOCK_PFN (OldIrql);
                        Process->NumberOfPrivatePages += 1;
                        *PointerPte = MmDecommittedPte;
                    } else {

                        //
                        // Pte is valid, process later when PFN lock is held.
                        //

                        if (count == MM_VALID_PTE_SIZE) {
                            MiProcessValidPteList (&ValidPteList[0], count);
                            count = 0;
                        }
                        ValidPteList[count] = PointerPte;
                        count += 1;

                        //
                        // Remove address from working set list.
                        //


                        WorkingSetIndex = Pfn1->u1.WsIndex;

                        ASSERT (PAGE_ALIGN(MmWsle[WorkingSetIndex].u1.Long) ==
                                                                           Va);
                        //
                        // Check to see if this entry is locked in the working set
                        // or locked in memory.
                        //

                        Locked = MmWsle[WorkingSetIndex].u1.e1;

                        MiRemoveWsle (WorkingSetIndex, MmWorkingSetList);

                        //
                        // Add this entry to the list of free working set entries
                        // and adjust the working set count.
                        //

                        MiReleaseWsle (WorkingSetIndex, &Process->Vm);

                        if ((Locked.LockedInWs == 1) || (Locked.LockedInMemory == 1)) {

                            //
                            // This entry is locked in the working set or in
                            // memory, so it lives in the static region below
                            // FirstDynamic.  Shrink that region and keep it
                            // dense by swapping the entry at the old boundary
                            // into the slot just vacated.
                            //

                            MmWorkingSetList->FirstDynamic -= 1;

                            if (WorkingSetIndex != MmWorkingSetList->FirstDynamic) {

                                SwapVa = MmWsle[MmWorkingSetList->FirstDynamic].u1.VirtualAddress;
                                SwapVa = PAGE_ALIGN (SwapVa);
                                Pfn2 = MI_PFN_ELEMENT (
                                          MiGetPteAddress (SwapVa)->u.Hard.PageFrameNumber);

                                Entry = MiLocateWsle (SwapVa,
                                                      MmWorkingSetList,
                                                      Pfn2->u1.WsIndex);

                                MiSwapWslEntries (Entry,
                                                  WorkingSetIndex,
                                                  &Process->Vm);
                            }
                        }
                    }
                } else if (PteContents.u.Soft.Prototype) {

                    //
                    // This is a forked PTE, just delete it.
                    //

                    LOCK_PFN (OldIrql);
                    MiDeletePte (PointerPte,
                                 Va,
                                 FALSE,
                                 Process,
                                 NULL,
                                 NULL);
                    UNLOCK_PFN (OldIrql);
                    Process->NumberOfPrivatePages += 1;
                    *PointerPte = MmDecommittedPte;

                } else if (PteContents.u.Soft.Transition == 1) {

                    //
                    // Transition PTE, get the PFN database lock
                    // and reprocess this one.
                    //

                    LOCK_PFN (OldIrql);
                    PteContents = *PointerPte;

                    if (PteContents.u.Soft.Transition == 1) {

                        //
                        // PTE is still in transition, delete it.
                        //

                        Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber);

                        MI_SET_PFN_DELETED (Pfn1);

                        MiDecrementShareCount (Pfn1->PteFrame);

                        //
                        // Check the reference count for the page.  If the
                        // reference count is zero, move the page to the
                        // free list; if it is not zero, leave the page
                        // alone.  When the reference count eventually drops
                        // to zero, the page will be placed on the free list.
                        //

                        if (Pfn1->u3.e2.ReferenceCount == 0) {
                            MiUnlinkPageFromList (Pfn1);
                            MiReleasePageFileSpace (Pfn1->OriginalPte);
                            MiInsertPageInList (MmPageLocationList[FreePageList],
                                                PteContents.u.Trans.PageFrameNumber);
                        }

                        *PointerPte = MmDecommittedPte;

                    } else {

                        //
                        // Page MUST be in page file format!
                        //

                        ASSERT (PteContents.u.Soft.Valid == 0);
                        ASSERT (PteContents.u.Soft.Prototype == 0);
                        ASSERT (PteContents.u.Soft.PageFileHigh != 0);
                        MiReleasePageFileSpace (PteContents);
                        *PointerPte = MmDecommittedPte;
                    }
                    UNLOCK_PFN (OldIrql);
                } else {

                    //
                    // Must be demand zero or paging file format.
                    //

                    if (PteContents.u.Soft.PageFileHigh != 0) {
                        LOCK_PFN (OldIrql);
                        MiReleasePageFileSpace (PteContents);
                        UNLOCK_PFN (OldIrql);
                    } else {

                        //
                        // Don't subtract out the private page count for
                        // a demand zero page.
                        //

                        Process->NumberOfPrivatePages += 1;
                    }

                    *PointerPte = MmDecommittedPte;
                }
            }

        } else {

            //
            // The PTE is already zero.  Setting it to the explicit
            // decommitted encoding below makes it non-zero, so increment
            // the count of non-zero page table entries for this page
            // table.
            //

            MmWorkingSetList->UsedPageTableEntries[PdeOffset] += 1;

            if (PointerPte > CommitLimitPte) {

                //
                // Pte is not committed.
                //

                CommitReduction += 1;
            }
            *PointerPte = MmDecommittedPte;
        }

        PointerPte += 1;
        Va = (PVOID)((ULONG)Va + PAGE_SIZE);
    }
    if (count != 0) {
        MiProcessValidPteList (&ValidPteList[0], count);
    }

    return CommitReduction;
}
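
The ValidPteList handling above is an instance of a batch-and-flush pattern:
valid PTEs are collected up to MM_VALID_PTE_SIZE at a time and then handed to
MiProcessValidPteList in one call, presumably so the PFN-lock work is paid once
per batch rather than once per PTE.  The following user-mode C sketch shows the
same pattern in isolation; BATCH_SIZE, ProcessBatch and the pthread mutex are
illustrative stand-ins and are not part of the kernel code above.

#include <pthread.h>
#include <stdio.h>

#define BATCH_SIZE 16                       /* plays the role of MM_VALID_PTE_SIZE */

static pthread_mutex_t Lock = PTHREAD_MUTEX_INITIALIZER;

/* Process a whole batch under a single lock acquisition. */
static void ProcessBatch(int *Batch, unsigned Count)
{
    unsigned i;

    pthread_mutex_lock(&Lock);
    for (i = 0; i < Count; i++) {
        printf("processing item %d\n", Batch[i]);
    }
    pthread_mutex_unlock(&Lock);
}

int main(void)
{
    int Batch[BATCH_SIZE];
    unsigned Count = 0;
    int Item;

    for (Item = 0; Item < 100; Item++) {
        if (Count == BATCH_SIZE) {          /* batch is full: flush it */
            ProcessBatch(Batch, Count);
            Count = 0;
        }
        Batch[Count] = Item;
        Count += 1;
    }

    if (Count != 0) {                       /* flush the final partial batch */
        ProcessBatch(Batch, Count);
    }
    return 0;
}
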
Example #9
0
LOGICAL
MiZeroAllPageFiles (
    VOID
    )

/*++

Routine Description:

    This routine zeroes all inactive pagefile blocks in all pagefiles.

Arguments:

    None.

Return Value:

    Returns TRUE on success, FALSE on failure.

Environment:

    Kernel mode, the caller must lock down PAGELK.

--*/

{
    PMMPFN Pfn1;
    PFN_NUMBER MaxPagesToWrite;
    KIRQL OldIrql;
    ULONG i;
    PFN_NUMBER j;
    PFN_NUMBER PageFrameIndex;
    PMM_ZERO_PAGEFILE_CONTEXT ZeroContext;
    ULONG NumberOfPagingFiles;
    KEVENT WaitEvents[MAX_PAGE_FILES];
    PKEVENT WaitObjects[MAX_PAGE_FILES];
    KWAIT_BLOCK WaitBlockArray[MAX_PAGE_FILES];

    MaxPagesToWrite = MmModifiedWriteClusterSize;

    //
    // Get a zeroed page to use as the source for the writes.
    //

    LOCK_PFN (OldIrql);

    MI_DECREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_ALLOCATE_FOR_PAGEFILE_ZEROING);

    if (MmAvailablePages < MM_LOW_LIMIT) {
        UNLOCK_PFN (OldIrql);
        MI_INCREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_FREE_FOR_PAGEFILE_ZEROING);
        return TRUE;
    }

    PageFrameIndex = MiRemoveZeroPage (0);

    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    ASSERT (Pfn1->u2.ShareCount == 0);
    ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
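
    //
    // Bias the reference count by the modified-writer cluster size so this
    // zero-filled source page cannot be freed or reused while the per-pagefile
    // workers are using it; the matching decrements happen below, after every
    // worker has signaled completion.
    //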

    Pfn1->u3.e2.ReferenceCount = (USHORT) MaxPagesToWrite;
    Pfn1->PteAddress = (PMMPTE) (ULONG_PTR)(X64K | 0x1);
    Pfn1->OriginalPte.u.Long = 0;
    MI_SET_PFN_DELETED (Pfn1);

    UNLOCK_PFN (OldIrql);

    //
    // Capture the number of paging files in case a new one gets added.
    //

    NumberOfPagingFiles = MmNumberOfPagingFiles;

    for (i = NumberOfPagingFiles; i != 0; i -= 1) {

        KeInitializeEvent (&WaitEvents[i - 1], NotificationEvent, FALSE);
        WaitObjects[i - 1] = &WaitEvents[i - 1];

        ZeroContext = ExAllocatePoolWithTag (NonPagedPool,
                                             sizeof (MM_ZERO_PAGEFILE_CONTEXT),
                                             'wZmM');

        if (ZeroContext == NULL) {
            KeSetEvent (WaitObjects[i - 1], 0, FALSE);
            continue;
        }

        ZeroContext->PagingFile = MmPagingFile[i - 1];
        ZeroContext->ZeroedPageFrame = PageFrameIndex;
        ZeroContext->AllDone = WaitObjects[i - 1];

        if (i != 1) {

            ExInitializeWorkItem (&ZeroContext->WorkItem,
                                  MiZeroPageFile,
                                  (PVOID) ZeroContext);

            ExQueueWorkItem (&ZeroContext->WorkItem, CriticalWorkQueue);
        }
        else {

            //
            // Zero the first pagefile ourselves, then wait for
            // any others to finish.
            //

            KeSetEvent (WaitObjects[i - 1], 0, FALSE);
            MiZeroPageFile (ZeroContext);
        }
    }
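
    //
    // The first pagefile was zeroed inline on this thread; each remaining
    // pagefile was either handed to a critical work queue item above or, if
    // its context allocation failed, skipped with its event pre-signaled.
    // A wait is therefore only needed when more than one pagefile exists.
    //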

    if (NumberOfPagingFiles > 1) {

        KeWaitForMultipleObjects (NumberOfPagingFiles,
                                  &WaitObjects[0],
                                  WaitAll,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL,
                                  &WaitBlockArray[0]);
    }

    LOCK_PFN (OldIrql);

    ASSERT (Pfn1->u3.e2.ReferenceCount >= MaxPagesToWrite);

    if (Pfn1->u3.e2.ReferenceCount == MaxPagesToWrite) {
        MI_INCREMENT_RESIDENT_AVAILABLE (1, MM_RESAVAIL_FREE_FOR_PAGEFILE_ZEROING);
    }

    for (j = 0; j < MaxPagesToWrite; j += 1) {
        MiDecrementReferenceCountInline (Pfn1, PageFrameIndex);
    }

    UNLOCK_PFN (OldIrql);

    return TRUE;
}
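
The fields assigned to ZeroContext above imply the rough shape of the
per-pagefile worker.  The sketch below is an assumption rather than the actual
MiZeroPageFile implementation: the structure layout is inferred from the
assignments in MiZeroAllPageFiles, the pagefile zeroing itself is elided, and
only the completion protocol (signal AllDone, free the 'wZmM' pool block) is
shown.

typedef struct _MM_ZERO_PAGEFILE_CONTEXT {
    WORK_QUEUE_ITEM WorkItem;           // queued to the critical work queue
    PMMPAGING_FILE PagingFile;          // pagefile this worker is responsible for
    PFN_NUMBER ZeroedPageFrame;         // shared zero-filled source page
    PKEVENT AllDone;                    // event signaled when the worker finishes
} MM_ZERO_PAGEFILE_CONTEXT, *PMM_ZERO_PAGEFILE_CONTEXT;

VOID
MiZeroPageFileSketch (                  // hypothetical stand-in for MiZeroPageFile
    IN PVOID Context
    )
{
    PMM_ZERO_PAGEFILE_CONTEXT ZeroContext;

    ZeroContext = (PMM_ZERO_PAGEFILE_CONTEXT) Context;

    //
    // Write the shared zeroed page (ZeroContext->ZeroedPageFrame) over every
    // inactive block of ZeroContext->PagingFile (elided in this sketch).
    //

    //
    // Tell the dispatcher in MiZeroAllPageFiles that this pagefile is done,
    // then release the context it allocated.  The event lives in the caller's
    // stack array, not in the pool block, so signaling before freeing is safe.
    //

    KeSetEvent (ZeroContext->AllDone, 0, FALSE);
    ExFreePoolWithTag (ZeroContext, 'wZmM');
}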