Example #1
BOOLEAN
NTAPI
MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                            IN ULONG PageCount)
{
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER UnprotectedPages = 0;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;

    /* Get and capture the PTE */
    PointerPte = MiAddressToPte(VirtualAddress);
    TempPte = *PointerPte;

    /* Loop protected PTEs */
    while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
    {
        /* Unprotect the PTE */
        TempPte.u.Hard.Valid = 1;
        TempPte.u.Soft.Prototype = 0;
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* One more page */
        if (++UnprotectedPages == PageCount) break;

        /* Capture next PTE */
        TempPte = *(++PointerPte);
    }

    /* Return if any pages were unprotected */
    return UnprotectedPages ? TRUE : FALSE;
}
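This routine is the inverse of MiProtectFreeNonPagedPool; Example #3 below shows the real call sites. Reduced to a minimal sketch (Entry and EntrySize are hypothetical locals, not kernel globals):

/* Sketch only: touching a freed-pool list entry while protection is on */
if (MmProtectFreedNonPagedPool)
{
    /* Make the freed pages accessible before reading the entry */
    MiUnProtectFreeNonPagedPool(Entry, 0);
}

/* ... examine or unlink the MMFREE_POOL_ENTRY here ... */

if (MmProtectFreedNonPagedPool)
{
    /* Re-protect so stray writes to freed pool fault immediately */
    MiProtectFreeNonPagedPool(Entry, EntrySize);
}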
Example #2
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}
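The kernel-mode path above is the one exercised by the standard WDK calling pattern, sketched here under assumptions (Buffer and Length are placeholders; a user-mode buffer would also need SEH around the probe):

PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
PVOID SystemVa;

if (Mdl != NULL)
{
    MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
                                            KernelMode,
                                            MmCached,
                                            NULL,    /* no preferred base */
                                            FALSE,   /* don't bugcheck */
                                            NormalPagePriority);
    if (SystemVa != NULL)
    {
        /* ... access the locked pages through SystemVa ... */
        MmUnmapLockedPages(SystemVa, Mdl);
    }
    MmUnlockPages(Mdl);
    IoFreeMdl(Mdl);
}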
Example #3
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page tables this expansion needs
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first PTE, then compute the loop sentinel one past the
        // allocation (StartPte is reused here as that end marker)
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, checking for protected pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the allocation's first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
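A caller's-eye sketch, under the assumption that MiFreePoolPages in the same module is the release path (the executive pool manager takes this route for requests of a page or more):

/* Sketch only: allocate and release a three-page nonpaged run */
PVOID BaseVa = MiAllocatePoolPages(NonPagedPool, 3 * PAGE_SIZE);
if (BaseVa != NULL)
{
    /* ... carve pool blocks out of the run ... */
    MiFreePoolPages(BaseVa);
}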
Example #4
VOID
MiUpdateSystemPdes (
    IN PEPROCESS Process
    )

/*++

Routine Description:

    This routine updates the system PDEs, typically due to a large page
    system PTE mapping being created or destroyed.  This is rare.

    Note this is only needed for 32-bit platforms (64-bit platforms share
    a common top level system page).

Arguments:

    Process - Supplies a pointer to the process to update.

Return Value:

    None.

Environment:

    Kernel mode, expansion lock held.

    The caller acquired the expansion lock prior to clearing the update
    bit from this process.  We must update the PDEs prior to releasing
    it so that any new updates can also be rippled.

--*/

{
    ULONG i;
    ULONG PdeOffset;
    ULONG PdeEndOffset;
    MMPTE TempPte;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER TargetPageDirectoryIndex;
    PEPROCESS CurrentProcess;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE TargetPdePage;
    PMMPTE TargetAddressSpacePde;

    ASSERT (KeGetCurrentIrql () == DISPATCH_LEVEL);

    CurrentProcess = PsGetCurrentProcess ();

    //
    // Map the page directory page in hyperspace.
    // Note for PAE, this is the high 1GB virtual only.
    //

    ASSERT (Process->Pcb.DirectoryTableBase[0] != 0);
    TargetPageDirectoryIndex = Process->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    ASSERT (PsInitialSystemProcess != NULL);
    ASSERT (PsInitialSystemProcess->Pcb.DirectoryTableBase[0] != 0);
    PageDirectoryIndex = PsInitialSystemProcess->Pcb.DirectoryTableBase[0] >> PAGE_SHIFT;

    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = TargetPageDirectoryIndex;
    ASSERT (MiLargePageHyperPte->u.Long == 0);
    MI_WRITE_VALID_PTE (MiLargePageHyperPte, TempPte);
    TargetPdePage = MiGetVirtualAddressMappedByPte (MiLargePageHyperPte);

    //
    // Map the system process page directory as we know that's always kept
    // up to date.
    //

    PointerPte = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
                                             PageDirectoryIndex);

    //
    // Copy all system PTE ranges.
    //

    for (i = 0; i < MiPteRangeIndex; i += 1) {

        PdeOffset = MiGetPdeOffset (MiPteRanges[i].StartingVa);
        PdeEndOffset = MiGetPdeOffset (MiPteRanges[i].EndingVa);

        PointerPde = &PointerPte[PdeOffset];
        TargetAddressSpacePde = &TargetPdePage[PdeOffset];

        RtlCopyMemory (TargetAddressSpacePde,
                       PointerPde,
                       (PdeEndOffset - PdeOffset + 1) * sizeof (MMPTE));

    }

    MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);

    //
    // Just invalidate the mapping on the current processor as we cannot
    // have context switched.
    //

    MI_WRITE_ZERO_PTE (MiLargePageHyperPte);
    MI_FLUSH_CURRENT_TB_SINGLE (TargetPdePage);

    return;
}
Example #5
File: page.c Project: GYGit/reactos
static
PMMPTE
MiGetPteForProcess(
    PEPROCESS Process,
    PVOID Address,
    BOOLEAN Create)
{
    MMPTE TmplPte, *Pte;

    /* Check if we need hyperspace mapping */
    if (Address < MmSystemRangeStart &&
        Process && Process != PsGetCurrentProcess())
    {
        UNIMPLEMENTED;
        __debugbreak();
        return NULL;
    }
    else if (Create)
    {
        KIRQL OldIrql;
        TmplPte.u.Long = 0;
        TmplPte.u.Flush.Valid = 1;
        TmplPte.u.Flush.Write = 1;

        /* All page table levels of user pages are user owned */
        TmplPte.u.Flush.Owner = (Address < MmHighestUserAddress) ? 1 : 0;

        /* Lock the PFN database */
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Get the PXE */
        Pte = MiAddressToPxe(Address);
        if (!Pte->u.Hard.Valid)
        {
            TmplPte.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
            MI_WRITE_VALID_PTE(Pte, TmplPte);
        }

        /* Get the PPE */
        Pte = MiAddressToPpe(Address);
        if (!Pte->u.Hard.Valid)
        {
            TmplPte.u.Hard.PageFrameNumber = MiRemoveZeroPage(1);
            MI_WRITE_VALID_PTE(Pte, TmplPte);
        }

        /* Get the PDE */
        Pte = MiAddressToPde(Address);
        if (!Pte->u.Hard.Valid)
        {
            TmplPte.u.Hard.PageFrameNumber = MiRemoveZeroPage(2);
            MI_WRITE_VALID_PTE(Pte, TmplPte);
        }

        /* Unlock PFN database */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Get the PXE */
        Pte = MiAddressToPxe(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;

        /* Get the PPE */
        Pte = MiAddressToPpe(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;

        /* Get the PDE */
        Pte = MiAddressToPde(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;
    }

    return MiAddressToPte(Address);
}
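With Create = FALSE the helper is a pure lookup, returning NULL as soon as any level of the hierarchy is absent. A usage sketch (Address is a placeholder):

/* Sketch only */
PMMPTE Pte = MiGetPteForProcess(PsGetCurrentProcess(), Address, FALSE);
if ((Pte != NULL) && (Pte->u.Hard.Valid != 0))
{
    /* Address is mapped; Pte->u.Hard.PageFrameNumber is its PFN */
}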
Example #6
VOID
MiUpdateSystemPdes (
    IN PEPROCESS Process
    )

/*++

Routine Description:

    This routine updates the system PDEs, typically due to a large page
    system PTE mapping being created or destroyed.  This is rare.

    Note this is only needed for 32-bit platforms (64-bit platforms share
    a common top level system page).

Arguments:

    Process - Supplies a pointer to the process to update.

Return Value:

    None.

Environment:

    Kernel mode, expansion lock held.

    The caller acquired the expansion lock prior to clearing the update
    bit from this process.  We must update the PDEs prior to releasing
    it so that any new updates can also be rippled.

--*/

{
    ULONG PdeOffset;
    ULONG PdeEndOffset;
    LOGICAL LowPtes;
    PVOID VirtualAddress;
    MMPTE TempPte;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER TargetPageDirectoryIndex;
    PEPROCESS CurrentProcess;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE TargetPdePage;
    PMMPTE TargetAddressSpacePde;
    PMMPTE PaeTop;
    ULONG i;

    ASSERT (KeGetCurrentIrql () == DISPATCH_LEVEL);

    CurrentProcess = PsGetCurrentProcess ();

    //
    // Map the page directory page in hyperspace.
    // Note for PAE, this is the high 1GB virtual only.
    //

    PaeTop = Process->PaeTop;
    ASSERT (PaeTop != NULL);
    PaeTop += 3;
    ASSERT (PaeTop->u.Hard.Valid == 1);
    TargetPageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

    PaeTop = &MiSystemPaeVa.PteEntry[PD_PER_SYSTEM - 1];
    ASSERT (PaeTop->u.Hard.Valid == 1);
    PageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

    TempPte = ValidKernelPte;
    TempPte.u.Hard.PageFrameNumber = TargetPageDirectoryIndex;
    ASSERT (MiLargePageHyperPte->u.Long == 0);
    MI_WRITE_VALID_PTE (MiLargePageHyperPte, TempPte);
    TargetPdePage = MiGetVirtualAddressMappedByPte (MiLargePageHyperPte);

    LowPtes = FALSE;

    //
    // Map the system process page directory as we know that's always kept
    // up to date.
    //

    PointerPte = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
                                             PageDirectoryIndex);

    //
    // Copy all system PTE ranges that reside in the top 1GB.
    //

    for (i = 0; i < MiPteRangeIndex; i += 1) {

        VirtualAddress = MiPteRanges[i].StartingVa;

        if (VirtualAddress < (PVOID) 0xC0000000) {
            LowPtes = TRUE;
            continue;
        }

        PdeOffset = MiGetPdeOffset (VirtualAddress);
        PdeEndOffset = MiGetPdeOffset (MiPteRanges[i].EndingVa);

        PointerPde = &PointerPte[PdeOffset];
        TargetAddressSpacePde = &TargetPdePage[PdeOffset];

        RtlCopyMemory (TargetAddressSpacePde,
                       PointerPde,
                       (PdeEndOffset - PdeOffset + 1) * sizeof (MMPTE));

    }

    MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);

    //
    // Just invalidate the mapping on the current processor as we cannot
    // have context switched.
    //

    MI_WRITE_ZERO_PTE (MiLargePageHyperPte);
    MI_FLUSH_CURRENT_TB_SINGLE (TargetPdePage);

    ASSERT (MmSystemRangeStart >= (PVOID) 0x80000000);

    //
    // Copy low additional system PTE ranges (if they exist).
    //

    if (LowPtes == TRUE) {
            
        ASSERT (MmSystemRangeStart < (PVOID) 0xC0000000);

        //
        // Map the target process' page directory.
        //

        PaeTop = Process->PaeTop;
        ASSERT (PaeTop != NULL);
        PaeTop += 2;
        ASSERT (PaeTop->u.Hard.Valid == 1);
        TargetPageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

        TempPte.u.Hard.PageFrameNumber = TargetPageDirectoryIndex;
        ASSERT (MiLargePageHyperPte->u.Long == 0);
        MI_WRITE_VALID_PTE (MiLargePageHyperPte, TempPte);

        //
        // Map the system's page directory.
        //

        PaeTop = &MiSystemPaeVa.PteEntry[PD_PER_SYSTEM - 2];
        ASSERT (PaeTop->u.Hard.Valid == 1);
        PageDirectoryIndex = (PFN_NUMBER)(PaeTop->u.Hard.PageFrameNumber);

        PdeOffset = MiGetPdeOffset (MmSystemRangeStart);
        TargetAddressSpacePde = &TargetPdePage[PdeOffset];

        PointerPte = MiMapPageInHyperSpaceAtDpc (CurrentProcess,
                                                 PageDirectoryIndex);

        //
        // Copy all system ranges that reside in the 3rd GB.
        //
    
        for (i = 0; i < MiPteRangeIndex; i += 1) {
    
            VirtualAddress = MiPteRanges[i].StartingVa;
    
            if (VirtualAddress < (PVOID) 0xC0000000) {
    
                PdeOffset = MiGetPdeOffset (VirtualAddress);
                PdeEndOffset = MiGetPdeOffset (MiPteRanges[i].EndingVa);
        
                PointerPde = &PointerPte[PdeOffset];
                TargetAddressSpacePde = &TargetPdePage[PdeOffset];
        
                RtlCopyMemory (TargetAddressSpacePde,
                               PointerPde,
                               (PdeEndOffset - PdeOffset + 1) * sizeof (MMPTE));
            } 
        }
    
        MiUnmapPageInHyperSpaceFromDpc (CurrentProcess, PointerPte);

        //
        // Just invalidate the mapping on the current processor as we cannot
        // have context switched.
        //

        MI_WRITE_ZERO_PTE (MiLargePageHyperPte);
        MI_FLUSH_CURRENT_TB_SINGLE (TargetPdePage);
    }

    return;
}
Example #7
BOOLEAN
MmCreateProcessAddressSpace (
    IN ULONG MinimumWorkingSetSize,
    IN PEPROCESS NewProcess,
    OUT PULONG_PTR DirectoryTableBase
    )

/*++

Routine Description:

    This routine creates an address space which maps the system
    portion and contains a hyper space entry.

Arguments:

    MinimumWorkingSetSize - Supplies the minimum working set size for
                            this address space.  This value is only used
                            to ensure that ample physical pages exist
                            to create this process.

    NewProcess - Supplies a pointer to the process object being created.

    DirectoryTableBase - Returns the value of the newly created
                         address space's Page Directory (PD) page and
                         hyper space page.

Return Value:

    Returns TRUE if an address space was successfully created, FALSE
    if ample physical pages do not exist.

Environment:

    Kernel mode.  APCs Disabled.

--*/

{
    LOGICAL FlushTbNeeded;
    PFN_NUMBER PageDirectoryIndex;
    PFN_NUMBER HyperSpaceIndex;
    PFN_NUMBER PageContainingWorkingSet;
    PFN_NUMBER VadBitMapPage;
    MMPTE TempPte;
    MMPTE TempPte2;
    PEPROCESS CurrentProcess;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    ULONG Color;
    PMMPTE PointerPte;
    ULONG PdeOffset;
    PMMPTE MappingPte;
    PMMPTE PointerFillPte;
    PMMPTE CurrentAddressSpacePde;

    //
    // Charge commitment for the page directory pages, working set page table
    // page, and working set list.  If Vad bitmap lookups are enabled, then
    // charge for a page or two for that as well.
    //

    if (MiChargeCommitment (MM_PROCESS_COMMIT_CHARGE, NULL) == FALSE) {
        return FALSE;
    }

    FlushTbNeeded = FALSE;
    CurrentProcess = PsGetCurrentProcess ();

    NewProcess->NextPageColor = (USHORT) (RtlRandom (&MmProcessColorSeed));
    KeInitializeSpinLock (&NewProcess->HyperSpaceLock);

    //
    // Get the PFN lock to get physical pages.
    //

    LOCK_PFN (OldIrql);

    //
    // Check to make sure the physical pages are available.
    //

    if (MI_NONPAGEABLE_MEMORY_AVAILABLE() <= (SPFN_NUMBER)MinimumWorkingSetSize){

        UNLOCK_PFN (OldIrql);
        MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE);

        //
        // Indicate no directory base was allocated.
        //

        return FALSE;
    }

    MM_TRACK_COMMIT (MM_DBG_COMMIT_PROCESS_CREATE, MM_PROCESS_COMMIT_CHARGE);

    MI_DECREMENT_RESIDENT_AVAILABLE (MinimumWorkingSetSize,
                                     MM_RESAVAIL_ALLOCATE_CREATE_PROCESS);

    //
    // Allocate a page directory page.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color =  MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE,
                                        &CurrentProcess->NextPageColor);

    PageDirectoryIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Allocate the hyper space page table page.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE),
                                       &CurrentProcess->NextPageColor);

    HyperSpaceIndex = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (HyperSpaceIndex);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Remove page(s) for the VAD bitmap.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
                                      &CurrentProcess->NextPageColor);

    VadBitMapPage = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (VadBitMapPage);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    //
    // Remove a page for the working set list.
    //

    if (MmAvailablePages < MM_HIGH_LIMIT) {
        MiEnsureAvailablePageOrWait (NULL, OldIrql);
    }

    Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList,
                                      &CurrentProcess->NextPageColor);

    PageContainingWorkingSet = MiRemoveZeroPageMayReleaseLocks (Color, OldIrql);

    Pfn1 = MI_PFN_ELEMENT (PageContainingWorkingSet);

    if (Pfn1->u3.e1.CacheAttribute != MiCached) {
        Pfn1->u3.e1.CacheAttribute = MiCached;
        FlushTbNeeded = TRUE;
    }

    UNLOCK_PFN (OldIrql);

    if (FlushTbNeeded == TRUE) {
        MI_FLUSH_TB_FOR_CACHED_ATTRIBUTE ();
    }

    ASSERT (NewProcess->AddressSpaceInitialized == 0);
    PS_SET_BITS (&NewProcess->Flags, PS_PROCESS_FLAGS_ADDRESS_SPACE1);
    ASSERT (NewProcess->AddressSpaceInitialized == 1);

    NewProcess->Vm.MinimumWorkingSetSize = MinimumWorkingSetSize;

    NewProcess->WorkingSetPage = PageContainingWorkingSet;

    INITIALIZE_DIRECTORY_TABLE_BASE (&DirectoryTableBase[0], PageDirectoryIndex);

    INITIALIZE_DIRECTORY_TABLE_BASE (&DirectoryTableBase[1], HyperSpaceIndex);

    //
    // Initialize the page reserved for hyper space.
    //

    TempPte = ValidPdePde;
    MI_SET_GLOBAL_STATE (TempPte, 0);

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  HyperSpaceIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        PointerPte = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        PointerPte = MiMapPageInHyperSpace (CurrentProcess, HyperSpaceIndex, &OldIrql);
    }

    TempPte.u.Hard.PageFrameNumber = VadBitMapPage;
    PointerPte[MiGetPteOffset(VAD_BITMAP_SPACE)] = TempPte;

    TempPte.u.Hard.PageFrameNumber = PageContainingWorkingSet;
    PointerPte[MiGetPteOffset(MmWorkingSetList)] = TempPte;

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
    }

    //
    // Set the PTE address in the PFN for the page directory page.
    //

    Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex);

    Pfn1->PteAddress = (PMMPTE)PDE_BASE;

    TempPte = ValidPdePde;
    TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex;
    MI_SET_GLOBAL_STATE (TempPte, 0);

    //
    // Add the new process to our internal list prior to filling any
    // system PDEs so if a system PDE changes (large page map or unmap)
    // it can mark this process for a subsequent update.
    //

    ASSERT (NewProcess->Pcb.DirectoryTableBase[0] == 0);

    LOCK_EXPANSION (OldIrql);

    InsertTailList (&MmProcessList, &NewProcess->MmProcessLinks);

    UNLOCK_EXPANSION (OldIrql);

    //
    // Map the page directory page in hyperspace.
    //

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  PageDirectoryIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        PointerPte = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {
        PointerPte = MiMapPageInHyperSpace (CurrentProcess, PageDirectoryIndex, &OldIrql);
    }

    PdeOffset = MiGetPdeOffset (MmSystemRangeStart);
    PointerFillPte = &PointerPte[PdeOffset];
    CurrentAddressSpacePde = MiGetPdeAddress (MmSystemRangeStart);

    RtlCopyMemory (PointerFillPte,
                   CurrentAddressSpacePde,
                   PAGE_SIZE - PdeOffset * sizeof (MMPTE));

    //
    // Map the working set page table page.
    //

    PdeOffset = MiGetPdeOffset (HYPER_SPACE);
    PointerPte[PdeOffset] = TempPte;

    //
    // Zero the remaining page directory range used to map the working
    // set list and its hash.
    //

    PdeOffset += 1;
    ASSERT (MiGetPdeOffset (MmHyperSpaceEnd) >= PdeOffset);

    MiZeroMemoryPte (&PointerPte[PdeOffset],
                     (MiGetPdeOffset (MmHyperSpaceEnd) - PdeOffset + 1));

    //
    // Recursively map the page directory page so it points to itself.
    //

    TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
    PointerPte[MiGetPdeOffset(PTE_BASE)] = TempPte;

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, PointerPte, OldIrql);
    }

    InterlockedExchangeAddSizeT (&MmProcessCommit, MM_PROCESS_COMMIT_CHARGE);

    //
    // Up the session space reference count.
    //

    MiSessionAddProcess (NewProcess);

    return TRUE;
}
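The recursive mapping step near the end is what makes macros like MiGetPdeAddress and MiAddressToPte work: the PDE slot covering PTE_BASE points back at the page directory itself, so every page table shows up at a fixed virtual address. In isolation (x86 non-PAE sketch, 4-byte PTEs assumed):

TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex;
PointerPte[MiGetPdeOffset(PTE_BASE)] = TempPte;

/* Afterwards a PTE is reachable by pure address arithmetic:
 *   MiAddressToPte(Va) == (PMMPTE)(PTE_BASE + ((ULONG_PTR)Va >> 12) * 4)
 */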
Example #8
/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateNonCachedMemory(IN SIZE_T NumberOfBytes)
{
    PFN_COUNT PageCount, MdlPageCount;
    PFN_NUMBER PageFrameIndex;
    PHYSICAL_ADDRESS LowAddress, HighAddress, SkipBytes;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMDL Mdl;
    PVOID BaseAddress;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Get the page count
    //
    ASSERT(NumberOfBytes != 0);
    PageCount = (PFN_COUNT)BYTES_TO_PAGES(NumberOfBytes);

    //
    // Use the MDL allocator for simplicity, so setup the parameters
    //
    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = -1;
    SkipBytes.QuadPart = 0;
    CacheAttribute = MiPlatformCacheAttributes[0][MmNonCached];

    //
    // Now call the MDL allocator
    //
    Mdl = MiAllocatePagesForMdl(LowAddress,
                                HighAddress,
                                SkipBytes,
                                NumberOfBytes,
                                CacheAttribute,
                                0);
    if (!Mdl) return NULL;

    //
    // Get the MDL VA and check how many pages we got (could be partial)
    //
    BaseAddress = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    MdlPageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, Mdl->ByteCount);
    if (PageCount != MdlPageCount)
    {
        //
        // Unlike MDLs, partial isn't okay for a noncached allocation, so fail
        //
        ASSERT(PageCount > MdlPageCount);
        MmFreePagesFromMdl(Mdl);
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Allocate system PTEs for the base address.
    // One extra PTE slot stashes the MDL pointer so the free path can find it
    //
    PointerPte = MiReserveSystemPtes(PageCount + 1, SystemPteSpace);
    if (!PointerPte)
    {
        //
        // Out of memory...
        //
        MmFreePagesFromMdl(Mdl);
        ExFreePoolWithTag(Mdl, TAG_MDL);
        return NULL;
    }

    //
    // Store the MDL pointer
    //
    *(PMDL*)PointerPte++ = Mdl;

    //
    // Okay, now see what range we got
    //
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // This is our array of pages
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Setup the template PTE
    //
    TempPte = ValidKernelPte;

    //
    // Now check what kind of caching we should use
    //
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable caching
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiWriteCombined:

            //
            // Enable write combining
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_COMBINED(&TempPte);
            break;

        default:
            //
            // Nothing to do
            //
            break;
    }

    //
    // Now loop the MDL pages
    //
    do
    {
        //
        // Get the PFN
        //
        PageFrameIndex = *MdlPages++;

        //
        // Set the PFN in the page and write it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // Return the base address
    //
    return BaseAddress;
}
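Note how the routine reserves one extra system PTE and stashes the MDL pointer in it, just below the returned base address, so the free path can recover the MDL from the address alone. The public pairing, as a sketch:

/* Sketch only */
PVOID Buffer = MmAllocateNonCachedMemory(PAGE_SIZE);
if (Buffer != NULL)
{
    /* ... share with a device that must bypass the CPU cache ... */
    MmFreeNonCachedMemory(Buffer, PAGE_SIZE);
}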
Example #9
/*
 * @implemented
 */
PVOID
NTAPI
MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
             IN SIZE_T NumberOfBytes,
             IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    MMPTE TempPte;
    PMMPFN Pfn1 = NULL;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    BOOLEAN IsIoMapping;

    //
    // Must be called with a non-zero count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Make sure the upper bits are 0 if this system
    // can't describe more than 4 GB of physical memory.
    // FIXME: This doesn't respect PAE, but we currently don't
    // define a PAE build flag since there is no such build.
    //
#if !defined(_M_AMD64)
    ASSERT(PhysicalAddress.HighPart == 0);
#endif

    //
    // Normalize and validate the caching attributes
    //
    CacheType &= 0xFF;
    if (CacheType >= MmMaximumCacheType) return NULL;

    //
    // Calculate page count
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(PhysicalAddress.LowPart,
                                               NumberOfBytes);

    //
    // Compute the PFN and check if it's a known I/O mapping
    // Also translate the cache attribute
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    Pfn1 = MiGetPfnEntry(Pfn);
    IsIoMapping = (Pfn1 == NULL) ? TRUE : FALSE;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    //
    // Now allocate system PTEs for the mapping, and get the VA
    //
    PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
    if (!PointerPte) return NULL;
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // Check if this is uncached
    //
    if (CacheAttribute != MiCached)
    {
        //
        // Flush all caches
        //
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    //
    // Now compute the VA offset
    //
    BaseAddress = (PVOID)((ULONG_PTR)BaseAddress +
                          BYTE_OFFSET(PhysicalAddress.LowPart));

    //
    // Get the template and configure caching
    //
    TempPte = ValidKernelPte;
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable the cache
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiCached:

            //
            // Leave defaults
            //
            break;

        case MiWriteCombined:

            //
            // We don't support write combining yet
            //
            ASSERT(FALSE);
            break;

        default:

            //
            // Should never happen
            //
            ASSERT(FALSE);
            break;
    }

    //
    // Sanity check and re-flush
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    ASSERT((Pfn1 == MiGetPfnEntry(Pfn)) || (Pfn1 == NULL));
    KeFlushEntireTb(TRUE, TRUE);
    KeInvalidateAllCaches();

    //
    // Do the mapping
    //
    do
    {
        //
        // Write the PFN
        //
        TempPte.u.Hard.PageFrameNumber = Pfn++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // We're done!
    //
    return BaseAddress;
}
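Typical driver usage of this API, sketched with a made-up physical address (not a real BAR):

PHYSICAL_ADDRESS RegBase;
PVOID Regs;

RegBase.QuadPart = 0xFED00000; /* hypothetical device register base */
Regs = MmMapIoSpace(RegBase, 0x1000, MmNonCached);
if (Regs != NULL)
{
    /* ... READ_REGISTER_ULONG/WRITE_REGISTER_ULONG through Regs ... */
    MmUnmapIoSpace(Regs, 0x1000);
}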
Example #10
LOGICAL
FASTCALL
MiCopyOnWrite (
    IN PVOID FaultingAddress,
    IN PMMPTE PointerPte
    )

/*++

Routine Description:

    This routine performs a copy on write operation for the specified
    virtual address.

Arguments:

    FaultingAddress - Supplies the virtual address which caused the fault.

    PointerPte - Supplies the pointer to the PTE which caused the page fault.

Return Value:

    Returns TRUE if the page was actually split, FALSE if not.

Environment:

    Kernel mode, APCs disabled, working set mutex held.

--*/

{
    MMPTE TempPte;
    MMPTE TempPte2;
    PMMPTE MappingPte;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER NewPageIndex;
    PVOID CopyTo;
    PVOID CopyFrom;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    PEPROCESS CurrentProcess;
    PMMCLONE_BLOCK CloneBlock;
    PMMCLONE_DESCRIPTOR CloneDescriptor;
    WSLE_NUMBER WorkingSetIndex;
    LOGICAL FakeCopyOnWrite;
    PMMWSL WorkingSetList;
    PVOID SessionSpace;
    PLIST_ENTRY NextEntry;
    PIMAGE_ENTRY_IN_SESSION Image;

    //
    // This is called from MmAccessFault, the PointerPte is valid
    // and the working set mutex ensures it cannot change state.
    //
    // Capture the PTE contents to TempPte.
    //

    TempPte = *PointerPte;
    ASSERT (TempPte.u.Hard.Valid == 1);

    PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&TempPte);
    Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);

    //
    // Check to see if this is a prototype PTE with copy on write enabled.
    //

    FakeCopyOnWrite = FALSE;
    CurrentProcess = PsGetCurrentProcess ();
    CloneBlock = NULL;

    if (FaultingAddress >= (PVOID) MmSessionBase) {

        WorkingSetList = MmSessionSpace->Vm.VmWorkingSetList;
        ASSERT (Pfn1->u3.e1.PrototypePte == 1);
        SessionSpace = (PVOID) MmSessionSpace;

        MM_SESSION_SPACE_WS_LOCK_ASSERT ();

        if (MmSessionSpace->ImageLoadingCount != 0) {

            NextEntry = MmSessionSpace->ImageList.Flink;
    
            while (NextEntry != &MmSessionSpace->ImageList) {
    
                Image = CONTAINING_RECORD (NextEntry, IMAGE_ENTRY_IN_SESSION, Link);
    
                if ((FaultingAddress >= Image->Address) &&
                    (FaultingAddress <= Image->LastAddress)) {
    
                    if (Image->ImageLoading) {
    
                        ASSERT (Pfn1->u3.e1.PrototypePte == 1);
    
                        TempPte.u.Hard.CopyOnWrite = 0;
                        TempPte.u.Hard.Write = 1;
    
                        //
                        // The page is no longer copy on write, update the PTE
                        // setting both the dirty bit and the accessed bit.
                        //
                        // Even though the page's current backing is the image
                        // file, the modified writer will convert it to
                        // pagefile backing when it notices the change later.
                        //
    
                        MI_SET_PTE_DIRTY (TempPte);
                        MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
    
                        MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, TempPte);
    
                        //
                        // The TB entry must be flushed as the valid PTE with
                        // the dirty bit clear has been fetched into the TB. If
                        // it isn't flushed, another fault is generated as the
                        // dirty bit is not set in the cached TB entry.
                        //
    
                        MI_FLUSH_SINGLE_TB (FaultingAddress, TRUE);
    
                        return FALSE;
                    }
                    break;
                }
    
                NextEntry = NextEntry->Flink;
            }
        }
    }
    else {
        WorkingSetList = MmWorkingSetList;
        SessionSpace = NULL;

        //
        // If a fork operation is in progress, block until the fork is
        // completed, then retry the whole operation as the state of
        // everything may have changed between when the mutexes were
        // released and reacquired.
        //

        if (CurrentProcess->ForkInProgress != NULL) {
            if (MiWaitForForkToComplete (CurrentProcess) == TRUE) {
                return FALSE;
            }
        }

        if (TempPte.u.Hard.CopyOnWrite == 0) {

            //
            // This is a fork page which is being made private in order
            // to change the protection of the page.
            // Do not make the page writable.
            //

            FakeCopyOnWrite = TRUE;
        }
    }

    WorkingSetIndex = MiLocateWsle (FaultingAddress,
                                    WorkingSetList,
                                    Pfn1->u1.WsIndex,
                                    FALSE);

    //
    // The page must be copied into a new page.
    //

    LOCK_PFN (OldIrql);

    if ((MmAvailablePages < MM_HIGH_LIMIT) &&
        (MiEnsureAvailablePageOrWait (SessionSpace != NULL ? HYDRA_PROCESS : CurrentProcess, OldIrql))) {

        //
        // A wait operation was performed to obtain an available
        // page and the working set mutex and PFN lock have
        // been released and various things may have changed for
        // the worse.  Rather than examine all the conditions again,
        // return and if things are still proper, the fault will
        // be taken again.
        //

        UNLOCK_PFN (OldIrql);
        return FALSE;
    }

    //
    // This must be a prototype PTE.  Perform the copy on write.
    //

    ASSERT (Pfn1->u3.e1.PrototypePte == 1);

    //
    // A page is being copied and made private, the global state of
    // the shared page needs to be updated at this point on certain
    // hardware.  This is done by ORing the dirty bit into the modify bit in
    // the PFN element.
    //
    // Note that a session page cannot be dirty (no POSIX-style forking is
    // supported for these drivers).
    //

    if (SessionSpace != NULL) {
        ASSERT ((TempPte.u.Hard.Valid == 1) && (TempPte.u.Hard.Write == 0));
        ASSERT (!MI_IS_PTE_DIRTY (TempPte));

        NewPageIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_SESSION(MmSessionSpace));
    }
    else {
        MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1);
        CloneBlock = (PMMCLONE_BLOCK) Pfn1->PteAddress;

        //
        // Get a new page with the same color as this page.
        //

        NewPageIndex = MiRemoveAnyPage (
                        MI_PAGE_COLOR_PTE_PROCESS(PageFrameIndex,
                                              &CurrentProcess->NextPageColor));
    }

    MiInitializeCopyOnWritePfn (NewPageIndex,
                                PointerPte,
                                WorkingSetIndex,
                                WorkingSetList);

    UNLOCK_PFN (OldIrql);

    InterlockedIncrement (&KeGetCurrentPrcb ()->MmCopyOnWriteCount);

    CopyFrom = PAGE_ALIGN (FaultingAddress);

    MappingPte = MiReserveSystemPtes (1, SystemPteSpace);

    if (MappingPte != NULL) {

        MI_MAKE_VALID_KERNEL_PTE (TempPte2,
                                  NewPageIndex,
                                  MM_READWRITE,
                                  MappingPte);

        MI_SET_PTE_DIRTY (TempPte2);

        if (Pfn1->u3.e1.CacheAttribute == MiNonCached) {
            MI_DISABLE_CACHING (TempPte2);
        }
        else if (Pfn1->u3.e1.CacheAttribute == MiWriteCombined) {
            MI_SET_PTE_WRITE_COMBINE (TempPte2);
        }

        MI_WRITE_VALID_PTE (MappingPte, TempPte2);

        CopyTo = MiGetVirtualAddressMappedByPte (MappingPte);
    }
    else {

        CopyTo = MiMapPageInHyperSpace (CurrentProcess,
                                        NewPageIndex,
                                        &OldIrql);
    }

    KeCopyPage (CopyTo, CopyFrom);

    if (MappingPte != NULL) {
        MiReleaseSystemPtes (MappingPte, 1, SystemPteSpace);
    }
    else {
        MiUnmapPageInHyperSpace (CurrentProcess, CopyTo, OldIrql);
    }

    if (!FakeCopyOnWrite) {

        //
        // If the page was really a copy on write page, make it
        // accessed, dirty and writable.  Also, clear the copy-on-write
        // bit in the PTE.
        //

        MI_SET_PTE_DIRTY (TempPte);
        TempPte.u.Hard.Write = 1;
        MI_SET_ACCESSED_IN_PTE (&TempPte, 1);
        TempPte.u.Hard.CopyOnWrite = 0;
    }

    //
    // Regardless of whether the page was really a copy on write,
    // the frame field of the PTE must be updated.
    //

    TempPte.u.Hard.PageFrameNumber = NewPageIndex;

    //
    // If the modify bit is set in the PFN database for the
    // page, the data cache must be flushed.  This is due to the
    // fact that this process may have been cloned and the cache
    // still contains stale data destined for the page we are
    // going to remove.
    //

    ASSERT (TempPte.u.Hard.Valid == 1);

    MI_WRITE_VALID_PTE_NEW_PAGE (PointerPte, TempPte);

    //
    // Flush the TB entry for this page.
    //

    if (SessionSpace == NULL) {

        MI_FLUSH_SINGLE_TB (FaultingAddress, FALSE);

        //
        // Increment the number of private pages.
        //

        CurrentProcess->NumberOfPrivatePages += 1;
    }
    else {

        MI_FLUSH_SINGLE_TB (FaultingAddress, TRUE);

        ASSERT (Pfn1->u3.e1.PrototypePte == 1);
    }

    //
    // Decrement the share count for the page which was copied
    // as this PTE no longer refers to it.
    //

    LOCK_PFN (OldIrql);

    MiDecrementShareCount (Pfn1, PageFrameIndex);

    if (SessionSpace == NULL) {

        CloneDescriptor = MiLocateCloneAddress (CurrentProcess,
                                                (PVOID)CloneBlock);

        if (CloneDescriptor != NULL) {

            //
            // Decrement the reference count for the clone block,
            // note that this could release and reacquire the mutexes.
            //

            MiDecrementCloneBlockReference (CloneDescriptor,
                                            CloneBlock,
                                            CurrentProcess,
                                            NULL,
                                            OldIrql);
        }
    }

    UNLOCK_PFN (OldIrql);
    return TRUE;
}
Example #11
PVOID
NTAPI
MiMapPageInHyperSpace(IN PEPROCESS Process,
                      IN PFN_NUMBER Page,
                      IN PKIRQL OldIrql)
{
    MMPTE TempPte;
    PMMPTE PointerPte;
    PFN_NUMBER Offset;

    //
    // Never accept page 0 or non-physical pages
    //
    ASSERT(Page != 0);
    ASSERT(MiGetPfnEntry(Page) != NULL);

    //
    // Build the PTE
    //
    TempPte = ValidKernelPteLocal;
    TempPte.u.Hard.PageFrameNumber = Page;

    //
    // Pick the first hyperspace PTE
    //
    PointerPte = MmFirstReservedMappingPte;

    //
    // Acquire the hyperlock
    //
    ASSERT(Process == PsGetCurrentProcess());
    KeAcquireSpinLock(&Process->HyperSpaceLock, OldIrql);

    //
    // Now get the first free PTE
    //
    Offset = PFN_FROM_PTE(PointerPte);
    if (!Offset)
    {
        //
        // Reset the PTEs
        //
        Offset = MI_HYPERSPACE_PTES;
        KeFlushProcessTb();
    }

    //
    // Prepare the next PTE
    //
    PointerPte->u.Hard.PageFrameNumber = Offset - 1;

    //
    // Write the current PTE
    //
    PointerPte += Offset;
    MI_WRITE_VALID_PTE(PointerPte, TempPte);

    //
    // Return the address
    //
    return MiPteToAddress(PointerPte);
}
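The matching unmap appears in Examples #7 and #10 above; the whole pattern reduces to a sketch like this (PageFrameIndex is a placeholder PFN):

KIRQL OldIrql;
PVOID Va;

Va = MiMapPageInHyperSpace(PsGetCurrentProcess(), PageFrameIndex, &OldIrql);
RtlZeroMemory(Va, PAGE_SIZE);
MiUnmapPageInHyperSpace(PsGetCurrentProcess(), Va, OldIrql);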
Example #12
PVOID
NTAPI
MiMapPagesInZeroSpace(IN PMMPFN Pfn1,
                      IN PFN_NUMBER NumberOfPages)
{
    MMPTE TempPte;
    PMMPTE PointerPte;
    PFN_NUMBER Offset, PageFrameIndex;

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
    ASSERT(NumberOfPages != 0);
    ASSERT(NumberOfPages <= (MI_ZERO_PTES - 1));

    //
    // Pick the first zeroing PTE
    //
    PointerPte = MiFirstReservedZeroingPte;

    //
    // Now get the first free PTE
    //
    Offset = PFN_FROM_PTE(PointerPte);
    if (NumberOfPages > Offset)
    {
        //
        // Reset the PTEs
        //
        Offset = MI_ZERO_PTES - 1;
        PointerPte->u.Hard.PageFrameNumber = Offset;
        KeFlushProcessTb();
    }

    //
    // Prepare the next PTE
    //
    PointerPte->u.Hard.PageFrameNumber = Offset - NumberOfPages;

    /* Choose the correct PTE to use, and which template */
    PointerPte += (Offset + 1);
    TempPte = ValidKernelPte;

    /* Make sure the list isn't empty and loop it */
    ASSERT(Pfn1 != (PVOID)LIST_HEAD);
    while (Pfn1 != (PVOID)LIST_HEAD)
    {
        /* Get the page index for this PFN */
        PageFrameIndex = MiGetPfnEntryIndex(Pfn1);

        //
        // Write the PFN
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

        //
        // Set the correct PTE to write to, and set its new value
        //
        PointerPte--;
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* Move to the next PFN */
        Pfn1 = (PMMPFN)Pfn1->u1.Flink;
    }

    //
    // Return the address
    //
    return MiPteToAddress(PointerPte);
}
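A hedged sketch of how the zero-page thread would drive this helper, assuming MiUnmapPagesInZeroSpace and KeZeroPages as the counterparts:

/* Sketch only: map a chain of free pages, zero them, unmap them */
PVOID ZeroBase = MiMapPagesInZeroSpace(Pfn1, PageCount);
KeZeroPages(ZeroBase, PageCount << PAGE_SHIFT);
MiUnmapPagesInZeroSpace(ZeroBase, PageCount);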