Example #1
VOID
NTAPI
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                          IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    /* Get PTE pointers and loop */
    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Capture the PTE for safety */
        TempPte = *PointerPte;

        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
        TempPte.u.Hard.Valid = 0;
        TempPte.u.Soft.Prototype = 1;
        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
    } while (++PointerPte < LastPte);

    /* Flush the TLB */
    KeFlushEntireTb(TRUE, TRUE);
}
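
The unprotect path referenced by example #3 (MiUnProtectFreeNonPagedPool) has to undo exactly these two bit flips before the freed block is touched again. A minimal sketch of that direction, assuming the same MMPTE layout and the MI_WRITE_VALID_PTE helper used in examples #3 and #5; the real ReactOS routine may differ in details such as its return value:

VOID
NTAPI
MiUnProtectFreeNonPagedPoolSketch(IN PVOID VirtualAddress,
                                  IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* Physical (identity-mapped) pool has no PTEs to restore */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Only touch PTEs that MiProtectFreeNonPagedPool marked: invalid + proto bit set */
        TempPte = *PointerPte;
        if ((TempPte.u.Hard.Valid == 1) || (TempPte.u.Soft.Prototype == 0)) break;

        /* Reverse the two bit flips and make the translation valid again */
        TempPte.u.Soft.Prototype = 0;
        TempPte.u.Hard.Valid = 1;
        MI_WRITE_VALID_PTE(PointerPte, TempPte); /* assumed helper, mirrors MI_WRITE_INVALID_PTE */
    } while (++PointerPte < LastPte);

    /* No TLB flush is needed here: the TLB never caches invalid translations */
}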
Example #2
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapIoSpace(IN PVOID BaseAddress,
               IN SIZE_T NumberOfBytes)
{
    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Get the page count
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(BaseAddress, NumberOfBytes);

    //
    // Get the PTE and PFN
    //
    PointerPte = MiAddressToPte(BaseAddress);
    Pfn = PFN_FROM_PTE(PointerPte);

    //
    // Is this an I/O mapping?
    //
    if (!MiGetPfnEntry(Pfn))
    {
        //
        // Destroy the PTE
        //
        RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));

        //
        // Blow the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);
    }

    //
    // Release the PTEs
    //
    MiReleaseSystemPtes(PointerPte, PageCount, 0);
}
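
MmUnmapIoSpace must receive the same base address and byte count that the matching MmMapIoSpace call used (example #5 shows the map side). A hedged usage sketch from a driver's perspective; the physical address and length below are purely illustrative:

VOID
ExampleMapUnmapUsage(VOID)
{
    PHYSICAL_ADDRESS RegisterBase;
    SIZE_T RegisterLength = 0x1000;     /* illustrative register block size */
    PVOID Registers;

    RegisterBase.QuadPart = 0xFED00000; /* hypothetical BAR from the device's translated resources */

    Registers = MmMapIoSpace(RegisterBase, RegisterLength, MmNonCached);
    if (Registers != NULL)
    {
        /* ... access the device registers through Registers ... */

        /* Unmap with the exact same base and length */
        MmUnmapIoSpace(Registers, RegisterLength);
    }
}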
Example #3
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page tables are needed to satisfy the request
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size  << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page of the allocation
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
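
The paged-pool half of this routine is at heart a bitmap allocator: every bit in PagedPoolAllocationMap stands for one paged-pool page, RtlFindClearBitsAndSet reserves a contiguous run, and EndOfPagedPoolBitmap records where each allocation ends so the free path can recover its length from nothing but the base address. A stripped-down sketch of that scheme using the documented Rtl bitmap API; the sizes and names below are illustrative, not the kernel's own:

#define POOL_PAGES 1024                              /* illustrative pool size, in pages */

static RTL_BITMAP AllocationMap, EndMap;
static ULONG AllocationBits[POOL_PAGES / 32];
static ULONG EndBits[POOL_PAGES / 32];

VOID
PoolBitmapInit(VOID)
{
    RtlInitializeBitMap(&AllocationMap, AllocationBits, POOL_PAGES);
    RtlInitializeBitMap(&EndMap, EndBits, POOL_PAGES);
    RtlClearAllBits(&AllocationMap);
    RtlClearAllBits(&EndMap);
}

/* Returns the first page index of a free run of PageCount pages, or 0xFFFFFFFF */
ULONG
PoolBitmapAllocate(ULONG PageCount, ULONG Hint)
{
    ULONG i = RtlFindClearBitsAndSet(&AllocationMap, PageCount, Hint);
    if (i != 0xFFFFFFFF)
    {
        /* Mark where this run ends so a later free can recover its length */
        RtlSetBit(&EndMap, i + PageCount - 1);
    }
    return i;
}

VOID
PoolBitmapFree(ULONG StartIndex)
{
    /* Walk forward to the end-of-allocation marker, then release the whole run */
    ULONG End = StartIndex;
    while (!RtlTestBit(&EndMap, End)) End++;
    RtlClearBit(&EndMap, End);
    RtlClearBits(&AllocationMap, StartIndex, End - StartIndex + 1);
}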
Example #4
VOID
MiFlushPteList (
    IN PMMPTE_FLUSH_LIST PteFlushList,
    IN ULONG AllProcessors,
    IN MMPTE FillPte
    )

/*++

Routine Description:

    This routine flushes all the PTEs in the PTE flush list.
    If the list has overflowed, the entire TB is flushed.

Arguments:

    PteFlushList - Supplies an optional pointer to the list to be flushed.

    AllProcessors - Supplies TRUE if the flush occurs on all processors.

    FillPte - Supplies the PTE to fill with.

Return Value:

    None.

Environment:

    Kernel mode, PFN lock held.

--*/

{
    ULONG count;
    ULONG i = 0;

    ASSERT (ARGUMENT_PRESENT (PteFlushList));
    MM_PFN_LOCK_ASSERT ();

    count = PteFlushList->Count;

    if (count != 0) {
        if (count != 1) {
            if (count < MM_MAXIMUM_FLUSH_COUNT) {
                KeFlushMultipleTb (count,
                                   &PteFlushList->FlushVa[0],
                                   TRUE,
                                   (BOOLEAN)AllProcessors,
                                   &((PHARDWARE_PTE)PteFlushList->FlushPte[0]),
                                   FillPte.u.Flush);
            } else {

                //
                // Array has overflowed, flush the entire TB.
                //

                ExAcquireSpinLockAtDpcLevel ( &MmSystemSpaceLock );
                KeFlushEntireTb (TRUE, (BOOLEAN)AllProcessors);
                if (AllProcessors == TRUE) {
                    MmFlushCounter.u.List.NextEntry += 1;
                }
                ExReleaseSpinLockFromDpcLevel ( &MmSystemSpaceLock );
            }
        } else {
            KeFlushSingleTb (PteFlushList->FlushVa[0],
                             TRUE,
                             (BOOLEAN)AllProcessors,
                             (PHARDWARE_PTE)PteFlushList->FlushPte[0],
                             FillPte.u.Flush);
        }
        PteFlushList->Count = 0;
    }
    return;
}
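
On the caller side, PTE edits are batched into the flush list and the count simply saturates at MM_MAXIMUM_FLUSH_COUNT; once saturated there is no point recording further entries, since MiFlushPteList will fall back to KeFlushEntireTb anyway. A hedged sketch of such a recording helper, assuming the Count/FlushVa/FlushPte fields visible in the routine above:

VOID
MiRecordPteFlushSketch(IN PMMPTE_FLUSH_LIST PteFlushList,
                       IN PVOID VirtualAddress,
                       IN PMMPTE PointerPte)
{
    ULONG Index;

    if (PteFlushList->Count != MM_MAXIMUM_FLUSH_COUNT)
    {
        /* Still room: remember both the virtual address and the PTE that maps it */
        Index = PteFlushList->Count;
        PteFlushList->FlushVa[Index] = VirtualAddress;
        PteFlushList->FlushPte[Index] = PointerPte;
        PteFlushList->Count = Index + 1;
    }

    /* Once the list is full it stays at MM_MAXIMUM_FLUSH_COUNT and MiFlushPteList
       flushes the entire TB instead of the individual entries */
}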
Example #5
/*
 * @implemented
 */
PVOID
NTAPI
MmMapIoSpace(IN PHYSICAL_ADDRESS PhysicalAddress,
             IN SIZE_T NumberOfBytes,
             IN MEMORY_CACHING_TYPE CacheType)
{

    PFN_NUMBER Pfn;
    PFN_COUNT PageCount;
    PMMPTE PointerPte;
    PVOID BaseAddress;
    MMPTE TempPte;
    PMMPFN Pfn1 = NULL;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    BOOLEAN IsIoMapping;

    //
    // Must be called with a non-zero count
    //
    ASSERT(NumberOfBytes != 0);

    //
    // Make sure the upper bits are 0 if this system
    // can't describe more than 4 GB of physical memory.
    // FIXME: This doesn't respect PAE, but we currently don't
    // define a PAE build flag since there is no such build.
    //
#if !defined(_M_AMD64)
    ASSERT(PhysicalAddress.HighPart == 0);
#endif

    //
    // Normalize and validate the caching attributes
    //
    CacheType &= 0xFF;
    if (CacheType >= MmMaximumCacheType) return NULL;

    //
    // Calculate page count
    //
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(PhysicalAddress.LowPart,
                                               NumberOfBytes);

    //
    // Compute the PFN and check if it's a known I/O mapping
    // Also translate the cache attribute
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    Pfn1 = MiGetPfnEntry(Pfn);
    IsIoMapping = (Pfn1 == NULL) ? TRUE : FALSE;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    //
    // Now allocate system PTEs for the mapping, and get the VA
    //
    PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
    if (!PointerPte) return NULL;
    BaseAddress = MiPteToAddress(PointerPte);

    //
    // Check if this is uncached
    //
    if (CacheAttribute != MiCached)
    {
        //
        // Flush all caches
        //
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    //
    // Now compute the VA offset
    //
    BaseAddress = (PVOID)((ULONG_PTR)BaseAddress +
                          BYTE_OFFSET(PhysicalAddress.LowPart));

    //
    // Get the template and configure caching
    //
    TempPte = ValidKernelPte;
    switch (CacheAttribute)
    {
        case MiNonCached:

            //
            // Disable the cache
            //
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiCached:

            //
            // Leave defaults
            //
            break;

        case MiWriteCombined:

            //
            // We don't support write combining yet
            //
            ASSERT(FALSE);
            break;

        default:

            //
            // Should never happen
            //
            ASSERT(FALSE);
            break;
    }

    //
    // Sanity check and re-flush
    //
    Pfn = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    ASSERT((Pfn1 == MiGetPfnEntry(Pfn)) || (Pfn1 == NULL));
    KeFlushEntireTb(TRUE, TRUE);
    KeInvalidateAllCaches();

    //
    // Do the mapping
    //
    do
    {
        //
        // Write the PFN
        //
        TempPte.u.Hard.PageFrameNumber = Pfn++;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--PageCount);

    //
    // We're done!
    //
    return BaseAddress;
}
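
The two macros near the top of the routine carry the sub-page alignment through the mapping: ADDRESS_AND_SIZE_TO_SPAN_PAGES counts how many page frames the request touches, and BYTE_OFFSET restores the offset within the first page onto the returned system VA. A small worked sketch of that math (values are illustrative; 4 KB pages assumed):

VOID
ExampleSpanPagesMath(VOID)
{
    PHYSICAL_ADDRESS Pa;
    SIZE_T Bytes = 0x1234;
    PFN_COUNT Pages;
    ULONG Offset;

    Pa.QuadPart = 0x80000800;                                   /* hypothetical device address */

    /* 0x800 + 0x1234 = 0x1A34 bytes measured from the page base -> spans 2 pages */
    Pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Pa.LowPart, Bytes);

    /* 0x800: the byte offset added back onto the VA returned by the PTE reservation */
    Offset = BYTE_OFFSET(Pa.LowPart);

    DbgPrint("Pages=%lu Offset=0x%lx\n", Pages, Offset);
}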
Example #6
VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT1("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)PointerPte, 0, 0x20);
        }
    }

    /* Determine if it's an underrun- or overrun-detecting pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's non paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation, ensure that IRQL is not higher than DISPATCH_LEVEL */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 0, 0x31);
        }

        PoolType = NonPagedPool;
    }
    else
    {
        /* Paged allocation, ensure the special pool PTE marker matches */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 1, 0x31);
        }

        PoolType = PagedPool;
    }

    /* Get the number of bytes the caller requested by masking out the paged-pool flag */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;

    /* Check the memory before the user buffer when overrun detection is in use */
    if (Overruns)
    {
        /* Calculate how many bytes actually fit between the pointer and the end of the page */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (Header->BlockSize != b[0])
            {
                /* Bytes mismatch */
                KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)b, Header->BlockSize, 0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Non-pageable. Get the PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Lock PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pageable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Lock PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }

    /* Mark next PTE as invalid */
    PointerPte[1].u.Long = 0; //|= 8000;

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
}
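
MiSpecialPoolCheckPattern, called above, is the mirror image of the overrun loop near the top of the function: it walks the slack bytes on the other side of the user buffer and bug-checks if any of them no longer contain the fill byte kept in the header. A minimal hedged sketch of that idea for the underrun-detecting layout (header at the end of the page); the real routine's bounds and bugcheck parameters may differ:

static VOID
MiSpecialPoolCheckPatternSketch(IN PUCHAR P,
                                IN PPOOL_HEADER Header,
                                IN ULONG BytesRequested)
{
    PUCHAR b;

    /* In the underrun-detecting layout the buffer starts at the page base and the
       header sits at the very end of the page, so the slack lies between them */
    for (b = P + BytesRequested; b < (PUCHAR)Header; b++)
    {
        if (*b != Header->BlockSize)
        {
            /* The caller wrote past the end of the buffer (illustrative bugcheck arguments) */
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)b, Header->BlockSize, 0x24);
        }
    }
}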