Example #1
NTSTATUS
NTAPI
MmReleasePageMemoryConsumer(ULONG Consumer, PFN_NUMBER Page)
{
   PMM_ALLOCATION_REQUEST Request;
   PLIST_ENTRY Entry;
   KIRQL OldIrql;

   if (Page == 0)
   {
      DPRINT1("Tried to release page zero.\n");
      KeBugCheck(MEMORY_MANAGEMENT);
   }

   KeAcquireSpinLock(&AllocationListLock, &OldIrql);
   if (MmGetReferenceCountPage(Page) == 1)
   {
      (void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
      if (IsListEmpty(&AllocationListHead) || MmAvailablePages < MiMinimumAvailablePages)
      {
         KeReleaseSpinLock(&AllocationListLock, OldIrql);
         if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
         OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
         MmDereferencePage(Page);
         KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
      }
      else
      {
         Entry = RemoveHeadList(&AllocationListHead);
         Request = CONTAINING_RECORD(Entry, MM_ALLOCATION_REQUEST, ListEntry);
         KeReleaseSpinLock(&AllocationListLock, OldIrql);
         if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
         MiZeroPhysicalPage(Page);
         Request->Page = Page;
         KeSetEvent(&Request->Event, IO_NO_INCREMENT, FALSE);
      }
   }
   else
   {
      KeReleaseSpinLock(&AllocationListLock, OldIrql);
      if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
      OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
      MmDereferencePage(Page);
      KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
   }

   return(STATUS_SUCCESS);
}
Example #2
/*
 * @implemented
 */
VOID
NTAPI
IoAcquireVpbSpinLock(OUT PKIRQL Irql)
{
    /* Simply acquire the lock */
    *Irql = KeAcquireQueuedSpinLock(LockQueueIoVpbLock);
}
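
For reference, releasing the VPB lock is the mirror image of the acquire above; a minimal counterpart sketch (the IoReleaseVpbSpinLock name and single-parameter signature are assumed here, not taken from this listing):

/*
 * Counterpart sketch: restore the IRQL saved by IoAcquireVpbSpinLock.
 */
VOID
NTAPI
IoReleaseVpbSpinLock(IN KIRQL Irql)
{
    /* Simply release the lock, restoring the caller's IRQL */
    KeReleaseQueuedSpinLock(LockQueueIoVpbLock, Irql);
}
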
Example #3
/*
 * @halfplemented
 */
VOID
NTAPI
IopDecrementDeviceObjectRef(IN PDEVICE_OBJECT DeviceObject,
                            IN BOOLEAN UnloadIfUnused)
{
    KIRQL OldIrql;

    /* Acquire lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueIoDatabaseLock);
    ASSERT(DeviceObject->ReferenceCount > 0);

    if (--DeviceObject->ReferenceCount > 0)
    {
        KeReleaseQueuedSpinLock(LockQueueIoDatabaseLock, OldIrql);
        return;
    }

    /* Here, DO is not referenced any longer, check if we have to unload it */
    if (UnloadIfUnused || IoGetDevObjExtension(DeviceObject)->ExtensionFlags &
                          (DOE_UNLOAD_PENDING | DOE_DELETE_PENDING | DOE_REMOVE_PENDING))
    {
        /* Unload the driver */
        IopUnloadDevice(DeviceObject);
    }

    /* Release lock */
    KeReleaseQueuedSpinLock(LockQueueIoDatabaseLock, OldIrql);
}
Example #4
static
VOID
MmDeletePageTablePfn(PFN_NUMBER PageFrameNumber, ULONG Level)
{
    PMMPTE PageTable;
    KIRQL OldIrql;
    PMMPFN PfnEntry;
    ULONG i, NumberEntries;

    /* Check if this is a page table */
    if (Level > 0)
    {
        NumberEntries = (Level == 4) ? MiAddressToPxi(MmHighestUserAddress)+1 : 512;

        /* Map the page table in hyperspace */
        PageTable = (PMMPTE)MmCreateHyperspaceMapping(PageFrameNumber);

        /* Loop all page table entries */
        for (i = 0; i < NumberEntries; i++)
        {
            /* Check if the entry is valid */
            if (PageTable[i].u.Hard.Valid)
            {
                /* Recursively free the page that backs it */
                MmDeletePageTablePfn(PageTable[i].u.Hard.PageFrameNumber, Level - 1);
            }
        }

        /* Delete the hyperspace mapping */
        MmDeleteHyperspaceMapping(PageTable);
    }

    /* Check if this is a legacy allocation */
    PfnEntry = MiGetPfnEntry(PageFrameNumber);
    if (MI_IS_ROS_PFN(PfnEntry))
    {
        /* Free it using the legacy API */
        MmReleasePageMemoryConsumer(MC_SYSTEM, PageFrameNumber);
    }
    else
    {
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Free it using the ARM3 API */
        MI_SET_PFN_DELETED(PfnEntry);
        MiDecrementShareCount(PfnEntry, PageFrameNumber);

        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
}
Example #5
/*
 * @implemented
 */
ULONG
FASTCALL
IopInterlockedDecrementUlong(IN KSPIN_LOCK_QUEUE_NUMBER Queue,
                             IN PULONG Ulong)
{
    KIRQL Irql;
    ULONG OldValue;

    Irql = KeAcquireQueuedSpinLock(Queue);
    OldValue = (*Ulong)--;
    KeReleaseQueuedSpinLock(Queue, Irql);

    return OldValue;
}
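
A matching increment helper would follow the same acquire/modify/release shape; a sketch under that assumption (the IopInterlockedIncrementUlong name is illustrative, not taken from this listing):

ULONG
FASTCALL
IopInterlockedIncrementUlong(IN KSPIN_LOCK_QUEUE_NUMBER Queue,
                             IN PULONG Ulong)
{
    KIRQL Irql;
    ULONG OldValue;

    /* The queued lock serializes the read-modify-write */
    Irql = KeAcquireQueuedSpinLock(Queue);
    OldValue = (*Ulong)++;
    KeReleaseQueuedSpinLock(Queue, Irql);

    return OldValue;
}
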
Example #6
VOID
CcPostWorkQueue(
    IN PWORK_QUEUE_ENTRY WorkItem,
    IN PLIST_ENTRY WorkQueue)
{
    KIRQL OldIrql;
    PWORK_QUEUE_ITEM ThreadToSpawn;

    /* First of all, insert the item in the queue */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueWorkQueueLock);
    InsertTailList(WorkQueue, &WorkItem->WorkQueueLinks);

    /* Now, decide whether we have to spawn a new worker thread.
     * We will spawn a new one if:
     * - There's no throttle in action
     * - There's still at least one idle thread
     */
    ThreadToSpawn = NULL;
    if (!CcQueueThrottle && !IsListEmpty(&CcIdleWorkerThreadList))
    {
        PLIST_ENTRY ListEntry;

        /* Get the idle thread */
        ListEntry = RemoveHeadList(&CcIdleWorkerThreadList);
        ThreadToSpawn = CONTAINING_RECORD(ListEntry, WORK_QUEUE_ITEM, List);

        /* We're going to have one more! */
        CcNumberActiveWorkerThreads += 1;
    }

    KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, OldIrql);

    /* If we have a thread to spawn, do it! */
    if (ThreadToSpawn != NULL)
    {
        /* We NULLify it to be consistent with initialization */
        ThreadToSpawn->List.Flink = NULL;
        ExQueueWorkItem(ThreadToSpawn, CriticalWorkQueue);
    }
}
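
A typical caller allocates a WORK_QUEUE_ENTRY, fills in the function code, and posts it; a minimal sketch assembled from the surrounding examples (CcQueueReadAheadSketch is a hypothetical caller name; the lookaside list, the express queue, and the Parameters.Read.FileObject field appear in Examples #7 and #17):

VOID
CcQueueReadAheadSketch(IN PFILE_OBJECT FileObject)
{
    PWORK_QUEUE_ENTRY WorkItem;

    /* Allocate a work item; bail out quietly if the lookaside list is empty */
    WorkItem = ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList);
    if (WorkItem == NULL)
    {
        return;
    }

    /* Ask a worker thread to perform read-ahead on this file; Example #17
       drains read-ahead items from the express queue first */
    WorkItem->Function = ReadAhead;
    WorkItem->Parameters.Read.FileObject = FileObject;
    CcPostWorkQueue(WorkItem, &CcExpressWorkQueue);
}
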
Example #7
/*
 * @implemented
 */
NTSTATUS
NTAPI
CcWaitForCurrentLazyWriterActivity (
    VOID)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    PWORK_QUEUE_ENTRY WorkItem;

    /* Allocate a work item */
    WorkItem = ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList);
    if (WorkItem == NULL)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    /* We want lazy writer to set our event */
    WorkItem->Function = SetDone;
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);
    WorkItem->Parameters.Event.Event = &WaitEvent;

    /* Use the post tick queue */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    InsertTailList(&CcPostTickWorkQueue, &WorkItem->WorkQueueLinks);

    /* Inform the lazy writer it will have to handle the post tick queue */
    LazyWriter.OtherWork = TRUE;
    /* And if it's not running, queue a lazy writer run
     * and start it NOW; we want the response now
     */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(TRUE);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* And now, wait until lazy writer replies */
    return KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, NULL);
}
Example #8
PMMPTE
NTAPI
MiReserveAlignedSystemPtes(IN ULONG NumberOfPtes,
                           IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType,
                           IN ULONG Alignment)
{
    KIRQL OldIrql;
    PMMPTE PreviousPte, NextPte, ReturnPte;
    ULONG ClusterSize;

    //
    // Sanity check
    //
    ASSERT(Alignment <= PAGE_SIZE);

    //
    // Acquire the System PTE lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueSystemSpaceLock);

    //
    // Find the last cluster in the list that doesn't contain enough PTEs
    //
    PreviousPte = &MmFirstFreeSystemPte[SystemPtePoolType];

    while (PreviousPte->u.List.NextEntry != MM_EMPTY_PTE_LIST)
    {
        //
        // Get the next cluster and its size
        //
        NextPte = MmSystemPteBase + PreviousPte->u.List.NextEntry;
        ClusterSize = MI_GET_CLUSTER_SIZE(NextPte);

        //
        // Check if this cluster contains enough PTEs
        //
        if (NumberOfPtes <= ClusterSize)
            break;

        //
        // On to the next cluster
        //
        PreviousPte = NextPte;
    }

    //
    // Make sure we didn't reach the end of the cluster list
    //
    if (PreviousPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        //
        // Release the System PTE lock and return failure
        //
        KeReleaseQueuedSpinLock(LockQueueSystemSpaceLock, OldIrql);
        return NULL;
    }

    //
    // Unlink the cluster
    //
    PreviousPte->u.List.NextEntry = NextPte->u.List.NextEntry;

    //
    // Check if the reservation spans the whole cluster
    //
    if (ClusterSize == NumberOfPtes)
    {
        //
        // Return the first PTE of this cluster
        //
        ReturnPte = NextPte;

        //
        // Zero the cluster
        //
        if (NextPte->u.List.OneEntry == 0)
        {
            NextPte->u.Long = 0;
            NextPte++;
        }
        NextPte->u.Long = 0;
    }
    else
    {
        //
        // Divide the cluster into two parts
        //
        ClusterSize -= NumberOfPtes;
        ReturnPte = NextPte + ClusterSize;

        //
        // Set the size of the first cluster, zero the second if needed
        //
        if (ClusterSize == 1)
        {
            NextPte->u.List.OneEntry = 1;
            ReturnPte->u.Long = 0;
        }
        else
        {
            NextPte++;
            NextPte->u.List.NextEntry = ClusterSize;
        }

        //
        // Step through the cluster list to find out where to insert the first
        //
        PreviousPte = &MmFirstFreeSystemPte[SystemPtePoolType];

        while (PreviousPte->u.List.NextEntry != MM_EMPTY_PTE_LIST)
        {
            //
            // Get the next cluster
            //
            NextPte = MmSystemPteBase + PreviousPte->u.List.NextEntry;

            //
            // Check if the cluster to insert is smaller or of equal size
            //
            if (ClusterSize <= MI_GET_CLUSTER_SIZE(NextPte))
                break;

            //
            // On to the next cluster
            //
            PreviousPte = NextPte;
        }

        //
        // Retrieve the first cluster and link it back into the cluster list
        //
        NextPte = ReturnPte - ClusterSize;

        NextPte->u.List.NextEntry = PreviousPte->u.List.NextEntry;
        PreviousPte->u.List.NextEntry = NextPte - MmSystemPteBase;
    }

    //
    // Decrease availability
    //
    MmTotalFreeSystemPtes[SystemPtePoolType] -= NumberOfPtes;

    //
    // Release the System PTE lock
    //
    KeReleaseQueuedSpinLock(LockQueueSystemSpaceLock, OldIrql);

    //
    // Flush the TLB
    //
    KeFlushProcessTb();

    //
    // Return the reserved PTEs
    //
    return ReturnPte;
}
Example #9
VOID
NTAPI
MiReleaseSystemPtes(IN PMMPTE StartingPte,
                    IN ULONG NumberOfPtes,
                    IN MMSYSTEM_PTE_POOL_TYPE SystemPtePoolType)
{
    KIRQL OldIrql;
    ULONG ClusterSize;
    PMMPTE PreviousPte, NextPte, InsertPte;

    //
    // Check to make sure the PTE address is within bounds
    //
    ASSERT(NumberOfPtes != 0);
    ASSERT(StartingPte >= MmSystemPtesStart[SystemPtePoolType]);
    ASSERT(StartingPte + NumberOfPtes - 1 <= MmSystemPtesEnd[SystemPtePoolType]);

    //
    // Zero PTEs
    //
    RtlZeroMemory(StartingPte, NumberOfPtes * sizeof(MMPTE));

    //
    // Acquire the System PTE lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueSystemSpaceLock);

    //
    // Increase availability
    //
    MmTotalFreeSystemPtes[SystemPtePoolType] += NumberOfPtes;

    //
    // Step through the cluster list to find where to insert the PTEs
    //
    PreviousPte = &MmFirstFreeSystemPte[SystemPtePoolType];
    InsertPte = NULL;

    while (PreviousPte->u.List.NextEntry != MM_EMPTY_PTE_LIST)
    {
        //
        // Get the next cluster and its size
        //
        NextPte = MmSystemPteBase + PreviousPte->u.List.NextEntry;
        ClusterSize = MI_GET_CLUSTER_SIZE(NextPte);

        //
        // Check if this cluster is adjacent to the PTEs being released
        //
        if ((NextPte + ClusterSize == StartingPte) ||
                (StartingPte + NumberOfPtes == NextPte))
        {
            //
            // Add the PTEs in the cluster to the PTEs being released
            //
            NumberOfPtes += ClusterSize;

            if (NextPte < StartingPte)
                StartingPte = NextPte;

            //
            // Unlink this cluster and zero it
            //
            PreviousPte->u.List.NextEntry = NextPte->u.List.NextEntry;

            if (NextPte->u.List.OneEntry == 0)
            {
                NextPte->u.Long = 0;
                NextPte++;
            }
            NextPte->u.Long = 0;

            //
            // Invalidate the previously found insertion location, if any
            //
            InsertPte = NULL;
        }
        else
        {
            //
            // Check if the insertion location is right before this cluster
            //
            if ((InsertPte == NULL) && (NumberOfPtes <= ClusterSize))
                InsertPte = PreviousPte;

            //
            // On to the next cluster
            //
            PreviousPte = NextPte;
        }
    }

    //
    // If no insertion location was found, use the tail of the list
    //
    if (InsertPte == NULL)
        InsertPte = PreviousPte;

    //
    // Create a new cluster using the PTEs being released
    //
    if (NumberOfPtes != 1)
    {
        StartingPte->u.List.OneEntry = 0;

        NextPte = StartingPte + 1;
        NextPte->u.List.NextEntry = NumberOfPtes;
    }
    else
        StartingPte->u.List.OneEntry = 1;

    //
    // Link the new cluster into the cluster list at the insertion location
    //
    StartingPte->u.List.NextEntry = InsertPte->u.List.NextEntry;
    InsertPte->u.List.NextEntry = StartingPte - MmSystemPteBase;

    //
    // Release the System PTE lock
    //
    KeReleaseQueuedSpinLock(LockQueueSystemSpaceLock, OldIrql);
}
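
Taken together with Example #8, the reserve/release pair brackets a temporary system mapping; a minimal usage fragment (the two-argument MiReserveSystemPtes call is the one used in Example #11, the surrounding steps are illustrative):

    PMMPTE SystemPte;

    /* Reserve one PTE from the system PTE space */
    SystemPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(SystemPte != NULL);

    /* ... build a valid PTE in *SystemPte and use MiPteToAddress(SystemPte) ... */

    /* Hand the PTE back to the free cluster list */
    MiReleaseSystemPtes(SystemPte, 1, SystemPteSpace);
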
Example #10
NTSTATUS
NTAPI
MmNotPresentFaultCachePage (
    _In_ PMMSUPPORT AddressSpace,
    _In_ MEMORY_AREA* MemoryArea,
    _In_ PVOID Address,
    _In_ BOOLEAN Locked,
    _Inout_ PMM_REQUIRED_RESOURCES Required)
{
    NTSTATUS Status;
    PVOID PAddress;
    ULONG Consumer;
    PMM_SECTION_SEGMENT Segment;
    LARGE_INTEGER FileOffset, TotalOffset;
    ULONG_PTR Entry;
    ULONG Attributes;
    PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
    KIRQL OldIrql;

    DPRINT("Not Present: %p %p (%p-%p)\n",
           AddressSpace,
           Address,
           MemoryArea->StartingAddress,
           MemoryArea->EndingAddress);

    /*
     * There is a window between taking the page fault and locking the
     * address space when another thread could load the page so we check
     * that.
     */
    if (MmIsPagePresent(Process, Address))
    {
        DPRINT("Done\n");
        return STATUS_SUCCESS;
    }

    PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
    TotalOffset.QuadPart = (ULONG_PTR)PAddress -
                           (ULONG_PTR)MemoryArea->StartingAddress;

    Segment = MemoryArea->Data.SectionData.Segment;

    TotalOffset.QuadPart += MemoryArea->Data.SectionData.ViewOffset.QuadPart;
    FileOffset = TotalOffset;

    //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
    Consumer = MC_CACHE;

    if (Segment->FileObject)
    {
        DPRINT("FileName %wZ\n", &Segment->FileObject->FileName);
    }

    DPRINT("Total Offset %08x%08x\n", TotalOffset.HighPart, TotalOffset.LowPart);

    /* Lock the segment */
    MmLockSectionSegment(Segment);

    /* Get the entry corresponding to the offset within the section */
    Entry = MmGetPageEntrySectionSegment(Segment, &TotalOffset);

    Attributes = PAGE_READONLY;

    if (Required->State && Required->Page[0])
    {
        DPRINT("Have file and page, set page %x in section @ %x #\n",
               Required->Page[0],
               TotalOffset.LowPart);

        if (Required->SwapEntry)
            MmSetSavedSwapEntryPage(Required->Page[0], Required->SwapEntry);

        if (Required->State & 2)
        {
            DPRINT("Set in section @ %x\n", TotalOffset.LowPart);
            Status = MmSetPageEntrySectionSegment(Segment,
                                                  &TotalOffset,
                                                  Entry = MAKE_PFN_SSE(Required->Page[0]));
            if (!NT_SUCCESS(Status))
            {
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return STATUS_MM_RESTART_OPERATION;
        }
        else
        {
            DPRINT("Set %x in address space @ %p\n", Required->Page[0], Address);
            Status = MmCreateVirtualMapping(Process,
                                            Address,
                                            Attributes,
                                            Required->Page,
                                            1);
            if (NT_SUCCESS(Status))
            {
                MmInsertRmap(Required->Page[0], Process, Address);
            }
            else
            {
                /* Drop the reference for our address space ... */
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            DPRINTC("XXX Set Event %x\n", Status);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return Status;
        }
    }
    else if (MM_IS_WAIT_PTE(Entry))
    {
        // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
        // ask the fault handler to wait until we should continue.  Rather
        // than recopy this boilerplate code everywhere, we just ask them
        // to wait.
        MmUnlockSectionSegment(Segment);
        return STATUS_SUCCESS + 1;
    }
    else if (Entry)
    {
        PFN_NUMBER Page = PFN_FROM_SSE(Entry);
        DPRINT("Take reference to page %x #\n", Page);

        if (MiGetPfnEntry(Page) == NULL)
        {
            DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
                    Page,
                    Entry,
                    Segment,
                    TotalOffset.HighPart,
                    TotalOffset.LowPart);
            KeBugCheck(CACHE_MANAGER);
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        MmReferencePage(Page);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        Status = MmCreateVirtualMapping(Process, Address, Attributes, &Page, 1);
        if (NT_SUCCESS(Status))
        {
            MmInsertRmap(Page, Process, Address);
        }
        DPRINT("XXX Set Event %x\n", Status);
        MiSetPageEvent(Process, Address);
        MmUnlockSectionSegment(Segment);
        DPRINT("Status %x\n", Status);
        return Status;
    }
    else
    {
        DPRINT("Get page into section\n");
        /*
         * If the entry is zero (and it can't change because we have
         * locked the segment) then we need to load the page.
         */
        //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
        Required->State = 2;
        Required->Context = Segment->FileObject;
        Required->Consumer = Consumer;
        Required->FileOffset = FileOffset;
        Required->Amount = PAGE_SIZE;
        Required->DoAcquisition = MiReadFilePage;

        MmSetPageEntrySectionSegment(Segment,
                                     &TotalOffset,
                                     MAKE_SWAP_SSE(MM_WAIT_ENTRY));

        MmUnlockSectionSegment(Segment);
        return STATUS_MORE_PROCESSING_REQUIRED;
    }
    ASSERT(FALSE);
    return STATUS_ACCESS_VIOLATION;
}
Example #11
File: page.c  Project: GYGit/reactos
BOOLEAN
NTAPI
MmCreateProcessAddressSpace(IN ULONG MinWs,
                            IN PEPROCESS Process,
                            OUT PULONG_PTR DirectoryTableBase)
{
    KIRQL OldIrql;
    PFN_NUMBER TableBasePfn, HyperPfn, HyperPdPfn, HyperPtPfn, WorkingSetPfn;
    PMMPTE SystemPte;
    MMPTE TempPte, PdePte;
    ULONG TableIndex;
    PMMPTE PageTablePointer;

    /* Make sure we don't already have a page directory setup */
    ASSERT(Process->Pcb.DirectoryTableBase[0] == 0);
    ASSERT(Process->Pcb.DirectoryTableBase[1] == 0);
    ASSERT(Process->WorkingSetPage == 0);

    /* Choose a process color */
    Process->NextPageColor = (USHORT)RtlRandom(&MmProcessColorSeed);

    /* Setup the hyperspace lock */
    KeInitializeSpinLock(&Process->HyperSpaceLock);

    /* Lock PFN database */
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Get a page for the table base and one for hyper space. The PFNs for
       these pages will be initialized in MmInitializeProcessAddressSpace,
       when we are already attached to the process. */
    TableBasePfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    HyperPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    HyperPdPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    HyperPtPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));
    WorkingSetPfn = MiRemoveAnyPage(MI_GET_NEXT_PROCESS_COLOR(Process));

    /* Release PFN lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    /* Zero pages */ /// FIXME:
    MiZeroPhysicalPage(HyperPfn);
    MiZeroPhysicalPage(WorkingSetPfn);

    /* Set the base directory pointers */
    Process->WorkingSetPage = WorkingSetPfn;
    DirectoryTableBase[0] = TableBasePfn << PAGE_SHIFT;
    DirectoryTableBase[1] = HyperPfn << PAGE_SHIFT;

    /* Get a PTE to map the page directory */
    SystemPte = MiReserveSystemPtes(1, SystemPteSpace);
    ASSERT(SystemPte != NULL);

    /* Get its address */
    PageTablePointer = MiPteToAddress(SystemPte);

    /* Build the PTE for the page directory and map it */
    PdePte = ValidKernelPte;
    PdePte.u.Hard.PageFrameNumber = TableBasePfn;
    *SystemPte = PdePte;

/// architecture specific
    //MiInitializePageDirectoryForProcess(

    /* Copy the kernel mappings and zero out the rest */
    TableIndex = PXE_PER_PAGE / 2;
    RtlZeroMemory(PageTablePointer, TableIndex * sizeof(MMPTE));
    RtlCopyMemory(PageTablePointer + TableIndex,
                  MiAddressToPxe(0) + TableIndex,
                  PAGE_SIZE - TableIndex * sizeof(MMPTE));

    /* Sanity check */
    ASSERT(MiAddressToPxi(MmHyperSpaceEnd) >= TableIndex);

    /* Setup a PTE for the page directory mappings */
    TempPte = ValidKernelPte;

    /* Update the self mapping of the PML4 */
    TableIndex = MiAddressToPxi((PVOID)PXE_SELFMAP);
    TempPte.u.Hard.PageFrameNumber = TableBasePfn;
    PageTablePointer[TableIndex] = TempPte;

    /* Write the PML4 entry for hyperspace */
    TableIndex = MiAddressToPxi((PVOID)HYPER_SPACE);
    TempPte.u.Hard.PageFrameNumber = HyperPfn;
    PageTablePointer[TableIndex] = TempPte;

    /* Map the hyperspace PDPT to the system PTE */
    PdePte.u.Hard.PageFrameNumber = HyperPfn;
    *SystemPte = PdePte;
    __invlpg(PageTablePointer);

    /* Write the hyperspace entry for the first PD */
    TempPte.u.Hard.PageFrameNumber = HyperPdPfn;
    PageTablePointer[0] = TempPte;

    /* Map the hyperspace PD to the system PTE */
    PdePte.u.Hard.PageFrameNumber = HyperPdPfn;
    *SystemPte = PdePte;
    __invlpg(PageTablePointer);

    /* Write the hyperspace entry for the first PT */
    TempPte.u.Hard.PageFrameNumber = HyperPtPfn;
    PageTablePointer[0] = TempPte;

    /* Map the hyperspace PT to the system PTE */
    PdePte.u.Hard.PageFrameNumber = HyperPtPfn;
    *SystemPte = PdePte;
    __invlpg(PageTablePointer);

    /* Write the hyperspace PTE for the working set list index */
    TempPte.u.Hard.PageFrameNumber = WorkingSetPfn;
    TableIndex = MiAddressToPti(MmWorkingSetList);
    PageTablePointer[TableIndex] = TempPte;

/// end architecture specific

    /* Release the system PTE */
    MiReleaseSystemPtes(SystemPte, 1, SystemPteSpace);

    /* Switch to phase 1 initialization */
    ASSERT(Process->AddressSpaceInitialized == 0);
    Process->AddressSpaceInitialized = 1;

    return TRUE;
}
Example #12
VOID
NTAPI
MmFreeSpecialPool(PVOID P)
{
    PMMPTE PointerPte;
    PPOOL_HEADER Header;
    BOOLEAN Overruns = FALSE;
    KIRQL Irql = KeGetCurrentIrql();
    POOL_TYPE PoolType;
    ULONG BytesRequested, BytesReal = 0;
    ULONG PtrOffset;
    PUCHAR b;
    PMI_FREED_SPECIAL_POOL FreedHeader;
    LARGE_INTEGER TickCount;
    PMMPFN Pfn;

    DPRINT1("MmFreeSpecialPool(%p)\n", P);

    /* Get the PTE */
    PointerPte = MiAddressToPte(P);

    /* Check if it's valid */
    if (PointerPte->u.Hard.Valid == 0)
    {
        /* Bugcheck if it has NOACCESS or 0 set as protection */
        if (PointerPte->u.Soft.Protection == MM_NOACCESS ||
            !PointerPte->u.Soft.Protection)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)PointerPte, 0, 0x20);
        }
    }

    /* Determine whether it's an underrun or overrun pool pointer */
    PtrOffset = (ULONG)((ULONG_PTR)P & (PAGE_SIZE - 1));
    if (PtrOffset)
    {
        /* Pool catches overruns */
        Header = PAGE_ALIGN(P);
        Overruns = TRUE;
    }
    else
    {
        /* Pool catches underruns */
        Header = (PPOOL_HEADER)((PUCHAR)PAGE_ALIGN(P) + PAGE_SIZE - sizeof(POOL_HEADER));
    }

    /* Check if it's non paged pool */
    if ((Header->Ulong1 & SPECIAL_POOL_PAGED) == 0)
    {
        /* Non-paged allocation, ensure that IRQL is not higher than DISPATCH */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_NONPAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 0, 0x31);
        }

        PoolType = NonPagedPool;
    }
    else
    {
        /* Paged allocation, ensure that IRQL is not higher than DISPATCH */
        ASSERT((PointerPte + 1)->u.Soft.PageFileHigh == SPECIAL_POOL_PAGED_PTE);
        if (Irql > DISPATCH_LEVEL)
        {
            KeBugCheckEx(BAD_POOL_HEADER, Irql, (ULONG_PTR)P, 1, 0x31);
        }

        PoolType = PagedPool;
    }

    /* Get amount of bytes user requested to be allocated by clearing out the paged mask */
    BytesRequested = (Header->Ulong1 & ~SPECIAL_POOL_PAGED) & 0xFFFF;

    /* Check memory before the allocated user buffer in case of overruns detection */
    if (Overruns)
    {
        /* Calculate the real placement of the buffer */
        BytesReal = PAGE_SIZE - PtrOffset;

        /* If they mismatch, it's unrecoverable */
        if (BytesRequested > BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x21);
        }

        if (BytesRequested + sizeof(POOL_HEADER) < BytesReal)
        {
            KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, BytesRequested, BytesReal, 0x22);
        }

        /* Actually check the memory pattern */
        for (b = (PUCHAR)(Header + 1); b < (PUCHAR)P; b++)
        {
            if (Header->BlockSize != b[0])
            {
                /* Bytes mismatch */
                KeBugCheckEx(BAD_POOL_HEADER, (ULONG_PTR)P, (ULONG_PTR)b, Header->BlockSize, 0x23);
            }
        }
    }

    /* Check the memory pattern after the user buffer */
    MiSpecialPoolCheckPattern(P, Header);

    /* Fill the freed header */
    KeQueryTickCount(&TickCount);
    FreedHeader = (PMI_FREED_SPECIAL_POOL)PAGE_ALIGN(P);
    FreedHeader->Signature = 0x98764321;
    FreedHeader->TickCount = TickCount.LowPart;
    FreedHeader->NumberOfBytesRequested = BytesRequested;
    FreedHeader->Pagable = PoolType;
    FreedHeader->VirtualAddress = P;
    FreedHeader->Thread = PsGetCurrentThread();
    /* TODO: Fill StackPointer and StackBytes */
    FreedHeader->StackPointer = NULL;
    FreedHeader->StackBytes = 0;

    if (PoolType == NonPagedPool)
    {
        /* Non-pageable. Get the PFN element corresponding to the PTE */
        Pfn = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);

        /* Lock PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Delete this PFN */
        MI_SET_PFN_DELETED(Pfn);

        /* Decrement share count of this PFN */
        MiDecrementShareCount(Pfn, PointerPte->u.Hard.PageFrameNumber);

        /* Flush the TLB */
        //FIXME: Use KeFlushSingleTb() instead
        KeFlushEntireTb(TRUE, TRUE);
    }
    else
    {
        /* Pageable. Delete that virtual address */
        MiDeleteSystemPageableVm(PointerPte, 1, 0, NULL);

        /* Lock PFN database */
        ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
        Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }

    /* Mark next PTE as invalid */
    PointerPte[1].u.Long = 0; //|= 8000;

    /* Make sure that the last entry is really the last one */
    ASSERT(MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST);

    /* Update the current last PTE next pointer */
    MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase;

    /* PointerPte becomes the new last PTE */
    PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST;
    MiSpecialPoolLastPte = PointerPte;

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
}
Example #13
VOID
NTAPI
INIT_FUNCTION
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* Total size of the paged pool minus the allocated size, is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial state high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low in free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}
Example #14
File: mdlsup.c  Project: RPG-7/reactos
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
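
The documented pairing for this routine is MmAllocatePagesForMdl followed by MmFreePagesFromMdl and then ExFreePool on the MDL itself; a minimal usage fragment (the request size and address limits are illustrative):

    PMDL Mdl;
    PHYSICAL_ADDRESS LowAddress, HighAddress, SkipBytes;

    /* Any physical page will do */
    LowAddress.QuadPart = 0;
    HighAddress.QuadPart = ~0ULL;
    SkipBytes.QuadPart = 0;

    /* Ask for four pages described by an MDL */
    Mdl = MmAllocatePagesForMdl(LowAddress, HighAddress, SkipBytes, 4 * PAGE_SIZE);
    if (Mdl != NULL)
    {
        /* ... map and use the pages ... */

        /* Return the pages, then free the MDL storage itself */
        MmFreePagesFromMdl(Mdl);
        ExFreePool(Mdl);
    }
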
Example #15
VOID NTAPI
MiBalancerThread(PVOID Unused)
{
   PVOID WaitObjects[2];
   NTSTATUS Status;
   ULONG i;

   WaitObjects[0] = &MiBalancerEvent;
   WaitObjects[1] = &MiBalancerTimer;

   while (1)
   {
      Status = KeWaitForMultipleObjects(2,
                                        WaitObjects,
                                        WaitAny,
                                        Executive,
                                        KernelMode,
                                        FALSE,
                                        NULL,
                                        NULL);

      if (Status == STATUS_WAIT_0 || Status == STATUS_WAIT_1)
      {
        ULONG InitialTarget = 0;

#if (_MI_PAGING_LEVELS == 2)
        if (!MiIsBalancerThread())
        {
            /* Clean up the unused PDEs */
            ULONG_PTR Address;
            PEPROCESS Process = PsGetCurrentProcess();

            /* Acquire PFN lock */
            KIRQL OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            PMMPDE pointerPde;
            for (Address = (ULONG_PTR)MI_LOWEST_VAD_ADDRESS;
                 Address < (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS;
                 Address += (PAGE_SIZE * PTE_COUNT))
            {
                if (MiQueryPageTableReferences((PVOID)Address) == 0)
                {
                    pointerPde = MiAddressToPde(Address);
                    if (pointerPde->u.Hard.Valid)
                        MiDeletePte(pointerPde, MiPdeToPte(pointerPde), Process, NULL);
                    ASSERT(pointerPde->u.Hard.Valid == 0);
                }
            }
            /* Release lock */
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
        }
#endif
          do
          {
              ULONG OldTarget = InitialTarget;

              /* Trim each consumer */
              for (i = 0; i < MC_MAXIMUM; i++)
              {
                  InitialTarget = MiTrimMemoryConsumer(i, InitialTarget);
              }

              /* No pages left to swap! */
              if (InitialTarget != 0 &&
                  InitialTarget == OldTarget)
              {
                  /* Game over */
                  KeBugCheck(NO_PAGES_AVAILABLE);
              }
          } while (InitialTarget != 0);
      }
      else
      {
         DPRINT1("KeWaitForMultipleObjects failed, status = %x\n", Status);
         KeBugCheck(MEMORY_MANAGEMENT);
      }
   }
}
Example #16
VOID
CcLazyWriteScan(VOID)
{
    ULONG Target;
    KIRQL OldIrql;
    PLIST_ENTRY ListEntry;
    LIST_ENTRY ToPost;
    PWORK_QUEUE_ENTRY WorkItem;

    /* Do we have entries to queue after we're done? */
    InitializeListHead(&ToPost);
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (LazyWriter.OtherWork)
    {
        while (!IsListEmpty(&CcPostTickWorkQueue))
        {
            ListEntry = RemoveHeadList(&CcPostTickWorkQueue);
            WorkItem = CONTAINING_RECORD(ListEntry, WORK_QUEUE_ENTRY, WorkQueueLinks);
            InsertTailList(&ToPost, &WorkItem->WorkQueueLinks);
        }
        LazyWriter.OtherWork = FALSE;
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* Our target is one-eighth of the dirty pages */
    Target = CcTotalDirtyPages / 8;
    if (Target != 0)
    {
        /* There is stuff to flush, schedule a write-behind operation */

        /* Allocate a work item */
        WorkItem = ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList);
        if (WorkItem != NULL)
        {
            WorkItem->Function = WriteBehind;
            CcPostWorkQueue(WorkItem, &CcRegularWorkQueue);
        }
    }

    /* Post items that were due for end of run */
    while (!IsListEmpty(&ToPost))
    {
        ListEntry = RemoveHeadList(&ToPost);
        WorkItem = CONTAINING_RECORD(ListEntry, WORK_QUEUE_ENTRY, WorkQueueLinks);
        CcPostWorkQueue(WorkItem, &CcRegularWorkQueue);
    }

    /* If we have deferred writes, try them now! */
    if (!IsListEmpty(&CcDeferredWrites))
    {
        CcPostDeferredWrites();
        /* Immediately reschedule a lazy writer run
         * and keep us active to have a short idle delay
         */
        CcScheduleLazyWriteScan(FALSE);
    }
    else
    {
        /* We're no longer active */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        LazyWriter.ScanActive = FALSE;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
}
Example #17
VOID
NTAPI
CcWorkerThread(
    IN PVOID Parameter)
{
    KIRQL OldIrql;
    BOOLEAN DropThrottle, WritePerformed;
    PWORK_QUEUE_ITEM Item;
#if DBG
    PIRP TopLevel;
#endif

    /* Get back our thread item */
    Item = Parameter;
    /* And by default, don't touch throttle */
    DropThrottle = FALSE;
    /* No write performed */
    WritePerformed =  FALSE;

#if DBG
    /* Top level IRP should be clean when started
     * Save it to catch buggy drivers (or bugs!)
     */
    TopLevel = IoGetTopLevelIrp();
    if (TopLevel != NULL)
    {
        DPRINT1("(%p) TopLevel IRP for this thread: %p\n", PsGetCurrentThread(), TopLevel);
    }
#endif

    /* Loop till we have jobs */
    while (TRUE)
    {
        PWORK_QUEUE_ENTRY WorkItem;

        /* Lock queues */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueWorkQueueLock);

        /* If we have to touch throttle, reset it now! */
        if (DropThrottle)
        {
            CcQueueThrottle = FALSE;
            DropThrottle = FALSE;
        }

        /* Check first if we have read ahead to do */
        if (IsListEmpty(&CcExpressWorkQueue))
        {
            /* If not, check regular queue */
            if (IsListEmpty(&CcRegularWorkQueue))
            {
                break;
            }
            else
            {
                WorkItem = CONTAINING_RECORD(CcRegularWorkQueue.Flink, WORK_QUEUE_ENTRY, WorkQueueLinks);
            }
        }
        else
        {
            WorkItem = CONTAINING_RECORD(CcExpressWorkQueue.Flink, WORK_QUEUE_ENTRY, WorkQueueLinks);
        }

        /* Get our work item. If someone is waiting for us to finish
         * and we're not the only thread in the queue, then quit running
         * to let the others work, and throttle so that no one starts
         * until the current activity is over
         */
        if (WorkItem->Function == SetDone && CcNumberActiveWorkerThreads > 1)
        {
            CcQueueThrottle = TRUE;
            break;
        }

        /* Otherwise, remove current entry */
        RemoveEntryList(&WorkItem->WorkQueueLinks);
        KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, OldIrql);

        /* And handle it */
        switch (WorkItem->Function)
        {
            case ReadAhead:
                CcPerformReadAhead(WorkItem->Parameters.Read.FileObject);
                break;

            case WriteBehind:
                PsGetCurrentThread()->MemoryMaker = 1;
                CcWriteBehind();
                PsGetCurrentThread()->MemoryMaker = 0;
                WritePerformed = TRUE;
                break;

            case LazyScan:
                CcLazyWriteScan();
                break;

            case SetDone:
                KeSetEvent(WorkItem->Parameters.Event.Event, IO_NO_INCREMENT, FALSE);
                DropThrottle = TRUE;
                break;

            default:
                DPRINT1("Ignored item: %p (%d)\n", WorkItem, WorkItem->Function);
                break;
        }

        /* And release the item */
        ExFreeToNPagedLookasideList(&CcTwilightLookasideList, WorkItem);
    }

    /* Our thread is available again */
    InsertTailList(&CcIdleWorkerThreadList, &Item->List);
    /* One less worker */
    --CcNumberActiveWorkerThreads;
    KeReleaseQueuedSpinLock(LockQueueWorkQueueLock, OldIrql);

    /* If there are pending write operations and we have at least 20 dirty pages */
    if (!IsListEmpty(&CcDeferredWrites) && CcTotalDirtyPages >= 20)
    {
        /* And if we performed a write operation previously, then
         * stress the system a bit and reschedule a scan to find
         * stuff to write
         */
        if (WritePerformed)
        {
            CcLazyWriteScan();
        }
    }

#if DBG
    /* Top level shouldn't have changed */
    if (TopLevel != IoGetTopLevelIrp())
    {
        DPRINT1("(%p) Mismatching TopLevel: %p, %p\n", PsGetCurrentThread(), TopLevel, IoGetTopLevelIrp());
    }
#endif
}
Example #18
NTSTATUS
NTAPI
MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
                            PPFN_NUMBER AllocatedPage)
{
   ULONG OldUsed;
   PFN_NUMBER Page;
   KIRQL OldIrql;

   /*
    * Make sure we don't exceed our individual target.
    */
   OldUsed = InterlockedIncrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
   if (OldUsed >= (MiMemoryConsumers[Consumer].PagesTarget - 1) &&
         !MiIsBalancerThread())
   {
      if (!CanWait)
      {
         (void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
         return(STATUS_NO_MEMORY);
      }
      MiTrimMemoryConsumer(Consumer);
   }

   /*
    * Always allocate memory for the non-paged pool and for the pager thread.
    */
   if ((Consumer == MC_SYSTEM) || MiIsBalancerThread())
   {
      OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
      Page = MmAllocPage(Consumer);
      KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
      if (Page == 0)
      {
         KeBugCheck(NO_PAGES_AVAILABLE);
      }
      *AllocatedPage = Page;
      if (MmAvailablePages <= MiMinimumAvailablePages &&
            MiBalancerThreadHandle != NULL)
      {
         KeSetEvent(&MiBalancerEvent, IO_NO_INCREMENT, FALSE);
      }
      return(STATUS_SUCCESS);
   }

   /*
    * Make sure we don't exceed global targets.
    */
   if (MmAvailablePages <= MiMinimumAvailablePages)
   {
      MM_ALLOCATION_REQUEST Request;

      if (!CanWait)
      {
         (void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
         return(STATUS_NO_MEMORY);
      }

      /* Insert an allocation request. */
      Request.Page = 0;

      KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);
      (void)InterlockedIncrementUL(&MiPagesRequired);

      KeAcquireSpinLock(&AllocationListLock, &OldIrql);

      if (MiBalancerThreadHandle != NULL)
      {
         KeSetEvent(&MiBalancerEvent, IO_NO_INCREMENT, FALSE);
      }
      InsertTailList(&AllocationListHead, &Request.ListEntry);
      KeReleaseSpinLock(&AllocationListLock, OldIrql);

      KeWaitForSingleObject(&Request.Event,
                            0,
                            KernelMode,
                            FALSE,
                            NULL);

      Page = Request.Page;
      if (Page == 0)
      {
         KeBugCheck(NO_PAGES_AVAILABLE);
      }
      /* Update the Consumer and make the page active */
      if(Consumer == MC_USER) MmInsertLRULastUserPage(Page);
      *AllocatedPage = Page;
      (void)InterlockedDecrementUL(&MiPagesRequired);
      return(STATUS_SUCCESS);
   }

   /*
    * Actually allocate the page.
    */
   OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
   Page = MmAllocPage(Consumer);
   KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
   if (Page == 0)
   {
      KeBugCheck(NO_PAGES_AVAILABLE);
   }
   if(Consumer == MC_USER) MmInsertLRULastUserPage(Page);
   *AllocatedPage = Page;

   return(STATUS_SUCCESS);
}
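
Example #1 shows the release side of this API; a minimal request/release pairing fragment (the MC_USER consumer and the error handling are illustrative):

    PFN_NUMBER Page;
    NTSTATUS Status;

    /* Ask the balancer for a page on behalf of the user consumer */
    Status = MmRequestPageMemoryConsumer(MC_USER, TRUE, &Page);
    if (!NT_SUCCESS(Status))
    {
        return Status;
    }

    /* ... map or otherwise use the page ... */

    /* Drop the reference and return the page to the consumer accounting */
    MmReleasePageMemoryConsumer(MC_USER, Page);
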
Example #19
File: zeropage.c  Project: GYGit/reactos
VOID
NTAPI
MmZeroPageThread(VOID)
{
    PKTHREAD Thread = KeGetCurrentThread();
    PVOID StartAddress, EndAddress;
    PVOID WaitObjects[2];
    KIRQL OldIrql;
    PVOID ZeroAddress;
    PFN_NUMBER PageIndex, FreePage;
    PMMPFN Pfn1;

    /* Get the discardable sections to free them */
    MiFindInitializationCode(&StartAddress, &EndAddress);
    if (StartAddress) MiFreeInitializationCode(StartAddress, EndAddress);
    DPRINT("Free non-cache pages: %lx\n", MmAvailablePages + MiMemoryConsumers[MC_CACHE].PagesUsed);

    /* Set our priority to 0 */
    Thread->BasePriority = 0;
    KeSetPriorityThread(Thread, 0);

    /* Setup the wait objects */
    WaitObjects[0] = &MmZeroingPageEvent;
//    WaitObjects[1] = &PoSystemIdleTimer; FIXME: Implement idle timer

    while (TRUE)
    {
        KeWaitForMultipleObjects(1, // 2
                                 WaitObjects,
                                 WaitAny,
                                 WrFreePage,
                                 KernelMode,
                                 FALSE,
                                 NULL,
                                 NULL);
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        while (TRUE)
        {
            if (!MmFreePageListHead.Total)
            {
                MmZeroingPageThreadActive = FALSE;
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                break;
            }

            PageIndex = MmFreePageListHead.Flink;
            ASSERT(PageIndex != LIST_HEAD);
            Pfn1 = MiGetPfnEntry(PageIndex);
            MI_SET_USAGE(MI_USAGE_ZERO_LOOP);
            MI_SET_PROCESS2("Kernel 0 Loop");
            FreePage = MiRemoveAnyPage(MI_GET_PAGE_COLOR(PageIndex));

            /* The first global free page should also be the first on its own list */
            if (FreePage != PageIndex)
            {
                KeBugCheckEx(PFN_LIST_CORRUPT,
                             0x8F,
                             FreePage,
                             PageIndex,
                             0);
            }

            Pfn1->u1.Flink = LIST_HEAD;
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            ZeroAddress = MiMapPagesInZeroSpace(Pfn1, 1);
            ASSERT(ZeroAddress);
            RtlZeroMemory(ZeroAddress, PAGE_SIZE);
            MiUnmapPagesInZeroSpace(ZeroAddress, 1);

            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

            MiInsertPageInList(&MmZeroedPageListHead, PageIndex);
        }
    }
}
Example #20
PFN_NUMBER
NTAPI
MiFindContiguousPages(IN PFN_NUMBER LowestPfn,
                      IN PFN_NUMBER HighestPfn,
                      IN PFN_NUMBER BoundaryPfn,
                      IN PFN_NUMBER SizeInPages,
                      IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page, PageCount, LastPage, Length, BoundaryMask;
    ULONG i = 0;
    PMMPFN Pfn1, EndPfn;
    KIRQL OldIrql;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Convert the boundary PFN into an alignment mask
    //
    BoundaryMask = ~(BoundaryPfn - 1);

    /* Disable APCs */
    KeEnterGuardedRegion();

    //
    // Loop all the physical memory blocks
    //
    do
    {
        //
        // Capture the base page and length of this memory block
        //
        Page = MmPhysicalMemoryBlock->Run[i].BasePage;
        PageCount = MmPhysicalMemoryBlock->Run[i].PageCount;

        //
        // Check how far this memory block will go
        //
        LastPage = Page + PageCount;

        //
        // Trim it down to only the PFNs we're actually interested in
        //
        if ((LastPage - 1) > HighestPfn) LastPage = HighestPfn + 1;
        if (Page < LowestPfn) Page = LowestPfn;

        //
        // Skip this run if it's empty or fails to contain all the pages we need
        //
        if (!(PageCount) || ((Page + SizeInPages) > LastPage)) continue;

        //
        // Now scan all the relevant PFNs in this run
        //
        Length = 0;
        for (Pfn1 = MI_PFN_ELEMENT(Page); Page < LastPage; Page++, Pfn1++)
        {
            //
            // If this PFN is in use, ignore it
            //
            if (MiIsPfnInUse(Pfn1))
            {
                Length = 0;
                continue;
            }

            //
            // If we haven't chosen a start PFN yet and the caller specified an
            // alignment, make sure the page matches the alignment restriction
            //
            if ((!(Length) && (BoundaryPfn)) &&
                (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
            {
                //
                // It does not, so bail out
                //
                continue;
            }

            //
            // Increase the number of valid pages, and check if we have enough
            //
            if (++Length == SizeInPages)
            {
                //
                // It appears we've amassed enough legitimate pages, rollback
                //
                Pfn1 -= (Length - 1);
                Page -= (Length - 1);

                //
                // Acquire the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                do
                {
                    //
                    // Things might've changed for us. Is the page still free?
                    //
                    if (MiIsPfnInUse(Pfn1)) break;

                    //
                    // So far so good. Is this the last confirmed valid page?
                    //
                    if (!--Length)
                    {
                        //
                        // Sanity check that we didn't go out of bounds
                        //
                        ASSERT(i != MmPhysicalMemoryBlock->NumberOfRuns);

                        //
                        // Loop until all PFN entries have been processed
                        //
                        EndPfn = Pfn1 - SizeInPages + 1;
                        do
                        {
                            //
                            // This PFN is now a used page, set it up
                            //
                            MI_SET_USAGE(MI_USAGE_CONTINOUS_ALLOCATION);
                            MI_SET_PROCESS2("Kernel Driver");
                            MiUnlinkFreeOrZeroedPage(Pfn1);
                            Pfn1->u3.e2.ReferenceCount = 1;
                            Pfn1->u2.ShareCount = 1;
                            Pfn1->u3.e1.PageLocation = ActiveAndValid;
                            Pfn1->u3.e1.StartOfAllocation = 0;
                            Pfn1->u3.e1.EndOfAllocation = 0;
                            Pfn1->u3.e1.PrototypePte = 0;
                            Pfn1->u4.VerifierAllocation = 0;
                            Pfn1->PteAddress = (PVOID)0xBAADF00D;

                            //
                            // Check if this is the last PFN, otherwise go on
                            //
                            if (Pfn1 == EndPfn) break;
                            Pfn1--;
                        } while (TRUE);

                        //
                        // Mark the first and last PFN so we can find them later
                        //
                        Pfn1->u3.e1.StartOfAllocation = 1;
                        (Pfn1 + SizeInPages - 1)->u3.e1.EndOfAllocation = 1;

                        //
                        // Now it's safe to let go of the PFN lock
                        //
                        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

                        //
                        // Quick sanity check that the last PFN is consistent
                        //
                        EndPfn = Pfn1 + SizeInPages;
                        ASSERT(EndPfn == MI_PFN_ELEMENT(Page + 1));

                        //
                        // Compute the first page, and make sure it's consistent
                        //
                        Page = Page - SizeInPages + 1;
                        ASSERT(Pfn1 == MI_PFN_ELEMENT(Page));
                        ASSERT(Page != 0);

                        /* Enable APCs and return the page */
                        KeLeaveGuardedRegion();
                        return Page;
                    }

                    //
                    // Keep going. The purpose of this loop is to reconfirm that
                    // after acquiring the PFN lock these pages are still usable
                    //
                    Pfn1++;
                    Page++;
                } while (TRUE);

                //
                // If we got here, something changed while we hadn't acquired
                // the PFN lock yet, so we'll have to restart
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                Length = 0;
            }
        }
    } while (++i != MmPhysicalMemoryBlock->NumberOfRuns);

    //
    // And if we get here, it means no suitable physical memory runs were found
    //
    return 0;
}
Example #21
File: mdlsup.c  Project: RPG-7/reactos
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the saved PFN entry and dereference it */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
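For context, a minimal sketch of how a driver would normally tear down such an MDL once a transfer completes; the helper name, and the assumption that the MDL was locked earlier with MmProbeAndLockPages, are illustrative only:

/* Sketch only: release an MDL that was locked with MmProbeAndLockPages.
   The helper name is hypothetical. */
static
VOID
SketchReleaseTransferMdl(IN PMDL TransferMdl)
{
    /* MmUnlockPages also undoes a prior MmGetSystemAddressForMdlSafe
       mapping, as the MDL_MAPPED_TO_SYSTEM_VA handling above shows */
    MmUnlockPages(TransferMdl);

    /* Return the MDL structure itself */
    IoFreeMdl(TransferMdl);
}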
Example #22
VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePoolWithTag(BaseAddress, 'mCmM');
        return;
    }

    /* Get the PTE and frame number for the allocation */
    PointerPte = MiAddressToPte(BaseAddress);
    PageFrameIndex = PFN_FROM_PTE(PointerPte);

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if ((!Pfn1) || (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // This probably means you did a free on an address that was in between
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    /* Loop the PFNs until we find the one that marks the end of the allocation */
    do
    {
        /* Make sure these are the pages we setup in the allocation routine */
        ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(Pfn1->PteAddress == PointerPte);
        ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
        ASSERT(Pfn1->u4.VerifierAllocation == 0);
        ASSERT(Pfn1->u3.e1.PrototypePte == 0);

        /* Set the special pending delete marker */
        MI_SET_PFN_DELETED(Pfn1);

        /* Advance the PTE so the PteAddress assertion stays in step */
        PointerPte++;
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    do
    {
        /* Decrement the share count and move on */
        MiDecrementShareCount(Pfn1++, PageFrameIndex++);
    } while (PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
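The internal free path above is reached through the public contiguous-memory API. A minimal sketch, assuming a placeholder size and a 4 GB physical-address limit, of how a driver allocates and releases such a buffer:

/* Sketch only: allocate a physically contiguous buffer and free it again;
   MmFreeContiguousMemory ends up in the routine above */
static
VOID
SketchContiguousBuffer(VOID)
{
    PHYSICAL_ADDRESS HighestAddress;
    PVOID Buffer;

    HighestAddress.QuadPart = 0xFFFFFFFF;   /* placeholder: anything below 4 GB */
    Buffer = MmAllocateContiguousMemory(4 * PAGE_SIZE, HighestAddress);
    if (Buffer == NULL) return;

    /* ... use the buffer, e.g. for a DMA transfer ... */

    MmFreeContiguousMemory(Buffer);
}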
Example #23
File: mdlsup.c Project: RPG-7/reactos
/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif

    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
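Because MmProbeAndLockPages raises an exception on failure, callers are expected to wrap it in SEH. A minimal sketch of the usual pattern, assuming the buffer, length, and helper name are placeholders:

/* Sketch only: lock a user buffer for read access; names are illustrative */
static
PMDL
SketchLockUserBuffer(IN PVOID UserBuffer,
                     IN ULONG Length)
{
    PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
    if (Mdl == NULL) return NULL;

    _SEH2_TRY
    {
        /* Raises the probe status (e.g. STATUS_ACCESS_VIOLATION) on failure */
        MmProbeAndLockPages(Mdl, UserMode, IoReadAccess);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        IoFreeMdl(Mdl);
        Mdl = NULL;
    }
    _SEH2_END;

    return Mdl;
}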
Example #24
File: page.c Project: GYGit/reactos
static
PMMPTE
MiGetPteForProcess(
    PEPROCESS Process,
    PVOID Address,
    BOOLEAN Create)
{
    MMPTE TmplPte, *Pte;

    /* Check if we need hyperspace mapping */
    if (Address < MmSystemRangeStart &&
        Process && Process != PsGetCurrentProcess())
    {
        UNIMPLEMENTED;
        __debugbreak();
        return NULL;
    }
    else if (Create)
    {
        KIRQL OldIrql;
        TmplPte.u.Long = 0;
        TmplPte.u.Flush.Valid = 1;
        TmplPte.u.Flush.Write = 1;

        /* All page table levels of user pages are user owned */
        TmplPte.u.Flush.Owner = (Address < MmHighestUserAddress) ? 1 : 0;

        /* Lock the PFN database */
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        /* Get the PXE */
        Pte = MiAddressToPxe(Address);
        if (!Pte->u.Hard.Valid)
        {
            TmplPte.u.Hard.PageFrameNumber = MiRemoveZeroPage(0);
            MI_WRITE_VALID_PTE(Pte, TmplPte);
        }

        /* Get the PPE */
        Pte = MiAddressToPpe(Address);
        if (!Pte->u.Hard.Valid)
        {
            TmplPte.u.Hard.PageFrameNumber = MiRemoveZeroPage(1);
            MI_WRITE_VALID_PTE(Pte, TmplPte);
        }

        /* Get the PDE */
        Pte = MiAddressToPde(Address);
        if (!Pte->u.Hard.Valid)
        {
            TmplPte.u.Hard.PageFrameNumber = MiRemoveZeroPage(2);
            MI_WRITE_VALID_PTE(Pte, TmplPte);
        }

        /* Unlock PFN database */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Get the PXE */
        Pte = MiAddressToPxe(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;

        /* Get the PPE */
        Pte = MiAddressToPpe(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;

        /* Get the PDE */
        Pte = MiAddressToPde(Address);
        if (!Pte->u.Hard.Valid)
            return NULL;
    }

    return MiAddressToPte(Address);
}
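A small, hypothetical illustration of how this helper can be used to test whether an address is currently mapped without creating missing page-table levels (the wrapper name is not part of the source):

/* Sketch only: hypothetical wrapper around MiGetPteForProcess */
static
BOOLEAN
SketchIsAddressMapped(PEPROCESS Process, PVOID Address)
{
    PMMPTE Pte = MiGetPteForProcess(Process, Address, FALSE);
    return (Pte != NULL) && (Pte->u.Hard.Valid != 0);
}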
Example #25
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the page bit count
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                             MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                                    (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool enabled */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size  << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it still has space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN NextEntry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
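From a driver's point of view these page-level routines sit underneath the public pool API; ExAllocatePoolWithTag bottoms out in MiAllocatePoolPages when no suitable free block exists. A hedged sketch of the caller-level usage, with a placeholder tag and size:

/* Sketch only: ordinary pool usage that eventually exercises the code above */
static
PVOID
SketchAllocateScratch(VOID)
{
    PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 2 * PAGE_SIZE, 'hctS');
    if (Buffer == NULL) return NULL;

    RtlZeroMemory(Buffer, 2 * PAGE_SIZE);

    /* Release later with ExFreePoolWithTag(Buffer, 'hctS') */
    return Buffer;
}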
Example #26
PVOID
NTAPI
MmAllocateSpecialPool(SIZE_T NumberOfBytes, ULONG Tag, POOL_TYPE PoolType, ULONG SpecialType)
{
    KIRQL Irql;
    MMPTE TempPte = ValidKernelPte;
    PMMPTE PointerPte;
    PFN_NUMBER PageFrameNumber;
    LARGE_INTEGER TickCount;
    PVOID Entry;
    PPOOL_HEADER Header;

    DPRINT1("MmAllocateSpecialPool(%x %x %x %x)\n", NumberOfBytes, Tag, PoolType, SpecialType);

    /* Check if the pool is initialized and quit if it's not */
    if (!MiSpecialPoolFirstPte) return NULL;

    /* Get the pool type */
    PoolType &= BASE_POOL_TYPE_MASK;

    /* Check whether current IRQL matches the pool type */
    Irql = KeGetCurrentIrql();

    if (((PoolType == PagedPool) && (Irql > APC_LEVEL)) ||
        ((PoolType != PagedPool) && (Irql > DISPATCH_LEVEL)))
    {
        /* Bad caller */
        KeBugCheckEx(SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, Irql, PoolType, NumberOfBytes, 0x30);
    }

    /* TODO: Take into account various limitations */
    /*if ((PoolType != NonPagedPool) &&
        MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum)*/

    /* Lock PFN database */
    Irql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    /* Reject the allocation if the number of available pages is too small */
    if (MmAvailablePages < 0x100)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: MmAvailablePages 0x%x is too small\n", MmAvailablePages);
        return NULL;
    }

    /* Reject allocation if special pool PTE list is exhausted */
    if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST)
    {
        /* Release the PFN database lock */
        KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);
        DPRINT1("Special pool: No PTEs left!\n");
        /* TODO: Expand the special pool */
        return NULL;
    }

    /* Save allocation time */
    KeQueryTickCount(&TickCount);

    /* Get a pointer to the first PTE */
    PointerPte = MiSpecialPoolFirstPte;

    /* Set the first PTE pointer to the next one in the list */
    MiSpecialPoolFirstPte = MmSystemPteBase + PointerPte->u.List.NextEntry;

    /* Allocate a physical page */
    PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

    /* Initialize PFN and make it valid */
    TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
    MiInitializePfnAndMakePteValid(PageFrameNumber, PointerPte, TempPte);

    /* Release the PFN database lock */
    KeReleaseQueuedSpinLock(LockQueuePfnLock, Irql);

    /* Put some content into the page; the low part of the tick count will do */
    Entry = MiPteToAddress(PointerPte);
    RtlFillMemory(Entry, PAGE_SIZE, TickCount.LowPart);

    /* Calculate header and entry addresses */
    if ((SpecialType != 0) &&
        ((SpecialType == 1) || (!MmSpecialPoolCatchOverruns)))
    {
        /* We catch underruns. Data is at the beginning of the page */
        Header = (PPOOL_HEADER)((PUCHAR)Entry + PAGE_SIZE - sizeof(POOL_HEADER));
    }
    else
    {
        /* We catch overruns. Data is at the end of the page */
        Header = (PPOOL_HEADER)Entry;
        Entry = (PVOID)((ULONG_PTR)((PUCHAR)Entry - NumberOfBytes + PAGE_SIZE) & ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
    }

    /* Initialize the header */
    RtlZeroMemory(Header, sizeof(POOL_HEADER));

    /* Save allocation size there */
    Header->Ulong1 = (ULONG)NumberOfBytes;

    /* Make sure it's all good */
    ASSERT((NumberOfBytes <= PAGE_SIZE - sizeof(POOL_HEADER)) &&
           (PAGE_SIZE <= 32 * 1024));

    /* Mark it as paged or nonpaged */
    if (PoolType == PagedPool)
    {
        /* Add pagedpool flag into the pool header too */
        Header->Ulong1 |= SPECIAL_POOL_PAGED;

        /* Also mark the next PTE as special-pool-paged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_PAGED_PTE;
    }
    else
    {
        /* Mark the next PTE as special-pool-nonpaged */
        PointerPte[1].u.Soft.PageFileHigh |= SPECIAL_POOL_NONPAGED_PTE;
    }

    /* Finally save tag and put allocation time into the header's blocksize.
       That time will be used to check memory consistency within the allocated
       page. */
    Header->PoolTag = Tag;
    Header->BlockSize = (USHORT)TickCount.LowPart;
    DPRINT1("%p\n", Entry);
    return Entry;
}
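To make the placement arithmetic above easier to follow, a self-contained sketch of the overrun-catching case: the data is pushed toward the end of the page and aligned down to pool-header granularity, so any overrun lands on the guard page that follows. The function is purely illustrative and not part of the source:

/* Sketch only: reproduces the overrun-case entry placement from above */
static
PVOID
SketchSpecialPoolEntry(PVOID PageBase, SIZE_T NumberOfBytes)
{
    return (PVOID)((ULONG_PTR)((PUCHAR)PageBase + PAGE_SIZE - NumberOfBytes) &
                   ~((LONG_PTR)sizeof(POOL_HEADER) - 1));
}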