VOID
ExFreeToPagedLookasideList (
    IN PPAGED_LOOKASIDE_LIST Lookaside,
    IN PVOID Entry
    )

/*++

Routine Description:

    This function inserts (pushes) the specified entry into the specified
    paged lookaside list.

Arguments:

    Lookaside - Supplies a pointer to a paged lookaside list structure.

    Entry - Supplies a pointer to the entry that is inserted in the
        lookaside list.

Return Value:

    None.

--*/

{

    Lookaside->L.TotalFrees += 1;
    if (Isx86FeaturePresent(KF_CMPXCHG8B)) {
        if (ExQueryDepthSList(&Lookaside->L.ListHead) >= Lookaside->L.Depth) {
            Lookaside->L.FreeMisses += 1;
            (Lookaside->L.Free)(Entry);

        } else {
            ExInterlockedPushEntrySList(&Lookaside->L.ListHead,
                                        (PSINGLE_LIST_ENTRY)Entry,
                                        NULL);
        }

        return;
    }

    ExAcquireFastMutex(&Lookaside->Lock);
    if (ExQueryDepthSList(&Lookaside->L.ListHead) >= Lookaside->L.Depth) {
        ExReleaseFastMutex(&Lookaside->Lock);
        Lookaside->L.FreeMisses += 1;
        (Lookaside->L.Free)(Entry);

    } else {
        PushEntryList(&Lookaside->L.ListHead.Next, (PSINGLE_LIST_ENTRY)Entry);
        Lookaside->L.ListHead.Depth += 1;
        ExReleaseFastMutex(&Lookaside->Lock);
    }

    return;
}
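For context, a driver reaches the routine above through the public lookaside API. The following is a minimal usage sketch, not taken from the source; MY_PACKET, the pool tag, and the function names are hypothetical. Passing NULL for the Allocate/Free routines selects the default pool allocator, and the Depth parameter of ExInitializePagedLookasideList is reserved and must be zero (the lookaside balancer adjusts L.Depth at run time).

#include <ntddk.h>

typedef struct _MY_PACKET {
    LIST_ENTRY Links;
    ULONG Data;
} MY_PACKET, *PMY_PACKET;

PAGED_LOOKASIDE_LIST MyLookaside;   /* must reside in nonpaged memory */

VOID
MyInitialize (
    VOID
    )
{
    /* NULL Allocate/Free selects the default pool routines; the tag
       'tPyM' is an arbitrary example tag. */
    ExInitializePagedLookasideList(&MyLookaside,
                                   NULL,
                                   NULL,
                                   0,
                                   sizeof(MY_PACKET),
                                   'tPyM',
                                   0);
}

VOID
MyWork (
    VOID
    )
{
    PMY_PACKET Packet;

    Packet = ExAllocateFromPagedLookasideList(&MyLookaside);
    if (Packet != NULL) {
        Packet->Data = 1;

        /* Returns the entry to the per-list S-LIST, or to pool when the
           list is already at its depth, as shown above. */
        ExFreeToPagedLookasideList(&MyLookaside, Packet);
    }
}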
VOID
NTAPI
IopFreeMiniPacket(PIOP_MINI_COMPLETION_PACKET Packet)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    PNPAGED_LOOKASIDE_LIST List;

    /* Use the P List */
    List = (PNPAGED_LOOKASIDE_LIST)Prcb->
            PPLookasideList[LookasideCompletionList].P;
    List->L.TotalFrees++;

    /* Check if the Free was within the Depth or not */
    if (ExQueryDepthSList(&List->L.ListHead) >= List->L.Depth)
    {
        /* Let the balancer know */
        List->L.FreeMisses++;

        /* Use the L List */
        List = (PNPAGED_LOOKASIDE_LIST)Prcb->
                PPLookasideList[LookasideCompletionList].L;
        List->L.TotalFrees++;

        /* Check if the Free was within the Depth or not */
        if (ExQueryDepthSList(&List->L.ListHead) >= List->L.Depth)
        {
            /* All lists failed, use the pool */
            List->L.FreeMisses++;
            ExFreePool(Packet);
            return;
        }
    }

    /* The free was within the Depth */
    InterlockedPushEntrySList(&List->L.ListHead,
                              (PSLIST_ENTRY)Packet);
}
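The same two-tier pattern recurs throughout the per-processor lookaside code. Below is a distilled sketch of the free path using a hypothetical helper name, MiniFreeToPPLookaside: the per-processor (P) list is tried first, then the system-wide (L) list, and only when both are at their depth does the block go back to pool.

FORCEINLINE
VOID
MiniFreeToPPLookaside(
    IN PP_LOOKASIDE_LIST *Pair,    /* e.g. &Prcb->PPLookasideList[Index] */
    IN PVOID Entry)
{
    PGENERAL_LOOKASIDE List = Pair->P;

    /* First tier: the per-processor list */
    List->TotalFrees++;
    if (ExQueryDepthSList(&List->ListHead) < List->Depth)
    {
        InterlockedPushEntrySList(&List->ListHead, (PSLIST_ENTRY)Entry);
        return;
    }
    List->FreeMisses++;

    /* Second tier: the system-wide list */
    List = Pair->L;
    List->TotalFrees++;
    if (ExQueryDepthSList(&List->ListHead) < List->Depth)
    {
        InterlockedPushEntrySList(&List->ListHead, (PSLIST_ENTRY)Entry);
        return;
    }
    List->FreeMisses++;

    /* Both lists are full; give the block back to pool */
    ExFreePool(Entry);
}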
ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_COUNT FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;
    ULONG_PTR Offset;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
        i = (ULONG)(Offset >> PAGE_SHIFT);
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans. If it's
        // only one page, add it to the S-LIST instead of freeing it
        //
        NumberOfPages = End - i + 1;
        if ((NumberOfPages == 1) &&
            (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
        {
            InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
            return 1;
        }

        /* Delete the actual pages */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the allocation and free bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }
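Note that the free path above receives only a starting address; the allocation length is recovered by scanning the end-of-allocation bitmap. Here is a self-contained sketch of that two-bitmap scheme, built on the documented Rtl bitmap API; the names and the 128-page size are illustrative, not from the source.

RTL_BITMAP AllocationMap, EndOfAllocationMap;
ULONG AllocationBuffer[4], EndBuffer[4];   /* 4 ULONGs = 128 bits/pages */

VOID
BitmapBookkeepingExample(VOID)
{
    ULONG Start, End, Pages = 3;

    RtlInitializeBitMap(&AllocationMap, AllocationBuffer, 128);
    RtlInitializeBitMap(&EndOfAllocationMap, EndBuffer, 128);
    RtlClearAllBits(&AllocationMap);
    RtlClearAllBits(&EndOfAllocationMap);

    /* "Allocate": claim 3 contiguous pages and mark where they end */
    Start = RtlFindClearBitsAndSet(&AllocationMap, Pages, 0);
    RtlSetBit(&EndOfAllocationMap, Start + Pages - 1);

    /* "Free": given only Start, recover the length from the end bitmap */
    End = Start;
    while (!RtlTestBit(&EndOfAllocationMap, End)) End++;
    RtlClearBit(&EndOfAllocationMap, End);
    RtlClearBits(&AllocationMap, Start, End - Start + 1);
}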
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Get the number of page tables needed for this expansion
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                 MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                            (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                            PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Find the first and last PTE, then loop them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed non paged pool enabled */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i],
                                                  &FreeEntry->List,
                                                  TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                       &FreeEntry->List);

                    /* Is freed non paged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN NextEntry and index
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed non paged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
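Both MiAllocatePoolPages and MiFreePoolPages front their slow paths with a lock-free S-LIST cache for single-page requests. The following is a standalone sketch of that fast-path pattern; all names are hypothetical and the pool-allocation slow path merely stands in for the bitmap search above. Note that the freed page itself holds the SLIST_ENTRY, which is safe because pages are far more aligned than the entry requires.

#include <ntddk.h>

static SLIST_HEADER MyPageCache;
static const USHORT MyPageCacheMaximum = 32;

VOID
MyInitCache(VOID)
{
    ExInitializeSListHead(&MyPageCache);
}

PVOID
MyAllocatePage(VOID)
{
    PVOID Page;

    /* Fast path: pop a cached page without taking any lock */
    if (ExQueryDepthSList(&MyPageCache) != 0)
    {
        Page = InterlockedPopEntrySList(&MyPageCache);
        if (Page) return Page;
    }

    /* Slow path stand-in for the bitmap/free-list search */
    return ExAllocatePoolWithTag(PagedPool, PAGE_SIZE, 'gPyM');
}

VOID
MyFreePage(PVOID Page)
{
    /* Cache the page if there is room, else really free it */
    if (ExQueryDepthSList(&MyPageCache) < MyPageCacheMaximum)
    {
        InterlockedPushEntrySList(&MyPageCache, (PSLIST_ENTRY)Page);
        return;
    }

    ExFreePoolWithTag(Page, 'gPyM');
}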
VOID
NTAPI
CdRomPrepareUpdateCapabilitiesIrp(
    PDEVICE_OBJECT Fdo
    )
{
    PFUNCTIONAL_DEVICE_EXTENSION fdoExtension = Fdo->DeviceExtension;
    //PCOMMON_DEVICE_EXTENSION commonExtension = Fdo->DeviceExtension;
    PCDROM_DATA cdData = fdoExtension->CommonExtension.DriverData;
    PCDROM_MMC_EXTENSION mmcData = &(cdData->Mmc);
    PIO_STACK_LOCATION nextStack;
    PSCSI_REQUEST_BLOCK srb;
    PCDB cdb;
    ULONG bufferSize;
    PIRP irp;

    ASSERT(mmcData->UpdateState);
    ASSERT(ExQueryDepthSList(&(mmcData->DelayedIrps)) != 0);
    ASSERT(mmcData->CapabilitiesIrp != NULL);
    ASSERT(mmcData->CapabilitiesMdl != NULL);
    ASSERT(mmcData->CapabilitiesBuffer);
    ASSERT(mmcData->CapabilitiesBufferSize != 0);
    ASSERT(fdoExtension->SenseData);

    //
    // do *NOT* call IoReuseIrp(), since it would zero out our
    // current irp stack location, which we really don't want
    // to happen. it would also set the current irp stack location
    // to one greater than currently exists (to give max irp usage),
    // but we don't want that either, since we use the top irp stack.
    //
    // IoReuseIrp(mmcData->CapabilitiesIrp, STATUS_UNSUCCESSFUL);
    //

    irp = mmcData->CapabilitiesIrp;
    srb = &(mmcData->CapabilitiesSrb);
    cdb = (PCDB)(srb->Cdb);
    bufferSize = mmcData->CapabilitiesBufferSize;

    //
    // zero stuff out
    //
    RtlZeroMemory(srb, sizeof(SCSI_REQUEST_BLOCK));
    RtlZeroMemory(fdoExtension->SenseData, sizeof(SENSE_DATA));
    RtlZeroMemory(mmcData->CapabilitiesBuffer, bufferSize);

    //
    // setup the srb
    //
    srb->TimeOutValue = CDROM_GET_CONFIGURATION_TIMEOUT;
    srb->Length = SCSI_REQUEST_BLOCK_SIZE;
    srb->Function = SRB_FUNCTION_EXECUTE_SCSI;
    srb->SenseInfoBufferLength = SENSE_BUFFER_SIZE;
    srb->SenseInfoBuffer = fdoExtension->SenseData;
    srb->DataBuffer = mmcData->CapabilitiesBuffer;
    srb->QueueAction = SRB_SIMPLE_TAG_REQUEST;
    srb->DataTransferLength = mmcData->CapabilitiesBufferSize;
    srb->ScsiStatus = 0;
    srb->SrbStatus = 0;
    srb->NextSrb = NULL;
    srb->OriginalRequest = irp;
    srb->SrbFlags = fdoExtension->SrbFlags;
    srb->CdbLength = 10;
    SET_FLAG(srb->SrbFlags, SRB_FLAGS_DATA_IN);
    SET_FLAG(srb->SrbFlags, SRB_FLAGS_NO_QUEUE_FREEZE);

    //
    // setup the cdb
    //
    cdb->GET_CONFIGURATION.OperationCode = SCSIOP_GET_CONFIGURATION;
    cdb->GET_CONFIGURATION.RequestType = SCSI_GET_CONFIGURATION_REQUEST_TYPE_CURRENT;
    cdb->GET_CONFIGURATION.StartingFeature[0] = 0;
    cdb->GET_CONFIGURATION.StartingFeature[1] = 0;
    cdb->GET_CONFIGURATION.AllocationLength[0] = (UCHAR)(bufferSize >> 8);
    cdb->GET_CONFIGURATION.AllocationLength[1] = (UCHAR)(bufferSize & 0xff);

    //
    // setup the irp
    //
    nextStack = IoGetNextIrpStackLocation(irp);
    nextStack->MajorFunction = IRP_MJ_SCSI;
    nextStack->Parameters.Scsi.Srb = srb;
    irp->MdlAddress = mmcData->CapabilitiesMdl;
    irp->AssociatedIrp.SystemBuffer = mmcData->CapabilitiesBuffer;
    IoSetCompletionRoutine(irp,
                           (PIO_COMPLETION_ROUTINE)CdRomUpdateMmcDriveCapabilitiesCompletion,
                           Fdo,
                           TRUE, TRUE, TRUE);
    return;
}
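The completion routine wired up by IoSetCompletionRoutine is not shown here. As a rough sketch of the contract only (the body below is hypothetical, not the driver's actual CdRomUpdateMmcDriveCapabilitiesCompletion): because CapabilitiesIrp is allocated and reused by the driver itself, its completion routine must return STATUS_MORE_PROCESSING_REQUIRED so the I/O manager does not try to complete or free the IRP.

NTSTATUS
MyCapabilitiesCompletion(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp,
    IN PVOID Context)
{
    PDEVICE_OBJECT Fdo = (PDEVICE_OBJECT)Context;   /* passed above */

    UNREFERENCED_PARAMETER(DeviceObject);
    UNREFERENCED_PARAMETER(Fdo);
    UNREFERENCED_PARAMETER(Irp);

    /* ... inspect the SRB status, parse the GET CONFIGURATION data,
       and re-issue any IRPs queued on mmcData->DelayedIrps ... */

    /* The IRP belongs to the driver, not the I/O manager */
    return STATUS_MORE_PROCESSING_REQUIRED;
}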
VOID
MiReleaseAllMemory (
    VOID
    )

/*++

Routine Description:

    This function performs the final release of memory management allocations.

Arguments:

    None.

Return Value:

    None.

Environment:

    No references to paged pool or pageable code/data are allowed.

--*/

{
    ULONG i;
    ULONG j;
    PEVENT_COUNTER EventSupport;
    PUNLOADED_DRIVERS Entry;
    PLIST_ENTRY NextEntry;
    PKLDR_DATA_TABLE_ENTRY DataTableEntry;
    PLOAD_IMPORTS ImportList;
    PMI_VERIFIER_DRIVER_ENTRY Verifier;
    PMMINPAGE_SUPPORT Support;
    PSLIST_ENTRY SingleListEntry;
    PDRIVER_SPECIFIED_VERIFIER_THUNKS ThunkTableBase;
    PMMMOD_WRITER_MDL_ENTRY ModWriterEntry;

    ASSERT (MmUnusedSegmentList.Flink == &MmUnusedSegmentList);

    //
    // Don't clear free pages so problems can be debugged.
    //

    MiZeroingDisabled = TRUE;

    //
    // Free the unloaded driver list.
    //

    if (MmUnloadedDrivers != NULL) {
        Entry = &MmUnloadedDrivers[0];
        for (i = 0; i < MI_UNLOADED_DRIVERS; i += 1) {
            if (Entry->Name.Buffer != NULL) {
                RtlFreeUnicodeString (&Entry->Name);
            }
            Entry += 1;
        }
        ExFreePool (MmUnloadedDrivers);
    }

    NextEntry = MmLoadedUserImageList.Flink;
    while (NextEntry != &MmLoadedUserImageList) {

        DataTableEntry = CONTAINING_RECORD (NextEntry,
                                            KLDR_DATA_TABLE_ENTRY,
                                            InLoadOrderLinks);

        NextEntry = NextEntry->Flink;

        ExFreePool ((PVOID)DataTableEntry);
    }

    //
    // Release the loaded module list entries.
    //

    NextEntry = PsLoadedModuleList.Flink;
    while (NextEntry != &PsLoadedModuleList) {

        DataTableEntry = CONTAINING_RECORD (NextEntry,
                                            KLDR_DATA_TABLE_ENTRY,
                                            InLoadOrderLinks);

        ImportList = (PLOAD_IMPORTS)DataTableEntry->LoadedImports;
        if ((ImportList != (PVOID)LOADED_AT_BOOT) &&
            (ImportList != (PVOID)NO_IMPORTS_USED) &&
            (!SINGLE_ENTRY(ImportList))) {

            ExFreePool (ImportList);
        }

        if (DataTableEntry->FullDllName.Buffer != NULL) {
            ASSERT (DataTableEntry->FullDllName.Buffer == DataTableEntry->BaseDllName.Buffer);
        }

        NextEntry = NextEntry->Flink;

        ExFreePool ((PVOID)DataTableEntry);
    }

    //
    // Free the physical memory descriptor block.
    //

    ExFreePool (MmPhysicalMemoryBlock);

    ExFreePool (MiPfnBitMap.Buffer);

    //
    // Free the system views structure.
    //

    if (MmSession.SystemSpaceViewTable != NULL) {
        ExFreePool (MmSession.SystemSpaceViewTable);
    }

    if (MmSession.SystemSpaceBitMap != NULL) {
        ExFreePool (MmSession.SystemSpaceBitMap);
    }

    //
    // Free the pagefile structures - note the PageFileName buffer was freed
    // earlier as it resided in paged pool and may have needed an inpage
    // to be freed.
    //

    for (i = 0; i < MmNumberOfPagingFiles; i += 1) {
        ASSERT (MmPagingFile[i]->PageFileName.Buffer == NULL);
        for (j = 0; j < MM_PAGING_FILE_MDLS; j += 1) {
            ExFreePool (MmPagingFile[i]->Entry[j]);
        }
        ExFreePool (MmPagingFile[i]->Bitmap);
        ExFreePool (MmPagingFile[i]);
    }

    ASSERT (MmNumberOfMappedMdlsInUse == 0);

    i = 0;
    while (IsListEmpty (&MmMappedFileHeader.ListHead) == 0) {
        ModWriterEntry = (PMMMOD_WRITER_MDL_ENTRY)RemoveHeadList (
                                    &MmMappedFileHeader.ListHead);
        ExFreePool (ModWriterEntry);
        i += 1;
    }
    ASSERT (i == MmNumberOfMappedMdls);

    //
    // Free the paged pool bitmaps.
    //

    ExFreePool (MmPagedPoolInfo.PagedPoolAllocationMap);
    ExFreePool (MmPagedPoolInfo.EndOfPagedPoolBitmap);

    if (VerifierLargePagedPoolMap != NULL) {
        ExFreePool (VerifierLargePagedPoolMap);
    }

    //
    // Free the inpage structures.
    //
    while (ExQueryDepthSList (&MmInPageSupportSListHead) != 0) {

        SingleListEntry = InterlockedPopEntrySList (&MmInPageSupportSListHead);

        if (SingleListEntry != NULL) {
            Support = CONTAINING_RECORD (SingleListEntry,
                                         MMINPAGE_SUPPORT,
                                         ListEntry);

            ASSERT (Support->u1.e1.PrefetchMdlHighBits == 0);
            ExFreePool (Support);
        }
    }

    while (ExQueryDepthSList (&MmEventCountSListHead) != 0) {

        EventSupport = (PEVENT_COUNTER) InterlockedPopEntrySList (&MmEventCountSListHead);

        if (EventSupport != NULL) {
            ExFreePool (EventSupport);
        }
    }

    //
    // Free the verifier list last because it must be consulted to debug
    // any bugchecks.
    //

    NextEntry = MiVerifierDriverAddedThunkListHead.Flink;
    if (NextEntry != NULL) {
        while (NextEntry != &MiVerifierDriverAddedThunkListHead) {

            ThunkTableBase = CONTAINING_RECORD (NextEntry,
                                                DRIVER_SPECIFIED_VERIFIER_THUNKS,
                                                ListEntry);

            NextEntry = NextEntry->Flink;
            ExFreePool (ThunkTableBase);
        }
    }

    NextEntry = MiSuspectDriverList.Flink;
    while (NextEntry != &MiSuspectDriverList) {

        Verifier = CONTAINING_RECORD (NextEntry,
                                      MI_VERIFIER_DRIVER_ENTRY,
                                      Links);

        NextEntry = NextEntry->Flink;
        ExFreePool (Verifier);
    }
}
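The two drain loops above follow a common idiom: pop entries until ExQueryDepthSList reports the list empty, recover the enclosing structure with CONTAINING_RECORD, and free it. A compact, self-contained sketch of that idiom (MY_SUPPORT and MyDrainSList are hypothetical names):

typedef struct _MY_SUPPORT {
    ULONG Flags;
    SLIST_ENTRY ListEntry;
} MY_SUPPORT, *PMY_SUPPORT;

VOID
MyDrainSList(PSLIST_HEADER Head)
{
    PSLIST_ENTRY Entry;
    PMY_SUPPORT Support;

    while (ExQueryDepthSList(Head) != 0) {

        Entry = InterlockedPopEntrySList(Head);
        if (Entry == NULL) break;   /* raced with another consumer */

        /* Step back from the embedded SLIST_ENTRY to the container */
        Support = CONTAINING_RECORD(Entry, MY_SUPPORT, ListEntry);
        ExFreePool(Support);
    }
}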