/*
 * @implemented
 *
 * Build (or allocate and build) an MDL describing the buffer [Base, Base+Length).
 * When the caller passes Mdl == NULL a fresh MDL of the required size is
 * allocated from non-paged pool; the caller owns it and must free it.
 * Returns the initialized MDL, or NULL if pool allocation failed.
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // No MDL supplied by the caller -- allocate one big enough for this range
    //
    if (Mdl == NULL)
    {
        SIZE_T MdlSize = MmSizeOfMdl(Base, Length);

        Mdl = ExAllocatePoolWithTag(NonPagedPool, MdlSize, TAG_MDL);
        if (Mdl == NULL) return NULL;
    }

    //
    // Fill in the fixed MDL header for the described buffer and return it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}
/*
 * Write `size` bytes from the static dump buffer (dump_mem) to the dump
 * device at byte `offset`, via the preallocated dump_mdl.
 *
 * Two paths exist: when dump->pg_init is set the asynchronous "pending"
 * write interface is used (finishing any outstanding write first);
 * otherwise the plain synchronous WriteRoutine is used and the buffer is
 * scrubbed afterwards. Returns the status of the last write call issued.
 */
static NTSTATUS dump_mem_write(dump_context *dump, u32 size, u64 offset)
{
    NTSTATUS st;

    /* describe the non-paged dump buffer with the preallocated MDL */
    MmInitializeMdl(dump_mdl, dump_mem, size);
    dump_mdl->MappedSystemVa = dump_mem;
    dump_mdl->MdlFlags = MDL_SOURCE_IS_NONPAGED_POOL | MDL_MAPPED_TO_SYSTEM_VA;

    if (dump->pg_init == 0)
    {
        /* synchronous path: write, then zero the buffer for the next chunk */
        st = dump->WriteRoutine(pv(&offset), dump_mdl);
        memset(dump_mem, 0, DUMP_MEM_SIZE);
        return st;
    }

    /* async path: complete any outstanding write before starting a new one */
    if (dump->pg_pending != 0)
    {
        st = dump->WritePendingRoutine(IO_DUMP_WRITE_FINISH, NULL, NULL, dump->a_data);
        if (NT_SUCCESS(st) != FALSE) dump->pg_pending = 0;
    }

    st = dump->WritePendingRoutine(IO_DUMP_WRITE_START, pv(&offset), dump_mdl, dump->a_data);
    if (NT_SUCCESS(st) != FALSE) dump->pg_pending = 1;

    return st;
}
/*
 * @implemented
 *
 * Allocate and initialize an MDL describing [VirtualAddress, +Length).
 * Small requests (<= 23 pages) come from a fixed-size lookaside list;
 * anything larger is sized exactly and allocated from non-paged pool.
 * If an IRP is supplied the MDL is attached to it: appended to the
 * existing MdlAddress chain for a secondary buffer, or installed as
 * the IRP's primary MDL otherwise. Returns NULL on failure (Length >= 2GB,
 * MDL header+PFN array would exceed MAXUSHORT, or pool exhaustion).
 */
PMDL
NTAPI
IoAllocateMdl(IN PVOID VirtualAddress,
              IN ULONG Length,
              IN BOOLEAN SecondaryBuffer,
              IN BOOLEAN ChargeQuota,
              IN PIRP Irp)
{
    PMDL Mdl = NULL, p;
    ULONG Flags = 0;
    ULONG Size;

    /* Make sure we got a valid length */
    ASSERT(Length != 0);

    /* Fail if allocation is over 2GB */
    if (Length & 0x80000000) return NULL;

    /* Calculate the number of pages for the allocation */
    Size = ADDRESS_AND_SIZE_TO_SPAN_PAGES(VirtualAddress, Length);
    if (Size > 23)
    {
        /* This is bigger than our fixed-size MDLs. Calculate real size */
        Size *= sizeof(PFN_NUMBER);
        Size += sizeof(MDL);
        if (Size > MAXUSHORT) return NULL;
    }
    else
    {
        /* Use an internal fixed MDL size (23 PFN slots) */
        Size = (23 * sizeof(PFN_NUMBER)) + sizeof(MDL);
        Flags |= MDL_ALLOCATED_FIXED_SIZE;

        /* Allocate one from the lookaside list */
        Mdl = IopAllocateMdlFromLookaside(LookasideMdlList);
    }

    /* Check if we don't have an mdl yet (large request, or lookaside empty) */
    if (!Mdl)
    {
        /* Allocate one from pool */
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    /* Initialize it */
    MmInitializeMdl(Mdl, VirtualAddress, Length);
    Mdl->MdlFlags |= Flags;

    /* Check if an IRP was given too */
    if (Irp)
    {
        /* Check if it came with a secondary buffer AND an existing chain.
           The previous code dereferenced Irp->MdlAddress unconditionally
           here, crashing when a secondary buffer was requested on an IRP
           that had no MDL yet; in that case install this MDL as the head
           of the chain instead. */
        if (SecondaryBuffer && Irp->MdlAddress)
        {
            /* Insert the MDL at the end of the chain */
            p = Irp->MdlAddress;
            while (p->Next) p = p->Next;
            p->Next = Mdl;
        }
        else
        {
            /* Otherwise, insert it directly */
            Irp->MdlAddress = Mdl;
        }
    }

    /* Return the allocated mdl */
    return Mdl;
}
/*
 * Crash-dump / hibernation write filter: intercepts each dump-stack write,
 * copies the payload into our own non-paged WriteFilterBuffer, encrypts the
 * portion that falls inside the boot drive's encrypted area, and redirects
 * the caller's MDL at the encrypted copy.
 *
 * Because returning an error from a dump filter does NOT stop the data from
 * reaching the disk in plaintext, every unrecoverable condition bug-checks
 * instead of returning a failure status.
 */
static NTSTATUS DumpFilterWrite (PFILTER_EXTENSION filterExtension, PLARGE_INTEGER diskWriteOffset, PMDL writeMdl)
{
	ULONG dataLength = MmGetMdlByteCount (writeMdl);
	/* absolute on-disk byte offset of this write */
	uint64 offset = DumpPartitionOffset.QuadPart + diskWriteOffset->QuadPart;
	uint64 intersectStart;
	uint32 intersectLength;
	PVOID writeBuffer;
	CSHORT origMdlFlags;

	/* Sanity-check the filter extension before trusting any of its state */
	if (BootDriveFilterExtension->MagicNumber != TC_BOOT_DRIVE_FILTER_EXTENSION_MAGIC_NUMBER)
		TC_BUG_CHECK (STATUS_CRC_ERROR);

	if (BootDriveFilterExtension->Queue.EncryptedAreaEndUpdatePending)	// Hibernation should always abort the setup thread
		TC_BUG_CHECK (STATUS_INVALID_PARAMETER);

	/* No encrypted area configured -- pass the write through untouched */
	if (BootDriveFilterExtension->Queue.EncryptedAreaStart == -1 || BootDriveFilterExtension->Queue.EncryptedAreaEnd == -1)
		return STATUS_SUCCESS;

	if (dataLength > WriteFilterBufferSize)
		TC_BUG_CHECK (STATUS_BUFFER_OVERFLOW);	// Bug check is required as returning an error does not prevent data from being written to disk

	/* Both length and offset must be data-unit aligned for sector encryption */
	if ((dataLength & (ENCRYPTION_DATA_UNIT_SIZE - 1)) != 0)
		TC_BUG_CHECK (STATUS_INVALID_PARAMETER);

	if ((offset & (ENCRYPTION_DATA_UNIT_SIZE - 1)) != 0)
		TC_BUG_CHECK (STATUS_INVALID_PARAMETER);

	writeBuffer = MmGetSystemAddressForMdlSafe (writeMdl, HighPagePriority);
	if (!writeBuffer)
		TC_BUG_CHECK (STATUS_INSUFFICIENT_RESOURCES);

	/* Work on our own copy so the caller's buffer is never modified */
	memcpy (WriteFilterBuffer, writeBuffer, dataLength);

	/* Encrypt only the slice of this write that overlaps the encrypted area */
	GetIntersection (offset, dataLength, BootDriveFilterExtension->Queue.EncryptedAreaStart, BootDriveFilterExtension->Queue.EncryptedAreaEnd, &intersectStart, &intersectLength);
	if (intersectLength > 0)
	{
		UINT64_STRUCT dataUnit;
		dataUnit.Value = intersectStart / ENCRYPTION_DATA_UNIT_SIZE;

		if (BootDriveFilterExtension->Queue.RemapEncryptedArea)
		{
			/* In-place-encryption remapping: shift both the on-disk target
			   and the data-unit number used as the encryption tweak */
			diskWriteOffset->QuadPart += BootDriveFilterExtension->Queue.RemappedAreaOffset;
			dataUnit.Value += BootDriveFilterExtension->Queue.RemappedAreaDataUnitOffset;
		}

		EncryptDataUnitsCurrentThread (WriteFilterBuffer + (intersectStart - offset), &dataUnit, intersectLength / ENCRYPTION_DATA_UNIT_SIZE, BootDriveFilterExtension->Queue.CryptoInfo);
	}

	/* Repoint the caller's MDL at our encrypted copy */
	origMdlFlags = writeMdl->MdlFlags;

	MmInitializeMdl (writeMdl, WriteFilterBuffer, dataLength);
	MmBuildMdlForNonPagedPool (writeMdl);

	// Instead of using MmGetSystemAddressForMdlSafe(), some buggy custom storage drivers may directly test MDL_MAPPED_TO_SYSTEM_VA flag,
	// disregarding the fact that other MDL flags may be set by the system or a dump filter (e.g. MDL_SOURCE_IS_NONPAGED_POOL flag only).
	// Therefore, to work around this issue, the original flags will be restored even if they do not match the new MDL.
	// MS BitLocker also uses this hack/workaround (it should be safe to use until the MDL structure is changed).
	writeMdl->MdlFlags = origMdlFlags;

	return STATUS_SUCCESS;
}
/*
 * Read one page of FileObject (at RequiredResources->FileOffset) into a
 * newly acquired physical page, zeroing the tail of the page beyond
 * RequiredResources->Amount. The page frame number is returned through
 * RequiredResources->Page[RequiredResources->Offset]; on failure the page
 * is released back to its consumer and the error status is returned.
 */
NTSTATUS
NTAPI
MiReadFilePage(PMMSUPPORT AddressSpace,
               PMEMORY_AREA MemoryArea,
               PMM_REQUIRED_RESOURCES RequiredResources)
{
    PFILE_OBJECT FileObject = RequiredResources->Context;
    PPFN_NUMBER Page = &RequiredResources->Page[RequiredResources->Offset];
    PLARGE_INTEGER FileOffset = &RequiredResources->FileOffset;
    NTSTATUS Status;
    PVOID PageBuf = NULL;
    KEVENT Event;
    IO_STATUS_BLOCK IOSB;
    /* Stack-built single-page MDL. Use PFN_NUMBER-typed storage so the one
       trailing page-frame slot is large enough AND properly aligned on
       64-bit builds: the previous UCHAR[sizeof(MDL) + sizeof(ULONG)] buffer
       was 4 bytes short there (PFN_NUMBER is ULONG_PTR) and a UCHAR array
       carries no alignment guarantee for the MDL header. Same idiom as the
       MdlHack arrays used elsewhere in this file. */
    PFN_NUMBER MdlBase[(sizeof(MDL) / sizeof(PFN_NUMBER)) + 1];
    PMDL Mdl = (PMDL)MdlBase;
    KIRQL OldIrql;

    DPRINTC("Pulling page %I64x from %wZ to %Ix\n", FileOffset->QuadPart, &FileObject->FileName, *Page);

    /* Acquire a physical page to read into */
    Status = MmRequestPageMemoryConsumer(RequiredResources->Consumer, TRUE, Page);
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("Status: %x\n", Status);
        return Status;
    }

    /* Describe the single page with the stack MDL */
    MmInitializeMdl(Mdl, NULL, PAGE_SIZE);
    MmBuildMdlFromPages(Mdl, Page);
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    /* Issue the paging read and wait if it went asynchronous */
    KeInitializeEvent(&Event, NotificationEvent, FALSE);
    Status = IoPageRead(FileObject, Mdl, FileOffset, &Event, &IOSB);
    if (Status == STATUS_PENDING)
    {
        KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
        Status = IOSB.Status;
    }
    /* The I/O path may have mapped the MDL into system space -- undo that */
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    /* Zero the tail of the page past the bytes actually backed by the file */
    PageBuf = MiMapPageInHyperSpace(PsGetCurrentProcess(), *Page, &OldIrql);
    if (!PageBuf)
    {
        MmReleasePageMemoryConsumer(RequiredResources->Consumer, *Page);
        return STATUS_NO_MEMORY;
    }
    RtlZeroMemory((PCHAR)PageBuf + RequiredResources->Amount, PAGE_SIZE - RequiredResources->Amount);
    MiUnmapPageInHyperSpace(PsGetCurrentProcess(), PageBuf, OldIrql);

    DPRINT("Read Status %x (Page %x)\n", Status, *Page);

    /* Read failed: give the page back and propagate the error */
    if (!NT_SUCCESS(Status))
    {
        MmReleasePageMemoryConsumer(RequiredResources->Consumer, *Page);
        DPRINT("Status: %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
VOID
MiZeroPageFile (
    IN PVOID Context
    )

/*++

Routine Description:

    This routine zeroes all inactive pagefile blocks in the specified
    paging file.

Arguments:

    Context - Supplies the information on which pagefile to zero and a
              zeroed page to use for the I/O.

Return Value:

    None.  Completion (success or abandonment) is signalled via the
    Context's AllDone event.

Environment:

    Kernel mode, the caller must lock down PAGELK.

--*/

{
    PFN_NUMBER MaxPagesToWrite;
    PMMPFN Pfn1;
    PPFN_NUMBER Page;
    // Stack storage for an MDL header plus the largest write-cluster's
    // worth of page-frame slots.
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + MM_MAXIMUM_WRITE_CLUSTER];
    PMDL Mdl;
    NTSTATUS Status;
    KEVENT IoEvent;
    IO_STATUS_BLOCK IoStatus;
    KIRQL OldIrql;
    LARGE_INTEGER StartingOffset;
    ULONG count;
    ULONG i;
    PFN_NUMBER first;
    ULONG write;
    PKEVENT AllDone;
    SIZE_T NumberOfBytes;
    PMMPAGING_FILE PagingFile;
    PFN_NUMBER ZeroedPageFrame;
    PMM_ZERO_PAGEFILE_CONTEXT ZeroContext;

    //
    // Capture the context fields, then free it -- we own the allocation.
    //

    ZeroContext = (PMM_ZERO_PAGEFILE_CONTEXT) Context;

    PagingFile = ZeroContext->PagingFile;
    ZeroedPageFrame = ZeroContext->ZeroedPageFrame;
    AllDone = ZeroContext->AllDone;

    ExFreePool (Context);

    NumberOfBytes = MmModifiedWriteClusterSize << PAGE_SHIFT;
    MaxPagesToWrite = NumberOfBytes >> PAGE_SHIFT;

    Mdl = (PMDL) MdlHack;
    Page = (PPFN_NUMBER)(Mdl + 1);

    KeInitializeEvent (&IoEvent, NotificationEvent, FALSE);

    MmInitializeMdl (Mdl, NULL, PAGE_SIZE);
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    Mdl->StartVa = NULL;

    //
    // Every PFN slot points at the same zeroed page, so one page of zeroes
    // can be written to up to MaxPagesToWrite pagefile blocks per I/O.
    //

    i = 0;
    Page = (PPFN_NUMBER)(Mdl + 1);

    for (i = 0; i < MaxPagesToWrite; i += 1) {
        *Page = ZeroedPageFrame;
        Page += 1;
    }

    count = 0;
    write = FALSE;
    SATISFY_OVERZEALOUS_COMPILER (first = 0);

    LOCK_PFN (OldIrql);

    //
    // Walk the pagefile allocation bitmap collecting runs of free blocks;
    // flush a run whenever it reaches the cluster size, the end of the
    // file, or an in-use block terminates it.  Block 0 is never scanned.
    //

    for (i = 1; i < PagingFile->Size; i += 1) {

        if (RtlCheckBit (PagingFile->Bitmap, (ULONG) i) == 0) {

            //
            // Claim the pagefile location as the modified writer
            // may already be scanning.
            //

            RtlSetBit (PagingFile->Bitmap, (ULONG) i);

            if (count == 0) {
                first = i;
            }
            count += 1;
            if ((count == MaxPagesToWrite) || (i == PagingFile->Size - 1)) {
                write = TRUE;
            }
        }
        else {

            if (count != 0) {

                //
                // Issue a write.
                //

                write = TRUE;
            }
        }

        if (write) {

            //
            // Drop the PFN lock for the duration of the paging I/O.
            //

            UNLOCK_PFN (OldIrql);

            StartingOffset.QuadPart = (LONGLONG)first << PAGE_SHIFT;
            Mdl->ByteCount = count << PAGE_SHIFT;
            KeClearEvent (&IoEvent);

            Status = IoSynchronousPageWrite (PagingFile->File,
                                             Mdl,
                                             &StartingOffset,
                                             &IoEvent,
                                             &IoStatus);

            //
            // Ignore all I/O failures - there is nothing that can
            // be done at this point.
            //

            if (!NT_SUCCESS (Status)) {
                KeSetEvent (&IoEvent, 0, FALSE);
            }

            Status = KeWaitForSingleObject (&IoEvent,
                                            WrPageOut,
                                            KernelMode,
                                            FALSE,
                                            (PLARGE_INTEGER)&MmTwentySeconds);

            if (Status == STATUS_TIMEOUT) {

                //
                // The write did not complete in 20 seconds, assume
                // that the file systems are hung and return an error.
                //
                // Note the zero page (and any MDL system virtual address a
                // driver may have created) is leaked because we don't know
                // what the filesystem or storage stack might (still) be
                // doing to them.
                //

                Pfn1 = MI_PFN_ELEMENT (ZeroedPageFrame);
                LOCK_PFN (OldIrql);

                //
                // Increment the reference count on the zeroed page to ensure
                // it is never freed.
                //

                InterlockedIncrementPfn ((PSHORT)&Pfn1->u3.e2.ReferenceCount);

                RtlClearBits (PagingFile->Bitmap, (ULONG) first, count);

                break;
            }

            //
            // The storage stack may have mapped the MDL -- unmap it before
            // the MDL is reused for the next run.
            //

            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
                MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
            }

            write = FALSE;
            LOCK_PFN (OldIrql);

            //
            // Release our temporary claim on the now-zeroed blocks.
            //

            RtlClearBits (PagingFile->Bitmap, (ULONG) first, count);
            count = 0;
        }
    }

    UNLOCK_PFN (OldIrql);

    KeSetEvent (AllDone, 0, FALSE);

    return;
}
BOOLEAN
MiShutdownSystem (
    VOID
    )

/*++

Routine Description:

    This function performs the shutdown of memory management.  This
    is accomplished by writing out all modified pages which are
    destined for files other than the paging file.

    All processes have already been killed, the registry shutdown and
    shutdown IRPs already sent.  On return from this phase all mapped
    file data must be flushed and the unused segment list emptied.
    This releases all the Mm references to file objects, allowing many
    drivers (especially the network) to unload.

Arguments:

    None.

Return Value:

    TRUE if the pages were successfully written, FALSE otherwise.

--*/

{
    SIZE_T ImportListSize;
    PLOAD_IMPORTS ImportList;
    PLOAD_IMPORTS ImportListNonPaged;
    PLIST_ENTRY NextEntry;
    PKLDR_DATA_TABLE_ENTRY DataTableEntry;
    PFN_NUMBER ModifiedPage;
    PMMPFN Pfn1;
    PSUBSECTION Subsection;
    PCONTROL_AREA ControlArea;
    PPFN_NUMBER Page;
    PFILE_OBJECT FilePointer;
    ULONG ConsecutiveFileLockFailures;
    // Stack storage for an MDL header plus a write-cluster of PFN slots.
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + MM_MAXIMUM_WRITE_CLUSTER];
    PMDL Mdl;
    NTSTATUS Status;
    KEVENT IoEvent;
    IO_STATUS_BLOCK IoStatus;
    KIRQL OldIrql;
    LARGE_INTEGER StartingOffset;
    ULONG count;
    ULONG i;

    //
    // Don't do this more than once.
    //

    if (MmSystemShutdown == 0) {

        Mdl = (PMDL) MdlHack;
        Page = (PPFN_NUMBER)(Mdl + 1);

        KeInitializeEvent (&IoEvent, NotificationEvent, FALSE);

        MmInitializeMdl (Mdl, NULL, PAGE_SIZE);
        Mdl->MdlFlags |= MDL_PAGES_LOCKED;

        MmLockPageableSectionByHandle (ExPageLockHandle);

        LOCK_PFN (OldIrql);

        //
        // Walk the modified-page list writing out (one page at a time)
        // every modified page destined for a mapped file.  The scan
        // restarts from the head after each write because the PFN lock
        // is dropped for the I/O.
        //

        ModifiedPage = MmModifiedPageListHead.Flink;
        while (ModifiedPage != MM_EMPTY_LIST) {

            //
            // There are modified pages.
            //

            Pfn1 = MI_PFN_ELEMENT (ModifiedPage);

            if (Pfn1->OriginalPte.u.Soft.Prototype == 1) {

                //
                // This page is destined for a file.
                //

                Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte);
                ControlArea = Subsection->ControlArea;
                if ((!ControlArea->u.Flags.Image) &&
                   (!ControlArea->u.Flags.NoModifiedWriting)) {

                    MiUnlinkPageFromList (Pfn1);

                    //
                    // Issue the write.
                    //

                    MI_SET_MODIFIED (Pfn1, 0, 0x28);

                    //
                    // Up the reference count for the physical page as there
                    // is I/O in progress.
                    //

                    MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1);

                    *Page = ModifiedPage;
                    ControlArea->NumberOfMappedViews += 1;
                    ControlArea->NumberOfPfnReferences += 1;

                    UNLOCK_PFN (OldIrql);

                    StartingOffset.QuadPart = MiStartingOffset (Subsection,
                                                                Pfn1->PteAddress);
                    Mdl->StartVa = NULL;

                    ConsecutiveFileLockFailures = 0;
                    FilePointer = ControlArea->FilePointer;

retry:
                    KeClearEvent (&IoEvent);

                    Status = FsRtlAcquireFileForCcFlushEx (FilePointer);

                    if (NT_SUCCESS(Status)) {

                        Status = IoSynchronousPageWrite (FilePointer,
                                                         Mdl,
                                                         &StartingOffset,
                                                         &IoEvent,
                                                         &IoStatus);

                        //
                        // Release the file we acquired.
                        //

                        FsRtlReleaseFileForCcFlush (FilePointer);
                    }

                    if (!NT_SUCCESS(Status)) {

                        //
                        // Only try the request more than once if the
                        // filesystem said it had a deadlock.
                        //

                        if (Status == STATUS_FILE_LOCK_CONFLICT) {
                            ConsecutiveFileLockFailures += 1;
                            if (ConsecutiveFileLockFailures < 5) {
                                KeDelayExecutionThread (KernelMode,
                                                        FALSE,
                                                        (PLARGE_INTEGER)&MmShortTime);
                                goto retry;
                            }
                            goto wait_complete;
                        }

                        //
                        // Ignore all I/O failures - there is nothing that
                        // can be done at this point.
                        //

                        KeSetEvent (&IoEvent, 0, FALSE);
                    }

                    Status = KeWaitForSingleObject (&IoEvent,
                                                    WrPageOut,
                                                    KernelMode,
                                                    FALSE,
                                                    (PLARGE_INTEGER)&MmTwentySeconds);

wait_complete:

                    //
                    // The storage stack may have mapped the MDL -- unmap it
                    // before the MDL is reused for the next page.
                    //

                    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) {
                        MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl);
                    }

                    if (Status == STATUS_TIMEOUT) {

                        //
                        // The write did not complete in 20 seconds, assume
                        // that the file systems are hung and return an
                        // error.
                        //

                        LOCK_PFN (OldIrql);

                        MI_SET_MODIFIED (Pfn1, 1, 0xF);

                        MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1);
                        ControlArea->NumberOfMappedViews -= 1;
                        ControlArea->NumberOfPfnReferences -= 1;

                        //
                        // This routine returns with the PFN lock released!
                        //

                        MiCheckControlArea (ControlArea, OldIrql);

                        MmUnlockPageableImageSection (ExPageLockHandle);

                        return FALSE;
                    }

                    LOCK_PFN (OldIrql);

                    MI_REMOVE_LOCKED_PAGE_CHARGE_AND_DECREF (Pfn1);
                    ControlArea->NumberOfMappedViews -= 1;
                    ControlArea->NumberOfPfnReferences -= 1;

                    //
                    // This routine returns with the PFN lock released!
                    //

                    MiCheckControlArea (ControlArea, OldIrql);

                    LOCK_PFN (OldIrql);

                    //
                    // Restart scan at the front of the list.
                    //

                    ModifiedPage = MmModifiedPageListHead.Flink;
                    continue;
                }
            }
            ModifiedPage = Pfn1->u1.Flink;
        }

        UNLOCK_PFN (OldIrql);

        //
        // Indicate to the modified page writer that the system has
        // shutdown.
        //

        MmSystemShutdown = 1;

        //
        // Check to see if the paging file should be overwritten.
        // Only free blocks are written.
        //

        if (MmZeroPageFile) {
            MiZeroAllPageFiles ();
        }

        MmUnlockPageableImageSection (ExPageLockHandle);
    }

    if (PoCleanShutdownEnabled ()) {

        //
        // Empty the unused segment list.
        //

        LOCK_PFN (OldIrql);
        MmUnusedSegmentForceFree = (ULONG)-1;
        KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);

        //
        // Give it 5 seconds to empty otherwise assume the filesystems are
        // hung and march on.
        //

        for (count = 0; count < 500; count += 1) {

            if (IsListEmpty(&MmUnusedSegmentList)) {
                break;
            }

            UNLOCK_PFN (OldIrql);

            KeDelayExecutionThread (KernelMode,
                                    FALSE,
                                    (PLARGE_INTEGER)&MmShortTime);
            LOCK_PFN (OldIrql);

#if DBG
            if (count == 400) {

                //
                // Everything should have been flushed by now.  Give the
                // filesystem team a chance to debug this on checked builds.
                //

                ASSERT (FALSE);
            }
#endif

            //
            // Resignal if needed in case more closed file objects triggered
            // additional entries.
            //

            if (MmUnusedSegmentForceFree == 0) {
                MmUnusedSegmentForceFree = (ULONG)-1;
                KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE);
            }
        }

        UNLOCK_PFN (OldIrql);

        //
        // Get rid of any paged pool references as they will be illegal
        // by the time MmShutdownSystem is called again since the filesystems
        // will have shutdown.
        //

        KeWaitForSingleObject (&MmSystemLoadLock,
                               WrVirtualMemory,
                               KernelMode,
                               FALSE,
                               (PLARGE_INTEGER)NULL);

        NextEntry = PsLoadedModuleList.Flink;
        while (NextEntry != &PsLoadedModuleList) {

            DataTableEntry = CONTAINING_RECORD (NextEntry,
                                                KLDR_DATA_TABLE_ENTRY,
                                                InLoadOrderLinks);

            //
            // Relocate each module's import list from paged to non-paged
            // pool so it stays accessible after the pagefiles go away.
            //

            ImportList = (PLOAD_IMPORTS)DataTableEntry->LoadedImports;

            if ((ImportList != (PVOID)LOADED_AT_BOOT) &&
                (ImportList != (PVOID)NO_IMPORTS_USED) &&
                (!SINGLE_ENTRY(ImportList))) {

                ImportListSize = ImportList->Count * sizeof(PVOID) + sizeof(SIZE_T);

                ImportListNonPaged = (PLOAD_IMPORTS) ExAllocatePoolWithTag (NonPagedPool,
                                                                            ImportListSize,
                                                                            'TDmM');

                if (ImportListNonPaged != NULL) {
                    RtlCopyMemory (ImportListNonPaged, ImportList, ImportListSize);
                    ExFreePool (ImportList);
                    DataTableEntry->LoadedImports = ImportListNonPaged;
                }
                else {

                    //
                    // Don't bother with the clean shutdown at this point.
                    //

                    PopShutdownCleanly = FALSE;
                    break;
                }
            }

            //
            // Free the full DLL name as it is pageable.
            //

            if (DataTableEntry->FullDllName.Buffer != NULL) {
                ExFreePool (DataTableEntry->FullDllName.Buffer);
                DataTableEntry->FullDllName.Buffer = NULL;
            }

            NextEntry = NextEntry->Flink;
        }

        KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE);

        //
        // Close all the pagefile handles, note we still have an object
        // reference to each keeping the underlying object resident.
        // At the end of Phase1 shutdown we'll release those references
        // to trigger the storage stack unload.  The handle close must be
        // done here however as it will reference pageable structures.
        //

        for (i = 0; i < MmNumberOfPagingFiles; i += 1) {

            //
            // Free each pagefile name now as it resides in paged pool and
            // may need to be inpaged to be freed.  Since the paging files
            // are going to be shutdown shortly, now is the time to access
            // pageable stuff and get rid of it.  Zeroing the buffer pointer
            // is sufficient as the only accesses to this are from the
            // try-except-wrapped GetSystemInformation APIs and all the
            // user processes are gone already.
            //

            ASSERT (MmPagingFile[i]->PageFileName.Buffer != NULL);
            ExFreePool (MmPagingFile[i]->PageFileName.Buffer);
            MmPagingFile[i]->PageFileName.Buffer = NULL;

            ZwClose (MmPagingFile[i]->FileHandle);
        }
    }
    return TRUE;
}