/*
 * Initializes the loader heap: finds a contiguous run of HEAP_PAGES +
 * STACK_PAGES free pages, hands it to the BGET allocator and records
 * the pages as LoaderOsloaderHeap in the page lookup table.
 */
VOID MmInitializeHeap(PVOID PageLookupTable)
{
    ULONG RequiredPages;
    ULONG HeapBasePage;
#ifndef _M_ARM
    MEMORY_TYPE SavedType;
    PPAGE_LOOKUP_TABLE_ITEM LookupItems = (PPAGE_LOOKUP_TABLE_ITEM)PageLookupTable;

    /* HACK: temporarily mark pages 0x100..0x100+0xFF as system code so the
     * search below cannot place the heap where it would overlap kernel
     * space. Remember the previous type (taken from page 0x100) so the
     * region can be restored afterwards. */
    SavedType = LookupItems[0x100].PageAllocated;
    MmMarkPagesInLookupTable(PageLookupTableAddress, 0x100, 0xFF, LoaderSystemCode);
#endif

    /* Locate one contiguous block of pages for both the heap and the stack */
    RequiredPages = HEAP_PAGES + STACK_PAGES;
    HeapBasePage = MmFindAvailablePages(PageLookupTable, TotalPagesInLookupTable, RequiredPages, FALSE);

#ifndef _M_ARM
    /* Undo the reservation hack */
    MmMarkPagesInLookupTable(PageLookupTableAddress, 0x100, 0xFF, SavedType);
#endif

    if (HeapBasePage == 0)
    {
        UiMessageBox("Critical error: Can't allocate heap!");
        return;
    }

    /* Hand the region over to the BGET pool allocator */
    bpool(HeapBasePage << MM_PAGE_SHIFT, RequiredPages << MM_PAGE_SHIFT);

    /* Record those pages as owned by the loader heap */
    MmMarkPagesInLookupTable(PageLookupTableAddress, HeapBasePage, RequiredPages, LoaderOsloaderHeap);

    TRACE("Heap initialized, base 0x%08x, pages %d\n", (HeapBasePage << MM_PAGE_SHIFT), RequiredPages);
}
/*
 * Builds the initial page lookup table state from the firmware memory map:
 * everything starts out as firmware-permanent, every descriptor that fits
 * inside the table is re-marked with its real type, and finally the pages
 * occupied by the lookup table itself are reserved.
 */
VOID MmInitPageLookupTable(PVOID PageLookupTable, PFN_NUMBER TotalPageCount)
{
    const FREELDR_MEMORY_DESCRIPTOR* Descriptor = NULL;
    PFN_NUMBER TableStartPage;
    PFN_NUMBER TablePageCount;

    TRACE("MmInitPageLookupTable()\n");

    /* Treat the whole range as allocated up front; the walk below re-marks
     * everything the memory map describes, so only undescribed holes keep
     * this firmware-permanent state. */
    MmMarkPagesInLookupTable(PageLookupTable, MmLowestPhysicalPage, TotalPageCount, LoaderFirmwarePermanent);

    /* Walk every descriptor of the firmware memory map */
    while ((Descriptor = ArcGetMemoryDescriptor(Descriptor)) != NULL)
    {
        if (Descriptor->BasePage + Descriptor->PageCount > TotalPageCount)
        {
            /* Descriptor reaches beyond the lookup table - skip it */
            TRACE("Ignoring pages 0x%lx-0x%lx (%s)\n",
                  Descriptor->BasePage,
                  Descriptor->BasePage + Descriptor->PageCount,
                  MmGetSystemMemoryMapTypeString(Descriptor->MemoryType));
            continue;
        }

        /* Mark the described pages with their real type */
        TRACE("Marking pages 0x%lx-0x%lx as type %s\n",
              Descriptor->BasePage,
              Descriptor->BasePage + Descriptor->PageCount,
              MmGetSystemMemoryMapTypeString(Descriptor->MemoryType));
        MmMarkPagesInLookupTable(PageLookupTable,
                                 Descriptor->BasePage,
                                 Descriptor->PageCount,
                                 Descriptor->MemoryType);
    }

    /* Finally, reserve the pages the lookup table itself occupies */
    TableStartPage = MmGetPageNumberFromAddress(PageLookupTable);
    TablePageCount = MmGetPageNumberFromAddress((PVOID)((ULONG_PTR)PageLookupTable +
                         ROUND_UP(TotalPageCount * sizeof(PAGE_LOOKUP_TABLE_ITEM), MM_PAGE_SIZE))) -
                     TableStartPage;
    TRACE("Marking the page lookup table pages as reserved StartPage: 0x%x PageCount: 0x%x\n", TableStartPage, TablePageCount);
    MmMarkPagesInLookupTable(PageLookupTable, TableStartPage, TablePageCount, LoaderFirmwareTemporary);
}
/*
 * Tears down a heap by returning all of its pages to the system.
 * The pages are marked firmware-temporary so the kernel can reclaim them.
 */
VOID HeapDestroy(
    PVOID HeapHandle)
{
    PHEAP Heap = HeapHandle;
    ULONG_PTR FirstPage = (ULONG_PTR)Heap / MM_PAGE_SIZE;
    PFN_COUNT PageCount = (PFN_COUNT)(Heap->MaximumSize / MM_PAGE_SIZE);

    /* Mark all pages as firmware temporary, so they are free for the kernel */
    MmMarkPagesInLookupTable(PageLookupTableAddress,
                             FirstPage,
                             PageCount,
                             LoaderFirmwareTemporary);
}
/*
 * Tears down a loader heap: returns all of its pages to the system
 * (marked firmware-temporary so the kernel can reclaim them) and, in
 * debug builds, poisons the region to make use-after-destroy obvious.
 */
VOID FrLdrHeapDestroy(
    PVOID HeapHandle)
{
    PHEAP Heap = HeapHandle;

    /* Mark all pages as firmware temporary, so they are free for the kernel */
    MmMarkPagesInLookupTable(PageLookupTableAddress,
                             (ULONG_PTR)Heap / MM_PAGE_SIZE,
                             (PFN_COUNT)(Heap->MaximumSize / MM_PAGE_SIZE),
                             LoaderFirmwareTemporary);

#if DBG
    /* Make sure everything is dead. RtlFillMemory's fill value is a single
     * byte, so spell the poison pattern as 0xCC (the previous 0xCCCCCCCC
     * was silently truncated to the same byte). */
    RtlFillMemory(Heap, Heap->MaximumSize, 0xCC);
#endif
}
/*
 * Builds the initial page lookup table state from the ARC memory map:
 * every page starts out firmware-permanent, each in-range descriptor is
 * re-marked with the corresponding loader memory type, and the pages the
 * lookup table itself occupies are reserved last.
 */
VOID MmInitPageLookupTable(PVOID PageLookupTable, ULONG TotalPageCount)
{
    const MEMORY_DESCRIPTOR* MemoryDescriptor = NULL;
    TYPE_OF_MEMORY MemoryMapPageAllocated;
    ULONG PageLookupTableStartPage;
    ULONG PageLookupTablePageCount;

    TRACE("MmInitPageLookupTable()\n");

    //
    // Mark every page as allocated initially
    // We will go through and mark pages again according to the memory map
    // But this will mark any holes not described in the map as allocated
    //
    MmMarkPagesInLookupTable(PageLookupTable, MmLowestPhysicalPage, TotalPageCount, LoaderFirmwarePermanent);

    //
    // Parse the whole memory map
    //
    while ((MemoryDescriptor = ArcGetMemoryDescriptor(MemoryDescriptor)) != NULL)
    {
        //
        // Ignore descriptors that lie (partially) beyond the lookup table,
        // otherwise MmMarkPagesInLookupTable would write past its end
        //
        if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount > TotalPageCount)
        {
            TRACE("Ignoring out-of-range pages: StartPage: %d PageCount: %d\n",
                  MemoryDescriptor->BasePage, MemoryDescriptor->PageCount);
            continue;
        }

        //
        // Convert ARC memory type to loader memory type
        //
        switch (MemoryDescriptor->MemoryType)
        {
            case MemoryFree:
            {
                //
                // Allocatable memory
                //
                MemoryMapPageAllocated = LoaderFree;
                break;
            }
            case MemoryFirmwarePermanent:
            {
                //
                // Firmware permanent memory
                //
                MemoryMapPageAllocated = LoaderFirmwarePermanent;
                break;
            }
            case MemoryFirmwareTemporary:
            {
                //
                // Firmware temporary memory
                //
                MemoryMapPageAllocated = LoaderFirmwareTemporary;
                break;
            }
            case MemoryLoadedProgram:
            {
                //
                // Bootloader code
                //
                MemoryMapPageAllocated = LoaderLoadedProgram;
                break;
            }
            case MemorySpecialMemory:
            {
                //
                // OS Loader Stack
                //
                MemoryMapPageAllocated = LoaderOsloaderStack;
                break;
            }
            default:
            {
                //
                // Put something sensible here, which won't be overwritten
                //
                MemoryMapPageAllocated = LoaderSpecialMemory;
                break;
            }
        }

        //
        // Mark used pages in the lookup table
        //
        TRACE("Marking pages as type %d: StartPage: %d PageCount: %d\n",
              MemoryMapPageAllocated, MemoryDescriptor->BasePage, MemoryDescriptor->PageCount);
        MmMarkPagesInLookupTable(PageLookupTable,
                                 MemoryDescriptor->BasePage,
                                 MemoryDescriptor->PageCount,
                                 MemoryMapPageAllocated);
    }

    //
    // Mark the pages that the lookup table occupies as reserved
    //
    PageLookupTableStartPage = MmGetPageNumberFromAddress(PageLookupTable);
    PageLookupTablePageCount = MmGetPageNumberFromAddress((PVOID)((ULONG_PTR)PageLookupTable +
                                   ROUND_UP(TotalPageCount * sizeof(PAGE_LOOKUP_TABLE_ITEM), MM_PAGE_SIZE))) -
                               PageLookupTableStartPage;
    TRACE("Marking the page lookup table pages as reserved StartPage: %d PageCount: %d\n",
          PageLookupTableStartPage, PageLookupTablePageCount);
    MmMarkPagesInLookupTable(PageLookupTable, PageLookupTableStartPage, PageLookupTablePageCount, LoaderFirmwareTemporary);
}
/*
 * HeapRelease - returns every fully-free page range inside the heap to the
 * page allocator by marking it LoaderFree in the page lookup table.
 * Allocated blocks are left untouched; only whole pages lying entirely
 * between allocations are released.
 */
VOID HeapRelease(
    PVOID HeapHandle)
{
    PHEAP Heap = HeapHandle;
    PHEAP_BLOCK Block;
    PUCHAR StartAddress, EndAddress;
    PFN_COUNT FreePages, AllFreePages = 0;
    TRACE("HeapRelease(%p)\n", HeapHandle);

    /* Loop all heap chunks. A block with Size == 0 terminates the list;
       each header is followed by Block->Size payload entries, so the next
       header sits at Block + 1 + Block->Size. */
    for (Block = &Heap->Blocks;
         Block->Size != 0;
         Block = Block + 1 + Block->Size)
    {
        /* Continue, if its not free (Tag != 0 means allocated) */
        if (Block->Tag != 0)
        {
#ifdef FREELDR_HEAP_VERIFIER
            /* Verify size and redzones of this allocated block */
            ASSERT(*REDZONE_SIZE(Block) <= Block->Size * sizeof(HEAP_BLOCK));
            ASSERT(*REDZONE_LOW(Block) == REDZONE_MARK);
            ASSERT(*REDZONE_HI(Block) == REDZONE_MARK);
#endif
            continue;
        }

        /* Calculate page aligned start address of the free region.
           NOTE(review): uses PAGE_SIZE here but MM_PAGE_SIZE below —
           presumably the two are equal; confirm. */
        StartAddress = ALIGN_UP_POINTER_BY(Block->Data, PAGE_SIZE);

        /* Walk over adjacent free blocks to find the end of this free run.
           NOTE(review): assumes the terminating block (Size == 0) carries a
           non-zero Tag, otherwise this loop would walk past it — confirm
           against the heap-creation code. */
        while (Block->Tag == 0) Block = Block + Block->Size + 1;

        /* Check if this was the last block */
        if (Block->Size == 0)
        {
            /* Align the end address up to cover the end of the heap */
            EndAddress = ALIGN_UP_POINTER_BY(Block->Data, PAGE_SIZE);
        }
        else
        {
            /* Align the end address down to not cover any allocations */
            EndAddress = ALIGN_DOWN_POINTER_BY(Block->Data, PAGE_SIZE);
        }

        /* Check if we have free pages (a run shorter than one page
           aligns to an empty or inverted interval and is skipped) */
        if (EndAddress > StartAddress)
        {
            /* Calculate the size of the free region in pages */
            FreePages = (PFN_COUNT)((EndAddress - StartAddress) / MM_PAGE_SIZE);
            AllFreePages += FreePages;

            /* Now mark the pages free */
            MmMarkPagesInLookupTable(PageLookupTableAddress,
                                     (ULONG_PTR)StartAddress / MM_PAGE_SIZE,
                                     FreePages,
                                     LoaderFree);
        }

        /* bail out, if it was the last block (the for-update would
           otherwise step past the terminator) */
        if (Block->Size == 0) break;
    }

    TRACE("HeapRelease() done, freed %ld pages\n", AllFreePages);
}