/*
 * pmm_init - set up the physical memory manager from multiboot info.
 *
 * Places the placement pointer (pmm_pos) above upper memory and above the
 * last boot module, then seeds the free-page stack with every page the
 * multiboot memory map marks as free.
 *
 * NOTE(review): per the multiboot spec, mem_upper is a KB count of memory
 * above 1 MiB, not an address; using it directly as an address here looks
 * suspicious -- confirm against how the rest of the project treats it.
 */
void pmm_init(mboot_info_t *mboot)
{
    mboot_mod_t *mods = (mboot_mod_t *)(assert_higher(mboot->mods_addr));

    /* Start placement allocation on the next page boundary. */
    pmm_pos = (mboot->mem_upper + PAGE_SIZE) & PAGE_MASK;

    /* Never place over the boot modules: skip past the last module's end. */
    if (pmm_pos < (mods[mboot->mods_count-1].mod_end))
        pmm_pos = (mods[mboot->mods_count-1].mod_end + PAGE_SIZE) & PAGE_MASK;

    pmm_running = FALSE;

    /* Fill physical page stack with free pages from the memory map. */
    mboot_mmap_entry_t *me = (mboot_mmap_entry_t *)(assert_higher(mboot->mmap_addr));
    uintptr_t mmap_end = assert_higher(mboot->mmap_addr) + mboot->mmap_length;

    while ((uintptr_t)me < mmap_end) {
        if (me->type == MBOOT_MEM_FLAG_FREE) {
            uint32_t j;
            /* FIX: the end address of a region is exclusive. The old
             * condition "j <= base + length" freed one page past the end
             * of every free region, handing out a frame that may not
             * exist or may belong to a reserved area. */
            for (j = me->base_addr_lower; j < (me->base_addr_lower + me->length_lower); j += PAGE_SIZE) {
                pmm_free_page(j);
            }
        }
        /* The multiboot size field does not include itself. */
        me = (mboot_mmap_entry_t *)((uint32_t)me + me->size + sizeof(uint32_t));
    }
}
/*
 * put_l2_table - free the page backing an L2 translation table once no
 * L1 entry in its covering group references it any longer.
 *
 * @aspace:   address space that owns the translation tables
 * @l1_index: index of the L1 slot that was just cleared
 * @l2_pa:    physical address of the L2 table page
 *
 * L2 tables are allocated a page at a time, shared by L1E_PER_PAGE
 * consecutive L1 slots, so the whole ROUNDDOWN-aligned group must be
 * empty before the page can be returned to the PMM.
 */
static void put_l2_table(arch_aspace_t *aspace, uint32_t l1_index, paddr_t l2_pa)
{
    DEBUG_ASSERT(aspace);

    /* check if any l1 entry points to this l2 table */
    for (uint i = 0; i < L1E_PER_PAGE; i++) {
        uint32_t tt_entry = aspace->tt_virt[ROUNDDOWN(l1_index, L1E_PER_PAGE) + i];
        if ((tt_entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE) {
            /* at least one slot still uses the table: keep it */
            return;
        }
    }

    /* we can free this l2 table */
    vm_page_t *page = paddr_to_vm_page(l2_pa);
    if (!page)
        panic("bad page table paddr 0x%lx\n", l2_pa);

    /* verify that it is in our page list */
    DEBUG_ASSERT(list_in_list(&page->node));
    list_delete(&page->node);

    LTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
    pmm_free_page(page);
}
static bool guest_physical_address_space_map_interrupt_controller() { BEGIN_TEST; if (!hypervisor_supported()) { return true; } // Setup. ktl::unique_ptr<hypervisor::GuestPhysicalAddressSpace> gpas; zx_status_t status = create_gpas(&gpas); EXPECT_EQ(ZX_OK, status, "Failed to create GuestPhysicalAddressSpace\n"); fbl::RefPtr<VmObject> vmo; status = create_vmo(PAGE_SIZE, &vmo); EXPECT_EQ(ZX_OK, status, "Failed to create VMO\n"); status = create_mapping(gpas->RootVmar(), vmo, 0); EXPECT_EQ(ZX_OK, status, "Failed to create mapping\n"); // Allocate a page to use as the APIC page. paddr_t paddr = 0; vm_page* vm_page; status = pmm_alloc_page(0, &vm_page, &paddr); EXPECT_EQ(ZX_OK, status, "Unable to allocate a page\n"); // Map APIC page in an arbitrary location. const vaddr_t APIC_ADDRESS = 0xffff0000; status = gpas->MapInterruptController(APIC_ADDRESS, paddr, PAGE_SIZE); EXPECT_EQ(ZX_OK, status, "Failed to map APIC page\n"); // Cleanup pmm_free_page(vm_page); END_TEST; }
/*
 * init_pmm - initialise the physical memory manager.
 *
 * Starts the placement pointer on the first page boundary past the kernel
 * image (&_end), then pushes every page the multiboot memory map reports
 * as free onto the free-page stack.
 */
void init_pmm(mboot_info *mboot)
{
    pmm_pos = (uint32_t)((uint32_t)&_end + PAGE_SIZE) & PAGE_MASK;
    ASSERT_LOWER(pmm_pos);
    pmm_running = FALSE;

    ASSERT_HIGHER(mboot->mmap_addr);
    uint32_t i = mboot->mmap_addr;
    while (i < mboot->mmap_addr + mboot->mmap_length) {
        mmap_entry *me = (mmap_entry *)i;
        if (me->type == MBOOT_MEM_FLAG_FREE) {
            uint32_t j;
            /* FIX: region end is exclusive. The old "j <=" condition
             * freed one page past the end of each free region. */
            for (j = me->base_addr_lower; j < (me->base_addr_lower + me->length_lower); j += PAGE_SIZE) {
                pmm_free_page(j);
            }
        }
        /* The multiboot size field does not include itself. */
        i += me->size + sizeof(uint32_t);
    }
}
void init_pmm() { mmap_entry_t *mmap_start_addr = (mmap_entry_t *)glb_mboot_ptr->mmap_addr; mmap_entry_t *mmap_end_addr = (mmap_entry_t *)glb_mboot_ptr->mmap_addr + glb_mboot_ptr->mmap_length; mmap_entry_t *map_entry; for (map_entry = mmap_start_addr; map_entry < mmap_end_addr; map_entry++) { // 如果是可用内存 ( 按照协议,1 表示可用内存,其它数字指保留区域 ) if (map_entry->type == 1 && map_entry->base_addr_low == 0x100000) { // 把内核结束位置到结束位置的内存段,按页存储到页管理栈里 // 最多支持512MB的物理内存 uint32_t page_addr = map_entry->base_addr_low + (uint32_t)(kern_end - kern_start); uint32_t length = map_entry->base_addr_low + map_entry->length_low; while (page_addr < length && page_addr <= PMM_MAX_SIZE) { pmm_free_page(page_addr); page_addr += PMM_PAGE_SIZE; phy_page_count++; } } } }
/* Kernel entry point: brings up the core subsystems, hands every usable
 * RAM region from the multiboot memory map to the PMM, then exercises
 * map/unmap -- the final write after unmap is expected to page fault. */
int main(multiboot_t *mboot_ptr)
{
    monitor_clear();
    init_gdt ();
    init_idt ();
    init_timer (20);
    init_pmm (mboot_ptr->mem_upper);
    init_vmm ();

    /* Walk the multiboot memory map and free every page of usable RAM.
     * Entries are variable-sized; the size field excludes itself, hence
     * the extra sizeof(uint32_t) when advancing. */
    uint32_t cursor = mboot_ptr->mmap_addr;
    uint32_t map_end = mboot_ptr->mmap_addr + mboot_ptr->mmap_length;
    while (cursor < map_end) {
        mmap_entry_t *region = (mmap_entry_t *)cursor;
        if (region->type == 1) {
            uint32_t page = region->base_addr_low;
            uint32_t limit = region->base_addr_low + region->length_low;
            for (; page < limit; page += 0x1000)
                pmm_free_page (page);
        }
        cursor += region->size + sizeof (uint32_t);
    }

    printk ("Paging initialised.\n");

    /* Map a page, touch it, unmap it, touch again (should fault). */
    printk ("Mapping page...\n");
    uint32_t addr = 0x900000;
    map (addr, 0x500000, PAGE_PRESENT|PAGE_WRITE);

    printk ("Accessing page...\n");
    volatile uint32_t *_addr = (volatile uint32_t*)addr;
    *_addr = 0x567;
    printk ("*addr: %x\n", *_addr);

    printk ("Unmapping page...\n");
    unmap (addr);

    printk ("Trying to access again (should page fault)...\n");
    *_addr = 0x678;
    printk ("*addr: %x\n", *_addr);

    asm volatile ("sti");
    for (;;);
    return 0xdeadbeef;
}
/*
 * kmain - kernel entry point.
 *
 * Prints a banner, initialises GDT/IDT/keyboard/FPU/timer/PMM/VMM/heap,
 * feeds the multiboot memory map to the PMM, loads the kernel ELF symbol
 * table, enables interrupts, then deliberately panics to test the panic
 * mechanism.
 */
int kmain(multiboot_t *mboot_ptr)
{
    monitor_clear();
    /* ASCII-art boot banner. */
    printk("8888888888 d8b 888 .d88888b. .d8888b.\n");
    printk("888 Y8P 888 d88P\" \"Y88b d88P Y88b\n");
    printk("888 888 888 888 Y88b.\n");
    printk("8888888 88888b.d88b. 888 888 888 888 \"Y888b.\n");
    printk("888 888 \"888 \"88b 888 888 888 888 \"Y88b.\n");
    printk("888 888 888 888 888 888 888 888 \"888\n");
    printk("888 888 888 888 888 888 Y88b. .d88P Y88b d88P\n");
    printk("8888888888 888 888 888 888 888 \"Y88888P\" \"Y8888P\"\n");
    init_gdt ();
    init_idt ();
    init_keyboard();
    setup_x87_fpu ();
    init_timer (20);
    init_pmm (mboot_ptr->mem_upper);
    init_vmm ();
    init_heap ();

    // Find all the usable areas of memory and inform the physical memory manager about them.
    uint32_t i = mboot_ptr->mmap_addr;
    while (i < mboot_ptr->mmap_addr + mboot_ptr->mmap_length) {
        mmap_entry_t *me = (mmap_entry_t*) i;
        // Does this entry specify usable RAM?
        if (me->type == 1) {
            uint32_t j;
            // For every page in this entry, add to the free page stack.
            for (j = me->base_addr_low; j < me->base_addr_low+me->length_low; j += 0x1000) {
                pmm_free_page (j);
            }
        }
        // The multiboot specification is strange in this respect - the size member does not include "size" itself in its calculations,
        // so we must add sizeof (uint32_t).
        i += me->size + sizeof (uint32_t);
    }

    /* Keep the kernel's ELF symbols around for backtraces/panics. */
    kernel_elf = elf_from_multiboot (mboot_ptr);
    asm volatile ("sti");
    /* Deliberate: exercises the panic path. */
    panic ("Testing panic mechanism");
    for (;;);
    return 0xdeadbeef;
}
/* Tear down an address space: return every page-table page tracked in
 * aspace->pt_page_list to the PMM. Always succeeds. */
status_t arch_mmu_destroy_aspace(arch_aspace_t *aspace)
{
    LTRACEF("aspace %p\n", aspace);

    // XXX free all of the pages allocated in aspace->pt_page_list
    for (;;) {
        vm_page_t *pt_page = list_remove_head_type(&aspace->pt_page_list, vm_page_t, node);
        if (pt_page == NULL)
            break;
        LTRACEF("freeing page %p\n", pt_page);
        pmm_free_page(pt_page);
    }

    return NO_ERROR;
}
/*
 * free_chunk - release the topmost kernel-heap chunk and contract the
 * heap by whole pages where possible.
 *
 * FIX: the old code assigned chunk->prev->next = 0 BEFORE checking
 * whether chunk->prev was NULL, so freeing the first (only) chunk
 * dereferenced a null pointer. The check must come first (the sibling
 * free_chunk implementations in this file already do this).
 */
void free_chunk(kheader_t *chunk)
{
    if (chunk->prev == 0)
        kheap_first = 0;          /* chunk was the only one: heap is empty */
    else
        chunk->prev->next = 0;    /* predecessor becomes the new list tail */

    // While the heap max can contract by a page and still be greater than the chunk address...
    while ((kheap_max - 0x1000) >= (uint32_t) chunk) {
        kheap_max -= 0x1000;
        uint32_t page;
        get_mapping(kheap_max, &page);
        pmm_free_page(page);
        unmap(kheap_max);
    }
}
/* Kernel entry point: initialises core subsystems, donates all usable RAM
 * from the multiboot memory map to the PMM, then runs a small kmalloc /
 * kfree coalescing exercise before panicking on purpose. */
int main(multiboot_t *mboot_ptr)
{
    monitor_clear();
    init_gdt ();
    init_idt ();
    init_timer (20);
    init_pmm (mboot_ptr->mem_upper);
    init_vmm ();
    init_heap ();

    /* Push every page of usable RAM from the memory map onto the PMM's
     * free-page stack. Entries are variable-sized: the size field does
     * not count itself, so advance by size + sizeof(uint32_t). */
    uint32_t cursor = mboot_ptr->mmap_addr;
    uint32_t map_end = mboot_ptr->mmap_addr + mboot_ptr->mmap_length;
    while (cursor < map_end) {
        mmap_entry_t *region = (mmap_entry_t*) cursor;
        if (region->type == 1) {
            uint32_t page = region->base_addr_low;
            uint32_t limit = region->base_addr_low + region->length_low;
            for (; page < limit; page += 0x1000)
                pmm_free_page (page);
        }
        cursor += region->size + sizeof (uint32_t);
    }

    /* Keep the kernel's ELF symbols for panic backtraces. */
    kernel_elf = elf_from_multiboot (mboot_ptr);
    asm volatile ("sti");

    /* Heap smoke test: free a+b, then d should reuse the coalesced hole. */
    void *a = kmalloc (8);
    void *b = kmalloc (8);
    void *c = kmalloc (8);
    kfree (a);
    kfree (b);
    void *d = kmalloc (24);
    printk ("a: %x, b: %x, c: %x, d: %x\n", a, b, c, d);

    panic ("Testing panic mechanism");
    for (;;);
    return 0xdeadbeef;
}
/*
 * free_page_table - return a page-table allocation to its owner.
 *
 * @vaddr:           virtual address of the table (used for heap frees)
 * @paddr:           physical address of the table (used for PMM frees)
 * @page_size_shift: log2 of the table's size in bytes
 *
 * Tables of at least a page were allocated from the PMM and are freed by
 * physical address; smaller tables came from the heap and are freed by
 * virtual address.
 */
static void free_page_table(void *vaddr, paddr_t paddr, uint page_size_shift)
{
    /* Local prototype to avoid a header dependency. */
    vm_page_t *address_to_page(paddr_t addr); /* TODO: remove */

    size_t size = 1U << page_size_shift;
    vm_page_t *page;

    if (size >= PAGE_SIZE) {
        /* Page-sized (or larger) tables live in PMM-managed frames. */
        page = address_to_page(paddr);
        if (!page)
            panic("bad page table paddr 0x%lx\n", paddr);
        pmm_free_page(page);
    } else {
        /* Sub-page tables were heap-allocated. */
        heap_free(vaddr);
    }
}
/* Release the last heap chunk and give any now-unused whole pages above
 * it back to the physical allocator. */
void free_chunk(header_t * chunk)
{
    /* Detach the chunk: it is the list tail, so either the heap becomes
     * empty or its predecessor becomes the new tail. */
    if (chunk->prev == 0)
        heap_first = 0;
    else
        chunk->prev->next = 0;

    /* Contract the heap one page at a time while a whole page above the
     * chunk can be released. */
    while ((heap_max - PAGE_SIZE) >= (uint32_t) chunk) {
        heap_max -= PAGE_SIZE;

        uint32_t frame;
        get_mapping(pgd_kern , heap_max , &frame);
        unmap(pgd_kern , heap_max);
        pmm_free_page(frame);
    }
}
/* Free the topmost heap chunk; then shrink the heap, returning every
 * whole 4 KiB page above the chunk to the physical page stack. */
void free_chunk(header_t *chunk)
{
    if (chunk->prev == 0) {
        heap_first = 0;           /* only chunk: the heap is now empty */
    } else {
        chunk->prev->next = 0;    /* predecessor becomes the new tail */
    }

    /* While one more whole page can be carved off the top of the heap... */
    while ((heap_max - 0x1000) >= (uint32_t)chunk) {
        heap_max -= 0x1000;

        uint32_t frame;
        get_mapping(heap_max, &frame);
        pmm_free_page(frame);
        unmap(heap_max);
    }
}
/* Free the topmost heap chunk. The chunk is only unlinked from the list
 * (the memory itself needs no scrubbing); afterwards the heap is shrunk
 * page by page so unused frames go back to the PMM and their mappings
 * are torn down. */
void free_chunk(header_t *chunk)
{
    /* Unlink: the heap becomes empty, or the predecessor is the new tail. */
    if (chunk->prev == 0)
        heap_first = 0;
    else
        chunk->prev->next = 0;

    /* While a whole 4K page above the chunk is free, unmap it and return
     * the backing frame so the system runs leaner. */
    while ((heap_max - PAGE_SIZE) >= (uint32_t) chunk) {
        heap_max -= PAGE_SIZE;

        uint32_t frame;
        get_mapping(pgd_kern, heap_max, &frame);
        unmap(pgd_kern, heap_max);
        pmm_free_page(frame);
    }
}
/* Free the page backing the L2 table at l2_pa once no L1 slot in its
 * aligned group still holds a page-table descriptor. One page of L2
 * tables serves L1E_PER_PAGE consecutive L1 slots, so the whole group
 * must be empty first. */
static void put_l2_table(uint32_t l1_index, paddr_t l2_pa)
{
    /* check if any l1 entry points to this l2 table */
    uint32_t group_base = ROUNDDOWN(l1_index, L1E_PER_PAGE);
    for (uint slot = 0; slot < L1E_PER_PAGE; slot++) {
        uint32_t entry = arm_kernel_translation_table[group_base + slot];
        if ((entry & MMU_MEMORY_L1_DESCRIPTOR_MASK) == MMU_MEMORY_L1_DESCRIPTOR_PAGE_TABLE)
            return; /* still referenced: keep the table page */
    }

    /* no references remain: hand the page back to the PMM */
    vm_page_t *page = address_to_page(l2_pa);
    if (!page)
        panic("bad page table paddr 0x%lx\n", l2_pa);

    LTRACEF("freeing pagetable at 0x%lx\n", l2_pa);
    pmm_free_page(page);
}
/* Free the topmost heap chunk. heap_max always tracks the top of the
 * allocated region; once the freed space above the chunk spans a whole
 * page, the page is unmapped and its frame returned to the PMM's
 * physical-page bookkeeping. */
void free_chunk(heap_t *chunk)
{
    /* Unlink the chunk: the heap empties, or the predecessor becomes the
     * new tail. */
    if (chunk->prev == 0)
        heap_first = 0;
    else
        chunk->prev->next = 0;

    /* Shrink the heap while one more whole page above the chunk is free. */
    while ((heap_max - PAGE_SIZE) >= (uint32)chunk) {
        heap_max -= PAGE_SIZE;

        /* Look up the physical frame behind this virtual address, drop the
         * mapping, then release the frame. */
        uint32 frame;
        get_mapping(pgd_kernel, heap_max, &frame);
        unmap(pgd_kernel, heap_max);
        pmm_free_page(frame);
    }
}
void init_pmm() { mmap_entry_t *mmap_start_addr = (mmap_entry_t *)glb_mboot_ptr->mmap_addr; mmap_entry_t *mmap_end_addr = mmap_start_addr + glb_mboot_ptr->mmap_length; mmap_entry_t *map_entry; for (map_entry = mmap_start_addr; map_entry < mmap_end_addr; map_entry++) { if (map_entry->type == 1 && map_entry->base_addr_low == 0x100000) { uint32_t page_addr = map_entry->base_addr_low + (uint32_t)(kern_end - kern_start); uint32_t length = map_entry->base_addr_low + map_entry->length_low; while (page_addr < length && page_addr <= PMM_MAX_SIZE) { pmm_free_page(page_addr); page_addr += PMM_PAGE_SIZE; phy_page_count++; } } } }
/* Walk the multiboot memory map and push every page of usable RAM onto
 * the PMM's free-page stack, logging each usable region. */
void pmm_collect_pages(multiboot_info_t* mboot_ptr)
{
    uint32_t cursor = mboot_ptr->mmap_addr;
    uint32_t map_end = mboot_ptr->mmap_addr + mboot_ptr->mmap_length;

    // debug
    kprintf("ignore pages before: 0x%.8x\nusable ram:\n", pmm_location);

    while (cursor < map_end) {
        multiboot_memory_map_t *region = (multiboot_memory_map_t*) cursor;

        // usable ram?
        if (region->type == 1) {
            uint32_t start = region->base_addr_low;
            uint32_t limit = region->base_addr_low + region->length_low;

            // debug
            kprintf("0x%.8x\t0x%.8x\n", start, limit);

            // For every page in this entry, add to the free page stack.
            for (uint32_t page = start; page < limit; page += 0x1000)
                pmm_free_page(page);
        }

        // The multiboot "size" field does not count itself, so advance by
        // size + sizeof(uint32_t).
        cursor += region->size + sizeof(uint32_t);
    }
}
/* Release the physical pages backing a task: its kernel stack and the
 * task_state structure itself.
 * NOTE(review): pmm_free_page is handed both task->kernel_stack and the
 * struct pointer directly; this assumes the task_state occupies exactly
 * one PMM page and that pmm_free_page accepts these values as-is --
 * confirm against pmm_free_page's signature and the allocation site. */
void task_free_kernel(struct task_state * task)
{
    pmm_free_page(task->kernel_stack);
    pmm_free_page(task);
}