/**
 * Initializes memory mirror.
 *
 * Memory mirror allocates all available ram to specific range address.
 * If 1GB paging is supported, 1GB paging is used, otherwise only 4KB
 * paging is used, which means much more physical ram is used.
 *
 * All of physical RAM [0, maxram) is mapped linearly at the virtual base
 * ADDRESS_OFFSET(RESERVED_KBLOCK_RAM_MAPPINGS).
 *
 * NOTE(review): frame_pool is temporarily set to NULL so that
 * get_free_frame() falls back to its malign() boot-heap path while the
 * mapping structures are built, and is restored via `head` once it is
 * safe — confirm the exact point at which restoring becomes safe.
 */
void initialize_memory_mirror() {
    // Stash the frame pool and disable it; see NOTE above.
    section_info_t* head = frame_pool;
    frame_pool = NULL;
    bool first_set = false;
    if (is_1GB_paging_supported() != 0) {
        // Map RAM in 1GB chunks: only pml4 + pdpt levels are needed.
        for (puint_t start=0; start < maxram; start+=1<<30) {
            puint_t vaddress = start + ADDRESS_OFFSET(RESERVED_KBLOCK_RAM_MAPPINGS);
            v_address_t va;
            // type-pun the canonical address into its index fields
            memcpy(&va, &vaddress, 8);
            puint_t* pml4 = (puint_t*)ALIGN(physical_to_virtual(get_active_page()));
            if (!PRESENT(pml4[va.pml])) {
                // allocate and install a fresh pdpt for this pml4 slot
                pdpt_t pdpt;
                memset(&pdpt, 0, sizeof(pdpt_t));
                pdpt.number = get_free_frame();
                pdpt.flaggable.present = 1;
                pdpt.flaggable.us = 0;  // supervisor-only
                pdpt.flaggable.rw = 1;
                pml4[va.pml] = pdpt.number;
                // zero the new table so stale entries can't leak through
                memset((void*)physical_to_virtual(ALIGN(pdpt.number)), 0, 0x1000);
            }
            puint_t* pdpt = (puint_t*)ALIGN(physical_to_virtual(pml4[va.pml]));
            // 1GB page entry: physical base `start`, present+rw, ps=1 marks huge page
            page_directory1GB_t dir;
            memset(&dir, 0, sizeof(page_directory1GB_t));
            dir.number = start;
            dir.flaggable.present = 1;
            dir.flaggable.ps = 1;
            dir.flaggable.rw = 1;
            pdpt[va.directory_ptr] = dir.number;
            if (!first_set) {
                // restore the frame pool after the first mapping is in place
                first_set = true;
                frame_pool = head;
            }
        }
    } else {
        // No 1GB pages: map RAM with 4KB pages (full 4-level walk per page).
        for (puint_t start=0; start < maxram; start+=0x1000) {
            // NOTE(review): kend/tmp_heap heuristic decides when the boot
            // heap is nearly exhausted and the frame pool must be restored
            // — confirm the 0x40000/0x4000 margins against the boot layout.
            puint_t kend = kernel_tmp_heap_start - 0xFFFFFFFF80000000 + 0x40000;
            if (kend+0x4000 >= tmp_heap && !first_set) {
                first_set = true;
                frame_pool = head;
            }
            puint_t vaddress = start + ADDRESS_OFFSET(RESERVED_KBLOCK_RAM_MAPPINGS);
            // get_page(..., true) allocates missing paging substructures
            puint_t* paddress = get_page(vaddress, get_active_page(), true);
            page_t page;
            memset(&page, 0, sizeof(page_t));
            page.address = start;
            page.flaggable.present = 1;
            page.flaggable.rw = 1;
            page.flaggable.us = 0;  // supervisor-only
            *paddress = page.address;
        }
    }
}
/**
 * Unmaps vaddress in the page hierarchy rooted at cr3 and releases any
 * paging structures (page table, page directory, pdpt) that become
 * completely empty as a result, cascading upwards level by level.
 *
 * Returns silently if any level on the way down is not present.
 * NOTE(review): the mapped frame itself is not freed here, only the
 * paging structures — confirm callers release the data frame separately.
 */
void free_page_structure(uintptr_t vaddress, uintptr_t cr3) {
    v_address_t va;
    // type-pun the canonical address into its pml/directory/table indices
    memcpy(&va, &vaddress, 8);
    puint_t* pml4 = (puint_t*)ALIGN(physical_to_virtual(cr3));
    if (!PRESENT(pml4[va.pml])) {
        return;
    }
    puint_t* pdpt = (puint_t*)ALIGN(physical_to_virtual(pml4[va.pml]));
    if (!PRESENT(pdpt[va.directory_ptr])) {
        return;
    }
    puint_t* pdir = (puint_t*)ALIGN(physical_to_virtual(pdpt[va.directory_ptr]));
    if (!PRESENT(pdir[va.directory])) {
        return;
    }
    puint_t* pt = (puint_t*)ALIGN(physical_to_virtual(pdir[va.directory]));
    pt[va.table] = 0; // clear the page-table entry for vaddress
    // if any other entry in this page table is live, stop cascading
    for (size_t i=0; i<512; i++) {
        if (pt[i] != 0)
            return;
    }
    // page table is empty: free its frame and clear the pdir entry
    free_frame(ALIGN(pdir[va.directory]));
    pdir[va.directory] = 0;
    // if any other entry in this page directory is live, stop cascading
    for (size_t i=0; i<512; i++) {
        if (pdir[i] != 0)
            return;
    }
    // page directory is empty: free its frame and clear the pdpt entry
    free_frame(ALIGN(pdpt[va.directory_ptr]));
    pdpt[va.directory_ptr] = 0;
    // if any other entry in this pdpt is live, stop cascading
    for (size_t i=0; i<512; i++) {
        if (pdpt[i] != 0)
            return;
    }
    // pdpt is empty: free its frame and clear the pml4 entry
    free_frame(ALIGN(pml4[va.pml]));
    pml4[va.pml] = 0;
}
/**
 * Looks up the frame_info_t bookkeeping record for the physical frame
 * address fa, or returns NULL if fa is not covered by any pool section.
 */
static frame_info_t* get_frame_info(puint_t fa) {
    for (section_info_t* sec = (section_info_t*)physical_to_virtual((puint_t)frame_pool);
            sec != NULL;
            sec = (section_info_t*)physical_to_virtual((puint_t)sec->next_section)) {
        // skip sections whose [start_word, end_word) range misses fa
        if (sec->start_word > fa || sec->end_word <= fa)
            continue;
        // one frame_info_t per 4KB frame, indexed from the section start
        uint32_t slot = (fa - sec->start_word) / 0x1000;
        frame_info_t* records = (frame_info_t*)physical_to_virtual((puint_t)sec->frame_array);
        return &records[slot];
    }
    return NULL;
}
void *alloc_pages(unsigned int count) { if (count == 0 || count > ram_pages - pages_reserved) { printf("alloc_pages: sorry, can't allocate %d pages (only %d RAM pages available)\n", count, ram_pages - pages_reserved); shutdown(); } // look for count free pages in a row int seen = 0; for (int i = 0; i < ram_pages - pages_reserved; i++) { int end = (page_alloc_hint + i) % (ram_pages - pages_reserved); if (end == 0) seen = 0; // just wrapped around to the start of the bitmap if (bitmap_get(page_alloc_bitmap, end) == 0) seen++; if (seen == count) { int start = end - count + 1; // start through end (inclusive) are all free for (i = start; i <= end; i++) bitmap_set(page_alloc_bitmap, i, 1); page_alloc_hint = (end + 1) % (ram_pages - pages_reserved); return physical_to_virtual((ram_start_page + pages_reserved + start) << 12); } } printf("alloc_pages: no free pages left, sorry\n"); shutdown(); }
/** * Computes virtual to physical mapping. * * Checks for page hierarchy and determines what physical address will be. * If page structures are missing on the way to decode, physical address cannot * be found and this valid will contain 0, otherwise valid will contain 1 * * 1GB page sector (ram identity map) behaves slightly different, because it * needs different v_address_t type. */ ruint_t virtual_to_physical(uintptr_t vaddress, uintptr_t cr3, uint8_t* valid) { *valid = 1; v_address_t va; memcpy(&va, &vaddress, 8); puint_t* address = (puint_t*)ALIGN(physical_to_virtual(cr3)); address = (puint_t*)ALIGN(physical_to_virtual(address[va.pml])); if (!PRESENT(address)) { *valid = 0; return 0; } if (vaddress > ADDRESS_OFFSET(RESERVED_KBLOCK_RAM_MAPPINGS) && vaddress < ADDRESS_OFFSET((RESERVED_KBLOCK_RAM_MAPPINGS+1)) && is_1GB_paging_supported()) { v_address1GB_t va = *((v_address1GB_t*) &vaddress); page_directory1GB_t pd1gb; pd1gb.number = (uint64_t) address; return pd1gb.flaggable.address + va.offset; } address = (puint_t*)ALIGN(physical_to_virtual(address[va.directory_ptr])); if (!PRESENT(address)) { *valid = 0; return 0; } address = (puint_t*)ALIGN(physical_to_virtual(address[va.directory])); if (!PRESENT(address)) { *valid = 0; return 0; } if (!PRESENT(*address)) { *valid = 0; return 0; } puint_t physadd = *address; return ALIGN(physadd) + va.offset; }
/**
 * Pops one free physical frame from the frame pool and returns its
 * physical address, marking its usage_count as 1.
 *
 * Before the frame pool exists (frame_pool == NULL) it falls back to
 * carving a 4KB-aligned page out of the boot heap via malign().
 * Returns 0 when every section of the pool is exhausted.
 */
static puint_t get_free_frame() {
    if (frame_pool == NULL) {
        // boot path: allocate from the temporary heap and convert the
        // kernel-virtual address back to physical by subtracting the
        // kernel mapping base
        // NOTE(review): malign() result is not checked for NULL — confirm
        // the boot heap cannot be exhausted here.
        return ((puint_t)malign(0x1000, 0x1000)-0xFFFFFFFF80000000);
    } else {
        section_info_t* section = (section_info_t*)physical_to_virtual((puint_t)frame_pool);
        while (section != NULL) {
            if (section->head != NULL) {
                // pop the top element of this section's free-frame stack
                stack_element_t* se = (stack_element_t*) physical_to_virtual((puint_t)section->head);
                section->head = se->next;
                // mark the frame as referenced once
                ((frame_info_t*)physical_to_virtual((puint_t)section->frame_array)) [se->array_ord].usage_count = 1;
                return se->frame_address;
            }
            section = (section_info_t*)physical_to_virtual((puint_t)section->next_section);
        }
        // NOTE(review): this unlock has no matching lock inside this
        // function, and the success paths above return WITHOUT unlocking —
        // verify the caller's __frame_lock protocol; this looks asymmetric.
        proc_spinlock_unlock(&__frame_lock);
        return 0;
    }
}
/**
 * Deallocates frame from frame_map.
 *
 * Drops one usage reference from the frame at frame_address (and one COW
 * reference, if any). When the usage count reaches zero, the frame is
 * pushed back onto its section's free stack and its contents are zeroed.
 * Addresses not covered by any pool section (most likely <2MB) are
 * silently ignored.
 */
static void free_frame(puint_t frame_address) {
    puint_t fa = ALIGN(frame_address);
    section_info_t* section = (section_info_t*)physical_to_virtual((puint_t)frame_pool);
    while (section != NULL) {
        if (fa >= section->start_word && fa < section->end_word) {
            // one frame_info_t per 4KB frame, indexed from the section start
            uint32_t idx = (fa-section->start_word) / 0x1000;
            frame_info_t* fi = &((frame_info_t*)
                    physical_to_virtual((puint_t)section->frame_array))[idx];
            // BUG FIX: guard against double free — decrementing a zero
            // usage_count would underflow and corrupt the accounting.
            if (fi->usage_count == 0) {
                return;
            }
            --fi->usage_count;
            if (fi->cow_count > 0)
                --fi->cow_count;
            if (fi->usage_count == 0) {
                // last reference gone: push the frame back on the free stack
                stack_element_t* se = (stack_element_t*)
                        physical_to_virtual((puint_t)fi->bound_stack_element);
                se->next = section->head;
                section->head = fi->bound_stack_element;
                // scrub the freed frame so stale data never leaks to the
                // next owner
                memset((void*)physical_to_virtual(
                        ((stack_element_t*)physical_to_virtual((puint_t)section->head))->frame_address),
                        0x0, 0x1000);
            }
            return;
        } else {
            section = (section_info_t*)physical_to_virtual((puint_t)section->next_section);
        }
    }
    // unmapped address not in a pool, most likely <2MB.
}
unsigned int virtual_to_physical(void *vptr) { // If the virtual address is of the form 0xC0000000 + N, then the physical // address is just N (as long as we don't modify any of these mappings). if (vptr >= (void *)0xC0000000) return (vptr - (void *)0xC0000000); // If the virtual address is not of this form, then the physical address could // be anything, and we have to traverse the pagetables to figure it out. unsigned int vaddr = (unsigned int)vptr; unsigned int pdi = vaddr >> 22; unsigned int pti = (vaddr >> 12) & 0x3ff; unsigned int off = vaddr & 0xfff; unsigned int context = current_cpu_context(); unsigned int ppn = context >> 12; if (ppn < ram_start_page || ppn >= ram_end_page) { printf("context register seems to point to non-RAM\n"); return NOPAGE; } unsigned int *pd = physical_to_virtual(ppn << 12); unsigned int pde = pd[pdi]; if (!(pde & 0x1)) { printf("PDE is invalid for virtual address %p\n", vptr); return NOPAGE; } unsigned int *pt = physical_to_virtual(pde & ~0xFFF); unsigned int pte = pt[pti]; if (!(pte & 0x1)) { printf("PTE is invalid for virtual address %p\n", vptr); return NOPAGE; } return (pte & ~0xFFF) | off; }
static void page_alloc_init() { // how many pages do we need for our free/busy bitmap? int bits_per_page = 8*PAGE_SIZE; int n = (ram_pages + bits_per_page - 1) / bits_per_page; // we assume that the bootpages were taken sequentially, starting at // ram_start_page, so we can just take the next n pages for our bitmap. page_alloc_bitmap = physical_to_virtual((ram_start_page + bootparams->bootpages) << 12); memset(page_alloc_bitmap, 0, n * PAGE_SIZE); // we forbid anything lower than pages_reserved from ever being freed, so we // don't even keep it in the bitmap. pages_reserved = bootparams->bootpages + n; // for allocation, start the search near page_alloc_hint page_alloc_hint = 0; }
void keyboard_init() { /* Find out where I/O region is in memory. */ for (int i = 0; i < 16; i++) { if (bootparams->devtable[i].type == DEV_TYPE_KEYBOARD) { puts("Detected keyboard device..."); // find a virtual address that maps to this I/O region dev_kbd = physical_to_virtual(bootparams->devtable[i].start); // also allow keyboard interrupts set_cpu_status(current_cpu_status() | (1 << (8+INTR_KEYBOARD))); puts("...keyboard driver is ready."); return; } } }
/* kernel entry point called at the end of the boot sequence */
void __boot() {
    if (current_cpu_id() == 0) {
        /* core 0 performs all one-time initialization */
        // boot parameters live on physical page 0
        bootparams = physical_to_virtual(0x00000000);
        console_init();
        mem_init();
        trap_init();
        keyboard_init();
        // bring all other cores online
        set_cpu_enable(0xFFFFFFFF);
        network_init_pipeline();
        network_start_receive();
        // NOTE(review): second identical set_cpu_enable(0xFFFFFFFF) call —
        // looks redundant with the one above; confirm whether the network
        // init can disable cores in between.
        set_cpu_enable(0xFFFFFFFF);
    }
    // every core (including 0) enters its main loop here
    core_start(current_cpu_id());
    while (1)
        ;
    // NOTE(review): unreachable — the loop above never exits
    shutdown();
}
/* kernel entry point called at the end of the boot sequence */
void __boot() {
    if (current_cpu_id() == 0) {
        /* core 0 boots first, and does all of the initialization */
        // boot parameters are on physical page 0
        bootparams = physical_to_virtual(0x00000000);
        // initialize console early, so output works
        console_init();
        // output should now work
        printf("Welcome to my kernel!\n");
        printf("Running on a %d-way multi-core machine\n", current_cpu_exists());
        // initialize memory allocators
        mem_init();
        // prepare to handle interrupts, exceptions, etc.
        trap_init();
        // initialize keyboard late, since it isn't really used by anything else
        keyboard_init();
        // see which cores are already on
        for (int i = 0; i < 32; i++)
            printf("CPU[%d] is %s\n", i, (current_cpu_enable() & (1<<i)) ? "on" : "off");
        // turn on all other cores
        set_cpu_enable(0xFFFFFFFF);
        // see which cores got turned on
        busy_wait(0.1);
        for (int i = 0; i < 32; i++)
            printf("CPU[%d] is %s\n", i, (current_cpu_enable() & (1<<i)) ? "on" : "off");
    } else {
        /* remaining cores boot after core 0 turns them on */
        // nothing to initialize here...
    }
    printf("Core %d of %d is alive!\n", current_cpu_id(), current_cpu_exists());
    busy_wait(current_cpu_id() * 0.1); // wait a while so messages from different cores don't get so mixed up
    // time a single large calloc as a rough allocator benchmark
    int size = 64 * 1024 * 4;
    printf("about to do calloc(%d, 1)\n", size);
    unsigned int t0 = current_cpu_cycles();
    calloc(size, 1);
    unsigned int t1 = current_cpu_cycles();
    printf("DONE (%u cycles)!\n", t1 - t0);
    while (1)
        ;
    // NOTE(review): everything below is dead code — the loop above never
    // exits. Presumably leftover test scaffolding; confirm before removing.
    for (int i = 1; i < 30; i++) {
        int size = 1 << i;
        printf("about to do calloc(%d, 1)\n", size);
        calloc(size, 1);
    }
    while (1) {
        printf("Core %d is still running...\n", current_cpu_id());
        busy_wait(4.0); // wait 4 seconds
    }
    shutdown();
}
/**
 * Tears down the boot-time low-memory mappings: flushes the 0–2MB range
 * from all TLBs and clears the first 256 entries of the active top-level
 * page table, then completes the shootdown.
 */
void deallocate_start_memory() {
    // invalidate the identity-mapped 0x0..0x200000 range on every CPU
    tlb_shootdown(get_active_page(), 0, 0x200000);
    // zero the low 256 top-level entries of the active paging structure
    // NOTE(review): that is the entire lower half of the address space,
    // while the shootdown only covers 0-2MB — confirm intent.
    uintptr_t table = physical_to_virtual((uintptr_t)get_active_page());
    memset((void*)table, 0, 256 * sizeof(uintptr_t));
    tlb_shootdown_end();
}
/**
 * Returns page allocated for that virtual address.
 *
 * If allocate_new is specified to true, it will allocate substructures
 * (pdpt, page directory, page table) if they are not present; otherwise
 * returns NULL if page structures are not present on the way to the
 * virtual address. Returned pointer points to the page entry in the
 * page table.
 *
 * `user` sets the user/supervisor flag on any newly created structures.
 * Returns 0 if allocation of an intermediate structure fails
 * (get_free_frame() returned 0).
 */
static puint_t* __get_page(uintptr_t vaddress, uintptr_t cr3, bool allocate_new, bool user) {
    v_address_t va;
    // type-pun the canonical address into its pml/directory/table indices
    memcpy(&va, &vaddress, 8);
    puint_t* pml4 = (puint_t*)ALIGN(physical_to_virtual(cr3));
    if (!PRESENT(pml4[va.pml])) {
        if (!allocate_new) {
            return 0;
        }
        // allocate a fresh pdpt and install it in the pml4 slot
        pdpt_t pdpt;
        memset(&pdpt, 0, sizeof(pdpt_t));
        pdpt.number = get_free_frame();
        if (pdpt.number == 0)
            return 0; // out of frames
        pdpt.flaggable.present = 1;
        pdpt.flaggable.us = user;
        pdpt.flaggable.rw = 1;
        pml4[va.pml] = pdpt.number;
        // zero the new table so stale entries can't leak through
        memset((void*)physical_to_virtual(ALIGN(pdpt.number)), 0, 0x1000);
    }
    puint_t* pdpt = (puint_t*)ALIGN(physical_to_virtual(pml4[va.pml]));
    if (!PRESENT(pdpt[va.directory_ptr])) {
        if (!allocate_new)
            return 0;
        // allocate a fresh page directory and install it in the pdpt slot
        page_directory_t dir;
        memset(&dir, 0, sizeof(page_directory_t));
        dir.number = get_free_frame();
        if (dir.number == 0) {
            return 0; // out of frames
        }
        dir.flaggable.present = 1;
        dir.flaggable.us = user;
        dir.flaggable.rw = 1;
        pdpt[va.directory_ptr] = dir.number;
        memset((void*)physical_to_virtual(ALIGN(dir.number)), 0, 0x1000);
    }
    puint_t* pdir = (puint_t*)ALIGN(physical_to_virtual(pdpt[va.directory_ptr]));
    if (!PRESENT(pdir[va.directory])) {
        if (!allocate_new)
            return 0;
        // allocate a fresh page table and install it in the directory slot
        page_table_t pt;
        memset(&pt, 0, sizeof(page_table_t));
        pt.number = get_free_frame();
        if (pt.number == 0) {
            return 0; // out of frames
        }
        pt.flaggable.present = 1;
        pt.flaggable.us = user;
        pt.flaggable.rw = 1;
        pdir[va.directory] = pt.number;
        memset((void*)physical_to_virtual(ALIGN(pt.number)), 0, 0x1000);
    }
    puint_t* pt = (puint_t*)ALIGN(physical_to_virtual(pdir[va.directory]));
    // pointer to the final page-table entry for vaddress
    return &pt[va.table];
}