/**
 * build_new_pagedir - build a page directory for a new process.
 * @old_phys_pd: physical page of the directory to copy from (0 = none)
 *
 * Returns the physical page number of the new directory, or 0 if the
 * directory page itself cannot be allocated. Kernel mappings
 * (PDEs below KMEM_PDE_END) are shared by copying the entries from
 * KERNEL_PD_ADDR; user PDEs, their page tables and data pages are
 * deep-copied. Panics (rather than unwinding) if memory runs out
 * mid-copy.
 */
uint_t build_new_pagedir(uint_t old_phys_pd)
{
    uint_t new_phys_pd;
    page_entry_t *new_pd, *old_pd, *new_pt, *old_pt;
    void *old_page, *new_page;
    uint_t pde, pte;

    if (!(new_phys_pd = alloc_phys_page(PD_NOSWAP_FLAG))) {
        return 0;
    }
    new_pd = temp_phys_page(0, new_phys_pd);

    // Start from a clean directory, then share the kernel mappings.
    memset(new_pd, 0, MEMORY_PAGE_SIZE);
    memcpy(new_pd, KERNEL_PD_ADDR, KMEM_PDE_END * sizeof(page_entry_t));

    if (!old_phys_pd) {
        // BUGFIX: release the temp mapping on this early-out path too;
        // it was previously left mapped.
        temp_phys_page(0, 0);
        return new_phys_pd;
    }
    old_pd = temp_phys_page(1, old_phys_pd);

    // Deep-copy every user-space page table and its pages.
    for (pde = KMEM_PDE_END; pde < MEMORY_PDE_END; ++pde) {
        if (old_pd[pde].pagenum == 0) {
            continue;
        }
        new_pd[pde] = old_pd[pde];
        new_pd[pde].pagenum = alloc_phys_page(PT_NOSWAP_FLAG);
        if (!new_pd[pde].pagenum) {
            panic("build_new_pagedir: (!new_pd[pde].pagenum) == muisti loppui!");
        }
        // TODO: support swapped-out pages while copying the pagedir
        new_pt = temp_phys_page(2, new_pd[pde].pagenum);
        memset(new_pt, 0, MEMORY_PAGE_SIZE);
        old_pt = temp_phys_page(3, old_pd[pde].pagenum);
        for (pte = 0; pte < MEMORY_PE_COUNT; ++pte) {
            new_pt[pte] = old_pt[pte];
            if (old_pt[pte].pagenum == 0) {
                continue;
            }
            new_pt[pte].pagenum = alloc_phys_page(0);
            if (!new_pt[pte].pagenum) {
                panic("build_new_pagedir: (!new_pt[pte].pagenum) == muisti loppui!");
            }
            // BUGFIX: index with pte, not pde — the old code mapped and
            // copied the wrong entries' physical pages (and pde can exceed
            // a page table's valid index range).
            new_page = temp_phys_page(4, new_pt[pte].pagenum);
            old_page = temp_phys_page(5, old_pt[pte].pagenum);
            memcpy(new_page, old_page, MEMORY_PAGE_SIZE);
            temp_phys_page(5, 0);
            temp_phys_page(4, 0);
        }
        temp_phys_page(3, 0);
        temp_phys_page(2, 0);
    }
    temp_phys_page(1, 0);
    temp_phys_page(0, 0);
    return new_phys_pd;
}
/**
 * map_virtual_page - map a virtual page to some physical memory.
 * @phys_pd: physical page of the page directory to use (0 = current)
 * @virt_page: the virtual page to map
 * @noswap: must the page always stay resident in RAM?
 * @user: mapping for the kernel (0) or for user space (1)?
 *
 * Allocates the intermediate page table on demand. Returns 0 on
 * success, -1 if the page table could not be allocated, -2 if the
 * data page could not be allocated.
 */
int map_virtual_page(uint_t phys_pd, uint_t virt_page, int noswap, int user)
{
    uint_t dir_idx = virt_page / MEMORY_PE_COUNT;
    uint_t tbl_idx = virt_page % MEMORY_PE_COUNT;

    if (!phys_pd) {
        phys_pd = cur_phys_pd();
    }
    page_entry_t *pd = temp_page_directory(phys_pd);

    // No page table behind this directory slot yet: allocate and clear one.
    if (pd[dir_idx].pagenum == 0) {
        uint_t pt_page = alloc_phys_page(PT_NOSWAP_FLAG);
        if (!pt_page) {
            return -1;
        }
        void *scratch = temp_phys_page(0, pt_page);
        memset(scratch, 0, MEMORY_PAGE_SIZE);
        temp_phys_page(0, 0);
        if (dir_idx < KMEM_PDE_END) {
            pd[dir_idx] = KERNEL_PE(pt_page);
        } else {
            pd[dir_idx] = USER_PE(pt_page);
        }
    }

    page_entry_t *pt = temp_page_table(pd[dir_idx].pagenum);
    uint_t frame = alloc_phys_page(noswap);
    if (!frame) {
        return -2;
    }
    // TODO: zero the freshly mapped page?
    pt[tbl_idx] = NEW_PE(frame, user);
    flush_pagedir(phys_pd);
    return 0;
}
/**
 * load_driver - load a driver from an in-memory ELF image and run it.
 * @header: pointer to the ELF image already resident in memory
 *
 * Maps fresh physical pages for every PT_LOAD segment, copies the
 * segment contents in, and finally jumps to the image's entry point.
 * Dies on an invalid image, allocation failure, or a missing entry
 * point. Does not return a value; die() is assumed not to return.
 */
void load_driver(ElfHeader* header)
{
    bool valid = elf_is_valid(header);
    if(!valid) {
        die("Invalid driver.");
    }

    size_t num_segments = header->num_ph_entries;
    size_t segment_len = header->ph_entry_size;
    // NOTE(review): entries are indexed as a plain ElfSegment array below;
    // strictly the stride should be segment_len (ph_entry_size) — confirm
    // they always match for the images we load.
    (void)segment_len;
    ElfSegment* segments = program_header(header);
    EntryPoint entry = 0;

    for(size_t i = 0; i < num_segments; ++i) {
        ElfSegment* segment = segments + i;
        //TODO: loading for other segment types?
        if(segment->type != PT_LOAD) {
            continue;
        }
        if(segment->memory_size == 0) {
            continue;
        }

        void* virt = alloc_driver_segment(0);
        if(!virt) {
            die("Unable to allocate address space for driver segment");
        }

        // Back the whole segment with freshly allocated physical pages.
        // BUGFIX: inner loop variable renamed (it shadowed the outer i)
        // and the physical allocation is now checked before mapping.
        for(size_t page = 0; page < lsr_round_up(segment->memory_size, PAGE_SIZE_POWER); ++page) {
            void* phys = alloc_phys_page();
            if(!phys) {
                die("Unable to allocate physical memory for driver segment");
            }
            map_pages((uint8_t*)virt + PAGE_SIZE * page, phys, 1);
        }

        // NOTE(review): this copies memory_size bytes straight from the file
        // image; if the segment's file size is smaller than memory_size, the
        // trailing .bss bytes should be zeroed instead of copied — confirm
        // against the ElfSegment layout.
        memcpy(virt, (uint8_t*)header + segment->offset, segment->memory_size);

        // Remember the entry point if it lies inside this segment, but do
        // not jump yet: later PT_LOAD segments may still need loading.
        // BUGFIX: the old code invoked the entry point once per segment,
        // inside the loop, and did arithmetic on a void* pointer.
        if(header->entry >= segment->offset
           && header->entry < segment->offset + segment->memory_size) {
            entry = (EntryPoint)((uint8_t*)virt + (header->entry - segment->offset));
        }
    }

    if(!entry) {
        die("Driver entry point not found in any loaded segment");
    }
    entry();
}