void setup_process_vas(pcb* pcb_p) {
    // Allocate pages for the process image in the process VAS and mirror them
    // into the kernel VAS so they can be written from kernel context.
    for (int i = 0; i < 20; i++) {
        uint32_t *v = (uint32_t*) (pcb_p->start + (i * BLOCK_SIZE));
        int x = vm_allocate_page(pcb_p->stored_vas, (void*) v, VM_PERM_USER_RW);
        assert(x == 0);
        vm_map_shared_memory(KERNEL_VAS, (void*) v, pcb_p->stored_vas, (void*) v,
                             VM_PERM_USER_RW);
    }

    // Copy pcb_p->len bytes of the process image, one 32-bit word at a time.
    uint32_t *copyIn = (uint32_t*) pcb_p->start;
    uint32_t *v = (uint32_t*) pcb_p->start;
    int counter = 0;
    while (counter < pcb_p->len) {
        *v = *copyIn;
        copyIn += 1;
        v += 1;
        counter += 4;
    }

    // The image is in place; drop the temporary kernel mappings.
    for (int i = 0; i < 20; i++) {
        uint32_t *v = (uint32_t*) (pcb_p->start + (i * BLOCK_SIZE));
        vm_free_mapping(KERNEL_VAS, (void*) v);
    }
}
void vm_map_page(struct vm_translation_map *map, unsigned int va, unsigned int pa) {
    int vpindex = va / PAGE_SIZE;
    int pgdindex = vpindex / 1024;
    int pgtindex = vpindex % 1024;
    unsigned int *pgdir;
    unsigned int *pgtbl;
    struct list_node *other_map;
    unsigned int new_pgt;
    int old_flags;

    if (va >= KERNEL_BASE) {
        // Map into kernel space
        old_flags = acquire_spinlock_int(&kernel_space_lock);

        // The page tables for kernel space are shared by all page directories.
        // Check the first page directory to see if this is present. If not,
        // allocate a new one and stick it into all page directories.
        pgdir = (unsigned int*) PA_TO_VA(kernel_map.page_dir);
        if ((pgdir[pgdindex] & PAGE_PRESENT) == 0) {
            new_pgt = page_to_pa(vm_allocate_page()) | PAGE_PRESENT;
            list_for_each(&map_list, other_map, struct list_node) {
                pgdir = (unsigned int*) PA_TO_VA(
                    ((struct vm_translation_map*) other_map)->page_dir);
                pgdir[pgdindex] = new_pgt;
            }
        }
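/*
 * The body of vm_map_page() is truncated in this excerpt after the shared
 * kernel page table is installed. The sketch below is an illustration of how
 * the kernel-space branch could finish, not the original code; it assumes the
 * directory entry holds the page table's physical address in its upper bits
 * and that the caller passes the PTE flag bits already OR'ed into 'pa' (as
 * soft_fault() below does). finish_kernel_mapping_sketch is a made-up name.
 */
static void finish_kernel_mapping_sketch(unsigned int *pgdir, int pgdindex,
                                         int pgtindex, unsigned int pa,
                                         int old_flags) {
    unsigned int *pgtbl;

    // Convert the page table's physical address (directory entry minus its
    // low flag bits) into a kernel virtual address.
    pgtbl = (unsigned int*) PA_TO_VA(pgdir[pgdindex] & ~(PAGE_SIZE - 1));

    // Install the page table entry for this virtual page.
    pgtbl[pgtindex] = pa;

    // The real function would presumably also invalidate the TLB entry for
    // the mapped virtual address before releasing the lock.
    release_spinlock_int(&kernel_space_lock, old_flags);
}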
/*
 * Allocates memory for the process stack, moves the arguments for argc,
 * argv, envp, and auxp onto the top of the stack, and points the stack
 * pointer at the start of that frame.
 *
 * @param pcb_p pointer to the process control block
 */
void init_proc_stack(pcb *pcb_p) {
    int retval = 0;

    // Allocate the stack pages in the process VAS and mirror them into the
    // kernel VAS so they can be initialized from kernel context.
    for (int i = 0; i < (STACK_SIZE / BLOCK_SIZE); i++) {
        retval = vm_allocate_page(pcb_p->stored_vas,
                                  (void*) (STACK_BASE + (i * BLOCK_SIZE)),
                                  VM_PERM_USER_RW);
        if (retval) {
            os_printf("vm_allocate_page error code: %d\n", retval);
            break;
        } else {
            os_printf("A page has been allocated for the process stack at vptr: 0x%x\n",
                      (STACK_BASE + (i * BLOCK_SIZE)));
        }
        vm_map_shared_memory(KERNEL_VAS, (void*) (STACK_BASE + (i * BLOCK_SIZE)),
                             pcb_p->stored_vas, (void*) (STACK_BASE + (i * BLOCK_SIZE)),
                             VM_PERM_USER_RW);
    }

    // Build the initial frame just below STACK_TOP:
    // argc = 1, argv[0] = STACK_BASE (the process name), then NULL terminators.
    uint32_t *stack_top = (uint32_t*) STACK_TOP;
    stack_top[-1] = 0;
    stack_top[-2] = 0;
    stack_top[-3] = 0;
    stack_top[-4] = 0;
    stack_top[-5] = STACK_BASE;
    stack_top[-6] = 1;
    os_strcpy((char*) STACK_BASE, pcb_p->name);

    // Point sp (r13) at the six words just pushed: STACK_TOP - 24 bytes.
    pcb_p->R13 = STACK_TOP - 4 * 6;

    print_process_state(pcb_p->PID);

    // The stack is initialized; drop the temporary kernel mappings.
    for (int i = 0; i < (STACK_SIZE / BLOCK_SIZE); i++) {
        vm_free_mapping(KERNEL_VAS, (void*) (STACK_BASE + (i * BLOCK_SIZE)));
    }
}
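/*
 * For illustration only (not part of the original source): given the frame
 * built by init_proc_stack() above, a user-side entry stub could recover argc
 * and argv like this. user_entry and main are hypothetical names; sp is the
 * value that init_proc_stack() placed in R13.
 */
extern int main(int argc, char **argv);

void user_entry(uint32_t *sp) {
    int argc = (int) sp[0];        // stack_top[-6] above: argc == 1
    char *argv0 = (char*) sp[1];   // stack_top[-5]: pointer to the name at STACK_BASE
    char *argv[2] = { argv0, 0 };  // rebuild a conventional NULL-terminated argv

    main(argc, argv);
}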
struct vm_translation_map *create_translation_map(void) {
    struct vm_translation_map *map;
    int old_flags;

    map = slab_alloc(&translation_map_slab);
    map->page_dir = page_to_pa(vm_allocate_page());

    old_flags = acquire_spinlock_int(&kernel_space_lock);

    // Copy kernel page tables into new page directory
    memcpy((unsigned int*) PA_TO_VA(map->page_dir) + 768,
           (unsigned int*) PA_TO_VA(kernel_map.page_dir) + 768,
           256 * sizeof(unsigned int));

    map->asid = next_asid++;
    map->lock = 0;
    list_add_tail(&map_list, (struct list_node*) map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    return map;
}
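/*
 * Usage sketch, not from the original source: create an address space and map
 * one freshly allocated physical page at a user virtual address. USER_CODE_VA
 * and new_space_with_one_page are made-up names; the flag bits follow the
 * PAGE_* values used by soft_fault() below.
 */
#define USER_CODE_VA 0x1000  /* hypothetical user virtual address */

static struct vm_translation_map *new_space_with_one_page(void) {
    struct vm_translation_map *map = create_translation_map();
    struct vm_page *page = vm_allocate_page();

    // vm_map_page() takes the physical address with the PTE flags OR'ed in.
    vm_map_page(map, USER_CODE_VA,
                page_to_pa(page) | PAGE_PRESENT | PAGE_WRITABLE);
    return map;
}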
void __process_elf_init(pcb* pcb_p, const char* name) {
    int fd = kopen(name, 'r');
    uint32_t start = PROC_LOCATION;
    uint32_t len = 0;

    struct stats fstats;
    get_stats(name, &fstats);
    len = fstats.size;

    os_printf("LOADING PROCESS <<%s>>, start address 0x%x, length %d\n",
              name, start, len);

    // Allocate enough pages in the process VAS to hold the ELF image and
    // mirror them into the kernel VAS so the file can be read into place.
    for (int i = 0; i < (len / BLOCK_SIZE) + 1; i++) {
        uint32_t *v = (uint32_t*) (start + (i * BLOCK_SIZE));
        int x = vm_allocate_page(pcb_p->stored_vas, (void*) v, VM_PERM_USER_RW);
        assert(x == 0);
        vm_map_shared_memory(KERNEL_VAS, (void*) v, pcb_p->stored_vas, (void*) v,
                             VM_PERM_USER_RW);
    }

    // Read the ELF image from the file, one 32-bit word at a time.
    int* location = (int*) start;
    int counter = 0;
    while (counter < len) {
        kread(fd, location, 4);
        location += 1;
        counter += 4;
    }

    // Parse the ELF headers, load the segments, and record the entry point.
    Elf_Ehdr* success = (Elf_Ehdr*) load_file(pcb_p, (uint32_t*) start);
    pcb_p->R15 = success->e_entry;

    // The image is loaded; drop the temporary kernel mappings.
    for (int i = 0; i < (len / BLOCK_SIZE) + 1; i++) {
        uint32_t *v = (uint32_t*) (start + (i * BLOCK_SIZE));
        vm_free_mapping(KERNEL_VAS, (void*) v);
    }
}
//
// This is always called with the address space lock held, so the area is
// guaranteed not to change. Returns 1 if it successfully satisfied the fault,
// 0 if it failed for some reason.
//
static int soft_fault(struct vm_address_space *space, const struct vm_area *area,
                      unsigned int address, int is_store) {
    int got;
    unsigned int page_flags;
    struct vm_page *source_page;
    struct vm_page *dummy_page = 0;
    unsigned int cache_offset;
    struct vm_cache *cache;
    int old_flags;
    int is_cow_page = 0;
    int size_to_read;

    VM_DEBUG("soft fault va %08x %s\n", address, is_store ? "store" : "load");

    // XXX check area protections and fail if this shouldn't be allowed
    if (is_store && (area->flags & AREA_WRITABLE) == 0) {
        kprintf("store to read only area %s @%08x\n", area->name, address);
        return 0;
    }

    cache_offset = PAGE_ALIGN(address - area->low_address + area->cache_offset);
    old_flags = disable_interrupts();
    lock_vm_cache();
    assert(area->cache);

    for (cache = area->cache; cache; cache = cache->source) {
        VM_DEBUG("searching in cache %p\n", cache);
        source_page = lookup_cache_page(cache, cache_offset);
        if (source_page)
            break;

        if (cache->file && address - area->low_address < area->cache_length) {
            VM_DEBUG("reading page from file\n");

            // Read the page from this cache.
            source_page = vm_allocate_page();

            // Insert the page first so, if a collided fault occurs, it will not
            // load a different page (the vm cache lock protects the busy bit).
            source_page->busy = 1;
            insert_cache_page(cache, cache_offset, source_page);
            unlock_vm_cache();
            restore_interrupts(old_flags);

            if (area->cache_length - cache_offset < PAGE_SIZE)
                size_to_read = area->cache_length - cache_offset;
            else
                size_to_read = PAGE_SIZE;

            got = read_file(cache->file, cache_offset,
                            (void*) PA_TO_VA(page_to_pa(source_page)), size_to_read);
            if (got < 0) {
                kprintf("failed to read from file\n");
                dec_page_ref(source_page);
                if (dummy_page != 0) {
                    disable_interrupts();
                    lock_vm_cache();
                    remove_cache_page(dummy_page);
                    unlock_vm_cache();
                    restore_interrupts(old_flags);
                    dec_page_ref(dummy_page);
                }

                return 0;
            }

            // For BSS, clear out data past the end of the file
            if (size_to_read < PAGE_SIZE) {
                memset((char*) PA_TO_VA(page_to_pa(source_page)) + size_to_read, 0,
                       PAGE_SIZE - size_to_read);
            }

            disable_interrupts();
            lock_vm_cache();
            source_page->busy = 0;
            break;
        }

        // Otherwise scan the next cache
        is_cow_page = 1;
        if (cache == area->cache) {
            // Insert a dummy page in the top level cache to catch collided faults.
            dummy_page = vm_allocate_page();
            dummy_page->busy = 1;
            insert_cache_page(cache, cache_offset, dummy_page);
        }
    }

    if (source_page == 0) {
        assert(dummy_page != 0);
        VM_DEBUG("source page was not found, use empty page\n");

        // No page found, just use the dummy page
        dummy_page->busy = 0;
        source_page = dummy_page;
    } else if (is_cow_page) {
        // is_cow_page means source_page belongs to another cache.
        assert(dummy_page != 0);
        if (is_store) {
            // Copy the contents of the source page into the dummy page, which
            // stays in the top level cache (it's not really a dummy page any more).
            memcpy((void*) PA_TO_VA(page_to_pa(dummy_page)),
                   (void*) PA_TO_VA(page_to_pa(source_page)),
                   PAGE_SIZE);
            VM_DEBUG("write copy page va %08x dest pa %08x source pa %08x\n",
                     address, page_to_pa(dummy_page), page_to_pa(source_page));
            source_page = dummy_page;
            dummy_page->busy = 0;
        } else {
            // Map in the read-only page from the source cache. Remove the
            // dummy page from this cache (we do not insert the source page
            // into this cache, because we don't own the page).
            remove_cache_page(dummy_page);
            dec_page_ref(dummy_page);
            VM_DEBUG("mapping read-only source page va %08x pa %08x\n",
                     address, page_to_pa(source_page));
        }
    }

    assert(source_page != 0);

    // Grab a ref because we are going to map this page
    inc_page_ref(source_page);
    unlock_vm_cache();
    restore_interrupts(old_flags);

    // XXX busy wait for page to finish loading
    while (source_page->busy)
        reschedule();

    if (is_store)
        source_page->dirty = 1;    // XXX Locking?

    // It's possible two threads will fault on the same VA and end up mapping
    // the page twice. This is fine, because the code above ensures it will
    // be the same page.
    page_flags = PAGE_PRESENT;

    // If the page is clean, mark it not writable. This will fault on the
    // next write, allowing us to update the dirty flag.
    if ((area->flags & AREA_WRITABLE) != 0 && (source_page->dirty || is_store))
        page_flags |= PAGE_WRITABLE;

    if (area->flags & AREA_EXECUTABLE)
        page_flags |= PAGE_EXECUTABLE;

    if (space == &kernel_address_space)
        page_flags |= PAGE_SUPERVISOR | PAGE_GLOBAL;

    vm_map_page(space->translation_map, address, page_to_pa(source_page) | page_flags);

    return 1;
}
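/*
 * Sketch of a caller, not from the original source: a page-fault handler would
 * resolve the faulting address to a vm_area while holding the address space
 * lock (which soft_fault() requires), then let soft_fault() satisfy the fault.
 * acquire_address_space_lock, release_address_space_lock, and find_area are
 * hypothetical helpers standing in for whatever the real kernel provides.
 */
static int handle_page_fault_sketch(struct vm_address_space *space,
                                    unsigned int address, int is_store) {
    const struct vm_area *area;
    int handled = 0;

    acquire_address_space_lock(space);   // soft_fault() expects this to be held
    area = find_area(space, address);    // hypothetical area lookup
    if (area != 0)
        handled = soft_fault(space, area, address, is_store);

    release_address_space_lock(space);
    return handled;                      // 0 => deliver the fault to the thread
}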