void process_run(process_id_t pid)
{
  context_t user_context;
  thread_table_t *my_thread = thread_get_current_thread_entry();

  /* If my process is a zombie, initialisation failed: free the page
     table (if any) and kill this thread. thread_finish() does not
     return. */
  if (process_table[pid].state == PROCESS_ZOMBIE) {
    if (my_thread->pagetable) {
      vm_destroy_pagetable(my_thread->pagetable);
      my_thread->pagetable = NULL;
    }
    thread_finish();
  }

  process_set_pagetable(my_thread->pagetable);
  my_thread->process_id = pid;

  /* Initialize the user context. (Status register is handled by
     thread_goto_userland) */
  memoryset(&user_context, 0, sizeof(user_context));
  _context_set_ip(&user_context, process_table[pid].entry_point);
  _context_set_sp(&user_context, process_table[pid].stack_top);

  thread_goto_userland(&user_context);
}
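/* A sketch of how process_run is presumably entered (an assumption, not
   part of the original source): a spawning function creates a fresh
   kernel thread with process_run as its entry point and the pid as the
   thread argument. thread_create and thread_run are assumed to be the
   stock Buenos thread API; the function-pointer cast matches
   thread_create's uint32_t argument on 32-bit MIPS. */
void process_run_spawn_sketch(process_id_t pid)
{
  TID_t tid;

  tid = thread_create((void (*)(uint32_t))process_run, (uint32_t)pid);
  thread_run(tid);
}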
void setup_thread(thread_params_t *params)
{
  context_t user_context;
  uint32_t phys_page;
  int i;
  interrupt_status_t intr_status;
  thread_table_t *thread = thread_get_current_thread_entry();

  /* Copy thread parameters. */
  int arg = params->arg;
  void (*func)(int) = params->func;
  process_id_t pid = thread->process_id = params->pid;
  thread->pagetable = params->pagetable;
  params->done = 1; /* OK, we don't need params any more. */

  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);

  /* Set up userspace environment. */
  memoryset(&user_context, 0, sizeof(user_context));

  user_context.cpu_regs[MIPS_REGISTER_A0] = arg;
  user_context.pc = (uint32_t)func;

  /* Allocate thread stack */
  if (process_table[pid].bot_free_stack != 0) {
    /* Reuse old thread stack. */
    user_context.cpu_regs[MIPS_REGISTER_SP] =
      process_table[pid].bot_free_stack
      + CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE
      - 4; /* Space for the thread argument */
    process_table[pid].bot_free_stack =
      *(uint32_t*)process_table[pid].bot_free_stack;
  } else {
    /* Allocate physical pages (frames) for the stack. */
    for (i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
      phys_page = pagepool_get_phys_page();
      KERNEL_ASSERT(phys_page != 0);
      vm_map(thread->pagetable, phys_page,
             process_table[pid].stack_end - (i+1)*PAGE_SIZE, 1);
    }
    user_context.cpu_regs[MIPS_REGISTER_SP] =
      process_table[pid].stack_end - 4; /* Space for the thread argument */
    process_table[pid].stack_end -= PAGE_SIZE*CONFIG_USERLAND_STACK_SIZE;
  }

  tlb_fill(thread->pagetable);

  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);

  thread_goto_userland(&user_context);
}
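/* setup_thread reads five fields from *params and then raises
   params->done. The struct below is a hypothetical reconstruction
   inferred from those accesses alone; the real definition may differ.
   The caller-side sketch that follows shows the handshake implied by
   the done flag: the spawning thread must keep params alive (here, on
   its stack) until the new thread has copied them. */
typedef struct {
  volatile uint32_t done;  /* set to 1 by setup_thread once params are copied */
  void (*func)(int);       /* userland entry point of the new thread */
  int arg;                 /* argument delivered in register A0 */
  process_id_t pid;        /* process the new thread belongs to */
  pagetable_t *pagetable;  /* page table shared with the process */
} thread_params_t;

/* Hypothetical caller, e.g. the handler of a thread-spawning syscall.
   thread_create/thread_run are assumed to be the stock Buenos API. */
void spawn_thread_sketch(void (*func)(int), int arg)
{
  thread_table_t *my_thread = thread_get_current_thread_entry();
  thread_params_t params;
  TID_t tid;

  params.done = 0;
  params.func = func;
  params.arg  = arg;
  params.pid  = my_thread->process_id;
  params.pagetable = my_thread->pagetable;

  tid = thread_create((void (*)(uint32_t))setup_thread, (uint32_t)&params);
  thread_run(tid);

  /* Busy-wait until setup_thread no longer needs our stack frame. */
  while (params.done == 0);
}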
/* Sets context for process */
void process_run(process_id_t pid)
{
  context_t user_context;

  kprintf("we are now in process_run\n");

  process_set_pagetable(thread_get_thread_entry(thread_get_current_thread())->pagetable);

  /* Initialize the user context. (Status register is handled by
     thread_goto_userland) */
  memoryset(&user_context, 0, sizeof(user_context));
  _context_set_ip(&user_context, process_table[pid].entry_point);
  _context_set_sp(&user_context, process_table[pid].stack_top);

  kprintf("right before thread_goto_userland\n");
  thread_goto_userland(&user_context);
}
/**
 * Starts one userland process. The thread calling this function will
 * be used to run the process and will therefore never return from
 * this function. This function asserts that no errors occur in
 * process startup (the executable file exists and is a valid ELF
 * file, enough memory is available, file operations succeed...).
 * Therefore this function is not suitable for starting arbitrary
 * processes.
 *
 * @param pid The process table entry of the process to start. The
 * name of the executable is read from that entry.
 */
void process_start(uint32_t pid)
{
  thread_table_t *my_entry;
  pagetable_t *pagetable;
  uint32_t phys_page;
  context_t user_context;
  uint32_t stack_bottom;
  elf_info_t elf;
  openfile_t file;
  const char *executable;

  int i;

  interrupt_status_t intr_status;

  my_entry = thread_get_current_thread_entry();
  my_entry->process_id = pid;

  executable = process_table[pid].executable;

  /* If the pagetable of this thread is not NULL, we are trying to
     run a userland process for a second time in the same thread.
     This is not possible. */
  KERNEL_ASSERT(my_entry->pagetable == NULL);

  pagetable = vm_create_pagetable(thread_get_current_thread());
  KERNEL_ASSERT(pagetable != NULL);

  intr_status = _interrupt_disable();
  my_entry->pagetable = pagetable;
  _interrupt_set_state(intr_status);

  file = vfs_open((char *)executable);

  /* Make sure the file existed and was a valid ELF file */
  KERNEL_ASSERT(file >= 0);
  KERNEL_ASSERT(elf_parse_header(&elf, file));

  /* Trivial and naive sanity check for entry point: */
  KERNEL_ASSERT(elf.entry_point >= PAGE_SIZE);

  /* Calculate the number of pages needed by the whole process
     (including userland stack). Since we don't have proper tlb
     handling code, all these pages must fit into TLB. */
  KERNEL_ASSERT(elf.ro_pages + elf.rw_pages + CONFIG_USERLAND_STACK_SIZE
                <= _tlb_get_maxindex() + 1);

  /* Allocate and map stack */
  for (i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
    phys_page = pagepool_get_phys_page();
    KERNEL_ASSERT(phys_page != 0);
    vm_map(my_entry->pagetable, phys_page,
           (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
  }

  /* Allocate and map pages for the segments. We assume that
     segments begin at page boundary. (The linker script in tests
     directory creates this kind of segments.) */
  for (i = 0; i < (int)elf.ro_pages; i++) {
    phys_page = pagepool_get_phys_page();
    KERNEL_ASSERT(phys_page != 0);
    vm_map(my_entry->pagetable, phys_page,
           elf.ro_vaddr + i*PAGE_SIZE, 1);
  }

  for (i = 0; i < (int)elf.rw_pages; i++) {
    phys_page = pagepool_get_phys_page();
    KERNEL_ASSERT(phys_page != 0);
    vm_map(my_entry->pagetable, phys_page,
           elf.rw_vaddr + i*PAGE_SIZE, 1);
  }

  /* Put the mapped pages into TLB. Here we again assume that the
     pages fit into the TLB. After writing proper TLB exception
     handling this call should be skipped. */
  //intr_status = _interrupt_disable();
  //tlb_fill(my_entry->pagetable);
  //_interrupt_set_state(intr_status);

  /* Now we may use the virtual addresses of the segments. */

  /* Zero the pages. */
  memoryset((void *)elf.ro_vaddr, 0, elf.ro_pages*PAGE_SIZE);
  memoryset((void *)elf.rw_vaddr, 0, elf.rw_pages*PAGE_SIZE);

  stack_bottom = (USERLAND_STACK_TOP & PAGE_SIZE_MASK)
    - (CONFIG_USERLAND_STACK_SIZE-1)*PAGE_SIZE;
  memoryset((void *)stack_bottom, 0, CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE);

  /* Copy segments */

  if (elf.ro_size > 0) {
    /* Make sure that the segment is in proper place. */
    KERNEL_ASSERT(elf.ro_vaddr >= PAGE_SIZE);
    KERNEL_ASSERT(vfs_seek(file, elf.ro_location) == VFS_OK);
    KERNEL_ASSERT(vfs_read(file, (void *)elf.ro_vaddr, elf.ro_size)
                  == (int)elf.ro_size);
  }

  if (elf.rw_size > 0) {
    /* Make sure that the segment is in proper place. */
    KERNEL_ASSERT(elf.rw_vaddr >= PAGE_SIZE);
    KERNEL_ASSERT(vfs_seek(file, elf.rw_location) == VFS_OK);
    KERNEL_ASSERT(vfs_read(file, (void *)elf.rw_vaddr, elf.rw_size)
                  == (int)elf.rw_size);
  }

  /* Set the dirty bit to zero (read-only) on read-only pages. */
  for (i = 0; i < (int)elf.ro_pages; i++) {
    vm_set_dirty(my_entry->pagetable, elf.ro_vaddr + i*PAGE_SIZE, 0);
  }

  /* Insert page mappings again to TLB to take read-only bits into use */
  //intr_status = _interrupt_disable();
  //tlb_fill(my_entry->pagetable);
  //_interrupt_set_state(intr_status);

  /* Initialize the user context. (Status register is handled by
     thread_goto_userland) */
  memoryset(&user_context, 0, sizeof(user_context));
  user_context.cpu_regs[MIPS_REGISTER_SP] = USERLAND_STACK_TOP;
  user_context.pc = elf.entry_point;

  vfs_close(file);

  thread_goto_userland(&user_context);

  KERNEL_PANIC("thread_goto_userland failed.");
}
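/* The process table entry itself is not shown in this section. The
   struct below is a hypothetical reconstruction inferred solely from
   the fields accessed above; the field names come from those accesses,
   but the types, the executable buffer size, and the array bound are
   assumptions. */
typedef struct {
  process_state_t state;    /* e.g. PROCESS_ZOMBIE, as tested in process_run */
  char executable[256];     /* name of the executable; buffer size assumed */
  uint32_t entry_point;     /* set from elf.entry_point by the loader */
  uint32_t stack_top;       /* initial SP of the first userland thread */
  uint32_t stack_end;       /* lowest mapped stack address; lowered per new thread */
  uint32_t bot_free_stack;  /* head of the free list of recycled thread stacks */
} process_table_entry_t;

/* Assumed globals, guarded by process_table_slock as in setup_thread: */
process_table_entry_t process_table[CONFIG_MAX_PROCESSES]; /* bound assumed */
spinlock_t process_table_slock;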