int arch_loader_parse_elf_executable(void *mem, struct file *file, addr_t *start, addr_t *end)
{
    uint32_t i;
    elf_header_t *eh = (elf_header_t *)mem;
    uint8_t buffer[(eh->phnum+1) * eh->phsize];
    fs_file_pread(file, eh->phoff, buffer, eh->phsize * eh->phnum);
    uint64_t max=0, min=~0;
    for(i=0;i < eh->phnum;i++) {
        elf64_program_header_t *ph = (elf64_program_header_t *)(buffer + (i*eh->phsize));
        if((ph->p_addr + ph->p_memsz) > max)
            max = ph->p_addr + ph->p_memsz;
        if(ph->p_addr < min)
            min = ph->p_addr;
        if(ph->p_type == PH_LOAD) {
            /* mmap program headers. If memsz is the same as filesz, we don't have
             * to do anything special. If not, then we might need additional mappings: the
             * file is mapped to some section of the program header's region, and then the
             * rest is MAP_ANONYMOUS memory. If it fits in the end of the page of the
             * file-mapped memory, then it can fit there. Otherwise, we call mmap again.
             *
             * Also, the actual section we want might be offset from a page. Handle that as
             * well, with inpage_offset. */
            size_t additional = ph->p_memsz - ph->p_filesz;
            size_t inpage_offset = ph->p_addr & (~PAGE_MASK);
            addr_t newend = ph->p_addr + ph->p_filesz;
            size_t page_free = PAGE_SIZE - (newend % PAGE_SIZE);
            int prot = 0;
            if(ph->p_flags & ELF_PF_R)
                prot |= PROT_READ;
            if(ph->p_flags & ELF_PF_W)
                prot |= PROT_WRITE;
            if(ph->p_flags & ELF_PF_X)
                prot |= PROT_EXEC;
            int flags = MAP_FIXED;
            if(prot & PROT_WRITE)
                flags |= MAP_PRIVATE;
            else
                flags |= MAP_SHARED;
            mm_mmap(ph->p_addr & PAGE_MASK, ph->p_filesz + inpage_offset,
                    prot, flags, file, ph->p_offset & PAGE_MASK, 0);
            if(additional > page_free) {
                mm_mmap((newend & PAGE_MASK) + PAGE_SIZE, additional - page_free,
                        prot, flags | MAP_ANONYMOUS, 0, 0, 0);
            }
        }
    }
    if(!max)
        return 0;
    *start = eh->entry;
    *end = (max & PAGE_MASK) + PAGE_SIZE;
    return 1;
}
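/* A minimal standalone sketch (not part of the kernel above) of the segment-split
 * arithmetic used in arch_loader_parse_elf_executable(): the file-backed part of a
 * PT_LOAD segment covers p_filesz bytes, and only the portion of the zero-filled tail
 * (p_memsz - p_filesz) that does not fit in the last file-backed page needs a separate
 * anonymous mapping. SKETCH_PAGE_SIZE/SKETCH_PAGE_MASK are illustrative assumptions. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 0x1000u
#define SKETCH_PAGE_MASK (~(uint64_t)(SKETCH_PAGE_SIZE - 1))

static void split_segment(uint64_t p_addr, uint64_t p_filesz, uint64_t p_memsz)
{
    uint64_t additional = p_memsz - p_filesz;            /* zero-filled tail (BSS)   */
    uint64_t newend     = p_addr + p_filesz;             /* end of file-backed bytes */
    uint64_t page_free  = SKETCH_PAGE_SIZE - (newend % SKETCH_PAGE_SIZE);
    printf("file mapping from %#llx up to %#llx\n",
           (unsigned long long)(p_addr & SKETCH_PAGE_MASK), (unsigned long long)newend);
    if(additional > page_free)
        printf("anon mapping at %#llx, length %#llx\n",
               (unsigned long long)((newend & SKETCH_PAGE_MASK) + SKETCH_PAGE_SIZE),
               (unsigned long long)(additional - page_free));
    else
        printf("BSS tail (%#llx bytes) fits in the last file-backed page\n",
               (unsigned long long)additional);
}

int main(void)
{
    split_segment(0x601040, 0x220, 0x3000); /* large BSS: needs an extra anon mapping */
    split_segment(0x601040, 0x220, 0x300);  /* small BSS: fits in the same page       */
    return 0;
}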
void __init_entry(void)
{
    /* the kernel doesn't have this mapping, so we have to create it here. */
    tm_thread_raise_flag(current_thread, THREAD_KERNEL);
    mm_mmap(current_thread->usermode_stack_start, CONFIG_STACK_PAGES * PAGE_SIZE,
            PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
    tm_thread_lower_flag(current_thread, THREAD_KERNEL);
    tm_thread_user_mode_jump(user_mode_init);
}
void tls_init()
{
    mm_mmap(TLS_DATA_BASE, sizeof(struct tls_data), PROT_READ | PROT_WRITE | PROT_EXEC,
            MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, NULL, 0);
    for (int i = 0; i < TLS_KERNEL_ENTRY_COUNT; i++)
    {
        tls->kernel_entries[i] = TlsAlloc();
        log_info("Allocated kernel TLS entry, entry: %d, slot: %d, fs offset 0x%x\n",
                 i, tls->kernel_entries[i], tls_slot_to_offset(tls->kernel_entries[i]));
    }
}
static void preexec(void)
{
    struct thread *t = current_thread;
    /* unmap all mappings, as specified by POSIX */
    mm_destroy_all_mappings(t->process);
    mm_virtual_map(MEMMAP_SYSGATE_ADDRESS, sysgate_page, PAGE_PRESENT | PAGE_USER, PAGE_SIZE);
    /* we need to re-create the vmem for memory mappings */
    valloc_create(&(t->process->mmf_valloc), MEMMAP_MMAP_BEGIN, MEMMAP_MMAP_END, PAGE_SIZE, 0);
    mm_mmap(t->usermode_stack_start, CONFIG_STACK_PAGES * PAGE_SIZE,
            PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
    mm_page_fault_test_mappings(t->usermode_stack_end - PAGE_SIZE, PF_CAUSE_USER | PF_CAUSE_WRITE);
    t->signal = t->signals_pending = 0;
    memset((void *)t->process->signal_act, 0, sizeof(struct sigaction) * NUM_SIGNALS);
    if(t->flags & THREAD_PTRACED) {
        tm_signal_send_thread(t, SIGTRAP);
    }
}
static struct bucket *alloc_bucket(int objsize)
{
    struct bucket *b = mm_mmap(NULL, BLOCK_SIZE, PROT_READ | PROT_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE,
        INTERNAL_MAP_TOPDOWN | INTERNAL_MAP_NORESET | INTERNAL_MAP_COPYONFORK, NULL, 0);
    b->ref_cnt = 0;
    b->next_bucket = NULL;
    /* Set up the chain of free objects */
    char *c = (char *)b + ALIGN(sizeof(struct bucket), sizeof(void *)); /* Align to machine word size */
    b->first_free = c;
    while (c + objsize < (char *)b + BLOCK_SIZE)
    {
        *(char **)c = c + objsize;
        c += objsize;
    }
    *(char **)c = NULL;
    return b;
}
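/* A minimal standalone sketch of how the intrusive free list built by alloc_bucket()
 * would be consumed. bucket_obj_alloc()/bucket_obj_free() are hypothetical helper names
 * for illustration, not the allocator's real API; struct sketch_bucket only mirrors the
 * fields used above (ref_cnt, first_free). */
#include <stddef.h>

struct sketch_bucket {
    int ref_cnt;
    void *first_free;   /* head of the chain: each free slot stores a pointer to the next */
};

static void *bucket_obj_alloc(struct sketch_bucket *b)
{
    void *obj = b->first_free;
    if (obj) {
        b->first_free = *(void **)obj;  /* pop: the next pointer lives in the slot itself */
        b->ref_cnt++;
    }
    return obj;
}

static void bucket_obj_free(struct sketch_bucket *b, void *obj)
{
    *(void **)obj = b->first_free;      /* push the slot back on the chain */
    b->first_free = obj;
    b->ref_cnt--;
}

int main(void)
{
    /* Build a 4-slot chain the same way alloc_bucket() does, then exercise it. */
    enum { OBJSIZE = 32, SLOTS = 4 };
    static char storage[OBJSIZE * SLOTS];
    struct sketch_bucket b = { 0, storage };
    for (int i = 0; i < SLOTS - 1; i++)
        *(void **)(storage + i * OBJSIZE) = storage + (i + 1) * OBJSIZE;
    *(void **)(storage + (SLOTS - 1) * OBJSIZE) = NULL;

    void *a = bucket_obj_alloc(&b);
    void *c = bucket_obj_alloc(&b);
    bucket_obj_free(&b, a);
    (void)c;
    return 0;
}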
int do_exec(char *path, char **argv, char **env, int shebanged /* oh my */)
{
    unsigned int i=0;
    addr_t end, eip;
    unsigned int argc=0, envc=0;
    char **backup_argv=0, **backup_env=0;
    /* Sanity */
    if(!path || !*path)
        return -EINVAL;
    /* Load the file, and make sure that it is valid and accessible */
    if(EXEC_LOG == 2)
        printk(0, "[%d]: Checking executable file (%s)\n", current_process->pid, path);
    struct file *efil;
    int err_open;
    efil = fs_file_open(path, _FREAD, 0, &err_open);
    if(!efil)
        return err_open;
    /* are we allowed to execute it? */
    if(!vfs_inode_check_permissions(efil->inode, MAY_EXEC, 0)) {
        file_put(efil);
        return -EACCES;
    }
    /* is it a valid ELF? */
    int header_size = 0;
#if CONFIG_ARCH == TYPE_ARCH_X86_64
    header_size = sizeof(elf64_header_t);
#elif CONFIG_ARCH == TYPE_ARCH_X86
    header_size = sizeof(elf32_header_t);
#endif
    /* read in the ELF header, and check if it's a shebang */
    if(header_size < 2)
        header_size = 2;
    unsigned char mem[header_size];
    fs_file_pread(efil, 0, mem, header_size);
    if(__is_shebang(mem))
        return loader_do_shebang(efil, argv, env);
    int other_bitsize=0;
    if(!is_valid_elf(mem, 2) && !other_bitsize) {
        file_put(efil);
        return -ENOEXEC;
    }
    if(EXEC_LOG == 2)
        printk(0, "[%d]: Copy data\n", current_process->pid);
    /* okay, let's back up argv and env so that we can
     * clear out the address space and not lose data...
     * If this call is coming from a shebang, then we don't check the pointers,
     * since they won't be from userspace */
    size_t total_args_len = 0;
    if((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, argv, 0)) && argv) {
        while((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, argv[argc], 0))
                && argv[argc] && *argv[argc])
            argc++;
        backup_argv = (char **)kmalloc(sizeof(addr_t) * argc);
        for(i=0;i<argc;i++) {
            backup_argv[i] = (char *)kmalloc(strlen(argv[i]) + 1);
            _strcpy(backup_argv[i], argv[i]);
            total_args_len += strlen(argv[i])+1 + sizeof(char *);
        }
    }
    if((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, env, 0)) && env) {
        while((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, env[envc], 0))
                && env[envc] && *env[envc])
            envc++;
        backup_env = (char **)kmalloc(sizeof(addr_t) * envc);
        for(i=0;i<envc;i++) {
            backup_env[i] = (char *)kmalloc(strlen(env[i]) + 1);
            _strcpy(backup_env[i], env[i]);
            total_args_len += strlen(env[i])+1 + sizeof(char *);
        }
    }
    total_args_len += 2 * sizeof(char *);
    /* and the path too! */
    char *path_backup = (char *)kmalloc(strlen(path) + 1);
    _strcpy((char *)path_backup, path);
    path = path_backup;
    /* Preexec - this is the point of no return. Here we close out unneeded
     * file descriptors, free up the page directory and clear up the resources
     * of the task */
    if(EXEC_LOG)
        printk(0, "Executing (p%dt%d, cpu %d, tty %d): %s\n",
               current_process->pid, current_thread->tid, current_thread->cpu->knum,
               current_process->pty ? current_process->pty->num : 0, path);
    preexec();
    /* load in the new image */
    strncpy((char *)current_process->command, path, 128);
    if(!loader_parse_elf_executable(mem, efil, &eip, &end))
        eip=0;
    /* do setuid and setgid */
    if(efil->inode->mode & S_ISUID) {
        current_process->effective_uid = efil->inode->uid;
    }
    if(efil->inode->mode & S_ISGID) {
        current_process->effective_gid = efil->inode->gid;
    }
    /* we don't need the file anymore, close it out */
    file_put(efil);
    file_close_cloexec();
    if(!eip) {
        printk(5, "[exec]: Tried to execute an invalid ELF file!\n");
        free_dp(backup_argv, argc);
        free_dp(backup_env, envc);
        kfree(path);
        tm_thread_exit(0);
    }
    if(EXEC_LOG == 2)
        printk(0, "[%d]: Updating task values\n", current_process->pid);
    /* Setup the task with the proper values (libc malloc stack) */
    end = ((end-1)&PAGE_MASK) + PAGE_SIZE;
    total_args_len += PAGE_SIZE;
    /* now we need to copy back the args and env into userspace
     * writeable memory...yippie. */
    addr_t args_start = end + PAGE_SIZE;
    addr_t env_start = args_start;
    addr_t alen = 0;
    mm_mmap(end, total_args_len, PROT_READ | PROT_WRITE,
            MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
    if(backup_argv) {
        memcpy((void *)args_start, backup_argv, sizeof(addr_t) * argc);
        alen += sizeof(addr_t) * argc;
        *(addr_t *)(args_start + alen) = 0; /* set last argument pointer to zero */
        alen += sizeof(addr_t);
        argv = (char **)args_start;
        for(i=0;i<argc;i++) {
            char *old = argv[i];
            char *new = (char *)(args_start+alen);
            unsigned len = strlen(old) + 4;
            argv[i] = new;
            _strcpy(new, old);
            kfree(old);
            alen += len;
        }
        kfree(backup_argv);
    }
    env_start = args_start + alen;
    alen = 0;
    if(backup_env) {
        memcpy((void *)env_start, backup_env, sizeof(addr_t) * envc);
        alen += sizeof(addr_t) * envc;
        *(addr_t *)(env_start + alen) = 0; /* set last environment pointer to zero */
        alen += sizeof(addr_t);
        env = (char **)env_start;
        for(i=0;i<envc;i++) {
            char *old = env[i];
            char *new = (char *)(env_start+alen);
            unsigned len = strlen(old) + 1;
            env[i] = new;
            _strcpy(new, old);
            kfree(old);
            alen += len;
        }
        kfree(backup_env);
    }
    end = (env_start + alen) & PAGE_MASK;
    current_process->env = env;
    current_process->argv = argv;
    kfree(path);
    /* set the heap locations, and map in the start */
    current_process->heap_start = current_process->heap_end = end + PAGE_SIZE*2;
    mm_mmap(end + PAGE_SIZE, PAGE_SIZE, PROT_READ | PROT_WRITE,
            MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
    /* now, we just need to deal with the syscall return stuff. When the syscall
     * returns, it'll just jump into the entry point of the new process */
    tm_thread_lower_flag(current_thread, THREAD_SCHEDULE);
    /* the kernel cares if it has executed something or not */
    if(!(kernel_state_flags & KSF_HAVEEXECED))
        set_ksf(KSF_HAVEEXECED);
    arch_loader_exec_initializer(argc, eip);
    if(EXEC_LOG == 2)
        printk(0, "[%d]: Performing call\n", current_process->pid);
    return 0;
}
void main()
{
    win7compat_init();
    log_init();
    fork_init(); /* fork_init() will directly jump to the restored thread context if we are a fork child */
    mm_init();
    flags_init();

    /* Fetch the command line */
    const char *cmdline = GetCommandLineA();
    int len = strlen(cmdline);
    if (len > BLOCK_SIZE) /* TODO: Test if there is sufficient space for the argv[] array */
    {
        init_subsystems();
        kprintf("Command line too long.\n");
        process_exit(1, 0);
    }
    startup = mm_mmap(NULL, BLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS,
        INTERNAL_MAP_TOPDOWN | INTERNAL_MAP_NORESET | INTERNAL_MAP_VIRTUALALLOC, NULL, 0);
    *(uintptr_t*) startup = 1;
    char *current_startup_base = startup + sizeof(uintptr_t);
    memcpy(current_startup_base, cmdline, len + 1);
    char *envbuf = (char *)ALIGN_TO(current_startup_base + len + 1, sizeof(void*));
    char *env0 = envbuf;
    ENV("TERM=xterm");
    char *env1 = envbuf;
    ENV("HOME=/root");
    char *env2 = envbuf;
    ENV("DISPLAY=127.0.0.1:0");
    char *env3 = envbuf;
    ENV("PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin:/sbin");
    int argc = 0;
    char **argv = (char **)ALIGN_TO(envbuf, sizeof(void*));

    /* Split the command line into argv[], honouring double quotes */
    int in_quote = 0;
    char *j = current_startup_base;
    for (char *i = current_startup_base; i <= current_startup_base + len; i++)
        if (!in_quote && (*i == ' ' || *i == '\t' || *i == '\r' || *i == '\n' || *i == 0))
        {
            *i = 0;
            if (i > j)
                argv[argc++] = j;
            j = i + 1;
        }
        else if (*i == '"')
        {
            *i = 0;
            if (in_quote)
                argv[argc++] = j;
            in_quote = !in_quote;
            j = i + 1;
        }
    argv[argc] = NULL;
    char **envp = argv + argc + 1;
    int env_size = 4;
    envp[0] = env0;
    envp[1] = env1;
    envp[2] = env2;
    envp[3] = env3;
    envp[4] = NULL;
    char *buffer_base = (char*)(envp + env_size + 1);

    const char *filename = NULL;
    int arg_start;
    for (int i = 1; i < argc; i++)
    {
        if (!strcmp(argv[i], "--session-id"))
        {
            if (++i < argc)
            {
                int len = strlen(argv[i]);
                if (len >= MAX_SESSION_ID_LEN)
                {
                    init_subsystems();
                    kprintf("--session-id: Session ID too long.\n");
                    process_exit(1, 0);
                }
                for (int j = 0; j < len; j++)
                {
                    char ch = argv[i][j];
                    if (!((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'z')
                        || (ch >= 'A' && ch <= 'Z') || ch == '_' || ch == '-'))
                    {
                        init_subsystems();
                        kprintf("--session-id: Invalid characters.\n");
                        process_exit(1, 0);
                    }
                }
                strcpy(cmdline_flags->global_session_id, argv[i]);
            }
            else
            {
                init_subsystems();
                kprintf("--session-id: No ID given.\n");
                process_exit(1, 0);
            }
        }
        else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
        {
            init_subsystems();
            print_help();
            process_exit(1, 0);
        }
        else if (!strcmp(argv[i], "--usage"))
        {
            init_subsystems();
            print_usage();
            process_exit(1, 0);
        }
        else if (!strcmp(argv[i], "--version") || !strcmp(argv[i], "-v"))
        {
            init_subsystems();
            print_version();
            process_exit(1, 0);
        }
        else if (!strcmp(argv[i], "--dbt-trace"))
            cmdline_flags->dbt_trace = true;
        else if (!strcmp(argv[i], "--dbt-trace-all"))
        {
            cmdline_flags->dbt_trace = true;
            cmdline_flags->dbt_trace_all = true;
        }
        else if (argv[i][0] == '-')
        {
            init_subsystems();
            kprintf("Unrecognized option: %s\n", argv[i]);
            process_exit(1, 0);
        }
        else if (!filename)
        {
            filename = argv[i];
            arg_start = i;
            break;
        }
    }
    init_subsystems();
    if (filename)
    {
        install_syscall_handler();
        int r = do_execve(filename, argc - arg_start, argv + arg_start, env_size, envp, buffer_base, NULL);
        if (r == -L_ENOENT)
        {
            kprintf("Executable not found.\n");
            process_exit(1, 0);
        }
    }
    print_usage();
    process_exit(1, 0);
}
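/* A minimal standalone sketch of the quote-aware command-line splitting done in main()
 * above, extracted into a testable helper. split_cmdline() is a hypothetical name used
 * for illustration; the real code writes argv[] directly into the startup block. */
#include <stdio.h>
#include <string.h>

/* Splits 'cmdline' in place and returns argc. Whitespace separates arguments unless it
 * appears inside double quotes, and the quote characters themselves are stripped. */
static int split_cmdline(char *cmdline, char **argv, int max_args)
{
    int argc = 0, in_quote = 0;
    size_t len = strlen(cmdline);
    char *j = cmdline;
    for (char *i = cmdline; i <= cmdline + len && argc < max_args; i++) {
        if (!in_quote && (*i == ' ' || *i == '\t' || *i == '\r' || *i == '\n' || *i == 0)) {
            *i = 0;
            if (i > j)
                argv[argc++] = j;
            j = i + 1;
        } else if (*i == '"') {
            *i = 0;
            if (in_quote)
                argv[argc++] = j;
            in_quote = !in_quote;
            j = i + 1;
        }
    }
    argv[argc] = NULL;
    return argc;
}

int main(void)
{
    char cmdline[] = "flinux --session-id test \"/bin/busybox sh\" -c ls";
    char *argv[16];
    int argc = split_cmdline(cmdline, argv, 15);
    for (int i = 0; i < argc; i++)
        printf("argv[%d] = %s\n", i, argv[i]);
    return 0;
}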
static int load_elf(struct file *f, struct binfmt *binary)
{
    struct elf_header *elf = binary->has_interpreter ? binary->interpreter : binary->executable;
    /* Load ELF header */
    f->op_vtable->pread(f, &elf->eh, sizeof(Elf_Ehdr), 0);
    if (elf->eh.e_type != ET_EXEC && elf->eh.e_type != ET_DYN)
    {
        log_error("Only ET_EXEC and ET_DYN executables can be loaded.");
        return -L_EACCES;
    }

#ifdef _WIN64
    if (elf->eh.e_machine != EM_X86_64)
    {
        log_error("Not an x86_64 executable.");
#else
    if (elf->eh.e_machine != EM_386)
    {
        log_error("Not an i386 executable.");
#endif
        return -L_EACCES;
    }

    /* Load program header table */
    size_t phsize = (size_t)elf->eh.e_phentsize * (size_t)elf->eh.e_phnum;
    char *pht = pht_storage;
    f->op_vtable->pread(f, pht, phsize, elf->eh.e_phoff); /* TODO */

    /* Find virtual address range */
    elf->low = 0xFFFFFFFF;
    elf->high = 0;
    for (int i = 0; i < elf->eh.e_phnum; i++)
    {
        Elf_Phdr *ph = (Elf_Phdr *)&pht[elf->eh.e_phentsize * i];
        if (ph->p_type == PT_LOAD)
        {
            elf->low = min(elf->low, ph->p_vaddr);
            elf->high = max(elf->high, ph->p_vaddr + ph->p_memsz);
            log_info("PT_LOAD: vaddr %p, size %p", ph->p_vaddr, ph->p_memsz);
        }
        else if (ph->p_type == PT_DYNAMIC)
            log_info("PT_DYNAMIC: vaddr %p, size %p", ph->p_vaddr, ph->p_memsz);
        else if (ph->p_type == PT_PHDR)
            /* Patch phdr pointer in PT_PHDR, glibc uses it to determine the load offset */
            ph->p_vaddr = (size_t)pht;
    }

    /* Find the load base for an ET_DYN executable */
    elf->load_base = 0;
    if (elf->eh.e_type == ET_DYN)
    {
        size_t free_addr = mm_find_free_pages(elf->high - elf->low) * PAGE_SIZE;
        if (!free_addr)
            return -L_ENOMEM;
        elf->load_base = free_addr - elf->low;
        log_info("ET_DYN load offset: %p, real range [%p, %p)",
            elf->load_base, elf->load_base + elf->low, elf->load_base + elf->high);
    }

#ifdef _WIN64
    /* Unmap the pre-reserved executable region (see fork_init() for details) */
    size_t region_start = 0x400000;
    VirtualFree((void *)region_start, 0, MEM_RELEASE); /* This will silently fail if it's not the intended case */
#endif

    /* Map executable segments */
    /* TODO: Directly use mmap() */
    int load_base_set = 0;
    for (int i = 0; i < elf->eh.e_phnum; i++)
    {
        Elf_Phdr *ph = (Elf_Phdr *)&pht[elf->eh.e_phentsize * i];
        if (ph->p_type == PT_LOAD)
        {
            size_t addr = ph->p_vaddr & 0xFFFFF000;
            size_t size = ph->p_memsz + (ph->p_vaddr & 0x00000FFF);
            off_t offset_pages = ph->p_offset / PAGE_SIZE;

            /* Note: In ET_DYN executables, all addresses are based upon elf->load_base.
             * But in ET_EXEC executables, all addresses are absolute. */
            int prot = 0;
            if (ph->p_flags & PF_R)
                prot |= PROT_READ;
            if (ph->p_flags & PF_W)
                prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
                prot |= PROT_EXEC;
            if (elf->eh.e_type == ET_DYN)
                addr += elf->load_base;
            mm_mmap((void*)addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED | MAP_POPULATE, 0, NULL, 0);
            char *vaddr = (char *)ph->p_vaddr;
            if (elf->eh.e_type == ET_DYN)
                vaddr += elf->load_base;
            mm_check_write(vaddr, ph->p_filesz); /* Populate the memory, otherwise pread() will fail */
            f->op_vtable->pread(f, vaddr, ph->p_filesz, ph->p_offset);
            if (!binary->has_interpreter) /* This is not the interpreter */
                mm_update_brk((void*)(addr + size));
            if (elf->eh.e_type == ET_EXEC && !load_base_set)
            {
                /* Record the load base of the first segment in ET_EXEC;
                 * load_base will be used in run() to calculate various auxiliary vector pointers */
                load_base_set = 1;
                elf->load_base = addr;
            }
        }
    }

    /* Load the interpreter if present */
    for (int i = 0; i < elf->eh.e_phnum; i++)
    {
        Elf_Phdr *ph = (Elf_Phdr *)&pht[elf->eh.e_phentsize * i];
        if (ph->p_type == PT_INTERP)
        {
            if (binary->has_interpreter) /* This is already an interpreter */
                return -L_EACCES; /* Bad interpreter */
            binary->has_interpreter = true;
            char path[MAX_PATH];
            f->op_vtable->pread(f, path, ph->p_filesz, ph->p_offset); /* TODO */
            path[ph->p_filesz] = 0;
            log_info("interpreter: %s", path);
            struct file *fi;
            int r = vfs_openat(AT_FDCWD, path, O_RDONLY, 0, 0, &fi);
            if (r < 0)
                return r;
            if (!winfs_is_winfile(fi))
            {
                vfs_release(fi);
                return -L_EACCES;
            }
            r = load_elf(fi, binary);
            vfs_release(fi);
            if (r < 0)
                return -L_EACCES; /* Bad interpreter */
        }
    }
    return 0;
}

#define MAX_SHEBANG_LINE 256
static int load_script(struct file *f, struct binfmt *binary)
{
    /* Parse the shebang line */
    int size = f->op_vtable->pread(f, binary->buffer_base, MAX_SHEBANG_LINE, 0);
    char *p = binary->buffer_base, *end = p + size;
    /* Skip shebang */
    p += 2;
    /* Skip spaces */
    while (p < end && *p == ' ')
        p++;
    if (p == end)
        return -L_EACCES;
    const char *executable = p;
    binary->argv0 = p;
    while (p < end && *p != ' ' && *p != '\n')
        p++;
    if (p == end)
        return -L_EACCES;
    if (*p == '\n')
        *p = 0; /* It has no argument */
    else
    {
        *p++ = 0;
        while (p < end && *p == ' ')
            p++;
        if (p == end)
            return -L_EACCES;
        if (*p != '\n')
        {
            /* It has an argument */
            binary->argv1 = p;
            while (p < end && *p != '\n')
                p++;
            if (p == end)
                return -L_EACCES;
            *p = 0;
        }
    }
    binary->replace_argv0 = TRUE;
    struct file *fe;
    int r = vfs_openat(AT_FDCWD, executable, O_RDONLY, 0, 0, &fe);
    if (r < 0)
        return r;
    if (!winfs_is_winfile(fe))
    {
        vfs_release(fe);
        return -L_EACCES;
    }
    /* TODO: Recursive interpreters */
    return load_elf(fe, binary);
}
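/* A minimal standalone sketch of the shebang parsing that load_script() performs on
 * binary->buffer_base, rewritten against a plain buffer so it can be compiled and run
 * on its own. parse_shebang() and its out-parameters are hypothetical names for
 * illustration, not part of the loader's API. */
#include <stdio.h>
#include <string.h>

/* Splits "#!interpreter [optional-arg]\n" in place; returns 0 on success, -1 on a
 * malformed line. On success *interp points at the interpreter path and *arg at the
 * single optional argument (or NULL if there is none). */
static int parse_shebang(char *buf, int size, char **interp, char **arg)
{
    char *p = buf, *end = buf + size;
    *interp = NULL;
    *arg = NULL;
    if (size < 2 || buf[0] != '#' || buf[1] != '!')
        return -1;
    p += 2;
    while (p < end && *p == ' ')        /* skip spaces before the interpreter */
        p++;
    if (p == end)
        return -1;
    *interp = p;
    while (p < end && *p != ' ' && *p != '\n')
        p++;
    if (p == end)
        return -1;
    if (*p == '\n') {                   /* no argument */
        *p = 0;
        return 0;
    }
    *p++ = 0;
    while (p < end && *p == ' ')        /* skip spaces before the argument */
        p++;
    if (p == end)
        return -1;
    if (*p == '\n')                     /* only trailing spaces: still no argument */
        return 0;
    *arg = p;
    while (p < end && *p != '\n')
        p++;
    if (p == end)
        return -1;
    *p = 0;
    return 0;
}

int main(void)
{
    char line[] = "#!/bin/busybox sh\n";
    char *interp, *arg;
    if (parse_shebang(line, sizeof(line) - 1, &interp, &arg) == 0)
        printf("interpreter=%s arg=%s\n", interp, arg ? arg : "(none)");
    return 0;
}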
int Syscall::kcall_threadcreate(CoreData& core_data, Thread *currt)
{
    /*
     * Create a new thread the POSIX way
     *
     * KPARM1 : attr
     * KPARM2 : start_routine
     * KPARM3 : arg
     *
     * retval : tid or -ERRNO
     */
    //Thread *org_thread;
    pthread_attr_t attr;
    int err;
    mm_context *ctx;
    Process *pproc;
    // struct _Anvil_tls *ptls;

    kdebug("kcall_threadcreate %016lx\n", KPARM2);

    /* Validate the args */
    if ((err = currt->kcopy_fromuser(&attr, (const char *)KPARM1, sizeof(pthread_attr_t))) != 0)
    {
        kdebug("kcall_threadcreate - bad attr structure\n");
        return -err;
    }
    if (currt->ptr_check((void *)KPARM2) == -1)
    {
        kdebug("kcall_threadcreate - bad start routine\n");
        return -EINVAL;
    }

    /* Handle the case of someone creating thread 1 in a new process */
    if (attr.pid)
    {
        if ((pproc = proctable.getref(attr.pid)) == NULL)
        {
            return -EAGAIN;
        }
    }
    else
    {
        pproc = currt->get_proc();
        pproc->incref();
    }

    /*
     * FIXME: It should be possible to do this a different way
     * Get the cr3 of the process we'll be creating in
     */
    ctx = pproc->ctx;

    /*
     * According to POSIX the user may pass us both stacksize and stackaddr,
     * just stacksize, or nothing. If the user passes nothing we use the
     * default (4 * __PAGESIZE).
     */
    if (attr.stackaddr)
    {
        /* The user passed us a stack - make sure the size was passed
         * as well and check it */
        if (!attr.stacksize)
        {
            return -EINVAL;
        }
        /* Check */
    }
    if (!attr.stacksize)
    {
        attr.stacksize = 4 * __PAGESIZE;
    }

    void *stk;
    size_t stk_size;

    /* Now check whether we need to create a stack. */
    if (attr.stackaddr)
    {
        kdebug("Passing in stack %016lx\n", attr.stackaddr);
        /* We already checked that the address and size are valid */
        stk = (char *)attr.stackaddr;
        stk_size = attr.stacksize;
    }
    else
    {
        kdebug("Creating stack\n");
        //ksched_block(THR_ST_CREATING);
        err = mm_mmap(pproc->ctx, 0, attr.stacksize, PROT_READ|PROT_WRITE,
                      MAP_ANON|MAP_GUARD, -1, 0, &stk);
        stk_size = attr.stacksize;
        //kdebug("Created stack %d %d %p %ld\n", tnew->get_pid(), tnew->get_id(), tnew->stackaddr, tnew->stacksize);
    }

    /* Seems okay so let's create a new thread */
    Thread *tnew = new Thread(KPARM2, KPARM3, stk, stk_size);

    if ((err = pproc->thread_add(tnew)) != 0)
    {
        kprintf("kcall_threadcreate (kobj_create) - err %d\n", -err);
        pproc->unref();
        return -err;
    }

    /* Initialise the thread context */
    if (attr.stopped)
    {
        tnew->set_state(THR_ST_STOPPED);
    }

    /* Some fields are inherited from the creating thread */
    tnew->priority = currt->priority;
    tnew->m_thread_signal.set_blocked(currt->m_thread_signal.get_blocked());
    tnew->max_addr = currt->max_addr;
    tnew->ctx = ctx;

    tnew->init_tls(attr.return_func);

    /* Unblock the new thread - sets the state to THR_ST_READY */
    if (tnew->get_state() == THR_ST_READY)
    {
        sys.m_sched.add(tnew, 0);
    }
    tnew->activate();
    //kobj_unlock(KOBJ(tnew));

    kdebug("finished STARTING1 %d %d thread at %016lx\n",
           tnew->get_pid(), tnew->get_id(), tnew->reg.rip);

    //pproc->unref();
    return tnew->get_id();
}
int kthread_init()
{
    Process *proc1;
    Thread *tnew;
    int err;
    int i;
    //struct _Anvil_tls *ptls;

    //kobj_thread_type = kobj_register_type(sizeof(kobj_thread_t), (void (*)(void *))thread_delete_callback);

    /* All these threads are going to be part of process 1 */
    proc1 = proctable.getref(1);
    kdebug("proc1=%016lx pid=%d\n", proc1, proc1->get_id());

    /* Create an idle thread for each core in ring 1 */
    for (i=0; i<ksysinfo->num_cores; ++i)
    {
        /* TODO: allocate a proper stack */
        void *stackaddr = kmm_page_alloc();
        //mm_mmap(k_ctx, 0, IDLESTACK_SIZE,
        //        PROT_READ | PROT_WRITE, MAP_ANON | MAP_GUARD, -1, 0, NULL);
        memset(stackaddr, 0, IDLESTACK_SIZE);

        /* Seems okay so let's create a new thread */
        tnew = new Thread((uint64_t)idle, (uint64_t)i, stackaddr, IDLESTACK_SIZE);

        // if ((err = proc1->thread_add(tnew)) != 0)
        // {
        //     kprintf("kcall_threadcreate (kobj_create) - err %d\n", -err);
        //     return -err;
        // }

        /* Initialise the thread context */
        tnew->reg.cs = RING1_CS;
        tnew->reg.oldss = RING1_SS;
        tnew->set_state(THR_ST_RUNNING);
        tnew->priority = 0;
        tnew->max_addr = (char *)KERN_TOP;
        tnew->ctx = proc1->ctx;
        kdebug("pid=%d tid=%d\n", tnew->get_pid(), tnew->get_id());
        sys.core_data[i].curr_thread = tnew;
        tnew->affinity = i;
        tnew->activate();
    }

    void *stk_addr;
    err = mm_mmap(proc1->ctx, 0, 4 * __PAGESIZE, PROT_READ|PROT_WRITE,
                  MAP_ANON|MAP_GUARD, -1, 0, &stk_addr);

    /* Create the main thread in init - runs in ring 3 */
    tnew = new Thread(ksysinfo->init_program_info.ventry, 0, stk_addr, 4 * __PAGESIZE);

    if ((err = proc1->thread_add(tnew)) != 0)
    {
        kprintf("kcall_threadcreate (kobj_create) - err %d\n", -err);
        return -err;
    }

    tnew->ctx = proc1->ctx;
    mm_ctx_set(tnew->ctx);
    memset(tnew->stackaddr, 0, 4 * __PAGESIZE);
    tnew->max_addr = (char *)proc1->ctx->hi_addr;

    /* We've got a stack so put the TLS stuff on it */
    // mm_ctx_set(tnew->ctx);
    // ptls = (struct _Anvil_tls *)tnew->reg.oldrsp;
    // ptls->return_func = NULL;
    //
    // ptls->id = tnew->get_id() | ((tnew->get_pid() & 0x7fff) << 15);
    // kdebug("ID=%d id=%d pid=%d tnew=%016lx\n", ptls->id, tnew->get_id(), tnew->get_pid(), tnew);
    // ptls->stackaddr = tnew->stackaddr;
    // ptls->stacksize = tnew->stacksize;
    // ptls->pself = ptls;
    // tnew->pptls = &ptls->pself;

    proc1->unref();

    tnew->init_tls(NULL);

    /* 24 bytes for argc, argv and envp */
    tnew->reg.oldrsp -= 24;

    sys.m_sched.add(tnew, 0);
    tnew->activate();

    return 0;
}
void main()
{
    log_init();
    fork_init(); /* fork_init() will directly jump to the restored thread context if we are a fork child */
    mm_init();
    install_syscall_handler();
    heap_init();
    signal_init();
    process_init();
    tls_init();
    vfs_init();
    dbt_init();

    /* Fetch the command line */
    const char *cmdline = GetCommandLineA();
    int len = strlen(cmdline);
    if (len > BLOCK_SIZE) /* TODO: Test if there is sufficient space for the argv[] array */
    {
        kprintf("Command line too long.\n");
        process_exit(1, 0);
    }
    startup = mm_mmap(NULL, BLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS,
        INTERNAL_MAP_TOPDOWN | INTERNAL_MAP_NORESET, NULL, 0);
    *(uintptr_t*) startup = 1;
    char *current_startup_base = startup + sizeof(uintptr_t);
    memcpy(current_startup_base, cmdline, len + 1);
    char *envbuf = (char *)ALIGN_TO(current_startup_base + len + 1, sizeof(void*));
    char *env0 = envbuf;
    ENV("TERM=xterm");
    char *env1 = envbuf;
    ENV("HOME=/root");
    char *env2 = envbuf;
    ENV("DISPLAY=127.0.0.1:0");
    char *env3 = envbuf;
    ENV("PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin:/sbin");
    int argc = 0;
    char **argv = (char **)ALIGN_TO(envbuf, sizeof(void*));

    /* Split the command line into argv[], honouring double quotes */
    int in_quote = 0;
    char *j = current_startup_base;
    for (char *i = current_startup_base; i <= current_startup_base + len; i++)
        if (!in_quote && (*i == ' ' || *i == '\t' || *i == '\r' || *i == '\n' || *i == 0))
        {
            *i = 0;
            if (i > j)
                argv[argc++] = j;
            j = i + 1;
        }
        else if (*i == '"')
        {
            *i = 0;
            if (in_quote)
                argv[argc++] = j;
            in_quote = !in_quote;
            j = i + 1;
        }
    argv[argc] = NULL;
    char **envp = argv + argc + 1;
    int env_size = 4;
    envp[0] = env0;
    envp[1] = env1;
    envp[2] = env2;
    envp[3] = env3;
    envp[4] = NULL;
    char *buffer_base = (char*)(envp + env_size + 1);

    const char *filename = NULL;
    for (int i = 1; i < argc; i++)
    {
        if (argv[i][0] == '-')
        {
            /* Options are currently ignored */
        }
        else if (!filename)
            filename = argv[i];
    }
    if (filename)
        do_execve(filename, argc - 1, argv + 1, env_size, envp, buffer_base, NULL);
    kprintf("Usage: flinux <executable> [arguments]\n");
    process_exit(1, 0);
}