/* mm_init: bring up the whole memory-management subsystem from the multiboot
 * information handed over by the bootloader. The call order is significant:
 * paging structures first, then the page-fault handler, the physical (buddy)
 * allocator, the memory map, the slab (kmalloc) heap, and finally the
 * per-frame accounting table used for reference counting. */
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[mm]: Setting up Memory Management...\n");
	arch_mm_virtual_init(&kernel_context);
	/* register the page-fault handler on interrupt vector 14 */
	cpu_interrupt_register_handler (14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	/* feed every usable page found in the multiboot memory map to the buddy allocator */
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	set_ksf(KSF_MMU); /* hey, look at that, we have happy memory times! */
	mm_reclaim_init();
	/* map backing pages for the page-frame accounting array; '<=' maps one
	 * page past the floor division so a partial trailing page is covered */
	for(size_t i=0;i<=(sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1);i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
				mm_physical_allocate(mm_page_size(1), true),
				PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	printk(0, "[mm]: allocated %d KB for page-frame counting.\n",
			sizeof(struct pagedata) * maximum_page_number / 1024);
#if CONFIG_MODULES
	/* export the allocator entry points so loadable modules can link against them */
	loader_add_kernel_symbol(slab_kmalloc);
	loader_add_kernel_symbol(slab_kfree);
	loader_add_kernel_symbol(mm_virtual_map);
	loader_add_kernel_symbol(mm_virtual_getmap);
	loader_add_kernel_symbol(mm_allocate_dma_buffer);
	loader_add_kernel_symbol(mm_free_dma_buffer);
	loader_add_kernel_symbol(mm_physical_allocate);
	loader_add_kernel_symbol(mm_physical_deallocate);
#endif
}
// TODO: Kernel init functions, kernel_early_init stub is here, kernel_init is just a declaration to shut the linker up. void kernel_early_init(struct multiboot *mboot_header, addr_t initial_stack) { // This is fun, but as long as grub is a bitch, i have a problem. // Make sure interrupts are disabled. //asm volatile("cli"); // Store passed values, and set up the early system. kernel_state_flags = 0; set_ksf(KSF_BOOTING); mtboot = mboot_header; initial_boot_stack = initial_stack; loader_parse_kernel_elf(mboot_header, &kernel_sections); #if _DBOS_KERNEL_LOADER_MODULES loader_init_kernel_symbols(); #endif serial_init(); cpu_early_init(); #if _DBOS_KERNEL_LOADER_MODULES loader_init_modules(); #endif syscall_init(); cpu_timer_install(1000); cpu_processor_init_1(); printk(8, "[KERNEL]: Parsed kernel elf, end of stub function. If you see this, then it is working.\n"); printk(1, "[KERNEL]: Starting system management.\n"); mm_init(mtboot); tm_init_multitasking(); dm_init(); fs_init(); }
static void process_memorymap(struct multiboot *mboot) { addr_t i = mboot->mmap_addr; unsigned long num_pages = 0; unsigned long unusable = 0; uint64_t j = 0; uint64_t address; uint64_t length; uint64_t highest_page = 0; uint64_t lowest_page = ~0; int found_contiguous = 0; pm_location = ((((addr_t)&kernel_end - MEMMAP_KERNEL_START) & ~(PAGE_SIZE - 1)) + PAGE_SIZE + 0x100000); while ((pm_location >= initrd_start_page && pm_location <= initrd_end_page)) pm_location += PAGE_SIZE; while (i < (mboot->mmap_addr + mboot->mmap_length)) { mmap_entry_t *me = (mmap_entry_t *)(i + MEMMAP_KERNEL_START); address = ((uint64_t)me->base_addr_high << 32) | (uint64_t)me->base_addr_low; length = (((uint64_t)me->length_high << 32) | me->length_low); if (me->type == 1 && length > 0) { for (j = address; j < (address + length); j += PAGE_SIZE) { addr_t page; // 32 bit can only handle addresses up to 0xFFFFFFFF, above this == ignore. page = j; if (__is_actually_free(page)) { if (lowest_page > page) lowest_page = page; if (page > highest_page) highest_page = page; num_pages++; mm_physical_deallocate(page); } } } i += me->size + sizeof(uint32_t); } printk(1, "[MM]: Highest page = %x, Lowest page = %x, num_pages = %d\n", highest_page, lowest_page, num_pages); if (!j) PANIC(PANIC_MEM | PANIC_NOSYNC, "Memory map corrupted!", EFAULT); int gbs = 0; int mbs = ((num_pages * PAGE_SIZE)/1024)/1024; if (mbs < 4) { printk(KERN_PANIC, "%d MB, %d pages", mbs, num_pages); PANIC(PANIC_MEM | PANIC_NOSYNC, "Not enough memory, system wont work!", ENOMEM); } gbs = mbs/1024; if (gbs > 0) { printk(KERN_MILE, "%d GB", gbs); mbs = mbs % 1024; } printk(KERN_MILE, "%d MB available memory (page size = %d KB, kmalloc=slab: ok)\n", mbs, PAGE_SIZE/1024); printk(KERN_DEBUG, "[MM]: num pages = %d\n", num_pages); pm_num_pages = num_pages; maximum_page_number = highest_page / PAGE_SIZE; set_ksf(KSF_MEMMAPPED); }
/* arch_cpu_processor_init_2: second-stage CPU bring-up. Initializes ACPI and
 * the HPET, and — when SMP support is compiled in — probes the MP tables,
 * starts the local and I/O APICs, and marks SMP as enabled.
 * Order matters: ACPI/HPET come before APIC setup. */
void arch_cpu_processor_init_2(void)
{
	acpi_init();
	x86_hpet_init();
#if _DBOS_KERNEL_HAVE_CPU_SMP
	probe_smp();
	init_lapic();
	/* 1000 — presumably Hz, matching the system tick; confirm against cpu_timer_install */
	calibrate_lapic_timer(1000);
	init_ioapic();
	set_ksf(KSF_SMP_ENABLE);
#endif
}
/* mm_init: bring up the memory-management subsystem from the multiboot
 * information. Call order is significant: paging first, then the page-fault
 * handler, the physical (buddy) allocator, the memory map, the slab heap,
 * and finally the per-frame accounting table. */
void mm_init(struct multiboot *m)
{
	printk(KERN_DEBUG, "[MM]: Setting up Memory Management...\n");
	arch_mm_virtual_init(&kernel_context);
	/* register the page-fault handler on interrupt vector 14 */
	cpu_interrupt_register_handler(14, &arch_mm_page_fault_handle);
	pmm_buddy_init();
	/* feed every usable page from the multiboot memory map to the buddy allocator */
	process_memorymap(m);
	slab_init(MEMMAP_KMALLOC_START, MEMMAP_KMALLOC_END);
	set_ksf(KSF_MMU); // Memory init, check!
	mm_reclaim_init();
	/* map backing pages for the page-frame accounting array; '<=' maps one
	 * page past the floor division so a partial trailing page is covered */
	for(size_t i = 0; i <= (sizeof(struct pagedata) * maximum_page_number) / mm_page_size(1); i++) {
		mm_virtual_map(MEMMAP_FRAMECOUNT_START + i * mm_page_size(1),
				mm_physical_allocate(mm_page_size(1), true),
				PAGE_PRESENT | PAGE_WRITE, mm_page_size(1));
	}
	frames = (struct pagedata *)(MEMMAP_FRAMECOUNT_START);
	printk(0, "[MM]: allocated %d KB for page-frame counting.\n",
			sizeof(struct pagedata) * maximum_page_number / 1024);
}
int probe_smp() { unsigned long long lapic_msr = read_msr(0x1b); write_msr(0x1b, (lapic_msr&0xFFFFF000) | 0x800, 0); //set global enable bit for lapic unsigned mem_lower = ((CMOS_READ_BYTE(CMOS_BASE_MEMORY+1) << 8) | CMOS_READ_BYTE(CMOS_BASE_MEMORY)) << 10; int res=0; if(mem_lower < 512*1024 || mem_lower > 640*1024) return 0; if((unsigned)EBDA_SEG_ADDR > mem_lower - 1024 || (unsigned)EBDA_SEG_ADDR + *((unsigned char *)EBDA_SEG_ADDR) * 1024 > mem_lower) res=imps_scan_mptables(mem_lower - 1024, 1024); else res=imps_scan_mptables(EBDA_SEG_ADDR, 1024); if(!res) res=imps_scan_mptables(0xF0000, 0x10000); if(!res) return 0; set_ksf(KSF_CPUS_RUNNING); printk(5, "[cpu]: CPU%s initialized (boot=%d, #APs=%d: ok) \n", num_cpus > 1 ? "s" : "", primary_cpu->apicid, num_booted_cpus); return num_booted_cpus > 0; }
/* do_exec: replace the current process image with the executable at 'path'.
 * argv/env are the new argument/environment vectors. 'shebanged' is non-zero
 * when we were re-entered from an interpreter (#!) line, in which case the
 * vectors are kernel pointers and the userspace-pointer checks are skipped.
 * Returns 0 on success (the syscall return path then jumps into the new
 * image's entry point), or a negative errno on failure before the point of
 * no return; after preexec() a load failure kills the thread instead. */
int do_exec(char *path, char **argv, char **env, int shebanged /* oh my */)
{
	unsigned int i=0;
	addr_t end, eip;
	unsigned int argc=0, envc=0;
	char **backup_argv=0, **backup_env=0;
	/* Sanity */
	if(!path || !*path)
		return -EINVAL;
	/* Load the file, and make sure that it is valid and accessible */
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Checking executable file (%s)\n", current_process->pid, path);
	struct file *efil;
	int err_open;
	efil = fs_file_open(path, _FREAD, 0, &err_open);
	if(!efil)
		return err_open;
	/* are we allowed to execute it? */
	if(!vfs_inode_check_permissions(efil->inode, MAY_EXEC, 0)) {
		file_put(efil);
		return -EACCES;
	}
	/* is it a valid elf? */
	int header_size = 0;
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	header_size = sizeof(elf64_header_t);
#elif CONFIG_ARCH == TYPE_ARCH_X86
	header_size = sizeof(elf32_header_t);
#endif
	/* read in the ELF header, and check if it's a shebang */
	if(header_size < 2)
		header_size = 2; /* need at least two bytes to test for "#!" */
	unsigned char mem[header_size];
	fs_file_pread(efil, 0, mem, header_size);
	/* NOTE(review): efil is not released here — presumably loader_do_shebang
	 * takes ownership of the file reference; confirm. */
	if(__is_shebang(mem))
		return loader_do_shebang(efil, argv, env);
	int other_bitsize=0;
	if(!is_valid_elf(mem, 2) && !other_bitsize) {
		file_put(efil);
		return -ENOEXEC;
	}
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Copy data\n", current_process->pid);
	/* okay, lets back up argv and env so that we can
	 * clear out the address space and not lose data...
	 * If this call is coming from a shebang, then we don't check the pointers,
	 * since they won't be from userspace */
	size_t total_args_len = 0;
	if((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, argv, 0)) && argv) {
		while((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, argv[argc], 0)) && argv[argc] && *argv[argc])
			argc++;
		backup_argv = (char **)kmalloc(sizeof(addr_t) * argc);
		for(i=0;i<argc;i++) {
			backup_argv[i] = (char *)kmalloc(strlen(argv[i]) + 1);
			_strcpy(backup_argv[i], argv[i]);
			/* account for the string + NUL + its pointer slot in the new image */
			total_args_len += strlen(argv[i])+1 + sizeof(char *);
		}
	}
	if((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, env, 0)) && env) {
		while((shebanged || mm_is_valid_user_pointer(SYS_EXECVE, env[envc], 0)) && env[envc] && *env[envc])
			envc++;
		backup_env = (char **)kmalloc(sizeof(addr_t) * envc);
		for(i=0;i<envc;i++) {
			backup_env[i] = (char *)kmalloc(strlen(env[i]) + 1);
			_strcpy(backup_env[i], env[i]);
			total_args_len += strlen(env[i])+1 + sizeof(char *);
		}
	}
	/* room for the two NULL vector terminators */
	total_args_len += 2 * sizeof(char *);
	/* and the path too! */
	char *path_backup = (char *)kmalloc(strlen(path) + 1);
	_strcpy((char *)path_backup, path);
	path = path_backup;
	/* Preexec - This is the point of no return. Here we close out unneeded
	 * file descs, free up the page directory and clear up the resources
	 * of the task */
	if(EXEC_LOG)
		printk(0, "Executing (p%dt%d, cpu %d, tty %d): %s\n", current_process->pid, current_thread->tid,
				current_thread->cpu->knum, current_process->pty ? current_process->pty->num : 0, path);
	preexec();
	/* load in the new image */
	strncpy((char *)current_process->command, path, 128);
	if(!loader_parse_elf_executable(mem, efil, &eip, &end))
		eip=0;
	/* do setuid and setgid */
	if(efil->inode->mode & S_ISUID) {
		current_process->effective_uid = efil->inode->uid;
	}
	if(efil->inode->mode & S_ISGID) {
		current_process->effective_gid = efil->inode->gid;
	}
	/* we don't need the file anymore, close it out */
	file_put(efil);
	file_close_cloexec();
	if(!eip) {
		printk(5, "[exec]: Tried to execute an invalid ELF file!\n");
		free_dp(backup_argv, argc);
		free_dp(backup_env, envc);
		kfree(path);
		/* past the point of no return: the old image is gone, so the
		 * thread cannot return an error — it must die */
		tm_thread_exit(0);
	}
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Updating task values\n", current_process->pid);
	/* Setup the task with the proper values (libc malloc stack) */
	addr_t end_l = end;
	/* round the image end up to the next page boundary */
	end = ((end-1)&PAGE_MASK) + PAGE_SIZE;
	total_args_len += PAGE_SIZE;
	/* now we need to copy back the args and env into userspace
	 * writeable memory...yippie. */
	addr_t args_start = end + PAGE_SIZE;
	addr_t env_start = args_start;
	addr_t alen = 0;
	mm_mmap(end, total_args_len, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
	if(backup_argv) {
		/* pointer table first, then the strings packed after it */
		memcpy((void *)args_start, backup_argv, sizeof(addr_t) * argc);
		alen += sizeof(addr_t) * argc;
		*(addr_t *)(args_start + alen) = 0; /* set last argument value to zero */
		alen += sizeof(addr_t);
		argv = (char **)args_start;
		for(i=0;i<argc;i++) {
			char *old = argv[i];
			char *new = (char *)(args_start+alen);
			/* NOTE(review): env loop below advances by strlen+1; the +4 here
			 * presumably over-allocates for padding/alignment — confirm */
			unsigned len = strlen(old) + 4;
			argv[i] = new;
			_strcpy(new, old);
			kfree(old);
			alen += len;
		}
		kfree(backup_argv);
	}
	env_start = args_start + alen;
	alen = 0;
	if(backup_env) {
		memcpy((void *)env_start, backup_env, sizeof(addr_t) * envc);
		alen += sizeof(addr_t) * envc;
		*(addr_t *)(env_start + alen) = 0; /* set last argument value to zero */
		alen += sizeof(addr_t);
		env = (char **)env_start;
		for(i=0;i<envc;i++) {
			char *old = env[i];
			char *new = (char *)(env_start+alen);
			unsigned len = strlen(old) + 1;
			env[i] = new;
			_strcpy(new, old);
			kfree(old);
			alen += len;
		}
		kfree(backup_env);
	}
	end = (env_start + alen) & PAGE_MASK;
	current_process->env = env;
	current_process->argv = argv;
	kfree(path);
	/* set the heap locations, and map in the start */
	current_process->heap_start = current_process->heap_end = end + PAGE_SIZE*2;
	addr_t ret = mm_mmap(end + PAGE_SIZE, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0, 0);
	/* now, we just need to deal with the syscall return stuff. When the syscall
	 * returns, it'll just jump into the entry point of the new process */
	tm_thread_lower_flag(current_thread, THREAD_SCHEDULE);
	/* the kernel cares if it has executed something or not */
	if(!(kernel_state_flags & KSF_HAVEEXECED))
		set_ksf(KSF_HAVEEXECED);
	arch_loader_exec_initializer(argc, eip);
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Performing call\n", current_process->pid);
	return 0;
}
/* do_exec (legacy task_t variant): replace the image of task 't' (which must
 * be the current task) with the executable at 'path'. Backs up argv/env into
 * kernel memory, tears down the old address space via preexec() (the point of
 * no return), loads the new ELF image, and rebuilds argv/env in userspace.
 * Returns 0 on success or a negative errno before the point of no return;
 * after preexec() a failed load calls exit() instead. */
int do_exec(task_t *t, char *path, char **argv, char **env)
{
	unsigned int i=0;
	addr_t end, eip;
	unsigned int argc=0, envc=0;
	int desc;
	char **backup_argv=0, **backup_env=0;
	/* Sanity: exec only makes sense on the current, valid, non-kernel task */
	if(!t)
		panic(PANIC_NOSYNC, "Tried to execute with empty task");
	if(t == kernel_task)
		panic(0, "Kernel is being executed at the gallows!");
	if(t != current_task)
		panic(0, "I don't know, was really drunk at the time");
	if(t->magic != TASK_MAGIC)
		panic(0, "Invalid task in exec (%d)", t->pid);
	if(!path || !*path)
		return -EINVAL;
	/* Load the file, and make sure that it is valid and accessible */
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Checking executable file (%s)\n", t->pid, path);
	struct file *efil;
	int err_open, num;
	efil=d_sys_open(path, O_RDONLY, 0, &err_open, &num);
	/* on success 'num' is the descriptor; on failure reuse desc for the error */
	if(efil)
		desc = num;
	else
		desc = err_open;
	if(desc < 0 || !efil)
		return -ENOENT;
	if(!permissions(efil->inode, MAY_EXEC)) {
		sys_close(desc);
		return -EACCES;
	}
	/* Determine if the file is a valid ELF */
	int header_size = 0;
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	header_size = sizeof(elf64_header_t);
#elif CONFIG_ARCH == TYPE_ARCH_X86
	header_size = sizeof(elf32_header_t);
#endif
	char mem[header_size];
	read_data(desc, mem, 0, header_size);
	int other_bitsize=0;
#if CONFIG_ARCH == TYPE_ARCH_X86_64
	/* a 32-bit ELF on a 64-bit kernel is still runnable via the compat path */
	if(is_valid_elf32_otherarch(mem, 2))
		other_bitsize = 1;
#endif
	if(!is_valid_elf(mem, 2) && !other_bitsize) {
		sys_close(desc);
		return -ENOEXEC;
	}
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Copy data\n", t->pid);
	/* okay, lets back up argv and env so that we can
	 * clear out the address space and not lose data..*/
	if(__is_valid_user_ptr(SYS_EXECVE, argv, 0)) {
		while(__is_valid_user_ptr(SYS_EXECVE, argv[argc], 0) && *argv[argc])
			argc++;
		backup_argv = (char **)kmalloc(sizeof(addr_t) * argc);
		for(i=0;i<argc;i++) {
			backup_argv[i] = (char *)kmalloc(strlen(argv[i]) + 1);
			_strcpy(backup_argv[i], argv[i]);
		}
	}
	if(__is_valid_user_ptr(SYS_EXECVE, env, 0)) {
		while(__is_valid_user_ptr(SYS_EXECVE, env[envc], 0) && *env[envc])
			envc++;
		backup_env = (char **)kmalloc(sizeof(addr_t) * envc);
		for(i=0;i<envc;i++) {
			backup_env[i] = (char *)kmalloc(strlen(env[i]) + 1);
			_strcpy(backup_env[i], env[i]);
		}
	}
	/* and the path too! */
	char *path_backup = (char *)kmalloc(strlen(path) + 1);
	_strcpy((char *)path_backup, path);
	path = path_backup;
	/* NOTE(review): shared page-directory data with count > 1 is unhandled */
	if(pd_cur_data->count > 1)
		printk(0, "[exec]: Not sure what to do here...\n");
	/* Preexec - This is the point of no return. Here we close out unneeded
	 * file descs, free up the page directory and clear up the resources
	 * of the task */
	if(EXEC_LOG)
		printk(0, "Executing (task %d, cpu %d, tty %d, cwd=%s): %s\n", t->pid, ((cpu_t *)t->cpu)->apicid, t->tty, current_task->thread->pwd->name, path);
	preexec(t, desc);
	strncpy((char *)t->command, path, 128);
	/* load the image through the matching bit-width loader */
	if(other_bitsize) {
#if CONFIG_ARCH == TYPE_ARCH_X86_64
		if(!process_elf_other(mem, desc, &eip, &end))
			eip=0;
#endif
	} else if(!process_elf(mem, desc, &eip, &end))
		eip=0;
	sys_close(desc);
	if(!eip) {
		printk(5, "[exec]: Tried to execute an invalid ELF file!\n");
		free_dp(backup_argv, argc);
		free_dp(backup_env, envc);
#if DEBUG
		panic(0, "");
#endif
		/* past the point of no return: old image is gone, task must die */
		exit(0);
	}
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Updating task values\n", t->pid);
	/* Setup the task with the proper values (libc malloc stack) */
	addr_t end_l = end;
	end = (end&PAGE_MASK);
	user_map_if_not_mapped_noclear(end);
	/* now we need to copy back the args and env into userspace
	 * writeable memory...yippie. */
	addr_t args_start = end + PAGE_SIZE;
	addr_t env_start = args_start;
	addr_t alen = 0;
	if(backup_argv) {
		/* map enough pages for the pointer table (plus slack) before writing */
		for(i=0;i<(sizeof(addr_t) * (argc+1))/PAGE_SIZE + 2;i++)
			user_map_if_not_mapped_noclear(args_start + i * PAGE_SIZE);
		memcpy((void *)args_start, backup_argv, sizeof(addr_t) * argc);
		alen += sizeof(addr_t) * argc;
		*(addr_t *)(args_start + alen) = 0; /* set last argument value to zero */
		alen += sizeof(addr_t);
		argv = (char **)args_start;
		for(i=0;i<argc;i++) {
			char *old = argv[i];
			char *new = (char *)(args_start+alen);
			user_map_if_not_mapped_noclear((addr_t)new);
			/* NOTE(review): env loop below uses +1; the +4 here presumably
			 * over-allocates for padding — confirm */
			unsigned len = strlen(old) + 4;
			user_map_if_not_mapped_noclear((addr_t)new + len + 1);
			argv[i] = new;
			_strcpy(new, old);
			kfree(old);
			alen += len;
		}
		kfree(backup_argv);
	}
	env_start = args_start + alen;
	alen = 0;
	if(backup_env) {
		for(i=0;i<(((sizeof(addr_t) * (envc+1))/PAGE_SIZE) + 2);i++)
			user_map_if_not_mapped_noclear(env_start + i * PAGE_SIZE);
		memcpy((void *)env_start, backup_env, sizeof(addr_t) * envc);
		alen += sizeof(addr_t) * envc;
		*(addr_t *)(env_start + alen) = 0; /* set last argument value to zero */
		alen += sizeof(addr_t);
		env = (char **)env_start;
		for(i=0;i<envc;i++) {
			char *old = env[i];
			char *new = (char *)(env_start+alen);
			user_map_if_not_mapped_noclear((addr_t)new);
			unsigned len = strlen(old) + 1;
			user_map_if_not_mapped_noclear((addr_t)new + len + 1);
			env[i] = new;
			_strcpy(new, old);
			kfree(old);
			alen += len;
		}
		kfree(backup_env);
	}
	end = (env_start + alen) & PAGE_MASK;
	t->env = env;
	t->argv = argv;
	kfree(path);
	t->heap_start = t->heap_end = end + PAGE_SIZE;
	if(other_bitsize)
		raise_task_flag(t, TF_OTHERBS);
	user_map_if_not_mapped_noclear(t->heap_start);
	/* Zero the heap and stack */
	memset((void *)end_l, 0, PAGE_SIZE-(end_l%PAGE_SIZE));
	memset((void *)(end+PAGE_SIZE), 0, PAGE_SIZE);
	memset((void *)(STACK_LOCATION - STACK_SIZE), 0, STACK_SIZE);
	/* Release everything */
	if(EXEC_LOG == 2)
		printk(0, "[%d]: Performing call\n", t->pid);
	set_int(0);
	lower_task_flag(t, TF_SCHED);
	if(!(kernel_state_flags & KSF_HAVEEXECED))
		set_ksf(KSF_HAVEEXECED);
	arch_specific_exec_initializer(t, argc, eip);
	return 0;
}
/* process_memorymap: walk the multiboot memory map and hand every free,
 * usable page to the physical allocator via mm_physical_deallocate().
 * Also places the allocator's bootstrap region (pm_location) just past the
 * kernel image (avoiding the initrd), and records pm_num_pages and
 * maximum_page_number. Panics on a corrupted map or insufficient memory. */
static void process_memorymap(struct multiboot *mboot)
{
	addr_t i = mboot->mmap_addr;
	/* unusable and found_contiguous are currently unused */
	unsigned long num_pages=0, unusable=0;
	uint64_t j=0, address, length, highest_page = 0, lowest_page = ~0;
	int found_contiguous=0;
	/* bootstrap allocations start one page past the kernel image, plus a 1MB cushion */
	pm_location = ((((addr_t)&kernel_end - MEMMAP_KERNEL_START) & ~(PAGE_SIZE-1)) + PAGE_SIZE + 0x100000 /* HACK */);
	/* never let the bootstrap region overlap the initrd */
	while((pm_location >= initrd_start_page && pm_location <= initrd_end_page))
		pm_location += PAGE_SIZE;
	while(i < (mboot->mmap_addr + mboot->mmap_length)){
		mmap_entry_t *me = (mmap_entry_t *)(i + MEMMAP_KERNEL_START);
		address = ((uint64_t)me->base_addr_high << 32) | (uint64_t)me->base_addr_low;
		length = (((uint64_t)me->length_high<<32) | me->length_low);
		/* type 1 == available RAM in the multiboot memory map */
		if(me->type == 1 && length > 0) {
			for (j=address; j < (address+length); j += PAGE_SIZE) {
				addr_t page;
#if ADDR_BITS == 32
				/* 32-bit can only handle the lower 32 bits of the address. If we're
				 * considering an address above 0xFFFFFFFF, we have to ignore it */
				page = (addr_t)(j & 0xFFFFFFFF);
				if((j >> 32) != 0)
					break;
#else
				page = j;
#endif
				if(__is_actually_free(page)) {
					if(lowest_page > page)
						lowest_page=page;
					if(page > highest_page)
						highest_page=page;
					num_pages++;
					/* "freeing" the page hands it to the allocator */
					mm_physical_deallocate(page);
				}
			}
		}
		/* entries are variable-sized; 'size' excludes the size field itself */
		i += me->size + sizeof (uint32_t);
	}
	printk(1, "[mm]: Highest page = %x, Lowest page = %x, num_pages = %d \n", highest_page, lowest_page, num_pages);
	/* j never advanced => no usable entry was ever scanned */
	if(!j)
		panic(PANIC_MEM | PANIC_NOSYNC, "Memory map corrupted");
	int gbs=0;
	int mbs = ((num_pages * PAGE_SIZE)/1024)/1024;
	if(mbs < 4){
		panic(PANIC_MEM | PANIC_NOSYNC, "Not enough memory, system wont work (%d MB, %d pages)", mbs, num_pages);
	}
	gbs = mbs/1024;
	if(gbs > 0) {
		printk(KERN_MILE, "%d GB and ", gbs);
		mbs = mbs % 1024;
	}
	printk(KERN_MILE, "%d MB available memory (page size=%d KB, kmalloc=slab: ok)\n" , mbs, PAGE_SIZE/1024);
	printk(1, "[mm]: num pages = %d\n", num_pages);
	pm_num_pages=num_pages;
	maximum_page_number = highest_page / PAGE_SIZE;
	set_ksf(KSF_MEMMAPPED);
}
void tm_init_multitasking(void) { printk(KERN_DEBUG, "[sched]: Starting multitasking system...\n"); sysgate_page = mm_physical_allocate(PAGE_SIZE, true); mm_physical_memcpy((void *)sysgate_page, (void *)signal_return_injector, MEMMAP_SYSGATE_ADDRESS_SIZE, PHYS_MEMCPY_MODE_DEST); process_table = hash_create(0, 0, 128); process_list = linkedlist_create(0, LINKEDLIST_MUTEX); mutex_create(&process_refs_lock, 0); mutex_create(&thread_refs_lock, 0); thread_table = hash_create(0, 0, 128); struct thread *thread = kmalloc(sizeof(struct thread)); struct process *proc = kernel_process = kmalloc(sizeof(struct process)); proc->refs = 2; thread->refs = 1; hash_insert(process_table, &proc->pid, sizeof(proc->pid), &proc->hash_elem, proc); hash_insert(thread_table, &thread->tid, sizeof(thread->tid), &thread->hash_elem, thread); linkedlist_insert(process_list, &proc->listnode, proc); valloc_create(&proc->mmf_valloc, MEMMAP_MMAP_BEGIN, MEMMAP_MMAP_END, PAGE_SIZE, 0); linkedlist_create(&proc->threadlist, 0); mutex_create(&proc->map_lock, 0); mutex_create(&proc->stacks_lock, 0); mutex_create(&proc->fdlock, 0); hash_create(&proc->files, HASH_LOCKLESS, 64); proc->magic = PROCESS_MAGIC; blocklist_create(&proc->waitlist, 0, "process-waitlist"); mutex_create(&proc->fdlock, 0); memcpy(&proc->vmm_context, &kernel_context, sizeof(kernel_context)); thread->process = proc; /* we have to do this early, so that the vmm system can use the lock... 
*/ thread->state = THREADSTATE_RUNNING; thread->magic = THREAD_MAGIC; workqueue_create(&thread->resume_work, 0); thread->kernel_stack = (addr_t)&initial_kernel_stack; spinlock_create(&thread->status_lock); primary_cpu->active_queue = tqueue_create(0, 0); primary_cpu->idle_thread = thread; primary_cpu->numtasks=1; ticker_create(&primary_cpu->ticker, 0); workqueue_create(&primary_cpu->work, 0); tm_thread_add_to_process(thread, proc); tm_thread_add_to_cpu(thread, primary_cpu); atomic_fetch_add_explicit(&running_processes, 1, memory_order_relaxed); atomic_fetch_add_explicit(&running_threads, 1, memory_order_relaxed); set_ksf(KSF_THREADING); *(struct thread **)(thread->kernel_stack) = thread; primary_cpu->flags |= CPU_RUNNING; #if CONFIG_MODULES loader_add_kernel_symbol(tm_thread_delay_sleep); loader_add_kernel_symbol(tm_thread_delay); loader_add_kernel_symbol(tm_timing_get_microseconds); loader_add_kernel_symbol(tm_thread_set_state); loader_add_kernel_symbol(tm_thread_exit); loader_add_kernel_symbol(tm_thread_poke); loader_add_kernel_symbol(tm_thread_block); loader_add_kernel_symbol(tm_thread_got_signal); loader_add_kernel_symbol(tm_thread_unblock); loader_add_kernel_symbol(tm_blocklist_wakeall); loader_add_kernel_symbol(kthread_create); loader_add_kernel_symbol(kthread_wait); loader_add_kernel_symbol(kthread_join); loader_add_kernel_symbol(kthread_kill); loader_add_kernel_symbol(tm_schedule); loader_add_kernel_symbol(arch_tm_get_current_thread); #endif }