void boot_64(uint32_t magic, uint32_t multiboot) { // checking multiboot magic variable. VGA_AT(0,0) = VGA_ENTRY('H', WHITE_ON_BLACK); if (magic != MULTIBOOT_BOOTLOADER_MAGIC) { VGA_AT(0,1) = VGA_ENTRY('!', WHITE_ON_BLACK); kernel_panic(); } // copying multiboot data. VGA_AT(0,1) = VGA_ENTRY('E', WHITE_ON_BLACK); multiboot_info_t *info = (multiboot_info_t *)kernel_p2v((uintptr_t)multiboot); switch (multiboot_copy(info)) { case MULTIBOOT_TOO_MANY_MEMORY_MAPS: VGA_AT(0,2) = VGA_ENTRY('?', WHITE_ON_BLACK); kernel_panic(); case MULTIBOOT_TOO_MANY_MODULES: VGA_AT(0,2) = VGA_ENTRY('?', WHITE_ON_BLACK); kernel_panic(); case MULTIBOOT_TOO_MANY_ELF_HEADERS: VGA_AT(0,2) = VGA_ENTRY('.', WHITE_ON_BLACK); kernel_panic(); } // we no longer need the original memory map at zero address. VGA_AT(0,2) = VGA_ENTRY('L', WHITE_ON_BLACK); page_set_pdpt(0, 0, 0); page_invalidate_all(); // initialize page allocators. VGA_AT(0,3) = VGA_ENTRY('L', WHITE_ON_BLACK); lomem_init(); himem_init(); // initialize CPUs. VGA_AT(0,4) = VGA_ENTRY('B', WHITE_ON_BLACK); cpu_enable_features(); cpu_init(); service_init(); gdt_init(); // adds per-cpu TSS into GDT. tss_update(); // initialize interrupt handling. VGA_AT(0,5) = VGA_ENTRY('E', WHITE_ON_BLACK); pic_init(); idt_init(); // kick-start the core service. VGA_AT(0,6) = VGA_ENTRY('N', WHITE_ON_BLACK); kernel_start_core(); // start scheduler and do stuff. VGA_AT(0,7) = VGA_ENTRY('D', WHITE_ON_BLACK); kernel_main(); }
/**
 * Builds a process descriptor for the "core" service from the multiboot
 * module list and starts it via process_create().
 *
 * Layout assembled here (all placed above vmem_base, tracked by vmem_top):
 *   PT_LOAD ELF segments -> user stack -> thread-local init image (if any)
 *   -> VGA framebuffer -> initrd (if present).
 *
 * NOTE(review): several error paths call log_error() and then fall
 * through to code that would misbehave if log_error() returns (e.g.
 * indexing modules[-1] below) — presumably log_error() halts; confirm.
 */
void kernel_start_core() {
  // create the core service.
  if (multiboot_data.coresrv_module == -1) {
    log_error("kernel", "start_core", "Coresrv module not found");
  }
  // Locate the coresrv module and view its ELF image through the
  // kernel's virtual mapping of physical memory.
  module_t *core_mod = multiboot_data.modules + multiboot_data.coresrv_module;
  Elf32_Ehdr *elf = (Elf32_Ehdr*)kernel_p2v(core_mod->mod_start);
  // each ELF header may end up as a memory block.
  // we also need one for initrd, stack, and VGA each.
  // (budget: e_phnum PT_LOAD segments at most, +3 extra maps.)
  struct process_descriptor* pd = process_alloc_descriptor(elf->e_phnum + 3);
  pd->vmem_base = 0;
  pd->vmem_size = USERMODE_SIZE;
  // NOTE(review): service_relocate() apparently assigns the final
  // vmem_base used in all address math below — confirm.
  service_relocate(pd);
  pd->entry_point = pd->vmem_base + (uintptr_t)elf->e_entry;

  // create memory map description of the binary.
  uintptr_t vmem_top = 0;              // highest virtual address claimed so far.
  struct process_memory *pm_local = 0; // thread local section.
  for (unsigned i = 0; i < elf->e_phnum; ++i) {
    Elf32_Phdr *prog_header = (Elf32_Phdr*)
        ((uintptr_t)elf + elf->e_phoff + i * elf->e_phentsize);
    // Only loadable segments become memory maps.
    if (prog_header->p_type != PT_LOAD) continue;
    struct process_memory *pm = pd->memory_maps + (pd->n_maps++);
    // check flags: translate ELF segment flags into page flags.
    pm->flags = 0;
    if ((prog_header->p_flags & PF_X) == 0) pm->flags |= PAGE_NOEXECUTE;
    if ((prog_header->p_flags & PF_W) != 0) pm->flags |= PAGE_WRITEABLE;
    // virtual address range where pages will be moved into:
    pm->v_base = pd->vmem_base + (uintptr_t)prog_header->p_vaddr;
    pm->v_size = (size_t)prog_header->p_memsz;
    if (pm->v_base % TABLE_SIZE) {
      // each block must be at table boundary!
      log_error("kernel", "start_core", "Unaligned ELF segment");
    }
    // actual memory address range to move (file image of the segment):
    pm->m_base = kernel_p2v(core_mod->mod_start + prog_header->p_offset);
    pm->m_size = (size_t)prog_header->p_filesz;
    // thread local section requires special care: its virtual placement
    // is deferred (re-based after the stack, below), and its flags are
    // cleared. It is excluded from the vmem_top accumulation here.
    if (pm->v_base == pd->vmem_base + THREAD_LOCAL_BASE) {
      pm->flags = 0;
      pm_local = pm;
    } else {
      uintptr_t v_top = pm->v_base + pm->v_size;
      if (v_top > vmem_top) vmem_top = v_top;
    }
  }

  // add a stack.
  uintptr_t stack = page_clear(lomem_alloc_4k());
  size_t stack_size = PAGE_SIZE;
  uintptr_t stack_top = stack + stack_size;
  // NOTE(review): the !stack check happens after stack_top is computed;
  // harmless as written only if log_error() does not return — confirm.
  if (!stack) log_error("kernel", "start_core", "Out of memory");
  {
    struct process_memory *pm = pd->memory_maps + (pd->n_maps++);
    pm->flags = PAGE_WRITEABLE | PAGE_NOEXECUTE;
    pm->m_base = kernel_p2v(stack);
    // NOTE(review): negative m_size — presumably a convention meaning
    // the single physical page backs the TOP of the virtual range
    // (stack grows down); confirm against the map-loading code.
    pm->m_size = -(intptr_t)stack_size;
    pm->v_base = page_table_round_up(vmem_top);
    pm->v_size = USER_STACK_SIZE;
    uintptr_t v_top = pm->v_base + pm->v_size;
    if (v_top > vmem_top) vmem_top = v_top;
    pd->stack_bottom = pm->v_base;
    pd->stack_top = pm->v_base + pm->v_size;
  }

  // top of stack is used for libc & coresrv initialization data.
  pd->stack_reserved = sizeof(struct libc_init) + sizeof(struct coresrv_init);
  if (pd->stack_reserved > stack_size) {
    log_error("kernel", "start_core", "Stack too small");
  }
  // Both init structures are written through the kernel mapping of the
  // physical stack page; coresrv_init sits at the very top, libc_init
  // immediately below it.
  struct coresrv_init *coresrv =
      (struct coresrv_init *)kernel_p2v(stack_top - sizeof(struct coresrv_init));
  struct libc_init *libc =
      (struct libc_init *)kernel_p2v(stack_top - pd->stack_reserved);
  // libc->data holds the USER-SPACE virtual address of the coresrv_init
  // block (mirrors the kernel-side 'coresrv' pointer above).
  libc->data = (void*)(pd->stack_top - sizeof(struct coresrv_init));
  pd->libc = libc;

  // special thread locals init memory: now that the stack is placed,
  // re-base the deferred TLS segment above everything mapped so far.
  if (pm_local) {
    pm_local->v_base = page_table_round_up(vmem_top);
    uintptr_t v_top = pm_local->v_base + pm_local->v_size;
    if (v_top > vmem_top) vmem_top = v_top;
    libc->threadlocal_init = (void*)pm_local->v_base;
    libc->threadlocal_size = pm_local->v_size;
  }

  // add VGA: map the framebuffer into the process, write-through.
  {
    struct process_memory *pm = pd->memory_maps + (pd->n_maps++);
    pm->flags = PAGE_WRITETHROUGH | PAGE_WRITEABLE | PAGE_NOEXECUTE;
    pm->m_base = vga.fb;
    pm->m_size = vga.fb_size;
    pm->v_base = page_table_round_up(vmem_top);
    pm->v_size = pm->m_size;
    uintptr_t v_top = pm->v_base + pm->v_size;
    if (v_top > vmem_top) vmem_top = v_top;
    coresrv->vga_base = (void*)pm->v_base;
    coresrv->vga_size = pm->v_size;
  }

  // add the initrd, if available.
  if (multiboot_data.initrd_module != -1) {
    module_t *initrd_mod = multiboot_data.modules + multiboot_data.initrd_module;
    struct process_memory *pm = pd->memory_maps + (pd->n_maps++);
    pm->flags = PAGE_WRITEABLE | PAGE_NOEXECUTE;
    pm->m_base = kernel_p2v(initrd_mod->mod_start);
    pm->m_size = initrd_mod->mod_end - initrd_mod->mod_start;
    pm->v_base = page_table_round_up(vmem_top);
    pm->v_size = pm->m_size;
    uintptr_t v_top = pm->v_base + pm->v_size;
    if (v_top > vmem_top) vmem_top = v_top;
    coresrv->initrd_base = (void*)pm->v_base;
    coresrv->initrd_size = pm->v_size;
  }

  // make it happen!
  struct process *core = process_create(pd);
  (void)core;
}