/* Create a new thread.
 * Allocates the thread structure, builds a kernel context starting at
 * EntryPoint, and prepares a zeroed, page-aligned FPU save area.
 * Returns NULL on allocation failure (the original dereferenced the
 * kmalloc results unchecked). */
x86Thread_t *_ThreadInit(Addr_t EntryPoint)
{
	x86Thread_t *t;
	Cpu_t Cpu;

	/* Get cpu (result currently unused — kept for the call's side effects,
	 * if any; TODO confirm whether it can be dropped) */
	Cpu = ApicGetCpu();

	/* Allocate a new thread structure */
	t = (x86Thread_t*)kmalloc(sizeof(x86Thread_t));
	if (t == NULL)
		return NULL;

	/* Setup */
	t->Context = ContextCreate((Addr_t)EntryPoint);
	t->UserContext = NULL;
	t->Flags = 0;

	/* FPU: page-aligned save buffer */
	t->FpuBuffer = (Addr_t*)kmalloc_a(0x1000);
	if (t->FpuBuffer == NULL) {
		kfree(t);
		return NULL;
	}

	/* Zero the FPU save area */
	memset(t->FpuBuffer, 0, 0x1000);

	/* Done */
	return t;
}
/* Setup a new task, including the necessary paging context.
 * However, mapping the program itself into the context is
 * UP TO YOU as the scheduler has no clue about how long
 * your program is. */
task_t *scheduler_newTask(void *entry, task_t *parent, char name[SCHEDULER_MAXNAME], char** environ, char** argv, int argc)
{
	// NOTE(review): kmalloc results are used unchecked here.
	task_t* thisTask = (task_t*)kmalloc(sizeof(task_t));
	void* stack = kmalloc_a(STACKSIZE);
	memset(stack, 0, STACKSIZE);

	// Initial CPU state lives at the top of the new stack. The -3 offset is
	// unexplained here — presumably leaves room for the words pushed below;
	// verify against the context-switch / IRQ entry code.
	thisTask->state = stack + STACKSIZE - sizeof(cpu_state_t) - 3;

	thisTask->memory_context = setupMemoryContext(stack);
	// NOTE(review): the assignment below immediately discards the
	// setupMemoryContext() result above — looks like a leak or a leftover
	// from a refactor; confirm which context is actually intended.
	thisTask->memory_context = vmem_kernelContext;

	// Stack
	thisTask->state->esp = stack + STACKSIZE - 3;
	thisTask->state->ebp = thisTask->state->esp;
	// Fake return frame: returning from the entry function lands in
	// scheduler_terminateCurrentTask.
	*(thisTask->state->ebp + 1) = (intptr_t)scheduler_terminateCurrentTask;
	*(thisTask->state->ebp + 2) = NULL; // base pointer

	// Instruction pointer (= start of the program)
	thisTask->state->eip = entry;
	thisTask->state->eflags = 0x200; // IF set: interrupts enabled
	thisTask->state->cs = 0x08;      // kernel code segment
	thisTask->state->ds = 0x10;      // kernel data segment
	thisTask->state->ss = 0x10;

	thisTask->pid = ++highestPid;
	// NOTE(review): strcpy assumes `name` is NUL-terminated within
	// SCHEDULER_MAXNAME — verify callers guarantee this.
	strcpy(thisTask->name, name);
	thisTask->parent = parent;
	thisTask->task_state = TASK_STATE_RUNNING;
	thisTask->environ = environ;
	thisTask->argv = argv;
	thisTask->argc = argc;

	return thisTask;
}
//map pages for bss segment pointed to by shdr //stores program break (end of .bss segment) in prog_break //stored start of .bss segment in bss_loc static void alloc_bss(page_directory_t* new_dir, elf_s_header* shdr, int* prog_break, int* bss_loc) { printf("ELF .bss mapped @ %x - %x\n", shdr->addr, shdr->addr + shdr->size); for (uint32_t i = 0; i <= shdr->size + PAGE_SIZE; i += PAGE_SIZE) { page_t* page = get_page(shdr->addr + i, 1, new_dir); if (!alloc_frame(page, 0, 1)) { printf_err(".bss %x wasn't alloc'd", shdr->addr + i); } char* pagebuf = kmalloc_a(PAGE_SIZE); //zero out .bss memset(pagebuf, 0, PAGE_SIZE); page_t* local_page = get_page((uint32_t)pagebuf, 1, page_dir_current()); ASSERT(local_page, "elf_load_segment couldn't find page for pagebuf"); extern void copy_page_physical(uint32_t page, uint32_t dest); copy_page_physical(local_page->frame * PAGE_SIZE, page->frame * PAGE_SIZE); //now that the buffer has been copied, we can safely free the buffer kfree(pagebuf); } //set program break to .bss segment *prog_break = shdr->addr + shdr->size; *bss_loc = shdr->addr; }
/* Load one PT_LOAD segment of an ELF image into the new address space.
 * For each destination page: allocate a frame in new_dir, temporarily remap
 * a local scratch page onto that frame, zero it, copy in at most one page
 * of file data (only seg->filesz bytes exist in the file; the memsz tail
 * stays zero), then restore the scratch mapping.
 * Returns false for non-loadable segments.
 *
 * Fixes over the original:
 *  - memset() used (dest_limit - dest_base) as the length, overrunning the
 *    PAGE_SIZE scratch buffer for any segment larger than one page;
 *  - memcpy() copied the full seg->filesz into the one-page buffer on
 *    EVERY iteration (and read past the end of the source data). */
bool elf_load_segment(page_directory_t* new_dir, unsigned char* src, elf_phdr* seg)
{
	//loadable?
	if (seg->type != PT_LOAD) {
		printf_err("Tried to load non-loadable segment");
		printk_err("Tried to load non-loadable segment");
		return false;
	}

	unsigned char* src_base = src + seg->offset;

	//figure out range to map this binary to in virtual memory
	uint32_t dest_base = seg->vaddr;
	uint32_t dest_limit = dest_base + seg->memsz;
	printf("dest_base %x dest_limit %x\n", dest_base, dest_limit);

	//alloc enough mem for new task
	for (uint32_t i = dest_base, page_counter = 0; i <= dest_limit; i += PAGE_SIZE, page_counter++) {
		page_t* page = get_page(i, 1, new_dir);
		ASSERT(page, "elf_load_segment couldn't get page in new addrspace at %x\n", i);
		bool got_frame = alloc_frame(page, 0, 0);
		ASSERT(got_frame, "elf_load_segment couldn't alloc frame for page %x\n", i);

		//create a scratch buffer in the CURRENT address space and point its
		//page at the new task's frame, so writes here land in the new space
		char* pagebuf = kmalloc_a(PAGE_SIZE);
		page_t* local_page = get_page((uint32_t)pagebuf, 0, page_dir_current());
		ASSERT(local_page, "couldn't get local_page!");
		int old_frame = local_page->frame;
		local_page->frame = page->frame;
		invlpg(pagebuf);

		//zero exactly one page — the buffer is only PAGE_SIZE bytes
		memset(pagebuf, 0, PAGE_SIZE);

		//only seg->filesz bytes are guaranteed to be in the file, _not_
		//memsz; copy at most one page, clamped to the remaining file bytes
		//(bytes between filesz and memsz stay zero from the memset above)
		uint32_t file_off = page_counter * PAGE_SIZE;
		if (file_off < seg->filesz) {
			uint32_t chunk = seg->filesz - file_off;
			if (chunk > PAGE_SIZE) {
				chunk = PAGE_SIZE;
			}
			memcpy(pagebuf, src_base + file_off, chunk);
		}

		//restore the scratch page's original frame and release the buffer
		local_page->frame = old_frame;
		invlpg(pagebuf);
		kfree(pagebuf);
	}
	return true;
}
void init_kheap(){ end = (unsigned int) &end; placement_address = end; kheap = (heap_header_t*) fmalloc(KHEAP_SIZE); init_heap(kheap, KHEAP_SIZE); //Make user heap, then map to its uheap = (heap_header_t*) kmalloc_a(UHEAP_SIZE); init_heap(uheap, UHEAP_SIZE); vpage_map_user(root_vpage_dir, (uint) &uheap, (uint) &uheap); }
/* Initialise paging: build the physical frame bitmap, create the kernel
 * page directory, pre-create heap page tables, identity-map everything
 * the placement allocator has handed out, back the heap pages with
 * frames, then enable paging, create the kernel heap, and switch to a
 * clone of the kernel directory.
 * NOTE(review): kmalloc/kmalloc_a results are used unchecked. */
void init_paging()
{
	size_t sz;
	uint32_t i;
	uint32_t mem_end_page;

	DPRINTK("paging...\t\t");

	/* Assumes 16MB of physical memory — TODO confirm against the
	 * bootloader's memory map. */
	mem_end_page = 0x1000000;
	nframes = mem_end_page / PAGE_SIZ;
	sz = INDEX_FROM_BIT(nframes);
	frames = (uint32_t *)kmalloc(sz);
	memset(frames, 0, sz);

	kernel_directory = (struct page_directory *) kmalloc_a(sizeof(struct page_directory));
	memset(kernel_directory, 0, sizeof(struct page_directory));

	// don't do this...
	current_directory = kernel_directory;
	// do this instead...
	kernel_directory->physical_addr = (uint32_t)kernel_directory->tables_physical;

	/* Pre-create page tables for the heap region now (get_page only, no
	 * alloc_frame) so the placement address doesn't move between the
	 * identity mapping below and enabling the heap. */
	for (i = KHEAP_START; i < KHEAP_START + KHEAP_INITIAL_SIZE; i += PAGE_SIZ)
		get_page(i, 1, kernel_directory);

	/* Identity-map everything allocated so far (plus one page of slack). */
	i = 0;
	while (i < placement_addr + PAGE_SIZ) {
		alloc_frame(get_page(i, 1, kernel_directory), 0, 0);
		i += PAGE_SIZ;
	}

	/* Now back the pre-created heap pages with physical frames. */
	for (i = KHEAP_START; i < KHEAP_START + KHEAP_INITIAL_SIZE; i += PAGE_SIZ)
		alloc_frame(get_page(i, 1, kernel_directory), 0, 0);

	// register_interrupt_handler(14, page_fault);

	switch_page_directory(kernel_directory);
	enable_paging();

	kheap = create_heap(KHEAP_START, KHEAP_START + KHEAP_INITIAL_SIZE, 0xCFFFF000, 0, 0);

	/* Run on a clone so the pristine kernel directory can be re-cloned
	 * for future processes. */
	current_directory = clone_directory(kernel_directory);
	switch_page_directory(current_directory);
	DPRINTK("done!\n");
}
/* Create a process from a flat (headerless) binary image.
 * One page is allocated and mapped at the fixed virtual address
 * 0xF0000000, the first 1024 bytes of the image are copied in, and the
 * process entry point is set to that address. Interrupts are disabled
 * for the duration of the setup. */
virtix_proc_t* flat_load_bin(void* addr){
	cli();

	virtix_proc_t* proc = mk_empty_proc();
	proc->name = "FLAT_BIN";
	proc->registers.eip = 0xF0000000;
	proc->registers.useresp = 0;

	/* back the fixed entry address with a freshly allocated page */
	unsigned int backing = (unsigned int) kmalloc_a(PAGE_S);
	vpage_map_user(proc->cr3, backing, 0xF0000000);
	memcpy((void*) backing, (void*) addr, 1024);

	sti();
	return proc;
}
/* brk() syscall: move the program break.
 * params[0] == 0 queries the current (aligned) break; otherwise the break
 * is grown to params[0] by allocating the difference. Returns the new
 * aligned break, or -1 outside the kernel context.
 * Fix over the original: when params[0] was at or below the current
 * break, the unsigned subtraction underflowed to a huge size_t and a
 * giant allocation was attempted; now the grow is skipped instead. */
int sys_brk(struct syscall syscall)
{
	if (vmem_currentContext == vmem_kernelContext) {
		/* brk(0): report the current break */
		if (syscall.params[0] == 0)
			return alignedMemoryPosition();

		size_t current = alignedMemoryPosition();
		if (syscall.params[0] > current) {
			/* grow the break by allocating the gap; the allocation
			 * itself advances alignedMemoryPosition() */
			size_t allocationSize = syscall.params[0] - current;
			kmalloc_a(allocationSize);
		}
		/* shrinking is not supported — return the unchanged break */
		return (int)alignedMemoryPosition();
	}
	return -1;
}
/* Initialises the bootstrap thread for an application processor (AP).
 * Flags mark the FPU state as already initialised and in use; no
 * execution contexts are attached yet. Returns NULL on allocation
 * failure (the original dereferenced the kmalloc results unchecked). */
x86Thread_t *_ThreadInitAp(void)
{
	x86Thread_t *Init;

	/* Setup initial thread */
	Init = (x86Thread_t*)kmalloc(sizeof(x86Thread_t));
	if (Init == NULL)
		return NULL;

	/* Page-aligned FPU save area */
	Init->FpuBuffer = kmalloc_a(0x1000);
	if (Init->FpuBuffer == NULL) {
		kfree(Init);
		return NULL;
	}

	Init->Flags = X86_THREAD_FPU_INITIALISED | X86_THREAD_USEDFPU;
	Init->Context = NULL;
	Init->UserContext = NULL;

	/* Zero the FPU save area */
	memset(Init->FpuBuffer, 0, 0x1000);

	/* Done */
	return Init;
}
/* Initialization * Creates the main thread */ x86Thread_t *_ThreadInitBoot(void) { x86Thread_t *Init; /* Setup initial thread */ Init = (x86Thread_t*)kmalloc(sizeof(x86Thread_t)); Init->FpuBuffer = kmalloc_a(0x1000); Init->Flags = X86_THREAD_FPU_INITIALISED | X86_THREAD_USEDFPU; Init->Context = NULL; Init->UserContext = NULL; /* Memset the buffer */ memset(Init->FpuBuffer, 0, 0x1000); /* Install Yield */ InterruptInstallIdtOnly(0xFFFFFFFF, INTERRUPT_YIELD, ThreadingYield, NULL); /* Done */ return Init; }
/* Bootstrap the two-task cooperative switching demo: allocates both task
 * structs, carves task 2's initial context off the top of a fresh stack
 * (entry = task2), then runs as task 1 and ping-pongs via swtch().
 * NOTE(review): never returns; kmalloc results are used unchecked. */
void init_tasking()
{
	u8 *sp;

	tsk1 = (task_t*)kmalloc(sizeof *tsk1);
	tsk2 = (task_t*)kmalloc(sizeof *tsk2);

	kprint("ESP: ");
	kprint_hexnl(read_esp());

	/* place task 2's saved context at the very top of its new stack */
	sp = (u8*)(kmalloc_a(STACK_SIZE) + STACK_SIZE);
	sp -= sizeof *tsk2->context;
	tsk2->context = (context_t *)sp;
	memset(tsk2->context, 0, sizeof *tsk2->context);
	tsk2->context->eip = (u32)task2;

	/* task 1's context slot starts empty — swtch() fills it on the
	 * first switch away from us */
	tsk1->context = 0;

	while (1) {
		kprint("I AM TASK 1\r");
		swtch(&tsk1->context, tsk2->context);
	}
}
int x86ThreadArchInit(struct x86ArchThread *thread, void *entry, unsigned long *argv, unsigned char priv) { unsigned long *stack; int argc = CountArguments((char **)argv); argv[argc] = 0; // Set last argument + 1 to NULL if(argc < 0) argc = 0; /** @note Following structures are allocated in process' parent: - The process struct; - The process queue. */ thread->kernel_stack_base = (unsigned long)kmalloc_a(KERNEL_STACK_SIZE); if(thread->kernel_stack_base == 0) return -ENOMEM; memset((void *)thread->kernel_stack_base, 0, KERNEL_STACK_SIZE); thread->kernel_stack = thread->kernel_stack_base + KERNEL_STACK_SIZE; // Create kernel stack if(priv == PRIV_USER) { // Apenas aloca memória de user se for preciso thread->stack_base = (unsigned long)umalloc_a(USER_STACK_SIZE); if(thread->stack_base == 0) { kfree((void *)thread->kernel_stack_base); return -ENOMEM; } memset((void *)thread->stack_base, 0, USER_STACK_SIZE); thread->stack = thread->stack_base + USER_STACK_SIZE; // Allocate 4 kilobytes of space for user } else { thread->stack_base = thread->kernel_stack_base; thread->stack = thread->kernel_stack; } stack = (unsigned long*)thread->stack; // Expand down stack // We expect all threads to have this layout: // ThreadMain(int argc, char **argv); // The kernel will not crash if the thread is different (i guess) *--stack = (unsigned long)argv; // Argv *--stack = argc; // Argc *--stack = (unsigned long)&x86ThreadExitEntry; // Pushed by iret if(priv == PRIV_USER) { // User threads have special stack *--stack = 0x23; // SS *--stack = thread->stack - 12; // ESP } *--stack = 0x202; // EFLAGS *--stack = ((priv == PRIV_USER) ? (0x1b) : 0x08); // CS *--stack = (unsigned long)entry; // EIP // Pushed by pusha *--stack = 0; // EDI *--stack = 0; // ESI *--stack = 0; // EBP *--stack = 0; // NULL *--stack = 0; // EBX *--stack = 0; // EDX *--stack = 0; // ECX *--stack = 0; // EAX // Pushed by asm handler *--stack = ((priv == PRIV_USER) ? (0x23) : 0x10); // DS *--stack = ((priv == PRIV_USER) ? 
(0x23) : 0x10); // ES *--stack = ((priv == PRIV_USER) ? (0x23) : 0x10); // FS *--stack = ((priv == PRIV_USER) ? (0x23) : 0x10); // GS thread->stack = (unsigned long)stack; return ESUCCESS; }
/* Normal malloc(): allocate `size` bytes using the allocator's default
 * node alignment (NODE_T_SIZE). Thin wrapper over kmalloc_a(). */
void* kmalloc(size_t size) {
	return kmalloc_a(size, NODE_T_SIZE);
}
/* Page aligned: allocate `size` bytes aligned to a page boundary
 * (PAGE_SIZE). Thin wrapper over kmalloc_a(). */
void* kmalloc_pa(size_t size) {
	return kmalloc_a(size, PAGE_SIZE);
}
/*
 * Initialises paging and sets up page tables: builds the frame bitmap
 * from the multiboot himem size, creates the kernel page directory,
 * pre-creates heap page tables, maps the higher-half kernel window,
 * identity-maps everything the placement allocator used, enables paging,
 * and finally creates the kernel heap.
 */
void paging_init() {
	kheap = NULL;
	unsigned int i = 0;

	// Hopefully the bootloader got the correct himem size
	uint32_t mem_end_page = sys_multiboot_info->mem_upper * 1024;
	nframes = mem_end_page / 0x1000;
	pages_total = nframes;

	// NOTE(review): `frames` is not NULL-checked, unlike kernel_directory below.
	frames = (uint32_t *) kmalloc(INDEX_FROM_BIT(nframes));
	memclr(frames, INDEX_FROM_BIT(nframes));

	// Allocate mem for a page directory.
	kernel_directory = (page_directory_t *) kmalloc_a(sizeof(page_directory_t));
	ASSERT(kernel_directory != NULL);
	memclr(kernel_directory, sizeof(page_directory_t));
	current_directory = kernel_directory;

	// Map some pages in the kernel heap area.
	// Here we call get_page but not alloc_frame. This causes page_table_t's
	// to be created where necessary. We can't allocate frames yet because they
	// they need to be identity mapped first below, and yet we can't increase
	// placement_address between identity mapping and enabling the heap
	for(i = KHEAP_START; i < KHEAP_START+KHEAP_INITIAL_SIZE; i += 0x1000) {
		page_t* page = paging_get_page(i, true, kernel_directory);
		memclr(page, sizeof(page_t));
		page->rw = 1;
		page->user = 0;
	}

	// This step serves to map the kernel itself
	// We don't allocate frames here, as that's done below.
	for(i = 0xC0000000; i < 0xC7FFF000; i += 0x1000) {
		page_t* page = paging_get_page(i, true, kernel_directory);
		memclr(page, sizeof(page_t));
		page->present = 1;
		page->rw = 1;
		page->user = 0;
		// virtual 0xCxxxxxxx maps onto physical 0x0xxxxxxx
		page->frame = ((i & 0x0FFFF000) >> 12);
	}

	// Allocate enough memory past the kernel heap so we can use the 'smart' allocator.
	// Note that this actually performs identity mapping.
	i = 0x00000000;
	while(i < (kheap_placement_address & 0x0FFFFFFF) + 0x1000) {
		alloc_frame(paging_get_page(i, true, kernel_directory), true, true);
		// count pages that back the kernel image itself as wired
		if(i < (uint32_t) &__kern_size) {
			pages_wired++;
		}
		i += 0x1000;
	}

	// Allocate kernel heap pages.
	for(i = KHEAP_START; i < KHEAP_START+KHEAP_INITIAL_SIZE; i += 0x1000) {
		alloc_frame(paging_get_page(i, false, kernel_directory), true, true);
	}

	// Set page fault handler
	// sys_set_idt_gate(14, (uint32_t) isr14, 0x08, 0x8E);

	// Convert kernel directory address to physical and save it
	kern_dir_phys = (uint32_t) &kernel_directory->tablesPhysical;
	kern_dir_phys -= 0xC0000000;
	kernel_directory->physicalAddr = kern_dir_phys;

	// Enable paging
	paging_switch_directory(kernel_directory);

	// Initialise a kernel heap
	kheap = create_heap(KHEAP_START, KHEAP_START+KHEAP_INITIAL_SIZE, 0xCFFFF000, true, true);
}