/**
 * Record the AHCI controller's memory range in the kernel mm.
 *
 * Adds a vma covering [ahci_start_addr, ahci_end_addr] mapped
 * read/write and non-executable, then caches the range on the
 * kernel mm struct.
 *
 * @param ahci_start_addr starting virtual address of the AHCI region
 * @param ahci_end_addr   ending virtual address of the AHCI region
 * @return 0 on success, -1 if the vma could not be added
 */
static int set_ahci_memory(uint64_t ahci_start_addr, uint64_t ahci_end_addr)
{
    struct kernel_mm_struct *kmm = get_kernel_mm();

    /* Device memory must never be executable: RW + NX. */
    int rc = add_vma(&kmm->mmap, ahci_start_addr, ahci_end_addr,
                     PAGE_TRANS_READ_WRITE | PAGE_TRANS_NX, 0);
    if (rc == -1)
        return -1;

    kmm->start_ahci_mem = ahci_start_addr;
    kmm->end_ahci_mem = ahci_end_addr;
    return 0;
}
/** * Create vma for kernel virtual memory * @param kern_start starting virtual address of kernel * @param kern_end ending virtual address of kernel * @return OK or ERROR */ static int set_kernel_memory(uint64_t kern_start, uint64_t kern_end) { struct kernel_mm_struct *mm = get_kernel_mm(); // TODO: Replace with error code if(-1 == add_vma(&(mm->mmap), kern_start, kern_end, PAGE_TRANS_READ_WRITE, 0)) { return -1; } mm->start_kernel = kern_start; mm->end_kernel = kern_end; // TODO: Replace with error code return 0; }
/**
 * Copy argc, argv, and envp into a freshly loaded task's user address space.
 *
 * Layout written into the task (all offsets relative to tsk->mm->start_stack):
 *   - argc, then argv[0..argc-1] pointers, a NULL, then envp pointers, a NULL,
 *     written sequentially starting at start_stack (via new_stack);
 *   - argv string bytes in a page allocated at start_stack + 4*PAGE_SIZE;
 *   - envp string bytes in a page allocated at start_stack + 5*PAGE_SIZE.
 *
 * The source strings (argv/envp) live in the KERNEL address space while the
 * destinations live in the TASK address space, so the copy loops toggle CR3
 * around every single read and write. The exact ordering of set_cr3() calls
 * is load-bearing: loop conditions that dereference argv/envp must run under
 * kern_cr3, and every store through new_stack/tsk_argv/tsk_env must run
 * under task_cr3. Do not reorder.
 *
 * @param tsk  task whose address space receives the arguments; its mm must
 *             already have start_stack set (panics otherwise)
 * @param argc number of argv entries
 * @param argv kernel-space argument strings
 * @param envp kernel-space, NULL-terminated environment strings
 */
void load_elf_args(Task *tsk, int argc, char *argv[], char *envp[])
{
    if (tsk->mm->start_stack == 0)
        panic("Task not set up\n");

    char tmp_c;            /* bounce byte: read under kern_cr3, written under task_cr3 */
    pml4_t *kern_cr3;
    pml4_t *task_cr3;
    kern_cr3 = get_cr3();
    task_cr3 = (pml4_t*)tsk->registers.cr3;

    /* Allocate one page for the argc/argv/envp pointer block in the task's
     * address space. */
    set_cr3(task_cr3);
    uint64_t *new_stack = (uint64_t*)((tsk->mm->start_stack + PAGE_SIZE) & PG_ALIGN);
    if (kmalloc_vma(task_cr3, (uint64_t)new_stack, 1, USER_SETTINGS) == NULL) {
        panic("malloc failed\n");
        return;
    }

    /* Track the new page with a vma in the task's mm (vma itself is kernel
     * memory, so build it under kern_cr3). */
    set_cr3(kern_cr3);
    struct vm_area_struct *vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
    vma->vm_prot = 0;
    vma->vm_start = (uint64_t)new_stack;
    vma->vm_end = vma->vm_start + PAGE_SIZE;
    add_vma(tsk->mm, vma);

    /* Write argc at start_stack, then argv pointers after it.
     * NOTE(review): the store targets start_stack, but the page above was
     * allocated at (start_stack + PAGE_SIZE) & PG_ALIGN — this only works if
     * start_stack already lies in a mapped page; confirm against the caller
     * that sets up start_stack. */
    set_cr3(task_cr3);
    new_stack = (uint64_t*)tsk->mm->start_stack;
    *new_stack = argc;
    new_stack++;

    /* Page for the argv string bytes, at start_stack + 4*PAGE_SIZE. */
    //tsk->args.argv = PHYS_TO_VIRT(kmalloc_pg());
    tsk->args.argv = (uint64_t)kmalloc_vma(task_cr3, (tsk->mm->start_stack + (4*PAGE_SIZE)) & PG_ALIGN, 1, USER_SETTINGS);
    char *tsk_argv = (char*)tsk->args.argv;

    /* vma for the argv string page. */
    set_cr3(kern_cr3);
    vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
    vma->vm_prot = 0;
    vma->vm_start = (uint64_t)tsk_argv;
    vma->vm_end = vma->vm_start + PAGE_SIZE;
    add_vma(tsk->mm, vma);
    set_cr3(task_cr3);

    /* Copy each argv string byte-by-byte across address spaces, recording
     * its task-space address in the pointer block. */
    for (int i = 0; i < argc; i++, new_stack++) {
        *new_stack = (uint64_t)tsk_argv;          /* task_cr3: store argv[i] pointer */
        set_cr3(kern_cr3);                        /* kern_cr3 for the loop condition read */
        for (int j = 0; *(argv[i]+j) != '\0'; j++, tsk_argv++) {
            //if(get_pte((pml4_t*)tsk->registers.cr3, (uint64_t)tsk_argv)) panic("VERY BAD!!!\n");
            //printk("char: %c\n", *(argv[i]+j));
            //*tsk_argv = *(argv[i] + j);
            set_cr3(kern_cr3);
            tmp_c = *(argv[i] + j);               /* read source byte (kernel space) */
            set_cr3(task_cr3);
            *tsk_argv = tmp_c;                    /* write dest byte (task space) */
            set_cr3(kern_cr3);                    /* back to kern_cr3 for the condition */
            //printk("%c\n", *(argv[i]+j));
            //if(i == 1 && j == 3)halt();
        }
        set_cr3(task_cr3);
        *tsk_argv = '\0';                         /* terminate the copied string */
        tsk_argv++;
    }
    /* NULL terminator after the argv pointers (still under task_cr3). */
    *new_stack = 0;
    new_stack++;

    /* Page for the envp string bytes, at start_stack + 5*PAGE_SIZE. */
    //tsk->args.envp = PHYS_TO_VIRT(kmalloc_pg());
    tsk->args.envp = (uint64_t)kmalloc_vma((pml4_t*)tsk->registers.cr3, (tsk->mm->start_stack + (5*PAGE_SIZE)) & PG_ALIGN, 1, USER_SETTINGS);
    char *tsk_env = (char*)tsk->args.envp;

    /* vma for the envp string page. */
    set_cr3(kern_cr3);
    vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
    vma->vm_prot = 0;
    vma->vm_start = (uint64_t)tsk_env;
    vma->vm_end = vma->vm_start + PAGE_SIZE;
    add_vma(tsk->mm, vma);
    set_cr3(task_cr3);
    //printk("adder: %p\n", new_stack);
    set_cr3(kern_cr3);

    /* Same cross-address-space copy for envp; envp is NULL-terminated so the
     * outer condition (read of envp[i]) runs under kern_cr3. */
    for (int i = 0; envp[i] != NULL; i++, new_stack++) {
        set_cr3(task_cr3);
        *new_stack = (uint64_t)tsk_env;           /* store envp[i] pointer */
        set_cr3(kern_cr3);
        for (int j = 0; *(envp[i]+j) != '\0'; j++, tsk_env++) {
            set_cr3(kern_cr3);
            tmp_c = *(envp[i] + j);
            set_cr3(task_cr3);
            *tsk_env = tmp_c;
            set_cr3(kern_cr3);
        }
        set_cr3(task_cr3);
        *tsk_env = '\0';
        tsk_env++;
        set_cr3(kern_cr3);
    }

    /* Final NULL after the envp pointers, then restore the kernel CR3. */
    set_cr3(task_cr3);
    *new_stack = 0;
    new_stack++;
    set_cr3(kern_cr3);
}
/**
 * Load an ELF image into a process address space.
 *
 * For every PT_LOAD program header with a non-zero file size, this allocates
 * backing pages in proc_pml4, zero-fills p_memsz bytes, copies p_filesz bytes
 * from the image (so .bss stays zeroed), and records a vma for the segment in
 * a newly created mm_struct. The heap break (brk/start_brk) is placed one
 * page-aligned page above the highest mapped segment, with an empty vma
 * reserved for it.
 *
 * @param data      in-memory ELF image (kernel address space)
 * @param len       image length in bytes — NOTE(review): never checked; no
 *                  bounds validation is done against e_phoff/p_offset
 * @param task      unused here — presumably kept for interface symmetry;
 *                  verify against callers before removing
 * @param proc_pml4 page tables of the target process
 * @return the populated mm_struct, or NULL if the ELF header is invalid
 */
struct mm_struct* load_elf(char *data, int len, Task *task, pml4_t *proc_pml4)
{
    if (validate_header(data)) {
        pml4_t *kern_pml4 = get_cr3();   /* remembered so we can switch back after each copy */

        //get the header
        Elf64_Ehdr *hdr = (Elf64_Ehdr*)data;

        //create new mm_struct
        /* NOTE(review): allocates a full page for a struct; kmalloc_kern may
         * be page-granular — confirm, otherwise sizeof(struct mm_struct)
         * would suffice. */
        struct mm_struct *mm = (struct mm_struct*)kmalloc_kern(PAGE_SIZE);
        memset(mm, 0, sizeof(struct mm_struct));

        /* e_shstrndx == 0 means no section-name string table. */
        if(hdr->e_shstrndx == 0x00)
            panic("NO STRING TABLE");

        /* Entry point doubles as the assumed start of the text segment. */
        mm->start_code = ((Elf64_Ehdr*) data)->e_entry;

        Elf64_Phdr *prgm_hdr = (Elf64_Phdr*)(data + hdr->e_phoff);
        uint64_t high_addr = 0;          /* highest vm_end seen; seeds the heap placement */
        for(int i = 0; i < hdr->e_phnum; prgm_hdr++, i++) {
            //printk("--------------LOAD-ELF-----------------\n");
            if (prgm_hdr->p_type == PT_LOAD && prgm_hdr->p_filesz > 0) {
                /* A segment's file contents can never exceed its memory size. */
                if (prgm_hdr->p_filesz > prgm_hdr->p_memsz) {
                    panic("Bad Elf!!!\n");
                    halt();
                }
                struct vm_area_struct *vma = (struct vm_area_struct*) kmalloc_kern(sizeof(struct vm_area_struct));
                /* Back the segment's full p_memsz at its p_vaddr in the
                 * process page tables. */
                if(kmalloc_vma(proc_pml4, prgm_hdr->p_vaddr, prgm_hdr->p_memsz, USER_SETTINGS) == NULL) {
                    panic("KMALLOC FAILED - elf.c:load_elf:34\n");
                    printk("SIZE: %d\n", prgm_hdr->p_filesz);
                }
                // printk("ELF Virtual memory address: %p\n", prgm_hdr->p_vaddr);

                /* Switch into the process address space to zero-fill and copy
                 * the segment; `data` must also be visible there (kernel
                 * mappings shared into proc_pml4 — TODO confirm). */
                set_cr3(proc_pml4);
                memset((void*)prgm_hdr->p_vaddr, 0, prgm_hdr->p_memsz);
                //printk("memcpy dest: %p src: %p size: %p\n", prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_filesz);
                memcpy((void*)prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_filesz);
                //memcpy((void*)prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_memsz);
                set_cr3(kern_pml4);

                /* Record the segment in the new mm, with the ELF p_flags as
                 * the protection bits. */
                vma->vm_start = prgm_hdr->p_vaddr;
                vma->vm_end = (uint64_t)(prgm_hdr->p_vaddr + prgm_hdr->p_memsz);
                vma->vm_prot = prgm_hdr->p_flags;
                add_vma(mm, vma);
                /* Sanity check: add_vma is expected to leave this vma at the
                 * tail of the list. */
                if(vma->next != NULL) {
                    panic("not null\n");
                    halt();
                }
                if(vma->vm_end > high_addr)
                    high_addr = vma->vm_end;

                if (prgm_hdr->p_vaddr == mm->start_code) {
                    // it's the text segment: data starts right after its file bytes
                    mm->end_code = (uint64_t)(prgm_hdr->p_vaddr + prgm_hdr->p_filesz);
                    mm->start_data = mm->end_code +1;
                }
            }
        }

        /* Place the (initially empty) heap one page above the highest
         * segment, page-aligned. */
        high_addr += PAGE_SIZE;
        high_addr &= PG_ALIGN;
        mm->brk = high_addr;
        mm->start_brk = mm->brk;
        struct vm_area_struct *vma = (struct vm_area_struct*) kmalloc_kern(sizeof(struct vm_area_struct));
        vma->vm_start = mm->start_brk;
        vma->vm_end = mm->brk;          /* zero-length vma: heap is empty until sbrk grows it */
        vma->vm_prot = 0;
        add_vma(mm, vma);
        return mm;
    } else {
        return NULL;
    }
}