/* * Init the VM code. */ void oskit_uvm_redzone_init(void) { oskit_addr_t addr; /* * We use a task gate to catch page faults, since a stack overflow * will try and dump more stuff on the stack. This is the easiest * way to deal with it. */ if ((addr = (oskit_addr_t) lmm_alloc_aligned(&malloc_lmm, STACKSIZE, 0, 12, 0)) == 0) panic(__FUNCTION__": Could not allocate stack\n"); task_tss.ss0 = KERNEL_DS; task_tss.esp0 = addr + STACKSIZE - sizeof(double); task_tss.esp = task_tss.esp0; task_tss.ss = KERNEL_DS; task_tss.ds = KERNEL_DS; task_tss.es = KERNEL_DS; task_tss.fs = KERNEL_DS; task_tss.gs = KERNEL_DS; task_tss.cs = KERNEL_CS; task_tss.io_bit_map_offset = sizeof(task_tss); task_tss.eip = (int) double_fault_handler; /* Make sure the task is started with interrupts disabled */ osenv_intr_disable(); task_tss.eflags = (int) get_eflags(); osenv_intr_enable(); /* Both TSSs has to know about the page tables */ task_tss.cr3 = get_cr3(); base_tss.cr3 = get_cr3(); /* Initialize the base TSS descriptor. */ fill_descriptor(&base_gdt[KERNEL_TRAP_TSS / 8], kvtolin(&task_tss), sizeof(task_tss) - 1, ACC_PL_K|ACC_TSS|ACC_P, 0); /* * NOTE: The task switch will include an extra word on the stack, * pushed by the CPU. The handler will need to be in assembly code * if we care about that value. As it is, the handler routine * stack is going to be slightly messed up, but since the handler * calls panic, it is not a problem right now. */ fill_gate(&base_idt[T_DOUBLE_FAULT], 0, KERNEL_TRAP_TSS, ACC_TASK_GATE|ACC_P|ACC_PL_K, 0); base_idt_load(); base_gdt_load(); }
ac_bool test_crs(void) { ac_bool error = AC_FALSE; union cr0_u cr0 = { .raw = get_cr0() }; // cr1 is reserved ac_uint cr2 = get_cr2(); union cr3_u cr3 = { .raw = get_cr3() }; union cr4_u cr4 = { .raw = get_cr4() }; ac_uint cr8 = get_cr8(); print_cr0("cr0", cr0.raw); ac_printf("cr2: 0x%p\n", cr2); print_cr3("cr3", cr3.raw); print_cr4("cr4", cr4.raw); ac_printf("cr8: 0x%p\n", cr8); set_cr0(cr0.raw); // cr2 is read only set_cr3(cr3.raw); set_cr4(cr4.raw); set_cr8(cr8); ac_uint cr0_1 = get_cr0(); ac_uint cr3_1 = get_cr3(); ac_uint cr4_1 = get_cr4(); ac_uint cr8_1 = get_cr8(); error |= AC_TEST(cr0.raw == cr0_1); error |= AC_TEST(cr3.raw == cr3_1); error |= AC_TEST(cr4.raw == cr4_1); error |= AC_TEST(cr8 == cr8_1); return error; }
/*
 * Module entry point: report the physical and kernel-virtual address of
 * the page-directory base (CR3), run cache_trick() against it, then
 * write and read back a run of fixed kernel virtual addresses.
 */
int init_module(void)
{
	unsigned int dir_phys, dir_virt;
	unsigned int retval, i;
	unsigned int *probe;

	dir_phys = get_cr3();
	printk(KERN_ALERT "cr3 phys = 0x%x\n", dir_phys);
	dir_virt = (unsigned int) phys_to_virt(dir_phys);
	printk(KERN_ALERT "cr3 virt = 0x%x\n", dir_virt);

	retval = cache_trick(dir_virt, dir_phys);

	/* Just a loop to access some random addresses */
	probe = (unsigned int *) 0xc0004000;	/* virtual address of 0x4000 */
	for (i = 0; i < 1000; i++) {
		probe[i] = i;
		printk(KERN_ALERT "%u\n", probe[i]);
	}

	/* If 0xaabbccdd is printed here, then only_nem() executed until the end */
	printk(KERN_ALERT "retval = %x\n", retval);
	return 0;
}
/*
 * Install an interrupt handler that runs as a hardware task: build a
 * TSS descriptor in the GDT, point the IDT entry for INT_NO at it via
 * a task gate, and prime TSS so the CPU enters HANDLER with flat
 * kernel segments and the shared interrupt stack.
 */
void set_task_int_gate (void (*handler)(void), int int_no, int dpl, tss_386_t *tss, int gdt_index)
{
	tss_descriptor_t *desc;
	gate_t *gate;
	unsigned long base = (unsigned long) tss;

	/* bail if bad gdt index */
	if ((gdt_index < GDT_INT_START_INDEX) || (gdt_index > GDT_SIZE-1))
		panic ("set_task_int_gate: GDT index out of bounds!");

	/* bail if bad int_no */
	if ((int_no < 0) || (int_no > MAX_INTS))
		panic ("set_task_int_gate: INT no out of bounds!");

	/* TSS descriptor: limit is the TSS size, base split across fields. */
	desc = ((tss_descriptor_t *) &_gdt) + gdt_index;
	desc->g_limit_high = 0;
	desc->limit_low = sizeof (*tss) - 1;
	desc->p_dpl_type = D_TSS | D_PRESENT;
	desc->base_low = (short int) (base & 0xffff);
	desc->base_mid = (unsigned char) (base >> 16) & 0xff;
	desc->base_high = (unsigned char) (base >> 24) & 0xff;

	/* Task gate: only the selector matters, the offset is unused. */
	gate = &idt[int_no];
	gate->p_dpl_type = D_PRESENT | D_TASK_TYPE | dpl;
	gate->offset_low = 0;
	gate->offset_high = 0;
	/* make selector of index (always in GDT) */
	gate->selector = (gdt_index << 3);
	gate->pad = 0;

	/* Prime the TSS with the context the handler task starts from. */
	tss->ds = DS_SELECTOR;
	tss->es = DS_SELECTOR;
	tss->fs = DS_SELECTOR;
	tss->gs = DS_SELECTOR;
	tss->cs = CS_SELECTOR;
	tss->ss = DS_SELECTOR;
	tss->esp = (long) int_stacktop;
	/* same address space for int-handlers */
	tss->cr3 = get_cr3 ();
	tss->eflags = 2;	/* only reserved bit 1 set: interrupts off */
	tss->eip = (long) handler;
	tss->trace = 0;
	tss->backlink = TSS_SELECTOR;
}
/*
 * Panic with a register dump taken from the 32-bit kernel TSS plus the
 * live control registers. Optionally dumps machine-check MSRs and a
 * backtrace first. Never returns (panic() does not return).
 */
static void panic_32(__unused int code, __unused int pc, __unused const char *msg, boolean_t do_mca_dump, boolean_t do_bt)
{
	struct i386_tss *my_ktss = current_ktss();

	/* Set postcode (DEBUG only) */
	postcode(pc);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	if (do_mca_dump) {
#if CONFIG_MCA
		/*
		 * Dump the contents of the machine check MSRs (if any).
		 */
		mca_dump();
#endif
	}

#if MACH_KDP
	/*
	 * Print backtrace leading to first fault:
	 */
	if (do_bt)
		panic_i386_backtrace((void *) my_ktss->ebp, 10, NULL, FALSE, NULL);
#endif

	/* Saved state comes from the kernel TSS; CR0-CR4 are read live. */
	panic("%s at 0x%08x, code:0x%x, "
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x%s\n",
	      msg,
	      my_ktss->eip, code,
	      (uint32_t)get_cr0(), (uint32_t)get_cr2(), (uint32_t)get_cr3(), (uint32_t)get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip,
	      virtualized ? " VMM" : "");
}
void init_switching (void) { short tss_selector = TSS_SELECTOR; gdt = (tss_descriptor_t *) &_gdt; gdt[TSS_INDEX].g_limit_high = 0; gdt[TSS_INDEX].limit_low = sizeof (tss0) - 1; gdt[TSS_INDEX].p_dpl_type = D_TSS | D_PRESENT; /* setup an initial tss */ gdt[TSS_INDEX].base_low = (short int) &tss0 & 0xffff; gdt[TSS_INDEX].base_mid = (unsigned char) (((long) &tss0) >> 16) & 0xff; gdt[TSS_INDEX].base_high = (unsigned char) (((long) &tss0) >> 24) & 0xff; tss0.backlink = TSS_SELECTOR; tss0.cr3 = get_cr3(); /* load task register */ asm ("ltr %0": :"r" (tss_selector)); }
/*
 * Copy argc/argv/envp into TSK's address space, building the
 * conventional process-startup stack layout:
 *   [argc][argv pointers...][0][envp pointers...][0]
 * The argv strings go into a page mapped at start_stack + 4*PAGE_SIZE,
 * the envp strings at start_stack + 5*PAGE_SIZE.
 *
 * The kernel and the task live in different address spaces, so every
 * access is bracketed with set_cr3(): source bytes are read under
 * kern_cr3, destination bytes written under task_cr3, one character at
 * a time through tmp_c.
 */
void load_elf_args(Task *tsk, int argc, char *argv[], char *envp[])
{
	if (tsk->mm->start_stack == 0)
		panic("Task not set up\n");

	char tmp_c;		/* one-character bounce buffer between spaces */
	pml4_t *kern_cr3;
	pml4_t *task_cr3;

	kern_cr3 = get_cr3();
	task_cr3 = (pml4_t*)tsk->registers.cr3;
	set_cr3(task_cr3);

	/* Map one page in the task for argc + the pointer vectors. */
	uint64_t *new_stack = (uint64_t*)((tsk->mm->start_stack + PAGE_SIZE) & PG_ALIGN);
	if (kmalloc_vma(task_cr3, (uint64_t)new_stack, 1, USER_SETTINGS) == NULL) {
		panic("malloc failed\n");
		return;
	}

	/* Record the new page in the task's VMA list (kernel-side data). */
	set_cr3(kern_cr3);
	struct vm_area_struct *vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
	vma->vm_prot = 0;
	vma->vm_start =(uint64_t) new_stack;
	vma->vm_end = vma->vm_start + PAGE_SIZE;
	add_vma(tsk->mm, vma);
	set_cr3(task_cr3);

	/* First stack slot is argc. */
	new_stack = (uint64_t*)tsk->mm->start_stack;
	*new_stack = argc;
	new_stack++;

	/* Page that will hold the argv strings themselves. */
	//tsk->args.argv = PHYS_TO_VIRT(kmalloc_pg());
	tsk->args.argv = (uint64_t)kmalloc_vma(task_cr3, (tsk->mm->start_stack + (4*PAGE_SIZE)) & PG_ALIGN, 1, USER_SETTINGS);
	char *tsk_argv = (char*)tsk->args.argv;

	/* VMA bookkeeping for the argv-string page. */
	set_cr3(kern_cr3);
	vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
	vma->vm_prot = 0;
	vma->vm_start =(uint64_t) tsk_argv;
	vma->vm_end = vma->vm_start + PAGE_SIZE;
	add_vma(tsk->mm, vma);
	set_cr3(task_cr3);

	/*
	 * Copy each argv string byte-by-byte, writing its task-side
	 * address into the pointer slot first. The inner loop condition
	 * reads argv[] and must run under kern_cr3.
	 */
	for (int i = 0; i < argc; i++, new_stack++) {
		*new_stack = (uint64_t)tsk_argv;	/* argv[i] pointer slot */
		set_cr3(kern_cr3);
		for (int j = 0; *(argv[i]+j) != '\0'; j++, tsk_argv++) {
			//if(get_pte((pml4_t*)tsk->registers.cr3, (uint64_t)tsk_argv)) panic("VERY BAD!!!\n");
			//printk("char: %c\n", *(argv[i]+j));
			//*tsk_argv = *(argv[i] + j);
			set_cr3(kern_cr3);
			tmp_c = *(argv[i] + j);		/* read source byte */
			set_cr3(task_cr3);
			*tsk_argv = tmp_c;		/* write into task page */
			set_cr3(kern_cr3);		/* back for loop test */
			//printk("%c\n", *(argv[i]+j));
			//if(i == 1 && j == 3)halt();
		}
		set_cr3(task_cr3);
		*tsk_argv = '\0';			/* terminate the copy */
		tsk_argv++;
	}

	/* NULL terminator of the argv vector (still under task_cr3). */
	*new_stack= 0;
	new_stack++;

	/* Page that will hold the envp strings. */
	//tsk->args.envp = PHYS_TO_VIRT(kmalloc_pg());
	tsk->args.envp = (uint64_t)kmalloc_vma((pml4_t*)tsk->registers.cr3, (tsk->mm->start_stack + (5*PAGE_SIZE)) & PG_ALIGN, 1, USER_SETTINGS);
	char *tsk_env = (char*)tsk->args.envp;
	/* VMA bookkeeping for the envp-string page. */
	set_cr3(kern_cr3);
	vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
	vma->vm_prot = 0;
	vma->vm_start =(uint64_t) tsk_env;
	vma->vm_end = vma->vm_start + PAGE_SIZE;
	add_vma(tsk->mm, vma);
	set_cr3(task_cr3);
	//printk("adder: %p\n", new_stack);
	set_cr3(kern_cr3);

	/* Same copy dance for the environment strings; envp is
	 * NULL-terminated rather than counted. */
	for (int i = 0; envp[i] != NULL; i++, new_stack++) {
		set_cr3(task_cr3);
		*new_stack = (uint64_t)tsk_env;		/* envp[i] pointer slot */
		set_cr3(kern_cr3);
		for (int j = 0; *(envp[i]+j) != '\0'; j++, tsk_env++) {
			set_cr3(kern_cr3);
			tmp_c = *(envp[i] + j);
			set_cr3(task_cr3);
			*tsk_env = tmp_c;
			set_cr3(kern_cr3);
		}
		set_cr3(task_cr3);
		*tsk_env = '\0';
		tsk_env++;
		set_cr3(kern_cr3);
	}

	/* NULL terminator of the envp vector. */
	set_cr3(task_cr3);
	*new_stack= 0;
	new_stack++;

	/* Leave with the kernel address space active. */
	set_cr3(kern_cr3);
}
/*
 * Load the ELF image held in DATA into the address space rooted at
 * PROC_PML4 and build an mm_struct describing the resulting layout
 * (code/data boundaries, per-segment VMAs, initial brk).
 *
 * Returns the new mm_struct, or NULL when DATA fails header validation.
 * NOTE(review): the len parameter is not used — segment bounds are
 * taken from the ELF headers only; confirm callers pre-validate size.
 */
struct mm_struct* load_elf(char *data, int len, Task *task, pml4_t *proc_pml4)
{
	if (validate_header(data)) {
		/* Remember the kernel's address-space root so we can
		 * switch back after copying into the target space. */
		pml4_t *kern_pml4 = get_cr3();

		//get the header
		Elf64_Ehdr *hdr = (Elf64_Ehdr*)data;

		//create new mm_struct
		struct mm_struct *mm = (struct mm_struct*)kmalloc_kern(PAGE_SIZE);
		memset(mm, 0, sizeof(struct mm_struct));

		if(hdr->e_shstrndx == 0x00)
			panic("NO STRING TABLE");

		/* Entry point doubles as the start of the text range. */
		mm->start_code = ((Elf64_Ehdr*) data)->e_entry;

		Elf64_Phdr *prgm_hdr = (Elf64_Phdr*)(data + hdr->e_phoff);
		uint64_t high_addr = 0;	/* highest mapped address so far */

		/* Map and copy every loadable program segment. */
		for(int i = 0; i < hdr->e_phnum; prgm_hdr++, i++) {
			//printk("--------------LOAD-ELF-----------------\n");
			if (prgm_hdr->p_type == PT_LOAD && prgm_hdr->p_filesz > 0) {
				/* A segment's file image may never exceed
				 * its memory image. */
				if (prgm_hdr->p_filesz > prgm_hdr->p_memsz) {
					panic("Bad Elf!!!\n");
					halt();
				}
				struct vm_area_struct *vma = (struct vm_area_struct*) kmalloc_kern(sizeof(struct vm_area_struct));
				if(kmalloc_vma(proc_pml4, prgm_hdr->p_vaddr, prgm_hdr->p_memsz, USER_SETTINGS) == NULL) {
					panic("KMALLOC FAILED - elf.c:load_elf:34\n");
					printk("SIZE: %d\n", prgm_hdr->p_filesz);
				}
				// printk("ELF Virtual memory address: %p\n", prgm_hdr->p_vaddr);

				/* Zero the whole memsz, copy filesz bytes,
				 * all under the target address space. */
				set_cr3(proc_pml4);
				memset((void*)prgm_hdr->p_vaddr, 0, prgm_hdr->p_memsz);
				//printk("memcpy dest: %p src: %p size: %p\n", prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_filesz);
				memcpy((void*)prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_filesz);
				//memcpy((void*)prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_memsz);
				set_cr3(kern_pml4);

				vma->vm_start = prgm_hdr->p_vaddr;
				vma->vm_end = (uint64_t)(prgm_hdr->p_vaddr + prgm_hdr->p_memsz);
				vma->vm_prot = prgm_hdr->p_flags;
				add_vma(mm, vma);
				/* Sanity check: the fresh VMA must be the
				 * list tail. */
				if(vma->next != NULL) {
					panic("not null\n");
					halt();
				}
				if(vma->vm_end > high_addr)
					high_addr = vma->vm_end;
				if (prgm_hdr->p_vaddr == mm->start_code) {
					// its the txt section
					mm->end_code = (uint64_t)(prgm_hdr->p_vaddr + prgm_hdr->p_filesz);
					mm->start_data = mm->end_code +1;
				}
			}
		}

		/* Place the heap one page above the highest mapped
		 * segment, page-aligned. */
		high_addr += PAGE_SIZE;
		high_addr &= PG_ALIGN;
		mm->brk = high_addr;
		mm->start_brk = mm->brk;

		/* Empty heap VMA: start == end until the task grows it. */
		struct vm_area_struct *vma = (struct vm_area_struct*) kmalloc_kern(sizeof(struct vm_area_struct));
		vma->vm_start = mm->start_brk;
		vma->vm_end = mm->brk;
		vma->vm_prot = 0;
		add_vma(mm, vma);
		return mm;
	}
	else {
		return NULL;
	}
}
// ---------------------------------------------------------------------------- // Name: KeHandleInterrupt // // Desc: Handles an exception. // ---------------------------------------------------------------------------- VOID KeHandleInterrupt( KINTERRUPT_CONTEXT* ctx ) { ULONG cr3; cli; get_cr3( cr3 ); switch( ctx->num ) { case EXCEPTION_DE: printf( "Divide Error\n" ); break; case EXCEPTION_BP: printf( "Breakpoint\n" ); break; case EXCEPTION_OF: printf( "Overflow\n" ); break; case EXCEPTION_BR: printf( "BOUND Range Exceeded\n" ); break; case EXCEPTION_UD: printf( "Invalid Opcode (Undefined Opcode)\n" ); break; case EXCEPTION_NM: printf( "Device Not Available (No Math Coprocessor)\n" ); break; case EXCEPTION_DF: printf( "Double Fault\n" ); break; case EXCEPTION_TS: printf( "Invalid TSS\n" ); break; case EXCEPTION_NP: printf( "Segment Not Present\n" ); break; case EXCEPTION_SS: printf( "Stack-Segment Fault\n" ); break; case EXCEPTION_GP: printf( "General Protection Fault\n" ); break; case EXCEPTION_PF: printf( "Page Fault\n" ); break; case EXCEPTION_MF: printf( "x87 FPU Floating-Point Error (Math Fault)\n" ); break; case EXCEPTION_AC: printf( "Alignment Check\n" ); break; case EXCEPTION_MC: printf( "Machine Check\n" ); break; case EXCEPTION_XF: printf( "SIMD Floating-Point Exception\n" ); break; default: printf( "Reserved exception\n" ); break; } printf( "Exception Number: %d\n\n", ctx->num ); printf( "Register Dump:\n" ); printf( "EAX = 0x%x\tECX = 0x%x\tEDX = 0x%x\tEBX = 0x%x\n", ctx->eax, ctx->ecx, ctx->edx, ctx->ebx ); printf( "ESI = 0x%x\tEDI = 0x%x\tEBP = 0x%x\n", ctx->esi, ctx->edi, ctx->ebp ); printf( "DS = 0x%x\tES = 0x%x\tFS = 0x%x\tGS = 0x%x\n", ctx->ds, ctx->es, ctx->fs, ctx->gs ); printf( "CS:EIP = 0x%x:0x%x\tEFLAGS = 0x%x\n", ctx->cs, ctx->eip, ctx->eflags ); printf( "CR3 = 0x%x\n\n", cr3 ); printf( "Halting system..." ); for( ; ; ); }
// Returns the current address-space root — the raw CR3 value — as an
// opaque handle.
// NOTE(review): despite the name, nothing new is allocated here; the
// returned "VM" aliases the currently active page tables. Confirm that
// callers intend to share the current address space.
void *arch::create_vm()
{
	return (void*)get_cr3();
}
generic_page_table_entry_t* get_l4_page_table(void) { cr3_t cr3; get_cr3(&cr3); return (generic_page_table_entry_t*) (uint64) phys_to_virt(PFN_TO_PA(cr3.l4_pfn)); }
/*
 * Panic with a register dump taken from the saved interrupt state SP
 * plus the live control registers. On i386 kernels either a 32- or
 * 64-bit saved state may arrive; on 64-bit kernels only the 64-bit
 * form is handled. Never returns (panic() does not return).
 */
void
panic_64(x86_saved_state_t *sp, __unused int pc, __unused const char *msg, boolean_t do_mca_dump)
{
	/* Set postcode (DEBUG only) */
	postcode(pc);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	if (do_mca_dump) {
#if CONFIG_MCA
		/*
		 * Dump the contents of the machine check MSRs (if any).
		 */
		mca_dump();
#endif
	}

#ifdef __i386__
	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(sp)) {
		x86_saved_state64_t *ss64p = saved_state64(sp);
		panic("%s trapno:0x%x, err:0x%qx, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8: 0x%016qx, R9: 0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx, CR2: 0x%016qx%s\n",
		      msg,
		      ss64p->isf.trapno, ss64p->isf.err,
		      (uint32_t)get_cr0(), (uint32_t)get_cr2(), (uint32_t)get_cr3(), (uint32_t)get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip, ss64p->cr2,
		      virtualized ? " VMM" : "");
	} else {
		x86_saved_state32_t *ss32p = saved_state32(sp);
		panic("%s at 0x%08x, trapno:0x%x, err:0x%x,"
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
		      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
		      "EFL: 0x%08x, EIP: 0x%08x%s\n",
		      msg,
		      ss32p->eip, ss32p->trapno, ss32p->err,
		      (uint32_t)get_cr0(), (uint32_t)get_cr2(), (uint32_t)get_cr3(), (uint32_t)get_cr4(),
		      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
		      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
		      ss32p->efl, ss32p->eip,
		      virtualized ? " VMM" : "");
	}
#else
	/* 64-bit kernel: only the 64-bit saved state exists. */
	x86_saved_state64_t *regs = saved_state64(sp);
	panic("%s at 0x%016llx, registers:\n"
	      "CR0: 0x%016lx, CR2: 0x%016lx, CR3: 0x%016lx, CR4: 0x%016lx\n"
	      "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
	      "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
	      "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
	      "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
	      "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n"
	      "Error code: 0x%016llx%s\n",
	      msg,
	      regs->isf.rip,
	      get_cr0(), get_cr2(), get_cr3_raw(), get_cr4(),
	      regs->rax, regs->rbx, regs->rcx, regs->rdx,
	      regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
	      regs->r8, regs->r9, regs->r10, regs->r11,
	      regs->r12, regs->r13, regs->r14, regs->r15,
	      regs->isf.rflags, regs->isf.rip,
	      regs->isf.cs & 0xFFFF, regs->isf.ss & 0xFFFF,
	      regs->isf.err,
	      virtualized ? " VMM" : "");
#endif
}