void gdb_send_stop_reason(uint8_t reason) { size_t rlen; uint32_t s_rip; uint64_t mode = (uint64_t)cpu_addr_sz(); if(mode == 64) { s_rip = 0x3a36313b; rlen = sizeof(uint64_t)*2; } else /* XXX: gdb seems to wait for 32 bits regs at least */ { s_rip = 0x3a38303b; rlen = sizeof(uint32_t)*2; } gdb_add_str("T", 1); gdb_add_byte(reason); gdb_add_str("md:", 3); gdb_add_number(mode, 2, 0); gdb_add_str(";04:", 4); gdb_add_number(info->vm.cpu.gpr->rsp.raw, rlen, 1); gdb_add_str(";05:", 4); gdb_add_number(info->vm.cpu.gpr->rbp.raw, rlen, 1); gdb_add_str((char*)&s_rip, 4); gdb_add_number(__rip.raw, rlen, 1); gdb_add_str(";", 1); gdb_send_packet(); }
/*
** Dump the vm control registers, debug registers, efer state and
** descriptor-table registers for vmexit diagnostics.
**
** NOTE(review): format width semantics (%x vs %X) follow the project's
** printf implementation, not libc -- %X is paired here with 64-bit .raw
** values and %x with 32-bit .low/.eax values; confirm against the local
** printf before changing any specifier.
*/
static void vmx_vmexit_show_cr()
{
   printf("\n- control registers\n"
          "mode : %d bits\n"
          "cpl : %d\n"
          "cr0 : 0x%x (pe:%d pg:%d ne:%d)\n"
          "cr0 fixed : 0x%x 0x%x\n"
          "cr2 : 0x%X\n"
          "cr3 : 0x%X\n"
          "cr4 : 0x%x (pae:%d pse:%d pge:%d vmxe:%d)\n"
          "cr4 fixed : 0x%x 0x%x\n"
          "dr6 : 0x%X\n"
          "dr7 : 0x%X\n"
          "efer (shadow) : 0x%x (lma:%d lme:%d nxe:%d)\n"
          "efer : 0x%x (lma:%d lme:%d nxe:%d ia32e:%d)\n"
          "gdtr (limit) : 0x%x (0x%x)\n"
          "idtr (limit) : 0x%x (0x%x)\n"
          ,cpu_addr_sz(), __cpl
          /* guest cr0 plus the vmx allowed-0/allowed-1 fixed bits */
          ,vm_state.cr0.low, vm_state.cr0.pe, vm_state.cr0.pg, vm_state.cr0.ne
          ,info->vm.vmx_fx_cr0.allow_0.raw, info->vm.vmx_fx_cr0.allow_1.raw
          ,vm_state.cr2.raw
          ,vm_state.cr3.raw
          /* guest cr4 plus the vmx allowed-0/allowed-1 fixed bits */
          ,vm_state.cr4.low, vm_state.cr4.pae
          ,vm_state.cr4.pse, vm_state.cr4.pge, vm_state.cr4.vmxe
          ,info->vm.vmx_fx_cr4.allow_0.raw, info->vm.vmx_fx_cr4.allow_1.raw
          ,vm_state.dr6.raw
          ,vm_state.dr7.raw
          /* shadow efer (seen by the guest) vs. effective vmcs efer */
          ,info->vm.efer.eax, info->vm.efer.lma
          ,info->vm.efer.lme, info->vm.efer.nxe
          ,vm_state.ia32_efer.eax, vm_state.ia32_efer.lma
          ,vm_state.ia32_efer.lme, vm_state.ia32_efer.nxe
          ,vm_entry_ctrls.entry.ia32e
          ,vm_state.gdtr.base.low, vm_state.gdtr.limit.raw
          ,vm_state.idtr.base.low, vm_state.idtr.limit.raw);
}
/*
** Handle the vmm-specific "translate" command: parse a guest virtual
** address from the packet payload, resolve it to a physical address
** (identity when paging is off, page-walk through the active cr3
** otherwise) and send the result back, sized to the vm word width.
*/
static void gdb_vmm_translate(uint8_t *data, size_t len)
{
   offset_t va, pa;
   size_t   pg_sz, hex_len;

   if(!gdb_get_number(data, len, (uint64_t*)&va, 0))
   {
      gdb_nak();
      return;
   }

   debug(GDBSTUB_CMD, "translating 0x%X\n", va);

   if(!__paging())
      pa = va;                       /* no paging: identity mapping */
   else if(!__pg_walk(info->vmm.ctrl.active_cr3, va, &pa, &pg_sz, 1))
   {
      debug(GDBSTUB, "memory translation failure\n");
      gdb_err_mem();
      return;
   }

   debug(GDBSTUB_CMD, "sending 0x%X\n", pa);

   /* XXX: gdb seems to wait for 32 bits regs at least */
   hex_len = (cpu_addr_sz() == 64) ? sizeof(uint64_t)*2 : sizeof(uint32_t)*2;

   gdb_add_number(pa, hex_len, 0);
   gdb_send_packet();
}
static void gdb_cmd_rd_gpr() { size_t flen, vlen, ngpr, i; if(cpu_addr_sz() == 64) { ngpr = 16; vlen = sizeof(uint64_t)*2; } else /* XXX: gdb seems to wait for 32 bits regs at least */ { ngpr = 8; vlen = sizeof(uint32_t)*2; } flen = sizeof(uint32_t)*2; /* [r/e]ax - [r/e]di */ for(i=GPR64_RAX ; i >= ((GPR64_RAX+1)-ngpr) ; i--) gdb_add_number(info->vm.cpu.gpr->raw[i].raw, vlen, 1); /* [r/e]ip */ gdb_add_number(__rip.raw, vlen, 1); /* fixed length eflags, cs, ss, ds, es, fs, gs */ gdb_add_number(__rflags.raw, flen, 1); gdb_add_number(__cs.selector.raw, flen, 1); __pre_access(__ss.selector); gdb_add_number(__ss.selector.raw, flen, 1); __pre_access(__ds.selector); gdb_add_number(__ds.selector.raw, flen, 1); __pre_access(__es.selector); gdb_add_number(__es.selector.raw, flen, 1); __pre_access(__fs.selector); gdb_add_number(__fs.selector.raw, flen, 1); __pre_access(__gs.selector); gdb_add_number(__gs.selector.raw, flen, 1); gdb_send_packet(); }
/*
** Resolve a gdb register index to the storage location and size of the
** backing vm register.
**
** @param idx   gdb register number (gpr space or system space)
** @param sys   non-zero selects the system register numbering (cr*,
**              dr*, dbgctl, efer, segment/descriptor-table bases)
** @param wr    non-zero when the caller intends to write the register;
**              passed to __cond_access (unused on AMD builds)
** @param reg   out: pointer to the register's raw64_t storage
** @param size  out: register width in bytes (8, 4 or 2)
** @return 1 on success, 0 on unsupported index (gdb_unsupported sent)
*/
int __gdb_setup_reg(uint64_t idx, raw64_t **reg, size_t *size, uint8_t sys,
                    uint8_t
#ifdef CONFIG_ARCH_AMD
                    __unused__
#endif
                    wr)
{
   loc_t loc;
   offset_t *cache;

   if(sys)
   {
      debug(GDBSTUB_PKT, "reg_sys_op\n");
      *size = sizeof(uint64_t);
      /* system space defines registers 0..21 only */
      if(idx >= 22)
         goto __fail;
      /* dr0-dr3 are fetched from the host and staged in the insn cache
      ** (NOTE(review): writes to dr0-dr3 would only hit this cache --
      ** presumably applied elsewhere; confirm) */
      cache = (offset_t*)info->vm.cpu.insn_cache;
      goto __sys;
   }

   if(cpu_addr_sz() == 64)
   {
      *size = sizeof(uint64_t);
      if(idx < 16)
         goto __gpr;
      else if(idx < 24)
         idx -= 8;   /* remap 16..23 (rip/rflags/segs) onto 8..15 below */
      else
         goto __fail;
   }
   else
   {
      /* XXX: gdb seems to wait for 32 bits regs at least */
      *size = sizeof(uint32_t);
      if(idx < 8)
         goto __gpr;
      else if(idx >= 16)
         goto __fail;
   }

   /* here idx is guaranteed to be 8..15, so every value hits a case
   ** and control never falls through into __sys */
   switch(idx)
   {
   case  8: loc.u64 = &__rip.raw;    __cond_access(wr,__rip);    goto __win;
   case  9: loc.u64 = &__rflags.raw; __cond_access(wr,__rflags); goto __win;
   case 10: loc.u16 = &__cs.selector.raw;__cond_access(wr,__cs.selector); goto __win16;
   case 11: loc.u16 = &__ss.selector.raw;__cond_access(wr,__ss.selector); goto __win16;
   case 12: loc.u16 = &__ds.selector.raw;__cond_access(wr,__ds.selector); goto __win16;
   case 13: loc.u16 = &__es.selector.raw;__cond_access(wr,__es.selector); goto __win16;
   case 14: loc.u16 = &__fs.selector.raw;__cond_access(wr,__fs.selector); goto __win16;
   case 15: loc.u16 = &__gs.selector.raw;__cond_access(wr,__gs.selector); goto __win16;
   }

__sys:
   /* system register space: control, debug, efer and base registers */
   switch(idx)
   {
   case  0: loc.u64 = &__cr0.raw; __cond_access(wr,__cr0); goto __win;
   case  1: loc.u64 = &__cr2.raw; __cond_access(wr,__cr2); goto __win;
   case  2: loc.u64 = &__cr3.raw; __cond_access(wr,__cr3); goto __win;
   case  3: loc.u64 = &__cr4.raw; __cond_access(wr,__cr4); goto __win;
   /* dr0-dr3 live in the host: snapshot current value into the cache */
   case  4: *cache = get_dr0(); loc.addr = (void*)cache; goto __win;
   case  5: *cache = get_dr1(); loc.addr = (void*)cache; goto __win;
   case  6: *cache = get_dr2(); loc.addr = (void*)cache; goto __win;
   case  7: *cache = get_dr3(); loc.addr = (void*)cache; goto __win;
   case  8: loc.u64 = &__dr6.raw; __cond_access(wr,__dr6); goto __win;
   case  9: loc.u64 = &__dr7.raw; __cond_access(wr,__dr7); goto __win;
   case 10: loc.u64 = &__dbgctl.raw; __cond_access(wr,__dbgctl); goto __win;
   case 11: loc.u64 = &__efer.raw; /*__cond_access(wr,__efer);*/ goto __win;
   case 12: loc.u64 = &__cs.base.raw; __cond_access(wr,__cs.base); goto __win;
   case 13: loc.u64 = &__ss.base.raw; __cond_access(wr,__ss.base); goto __win;
   case 14: loc.u64 = &__ds.base.raw; __cond_access(wr,__ds.base); goto __win;
   case 15: loc.u64 = &__es.base.raw; __cond_access(wr,__es.base); goto __win;
   case 16: loc.u64 = &__fs.base.raw; __cond_access(wr,__fs.base); goto __win;
   case 17: loc.u64 = &__gs.base.raw; __cond_access(wr,__gs.base); goto __win;
   case 18: loc.u64 = &__gdtr.base.raw; __cond_access(wr,__gdtr.base); goto __win;
   case 19: loc.u64 = &__idtr.base.raw; __cond_access(wr,__idtr.base); goto __win;
   case 20: loc.u64 = &__ldtr.base.raw; __cond_access(wr,__ldtr.base); goto __win;
   case 21: loc.u64 = &__tr.base.raw; __cond_access(wr,__tr.base); goto __win;
   }

__gpr:
   /* gpr array is indexed downward from GPR64_RAX */
   loc.u64 = &info->vm.cpu.gpr->raw[GPR64_RAX - idx].raw;
   goto __win;

__win16:
   /* segment selectors are 16-bit wide */
   *size = sizeof(uint16_t);
   /* fallthrough */
__win:
   debug(GDBSTUB_PKT, "reg_op win on %d\n", idx);
   *reg = (raw64_t*)loc.u64;
   return 1;

__fail:
   debug(GDBSTUB_PKT, "reg_op failed on %d\n", idx);
   gdb_unsupported();
   return 0;
}
int vmx_vmexit_resolve_dt() { vmcs_exit_info_insn_dt_t *dt_insn; offset_t dt_addr; dt_reg_t dt_reg; raw64_t disp; uint64_t addr_msk, op_msk; int rc, sz, mode; if(!__rmode()) { debug(VMX_DT, "DT intercept only while in real mode\n"); return VM_FAIL; } vmcs_read(vm_exit_info.insn_info); vmcs_read(vm_exit_info.qualification); dt_insn = &vm_exit_info.insn_info.dt; dt_addr = 0; disp.sraw = vm_exit_info.qualification.sraw; addr_msk = (1ULL<<(16*(1<<dt_insn->addr))) - 1; switch(dt_insn->seg) { case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_ES: vmcs_read(vm_state.es.base); dt_addr += vm_state.es.base.raw; break; case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_CS: vmcs_read(vm_state.cs.base); dt_addr += vm_state.cs.base.raw; break; case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_SS: vmcs_read(vm_state.ss.base); dt_addr += vm_state.ss.base.raw; break; case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_DS: vmcs_read(vm_state.ds.base); dt_addr += vm_state.ds.base.raw; break; case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_FS: vmcs_read(vm_state.fs.base); dt_addr += vm_state.fs.base.raw; break; case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_GS: vmcs_read(vm_state.gs.base); dt_addr += vm_state.gs.base.raw; break; } /* XXX: compute offset alone and check against segment limit */ if(!dt_insn->no_base) { int reg = GPR64_RAX - (dt_insn->base & GPR64_RAX); dt_addr += info->vm.cpu.gpr->raw[reg].raw & addr_msk; } if(!dt_insn->no_idx) { int reg = GPR64_RAX - (dt_insn->idx & GPR64_RAX); uint64_t val = info->vm.cpu.gpr->raw[reg].raw & addr_msk; if(dt_insn->scale) val *= (1ULL<<dt_insn->scale); dt_addr += val; } dt_addr += (disp.sraw & addr_msk); mode = cpu_addr_sz(); if(mode == 64) { op_msk = -1ULL; sz = 10; } else if(dt_insn->op == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_OP_SZ_16) { op_msk = (1ULL<<24) - 1; sz = 6; } else { op_msk = (1ULL<<32) - 1; sz = 6; } debug(VMX_DT, "dt op @ 0x%X\n", dt_addr); if(dt_insn->type < 
VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT) { if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_SGDT) rc = __vmx_vmexit_sgdt(&dt_reg); else rc = __vmx_vmexit_sidt(&dt_reg); dt_reg.base.raw &= op_msk; if(!vm_write_mem(dt_addr, (uint8_t*)&dt_reg, sz)) { debug(VMX_DT, "could not write vm mem @0x%X\n", dt_addr); return VM_FAIL; } } else { if(!vm_read_mem(dt_addr, (uint8_t*)&dt_reg, sz)) { debug(VMX_DT, "could not read vm mem @0x%X\n", dt_addr); return VM_FAIL; } dt_reg.base.raw &= op_msk; if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT) rc = __vmx_vmexit_lgdt(&dt_reg); else rc = __vmx_vmexit_lidt(&dt_reg); } vmcs_read(vm_exit_info.insn_len); return emulate_done(rc, vm_exit_info.insn_len.raw); }