/* VMX: I/O access exit. On Intel the VMCS provides the length of the
   intercepted instruction, which emulate_done() uses to advance rip. */
int vmx_vmexit_resolve_io()
{
   int rc = dev_access();

   if(rc != VM_DONE)
      return rc;

   vmcs_read(vm_exit_info.insn_len);
   return emulate_done(VM_DONE, vm_exit_info.insn_len.raw);
}
/* SVM: I/O access exit. On AMD, exit_info_2 already holds the address of
   the next instruction, so rip is set directly and a zero length is
   passed to emulate_done(). */
int svm_vmexit_resolve_io()
{
   int rc = dev_access();

   if(rc != VM_DONE)
      return rc;

   vm_state.rip.raw = vm_ctrls.exit_info_2.raw;
   return emulate_done(VM_DONE, 0);
}
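/*
 * Illustrative sketch only -- the helper and its parameters are
 * assumptions, not the project's API. It condenses the difference
 * between the two handlers above: Intel exposes the intercepted
 * instruction length in the VMCS, while AMD's VMCB directly provides
 * the next rip.
 */
#include <stdint.h>

static inline void skip_guest_insn_sketch(uint64_t *rip, uint64_t insn_len,
                                          uint64_t next_rip, int has_next_rip)
{
   if(has_next_rip)      /* AMD: hardware-provided next rip */
      *rip = next_rip;
   else                  /* Intel: advance by the VMCS instruction length */
      *rip += insn_len;
}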
int resolve_msr(uint8_t wr)
{
   /* XXX: check reserved/unimplemented MSR and raise #GP */
   int rc = wr ? __resolve_msr_wr() : __resolve_msr_rd();

   debug(MSR, "%smsr 0x%x | 0x%x 0x%x\n"
         , wr ? "wr" : "rd"
         , info->vm.cpu.gpr->rcx.low
         , info->vm.cpu.gpr->rdx.low
         , info->vm.cpu.gpr->rax.low);

   /* advance rip by the disassembled length, but never by less than
      MSR_INSN_SZ */
   return emulate_done(rc, max(__insn_sz(), MSR_INSN_SZ));
}
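/*
 * For reference (hypothetical helper, not part of the original file):
 * rdmsr/wrmsr address the MSR through ecx and carry the 64-bit value in
 * edx:eax, which is why the trace above dumps rcx.low, rdx.low and
 * rax.low.
 */
#include <stdint.h>

static inline uint64_t guest_msr_value_sketch(uint32_t edx, uint32_t eax)
{
   return ((uint64_t)edx << 32) | eax;   /* edx = high half, eax = low half */
}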
int emulate()
{
   int    rc;
   size_t sz;

   if(!disassemble(&info->vm.cpu.disasm))
      return VM_FAIL;

   rc = emulate_insn(&info->vm.cpu.disasm);
   sz = ud_insn_len(&info->vm.cpu.disasm);

   return emulate_done(rc, sz);
}
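/*
 * Illustrative sketch, assuming disassemble() wraps udis86 (ud_insn_len()
 * above is the udis86 accessor). disassemble_sketch() and its parameters
 * are assumptions, not the project's actual code.
 */
#include <udis86.h>

static int disassemble_sketch(ud_t *ud, const uint8_t *insn, size_t len,
                              uint8_t mode)
{
   ud_init(ud);
   ud_set_mode(ud, mode);              /* 16, 32 or 64-bit decoding */
   ud_set_input_buffer(ud, insn, len);
   return ud_disassemble(ud) != 0;     /* returns decoded length, 0 on failure */
}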
static int dbg_hard_stp_event_fast_syscall(int tf)
{
   int    rc;
   size_t sz;

   dbg_hard_stp_restore_context();

   sz = ud_insn_len(&info->vm.cpu.disasm);
   rc = emulate_done(emulate_insn(&info->vm.cpu.disasm), sz);

   info->vm.cpu.emu_sts = EMU_STS_AVL; /* stealth for db_pending() */

   dbg_hard_stp_setup_context();

   if(rc == VM_DONE_LET_RIP)
   {
      /* restore the guest's original trap flag */
      __rflags.tf = tf;
      __post_access(__rflags);
   }

   return rc;
}
int vmx_vmexit_resolve_dt()
{
   vmcs_exit_info_insn_dt_t *dt_insn;
   offset_t                 dt_addr;
   dt_reg_t                 dt_reg;
   raw64_t                  disp;
   uint64_t                 addr_msk, op_msk;
   int                      rc, sz, mode;

   if(!__rmode())
   {
      debug(VMX_DT, "DT intercept only while in real mode\n");
      return VM_FAIL;
   }

   vmcs_read(vm_exit_info.insn_info);
   vmcs_read(vm_exit_info.qualification);

   dt_insn = &vm_exit_info.insn_info.dt;
   dt_addr = 0;
   disp.sraw = vm_exit_info.qualification.sraw;

   /* address size: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit. The 64-bit case
      is special-cased to avoid a shift by 64, which is undefined in C. */
   if(dt_insn->addr == 2)
      addr_msk = -1ULL;
   else
      addr_msk = (1ULL<<(16*(1<<dt_insn->addr))) - 1;

   /* effective address: segment base ... */
   switch(dt_insn->seg)
   {
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_ES:
      vmcs_read(vm_state.es.base);
      dt_addr += vm_state.es.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_CS:
      vmcs_read(vm_state.cs.base);
      dt_addr += vm_state.cs.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_SS:
      vmcs_read(vm_state.ss.base);
      dt_addr += vm_state.ss.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_DS:
      vmcs_read(vm_state.ds.base);
      dt_addr += vm_state.ds.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_FS:
      vmcs_read(vm_state.fs.base);
      dt_addr += vm_state.fs.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_GS:
      vmcs_read(vm_state.gs.base);
      dt_addr += vm_state.gs.base.raw;
      break;
   }

   /* XXX: compute offset alone and check against segment limit */

   /* ... plus base register ... */
   if(!dt_insn->no_base)
   {
      int reg = GPR64_RAX - (dt_insn->base & GPR64_RAX);
      dt_addr += info->vm.cpu.gpr->raw[reg].raw & addr_msk;
   }

   /* ... plus scaled index register ... */
   if(!dt_insn->no_idx)
   {
      int      reg = GPR64_RAX - (dt_insn->idx & GPR64_RAX);
      uint64_t val = info->vm.cpu.gpr->raw[reg].raw & addr_msk;

      if(dt_insn->scale)
         val *= (1ULL<<dt_insn->scale);

      dt_addr += val;
   }

   /* ... plus displacement */
   dt_addr += (disp.sraw & addr_msk);

   /* memory image: 2-byte limit + 8-byte base in 64-bit mode, 2-byte
      limit + 4-byte base otherwise (only 24 bits of base with a 16-bit
      operand size) */
   mode = cpu_addr_sz();
   if(mode == 64)
   {
      op_msk = -1ULL;
      sz = 10;
   }
   else if(dt_insn->op == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_OP_SZ_16)
   {
      op_msk = (1ULL<<24) - 1;
      sz = 6;
   }
   else
   {
      op_msk = (1ULL<<32) - 1;
      sz = 6;
   }

   debug(VMX_DT, "dt op @ 0x%X\n", dt_addr);

   /* sgdt/sidt store the descriptor table register to guest memory,
      lgdt/lidt load it from guest memory */
   if(dt_insn->type < VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT)
   {
      if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_SGDT)
         rc = __vmx_vmexit_sgdt(&dt_reg);
      else
         rc = __vmx_vmexit_sidt(&dt_reg);

      dt_reg.base.raw &= op_msk;

      if(!vm_write_mem(dt_addr, (uint8_t*)&dt_reg, sz))
      {
         debug(VMX_DT, "could not write vm mem @0x%X\n", dt_addr);
         return VM_FAIL;
      }
   }
   else
   {
      if(!vm_read_mem(dt_addr, (uint8_t*)&dt_reg, sz))
      {
         debug(VMX_DT, "could not read vm mem @0x%X\n", dt_addr);
         return VM_FAIL;
      }

      dt_reg.base.raw &= op_msk;

      if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT)
         rc = __vmx_vmexit_lgdt(&dt_reg);
      else
         rc = __vmx_vmexit_lidt(&dt_reg);
   }

   vmcs_read(vm_exit_info.insn_len);
   return emulate_done(rc, vm_exit_info.insn_len.raw);
}
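/*
 * Illustrative only: the guest memory image read/written by the handler
 * above is a 2-byte limit followed by the base, per the sgdt/sidt/lgdt/
 * lidt encodings. In 64-bit mode the base is 8 bytes (sz = 10); otherwise
 * it is 4 bytes (sz = 6), of which a 16-bit operand size keeps only
 * 24 bits, hence the op_msk values. The struct name is an assumption
 * made for illustration, not a type from the original file.
 */
#include <stdint.h>

struct dt_mem_image_sketch
{
   uint16_t limit;   /* descriptor table limit */
   uint64_t base;    /* only the low 4 (or 3) bytes used outside 64-bit mode */
} __attribute__((packed));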