static int emulate_interrupt(uint8_t vector, uint16_t insn_sz)
{
   if(__rmode())
      return emulate_rmode_interrupt(vector, insn_sz);

   return VM_FAIL;
}
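/*
 * emulate_rmode_interrupt() (defined elsewhere) implements classic
 * real-mode delivery: push FLAGS/CS/IP, clear IF/TF, then vector
 * through the IVT at 0:vector*4. A minimal sketch, assuming the
 * vm_read_mem()/vm_write_mem() helpers used below and that vm_state
 * exposes rip/rsp/rflags and cs selector/base fields (names are
 * assumptions, not the actual implementation):
 */
static int sketch_rmode_interrupt(uint8_t vector, uint16_t insn_sz)
{
   struct { uint16_t ip, cs; } ivt_e;   /* real-mode IVT entry */
   uint16_t stk[3];                     /* IP, CS, FLAGS, lowest address first */
   uint16_t sp;

   /* fetch the handler pointer from the IVT */
   if(!vm_read_mem(vector*4, (uint8_t*)&ivt_e, sizeof(ivt_e)))
      return VM_FAIL;

   stk[0] = vm_state.rip.wlow + insn_sz;   /* return IP, past the INT n insn */
   stk[1] = vm_state.cs.selector.raw;
   stk[2] = vm_state.rflags.wlow;

   sp = vm_state.rsp.wlow - sizeof(stk);
   if(!vm_write_mem(vm_state.ss.base.raw + sp, (uint8_t*)stk, sizeof(stk)))
      return VM_FAIL;

   vm_state.rsp.wlow = sp;
   vm_state.rflags.wlow &= ~((1<<9)|(1<<8));   /* clear IF and TF */

   vm_state.cs.selector.raw = ivt_e.cs;
   vm_state.cs.base.raw = (uint64_t)ivt_e.cs << 4;
   vm_state.rip.raw = ivt_e.ip;

   vmcs_dirty(vm_state.rsp);
   vmcs_dirty(vm_state.rflags);
   vmcs_dirty(vm_state.cs.selector);
   vmcs_dirty(vm_state.cs.base);
   vmcs_dirty(vm_state.rip);
   return VM_DONE;
}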
static int emulate_iret(size_t sz)
{
   offset_t __unused__ addend = 2*sz;   /* computed but currently unused */

   if(__rmode())
      return emulate_rmode_iret();

   return VM_FAIL;
}
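/*
 * Mirror image of the interrupt sketch above: real-mode IRET pops IP,
 * CS and FLAGS back off the guest stack. Again a sketch under the same
 * assumed vm_state field names, not the actual emulate_rmode_iret():
 */
static int sketch_rmode_iret(void)
{
   uint16_t stk[3];   /* IP, CS, FLAGS as laid out by INT */

   if(!vm_read_mem(vm_state.ss.base.raw + vm_state.rsp.wlow,
                   (uint8_t*)stk, sizeof(stk)))
      return VM_FAIL;

   vm_state.rip.raw = stk[0];
   vm_state.cs.selector.raw = stk[1];
   vm_state.cs.base.raw = (uint64_t)stk[1] << 4;
   vm_state.rflags.wlow = stk[2] | (1<<1);   /* rflags bit 1 is always set */
   vm_state.rsp.wlow += sizeof(stk);

   vmcs_dirty(vm_state.rip);
   vmcs_dirty(vm_state.cs.selector);
   vmcs_dirty(vm_state.cs.base);
   vmcs_dirty(vm_state.rflags);
   vmcs_dirty(vm_state.rsp);
   return VM_DONE;
}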
int vmx_vmexit_resolve_excp()
{
   vmcs_read(vm_exit_info.int_info);
   vmcs_read(vm_exit_info.int_err_code);
   vmcs_read(vm_exit_info.qualification);

   switch(vm_exit_info.int_info.vector) {
   case GP_EXCP:
      /* real-mode #GP is emulated; protected mode falls through */
      if(__rmode())
         return vmx_vmexit_resolve_rmode_gp();
      break;
   case DB_EXCP:
      /* for #DB the exit qualification holds the DR6-style status
         bits: record them in the guest DR6 shadow */
      vm_state.dr6.wlow = vm_exit_info.qualification.wlow;
      vmcs_set_read(vm_state.dr6);
      break;
   case MC_EXCP:
      /* machine check: handled by the vmm itself */
      vmm_excp_mce();
      break;
   }

   return resolve_exception();
}
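/*
 * resolve_exception() (not shown) ends up re-injecting the faulting
 * event on the next VM entry. A minimal sketch of such an injection,
 * using a HYPOTHETICAL vm_entry_ctrls.int_info layout that mirrors
 * Intel's VM-entry interruption-information format; the real field
 * names in this codebase may differ:
 */
static void sketch_inject_hard_excp(uint8_t vector, uint32_t err_code, int has_err)
{
   vm_entry_ctrls.int_info.raw    = 0;
   vm_entry_ctrls.int_info.vector = vector;          /* e.g. GP_EXCP */
   vm_entry_ctrls.int_info.type   = 3;               /* hardware exception */
   vm_entry_ctrls.int_info.dec    = has_err ? 1 : 0; /* deliver error code */
   vm_entry_ctrls.int_info.v      = 1;               /* valid */
   vmcs_dirty(vm_entry_ctrls.int_info);

   if(has_err) {
      vm_entry_ctrls.err_code.raw = err_code;
      vmcs_dirty(vm_entry_ctrls.err_code);
   }
}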
int vmx_vmexit_idt_deliver()
{
   vmcs_read(vm_exit_info.idt_info);

   if(!vm_exit_info.idt_info.v) {
      /* no event was being delivered when the exit occurred: if the
         exit interruption info signals NMI unblocking (an IRET
         faulted) and we are not delivering #DF, re-set blocking-by-NMI
         in the guest interruptibility state */
      vmcs_read(vm_exit_info.int_info);

      if(vm_exit_info.int_info.nmi && vm_exit_info.int_info.vector != DF_EXCP) {
         vmcs_read(vm_state.interrupt);
         vm_state.interrupt.nmi = 1;
         vmcs_dirty(vm_state.interrupt);
      }

      return VM_DONE;
   }

   if(__rmode())
      return __vmx_vmexit_idt_deliver_rmode();

   return __vmx_vmexit_idt_deliver_pmode();
}
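/*
 * For the protected-mode path, redelivery amounts to copying the
 * interrupted event descriptor (idt_info) into the VM-entry fields so
 * the CPU replays it on the next entry. A sketch reusing the
 * hypothetical layout from sketch_inject_hard_excp() above; the
 * idt_err_code field name is likewise an assumption:
 */
static int sketch_idt_deliver_pmode(void)
{
   /* replay vector, type and error-code flag exactly as interrupted */
   vm_entry_ctrls.int_info.raw = vm_exit_info.idt_info.raw;
   vmcs_dirty(vm_entry_ctrls.int_info);

   if(vm_exit_info.idt_info.v_err) {   /* "error code valid" bit */
      vmcs_read(vm_exit_info.idt_err_code);
      vm_entry_ctrls.err_code.raw = vm_exit_info.idt_err_code.raw;
      vmcs_dirty(vm_entry_ctrls.err_code);
   }

   return VM_DONE;
}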
static int __dev_ata_device(ata_t *ata, io_insn_t *io)
{
   ata_dev_reg_t dev;
   int           rc;
   io_size_t     sz = {.available = sizeof(ata_dev_reg_t)};

   if(io->in)
      return dev_io_proxify(io);

   /* check crazy io (should not happen) */
   if((io->sz * io->cnt) > sizeof(ata_dev_reg_t)) {
      debug(DEV_ATA, "unsupported ata dev access\n");
      return VM_FAIL;
   }

   /* snoop the device/head byte before forwarding it to the hardware */
   rc = dev_io_insn(io, (void*)&dev.raw, &sz);
   if(rc != VM_DONE)
      return rc;

   ata->dev_head = dev;
   debug(DEV_ATA, "ata device [%s]\n", dev.dev ? "SLAVE" : "MASTER");
   return dev_io_native(io, &dev.raw);
}

static int __fake_ata_status(ata_t *ata)
{
   if(ata->last_out == ATA_CMD_REG(ata->base))
      info->vm.cpu.gpr->rax.blow = 1;   /* status 0x01: ERR set */
   else if(ata->last_out == ATA_DEVICE_REG(ata->base))
      info->vm.cpu.gpr->rax.blow = 0;   /* status 0x00: not busy, not ready */
   else {
      debug(DEV_ATA, "can't fake status for previous out(0x%x)\n", ata->last_out);
      return VM_FAIL;
   }

   return VM_DONE;
}

static int __dev_ata_status(ata_t *ata, io_insn_t *io)
{
   /* fake the status register when a protected-mode guest has
      selected the slave device */
   if(!__rmode() && __ata_guest_want_slave(ata)) {
      debug(DEV_ATA, "ata fake status\n");
      return __fake_ata_status(ata);
   }

   debug(DEV_ATA, "ata status\n");
   return dev_io_proxify(io);
}

static int __dev_ata_alt_status(ata_t *ata, io_insn_t *io)
{
   if(!__rmode() && __ata_guest_want_slave(ata)) {
      debug(DEV_ATA, "ata fake ALT status\n");
      return __fake_ata_status(ata);
   }

   debug(DEV_ATA, "ata ALT status\n");
   return dev_io_proxify(io);
}

static int __dev_ata_lba_filter(void *device, void *arg)
{
   ata_t     *ata  = (ata_t*)arg;
   ata_dev_t *disk = &ata->devices[0];
   uint8_t    lba  = *(uint8_t*)device;
   uint8_t    idx  = ata->last_out - ATA_LBA_LOW_REG(ata->base);

   /* only the LBA low/mid/high registers (idx 0..2) are tracked */
   if(idx > 2) {
      debug(DEV_ATA, "unknown (internal) LBA index access (%d)\n", idx);
      return VM_FAIL;
   }

   disk->lba_r[idx] = lba;
   debug(DEV_ATA, "ata lba[%d] = 0x%x\n", idx, lba);
   return VM_DONE;
}
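/*
 * __ata_guest_want_slave() is referenced above but defined elsewhere.
 * Since __dev_ata_device() snoops every device/head write into
 * ata->dev_head, it presumably just tests the DEV bit (bit 4 of the
 * device/head register: 0 = master, 1 = slave). A one-line sketch
 * under that assumption:
 */
static inline int sketch_ata_guest_want_slave(ata_t *ata)
{
   return ata->dev_head.dev;   /* DEV bit of the last device/head byte */
}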
int vmx_vmexit_resolve_dt()
{
   vmcs_exit_info_insn_dt_t *dt_insn;
   offset_t                  dt_addr;
   dt_reg_t                  dt_reg;
   raw64_t                   disp;
   uint64_t                  addr_msk, op_msk;
   int                       rc, sz, mode;

   if(!__rmode()) {
      debug(VMX_DT, "DT intercept only while in real mode\n");
      return VM_FAIL;
   }

   vmcs_read(vm_exit_info.insn_info);
   vmcs_read(vm_exit_info.qualification);

   dt_insn = &vm_exit_info.insn_info.dt;
   dt_addr = 0;
   disp.sraw = vm_exit_info.qualification.sraw;

   /* address-size field: 0 -> 16-bit, 1 -> 32-bit, 2 -> 64-bit */
   addr_msk = (1ULL<<(16*(1<<dt_insn->addr))) - 1;

   /* effective address: segment base first ... */
   switch(dt_insn->seg) {
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_ES:
      vmcs_read(vm_state.es.base);
      dt_addr += vm_state.es.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_CS:
      vmcs_read(vm_state.cs.base);
      dt_addr += vm_state.cs.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_SS:
      vmcs_read(vm_state.ss.base);
      dt_addr += vm_state.ss.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_DS:
      vmcs_read(vm_state.ds.base);
      dt_addr += vm_state.ds.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_FS:
      vmcs_read(vm_state.fs.base);
      dt_addr += vm_state.fs.base.raw;
      break;
   case VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_SEG_REG_GS:
      vmcs_read(vm_state.gs.base);
      dt_addr += vm_state.gs.base.raw;
      break;
   }

   /* XXX: compute offset alone and check against segment limit */

   /* ... then base register, scaled index and displacement */
   if(!dt_insn->no_base) {
      int reg = GPR64_RAX - (dt_insn->base & GPR64_RAX);
      dt_addr += info->vm.cpu.gpr->raw[reg].raw & addr_msk;
   }

   if(!dt_insn->no_idx) {
      int      reg = GPR64_RAX - (dt_insn->idx & GPR64_RAX);
      uint64_t val = info->vm.cpu.gpr->raw[reg].raw & addr_msk;

      if(dt_insn->scale)
         val *= (1ULL<<dt_insn->scale);

      dt_addr += val;
   }

   dt_addr += (disp.sraw & addr_msk);

   /* pseudo descriptor: 2-byte limit + base; 10 bytes in 64-bit mode,
      6 otherwise, and a 16-bit operand size only uses 24 bits of base */
   mode = cpu_addr_sz();
   if(mode == 64) {
      op_msk = -1ULL;
      sz = 10;
   } else if(dt_insn->op == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_OP_SZ_16) {
      op_msk = (1ULL<<24) - 1;
      sz = 6;
   } else {
      op_msk = (1ULL<<32) - 1;
      sz = 6;
   }

   debug(VMX_DT, "dt op @ 0x%X\n", dt_addr);

   if(dt_insn->type < VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT) {
      /* store: SGDT/SIDT write the pseudo descriptor to guest memory */
      if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_SGDT)
         rc = __vmx_vmexit_sgdt(&dt_reg);
      else
         rc = __vmx_vmexit_sidt(&dt_reg);

      dt_reg.base.raw &= op_msk;

      if(!vm_write_mem(dt_addr, (uint8_t*)&dt_reg, sz)) {
         debug(VMX_DT, "could not write vm mem @0x%X\n", dt_addr);
         return VM_FAIL;
      }
   } else {
      /* load: LGDT/LIDT read the pseudo descriptor from guest memory */
      if(!vm_read_mem(dt_addr, (uint8_t*)&dt_reg, sz)) {
         debug(VMX_DT, "could not read vm mem @0x%X\n", dt_addr);
         return VM_FAIL;
      }

      dt_reg.base.raw &= op_msk;

      if(dt_insn->type == VMCS_VM_EXIT_INFORMATION_VMX_INSN_INFORMATION_TYPE_LGDT)
         rc = __vmx_vmexit_lgdt(&dt_reg);
      else
         rc = __vmx_vmexit_lidt(&dt_reg);
   }

   vmcs_read(vm_exit_info.insn_len);
   return emulate_done(rc, vm_exit_info.insn_len.raw);
}
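/*
 * __vmx_vmexit_sgdt/sidt/lgdt/lidt (defined elsewhere) move the pseudo
 * descriptor between dt_reg and the cached guest state. A minimal
 * SGDT/LGDT pair, assuming dt_reg_t pairs a 16-bit limit with the base
 * and that the guest GDTR is mirrored as vm_state.gdtr (hypothetical
 * field names):
 */
static int sketch_vmexit_sgdt(dt_reg_t *dt_reg)
{
   vmcs_read(vm_state.gdtr.base);
   vmcs_read(vm_state.gdtr.limit);

   dt_reg->limit    = vm_state.gdtr.limit.wlow;
   dt_reg->base.raw = vm_state.gdtr.base.raw;
   return VM_DONE;
}

static int sketch_vmexit_lgdt(dt_reg_t *dt_reg)
{
   vm_state.gdtr.base.raw   = dt_reg->base.raw;
   vm_state.gdtr.limit.wlow = dt_reg->limit;

   vmcs_dirty(vm_state.gdtr.base);
   vmcs_dirty(vm_state.gdtr.limit);
   return VM_DONE;
}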