/* L1 guest: enable the Hyper-V VP assist page, set up VMX with an
 * enlightened VMCS, and check that the current-VMCS pointer stays on the
 * eVMCS before and after running L2. */
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
	GUEST_SYNC(8);
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_SYNC(9);
}
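For context, enable_vp_assist() comes down to one Hyper-V TLFS detail: the guest maps the VP assist page by writing its guest-physical address, with the enable bit set, to the synthetic MSR HV_X64_MSR_VP_ASSIST_PAGE (0x40000073). A minimal sketch under that assumption follows; the wrmsr helper is written out here for self-containment, and the real selftest helper presumably also records the page's host-virtual address for later eVMCS accesses.

#include <stdint.h>

#define HV_X64_MSR_VP_ASSIST_PAGE        0x40000073u
#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x1u

/* Standard x86 wrmsr: value split across EDX:EAX, MSR index in ECX. */
static inline void wrmsr(uint32_t msr, uint64_t val)
{
    uint32_t lo = (uint32_t)val, hi = (uint32_t)(val >> 32);

    __asm__ __volatile__("wrmsr" :: "c"(msr), "a"(lo), "d"(hi));
}

/* Hypothetical stand-in for enable_vp_assist(): point the hypervisor at
 * the (page-aligned) VP assist page and set the enable bit. */
static void enable_vp_assist_sketch(uint64_t vp_assist_gpa)
{
    wrmsr(HV_X64_MSR_VP_ASSIST_PAGE,
          vp_assist_gpa | HV_X64_MSR_VP_ASSIST_PAGE_ENABLE);
}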
/* Returns the same value as the ioctl. */
int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun)
{
    vmx_error_t err = 0;
    int ret;
    preempt_flag flags;
    struct vcpu_state_t *state = vcpu->state;
    uint32 vmcs_err = 0;

    while (1) {
        exit_reason_t exit_reason;

        if (vcpu->paused) {
            htun->_exit_status = HAX_EXIT_PAUSED;
            return 0;
        }
        if (vcpu_is_panic(vcpu))
            return 0;

        if ((vmcs_err = load_vmcs(vcpu, &flags))) {
            hax_panic_vcpu(vcpu, "load_vmcs fail: %x\n", vmcs_err);
            hax_panic_log(vcpu);
            return 0;
        }

        vcpu_handle_vmcs_pending(vcpu);
        vcpu_inject_intr(vcpu, htun);

        /* The code segment type from QEMU is sometimes 10 (execute/read code
         * segment), which makes the guest state invalid: VMX hardware
         * requires an accessed code segment (type 11), and 11 is one of the
         * values it allows. */
        {
            uint32 temp = vmread(vcpu, GUEST_CS_AR);

            if ((temp & 0xf) == 0xa) {
                temp = temp + 1;
                vmwrite(vcpu, GUEST_CS_AR, temp);
            }
        }

        /* The TSS segment type from QEMU is sometimes wrong; hard-code it to
         * 11 (busy 32-bit TSS) for now. */
        {
            uint32 temp = vmread(vcpu, GUEST_TR_AR);

            temp = (temp & ~0xf) | 0xb;
            vmwrite(vcpu, GUEST_TR_AR, temp);
        }

        err = cpu_vmx_run(vcpu, htun);
        if (err) {
            hax_debug("cpu_vmx_run error, code:%lx\n", err);
            if ((vmcs_err = put_vmcs(vcpu, &flags))) {
                hax_panic_vcpu(vcpu, "put_vmcs fail: %x\n", vmcs_err);
                hax_panic_log(vcpu);
            }
            return -EINVAL;
        }

        exit_reason.raw = vmread(vcpu, VM_EXIT_INFO_REASON);
        hax_debug("....exit_reason.raw %x, cpu %d %d\n", exit_reason.raw,
                  vcpu->cpu_id, hax_cpuid());

        /* XXX: We currently save/restore MSRs and the FPU actively, mainly
         * because there is no scheduler hook to get notified of preemption.
         * This should be changed once there is a better approach. */
        vcpu->state->_rip = vmread(vcpu, GUEST_RIP);

        hax_handle_idt_vectoring(vcpu);

        vmx(vcpu, exit_qualification).raw =
                vmread(vcpu, VM_EXIT_INFO_QUALIFICATION);
        vmx(vcpu, exit_intr_info).raw =
                vmread(vcpu, VM_EXIT_INFO_INTERRUPT_INFO);
        vmx(vcpu, exit_exception_error_code) =
                vmread(vcpu, VM_EXIT_INFO_EXCEPTION_ERROR_CODE);
        vmx(vcpu, exit_idt_vectoring) =
                vmread(vcpu, VM_EXIT_INFO_IDT_VECTORING);
        vmx(vcpu, exit_instr_length) =
                vmread(vcpu, VM_EXIT_INFO_INSTRUCTION_LENGTH);

        state->_rflags = vmread(vcpu, GUEST_RFLAGS);
        state->_rsp = vmread(vcpu, GUEST_RSP);
        VMREAD_SEG(vcpu, CS, state->_cs);
        VMREAD_SEG(vcpu, DS, state->_ds);
        VMREAD_SEG(vcpu, ES, state->_es);
        vmread_cr(vcpu);

        if (vcpu->nr_pending_intrs > 0 || hax_intr_is_blocked(vcpu))
            htun->ready_for_interrupt_injection = 0;
        else
            htun->ready_for_interrupt_injection = 1;

        vcpu->cur_state = GS_STALE;
        vmcs_err = put_vmcs(vcpu, &flags);
        if (vmcs_err) {
            hax_panic_vcpu(vcpu, "put_vmcs() fail before vmexit. %x\n",
                           vmcs_err);
            hax_panic_log(vcpu);
        }
        hax_enable_irq();

        ret = cpu_vmexit_handler(vcpu, exit_reason, htun);
        if (ret <= 0)
            return ret;
    }
}
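The two access-rights fix-ups above are easier to follow with the VMCS segment AR layout spelled out. Below is a minimal sketch of the same logic as standalone helpers; the names fixup_guest_cs_ar and fixup_guest_tr_ar are hypothetical, but the bit layout (type in bits 3:0, with bit 0 of a code-segment type being the accessed bit) and the VM-entry checks they satisfy (CS must be an accessed code segment, type 9, 11, 13 or 15; TR must be a busy TSS, type 3 or 11) come from the Intel SDM.

#include <stdint.h>

#define AR_TYPE_MASK        0xfu  /* bits 3:0: segment type */
#define AR_TYPE_ACCESSED    0x1u  /* bit 0 of a code/data type: accessed */
#define CS_TYPE_XR          0xau  /* execute/read code, not accessed (10) */
#define TR_TYPE_BUSY_TSS32  0xbu  /* busy 32-bit TSS (11) */

/* VM entry requires CS to be an accessed code segment, so setting the
 * accessed bit turns the one offending value, 10, into the allowed 11,
 * exactly as the `temp + 1` in cpu_vmx_execute() does. */
static uint32_t fixup_guest_cs_ar(uint32_t ar)
{
    if ((ar & AR_TYPE_MASK) == CS_TYPE_XR)
        ar |= AR_TYPE_ACCESSED;
    return ar;
}

/* VM entry requires TR to be a busy TSS; hard-code the 32-bit variant,
 * as the HAXM code above does. */
static uint32_t fixup_guest_tr_ar(uint32_t ar)
{
    return (ar & ~AR_TYPE_MASK) | TR_TYPE_BUSY_TSS32;
}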
/* L1 guest: verify that VMCS launch state is preserved across VMPTRLD
 * switches and exercise reads/writes through a shadow VMCS. */
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved. */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Skip over L2's 3-byte VMCALL instruction. */
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	vmwrite(GUEST_RIP, 0xc0ffffee);
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}
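The vmlaunch()/vmresume() assertions above all follow from the VMCS launch-state rules in the Intel SDM: VMLAUNCH succeeds only on a current VMCS whose launch state is clear (and moves it to launched), VMRESUME succeeds only on one that is launched, and VM entry always fails if the current VMCS is a shadow VMCS. A toy model of those rules, for illustration only, not part of the selftest:

#include <stdbool.h>

enum vmcs_launch_state { VMCS_CLEAR, VMCS_LAUNCHED };

struct vmcs_model {
    enum vmcs_launch_state state;
    bool shadow;  /* a shadow VMCS can never be entered directly */
};

/* VMLAUNCH: requires a clear, non-shadow current VMCS; on success the
 * launch state becomes "launched", which is why the test expects a second
 * vmlaunch() to fail even after a vmptrld() round trip. */
static bool vmlaunch_ok(struct vmcs_model *v)
{
    if (v->shadow || v->state != VMCS_CLEAR)
        return false;
    v->state = VMCS_LAUNCHED;
    return true;
}

/* VMRESUME: requires a launched, non-shadow current VMCS. */
static bool vmresume_ok(const struct vmcs_model *v)
{
    return !v->shadow && v->state == VMCS_LAUNCHED;
}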