/* Write a batch of VMCS fields for the given VCPU.
 *
 * For each (field, value) pair: update the software-shadowed copy of the
 * field when we keep one (exception bitmap, CR0 and its mask/read-shadow),
 * then write the value to the hardware VMCS.  The value is also echoed into
 * the current thread's first message register, so after the loop the caller
 * sees the last value written.
 *
 * Always returns EXCEPTION_NONE.
 */
exception_t invokeWriteVMCS(vcpu_t *vcpu, int num_fields, uint32_t *fields, uint32_t *values)
{
    tcb_t *thread = ksCurThread;
    int i;

    /* Make this VCPU's VMCS the active one before any vmwrite. */
    if (current_vmcs != vcpu) {
        vmptrld(vcpu);
    }

    for (i = 0; i < num_fields; i++) {
        uint32_t field = fields[i];
        uint32_t value = values[i];

        /* Keep the cached copies used by readVMCSfield in sync. */
        switch (field) {
        case VMX_CONTROL_EXCEPTION_BITMAP:
            vcpu->exception_mask = value;
            break;
        case VMX_GUEST_CR0:
            vcpu->cr0 = value;
            break;
        case VMX_CONTROL_CR0_MASK:
            vcpu->cr0_mask = value;
            break;
        case VMX_CONTROL_CR0_READ_SHADOW:
            vcpu->cr0_shadow = value;
            break;
        }

        setRegister(thread, msgRegisters[0], value);
        vmwrite(field, value);
    }

    return EXCEPTION_NONE;
}
/* Read a single VMCS field for the given VCPU.
 *
 * Fields that are shadowed in software are served straight from the vcpu
 * structure, avoiding a VMCS load.  Anything else is read from the hardware
 * VMCS, loading this VCPU's VMCS first if it is not currently active.
 */
static uint32_t readVMCSfield(vcpu_t *vcpu, uint32_t field)
{
    switch (field) {
    case VMX_DATA_EXIT_INTERRUPT_INFO:
        return vcpu->interrupt_info;
    case VMX_CONTROL_EXCEPTION_BITMAP:
        return vcpu->exception_mask;
    case VMX_GUEST_CR0:
        return vcpu->cr0;
    case VMX_CONTROL_CR0_MASK:
        return vcpu->cr0_mask;
    case VMX_CONTROL_CR0_READ_SHADOW:
        return vcpu->cr0_shadow;
    }

    /* Not shadowed: go to hardware. */
    if (current_vmcs != vcpu) {
        vmptrld(vcpu);
    }
    return vmread(field);
}
/* Capture guest VMCS state into a TCB's message registers / IPC buffer
 * when leaving a VM with an asynchronous transfer pending.
 *
 * Copies the guest RIP and primary processor-based controls into the first
 * two message registers, and the entry-interruption-info field into word 3
 * of the IPC buffer.  No-op when the TCB has no VCPU, or (for the IPC
 * buffer part) when the buffer lookup fails.  Compiled out entirely
 * without CONFIG_VTX.
 */
void Arch_leaveVMAsyncTransfer(tcb_t *tcb)
{
#ifdef CONFIG_VTX
    vcpu_t *vcpu = tcb->tcbArch.vcpu;

    if (!vcpu) {
        return;
    }

    /* Ensure this VCPU's VMCS is active before reading from it. */
    if (current_vmcs != vcpu) {
        vmptrld(vcpu);
    }

    setRegister(tcb, msgRegisters[0], vmread(VMX_GUEST_RIP));
    setRegister(tcb, msgRegisters[1], vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));

    word_t *buffer = lookupIPCBuffer(true, tcb);
    if (buffer) {
        buffer[3] = vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO);
    }
#endif
}
/* Initialize a freshly-created VCPU and its VMCS region.
 *
 * Stamps the VMCS revision identifier into the first word of the region,
 * clears and loads it, then programs the fixed host state, the fixed
 * control fields (with the CPU's required fixed-0/fixed-1 bits applied via
 * applyFixedBits), and the I/O bitmaps (all ports trapped).  Finally seeds
 * the software-shadowed CR0/exception state kept on the vcpu structure.
 *
 * Statement order matters: vmclear must precede vmptrld, and all vmwrites
 * require this VMCS to be the active one.
 */
void vcpu_init(vcpu_t *vcpu)
{
    uint32_t *vmcs = (uint32_t *)vcpu;

    vcpu->tcb = NULL;
    vcpu->launched = false;

    /* The first word of a VMCS region must hold the revision id. */
    *vmcs = vmcs_revision;

    vmclear(vcpu);
    vmptrld(vcpu);

    /* --- Fixed host state --- */
    /*vmwrite(VMX_HOST_PAT, 0);
    vmwrite(VMX_HOST_EFER, 0);
    vmwrite(VMX_HOST_PERF_GLOBAL_CTRL, 0);*/
    vmwrite(VMX_HOST_CR0, read_cr0());
    /* CR3 is set dynamically. */
    vmwrite(VMX_HOST_CR4, read_cr4());
    vmwrite(VMX_HOST_FS_BASE, 0);
    vmwrite(VMX_HOST_GS_BASE, 0);
    vmwrite(VMX_HOST_TR_BASE, (uint32_t)&ia32KStss);
    vmwrite(VMX_HOST_GDTR_BASE, (uint32_t)ia32KSgdt);
    vmwrite(VMX_HOST_IDTR_BASE, (uint32_t)ia32KSidt);
    vmwrite(VMX_HOST_SYSENTER_CS, (uint32_t)SEL_CS_0);
    vmwrite(VMX_HOST_SYSENTER_EIP, (uint32_t)&handle_syscall);
    vmwrite(VMX_HOST_SYSENTER_ESP, (uint32_t)&ia32KStss.words[1]);
    /* VMX_HOST_RSP is set dynamically. */
    vmwrite(VMX_HOST_RIP, (uint32_t)&handle_vmexit);
    vmwrite(VMX_HOST_ES_SELECTOR, SEL_DS_0);
    vmwrite(VMX_HOST_CS_SELECTOR, SEL_CS_0);
    vmwrite(VMX_HOST_SS_SELECTOR, SEL_DS_0);
    vmwrite(VMX_HOST_DS_SELECTOR, SEL_DS_0);
    vmwrite(VMX_HOST_FS_SELECTOR, 0);
    vmwrite(VMX_HOST_GS_SELECTOR, 0);
    vmwrite(VMX_HOST_TR_SELECTOR, SEL_TSS);

    /* --- Fixed VMCS control fields, with required fixed bits applied --- */
    vmwrite(VMX_CONTROL_PIN_EXECUTION_CONTROLS,
            applyFixedBits(0, pin_control_high, pin_control_low));
    vmwrite(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS,
            applyFixedBits(0, primary_control_high, primary_control_low));
    vmwrite(VMX_CONTROL_SECONDARY_PROCESSOR_CONTROLS,
            applyFixedBits(0, secondary_control_high, secondary_control_low));
    vmwrite(VMX_CONTROL_EXIT_CONTROLS,
            applyFixedBits(0, exit_control_high, exit_control_low));
    vmwrite(VMX_CONTROL_ENTRY_CONTROLS,
            applyFixedBits(0, entry_control_high, entry_control_low));
    vmwrite(VMX_CONTROL_MSR_ADDRESS, (uint32_t)pptr_to_paddr(msr_bitmap));
    vmwrite(VMX_GUEST_CR0, applyFixedBits(0, cr0_high, cr0_low));
    vmwrite(VMX_GUEST_CR4, applyFixedBits(0, cr4_high, cr4_low));

    /* No shadow VMCS: the link pointer must be all-ones. */
    vmwrite(VMX_GUEST_VMCS_LINK_POINTER, ~0);
    vmwrite(VMX_GUEST_VMCS_LINK_POINTER_HIGH, ~0);

    /* I/O bitmaps A and B: all bits set, i.e. every port exits. */
    memset(vcpu->io, ~0, 8192);
    vmwrite(VMX_CONTROL_IOA_ADDRESS, pptr_to_paddr(vcpu->io));
    vmwrite(VMX_CONTROL_IOB_ADDRESS, pptr_to_paddr((char *)vcpu->io + 4096));
    vcpu->io_min = -1;
    vcpu->io_max = -1;

    /* Seed the software shadows to match what was written above. */
    vcpu->cr0 = applyFixedBits(0, cr0_high, cr0_low);
    vcpu->cr0_shadow = 0;
    vcpu->cr0_mask = 0;
    vcpu->exception_mask = 0;
}
/* L1 guest body for a nested-VMX state test.
 *
 * Runs inside the guest and walks through VMXON/VMCS setup, launching an L2
 * guest, and exercising shadow-VMCS accesses.  Each GUEST_SYNC(n) is a
 * rendezvous with host-side code that is not visible in this file; the host
 * presumably saves/restores or mutates VM state between sync points (which
 * would explain the skipped sync numbers and the values asserted after them)
 * — TODO confirm against the host side of this test.
 *
 * Convention visible below: vmlaunch()/vmresume()/vmptrld() return nonzero
 * on failure (hence GUEST_ASSERT(!vmlaunch()) for "must succeed" and
 * GUEST_ASSERT(vmlaunch()) for "must fail").
 */
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
    unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

    GUEST_ASSERT(vmx_pages->vmcs_gpa);
    GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
    GUEST_SYNC(3);
    GUEST_ASSERT(load_vmcs(vmx_pages));
    /* The loaded VMCS must be the current one... */
    GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

    GUEST_SYNC(4);
    /* ...and must still be current after the sync with the host. */
    GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

    prepare_vmcs(vmx_pages, l2_guest_code,
                 &l2_guest_stack[L2_GUEST_STACK_SIZE]);

    GUEST_SYNC(5);
    GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
    /* First launch of L2 must succeed; L2 exits back via VMCALL. */
    GUEST_ASSERT(!vmlaunch());
    GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

    /* Check that the launched state is preserved. */
    GUEST_ASSERT(vmlaunch());

    /* Once launched, only vmresume may re-enter L2. */
    GUEST_ASSERT(!vmresume());
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

    GUEST_SYNC(7);
    /* Launched state must survive the sync-point round trip too. */
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

    GUEST_ASSERT(!vmresume());
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

    /* Skip past the VMCALL instruction (3 bytes) in L2. */
    vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

    /* Enable shadow VMCS and point the link pointer at it. */
    vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
    vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

    /* Loading the shadow VMCS must succeed, but launching from it must
     * fail. */
    GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
    GUEST_ASSERT(vmlaunch());
    GUEST_SYNC(8);
    GUEST_ASSERT(vmlaunch());
    GUEST_ASSERT(vmresume());

    /* Writes to the shadow VMCS are readable back (value chosen as a
     * recognizable marker, not a real RIP). */
    vmwrite(GUEST_RIP, 0xc0ffee);
    GUEST_SYNC(9);
    GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

    /* Back on the ordinary VMCS, resuming L2 works again. */
    GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
    GUEST_ASSERT(!vmresume());
    GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

    GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
    /* 0xc0ffffee differs from the 0xc0ffee written above — presumably the
     * host rewrote the shadow GUEST_RIP between sync points; confirm
     * against the host side. */
    GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
    GUEST_ASSERT(vmlaunch());
    GUEST_ASSERT(vmresume());
    GUEST_SYNC(13);
    GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
    GUEST_ASSERT(vmlaunch());
    GUEST_ASSERT(vmresume());
}