Example #1
/* -----------------------------------------------------------------------------
   vmx_off()
   Leave VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_off(void)
{
	int result;
	
	/* Tell the CPU to release the VMXON region */
	if ((result = __vmxoff()) != VMX_SUCCEED) {
		panic("vmx_off: unexpected return %d from __vmxoff()", result);
	}
}
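
For context (not from this example's source): VMXOFF raises #UD unless the logical processor is in VMX operation, which in turn requires CR4.VMXE to be set; this is why Example #2 below calls __vmxoff() only when its VMXON_HAX flag is set, and why Example #3 asserts on CR4.VMXE first. A minimal defensive sketch, assuming read_cr4()/write_cr4() helpers and an X86_CR4_VMXE constant exist in the surrounding kernel:

/* Sketch only: CR4.VMXE set is a necessary (not sufficient) condition for
   being in VMX operation, so check it before VMXOFF and clear it after. */
static void
vmx_off_safe(void)
{
	if (!(read_cr4() & X86_CR4_VMXE))
		return;		/* VMXOFF would raise #UD here */
	__vmxoff();		/* leave VMX root operation */
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}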
Example #2
File: cpu.c Project: tisma/haxm
uint32 put_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
{
    int cpu_id = hax_cpuid();
    struct per_cpu_data *cpu_data = hax_cpu_data[cpu_id];
    paddr_t vmcs_phy;
    vmx_error_t err = 0;
    vmx_error_t vmxoff_err = 0;
    if (vcpu && cpu_data->nested > 0) {
        cpu_data->nested--;
        goto out;
    }

    if (vcpu)
        vmcs_phy = vcpu_vmcs_pa(vcpu);
    else
        vmcs_phy = hax_page_pa(cpu_data->vmcs_page);

    if (__vmclear(vmcs_phy) != VMX_SUCCEED) {
        hax_error("HAX: vmclear ailed (%llx)\n", vmcs_phy);
        log_vmclear_err = 1;
    }

    cpu_data->current_vcpu = NULL;

    if (cpu_data->vmm_flag & VMXON_HAX) {
        err = __vmxoff();
        if (!(err & VMX_FAIL_MASK)) {
            restore_host_cr4_vmxe(cpu_data);
        } else {
            hax_error("VMXOFF Failed..........\n");
            vmxoff_err = err;
            log_vmxoff_err = err;
        }
    } else {
        log_vmxoff_no = 1;
#ifdef __MACH__
        hax_debug("Skipping VMXOFF because another VMM (VirtualBox or macOS"
                  " Hypervisor Framework) is running\n");
#else
        vmxoff_err = 0x1;
        hax_error("NO VMXOFF.......\n");
#endif
    }
    cpu_data->other_vmcs = VMCS_NONE;
    cpu_data->vmm_flag = 0;
    if (vcpu && vcpu->is_vmcs_loaded)
        vcpu->is_vmcs_loaded = 0;
out:
    hax_enable_preemption(flags);
    return vmxoff_err;
}
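
put_vmcs() is the counterpart of load_vmcs() (Example #4 below): a nested call for a vCPU whose VMCS is still loaded only decrements cpu_data->nested, and only the outermost call executes VMCLEAR plus, when HAXM itself performed VMXON (the VMXON_HAX flag), VMXOFF and the CR4.VMXE restore. A combined usage sketch follows Example #4.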
Example #3
void vmx_cpu_down(void)
{
    struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
    unsigned long flags;

    local_irq_save(flags);

    while ( !list_empty(active_vmcs_list) )
        __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
                                    struct vcpu, arch.hvm_vmx.active_list));

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));
    __vmxoff();

    local_irq_restore(flags);
}
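
This is Xen's per-CPU VMX teardown: every VMCS still active on this CPU is VMCLEARed before VMXOFF, with interrupts disabled so the per-CPU active_vmcs_list cannot be modified from interrupt context mid-loop; the BUG_ON checks the same CR4.VMXE precondition sketched after Example #1.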
Example #4
File: cpu.c Project: tisma/haxm
uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
{
    struct per_cpu_data *cpu_data;
    paddr_t vmcs_phy;
    paddr_t curr_vmcs = VMCS_NONE;
    vmx_error_t err = 0;
    uint64 fc_msr;

    hax_disable_preemption(flags);

    /* When waking up from sleep, we need the barrier, as VMX operations
     * are not serializing instructions.
     */
    smp_mb();

    cpu_data = current_cpu_data();

    if (vcpu && is_vmcs_loaded(vcpu)) {
        cpu_data->nested++;
        return 0;
    }

    cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE);
    if (cpu_data->host_cr4_vmxe) {
        if (debug_vmcs_count % 100000 == 0) {
            hax_debug("host VT has enabled!\n");
            hax_debug("Cr4 value = 0x%lx\n", get_cr4());
            log_host_cr4_vmxe = 1;
            log_host_cr4 = get_cr4();
        }
        debug_vmcs_count++;
    }
    set_cr4(get_cr4() | CR4_VMXE);
    /*
     * HP and Mac systems workaround:
     * When resuming from S3, some HP/Mac machines set the
     * IA32_FEATURE_CONTROL MSR to zero. With the lock bit cleared, a
     * subsequent VMXON would cause a #GP. As a workaround, when we see this
     * condition we set the required bits so that VMXON (and thereby HAX)
     * can run.
     *   bit 0 - lock bit
     *   bit 2 - enable VMX outside SMX operation
     *
     * TODO: This is a workaround for the BSOD on resume from S3. The better
     * fix is to add a power management handler and set the
     * IA32_FEATURE_CONTROL MSR in that S3 handler.
     */
    fc_msr = ia32_rdmsr(IA32_FEATURE_CONTROL);
    if (!(fc_msr & FC_LOCKED))
        ia32_wrmsr(IA32_FEATURE_CONTROL,
                   fc_msr | FC_LOCKED | FC_VMXON_OUTSMX);

    err = __vmxon(hax_page_pa(cpu_data->vmxon_page));

    log_vmxon_err = err;
    log_vmxon_addr = hax_page_pa(cpu_data->vmxon_page);

    if (!(err & VMX_FAIL_MASK))
        cpu_data->vmm_flag |= VMXON_HAX;
    else {
        bool fatal = true;

#ifdef __MACH__
        if ((err & VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
            // On macOS, if VMXON fails with VMX_FAIL_INVALID and host CR4.VMXE
            // was already set, it is very likely that another VMM (VirtualBox
            // or any VMM based on macOS Hypervisor Framework, e.g. Docker) is
            // running and did not call VMXOFF. In that case, the current host
            // logical processor is already in VMX operation, and we can use an
            // innocuous VMX instruction (VMPTRST) to confirm that.
            // However, if the above assumption is wrong and the host processor
            // is not actually in VMX operation, VMPTRST will probably cause a
            // host reboot. But we don't have a better choice, and it is worth
            // taking the risk.
            curr_vmcs = __vmptrst();
            if (curr_vmcs == VMCS_NONE) {
                hax_debug("Already in VMX operation, courtesy of another"
                          " VMM (VirtualBox or macOS Hypervisor Framework)\n");
                fatal = false;
                // Indicate that it is not necessary to call VMXOFF later
                cpu_data->vmm_flag &= ~VMXON_HAX;
            } else {
                // Should never happen
                hax_error("VMXON failed with VMX_FAIL_INVALID, but there is a"
                          " current VMCS at 0x%llx\n", curr_vmcs);
            }
        }
#endif

        if (fatal) {
            hax_error("VMXON failed for region 0x%llx (err=0x%x)\n",
                      hax_page_pa(cpu_data->vmxon_page), (uint32) err);
            restore_host_cr4_vmxe(cpu_data);
            if (err & VMX_FAIL_INVALID) {
                log_vmxon_err_type1 = 1;
            } else {
                // TODO: Should VMX_FAIL_VALID be ignored? The current VMCS can
                // be cleared (deactivated and saved to memory) using VMCLEAR
                log_vmxon_err_type2 = 1;
            }
            hax_enable_preemption(flags);
            return VMXON_FAIL;
        }
    }

    if (vcpu)
        ((vmcs_t*)(hax_page_va(vcpu->vmcs_page)))->_revision_id =
                cpu_data->vmx_info._vmcs_revision_id;

    if (vcpu)
        vmcs_phy = vcpu_vmcs_pa(vcpu);
    else
        vmcs_phy = hax_page_pa(cpu_data->vmcs_page);

    if (__vmptrld(vmcs_phy) != VMX_SUCCEED) {
        hax_error("HAX: vmptrld failed (%08llx)\n", vmcs_phy);
        cpu_data->vmm_flag = 0;
        __vmxoff();
        restore_host_cr4_vmxe(cpu_data);
        log_vmxon_err_type3 = 1;
        hax_enable_preemption(flags);
        return VMPTRLD_FAIL;
    }

    if (vcpu) {
        vcpu->is_vmcs_loaded = 1;
        cpu_data->current_vcpu = vcpu;
        vcpu->cpu_id = hax_cpuid();
    }

    cpu_data->other_vmcs = curr_vmcs;
    return VMXON_SUCCESS;
}
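
A minimal usage sketch (not from the HAXM sources) of how load_vmcs() and put_vmcs() are meant to bracket access to a VMCS; the placeholder comment stands in for whatever VMREAD/VMWRITE accessors the caller uses, and with_loaded_vmcs is a hypothetical name:

static uint32 with_loaded_vmcs(struct vcpu_t *vcpu)
{
    preempt_flag flags;
    uint32 err;

    /* VMXON (if needed) and VMPTRLD of the vCPU's VMCS; preemption is
     * disabled by load_vmcs() and re-enabled by put_vmcs(). Passing
     * vcpu == NULL selects the per-CPU cpu_data->vmcs_page instead. */
    err = load_vmcs(vcpu, &flags);
    if (err)
        return err;

    /* ... VMREAD/VMWRITE fields of the now-current VMCS here ... */

    /* VMCLEAR the VMCS and VMXOFF (unless another VMM owns VMX operation). */
    return put_vmcs(vcpu, &flags);
}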