Example #1
static void vmx_load_vmcs(struct vcpu *v)
{
    unsigned long flags;

    local_irq_save(flags);

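    /* First activation of this VMCS: add it to this CPU's active list and
     * record which CPU owns it. */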
    if ( v->arch.hvm_vmx.active_cpu == -1 )
    {
        list_add(&v->arch.hvm_vmx.active_list, &this_cpu(active_vmcs_list));
        v->arch.hvm_vmx.active_cpu = smp_processor_id();
    }

    ASSERT(v->arch.hvm_vmx.active_cpu == smp_processor_id());

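    /* Make this VMCS current on this CPU and cache it in the per-CPU pointer. */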
    __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    this_cpu(current_vmcs) = v->arch.hvm_vmx.vmcs;

    local_irq_restore(flags);
}
Example #2
File: cpu.c  Project: tisma/haxm
uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
{
    struct per_cpu_data *cpu_data;
    paddr_t vmcs_phy;
    paddr_t curr_vmcs = VMCS_NONE;
    vmx_error_t err = 0;
    uint64 fc_msr;

    hax_disable_preemption(flags);

    /* When waking up from sleep we need this barrier, as VMX operations
     * are not serializing instructions.
     */
    smp_mb();

    cpu_data = current_cpu_data();

    if (vcpu && is_vmcs_loaded(vcpu)) {
        cpu_data->nested++;
        return 0;
    }

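    /* Record whether the host already had CR4.VMXE set before we force it on. */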
    cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE);
    if (cpu_data->host_cr4_vmxe) {
        if (debug_vmcs_count % 100000 == 0) {
            hax_debug("host VT already enabled!\n");
            hax_debug("Cr4 value = 0x%lx\n", get_cr4());
            log_host_cr4_vmxe = 1;
            log_host_cr4 = get_cr4();
        }
        debug_vmcs_count++;
    }
    set_cr4(get_cr4() | CR4_VMXE);
    /* HP and Mac systems workaround
     * When resuming from S3, some HP/Mac firmware sets the
     * IA32_FEATURE_CONTROL MSR to zero. With the lock bit clear, executing
     * VMXON raises a #GP. As a workaround, when we see this condition we set
     * the bits below so that VMXON, and thereby HAX, can run.
     * bit 0 - Lock bit
     * bit 2 - Enable VMX outside SMX operation
     *
     * TODO: this is a workaround for the BSOD seen when resuming from S3.
     * The proper fix is to add a power management handler and set the
     * IA32_FEATURE_CONTROL MSR in that S3 handler.
     */
    fc_msr = ia32_rdmsr(IA32_FEATURE_CONTROL);
    if (!(fc_msr & FC_LOCKED))
        ia32_wrmsr(IA32_FEATURE_CONTROL,
                   fc_msr | FC_LOCKED | FC_VMXON_OUTSMX);

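    /* Enter VMX root operation using this CPU's VMXON region. */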
    err = __vmxon(hax_page_pa(cpu_data->vmxon_page));

    log_vmxon_err = err;
    log_vmxon_addr = hax_page_pa(cpu_data->vmxon_page);

    if (!(err & VMX_FAIL_MASK))
        cpu_data->vmm_flag |= VMXON_HAX;
    else {
        bool fatal = true;

#ifdef __MACH__
        if ((err & VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
            // On macOS, if VMXON fails with VMX_FAIL_INVALID and host CR4.VMXE
            // was already set, it is very likely that another VMM (VirtualBox
            // or any VMM based on macOS Hypervisor Framework, e.g. Docker) is
            // running and did not call VMXOFF. In that case, the current host
            // logical processor is already in VMX operation, and we can use an
            // innocuous VMX instruction (VMPTRST) to confirm that.
            // However, if the above assumption is wrong and the host processor
            // is not actually in VMX operation, VMPTRST will probably cause a
            // host reboot. But we don't have a better choice, and it is worth
            // taking the risk.
            curr_vmcs = __vmptrst();
            if (curr_vmcs == VMCS_NONE) {
                hax_debug("Already in VMX operation, courtesy of another"
                          " VMM (VirtualBox or macOS Hypervisor Framework)\n");
                fatal = false;
                // Indicate that it is not necessary to call VMXOFF later
                cpu_data->vmm_flag &= ~VMXON_HAX;
            } else {
                // Should never happen
                hax_error("VMXON failed with VMX_FAIL_INVALID, but there is a"
                          " current VMCS at 0x%llx\n", curr_vmcs);
            }
        }
#endif

        if (fatal) {
            hax_error("VMXON failed for region 0x%llx (err=0x%x)\n",
                      hax_page_pa(cpu_data->vmxon_page), (uint32) err);
            restore_host_cr4_vmxe(cpu_data);
            if (err & VMX_FAIL_INVALID) {
                log_vmxon_err_type1 = 1;
            } else {
                // TODO: Should VMX_FAIL_VALID be ignored? The current VMCS can
                // be cleared (deactivated and saved to memory) using VMCLEAR
                log_vmxon_err_type2 = 1;
            }
            hax_enable_preemption(flags);
            return VMXON_FAIL;
        }
    }

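    /* The VMCS region must contain the processor's VMCS revision identifier
     * before it can be made current with VMPTRLD. */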
    if (vcpu)
        ((vmcs_t*)(hax_page_va(vcpu->vmcs_page)))->_revision_id =
                cpu_data->vmx_info._vmcs_revision_id;

    if (vcpu)
        vmcs_phy = vcpu_vmcs_pa(vcpu);
    else
        vmcs_phy = hax_page_pa(cpu_data->vmcs_page);


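    /* Make the chosen VMCS (per-vCPU or per-CPU) current on this processor. */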
    if (__vmptrld(vmcs_phy) != VMX_SUCCEED) {
        hax_error("HAX: vmptrld failed (%08llx)\n", vmcs_phy);
        cpu_data->vmm_flag = 0;
        __vmxoff();
        restore_host_cr4_vmxe(cpu_data);
        log_vmxon_err_type3 = 1;
        hax_enable_preemption(flags);
        return VMPTRLD_FAIL;
    }

    if (vcpu) {
        vcpu->is_vmcs_loaded = 1;
        cpu_data->current_vcpu = vcpu;
        vcpu->cpu_id = hax_cpuid();
    }

    cpu_data->other_vmcs = curr_vmcs;
    return VMXON_SUCCESS;
}
Example #3
File: cpu.c  Project: tisma/haxm
vmx_error_t cpu_vmx_vmptrld(struct per_cpu_data *cpu_data, paddr_t vmcs,
                            struct vcpu_t *vcpu)
{
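    /* Thin wrapper around VMPTRLD; cpu_data and vcpu are unused here. */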
    vmx_error_t r = __vmptrld(vmcs);
    return r;
}