Example #1
/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	vmx_init();
	
	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	set_cr4(get_cr4() | CR4_VMXE);
	
	assert(vmx_is_cr0_valid(&cpu->specs));
	assert(vmx_is_cr4_valid(&cpu->specs));
	
	if ((result = __vmxon(&vmxon_region_paddr)) != VMX_SUCCEED) {
		panic("vmx_on: unexpected return %d from __vmxon()", result);
	}
}
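
Note that __vmxon() takes the physical address of a VMXON region that must already be prepared: a zeroed, 4 KiB-aligned, physically contiguous page whose first 32 bits hold the VMCS revision identifier reported by the IA32_VMX_BASIC MSR (with bit 31 clear). A minimal sketch of that preparation step, assuming the caller has already read the MSR and allocated the page (the function name is illustrative, not taken from the XNU source):

#include <stdint.h>

/*
 * Write the VMCS revision identifier into a freshly allocated VMXON region.
 * vmx_basic_msr is the raw value of IA32_VMX_BASIC; bits 30:0 carry the
 * revision identifier, and VMXON expects it in the first 32 bits of the
 * region with bit 31 clear.
 */
static void
write_vmxon_revision_id(void *vmxon_region, uint64_t vmx_basic_msr)
{
    uint32_t revision_id = (uint32_t)(vmx_basic_msr & 0x7fffffffu);

    *(volatile uint32_t *)vmxon_region = revision_id;
}

In the XNU flow above, this kind of initialization is presumably done when cpu->vmxon_region is allocated, before vmx_on() ever runs.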
Example #2
int vmx_cpu_up(void)
{
    u32 eax, edx;
    int bios_locked, cpu = smp_processor_id();
    u64 cr0, vmx_cr0_fixed0, vmx_cr0_fixed1;

    BUG_ON(!(read_cr4() & X86_CR4_VMXE));

    /*
     * Ensure the current processor operating mode meets
     * the required CR0 fixed bits for VMX operation.
     */
    cr0 = read_cr0();
    rdmsrl(MSR_IA32_VMX_CR0_FIXED0, vmx_cr0_fixed0);
    rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx_cr0_fixed1);
    if ( (~cr0 & vmx_cr0_fixed0) || (cr0 & ~vmx_cr0_fixed1) )
    {
        printk("CPU%d: some settings of host CR0 are " 
               "not allowed in VMX operation.\n", cpu);
        return 0;
    }

    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);

    bios_locked = !!(eax & IA32_FEATURE_CONTROL_MSR_LOCK);
    if ( bios_locked )
    {
        if ( !(eax & (IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX |
                      IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMX disabled by BIOS.\n", cpu);
            return 0;
        }
    }
    else
    {
        eax  = IA32_FEATURE_CONTROL_MSR_LOCK;
        eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX;
        if ( test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) )
            eax |= IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX;
        wrmsr(IA32_FEATURE_CONTROL_MSR, eax, 0);
    }

    vmx_init_vmcs_config();

    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));

    if ( this_cpu(host_vmcs) == NULL )
    {
        this_cpu(host_vmcs) = vmx_alloc_vmcs();
        if ( this_cpu(host_vmcs) == NULL )
        {
            printk("CPU%d: Could not allocate host VMCS\n", cpu);
            return 0;
        }
    }

    switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
    {
    case -2: /* #UD or #GP */
        if ( bios_locked &&
             test_bit(X86_FEATURE_SMXE, &boot_cpu_data.x86_capability) &&
             (!(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_OUTSIDE_SMX) ||
              !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON_INSIDE_SMX)) )
        {
            printk("CPU%d: VMXON failed: perhaps because of TXT settings "
                   "in your BIOS configuration?\n", cpu);
            printk(" --> Disable TXT in your BIOS unless using a secure "
                   "bootloader.\n");
            return 0;
        }
        /* fall through */
    case -1: /* CF==1 or ZF==1 */
        printk("CPU%d: unexpected VMXON failure\n", cpu);
        return 0;
    case 0: /* success */
        break;
    default:
        BUG();
    }

    return 1;
}
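
The CR0 check near the top of vmx_cpu_up() generalizes to any register governed by a FIXED0/FIXED1 MSR pair: every bit set in FIXED0 must be 1, and every bit clear in FIXED1 must be 0. A small standalone sketch of the same test (the function name is illustrative, not part of the Xen source):

#include <stdbool.h>
#include <stdint.h>

/*
 * Returns true if 'value' satisfies the VMX fixed-bit constraints:
 * bits set in 'fixed0' must be 1 in 'value', and bits clear in 'fixed1'
 * must be 0 in 'value'.
 */
static bool vmx_fixed_bits_valid(uint64_t value, uint64_t fixed0, uint64_t fixed1)
{
    return !(~value & fixed0) && !(value & ~fixed1);
}

With this helper, the Xen check above amounts to rejecting the CPU when vmx_fixed_bits_valid(cr0, vmx_cr0_fixed0, vmx_cr0_fixed1) is false.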
Example #3
File: cpu.c Project: tisma/haxm
uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
{
    struct per_cpu_data *cpu_data;
    paddr_t vmcs_phy;
    paddr_t curr_vmcs = VMCS_NONE;
    vmx_error_t err = 0;
    uint64 fc_msr;

    hax_disable_preemption(flags);

    /* When waking up from sleep we need the barrier, as VMX operations
     * are not serializing instructions.
     */
    smp_mb();

    cpu_data = current_cpu_data();

    if (vcpu && is_vmcs_loaded(vcpu)) {
        cpu_data->nested++;
        return 0;
    }

    cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE);
    if (cpu_data->host_cr4_vmxe) {
        if (debug_vmcs_count % 100000 == 0) {
            hax_debug("host VT is already enabled!\n");
            hax_debug("CR4 value = 0x%lx\n", get_cr4());
            log_host_cr4_vmxe = 1;
            log_host_cr4 = get_cr4();
        }
        debug_vmcs_count++;
    }
    set_cr4(get_cr4() | CR4_VMXE);
    /*
     * HP and Mac systems workaround:
     * When resuming from S3, some HP/Mac firmware sets the IA32_FEATURE_CONTROL
     * MSR to zero. With the lock bit left at zero, a subsequent VMXON would
     * cause a #GP. As a workaround, when we see this condition we set the bits
     * below so that VMXON, and therefore hax, can run.
     * bit 0 - Lock bit
     * bit 2 - Enable VMX outside SMX operation
     *
     * TODO: This is a workaround for the BSOD seen when resuming from S3.
     * The proper fix is to add a power management handler and set the
     * IA32_FEATURE_CONTROL MSR in that PM S3 handler.
     */
    fc_msr = ia32_rdmsr(IA32_FEATURE_CONTROL);
    if (!(fc_msr & FC_LOCKED))
        ia32_wrmsr(IA32_FEATURE_CONTROL,
                   fc_msr | FC_LOCKED | FC_VMXON_OUTSMX);

    err = __vmxon(hax_page_pa(cpu_data->vmxon_page));

    log_vmxon_err = err;
    log_vmxon_addr = hax_page_pa(cpu_data->vmxon_page);

    if (!(err & VMX_FAIL_MASK))
        cpu_data->vmm_flag |= VMXON_HAX;
    else {
        bool fatal = true;

#ifdef __MACH__
        if ((err & VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
            // On macOS, if VMXON fails with VMX_FAIL_INVALID and host CR4.VMXE
            // was already set, it is very likely that another VMM (VirtualBox
            // or any VMM based on macOS Hypervisor Framework, e.g. Docker) is
            // running and did not call VMXOFF. In that case, the current host
            // logical processor is already in VMX operation, and we can use an
            // innocuous VMX instruction (VMPTRST) to confirm that.
            // However, if the above assumption is wrong and the host processor
            // is not actually in VMX operation, VMPTRST will probably cause a
            // host reboot. But we don't have a better choice, and it is worth
            // taking the risk.
            curr_vmcs = __vmptrst();
            if (curr_vmcs == VMCS_NONE) {
                hax_debug("Already in VMX operation, courtesy of another"
                          " VMM (VirtualBox or macOS Hypervisor Framework)\n");
                fatal = false;
                // Indicate that it is not necessary to call VMXOFF later
                cpu_data->vmm_flag &= ~VMXON_HAX;
            } else {
                // Should never happen
                hax_error("VMXON failed with VMX_FAIL_INVALID, but there is a"
                          " current VMCS at 0x%llx\n", curr_vmcs);
            }
        }
#endif

        if (fatal) {
            hax_error("VMXON failed for region 0x%llx (err=0x%x)\n",
                      hax_page_pa(cpu_data->vmxon_page), (uint32) err);
            restore_host_cr4_vmxe(cpu_data);
            if (err & VMX_FAIL_INVALID) {
                log_vmxon_err_type1 = 1;
            } else {
                // TODO: Should VMX_FAIL_VALID be ignored? The current VMCS can
                // be cleared (deactivated and saved to memory) using VMCLEAR
                log_vmxon_err_type2 = 1;
            }
            hax_enable_preemption(flags);
            return VMXON_FAIL;
        }
    }

    if (vcpu)
        ((vmcs_t*)(hax_page_va(vcpu->vmcs_page)))->_revision_id =
                cpu_data->vmx_info._vmcs_revision_id;

    if (vcpu)
        vmcs_phy = vcpu_vmcs_pa(vcpu);
    else
        vmcs_phy = hax_page_pa(cpu_data->vmcs_page);


    if (__vmptrld(vmcs_phy) != VMX_SUCCEED) {
        hax_error("HAX: vmptrld failed (%08llx)\n", vmcs_phy);
        cpu_data->vmm_flag = 0;
        __vmxoff();
        restore_host_cr4_vmxe(cpu_data);
        log_vmxon_err_type3 = 1;
        hax_enable_preemption(flags);
        return VMPTRLD_FAIL;
    }

    if (vcpu) {
        vcpu->is_vmcs_loaded = 1;
        cpu_data->current_vcpu = vcpu;
        vcpu->cpu_id = hax_cpuid();
    }

    cpu_data->other_vmcs = curr_vmcs;
    return VMXON_SUCCESS;
}
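
For reference, the VMX_FAIL_INVALID / VMX_FAIL_VALID distinction used above follows the architectural convention: a VMX instruction reports VMfailInvalid by setting CF and VMfailValid by setting ZF, with an error number left in the VM-instruction error field of the current VMCS. A minimal sketch of decoding RFLAGS captured right after a VMX instruction (identifiers are illustrative, not taken from the HAXM source):

#include <stdint.h>

#define RFLAGS_CF (1ULL << 0)   /* carry flag set: VMfailInvalid */
#define RFLAGS_ZF (1ULL << 6)   /* zero flag set: VMfailValid */

typedef enum {
    VMX_RESULT_SUCCEED = 0,
    VMX_RESULT_FAIL_INVALID,  /* operand was not a valid VMCS/VMXON pointer */
    VMX_RESULT_FAIL_VALID     /* error number is in the VM-instruction error field */
} vmx_result_sketch_t;

/* Map the RFLAGS value saved after a VMX instruction to a coarse result. */
static vmx_result_sketch_t decode_vmx_result(uint64_t rflags)
{
    if (rflags & RFLAGS_CF)
        return VMX_RESULT_FAIL_INVALID;
    if (rflags & RFLAGS_ZF)
        return VMX_RESULT_FAIL_VALID;
    return VMX_RESULT_SUCCEED;
}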