Example No. 1: detecting AMD SVM support (svm_available)
bool
svm_available (void)
{
	u32 a, b, c, d;
	u64 tmp;

	asm_cpuid (CPUID_EXT_0, 0, &a, &b, &c, &d);
	if (a < CPUID_EXT_1) {
		printf ("SVM is not available.\n");
		return false;
	}
	asm_cpuid (CPUID_EXT_1, 0, &a, &b, &c, &d);
	if (!(c & CPUID_EXT_1_ECX_SVM_BIT)) {
		printf ("SVM is not available.\n");
		return false;
	}
	asm_rdmsr64 (MSR_AMD_VM_CR, &tmp);
	if (!(tmp & MSR_AMD_VM_CR_SVMDIS_BIT))
		return true;	/* SVM is allowed */
	asm_cpuid (CPUID_EXT_0, 0, &a, &b, &c, &d);
	if (a < CPUID_EXT_A) {
		printf ("SVM is disabled.\n");
		return false;
	}
	asm_cpuid (CPUID_EXT_A, 0, &a, &b, &c, &d);
	if (!(d & CPUID_EXT_A_EDX_SVM_LOCK_BIT)) {
		printf ("SVM is disabled at BIOS.\n");
		return false;
	} else {
		printf ("SVM is disabled with a key.\n");
		return false;
	}
}
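
The constants this check relies on are defined elsewhere in the source tree; for reading, here is a sketch of plausible definitions, with values taken from the AMD64 Architecture Programmer's Manual (Vol. 2) rather than the original headers, so treat them as assumed:

/* Assumed values (AMD APM); the original definitions are not shown. */
#define CPUID_EXT_0			0x80000000	/* EAX returns the max extended leaf */
#define CPUID_EXT_1			0x80000001
#define CPUID_EXT_1_ECX_SVM_BIT		(1 << 2)	/* Fn8000_0001 ECX[2]: SVM */
#define CPUID_EXT_A			0x8000000A
#define CPUID_EXT_A_EDX_SVM_LOCK_BIT	(1 << 2)	/* Fn8000_000A EDX[2]: SVM lock */
#define MSR_AMD_VM_CR			0xC0010114
#define MSR_AMD_VM_CR_SVMDIS_BIT	(1 << 4)	/* VM_CR[4]: SVMDIS */

With VM_CR.SVMDIS set, the SVM-lock probe distinguishes a plain BIOS lock-out from a key-protected one, which matches AMD's recommended detection sequence.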
Example No. 2: RDMSR pass-through with TSC-offset handling (msr_pass_read_msr)
static bool
msr_pass_read_msr (u32 msrindex, u64 *msrdata)
{
	int num;
	struct msrarg m;

	switch (msrindex) {
	case MSR_IA32_TSC_ADJUST:
		*msrdata = current->tsc_offset;
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		asm_rdmsr64 (MSR_IA32_TIME_STAMP_COUNTER, msrdata);
		*msrdata += current->tsc_offset;
		break;
	default:
		m.msrindex = msrindex;
		m.msrdata = msrdata;
		num = callfunc_and_getint (do_read_msr_sub, &m);
		switch (num) {
		case -1:	/* no exception: the RDMSR succeeded */
			break;
		case EXCEPTION_GP:
			return true;	/* report the #GP to the caller */
		default:
			panic ("msr_pass_read_msr: exception %d", num);
		}
	}
	return false;
}
Example No. 3: RDMSR callback helper (do_read_msr_sub)
static asmlinkage void
do_read_msr_sub (void *arg)
{
	struct msrarg *p;

	p = arg;
	asm_rdmsr64 (p->msrindex, p->msrdata);
}
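
Examples 2 and 3 depend on a small argument structure and an exception-trapping call helper defined elsewhere. A minimal sketch consistent with how they are used here (the real definitions may differ):

struct msrarg {
	u32 msrindex;	/* MSR number to access */
	u64 *msrdata;	/* buffer to read into (or write from) */
};

callfunc_and_getint () evidently runs the callback with exceptions trapped and returns -1 on success or the vector number of a raised exception, which is why EXCEPTION_GP turns an invalid RDMSR into a true (fault) result.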
Example No. 4: reading the microcode update signature (get_ia32_bios_sign_id)
static u64
get_ia32_bios_sign_id (void)
{
	u64 rev;
	u32 a, b, c, d;

	asm_wrmsr64 (MSR_IA32_BIOS_SIGN_ID, 0);	/* clear the old signature */
	asm_cpuid (1, 0, &a, &b, &c, &d);	/* CPUID(1) latches the current revision */
	asm_rdmsr64 (MSR_IA32_BIOS_SIGN_ID, &rev);	/* revision in bits 63:32 */
	return rev;
}
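
The write-zero, CPUID(1), read-back sequence is the procedure the Intel SDM gives for obtaining the microcode update signature, which is reported in bits 63:32 of IA32_BIOS_SIGN_ID (MSR 0x8B). A usage sketch built on that layout (the helper below is illustrative, not part of the original):

/* Illustrative: report whether any microcode update has been loaded
 * since reset. */
static bool
microcode_update_loaded (void)
{
	return (get_ia32_bios_sign_id () >> 32) != 0;
}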
Example No. 5: initializing the guest VMCB (svm_vmcb_init)
static void
svm_vmcb_init (void)
{
	struct vmcb *p;

	alloc_page ((void **)&current->u.svm.vi.vmcb,
		    &current->u.svm.vi.vmcb_phys);
	alloc_pages (&current->u.svm.io.iobmp,
		     &current->u.svm.io.iobmp_phys, 3);
	alloc_pages (&current->u.svm.msr.msrbmp,
		     &current->u.svm.msr.msrbmp_phys, 2);
	memset (current->u.svm.io.iobmp, 0xFF, PAGESIZE * 3);
	memset (current->u.svm.msr.msrbmp, 0xFF, PAGESIZE * 2);
	p = current->u.svm.vi.vmcb;
	memset (p, 0, PAGESIZE);
	p->intercept_read_cr = ~4;	/* intercept all CR reads except CR2 */
	p->intercept_write_cr = ~4;	/* likewise for CR writes */
	p->intercept_exception = 0x4000;	/* bit 14: page fault (#PF) */
	p->intercept_intr = 1;
	p->intercept_nmi = 1;
	p->intercept_init = 1;	/* FIXME */
	p->intercept_invlpg = 1;
	p->intercept_invlpga = 1;
	p->intercept_ioio_prot = 1;
	p->intercept_msr_prot = 1;
	p->intercept_task_switches = 1;
	p->intercept_shutdown = 1;
	p->intercept_vmrun = 1;
	p->intercept_vmmcall = 1;
	p->iopm_base_pa = current->u.svm.io.iobmp_phys;
	p->msrpm_base_pa = current->u.svm.msr.msrbmp_phys;
	p->guest_asid = 1;	/* FIXME */
	p->tlb_control = VMCB_TLB_CONTROL_FLUSH_TLB;
	svm_seg_reset (p);
	p->cpl = 0;
	p->efer = MSR_IA32_EFER_SVME_BIT;
	p->cr0 = CR0_PG_BIT;
	p->rflags = RFLAGS_ALWAYS1_BIT;
	if (false && svm_nested_paging_available ()) {
		/* FIXME: Nested paging (branch kept unreachable by the
		 * "false &&" until the support is finished) */
		p->np_enable = 1;
		p->intercept_invlpg = 0;
		p->intercept_exception &= ~0x4000;
		p->intercept_read_cr &= ~8;
		p->intercept_write_cr &= ~8;
		asm_rdmsr64 (0x277, &p->g_pat);	/* 0x277 = IA32_PAT */
	}
}
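
The bitmap sizes match the AMD APM: the I/O permission map covers the whole 64 K port space in 12 KiB (three pages) and the MSR permission map takes 8 KiB (two pages); filling both with 0xFF intercepts every port and MSR access by default. As a sketch of how a single low MSR could later be exposed to the guest (helper name and placement are illustrative; offsets follow the APM's MSRPM layout of two permission bits per MSR):

/* Illustrative: clear the read/write intercept bits of one MSR in the
 * 0x0-0x1FFF range, i.e. within the first 2 KiB of the MSR map. */
static void
msrbmp_allow_low_msr (u8 *msrbmp, u32 msr)
{
	u32 bitpos = msr * 2;	/* 2 bits per MSR, read bit first */

	msrbmp[bitpos / 8] &= ~(3 << (bitpos % 8));
}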
Example No. 6: WRMSR pass-through (msr_pass_write_msr)
static bool
msr_pass_write_msr (u32 msrindex, u64 msrdata)
{
	u64 tmp;
	int num;
	struct msrarg m;

	/* FIXME: Exception handling */
	switch (msrindex) {
	case MSR_IA32_BIOS_UPDT_TRIG:
		return ia32_bios_updt (msrdata);
	case MSR_IA32_TSC_ADJUST:
		current->tsc_offset = msrdata;
		current->vmctl.tsc_offset_changed ();
		break;
	case MSR_IA32_TIME_STAMP_COUNTER:
		asm_rdmsr64 (MSR_IA32_TIME_STAMP_COUNTER, &tmp);
		current->tsc_offset = msrdata - tmp;
		current->vmctl.tsc_offset_changed ();
		break;
	case MSR_IA32_APIC_BASE_MSR:
		if (msrdata & MSR_IA32_APIC_BASE_MSR_APIC_GLOBAL_ENABLE_BIT) {
			tmp = msrdata & MSR_IA32_APIC_BASE_MSR_APIC_BASE_MASK;
			if (phys_in_vmm (tmp))
				panic ("relocating APIC Base to VMM address!");
		}
		localapic_change_base_msr (msrdata);
		goto pass;
	case MSR_IA32_X2APIC_ICR:
		localapic_x2apic_icr (msrdata);
		goto pass;
	default:
	pass:
		m.msrindex = msrindex;
		m.msrdata = &msrdata;
		num = callfunc_and_getint (do_write_msr_sub, &m);
		switch (num) {
		case -1:	/* no exception: the WRMSR succeeded */
			break;
		case EXCEPTION_GP:
			return true;	/* report the #GP to the caller */
		default:
			panic ("msr_pass_write_msr: exception %d", num);
		}
	}
	return false;
}
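
do_write_msr_sub () is not among these examples; by symmetry with do_read_msr_sub () in Example No. 3 it is presumably the one-line WRMSR wrapper sketched here:

/* Presumed counterpart of do_read_msr_sub (); the original is not
 * shown in this listing. */
static asmlinkage void
do_write_msr_sub (void *arg)
{
	struct msrarg *p;

	p = arg;
	asm_wrmsr64 (p->msrindex, *p->msrdata);
}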
Example No. 7: enabling SVM on the current CPU (svm_init)
void
svm_init (void)
{
	u64 p;
	u64 tmp;
	void *v;
	ulong efer;

	asm_rdmsr (MSR_IA32_EFER, &efer);
	efer |= MSR_IA32_EFER_SVME_BIT;
	asm_wrmsr (MSR_IA32_EFER, efer);
	asm_rdmsr64 (MSR_AMD_VM_CR, &tmp);
	tmp |= MSR_AMD_VM_CR_DIS_A20M_BIT;	/* disable A20 masking */
	asm_wrmsr64 (MSR_AMD_VM_CR, tmp);
	/* FIXME: the size of the host state area is undocumented */
	alloc_page (&v, &p);
	currentcpu->svm.hsave = v;
	currentcpu->svm.hsave_phys = p;
	asm_wrmsr64 (MSR_AMD_VM_HSAVE_PA, p);
	alloc_page (&v, &p);
	memset (v, 0, PAGESIZE);
	currentcpu->svm.vmcbhost = v;
	currentcpu->svm.vmcbhost_phys = p;
}
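
Setting EFER.SVME is what legalizes VMRUN and the other SVM instructions, and VM_HSAVE_PA (MSR 0xC0010117) must point at a physical page where the processor saves host state across VMRUN. A hedged sketch of the inverse teardown, assuming the same helpers (this function is not in the original):

/* Illustrative: disable SVM on the current CPU once no guest runs. */
static void
svm_exit (void)
{
	ulong efer;

	asm_wrmsr64 (MSR_AMD_VM_HSAVE_PA, 0);	/* drop the host save area */
	asm_rdmsr (MSR_IA32_EFER, &efer);
	efer &= ~MSR_IA32_EFER_SVME_BIT;	/* VMRUN now raises #UD */
	asm_wrmsr (MSR_IA32_EFER, efer);
}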
Example No. 8: switching between EPT and shadow paging (vt_paging_pg_change)
void
vt_paging_pg_change (void)
{
	ulong tmp;
	u64 tmp64;
	bool ept_enable, use_spt;
	ulong cr3;

	ept_enable = ept_enabled ();
	use_spt = !ept_enable;
#ifdef CPU_MMU_SPT_DISABLE
	if (current->u.vt.vr.pg) {
		ulong rflags;
		ulong acr;

		/* If both EPT and "unrestricted guest" were enabled,
		 * the CS could be a data segment.  But
		 * CPU_MMU_SPT_DISABLE disables EPT while the guest
		 * enables paging.  So if the CS is a data segment
		 * here, make it a code segment. */
		if (!ept_enable || !current->u.vt.unrestricted_guest)
			goto cs_is_ok;
		asm_vmread (VMCS_GUEST_CS_ACCESS_RIGHTS, &acr);
		if ((acr & 0xF) != SEGDESC_TYPE_RDWR_DATA_A)
			goto cs_is_ok;
		/* The CS can be a data segment in virtual 8086
		 * mode. */
		asm_vmread (VMCS_GUEST_RFLAGS, &rflags);
		if (rflags & RFLAGS_VM_BIT)
			goto cs_is_ok;
		asm_vmwrite (VMCS_GUEST_CS_ACCESS_RIGHTS,
			     (acr & ~0xF) | SEGDESC_TYPE_EXECREAD_CODE_A);
	cs_is_ok:
		ept_enable = false;
		use_spt = false;
	}
#endif
	if (current->u.vt.ept) {
		asm_vmread (VMCS_PROC_BASED_VMEXEC_CTL2, &tmp);
		tmp &= ~(VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT |
			 VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT);
		tmp |= ept_enable ?
			VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT |
			(current->u.vt.unrestricted_guest ?
			 VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT :
			 0) : 0;
		tmp |= current->u.vt.unrestricted_guest &&
			current->u.vt.pcid_available &&
			current->u.vt.enable_invpcid_available ?
			VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_INVPCID_BIT : 0;
		asm_vmwrite (VMCS_PROC_BASED_VMEXEC_CTL2, tmp);
		asm_vmread (VMCS_VMEXIT_CTL, &tmp);
		if (ept_enable)
			tmp |= (VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT |
				VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT);
		else
			tmp &= ~(VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT |
				 VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT);
		asm_vmwrite (VMCS_VMEXIT_CTL, tmp);
		asm_vmread (VMCS_VMENTRY_CTL, &tmp);
		if (ept_enable)
			tmp |= VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT;
		else
			tmp &= ~VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT;
		asm_vmwrite (VMCS_VMENTRY_CTL, tmp);
		if (ept_enable) {
			asm_rdmsr64 (MSR_IA32_PAT, &tmp64);
			asm_vmwrite64 (VMCS_HOST_IA32_PAT, tmp64);
			cache_get_gpat (&tmp64);
			asm_vmwrite64 (VMCS_GUEST_IA32_PAT, tmp64);
		}
	}
	asm_vmread (VMCS_PROC_BASED_VMEXEC_CTL, &tmp);
	if (use_spt)
		tmp |= VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT;
	else
		tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT;
	if (current->u.vt.cr3exit_controllable) {
		if (use_spt && current->u.vt.cr3exit_off) {
			cr3 = vt_read_cr3 ();
			tmp |= VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT;
			tmp |= VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT;
			current->u.vt.cr3exit_off = false;
			vt_write_cr3 (cr3);
		} else if (!use_spt && !current->u.vt.cr3exit_off) {
			cr3 = vt_read_cr3 ();
			tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT;
			tmp &= ~VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT;
			current->u.vt.cr3exit_off = true;
			vt_write_cr3 (cr3);
		}
	}
	asm_vmwrite (VMCS_PROC_BASED_VMEXEC_CTL, tmp);
	tmp = vt_read_cr0 ();
	asm_vmwrite (VMCS_GUEST_CR0, vt_paging_apply_fixed_cr0 (tmp));
	if (use_spt)
		asm_vmwrite (VMCS_GUEST_CR3, current->u.vt.spt_cr3);
	else
		vt_update_vmcs_guest_cr3 ();
	tmp = vt_read_cr4 ();
	asm_vmwrite (VMCS_GUEST_CR4, vt_paging_apply_fixed_cr4 (tmp));
	current->u.vt.handle_pagefault = use_spt;
	vt_update_exception_bmp ();
	if (ept_enable)
		vt_ept_clear_all ();
}
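
For reference, the VMCS control bits toggled above sit at these architectural positions (Intel SDM, Vol. 3); the original constant definitions are not shown, so take these as assumed equivalents:

#define VMCS_PROC_BASED_VMEXEC_CTL_INVLPGEXIT_BIT		(1 << 9)
#define VMCS_PROC_BASED_VMEXEC_CTL_CR3LOADEXIT_BIT		(1 << 15)
#define VMCS_PROC_BASED_VMEXEC_CTL_CR3STOREEXIT_BIT		(1 << 16)
#define VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_EPT_BIT		(1 << 1)
#define VMCS_PROC_BASED_VMEXEC_CTL2_UNRESTRICTED_GUEST_BIT	(1 << 7)
#define VMCS_PROC_BASED_VMEXEC_CTL2_ENABLE_INVPCID_BIT		(1 << 12)
#define VMCS_VMEXIT_CTL_SAVE_IA32_PAT_BIT			(1 << 18)
#define VMCS_VMEXIT_CTL_LOAD_IA32_PAT_BIT			(1 << 19)
#define VMCS_VMENTRY_CTL_LOAD_IA32_PAT_BIT			(1 << 14)

When shadow paging is in use, the function re-enables INVLPG and CR3 load/store exiting so the shadow tables can track the guest; the CR3 read-back around each toggle keeps the guest-visible CR3 value consistent across the mode switch.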