void
vmx_init_all_rr(VCPU *vcpu)
{
	// enable the VHPT walker (rr.ve, bit 0) for the rid used in guest physical mode
	vcpu->arch.metaphysical_rid_dt |= 1;

	VMX(vcpu, vrr[VRN0]) = 0x38;
	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
	VMX(vcpu, vrr[VRN1]) = 0x38;
	VMX(vcpu, vrr[VRN2]) = 0x38;
	VMX(vcpu, vrr[VRN3]) = 0x38;
	VMX(vcpu, vrr[VRN4]) = 0x38;
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
	VMX(vcpu, vrr[VRN5]) = 0x38;
	VMX(vcpu, vrr[VRN6]) = 0x38;
	VMX(vcpu, vrr[VRN7]) = 0x738;
}
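The 0x38 and 0x738 constants above are raw region-register images. For reference (this decoder is illustrative and not part of the Xen sources), the IA-64 region register keeps the VHPT-enable bit in bit 0, the preferred page size in bits 7:2, and the region id in bits 31:8, so 0x38 decodes to rid 0 with 16KB pages and ve off, while 0x738 keeps the same page size but carries rid 7 for region 7:

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: decode an IA-64 region register image.
 * Field layout: ve (VHPT walker enable) in bit 0, ps (log2 of the
 * preferred page size) in bits 7:2, rid (region id) in bits 31:8. */
static void decode_rr(uint64_t rrval)
{
    unsigned ve  = rrval & 0x1;
    unsigned ps  = (rrval >> 2) & 0x3f;
    unsigned rid = (unsigned)((rrval >> 8) & 0xffffff);
    printf("rr=0x%llx: ve=%u ps=%u (%u KB pages) rid=0x%x\n",
           (unsigned long long)rrval, ve, ps, 1u << (ps - 10), rid);
}

int main(void)
{
    decode_rr(0x38);    /* regions 0-6 above: rid 0, 16KB pages, ve off */
    decode_rr(0x738);   /* region 7 above: same page size, rid 7 */
    return 0;
}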
Example #2
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch ((u64)(reg >> VRN_SHIFT)) {
    case VRN7:
        /* Region 7 also holds the VHPT hash and PAL mappings, so it goes
         * through the dedicated switch path rather than a plain mov-to-rr. */
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu, val),
                           (void *)vcpu->arch.vhpt.hash, pal_vaddr);
        break;
    case VRN4:
        /* Shadow rr4 so the virtual-mode value can be reloaded after a
         * metaphysical (guest physical) mode episode. */
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        /* Same treatment for rr0. */
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }

    return (IA64_NO_FAULT);
}
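For orientation, the index computed by reg >> VRN_SHIFT is the region number, i.e. the top three bits of the 64-bit virtual address (VRN_SHIFT is 61 in the Xen/ia64 headers). A hedged fragment of a caller, with the helper name purely illustrative; like any caller it must expect IA64_RSVDREG_FAULT when the guest supplies a reserved rid:

/* Hypothetical helper (illustrative, not from the Xen tree): program
 * region 5 of a vCPU with a guest-supplied rr value.  The region number
 * sits in bits 63:61 of the address, hence the shift by VRN_SHIFT (61). */
static IA64FAULT set_guest_rr5(VCPU *v, u64 val)
{
    return vmx_vcpu_set_rr(v, 5UL << VRN_SHIFT, val);
}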
void
vmx_load_all_rr(VCPU *vcpu)
{
	unsigned long rr0, rr4;

	/* rr0 and rr4 depend on the vCPU's current MMU mode: either the saved
	 * virtual-mode values or one of the two metaphysical rids. */
	switch (vcpu->arch.arch_vmx.mmu_mode) {
	case VMX_MMU_VIRTUAL:
		rr0 = vcpu->arch.metaphysical_saved_rr0;
		rr4 = vcpu->arch.metaphysical_saved_rr4;
		break;
	case VMX_MMU_PHY_DT:
		rr0 = vcpu->arch.metaphysical_rid_dt;
		rr4 = vcpu->arch.metaphysical_rid_dt;
		break;
	case VMX_MMU_PHY_D:
		rr0 = vcpu->arch.metaphysical_rid_d;
		rr4 = vcpu->arch.metaphysical_rid_d;
		break;
	default:
		panic_domain(NULL, "bad mmu mode value");
	}

	ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
	ia64_dv_serialize_data();
	vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
	ia64_set_pta(VMX(vcpu, mpta));
	vmx_ia64_set_dcr(vcpu);

	ia64_srlz_d();
}
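vmx_load_all_rr() is the bulk-restore path: it picks rr0/rr4 according to the vCPU's MMU mode and reloads the remaining regions, the PTA and the DCR in one pass, so it is naturally called once when switching into a VMX vCPU. A hedged sketch of such a call site; vmx_switch_in and its surrounding structure are hypothetical, not taken from the Xen tree:

/* Hypothetical switch-in helper (illustrative only): once the basic
 * register state of `next' has been restored, bring its MMU view live. */
static void vmx_switch_in(VCPU *next)
{
	vmx_load_all_rr(next);	/* region registers, PTA and DCR */
	/* ... remaining per-vCPU state would be restored here ... */
}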