Example #1
/* Recover always follows prepare */
void
recover_if_physical_mode(VCPU *vcpu)
{
    /* If the guest is running in physical (metaphysical) mode, restore
     * the physical RIDs that prepare_if_physical_mode() swapped out. */
    if (!is_virtual_mode(vcpu))
        switch_to_physical_rid(vcpu);
    return;
}
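The comment above states the contract: recover_if_physical_mode() always pairs with prepare_if_physical_mode() (Example #2). A minimal caller sketch under that contract; do_work_with_virtual_rids() is an illustrative placeholder, not a real Xen function:

/* Sketch only: bracket work that needs the guest's virtual RIDs loaded. */
prepare_if_physical_mode(vcpu);   /* load virtual RIDs if in physical mode */
do_work_with_virtual_rids(vcpu);  /* illustrative placeholder */
recover_if_physical_mode(vcpu);   /* restore physical RIDs afterwards */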
Example #2
void
prepare_if_physical_mode(VCPU *vcpu)
{
    /* While the guest is in physical (metaphysical) mode, temporarily
     * switch to the virtual RIDs; recover_if_physical_mode() undoes it. */
    if (!is_virtual_mode(vcpu))
        switch_to_virtual_rid(vcpu);
    return;
}
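What the two switch helpers amount to, judging from the metaphysical_saved_rr0/rr4 bookkeeping in Example #3: physical mode runs regions 0 and 4 on Xen-chosen metaphysical RIDs, while virtual mode runs them on RIDs derived from the guest's rr values. A rough sketch under that assumption; apart from the saved_rr fields and ia64_set_rr(), the names are illustrative and the real switch_to_virtual_rid() may differ in detail:

/* Sketch only: reload rr0/rr4 when leaving physical mode. */
static void switch_to_virtual_rid_sketch(VCPU *vcpu)
{
    ia64_set_rr(0UL << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_set_rr(4UL << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();    /* serialize data references after rr writes */
}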
Example #3
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    /* Reject RID values that are reserved for this guest. */
    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    /* Record the guest's view of the region register, then update the
     * machine state as needed for the affected virtual region. */
    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch ((u64)(reg >> VRN_SHIFT)) {
    case VRN7:
        /* Region 7 also carries the VHPT and PAL mappings, so it needs
         * the dedicated switch routine. */
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu, val),
                           (void *)vcpu->arch.vhpt.hash, pal_vaddr);
        break;
    case VRN4:
        /* Save the machine value so physical-mode switches can restore
         * it later; only load the hardware rr while in virtual mode. */
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    default:
        /* The remaining regions are never shadowed for physical mode;
         * load the hardware rr directly if this vCPU is running. */
        if (likely(vcpu == current))
            ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }

    return IA64_NO_FAULT;
}
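For orientation on the switch above: an ia64 virtual address carries its virtual region number in the top three bits, so reg >> VRN_SHIFT selects one of eight region registers, and only regions 0 and 4 get the metaphysical bookkeeping. A small decoding sketch with assumed constant values; check the Xen headers for the authoritative definitions:

/* Assumed values, for illustration only. */
#define VRN_SHIFT 61
#define VRN0 0x0UL
#define VRN4 0x4UL
#define VRN7 0x7UL

/* The VRN7 case uses vmx_switch_rr7() because, as its arguments show,
 * region 7 also maps the VHPT hash table and PAL. */
static inline u64 vrn_of(u64 va)
{
    return va >> VRN_SHIFT;    /* virtual region number, 0..7 */
}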