static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( unlikely(tb_init_done) )
    {
        unsigned int intr = __vmread(VM_ENTRY_INTR_INFO);
        HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                    (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
    }

    if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
    {
        /*
         * We set MOV-SS blocking in lieu of STI blocking when delivering an
         * NMI. This is because it is processor-specific whether STI-blocking
         * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
         * (otherwise vmentry will fail on processors that check for STI-
         * blocking) but if the processor does not check for STI-blocking then
         * we may immediately vmexit and hence make no progress!
         * (see SDM 3B 21.3, "Other Causes of VM Exits").
         */
        u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);

        if ( intr_shadow & VMX_INTR_SHADOW_STI )
        {
            /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
            intr_shadow &= ~VMX_INTR_SHADOW_STI;
            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
        }

        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
    }

    if ( !(v->arch.hvm_vmx.exec_control & ctl) )
    {
        v->arch.hvm_vmx.exec_control |= ctl;
        vmx_update_cpu_exec_control(v);
    }
}
asmlinkage void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;

    /* Block event injection when single step with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        vmx_update_cpu_exec_control(v);
        return;
    }

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }

        if ( (intblk != hvm_intblk_none) ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
        {
            enable_intr_window(v, intack);
            goto out;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );
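The excerpt ends at the acknowledge loop, before the injection step and the `out:` label that the `goto` statements target. A hedged sketch of the omitted tail, based on the corresponding-era upstream Xen tree; the injection helpers `vmx_inject_nmi`, `vmx_inject_extint`, and `pt_intr_post` are assumptions drawn from that tree, not from this document:

    /* Sketch of the truncated tail; not guaranteed verbatim upstream code. */
    if ( intack.source == hvm_intsrc_nmi )
    {
        vmx_inject_nmi();
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        vmx_inject_extint(intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        enable_intr_window(v, intack);

 out:
    if ( cpu_has_vmx_tpr_shadow )
        __vmwrite(TPR_THRESHOLD, tpr_threshold);
}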
void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;
    int pt_vector = -1;

    /* Block event injection when single step with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        vmx_update_cpu_exec_control(v);
        return;
    }

    /* Crank the handle on interrupt state. */
    if ( is_hvm_vcpu(v) )
        pt_vector = pt_update_irq(v);

    do {
        unsigned long intr_info;

        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        if ( unlikely(nvmx_intr_intercept(v, intack)) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( cpu_has_vmx_virtual_intr_delivery )
        {
            /* Set "Interrupt-window exiting" for ExtINT and NMI. */
            if ( (intblk != hvm_intblk_none) &&
                 (intack.source == hvm_intsrc_pic ||
                  intack.source == hvm_intsrc_vector ||
                  intack.source == hvm_intsrc_nmi) )
            {
                vmx_enable_intr_window(v, intack);
                goto out;
            }

            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
            if ( intr_info & INTR_INFO_VALID_MASK )
            {
                if ( (intack.source == hvm_intsrc_pic) ||
                     (intack.source == hvm_intsrc_nmi) ||
                     (intack.source == hvm_intsrc_mce) )
                    vmx_enable_intr_window(v, intack);
                goto out;
            }
        }
        else if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }
        else if ( intblk != hvm_intblk_none )
        {
            vmx_enable_intr_window(v, intack);
            goto out;
        }
        else
        {
            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
            if ( intr_info & INTR_INFO_VALID_MASK )
            {
                vmx_enable_intr_window(v, intack);
                goto out;
            }
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );
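This newer version is likewise truncated at the acknowledge loop. Its real tail additionally handles virtual interrupt delivery (EOI-exit bitmap and guest-interrupt-status updates, where `pt_vector` is consumed); that path is elided below. A simplified, hedged sketch under the assumption that the legacy injection path mirrors the older version, with the TPR-threshold write suppressed when virtual interrupt delivery handles TPR in hardware:

    /*
     * Sketch of the truncated tail. The virtual-interrupt-delivery injection
     * path (which uses pt_vector) is deliberately elided here.
     */
    if ( intack.source == hvm_intsrc_nmi )
    {
        vmx_inject_nmi();
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        vmx_inject_extint(intack.vector, intack.source);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        vmx_enable_intr_window(v, intack);

 out:
    if ( !cpu_has_vmx_virtual_intr_delivery && cpu_has_vmx_tpr_shadow )
        __vmwrite(TPR_THRESHOLD, tpr_threshold);
}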