asmlinkage void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;

    /* Block event injection when single step with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vmx.exec_control);
        return;
    }

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);
    hvm_dirq_assist(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }

        if ( (intblk != hvm_intblk_none) ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
        {
            enable_intr_window(v, intack);
            goto out;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );
/*
 * Return 1 if the interrupt has been dealt with here (routed towards the L1
 * hypervisor or deferred via an interrupt window); 0 if the caller should
 * continue with the normal injection path.
 */
static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctrl;

    if ( nvmx_intr_blocked(v) != hvm_intblk_none )
    {
        enable_intr_window(v, intack);
        return 1;
    }

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( intack.source == hvm_intsrc_pic ||
             intack.source == hvm_intsrc_lapic )
        {
            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
                               PIN_BASED_VM_EXEC_CONTROL);
            if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
                return 0;

            vmx_inject_extint(intack.vector);

            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS);
            if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
            {
                /* for now, duplicate the ack path in vmx_intr_assist */
                hvm_vcpu_ack_pending_irq(v, intack);
                pt_intr_post(v, intack);

                intack = hvm_vcpu_has_pending_irq(v);
                if ( unlikely(intack.source != hvm_intsrc_none) )
                    enable_intr_window(v, intack);
            }
            else
                enable_intr_window(v, intack);

            return 1;
        }
    }

    return 0;
}
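/*
 * For context: a sketch of the interrupt bookkeeping types that the functions
 * in this listing pass around. This is paraphrased from the Xen HVM headers;
 * field widths, enumerator order and the comments here are approximations,
 * not a verbatim copy of any particular tree.
 */
enum hvm_intsrc {
    hvm_intsrc_none,    /* no interrupt pending */
    hvm_intsrc_pic,     /* legacy i8259 PIC */
    hvm_intsrc_lapic,   /* virtual local APIC */
    hvm_intsrc_nmi,     /* non-maskable interrupt */
    hvm_intsrc_mce,     /* machine-check event */
    hvm_intsrc_vector,  /* directly injected vector */
};

struct hvm_intack {
    uint8_t source;     /* enum hvm_intsrc */
    uint8_t vector;
};

enum hvm_intblk {
    hvm_intblk_none,        /* deliverable right now */
    hvm_intblk_shadow,      /* MOV-SS/STI interrupt shadow */
    hvm_intblk_rflags_ie,   /* RFLAGS.IF clear */
    hvm_intblk_tpr,         /* LAPIC TPR too high for this vector */
    hvm_intblk_nmi_iret,    /* NMI blocked until next IRET */
    hvm_intblk_svm_gif,     /* SVM global interrupt flag clear */
};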
void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;
    int pt_vector = -1;

    /* Block event injection when single step with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        vmx_update_cpu_exec_control(v);
        return;
    }

    /* Crank the handle on interrupt state. */
    pt_vector = pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        if ( unlikely(nvmx_intr_intercept(v, intack)) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( cpu_has_vmx_virtual_intr_delivery )
        {
            /* Set "Interrupt-window exiting" for ExtINT */
            if ( (intblk != hvm_intblk_none) &&
                 ((intack.source == hvm_intsrc_pic) ||
                  (intack.source == hvm_intsrc_vector)) )
            {
                enable_intr_window(v, intack);
                goto out;
            }

            if ( __vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK )
            {
                if ( (intack.source == hvm_intsrc_pic) ||
                     (intack.source == hvm_intsrc_nmi) ||
                     (intack.source == hvm_intsrc_mce) )
                    enable_intr_window(v, intack);

                goto out;
            }
        }
        else if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            tpr_threshold = intack.vector >> 4;
            goto out;
        }
        else if ( (intblk != hvm_intblk_none) ||
                  (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
        {
            enable_intr_window(v, intack);
            goto out;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );
void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    struct hvm_intack intack;
    enum hvm_intblk intblk;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_svm_gif )
        {
            ASSERT(nestedhvm_enabled(v->domain));
            return;
        }

        /* Interrupts for the nested guest are already
         * in the vmcb.
         */
        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
        {
            int rc;

            /* l2 guest was running when an interrupt for
             * the l1 guest occurred.
             */
            rc = nestedsvm_vcpu_interrupt(v, intack);
            switch (rc) {
            case NSVM_INTR_NOTINTERCEPTED:
                /* Inject interrupt into 2nd level guest directly. */
                break;
            case NSVM_INTR_NOTHANDLED:
            case NSVM_INTR_FORCEVMEXIT:
                return;
            case NSVM_INTR_MASKED:
                /* Guest already enabled an interrupt window. */
                return;
            default:
                panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
                      __func__, rc);
            }
        }

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
        {
            svm_enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        svm_enable_intr_window(v, intack);
}
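/*
 * Both paths above fall back on an "interrupt window" when an event cannot be
 * injected yet. Below is a condensed, non-authoritative sketch of what that
 * amounts to on each side, assuming the helpers and VMCS/VMCB field names
 * already visible in this listing (vmx_update_cpu_exec_control, the
 * CPU_BASED_* exec controls, the vmcb_get_*/vmcb_set_* accessors). The real
 * helpers carry extra nested-virtualization and NMI/IRET special cases that
 * are omitted here.
 */

/* VMX: ask hardware to exit as soon as the guest can accept the event. */
static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    /* For a pending NMI, request an NMI window rather than an IRQ window. */
    if ( unlikely(intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;

    if ( !(v->arch.hvm_vmx.exec_control & ctl) )
    {
        v->arch.hvm_vmx.exec_control |= ctl;
        vmx_update_cpu_exec_control(v);
    }
}

/*
 * SVM: there is no window-exiting control; instead, queue a dummy virtual
 * interrupt (VINTR) and intercept it, so Xen gets a VMEXIT the moment the
 * guest can take interrupts and can retry the real injection.
 */
static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    uint32_t general1_intercepts = vmcb_get_general1_intercepts(vmcb);
    vintr_t intr = vmcb_get_vintr(vmcb);

    ASSERT(intack.source != hvm_intsrc_none);

    /* Program the dummy VINTR at the priority of the pending vector. */
    intr.fields.irq     = 1;
    intr.fields.vector  = 0;
    intr.fields.prio    = intack.vector >> 4;
    intr.fields.ign_tpr = (intack.source != hvm_intsrc_lapic);
    vmcb_set_vintr(vmcb, intr);

    /* Intercept its delivery so control bounces back to the hypervisor. */
    vmcb_set_general1_intercepts(
        vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
}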