static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctrl;

    /* If blocked by L1's tpr, then nothing to do. */
    if ( nestedhvm_vcpu_in_guestmode(v) &&
         hvm_interrupt_blocked(v, intack) == hvm_intblk_tpr )
        return 1;

    if ( nvmx_intr_blocked(v) != hvm_intblk_none )
    {
        vmx_enable_intr_window(v, intack);
        return 1;
    }

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        ctrl = get_vvmcs(v, PIN_BASED_VM_EXEC_CONTROL);
        if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
            return 0;

        if ( intack.source == hvm_intsrc_pic ||
             intack.source == hvm_intsrc_lapic )
        {
            vmx_inject_extint(intack.vector, intack.source);

            ctrl = get_vvmcs(v, VM_EXIT_CONTROLS);
            if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
            {
                /* for now, duplicate the ack path in vmx_intr_assist */
                hvm_vcpu_ack_pending_irq(v, intack);
                pt_intr_post(v, intack);

                intack = hvm_vcpu_has_pending_irq(v);
                if ( unlikely(intack.source != hvm_intsrc_none) )
                    vmx_enable_intr_window(v, intack);
            }
            else
                vmx_enable_intr_window(v, intack);

            return 1;
        }
        else if ( intack.source == hvm_intsrc_vector )
        {
            vmx_inject_extint(intack.vector, intack.source);
            return 1;
        }
    }

    return 0;
}
bool_t nestedhvm_is_n2(struct vcpu *v)
{
    if (!nestedhvm_enabled(v->domain) ||
        nestedhvm_vmswitch_in_progress(v) ||
        !nestedhvm_paging_mode_hap(v))
        return 0;

    if (nestedhvm_vcpu_in_guestmode(v))
        return 1;
    return 0;
}
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v)
{
    int r = hvm_intblk_none;
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( nvcpu->nv_vmexit_pending ||
             nvcpu->nv_vmswitch_in_progress ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
            r = hvm_intblk_rflags_ie;
    }
    else if ( nvcpu->nv_vmentry_pending )
        r = hvm_intblk_rflags_ie;

    return r;
}
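/*
 * For reference, a paraphrased sketch of the interrupt bookkeeping types the
 * routines above and below pass around (cf. Xen's include/asm-x86/hvm/hvm.h
 * and include/asm-x86/hvm/irq.h). Exact members and ordering can differ
 * between Xen versions, so treat this as illustrative rather than
 * authoritative.
 */
struct hvm_intack {
    uint8_t source;   /* one of the hvm_intsrc_* values below */
    uint8_t vector;   /* vector to deliver for that source */
};

enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi,
    hvm_intsrc_mce,
    hvm_intsrc_vector
};

enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret,  /* NMI blocked until IRET */
    hvm_intblk_svm_gif,   /* GIF cleared */
};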
static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctrl;

    if ( nvmx_intr_blocked(v) != hvm_intblk_none )
    {
        enable_intr_window(v, intack);
        return 1;
    }

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( intack.source == hvm_intsrc_pic ||
             intack.source == hvm_intsrc_lapic )
        {
            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
                               PIN_BASED_VM_EXEC_CONTROL);
            if ( !(ctrl & PIN_BASED_EXT_INTR_MASK) )
                return 0;

            vmx_inject_extint(intack.vector);

            ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, VM_EXIT_CONTROLS);
            if ( ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
            {
                /* for now, duplicate the ack path in vmx_intr_assist */
                hvm_vcpu_ack_pending_irq(v, intack);
                pt_intr_post(v, intack);

                intack = hvm_vcpu_has_pending_irq(v);
                if ( unlikely(intack.source != hvm_intsrc_none) )
                    enable_intr_window(v, intack);
            }
            else
                enable_intr_window(v, intack);

            return 1;
        }
    }

    return 0;
}
void svm_intr_assist(void)
{
    struct vcpu *v = current;
    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    struct hvm_intack intack;
    enum hvm_intblk intblk;

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            return;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_svm_gif ) {
            ASSERT(nestedhvm_enabled(v->domain));
            return;
        }

        /* Interrupts for the nested guest are already
         * in the vmcb.
         */
        if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
        {
            int rc;

            /* The l2 guest was running when an interrupt for
             * the l1 guest occurred.
             */
            rc = nestedsvm_vcpu_interrupt(v, intack);
            switch (rc) {
            case NSVM_INTR_NOTINTERCEPTED:
                /* Inject interrupt into 2nd level guest directly. */
                break;
            case NSVM_INTR_NOTHANDLED:
            case NSVM_INTR_FORCEVMEXIT:
                return;
            case NSVM_INTR_MASKED:
                /* Guest already enabled an interrupt window. */
                return;
            default:
                panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
                      __func__, rc);
            }
        }

        /*
         * Pending IRQs must be delayed if:
         * 1. An event is already pending. This is despite the fact that SVM
         *    provides a VINTR delivery method quite separate from the EVENTINJ
         *    mechanism. The event delivery can arbitrarily delay the injection
         *    of the vintr (for example, if the exception is handled via an
         *    interrupt gate, hence zeroing RFLAGS.IF). In the meantime:
         *    - the vTPR could be modified upwards, so we need to wait until
         *      the exception is delivered before we can safely decide that an
         *      interrupt is deliverable; and
         *    - the guest might look at the APIC/PIC state, so we ought not to
         *      have cleared the interrupt out of the IRR.
         * 2. The IRQ is masked.
         */
        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
        {
            svm_enable_intr_window(v, intack);
            return;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        svm_inject_nmi(v);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        svm_inject_extint(v, intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        svm_enable_intr_window(v, intack);
}