static int hax_vcpu_interrupt(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;

    /*
     * Try to inject an interrupt if the guest can accept it.
     * Unlike KVM, the HAX kernel module checks eflags itself,
     * rather than leaving that to QEMU.
     */
    if (ht->ready_for_interrupt_injection &&
        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
        int irq;

        irq = cpu_get_pic_interrupt(env);
        if (irq >= 0) {
            hax_inject_interrupt(env, irq);
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
        }
    }

    /*
     * If we have an interrupt but the guest is not ready to receive it,
     * request an interrupt window exit.  This will cause a return to
     * userspace as soon as the guest is ready to receive interrupts.
     */
    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        ht->request_interrupt_window = 1;
    } else {
        ht->request_interrupt_window = 0;
    }
    return 0;
}
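/*
 * Context sketch (an assumption, not verbatim from this source): in QEMU's
 * HAX backend, hax_vcpu_interrupt() is driven from the vcpu run loop before
 * every guest entry.  When request_interrupt_window is set, the kernel
 * module exits back to userspace (HAX_EXIT_INTERRUPT) once the guest can
 * accept an interrupt, and the loop retries the injection.  The loop below
 * is abridged to show only that interaction.
 */
static int hax_vcpu_exec_sketch(CPUArchState *env)
{
    struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;
    struct hax_tunnel *ht = vcpu->tunnel;
    int ret;

    do {
        hax_vcpu_interrupt(env);     /* inject now, or request a window */
        ret = hax_vcpu_run(vcpu);    /* enter the guest */
        if (ret < 0) {
            return ret;
        }
    } while (ht->_exit_status == HAX_EXIT_INTERRUPT); /* window opened: retry */

    return 0;
}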
int process_interrupt(int interrupt_request, CPUState *env)
{
#if defined(TARGET_I386)
    if (interrupt_request & CPU_INTERRUPT_INIT) {
        svm_check_intercept(env, SVM_EXIT_INIT);
        do_cpu_init(env);
        env->exception_index = EXCP_HALTED;
        cpu_loop_exit(env);
    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(env);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            svm_check_intercept(env, SVM_EXIT_SMI);
            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(env);
            return 1;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            return 1;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            return 1;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            svm_check_intercept(env, SVM_EXIT_INTR);
            env->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                        CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified, as
               the program flow was changed */
            return 1;
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            svm_check_intercept(env, SVM_EXIT_VINTR);
            intno = ldl_phys(env->vm_vmcb +
                             offsetof(struct vmcb, control.int_vector));
            do_interrupt_x86_hardirq(env, intno, 1);
            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            return 1;
        }
    }
#endif
    return 0;
}
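/*
 * Caller sketch (an assumption about the surrounding TCG loop, not part of
 * the excerpt): process_interrupt() is meant to be polled from the cpu_exec()
 * loop.  A non-zero return means an interrupt changed the program flow, so
 * the chained-TB pointer must be cleared before looking up the next
 * translation block; next_tb here is a hypothetical local standing in for
 * that pointer.
 */
static void tcg_interrupt_poll_sketch(CPUState *env)
{
    uintptr_t next_tb = 0;

    for (;;) {
        int interrupt_request = env->interrupt_request;

        if (interrupt_request) {
            if (process_interrupt(interrupt_request, env)) {
                /* program flow changed: do not chain from the old TB */
                next_tb = 0;
            }
        }
        /* ... look up and execute the next TB, updating next_tb ... */
    }
}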
static void whpx_vcpu_pre_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    int irq;
    uint8_t tpr;
    WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0};
    UINT32 reg_count = 0;
    WHV_REGISTER_VALUE reg_values[3] = {0};
    WHV_REGISTER_NAME reg_names[3];

    qemu_mutex_lock_iothread();

    /* Inject NMI */
    if (!vcpu->interrupt_in_flight.InterruptionPending &&
        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
            vcpu->interruptable = false;
            new_int.InterruptionType = WHvX64PendingNmi;
            new_int.InterruptionPending = 1;
            new_int.InterruptionVector = 2;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
        }
    }

    /*
     * Force the VCPU out of its inner loop to process any INIT requests or
     * commit pending TPR access.
     */
    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu->exit_request = 1;
        }
        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
            cpu->exit_request = 1;
        }
    }

    /* Get pending hard interruption or replay one that was overwritten */
    if (!vcpu->interrupt_in_flight.InterruptionPending &&
        vcpu->interruptable && (env->eflags & IF_MASK)) {
        assert(!new_int.InterruptionPending);
        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
            irq = cpu_get_pic_interrupt(env);
            if (irq >= 0) {
                new_int.InterruptionType = WHvX64PendingInterrupt;
                new_int.InterruptionPending = 1;
                new_int.InterruptionVector = irq;
            }
        }
    }

    /* Set up the interrupt state if a new one was prepared */
    if (new_int.InterruptionPending) {
        reg_values[reg_count].PendingInterruption = new_int;
        reg_names[reg_count] = WHvRegisterPendingInterruption;
        reg_count += 1;
    }

    /* Sync the TPR to the CR8 if it was modified during the intercept */
    tpr = cpu_get_apic_tpr(x86_cpu->apic_state);
    if (tpr != vcpu->tpr) {
        vcpu->tpr = tpr;
        reg_values[reg_count].Reg64 = tpr;
        cpu->exit_request = 1;
        reg_names[reg_count] = WHvX64RegisterCr8;
        reg_count += 1;
    }

    /* Update the state of the interrupt delivery notification */
    if (!vcpu->window_registered &&
        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        reg_values[reg_count].DeliverabilityNotifications
            .InterruptNotification = 1;
        vcpu->window_registered = 1;
        reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications;
        reg_count += 1;
    }

    qemu_mutex_unlock_iothread();

    if (reg_count) {
        hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                             reg_names, reg_count, reg_values);
        if (FAILED(hr)) {
            error_report("WHPX: Failed to set interrupt state registers,"
                         " hr=%08lx", hr);
        }
    }
}
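/*
 * Counterpart sketch (an assumption, abridged): when the deliverability
 * notification armed above fires, WHvRunVirtualProcessor() returns with
 * WHvRunVpExitReasonX64InterruptWindow.  Clearing window_registered lets
 * whpx_vcpu_pre_run() re-arm the notification and inject the still-pending
 * CPU_INTERRUPT_HARD vector on the next guest entry.
 */
static int whpx_handle_interrupt_window_sketch(struct whpx_vcpu *vcpu,
                                               const WHV_RUN_VP_EXIT_CONTEXT
                                               *exit_ctx)
{
    switch (exit_ctx->ExitReason) {
    case WHvRunVpExitReasonX64InterruptWindow:
        /* The guest can now take interrupts; drop the registration so the
         * next pre-run pass performs the actual injection. */
        vcpu->window_registered = 0;
        return 0;
    default:
        return -1; /* other exit reasons handled elsewhere */
    }
}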