/* Finish IRQ handling on this host CPU: reschedule if needed, deliver
 * pending vcpu interrupts, then leave IRQ context.
 */
void vmm_scheduler_irq_exit(arch_regs_t *regs)
{
	struct vmm_scheduler_ctrl *ctrl = &this_cpu(sched);
	struct vmm_vcpu *cur = ctrl->current_vcpu;

	/* No active vcpu on this CPU means there is nothing to do */
	if (!cur) {
		return;
	}

	/* Context switch when the active vcpu is no longer RUNNING,
	 * or when an explicit yield-on-IRQ-exit was requested.
	 */
	if ((vmm_manager_vcpu_get_state(cur) != VMM_VCPU_STATE_RUNNING) ||
	    ctrl->yield_on_irq_exit) {
		vmm_scheduler_next(ctrl, &ctrl->ev, ctrl->irq_regs);
		ctrl->yield_on_irq_exit = FALSE;
	}

	/* Process pending interrupts for the vcpu that was current on entry */
	vmm_vcpu_irq_process(cur, regs);

	/* We are no longer in IRQ context */
	ctrl->irq_context = FALSE;

	/* The saved IRQ register pointer is stale now; drop it */
	ctrl->irq_regs = NULL;
}
/* Finish IRQ handling: reschedule if the current vcpu stopped RUNNING,
 * otherwise deliver pending vcpu interrupts, then leave IRQ context.
 *
 * @regs: saved register frame of the interrupted context.
 */
void vmm_scheduler_irq_exit(vmm_user_regs_t * regs)
{
	/* Determine current vcpu; nothing to do if none is active */
	vmm_vcpu_t * vcpu = sched.current_vcpu;
	if (!vcpu) {
		return;
	}

	/* Schedule next vcpu if state of
	 * current vcpu is not RUNNING */
	if (vcpu->state != VMM_VCPU_STATE_RUNNING) {
		vmm_scheduler_next(sched.ev, regs);
		/* BUGFIX: the early return previously skipped clearing
		 * irq_context, leaving the scheduler stuck believing it
		 * was still in IRQ context. Clear it on this path too,
		 * matching the normal exit path below.
		 */
		sched.irq_context = FALSE;
		return;
	}

	/* VCPU irq processing */
	vmm_vcpu_irq_process(regs);

	/* Indicate that we have exited IRQ */
	sched.irq_context = FALSE;
}