void vmm_scheduler_timer_event(vmm_timer_event_t * ev)
{
	vmm_vcpu_t * vcpu = sched.current_vcpu;

	if (vcpu) {
		if (!vcpu->preempt_count) {
			/* Preemption allowed so pick the next vcpu */
			vmm_scheduler_next(ev, ev->cpu_regs);
		} else {
			/* Preemption disabled so retry on next tick */
			vmm_timer_event_restart(ev);
		}
	} else {
		/* No current vcpu so just schedule one */
		vmm_scheduler_next(ev, ev->cpu_regs);
	}
}
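The preempt_count test above only has teeth if critical sections increment the counter before entering non-preemptible code. Below is a minimal sketch of such a disable/enable pair against the same global sched instance; the helper names vmm_scheduler_preempt_disable()/vmm_scheduler_preempt_enable() and the vmm_cpu_irq_save()/vmm_cpu_irq_restore() calls are assumptions, not taken from the code above.

/* Sketch only: assumed helpers; operates on the same sched.current_vcpu
 * and vcpu->preempt_count used by vmm_scheduler_timer_event() above. */
void vmm_scheduler_preempt_disable(void)
{
	irq_flags_t flags;
	vmm_vcpu_t * vcpu;

	flags = vmm_cpu_irq_save();	/* assumed IRQ save helper */
	vcpu = sched.current_vcpu;
	if (vcpu) {
		/* Non-zero preempt_count makes the timer event above
		 * restart itself instead of context switching */
		vcpu->preempt_count++;
	}
	vmm_cpu_irq_restore(flags);	/* assumed IRQ restore helper */
}

void vmm_scheduler_preempt_enable(void)
{
	irq_flags_t flags;
	vmm_vcpu_t * vcpu;

	flags = vmm_cpu_irq_save();
	vcpu = sched.current_vcpu;
	if (vcpu && vcpu->preempt_count) {
		/* Once the count drops to zero, the next timer event
		 * is free to context switch again */
		vcpu->preempt_count--;
	}
	vmm_cpu_irq_restore(flags);
}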
void vmm_scheduler_irq_exit(arch_regs_t *regs)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);
	struct vmm_vcpu *vcpu = NULL;

	/* Determine current vcpu */
	vcpu = schedp->current_vcpu;
	if (!vcpu) {
		return;
	}

	/* If current vcpu is not RUNNING or yield on exit is set
	 * then context switch
	 */
	if ((vmm_manager_vcpu_get_state(vcpu) != VMM_VCPU_STATE_RUNNING) ||
	    schedp->yield_on_irq_exit) {
		vmm_scheduler_next(schedp, &schedp->ev, schedp->irq_regs);
		schedp->yield_on_irq_exit = FALSE;
	}

	/* VCPU irq processing */
	vmm_vcpu_irq_process(vcpu, regs);

	/* Indicate that we have exited IRQ */
	schedp->irq_context = FALSE;

	/* Clear pointer to IRQ registers */
	schedp->irq_regs = NULL;
}
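This exit path clears per-CPU IRQ state (irq_context, irq_regs, yield_on_irq_exit) that must have been set on the way into the interrupt. A minimal sketch of the matching entry hook follows; the name vmm_scheduler_irq_enter() and its signature are assumptions inferred from the state the exit path clears, not taken from the code above.

/* Hypothetical entry hook paired with vmm_scheduler_irq_exit() above;
 * name and signature are assumed from the fields the exit path clears. */
void vmm_scheduler_irq_enter(arch_regs_t *regs)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Indicate that we are now in IRQ context */
	schedp->irq_context = TRUE;

	/* Save pointer to IRQ registers so that a context switch on
	 * exit (the vmm_scheduler_next() call above) has a frame */
	schedp->irq_regs = regs;

	/* Start with yield on exit disabled */
	schedp->yield_on_irq_exit = FALSE;
}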
static void vmm_scheduler_switch(struct vmm_scheduler_ctrl *schedp,
				 arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu = schedp->current_vcpu;

	if (!regs) {
		/* This should never happen !!! */
		vmm_panic("%s: null pointer to regs.\n", __func__);
	}

	if (vcpu) {
		if (!vcpu->preempt_count) {
			vmm_scheduler_next(schedp, &schedp->ev, regs);
		} else {
			vmm_timer_event_restart(&schedp->ev);
		}
	} else {
		vmm_scheduler_next(schedp, &schedp->ev, regs);
	}
}
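vmm_scheduler_switch() is the per-CPU successor of vmm_scheduler_timer_event() above: the preempt_count logic is identical, but the scheduler state and timer event now live in struct vmm_scheduler_ctrl. A plausible caller is the per-CPU timeslice tick handler; the sketch below is an assumption about that wiring, and scheduler_timeslice_event is a hypothetical name.

/* Hypothetical per-CPU tick handler; assumes the timer core passes the
 * expired event and that the IRQ entry path saved a register frame in
 * schedp->irq_regs. */
static void scheduler_timeslice_event(struct vmm_timer_event *ev)
{
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	if (schedp->irq_regs) {
		/* vmm_scheduler_switch() panics on NULL regs, so only
		 * switch once a register frame has been saved */
		vmm_scheduler_switch(schedp, schedp->irq_regs);
	} else {
		/* No saved frame yet; try again on the next tick */
		vmm_timer_event_restart(ev);
	}
}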
void vmm_scheduler_irq_exit(vmm_user_regs_t * regs)
{
	vmm_vcpu_t * vcpu = NULL;

	/* Determine current vcpu */
	vcpu = sched.current_vcpu;
	if (!vcpu) {
		return;
	}

	/* Schedule next vcpu if state of
	 * current vcpu is not RUNNING
	 */
	if (vcpu->state != VMM_VCPU_STATE_RUNNING) {
		vmm_scheduler_next(sched.ev, regs);
		return;
	}

	/* VCPU irq processing */
	vmm_vcpu_irq_process(regs);

	/* Indicate that we have exited IRQ */
	sched.irq_context = FALSE;
}
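Compared with the per-CPU vmm_scheduler_irq_exit() above, this earlier single-instance variant reads vcpu->state directly rather than going through vmm_manager_vcpu_get_state(), returns immediately after scheduling the next vcpu instead of falling through to IRQ processing, and has no irq_regs or yield_on_irq_exit bookkeeping to clear.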