void vmm_vcpu_irq_process(struct vmm_vcpu *vcpu, arch_regs_t *regs)
{
	/* For a non-normal VCPU don't do anything */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* If VCPU is not in an interruptible state then don't do anything */
	if (!(vmm_manager_vcpu_get_state(vcpu) &
	      VMM_VCPU_STATE_INTERRUPTIBLE)) {
		return;
	}

	/* Proceed only if we have pending execute */
	if (arch_atomic_dec_if_positive(&vcpu->irqs.execute_pending) >= 0) {
		int irq_no = -1;
		u32 i, tmp_prio, irq_count = vcpu->irqs.irq_count;
		u32 irq_prio = 0;

		/* Find the IRQ number to process */
		for (i = 0; i < irq_count; i++) {
			if (arch_atomic_read(&vcpu->irqs.irq[i].assert) ==
			    ASSERTED) {
				tmp_prio = arch_vcpu_irq_priority(vcpu, i);
				if (tmp_prio > irq_prio) {
					irq_no = i;
					irq_prio = tmp_prio;
				}
			}
		}
		if (irq_no == -1) {
			return;
		}

		/* If an IRQ number was found then execute it */
		if (arch_atomic_cmpxchg(&vcpu->irqs.irq[irq_no].assert,
					ASSERTED, PENDING) == ASSERTED) {
			if (arch_vcpu_irq_execute(vcpu, regs, irq_no,
					vcpu->irqs.irq[irq_no].reason) == VMM_OK) {
				arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
						  DEASSERTED);
				arch_atomic64_inc(&vcpu->irqs.execute_count);
			} else {
				/* arch_vcpu_irq_execute() may have failed
				 * because the VCPU was already processing a
				 * VCPU IRQ, hence increment the execute
				 * pending count to try again next time.
				 */
				arch_atomic_inc(&vcpu->irqs.execute_pending);
				arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
						  ASSERTED);
			}
		}
	}
}
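The core of this function is the per-IRQ assert-state handshake: a candidate IRQ is claimed by atomically moving it from ASSERTED to PENDING with a compare-and-exchange, written back to DEASSERTED on successful delivery, and restored to ASSERTED if delivery fails so a later pass can retry. Below is a minimal standalone sketch of that handshake using C11 atomics rather than Xvisor's arch_atomic_*() API; try_deliver() and deliver() are hypothetical names used only for illustration.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical per-IRQ assert states mirroring the ones used above. */
enum irq_assert_state { DEASSERTED, ASSERTED, PENDING };

/* Claim an asserted IRQ for delivery: only the caller that wins the
 * ASSERTED -> PENDING transition may deliver it.  On delivery failure
 * the IRQ is put back to ASSERTED so a later pass can retry, exactly
 * as the else-branch above does.
 */
static bool try_deliver(_Atomic int *assert, bool (*deliver)(void))
{
	int expected = ASSERTED;

	if (!atomic_compare_exchange_strong(assert, &expected, PENDING)) {
		return false;	/* not asserted, or another path owns it */
	}
	if (deliver()) {
		atomic_store(assert, DEASSERTED);	/* delivered */
		return true;
	}
	atomic_store(assert, ASSERTED);			/* retry later */
	return false;
}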
int vmm_vcpu_irq_wait_timeout(struct vmm_vcpu *vcpu, u64 nsecs)
{
	irq_flags_t flags;
	bool try_vcpu_pause = FALSE;

	/* Sanity checks */
	if (!vcpu || !vcpu->is_normal) {
		return VMM_EFAIL;
	}

	/* Lock VCPU WFI */
	vmm_spin_lock_irqsave_lite(&vcpu->irqs.wfi.lock, flags);

	if (!vcpu->irqs.wfi.state &&
	    !arch_atomic_read(&vcpu->irqs.execute_pending)) {
		try_vcpu_pause = TRUE;

		/* Set wait for irq state */
		vcpu->irqs.wfi.state = TRUE;

		/* Start wait for irq timeout event */
		if (!nsecs) {
			nsecs = CONFIG_WFI_TIMEOUT_SECS * 1000000000ULL;
		}
		vmm_timer_event_start(vcpu->irqs.wfi.priv, nsecs);
	}

	/* Unlock VCPU WFI */
	vmm_spin_unlock_irqrestore_lite(&vcpu->irqs.wfi.lock, flags);

	/* Try to pause the VCPU */
	if (try_vcpu_pause) {
		vmm_manager_vcpu_pause(vcpu);
	}

	return VMM_OK;
}
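Note how the decision to pause is made under the WFI spinlock, while the potentially sleeping vmm_manager_vcpu_pause() call is deferred until after the lock is released. A plausible caller is an architecture's guest WFI/HLT trap handler; the sketch below is a hypothetical caller, not code from Xvisor, and only relies on the signature shown above and on the rule that a zero timeout falls back to CONFIG_WFI_TIMEOUT_SECS seconds.

/* Hypothetical caller sketch: park the VCPU until an IRQ arrives or the
 * timeout expires (the wake-up path lives elsewhere in the IRQ code). */
static void emulate_wfi(struct vmm_vcpu *vcpu)
{
	vmm_vcpu_irq_wait_timeout(vcpu, 0);	/* default timeout */
	/* or: vmm_vcpu_irq_wait_timeout(vcpu, 10000000ULL) for ~10 ms */
}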
int vmm_scheduler_state_change(struct vmm_vcpu *vcpu, u32 new_state)
{
	u64 tstamp;
	int rc = VMM_OK;
	irq_flags_t flags;
	bool preempt = FALSE;
	u32 chcpu = vmm_smp_processor_id(), vhcpu;
	struct vmm_scheduler_ctrl *schedp;
	u32 current_state;

	if (!vcpu) {
		return VMM_EFAIL;
	}

	vmm_write_lock_irqsave_lite(&vcpu->sched_lock, flags);

	vhcpu = vcpu->hcpu;
	schedp = &per_cpu(sched, vhcpu);

	current_state = arch_atomic_read(&vcpu->state);

	switch (new_state) {
	case VMM_VCPU_STATE_UNKNOWN:
		/* Existing VCPU being destroyed */
		rc = vmm_schedalgo_vcpu_cleanup(vcpu);
		break;
	case VMM_VCPU_STATE_RESET:
		if (current_state == VMM_VCPU_STATE_UNKNOWN) {
			/* New VCPU */
			rc = vmm_schedalgo_vcpu_setup(vcpu);
		} else if (current_state != VMM_VCPU_STATE_RESET) {
			/* Existing VCPU */
			/* Make sure VCPU is not in a ready queue */
			if ((schedp->current_vcpu != vcpu) &&
			    (current_state == VMM_VCPU_STATE_READY)) {
				if ((rc = rq_detach(schedp, vcpu))) {
					break;
				}
			}
			/* Make sure current VCPU is preempted */
			if ((schedp->current_vcpu == vcpu) &&
			    (current_state == VMM_VCPU_STATE_RUNNING)) {
				preempt = TRUE;
			}
			vcpu->reset_count++;
			if ((rc = arch_vcpu_init(vcpu))) {
				break;
			}
			if ((rc = vmm_vcpu_irq_init(vcpu))) {
				break;
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_READY:
		if ((current_state == VMM_VCPU_STATE_RESET) ||
		    (current_state == VMM_VCPU_STATE_PAUSED)) {
			/* Enqueue VCPU to ready queue */
			rc = rq_enqueue(schedp, vcpu);
			if (!rc && (schedp->current_vcpu != vcpu)) {
				preempt = rq_prempt_needed(schedp);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:
		if ((current_state == VMM_VCPU_STATE_READY) ||
		    (current_state == VMM_VCPU_STATE_RUNNING)) {
			/* Expire timer event if current VCPU
			 * is paused or halted
			 */
			if (schedp->current_vcpu == vcpu) {
				preempt = TRUE;
			} else if (current_state == VMM_VCPU_STATE_READY) {
				/* Make sure VCPU is not in a ready queue */
				rc = rq_detach(schedp, vcpu);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	}

	if (rc == VMM_OK) {
		tstamp = vmm_timer_timestamp();
		switch (current_state) {
		case VMM_VCPU_STATE_READY:
			vcpu->state_ready_nsecs += tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_RUNNING:
			vcpu->state_running_nsecs += tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_PAUSED:
			vcpu->state_paused_nsecs += tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_HALTED:
			vcpu->state_halted_nsecs += tstamp - vcpu->state_tstamp;
			break;
		default:
			break;
		}
		if (new_state == VMM_VCPU_STATE_RESET) {
			vcpu->state_ready_nsecs = 0;
			vcpu->state_running_nsecs = 0;
			vcpu->state_paused_nsecs = 0;
			vcpu->state_halted_nsecs = 0;
			vcpu->reset_tstamp = tstamp;
		}
		arch_atomic_write(&vcpu->state, new_state);
		vcpu->state_tstamp = tstamp;
	}

	vmm_write_unlock_irqrestore_lite(&vcpu->sched_lock, flags);

	if (preempt && schedp->current_vcpu) {
		if (chcpu == vhcpu) {
			if (schedp->current_vcpu->is_normal) {
				schedp->yield_on_irq_exit = TRUE;
			} else if (schedp->irq_context) {
				vmm_scheduler_preempt_orphan(schedp->irq_regs);
			} else {
				arch_vcpu_preempt_orphan();
			}
		} else {
			vmm_smp_ipi_async_call(vmm_cpumask_of(vhcpu),
					       scheduler_ipi_resched,
					       NULL, NULL, NULL);
		}
	}

	return rc;
}
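The switch statement encodes a small state machine: destruction (UNKNOWN) is always attempted, RESET is accepted from any state except RESET itself, READY only follows RESET or PAUSED, and PAUSED/HALTED only follow READY or RUNNING. There is no case for RUNNING because the scheduler moves a VCPU to RUNNING directly in vmm_scheduler_next() below rather than through this function. The following hypothetical helper, which is not part of Xvisor, condenses those checks into one predicate for readability.

/* Condensed view of the transition checks above (hypothetical helper):
 * returns TRUE when new_state is reachable from old_state through
 * vmm_scheduler_state_change(). */
static bool state_change_allowed(u32 old_state, u32 new_state)
{
	switch (new_state) {
	case VMM_VCPU_STATE_UNKNOWN:	/* destroy: always attempted */
		return TRUE;
	case VMM_VCPU_STATE_RESET:	/* create, or reset an existing VCPU */
		return old_state != VMM_VCPU_STATE_RESET;
	case VMM_VCPU_STATE_READY:	/* kick: only after reset or resume */
		return (old_state == VMM_VCPU_STATE_RESET) ||
		       (old_state == VMM_VCPU_STATE_PAUSED);
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:	/* pause/halt: only if schedulable */
		return (old_state == VMM_VCPU_STATE_READY) ||
		       (old_state == VMM_VCPU_STATE_RUNNING);
	default:			/* RUNNING is set by the scheduler itself */
		return FALSE;
	}
}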
static void vmm_scheduler_next(struct vmm_scheduler_ctrl *schedp,
			       struct vmm_timer_event *ev,
			       arch_regs_t *regs)
{
	irq_flags_t cf, nf;
	u64 tstamp = vmm_timer_timestamp();
	struct vmm_vcpu *next = NULL;
	struct vmm_vcpu *tcurrent = NULL, *current = schedp->current_vcpu;
	u32 current_state;

	/* First time scheduling */
	if (!current) {
		next = rq_dequeue(schedp);
		if (!next) {
			/* This should never happen !!! */
			vmm_panic("%s: no vcpu to switch to.\n", __func__);
		}

		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);

		arch_vcpu_switch(NULL, next, regs);
		next->state_ready_nsecs += tstamp - next->state_tstamp;
		arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
		next->state_tstamp = tstamp;
		schedp->current_vcpu = next;
		vmm_timer_event_start(ev, next->time_slice);

		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
		return;
	}

	/* Normal scheduling */
	vmm_write_lock_irqsave_lite(&current->sched_lock, cf);

	current_state = arch_atomic_read(&current->state);

	if (current_state & VMM_VCPU_STATE_SAVEABLE) {
		if (current_state == VMM_VCPU_STATE_RUNNING) {
			current->state_running_nsecs +=
				tstamp - current->state_tstamp;
			arch_atomic_write(&current->state, VMM_VCPU_STATE_READY);
			current->state_tstamp = tstamp;
			rq_enqueue(schedp, current);
		}
		tcurrent = current;
	}

	next = rq_dequeue(schedp);
	if (!next) {
		/* This should never happen !!! */
		vmm_panic("%s: no vcpu to switch to.\n", __func__);
	}

	if (next != current) {
		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);
		arch_vcpu_switch(tcurrent, next, regs);
	}

	next->state_ready_nsecs += tstamp - next->state_tstamp;
	arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
	next->state_tstamp = tstamp;
	schedp->current_vcpu = next;
	vmm_timer_event_start(ev, next->time_slice);

	if (next != current) {
		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
	}

	vmm_write_unlock_irqrestore_lite(&current->sched_lock, cf);
}
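Both this function and vmm_scheduler_state_change() use the same accounting idiom: at every transition, the nanoseconds elapsed since state_tstamp are credited to the counter of the state being left, the new state is written atomically, and state_tstamp is refreshed. For example, a VCPU that became READY at t = 100 us and is dispatched at t = 350 us has 250,000 ns added to state_ready_nsecs. The helper below is a hypothetical condensation of that idiom (callers would need to hold the VCPU's sched_lock, as above); it reuses the field names from struct vmm_vcpu but is not part of Xvisor.

/* Minimal sketch of the accounting idiom used above: credit time to the
 * state being left, then switch state and restart the clock. */
static void account_and_set_state(struct vmm_vcpu *vcpu,
				  u32 new_state, u64 tstamp)
{
	u64 delta = tstamp - vcpu->state_tstamp;

	switch (arch_atomic_read(&vcpu->state)) {
	case VMM_VCPU_STATE_READY:
		vcpu->state_ready_nsecs += delta;
		break;
	case VMM_VCPU_STATE_RUNNING:
		vcpu->state_running_nsecs += delta;
		break;
	default:
		break;
	}
	arch_atomic_write(&vcpu->state, new_state);
	vcpu->state_tstamp = tstamp;
}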