int __cpuinit vmm_scheduler_init(void)
{
	int rc;
	char vcpu_name[VMM_FIELD_NAME_SIZE];
	u32 cpu = vmm_smp_processor_id();
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Reset the scheduler control structure */
	memset(schedp, 0, sizeof(struct vmm_scheduler_ctrl));

	/* Create ready queue (Per Host CPU) */
	schedp->rq = vmm_schedalgo_rq_create();
	if (!schedp->rq) {
		return VMM_EFAIL;
	}
	INIT_SPIN_LOCK(&schedp->rq_lock);

	/* Initialize current VCPU (Per Host CPU) */
	schedp->current_vcpu = NULL;

	/* Initialize IRQ state (Per Host CPU) */
	schedp->irq_context = FALSE;
	schedp->irq_regs = NULL;

	/* Initialize yield on IRQ exit (Per Host CPU) */
	schedp->yield_on_irq_exit = FALSE;

	/* Create timer event (Per Host CPU) */
	INIT_TIMER_EVENT(&schedp->ev, &vmm_scheduler_timer_event, schedp);

	/* Create idle orphan vcpu with default time slice (Per Host CPU) */
	vmm_snprintf(vcpu_name, sizeof(vcpu_name), "idle/%d", cpu);
	schedp->idle_vcpu = vmm_manager_vcpu_orphan_create(vcpu_name,
						(virtual_addr_t)&idle_orphan,
						IDLE_VCPU_STACK_SZ,
						IDLE_VCPU_PRIORITY,
						IDLE_VCPU_TIMESLICE);
	if (!schedp->idle_vcpu) {
		return VMM_EFAIL;
	}

	/* The idle vcpu needs to stay on this cpu */
	if ((rc = vmm_manager_vcpu_set_affinity(schedp->idle_vcpu,
						vmm_cpumask_of(cpu)))) {
		return rc;
	}

	/* Kick idle orphan vcpu */
	if ((rc = vmm_manager_vcpu_kick(schedp->idle_vcpu))) {
		return rc;
	}

	/* Start scheduler timer event */
	vmm_timer_event_start(&schedp->ev, 0);

	/* Mark this CPU online */
	vmm_set_cpu_online(cpu, TRUE);

	return VMM_OK;
}
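/*
 * idle_orphan() is referenced above as the entry point of the idle
 * orphan VCPU but is not shown in this section. A minimal sketch of
 * what such a per-CPU idle loop might look like, assuming
 * arch_cpu_wait_for_irq() is the architecture hook for idling; the
 * actual implementation may do extra work per iteration.
 */
static void idle_orphan(void)
{
	/* Never returns: the idle VCPU runs whenever no other VCPU is
	 * ready on this host CPU, sleeping until the next interrupt
	 * (such as the scheduler timer event) wakes the CPU. */
	while (1) {
		arch_cpu_wait_for_irq();
	}
}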
int generic_timer_vcpu_context_init(void *vcpu_ptr,
				    void **context,
				    u32 phys_irq, u32 virt_irq)
{
	struct generic_timer_context *cntx;

	if (!context || !vcpu_ptr) {
		return VMM_EINVALID;
	}

	/* Allocate the context and create expiry events on first use */
	if (!(*context)) {
		*context = vmm_zalloc(sizeof(*cntx));
		if (!(*context)) {
			return VMM_ENOMEM;
		}
		cntx = *context;
		INIT_TIMER_EVENT(&cntx->phys_ev,
				 generic_phys_timer_expired, vcpu_ptr);
		INIT_TIMER_EVENT(&cntx->virt_ev,
				 generic_virt_timer_expired, vcpu_ptr);
	} else {
		cntx = *context;
	}

	/* Reset emulated timer registers: both physical and virtual
	 * timers start with their interrupts masked */
	cntx->cntpctl = GENERIC_TIMER_CTRL_IT_MASK;
	cntx->cntvctl = GENERIC_TIMER_CTRL_IT_MASK;
	cntx->cntpcval = 0;
	cntx->cntvcval = 0;
	cntx->cntkctl = 0;
	cntx->cntvoff = 0;
	cntx->phys_timer_irq = phys_irq;
	cntx->virt_timer_irq = virt_irq;

	/* Cancel any expiry events left over from a previous run */
	vmm_timer_event_stop(&cntx->phys_ev);
	vmm_timer_event_stop(&cntx->virt_ev);

	return VMM_OK;
}
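/*
 * A sketch of the matching teardown, assuming the usual ownership
 * rule that both timer events must be stopped before the backing
 * memory is released; the exact deinit in the source tree may differ
 * in name or signature.
 */
int generic_timer_vcpu_context_deinit(void *vcpu_ptr, void **context)
{
	struct generic_timer_context *cntx;

	if (!context || !vcpu_ptr) {
		return VMM_EINVALID;
	}
	if (!(*context)) {
		return VMM_EINVALID;
	}
	cntx = *context;

	/* Stop pending expiry events before freeing their storage */
	vmm_timer_event_stop(&cntx->phys_ev);
	vmm_timer_event_stop(&cntx->virt_ev);

	vmm_free(cntx);
	*context = NULL;

	return VMM_OK;
}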
int vmm_vcpu_irq_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 ite, irq_count;
	struct vmm_timer_event *ev;

	/* Sanity checks */
	if (!vcpu) {
		return VMM_EFAIL;
	}

	/* For Orphan VCPU just return */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Get irq count */
	irq_count = arch_vcpu_irq_count(vcpu);

	/* Only first time */
	if (!vcpu->reset_count) {
		/* Clear the memory of irq */
		memset(&vcpu->irqs, 0, sizeof(struct vmm_vcpu_irqs));

		/* Allocate memory for flags */
		vcpu->irqs.irq = vmm_zalloc(sizeof(struct vmm_vcpu_irq) *
					    irq_count);
		if (!vcpu->irqs.irq) {
			return VMM_ENOMEM;
		}

		/* Create wfi_timeout event */
		ev = vmm_zalloc(sizeof(struct vmm_timer_event));
		if (!ev) {
			vmm_free(vcpu->irqs.irq);
			vcpu->irqs.irq = NULL;
			return VMM_ENOMEM;
		}
		vcpu->irqs.wfi.priv = ev;

		/* Initialize wfi lock */
		INIT_SPIN_LOCK(&vcpu->irqs.wfi.lock);

		/* Initialize wfi timeout event */
		INIT_TIMER_EVENT(ev, vcpu_irq_wfi_timeout, vcpu);
	}

	/* Save irq count */
	vcpu->irqs.irq_count = irq_count;

	/* Set execute pending to zero */
	arch_atomic_write(&vcpu->irqs.execute_pending, 0);

	/* Set default assert & deassert counts */
	arch_atomic64_write(&vcpu->irqs.assert_count, 0);
	arch_atomic64_write(&vcpu->irqs.execute_count, 0);
	arch_atomic64_write(&vcpu->irqs.deassert_count, 0);

	/* Reset irq processing data structures for VCPU */
	for (ite = 0; ite < irq_count; ite++) {
		vcpu->irqs.irq[ite].reason = 0;
		arch_atomic_write(&vcpu->irqs.irq[ite].assert, DEASSERTED);
	}

	/* Setup wait for irq context */
	vcpu->irqs.wfi.state = FALSE;
	rc = vmm_timer_event_stop(vcpu->irqs.wfi.priv);
	if (rc != VMM_OK) {
		vmm_free(vcpu->irqs.irq);
		vcpu->irqs.irq = NULL;
		vmm_free(vcpu->irqs.wfi.priv);
		vcpu->irqs.wfi.priv = NULL;
	}

	return rc;
}
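/*
 * vcpu_irq_wfi_timeout() is registered above but not shown in this
 * section. A minimal sketch, assuming vmm_vcpu_irq_wait_resume() is
 * the API that takes a VCPU out of its wait-for-interrupt state; the
 * real handler may carry extra bookkeeping.
 */
static void vcpu_irq_wfi_timeout(struct vmm_timer_event *ev)
{
	struct vmm_vcpu *vcpu = ev->priv;

	/* The WFI wait timed out: resume the VCPU even though no
	 * interrupt was asserted for it. */
	vmm_vcpu_irq_wait_resume(vcpu);
}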