Example #1
void generic_timer_vcpu_context_save(void *vcpu_ptr, void *context)
{
	u64 ev_nsecs;
	struct generic_timer_context *cntx = context;

	if (!cntx) {
		return;
	}

#ifdef HAVE_GENERIC_TIMER_REGS_SAVE
	generic_timer_regs_save(cntx);
#else
	cntx->cntpctl = generic_timer_reg_read(GENERIC_TIMER_REG_PHYS_CTRL);
	cntx->cntvctl = generic_timer_reg_read(GENERIC_TIMER_REG_VIRT_CTRL);
	cntx->cntpcval = generic_timer_reg_read64(GENERIC_TIMER_REG_PHYS_CVAL);
	cntx->cntvcval = generic_timer_reg_read64(GENERIC_TIMER_REG_VIRT_CVAL);
	cntx->cntkctl = generic_timer_reg_read(GENERIC_TIMER_REG_KCTL);
	generic_timer_reg_write(GENERIC_TIMER_REG_PHYS_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
	generic_timer_reg_write(GENERIC_TIMER_REG_VIRT_CTRL,
				GENERIC_TIMER_CTRL_IT_MASK);
#endif

	if ((cntx->cntpctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntpctl & GENERIC_TIMER_CTRL_IT_MASK)) {
		ev_nsecs = cntx->cntpcval - generic_timer_pcounter_read();
		/* check if the timer expired while saving the context */
		if (((s64)ev_nsecs) < 0) {
			ev_nsecs = 0;
		} else {
			ev_nsecs = vmm_clocksource_delta2nsecs(ev_nsecs,
							generic_timer_mult,
							generic_timer_shift);
		}
		vmm_timer_event_start(&cntx->phys_ev, ev_nsecs);
	}

	if ((cntx->cntvctl & GENERIC_TIMER_CTRL_ENABLE) &&
	    !(cntx->cntvctl & GENERIC_TIMER_CTRL_IT_MASK)) {
		ev_nsecs = cntx->cntvcval + cntx->cntvoff -
					generic_timer_pcounter_read();
		/* check if the timer expired while saving the context */
		if (((s64)ev_nsecs) < 0) {
			ev_nsecs = 0;
		} else {
			ev_nsecs = vmm_clocksource_delta2nsecs(ev_nsecs,
							generic_timer_mult,
							generic_timer_shift);
		}
		vmm_timer_event_start(&cntx->virt_ev, ev_nsecs);
	}
}
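The compare values saved above are in counter ticks, so the distance to the programmed deadline must be converted to nanoseconds before the software events are armed. A minimal sketch of that conversion, assuming vmm_clocksource_delta2nsecs() follows the standard clocksource (delta * mult) >> shift fixed-point formula (the real implementation may differ):

	/* Sketch only: standard mult/shift tick-to-nanosecond conversion. */
	static inline u64 delta2nsecs_sketch(u64 delta, u32 mult, u32 shift)
	{
		return (delta * mult) >> shift;
	}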
Example #2
int __cpuinit vmm_scheduler_init(void)
{
	int rc;
	char vcpu_name[VMM_FIELD_NAME_SIZE];
	u32 cpu = vmm_smp_processor_id();
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	/* Reset the scheduler control structure */
	memset(schedp, 0, sizeof(struct vmm_scheduler_ctrl));

	/* Create ready queue (Per Host CPU) */
	schedp->rq = vmm_schedalgo_rq_create();
	if (!schedp->rq) {
		return VMM_EFAIL;
	}
	INIT_SPIN_LOCK(&schedp->rq_lock);

	/* Initialize current VCPU. (Per Host CPU) */
	schedp->current_vcpu = NULL;

	/* Initialize IRQ state (Per Host CPU) */
	schedp->irq_context = FALSE;
	schedp->irq_regs = NULL;

	/* Initialize yield on exit (Per Host CPU) */
	schedp->yield_on_irq_exit = FALSE;

	/* Create timer event and start it. (Per Host CPU) */
	INIT_TIMER_EVENT(&schedp->ev, &vmm_scheduler_timer_event, schedp);

	/* Create idle orphan vcpu with default time slice. (Per Host CPU) */
	vmm_snprintf(vcpu_name, sizeof(vcpu_name), "idle/%d", cpu);
	schedp->idle_vcpu = vmm_manager_vcpu_orphan_create(vcpu_name,
						(virtual_addr_t)&idle_orphan,
						IDLE_VCPU_STACK_SZ,
						IDLE_VCPU_PRIORITY, 
						IDLE_VCPU_TIMESLICE);
	if (!schedp->idle_vcpu) {
		return VMM_EFAIL;
	}

	/* The idle vcpu needs to stay on this CPU */
	if ((rc = vmm_manager_vcpu_set_affinity(schedp->idle_vcpu,
						vmm_cpumask_of(cpu)))) {
		return rc;
	}

	/* Kick idle orphan vcpu */
	if ((rc = vmm_manager_vcpu_kick(schedp->idle_vcpu))) {
		return rc;
	}

	/* Start scheduler timer event */
	vmm_timer_event_start(&schedp->ev, 0);

	/* Mark this CPU online */
	vmm_set_cpu_online(cpu, TRUE);

	return VMM_OK;
}
Example #3
void vmm_scheduler_next(vmm_timer_event_t * ev, vmm_user_regs_t * regs)
{
	int next, vcpu_count = vmm_manager_vcpu_count();
	vmm_vcpu_t *cur_vcpu, *nxt_vcpu;

	/* Determine current vcpu */
	cur_vcpu = sched.current_vcpu;

	/* Determine the next ready vcpu to schedule */
	next = (cur_vcpu) ? cur_vcpu->id : -1;
	next = ((next + 1) < vcpu_count) ? (next + 1) : 0;
	nxt_vcpu = vmm_manager_vcpu(next);
	while (nxt_vcpu->state != VMM_VCPU_STATE_READY) {
		next = ((next + 1) < vcpu_count) ? (next + 1) : 0;
		nxt_vcpu = vmm_manager_vcpu(next);
	}

	/* Do context switch between current and next vcpus */
	if (!cur_vcpu || (cur_vcpu->id != nxt_vcpu->id)) {
		if (cur_vcpu && (cur_vcpu->state & VMM_VCPU_STATE_SAVEABLE)) {
			if (cur_vcpu->state == VMM_VCPU_STATE_RUNNING) {
				cur_vcpu->state = VMM_VCPU_STATE_READY;
			}
			vmm_vcpu_regs_switch(cur_vcpu, nxt_vcpu, regs);
		} else {
			vmm_vcpu_regs_switch(NULL, nxt_vcpu, regs);
		}
	}

	if (nxt_vcpu) {
		nxt_vcpu->state = VMM_VCPU_STATE_RUNNING;
		sched.current_vcpu = nxt_vcpu;
		vmm_timer_event_start(ev, nxt_vcpu->time_slice);
	}
}
Example #4
int vmm_scheduler_init(void)
{
	/* Reset the scheduler control structure */
	vmm_memset(&sched, 0, sizeof(sched));

	/* Initialize current VCPU. (Per Host CPU) */
	sched.current_vcpu = NULL;

	/* Create idle orphan vcpu with 100 msec time slice. (Per Host CPU) */
	sched.idle_vcpu = vmm_manager_vcpu_orphan_create("idle/0",
			(virtual_addr_t)&idle_orphan,
			(virtual_addr_t)&sched.idle_vcpu_stack[VMM_IDLE_VCPU_STACK_SZ - 4],
			VMM_IDLE_VCPU_TIMESLICE);
	if (!sched.idle_vcpu) {
		return VMM_EFAIL;
	}

	/* Initialize IRQ state (Per Host CPU) */
	sched.irq_context = FALSE;

	/* Create timer event and start it. (Per Host CPU) */
	sched.ev = vmm_timer_event_create("sched", 
					  &vmm_scheduler_timer_event, 
					  NULL);
	if (!sched.ev) {
		return VMM_EFAIL;
	}
	vmm_timer_event_start(sched.ev, 0);

	return VMM_OK;
}
Example #5
void vmm_scheduler_yield(void)
{
	if (vmm_scheduler_irq_context()) {
		vmm_panic("%s: Tried to yield in IRQ context\n", __func__);
	}

	vmm_timer_event_start(sched.ev, 0);
}
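Starting the scheduler event with a zero-nanosecond expiry makes it fire at the earliest opportunity, which forces a reschedule away from the calling VCPU. A hedged usage sketch, where work_pending() is a made-up placeholder rather than a real Xvisor API:

	/* Hypothetical polling loop in an orphan VCPU: give up the CPU
	 * between checks instead of busy-spinning for the whole time slice. */
	while (!work_pending()) {
		vmm_scheduler_yield();
	}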
Example #6
static void pl031_set_alarm(struct pl031_state *s)
{
	u32 ticks = pl031_get_count(s);

	/* If timer wraps around then subtraction also wraps in the same way,
	 * and gives correct results when alarm < now_ticks.  */
	ticks = s->mr - ticks;
	if (ticks == 0) {
		vmm_timer_event_stop(&s->event);
		s->im = 1;
		pl031_update(s);
	} else {
		vmm_timer_event_start(&s->event, 
				      ((u64)ticks) * ((u64)1000000000));
	}
}
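The PL031 RTC counts at 1 Hz, so the tick delta is multiplied by 1000000000 to obtain nanoseconds. The unsigned subtraction works because u32 arithmetic is modulo 2^32: even when the match register is numerically below the current count, mr - ticks is the number of seconds until the counter wraps back around to the alarm value. A small worked example of that property:

	/* Worked example of the wrap-around subtraction (values are arbitrary). */
	u32 now = 0xFFFFFFF0U;	/* counter about to wrap            */
	u32 mr  = 0x00000010U;	/* alarm value just after the wrap  */
	u32 dly = mr - now;	/* 0x20 == 32 ticks until the alarm */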
Example #7
/* Must be called with sp805->lock held */
static int _sp805_counter_reload(struct sp805_state *sp805)
{
	int rc = VMM_OK;
	u64 reload = (sp805->load + 1) * 1000;

	if (!_sp805_enabled(sp805)) {
		sp805_debug(sp805, "Disabled, event not started.\n");
		return VMM_OK;
	}

	sp805->timestamp = vmm_timer_timestamp();
	vmm_timer_event_stop(&sp805->event);
	rc = vmm_timer_event_start(&sp805->event, reload);
	sp805_debug(sp805, "Counter started: IRQ in %d ms (%d)\n",
		    udiv32(sp805->load + 1, 1000), rc);

	return rc;
}
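The reload computation models the emulated watchdog at one tick per microsecond: the event expires (load + 1) * 1000 ns after being restarted, and the debug line reports that same interval in milliseconds. A short sketch of the arithmetic under that assumption:

	/* Sketch: load = 9999 gives an expiry 10 ms after the restart. */
	u32 load = 9999;
	u64 reload_ns = ((u64)load + 1) * 1000;	/* 10,000,000 ns == 10 ms */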
Example #8
int vmm_scheduler_notify_state_change(vmm_vcpu_t * vcpu, u32 new_state)
{
	int rc = VMM_OK;

	if(!vcpu) {
		return VMM_EFAIL;
	}

	switch(new_state) {
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:
		if(sched.current_vcpu == vcpu) {
			vmm_timer_event_start(sched.ev, 0);
		}
		break;
	}

	return rc;
}
Example #9
int vmm_vcpu_irq_wait_timeout(struct vmm_vcpu *vcpu, u64 nsecs)
{
	irq_flags_t flags;
	bool try_vcpu_pause = FALSE;

	/* Sanity Checks */
	if (!vcpu || !vcpu->is_normal) {
		return VMM_EFAIL;
	}

	/* Lock VCPU WFI */
	vmm_spin_lock_irqsave_lite(&vcpu->irqs.wfi.lock, flags);

	if (!vcpu->irqs.wfi.state &&
	    !arch_atomic_read(&vcpu->irqs.execute_pending)) {
		try_vcpu_pause = TRUE;

		/* Set wait for irq state */
		vcpu->irqs.wfi.state = TRUE;

		/* Start wait for irq timeout event */
		if (!nsecs) {
			nsecs = CONFIG_WFI_TIMEOUT_SECS * 1000000000ULL;
		}
		vmm_timer_event_start(vcpu->irqs.wfi.priv, nsecs);
	}

	/* Unlock VCPU WFI */
	vmm_spin_unlock_irqrestore_lite(&vcpu->irqs.wfi.lock, flags);

	/* Try to pause the VCPU */
	if (try_vcpu_pause) {
		vmm_manager_vcpu_pause(vcpu);
	}

	return VMM_OK;
}
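Passing nsecs == 0 selects the default timeout of CONFIG_WFI_TIMEOUT_SECS seconds, converted to nanoseconds before the event is armed. A hedged sketch of a caller, assuming a hypothetical architecture trap handler (handle_wfi_trap() is not part of the source above):

	/* Hypothetical WFI emulation: pause the VCPU until an IRQ is
	 * asserted or the default timeout event fires. */
	static int handle_wfi_trap(struct vmm_vcpu *vcpu)
	{
		return vmm_vcpu_irq_wait_timeout(vcpu, 0);
	}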
Example #10
static void vmm_scheduler_next(struct vmm_scheduler_ctrl *schedp,
			       struct vmm_timer_event *ev, 
			       arch_regs_t *regs)
{
	irq_flags_t cf, nf;
	u64 tstamp = vmm_timer_timestamp();
	struct vmm_vcpu *next = NULL; 
	struct vmm_vcpu *tcurrent = NULL, *current = schedp->current_vcpu;
	u32 current_state;

	/* First time scheduling */
	if (!current) {
		next = rq_dequeue(schedp);
		if (!next) {
			/* This should never happen !!! */
			vmm_panic("%s: no vcpu to switch to.\n", __func__);
		}

		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);

		arch_vcpu_switch(NULL, next, regs);
		next->state_ready_nsecs += tstamp - next->state_tstamp;
		arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
		next->state_tstamp = tstamp;
		schedp->current_vcpu = next;
		vmm_timer_event_start(ev, next->time_slice);

		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);

		return;
	}

	/* Normal scheduling */
	vmm_write_lock_irqsave_lite(&current->sched_lock, cf);

	current_state = arch_atomic_read(&current->state);

	if (current_state & VMM_VCPU_STATE_SAVEABLE) {
		if (current_state == VMM_VCPU_STATE_RUNNING) {
			current->state_running_nsecs += 
				tstamp - current->state_tstamp;
			arch_atomic_write(&current->state, VMM_VCPU_STATE_READY);
			current->state_tstamp = tstamp;
			rq_enqueue(schedp, current);
		}
		tcurrent = current;
	}

	next = rq_dequeue(schedp);
	if (!next) {
		/* This should never happen !!! */
		vmm_panic("%s: no vcpu to switch to.\n", 
			  __func__);
	}

	if (next != current) {
		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);
		arch_vcpu_switch(tcurrent, next, regs);
	}

	next->state_ready_nsecs += tstamp - next->state_tstamp;
	arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
	next->state_tstamp = tstamp;
	schedp->current_vcpu = next;
	vmm_timer_event_start(ev, next->time_slice);

	if (next != current) {
		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
	}

	vmm_write_unlock_irqrestore_lite(&current->sched_lock, cf);
}
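The event restarted at the end of each pass is what keeps the scheduler preemptive: after next->time_slice nanoseconds it expires, its handler runs in IRQ context, and the scheduler picks another READY VCPU. A heavily hedged sketch of such a handler, built only from the pieces shown in these examples (the real handler is the vmm_scheduler_timer_event() registered in Example #2):

	/* Hypothetical scheduler tick: ev->priv was set to the per-CPU
	 * control structure by INIT_TIMER_EVENT() in Example #2. */
	static void sched_tick_sketch(struct vmm_timer_event *ev)
	{
		struct vmm_scheduler_ctrl *schedp = ev->priv;

		vmm_scheduler_next(schedp, ev, schedp->irq_regs);
	}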