Example #1
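vmm_host_irqext_dispose_mapping() tears down an extended host IRQ mapping. Host IRQ numbers below CONFIG_HOST_IRQ_COUNT are not extended IRQs, so only their hwirq number is reset to the identity mapping; for extended IRQs, the descriptor is detached from the extended IRQ table under the write lock and then freed along with its name.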
int vmm_host_irqext_dispose_mapping(u32 hirq)
{
	int rc = VMM_OK;
	irq_flags_t flags;
	struct vmm_host_irq *irq = NULL;

	if (hirq < CONFIG_HOST_IRQ_COUNT) {
		/* Non-extended host IRQ: restore the identity hwirq mapping */
		return vmm_host_irq_set_hwirq(hirq, hirq);
	}

	vmm_write_lock_irqsave_lite(&iectrl.lock, flags);

	if (iectrl.count <= (hirq - CONFIG_HOST_IRQ_COUNT)) {
		rc = VMM_EINVALID;
		goto done;
	}

	irq = iectrl.irqs[hirq - CONFIG_HOST_IRQ_COUNT];
	iectrl.irqs[hirq - CONFIG_HOST_IRQ_COUNT] = NULL;

	if (irq) {
		if (irq->name) {
			vmm_free((void *)irq->name);
		}
		vmm_free(irq);
	}

done:
	vmm_write_unlock_irqrestore_lite(&iectrl.lock, flags);

	return rc;
}
Example #2
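vmm_host_irqext_alloc_region() reserves a power-of-two sized region of extended host IRQ numbers. The requested size is rounded up to a power of two, the per-word bitmaps are scanned for a free region, and on failure the extended IRQ storage is expanded and the search retried a bounded number of times. The returned host IRQ number is offset by CONFIG_HOST_IRQ_COUNT.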
int vmm_host_irqext_alloc_region(unsigned int size)
{
	int tries = 3, pos = -1;
	int size_log = 0;
	int idx = 0;
	irq_flags_t flags;

	while ((1 << size_log) < size) {
		++size_log;
	}

	if (!size_log || size_log > BITS_PER_LONG)
		return VMM_ENOTAVAIL;

	vmm_write_lock_irqsave_lite(&iectrl.lock, flags);

try_again:
	for (idx = 0; idx < BITS_TO_LONGS(iectrl.count); ++idx) {
		pos = bitmap_find_free_region(&iectrl.bitmap[idx],
					      BITS_PER_LONG, size_log);
		if (pos >= 0) {
			bitmap_set(&iectrl.bitmap[idx], pos, size_log);
			pos += idx * (BITS_PER_LONG);
			break;
		}
	}

	if (pos < 0) {
		/*
		 * No free region found: expand the storage for extended
		 * IRQs and search again (bounded by 'tries')
		 */
		if (VMM_OK == _irqext_expand()) {
			if (tries) {
				tries--;
				goto try_again;
			}
		}
	}

	vmm_write_unlock_irqrestore_lite(&iectrl.lock, flags);

	if (pos < 0) {
		vmm_printf("%s: Failed to find an extended IRQ region\n",
			   __func__);
		return pos;
	}

	return pos + CONFIG_HOST_IRQ_COUNT;
}
Example #3
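vmm_host_irqext_create_mapping() maps a host IRQ number to a hardware IRQ number. Non-extended host IRQs are handled by vmm_host_irq_set_hwirq(); for extended IRQs a vmm_host_irq descriptor is allocated, initialized with __vmm_host_irq_init_desc(), and installed in the extended IRQ table under the write lock (an existing mapping is left untouched).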
int vmm_host_irqext_create_mapping(u32 hirq, u32 hwirq)
{
	int rc = VMM_OK;
	irq_flags_t flags;
	struct vmm_host_irq *irq = NULL;

	if (hirq < CONFIG_HOST_IRQ_COUNT) {
		/* Non-extended host IRQ: just update its hwirq number */
		return vmm_host_irq_set_hwirq(hirq, hwirq);
	}

	vmm_write_lock_irqsave_lite(&iectrl.lock, flags);

	if (iectrl.count <= (hirq - CONFIG_HOST_IRQ_COUNT)) {
		rc = VMM_EINVALID;
		goto done;
	}

	irq = iectrl.irqs[hirq - CONFIG_HOST_IRQ_COUNT];
	if (irq) {
		/* Mapping already exists, nothing to do */
		rc = VMM_OK;
		goto done;
	}

	if (NULL == (irq = vmm_malloc(sizeof(struct vmm_host_irq)))) {
		vmm_printf("%s: Failed to allocate host IRQ\n", __func__);
		rc = VMM_ENOMEM;
		goto done;
	}

	__vmm_host_irq_init_desc(irq, hirq, hwirq);

	iectrl.irqs[hirq - CONFIG_HOST_IRQ_COUNT] = irq;

done:
	vmm_write_unlock_irqrestore_lite(&iectrl.lock, flags);

	return rc;
}
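Taken together, the three functions above form a simple allocate/map/dispose flow for extended host IRQs. The sketch below is a hypothetical caller, not part of the Xvisor sources; the helper name and the rollback strategy are assumptions made for illustration, and it only uses the APIs shown in the examples above.

/* Hypothetical helper (illustration only): map 'count' consecutive
 * hardware IRQs starting at 'hwirq_base' onto a freshly allocated
 * region of extended host IRQs. Returns the first host IRQ number
 * on success or a negative VMM_Exxx error code on failure.
 */
static int example_map_hwirq_range(u32 hwirq_base, unsigned int count)
{
	int rc, hirq;
	unsigned int i;

	/* First extended host IRQ of the region (>= CONFIG_HOST_IRQ_COUNT) */
	hirq = vmm_host_irqext_alloc_region(count);
	if (hirq < 0) {
		return hirq;
	}

	for (i = 0; i < count; i++) {
		rc = vmm_host_irqext_create_mapping(hirq + i, hwirq_base + i);
		if (rc != VMM_OK) {
			/* Roll back the mappings created so far; releasing
			 * the bitmap region itself is not shown in these
			 * examples.
			 */
			while (i > 0) {
				i--;
				vmm_host_irqext_dispose_mapping(hirq + i);
			}
			return rc;
		}
	}

	return hirq;
}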
Example #4
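vmm_scheduler_state_change() performs a VCPU state transition. Under the VCPU's scheduling lock it validates the transition against the current state, attaches to or detaches from the ready queue as required, and accounts the time spent in the previous state. If the transition affects the currently running VCPU, a preemption is triggered after the lock is released, either locally or via an async IPI to the VCPU's host CPU.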
int vmm_scheduler_state_change(struct vmm_vcpu *vcpu, u32 new_state)
{
	u64 tstamp;
	int rc = VMM_OK;
	irq_flags_t flags;
	bool preempt = FALSE;
	u32 chcpu = vmm_smp_processor_id(), vhcpu;
	struct vmm_scheduler_ctrl *schedp;
	u32 current_state;

	if (!vcpu) {
		return VMM_EFAIL;
	}

	vmm_write_lock_irqsave_lite(&vcpu->sched_lock, flags);

	vhcpu = vcpu->hcpu;
	schedp = &per_cpu(sched, vhcpu);

	current_state = arch_atomic_read(&vcpu->state);

	switch (new_state) {
	case VMM_VCPU_STATE_UNKNOWN:
		/* Existing VCPU being destroyed */
		rc = vmm_schedalgo_vcpu_cleanup(vcpu);
		break;
	case VMM_VCPU_STATE_RESET:
		if (current_state == VMM_VCPU_STATE_UNKNOWN) {
			/* New VCPU */
			rc = vmm_schedalgo_vcpu_setup(vcpu);
		} else if (current_state != VMM_VCPU_STATE_RESET) {
			/* Existing VCPU */
			/* Make sure VCPU is not in a ready queue */
			if ((schedp->current_vcpu != vcpu) &&
			    (current_state == VMM_VCPU_STATE_READY)) {
				if ((rc = rq_detach(schedp, vcpu))) {
					break;
				}
			}
			/* Make sure current VCPU is preempted */
			if ((schedp->current_vcpu == vcpu) &&
			    (current_state == VMM_VCPU_STATE_RUNNING)) {
				preempt = TRUE;
			}
			vcpu->reset_count++;
			if ((rc = arch_vcpu_init(vcpu))) {
				break;
			}
			if ((rc = vmm_vcpu_irq_init(vcpu))) {
				break;
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_READY:
		if ((current_state == VMM_VCPU_STATE_RESET) ||
		    (current_state == VMM_VCPU_STATE_PAUSED)) {
			/* Enqueue VCPU to ready queue */
			rc = rq_enqueue(schedp, vcpu);
			if (!rc && (schedp->current_vcpu != vcpu)) {
				preempt = rq_prempt_needed(schedp);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:
		if ((current_state == VMM_VCPU_STATE_READY) ||
		    (current_state == VMM_VCPU_STATE_RUNNING)) {
			/* Expire timer event if current VCPU 
			 * is paused or halted 
			 */
			if (schedp->current_vcpu == vcpu) {
				preempt = TRUE;
			} else if (current_state == VMM_VCPU_STATE_READY) {
				/* Make sure VCPU is not in a ready queue */
				rc = rq_detach(schedp, vcpu);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	}

	if (rc == VMM_OK) {
		tstamp = vmm_timer_timestamp();
		switch (current_state) {
		case VMM_VCPU_STATE_READY:
			vcpu->state_ready_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_RUNNING:
			vcpu->state_running_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_PAUSED:
			vcpu->state_paused_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_HALTED:
			vcpu->state_halted_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		default:
			break; 
		}
		if (new_state == VMM_VCPU_STATE_RESET) {
			vcpu->state_ready_nsecs = 0;
			vcpu->state_running_nsecs = 0;
			vcpu->state_paused_nsecs = 0;
			vcpu->state_halted_nsecs = 0;
			vcpu->reset_tstamp = tstamp;
		}
		arch_atomic_write(&vcpu->state, new_state);
		vcpu->state_tstamp = tstamp;
	}

	vmm_write_unlock_irqrestore_lite(&vcpu->sched_lock, flags);

	/* Trigger the preemption outside the lock: handle it locally when
	 * the VCPU runs on this host CPU, otherwise send an async IPI to
	 * the VCPU's host CPU.
	 */
	if (preempt && schedp->current_vcpu) {
		if (chcpu == vhcpu) {
			if (schedp->current_vcpu->is_normal) {
				schedp->yield_on_irq_exit = TRUE;
			} else if (schedp->irq_context) {
				vmm_scheduler_preempt_orphan(schedp->irq_regs);
			} else {
				arch_vcpu_preempt_orphan();
			}
		} else {
			vmm_smp_ipi_async_call(vmm_cpumask_of(vhcpu),
						scheduler_ipi_resched,
						NULL, NULL, NULL);
		}
	}

	return rc;
}
Example #5
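vmm_scheduler_next() selects the next VCPU to run. On the very first invocation it simply dequeues a VCPU from the ready queue and switches to it; otherwise it saves and re-enqueues the current VCPU (if its state is saveable), dequeues the next VCPU, performs the architecture context switch, updates the time accounting, and restarts the time-slice timer event.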
static void vmm_scheduler_next(struct vmm_scheduler_ctrl *schedp,
			       struct vmm_timer_event *ev, 
			       arch_regs_t *regs)
{
	irq_flags_t cf, nf;
	u64 tstamp = vmm_timer_timestamp();
	struct vmm_vcpu *next = NULL; 
	struct vmm_vcpu *tcurrent = NULL, *current = schedp->current_vcpu;
	u32 current_state;

	/* First time scheduling */
	if (!current) {
		next = rq_dequeue(schedp);
		if (!next) {
			/* This should never happen !!! */
			vmm_panic("%s: no vcpu to switch to.\n", __func__);
		}

		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);

		arch_vcpu_switch(NULL, next, regs);
		next->state_ready_nsecs += tstamp - next->state_tstamp;
		arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
		next->state_tstamp = tstamp;
		schedp->current_vcpu = next;
		vmm_timer_event_start(ev, next->time_slice);

		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);

		return;
	}

	/* Normal scheduling */
	vmm_write_lock_irqsave_lite(&current->sched_lock, cf);

	current_state = arch_atomic_read(&current->state);

	/* Save and re-enqueue the outgoing VCPU only if its state is
	 * saveable; a RUNNING VCPU is accounted and moved back to READY.
	 */
	if (current_state & VMM_VCPU_STATE_SAVEABLE) {
		if (current_state == VMM_VCPU_STATE_RUNNING) {
			current->state_running_nsecs += 
				tstamp - current->state_tstamp;
			arch_atomic_write(&current->state, VMM_VCPU_STATE_READY);
			current->state_tstamp = tstamp;
			rq_enqueue(schedp, current);
		}
		tcurrent = current;
	}

	next = rq_dequeue(schedp);
	if (!next) {
		/* This should never happen !!! */
		vmm_panic("%s: no vcpu to switch to.\n", 
			  __func__);
	}

	if (next != current) {
		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);
		arch_vcpu_switch(tcurrent, next, regs);
	}

	next->state_ready_nsecs += tstamp - next->state_tstamp;
	arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
	next->state_tstamp = tstamp;
	schedp->current_vcpu = next;
	vmm_timer_event_start(ev, next->time_slice);

	if (next != current) {
		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
	}

	vmm_write_unlock_irqrestore_lite(&current->sched_lock, cf);
}