Example #1
void vmm_vcpu_irq_process(struct vmm_vcpu *vcpu, arch_regs_t *regs)
{
	/* For non-normal VCPU don't do anything */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* If the VCPU is not in an interruptible state then don't do anything */
	if (!(vmm_manager_vcpu_get_state(vcpu) & 
					VMM_VCPU_STATE_INTERRUPTIBLE)) {
		return;
	}

	/* Proceed only if we have pending execute */
	if (arch_atomic_dec_if_positive(&vcpu->irqs.execute_pending) >= 0) {
		int irq_no = -1;
		u32 i, tmp_prio, irq_count = vcpu->irqs.irq_count;
		u32 irq_prio = 0;

		/* Find the irq number to process */
		for (i = 0; i < irq_count; i++) {
			if (arch_atomic_read(&vcpu->irqs.irq[i].assert) ==
			    ASSERTED) {
				tmp_prio = arch_vcpu_irq_priority(vcpu, i);
				if (tmp_prio > irq_prio) {
					irq_no = i;
					irq_prio = tmp_prio;
				}
			}
		}
		if (irq_no == -1) {
			return;
		}

		/* If irq number found then execute it */
		if (arch_atomic_cmpxchg(&vcpu->irqs.irq[irq_no].assert,
					ASSERTED, PENDING) == ASSERTED) {
			if (arch_vcpu_irq_execute(vcpu, regs, irq_no,
				vcpu->irqs.irq[irq_no].reason) == VMM_OK) {
				arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
						  DEASSERTED);
				arch_atomic64_inc(&vcpu->irqs.execute_count);
			} else {
				/* arch_vcpu_irq_execute() may have failed
				 * because the VCPU was already processing
				 * a VCPU irq, so increment the execute
				 * pending count to retry next time.
				 */
				arch_atomic_inc(&vcpu->irqs.execute_pending);
				arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
						  ASSERTED);
			}
		}
	}
}
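A minimal caller sketch, not taken from the source above: assuming the architecture port runs a hook on every interrupt exit and that vmm_scheduler_current_vcpu() returns the VCPU owning the interrupted context, pending virtual irqs could be injected like this. The hook name arch_sample_irq_exit() is hypothetical.

void arch_sample_irq_exit(arch_regs_t *regs)
{
	struct vmm_vcpu *vcpu = vmm_scheduler_current_vcpu();

	/* Inject the highest-priority asserted irq (if any) into the
	 * interrupted register context before returning to the guest. */
	vmm_vcpu_irq_process(vcpu, regs);
}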
Example #2
void vmm_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	/* For non-normal VCPU don't do anything */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* If the VCPU is not in an interruptible state then don't do anything */
	if (!(vmm_manager_vcpu_get_state(vcpu) & VMM_VCPU_STATE_INTERRUPTIBLE)) {
		return;
	}

	/* Check irq number */
	if (irq_no >= vcpu->irqs.irq_count) {
		return;
	}

	/* Assert the irq */
	if (arch_atomic_cmpxchg(&vcpu->irqs.irq[irq_no].assert, 
				DEASSERTED, ASSERTED) == DEASSERTED) {
		if (arch_vcpu_irq_assert(vcpu, irq_no, reason) == VMM_OK) {
			vcpu->irqs.irq[irq_no].reason = reason;
			arch_atomic_inc(&vcpu->irqs.execute_pending);
			arch_atomic64_inc(&vcpu->irqs.assert_count);
		} else {
			arch_atomic_write(&vcpu->irqs.irq[irq_no].assert,
					  DEASSERTED);
		}
	}

	/* Resume VCPU from wfi */
	vcpu_irq_wfi_resume(vcpu, FALSE);
}
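A usage sketch under stated assumptions: an emulated interrupt controller raising a line on a guest VCPU. The helper name and the irq line are illustrative only; the reason value is architecture-specific and is simply forwarded to arch_vcpu_irq_assert().

static void sample_pic_raise_line(struct vmm_vcpu *vcpu, u32 line)
{
	/* Marks the irq ASSERTED, bumps execute_pending, and wakes the
	 * VCPU from wfi; duplicate asserts are ignored by the cmpxchg. */
	vmm_vcpu_irq_assert(vcpu, line, 0x0);
}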
Example #3
void vmm_vcpu_irq_deassert(struct vmm_vcpu *vcpu, u32 irq_no)
{
	/* For non-normal VCPU don't do anything */
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* Check irq number */
	if (irq_no >= vcpu->irqs.irq_count) {
		return;
	}

	/* Call arch specific deassert */
	if (arch_vcpu_irq_deassert(vcpu, irq_no,
				   vcpu->irqs.irq[irq_no].reason) == VMM_OK) {
		arch_atomic64_inc(&vcpu->irqs.deassert_count);
	}

	/* Reset VCPU irq assert state */
	arch_atomic_write(&vcpu->irqs.irq[irq_no].assert, DEASSERTED);

	/* Ensure irq reason is zeroed */
	vcpu->irqs.irq[irq_no].reason = 0x0;
}
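The matching sketch for lowering the same line once the emulated device has been serviced; again, the helper name is an assumption and only the call itself comes from the source above.

static void sample_pic_lower_line(struct vmm_vcpu *vcpu, u32 line)
{
	/* Invokes the arch-specific deassert, resets the assert state
	 * to DEASSERTED, and clears the stored reason. */
	vmm_vcpu_irq_deassert(vcpu, line);
}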
Example #4
int vmm_scheduler_state_change(struct vmm_vcpu *vcpu, u32 new_state)
{
	u64 tstamp;
	int rc = VMM_OK;
	irq_flags_t flags;
	bool preempt = FALSE;
	u32 chcpu = vmm_smp_processor_id(), vhcpu;
	struct vmm_scheduler_ctrl *schedp;
	u32 current_state;

	if (!vcpu) {
		return VMM_EFAIL;
	}

	vmm_write_lock_irqsave_lite(&vcpu->sched_lock, flags);

	vhcpu = vcpu->hcpu;
	schedp = &per_cpu(sched, vhcpu);

	current_state = arch_atomic_read(&vcpu->state);

	switch(new_state) {
	case VMM_VCPU_STATE_UNKNOWN:
		/* Existing VCPU being destroyed */
		rc = vmm_schedalgo_vcpu_cleanup(vcpu);
		break;
	case VMM_VCPU_STATE_RESET:
		if (current_state == VMM_VCPU_STATE_UNKNOWN) {
			/* New VCPU */
			rc = vmm_schedalgo_vcpu_setup(vcpu);
		} else if (current_state != VMM_VCPU_STATE_RESET) {
			/* Existing VCPU */
			/* Make sure VCPU is not in a ready queue */
			if ((schedp->current_vcpu != vcpu) &&
			    (current_state == VMM_VCPU_STATE_READY)) {
				if ((rc = rq_detach(schedp, vcpu))) {
					break;
				}
			}
			/* Make sure current VCPU is preempted */
			if ((schedp->current_vcpu == vcpu) &&
			    (current_state == VMM_VCPU_STATE_RUNNING)) {
				preempt = TRUE;
			}
			vcpu->reset_count++;
			if ((rc = arch_vcpu_init(vcpu))) {
				break;
			}
			if ((rc = vmm_vcpu_irq_init(vcpu))) {
				break;
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_READY:
		if ((current_state == VMM_VCPU_STATE_RESET) ||
		    (current_state == VMM_VCPU_STATE_PAUSED)) {
			/* Enqueue VCPU to ready queue */
			rc = rq_enqueue(schedp, vcpu);
			if (!rc && (schedp->current_vcpu != vcpu)) {
				preempt = rq_prempt_needed(schedp);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	case VMM_VCPU_STATE_PAUSED:
	case VMM_VCPU_STATE_HALTED:
		if ((current_state == VMM_VCPU_STATE_READY) ||
		    (current_state == VMM_VCPU_STATE_RUNNING)) {
			/* Expire timer event if current VCPU 
			 * is paused or halted 
			 */
			if (schedp->current_vcpu == vcpu) {
				preempt = TRUE;
			} else if (current_state == VMM_VCPU_STATE_READY) {
				/* Make sure VCPU is not in a ready queue */
				rc = rq_detach(schedp, vcpu);
			}
		} else {
			rc = VMM_EFAIL;
		}
		break;
	}

	if (rc == VMM_OK) {
		tstamp = vmm_timer_timestamp();
		switch (current_state) {
		case VMM_VCPU_STATE_READY:
			vcpu->state_ready_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_RUNNING:
			vcpu->state_running_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_PAUSED:
			vcpu->state_paused_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		case VMM_VCPU_STATE_HALTED:
			vcpu->state_halted_nsecs += 
					tstamp - vcpu->state_tstamp;
			break;
		default:
			break; 
		}
		if (new_state == VMM_VCPU_STATE_RESET) {
			vcpu->state_ready_nsecs = 0;
			vcpu->state_running_nsecs = 0;
			vcpu->state_paused_nsecs = 0;
			vcpu->state_halted_nsecs = 0;
			vcpu->reset_tstamp = tstamp;
		}
		arch_atomic_write(&vcpu->state, new_state);
		vcpu->state_tstamp = tstamp;
	}

	vmm_write_unlock_irqrestore_lite(&vcpu->sched_lock, flags);

	if (preempt && schedp->current_vcpu) {
		if (chcpu == vhcpu) {
			if (schedp->current_vcpu->is_normal) {
				schedp->yield_on_irq_exit = TRUE;
			} else if (schedp->irq_context) {
				vmm_scheduler_preempt_orphan(schedp->irq_regs);
			} else {
				arch_vcpu_preempt_orphan();
			}
		} else {
			vmm_smp_ipi_async_call(vmm_cpumask_of(vhcpu),
						scheduler_ipi_resched,
						NULL, NULL, NULL);
		}
	}

	return rc;
}
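A sketch of how a manager layer might wrap this primitive into pause/resume helpers. The wrapper names are assumptions; only the states and the state-change call come from the source above.

static int sample_vcpu_pause(struct vmm_vcpu *vcpu)
{
	/* READY/RUNNING -> PAUSED; preempts the VCPU if it is current */
	return vmm_scheduler_state_change(vcpu, VMM_VCPU_STATE_PAUSED);
}

static int sample_vcpu_resume(struct vmm_vcpu *vcpu)
{
	/* PAUSED -> READY; re-enqueues the VCPU on its ready queue */
	return vmm_scheduler_state_change(vcpu, VMM_VCPU_STATE_READY);
}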
Example #5
static void vmm_scheduler_next(struct vmm_scheduler_ctrl *schedp,
			       struct vmm_timer_event *ev, 
			       arch_regs_t *regs)
{
	irq_flags_t cf, nf;
	u64 tstamp = vmm_timer_timestamp();
	struct vmm_vcpu *next = NULL; 
	struct vmm_vcpu *tcurrent = NULL, *current = schedp->current_vcpu;
	u32 current_state;

	/* First time scheduling */
	if (!current) {
		next = rq_dequeue(schedp);
		if (!next) {
			/* This should never happen !!! */
			vmm_panic("%s: no vcpu to switch to.\n", __func__);
		}

		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);

		arch_vcpu_switch(NULL, next, regs);
		next->state_ready_nsecs += tstamp - next->state_tstamp;
		arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
		next->state_tstamp = tstamp;
		schedp->current_vcpu = next;
		vmm_timer_event_start(ev, next->time_slice);

		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);

		return;
	}

	/* Normal scheduling */
	vmm_write_lock_irqsave_lite(&current->sched_lock, cf);

	current_state = arch_atomic_read(&current->state);

	if (current_state & VMM_VCPU_STATE_SAVEABLE) {
		if (current_state == VMM_VCPU_STATE_RUNNING) {
			current->state_running_nsecs += 
				tstamp - current->state_tstamp;
			arch_atomic_write(&current->state, VMM_VCPU_STATE_READY);
			current->state_tstamp = tstamp;
			rq_enqueue(schedp, current);
		}
		tcurrent = current;
	}

	next = rq_dequeue(schedp);
	if (!next) {
		/* This should never happen !!! */
		vmm_panic("%s: no vcpu to switch to.\n", 
			  __func__);
	}

	if (next != current) {
		vmm_write_lock_irqsave_lite(&next->sched_lock, nf);
		arch_vcpu_switch(tcurrent, next, regs);
	}

	next->state_ready_nsecs += tstamp - next->state_tstamp;
	arch_atomic_write(&next->state, VMM_VCPU_STATE_RUNNING);
	next->state_tstamp = tstamp;
	schedp->current_vcpu = next;
	vmm_timer_event_start(ev, next->time_slice);

	if (next != current) {
		vmm_write_unlock_irqrestore_lite(&next->sched_lock, nf);
	}

	vmm_write_unlock_irqrestore_lite(&current->sched_lock, cf);
}
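A sketch of the kind of per-CPU tick that could drive this context switch, assuming the timer event fires in interrupt context so that schedp->irq_regs holds the interrupted register frame; the handler name is hypothetical.

static void sample_sched_tick(struct vmm_timer_event *ev)
{
	struct vmm_scheduler_ctrl *schedp =
			&per_cpu(sched, vmm_smp_processor_id());

	/* Pick the next READY VCPU and switch register context to it */
	vmm_scheduler_next(schedp, ev, schedp->irq_regs);
}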
Example #6
int vmm_vcpu_irq_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 ite, irq_count;
	struct vmm_timer_event *ev;

	/* Sanity Checks */
	if (!vcpu) {
		return VMM_EFAIL;
	}

	/* For Orphan VCPU just return */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Get irq count */
	irq_count = arch_vcpu_irq_count(vcpu);

	/* Only first time */
	if (!vcpu->reset_count) {
		/* Clear the irqs bookkeeping structure */
		memset(&vcpu->irqs, 0, sizeof(struct vmm_vcpu_irqs));

		/* Allocate memory for per-irq state */
		vcpu->irqs.irq =
		    vmm_zalloc(sizeof(struct vmm_vcpu_irq) * irq_count);
		if (!vcpu->irqs.irq) {
			return VMM_ENOMEM;
		}

		/* Create wfi_timeout event */
		ev = vmm_zalloc(sizeof(struct vmm_timer_event));
		if (!ev) {
			vmm_free(vcpu->irqs.irq);
			vcpu->irqs.irq = NULL;
			return VMM_ENOMEM;
		}
		vcpu->irqs.wfi.priv = ev;

		/* Initialize wfi lock */
		INIT_SPIN_LOCK(&vcpu->irqs.wfi.lock);

		/* Initialize wfi timeout event */
		INIT_TIMER_EVENT(ev, vcpu_irq_wfi_timeout, vcpu);
	}

	/* Save irq count */
	vcpu->irqs.irq_count = irq_count;

	/* Set execute pending to zero */
	arch_atomic_write(&vcpu->irqs.execute_pending, 0);

	/* Reset assert, execute & deassert counters */
	arch_atomic64_write(&vcpu->irqs.assert_count, 0);
	arch_atomic64_write(&vcpu->irqs.execute_count, 0);
	arch_atomic64_write(&vcpu->irqs.deassert_count, 0);

	/* Reset irq processing data structures for VCPU */
	for (ite = 0; ite < irq_count; ite++) {
		vcpu->irqs.irq[ite].reason = 0;
		arch_atomic_write(&vcpu->irqs.irq[ite].assert, DEASSERTED);
	}

	/* Setup wait for irq context */
	vcpu->irqs.wfi.state = FALSE;
	rc = vmm_timer_event_stop(vcpu->irqs.wfi.priv);
	if (rc != VMM_OK) {
		vmm_free(vcpu->irqs.irq);
		vcpu->irqs.irq = NULL;
		vmm_free(vcpu->irqs.wfi.priv);
		vcpu->irqs.wfi.priv = NULL;
	}

	return rc;
}
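A teardown sketch mirroring the first-time allocations above (the per-irq array and the wfi timeout event). The helper name is an assumption; the fields freed are exactly those allocated in vmm_vcpu_irq_init().

static void sample_vcpu_irq_cleanup(struct vmm_vcpu *vcpu)
{
	if (!vcpu || !vcpu->is_normal) {
		return;
	}

	/* Free the wfi timeout event and the per-irq state array */
	vmm_free(vcpu->irqs.wfi.priv);
	vcpu->irqs.wfi.priv = NULL;
	vmm_free(vcpu->irqs.irq);
	vcpu->irqs.irq = NULL;
}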
Example #7
int vmm_fb_register(struct vmm_fb_info *info)
{
	int rc;
	struct vmm_fb_event event;
	struct vmm_fb_videomode mode;
	struct vmm_classdev *cd;

	if (info == NULL) {
		return VMM_EFAIL;
	}
	if (info->fbops == NULL) {
		return VMM_EFAIL;
	}

	if ((rc = vmm_fb_check_foreignness(info))) {
		return rc;
	}

	vmm_fb_remove_conflicting_framebuffers(info->apertures, 
					       info->fix.id, FALSE);

	arch_atomic_write(&info->count, 1);
	INIT_MUTEX(&info->lock);

	if (info->pixmap.addr == NULL) {
		info->pixmap.addr = vmm_malloc(FBPIXMAPSIZE);
		if (info->pixmap.addr) {
			info->pixmap.size = FBPIXMAPSIZE;
			info->pixmap.buf_align = 1;
			info->pixmap.scan_align = 1;
			info->pixmap.access_align = 32;
			info->pixmap.flags = FB_PIXMAP_DEFAULT;
		}
	}	
	info->pixmap.offset = 0;

	if (!info->pixmap.blit_x)
		info->pixmap.blit_x = ~(u32)0;

	if (!info->pixmap.blit_y)
		info->pixmap.blit_y = ~(u32)0;

	if (!info->modelist.prev || !info->modelist.next) {
		INIT_LIST_HEAD(&info->modelist);
	}

	vmm_fb_var_to_videomode(&mode, &info->var);
	vmm_fb_add_videomode(&mode, &info->modelist);

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		rc = VMM_EFAIL;
		goto free_pixmap;
	}

	INIT_LIST_HEAD(&cd->head);
	strcpy(cd->name, info->dev->node->name);
	cd->dev = info->dev;
	cd->priv = info;

	rc = vmm_devdrv_register_classdev(VMM_FB_CLASS_NAME, cd);
	if (rc) {
		goto free_classdev;
	}

	vmm_mutex_lock(&info->lock);
	event.info = info;
	vmm_fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
	vmm_mutex_unlock(&info->lock);

	return VMM_OK;

free_classdev:
	cd->dev = NULL;
	cd->priv = NULL;
	vmm_free(cd);
free_pixmap:
	if (info->pixmap.flags & FB_PIXMAP_DEFAULT) {
		vmm_free(info->pixmap.addr);
	}
	return rc;
}