Code example #1
File: vmm_vcpu_irq.c  Project: 32bitmicro/xvisor
int vmm_vcpu_irq_deinit(struct vmm_vcpu *vcpu)
{
	/* Sanity Checks */
	if (!vcpu) {
		return VMM_EFAIL;
	}

	/* For Orphan VCPU just return */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Stop wfi_timeout event */
	vmm_timer_event_stop(vcpu->irqs.wfi.priv);

	/* Free wfi_timeout event */
	vmm_free(vcpu->irqs.wfi.priv);
	vcpu->irqs.wfi.priv = NULL;

	/* Free flags */
	vmm_free(vcpu->irqs.irq);
	vcpu->irqs.irq = NULL;

	return VMM_OK;
}
Code example #2
void generic_timer_vcpu_context_restore(void *vcpu_ptr, void *context)
{
	struct vmm_vcpu *vcpu = vcpu_ptr;
	struct generic_timer_context *cntx = context;

	if (!cntx) {
		return;
	}

	/* Stop the emulated physical and virtual timer events */
	vmm_timer_event_stop(&cntx->phys_ev);
	vmm_timer_event_stop(&cntx->virt_ev);

	/* On the first restore, derive the virtual counter offset
	 * (CNTVOFF) in timer ticks from the guest reset timestamp,
	 * which is given in nanoseconds.
	 */
	if (!cntx->cntvoff) {
		cntx->cntvoff = vmm_manager_guest_reset_timestamp(vcpu->guest);
		cntx->cntvoff = cntx->cntvoff * generic_timer_hz;
		cntx->cntvoff = udiv64(cntx->cntvoff, 1000000000ULL);
	}
}
Code example #3
int generic_timer_vcpu_context_deinit(void *vcpu_ptr, void **context)
{
	struct generic_timer_context *cntx;

	if (!context || !vcpu_ptr) {
		return VMM_EINVALID;
	}
	if (!(*context)) {
		return VMM_EINVALID;
	}

	cntx = *context;

	/* Stop both timer events before freeing the context */
	vmm_timer_event_stop(&cntx->phys_ev);
	vmm_timer_event_stop(&cntx->virt_ev);

	vmm_free(cntx);

	return VMM_OK;
}
Code example #4
File: sp805.c  Project: IRT-SystemX/xvisor-next
/* Must be called with sp805->lock held */
static int _sp805_counter_stop(struct sp805_state *sp805)
{
	int rc = VMM_OK;

	rc = vmm_timer_event_stop(&sp805->event);
	sp805->freezed_value = _sp805_reg_value(sp805);
	sp805_debug(sp805, "Counter stopped at 0x%08x(%d)\n",
		    sp805->freezed_value, rc);

	return rc;
}
Code example #5
int generic_timer_vcpu_context_init(void *vcpu_ptr,
				    void **context,
				    u32 phys_irq, u32 virt_irq)
{
	struct generic_timer_context *cntx;

	if (!context || !vcpu_ptr) {
		return VMM_EINVALID;
	}

	/* Allocate the context on first init and bind the timer
	 * expiry callbacks to this VCPU */
	if (!(*context)) {
		*context = vmm_zalloc(sizeof(*cntx));
		if (!(*context)) {
			return VMM_ENOMEM;
		}
		cntx = *context;
		INIT_TIMER_EVENT(&cntx->phys_ev,
				 generic_phys_timer_expired, vcpu_ptr);
		INIT_TIMER_EVENT(&cntx->virt_ev,
				 generic_virt_timer_expired, vcpu_ptr);
	} else {
		cntx = *context;
	}

	/* Reset the emulated register state: mask both timer
	 * interrupts, clear compare values and offsets, and record
	 * the IRQ numbers to use */
	cntx->cntpctl = GENERIC_TIMER_CTRL_IT_MASK;
	cntx->cntvctl = GENERIC_TIMER_CTRL_IT_MASK;
	cntx->cntpcval = 0;
	cntx->cntvcval = 0;
	cntx->cntkctl = 0;
	cntx->cntvoff = 0;
	cntx->phys_timer_irq = phys_irq;
	cntx->virt_timer_irq = virt_irq;

	/* Ensure no stale timer events are left pending */
	vmm_timer_event_stop(&cntx->phys_ev);
	vmm_timer_event_stop(&cntx->virt_ev);

	return VMM_OK;
}
Code example #6
File: pl031.c  Project: machinoid/xvisor-next
static void pl031_set_alarm(struct pl031_state *s)
{
	u32 ticks = pl031_get_count(s);

	/* If timer wraps around then subtraction also wraps in the same way,
	 * and gives correct results when alarm < now_ticks.  */
	ticks = s->mr - ticks;
	if (ticks == 0) {
		vmm_timer_event_stop(&s->event);
		s->im = 1;
		pl031_update(s);
	} else {
		vmm_timer_event_start(&s->event, 
				      ((u64)ticks) * ((u64)1000000000));
	}
}
Code example #7
File: sp805.c  Project: IRT-SystemX/xvisor-next
/* Must be called with sp805->lock held */
static int _sp805_counter_reload(struct sp805_state *sp805)
{
	int rc = VMM_OK;
	u64 reload = (sp805->load + 1) * 1000;

	if (!_sp805_enabled(sp805)) {
		sp805_debug(sp805, "Disabled, event not started.\n");
		return VMM_OK;
	}

	sp805->timestamp = vmm_timer_timestamp();
	vmm_timer_event_stop(&sp805->event);
	rc = vmm_timer_event_start(&sp805->event, reload);
	sp805_debug(sp805, "Counter started: IRQ in %d ms (%d)\n",
		    udiv32(sp805->load + 1, 1000), rc);

	return rc;
}
Code example #8
File: vmm_vcpu_irq.c  Project: 32bitmicro/xvisor
int vmm_vcpu_irq_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 ite, irq_count;
	struct vmm_timer_event *ev;

	/* Sanity Checks */
	if (!vcpu) {
		return VMM_EFAIL;
	}

	/* For Orphan VCPU just return */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Get irq count */
	irq_count = arch_vcpu_irq_count(vcpu);

	/* Only first time */
	if (!vcpu->reset_count) {
		/* Clear the memory of irq */
		memset(&vcpu->irqs, 0, sizeof(struct vmm_vcpu_irqs));

		/* Allocate memory for flags */
		vcpu->irqs.irq =
		    vmm_zalloc(sizeof(struct vmm_vcpu_irq) * irq_count);
		if (!vcpu->irqs.irq) {
			return VMM_ENOMEM;
		}

		/* Create wfi_timeout event */
		ev = vmm_zalloc(sizeof(struct vmm_timer_event));
		if (!ev) {
			vmm_free(vcpu->irqs.irq);
			vcpu->irqs.irq = NULL;
			return VMM_ENOMEM;
		}
		vcpu->irqs.wfi.priv = ev;

		/* Initialize wfi lock */
		INIT_SPIN_LOCK(&vcpu->irqs.wfi.lock);

		/* Initialize wfi timeout event */
		INIT_TIMER_EVENT(ev, vcpu_irq_wfi_timeout, vcpu);
	}

	/* Save irq count */
	vcpu->irqs.irq_count = irq_count;

	/* Set execute pending to zero */
	arch_atomic_write(&vcpu->irqs.execute_pending, 0);

	/* Set default assert & deassert counts */
	arch_atomic64_write(&vcpu->irqs.assert_count, 0);
	arch_atomic64_write(&vcpu->irqs.execute_count, 0);
	arch_atomic64_write(&vcpu->irqs.deassert_count, 0);

	/* Reset irq processing data structures for VCPU */
	for (ite = 0; ite < irq_count; ite++) {
		vcpu->irqs.irq[ite].reason = 0;
		arch_atomic_write(&vcpu->irqs.irq[ite].assert, DEASSERTED);
	}

	/* Setup wait for irq context */
	vcpu->irqs.wfi.state = FALSE;
	rc = vmm_timer_event_stop(vcpu->irqs.wfi.priv);
	if (rc != VMM_OK) {
		vmm_free(vcpu->irqs.irq);
		vcpu->irqs.irq = NULL;
		vmm_free(vcpu->irqs.wfi.priv);
		vcpu->irqs.wfi.priv = NULL;
	}

	return rc;
}
Code example #9
File: vmm_vcpu_irq.c  Project: 32bitmicro/xvisor
static int vcpu_irq_wfi_resume(struct vmm_vcpu *vcpu, bool use_async_ipi)
{
	int rc;
	irq_flags_t flags;
	bool try_vcpu_resume = FALSE;

	if (!vcpu) {
		return VMM_EINVALID;
	}

	/* Lock VCPU WFI */
	vmm_spin_lock_irqsave_lite(&vcpu->irqs.wfi.lock, flags);

	/* If VCPU was in wfi state then update state. */
	if (vcpu->irqs.wfi.state) {
		try_vcpu_resume = TRUE;

		/* Clear wait for irq state */
		vcpu->irqs.wfi.state = FALSE;

		/* Stop wait for irq timeout event */
		vmm_timer_event_stop(vcpu->irqs.wfi.priv);

		rc = VMM_OK;
	} else {
		rc = VMM_ENOTAVAIL;
	}

	/* Unlock VCPU WFI */
	vmm_spin_unlock_irqrestore_lite(&vcpu->irqs.wfi.lock, flags);

	/* Try to resume the VCPU */
	if (use_async_ipi) {
		/* The vcpu_irq_wfi_try_resume() will be executed by async
		 * IPI worker on hcpu assigned to vcpu (i.e. vcpu->hcpu).
		 * Case 1: try_vcpu_resume == TRUE
		 *   The vcpu_irq_wfi_try_resume() will try to resume vcpu
		 *   using vmm_manager_vcpu_resume(). This can fail if vcpu
		 *   is already in READY or RUNNING state.
		 * Case 2: try_vcpu_resume == FALSE
		 *   The vcpu_irq_wfi_try_resume() will do nothing but
		 *   if vcpu was in RUNNING state then it will force at least
		 *   one context switch for vcpu. This will help hardware
		 *   assisted interrupt-controller emulators to flush out
		 *   pending interrupts when vcpu is restored.
		 */
		vmm_manager_vcpu_hcpu_func(vcpu,
			VMM_VCPU_STATE_INTERRUPTIBLE,
			vcpu_irq_wfi_try_resume,
			(try_vcpu_resume) ? (void *)TRUE : (void *)FALSE);
	} else {
		/* Case 1: try_vcpu_resume == TRUE
		 *   We directly resume vcpu using vmm_manager_vcpu_resume().
		 *   This can fail if vcpu is in READY or RUNNING state.
		 * Case 2: try_vcpu_resume == FALSE
		 *   We do nothing.
		 */
		if (try_vcpu_resume) {
			vmm_manager_vcpu_resume(vcpu);
		}
	}

	return rc;
}
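
The examples above share one lifecycle: a struct vmm_timer_event is bound to an expiry callback with INIT_TIMER_EVENT(), armed with vmm_timer_event_start() using a duration in nanoseconds, and stopped with vmm_timer_event_stop() before the state that owns it is reset or freed. The sketch below distills that pattern; it is not taken from the repositories above. The struct my_device, the names my_timeout_expired, my_device_start and my_device_stop, and the assumed expiry-callback signature void (*)(struct vmm_timer_event *) are illustrative assumptions; only the vmm_timer_event_* calls, INIT_TIMER_EVENT() and the VMM_* return codes appear in the examples themselves.

/* Minimal usage sketch (hypothetical device, not from the examples above).
 * The vmm_* declarations are assumed to come from the hypervisor's timer
 * and type headers, as in the source files listed above. */
struct my_device {
	struct vmm_timer_event ev;	/* timer event embedded in device state */
};

static void my_timeout_expired(struct vmm_timer_event *ev)
{
	/* Timeout handling would go here; the private pointer passed
	 * to INIT_TIMER_EVENT() identifies the owning device. */
}

static int my_device_start(struct my_device *dev, u64 timeout_nsecs)
{
	/* Bind the expiry handler and private data, then arm the event;
	 * durations are in nanoseconds, as in the pl031 and sp805 code. */
	INIT_TIMER_EVENT(&dev->ev, my_timeout_expired, dev);
	return vmm_timer_event_start(&dev->ev, timeout_nsecs);
}

static int my_device_stop(struct my_device *dev)
{
	/* Stop the event before the owning state is reset or freed,
	 * mirroring vmm_vcpu_irq_deinit() and the generic timer deinit. */
	return vmm_timer_event_stop(&dev->ev);
}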