Example #1
void vmm_scheduler_yield(void)
{
	irq_flags_t flags;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (schedp->irq_context) {
		vmm_panic("%s: Cannot yield in IRQ context\n", __func__);
	}

	if (!schedp->current_vcpu) {
		vmm_panic("%s: NULL VCPU pointer\n", __func__);
	}

	if (schedp->current_vcpu->is_normal) {
		/* For Normal VCPU
		 * Just enable yield on exit and the rest will be taken
		 * care of by vmm_scheduler_irq_exit()
		 */
		if (vmm_manager_vcpu_get_state(schedp->current_vcpu) == 
						VMM_VCPU_STATE_RUNNING) {
			schedp->yield_on_irq_exit = TRUE;
		}
	} else {
		/* For Orphan VCPU
		 * Forcefully expire the yield (immediate preemption)
		 */
		arch_vcpu_preempt_orphan();
	}

	arch_cpu_irq_restore(flags);
}
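A minimal usage sketch for the routine above: a VCPU polling for a slow condition can give the processor away between checks instead of busy-spinning. Everything in this snippet other than vmm_scheduler_yield() is a made-up name used only for illustration.

/* Hypothetical helper built on vmm_scheduler_yield(); wait_for_device(),
 * struct my_device, and device_ready() are illustrative only. */
static void wait_for_device(struct my_device *dev)
{
	while (!device_ready(dev)) {
		/* Let other VCPUs run before checking again */
		vmm_scheduler_yield();
	}
}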
Example #2
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy from or to
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an SG list to a
 *			 buffer, 0 == from a buffer to an SG list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	arch_cpu_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	arch_cpu_irq_restore(flags);
	return offset;
}
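For reference, the Linux scatterlist code this helper derives from pairs it with two thin direction-specific wrappers; a sketch along those lines, assuming the same sg_copy_buffer() signature as above:

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	/* Copy from a linear buffer into the SG list */
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}

size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	/* Copy from the SG list into a linear buffer */
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}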
Example #3
void __lock arch_atomic_sub(atomic_t *atom, long value)
{
	irq_flags_t flags;

	arch_cpu_irq_save(flags);
	atom->counter -= value;
	arch_cpu_irq_restore(flags);
}
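The same IRQ-masking pattern covers the other arithmetic primitives; a plausible arch_atomic_add() counterpart, assuming the same atomic_t layout and helpers as above:

void __lock arch_atomic_add(atomic_t *atom, long value)
{
	irq_flags_t flags;

	/* Mask interrupts so the read-modify-write cannot be preempted */
	arch_cpu_irq_save(flags);
	atom->counter += value;
	arch_cpu_irq_restore(flags);
}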
Example #4
void __lock vmm_spin_unlock_irqrestore(vmm_spinlock_t * lock,
					irq_flags_t flags)
{
#if defined(CONFIG_SMP)
	/* Call CPU specific unlocking routine */
	arch_cpu_spin_unlock(&lock->__the_lock);
#endif
	/* Restore saved interrupt flags */
	arch_cpu_irq_restore(flags);
}
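A plausible locking counterpart mirroring the structure above; the exact Xvisor prototype and the arch_cpu_spin_lock() helper are assumptions here, not confirmed by this listing:

void __lock vmm_spin_lock_irqsave(vmm_spinlock_t * lock,
				  irq_flags_t *flags)
{
	/* Save and disable interrupts on the local CPU */
	arch_cpu_irq_save(*flags);
#if defined(CONFIG_SMP)
	/* Call CPU specific locking routine (assumed counterpart of
	 * arch_cpu_spin_unlock() used above) */
	arch_cpu_spin_lock(&lock->__the_lock);
#endif
}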
Example #5
long __lock arch_atomic_xchg(atomic_t *atom, long newval)
{
	long previous;
	irq_flags_t flags;

	arch_cpu_irq_save(flags);
	previous = atom->counter;
	atom->counter = newval;
	arch_cpu_irq_restore(flags);

	return previous;
}
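A typical use of an atomic exchange is to consume a shared value and reset it in a single step; a hypothetical sketch (pending_events and take_pending_events() are made-up names):

static atomic_t pending_events;

/* Hypothetical helper: atomically grab the accumulated count and clear it */
static long take_pending_events(void)
{
	return arch_atomic_xchg(&pending_events, 0);
}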
Example #6
long __lock arch_atomic_sub_return(atomic_t *atom, long value)
{
	long temp;
	irq_flags_t flags;

	arch_cpu_irq_save(flags);
	atom->counter -= value;
	temp = atom->counter;
	arch_cpu_irq_restore(flags);

	return temp;
}
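Because the updated value is returned, this primitive fits reference counting, where the caller that drops the count to zero performs the cleanup; a hypothetical sketch (struct obj, its refcnt field, and obj_free() are made up):

/* Hypothetical reference drop: free the object on the last put */
static void obj_put(struct obj *o)
{
	if (arch_atomic_sub_return(&o->refcnt, 1) == 0) {
		obj_free(o);
	}
}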
Example #7
long __lock arch_atomic_cmpxchg(atomic_t *atom, long oldval, long newval)
{
	long previous;
	irq_flags_t flags;

	arch_cpu_irq_save(flags);
	previous = atom->counter;
	if (previous == oldval) {
		atom->counter = newval;
	}
	arch_cpu_irq_restore(flags);

	return previous;
}
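Compare-and-exchange is normally driven from a retry loop: read the current value, compute the desired one, and retry if another CPU changed the counter in between. A hypothetical bounded-increment sketch built only on the primitive above:

/* Hypothetical helper: increment atom only while it stays below limit.
 * Returns 1 on success, 0 if the limit was already reached. */
static int atomic_inc_below(atomic_t *atom, long limit)
{
	long old, seen;

	do {
		old = atom->counter;	/* plain read; cmpxchg arbitrates */
		if (old >= limit) {
			return 0;
		}
		seen = arch_atomic_cmpxchg(atom, old, old + 1);
	} while (seen != old);

	return 1;
}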
Example #8
bool vmm_scheduler_normal_context(void)
{
	bool ret = FALSE;
	irq_flags_t flags;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (schedp->current_vcpu && !schedp->irq_context) {
		ret = (schedp->current_vcpu->is_normal) ? TRUE : FALSE;
	}

	arch_cpu_irq_restore(flags);

	return ret;
}
Example #9
int vmm_profiler_stop(void)
{
	if (vmm_profiler_isactive()) {
		irq_flags_t flags;

		arch_cpu_irq_save(flags);

		_vmm_profile_enter = vmm_profile_none;
		_vmm_profile_exit = vmm_profile_none;
		pctrl.is_active = 0;

		arch_cpu_irq_restore(flags);
	} else {
		return VMM_EFAIL;
	}

	return VMM_OK;
}
Example #10
void vmm_scheduler_preempt_enable(void)
{
	irq_flags_t flags;
	struct vmm_vcpu *vcpu;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (!schedp->irq_context) {
		vcpu = schedp->current_vcpu;
		if (vcpu && vcpu->preempt_count) {
			vcpu->preempt_count--;
		}
	}

	arch_cpu_irq_restore(flags);
}
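A plausible vmm_scheduler_preempt_disable() counterpart would mirror the routine above and raise the counter instead of dropping it; a sketch under the same assumptions:

void vmm_scheduler_preempt_disable(void)
{
	irq_flags_t flags;
	struct vmm_vcpu *vcpu;
	struct vmm_scheduler_ctrl *schedp = &this_cpu(sched);

	arch_cpu_irq_save(flags);

	if (!schedp->irq_context) {
		vcpu = schedp->current_vcpu;
		if (vcpu) {
			vcpu->preempt_count++;
		}
	}

	arch_cpu_irq_restore(flags);
}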
Example #11
int vmm_profiler_start(void)
{
	if (!vmm_profiler_isactive()) {
		irq_flags_t flags;

		arch_cpu_irq_save(flags);

		vmm_memset(pctrl.stat, 0,
			   sizeof(struct vmm_profiler_stat) *
			   kallsyms_num_syms);
		_vmm_profile_enter = vmm_profile_enter;
		_vmm_profile_exit = vmm_profile_exit;
		pctrl.is_active = 1;

		arch_cpu_irq_restore(flags);
	} else {
		return VMM_EFAIL;
	}

	return VMM_OK;
}
Example #12
static void epit_set_mode(enum vmm_clockchip_mode mode,
			  struct vmm_clockchip *evt)
{
	struct epit_clockchip *ecc = evt->priv;
	unsigned long flags;

	/*
	 * Timer interrupt generation is disabled at least long
	 * enough to call epit_set_next_event()
	 */
	arch_cpu_irq_save(flags);

	/* Disable interrupt */
	epit_irq_disable(ecc);

	if (mode != ecc->clockevent_mode) {
		/*
		 * Set the event time into the far, far future.
		 * The furthest we can go is to let the timer wrap around
		 * once.
		 */

		/* read the actual counter */
		unsigned long tcmp = vmm_readl((void *)(ecc->base + EPITCNR));

		/*
		 * add 1 (as the counter is decrementing) and write the
		 * value.
		 */
		vmm_writel(tcmp + 1, (void *)(ecc->base + EPITCMPR));

		/* Clear pending interrupt */
		epit_irq_acknowledge(ecc);
	}

	/* Remember timer mode */
	ecc->clockevent_mode = mode;
	arch_cpu_irq_restore(flags);

	switch (mode) {
	case VMM_CLOCKCHIP_MODE_PERIODIC:
		vmm_printf("epit_set_mode: Periodic mode is not "
			   "supported for i.MX EPIT\n");
		break;
	case VMM_CLOCKCHIP_MODE_ONESHOT:
		/*
		 * Do not put the overhead of interrupt enable/disable
		 * into epit_set_next_event(); the core has about 4
		 * minutes to call epit_set_next_event() or shut down
		 * the clock after mode switching
		 */
		arch_cpu_irq_save(flags);
		epit_irq_enable(ecc);
		arch_cpu_irq_restore(flags);
		break;
	case VMM_CLOCKCHIP_MODE_SHUTDOWN:
	case VMM_CLOCKCHIP_MODE_UNUSED:
	case VMM_CLOCKCHIP_MODE_RESUME:
		/* Event sources are left disabled, so no more interrupts appear */
		break;
	}
}
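The comments above refer to epit_set_next_event(); since the EPIT counter counts down, programming the next event amounts to writing "current counter minus cycles" into the compare register. A sketch of what that routine might look like, following the same register-access style (the actual driver may differ):

static int epit_set_next_event(unsigned long cycles,
			       struct vmm_clockchip *evt)
{
	struct epit_clockchip *ecc = evt->priv;
	unsigned long tcmp;

	/* Read the down-counter and program the compare register
	 * relative to it */
	tcmp = vmm_readl((void *)(ecc->base + EPITCNR));
	vmm_writel(tcmp - cycles, (void *)(ecc->base + EPITCMPR));

	return VMM_OK;
}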