Example #1
/* Must be called hw IRQs off. */
void __ipipe_lock_irq(unsigned int irq)
{
	struct ipipe_domain *ipd = ipipe_root_domain;
	struct ipipe_percpu_domain_data *p;
	int l0b, l1b;

	IPIPE_WARN_ONCE(!hard_irqs_disabled());

	/*
	 * Interrupts requested by a registered head domain cannot be
	 * locked, since this would make no sense: interrupts are
	 * globally masked at CPU level when the head domain is
	 * stalled, so there is no way we could encounter the
	 * situation IRQ locks are handling.
	 */
	if (test_and_set_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
		return;

	l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
	l1b = irq / BITS_PER_LONG;

	p = ipipe_this_cpu_context(ipd);
	if (__test_and_clear_bit(irq, p->irqpend_lomap)) {
		__set_bit(irq, p->irqheld_map);
		if (p->irqpend_lomap[l1b] == 0) {
			__clear_bit(l1b, p->irqpend_mdmap);
			if (p->irqpend_mdmap[l0b] == 0)
				__clear_bit(l0b, &p->irqpend_himap);
		}
	}
}
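
The three indices implement a three-level radix over the per-CPU pending maps: bit irq of irqpend_lomap is the actual pending flag, bit l1b of irqpend_mdmap tells whether lomap word l1b has any bit set, and bit l0b of irqpend_himap tells whether mdmap word l0b does. A minimal sketch of the index arithmetic (the helper name is made up for illustration, it is not part of the I-pipe API):

/*
 * Illustration only: decompose an IRQ number into the coordinates
 * used by the three-level pending bitmap above. With BITS_PER_LONG
 * == 64, irq 70 yields l0b = 0, l1b = 1, bit 6 of lomap word 1.
 */
static inline void ipipe_map_coords_sketch(unsigned int irq,
					   int *l0b, int *l1b, int *bit)
{
	*l0b = irq / (BITS_PER_LONG * BITS_PER_LONG); /* bit in himap, word in mdmap */
	*l1b = irq / BITS_PER_LONG;                   /* bit in mdmap, word in lomap */
	*bit = irq % BITS_PER_LONG;                   /* bit within that lomap word */
}
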
Example #2
asmlinkage void __sched __ipipe_preempt_schedule_irq(void)
{
	struct ipipe_percpu_domain_data *p;
	unsigned long flags;

	BUG_ON(!hard_irqs_disabled());
	local_irq_save(flags);
	hard_local_irq_enable();
	preempt_schedule_irq(); /* Ok, may reschedule now. */
	hard_local_irq_disable();

	/*
	 * Flush any interrupt that may have been logged as pending
	 * between the time preempt_schedule_irq() stalled the root
	 * stage before returning to us, and now.
	 */
	p = ipipe_this_cpu_root_context();
	if (unlikely(__ipipe_ipending_p(p))) {
		add_preempt_count(PREEMPT_ACTIVE);
		trace_hardirqs_on();
		__clear_bit(IPIPE_STALL_FLAG, &p->status);
		__ipipe_sync_stage();
		sub_preempt_count(PREEMPT_ACTIVE);
	}

	__ipipe_restore_root_nosync(flags);
}
Example #3
void __ipipe_spin_unlock_debug(unsigned long flags)
{
	/*
	 * We catch a nasty issue where spin_unlock_irqrestore() on a
	 * regular kernel spinlock is about to re-enable hw interrupts
	 * in a section entered with hw irqs off. This is clearly the
	 * sign of a massive breakage coming. The usual suspect is a
	 * regular spinlock which was overlooked and used within a
	 * section which must run with hw irqs disabled.
	 */
	WARN_ON_ONCE(!raw_irqs_disabled_flags(flags) && hard_irqs_disabled());
}
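
For reference, the pattern this check flags can be pictured as follows. The sketch below is illustrative only: the lock and the calling context are made up, and it assumes the I-pipe hard_local_irq_save()/hard_local_irq_restore() primitives together with an unstalled root stage, so that the flags saved by the regular spinlock still read "interrupts enabled" while hw IRQs are actually off.

static DEFINE_SPINLOCK(some_lock);	/* hypothetical regular spinlock */

static void broken_section_sketch(void)
{
	unsigned long hwflags, flags;

	hwflags = hard_local_irq_save();	/* hw IRQs now off */

	/* Regular spinlock: flags only reflect the virtual (root) state. */
	spin_lock_irqsave(&some_lock, flags);
	/* ... */
	/*
	 * The restore path is about to re-enable interrupts inside a
	 * hw-irqs-off section; this is the situation
	 * __ipipe_spin_unlock_debug() warns about.
	 */
	spin_unlock_irqrestore(&some_lock, flags);

	hard_local_irq_restore(hwflags);
}
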
Example #4
/* Must be called hw IRQs off. */
void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq)
{
	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_context(ipd);
	int l0b = irq / BITS_PER_LONG;

	IPIPE_WARN_ONCE(!hard_irqs_disabled());

	if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) {
		__set_bit(irq, p->irqpend_lomap);
		__set_bit(l0b, &p->irqpend_himap);
	} else
		__set_bit(irq, p->irqheld_map);

	p->irqall[irq]++;
}
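
The himap word acts as a summary: bit l0b stays set as long as lomap word l0b has at least one pending bit, which lets the dispatch code find a pending IRQ with two find-first-bit operations instead of scanning the whole lomap. A simplified sketch of that lookup, not the actual I-pipe scanner, assuming the two-level layout used above:

/*
 * Illustration only: return the lowest pending IRQ recorded in the
 * two-level map, or -1 if nothing is pending. Relies on the invariant
 * maintained above: a himap bit is set only while the matching lomap
 * word is non-zero.
 */
static int next_pending_irq_sketch(struct ipipe_percpu_domain_data *p)
{
	unsigned long l0m = p->irqpend_himap;
	int l0b;

	if (l0m == 0)
		return -1;

	l0b = __ffs(l0m);		/* first non-empty lomap word */
	return l0b * BITS_PER_LONG + __ffs(p->irqpend_lomap[l0b]);
}
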
Example #5
/* Must be called hw IRQs off. */
void __ipipe_lock_irq(unsigned int irq)
{
	struct ipipe_percpu_domain_data *p;
	int l0b = irq / BITS_PER_LONG;

	IPIPE_WARN_ONCE(!hard_irqs_disabled());

	if (test_and_set_bit(IPIPE_LOCK_FLAG,
			     &ipipe_root_domain->irqs[irq].control))
		return;

	p = ipipe_this_cpu_root_context();
	if (__test_and_clear_bit(irq, p->irqpend_lomap)) {
		__set_bit(irq, p->irqheld_map);
		if (p->irqpend_lomap[l0b] == 0)
			__clear_bit(l0b, &p->irqpend_himap);
	}
}
Example #6
/* Must be called hw IRQs off. */
void __ipipe_unlock_irq(unsigned int irq)
{
	struct ipipe_domain *ipd = ipipe_root_domain;
	struct ipipe_percpu_domain_data *p;
	int l0b = irq / BITS_PER_LONG, cpu;

	IPIPE_WARN_ONCE(!hard_irqs_disabled());

	if (!test_and_clear_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
		return;

	for_each_online_cpu(cpu) {
		p = ipipe_percpu_context(ipd, cpu);
		if (test_and_clear_bit(irq, p->irqheld_map)) {
			/* We need atomic ops here: */
			set_bit(irq, p->irqpend_lomap);
			set_bit(l0b, &p->irqpend_himap);
		}
	}
}
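
Locking an IRQ therefore parks further occurrences in irqheld_map, and unlocking replays whatever was held as pending again on every online CPU. A minimal usage sketch pairing the two calls (the calling context is made up; hard_local_irq_save()/hard_local_irq_restore() are assumed to be the usual I-pipe primitives for masking hw IRQs):

static void defer_irq_sketch(unsigned int irq)
{
	unsigned long flags;

	flags = hard_local_irq_save();
	__ipipe_lock_irq(irq);		/* new occurrences land in irqheld_map */
	hard_local_irq_restore(flags);

	/* ... section during which the IRQ must not be delivered ... */

	flags = hard_local_irq_save();
	__ipipe_unlock_irq(irq);	/* held occurrences become pending again */
	hard_local_irq_restore(flags);
}
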
Example #7
File: sched.c Project: ChunHungLiu/xenomai
int __xnsched_run(struct xnsched *sched)
{
	struct xnthread *prev, *next, *curr;
	int switched, shadow;
	spl_t s;

	if (xnarch_escalate())
		return 0;

	trace_cobalt_schedule(sched);

	xnlock_get_irqsave(&nklock, s);

	curr = sched->curr;
	/*
	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
	 * stale if curr = &rootcb, since the task logged by
	 * leave_root() may no longer be the current one. Use
	 * "current" to disambiguate.
	 */
	xntrace_pid(current->pid, xnthread_current_priority(curr));
reschedule:
	switched = 0;
	if (!test_resched(sched))
		goto out;

	next = xnsched_pick_next(sched);
	if (next == curr) {
		if (unlikely(xnthread_test_state(next, XNROOT))) {
			if (sched->lflags & XNHTICK)
				xnintr_host_tick(sched);
			if (sched->lflags & XNHDEFER)
				xnclock_program_shot(&nkclock, sched);
		}
		goto out;
	}

	prev = curr;

	trace_cobalt_switch_context(prev, next);

	if (xnthread_test_state(next, XNROOT))
		xnsched_reset_watchdog(sched);

	sched->curr = next;
	shadow = 1;

	if (xnthread_test_state(prev, XNROOT)) {
		leave_root(prev);
		shadow = 0;
	} else if (xnthread_test_state(next, XNROOT)) {
		if (sched->lflags & XNHTICK)
			xnintr_host_tick(sched);
		if (sched->lflags & XNHDEFER)
			xnclock_program_shot(&nkclock, sched);
		enter_root(next);
	}

	xnstat_exectime_switch(sched, &next->stat.account);
	xnstat_counter_inc(&next->stat.csw);

	switch_context(sched, prev, next);

	/*
	 * Test whether we transitioned from primary mode to secondary
	 * over a shadow thread, caused by a call to xnthread_relax().
	 * In such a case, we are running over the regular schedule()
	 * tail code, so we have to skip our tail code.
	 */
	if (shadow && ipipe_root_p)
		goto shadow_epilogue;

	switched = 1;
	sched = xnsched_finish_unlocked_switch(sched);
	/*
	 * Re-read the currently running thread, this is needed
	 * because of relaxed/hardened transitions.
	 */
	curr = sched->curr;
	xnthread_switch_fpu(sched);
	xntrace_pid(current->pid, xnthread_current_priority(curr));
out:
	if (switched &&
	    xnsched_maybe_resched_after_unlocked_switch(sched))
		goto reschedule;

	if (curr->lock_count)
		sched->lflags |= XNINLOCK;

	xnlock_put_irqrestore(&nklock, s);

	return switched;

shadow_epilogue:
	__ipipe_complete_domain_migration();

	XENO_BUG_ON(COBALT, xnthread_current() == NULL);

	/*
	 * Interrupts must be disabled here (this has to be done on
	 * entry of the Linux [__]switch_to function), but it is also
	 * what callers expect, specifically the rescheduling of an
	 * IRQ handler that hit before we called xnsched_run() in
	 * xnthread_suspend() when relaxing a thread.
	 */
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());

	return 1;
}
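
For context, __xnsched_run() is usually not invoked in isolation: a call that alters a thread's readiness raises the resched condition on the target xnsched, and the caller then kicks the procedure above. A minimal sketch of that pattern, assuming xnthread_resume() and the public xnsched_run() entry point behave as in the Cobalt core (the wrapper function itself is made up):

static void wakeup_and_resched_sketch(struct xnthread *thread)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	xnthread_resume(thread, XNSUSP);	/* raises the resched condition */
	xnsched_run();				/* may switch context right here */
	xnlock_put_irqrestore(&nklock, s);
}
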