Example #1
static inline void xntimer_enqueue_aperiodic(xntimer_t *timer)
{
	xntimerq_t *q = &timer->sched->timerqueue;
	xntimerq_insert(q, &timer->aplink);
	__clrbits(timer->status, XNTIMER_DEQUEUED);
	xnstat_counter_inc(&timer->scheduled);
}
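
xntimerq_insert() keeps the aperiodic queue ordered by expiry date, which is what lets the tick handler (example #8) stop scanning at the first timer that is not yet due. A minimal sketch of that ordering invariant, using a plain sorted list and hypothetical toy_* names in place of the real xntimerq implementation:

#include <stdint.h>
#include <stdio.h>

#define DEQUEUED 0x1	/* stand-in for XNTIMER_DEQUEUED */

struct toy_timer {
	uint64_t date;			/* absolute expiry, in ticks */
	unsigned status;
	struct toy_timer *next;
};

/* Insert so the list stays sorted by increasing expiry date, the
 * same contract xntimerq_insert() guarantees for &timer->aplink. */
static void toy_enqueue(struct toy_timer **q, struct toy_timer *t)
{
	while (*q && (*q)->date <= t->date)
		q = &(*q)->next;
	t->next = *q;
	*q = t;
	t->status &= ~DEQUEUED;	/* same effect as __clrbits(..., XNTIMER_DEQUEUED) */
}

int main(void)
{
	struct toy_timer a = { .date = 30, .status = DEQUEUED };
	struct toy_timer b = { .date = 10, .status = DEQUEUED };
	struct toy_timer *q = NULL;

	toy_enqueue(&q, &a);
	toy_enqueue(&q, &b);
	printf("head expires at %llu\n", (unsigned long long)q->date);	/* 10 */
	return 0;
}

The real container is a priority queue keyed on xntimerh_date(); the sorted list above stands in only to show the ordering contract.
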
Example #2
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched(); /* for the per-CPU stats slot */
	xnintr_t *intr;
	int s;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/* In SMP case, we have to reload the cookie under the per-IRQ lock
	   to avoid racing with xnintr_detach. */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
 unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
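
The unhandled counter above guards against interrupt storms: an ISR that keeps returning XN_ISR_NONE eventually gets its line masked via XN_ISR_NOENABLE. A standalone sketch of that guard, with illustrative flag values and hypothetical toy_* names (the real XN_ISR_* constants and XNINTR_MAX_UNHANDLED live in the nucleus headers):

#include <stdio.h>

#define ISR_NONE      0x0	/* illustrative values only */
#define ISR_HANDLED   0x1
#define ISR_NOENABLE  0x100
#define MAX_UNHANDLED 1000

struct toy_intr {
	int (*isr)(struct toy_intr *);
	unsigned unhandled;
};

/* Same guard as above: if the line keeps firing with nobody
 * claiming it, stop re-enabling it after MAX_UNHANDLED misses. */
static int toy_dispatch(struct toy_intr *intr)
{
	int s = intr->isr(intr);

	if (s == ISR_NONE) {
		if (++intr->unhandled == MAX_UNHANDLED)
			s |= ISR_NOENABLE;	/* leave the line masked */
	} else
		intr->unhandled = 0;

	return s;
}

static int stuck_isr(struct toy_intr *intr) { (void)intr; return ISR_NONE; }

int main(void)
{
	struct toy_intr intr = { .isr = stuck_isr };
	int i, s = 0;

	for (i = 0; i < MAX_UNHANDLED; i++)
		s = toy_dispatch(&intr);
	printf("noenable after storm: %d\n", !!(s & ISR_NOENABLE));	/* 1 */
	return 0;
}
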
Example #3
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched(); /* for the per-CPU stats slot */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr;
	int s = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		int ret;

		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
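
The shared variant walks a chain of registered handlers and ORs their status words, so one XN_ISR_HANDLED anywhere in the chain claims the IRQ. A toy rendering of that traversal, with hypothetical types:

#include <stdio.h>

#define ISR_NONE     0x0	/* illustrative values only */
#define ISR_HANDLED  0x1

struct toy_handler {
	int (*isr)(void *arg);
	void *arg;
	struct toy_handler *next;
};

static int toy_shared_dispatch(struct toy_handler *chain)
{
	int s = ISR_NONE;

	for (; chain; chain = chain->next)
		s |= chain->isr(chain->arg);

	return s;	/* caller tests for ISR_NONE exactly as above */
}

static int miss(void *arg) { (void)arg; return ISR_NONE; }
static int hit(void *arg)  { (void)arg; return ISR_HANDLED; }

int main(void)
{
	struct toy_handler h2 = { .isr = hit };
	struct toy_handler h1 = { .isr = miss, .next = &h2 };

	printf("claimed: %d\n", toy_shared_dispatch(&h1) & ISR_HANDLED);	/* 1 */
	return 0;
}
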
Example #4
static inline void xntimer_enqueue_periodic(xntimer_t *timer)
{
	unsigned slot = (xntlholder_date(&timer->plink) & XNTIMER_WHEELMASK);
	unsigned cpu = xnsched_cpu(timer->sched);
	struct percpu_cascade *pc = &base2slave(timer->base)->cascade[cpu];
	/* Just prepend the new timer to the proper slot. */
	xntlist_insert(&pc->wheel[slot], &timer->plink);
	__clrbits(timer->status, XNTIMER_DEQUEUED);
	xnstat_counter_inc(&timer->scheduled);
}
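
The slot computation relies on the wheel size being a power of two, so the AND with XNTIMER_WHEELMASK is a cheap modulo of the expiry date. A compilable sketch, assuming a 64-slot wheel (the real size is set by the nucleus):

#include <stdint.h>
#include <stdio.h>

#define WHEELSIZE  64			/* assumed; must be a power of two */
#define WHEELMASK  (WHEELSIZE - 1)

int main(void)
{
	uint64_t date = 1027;		/* absolute expiry in base ticks */

	/* Same computation as
	 * xntlholder_date(&timer->plink) & XNTIMER_WHEELMASK. */
	unsigned slot = (unsigned)(date & WHEELMASK);
	printf("expiry %llu -> slot %u\n", (unsigned long long)date, slot);	/* slot 3 */
	return 0;
}
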
Example #5
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();

	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}
	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
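
Note the inesting counter bracketing the tick processing: rescheduling is deferred until the outermost interrupt exit. A toy model of that rule, with hypothetical toy_* names:

#include <stdio.h>

struct toy_cpu {
	int inesting;
	int resched;
	int switches;
};

static void toy_irq_enter(struct toy_cpu *cpu)
{
	++cpu->inesting;
}

static void toy_irq_exit(struct toy_cpu *cpu)
{
	/* Nested interrupts just unwind; only the outermost exit may
	 * reschedule, like the --sched->inesting == 0 test above. */
	if (--cpu->inesting == 0 && cpu->resched) {
		cpu->resched = 0;
		++cpu->switches;	/* a context switch would happen here */
	}
}

int main(void)
{
	struct toy_cpu cpu = { 0 };

	toy_irq_enter(&cpu);		/* clock IRQ */
	toy_irq_enter(&cpu);		/* nested device IRQ */
	cpu.resched = 1;
	toy_irq_exit(&cpu);		/* inner exit: no switch yet */
	toy_irq_exit(&cpu);		/* outermost exit: switch */
	printf("switches: %d\n", cpu.switches);	/* 1 */
	return 0;
}
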
Example #6
static void
xntimer_adjust_periodic(xntimer_t *timer, xnsticks_t delta)
{
	xnticks_t now = timer->base->jiffies;
	xnsticks_t diff;
	xntlholder_date(&timer->plink) -= delta;
	diff = now - xntlholder_date(&timer->plink);

	if (testbits(timer->status, XNTIMER_PERIODIC)) {
		xnticks_t period = xntimer_interval(timer);
		xnticks_t mod;

		timer->pexpect -= delta;

		if ((xnsticks_t) (diff - period) >= 0) {
			/* The timer should have ticked several times before
			   now; instead of calling timer->handler several
			   times, we change the timer date without changing
			   its pexpect, so that we can call timer->handler
			   only once and the lost ticks will be counted as
			   overruns. */
			mod = xnarch_mod64(diff, period);
			xntlholder_date(&timer->plink) += diff - mod;
		} else if (delta < 0
			   && testbits(timer->status, XNTIMER_FIRED)
			   && (xnsticks_t) (diff + period) <= 0) {
			/* The timer is periodic and NOT waiting for its first
			   shot, so we make it tick sooner than its original
			   date, to avoid the case where, after adjusting time
			   to an earlier date, real-time periodic timers would
			   not tick until the original date had passed. */
			mod = xnarch_mod64(-diff, period);
			xntlholder_date(&timer->plink) += diff + mod;
			timer->pexpect += diff + mod;
		}
	}

	if (diff >= 0) {
		xnstat_counter_inc(&timer->fired);
		timer->handler(timer);

		if (!xntimer_reload_p(timer))
			return;

		xntlholder_date(&timer->plink) += timer->interval;
	}

	xntimer_enqueue_periodic(timer);
}
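
The mod-based adjustment is what turns a burst of missed ticks into a single handler call plus an overrun count. A worked numeric example, assuming period 10 and a timer found 37 ticks late:

#include <stdint.h>
#include <stdio.h>

/* If a periodic timer with period 10 is found 37 ticks late, the
 * date is advanced by diff - (diff % period) = 30, so the next shot
 * lands in the future and the 3 skipped periods show up as overruns
 * instead of as three back-to-back handler calls. */
int main(void)
{
	uint64_t period = 10, diff = 37;
	uint64_t mod = diff % period;		/* xnarch_mod64(diff, period) */

	printf("advance date by %llu, overruns %llu\n",
	       (unsigned long long)(diff - mod),
	       (unsigned long long)(diff / period));	/* 30, 3 */
	return 0;
}
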
Example #7
void xntimer_tick_periodic_inner(xntslave_t *slave)
{
	xnsched_t *sched = xnpod_current_sched();
	xntbase_t *base = &slave->base;
	xntlholder_t *holder;
	xnqueue_t *timerq;
	xntimer_t *timer;

	/*
	 * Update the periodic clocks, keeping things strictly
	 * monotonic (this routine runs on every CPU, but only CPU
	 * XNTIMER_KEEPER_ID should advance the jiffies count).
	 */
	if (sched == xnpod_sched_slot(XNTIMER_KEEPER_ID))
		++base->jiffies;

	timerq = &slave->cascade[xnsched_cpu(sched)].wheel[base->jiffies & XNTIMER_WHEELMASK];

	while ((holder = xntlist_head(timerq)) != NULL) {
		timer = plink2timer(holder);

		if ((xnsticks_t) (xntlholder_date(&timer->plink)
				  - base->jiffies) > 0)
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_periodic(timer);
		xnstat_counter_inc(&timer->fired);
		timer->handler(timer);

		if (!xntimer_reload_p(timer))
			continue;

		__setbits(timer->status, XNTIMER_FIRED);
		xntlholder_date(&timer->plink) = base->jiffies + timer->interval;
		xntimer_enqueue_periodic(timer);
	}

	xnsched_tick(sched->curr, base); /* Do time-slicing if required. */
}
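
Because dates are hashed into slots, a bucket can mix timers that are whole wheel turns apart; the date comparison against base->jiffies, not the slot match, decides what actually fires. A toy drain loop over a sorted bucket, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct toy_node {
	uint64_t date;
	struct toy_node *next;
};

static struct toy_node *toy_drain(struct toy_node *bucket, uint64_t jiffies,
				  int *fired)
{
	/* Stop at the first entry dated after the current jiffy,
	 * exactly like the break test above. */
	while (bucket && (int64_t)(bucket->date - jiffies) <= 0) {
		++*fired;		/* timer->handler(timer) would run here */
		bucket = bucket->next;
	}
	return bucket;	/* the rest belongs to a later wheel turn */
}

int main(void)
{
	/* Dates 5 and 69 share slot 5 on a 64-slot wheel. */
	struct toy_node late = { .date = 69 };
	struct toy_node due = { .date = 5, .next = &late };
	int fired = 0;

	toy_drain(&due, 5, &fired);
	printf("fired %d of 2\n", fired);	/* 1 */
	return 0;
}
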
Example #8
void xntimer_tick_aperiodic(void)
{
	xnsched_t *sched = xnpod_current_sched();
	xntimerq_t *timerq = &sched->timerqueue;
	xntimerh_t *holder;
	xntimer_t *timer;
	xnsticks_t delta;
	xnticks_t now;

	/*
	 * Optimisation: any local timer reprogramming triggered by
	 * invoked timer handlers can wait until we leave the tick
	 * handler. Use this status flag as a hint to
	 * xntimer_start_aperiodic.
	 */
	__setbits(sched->status, XNINTCK);

	now = xnarch_get_cpu_tsc();
	while ((holder = xntimerq_head(timerq)) != NULL) {
		timer = aplink2timer(holder);
		/*
		 * If the delay to the next shot is greater than the
		 * intrinsic latency value, we may stop scanning the
		 * timer queue there, since timeout dates are ordered
		 * by increasing values.
		 */
		delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
		if (delta > (xnsticks_t)nklatency)
			break;

		trace_mark(xn_nucleus, timer_expire, "timer %p", timer);

		xntimer_dequeue_aperiodic(timer);
		xnstat_counter_inc(&timer->fired);

		if (likely(timer != &sched->htimer)) {
			if (likely(!testbits(nktbase.status, XNTBLCK)
				   || testbits(timer->status, XNTIMER_NOBLCK))) {
				timer->handler(timer);
				now = xnarch_get_cpu_tsc();
				/*
				 * If the elapsed timer has no reload
				 * value, or was re-enqueued or killed
				 * by the timeout handler: do not
				 * re-enqueue it for the next shot.
				 */
				if (!xntimer_reload_p(timer))
					continue;
				__setbits(timer->status, XNTIMER_FIRED);
			} else if (likely(!testbits(timer->status, XNTIMER_PERIODIC))) {
				/*
				 * Postpone the next tick to a
				 * reasonable date in the future,
				 * waiting for the timebase to be
				 * unlocked at some point.
				 */
				xntimerh_date(&timer->aplink) = xntimerh_date(&sched->htimer.aplink);
				continue;
			}
		} else {
			/*
			 * By postponing the propagation of the
			 * low-priority host tick to the interrupt
			 * epilogue (see xnintr_irq_handler()), we
			 * save some I-cache, which translates into
			 * precious microsecs on low-end hw.
			 */
			__setbits(sched->status, XNHTICK);
			__clrbits(sched->status, XNHDEFER);
			if (!testbits(timer->status, XNTIMER_PERIODIC))
				continue;
		}

		do {
			xntimerh_date(&timer->aplink) += timer->interval;
		} while (xntimerh_date(&timer->aplink) < now + nklatency);
		xntimer_enqueue_aperiodic(timer);
	}

	__clrbits(sched->status, XNINTCK);

	xntimer_next_local_shot(sched);
}
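
The final do/while re-arm loop skips a late periodic timer past every missed period, so it is never re-queued at a date already inside the now + nklatency window. A numeric sketch of that loop:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t date = 100, interval = 25, now = 180, latency = 10;

	/* Same shape as the re-arm loop above: keep adding the
	 * period until the date clears now + latency. */
	do
		date += interval;
	while (date < now + latency);

	printf("next shot at %llu\n", (unsigned long long)date);	/* 200 */
	return 0;
}
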
Example #9
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	struct xnintr *intr;
	xnticks_t start;
	int s;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.  However, we
	 * assume that no CPU migration will occur while running the
	 * interrupt service routine, so the scheduler pointer will
	 * remain valid throughout this function.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);
	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		xnstat_exectime_lazy_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account,
			start);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
 unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
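
Compared with the RTAI variant in example #2, this version also does execution-time accounting: a timestamp is taken on entry, but the elapsed time is charged to the IRQ's account only on the handled path (xnstat_exectime_lazy_switch). A toy model of that lazy-charging idea, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

static uint64_t toy_now(void)
{
	static uint64_t fake_tsc;
	return fake_tsc += 100;		/* stand-in for xnstat_exectime_now() */
}

struct toy_account { uint64_t total; };

/* An unhandled pass leaves the time on whatever account was
 * previously current; only a claimed IRQ gets charged. */
static void toy_charge_if_handled(struct toy_account *acc,
				  uint64_t start, int handled)
{
	if (handled)
		acc->total += toy_now() - start;
}

int main(void)
{
	struct toy_account irq_acc = { 0 };
	uint64_t start = toy_now();

	toy_charge_if_handled(&irq_acc, start, 1);
	printf("charged %llu\n", (unsigned long long)irq_acc.total);	/* 100 */
	return 0;
}
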
Example #10
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	int s = 0, counter = 0, ret, code;
	struct xnintr *intr, *end = NULL;
	xnstat_exectime_t *prev;
	xnticks_t start;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		xnstat_exectime_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account);
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr
		    ("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
		     irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
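
For edge-triggered lines a single pass is not enough, since a new edge may arrive for a handler that was already polled; the loop therefore cycles until one full pass makes no progress, bounded by MAX_EDGEIRQ_COUNTER. A toy rendering of the termination rule, with hypothetical types:

#include <stdio.h>

struct toy_h {
	int (*isr)(void *arg);
	void *arg;
	struct toy_h *next;
};

static int toy_edge_dispatch(struct toy_h *chain, int max_loops)
{
	struct toy_h *intr = chain, *end = NULL;
	int s = 0, counter = 0;

	/* "end" records the first handler that reported nothing; any
	 * later handled IRQ clears it again, reopening the window. */
	while (intr != end) {
		int ret = intr->isr(intr->arg);

		s |= ret;
		if (ret)
			end = NULL;
		else if (!end)
			end = intr;

		if (counter++ > max_loops)
			break;		/* give up on a stuck line */

		intr = intr->next ? intr->next : chain;
	}
	return s;
}

static int once(void *arg) { int *n = arg; return (*n)-- > 0; }
static int idle(void *arg) { (void)arg; return 0; }

int main(void)
{
	int pending = 1;
	struct toy_h b = { .isr = idle };
	struct toy_h a = { .isr = once, .arg = &pending, .next = &b };

	printf("status %d\n", toy_edge_dispatch(&a, 128));	/* 1 */
	return 0;
}
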
Example #11
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched(); /* for the per-CPU stats slot */
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr, *end = NULL;
	int s = 0, counter = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		int ret, code;

		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr
		    ("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
		     irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
Example #12
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnstat_exectime_t *prev;
	xnticks_t start;
	xnintr_t *intr;
	int s = 0, ret;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->status, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	if (--sched->inesting == 0) {
		__clrbits(sched->status, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
	xnstat_exectime_switch(sched, prev);
}
Example #13
int __xnsched_run(struct xnsched *sched)
{
	struct xnthread *prev, *next, *curr;
	int switched, shadow;
	spl_t s;

	if (xnarch_escalate())
		return 0;

	trace_cobalt_schedule(sched);

	xnlock_get_irqsave(&nklock, s);

	curr = sched->curr;
	/*
	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
	 * stale if curr = &rootcb, since the task logged by
	 * leave_root() may not still be the current one. Use
	 * "current" for disambiguating.
	 */
	xntrace_pid(current->pid, xnthread_current_priority(curr));
reschedule:
	switched = 0;
	if (!test_resched(sched))
		goto out;

	next = xnsched_pick_next(sched);
	if (next == curr) {
		if (unlikely(xnthread_test_state(next, XNROOT))) {
			if (sched->lflags & XNHTICK)
				xnintr_host_tick(sched);
			if (sched->lflags & XNHDEFER)
				xnclock_program_shot(&nkclock, sched);
		}
		goto out;
	}

	prev = curr;

	trace_cobalt_switch_context(prev, next);

	if (xnthread_test_state(next, XNROOT))
		xnsched_reset_watchdog(sched);

	sched->curr = next;
	shadow = 1;

	if (xnthread_test_state(prev, XNROOT)) {
		leave_root(prev);
		shadow = 0;
	} else if (xnthread_test_state(next, XNROOT)) {
		if (sched->lflags & XNHTICK)
			xnintr_host_tick(sched);
		if (sched->lflags & XNHDEFER)
			xnclock_program_shot(&nkclock, sched);
		enter_root(next);
	}

	xnstat_exectime_switch(sched, &next->stat.account);
	xnstat_counter_inc(&next->stat.csw);

	switch_context(sched, prev, next);

	/*
	 * Test whether we transitioned from primary mode to secondary
	 * over a shadow thread, caused by a call to xnthread_relax().
	 * In such a case, we are running over the regular schedule()
	 * tail code, so we have to skip our tail code.
	 */
	if (shadow && ipipe_root_p)
		goto shadow_epilogue;

	switched = 1;
	sched = xnsched_finish_unlocked_switch(sched);
	/*
	 * Re-read the currently running thread, this is needed
	 * because of relaxed/hardened transitions.
	 */
	curr = sched->curr;
	xnthread_switch_fpu(sched);
	xntrace_pid(current->pid, xnthread_current_priority(curr));
out:
	if (switched &&
	    xnsched_maybe_resched_after_unlocked_switch(sched))
		goto reschedule;

	if (curr->lock_count)
		sched->lflags |= XNINLOCK;

	xnlock_put_irqrestore(&nklock, s);

	return switched;

shadow_epilogue:
	__ipipe_complete_domain_migration();

	XENO_BUG_ON(COBALT, xnthread_current() == NULL);

	/*
	 * Interrupts must be disabled here (has to be done on entry
	 * of the Linux [__]switch_to function), but it is what
	 * callers expect, specifically the reschedule of an IRQ
	 * handler that hit before we call xnsched_run in
	 * xnthread_suspend() when relaxing a thread.
	 */
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());

	return 1;
}
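
The reschedule: label and the final xnsched_maybe_resched_after_unlocked_switch() test form a retry loop: if a wakeup sneaks in while the switch ran outside the lock, the whole pick-and-switch sequence runs again. A minimal sketch of that shape, with a hypothetical single-slot runqueue:

#include <stdio.h>

struct toy_thread { const char *name; };
struct toy_rq {
	struct toy_thread *curr;
	struct toy_thread *pending;	/* stand-in for a real runqueue */
	int need_resched;
	int switches;
};

static int toy_run(struct toy_rq *rq)
{
	int switched = 0;

	do {
		struct toy_thread *next;

		rq->need_resched = 0;
		next = rq->pending ? rq->pending : rq->curr;	/* pick_next */
		rq->pending = NULL;
		if (next != rq->curr) {
			rq->curr = next;	/* switch_context() lives here */
			++rq->switches;
			switched = 1;
		}
		/* A wakeup during the unlocked switch may raise
		 * need_resched again: that is the "goto reschedule"
		 * path in __xnsched_run(). */
	} while (rq->need_resched);

	return switched;
}

int main(void)
{
	struct toy_thread root = { "root" }, rt = { "rt" };
	struct toy_rq rq = { .curr = &root, .pending = &rt, .need_resched = 1 };

	printf("switched=%d curr=%s\n", toy_run(&rq), rq.curr->name);	/* switched=1 curr=rt */
	return 0;
}
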