/*
 * Low-level handler for the master clock (timer) interrupt.
 *
 * Runs in interrupt context on the CPU that received the tick.
 * Accounts the tick against the nucleus clock statistics, fires the
 * aperiodic timer wheel under the nucleus lock, then reschedules if
 * this was the outermost interrupt, and finally relays the host
 * (Linux) tick to the root domain when needed.
 *
 * No parameters, no return value; all effects go through the per-CPU
 * scheduler slot and the nucleus clock/timer state.
 */
void xnintr_clock_handler(void)
{
	xnstat_exectime_t *prev;
	struct xnsched *sched;
	unsigned cpu;

	cpu = xnarch_current_cpu();
	/*
	 * CPUs not managed by the nucleus never consume the tick
	 * here: hand it straight back to the regular kernel.
	 */
	if (!cpumask_test_cpu(cpu, &xnarch_supported_cpus)) {
		xnarch_relay_tick();
		return;
	}

	sched = xnpod_sched_slot(cpu);

	/*
	 * Switch execution-time accounting to the clock's own
	 * account; @prev remembers the account to restore once the
	 * tick has been processed.
	 */
	prev = xnstat_exectime_switch(sched,
		&nkclock.stat[xnsched_cpu(sched)].account);
	xnstat_counter_inc(&nkclock.stat[xnsched_cpu(sched)].hits);

	trace_mark(xn_nucleus, irq_enter, "irq %u", XNARCH_TIMER_IRQ);
	trace_mark(xn_nucleus, tbase_tick, "base %s", nktbase.name);

	/*
	 * Track interrupt nesting and flag the scheduler as running
	 * in IRQ context, so rescheduling is deferred until we
	 * unwind to the outermost level below.
	 */
	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	/* The timer wheel must be walked under the nucleus lock. */
	xnlock_get(&nklock);
	xntimer_tick_aperiodic();
	xnlock_put(&nklock);

	/* Tick processed: restore the interrupted thread's account. */
	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		/*
		 * Outermost interrupt level: allow rescheduling
		 * again and run the scheduler, then re-read the
		 * scheduler slot — xnpod_schedule() may have moved
		 * us (NOTE(review): presumably via CPU migration or
		 * a context switch; confirm against xnpod_schedule).
		 */
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
		sched = xnpod_current_sched();
	}
	/*
	 * If the clock interrupt preempted a real-time thread, any
	 * transition to the root thread has already triggered a host
	 * tick propagation from xnpod_schedule(), so at this point,
	 * we only need to propagate the host tick in case the
	 * interrupt preempted the root thread.
	 */
	if (testbits(sched->lflags, XNHTICK) &&
	    xnthread_test_state(sched->curr, XNROOT))
		xnintr_host_tick(sched);

	trace_mark(xn_nucleus, irq_exit, "irq %u", XNARCH_TIMER_IRQ);
}
/*
 * Core rescheduling procedure for @sched.
 *
 * Picks the next runnable thread and performs the context switch if
 * it differs from the current one, handling the transitions to and
 * from the root (Linux) thread, host-tick/shot propagation, and the
 * special tail path taken when a shadow thread migrated back to the
 * root domain (relax) during the switch.
 *
 * Returns non-zero if a context switch occurred, zero otherwise
 * (including when the call was escalated to the head domain by
 * xnarch_escalate()).
 *
 * Runs with the nucleus lock held, interrupts off, for the whole
 * switching sequence; only the shadow epilogue path returns without
 * going through the regular unlock.
 */
int __xnsched_run(struct xnsched *sched)
{
	struct xnthread *prev, *next, *curr;
	int switched, shadow;
	spl_t s;

	/*
	 * If we must escalate to the head domain to reschedule
	 * safely, the escalated call does the work; nothing switched
	 * from this caller's point of view.
	 */
	if (xnarch_escalate())
		return 0;

	trace_cobalt_schedule(sched);

	xnlock_get_irqsave(&nklock, s);

	curr = sched->curr;
	/*
	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
	 * stale if curr = &rootcb, since the task logged by
	 * leave_root() may not still be the current one. Use
	 * "current" for disambiguating.
	 */
	xntrace_pid(current->pid, xnthread_current_priority(curr));
reschedule:
	switched = 0;
	/* Fast exit when no resched request is pending. */
	if (!test_resched(sched))
		goto out;

	next = xnsched_pick_next(sched);
	if (next == curr) {
		/*
		 * Still running the same thread. If that thread is
		 * the root one, flush any deferred host tick and/or
		 * reprogram the clock shot before leaving.
		 */
		if (unlikely(xnthread_test_state(next, XNROOT))) {
			if (sched->lflags & XNHTICK)
				xnintr_host_tick(sched);
			if (sched->lflags & XNHDEFER)
				xnclock_program_shot(&nkclock, sched);
		}
		goto out;
	}

	prev = curr;

	trace_cobalt_switch_context(prev, next);

	/*
	 * Switching back to the root thread means the system is not
	 * stuck in primary mode: reset the runaway watchdog.
	 */
	if (xnthread_test_state(next, XNROOT))
		xnsched_reset_watchdog(sched);

	sched->curr = next;
	/*
	 * @shadow tracks whether the outgoing thread was a shadow
	 * (non-root) thread; it selects the epilogue path below.
	 */
	shadow = 1;

	if (xnthread_test_state(prev, XNROOT)) {
		/* Leaving the root thread: primary-mode entry. */
		leave_root(prev);
		shadow = 0;
	} else if (xnthread_test_state(next, XNROOT)) {
		/*
		 * Entering the root thread: propagate any pending
		 * host tick / deferred shot first, then switch the
		 * per-CPU context back to Linux.
		 */
		if (sched->lflags & XNHTICK)
			xnintr_host_tick(sched);
		if (sched->lflags & XNHDEFER)
			xnclock_program_shot(&nkclock, sched);
		enter_root(next);
	}

	/* Charge execution time to the incoming thread. */
	xnstat_exectime_switch(sched, &next->stat.account);
	xnstat_counter_inc(&next->stat.csw);

	switch_context(sched, prev, next);

	/*
	 * Test whether we transitioned from primary mode to secondary
	 * over a shadow thread, caused by a call to xnthread_relax().
	 * In such a case, we are running over the regular schedule()
	 * tail code, so we have to skip our tail code.
	 */
	if (shadow && ipipe_root_p)
		goto shadow_epilogue;

	switched = 1;
	/*
	 * The switch may have run with the nucleus lock temporarily
	 * dropped; re-acquire/fix up and get the scheduler slot we
	 * actually resumed on.
	 */
	sched = xnsched_finish_unlocked_switch(sched);
	/*
	 * Re-read the currently running thread, this is needed
	 * because of relaxed/hardened transitions.
	 */
	curr = sched->curr;
	xnthread_switch_fpu(sched);
	xntrace_pid(current->pid, xnthread_current_priority(curr));
out:
	/*
	 * A resched request may have been raised while the switch
	 * ran unlocked; loop back instead of missing it.
	 */
	if (switched &&
	    xnsched_maybe_resched_after_unlocked_switch(sched))
		goto reschedule;

	/* Resuming a thread holding the scheduler lock. */
	if (curr->lock_count)
		sched->lflags |= XNINLOCK;

	xnlock_put_irqrestore(&nklock, s);

	return switched;

shadow_epilogue:
	/*
	 * We resumed in the root domain over Linux's schedule() tail:
	 * finish the domain migration instead of running our own
	 * tail code. Note: the nucleus lock is not released on this
	 * path (NOTE(review): presumably dropped by the migration
	 * machinery — confirm against __ipipe_complete_domain_migration).
	 */
	__ipipe_complete_domain_migration();

	XENO_BUG_ON(COBALT, xnthread_current() == NULL);

	/*
	 * Interrupts must be disabled here (has to be done on entry
	 * of the Linux [__]switch_to function), but it is what
	 * callers expect, specifically the reschedule of an IRQ
	 * handler that hit before we call xnsched_run in
	 * xnthread_suspend() when relaxing a thread.
	 */
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());

	return 1;
}