/*
 * t_setpri() - pSOS service: set (or query) the priority of a task.
 *
 * @tid: handle of the target task, or 0 to address the current task.
 * @newprio: new priority in the pSOS range [1..255], or 0 to only
 *           query the current priority without changing it.
 * @oldprio: output; receives the priority in force before the change.
 *
 * Returns SUCCESS, ERR_SETPRI if @newprio is out of range, -EPERM if
 * tid == 0 is used from a context that cannot block, or the error
 * computed by psos_handle_error() for a stale/invalid handle.
 */
u_long t_setpri(u_long tid, u_long newprio, u_long *oldprio)
{
	union xnsched_policy_param param;
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (tid == 0) {
		if (xnpod_unblockable_p()) {
			/*
			 * BUGFIX: the original returned -EPERM right
			 * here while still holding nklock, leaking the
			 * lock and leaving interrupts masked. Exit
			 * through the common unlock path instead.
			 */
			err = -EPERM;
			goto unlock_and_exit;
		}
		task = psos_current_task();
	} else {
		task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);
		if (!task) {
			err = psos_handle_error(tid, PSOS_TASK_MAGIC,
						psostask_t);
			goto unlock_and_exit;
		}
	}

	*oldprio = xnthread_current_priority(&task->threadbase);

	if (newprio != 0) {
		/* newprio == 0 means "query only"; skip the update. */
		if (newprio < 1 || newprio > 255) {
			err = ERR_SETPRI;
			goto unlock_and_exit;
		}

		if (newprio != *oldprio) {
			param.rt.prio = newprio;
			/*
			 * Mojibake fix: '&param' was corrupted to the
			 * pilcrow sequence '¶m' in the original text.
			 */
			xnpod_set_thread_schedparam(&task->threadbase,
						    &xnsched_class_rt,
						    &param);
			/* Priority change may require a reschedule. */
			xnpod_schedule();
		}
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * __xnsched_run() - core rescheduling procedure of the Cobalt kernel.
 *
 * Picks the next runnable thread on @sched and performs the context
 * switch to it if it differs from the current one, handling the
 * special root (Linux) thread transitions and deferred host
 * tick/timer programming along the way.
 *
 * Returns non-zero if a context switch actually took place, zero
 * otherwise (escalation to the head domain, or no reschedule needed).
 * Runs with nklock held, interrupts off, for the switching sequence.
 */
int __xnsched_run(struct xnsched *sched)
{
	struct xnthread *prev, *next, *curr;
	int switched, shadow;
	spl_t s;

	/*
	 * If we cannot reschedule from the current domain/context,
	 * escalate: the switch will be replayed from the proper
	 * context. Nothing more to do here in that case.
	 */
	if (xnarch_escalate())
		return 0;

	trace_cobalt_schedule(sched);

	xnlock_get_irqsave(&nklock, s);

	curr = sched->curr;
	/*
	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
	 * stale if curr = &rootcb, since the task logged by
	 * leave_root() may not still be the current one. Use
	 * "current" for disambiguating.
	 */
	xntrace_pid(current->pid, xnthread_current_priority(curr));
reschedule:
	switched = 0;
	/* Fast exit when no rescheduling request is pending. */
	if (!test_resched(sched))
		goto out;

	next = xnsched_pick_next(sched);
	if (next == curr) {
		/*
		 * Same thread keeps running. If that thread is the
		 * root (Linux) one, flush any deferred host tick and
		 * reprogram the hardware timer before returning to it.
		 */
		if (unlikely(xnthread_test_state(next, XNROOT))) {
			if (sched->lflags & XNHTICK)
				xnintr_host_tick(sched);
			if (sched->lflags & XNHDEFER)
				xnclock_program_shot(&nkclock, sched);
		}
		goto out;
	}

	prev = curr;

	trace_cobalt_switch_context(prev, next);

	/* Switching back to Linux: disarm the runaway-thread watchdog. */
	if (xnthread_test_state(next, XNROOT))
		xnsched_reset_watchdog(sched);

	sched->curr = next;
	/*
	 * 'shadow' stays set when neither endpoint of the switch is
	 * the root thread, i.e. we may be switching over a shadow
	 * (Linux-backed) thread — see the relax test further down.
	 */
	shadow = 1;

	if (xnthread_test_state(prev, XNROOT)) {
		leave_root(prev);
		shadow = 0;
	} else if (xnthread_test_state(next, XNROOT)) {
		/* Entering root: flush deferred host tick/timer first. */
		if (sched->lflags & XNHTICK)
			xnintr_host_tick(sched);
		if (sched->lflags & XNHDEFER)
			xnclock_program_shot(&nkclock, sched);
		enter_root(next);
	}

	/* Account execution time and context-switch count to @next. */
	xnstat_exectime_switch(sched, &next->stat.account);
	xnstat_counter_inc(&next->stat.csw);

	switch_context(sched, prev, next);

	/*
	 * Test whether we transitioned from primary mode to secondary
	 * over a shadow thread, caused by a call to xnthread_relax().
	 * In such a case, we are running over the regular schedule()
	 * tail code, so we have to skip our tail code.
	 */
	if (shadow && ipipe_root_p)
		goto shadow_epilogue;

	switched = 1;
	sched = xnsched_finish_unlocked_switch(sched);
	/*
	 * Re-read the currently running thread, this is needed
	 * because of relaxed/hardened transitions.
	 */
	curr = sched->curr;
	xnthread_switch_fpu(sched);
	xntrace_pid(current->pid, xnthread_current_priority(curr));
out:
	/*
	 * The switch may have run with nklock temporarily dropped;
	 * loop if a new rescheduling request appeared meanwhile.
	 */
	if (switched &&
	    xnsched_maybe_resched_after_unlocked_switch(sched))
		goto reschedule;

	/* Resuming a thread that holds the scheduler lock. */
	if (curr->lock_count)
		sched->lflags |= XNINLOCK;

	xnlock_put_irqrestore(&nklock, s);

	return switched;

shadow_epilogue:
	__ipipe_complete_domain_migration();

	XENO_BUG_ON(COBALT, xnthread_current() == NULL);

	/*
	 * Interrupts must be disabled here (has to be done on entry
	 * of the Linux [__]switch_to function), but it is what
	 * callers expect, specifically the reschedule of an IRQ
	 * handler that hit before we call xnsched_run in
	 * xnthread_suspend() when relaxing a thread.
	 */
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());

	return 1;
}