/*
 * Release @synch on behalf of @lastowner and transfer ownership to the
 * next pending thread, if any.
 *
 * Returns the new owner, or NULL when the object ends up unowned
 * (including the uncontended fast path, which never takes nklock).
 */
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	/* Releasing only makes sense for ownership-tracking objects. */
	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		/*
		 * NOTE(review): a zero resource count at release time
		 * looks like an unbalanced claim/release pair, hence
		 * the SIGDEBUG notification instead of a decrement --
		 * confirm against the shadow rescnt accounting rules.
		 */
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif

	lastownerh = xnthread_handle(lastowner);

	/*
	 * Fast path: atomically release the lock without grabbing
	 * nklock. Presumably xnsynch_fast_release() fails when waiters
	 * are recorded in the fastlock word, forcing the slow path
	 * below -- confirm against its definition.
	 */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		/* Wake up the next waiter and make it the new owner. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		/* Drop the priority boost the old owner may have
		   inherited from its waiters. */
		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		/* No waiter: the object becomes unowned. */
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		/* Publish the new owner handle to the fastlock word. */
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
/*
 * Unblock every thread pending on @synch at once, tagging each of them
 * with @reason (an XNBREAK/XNRMID-style information bit set by the
 * caller).
 *
 * Returns XNSYNCH_RESCHED when at least one thread was readied (or a
 * priority boost was dropped), XNSYNCH_DONE when the queue was already
 * empty; the caller is expected to reschedule accordingly.
 */
int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
{
	struct xnpholder *holder;
	int status;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
		   synch, reason);

	status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;

	/* Pop and resume every sleeper, telling it why it was woken. */
	while ((holder = getpq(&synch->pendq)) != NULL) {
		struct xnthread *sleeper = link2thread(holder, plink);
		xnthread_set_info(sleeper, reason);
		sleeper->wchan = NULL;
		xnpod_resume_thread(sleeper, XNPEND);
	}

	/* Undo any priority inheritance the owner picked up. */
	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		xnsynch_clear_boost(synch, synch->owner);
		status = XNSYNCH_RESCHED;
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return status;
}
/*
 * Wake up a specific waiter of @synch designated by @holder, making it
 * the new owner of the object, and return the next pend-queue holder
 * so the caller can keep iterating.
 *
 * Fix: the trace point used trace_mark(xn_nucleus_synch_wakeup_all, ...),
 * fusing channel and event into a single argument and naming the wrong
 * event; every other call in this file follows the
 * trace_mark(channel, event, fmt, ...) convention, so emit the
 * "synch_wakeup_this" event on the xn_nucleus channel instead.
 */
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	/* Transfer ownership to the designated sleeper and resume it. */
	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);

	trace_mark(xn_nucleus, synch_wakeup_this,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnpod_resume_thread(thread, XNPEND);

	/* Drop any priority boost inherited by the previous owner. */
	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
/*
 * Wake up the highest-priority thread pending on @synch, without any
 * ownership transfer.
 *
 * Returns the readied thread, or NULL if no thread was pending.
 */
struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	/* This service is for non-ownership objects only; owner-tracking
	   objects go through the release path instead. */
	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	holder = getpq(&synch->pendq);
	if (holder) {
		thread = link2thread(holder, plink);
		thread->wchan = NULL;
		trace_mark(xn_nucleus, synch_wakeup_one,
			   "thread %p thread_name %s synch %p",
			   thread, xnthread_name(thread), synch);
		xnpod_resume_thread(thread, XNPEND);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return thread;
}
/*
 * Make @task periodic with the given @start_time and @period, then
 * resume it if it was suspended.
 *
 * Returns 0 on success, -EINVAL if @task does not validate as a live
 * RTAI task descriptor, or the error code propagated from
 * xnpod_set_thread_periodic().
 *
 * NOTE(review): the task is resumed even when
 * xnpod_set_thread_periodic() failed -- confirm this is intended.
 */
int rt_task_make_periodic(RT_TASK *task, RTIME start_time, RTIME period)
{
	int err;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	task = rtai_h2obj_validate(task, RTAI_TASK_MAGIC, RT_TASK);

	if (!task) {
		err = -EINVAL;
		goto unlock_and_exit;
	}

	/* A start date already in the past means "start immediately". */
	if (start_time <= xntbase_get_time(rtai_tbase))
		start_time = XN_INFINITE;

	err = xnpod_set_thread_periodic(&task->thread_base, start_time, period);

	/* Drop one suspension level; resume when it reaches zero. */
	if (task->suspend_depth > 0 && --task->suspend_depth == 0) {
		xnpod_resume_thread(&task->thread_base, XNSUSP);
		xnpod_schedule();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Decrement the nested suspension count of @task, resuming it when the
 * count drops to zero.
 *
 * Returns 0 on success, -EINVAL if @task does not validate as a live
 * RTAI task descriptor. Calling this on a non-suspended task is a
 * silent no-op.
 */
int __rtai_task_resume(RT_TASK *task)
{
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	task = rtai_h2obj_validate(task, RTAI_TASK_MAGIC, RT_TASK);

	if (!task) {
		err = -EINVAL;
		goto unlock_and_exit;
	}

	/* Suspensions nest: only the last resume actually unblocks. */
	if (task->suspend_depth > 0 && --task->suspend_depth == 0) {
		xnpod_resume_thread(&task->thread_base, XNSUSP);
		xnpod_schedule();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Periodic timer expiry handler: release the owning thread from its
 * delay state when its period elapses.
 */
static void xnthread_periodic_handler(xntimer_t *timer)
{
	xnthread_t *owner = container_of(timer, xnthread_t, ptimer);

	/*
	 * Prevent unwanted round-robin, and do not wake up threads
	 * blocked on a resource: only act when the thread is delayed
	 * but not also pending on a synch object.
	 */
	if (xnthread_test_state(owner, XNDELAY|XNPEND) != XNDELAY)
		return;

	xnpod_resume_thread(owner, XNDELAY);
}
/*
 * Release @synch on behalf of the current thread and transfer
 * ownership to the next pending thread, if any.
 *
 * Returns the new owner, or NULL when the object ends up unowned
 * (including the uncontended fast path, which never takes nklock).
 */
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	/* Releasing only makes sense for ownership-tracking objects. */
	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	/*
	 * Fast path: atomically release the lock without grabbing
	 * nklock. Presumably xnsynch_fast_release() fails when waiters
	 * are recorded in the fastlock word, forcing the slow path
	 * below -- confirm against its definition.
	 */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		/* Wake up the next waiter and make it the new owner. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		/* Drop the priority boost the old owner may have
		   inherited from its waiters. */
		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		/* No waiter: the object becomes unowned. */
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		/* Publish the new owner handle to the fastlock word. */
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
/*
 * Apply a new current priority @prio to @thread and propagate the
 * change to wherever it matters: its wait channel ordering if it is
 * sleeping, the ready queue if it is runnable, or the Linux side if it
 * is a relaxed shadow.
 */
static void xnsynch_renice_thread(xnthread_t *thread, int prio)
{
	thread->cprio = prio;

	if (thread->wchan)
		/* Ignoring the XNSYNCH_DREORD flag on purpose here. */
		xnsynch_renice_sleeper(thread);
	else if (thread != xnpod_current_thread() &&
		 xnthread_test_state(thread, XNREADY))
		/* xnpod_resume_thread() must be called for runnable
		   threads but the running one. */
		xnpod_resume_thread(thread, 0);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	/* Mirror the priority change onto the relaxed shadow's host
	   context. */
	if (xnthread_test_state(thread, XNRELAX))
		xnshadow_renice(thread);
#endif /* CONFIG_XENO_OPT_PERVASIVE */
}
/*
 * pSOS t_resume() service: resume the task identified by @tid from an
 * explicit suspension (XNSUSP).
 *
 * A zero @tid designates the caller. Returns SUCCESS, ERR_NOTSUSP if
 * the task is not suspended, a pSOS object-id error from
 * psos_handle_error(), or -EPERM when called from an unblockable
 * context with tid == 0.
 *
 * NOTE(review): -EPERM is a Linux errno returned through a u_long pSOS
 * status -- confirm callers expect this mixed convention.
 */
u_long t_resume(u_long tid)
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (tid == 0) {
		if (xnpod_unblockable_p()) {
			err = -EPERM;
			goto unlock_and_exit;
		}
		/* Would be admittedly silly, but silly code does
		 * exist, and it's a matter of returning ERR_NOTSUSP
		 * instead of ERR_OBJID. */
		task = psos_current_task();
	} else {
		task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);
		if (!task) {
			err = psos_handle_error(tid, PSOS_TASK_MAGIC,
						psostask_t);
			goto unlock_and_exit;
		}
	}

	if (!xnthread_test_state(&task->threadbase, XNSUSP)) {
		err = ERR_NOTSUSP;	/* Task not suspended. */
		goto unlock_and_exit;
	}

	xnpod_resume_thread(&task->threadbase, XNSUSP);
	xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Resource timer expiry handler: flag the sleeping thread with XNTIMEO
 * and lift its delay condition so it resumes with a timeout status.
 */
static void xnthread_timeout_handler(xntimer_t *timer)
{
	xnthread_t *sleeper;

	sleeper = container_of(timer, xnthread_t, rtimer);

	/* Let the wakee know its wait timed out. */
	xnthread_set_info(sleeper, XNTIMEO);

	/* Interrupts are off. */
	xnpod_resume_thread(sleeper, XNDELAY);
}