/*
 * Release the ownership of @synch on behalf of @lastowner, then hand
 * it over to the next waiter in priority order, if any. Returns the
 * new owner, or NULL if the object is left unowned.
 */
static struct xnthread *xnsynch_release_thread(struct xnsynch *synch,
					       struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	/* Fast path: release an uncontended lock without grabbing nklock. */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);

	if (holder) {
		/* Transfer ownership to the highest-priority waiter. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			/* Drop the boost the old owner inherited from waiters. */
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
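/*
 * Caller-side sketch (hypothetical, not part of the original file):
 * the non-static wrapper xnsynch_release() is assumed to pass the
 * current thread as the last owner, so callers only need to pivot on
 * the return value to know whether a rescheduling point is required,
 * since the new owner has already been readied by xnpod_resume_thread().
 */
#if 0	/* illustration only */
static void example_mutex_unlock(struct xnsynch *synch)
{
	/* Assumed wrapper: xnsynch_release(synch) is expected to call
	   xnsynch_release_thread(synch, xnpod_current_thread()). */
	if (xnsynch_release(synch) != NULL)
		xnpod_schedule();	/* let the new owner preempt us if it outranks us */
}
#endif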
/*
 * Watchdog timer handler: counts the consecutive ticks during which
 * a non-root thread has been monopolizing the CPU; once the
 * wd_timeout_arg threshold is reached, the runaway thread is either
 * signaled (shadow threads) or deleted outright.
 */
static void xnsched_watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnpod_current_sched();
	struct xnthread *thread = sched->curr;

	if (likely(xnthread_test_state(thread, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNSHADOW) &&
	    !xnthread_amok_p(thread)) {
		trace_mark(xn_nucleus, watchdog_signal,
			   "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- signaling runaway thread "
			 "'%s'\n", xnthread_name(thread));
		xnthread_set_info(thread, XNAMOK | XNKICKED);
		xnshadow_send_sig(thread, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		trace_mark(xn_nucleus, watchdog, "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
			 xnthread_name(thread));
		xnpod_delete_thread(thread);
	}

	xnsched_reset_watchdog(sched);
}
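/*
 * For reference, the reset helper used above is assumed to simply
 * clear the per-scheduler tick counter, so that the watchdog only
 * trips after wd_timeout_arg consecutive ticks spent outside the
 * root thread (a sketch of the assumed definition, not a copy):
 */
#if 0	/* illustration only */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#endif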
/*
 * Detect when a thread is about to sleep on a synchronization
 * object currently owned by someone running in secondary mode.
 */
void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
				  struct xnthread *sleeper)
{
	/*
	 * Report only if the sleeper asked for such notifications
	 * (XNTRAPSW set) and has not been notified yet for this wait
	 * (XNSWREP clear), and the current owner is relaxed.
	 */
	if (xnthread_test_state(sleeper, XNTRAPSW|XNSWREP) == XNTRAPSW &&
	    xnthread_test_state(synch->owner, XNRELAX)) {
		xnthread_set_state(sleeper, XNSWREP);
		xnshadow_send_sig(sleeper, SIGDEBUG,
				  SIGDEBUG_MIGRATE_PRIOINV, 1);
	} else
		/* Re-arm the latch for the next offending wait. */
		xnthread_clear_state(sleeper, XNSWREP);
}
/*
 * Detect when a thread is about to relax while holding a
 * synchronization object currently claimed by another thread, which
 * bears the TWARNSW bit (thus advertising a concern about potential
 * spurious relaxes and priority inversion). By relying on the claim
 * queue, we restrict the checks to PIP-enabled objects, but that
 * already covers most of the use cases anyway.
 */
void xnsynch_detect_claimed_relax(struct xnthread *owner)
{
	struct xnpholder *hs, *ht;
	struct xnthread *sleeper;
	struct xnsynch *synch;

	for (hs = getheadpq(&owner->claimq); hs != NULL;
	     hs = nextpq(&owner->claimq, hs)) {
		synch = link2synch(hs);
		for (ht = getheadpq(&synch->pendq); ht != NULL;
		     ht = nextpq(&synch->pendq, ht)) {
			sleeper = link2thread(ht, plink);
			if (xnthread_test_state(sleeper, XNTRAPSW)) {
				xnthread_set_info(sleeper, XNSWREP);
				xnshadow_send_sig(sleeper, SIGDEBUG,
						  SIGDEBUG_MIGRATE_PRIOINV, 1);
			}
		}
	}
}
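/*
 * Userland counterpart sketch (assumed, native skin): a task opts
 * into the two diagnostics above by setting the T_WARNSW mode bit,
 * which maps to XNTRAPSW in the nucleus, and by installing a handler
 * for SIGDEBUG; the reason code (e.g. SIGDEBUG_MIGRATE_PRIOINV) is
 * carried in the siginfo payload. The calls below reflect the public
 * API as assumed, not anything defined in this file.
 */
#if 0	/* illustration only */
#include <signal.h>
#include <native/task.h>

static void sigdebug_handler(int sig, siginfo_t *si, void *context)
{
	int reason = si->si_value.sival_int;	/* SIGDEBUG_* reason code */

	/* log or abort as appropriate */
	(void)reason;
}

static void enable_relax_warnings(void)
{
	struct sigaction sa;

	sigemptyset(&sa.sa_mask);
	sa.sa_sigaction = sigdebug_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGDEBUG, &sa, NULL);

	rt_task_set_mode(0, T_WARNSW, NULL);	/* sets XNTRAPSW for the caller */
}
#endif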