/*
 * Release @synch on behalf of @lastowner (which need not be the
 * current thread) and transfer ownership to the next pending waiter,
 * if any.
 *
 * Returns the new owner thread, or NULL when the object was released
 * uncontended through the fastlock, or when nobody was waiting.
 */
static struct xnthread *xnsynch_release_thread(struct xnsynch *synch,
					       struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	/* Only meaningful for ownership-tracking synch objects. */
	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	/*
	 * Keep the resource count of XNOTHER threads balanced: a
	 * release with the count already at zero is presumably an
	 * unbalanced drop, so notify via SIGDEBUG instead of
	 * decrementing below zero — TODO(review): confirm intended
	 * semantics of SIGDEBUG_MIGRATE_PRIOINV here.
	 */
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	/*
	 * Fast path: atomically release an uncontended lock without
	 * taking nklock; falls through to the slow path when the
	 * fastlock word does not match @lastownerh (e.g. waiters
	 * claimed the lock).
	 */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);

	if (holder) {
		/* Hand the object over to the queue head. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		/* Undo any priority boost inherited through this object. */
		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		/* No waiter: the object becomes unowned. */
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		/* Publish the new owner handle in the fastlock word. */
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
/*
 * set_flg() - uITRON service: OR @setptn into eventflag @flgid, then
 * wake up every sleeper whose wait condition becomes satisfied.
 *
 * Returns E_OK on success, EN_CTXID from asynchronous context, E_ID
 * for an out-of-range id, E_NOEXS when no flag exists under @flgid.
 */
ER set_flg(ID flgid, UINT setptn)
{
	xnpholder_t *holder, *nholder;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	/* Setting no bits is a successful no-op. */
	if (setptn == 0)
		goto unlock_and_exit;

	flag->flgvalue |= setptn;

	if (!xnsynch_pended_p(&flag->synchbase))
		goto unlock_and_exit;

	/*
	 * Scan the wait queue. Waking a sleeper removes it from the
	 * queue, so xnsynch_wakeup_this_sleeper() returns the next
	 * holder; otherwise we advance with nextpq().
	 */
	nholder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

	while ((holder = nholder) != NULL) {
		uitask_t *sleeper = thread2uitask(link2thread(holder, plink));
		UINT wfmode = sleeper->wargs.flag.wfmode;
		UINT waiptn = sleeper->wargs.flag.waiptn;

		/* TWF_ORW: any requested bit suffices; else all bits. */
		if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0)
		    || (!(wfmode & TWF_ORW)
			&& ((waiptn & flag->flgvalue) == waiptn))) {
			nholder = xnsynch_wakeup_this_sleeper(&flag->synchbase, holder);
			/* Hand the satisfying pattern back to the waiter. */
			sleeper->wargs.flag.waiptn = flag->flgvalue;
			if (wfmode & TWF_CLR)
				/*
				 * Clearing takes effect immediately:
				 * later sleepers in this scan see the
				 * cleared value.
				 */
				flag->flgvalue = 0;
		} else
			nholder = nextpq(xnsynch_wait_queue(&flag->synchbase), holder);
	}

	xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Release @synch, currently owned by the calling thread, and transfer
 * ownership to the next pending waiter, if any.
 *
 * Returns the new owner thread, or NULL when the object was released
 * uncontended through the fastlock, or when nobody was waiting.
 */
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	/* Only meaningful for ownership-tracking synch objects. */
	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	/*
	 * Fast path: atomically release an uncontended lock without
	 * taking nklock; falls through when waiters claimed the lock.
	 */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);

	if (holder) {
		/* Hand the object over to the queue head. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		/*
		 * Snapshot the outgoing owner before overwriting it;
		 * it is needed below to undo any priority boost.
		 */
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		/* No waiter: the object becomes unowned. */
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		/* Publish the new owner handle in the fastlock word. */
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
/*
 * /proc read handler for a uITRON mailbox: prints the message count,
 * buffer capacity and queuing attribute, then one "+name" line per
 * waiting thread.
 */
static int __mbx_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	uimbx_t *mbx = (uimbx_t *)data;
	char *wptr = page;
	int nbytes;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	wptr += sprintf(wptr, "%d/%d message(s), attr=%s\n",
			mbx->mcount, mbx->bufcnt,
			mbx->mbxatr & TA_TPRI ? "TA_TPRI" : "TA_TFIFO");

	if (xnsynch_pended_p(&mbx->synchbase)) {
		xnpholder_t *it;

		/* Mailbox has waiters: list them all. */
		for (it = getheadpq(xnsynch_wait_queue(&mbx->synchbase));
		     it != NULL;
		     it = nextpq(xnsynch_wait_queue(&mbx->synchbase), it)) {
			xnthread_t *waiter = link2thread(it, plink);
			wptr += sprintf(wptr, "+%s\n", xnthread_name(waiter));
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	/* Standard read_proc windowing boilerplate. */
	nbytes = (wptr - page) - off;
	if (nbytes <= off + count)
		*eof = 1;
	*start = page + off;
	if (nbytes > count)
		nbytes = count;
	if (nbytes < 0)
		nbytes = 0;

	return nbytes;
}
/*
 * ref_mbx() - uITRON service: report the state of mailbox @mbxid into
 * @pk_rmbx (id of the first waiting task, extended info, head message
 * or NADR when empty).
 *
 * Returns E_OK on success, EN_CTXID from asynchronous context, E_ID
 * for an out-of-range id, E_NOEXS when no mailbox exists under @mbxid.
 */
ER ref_mbx(T_RMBX *pk_rmbx, ID mbxid)
{
	uitask_t *sleeper;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	/*
	 * Fix: validate against the mailbox id range. The original
	 * tested uITRON_MAX_FLAGID, copy-pasted from the eventflag
	 * services, which rejects/accepts the wrong id range whenever
	 * the two limits differ.
	 */
	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&mbx->synchbase)) {
		/* Report the id of the head waiting task. */
		sleeper = thread2uitask(link2thread
					(getheadpq(xnsynch_wait_queue(&mbx->synchbase)),
					 plink));
		pk_rmbx->wtsk = sleeper->id;
	} else
		pk_rmbx->wtsk = FALSE;

	pk_rmbx->exinf = mbx->exinf;
	/* NADR flags an empty mailbox. */
	pk_rmbx->pk_msg = mbx->mcount > 0 ? mbx->ring[mbx->rdptr] : (T_MSG *)NADR;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * ref_flg() - uITRON service: report the state of eventflag @flgid
 * into @pk_rflg (id of the first waiting task, extended info, current
 * flag pattern).
 *
 * Returns E_OK on success, EN_CTXID from asynchronous context, E_ID
 * for an out-of-range id, E_NOEXS when no flag exists under @flgid.
 */
ER ref_flg(T_RFLG *pk_rflg, ID flgid)
{
	uitask_t *sleeper;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);
	if (flag == NULL) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	/* Default: nobody waiting; overwritten below if pended. */
	pk_rflg->wtsk = FALSE;

	if (xnsynch_pended_p(&flag->synchbase)) {
		xnpholder_t *head = getheadpq(xnsynch_wait_queue(&flag->synchbase));
		sleeper = thread2uitask(link2thread(head, plink));
		pk_rflg->wtsk = sleeper->id;
	}

	pk_rflg->exinf = flag->exinf;
	pk_rflg->flgptn = flag->flgvalue;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Acquire ownership of @synch, sleeping up to @timeout (interpreted
 * per @timeout_mode) if another thread currently owns it.
 *
 * Returns 0 on successful acquisition, otherwise the subset of
 * XNRMID|XNTIMEO|XNBREAK left in the caller's info bits telling why
 * the wait was aborted.
 */
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	/* Only meaningful for ownership-tracking synch objects. */
	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

      redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		/* Uncontended fast path: grab the lock word atomically. */
		fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/*
		 * Set claimed bit. In case it appears to be set
		 * already, re-read its state under nklock so that we
		 * don't miss any change between the lock-less read
		 * and here. But also try to avoid cmpxchg where
		 * possible. Only if it appears not to be set, start
		 * with cmpxchg directly.
		 */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

		  test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/*
			 * The handle is broken, therefore pretend
			 * that the synch object was deleted to signal
			 * an error.
			 */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		/* No fastlock support: resolve ownership under nklock. */
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			/* Priority inheritance: boost the owner to our
			   weighted priority. */
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			/* Re-insert the object at its new claim priority. */
			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/*
		 * Somebody stole us the ownership while we were ready
		 * to run, waiting for the CPU: we need to wait again
		 * for the resource.
		 */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		/* Retry with whatever time remains on our timer. */
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

	      grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
/*
 * Common implementation for the eventflag wait services: wait on flag
 * @flgid until the bits in @waiptn satisfy @wfmode, or @tmout expires.
 * On success, *p_flgptn receives the satisfying flag pattern.
 */
static ER wai_flg_helper(UINT *p_flgptn, ID flgid, UINT waiptn, UINT wfmode, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	/* Waiting for no bits at all is a parameter error. */
	if (waiptn == 0)
		return E_PAR;

	/* TMO_FEVR waits forever, 0 polls, anything below TMO_FEVR is
	   invalid, a positive value is a relative tick count. */
	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	/* TWF_ORW: any requested bit suffices; else all bits must match. */
	if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0)
	    || (!(wfmode & TWF_ORW)
		&& ((waiptn & flag->flgvalue) == waiptn))) {
		/* Condition already satisfied: return without blocking. */
		*p_flgptn = flag->flgvalue;

		if (wfmode & TWF_CLR)
			flag->flgvalue = 0;

		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	} else if (xnsynch_pended_p(&flag->synchbase) && !(flag->flgatr & TA_WMUL)) {
		/* Only one waiter allowed unless TA_WMUL was set. */
		err = E_OBJ;
		goto unlock_and_exit;
	}

	task = ui_current_task();

	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);

	/* Stash the wait condition where set_flg() can evaluate it. */
	task->wargs.flag.wfmode = wfmode;
	task->wargs.flag.waiptn = waiptn;

	xnsynch_sleep_on(&flag->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Flag deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		/* set_flg() stored the satisfying pattern back here. */
		*p_flgptn = task->wargs.flag.waiptn;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}