/*
 * Release @synch on behalf of @lastowner, handing ownership over to the
 * thread leading the pend queue, if any. Returns the new owner, or NULL
 * if the object is now free.
 */
static struct xnthread *xnsynch_release_thread(struct xnsynch *synch,
					       struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	/*
	 * Track the resource count of threads lacking real-time
	 * guarantees (XNOTHER); an unbalanced release is flagged via
	 * SIGDEBUG.
	 */
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif

	lastownerh = xnthread_handle(lastowner);

	/* Fast path: uncontended release, nobody to wake up. */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		/* Transfer ownership to the next pending waiter. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
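/*
 * For reference, a minimal sketch of what the fast-path release used
 * above boils down to, assuming xnarch_atomic_cmpxchg() returns the
 * previous value of the lock word: the word is swapped back to
 * XN_NO_HANDLE only if it still contains the owner's unclaimed handle,
 * i.e. no waiter has set the claim bit in the meantime. The helper name
 * is hypothetical; xnsynch_fast_release() is the real primitive.
 */
static inline int fast_release_sketch(xnarch_atomic_t *fastlock,
				      xnhandle_t cur)
{
	/* Succeeds only on an uncontended, correctly-owned lock word. */
	return xnarch_atomic_cmpxchg(fastlock, cur, XN_NO_HANDLE) == cur;
}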
/*
 * Leave a monitor section. The syscall is elided entirely when no waiter
 * has to be signaled and the caller may release from user space.
 */
int cobalt_monitor_exit(cobalt_monitor_t *mon)
{
	struct cobalt_monitor_data *datp;
	unsigned long status;
	xnhandle_t cur;

	/* Full barrier: commit in-section writes before testing flags. */
	__sync_synchronize();

	datp = get_monitor_data(mon);
	if ((datp->flags & COBALT_MONITOR_PENDED) &&
	    (datp->flags & COBALT_MONITOR_SIGNALED))
		/* Waiters were signaled: wake them up via the kernel. */
		goto syscall;

	status = cobalt_get_current_mode();
	if (status & XNWEAK)
		/* Weakly-scheduled threads must release via the kernel. */
		goto syscall;

	cur = cobalt_get_current();
	if (xnsynch_fast_release(&datp->owner, cur))
		return 0;
syscall:
	return XENOMAI_SKINCALL1(__cobalt_muxid,
				 sc_cobalt_monitor_exit,
				 mon);
}
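/*
 * The fast exit above can only work if the monitor was grabbed through a
 * matching fast-path acquisition. A minimal sketch of that acquire side,
 * assuming the same XN_NO_HANDLE-means-free convention; the helper name
 * and the -EBUSY return are illustrative, not the library's definition.
 */
static inline int fast_acquire_sketch(xnarch_atomic_t *fastlock,
				      xnhandle_t cur)
{
	xnhandle_t h;

	h = xnarch_atomic_cmpxchg(fastlock, XN_NO_HANDLE, cur);
	if (h == XN_NO_HANDLE)
		return 0;	/* was free: current thread now owns it */

	return -EBUSY;		/* contended: caller falls back to a syscall */
}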
int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	xnarch_atomic_t *ownerp;
	unsigned long status;
	xnhandle_t cur;
	spl_t s;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out_err;
	}

	/*
	 * XNOTHER threads must release through the kernel, so that the
	 * resource count used for priority-inversion debugging stays
	 * balanced.
	 */
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	ownerp = get_ownerp(shadow);

	err = xnsynch_fast_owner_check(ownerp, cur);
	if (unlikely(err))
		goto out_err;

	/* Recursive mutex still locked: just drop one level. */
	if (shadow->lockcnt > 1) {
		--shadow->lockcnt;
		goto out;
	}

	if (likely(xnsynch_fast_release(ownerp, cur))) {
	  out:
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_unlock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out_err:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
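/*
 * Usage sketch: applications keep calling the plain POSIX API; the skin
 * interposes the __wrap_* variants at link time (GNU ld --wrap), so an
 * uncontended unlock by the owner completes in the cmpxchg fast path
 * above without entering the kernel.
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void update_shared_state(void)
{
	pthread_mutex_lock(&lock);	/* routed to __wrap_pthread_mutex_lock */
	/* ... touch data guarded by the mutex ... */
	pthread_mutex_unlock(&lock);	/* fast path when uncontended */
}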
/*
 * Variant releasing on behalf of the current thread; the previous owner
 * is fetched from the synch object itself before ownership is handed
 * over.
 */
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	/* Fast path: uncontended release, nobody to wake up. */
	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		/* Transfer ownership to the next pending waiter. */
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
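/*
 * Both release variants stuff a "claim" bit into the handle written to
 * the lock word when waiters remain pended, which is what keeps the next
 * unlocker out of the fast path. A sketch of that encoding; the bit
 * position and helper names are assumptions for illustration, the real
 * helpers being xnsynch_fast_set_claimed() and friends.
 */
#define FASTLOCK_CLAIMED_BIT	(1UL << 31)	/* assumed claim-bit position */

static inline xnhandle_t fast_set_claimed_sketch(xnhandle_t handle, int claimed)
{
	return claimed ? (handle | FASTLOCK_CLAIMED_BIT) : handle;
}

static inline int fast_claimed_sketch(xnhandle_t handle)
{
	return (handle & FASTLOCK_CLAIMED_BIT) != 0;
}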