/*
 * Acquire a Cobalt monitor, trying a syscall-less fast path first.
 *
 * Fast path: when the caller runs in primary mode and is not a weakly
 * scheduled thread, attempt an atomic user-space acquisition of the
 * monitor's owner word. On success, clear any pending signal/broadcast
 * flags and return without entering the kernel.
 *
 * Slow path: jump to the kernel and block until entry is granted.
 *
 * Returns 0 on success, a negative error code otherwise (propagated
 * from the kernel).
 */
int cobalt_monitor_enter(cobalt_monitor_t *mon)
{
	struct cobalt_monitor_data *datp;
	unsigned long status;
	int ret, oldtype;
	xnhandle_t cur;

	/*
	 * Assumptions on entry:
	 *
	 * - this is a Xenomai shadow (caller checked this).
	 * - no recursive entry/locking.
	 */

	status = cobalt_get_current_mode();
	/*
	 * Relaxed (secondary mode) or weak threads must always go
	 * through the kernel, which tracks ownership for them.
	 */
	if (status & (XNRELAX|XNWEAK))
		goto syscall;

	datp = get_monitor_data(mon);
	cur = cobalt_get_current();
	/* Try to grab the owner word atomically from user space. */
	ret = xnsynch_fast_acquire(&datp->owner, cur);
	if (ret == 0) {
		/* We own the gate: reset event notification state. */
		datp->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
		return 0;
	}
syscall:
	/*
	 * Allow immediate cancellation while blocked in the kernel;
	 * restore the caller's cancel type afterwards.
	 */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/*
	 * Jump to kernel to wait for entry. We redo in case of
	 * interrupt.
	 */
	do
		ret = XENOMAI_SKINCALL1(__cobalt_muxid,
					sc_cobalt_monitor_enter,
					mon);
	while (ret == -EINTR);

	pthread_setcanceltype(oldtype, NULL);

	return ret;
}
/*
 * Timed mutex lock wrapper with a syscall-less fast path.
 *
 * Mirrors __wrap_pthread_mutex_lock(): when the caller is a primary-mode
 * Xenomai shadow, try to grab the mutex atomically from user space; fall
 * back to the kernel (which honors the @to absolute timeout) otherwise.
 *
 * @mutex: wrapped POSIX mutex (shadow of a kernel-side object).
 * @to:    absolute timeout, passed through to the kernel on the slow path.
 *
 * Returns 0 on success or a positive errno value (POSIX convention).
 *
 * Fix vs. previous revision: the PTHREAD_MUTEX_RECURSIVE relock path
 * used to leave err == -EBUSY after bumping the lock count, so a
 * successful recursive acquisition wrongly returned EBUSY. We now set
 * err = 0 there, matching __wrap_pthread_mutex_lock().
 */
int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
				   const struct timespec *to)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		/* Not a Xenomai shadow: cannot use this service. */
		return EPERM;

	status = xeno_get_current_mode();

	/* Guard against concurrent destruction of the shadow. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/* See __wrap_pthread_mutex_lock() */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
		if (likely(!err)) {
			/* Fast acquisition succeeded: first lock level. */
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		if (err == -EBUSY)
			/* We already own the mutex: per-type handling. */
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				/* Deadlock by design: let the kernel block us. */
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}
				++shadow->lockcnt;
				/* Successful recursive relock. */
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: block in kernel space, restarting on signal. */
	do {
		err = XENOMAI_SKINCALL2(__pse51_muxid,
					__pse51_mutex_timedlock, shadow, to);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
/*
 * Non-blocking mutex lock wrapper with a syscall-less fast path.
 *
 * Tries to acquire the mutex atomically from user space; if the mutex
 * is unavailable, returns EBUSY instead of blocking. Migrates the
 * caller to primary mode first when needed, since the fast acquisition
 * path requires it.
 *
 * Returns 0 on success or a positive errno value (POSIX convention).
 */
int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		/* Not a Xenomai shadow: cannot use this service. */
		return EPERM;

	status = xeno_get_current_mode();
	/* Weakly scheduled threads always go through the kernel. */
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	/* Guard against concurrent destruction of the shadow. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(status & XNRELAX)) {
		/*
		 * Relaxed caller: switch back to primary mode before
		 * attempting the fast acquisition, retrying if the
		 * migration is interrupted by a signal.
		 */
		do {
			err = XENOMAI_SYSCALL1(__xn_sys_migrate,
					       XENOMAI_XENO_DOMAIN);
		} while (err == -EINTR);
		if (err < 0)
			goto out;
	}

	err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

	if (likely(!err)) {
		/* Fast acquisition succeeded: first lock level. */
		shadow->lockcnt = 1;
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

	/*
	 * -EBUSY from xnsynch_fast_acquire means we already own the
	 * mutex: only a recursive mutex may be relocked; every other
	 * failure collapses to EBUSY, as POSIX mandates for trylock.
	 */
	if (err == -EBUSY && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE) {
		if (shadow->lockcnt == UINT_MAX)
			err = -EAGAIN;
		else {
			++shadow->lockcnt;
			err = 0;
		}
	} else
		err = -EBUSY;

  out:
	cb_read_unlock(&shadow->lock, s);

	return -err;

  do_syscall:
#endif /* !CONFIG_XENO_FASTSYNCH */

	/* Slow path: let the kernel perform the trylock, restarting on signal. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_trylock, shadow);
	} while (err == -EINTR);

	return -err;
}
/*
 * Blocking mutex lock wrapper with a syscall-less fast path.
 *
 * When the caller is a primary-mode Xenomai shadow, try to grab the
 * mutex atomically from user space; fall back to blocking in the
 * kernel otherwise.
 *
 * Returns 0 on success or a positive errno value (POSIX convention).
 */
int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		/* Not a Xenomai shadow: cannot use this service. */
		return EPERM;

	status = xeno_get_current_mode();

	/* Guard against concurrent destruction of the shadow. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * We track resource ownership for non real-time shadows in
	 * order to handle the auto-relax feature, so we must always
	 * obtain them via a syscall.
	 */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);
		if (likely(!err)) {
			/* Fast acquisition succeeded: first lock level. */
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		if (err == -EBUSY)
			/* We already own the mutex: per-type handling. */
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				/* Deadlock by design: let the kernel block us. */
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}
				/* Successful recursive relock. */
				++shadow->lockcnt;
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: block in kernel space, restarting on signal. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,__pse51_mutex_lock,shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}