/*
 * Unlock a Xenomai shadow mutex.
 *
 * Fast path (CONFIG_XENO_FASTSYNCH): a Xenomai thread that is not an
 * XNOTHER (non-RT) shadow releases the fast lock word atomically in
 * user space, without a syscall.  Recursive mutexes held more than
 * once only decrement the lock count.  All other cases fall back to
 * the __pse51_mutex_unlock syscall, restarted on -EINTR.
 *
 * Returns 0 on success, or a positive errno value (EPERM if the
 * caller is not a Xenomai thread, EINVAL on a bad/stale mutex, ...).
 *
 * NOTE(review): the 's' argument of cb_try_read_lock/cb_read_unlock
 * has no visible declaration in this excerpt — presumably a
 * DECLARE_CB_LOCK_FLAGS(s) lives nearby or inside the macros; verify
 * against the full file.
 */
int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	xnarch_atomic_t *ownerp;
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai threads may unlock a Xenomai mutex. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	/* Reject destroyed or never-initialized mutexes. */
	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out_err;
	}

	/*
	 * Non-RT shadows (XNOTHER) must release through the kernel so
	 * resource ownership tracking (auto-relax) stays consistent.
	 */
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	ownerp = get_ownerp(shadow);

	/* Fail unless the caller actually owns the fast lock word. */
	err = xnsynch_fast_owner_check(ownerp, cur);
	if (unlikely(err))
		goto out_err;

	/* Recursive mutex still held more than once: just decrement. */
	if (shadow->lockcnt > 1) {
		--shadow->lockcnt;
		goto out;
	}

	/*
	 * Fast release succeeds only when no waiter is pending in the
	 * kernel; otherwise fall through to the syscall to wake one up.
	 */
	if (likely(xnsynch_fast_release(ownerp, cur))) {
	  out:
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: let the kernel release the mutex; restart on signals. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_unlock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out_err:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
/*
 * putchar() wrapper: a Xenomai thread running in primary mode emits
 * through the rt_print machinery; every other context flushes the
 * rt_print buffers and defers to the regular libc implementation.
 */
int __wrap_putchar(int c)
{
	if (likely(xeno_get_current() == XN_NO_HANDLE ||
		   (xeno_get_current_mode() & XNRELAX))) {
		rt_print_flush_buffers();
		return __real_putchar(c);
	}

	return rt_putchar(c);
}
/*
 * fputc() wrapper: a Xenomai thread running in primary mode emits
 * through the rt_print machinery; every other context flushes the
 * rt_print buffers and defers to the regular libc implementation.
 */
int __wrap_fputc(int c, FILE *stream)
{
	if (likely(xeno_get_current() == XN_NO_HANDLE ||
		   (xeno_get_current_mode() & XNRELAX))) {
		rt_print_flush_buffers();
		return __real_fputc(c, stream);
	}

	return rt_fputc(c, stream);
}
/*
 * vfprintf() wrapper: a Xenomai thread running in primary mode formats
 * through the rt_print machinery; every other context flushes the
 * rt_print buffers and defers to the regular libc implementation.
 */
int __wrap_vfprintf(FILE *stream, const char *fmt, va_list args)
{
	if (likely(xeno_get_current() == XN_NO_HANDLE ||
		   (xeno_get_current_mode() & XNRELAX))) {
		rt_print_flush_buffers();
		return __real_vfprintf(stream, fmt, args);
	}

	return rt_vfprintf(stream, fmt, args);
}
/*
 * vsyslog() wrapper: a Xenomai thread running in primary mode logs
 * through the rt_print machinery; every other context flushes the
 * rt_print buffers and defers to the regular libc implementation.
 */
void __wrap_vsyslog(int priority, const char *fmt, va_list ap)
{
	if (likely(xeno_get_current() == XN_NO_HANDLE ||
		   (xeno_get_current_mode() & XNRELAX))) {
		rt_print_flush_buffers();
		__real_vsyslog(priority, fmt, ap);
		return;
	}

	rt_vsyslog(priority, fmt, ap);
}
/*
 * puts() wrapper: a Xenomai thread running in primary mode emits
 * through the rt_print machinery; every other context flushes the
 * rt_print buffers and defers to the regular libc implementation.
 *
 * Fix: the non-RT branch must call __real_puts().  A plain puts()
 * call here resolves back to __wrap_puts() under the linker's
 * --wrap=puts redirection, recursing forever.  This also matches the
 * other __wrap_* siblings in this file, which all call __real_*.
 */
int __wrap_puts(const char *s)
{
	if (unlikely(xeno_get_current() != XN_NO_HANDLE &&
		     !(xeno_get_current_mode() & XNRELAX)))
		return rt_puts(s);

	rt_print_flush_buffers();
	return __real_puts(s);
}
/*
 * fwrite() wrapper: a Xenomai thread running in primary mode writes
 * through the rt_print machinery; every other context flushes the
 * rt_print buffers and defers to the regular libc implementation.
 */
size_t __wrap_fwrite(void *ptr, size_t size, size_t nmemb, FILE *stream)
{
	if (likely(xeno_get_current() == XN_NO_HANDLE ||
		   (xeno_get_current_mode() & XNRELAX))) {
		rt_print_flush_buffers();
		return __real_fwrite(ptr, size, nmemb, stream);
	}

	return rt_fwrite(ptr, size, nmemb, stream);
}
/*
 * __vsyslog_chk() wrapper (fortified vsyslog): a Xenomai thread in
 * primary mode logs through the rt_print machinery; every other
 * context flushes the rt_print buffers and defers to libc.  Without
 * CONFIG_XENO_FORTIFY there is no RT-safe fortified path, so report
 * the build requirement and abort.
 */
void __wrap___vsyslog_chk(int pri, int flag, const char *fmt, va_list ap)
{
#ifdef CONFIG_XENO_FORTIFY
	if (likely(xeno_get_current() == XN_NO_HANDLE ||
		   (xeno_get_current_mode() & XNRELAX))) {
		rt_print_flush_buffers();
		__real___vsyslog_chk(pri, flag, fmt, ap);
		return;
	}

	__rt_vsyslog_chk(pri, flag, fmt, ap);
#else
	__wrap_fprintf(stderr,
		       "Xenomai needs to be compiled with --enable-fortify "
		       "to support applications\ncompiled with "
		       "-D_FORTIFY_SOURCE\n");
	exit(EXIT_FAILURE);
#endif
}
/* * Note: Works without syscalls but may not catch all errors when used inside * TSD destructors (as registered via pthread_key_create) when TLS support * (__thread) is disabled. */ void assert_nrt_fast(void) { if (unlikely(xeno_get_current_fast() != XN_NO_HANDLE && !(xeno_get_current_mode() & XNRELAX))) assert_nrt_inner(); }
/*
 * Try to lock a Xenomai shadow mutex without blocking.
 *
 * Fast path (CONFIG_XENO_FASTSYNCH): Xenomai threads that are not
 * XNOTHER shadows attempt an atomic acquisition of the fast lock word
 * in user space; relaxed threads migrate to primary mode first.  A
 * recursive mutex already owned by the caller just bumps its count.
 * XNOTHER shadows always use the __pse51_mutex_trylock syscall.
 *
 * Returns 0 on success, or a positive errno value (EPERM, EINVAL,
 * EBUSY, EAGAIN on recursion-count overflow).
 *
 * NOTE(review): the 's' argument of cb_try_read_lock/cb_read_unlock
 * has no visible declaration in this excerpt — presumably a
 * DECLARE_CB_LOCK_FLAGS(s) lives nearby or inside the macros; verify
 * against the full file.
 */
int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai threads may use this wrapper. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	/*
	 * Non-RT shadows (XNOTHER) must acquire through the kernel so
	 * resource ownership tracking (auto-relax) stays consistent.
	 */
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	/* Reject destroyed or never-initialized mutexes. */
	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * A relaxed Xenomai thread migrates to primary mode before
	 * touching the fast lock word; restart migration on signals.
	 */
	if (unlikely(status & XNRELAX)) {
		do {
			err = XENOMAI_SYSCALL1(__xn_sys_migrate,
					       XENOMAI_XENO_DOMAIN);
		} while (err == -EINTR);

		if (err < 0)
			goto out;
	}

	err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

	if (likely(!err)) {
		shadow->lockcnt = 1;
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

	/*
	 * -EBUSY from xnsynch_fast_acquire means the caller already
	 * owns the lock: a recursive mutex bumps its count (watching
	 * for overflow); anything else reports EBUSY.
	 */
	if (err == -EBUSY && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE) {
		if (shadow->lockcnt == UINT_MAX)
			err = -EAGAIN;
		else {
			++shadow->lockcnt;
			err = 0;
		}
	} else
		err = -EBUSY;

  out:
	cb_read_unlock(&shadow->lock, s);
	return -err;

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: kernel trylock; restart on signals. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_trylock, shadow);
	} while (err == -EINTR);

	return -err;
}
/*
 * Lock a Xenomai shadow mutex, giving up once the absolute timeout
 * @to expires.
 *
 * Fast path (CONFIG_XENO_FASTSYNCH): Xenomai threads in primary mode
 * that are not XNOTHER shadows attempt an atomic acquisition of the
 * fast lock word in user space; error-checking mutexes report
 * EDEADLK on self-relock, recursive mutexes bump their count.  All
 * other cases (including contention) fall back to the
 * __pse51_mutex_timedlock syscall, restarted on -EINTR.
 *
 * Returns 0 on success, or a positive errno value (EPERM, EINVAL,
 * EDEADLK, EAGAIN, ETIMEDOUT from the syscall, ...).
 *
 * Fix: the recursive relock fast path incremented lockcnt but left
 * err at -EBUSY, so a successful recursive acquisition returned
 * EBUSY.  Clear err before branching to out, exactly as
 * __wrap_pthread_mutex_lock() and __wrap_pthread_mutex_trylock() do,
 * and as POSIX requires for a recursive mutex already owned by the
 * caller.
 *
 * NOTE(review): the 's' argument of cb_try_read_lock/cb_read_unlock
 * has no visible declaration in this excerpt — presumably a
 * DECLARE_CB_LOCK_FLAGS(s) lives nearby or inside the macros; verify
 * against the full file.
 */
int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
				   const struct timespec *to)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai threads may use this wrapper. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	/* Reject destroyed or never-initialized mutexes. */
	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/* See __wrap_pthread_mutex_lock() */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

		if (likely(!err)) {
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		/* -EBUSY means the caller already owns the lock. */
		if (err == -EBUSY)
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				/* Self-deadlock: block in the kernel. */
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}

				++shadow->lockcnt;
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: block in the kernel until acquisition or timeout;
	   restart on signals. */
	do {
		err = XENOMAI_SKINCALL2(__pse51_muxid,
					__pse51_mutex_timedlock, shadow, to);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
/*
 * Lock a Xenomai shadow mutex, blocking until it is acquired.
 *
 * Fast path (CONFIG_XENO_FASTSYNCH): Xenomai threads in primary mode
 * that are not XNOTHER shadows attempt an atomic acquisition of the
 * fast lock word in user space; error-checking mutexes report
 * EDEADLK on self-relock, recursive mutexes bump their count.  All
 * other cases (including contention) fall back to the
 * __pse51_mutex_lock syscall, restarted on -EINTR.
 *
 * Returns 0 on success, or a positive errno value (EPERM, EINVAL,
 * EDEADLK, EAGAIN on recursion-count overflow, ...).
 *
 * NOTE(review): the 's' argument of cb_try_read_lock/cb_read_unlock
 * has no visible declaration in this excerpt — presumably a
 * DECLARE_CB_LOCK_FLAGS(s) lives nearby or inside the macros; verify
 * against the full file.
 */
int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai threads may use this wrapper. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	/* Reject destroyed or never-initialized mutexes. */
	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * We track resource ownership for non real-time shadows in
	 * order to handle the auto-relax feature, so we must always
	 * obtain them via a syscall.
	 */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

		if (likely(!err)) {
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		/* -EBUSY means the caller already owns the lock. */
		if (err == -EBUSY)
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				/* Self-deadlock: block in the kernel. */
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}

				++shadow->lockcnt;
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: block in the kernel; restart on signals. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,__pse51_mutex_lock,shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}