/*
 * Extended scheduling-parameter setter for Xenomai's POSIX (pse51) skin.
 *
 * Forwards the request to the nucleus via the skin conduit; on success
 * the target thread may be "promoted" to a real-time shadow, in which
 * case the current context is bound to the nucleus before optionally
 * migrating to the Xenomai domain.
 *
 * @param thread target thread.
 * @param policy scheduling policy (SCHED_OTHER, SCHED_FIFO, ...).
 * @param param  extended scheduling parameters.
 * @return 0 on success, a positive errno value otherwise.
 */
int pthread_setschedparam_ex(pthread_t thread, int policy,
			     const struct sched_param_ex *param)
{
	pthread_t myself = pthread_self();
	struct sched_param short_param;
	unsigned long mode_offset;
	int err, promoted;

	/*
	 * When changing our own parameters, make sure the current stack
	 * pages are committed before entering primary mode
	 * (xeno_fault_stack() presumably pre-faults them — confirm
	 * against its definition).
	 */
	if (thread == myself)
		xeno_fault_stack();

	/* Kernel returns a negative errno; flip the sign for POSIX style. */
	err = -XENOMAI_SKINCALL5(__pse51_muxid,
				 __pse51_thread_setschedparam_ex,
				 thread, policy, param,
				 &mode_offset, &promoted);

	/*
	 * EPERM means the thread is not managed by the skin: fall back
	 * to the plain glibc service with the truncated parameter set.
	 */
	if (err == EPERM) {
		short_param.sched_priority = param->sched_priority;
		return __STD(pthread_setschedparam(thread, policy, &short_param));
	}

	if (!err && promoted) {
		/* Bind the promoted thread to the nucleus: the order of
		   these three calls matters — keep it. */
		xeno_sigshadow_install_once();
		xeno_set_current();
		xeno_set_current_mode(mode_offset);
		/* Real-time policies run in the Xenomai domain. */
		if (policy != SCHED_OTHER)
			XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN);
	}

	return err;
}
/*
 * VRTX sc_fpend() wrapper: pend on an event flag group.
 *
 * @param fid     flag group identifier.
 * @param timeout wait limit.
 * @param mask    flag mask to wait for.
 * @param opt     wait options.
 * @param errp    out: status code from the kernel service.
 * @return the mask value reported back by the kernel (0 on failure).
 */
int sc_fpend(int fid, long timeout, int mask, int opt, int *errp)
{
	int incoming = 0;

	/* The satisfied mask is returned through the fifth slot. */
	*errp = XENOMAI_SKINCALL5(__vrtx_muxid, __vrtx_fpend,
				  fid, timeout, mask, opt, &incoming);

	return incoming;
}
/*
 * pSOS q_vreceive() wrapper: receive a variable-length message.
 *
 * @param qid      queue identifier.
 * @param flags    receive flags.
 * @param timeout  wait limit.
 * @param msgbuf_r buffer receiving the message body.
 * @param buflen   size of @msgbuf_r.
 * @param msglen_r out: actual message length.
 * @return the kernel service status.
 */
u_long q_vreceive(u_long qid, u_long flags, u_long timeout,
		  void *msgbuf_r, u_long buflen, u_long *msglen_r)
{
	/*
	 * Pack flags and timeout into a single struct so the request
	 * fits into the 5 argument slots XENOMAI_SKINCALL5 provides.
	 *
	 * BUGFIX: the struct was previously passed to the kernel
	 * uninitialized — flags/timeout were never copied in.
	 */
	struct {
		u_long flags;
		u_long timeout;
	} modifiers = { flags, timeout };

	return XENOMAI_SKINCALL5(__psos_muxid, __psos_q_vreceive,
				 qid, &modifiers, msgbuf_r, buflen, msglen_r);
}
/*
 * VxWorks msgQReceive() wrapper: fetch a message from a queue.
 *
 * @param qid     message queue identifier.
 * @param buf     buffer receiving the message.
 * @param nbytes  capacity of @buf.
 * @param timeout wait limit.
 * @return the number of bytes received, or ERROR with errno set.
 */
int msgQReceive(MSG_Q_ID qid, char *buf, UINT nbytes, int timeout)
{
	int rbytes, ret;

	ret = XENOMAI_SKINCALL5(__vxworks_muxid, __vxworks_msgq_receive,
				qid, buf, nbytes, timeout, &rbytes);
	if (ret == 0)
		return rbytes;

	/* Conduit returns a signed status; report its magnitude. */
	errno = abs(ret);
	return ERROR;
}
/*
 * VxWorks msgQSend() wrapper: post a message to a queue.
 *
 * @param qid     message queue identifier.
 * @param buf     message body.
 * @param nbytes  message length.
 * @param timeout wait limit.
 * @param prio    message priority.
 * @return OK on success, ERROR with errno set otherwise.
 */
STATUS msgQSend(MSG_Q_ID qid, const char *buf, UINT nbytes,
		int timeout, int prio)
{
	int ret;

	ret = XENOMAI_SKINCALL5(__vxworks_muxid, __vxworks_msgq_send,
				qid, buf, nbytes, timeout, prio);
	if (ret == 0)
		return OK;

	/* Conduit returns a signed status; report its magnitude. */
	errno = abs(ret);
	return ERROR;
}
/*
 * Cobalt event wait: block until the requested bits are signaled.
 *
 * @param event   event object.
 * @param bits    bit set to wait for.
 * @param bits_r  out: bits actually received.
 * @param mode    wait mode flags.
 * @param timeout absolute/relative limit (semantics defined kernel-side).
 * @return the kernel service status.
 */
int cobalt_event_wait(cobalt_event_t *event, unsigned long bits,
		      unsigned long *bits_r, int mode,
		      const struct timespec *timeout)
{
	int status, prevtype;

	/* Allow asynchronous cancellation while blocked in the kernel. */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &prevtype);

	status = XENOMAI_SKINCALL5(__cobalt_muxid, sc_cobalt_event_wait,
				   event, bits, bits_r, mode, timeout);

	pthread_setcanceltype(prevtype, NULL);

	return status;
}
/*
 * Native skin rt_queue_create(): create a shared message queue and map
 * its pool into the caller's address space.
 *
 * @param q        descriptor to initialize.
 * @param name     symbolic name (may be anonymous).
 * @param poolsize memory pool size in bytes.
 * @param qlimit   maximum number of pending messages.
 * @param mode     creation flags; Q_SHARED is forced on.
 * @return 0 on success, a kernel status code otherwise.
 */
int rt_queue_create(RT_QUEUE *q, const char *name,
		    size_t poolsize, size_t qlimit, int mode)
{
	RT_QUEUE_PLACEHOLDER ph;
	int ret;

	ret = XENOMAI_SKINCALL5(__native_muxid, __native_queue_create,
				&ph, name, poolsize, qlimit, mode | Q_SHARED);
	if (ret)
		return ret;

	ret = __map_queue_memory(q, &ph);
	if (ret)
		/*
		 * Mapping the pool failed: delete the kernel object so
		 * we do not leave a dangling queue behind.
		 */
		XENOMAI_SKINCALL1(__native_muxid, __native_queue_delete, &ph);

	return ret;
}
int __wrap_pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param) { pthread_t myself = pthread_self(); int err, promoted; err = -XENOMAI_SKINCALL5(__pse51_muxid, __pse51_thread_setschedparam, thread, policy, param, myself, &promoted); if (err == EPERM) return __real_pthread_setschedparam(thread, policy, param); else __real_pthread_setschedparam(thread, policy, param); if (!err && promoted) { old_sigharden_handler = signal(SIGHARDEN, &__pthread_sigharden_handler); if (policy != SCHED_OTHER) XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN); } return err; }
/*
 * Native skin rt_cond_wait(): wait on a condition variable with a
 * relative timeout, releasing @mutex while blocked.
 *
 * The wait is split into a kernel prologue/epilogue pair so that an
 * interrupted wait (EINTR) can re-acquire the mutex without losing the
 * saved lock nesting count. The cleanup handler covers cancellation
 * during the prologue. Statement order here is load-bearing — do not
 * reorder the cancel-type/cleanup bracketing.
 *
 * @param cond    condition variable.
 * @param mutex   mutex protecting the predicate (held on entry/exit).
 * @param timeout relative wait limit.
 * @return 0 on success, a kernel status code otherwise.
 */
int rt_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
{
	struct rt_cond_cleanup_t c = {
		.mutex = mutex,
	};
	int err, oldtype;

	/* Ensure the mutex is restored if we are cancelled mid-wait. */
	pthread_cleanup_push(&__rt_cond_cleanup, &c);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

#ifdef CONFIG_XENO_FASTSYNCH
	/* Snapshot the fast-lock nesting count before releasing. */
	c.saved_lockcnt = mutex->lockcnt;
#endif /* CONFIG_XENO_FASTSYNCH */

	err = XENOMAI_SKINCALL5(__native_muxid,
				__native_cond_wait_prologue, cond, mutex,
				&c.saved_lockcnt, XN_RELATIVE, &timeout);

	pthread_setcanceltype(oldtype, NULL);

	pthread_cleanup_pop(0);

	/* Interrupted wait: retry re-acquiring the mutex kernel-side. */
	while (err == -EINTR)
		err = XENOMAI_SKINCALL2(__native_muxid,
					__native_cond_wait_epilogue, mutex,
					c.saved_lockcnt);

#ifdef CONFIG_XENO_FASTSYNCH
	/* Restore the nesting count on the re-acquired mutex. */
	mutex->lockcnt = c.saved_lockcnt;
#endif /* CONFIG_XENO_FASTSYNCH */

	pthread_testcancel();

	/* GNU ?: extension: prologue status, else cleanup status. */
	return err ?: c.err;
}
#include <pthread.h> #include <posix/syscall.h> #include <sys/select.h> extern int __pse51_muxid; int __wrap_select (int __nfds, fd_set *__restrict __readfds, fd_set *__restrict __writefds, fd_set *__restrict __exceptfds, struct timeval *__restrict __timeout) { int err, oldtype; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype); err = XENOMAI_SKINCALL5(__pse51_muxid, __pse51_select, __nfds, __readfds, __writefds, __exceptfds, __timeout); pthread_setcanceltype(oldtype, NULL); if (err == -EBADF || err == -EPERM || err == -ENOSYS) return __real_select(__nfds, __readfds, __writefds, __exceptfds, __timeout); if (err >= 0) return err; errno = -err; return -1; }
/*
 * pSOS q_vcreate() wrapper: create a variable-length message queue.
 *
 * NOTE(review): the conduit passes (name, maxnum, maxlen, flags) — a
 * different order than this function's parameter list. This presumably
 * matches the kernel-side unmarshalling; verify against the skin's
 * syscall handler before touching the order.
 *
 * @param name   symbolic queue name.
 * @param flags  creation flags.
 * @param maxnum maximum number of messages.
 * @param maxlen maximum message length.
 * @param qid_r  out: created queue identifier.
 * @return the kernel service status.
 */
u_long q_vcreate(const char *name, u_long flags, u_long maxnum,
		 u_long maxlen, u_long *qid_r)
{
	return XENOMAI_SKINCALL5(__psos_muxid, __psos_q_vcreate,
				 name, maxnum, maxlen, flags, qid_r);
}
/*
 * pthread_cond_wait() override for the pse51 skin.
 *
 * The wait is a kernel prologue/epilogue pair so an EINTR'd wait can
 * re-acquire the mutex with the saved recursion count. Statically
 * initialized condvars are lazily registered via cond_autoinit().
 * The cb_* read lock guards the shadow mutex control block; `s` is
 * presumably a flags variable supplied by those macros — confirm
 * against cb_lock.h. Statement order is load-bearing.
 *
 * @return 0 on success, a positive errno value otherwise.
 */
int __wrap_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	struct pse51_cond_cleanup_t c = {
		.cond = (union __xeno_cond *)cond,
		.mutex = (union __xeno_mutex *)mutex,
	};
	int err, oldtype;

	/* Lazily register a statically-initialized condvar. */
	if (unlikely(c.cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

start:
	if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
		return EINVAL;

	/* Restore the mutex if we are cancelled inside the prologue. */
	pthread_cleanup_push(&__pthread_cond_cleanup, &c);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/* Kernel returns a negative errno; flip the sign. No timeout. */
	err = -XENOMAI_SKINCALL5(__pse51_muxid,
				 __pse51_cond_wait_prologue,
				 &c.cond->shadow_cond,
				 &c.mutex->shadow_mutex, &c.count, 0, NULL);

	pthread_setcanceltype(oldtype, NULL);

	pthread_cleanup_pop(0);

	/* Interrupted: re-acquire the mutex with the saved count. */
	while (err == EINTR)
		err = -XENOMAI_SKINCALL3(__pse51_muxid,
					 __pse51_cond_wait_epilogue,
					 &c.cond->shadow_cond,
					 &c.mutex->shadow_mutex, c.count);

	cb_read_unlock(&c.mutex->shadow_mutex.lock, s);

	pthread_testcancel();

	/* GNU ?: extension: prologue status, else cleanup status. */
	return err ?: c.err;

autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

/*
 * pthread_cond_timedwait() override: identical protocol to
 * __wrap_pthread_cond_wait() above, but passes an absolute deadline
 * (timed flag = 1) to the kernel prologue.
 *
 * @return 0 on success, a positive errno value otherwise.
 */
int __wrap_pthread_cond_timedwait(pthread_cond_t * cond,
				  pthread_mutex_t * mutex,
				  const struct timespec *abstime)
{
	struct pse51_cond_cleanup_t c = {
		.cond = (union __xeno_cond *)cond,
		.mutex = (union __xeno_mutex *)mutex,
	};
	int err, oldtype;

	/* Lazily register a statically-initialized condvar. */
	if (unlikely(c.cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

start:
	if (cb_try_read_lock(&c.mutex->shadow_mutex.lock, s))
		return EINVAL;

	pthread_cleanup_push(&__pthread_cond_cleanup, &c);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/* Timed variant: flag = 1, deadline = abstime. */
	err = -XENOMAI_SKINCALL5(__pse51_muxid,
				 __pse51_cond_wait_prologue,
				 &c.cond->shadow_cond,
				 &c.mutex->shadow_mutex, &c.count, 1, abstime);

	pthread_setcanceltype(oldtype, NULL);

	pthread_cleanup_pop(0);

	while (err == EINTR)
		err = -XENOMAI_SKINCALL3(__pse51_muxid,
					 __pse51_cond_wait_epilogue,
					 &c.cond->shadow_cond,
					 &c.mutex->shadow_mutex, c.count);

	cb_read_unlock(&c.mutex->shadow_mutex.lock, s);

	pthread_testcancel();

	return err ?: c.err;

autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

/*
 * pthread_cond_signal() override: wake one waiter via the nucleus,
 * lazily registering a statically-initialized condvar first.
 */
int __wrap_pthread_cond_signal(pthread_cond_t * cond)
{
	union __xeno_cond *_cond = (union __xeno_cond *)cond;
	int err;

	if (unlikely(_cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

start:
	return -XENOMAI_SKINCALL1(__pse51_muxid,
				  __pse51_cond_signal, &_cond->shadow_cond);

autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

/*
 * pthread_cond_broadcast() override: wake all waiters via the nucleus,
 * lazily registering a statically-initialized condvar first.
 */
int __wrap_pthread_cond_broadcast(pthread_cond_t * cond)
{
	union __xeno_cond *_cond = (union __xeno_cond *)cond;
	int err;

	if (unlikely(_cond->shadow_cond.magic != PSE51_COND_MAGIC))
		goto autoinit;

start:
	return -XENOMAI_SKINCALL1(__pse51_muxid,
				  __pse51_cond_broadcast,
				  &_cond->shadow_cond);

autoinit:
	err = cond_autoinit(cond);
	if (err)
		return err;
	goto start;
}

/* Slow path: register a PTHREAD_COND_INITIALIZER condvar with the skin
   by running it through the wrapped init with default attributes. */
static int __attribute__((cold)) cond_autoinit(pthread_cond_t *cond)
{
	return __wrap_pthread_cond_init(cond, NULL);
}
/*
 * pSOS rn_getseg() wrapper: allocate a segment from a region.
 *
 * @param rnid    region identifier.
 * @param size    requested segment size.
 * @param flags   allocation flags.
 * @param timeout wait limit when the region is exhausted.
 * @param segaddr out: address of the allocated segment.
 * @return the kernel service status.
 */
u_long rn_getseg(u_long rnid, u_long size, u_long flags,
		 u_long timeout, void **segaddr)
{
	/* Pure conduit: all five arguments pass through unchanged. */
	return XENOMAI_SKINCALL5(__psos_muxid, __psos_rn_getseg,
				 rnid, size, flags, timeout, segaddr);
}