u_long sm_create(const char name[4], u_long icount, u_long flags,
                 u_long *smid_r)
{
        char short_name[5];

        name = __psos_maybe_short_name(short_name, name);

        return XENOMAI_SKINCALL4(__psos_muxid, __psos_sm_create,
                                 name, icount, flags, smid_r);
}
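/*
 * Usage sketch (illustrative, not part of the original source):
 * create a binary semaphore, then take and release it. SM_FIFO,
 * SM_WAIT and the sm_p()/sm_v()/sm_delete() calls are the standard
 * pSOS services this skin emulates; 0 is the pSOS success code.
 */
static void sm_example(void)
{
        u_long smid;

        if (sm_create("SEM1", 1, SM_FIFO, &smid) != 0)
                return;

        sm_p(smid, SM_WAIT, 0);         /* timeout 0 == wait forever */
        sm_v(smid);
        sm_delete(smid);
}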
MSG_Q_ID msgQCreate(int nb_msgs, int length, int flags)
{
        MSG_Q_ID qid;
        int err;

        err = XENOMAI_SKINCALL4(__vxworks_muxid, __vxworks_msgq_create,
                                nb_msgs, length, flags, &qid);
        if (err) {
                errno = abs(err);
                return 0;
        }

        return qid;
}
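/*
 * Usage sketch (illustrative): create a 16-slot queue of 64-byte
 * messages and round-trip one of them. msgQSend(), msgQReceive(),
 * msgQDelete() and the MSG_Q_FIFO, WAIT_FOREVER and MSG_PRI_NORMAL
 * constants are the standard VxWorks API this skin emulates.
 */
static void msgq_example(void)
{
        char buf[64] = "hello";
        MSG_Q_ID qid;

        qid = msgQCreate(16, sizeof(buf), MSG_Q_FIFO);
        if (qid == 0)
                return;         /* errno holds the error code */

        msgQSend(qid, buf, sizeof(buf), WAIT_FOREVER, MSG_PRI_NORMAL);
        msgQReceive(qid, buf, sizeof(buf), WAIT_FOREVER);
        msgQDelete(qid);
}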
int __wrap_clock_nanosleep(clockid_t clock_id, int flags,
                           const struct timespec *rqtp,
                           struct timespec *rmtp)
{
        int err, oldtype;

        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

        err = -XENOMAI_SKINCALL4(__pse51_muxid, __pse51_clock_nanosleep,
                                 clock_id, flags, rqtp, rmtp);

        pthread_setcanceltype(oldtype, NULL);

        return err;
}
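/*
 * Usage sketch (illustrative): applications call clock_nanosleep()
 * as usual; the GNU ld --wrap mechanism (-Wl,--wrap,clock_nanosleep)
 * routes the call to __wrap_clock_nanosleep() above. A relative
 * 100 us sleep on CLOCK_MONOTONIC:
 */
static void nanosleep_example(void)
{
        struct timespec rqt = { .tv_sec = 0, .tv_nsec = 100000 };

        clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL);
}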
static void *uitron_task_trampoline(void *cookie)
{
        struct uitron_task_iargs *iargs = (struct uitron_task_iargs *)cookie;
        struct sched_param param;
        unsigned long mode_offset;
        void (*entry)(INT);
        int policy;
        long err;
        INT arg;

        /*
         * Apply sched params here, as some libpthread implementations
         * fail to do this properly via pthread_create().
         */
        policy = uitron_task_set_posix_priority(iargs->pk_ctsk->itskpri,
                                                &param);
        pthread_setschedparam(pthread_self(), policy, &param);

        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        xeno_sigshadow_install_once();

        err = XENOMAI_SKINCALL4(__uitron_muxid, __uitron_cre_tsk,
                                iargs->tskid, iargs->pk_ctsk,
                                iargs->completionp, &mode_offset);
        if (err)
                goto fail;

        xeno_set_current();
        xeno_set_current_mode(mode_offset);

        /*
         * iargs->pk_ctsk may no longer be valid once our parent has
         * been released from the completion sync, so do not
         * dereference that pointer from this point on.
         */
        do
                err = XENOMAI_SYSCALL2(__xn_sys_barrier, &entry, &arg);
        while (err == -EINTR);

        if (!err)
                entry(arg);
fail:
        return (void *)err;
}
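/*
 * Usage sketch (illustrative): from the application side, the
 * trampoline above is reached through the standard uITRON pair
 * cre_tsk()/sta_tsk(). The T_CTSK field names follow the uITRON 3.x
 * layout this skin implements; the task id (1) is arbitrary.
 */
static void task_entry(INT stacd)
{
        /* real-time task body */
}

static void cre_tsk_example(void)
{
        T_CTSK pk_ctsk = {
                .tskatr = TA_HLNG,
                .itskpri = 5,
                .stksz = 8192,
                .task = (FP)task_entry,
        };

        if (cre_tsk(1, &pk_ctsk) == E_OK)
                sta_tsk(1, 0);
}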
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
        RT_HEAP_PLACEHOLDER ph;
        int err;

        err = XENOMAI_SKINCALL4(__native_muxid, __native_heap_create,
                                &ph, name, heapsize, mode | H_MAPPABLE);
        if (err)
                return err;

        err = __map_heap_memory(heap, &ph);
        if (err)
                /*
                 * If the mapping fails, make sure we don't leave a
                 * dangling heap in kernel space -- remove it.
                 */
                XENOMAI_SKINCALL1(__native_muxid, __native_heap_delete, &ph);

        return err;
}
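/*
 * Usage sketch (illustrative): create a heap, carve a buffer out of
 * it, then release everything. rt_heap_alloc() is wrapped further
 * down; rt_heap_free() and rt_heap_delete() are the matching calls
 * from the native skin, and H_PRIO/TM_NONBLOCK its standard flags.
 */
static void heap_example(void)
{
        RT_HEAP heap;
        void *buf;

        if (rt_heap_create(&heap, "myheap", 16384, H_PRIO))
                return;

        if (rt_heap_alloc(&heap, 1024, TM_NONBLOCK, &buf) == 0)
                rt_heap_free(&heap, buf);

        rt_heap_delete(&heap);
}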
int cobalt_monitor_wait(cobalt_monitor_t *mon, int event,
                        const struct timespec *ts)
{
        int ret, opret, oldtype;

        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

        ret = XENOMAI_SKINCALL4(__cobalt_muxid, sc_cobalt_monitor_wait,
                                mon, event, ts, &opret);

        pthread_setcanceltype(oldtype, NULL);

        /*
         * If we got interrupted while trying to re-enter the monitor,
         * we need to retry. In the meantime, any pending Linux signal
         * has been processed.
         */
        if (ret == -EINTR)
                ret = cobalt_monitor_enter(mon);

        return ret ?: opret;
}
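/*
 * Usage sketch (illustrative): the monitor must be entered before
 * waiting, and the wait drops and reacquires it atomically, much
 * like a POSIX condition variable. cobalt_monitor_exit() and the
 * COBALT_MONITOR_WAITGRANT event are assumptions based on how
 * copperplate drives this low-level service.
 */
static int monitor_wait_example(cobalt_monitor_t *mon)
{
        int ret;

        ret = cobalt_monitor_enter(mon);
        if (ret)
                return ret;

        /* Sleep until granted, with no timeout (ts == NULL). */
        ret = cobalt_monitor_wait(mon, COBALT_MONITOR_WAITGRANT, NULL);

        cobalt_monitor_exit(mon);

        return ret;
}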
u_long q_receive(u_long qid, u_long flags, u_long timeout, u_long msgbuf_r[4])
{
        return XENOMAI_SKINCALL4(__psos_muxid, __psos_q_receive,
                                 qid, flags, timeout, msgbuf_r);
}
u_long q_create(const char *name, u_long maxnum, u_long flags, u_long *qid_r)
{
        return XENOMAI_SKINCALL4(__psos_muxid, __psos_q_create,
                                 name, maxnum, flags, qid_r);
}
u_long q_vbroadcast(u_long qid, void *msgbuf, u_long msglen, u_long *count_r)
{
        return XENOMAI_SKINCALL4(__psos_muxid, __psos_q_vbroadcast,
                                 qid, msgbuf, msglen, count_r);
}
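/*
 * Usage sketch (illustrative): round-trip a fixed-size (4-word)
 * message through a pSOS queue. q_send(), q_delete() and the
 * Q_FIFO/Q_LIMIT/Q_WAIT flags are the standard pSOS API this skin
 * emulates; variable-size queues use the q_v*() variants such as
 * q_vbroadcast() above.
 */
static void queue_example(void)
{
        u_long msg[4] = { 1, 2, 3, 4 };
        u_long qid;

        if (q_create("MYQ", 8, Q_FIFO | Q_LIMIT, &qid))
                return;

        q_send(qid, msg);
        q_receive(qid, Q_WAIT, 0, msg); /* timeout 0 == wait forever */
        q_delete(qid);
}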
int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **bufp)
{
        return XENOMAI_SKINCALL4(__native_muxid, __native_heap_alloc,
                                 heap, size, &timeout, bufp);
}
int rt_intr_create(RT_INTR *intr, const char *name, unsigned irq, int mode)
{
        return XENOMAI_SKINCALL4(__native_muxid, __native_intr_create,
                                 intr, name, irq, mode);
}
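/*
 * Usage sketch (illustrative): block on IRQ receipt from user space.
 * rt_intr_wait(), rt_intr_enable() and rt_intr_delete() are the
 * companion calls from the native skin, and TM_INFINITE its usual
 * "no timeout" value; the IRQ line (7) is arbitrary.
 */
static void intr_example(void)
{
        RT_INTR intr;

        if (rt_intr_create(&intr, "myirq", 7, 0))
                return;

        if (rt_intr_wait(&intr, TM_INFINITE) > 0) {
                /* handle the hardware event, then re-enable the line */
                rt_intr_enable(&intr);
        }

        rt_intr_delete(&intr);
}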
ssize_t rt_queue_read(RT_QUEUE *q, void *buf, size_t size, RTIME timeout)
{
        return XENOMAI_SKINCALL4(__native_muxid, __native_queue_read,
                                 q, buf, size, &timeout);
}
int rt_queue_write(RT_QUEUE *q, const void *buf, size_t size, int mode)
{
        return XENOMAI_SKINCALL4(__native_muxid, __native_queue_write,
                                 q, buf, size, mode);
}
int rt_queue_send(RT_QUEUE *q, void *buf, size_t size, int mode)
{
        return XENOMAI_SKINCALL4(__native_muxid, __native_queue_send,
                                 q, buf, size, mode);
}
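/*
 * Usage sketch (illustrative): rt_queue_write()/rt_queue_read()
 * round-trip using plain buffers, which avoids the rt_queue_alloc()
 * step that rt_queue_send() requires. The rt_queue_create()
 * signature and the Q_UNLIMITED/Q_FIFO/Q_NORMAL/TM_INFINITE
 * constants come from the native skin API.
 */
static void queue_rw_example(void)
{
        char msg[16] = "ping", back[16];
        RT_QUEUE q;

        if (rt_queue_create(&q, "myqueue", 4096, Q_UNLIMITED, Q_FIFO))
                return;

        rt_queue_write(&q, msg, sizeof(msg), Q_NORMAL);
        rt_queue_read(&q, back, sizeof(back), TM_INFINITE);
        rt_queue_delete(&q);
}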