int __wrap_accept(int fd, struct sockaddr *addr, socklen_t *addrlen)
{
	if (fd >= __pse51_rtdm_fd_start) {
		/* Descriptors at or above __pse51_rtdm_fd_start belong to
		   RTDM; strip the offset and go through the RTDM skin. */
		struct _rtdm_getsockaddr_args args = { addr, addrlen };
		int oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		fd = XENOMAI_SKINCALL3(__pse51_rtdm_muxid, __rtdm_ioctl,
				       fd - __pse51_rtdm_fd_start,
				       _RTIOC_ACCEPT, &args);

		pthread_setcanceltype(oldtype, NULL);

		if (fd < 0)
			return set_errno(fd);

		return fd + __pse51_rtdm_fd_start;
	} else {
		fd = __real_accept(fd, addr, addrlen);

		if (fd >= __pse51_rtdm_fd_start) {
			/* The regular descriptor would collide with the
			   RTDM range -- reject it. */
			__real_close(fd);
			errno = EMFILE;
			fd = -1;
		}

		return fd;
	}
}
int __wrap_socket(int protocol_family, int socket_type, int protocol)
{
	int ret;

	ret = XENOMAI_SKINCALL3(__pse51_rtdm_muxid, __rtdm_socket,
				protocol_family, socket_type, protocol);
	if (ret >= 0)
		ret += __pse51_rtdm_fd_start;
	else if (ret == -EAFNOSUPPORT || ret == -EPROTONOSUPPORT ||
		 ret == -ENOSYS) {
		/* No RTDM protocol handles this family; fall back to the
		   regular socket call. */
		ret = __real_socket(protocol_family, socket_type, protocol);

		if (ret >= __pse51_rtdm_fd_start) {
			__real_close(ret);
			errno = EMFILE;
			ret = -1;
		}
	} else {
		errno = -ret;
		ret = -1;
	}

	return ret;
}
ssize_t __wrap_recvfrom(int fd, void *buf, size_t len, int flags,
			struct sockaddr *from, socklen_t *fromlen)
{
	if (fd >= __pse51_rtdm_fd_start) {
		struct iovec iov = { buf, len };
		struct msghdr msg =
			{ from, (from != NULL) ? *fromlen : 0, &iov, 1, NULL, 0 };
		int ret, oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		ret = XENOMAI_SKINCALL3(__pse51_rtdm_muxid, __rtdm_recvmsg,
					fd - __pse51_rtdm_fd_start, &msg, flags);

		pthread_setcanceltype(oldtype, NULL);

		if (ret < 0) {
			errno = -ret;
			ret = -1;
		} else if (from != NULL)
			*fromlen = msg.msg_namelen;

		return ret;
	} else
		return __real_recvfrom(fd, buf, len, flags, from, fromlen);
}
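/*
 * Usage sketch (illustrative, not part of the wrapper library): how
 * application code reaches the socket wrappers above.  This assumes the
 * usual Xenomai build setup where the final link passes --wrap switches
 * (e.g. -Wl,--wrap,socket -Wl,--wrap,bind -Wl,--wrap,recvfrom) so the
 * plain libc names below resolve to the __wrap_* versions.  The
 * AF_INET/SOCK_DGRAM family and the port number are placeholders; use
 * whatever your RTDM protocol driver actually registers.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void recv_one_datagram(void)
{
	struct sockaddr_in addr;
	char buf[256];
	ssize_t n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);	/* dispatched by __wrap_socket() */
	if (fd < 0)
		return;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(5000);

	/* bind()/recvfrom() go through RTDM whenever fd lies in the RTDM
	   descriptor range, and through libc otherwise. */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0) {
		n = recvfrom(fd, buf, sizeof(buf), 0, NULL, NULL);
		if (n >= 0)
			printf("received %zd bytes\n", n);
	}

	close(fd);
}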
int sc_fclear(int fid, int mask, int *errp)
{
	int mask_r = 0;

	*errp = XENOMAI_SKINCALL3(__vrtx_muxid, __vrtx_fclear, fid, mask, &mask_r);

	return mask_r;
}
void *rt_queue_alloc(RT_QUEUE *q, size_t size)
{
	void *buf;

	return XENOMAI_SKINCALL3(__native_muxid, __native_queue_alloc,
				 q, size, &buf) ? NULL : buf;
}
int rt_queue_bind(RT_QUEUE *q, const char *name, RTIME timeout)
{
	RT_QUEUE_PLACEHOLDER ph;
	int err;

	err = XENOMAI_SKINCALL3(__native_muxid, __native_queue_bind,
				&ph, name, &timeout);

	return err ?: __map_queue_memory(q, &ph);
}
int rt_heap_bind(RT_HEAP *heap, const char *name, RTIME timeout)
{
	RT_HEAP_PLACEHOLDER ph;
	int err;

	err = XENOMAI_SKINCALL3(__native_muxid, __native_heap_bind,
				&ph, name, &timeout);

	return err ?: __map_heap_memory(heap, &ph);
}
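/*
 * Usage sketch (illustrative, not part of the skin library): attaching to
 * a named heap created elsewhere and carving a block out of it.  The heap
 * name "shm-pool" and the 1 KiB request are placeholders; rt_heap_alloc(),
 * rt_heap_free() and rt_heap_unbind() are assumed to carry their standard
 * native-skin signatures from <native/heap.h>.
 */
#include <native/heap.h>

static int attach_and_use_heap(void)
{
	RT_HEAP heap;
	void *block;
	int err;

	err = rt_heap_bind(&heap, "shm-pool", TM_INFINITE);
	if (err)
		return err;

	err = rt_heap_alloc(&heap, 1024, TM_INFINITE, &block);
	if (err == 0) {
		/* ... use the block ... */
		rt_heap_free(&heap, block);
	}

	rt_heap_unbind(&heap);

	return err;
}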
int __wrap_listen(int fd, int backlog)
{
	if (fd >= __pse51_rtdm_fd_start)
		return set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						   __rtdm_ioctl,
						   fd - __pse51_rtdm_fd_start,
						   _RTIOC_LISTEN, backlog));
	else
		return __real_listen(fd, backlog);
}
int __wrap_shutdown(int fd, int how)
{
	if (fd >= __pse51_rtdm_fd_start)
		return set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						   __rtdm_ioctl,
						   fd - __pse51_rtdm_fd_start,
						   _RTIOC_SHUTDOWN, how));
	else
		return __real_shutdown(fd, how);
}
int __wrap_getpeername(int fd, struct sockaddr *name, socklen_t *namelen)
{
	if (fd >= __pse51_rtdm_fd_start) {
		struct _rtdm_getsockaddr_args args = { name, namelen };

		return set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						   __rtdm_ioctl,
						   fd - __pse51_rtdm_fd_start,
						   _RTIOC_GETPEERNAME, &args));
	} else
		return __real_getpeername(fd, name, namelen);
}
int __wrap_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen)
{
	if (fd >= __pse51_rtdm_fd_start) {
		struct _rtdm_setsockaddr_args args = { my_addr, addrlen };

		return set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						   __rtdm_ioctl,
						   fd - __pse51_rtdm_fd_start,
						   _RTIOC_BIND, &args));
	} else
		return __real_bind(fd, my_addr, addrlen);
}
static void __pthread_cond_cleanup(void *data)
{
	struct pse51_cond_cleanup_t *c = (struct pse51_cond_cleanup_t *)data;
	int err;

	do {
		err = -XENOMAI_SKINCALL3(__pse51_muxid,
					 __pse51_cond_wait_epilogue,
					 &c->cond->shadow_cond,
					 &c->mutex->shadow_mutex, c->count);
	} while (err == EINTR);
}
int __wrap_setsockopt(int fd, int level, int optname, const void *optval,
		      socklen_t optlen)
{
	if (fd >= __pse51_rtdm_fd_start) {
		struct _rtdm_setsockopt_args args =
			{ level, optname, (void *)optval, optlen };

		return set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						   __rtdm_ioctl,
						   fd - __pse51_rtdm_fd_start,
						   _RTIOC_SETSOCKOPT, &args));
	} else
		return __real_setsockopt(fd, level, optname, optval, optlen);
}
TCB *sc_tinquiry(int pinfo[], int tid, int *errp)
{
	TCB *tcb;

#ifdef HAVE___THREAD
	tcb = &__vrtx_tcb;
#else /* !HAVE___THREAD */
	tcb = (TCB *)pthread_getspecific(__vrtx_tskey); /* Cannot fail. */
#endif /* !HAVE___THREAD */

	*errp = XENOMAI_SKINCALL3(__vrtx_muxid, __vrtx_tinquiry, pinfo, tcb, tid);
	if (*errp)
		return NULL;

	return tcb;
}
int __wrap_ioctl(int fd, unsigned long int request, ...)
{
	va_list ap;
	void *arg;

	/* Fetch the single optional argument as a pointer, which matches
	   the usual ioctl(fd, request, argp) calling convention. */
	va_start(ap, request);
	arg = va_arg(ap, void *);
	va_end(ap);

	if (fd >= __pse51_rtdm_fd_start)
		return set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						   __rtdm_ioctl,
						   fd - __pse51_rtdm_fd_start,
						   request, arg));
	else
		return __real_ioctl(fd, request, arg);
}
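/*
 * Usage sketch (illustrative): __wrap_ioctl() above always forwards exactly
 * one pointer-sized optional argument.  A thin helper such as the
 * hypothetical one below can funnel RTDM requests through the wrapper while
 * reporting failures as negative errno values.
 */
#include <errno.h>
#include <sys/ioctl.h>

static int issue_rtdm_ioctl(int fd, unsigned long request, void *argp)
{
	/* Resolves to __wrap_ioctl() under --wrap linkage. */
	if (ioctl(fd, request, argp) < 0)
		return -errno;

	return 0;
}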
ssize_t __wrap_sendmsg(int fd, const struct msghdr *msg, int flags)
{
	if (fd >= __pse51_rtdm_fd_start) {
		int ret, oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		ret = set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						  __rtdm_sendmsg,
						  fd - __pse51_rtdm_fd_start,
						  msg, flags));

		pthread_setcanceltype(oldtype, NULL);

		return ret;
	} else
		return __real_sendmsg(fd, msg, flags);
}
ssize_t __wrap_write(int fd, const void *buf, size_t nbyte)
{
	if (fd >= __pse51_rtdm_fd_start) {
		int ret, oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		ret = set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						  __rtdm_write,
						  fd - __pse51_rtdm_fd_start,
						  buf, nbyte));

		pthread_setcanceltype(oldtype, NULL);

		return ret;
	} else
		return __real_write(fd, buf, nbyte);
}
int __wrap_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen)
{
	if (fd >= __pse51_rtdm_fd_start) {
		struct _rtdm_setsockaddr_args args = { serv_addr, addrlen };
		int ret, oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		ret = set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						  __rtdm_ioctl,
						  fd - __pse51_rtdm_fd_start,
						  _RTIOC_CONNECT, &args));

		pthread_setcanceltype(oldtype, NULL);

		return ret;
	} else
		return __real_connect(fd, serv_addr, addrlen);
}
ssize_t __wrap_send(int fd, const void *buf, size_t len, int flags)
{
	if (fd >= __pse51_rtdm_fd_start) {
		struct iovec iov = { (void *)buf, len };
		struct msghdr msg = { NULL, 0, &iov, 1, NULL, 0 };
		int ret, oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		ret = set_errno(XENOMAI_SKINCALL3(__pse51_rtdm_muxid,
						  __rtdm_sendmsg,
						  fd - __pse51_rtdm_fd_start,
						  &msg, flags));

		pthread_setcanceltype(oldtype, NULL);

		return ret;
	} else
		return __real_send(fd, buf, len, flags);
}
int cobalt_monitor_init(cobalt_monitor_t *mon, clockid_t clk_id, int flags)
{
	struct cobalt_monitor_data *datp;
	int ret;

	ret = XENOMAI_SKINCALL3(__cobalt_muxid, sc_cobalt_monitor_init,
				mon, clk_id, flags);
	if (ret)
		return ret;

	if ((flags & COBALT_MONITOR_SHARED) == 0) {
		datp = (void *)cobalt_sem_heap[0] + mon->u.data_offset;
		mon->u.data = datp;
	} else
		datp = get_monitor_data(mon);

	__cobalt_prefault(datp);

	return 0;
}
u_long rn_create(const char name[4], void *rnaddr, u_long rnsize,
		 u_long usize, u_long flags, u_long *rnid, u_long *allocsz)
{
	struct rninfo rninfo;
	struct {
		u_long rnsize;
		u_long usize;
		u_long flags;
	} sizeopt;
	u_long err;

	if (rnaddr)
		fprintf(stderr,
			"rn_create() - rnaddr parameter ignored from user-space context\n");

	sizeopt.rnsize = rnsize;
	sizeopt.usize = usize;
	sizeopt.flags = flags;

	err = XENOMAI_SKINCALL3(__psos_muxid, __psos_rn_create,
				name, &sizeopt, &rninfo);
	if (err)
		return err;

	err = __map_heap_memory(&rninfo);
	if (err) {
		/* If the mapping fails, make sure we don't leave a dangling
		   heap in kernel space -- remove it. */
		XENOMAI_SKINCALL1(__psos_muxid, __psos_rn_delete, rninfo.rnid);
		return err;
	}

	*rnid = rninfo.rnid;
	*allocsz = rninfo.allocsz;

	return SUCCESS;
}
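/*
 * Usage sketch (illustrative, not part of the skin library): creating a
 * region from user space and deleting it again.  The region name, sizes and
 * flags value (0) are placeholders, rn_delete() is assumed to carry the
 * classic pSOS+ signature, and the header path below is an assumption;
 * check the psos skin headers shipped with your Xenomai version.
 */
#include <psos+/psos.h>	/* header path assumed; adjust to your installation */

static u_long region_round_trip(void)
{
	u_long rnid, allocsz, err;

	/* rnaddr is ignored in user space, so pass NULL. */
	err = rn_create("REGN", NULL, 32768, 128, 0, &rnid, &allocsz);
	if (err != SUCCESS)
		return err;

	/* allocsz reports how much of the region is actually allocatable. */

	return rn_delete(rnid);
}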
int cobalt_event_init(cobalt_event_t *event, unsigned long value, int flags)
{
	struct cobalt_event_data *datp;
	int ret;

	ret = XENOMAI_SKINCALL3(__cobalt_muxid, sc_cobalt_event_init,
				event, value, flags);
	if (ret)
		return ret;

	if ((flags & COBALT_EVENT_SHARED) == 0) {
		datp = (void *)cobalt_sem_heap[0] + event->u.data_offset;
		event->u.data = datp;
	} else
		datp = get_event_data(event);

	__cobalt_prefault(datp);

	return 0;
}
ER shd_tsk(ID tskid, T_CTSK *pk_ctsk)	/* Xenomai extension. */
{
	struct sched_param param;
	int policy, err;

	xeno_fault_stack();

	/* Make sure the POSIX library caches the right priority. */
	policy = uitron_task_set_posix_priority(pk_ctsk->itskpri, &param);
	pthread_setschedparam(pthread_self(), policy, &param);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	xeno_sigshadow_install_once();

	err = XENOMAI_SKINCALL3(__uitron_muxid, __uitron_cre_tsk,
				tskid, pk_ctsk, NULL);
	if (!err)
		xeno_set_current();

	return err;
}
u_long q_vurgent(u_long qid, void *msgbuf, u_long msglen)
{
	return XENOMAI_SKINCALL3(__psos_muxid, __psos_q_vurgent,
				 qid, msgbuf, msglen);
}
u_long q_broadcast(u_long qid, u_long msgbuf[4], u_long *count_r)
{
	return XENOMAI_SKINCALL3(__psos_muxid, __psos_q_broadcast,
				 qid, msgbuf, count_r);
}
int rt_mutex_bind(RT_MUTEX *mutex, const char *name, RTIME timeout)
{
	return XENOMAI_SKINCALL3(__native_muxid, __native_mutex_bind,
				 mutex, name, &timeout);
}
ssize_t rt_queue_receive(RT_QUEUE *q, void **bufp, RTIME timeout)
{
	return XENOMAI_SKINCALL3(__native_muxid, __native_queue_receive,
				 q, bufp, &timeout);
}
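/*
 * Usage sketch (illustrative, not part of the skin library): a consumer
 * that binds to a named queue and drains it.  The queue name "msg-q" is a
 * placeholder; rt_queue_free() and rt_queue_unbind() are assumed to carry
 * their standard native-skin signatures from <native/queue.h>.
 */
#include <stdio.h>
#include <native/queue.h>

static void drain_queue(void)
{
	RT_QUEUE q;
	void *msg;
	ssize_t len;

	if (rt_queue_bind(&q, "msg-q", TM_INFINITE))
		return;

	/* Each successful receive hands back a buffer owned by the queue
	   pool; it must be released with rt_queue_free(). */
	while ((len = rt_queue_receive(&q, &msg, TM_INFINITE)) >= 0) {
		printf("message of %zd bytes\n", len);
		rt_queue_free(&q, msg);
	}

	rt_queue_unbind(&q);
}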
int rt_cond_bind(RT_COND *cond, const char *name, RTIME timeout)
{
	return XENOMAI_SKINCALL3(__native_muxid, __native_cond_bind,
				 cond, name, &timeout);
}
		old_sigharden_handler = signal(SIGHARDEN, &__pthread_sigharden_handler);
		if (policy != SCHED_OTHER)
			XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN);
	}

	return err;
}

int __wrap_pthread_getschedparam(pthread_t thread,
				 int *__restrict__ policy,
				 struct sched_param *__restrict__ param)
{
	int err;

	err = -XENOMAI_SKINCALL3(__pse51_muxid, __pse51_thread_getschedparam,
				 thread, policy, param);
	if (err == ESRCH)
		return __real_pthread_getschedparam(thread, policy, param);

	return err;
}

int __wrap_sched_yield(void)
{
	int err = -XENOMAI_SKINCALL0(__pse51_muxid, __pse51_sched_yield);

	if (err == -1)
		err = __real_sched_yield();

	return err;
}
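/*
 * Usage sketch (illustrative): exercising the scheduling wrappers above
 * through their standard POSIX names, again assuming --wrap linkage.  The
 * priority value passed in is a placeholder chosen by the caller.
 */
#include <pthread.h>
#include <sched.h>

static int promote_self_to_fifo(int prio)
{
	struct sched_param param = { .sched_priority = prio };
	int policy, err;

	/* Routed to __wrap_pthread_setschedparam(), which may migrate the
	   caller to the Xenomai domain. */
	err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
	if (err)
		return err;

	/* Routed to __wrap_pthread_getschedparam() for verification. */
	return pthread_getschedparam(pthread_self(), &policy, &param);
}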
int rt_intr_bind(RT_INTR *intr, const char *name, RTIME timeout)
{
	return XENOMAI_SKINCALL3(__native_muxid, __native_intr_bind,
				 intr, name, &timeout);
}