int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	xnarch_atomic_t *ownerp;
	unsigned long status;
	xnhandle_t cur;
	DECLARE_CB_LOCK_FLAGS(s);	/* lock flags used by the cb_*_lock() helpers below */

	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out_err;
	}

	if (unlikely(status & XNOTHER))
		goto do_syscall;

	ownerp = get_ownerp(shadow);

	err = xnsynch_fast_owner_check(ownerp, cur);
	if (unlikely(err))
		goto out_err;

	if (shadow->lockcnt > 1) {
		--shadow->lockcnt;
		goto out;
	}

	if (likely(xnsynch_fast_release(ownerp, cur))) {
	  out:
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_unlock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out_err:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}

int __wrap_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;

	return -XENOMAI_SKINCALL1(__pse51_muxid,
				  __pse51_mutex_destroy,
				  &_mutex->shadow_mutex);
}

int __wrap_close(int fd)
{
	extern int __shm_close(int fd);
	int ret;

	if (fd >= __pse51_rtdm_fd_start) {
		int oldtype;

		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
		ret = set_errno(XENOMAI_SKINCALL1(__pse51_rtdm_muxid,
						  __rtdm_close,
						  fd - __pse51_rtdm_fd_start));
		pthread_setcanceltype(oldtype, NULL);
		return ret;
	} else
		ret = __shm_close(fd);

	if (ret == -1 && (errno == EBADF || errno == ENOSYS))
		return __real_close(fd);

	return ret;
}

int cobalt_monitor_exit(cobalt_monitor_t *mon)
{
	struct cobalt_monitor_data *datp;
	unsigned long status;
	xnhandle_t cur;

	__sync_synchronize();

	datp = get_monitor_data(mon);
	if ((datp->flags & COBALT_MONITOR_PENDED) &&
	    (datp->flags & COBALT_MONITOR_SIGNALED))
		goto syscall;

	status = cobalt_get_current_mode();
	if (status & XNWEAK)
		goto syscall;

	cur = cobalt_get_current();
	if (xnsynch_fast_release(&datp->owner, cur))
		return 0;
syscall:
	return XENOMAI_SKINCALL1(__cobalt_muxid,
				 sc_cobalt_monitor_exit, mon);
}

int sc_fcreate(int *errp)
{
	int fid = -1;

	*errp = XENOMAI_SKINCALL1(__vrtx_muxid, __vrtx_fcreate, &fid);

	return fid;
}

int __wrap_pthread_cond_destroy(pthread_cond_t *cond)
{
	union __xeno_cond *_cond = (union __xeno_cond *)cond;

	return -XENOMAI_SKINCALL1(__pse51_muxid,
				  __pse51_cond_destroy,
				  &_cond->shadow_cond);
}

STATUS msgQDelete(MSG_Q_ID qid)
{
	int err;

	err = XENOMAI_SKINCALL1(__vxworks_muxid, __vxworks_msgq_delete, qid);
	if (err) {
		errno = abs(err);
		return ERROR;
	}

	return OK;
}

int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	int err;

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_lock,
					&_mutex->shadow_mutex);
	} while (err == -EINTR);

	return -err;
}

int rt_heap_delete(RT_HEAP *heap)
{
	int err;

	err = XENOMAI_SKINCALL1(__native_muxid, __native_heap_delete, heap);
	if (err)
		return err;

	heap->opaque = XN_NO_HANDLE;
	heap->mapbase = NULL;
	heap->mapsize = 0;

	return 0;
}

int rt_queue_delete(RT_QUEUE *q)
{
	int err;

	err = XENOMAI_SKINCALL1(__native_muxid, __native_queue_delete, q);
	if (err)
		return err;

	q->opaque = XN_NO_HANDLE;
	q->mapbase = NULL;
	q->mapsize = 0;

	return 0;
}

int cobalt_event_post(cobalt_event_t *event, unsigned long bits)
{
	struct cobalt_event_data *datp = get_event_data(event);

	if (bits == 0)
		return 0;

	__sync_or_and_fetch(&datp->value, bits); /* full barrier. */

	if ((datp->flags & COBALT_EVENT_PENDED) == 0)
		return 0;

	return XENOMAI_SKINCALL1(__cobalt_muxid, sc_cobalt_event_sync, event);
}

int __wrap_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;
	DECLARE_CB_LOCK_FLAGS(s);	/* lock flags used by the cb_*_lock() helpers below */

	if (unlikely(cb_try_write_lock(&shadow->lock, s)))
		return EINVAL;

	err = -XENOMAI_SKINCALL1(__pse51_muxid, __pse51_mutex_destroy, shadow);

	cb_write_unlock(&shadow->lock, s);

	return err;
}

int __cobalt_thread_join(pthread_t thread)
{
	int ret, oldtype;

	/*
	 * Serialize with the regular task exit path, so that no call
	 * for the joined pthread may succeed after this routine
	 * returns. A successful call to sc_cobalt_thread_join
	 * receives -EIDRM, meaning that we eventually joined the
	 * exiting thread as seen by the Cobalt core.
	 *
	 * -ESRCH means that the joined thread has already exited
	 * linux-wise, while we were about to wait for it from the
	 * Cobalt side, in which case we are fine.
	 *
	 * -EBUSY denotes a multiple join for several threads in
	 * parallel to the same target.
	 *
	 * -EPERM may be received because the current context is not
	 * a Xenomai thread.
	 *
	 * -EINVAL is received in case the target is not a joinable
	 * thread (i.e. detached).
	 *
	 * Zero is unexpected.
	 *
	 * CAUTION: this service joins a thread Cobalt-wise only, not
	 * glibc-wise. For a complete join comprising the libc
	 * cleanups, __STD(pthread_join()) should be paired with this
	 * call.
	 */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	do
		ret = XENOMAI_SKINCALL1(__cobalt_muxid,
					sc_cobalt_thread_join, thread);
	while (ret == -EINTR);

	pthread_setcanceltype(oldtype, NULL);

	return ret;
}

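/*
 * Illustrative sketch, not part of the original source: one possible
 * way to pair the Cobalt-only join above with the glibc-level join,
 * as the CAUTION note in __cobalt_thread_join() suggests. The helper
 * name join_thread_completely() is made up for the example; -EIDRM
 * and -ESRCH are treated as success per the notes above.
 */
static int join_thread_completely(pthread_t thread)
{
	int ret;

	/* Join the target Cobalt-wise first. */
	ret = __cobalt_thread_join(thread);
	if (ret && ret != -EIDRM && ret != -ESRCH)
		return ret;	/* negative errno-style code */

	/* Then let the libc run its own cleanups for the same thread. */
	return -__STD(pthread_join(thread, NULL));
}
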
int cobalt_monitor_enter(cobalt_monitor_t *mon)
{
	struct cobalt_monitor_data *datp;
	unsigned long status;
	int ret, oldtype;
	xnhandle_t cur;

	/*
	 * Assumptions on entry:
	 *
	 * - this is a Xenomai shadow (caller checked this).
	 * - no recursive entry/locking.
	 */
	status = cobalt_get_current_mode();
	if (status & (XNRELAX|XNWEAK))
		goto syscall;

	datp = get_monitor_data(mon);
	cur = cobalt_get_current();
	ret = xnsynch_fast_acquire(&datp->owner, cur);
	if (ret == 0) {
		datp->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
		return 0;
	}
syscall:
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/*
	 * Jump to kernel to wait for entry. We redo in case of
	 * interrupt.
	 */
	do
		ret = XENOMAI_SKINCALL1(__cobalt_muxid,
					sc_cobalt_monitor_enter, mon);
	while (ret == -EINTR);

	pthread_setcanceltype(oldtype, NULL);

	return ret;
}

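/*
 * Illustrative sketch, not part of the original source: the intended
 * enter/exit pairing around a short critical section, using
 * cobalt_monitor_enter() above and cobalt_monitor_exit() shown
 * earlier. The monitor object and the shared counter are made up for
 * the example and assumed to be initialized elsewhere.
 */
static cobalt_monitor_t demo_mon;
static int demo_counter;

static int demo_bump_counter(void)
{
	int ret;

	ret = cobalt_monitor_enter(&demo_mon);
	if (ret)
		return ret;

	demo_counter++;		/* protected by the monitor gate */

	return cobalt_monitor_exit(&demo_mon);
}
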
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	RT_HEAP_PLACEHOLDER ph;
	int err;

	err = XENOMAI_SKINCALL4(__native_muxid, __native_heap_create,
				&ph, name, heapsize, mode | H_MAPPABLE);
	if (err)
		return err;

	err = __map_heap_memory(heap, &ph);
	if (err)
		/* If the mapping fails, make sure we don't leave a
		   dangling heap in kernel space -- remove it. */
		XENOMAI_SKINCALL1(__native_muxid, __native_heap_delete, &ph);

	return err;
}

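/*
 * Illustrative sketch, not part of the original source: a typical
 * create/use/delete cycle for a mappable heap built on the service
 * above. The "demo" name and sizes are arbitrary; rt_heap_alloc(),
 * rt_heap_free() and TM_INFINITE are assumed to be provided by the
 * same native skin headers.
 */
static int demo_heap_cycle(void)
{
	RT_HEAP heap;
	void *block;
	int err;

	err = rt_heap_create(&heap, "demo", 16384, H_PRIO);
	if (err)
		return err;

	err = rt_heap_alloc(&heap, 1024, TM_INFINITE, &block);
	if (err == 0)
		rt_heap_free(&heap, block);

	return rt_heap_delete(&heap);
}
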
int rt_queue_create(RT_QUEUE *q, const char *name,
		    size_t poolsize, size_t qlimit, int mode)
{
	RT_QUEUE_PLACEHOLDER ph;
	int err;

	err = XENOMAI_SKINCALL5(__native_muxid, __native_queue_create,
				&ph, name, poolsize, qlimit, mode | Q_SHARED);
	if (err)
		return err;

	err = __map_queue_memory(q, &ph);
	if (err)
		/* If the mapping fails, make sure we don't leave a
		   dangling queue in kernel space -- remove it. */
		XENOMAI_SKINCALL1(__native_muxid, __native_queue_delete, &ph);

	return err;
}

u_long rn_create(const char name[4], void *rnaddr, u_long rnsize,
		 u_long usize, u_long flags, u_long *rnid, u_long *allocsz)
{
	struct rninfo rninfo;
	struct {
		u_long rnsize;
		u_long usize;
		u_long flags;
	} sizeopt;
	u_long err;

	if (rnaddr)
		fprintf(stderr,
			"rn_create() - rnaddr parameter ignored from user-space context\n");

	sizeopt.rnsize = rnsize;
	sizeopt.usize = usize;
	sizeopt.flags = flags;

	err = XENOMAI_SKINCALL3(__psos_muxid, __psos_rn_create,
				name, &sizeopt, &rninfo);
	if (err)
		return err;

	err = __map_heap_memory(&rninfo);
	if (err) {
		/* If the mapping fails, make sure we don't leave a
		   dangling heap in kernel space -- remove it. */
		XENOMAI_SKINCALL1(__psos_muxid, __psos_rn_delete, rninfo.rnid);
		return err;
	}

	*rnid = rninfo.rnid;
	*allocsz = rninfo.allocsz;

	return SUCCESS;
}

int cobalt_monitor_drain_all_sync(cobalt_monitor_t *mon)
{
	struct cobalt_monitor_data *datp = get_monitor_data(mon);
	int ret, oldtype;

	cobalt_monitor_drain_all(mon);

	if ((datp->flags & COBALT_MONITOR_PENDED) == 0)
		return 0;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	ret = XENOMAI_SKINCALL1(__cobalt_muxid, sc_cobalt_monitor_sync, mon);

	pthread_setcanceltype(oldtype, NULL);

	if (ret == -EINTR)
		return cobalt_monitor_enter(mon);

	return ret;
}

u_long q_vdelete(u_long qid)
{
	return XENOMAI_SKINCALL1(__psos_muxid, __psos_q_vdelete, qid);
}

ER wup_tsk(ID tskid)
{
	return XENOMAI_SKINCALL1(__uitron_muxid, __uitron_wup_tsk, tskid);
}

int rt_mutex_release(RT_MUTEX *mutex)
{
	return XENOMAI_SKINCALL1(__native_muxid, __native_mutex_release, mutex);
}

ER frsm_tsk(ID tskid)
{
	return XENOMAI_SKINCALL1(__uitron_muxid, __uitron_frsm_tsk, tskid);
}

ER tslp_tsk(TMO tmout)
{
	return XENOMAI_SKINCALL1(__uitron_muxid, __uitron_tslp_tsk, tmout);
}

ER rel_wai(ID tskid)
{
	return XENOMAI_SKINCALL1(__uitron_muxid, __uitron_rel_wai, tskid);
}

ER get_tid(ID *p_tskid)
{
	return XENOMAI_SKINCALL1(__uitron_muxid, __uitron_get_tid, p_tskid);
}

ER rot_rdq(PRI tskpri)
{
	return XENOMAI_SKINCALL1(__uitron_muxid, __uitron_rot_rdq, tskpri);
}

void sc_tslice(unsigned short ticks)
{
	XENOMAI_SKINCALL1(__vrtx_muxid, __vrtx_tslice, ticks);
}

int rt_intr_disable(RT_INTR *intr)
{
	return XENOMAI_SKINCALL1(__native_muxid, __native_intr_disable, intr);
}

int cobalt_monitor_destroy(cobalt_monitor_t *mon)
{
	return XENOMAI_SKINCALL1(__cobalt_muxid, sc_cobalt_monitor_destroy, mon);
}

int cobalt_event_destroy(cobalt_event_t *event)
{
	return XENOMAI_SKINCALL1(__cobalt_muxid, sc_cobalt_event_destroy, event);
}