Ejemplo n.º 1
0
int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	/*
	 * Unlock a Xenomai-shadowed POSIX mutex.  When fast
	 * synchronization support is compiled in, the mutex is handed
	 * back entirely from user space whenever possible; otherwise
	 * (or on contention) a syscall is issued.  Returns 0 on
	 * success or a positive error code (pthread convention).
	 */
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	xnarch_atomic_t *ownerp;
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai threads may attempt the fast unlock path. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	/* NOTE(review): 's' is presumably declared by the cb_*_lock
	   macro layer — confirm against those macro definitions. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	/* Reject stale or foreign mutex objects. */
	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out_err;
	}

	/* Threads running in XNOTHER mode must release through the
	   kernel. */
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	ownerp = get_ownerp(shadow);

	/* Fail unless the caller actually owns the mutex. */
	err = xnsynch_fast_owner_check(ownerp, cur);
	if (unlikely(err))
		goto out_err;

	/* Recursive lock: just decrement the nesting count. */
	if (shadow->lockcnt > 1) {
		--shadow->lockcnt;
		goto out;
	}

	/* Atomic user-space release; fails when waiters are queued,
	   in which case the kernel must hand the mutex over. */
	if (likely(xnsynch_fast_release(ownerp, cur))) {
	  out:
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: let the kernel release the mutex; restart if a
	   signal interrupts the syscall. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_unlock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out_err:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Ejemplo n.º 2
0
int __wrap_pthread_mutex_destroy(pthread_mutex_t * mutex)
{
	/* Tear down the kernel object backing this shadow mutex;
	   returns a positive error code on failure. */
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	int err;

	err = XENOMAI_SKINCALL1(__pse51_muxid,
				__pse51_mutex_destroy, &_mutex->shadow_mutex);

	return -err;
}
Ejemplo n.º 3
0
int __wrap_close(int fd)
{
	extern int __shm_close(int fd);
	int ret;

	if (fd >= __pse51_rtdm_fd_start) {
		int oldtype;

		/* RTDM descriptor: route the request to the RTDM
		   skin, allowing asynchronous cancellation across the
		   blocking syscall. */
		pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

		ret = set_errno(XENOMAI_SKINCALL1(__pse51_rtdm_muxid,
						  __rtdm_close,
						  fd - __pse51_rtdm_fd_start));

		pthread_setcanceltype(oldtype, NULL);

		return ret;
	}

	/* Try the POSIX-skin shared memory objects first. */
	ret = __shm_close(fd);

	/* Not one of ours: hand the descriptor to the real libc
	   close(). */
	if (ret == -1 && (errno == EBADF || errno == ENOSYS))
		return __real_close(fd);

	return ret;
}
Ejemplo n.º 4
0
int cobalt_monitor_exit(cobalt_monitor_t *mon)
{
	/*
	 * Release a monitor held by the caller, handing it back from
	 * user space when possible, via a syscall otherwise.
	 */
	struct cobalt_monitor_data *datp;
	unsigned long status;
	xnhandle_t cur;

	/* Full barrier: make all protected-section updates visible
	   before the monitor flags are inspected. */
	__sync_synchronize();

	/* Threads are pending and a signal was notified: the kernel
	   has wakeups to perform, so take the slow path. */
	datp = get_monitor_data(mon);
	if ((datp->flags & COBALT_MONITOR_PENDED) &&
	    (datp->flags & COBALT_MONITOR_SIGNALED))
		goto syscall;

	/* Weakly-scheduled threads always release via the kernel. */
	status = cobalt_get_current_mode();
	if (status & XNWEAK)
		goto syscall;

	/* Fast path: atomically clear the owner handle; succeeds
	   only when no kernel-side handover is required. */
	cur = cobalt_get_current();
	if (xnsynch_fast_release(&datp->owner, cur))
		return 0;
syscall:
	return XENOMAI_SKINCALL1(__cobalt_muxid,
				 sc_cobalt_monitor_exit,
				 mon);
}
Ejemplo n.º 5
0
int sc_fcreate(int *errp)
{
	/* Create a VRTX flag group: the group id is returned, the
	   error status is stored through errp. */
	int fid;

	fid = -1;
	*errp = XENOMAI_SKINCALL1(__vrtx_muxid, __vrtx_fcreate, &fid);

	return fid;
}
Ejemplo n.º 6
0
int __wrap_pthread_cond_destroy(pthread_cond_t * cond)
{
	/* Tear down the kernel object backing this shadow condvar;
	   returns a positive error code on failure. */
	union __xeno_cond *_cond = (union __xeno_cond *)cond;
	int err;

	err = XENOMAI_SKINCALL1(__pse51_muxid,
				__pse51_cond_destroy, &_cond->shadow_cond);

	return -err;
}
Ejemplo n.º 7
0
STATUS msgQDelete(MSG_Q_ID qid)
{
	/* Delete a VxWorks message queue; ERROR + errno on failure. */
	int ret = XENOMAI_SKINCALL1(__vxworks_muxid,
				    __vxworks_msgq_delete, qid);

	if (ret == 0)
		return OK;

	errno = abs(ret);

	return ERROR;
}
Ejemplo n.º 8
0
int __wrap_pthread_mutex_lock(pthread_mutex_t * mutex)
{
	/* Acquire a Xenomai-shadowed POSIX mutex; returns a positive
	   error code on failure. */
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	int ret;

	/* Restart the syscall whenever a signal interrupts it. */
	for (;;) {
		ret = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_lock,
					&_mutex->shadow_mutex);
		if (ret != -EINTR)
			break;
	}

	return -ret;
}
Ejemplo n.º 9
0
int rt_heap_delete(RT_HEAP *heap)
{
	/* Remove the kernel-side heap, then invalidate the local
	   descriptor on success. */
	int ret = XENOMAI_SKINCALL1(__native_muxid,
				    __native_heap_delete, heap);

	if (ret)
		return ret;

	heap->opaque = XN_NO_HANDLE;
	heap->mapbase = NULL;
	heap->mapsize = 0;

	return 0;
}
Ejemplo n.º 10
0
int rt_queue_delete(RT_QUEUE *q)
{
	/* Remove the kernel-side queue, then invalidate the local
	   descriptor on success. */
	int ret = XENOMAI_SKINCALL1(__native_muxid,
				    __native_queue_delete, q);

	if (ret)
		return ret;

	q->opaque = XN_NO_HANDLE;
	q->mapbase = NULL;
	q->mapsize = 0;

	return 0;
}
Ejemplo n.º 11
0
int cobalt_event_post(cobalt_event_t *event, unsigned long bits)
{
	struct cobalt_event_data *datp = get_event_data(event);

	/* Posting no bit is a no-op. */
	if (bits == 0)
		return 0;

	/* Merge the posted bits into the event value; this builtin
	   implies a full memory barrier. */
	__sync_or_and_fetch(&datp->value, bits);

	/* Kick the kernel only when some thread is pending. */
	if (datp->flags & COBALT_EVENT_PENDED)
		return XENOMAI_SKINCALL1(__cobalt_muxid,
					 sc_cobalt_event_sync, event);

	return 0;
}
Ejemplo n.º 12
0
int __wrap_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	/*
	 * Destroy a Xenomai-shadowed POSIX mutex.  Returns 0 on
	 * success or a positive error code (pthread convention).
	 */
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

	/* Take the shadow lock exclusively so no concurrent fast-path
	   operation can race with the destruction.
	   NOTE(review): 's' is presumably declared by the cb_*_lock
	   macro layer — confirm against those macro definitions. */
	if (unlikely(cb_try_write_lock(&shadow->lock, s)))
		return EINVAL;

	err = -XENOMAI_SKINCALL1(__pse51_muxid, __pse51_mutex_destroy, shadow);

	cb_write_unlock(&shadow->lock, s);

	return err;
}
Ejemplo n.º 13
0
int __cobalt_thread_join(pthread_t thread)
{
	int ret, oldtype;

	/*
	 * Join the target thread Cobalt-wise: serialize with the
	 * regular task exit path so that no call for the joined
	 * pthread may succeed once this routine returns.
	 *
	 * Expected status from sc_cobalt_thread_join:
	 *
	 * -EIDRM  success; we eventually joined the exiting thread
	 *         as seen by the Cobalt core.
	 * -ESRCH  the joined thread already exited linux-wise while
	 *         we were about to wait for it from the Cobalt side,
	 *         in which case we are fine.
	 * -EBUSY  multiple join for several threads in parallel to
	 *         the same target.
	 * -EPERM  the current context is not a Xenomai thread.
	 * -EINVAL the target is not a joinable thread (i.e. detached).
	 *
	 * Zero is unexpected.
	 *
	 * CAUTION: this service joins a thread Cobalt-wise only, not
	 * glibc-wise.  For a complete join comprising the libc
	 * cleanups, __STD(pthread_join()) should be paired with this
	 * call.
	 */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/* Restart whenever a signal interrupts the wait. */
	for (;;) {
		ret = XENOMAI_SKINCALL1(__cobalt_muxid,
					sc_cobalt_thread_join, thread);
		if (ret != -EINTR)
			break;
	}

	pthread_setcanceltype(oldtype, NULL);

	return ret;
}
Ejemplo n.º 14
0
int cobalt_monitor_enter(cobalt_monitor_t *mon)
{
	/*
	 * Acquire the monitor gate, trying an atomic user-space
	 * acquisition first and falling back to a syscall when the
	 * fast path is unavailable or fails.
	 */
	struct cobalt_monitor_data *datp;
	unsigned long status;
	int ret, oldtype;
	xnhandle_t cur;

	/*
	 * Assumptions on entry:
	 *
	 * - this is a Xenomai shadow (caller checked this).
	 * - no recursive entry/locking.
	 */

	/* Relaxed or weakly-scheduled threads cannot take the fast
	   path; let the kernel handle the acquisition. */
	status = cobalt_get_current_mode();
	if (status & (XNRELAX|XNWEAK))
		goto syscall;

	datp = get_monitor_data(mon);
	cur = cobalt_get_current();
	ret = xnsynch_fast_acquire(&datp->owner, cur);
	if (ret == 0) {
		/* Fast acquisition: clear stale notification bits. */
		datp->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
		return 0;
	}
syscall:
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/*
	 * Jump to kernel to wait for entry. We redo in case of
	 * interrupt.
	 */
	do
		ret = XENOMAI_SKINCALL1(__cobalt_muxid,
					sc_cobalt_monitor_enter,
					mon);
	while (ret == -EINTR);

	pthread_setcanceltype(oldtype, NULL);

	return ret;
}
Ejemplo n.º 15
0
int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
	/* Create a kernel-side heap and map it into this process. */
	RT_HEAP_PLACEHOLDER ph;
	int ret;

	ret = XENOMAI_SKINCALL4(__native_muxid,
				__native_heap_create,
				&ph, name, heapsize, mode | H_MAPPABLE);
	if (ret)
		return ret;

	ret = __map_heap_memory(heap, &ph);
	if (ret)
		/* Mapping failed: delete the kernel object so that no
		   dangling heap is left behind. */
		XENOMAI_SKINCALL1(__native_muxid, __native_heap_delete, &ph);

	return ret;
}
Ejemplo n.º 16
0
int rt_queue_create(RT_QUEUE *q,
		    const char *name, size_t poolsize, size_t qlimit, int mode)
{
	/* Create a kernel-side queue and map its pool into this
	   process. */
	RT_QUEUE_PLACEHOLDER ph;
	int ret;

	ret = XENOMAI_SKINCALL5(__native_muxid,
				__native_queue_create,
				&ph, name, poolsize, qlimit, mode | Q_SHARED);
	if (ret)
		return ret;

	ret = __map_queue_memory(q, &ph);
	if (ret)
		/* Mapping failed: delete the kernel object so that no
		   dangling queue is left behind. */
		XENOMAI_SKINCALL1(__native_muxid, __native_queue_delete, &ph);

	return ret;
}
Ejemplo n.º 17
0
u_long rn_create(const char name[4],
		 void *rnaddr,
		 u_long rnsize,
		 u_long usize, u_long flags, u_long *rnid, u_long *allocsz)
{
	/* Create a pSOS region and map its memory into this process;
	   the region id and effective size are returned through rnid
	   and allocsz. */
	struct {
		u_long rnsize;
		u_long usize;
		u_long flags;
	} sizeopt;
	struct rninfo rninfo;
	u_long ret;

	/* The kernel always allocates region memory itself. */
	if (rnaddr)
		fprintf(stderr,
			"rn_create() - rnaddr parameter ignored from user-space context\n");

	sizeopt.rnsize = rnsize;
	sizeopt.usize = usize;
	sizeopt.flags = flags;

	ret = XENOMAI_SKINCALL3(__psos_muxid,
				__psos_rn_create, name, &sizeopt, &rninfo);
	if (ret)
		return ret;

	ret = __map_heap_memory(&rninfo);
	if (ret) {
		/* Mapping failed: delete the kernel object so that no
		   dangling region is left behind. */
		XENOMAI_SKINCALL1(__psos_muxid, __psos_rn_delete, rninfo.rnid);
		return ret;
	}

	*rnid = rninfo.rnid;
	*allocsz = rninfo.allocsz;

	return SUCCESS;
}
Ejemplo n.º 18
0
int cobalt_monitor_drain_all_sync(cobalt_monitor_t *mon)
{
	/*
	 * Drain all waiters from the monitor, then synchronize with
	 * the kernel so the wakeups are actually performed before
	 * returning.
	 */
	struct cobalt_monitor_data *datp = get_monitor_data(mon);
	int ret, oldtype;

	cobalt_monitor_drain_all(mon);

	/* Nobody is pending on the monitor: nothing to sync with. */
	if ((datp->flags & COBALT_MONITOR_PENDED) == 0)
		return 0;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	ret = XENOMAI_SKINCALL1(__cobalt_muxid,
				sc_cobalt_monitor_sync,
				mon);

	pthread_setcanceltype(oldtype, NULL);

	/* A signal interrupted the sync: re-enter the monitor and
	   report that status instead of restarting the sync. */
	if (ret == -EINTR)
		return cobalt_monitor_enter(mon);

	return ret;
}
Ejemplo n.º 19
0
u_long q_vdelete(u_long qid)
{
	/* Delete a pSOS variable-size message queue. */
	u_long ret = XENOMAI_SKINCALL1(__psos_muxid, __psos_q_vdelete, qid);

	return ret;
}
Ejemplo n.º 20
0
ER wup_tsk(ID tskid)
{
	/* Wake up a sleeping uITRON task. */
	ER ret = XENOMAI_SKINCALL1(__uitron_muxid, __uitron_wup_tsk, tskid);

	return ret;
}
Ejemplo n.º 21
0
int rt_mutex_release(RT_MUTEX *mutex)
{
	/* Release a native-skin mutex held by the caller. */
	int ret = XENOMAI_SKINCALL1(__native_muxid,
				    __native_mutex_release, mutex);

	return ret;
}
Ejemplo n.º 22
0
ER frsm_tsk(ID tskid)
{
	/* Forcibly resume a suspended uITRON task. */
	ER ret = XENOMAI_SKINCALL1(__uitron_muxid, __uitron_frsm_tsk, tskid);

	return ret;
}
Ejemplo n.º 23
0
ER tslp_tsk(TMO tmout)
{
	/* Put the caller to sleep with a timeout (uITRON). */
	ER ret = XENOMAI_SKINCALL1(__uitron_muxid, __uitron_tslp_tsk, tmout);

	return ret;
}
Ejemplo n.º 24
0
ER rel_wai(ID tskid)
{
	/* Release a uITRON task from its wait state. */
	ER ret = XENOMAI_SKINCALL1(__uitron_muxid, __uitron_rel_wai, tskid);

	return ret;
}
Ejemplo n.º 25
0
ER get_tid(ID *p_tskid)
{
	/* Retrieve the current uITRON task id through p_tskid. */
	ER ret = XENOMAI_SKINCALL1(__uitron_muxid, __uitron_get_tid, p_tskid);

	return ret;
}
Ejemplo n.º 26
0
ER rot_rdq(PRI tskpri)
{
	/* Rotate the ready queue at the given priority (uITRON). */
	ER ret = XENOMAI_SKINCALL1(__uitron_muxid, __uitron_rot_rdq, tskpri);

	return ret;
}
Ejemplo n.º 27
0
void sc_tslice(unsigned short ticks)
{
	/* Set the VRTX round-robin time slice; the syscall status is
	   deliberately discarded. */
	(void)XENOMAI_SKINCALL1(__vrtx_muxid, __vrtx_tslice, ticks);
}
Ejemplo n.º 28
0
int rt_intr_disable(RT_INTR *intr)
{
	/* Disable the interrupt line managed by this descriptor. */
	int ret = XENOMAI_SKINCALL1(__native_muxid,
				    __native_intr_disable, intr);

	return ret;
}
Ejemplo n.º 29
0
int cobalt_monitor_destroy(cobalt_monitor_t *mon)
{
	/* Ask the Cobalt core to dispose of the monitor. */
	int ret = XENOMAI_SKINCALL1(__cobalt_muxid,
				    sc_cobalt_monitor_destroy, mon);

	return ret;
}
Ejemplo n.º 30
0
int cobalt_event_destroy(cobalt_event_t *event)
{
	/* Ask the Cobalt core to dispose of the event group. */
	int ret = XENOMAI_SKINCALL1(__cobalt_muxid,
				    sc_cobalt_event_destroy, event);

	return ret;
}