Example #1
int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
			      const pthread_mutexattr_t *attr)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	DECLARE_CB_LOCK_FLAGS(s);	/* lock-flags variable expected by the
					   cb_*() helpers (as declared in the
					   Xenomai POSIX skin) */

	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		goto checked;

	err = -XENOMAI_SKINCALL2(__pse51_muxid, __pse51_check_init,
				 shadow, attr);

	if (err) {
		cb_read_unlock(&shadow->lock, s);
		return err;
	}

  checked:
	cb_force_write_lock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	err = -XENOMAI_SKINCALL2(__pse51_muxid, __pse51_mutex_init,
				 shadow, attr);

#ifdef CONFIG_XENO_FASTSYNCH
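	/* For a process-private mutex, the owner word lives in the local
	   semaphore heap; resolve its address from the stored offset. */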
	if (!shadow->attr.pshared)
		shadow->owner = (xnarch_atomic_t *)
			(xeno_sem_heap[0] + shadow->owner_offset);

	cb_write_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return err;
}
Example #2
int __wrap_clock_gettime(clockid_t clock_id, struct timespec *tp)
{
	int err;
#ifdef CONFIG_XENO_HW_DIRECT_TSC
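	/* Fast path: read the TSC directly from user space and convert it
	   to a timespec without entering the kernel. */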
	if (clock_id == CLOCK_MONOTONIC && sysinfo.tickval == 1) {
		unsigned long long tsc;
		unsigned long rem;

		tsc = __xn_rdtsc();
		tp->tv_sec = xnarch_ulldiv(tsc, sysinfo.cpufreq, &rem);
		/* Next line is 64 bits safe, since rem is less than
		   sysinfo.cpufreq hence fits on 32 bits. */
		tp->tv_nsec = xnarch_imuldiv(rem, 1000000000, sysinfo.cpufreq);
		return 0;
	}
#endif /* CONFIG_XENO_HW_DIRECT_TSC */

	err = -XENOMAI_SKINCALL2(__pse51_muxid,
				 __pse51_clock_gettime,
				 clock_id,
				 tp);

	if (!err)
		return 0;

	errno = err;
	return -1;
}
Example #3
int __wrap_clock_gettime(clockid_t clock_id, struct timespec *tp)
{
	int err;
#ifdef XNARCH_HAVE_NONPRIV_TSC
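	/* Same fast path as above, relying on a TSC that can be read
	   without privileges. */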
	if (clock_id == CLOCK_MONOTONIC && __pse51_sysinfo.tickval == 1) {
		unsigned long long ns;
		unsigned long rem;

		ns = xnarch_tsc_to_ns(__xn_rdtsc());
		tp->tv_sec = xnarch_divrem_billion(ns, &rem);
		tp->tv_nsec = rem;
		return 0;
	}
#endif /* XNARCH_HAVE_NONPRIV_TSC */

	err = -XENOMAI_SKINCALL2(__pse51_muxid,
				 __pse51_clock_gettime,
				 clock_id,
				 tp);

	if (!err)
		return 0;

	errno = err;
	return -1;
}
Example #4
int sc_finquiry(int fid, int *errp)
{
	int mask_r = 0;

	*errp = XENOMAI_SKINCALL2(__vrtx_muxid, __vrtx_finquiry, fid, &mask_r);
	return mask_r;
}
Example #5
u_long sm_ident(const char *name, u_long nodeno, u_long *smid_r)
{
	char short_name[5];

	name = __psos_maybe_short_name(short_name, name);

	return XENOMAI_SKINCALL2(__psos_muxid, __psos_sm_ident, name, smid_r);
}
Example #6
int __wrap_pthread_mutex_init(pthread_mutex_t *mutex,
			      const pthread_mutexattr_t *attr)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	int err;

	err = -XENOMAI_SKINCALL2(__pse51_muxid,
				 __pse51_mutex_init, &_mutex->shadow_mutex,
				 attr);
	return err;
}
Example #7
int __wrap_pthread_cond_init(pthread_cond_t *cond,
			     const pthread_condattr_t *attr)
{
	union __xeno_cond *_cond = (union __xeno_cond *)cond;
	int err;

	err = -XENOMAI_SKINCALL2(__pse51_muxid,
				 __pse51_cond_init, &_cond->shadow_cond, attr);
	return err;
}
Example #8
static void *vrtx_task_trampoline(void *cookie)
{
	struct vrtx_task_iargs *iargs = cookie;
	void (*entry)(void *arg), *arg;
	struct vrtx_arg_bulk bulk;
	unsigned long mode_offset;
	long err;
#ifndef HAVE___THREAD
	TCB *tcb;
#endif /* !HAVE___THREAD */

	/* vrtx_task_delete requires asynchronous cancellation */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

#ifndef HAVE___THREAD
	tcb = malloc(sizeof(*tcb));
	if (tcb == NULL) {
		fprintf(stderr, "Xenomai: failed to allocate local TCB?!\n");
		err = -ENOMEM;
		goto fail;
	}

	pthread_setspecific(__vrtx_tskey, tcb);
#endif /* !HAVE___THREAD */

	xeno_sigshadow_install_once();

	bulk.a1 = (u_long)iargs->tid;
	bulk.a2 = (u_long)iargs->prio;
	bulk.a3 = (u_long)iargs->mode;
	bulk.a4 = (u_long)&mode_offset;
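	/* Note: a4 holds the address of an on-stack variable, so the
	   defensive check below can never fire. */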
	if (bulk.a4 == 0) {
		err = -ENOMEM;
		goto fail;
	}

	err = XENOMAI_SKINCALL2(__vrtx_muxid, __vrtx_tecreate,
				&bulk, &iargs->tid);

	/* Prevent stale memory access after our parent is released. */
	entry = iargs->entry;
	arg = iargs->param;
	__real_sem_post(&iargs->sync);

	if (err == 0) {
		xeno_set_current();
		xeno_set_current_mode(mode_offset);
		entry(arg);
	}
fail:
	return (void *)err;
}
Example #9
int msgQNumMsgs(MSG_Q_ID qid)
{
	int err, nummsgs;

	err = XENOMAI_SKINCALL2(__vxworks_muxid,
				__vxworks_msgq_nummsgs, qid, &nummsgs);
	if (err) {
		errno = abs(err);
		return ERROR;
	}

	return nummsgs;
}
Example #10
int rt_intr_wait(RT_INTR *intr, RTIME timeout)
{
	int err, oldtype;

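	/* Allow asynchronous cancellation while blocked in the skin call. */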
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	err = XENOMAI_SKINCALL2(__native_muxid,
				 __native_intr_wait, intr, &timeout);

	pthread_setcanceltype(oldtype, NULL);

	return err;
}
Example #11
int __wrap_clock_settime(clockid_t clock_id, const struct timespec *tp)
{
	int err = -XENOMAI_SKINCALL2(__pse51_muxid,
				     __pse51_clock_settime,
				     clock_id,
				     tp);

	if (!err)
		return 0;

	errno = err;
	return -1;
}
Example #12
int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
				   const struct timespec *to)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	int err;

	do {
		err = XENOMAI_SKINCALL2(__pse51_muxid,
					__pse51_mutex_timedlock,
					&_mutex->shadow_mutex, to);
	} while (err == -EINTR);

	return -err;
}
Example #13
static void __rt_cond_cleanup(void *data)
{
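	/* Cancellation cleanup handler: complete the condvar wait so the
	   mutex is re-acquired before the canceled thread unwinds. */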
	struct rt_cond_cleanup_t *c = (struct rt_cond_cleanup_t *)data;
	int err;

	do {
		err = XENOMAI_SKINCALL2(__native_muxid,
					__native_cond_wait_epilogue, c->mutex,
					c->saved_lockcnt);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
	c->mutex->lockcnt = c->saved_lockcnt;
#endif /* CONFIG_XENO_FASTSYNCH */
}
Example #14
static int sys_rtdm_open(const char *path, int oflag)
{
	const char *rtdm_path = path;
	int ret, oldtype;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/* skip path prefix for RTDM invocation */
	if (strncmp(path, "/dev/", 5) == 0)
		rtdm_path += 5;

	ret = XENOMAI_SKINCALL2(__pse51_rtdm_muxid, __rtdm_open, rtdm_path, oflag);

	pthread_setcanceltype(oldtype, NULL);

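	/* Success: shift the RTDM descriptor into the range reserved for
	   real-time file descriptors. */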
	if (ret >= 0)
		ret += __pse51_rtdm_fd_start;

	return ret;
}
Example #15
int __wrap_open(const char *path, int oflag, ...)
{
	int ret, oldtype;
	const char *rtdm_path = path;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

	/* skip path prefix for RTDM invocation */
	if (strncmp(path, "/dev/", 5) == 0)
		rtdm_path += 5;

	ret = XENOMAI_SKINCALL2(__rtdm_muxid, __rtdm_open, rtdm_path, oflag);

	pthread_setcanceltype(oldtype, NULL);

	if (ret >= 0)
		ret += __rtdm_fd_start;
	else if (ret == -ENODEV || ret == -ENOSYS) {
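		/* The RTDM skin is unavailable or no RTDM device matches;
		   fall back to the regular libc open(). */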
		va_list ap;

		va_start(ap, oflag);

		ret = __real_open(path, oflag, va_arg(ap, mode_t));

		va_end(ap);

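		/* A descriptor returned by the regular open() must not
		   collide with the range reserved for RTDM descriptors. */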
		if (ret >= __rtdm_fd_start) {
			__real_close(ret);
			errno = EMFILE;
			ret = -1;
		}
	} else {
		errno = -ret;
		ret = -1;
	}

	return ret;
}
Example #16
static int __map_heap_memory(const struct rninfo *rnip)
{
	int err = 0, rnfd;
	caddr_t mapbase;

	/* Open the heap device to share the region memory with the
	   in-kernel skin. */
	rnfd = open(XNHEAP_DEV_NAME, O_RDWR);

	if (rnfd < 0)
		return -ENOENT;

	/* Bind this file instance to the shared heap. */
	err = ioctl(rnfd, 0, rnip->rncb);

	if (err)
		goto close_and_exit;

	/* Map the region memory into our address space. */
	mapbase = (caddr_t) mmap(NULL,
				 rnip->mapsize,
				 PROT_READ | PROT_WRITE,
				 MAP_SHARED, rnfd, 0L);

	if (mapbase == MAP_FAILED)
		err = -ENOMEM;
	else
		err = XENOMAI_SKINCALL2(__psos_muxid, __psos_rn_bind,
					rnip->rnid, mapbase);

      close_and_exit:

	close(rnfd);

	return err;
}
Example #17
int rt_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
{
	struct rt_cond_cleanup_t c = {
		.mutex = mutex,
	};
	int err, oldtype;

	pthread_cleanup_push(&__rt_cond_cleanup, &c);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

#ifdef CONFIG_XENO_FASTSYNCH
	c.saved_lockcnt = mutex->lockcnt;
#endif /* CONFIG_XENO_FASTSYNCH */

	err = XENOMAI_SKINCALL5(__native_muxid,
				__native_cond_wait_prologue, cond, mutex,
				&c.saved_lockcnt, XN_RELATIVE, &timeout);

	pthread_setcanceltype(oldtype, NULL);

	pthread_cleanup_pop(0);

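	/* If the wait prologue was interrupted by a signal, run the
	   epilogue until the mutex is re-acquired. */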
	while (err == -EINTR)
		err = XENOMAI_SKINCALL2(__native_muxid,
					__native_cond_wait_epilogue, mutex,
					c.saved_lockcnt);

#ifdef CONFIG_XENO_FASTSYNCH
	mutex->lockcnt = c.saved_lockcnt;
#endif /* CONFIG_XENO_FASTSYNCH */

	pthread_testcancel();

	return err ?: c.err;
}
Example #18
u_long q_send(u_long qid, u_long msgbuf[4])
{
	return XENOMAI_SKINCALL2(__psos_muxid, __psos_q_send, qid, msgbuf);
}
Example #19
int rt_mutex_inquire(RT_MUTEX *mutex, RT_MUTEX_INFO *info)
{
	return XENOMAI_SKINCALL2(__native_muxid,
				 __native_mutex_inquire, mutex, info);
}
Example #20
int rt_mutex_acquire(RT_MUTEX *mutex, RTIME timeout)
{
	return XENOMAI_SKINCALL2(__native_muxid,
				 __native_mutex_acquire, mutex, &timeout);
}
Example #21
int rt_mutex_create(RT_MUTEX *mutex, const char *name)
{
	return XENOMAI_SKINCALL2(__native_muxid,
				 __native_mutex_create, mutex, name);
}
Example #22
ER can_wup(INT *p_wupcnt, ID tskid)
{
	return XENOMAI_SKINCALL2(__uitron_muxid, __uitron_can_wup,
				 p_wupcnt, tskid);
}
Example #23
ER ref_tsk(T_RTSK *pk_rtsk, ID tskid)
{
	return XENOMAI_SKINCALL2(__uitron_muxid, __uitron_ref_tsk,
				 pk_rtsk, tskid);
}
Example #24
ER sta_tsk(ID tskid, INT stacd)
{
	return XENOMAI_SKINCALL2(__uitron_muxid, __uitron_sta_tsk,
				 tskid, stacd);
}
Example #25
int rt_heap_inquire(RT_HEAP *heap, RT_HEAP_INFO *info)
{
	return XENOMAI_SKINCALL2(__native_muxid, __native_heap_inquire, heap,
				 info);
}
Example #26
int rt_heap_free(RT_HEAP *heap, void *buf)
{
	return XENOMAI_SKINCALL2(__native_muxid, __native_heap_free, heap, buf);
}
Example #27
void sc_fpost(int fid, int mask, int *errp)
{
	*errp = XENOMAI_SKINCALL2(__vrtx_muxid, __vrtx_fpost, fid, mask);
}
Example #28
u_long q_urgent(u_long qid, u_long msgbuf[4])
{
	return XENOMAI_SKINCALL2(__psos_muxid, __psos_q_urgent, qid, msgbuf);
}
Example #29
u_long q_vident(const char *name, u_long node, u_long *qid_r)
{
	return XENOMAI_SKINCALL2(__psos_muxid, __psos_q_vident, name, qid_r);
}
Example #30
ER chg_pri(ID tskid, PRI tskpri)
{
	return XENOMAI_SKINCALL2(__uitron_muxid, __uitron_chg_pri,
				 tskid, tskpri);
}