Пример #1
0
/*
 * Wrapped pthread_mutex_unlock().
 *
 * With CONFIG_XENO_FASTSYNCH, first attempt to release the mutex
 * entirely from user space via an atomic operation on the fast lock
 * word; fall back to a syscall only when the fast release fails
 * (e.g. waiters are pending) or the caller runs over a non-Xenomai
 * scheduling class (XNOTHER), whose ownership is tracked in kernel
 * space.
 *
 * Returns 0 on success, or a positive POSIX error code (note the
 * final "return -err": err holds negated kernel-style codes).
 */
int __wrap_pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	xnarch_atomic_t *ownerp;
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai-mapped threads may hold a fast mutex. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	/* Guard the shadow against concurrent destruction. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out_err;
	}

	/*
	 * Non real-time shadows (XNOTHER) must always release through
	 * the kernel, which tracks their resource ownership.
	 */
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	ownerp = get_ownerp(shadow);

	/* Reject callers which do not actually own the mutex. */
	err = xnsynch_fast_owner_check(ownerp, cur);
	if (unlikely(err))
		goto out_err;

	/* Recursive mutex held more than once: drop one level only. */
	if (shadow->lockcnt > 1) {
		--shadow->lockcnt;
		goto out;
	}

	/* Last nesting level: atomic release succeeds if nobody waits. */
	if (likely(xnsynch_fast_release(ownerp, cur))) {
	  out:
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: let the nucleus perform the release/wakeup. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_unlock, shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out_err:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Пример #2
0
/*
 * Wrapped putchar(): emit through the real-time print service when
 * the caller is a Xenomai thread in primary (non-relaxed) mode;
 * otherwise flush pending RT output and defer to plain libc.
 */
int __wrap_putchar(int c)
{
	if (xeno_get_current() == XN_NO_HANDLE ||
	    (xeno_get_current_mode() & XNRELAX) != 0) {
		rt_print_flush_buffers();
		return __real_putchar(c);
	}

	return rt_putchar(c);
}
Пример #3
0
/*
 * Wrapped fputc(): route through the real-time print service for
 * Xenomai threads running in primary mode; otherwise flush any
 * pending RT output and hand over to the regular libc call.
 */
int __wrap_fputc(int c, FILE *stream)
{
	if (xeno_get_current() == XN_NO_HANDLE ||
	    (xeno_get_current_mode() & XNRELAX) != 0) {
		rt_print_flush_buffers();
		return __real_fputc(c, stream);
	}

	return rt_fputc(c, stream);
}
Пример #4
0
/*
 * Wrapped vfprintf(): use the real-time formatter for Xenomai
 * threads in primary mode; otherwise flush pending RT output and
 * fall through to the real libc implementation.
 */
int __wrap_vfprintf(FILE *stream, const char *fmt, va_list args)
{
	if (xeno_get_current() == XN_NO_HANDLE ||
	    (xeno_get_current_mode() & XNRELAX) != 0) {
		rt_print_flush_buffers();
		return __real_vfprintf(stream, fmt, args);
	}

	return rt_vfprintf(stream, fmt, args);
}
Пример #5
0
/*
 * Wrapped vsyslog(): log through the real-time service when called
 * from a Xenomai thread in primary mode; otherwise flush pending RT
 * output and defer to the real libc vsyslog().
 *
 * Fix: the original wrote "return rt_vsyslog(...)" — returning an
 * expression from a void function is an ISO C constraint violation
 * (C99 6.8.6.4); call the service and fall off the end instead.
 */
void __wrap_vsyslog(int priority, const char *fmt, va_list ap)
{
	if (unlikely(xeno_get_current() != XN_NO_HANDLE &&
		     !(xeno_get_current_mode() & XNRELAX)))
		rt_vsyslog(priority, fmt, ap);
	else {
		rt_print_flush_buffers();
		__real_vsyslog(priority, fmt, ap);
	}
}
Пример #6
0
/*
 * Wrapped puts(): emit through the real-time print service for
 * Xenomai threads in primary mode; otherwise flush pending RT
 * output and defer to libc.
 *
 * Fix: the fallback must call __real_puts(), not puts(). Under
 * "ld --wrap=puts" an undefined reference to puts() is redirected
 * back to __wrap_puts(), so the original recursed infinitely; all
 * sibling wrappers correctly use their __real_* counterpart.
 */
int __wrap_puts(const char *s)
{
	if (unlikely(xeno_get_current() != XN_NO_HANDLE &&
		     !(xeno_get_current_mode() & XNRELAX)))
		return rt_puts(s);
	else {
		rt_print_flush_buffers();
		return __real_puts(s);
	}
}
Пример #7
0
/*
 * Wrapped fwrite(): write through the real-time print service for
 * Xenomai threads in primary mode; otherwise flush pending RT
 * output and hand the buffer to the real libc fwrite().
 */
size_t __wrap_fwrite(void *ptr, size_t size, size_t nmemb, FILE *stream)
{
	if (xeno_get_current() == XN_NO_HANDLE ||
	    (xeno_get_current_mode() & XNRELAX) != 0) {
		rt_print_flush_buffers();
		return __real_fwrite(ptr, size, nmemb, stream);
	}

	return rt_fwrite(ptr, size, nmemb, stream);
}
Пример #8
0
/*
 * Wrapped __vsyslog_chk() (glibc fortify entry point): route to the
 * real-time checked variant for Xenomai threads in primary mode,
 * otherwise flush pending RT output and defer to glibc.
 *
 * Without CONFIG_XENO_FORTIFY there is no RT-safe implementation to
 * dispatch to, so refuse to run and abort with a diagnostic.
 *
 * Fix: the original wrote "return __rt_vsyslog_chk(...)" —
 * returning an expression from a void function is an ISO C
 * constraint violation (C99 6.8.6.4); also dropped trailing
 * whitespace in the diagnostic's continuation lines.
 */
void __wrap___vsyslog_chk(int pri, int flag, const char *fmt, va_list ap)
{
#ifdef CONFIG_XENO_FORTIFY
	if (unlikely(xeno_get_current() != XN_NO_HANDLE &&
		     !(xeno_get_current_mode() & XNRELAX)))
		__rt_vsyslog_chk(pri, flag, fmt, ap);
	else {
		rt_print_flush_buffers();
		__real___vsyslog_chk(pri, flag, fmt, ap);
	}
#else
	__wrap_fprintf(stderr,
		       "Xenomai needs to be compiled with --enable-fortify "
		       "to support applications\ncompiled with "
		       "-D_FORTIFY_SOURCE\n");
	exit(EXIT_FAILURE);
#endif
}
Пример #9
0
void assert_nrt(void)
{
	if (unlikely(xeno_get_current() != XN_NO_HANDLE &&
		     !(xeno_get_current_mode() & XNRELAX)))
		assert_nrt_inner();
}
Пример #10
0
static void xeno_current_fork_handler(void)
{
	if (xeno_get_current() != XN_NO_HANDLE)
		__xeno_set_current(XN_NO_HANDLE);
}
Пример #11
0
/*
 * Wrapped pthread_mutex_trylock().
 *
 * With CONFIG_XENO_FASTSYNCH, attempt a user-space atomic grab of
 * the fast lock word (migrating to the Xenomai domain first if the
 * caller is relaxed), handling the recursive-mutex relock case
 * locally; only non-Xenomai-class callers (XNOTHER) go through the
 * syscall, since the kernel tracks their ownership.
 *
 * Returns 0 on success, or a positive POSIX error code ("return
 * -err" negates the kernel-style codes held in err).
 */
int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai-mapped threads may take the fast path. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	/* Non real-time shadows must acquire through the kernel. */
	status = xeno_get_current_mode();
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	/* Guard the shadow against concurrent destruction. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * A relaxed shadow must migrate to primary mode before
	 * touching the fast lock word.
	 */
	if (unlikely(status & XNRELAX)) {
		do {
			err = XENOMAI_SYSCALL1(__xn_sys_migrate,
					       XENOMAI_XENO_DOMAIN);
		} while (err == -EINTR);

		if (err < 0)
			goto out;
	}

	err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

	if (likely(!err)) {
		shadow->lockcnt = 1;
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

	/*
	 * Already owned by us and recursive: bump the nesting count,
	 * saturating with EAGAIN at UINT_MAX. Any other failure is
	 * reported as EBUSY, per trylock semantics.
	 */
	if (err == -EBUSY && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE) {
		if (shadow->lockcnt == UINT_MAX)
			err = -EAGAIN;
		else {
			++shadow->lockcnt;
			err = 0;
		}
	} else
		err = -EBUSY;

  out:
	cb_read_unlock(&shadow->lock, s);
	return -err;

do_syscall:
#endif /* CONFIG_XENO_FASTSYNCH */

	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_trylock, shadow);
	} while (err == -EINTR);

	return -err;
}
Пример #12
0
/*
 * Wrapped pthread_mutex_timedlock().
 *
 * With CONFIG_XENO_FASTSYNCH, first try a user-space atomic grab of
 * the fast lock word; if the mutex is busy, resolve the deadlock /
 * recursive cases locally and otherwise fall through to the
 * (blocking, timed) syscall. Relaxed and non-Xenomai-class callers
 * (XNRELAX|XNOTHER) always use the syscall.
 *
 * Fix: in the PTHREAD_MUTEX_RECURSIVE branch the nesting count was
 * incremented but err was left at -EBUSY, so a successful recursive
 * relock reported EBUSY. Set err = 0 as the sibling
 * __wrap_pthread_mutex_lock() does.
 *
 * Returns 0 on success, or a positive POSIX error code.
 */
int __wrap_pthread_mutex_timedlock(pthread_mutex_t *mutex,
				   const struct timespec *to)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai-mapped threads may take the fast path. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	/* Guard the shadow against concurrent destruction. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/* See __wrap_pthread_mutex_lock() */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

		if (likely(!err)) {
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		if (err == -EBUSY)
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				/* Busy: block in the kernel below. */
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}

				++shadow->lockcnt;
				err = 0;
				goto out;
			}
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: timed acquisition through the nucleus. */
	do {
		err = XENOMAI_SKINCALL2(__pse51_muxid,
					__pse51_mutex_timedlock, shadow, to);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Пример #13
0
/*
 * Wrapped pthread_mutex_lock().
 *
 * With CONFIG_XENO_FASTSYNCH, first try a user-space atomic grab of
 * the fast lock word; if the mutex is busy, resolve the deadlock /
 * recursive cases locally and otherwise fall through to the
 * blocking syscall. Relaxed and non-Xenomai-class callers
 * (XNRELAX|XNOTHER) always go through the kernel.
 *
 * Returns 0 on success, or a positive POSIX error code ("return
 * -err" negates the kernel-style codes held in err).
 */
int __wrap_pthread_mutex_lock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only Xenomai-mapped threads may take the fast path. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	status = xeno_get_current_mode();

	/* Guard the shadow against concurrent destruction. */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (shadow->magic != PSE51_MUTEX_MAGIC) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * We track resource ownership for non real-time shadows in
	 * order to handle the auto-relax feature, so we must always
	 * obtain them via a syscall.
	 */
	if (likely(!(status & (XNRELAX|XNOTHER)))) {
		err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

		if (likely(!err)) {
			shadow->lockcnt = 1;
			cb_read_unlock(&shadow->lock, s);
			return 0;
		}

		if (err == -EBUSY)
			switch(shadow->attr.type) {
			case PTHREAD_MUTEX_NORMAL:
				/* Busy: block in the kernel below. */
				break;

			case PTHREAD_MUTEX_ERRORCHECK:
				err = -EDEADLK;
				goto out;

			case PTHREAD_MUTEX_RECURSIVE:
				if (shadow->lockcnt == UINT_MAX) {
					err = -EAGAIN;
					goto out;
				}
				/* Recursive relock by the owner: success. */
				++shadow->lockcnt;
				err = 0;
				goto out;
			}
		}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Slow path: blocking acquisition through the nucleus. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,__pse51_mutex_lock,shadow);
	} while (err == -EINTR);

#ifdef CONFIG_XENO_FASTSYNCH
  out:
	cb_read_unlock(&shadow->lock, s);
#endif /* CONFIG_XENO_FASTSYNCH */

	return -err;
}
Пример #14
0
/*
 * Library constructor: bind the process to the POSIX (PSE51) skin.
 *
 * Binds the skin and RTDM interfaces, initializes the mutex and
 * print subsystems, auto-shadows the calling thread (unless
 * XENO_NOSHADOW is set in the environment), and registers itself as
 * a pthread_atfork() child handler so the child re-runs the whole
 * binding sequence after fork(). Aborts the process on any failure.
 */
static __constructor__ void __init_posix_interface(void)
{
	struct sched_param parm;
	int policy;
	int muxid, err;
	const char *noshadow;

	rt_print_auto_init(1);

	/* Bind to the POSIX skin and cache its multiplexer id. */
	muxid =
	    xeno_bind_skin(PSE51_SKIN_MAGIC, "POSIX", "xeno_posix");

#ifdef XNARCH_HAVE_NONPRIV_TSC
	pse51_clock_init(muxid);
#endif /* XNARCH_HAVE_NONPRIV_TSC */

	__pse51_muxid = __xn_mux_shifted_id(muxid);

	/*
	 * RTDM is optional: on success, reserve the top of the fd
	 * space for RTDM descriptors.
	 */
	muxid = XENOMAI_SYSBIND(RTDM_SKIN_MAGIC,
				XENOMAI_FEAT_DEP, XENOMAI_ABI_REV, NULL);
	if (muxid > 0) {
		__pse51_rtdm_muxid = __xn_mux_shifted_id(muxid);
		__pse51_rtdm_fd_start = FD_SETSIZE - XENOMAI_SKINCALL0(__pse51_rtdm_muxid,
								 __rtdm_fdcount);
	}

	pse51_mutex_init();

	/*
	 * Auto-shadow the current thread by re-applying its own
	 * scheduling parameters through the wrapped setter, unless
	 * the user opted out via XENO_NOSHADOW or the thread is
	 * already mapped.
	 */
	noshadow = getenv("XENO_NOSHADOW");
	if ((!noshadow || !*noshadow) && xeno_get_current() == XN_NO_HANDLE) {
		err = __real_pthread_getschedparam(pthread_self(), &policy,
						   &parm);
		if (err) {
			fprintf(stderr, "Xenomai Posix skin init: "
				"pthread_getschedparam: %s\n", strerror(err));
			exit(EXIT_FAILURE);
		}

		err = __wrap_pthread_setschedparam(pthread_self(), policy,
						   &parm);
		if (err) {
			fprintf(stderr, "Xenomai Posix skin init: "
				"pthread_setschedparam: %s\n", strerror(err));
			exit(EXIT_FAILURE);
		}
	}

	/* Register the atfork re-bind handler exactly once. */
	if (fork_handler_registered)
		return;

	err = pthread_atfork(NULL, NULL, &__init_posix_interface);
	if (err) {
		fprintf(stderr, "Xenomai Posix skin init: "
			"pthread_atfork: %s\n", strerror(err));
		exit(EXIT_FAILURE);
	}
	fork_handler_registered = 1;

	/*
	 * Sanity check: the shadow mutex descriptor must fit inside
	 * a pthread_mutex_t, since the skin overlays the two.
	 */
	if (sizeof(struct __shadow_mutex) > sizeof(pthread_mutex_t)) {
		fprintf(stderr, "sizeof(pthread_mutex_t): %d <"
			" sizeof(shadow_mutex): %d !\n",
			(int) sizeof(pthread_mutex_t),
			(int) sizeof(struct __shadow_mutex));
		exit(EXIT_FAILURE);
	}
}