/*
 * The following handler is part of the inner user-interface: should
 * remain extern.
 */
int cobalt_sigshadow_handler(int sig, siginfo_t *si, void *ctxt)
{
	void *bt[SIGSHADOW_BACKTRACE_DEPTH];
	int request, depth, dropped, arg;

	/* Only react to signals queued by the nucleus. */
	if (si->si_code != SI_QUEUE)
		return 0;

	request = sigshadow_action(si->si_int);

	if (request == SIGSHADOW_ACTION_HARDEN) {
		XENOMAI_SYSCALL1(sc_cobalt_migrate, COBALT_PRIMARY);
		return 1;
	}

	if (request == SIGSHADOW_ACTION_BACKTRACE) {
		arg = sigshadow_arg(si->si_int);
		depth = backtrace(bt, sizeof(bt) / sizeof(bt[0]));
		/* Skip the sighandler context. */
		dropped = depth > 3 ? 3 : 0;
		XENOMAI_SYSCALL3(sc_cobalt_backtrace,
				 depth - dropped, bt + dropped, arg);
		return 1;
	}

	return 0;
}
/*
 * Inner-interface signal handler: hardens the caller on request, or
 * renices it via the regular POSIX scheduling service.
 * Returns 1 when the signal was consumed, 0 otherwise.
 */
int xeno_sigwinch_handler(int sig, siginfo_t *si, void *ctxt)
{
	int action;

	/* Only react to signals queued by the nucleus. */
	if (si->si_code != SI_QUEUE)
		return 0;

	action = sigshadow_action(si->si_int);

	switch (action) {
	case SIGSHADOW_ACTION_HARDEN:
		XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN);
		break;

	case SIGSHADOW_ACTION_RENICE: {
		struct sched_param param;
		int policy;

		param.sched_priority = sigshadow_arg(si->si_int);
		policy = param.sched_priority > 0 ? SCHED_FIFO : SCHED_OTHER;
		/* FIX: argument had been entity-mangled ("&para;m");
		   pass &param. */
		pthread_setschedparam(pthread_self(), policy, &param);
		break;
	}

	default:
		return 0;
	}

	return 1;
}
int pthread_setschedparam_ex(pthread_t thread, int policy, const struct sched_param_ex *param) { pthread_t myself = pthread_self(); struct sched_param short_param; unsigned long mode_offset; int err, promoted; if (thread == myself) xeno_fault_stack(); err = -XENOMAI_SKINCALL5(__pse51_muxid, __pse51_thread_setschedparam_ex, thread, policy, param, &mode_offset, &promoted); if (err == EPERM) { short_param.sched_priority = param->sched_priority; return __STD(pthread_setschedparam(thread, policy, &short_param)); } if (!err && promoted) { xeno_sigshadow_install_once(); xeno_set_current(); xeno_set_current_mode(mode_offset); if (policy != SCHED_OTHER) XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN); } return err; }
/* Switch the current shadow back to primary mode when allowed. */
void __cobalt_thread_harden(void)
{
	unsigned long mode = cobalt_get_current_mode();

	/* non-RT shadows are NOT allowed to force primary mode. */
	if ((mode & XNRELAX) == 0 || (mode & XNWEAK) != 0)
		return;

	XENOMAI_SYSCALL1(sc_nucleus_migrate, XENOMAI_XENO_DOMAIN);
}
static void __pthread_sigharden_handler(int sig) { if (old_sigharden_handler && old_sigharden_handler != &__pthread_sigharden_handler) old_sigharden_handler(sig); XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN); }
/*
 * Fetch the current thread's nucleus handle via syscall.
 * Returns XN_NO_HANDLE if the syscall fails.
 */
xnhandle_t xeno_slow_get_current(void)
{
	xnhandle_t current;
	int err;

	/* FIX: argument had been entity-mangled ("&curren;t");
	   pass &current. */
	err = XENOMAI_SYSCALL1(__xn_sys_current, &current);

	return err ? XN_NO_HANDLE : current;
}
/*
 * Query the current thread's mode bits via syscall.
 * Defaults to XNRELAX whenever the nucleus cannot answer.
 */
unsigned long xeno_slow_get_current_mode(void)
{
	xnthread_info_t info;

	if (XENOMAI_SYSCALL1(__xn_sys_current_info, &info) < 0)
		return XNRELAX;

	return info.state & XNRELAX;
}
/*
 * Cache the current thread's nucleus handle in user space.
 * Exits the process if the handle cannot be obtained.
 */
void xeno_set_current(void)
{
	xnhandle_t current;
	int err;

	/* FIX: argument had been entity-mangled ("&curren;t");
	   pass &current. */
	err = XENOMAI_SYSCALL1(__xn_sys_current, &current);
	if (err) {
		fprintf(stderr, "Xenomai: error obtaining handle for current "
			"thread: %s\n", strerror(-err));
		exit(EXIT_FAILURE);
	}
	__xeno_set_current(current);
}
/*
 * Create a VRTX task mapped onto a new Linux thread.
 * On failure, stores the pthread error in *errp and returns -1;
 * on success, returns the task identifier assigned by the trampoline.
 */
int sc_tecreate(void (*entry) (void *),
		int tid,
		int prio,
		int mode,
		u_long ustacksz,
		u_long sstacksz __attribute__ ((unused)),
		char *paddr, u_long psize, int *errp)
{
	struct vrtx_task_iargs iargs;
	struct sched_param param;
	pthread_attr_t thattr;
	int err, policy;
	pthread_t thid;

	/* Migrate this thread to the Linux domain since we are about to
	   issue a series of regular kernel syscalls in order to create
	   the new Linux thread, which in turn will be mapped to a VRTX
	   shadow. */
	XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_LINUX_DOMAIN);

	iargs.tid = tid;
	iargs.prio = prio;
	iargs.mode = mode;
	iargs.entry = entry;
	iargs.param = paddr;
	__real_sem_init(&iargs.sync, 0, 0);

	pthread_attr_init(&thattr);

	ustacksz = xeno_stacksize(ustacksz);

	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
	/* FIX: both "&param" arguments below had been entity-mangled
	   ("&para;m"). */
	policy = vrtx_task_set_posix_priority(prio, &param);
	pthread_attr_setschedparam(&thattr, &param);
	pthread_attr_setschedpolicy(&thattr, policy);
	pthread_attr_setstacksize(&thattr, ustacksz);
	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);

	err = __real_pthread_create(&thid, &thattr,
				    &vrtx_task_trampoline, &iargs);
	if (err) {
		*errp = err;
		__real_sem_destroy(&iargs.sync);
		return -1;
	}

	/* Wait for the trampoline to finish initialization, retrying
	   on signal interruption. */
	while (__real_sem_wait(&iargs.sync) && errno == EINTR)
		;

	__real_sem_destroy(&iargs.sync);

	return iargs.tid;
}
/*
 * uITRON cre_tsk(): create a task backed by a new Linux thread.
 * Returns a negated pthread error on creation failure, otherwise the
 * result of the completion syscall that syncs with the trampoline.
 */
ER cre_tsk(ID tskid, T_CTSK *pk_ctsk)
{
	struct uitron_task_iargs iargs;
	xncompletion_t completion;
	struct sched_param param;
	pthread_attr_t thattr;
	pthread_t thid;
	int policy;
	long err;

	/* Leave primary mode before issuing regular kernel syscalls. */
	XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_LINUX_DOMAIN);

	completion.syncflag = 0;
	completion.pid = -1;

	iargs.tskid = tskid;
	iargs.pk_ctsk = pk_ctsk;
	iargs.completionp = &completion;

	pthread_attr_init(&thattr);

	pk_ctsk->stksz = xeno_stacksize(pk_ctsk->stksz);

	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
	/* FIX: both "&param" arguments below had been entity-mangled
	   ("&para;m"). */
	policy = uitron_task_set_posix_priority(pk_ctsk->itskpri, &param);
	pthread_attr_setschedparam(&thattr, &param);
	pthread_attr_setschedpolicy(&thattr, policy);
	pthread_attr_setstacksize(&thattr, pk_ctsk->stksz);
	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);

	err = pthread_create(&thid, &thattr, &uitron_task_trampoline, &iargs);
	if (err)
		return -err;

	/* Sync with uitron_task_trampoline() then return.*/
	return XENOMAI_SYSCALL1(__xn_sys_completion, &completion);
}
/* Raise SIGXCPU at ourselves when the XNTRAPSW state bit is set. */
static void assert_nrt_inner(void)
{
	xnthread_info_t info;
	int ret;

	ret = XENOMAI_SYSCALL1(__xn_sys_current_info, &info);
	if (ret) {
		fprintf(stderr, "__xn_sys_current_info failed: %s\n",
			strerror(-ret));
		return;
	}

	if (info.state & XNTRAPSW)
		pthread_kill(pthread_self(), SIGXCPU);
}
/*
 * The following handler is part of the inner user-interface: should
 * remain extern.
 */
int xeno_sigwinch_handler(int sig, siginfo_t *si, void *ctxt)
{
	void *frames[SIGSHADOW_BACKTRACE_DEPTH];
	int action, arg, nr, skip;

	/* Only react to signals queued by the nucleus. */
	if (si->si_code != SI_QUEUE)
		return 0;

	action = sigshadow_action(si->si_int);
	switch (action) {
	case SIGSHADOW_ACTION_HARDEN:
		XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN);
		break;

	case SIGSHADOW_ACTION_RENICE: {
		struct sched_param param;
		int policy;

		arg = sigshadow_arg(si->si_int);
		param.sched_priority = arg;
		policy = param.sched_priority > 0 ? SCHED_FIFO : SCHED_OTHER;
		/* FIX: argument had been entity-mangled ("&para;m");
		   pass &param. */
		pthread_setschedparam(pthread_self(), policy, &param);
		break;
	}

	case SIGSHADOW_ACTION_BACKTRACE:
		arg = sigshadow_arg(si->si_int);
		nr = backtrace(frames, sizeof(frames) / sizeof(frames[0]));
		/* Skip the sighandler context. */
		skip = nr > 3 ? 3 : 0;
		XENOMAI_SYSCALL3(__xn_sys_backtrace,
				 nr - skip, frames + skip, arg);
		break;

	default:
		return 0;
	}

	return 1;
}
int __wrap_pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param) { pthread_t myself = pthread_self(); int err, promoted; err = -XENOMAI_SKINCALL5(__pse51_muxid, __pse51_thread_setschedparam, thread, policy, param, myself, &promoted); if (err == EPERM) return __real_pthread_setschedparam(thread, policy, param); else __real_pthread_setschedparam(thread, policy, param); if (!err && promoted) { old_sigharden_handler = signal(SIGHARDEN, &__pthread_sigharden_handler); if (policy != SCHED_OTHER) XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN); } return err; }
/* Issue the trace syscall with the user-start opcode. */
int xntrace_user_start(void)
{
	return XENOMAI_SYSCALL1(__xn_sys_trace,
				__xntrace_op_user_start);
}
/* Issue the trace syscall with the max-reset opcode. */
int xntrace_max_reset(void)
{
	return XENOMAI_SYSCALL1(__xn_sys_trace,
				__xntrace_op_max_reset);
}
/*
 * Wrapped pthread_mutex_trylock() for Xenomai POSIX-skin mutexes.
 *
 * With CONFIG_XENO_FASTSYNCH, first attempts an uncontended
 * user-space acquisition; otherwise (or for XNOTHER threads) falls
 * through to the kernel-side skin call.  Returns 0 on success or a
 * positive error code (EPERM, EINVAL, EBUSY, EAGAIN, ...) on failure.
 */
int __wrap_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	union __xeno_mutex *_mutex = (union __xeno_mutex *)mutex;
	struct __shadow_mutex *shadow = &_mutex->shadow_mutex;
	int err;

#ifdef CONFIG_XENO_FASTSYNCH
	unsigned long status;
	xnhandle_t cur;

	/* Only threads known to the nucleus may take the fast path. */
	cur = xeno_get_current();
	if (cur == XN_NO_HANDLE)
		return EPERM;

	/* XNOTHER threads bypass fast acquisition entirely. */
	status = xeno_get_current_mode();
	if (unlikely(status & XNOTHER))
		goto do_syscall;

	/*
	 * NOTE(review): "s" is passed to the cb_*_lock() helpers but is
	 * not declared in this block -- presumably supplied by a
	 * DECLARE_CB_LOCK_FLAGS(s)-style declaration elsewhere; confirm.
	 */
	if (unlikely(cb_try_read_lock(&shadow->lock, s)))
		return EINVAL;

	if (unlikely(shadow->magic != PSE51_MUTEX_MAGIC)) {
		err = -EINVAL;
		goto out;
	}

	/* A relaxed shadow must migrate back to the Xenomai domain
	   first; retry the migration across signal interruptions. */
	if (unlikely(status & XNRELAX)) {
		do {
			err = XENOMAI_SYSCALL1(__xn_sys_migrate,
					       XENOMAI_XENO_DOMAIN);
		} while (err == -EINTR);

		if (err < 0)
			goto out;
	}

	/* Attempt the uncontended acquisition in user space. */
	err = xnsynch_fast_acquire(get_ownerp(shadow), cur);

	if (likely(!err)) {
		shadow->lockcnt = 1;
		cb_read_unlock(&shadow->lock, s);
		return 0;
	}

	/* Recursive relock by the current owner bumps the count;
	   saturate with EAGAIN at UINT_MAX. */
	if (err == -EBUSY && shadow->attr.type == PTHREAD_MUTEX_RECURSIVE) {
		if (shadow->lockcnt == UINT_MAX)
			err = -EAGAIN;
		else {
			++shadow->lockcnt;
			err = 0;
		}
	} else
		err = -EBUSY;

  out:
	cb_read_unlock(&shadow->lock, s);
	return -err;

  do_syscall:
#endif /* !CONFIG_XENO_FASTSYNCH */

	/* Slow path: let the kernel-side skin arbitrate, retrying
	   across signal interruptions. */
	do {
		err = XENOMAI_SKINCALL1(__pse51_muxid,
					__pse51_mutex_trylock, shadow);
	} while (err == -EINTR);

	return -err;
}