int pthread_setschedparam_ex(pthread_t thread, int policy,
			     const struct sched_param_ex *param)
{
	pthread_t myself = pthread_self();
	struct sched_param short_param;
	unsigned long mode_offset;
	int err, promoted;

	if (thread == myself)
		xeno_fault_stack();

	err = -XENOMAI_SKINCALL5(__pse51_muxid,
				 __pse51_thread_setschedparam_ex,
				 thread, policy, param,
				 &mode_offset, &promoted);

	if (err == EPERM) {
		short_param.sched_priority = param->sched_priority;
		return __STD(pthread_setschedparam(thread, policy, &short_param));
	}

	if (!err && promoted) {
		xeno_sigshadow_install_once();
		xeno_set_current();
		xeno_set_current_mode(mode_offset);
		if (policy != SCHED_OTHER)
			XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_XENO_DOMAIN);
	}

	return err;
}
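/*
 * Usage sketch (not from the source tree): raise the calling thread to
 * SCHED_FIFO priority 20 through the extended call above. Only the
 * sched_priority member of struct sched_param_ex is relied upon here; the
 * Xenomai POSIX skin headers are assumed to declare the type and the
 * prototype. Note that the call returns a positive errno value on failure.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static int raise_self_to_fifo_20(void)
{
	struct sched_param_ex param_ex;
	int err;

	memset(&param_ex, 0, sizeof(param_ex));
	param_ex.sched_priority = 20;

	err = pthread_setschedparam_ex(pthread_self(), SCHED_FIFO, &param_ex);
	if (err)
		fprintf(stderr, "pthread_setschedparam_ex: %s\n", strerror(err));

	return err;
}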
static void *vrtx_task_trampoline(void *cookie)
{
	struct vrtx_task_iargs *iargs = cookie;
	void (*entry)(void *arg), *arg;
	struct vrtx_arg_bulk bulk;
	unsigned long mode_offset;
	long err;
#ifndef HAVE___THREAD
	TCB *tcb;
#endif /* !HAVE___THREAD */

	/* vrtx_task_delete requires asynchronous cancellation */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

#ifndef HAVE___THREAD
	tcb = malloc(sizeof(*tcb));
	if (tcb == NULL) {
		fprintf(stderr, "Xenomai: failed to allocate local TCB?!\n");
		err = -ENOMEM;
		goto fail;
	}
	pthread_setspecific(__vrtx_tskey, tcb);
#endif /* !HAVE___THREAD */

	xeno_sigshadow_install_once();

	bulk.a1 = (u_long)iargs->tid;
	bulk.a2 = (u_long)iargs->prio;
	bulk.a3 = (u_long)iargs->mode;
	bulk.a4 = (u_long)&mode_offset;
	if (bulk.a4 == 0) {
		err = -ENOMEM;
		goto fail;
	}

	err = XENOMAI_SKINCALL2(__vrtx_muxid, __vrtx_tecreate,
				&bulk, &iargs->tid);

	/* Prevent stale memory access after our parent is released. */
	entry = iargs->entry;
	arg = iargs->param;
	__real_sem_post(&iargs->sync);

	if (err == 0) {
		xeno_set_current();
		xeno_set_current_mode(mode_offset);
		entry(arg);
	}

fail:
	return (void *)err;
}
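/*
 * Simplified parent-side sketch (an assumption, not the skin's actual
 * sc_tecreate() path): fill in the trampoline arguments referenced above,
 * spawn the shadow thread, then block on the sync semaphore until the
 * trampoline has issued __vrtx_tecreate and copied what it needs.
 * Plain sem_* calls stand in for the __real_* wrappers used by the skin,
 * and struct vrtx_task_iargs is assumed to expose the fields the
 * trampoline dereferences (tid, prio, mode, entry, param, sync).
 */
#include <pthread.h>
#include <semaphore.h>

static int spawn_vrtx_task_sketch(struct vrtx_task_iargs *iargs,
				  void (*entry)(void *), void *param,
				  int tid, int prio, int mode)
{
	pthread_t thid;
	int err;

	iargs->tid = tid;
	iargs->prio = prio;
	iargs->mode = mode;
	iargs->entry = entry;
	iargs->param = param;
	sem_init(&iargs->sync, 0, 0);

	err = pthread_create(&thid, NULL, &vrtx_task_trampoline, iargs);
	if (err)
		return -err;

	sem_wait(&iargs->sync);	/* released by the trampoline */

	return 0;
}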
static void *uitron_task_trampoline(void *cookie)
{
	struct uitron_task_iargs *iargs = (struct uitron_task_iargs *)cookie;
	struct sched_param param;
	unsigned long mode_offset;
	void (*entry)(INT);
	int policy;
	long err;
	INT arg;

	/*
	 * Apply sched params here as some libpthread implementations
	 * fail doing this properly via pthread_create.
	 */
	policy = uitron_task_set_posix_priority(iargs->pk_ctsk->itskpri, &param);
	pthread_setschedparam(pthread_self(), policy, &param);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	xeno_sigshadow_install_once();

	err = XENOMAI_SKINCALL4(__uitron_muxid, __uitron_cre_tsk,
				iargs->tskid, iargs->pk_ctsk,
				iargs->completionp, &mode_offset);
	if (err)
		goto fail;

	xeno_set_current();
	xeno_set_current_mode(mode_offset);

	/*
	 * iargs->pk_ctsk might not be valid anymore, after our parent
	 * was released from the completion sync, so do not dereference
	 * this pointer.
	 */
	do
		err = XENOMAI_SYSCALL2(__xn_sys_barrier, &entry, &arg);
	while (err == -EINTR);

	if (!err)
		entry(arg);

fail:
	return (void *)err;
}
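/*
 * Hypothetical sketch of the priority translation called above; the real
 * uitron_task_set_posix_priority() is defined elsewhere in the skin and may
 * differ. uITRON priorities grow downward (1 is the highest), while
 * SCHED_FIFO priorities grow upward, so this sketch simply mirrors the value
 * into the FIFO range.
 */
#include <sched.h>

static int uitron_to_posix_priority_sketch(int itskpri, struct sched_param *param)
{
	int pmax = sched_get_priority_max(SCHED_FIFO);

	param->sched_priority = pmax - itskpri;	/* assumed mirror mapping */

	return SCHED_FIFO;
}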
ER shd_tsk(ID tskid, T_CTSK *pk_ctsk)	/* Xenomai extension. */
{
	struct sched_param param;
	int policy, err;

	xeno_fault_stack();

	/* Make sure the POSIX library caches the right priority. */
	policy = uitron_task_set_posix_priority(pk_ctsk->itskpri, &param);
	pthread_setschedparam(pthread_self(), policy, &param);

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

	xeno_sigshadow_install_once();

	err = XENOMAI_SKINCALL3(__uitron_muxid, __uitron_cre_tsk,
				tskid, pk_ctsk, NULL);
	if (!err)
		xeno_set_current();

	return err;
}
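/*
 * Minimal usage sketch (an assumption, not from the source tree): attach the
 * calling POSIX thread to the uITRON skin as task 1 via the shd_tsk()
 * extension above. The T_CTSK members touched here (tskatr, itskpri) follow
 * the standard uITRON layout; the chosen id and priority are arbitrary.
 */
#include <string.h>

static ER shadow_current_thread(void)
{
	T_CTSK ctsk;
	ER err;

	memset(&ctsk, 0, sizeof(ctsk));
	ctsk.tskatr = 0;
	ctsk.itskpri = 2;	/* assumed priority */

	err = shd_tsk(1, &ctsk);
	if (err != E_OK)
		return err;

	/* from here on, uITRON services may be used by this thread */
	return E_OK;
}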