/*
 * Spawn the printer thread using the skin's sanitized default stack
 * size (xeno_stacksize(0) returns the default when passed 0).
 * Errors from pthread_create() are ignored, as in the original code;
 * printer_thread/printer_loop are defined elsewhere in this file.
 */
static void spawn_printer_thread(void)
{
	pthread_attr_t thattr;

	pthread_attr_init(&thattr);
	pthread_attr_setstacksize(&thattr, xeno_stacksize(0));
	pthread_create(&printer_thread, &thattr, printer_loop, NULL);
	/* The attribute object is no longer needed once the thread
	   has been created; release it (was previously leaked). */
	pthread_attr_destroy(&thattr);
}
/*
 * Touch the current stack when called from the main thread,
 * presumably to commit (pre-fault) stack pages before entering
 * real-time mode -- TODO confirm against the callers.
 *
 * Only runs for the thread whose id matches xeno_main_tid.
 */
void xeno_fault_stack(void)
{
	if (pthread_self() == xeno_main_tid) {
		/* VLA sized by xeno_stacksize(1); assumed to return a
		   positive byte count -- a zero/negative size would be UB. */
		char stk[xeno_stacksize(1)];
		/* Write the first and last byte of the array.
		   NOTE(review): these stores are dead as far as the
		   compiler can tell and may be optimized away unless
		   something (e.g. -O0 or the VLA itself) prevents it --
		   verify the pages are really faulted in. */
		stk[0] = stk[sizeof(stk) - 1] = 0xA5;
	}
}
/*
 * sc_tecreate() - VRTX extended task creation service.
 *
 * @entry: task entry point.
 * @tid: requested VRTX task id.
 * @prio: VRTX priority, mapped to a POSIX policy/priority pair.
 * @mode: VRTX mode flags, passed through to the trampoline.
 * @ustacksz: user stack size (0 selects the skin default).
 * @sstacksz: system stack size, unused in user-space.
 * @paddr/@psize: argument block address/size (@psize is unused here;
 *	@paddr is forwarded as the task argument).
 * @errp: where to store the pthread_create() error code on failure.
 *
 * Returns the effective task id filled in by the trampoline, or -1
 * with *errp set on failure.
 *
 * Fix: the two "&param" arguments had been garbled into "¶m"
 * (HTML-entity mangling of "&para"); restored to "&param".
 */
int sc_tecreate(void (*entry) (void *), int tid, int prio, int mode,
		u_long ustacksz, u_long sstacksz __attribute__ ((unused)),
		char *paddr, u_long psize, int *errp)
{
	struct vrtx_task_iargs iargs;
	struct sched_param param;
	pthread_attr_t thattr;
	int err, policy;
	pthread_t thid;

	/* Migrate this thread to the Linux domain since we are about
	   to issue a series of regular kernel syscalls in order to
	   create the new Linux thread, which in turn will be mapped
	   to a VRTX shadow. */
	XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_LINUX_DOMAIN);

	iargs.tid = tid;
	iargs.prio = prio;
	iargs.mode = mode;
	iargs.entry = entry;
	iargs.param = paddr;
	__real_sem_init(&iargs.sync, 0, 0);

	pthread_attr_init(&thattr);

	ustacksz = xeno_stacksize(ustacksz);

	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
	policy = vrtx_task_set_posix_priority(prio, &param);
	pthread_attr_setschedparam(&thattr, &param);
	pthread_attr_setschedpolicy(&thattr, policy);
	pthread_attr_setstacksize(&thattr, ustacksz);
	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);

	err = __real_pthread_create(&thid, &thattr,
				    &vrtx_task_trampoline, &iargs);
	/* The attribute object is no longer needed past this point. */
	pthread_attr_destroy(&thattr);
	if (err) {
		*errp = err;
		__real_sem_destroy(&iargs.sync);
		return -1;
	}

	/* Wait for the trampoline to fill in iargs.tid; restart the
	   wait if interrupted by a signal. */
	while (__real_sem_wait(&iargs.sync) && errno == EINTR)
		;
	__real_sem_destroy(&iargs.sync);

	return iargs.tid;
}
/*
 * cre_tsk() - uITRON task creation service.
 *
 * @tskid: requested task id.
 * @pk_ctsk: creation packet; stksz is sanitized in place via
 *	xeno_stacksize(), itskpri is mapped to a POSIX policy/priority.
 *
 * Creates a detached Linux thread running uitron_task_trampoline(),
 * then synchronizes on the in-kernel completion object and returns
 * the trampoline's status.
 *
 * Returns the completion syscall status, or a negated pthread error
 * code if thread creation fails.
 *
 * Fix: the two "&param" arguments had been garbled into "¶m"
 * (HTML-entity mangling of "&para"); restored to "&param".
 */
ER cre_tsk(ID tskid, T_CTSK *pk_ctsk)
{
	struct uitron_task_iargs iargs;
	xncompletion_t completion;
	struct sched_param param;
	pthread_attr_t thattr;
	pthread_t thid;
	int policy;
	long err;

	/* Migrate to the Linux domain before issuing regular kernel
	   syscalls to create the new Linux thread. */
	XENOMAI_SYSCALL1(__xn_sys_migrate, XENOMAI_LINUX_DOMAIN);

	completion.syncflag = 0;
	completion.pid = -1;

	iargs.tskid = tskid;
	iargs.pk_ctsk = pk_ctsk;
	iargs.completionp = &completion;

	pthread_attr_init(&thattr);

	pk_ctsk->stksz = xeno_stacksize(pk_ctsk->stksz);

	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
	policy = uitron_task_set_posix_priority(pk_ctsk->itskpri, &param);
	pthread_attr_setschedparam(&thattr, &param);
	pthread_attr_setschedpolicy(&thattr, policy);
	pthread_attr_setstacksize(&thattr, pk_ctsk->stksz);
	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);

	err = pthread_create(&thid, &thattr, &uitron_task_trampoline, &iargs);
	/* The attribute object is no longer needed past this point. */
	pthread_attr_destroy(&thattr);
	if (err)
		return -err;

	/* Sync with uitron_task_trampoline() then return. */
	return XENOMAI_SYSCALL1(__xn_sys_completion, &completion);
}
int cond_wait_until(cond_t *cond, mutex_t *mutex, unsigned long long date) { struct timespec ts = { .tv_sec = date / NS_PER_S, .tv_nsec = date % NS_PER_S, }; return -pthread_cond_timedwait(cond, mutex, &ts); } #define cond_destroy(cond) (-pthread_cond_destroy(cond)) int thread_msleep(unsigned ms) { struct timespec ts = { .tv_sec = (ms * NS_PER_MS) / NS_PER_S, .tv_nsec = (ms * NS_PER_MS) % NS_PER_S, }; return -nanosleep(&ts, NULL); } int thread_spawn(thread_t *thread, int prio, void *(*handler)(void *cookie), void *cookie) { struct sched_param param; pthread_attr_t tattr; int err; pthread_attr_init(&tattr); pthread_attr_setinheritsched(&tattr, PTHREAD_EXPLICIT_SCHED); pthread_attr_setschedpolicy(&tattr, SCHED_FIFO); param.sched_priority = prio; pthread_attr_setschedparam(&tattr, ¶m); pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE); pthread_attr_setstacksize(&tattr, xeno_stacksize(0)); err = pthread_create(thread, &tattr, handler, cookie); pthread_attr_destroy(&tattr); return -err; } #define thread_yield() sched_yield() #define thread_kill(thread, sig) (-__real_pthread_kill(thread, sig)) #define thread_self() pthread_self() #define thread_join(thread) (-pthread_join(thread, NULL)) #else /* __NATIVE_SKIN__ */ typedef RT_MUTEX mutex_t; typedef RT_TASK *thread_t; typedef RT_COND cond_t; #define timer_read() rt_timer_read() int __mutex_init(mutex_t *mutex, const char *name, int type, int pi) { if (type == PTHREAD_MUTEX_ERRORCHECK) return -EINVAL; (void)(pi); return -rt_mutex_create(mutex, name); } #define mutex_init(mutex, type, pi) __mutex_init(mutex, #mutex, type, pi) #define mutex_destroy(mutex) rt_mutex_delete(mutex) #define mutex_lock(mutex) rt_mutex_acquire(mutex, TM_INFINITE) #define mutex_unlock(mutex) rt_mutex_release(mutex) int __cond_init(cond_t *cond, const char *name, int absolute) { (void)(absolute); return rt_cond_create(cond, name); } #define cond_init(cond, absolute) __cond_init(cond, #cond, absolute) #define cond_signal(cond) 
rt_cond_signal(cond) #define cond_wait(cond, mutex, ns) rt_cond_wait(cond, mutex, (RTIME)ns) #define cond_wait_until(cond, mutex, ns) \ rt_cond_wait_until(cond, mutex, (RTIME)ns) #define cond_destroy(cond) rt_cond_delete(cond) #define thread_self() rt_task_self() #define thread_msleep(ms) rt_task_sleep((RTIME)ms * NS_PER_MS) int thread_spawn_inner(thread_t *thread, const char *name, int prio, void *(*handler)(void *), void *cookie) { thread_t tcb; int err; tcb = malloc(sizeof(*tcb)); if (!tcb) return -ENOSPC; err = rt_task_spawn(tcb, name, 0, prio, T_JOINABLE, (void (*)(void *))handler, cookie); if (!err) *thread = tcb; return err; }