void psostask_cleanup(void) { xnholder_t *holder; spl_t s; xnlock_get_irqsave(&nklock, s); while ((holder = getheadq(&psostaskq)) != NULL) { psostask_t *task = link2psostask(holder); xnpod_abort_thread(&task->threadbase); xnlock_sync_irq(&nklock, s); } xnlock_put_irqrestore(&nklock, s); xnpod_remove_hook(XNHOOK_THREAD_DELETE, psostask_delete_hook); }
void wind_task_cleanup(void) { xnholder_t *holder; spl_t s; xnlock_get_irqsave(&nklock, s); while ((holder = getheadq(&wind_tasks_q)) != NULL) { WIND_TCB *pTcb = link2wind_task(holder); xnpod_abort_thread(&pTcb->threadbase); xnlock_sync_irq(&nklock, s); } xnlock_put_irqrestore(&nklock, s); xnpod_remove_hook(XNHOOK_THREAD_DELETE, wind_task_delete_hook); }
void __rtai_task_pkg_cleanup(void) { xnholder_t *holder; spl_t s; xnlock_get_irqsave(&nklock, s); while ((holder = getheadq(&__rtai_task_q)) != NULL) { RT_TASK *task = link2rtask(holder); xnpod_abort_thread(&task->thread_base); xnlock_sync_irq(&nklock, s); } xnlock_put_irqrestore(&nklock, s); xnpod_remove_hook(XNHOOK_THREAD_DELETE, &__task_delete_hook); if (__rtai_task_sig) xnpod_remove_hook(XNHOOK_THREAD_SWITCH, &__task_switch_hook); }
/*
 * rt_task_init - create an RTAI task, initially suspended.
 *
 * @task: caller-provided task descriptor to initialize.
 * @body: task entry routine, invoked with @cookie via rt_task_trampoline.
 * @cookie: opaque integer passed to @body.
 * @stack_size: stack size handed to the nucleus thread initializer.
 * @priority: RTAI priority, must lie in [XNSCHED_LOW_PRIO,
 *            XNSCHED_HIGH_PRIO]; mapped onto the nucleus scale below.
 * @uses_fpu: non-zero to enable FPU support (XNFPU); rejected with
 *            -EINVAL when CONFIG_XENO_HW_FPU is off.
 * @sigfn: optional signal function; the first task that declares one
 *         installs the global switch hook.
 *
 * Returns 0 on success, -EINVAL on bad priority / already-initialized
 * descriptor / start or registration failure, -ENOMEM if the nucleus
 * thread could not be initialized.
 *
 * Fix: the fourth argument of xnpod_init_thread() had been mangled to
 * "¶m" (HTML-entity residue of "&para;m" for "&param"), which does
 * not compile; restored to &param.
 */
int rt_task_init(RT_TASK *task, void (*body) (int), int cookie,
		 int stack_size, int priority, int uses_fpu,
		 void (*sigfn) (void))
{
	union xnsched_policy_param param;
	struct xnthread_start_attr sattr;
	struct xnthread_init_attr iattr;
	xnflags_t bflags = 0;
	int ret;
	spl_t s;

	/* Reject out-of-range priorities and double initialization
	   (magic already set). */
	if (priority < XNSCHED_LOW_PRIO ||
	    priority > XNSCHED_HIGH_PRIO || task->magic == RTAI_TASK_MAGIC)
		return -EINVAL;

	priority = XNSCHED_HIGH_PRIO - priority + 1;	/* Normalize. */

	if (uses_fpu)
#ifdef CONFIG_XENO_HW_FPU
		bflags |= XNFPU;
#else /* !CONFIG_XENO_HW_FPU */
		return -EINVAL;
#endif /* CONFIG_XENO_HW_FPU */

	iattr.tbase = rtai_tbase;
	iattr.name = NULL;
	iattr.flags = bflags;
	iattr.ops = &__rtai_task_ops;
	iattr.stacksize = stack_size;
	param.rt.prio = priority;

	if (xnpod_init_thread(&task->thread_base,
			      &iattr, &xnsched_class_rt, &param) != 0)
		/* Assume this is the only possible failure. */
		return -ENOMEM;

	xnarch_cpus_clear(task->affinity);
	inith(&task->link);
	task->suspend_depth = 1;
	task->cookie = cookie;
	task->body = body;
	task->sigfn = sigfn;

	/* Affinity was just cleared above, so default to all CPUs. */
	if (xnarch_cpus_empty(task->affinity))
		task->affinity = XNPOD_ALL_CPUS;

	xnlock_get_irqsave(&nklock, s);

	sattr.mode = XNSUSP;	/* Suspend on startup. */
	sattr.imask = 0;
	sattr.affinity = task->affinity;
	sattr.entry = rt_task_trampoline;
	sattr.cookie = task;
	ret = xnpod_start_thread(&task->thread_base, &sattr);
	if (ret)
		goto unlock_and_exit;

	task->magic = RTAI_TASK_MAGIC;
	appendq(&__rtai_task_q, &task->link);

#ifdef CONFIG_XENO_FASTSYNCH
	/* We need an anonymous registry entry to obtain a handle for
	   fast mutex locking. */
	ret = xnthread_register(&task->thread_base, "");
	if (ret) {
		xnpod_abort_thread(&task->thread_base);
		goto unlock_and_exit;
	}
#endif /* CONFIG_XENO_FASTSYNCH */

	/* Add a switch hook only if a signal function has been declared
	   at least once for some created task. */
	if (sigfn != NULL && __rtai_task_sig++ == 0)
		xnpod_add_hook(XNHOOK_THREAD_SWITCH, &__task_switch_hook);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	/* Collapse any nucleus failure into the RTAI-visible -EINVAL. */
	return ret ? -EINVAL : 0;
}