static void psostask_delete_hook(xnthread_t *thread)
{
	/* The scheduler is locked while hooks are running */
	psostask_t *task;
	psostm_t *tm;

	if (xnthread_get_magic(thread) != PSOS_SKIN_MAGIC)
		return;

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (xnthread_handle(thread) != XN_NO_HANDLE)
		xnregistry_remove(xnthread_handle(thread));
#endif /* CONFIG_XENO_OPT_REGISTRY */

	task = thread2psostask(thread);

	removeq(&psostaskq, &task->link);

	while ((tm = (psostm_t *)getgq(&task->alarmq)) != NULL)
		tm_destroy_internal(tm);

	taskev_destroy(&task->evgroup);
	xnarch_delete_display(&task->threadbase);
	psos_mark_deleted(task);
	xnheap_schedule_free(&kheap, task, &task->link);
}
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
static int __wind_task_self(struct pt_regs *regs)
{
	WIND_TCB_PLACEHOLDER ph;
	WIND_TCB *pTcb;

	pTcb = __wind_task_current(current);
	if (!pTcb)
		/* Calls on behalf of a non-task context beget an error for
		   the user-space interface. */
		return S_objLib_OBJ_ID_ERROR;

	ph.handle = xnthread_handle(&pTcb->threadbase);	/* Copy back the task handle. */

	return __xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
				      &ph, sizeof(ph));
}
static int __wind_task_self(struct task_struct *curr, struct pt_regs *regs)
{
	WIND_TCB_PLACEHOLDER ph;
	WIND_TCB *pTcb;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(ph)))
		return -EFAULT;

	pTcb = __wind_task_current(curr);
	if (!pTcb)
		/* Calls on behalf of a non-task context beget an error for
		   the user-space interface. */
		return S_objLib_OBJ_ID_ERROR;

	ph.handle = xnthread_handle(&pTcb->threadbase);	/* Copy back the task handle. */

	__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs), &ph,
			  sizeof(ph));

	return 0;
}
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

      redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/* Set the claimed bit.  In case it appears to be set
		   already, re-read its state under nklock so that we don't
		   miss any change between the lock-less read and here.  But
		   also try to avoid cmpxchg where possible.  Only if it
		   appears not to be set, start with cmpxchg directly. */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

		  test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/* The handle is broken, therefore pretend that the
			   synch object was deleted to signal an error. */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO))	/* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were ready
		   to run, waiting for the CPU: we need to wait again for
		   the resource. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) {	/* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

	      grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK);
}
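/*
 * Usage sketch (not part of the original sources): how a skin-level
 * mutex service might pair xnsynch_acquire() and xnsynch_release()
 * shown above.  The "mymutex" type and both helpers are hypothetical;
 * only the xnsynch calls, the XN_RELATIVE timeout mode and the
 * XNRMID/XNTIMEO/XNBREAK information bits come from the code above.
 * The embedded synch object is assumed to have been initialized with
 * the XNSYNCH_OWNER flag (and typically XNSYNCH_PRIO | XNSYNCH_PIP).
 */
struct mymutex {
	struct xnsynch synchbase;
};

static int mymutex_lock(struct mymutex *mutex, xnticks_t timeout)
{
	xnflags_t info;

	info = xnsynch_acquire(&mutex->synchbase, timeout, XN_RELATIVE);
	if (info & XNRMID)
		return -EIDRM;		/* Synch object was deleted. */
	if (info & XNTIMEO)
		return -ETIMEDOUT;	/* Wait timed out. */
	if (info & XNBREAK)
		return -EINTR;		/* Wait was forcibly broken. */

	return 0;			/* Ownership granted. */
}

static int mymutex_unlock(struct mymutex *mutex)
{
	/* Hand the object over to the next waiter, if any... */
	if (xnsynch_release(&mutex->synchbase) != NULL)
		/* ...and let the nucleus reschedule so it may run. */
		xnpod_schedule();

	return 0;
}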
u_long t_start(u_long tid,
	       u_long mode,
	       void (*startaddr) (u_long, u_long, u_long, u_long),
	       u_long targs[])
{
	struct xnthread_start_attr attr;
	u_long err = SUCCESS;
	xnflags_t xnmode;
	psostask_t *task;
	spl_t s;

	/* We have no error case here: just clear out any unwanted bit. */
	mode &= ~T_START_MASK;

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	if (!xnthread_test_state(&task->threadbase, XNDORMANT)) {
		err = ERR_ACTIVE;	/* Task already started */
		goto unlock_and_exit;
	}

	xnmode = psos_mode_to_xeno(mode);
	if (xnmode & XNRRB) {
		xnpod_set_thread_tslice(&task->threadbase, psos_time_slice);
		xnmode &= ~XNRRB;
	}
	task->entry = startaddr;
	attr.mode = xnmode;
	attr.imask = (int)((mode >> 8) & 0x7);
	attr.affinity = XNPOD_ALL_CPUS;

	if (targs)
		memcpy(task->args, targs, sizeof(task->args));
	else
		memset(task->args, 0, sizeof(task->args));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(&task->threadbase, XNSHADOW)) {
		attr.entry = (void (*)(void *))startaddr;
		attr.cookie = (void *)xnthread_handle(&task->threadbase);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		attr.entry = psostask_trampoline;
		attr.cookie = task;
	}

	xnpod_start_thread(&task->threadbase, &attr);

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __wind_task_init(struct task_struct *curr, struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	char name[XNOBJECT_NAME_LEN];
	struct wind_arg_bulk bulk;
	int err = 0, prio, flags;
	WIND_TCB_PLACEHOLDER ph;
	WIND_TCB *task;

	if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
		return -EFAULT;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(ph)))
		return -EFAULT;

	__xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
			    sizeof(bulk));

	if (bulk.a1) {
		if (!__xn_access_ok(curr, VERIFY_READ, bulk.a1, sizeof(name)))
			return -EFAULT;

		__xn_strncpy_from_user(curr, name,
				       (const char __user *)bulk.a1,
				       sizeof(name) - 1);
		name[sizeof(name) - 1] = '\0';
		strncpy(curr->comm, name, sizeof(curr->comm));
		curr->comm[sizeof(curr->comm) - 1] = '\0';
	} else
		*name = '\0';

	/* Task priority. */
	prio = bulk.a2;
	/* Task flags. */
	flags = bulk.a3 | VX_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	task = (WIND_TCB *)xnmalloc(sizeof(*task));

	if (!task) {
		if (u_completion)
			xnshadow_signal_completion(u_completion, -ENOMEM);
		return -ENOMEM;
	}

	xnthread_clear_state(&task->threadbase, XNZOMBIE);

	/* Force FPU support in user-space. This will lead to a no-op if
	   the platform does not support it. */

	if (taskInit(task, name, prio, flags, NULL, 0, NULL,
		     0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == OK) {
		/* Let the skin discard the TCB memory upon exit. */
		task->auto_delete = 1;
		task->ptid = bulk.a4;
		/* Copy back the registry handle to the ph struct. */
		ph.handle = xnthread_handle(&task->threadbase);
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &ph, sizeof(ph));
		err = xnshadow_map(&task->threadbase, u_completion);
	} else {
		/* Unblock and pass back error code. */
		err = wind_errnoget();

		if (u_completion)
			xnshadow_signal_completion(u_completion, err);
	}

	if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
		xnfree(task);

	return err;
}
u_long t_create(const char *name,
		u_long prio,
		u_long sstack, u_long ustack, u_long flags, u_long *tid_r)
{
	xnflags_t bflags = 0;
	psostask_t *task;
	spl_t s;
	int n;

	/* Xenomai extension: we accept priority level #0 for creating
	   non-RT tasks (i.e. underlaid by SCHED_NORMAL pthreads), which
	   are allowed to call into the pSOS emulator, usually for
	   synchronization services. */

	if (prio > 255)
		return ERR_PRIOR;

	task = (psostask_t *)xnmalloc(sizeof(*task));

	if (!task)
		return ERR_NOTCB;

	if (flags & T_FPU)
		bflags |= XNFPU;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (flags & T_SHADOW)
		bflags |= XNSHADOW;
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	ustack += sstack;

	if (!(flags & T_SHADOW) && ustack < 1024) {
		xnfree(task);
		return ERR_TINYSTK;
	}

	if (name && *name)
		xnobject_copy_name(task->name, name);
	else
		/* i.e. Anonymous object which must be accessible from
		   user-space. */
		sprintf(task->name, "anon_task%lu", psos_task_ids++);

	if (xnpod_init_thread(&task->threadbase, psos_tbase, task->name, prio,
			      bflags, ustack, &psos_task_ops) != 0) {
		xnfree(task);
		return ERR_NOSTK;	/* Assume this is the only possible failure */
	}

	xnthread_time_slice(&task->threadbase) = psos_time_slice;

	taskev_init(&task->evgroup);
	inith(&task->link);

	for (n = 0; n < PSOSTASK_NOTEPAD_REGS; n++)
		task->notepad[n] = 0;

	initgq(&task->alarmq,
	       &xnmod_glink_queue, xnmod_alloc_glinks, XNMOD_GHOLDER_THRESHOLD);

	task->magic = PSOS_TASK_MAGIC;

	xnlock_get_irqsave(&nklock, s);
	appendq(&psostaskq, &task->link);
	*tid_r = (u_long)task;
	xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
	{
		u_long err = xnregistry_enter(task->name, task,
					      &xnthread_handle(&task->threadbase),
					      NULL);
		if (err) {
			t_delete((u_long)task);
			return err;
		}
	}
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xnarch_create_display(&task->threadbase, task->name, psostask);

	return SUCCESS;
}
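/*
 * Usage sketch (not part of the original sources): creating and then
 * starting a pSOS task with the two services above.  "demo_entry",
 * "demo_spawn", the priority and the stack sizes are illustrative
 * values only; the t_create()/t_start() signatures, T_FPU and SUCCESS
 * come from the code above.
 */
static void demo_entry(u_long a0, u_long a1, u_long a2, u_long a3)
{
	/* Task body would run here with the four start arguments. */
}

static u_long demo_spawn(void)
{
	u_long targs[4] = { 1, 2, 3, 4 };
	u_long tid, err;

	err = t_create("DEMO", 10, 4096, 4096, T_FPU, &tid);
	if (err != SUCCESS)
		return err;

	/* Mode 0 keeps the defaults; real callers would OR in whatever
	   pSOS mode bits they need. */
	return t_start(tid, 0, demo_entry, targs);
}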