/*
 * Watchdog handler: fires after the watchdog timer has ticked
 * wd_timeout_arg times without the root (Linux) thread ever resuming,
 * i.e. when a runaway real-time thread is monopolizing the CPU.
 */
static void xnsched_watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnpod_current_sched();
	struct xnthread *thread = sched->curr;

	if (likely(xnthread_test_state(thread, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNSHADOW) &&
	    !xnthread_amok_p(thread)) {
		trace_mark(xn_nucleus, watchdog_signal,
			   "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- signaling runaway thread "
			 "'%s'\n", xnthread_name(thread));
		xnthread_set_info(thread, XNAMOK | XNKICKED);
		xnshadow_send_sig(thread, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		trace_mark(xn_nucleus, watchdog, "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
			 xnthread_name(thread));
		xnpod_delete_thread(thread);
	}

	xnsched_reset_watchdog(sched);
}
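/*
 * Illustrative sketch, not part of the original source: how a
 * user-space application might catch the SIGDEBUG signal sent by the
 * watchdog above. It assumes SIGDEBUG and SIGDEBUG_WATCHDOG are
 * visible to the application and that the reason code passed to
 * xnshadow_send_sig() arrives unmodified in si_value.sival_int --
 * check your Xenomai release, since the signal number and reason
 * encoding have varied across 2.x versions.
 */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void sigdebug_handler(int sig, siginfo_t *si, void *context)
{
	if (si->si_value.sival_int == SIGDEBUG_WATCHDOG) {
		static const char msg[] =
		    "SIGDEBUG: watchdog flagged a runaway loop\n";
		/* write() is async-signal-safe, unlike fprintf(). */
		(void)write(STDERR_FILENO, msg, sizeof(msg) - 1);
	}
}

static void install_sigdebug_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigdebug_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGDEBUG, &sa, NULL);
}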
/*
 * Wake up one specific sleeper and transfer ownership of the resource
 * to it; returns the next holder in the pend queue, so callers can
 * iterate over the remaining sleepers.
 */
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);
	trace_mark(xn_nucleus_synch_wakeup_all,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);
	xnpod_resume_thread(thread, XNPEND);

	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
			   xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread();
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else /* i.e. priority-sorted */
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
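/*
 * Illustrative sketch, not part of the original source: a typical
 * caller of xnsynch_sleep_on() maps the information bits it returns
 * onto skin-level error codes. wait_on_synch() is a hypothetical
 * helper name introduced here for illustration only.
 */
static int wait_on_synch(struct xnsynch *synch, xnticks_t timeout)
{
	xnflags_t info;

	info = xnsynch_sleep_on(synch, timeout, XN_RELATIVE);
	if (info & XNRMID)
		return -EIDRM;		/* Object deleted while pending. */
	if (info & XNTIMEO)
		return -ETIMEDOUT;	/* Timeout elapsed. */
	if (info & XNBREAK)
		return -EINTR;		/* Wait forcibly unblocked. */

	return 0;	/* Resource acquired. */
}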
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_EVENT *event = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;
	RT_TASK *task;

	priv->value = event->value; /* Refresh as we collect. */

	if (priv->curr == NULL)
		return 0;	/* We are done. */

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(xnsynch_wait_queue(&event->synch_base),
			    priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));
	task = thread2rtask(thread);
	p->mode = task->wait_args.event.mode;
	p->mask = task->wait_args.event.mask;

	return 1;
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	wind_sem_t *sem = xnvfile_priv(it->vfile);
	struct xnthread *owner;

	sem = wind_h2obj_active((SEM_ID)sem, WIND_SEM_MAGIC, wind_sem_t);
	if (sem == NULL)
		return -EIDRM;

	priv->curr = getheadpq(xnsynch_wait_queue(&sem->synchbase));
	priv->type = sem->vtbl->type;

	if (sem->vtbl == &semm_vtbl) {
		owner = xnsynch_owner(&sem->synchbase);
		if (owner)
			strncpy(priv->owner, xnthread_name(owner),
				sizeof(priv->owner));
		else
			*priv->owner = 0;
		priv->count = -1U;
	} else
		priv->count = sem->count;

	return xnsynch_nsleepers(&sem->synchbase);
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_BUFFER *bf = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;
	struct xnpqueue *waitq;

	if (priv->curr == NULL) {	/* Attempt to switch queues. */
		if (!priv->input)
			/* Finished output side, we are done. */
			return 0;
		priv->input = 0;
		waitq = xnsynch_wait_queue(&bf->osynch_base);
		priv->curr = getheadpq(waitq);
		if (priv->curr == NULL)
			return 0;
	} else
		waitq = priv->input ? xnsynch_wait_queue(&bf->isynch_base)
			: xnsynch_wait_queue(&bf->osynch_base);

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(waitq, priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));
	p->input = priv->input;

	return 1;
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	struct uiflag *flag = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;
	struct uitask *task;

	priv->value = flag->flgvalue; /* Refresh as we collect. */

	if (priv->curr == NULL)
		return 0;	/* We are done. */

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(xnsynch_wait_queue(&flag->synchbase),
			    priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));
	task = thread2uitask(thread);
	p->wfmode = task->wargs.flag.wfmode;
	p->waiptn = task->wargs.flag.waiptn;

	return 1;
}
struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	holder = getpq(&synch->pendq);
	if (holder) {
		thread = link2thread(holder, plink);
		thread->wchan = NULL;
		trace_mark(xn_nucleus, synch_wakeup_one,
			   "thread %p thread_name %s synch %p",
			   thread, xnthread_name(thread), synch);
		xnpod_resume_thread(thread, XNPEND);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return thread;
}
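/*
 * Illustrative sketch, not part of the original source: the typical
 * post side matching xnsynch_wakeup_one_sleeper(), for a hypothetical
 * counting semaphore built over an xnsynch object. It assumes the
 * nucleus lock may be nested on the same CPU (as in Xenomai 2.x), so
 * taking it here keeps the wakeup-or-increment decision atomic with
 * respect to sleepers.
 */
static void sem_post_sketch(struct xnsynch *synch, unsigned long *count)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_wakeup_one_sleeper(synch) == NULL)
		(*count)++;	/* No waiter: make the next wait succeed. */

	xnlock_put_irqrestore(&nklock, s);

	xnpod_schedule();	/* Let a woken thread preempt us if needed. */
}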
static int __ui_cre_tsk(struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct task_struct *p = current;
	unsigned long __user *u_mode_offset;
	uitask_t *task;
	T_CTSK pk_ctsk;
	ID tskid;
	spl_t s;
	ER err;

	tskid = __xn_reg_arg1(regs);

	if (__xn_safe_copy_from_user(&pk_ctsk,
				     (void __user *)__xn_reg_arg2(regs),
				     sizeof(pk_ctsk)))
		return -EFAULT;

	pk_ctsk.tskatr |= TA_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);
	u_mode_offset = (unsigned long __user *)__xn_reg_arg4(regs);

	err = cre_tsk(tskid, &pk_ctsk);

	if (likely(err == E_OK)) {
		xnlock_get_irqsave(&nklock, s);
		task = xnmap_fetch(ui_task_idmap, tskid);
		if (!task) {
			xnlock_put_irqrestore(&nklock, s);
			err = E_OBJ;
			goto fail;
		}
		strncpy(p->comm, xnthread_name(&task->threadbase),
			sizeof(p->comm));
		p->comm[sizeof(p->comm) - 1] = '\0';
		xnlock_put_irqrestore(&nklock, s);
		/*
		 * Since we may not hold the superlock across a call
		 * to xnshadow_map(), we do have a small race window
		 * here, if the created task is killed then its TCB
		 * recycled before we could map it; however, the risk
		 * is mitigated by consistency checks performed in
		 * xnshadow_map().
		 */
		return xnshadow_map(&task->threadbase,
				    u_completion, u_mode_offset);
	}

fail:
	/* Unblock and pass back the error code. */
	if (u_completion)
		xnshadow_signal_completion(u_completion, err);

	return err;
}
/* Must be called with nklock locked, interrupts off. */
void xnsched_zombie_hooks(struct xnthread *thread)
{
	XENO_BUGON(NUCLEUS, thread->sched->zombie != NULL);
	thread->sched->zombie = thread;

	trace_mark(xn_nucleus, sched_finalize,
		   "thread_out %p thread_out_name %s",
		   thread, xnthread_name(thread));

	xnpod_run_hooks(&nkpod->tdeleteq, thread, "DELETE");

	xnsched_forget(thread);
}
static int __heap_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	RT_HEAP *heap = (RT_HEAP *)data;
	char *p = page;
	int len;
	spl_t s;

	p += sprintf(p, "type=%s:size=%lu:used=%lu:numaps=%d\n",
		     (heap->mode & H_SHARED) == H_SHARED ? "shared" :
		     (heap->mode & H_MAPPABLE) ? "mappable" : "kernel",
		     xnheap_usable_mem(&heap->heap_base),
		     xnheap_used_mem(&heap->heap_base),
		     atomic_read(&heap->heap_base.archdep.numaps));

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&heap->synch_base) > 0) {
		xnpholder_t *holder;

		/* Pended heap -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&heap->synch_base));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			RT_TASK *task = thread2rtask(sleeper);
			size_t size = task->wait_args.heap.size;
			p += sprintf(p, "+%s (size=%zd)\n",
				     xnthread_name(sleeper), size);
			holder = nextpq(xnsynch_wait_queue(&heap->synch_base),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int __event_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	RT_EVENT *event = (RT_EVENT *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	p += sprintf(p, "=0x%lx\n", event->value);

	if (xnsynch_nsleepers(&event->synch_base) > 0) {
		xnpholder_t *holder;

		/* Pended event -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&event->synch_base));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			RT_TASK *task = thread2rtask(sleeper);
			const char *mode =
			    (task->wait_args.event.mode & EV_ANY) ?
			    "any" : "all";
			unsigned long mask = task->wait_args.event.mask;
			p += sprintf(p, "+%s (mask=0x%lx, %s)\n",
				     xnthread_name(sleeper), mask, mode);
			holder = nextpq(xnsynch_wait_queue(&event->synch_base),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int __intr_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	RT_INTR *intr = (RT_INTR *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	{
		xnpholder_t *holder;

		p += sprintf(p, "hits=%lu, pending=%u, mode=0x%x\n",
			     __intr_get_hits(intr), intr->pending,
			     intr->mode);

		/* Pended interrupt -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&intr->synch_base));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&intr->synch_base),
					holder);
		}
	}
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	p += sprintf(p, "hits=%lu\n", __intr_get_hits(intr));
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	struct vrtxmx *mx = xnvfile_priv(it->vfile);
	struct xnthread *owner;

	priv->curr = getheadpq(xnsynch_wait_queue(&mx->synchbase));

	owner = xnsynch_owner(&mx->synchbase);
	if (owner)
		strncpy(priv->owner, xnthread_name(owner),
			sizeof(priv->owner));
	else
		*priv->owner = 0;

	return xnsynch_nsleepers(&mx->synchbase);
}
static int msgq_read_proc(char *page, char **start, off_t off,
			  int count, int *eof, void *data)
{
	wind_msgq_t *queue = (wind_msgq_t *)data;
	char *p = page;
	int len;
	spl_t s;

	p += sprintf(p, "porder=%s:mlength=%u:mcount=%d\n",
		     xnsynch_test_flags(&queue->synchbase, XNSYNCH_PRIO) ?
		     "prio" : "fifo",
		     queue->msg_length, countq(&queue->msgq));

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&queue->synchbase) > 0) {
		xnpholder_t *holder;

		/* Pended queue -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&queue->synchbase));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&queue->synchbase),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
void xnsynch_forget_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner, *target;
	struct xnpholder *h;

	trace_mark(xn_nucleus, synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		owner = synch->owner;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else {
			target = link2thread(getheadpq(&synch->pendq), plink);
			h = getheadpq(&owner->claimq);
			if (w_cprio(target) != h->prio) {
				/*
				 * Reorder the claim queue, and lower
				 * the priority to the required
				 * minimum needed to prevent priority
				 * inversion.
				 */
				removepq(&owner->claimq, &synch->link);
				insertpqf(&owner->claimq, &synch->link,
					  w_cprio(target));
				h = getheadpq(&owner->claimq);
				if (h->prio < w_cprio(owner))
					xnsynch_renice_thread(owner, target);
			}
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
static int __sem_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	RT_SEM *sem = (RT_SEM *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&sem->synch_base) == 0)
		/* Idle/posted semaphore -- dump count. */
		p += sprintf(p, "=%lu\n", sem->count);
	else {
		xnpholder_t *holder;

		/* Pended semaphore -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&sem->synch_base));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&sem->synch_base),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_COND *cond = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;

	if (priv->curr == NULL)
		return 0;	/* We are done. */

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(xnsynch_wait_queue(&cond->synch_base),
			    priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));

	return 1;
}
static int __mbx_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	uimbx_t *mbx = (uimbx_t *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	p += sprintf(p, "%d/%d message(s), attr=%s\n",
		     mbx->mcount, mbx->bufcnt,
		     mbx->mbxatr & TA_TPRI ? "TA_TPRI" : "TA_TFIFO");

	if (xnsynch_pended_p(&mbx->synchbase)) {
		xnpholder_t *holder;

		/* Pended mbx -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&mbx->synchbase));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&mbx->synchbase),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int __flag_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	uiflag_t *flag = (uiflag_t *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	p += sprintf(p, "=0x%x, attr=%s\n", flag->flgvalue,
		     flag->flgatr & TA_WMUL ? "TA_WMUL" : "TA_WSGL");

	if (xnsynch_nsleepers(&flag->synchbase) > 0) {
		xnpholder_t *holder;

		/* Pended flag -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&flag->synchbase));
		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&flag->synchbase),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
void xnsynch_forget_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;

	trace_mark(xn_nucleus_synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		xnthread_t *owner = synch->owner;
		int rprio;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else if (getheadpq(&synch->pendq)->prio !=
			 getheadpq(&owner->claimq)->prio) {
			/*
			 * Reorder the claim queue, and lower the
			 * priority to the required minimum needed to
			 * prevent priority inversion.
			 */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link,
				  getheadpq(&synch->pendq)->prio);
			rprio = getheadpq(&owner->claimq)->prio;
			if (rprio < owner->cprio)
				xnsynch_renice_thread(owner, rprio);
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
void xnsched_init(struct xnsched *sched, int cpu)
{
	char htimer_name[XNOBJECT_NAME_LEN];
	char root_name[XNOBJECT_NAME_LEN];
	union xnsched_policy_param param;
	struct xnthread_init_attr attr;
	struct xnsched_class *p;

	sched->cpu = cpu;

	for_each_xnsched_class(p) {
		if (p->sched_init)
			p->sched_init(sched);
	}

#ifdef CONFIG_SMP
	sprintf(htimer_name, "[host-timer/%u]", cpu);
	sprintf(root_name, "ROOT/%u", cpu);
#else
	strcpy(htimer_name, "[host-timer]");
	strcpy(root_name, "ROOT");
#endif
	sched->status = 0;
	sched->inesting = 0;
	sched->curr = &sched->rootcb;
#ifdef CONFIG_XENO_OPT_PRIOCPL
	xnlock_init(&sched->rpilock);
#endif
	/*
	 * No direct handler here since the host timer processing is
	 * postponed to xnintr_irq_handler(), as part of the interrupt
	 * exit code.
	 */
	xntimer_init(&sched->htimer, &nktbase, NULL);
	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
	xntimer_set_name(&sched->htimer, htimer_name);
	xntimer_set_sched(&sched->htimer, sched);
	sched->zombie = NULL;
	xnarch_cpus_clear(sched->resched);

	attr.flags = XNROOT | XNSTARTED | XNFPU;
	attr.name = root_name;
	attr.stacksize = 0;
	attr.tbase = &nktbase;
	attr.ops = NULL;
	param.idle.prio = XNSCHED_IDLE_PRIO;

	xnthread_init(&sched->rootcb, &attr,
		      sched, &xnsched_class_idle, &param);

	sched->rootcb.affinity = xnarch_cpumask_of_cpu(cpu);
	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
#ifdef CONFIG_XENO_HW_FPU
	sched->fpuholder = &sched->rootcb;
#endif /* CONFIG_XENO_HW_FPU */

	xnarch_init_root_tcb(xnthread_archtcb(&sched->rootcb),
			     &sched->rootcb,
			     xnthread_name(&sched->rootcb));

#ifdef CONFIG_XENO_OPT_WATCHDOG
	xntimer_init(&sched->wdtimer, &nktbase, xnsched_watchdog_handler);
	xntimer_set_name(&sched->wdtimer, "[watchdog]");
	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
	xntimer_set_sched(&sched->wdtimer, sched);
#endif /* CONFIG_XENO_OPT_WATCHDOG */

	xntimerq_init(&sched->timerqueue);
}
void xnsynch_sleep_on(xnsynch_t *synch, xnticks_t timeout,
		      xntmode_t timeout_mode)
{
	xnthread_t *thread = xnpod_current_thread(), *owner;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus_synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) { /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode,
				     synch);
		goto unlock_and_exit;
	}

	if (!testbits(synch->status, XNSYNCH_PIP)) { /* i.e. no ownership */
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode,
				     synch);
		goto unlock_and_exit;
	}

redo:
	owner = synch->owner;

	if (!owner) {
		synch->owner = thread;
		xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
		goto unlock_and_exit;
	}

	if (thread->cprio > owner->cprio) {
		if (xnthread_test_info(owner, XNWAKEN) &&
		    owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto unlock_and_exit;
		}

		if (!xnthread_test_state(owner, XNBOOST)) {
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			removepq(&owner->claimq, &synch->link);
		else
			__setbits(synch->status, XNSYNCH_CLAIMED);

		insertpqf(&owner->claimq, &synch->link, thread->cprio);
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnsynch_renice_thread(owner, thread->cprio);
	} else
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/*
		 * Somebody stole us the ownership while we were ready
		 * to run, waiting for the CPU: we need to wait again
		 * for the resource.
		 */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE)
			goto redo;
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) /* Otherwise, it's too late. */
			goto redo;
		xnthread_set_info(thread, XNTIMEO);
	}

unlock_and_exit:
	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	xnlock_put_irqrestore(&nklock, s);
}