static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_BUFFER *bf = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;
	struct xnpqueue *waitq;

	if (priv->curr == NULL) {
		/* Attempt to switch queues. */
		if (!priv->input)
			/* Finished output side, we are done. */
			return 0;
		priv->input = 0;
		waitq = xnsynch_wait_queue(&bf->osynch_base);
		priv->curr = getheadpq(waitq);
		if (priv->curr == NULL)
			return 0;
	} else
		waitq = priv->input ? xnsynch_wait_queue(&bf->isynch_base)
			: xnsynch_wait_queue(&bf->osynch_base);

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(waitq, priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));
	p->input = priv->input;

	return 1;
}
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);

	trace_mark(xn_nucleus, synch_wakeup_all,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnpod_resume_thread(thread, XNPEND);

	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_EVENT *event = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;
	RT_TASK *task;

	priv->value = event->value; /* Refresh as we collect. */

	if (priv->curr == NULL)
		return 0;	/* We are done. */

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(xnsynch_wait_queue(&event->synch_base),
			    priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));
	task = thread2rtask(thread);
	p->mode = task->wait_args.event.mode;
	p->mask = task->wait_args.event.mask;

	return 1;
}
static void xnsynch_clear_boost(struct xnsynch *synch,
				struct xnthread *owner)
{
	struct xnthread *target;
	struct xnsynch *hsynch;
	struct xnpholder *h;
	int wprio;

	removepq(&owner->claimq, &synch->link);
	__clrbits(synch->status, XNSYNCH_CLAIMED);
	wprio = w_bprio(owner);

	if (emptypq_p(&owner->claimq)) {
		xnthread_clear_state(owner, XNBOOST);
		target = owner;
	} else {
		/* Find the highest priority needed to enforce the PIP. */
		hsynch = link2synch(getheadpq(&owner->claimq));
		h = getheadpq(&hsynch->pendq);
		XENO_BUGON(NUCLEUS, h == NULL);
		target = link2thread(h, plink);
		if (w_cprio(target) > wprio)
			wprio = w_cprio(target);
		else
			target = owner;
	}

	if (w_cprio(owner) != wprio &&
	    !xnthread_test_state(owner, XNZOMBIE))
		xnsynch_renice_thread(owner, target);
}
struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	holder = getpq(&synch->pendq);
	if (holder) {
		thread = link2thread(holder, plink);
		thread->wchan = NULL;
		trace_mark(xn_nucleus, synch_wakeup_one,
			   "thread %p thread_name %s synch %p",
			   thread, xnthread_name(thread), synch);
		xnpod_resume_thread(thread, XNPEND);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return thread;
}
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif

	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
{
	struct xnpholder *holder;
	int status;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
		   synch, reason);

	status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;

	while ((holder = getpq(&synch->pendq)) != NULL) {
		struct xnthread *sleeper = link2thread(holder, plink);
		xnthread_set_info(sleeper, reason);
		sleeper->wchan = NULL;
		xnpod_resume_thread(sleeper, XNPEND);
	}

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		xnsynch_clear_boost(synch, synch->owner);
		status = XNSYNCH_RESCHED;
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return status;
}
int rt_heap_free(RT_HEAP *heap, void *block)
{
	int err, nwake;
	spl_t s;

	if (block == NULL)
		return -EINVAL;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);
	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	if (heap->mode & H_SINGLE) {	/* No-op in single-block mode. */
		err = 0;
		goto unlock_and_exit;
	}

	err = xnheap_free(&heap->heap_base, block);

	if (!err && xnsynch_nsleepers(&heap->synch_base) > 0) {
		xnpholder_t *holder, *nholder;

		nholder = getheadpq(xnsynch_wait_queue(&heap->synch_base));
		nwake = 0;

		while ((holder = nholder) != NULL) {
			RT_TASK *sleeper =
				thread2rtask(link2thread(holder, plink));
			void *block;

			block = xnheap_alloc(&heap->heap_base,
					     sleeper->wait_args.heap.size);
			if (block) {
				nholder = xnsynch_wakeup_this_sleeper(
					&heap->synch_base, holder);
				sleeper->wait_args.heap.block = block;
				nwake++;
			} else
				nholder = nextpq(
					xnsynch_wait_queue(&heap->synch_base),
					holder);
		}

		if (nwake > 0)
			xnpod_schedule();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	struct uiflag *flag = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;
	struct uitask *task;

	priv->value = flag->flgvalue; /* Refresh as we collect. */

	if (priv->curr == NULL)
		return 0;	/* We are done. */

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(xnsynch_wait_queue(&flag->synchbase),
			    priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));
	task = thread2uitask(thread);
	p->wfmode = task->wargs.flag.wfmode;
	p->waiptn = task->wargs.flag.waiptn;

	return 1;
}
ER set_flg(ID flgid, UINT setptn)
{
	xnpholder_t *holder, *nholder;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);
	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (setptn == 0)
		goto unlock_and_exit;

	flag->flgvalue |= setptn;

	if (!xnsynch_pended_p(&flag->synchbase))
		goto unlock_and_exit;

	nholder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

	while ((holder = nholder) != NULL) {
		uitask_t *sleeper = thread2uitask(link2thread(holder, plink));
		UINT wfmode = sleeper->wargs.flag.wfmode;
		UINT waiptn = sleeper->wargs.flag.waiptn;

		if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0) ||
		    (!(wfmode & TWF_ORW) &&
		     ((waiptn & flag->flgvalue) == waiptn))) {
			nholder = xnsynch_wakeup_this_sleeper(&flag->synchbase,
							      holder);
			sleeper->wargs.flag.waiptn = flag->flgvalue;
			if (wfmode & TWF_CLR)
				flag->flgvalue = 0;
		} else
			nholder = nextpq(xnsynch_wait_queue(&flag->synchbase),
					 holder);
	}

	xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
int rt_event_signal(RT_EVENT *event, unsigned long mask)
{
	xnpholder_t *holder, *nholder;
	int err = 0, resched = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);
	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	/* Post the flags. */
	event->value |= mask;

	/* And wakeup any sleeper having its request fulfilled. */
	nholder = getheadpq(xnsynch_wait_queue(&event->synch_base));

	while ((holder = nholder) != NULL) {
		RT_TASK *sleeper = thread2rtask(link2thread(holder, plink));
		int mode = sleeper->wait_args.event.mode;
		unsigned long bits = sleeper->wait_args.event.mask;

		if (((mode & EV_ANY) && (bits & event->value) != 0) ||
		    (!(mode & EV_ANY) && ((bits & event->value) == bits))) {
			sleeper->wait_args.event.mask = (bits & event->value);
			nholder = xnsynch_wakeup_this_sleeper(
				&event->synch_base, holder);
			resched = 1;
		} else
			nholder = nextpq(
				xnsynch_wait_queue(&event->synch_base),
				holder);
	}

	if (resched)
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __heap_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	RT_HEAP *heap = (RT_HEAP *)data;
	char *p = page;
	int len;
	spl_t s;

	p += sprintf(p, "type=%s:size=%lu:used=%lu:numaps=%d\n",
		     (heap->mode & H_SHARED) == H_SHARED ? "shared" :
		     (heap->mode & H_MAPPABLE) ? "mappable" : "kernel",
		     xnheap_usable_mem(&heap->heap_base),
		     xnheap_used_mem(&heap->heap_base),
		     atomic_read(&heap->heap_base.archdep.numaps));

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&heap->synch_base) > 0) {
		xnpholder_t *holder;

		/* Pended heap -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&heap->synch_base));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			RT_TASK *task = thread2rtask(sleeper);
			size_t size = task->wait_args.heap.size;
			p += sprintf(p, "+%s (size=%zd)\n",
				     xnthread_name(sleeper), size);
			holder = nextpq(xnsynch_wait_queue(&heap->synch_base),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
/*!
 * \fn struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
 * \brief Access the thread leading a synch object wait queue.
 *
 * This service returns the descriptor address of the thread leading a
 * synchronization object wait queue.
 *
 * @param synch The descriptor address of the target synchronization object.
 *
 * @return The descriptor address of the leading thread, or NULL if the
 * wait queue is empty.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Interrupt service routine
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: never.
 */
struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadpq(&synch->pendq);
	if (holder)
		thread = link2thread(holder, plink);

	xnlock_put_irqrestore(&nklock, s);

	return thread;
}
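/*
 * A minimal usage sketch for xnsynch_peek_pendq(): inspect the head
 * waiter of a pended synch object without unblocking it, e.g. from a
 * status-reporting path. report_pended_waiter() is hypothetical and
 * not part of the nucleus; only xnsynch_peek_pendq(), xnthread_name()
 * and printk() are taken from the surrounding code.
 */
static void report_pended_waiter(struct xnsynch *synch)
{
	struct xnthread *thread = xnsynch_peek_pendq(synch);

	if (thread)
		/* A waiter is pending; it remains blocked after the peek. */
		printk("head waiter: %s\n", xnthread_name(thread));
	else
		printk("no thread pending\n");
}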
static int __intr_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	RT_INTR *intr = (RT_INTR *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	{
		xnpholder_t *holder;

		p += sprintf(p, "hits=%lu, pending=%u, mode=0x%x\n",
			     __intr_get_hits(intr), intr->pending,
			     intr->mode);

		/* Pended interrupt -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&intr->synch_base));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&intr->synch_base),
					holder);
		}
	}
#else /* !CONFIG_XENO_OPT_PERVASIVE */
	p += sprintf(p, "hits=%lu\n", __intr_get_hits(intr));
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int __event_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	RT_EVENT *event = (RT_EVENT *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	p += sprintf(p, "=0x%lx\n", event->value);

	if (xnsynch_nsleepers(&event->synch_base) > 0) {
		xnpholder_t *holder;

		/* Pended event -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&event->synch_base));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			RT_TASK *task = thread2rtask(sleeper);
			const char *mode =
				(task->wait_args.event.mode & EV_ANY) ?
				"any" : "all";
			unsigned long mask = task->wait_args.event.mask;
			p += sprintf(p, "+%s (mask=0x%lx, %s)\n",
				     xnthread_name(sleeper), mask, mode);
			holder = nextpq(
				xnsynch_wait_queue(&event->synch_base),
				holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int msgq_read_proc(char *page, char **start, off_t off,
			  int count, int *eof, void *data)
{
	wind_msgq_t *queue = (wind_msgq_t *)data;
	char *p = page;
	int len;
	spl_t s;

	p += sprintf(p, "porder=%s:mlength=%u:mcount=%d\n",
		     xnsynch_test_flags(&queue->synchbase, XNSYNCH_PRIO) ?
		     "prio" : "fifo",
		     queue->msg_length, countq(&queue->msgq));

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&queue->synchbase) > 0) {
		xnpholder_t *holder;

		/* Pended queue -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&queue->synchbase));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&queue->synchbase),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
void xnsynch_forget_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner, *target;
	struct xnpholder *h;

	trace_mark(xn_nucleus, synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		owner = synch->owner;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else {
			target = link2thread(getheadpq(&synch->pendq), plink);
			h = getheadpq(&owner->claimq);
			if (w_cprio(target) != h->prio) {
				/*
				 * Reorder the claim queue, and lower
				 * the priority to the required
				 * minimum needed to prevent priority
				 * inversion.
				 */
				removepq(&owner->claimq, &synch->link);
				insertpqf(&owner->claimq, &synch->link,
					  w_cprio(target));

				h = getheadpq(&owner->claimq);
				if (h->prio < w_cprio(owner))
					xnsynch_renice_thread(owner, target);
			}
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
static int __sem_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	RT_SEM *sem = (RT_SEM *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_nsleepers(&sem->synch_base) == 0)
		/* Idle/posted semaphore -- dump count. */
		p += sprintf(p, "=%lu\n", sem->count);
	else {
		xnpholder_t *holder;

		/* Pended semaphore -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&sem->synch_base));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&sem->synch_base),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct vfile_priv *priv = xnvfile_iterator_priv(it);
	RT_COND *cond = xnvfile_priv(it->vfile);
	struct vfile_data *p = data;
	struct xnthread *thread;

	if (priv->curr == NULL)
		return 0;	/* We are done. */

	/* Fetch current waiter, advance list cursor. */
	thread = link2thread(priv->curr, plink);
	priv->curr = nextpq(xnsynch_wait_queue(&cond->synch_base),
			    priv->curr);

	/* Collect thread name to be output in ->show(). */
	strncpy(p->name, xnthread_name(thread), sizeof(p->name));

	return 1;
}
static int __mbx_read_proc(char *page, char **start, off_t off,
			   int count, int *eof, void *data)
{
	uimbx_t *mbx = (uimbx_t *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	p += sprintf(p, "%d/%d message(s), attr=%s\n",
		     mbx->mcount, mbx->bufcnt,
		     mbx->mbxatr & TA_TPRI ? "TA_TPRI" : "TA_TFIFO");

	if (xnsynch_pended_p(&mbx->synchbase)) {
		xnpholder_t *holder;

		/* Pended mbx -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&mbx->synchbase));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&mbx->synchbase),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
/*
 * Detect when a thread is about to relax while holding a
 * synchronization object currently claimed by another thread, which
 * bears the TWARNSW bit (thus advertising a concern about potential
 * spurious relaxes and priority inversion). By relying on the claim
 * queue, we restrict the checks to PIP-enabled objects, but that
 * already covers most of the use cases anyway.
 */
void xnsynch_detect_claimed_relax(struct xnthread *owner)
{
	struct xnpholder *hs, *ht;
	struct xnthread *sleeper;
	struct xnsynch *synch;

	for (hs = getheadpq(&owner->claimq); hs != NULL;
	     hs = nextpq(&owner->claimq, hs)) {
		synch = link2synch(hs);
		for (ht = getheadpq(&synch->pendq); ht != NULL;
		     ht = nextpq(&synch->pendq, ht)) {
			sleeper = link2thread(ht, plink);
			if (xnthread_test_state(sleeper, XNTRAPSW)) {
				xnthread_set_info(sleeper, XNSWREP);
				xnshadow_send_sig(sleeper, SIGDEBUG,
						  SIGDEBUG_MIGRATE_PRIOINV, 1);
			}
		}
	}
}
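/*
 * Hypothetical call-site sketch (not taken from the source above):
 * a relax path would run the check on the current thread right
 * before it leaves primary mode, so that TWARNSW-flagged waiters
 * sleeping on synch objects claimed by that thread are signaled.
 * before_relax() itself is illustrative only.
 */
static void before_relax(struct xnthread *curr)
{
	/* Scan curr's claim queue and send SIGDEBUG to flagged sleepers. */
	xnsynch_detect_claimed_relax(curr);
}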
static int __flag_read_proc(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	uiflag_t *flag = (uiflag_t *)data;
	char *p = page;
	int len;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	p += sprintf(p, "=0x%x, attr=%s\n", flag->flgvalue,
		     flag->flgatr & TA_WMUL ? "TA_WMUL" : "TA_WSGL");

	if (xnsynch_nsleepers(&flag->synchbase) > 0) {
		xnpholder_t *holder;

		/* Pended flag -- dump waiters. */
		holder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

		while (holder) {
			xnthread_t *sleeper = link2thread(holder, plink);
			p += sprintf(p, "+%s\n", xnthread_name(sleeper));
			holder = nextpq(xnsynch_wait_queue(&flag->synchbase),
					holder);
		}
	}

	xnlock_put_irqrestore(&nklock, s);

	len = (p - page) - off;
	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;

	return len;
}
ER ref_mbx(T_RMBX *pk_rmbx, ID mbxid)
{
	uitask_t *sleeper;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);
	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&mbx->synchbase)) {
		sleeper = thread2uitask(link2thread(
			getheadpq(xnsynch_wait_queue(&mbx->synchbase)),
			plink));
		pk_rmbx->wtsk = sleeper->id;
	} else
		pk_rmbx->wtsk = FALSE;

	pk_rmbx->exinf = mbx->exinf;
	pk_rmbx->pk_msg =
		mbx->mcount > 0 ? mbx->ring[mbx->rdptr] : (T_MSG *)NADR;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
ER ref_flg(T_RFLG *pk_rflg, ID flgid)
{
	uitask_t *sleeper;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);
	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (xnsynch_pended_p(&flag->synchbase)) {
		xnpholder_t *holder =
			getheadpq(xnsynch_wait_queue(&flag->synchbase));
		xnthread_t *thread = link2thread(holder, plink);
		sleeper = thread2uitask(thread);
		pk_rflg->wtsk = sleeper->id;
	} else
		pk_rflg->wtsk = FALSE;

	pk_rflg->exinf = flag->exinf;
	pk_rflg->flgptn = flag->flgvalue;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}