static void xnsynch_clear_boost(struct xnsynch *synch,
                                struct xnthread *owner)
{
        struct xnthread *target;
        struct xnsynch *hsynch;
        struct xnpholder *h;
        int wprio;

        removepq(&owner->claimq, &synch->link);
        __clrbits(synch->status, XNSYNCH_CLAIMED);
        wprio = w_bprio(owner);

        if (emptypq_p(&owner->claimq)) {
                xnthread_clear_state(owner, XNBOOST);
                target = owner;
        } else {
                /* Find the highest priority needed to enforce the PIP. */
                hsynch = link2synch(getheadpq(&owner->claimq));
                h = getheadpq(&hsynch->pendq);
                XENO_BUGON(NUCLEUS, h == NULL);
                target = link2thread(h, plink);
                if (w_cprio(target) > wprio)
                        wprio = w_cprio(target);
                else
                        target = owner;
        }

        if (w_cprio(owner) != wprio &&
            !xnthread_test_state(owner, XNZOMBIE))
                xnsynch_renice_thread(owner, target);
}
int rt_heap_free(RT_HEAP *heap, void *block)
{
        int err, nwake;
        spl_t s;

        if (block == NULL)
                return -EINVAL;

        xnlock_get_irqsave(&nklock, s);

        heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

        if (!heap) {
                err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
                goto unlock_and_exit;
        }

        if (heap->mode & H_SINGLE) {    /* No-op in single-block mode. */
                err = 0;
                goto unlock_and_exit;
        }

        err = xnheap_free(&heap->heap_base, block);

        if (!err && xnsynch_nsleepers(&heap->synch_base) > 0) {
                xnpholder_t *holder, *nholder;

                nholder = getheadpq(xnsynch_wait_queue(&heap->synch_base));
                nwake = 0;

                while ((holder = nholder) != NULL) {
                        RT_TASK *sleeper =
                                thread2rtask(link2thread(holder, plink));
                        void *block;

                        block = xnheap_alloc(&heap->heap_base,
                                             sleeper->wait_args.heap.size);
                        if (block) {
                                /*
                                 * xnsynch_wakeup_this_sleeper() unlinks
                                 * the holder and returns the next one,
                                 * so the scan may continue safely after
                                 * the removal.
                                 */
                                nholder = xnsynch_wakeup_this_sleeper(
                                        &heap->synch_base, holder);
                                sleeper->wait_args.heap.block = block;
                                nwake++;
                        } else
                                nholder = nextpq(xnsynch_wait_queue(&heap->synch_base),
                                                 holder);
                }

                if (nwake > 0)
                        xnpod_schedule();
        }

unlock_and_exit:

        xnlock_put_irqrestore(&nklock, s);

        return err;
}
static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        RT_BUFFER *bf = xnvfile_priv(it->vfile);
        struct vfile_data *p = data;
        struct xnthread *thread;
        struct xnpqueue *waitq;

        if (priv->curr == NULL) {       /* Attempt to switch queues. */
                if (!priv->input)
                        /* Finished output side, we are done. */
                        return 0;
                priv->input = 0;
                waitq = xnsynch_wait_queue(&bf->osynch_base);
                priv->curr = getheadpq(waitq);
                if (priv->curr == NULL)
                        return 0;
        } else
                waitq = priv->input ?
                        xnsynch_wait_queue(&bf->isynch_base) :
                        xnsynch_wait_queue(&bf->osynch_base);

        /* Fetch current waiter, advance list cursor. */
        thread = link2thread(priv->curr, plink);
        priv->curr = nextpq(waitq, priv->curr);
        /* Collect thread name to be output in ->show(). */
        strncpy(p->name, xnthread_name(thread), sizeof(p->name));
        p->input = priv->input;

        return 1;
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        wind_sem_t *sem = xnvfile_priv(it->vfile);
        struct xnthread *owner;

        sem = wind_h2obj_active((SEM_ID)sem, WIND_SEM_MAGIC, wind_sem_t);
        if (sem == NULL)
                return -EIDRM;

        priv->curr = getheadpq(xnsynch_wait_queue(&sem->synchbase));
        priv->type = sem->vtbl->type;

        if (sem->vtbl == &semm_vtbl) {
                owner = xnsynch_owner(&sem->synchbase);
                if (owner)
                        strncpy(priv->owner, xnthread_name(owner),
                                sizeof(priv->owner));
                else
                        *priv->owner = 0;
                priv->count = -1U;
        } else
                priv->count = sem->count;

        return xnsynch_nsleepers(&sem->synchbase);
}
ER set_flg(ID flgid, UINT setptn)
{
        xnpholder_t *holder, *nholder;
        uiflag_t *flag;
        ER err = E_OK;
        spl_t s;

        if (xnpod_asynch_p())
                return EN_CTXID;

        if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
                return E_ID;

        xnlock_get_irqsave(&nklock, s);

        flag = xnmap_fetch(ui_flag_idmap, flgid);

        if (!flag) {
                err = E_NOEXS;
                goto unlock_and_exit;
        }

        if (setptn == 0)
                goto unlock_and_exit;

        flag->flgvalue |= setptn;

        if (!xnsynch_pended_p(&flag->synchbase))
                goto unlock_and_exit;

        /* Wake up every sleeper whose wait condition is now satisfied. */
        nholder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

        while ((holder = nholder) != NULL) {
                uitask_t *sleeper = thread2uitask(link2thread(holder, plink));
                UINT wfmode = sleeper->wargs.flag.wfmode;
                UINT waiptn = sleeper->wargs.flag.waiptn;

                if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0)
                    || (!(wfmode & TWF_ORW)
                        && ((waiptn & flag->flgvalue) == waiptn))) {
                        nholder = xnsynch_wakeup_this_sleeper(&flag->synchbase,
                                                              holder);
                        sleeper->wargs.flag.waiptn = flag->flgvalue;
                        if (wfmode & TWF_CLR)
                                flag->flgvalue = 0;
                } else
                        nholder = nextpq(xnsynch_wait_queue(&flag->synchbase),
                                         holder);
        }

        xnpod_schedule();

unlock_and_exit:

        xnlock_put_irqrestore(&nklock, s);

        return err;
}
void xnsynch_forget_sleeper(struct xnthread *thread)
{
        struct xnsynch *synch = thread->wchan;
        struct xnthread *owner, *target;
        struct xnpholder *h;

        trace_mark(xn_nucleus, synch_forget,
                   "thread %p thread_name %s synch %p",
                   thread, xnthread_name(thread), synch);

        xnthread_clear_state(thread, XNPEND);
        thread->wchan = NULL;
        removepq(&synch->pendq, &thread->plink);

        if (testbits(synch->status, XNSYNCH_CLAIMED)) {
                /* Find the highest priority needed to enforce the PIP. */
                owner = synch->owner;

                if (emptypq_p(&synch->pendq))
                        /* No more sleepers: clear the boost. */
                        xnsynch_clear_boost(synch, owner);
                else {
                        target = link2thread(getheadpq(&synch->pendq), plink);
                        h = getheadpq(&owner->claimq);
                        if (w_cprio(target) != h->prio) {
                                /*
                                 * Reorder the claim queue, and lower
                                 * the priority to the required
                                 * minimum needed to prevent priority
                                 * inversion.
                                 */
                                removepq(&owner->claimq, &synch->link);
                                insertpqf(&owner->claimq, &synch->link,
                                          w_cprio(target));

                                h = getheadpq(&owner->claimq);
                                if (h->prio < w_cprio(owner))
                                        xnsynch_renice_thread(owner, target);
                        }
                }
        }

        xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        struct vrtxmb *mb = xnvfile_priv(it->vfile);

        priv->curr = getheadpq(xnsynch_wait_queue(&mb->synchbase));
        priv->msg = mb->msg;

        return xnsynch_nsleepers(&mb->synchbase);
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        struct vrtxsem *sem = xnvfile_priv(it->vfile);

        priv->curr = getheadpq(xnsynch_wait_queue(&sem->synchbase));
        priv->count = sem->count;

        return xnsynch_nsleepers(&sem->synchbase);
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        struct uiflag *flag = xnvfile_priv(it->vfile);

        priv->curr = getheadpq(xnsynch_wait_queue(&flag->synchbase));
        priv->value = flag->flgvalue;

        return xnsynch_nsleepers(&flag->synchbase);
}
/*
 * Detect when a thread is about to relax while holding a
 * synchronization object currently claimed by another thread, which
 * bears the TWARNSW bit (thus advertising a concern about potential
 * spurious relaxes and priority inversion). By relying on the claim
 * queue, we restrict the checks to PIP-enabled objects, but that
 * already covers most of the use cases anyway.
 */
void xnsynch_detect_claimed_relax(struct xnthread *owner)
{
        struct xnpholder *hs, *ht;
        struct xnthread *sleeper;
        struct xnsynch *synch;

        for (hs = getheadpq(&owner->claimq); hs != NULL;
             hs = nextpq(&owner->claimq, hs)) {
                synch = link2synch(hs);
                for (ht = getheadpq(&synch->pendq); ht != NULL;
                     ht = nextpq(&synch->pendq, ht)) {
                        sleeper = link2thread(ht, plink);
                        if (xnthread_test_state(sleeper, XNTRAPSW)) {
                                xnthread_set_info(sleeper, XNSWREP);
                                xnshadow_send_sig(sleeper, SIGDEBUG,
                                                  SIGDEBUG_MIGRATE_PRIOINV, 1);
                        }
                }
        }
}
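For illustration only, here is a minimal sketch of how a caller might use this detector; the call site and function name below are assumptions for the example, not the actual nucleus relax path:

/*
 * Hypothetical usage sketch (not the actual nucleus code path): run
 * the detector before letting @thread leave primary mode, so that
 * any XNTRAPSW waiter pending on one of its claimed objects gets
 * its SIGDEBUG notification first.
 */
static void warn_claimed_relax(struct xnthread *thread)
{
        if (!emptypq_p(&thread->claimq))
                xnsynch_detect_claimed_relax(thread);

        /* ... migration to secondary mode would follow here ... */
}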
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        RT_COND *cond = xnvfile_priv(it->vfile);

        cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);
        if (cond == NULL)
                return -EIDRM;

        priv->curr = getheadpq(xnsynch_wait_queue(&cond->synch_base));

        return xnsynch_nsleepers(&cond->synch_base);
}
void xnsynch_forget_sleeper(xnthread_t *thread)
{
        xnsynch_t *synch = thread->wchan;

        trace_mark(xn_nucleus_synch_forget,
                   "thread %p thread_name %s synch %p",
                   thread, xnthread_name(thread), synch);

        xnthread_clear_state(thread, XNPEND);
        thread->wchan = NULL;
        removepq(&synch->pendq, &thread->plink);

        if (testbits(synch->status, XNSYNCH_CLAIMED)) {
                /* Find the highest priority needed to enforce the PIP. */
                xnthread_t *owner = synch->owner;
                int rprio;

                if (emptypq_p(&synch->pendq))
                        /* No more sleepers: clear the boost. */
                        xnsynch_clear_boost(synch, owner);
                else if (getheadpq(&synch->pendq)->prio !=
                         getheadpq(&owner->claimq)->prio) {
                        /*
                         * Reorder the claim queue, and lower the
                         * priority to the required minimum needed to
                         * prevent priority inversion.
                         */
                        removepq(&owner->claimq, &synch->link);
                        insertpqf(&owner->claimq, &synch->link,
                                  getheadpq(&synch->pendq)->prio);

                        rprio = getheadpq(&owner->claimq)->prio;
                        if (rprio < owner->cprio)
                                xnsynch_renice_thread(owner, rprio);
                }
        }

        xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
int rt_event_signal(RT_EVENT *event, unsigned long mask)
{
        xnpholder_t *holder, *nholder;
        int err = 0, resched = 0;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

        if (!event) {
                err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
                goto unlock_and_exit;
        }

        /* Post the flags. */
        event->value |= mask;

        /* And wakeup any sleeper having its request fulfilled. */
        nholder = getheadpq(xnsynch_wait_queue(&event->synch_base));

        while ((holder = nholder) != NULL) {
                RT_TASK *sleeper = thread2rtask(link2thread(holder, plink));
                int mode = sleeper->wait_args.event.mode;
                unsigned long bits = sleeper->wait_args.event.mask;

                if (((mode & EV_ANY) && (bits & event->value) != 0) ||
                    (!(mode & EV_ANY) && ((bits & event->value) == bits))) {
                        sleeper->wait_args.event.mask = (bits & event->value);
                        nholder = xnsynch_wakeup_this_sleeper(&event->synch_base,
                                                              holder);
                        resched = 1;
                } else
                        nholder = nextpq(xnsynch_wait_queue(&event->synch_base),
                                         holder);
        }

        if (resched)
                xnpod_schedule();

unlock_and_exit:

        xnlock_put_irqrestore(&nklock, s);

        return err;
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        RT_EVENT *event = xnvfile_priv(it->vfile);

        event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);
        if (event == NULL)
                return -EIDRM;

        priv->curr = getheadpq(xnsynch_wait_queue(&event->synch_base));
        priv->value = event->value;

        return xnsynch_nsleepers(&event->synch_base);
}
static int __heap_read_proc(char *page,
                            char **start,
                            off_t off, int count, int *eof, void *data)
{
        RT_HEAP *heap = (RT_HEAP *)data;
        char *p = page;
        int len;
        spl_t s;

        p += sprintf(p, "type=%s:size=%lu:used=%lu:numaps=%d\n",
                     (heap->mode & H_SHARED) == H_SHARED ? "shared" :
                     (heap->mode & H_MAPPABLE) ? "mappable" : "kernel",
                     xnheap_usable_mem(&heap->heap_base),
                     xnheap_used_mem(&heap->heap_base),
                     atomic_read(&heap->heap_base.archdep.numaps));

        xnlock_get_irqsave(&nklock, s);

        if (xnsynch_nsleepers(&heap->synch_base) > 0) {
                xnpholder_t *holder;

                /* Pended heap -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&heap->synch_base));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        RT_TASK *task = thread2rtask(sleeper);
                        size_t size = task->wait_args.heap.size;
                        p += sprintf(p, "+%s (size=%zd)\n",
                                     xnthread_name(sleeper), size);
                        holder = nextpq(xnsynch_wait_queue(&heap->synch_base),
                                        holder);
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
/*!
 * \fn struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
 * \brief Access the thread leading a synch object wait queue.
 *
 * This service returns the descriptor address of the thread leading a
 * synchronization object wait queue.
 *
 * @param synch The descriptor address of the target synchronization object.
 *
 * @return The descriptor address of the leading thread, or NULL if the
 * wait queue is empty.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Interrupt service routine
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: never.
 */
struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
{
        struct xnthread *thread = NULL;
        struct xnpholder *holder;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        holder = getheadpq(&synch->pendq);
        if (holder)
                thread = link2thread(holder, plink);

        xnlock_put_irqrestore(&nklock, s);

        return thread;
}
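As a usage illustration, the helper below (hypothetical, not part of the nucleus API) peeks at the wait queue without dequeuing or unblocking anything; a NULL return from xnsynch_peek_pendq() means no thread is currently pending:

/*
 * Hypothetical helper built on xnsynch_peek_pendq(): report the name
 * of the thread currently leading @synch's wait queue. Returns
 * "(none)" when nobody is pending.
 */
static const char *leading_waiter_name(struct xnsynch *synch)
{
        struct xnthread *thread = xnsynch_peek_pendq(synch);

        return thread ? xnthread_name(thread) : "(none)";
}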
static int __event_read_proc(char *page,
                             char **start,
                             off_t off, int count, int *eof, void *data)
{
        RT_EVENT *event = (RT_EVENT *)data;
        char *p = page;
        int len;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        p += sprintf(p, "=0x%lx\n", event->value);

        if (xnsynch_nsleepers(&event->synch_base) > 0) {
                xnpholder_t *holder;

                /* Pended event -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&event->synch_base));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        RT_TASK *task = thread2rtask(sleeper);
                        const char *mode =
                                (task->wait_args.event.mode & EV_ANY) ?
                                "any" : "all";
                        unsigned long mask = task->wait_args.event.mask;
                        p += sprintf(p, "+%s (mask=0x%lx, %s)\n",
                                     xnthread_name(sleeper), mask, mode);
                        holder = nextpq(xnsynch_wait_queue(&event->synch_base),
                                        holder);
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
static int __intr_read_proc(char *page,
                            char **start,
                            off_t off, int count, int *eof, void *data)
{
        RT_INTR *intr = (RT_INTR *)data;
        char *p = page;
        int len;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_XENO_OPT_PERVASIVE
        {
                xnpholder_t *holder;

                p += sprintf(p, "hits=%lu, pending=%u, mode=0x%x\n",
                             __intr_get_hits(intr), intr->pending,
                             intr->mode);

                /* Pended interrupt -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&intr->synch_base));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        p += sprintf(p, "+%s\n", xnthread_name(sleeper));
                        holder = nextpq(xnsynch_wait_queue(&intr->synch_base),
                                        holder);
                }
        }
#else /* !CONFIG_XENO_OPT_PERVASIVE */
        p += sprintf(p, "hits=%lu\n", __intr_get_hits(intr));
#endif /* CONFIG_XENO_OPT_PERVASIVE */

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        wind_msgq_t *q = xnvfile_priv(it->vfile);

        q = wind_h2obj_active((MSG_Q_ID)q, WIND_MSGQ_MAGIC, wind_msgq_t);
        if (q == NULL)
                return -EIDRM;

        priv->curr = getheadpq(xnsynch_wait_queue(&q->synchbase));
        priv->flags = xnsynch_test_flags(&q->synchbase, XNSYNCH_PRIO);
        priv->mlength = q->msg_length;
        priv->mcount = countq(&q->msgq);

        return xnsynch_nsleepers(&q->synchbase);
}
void xnsynch_release_all_ownerships(xnthread_t *thread)
{
        xnpholder_t *holder, *nholder;

        for (holder = getheadpq(&thread->claimq); holder != NULL;
             holder = nholder) {
                /*
                 * Since xnsynch_wakeup_one_sleeper() alters the claim
                 * queue, we need to be conservative while scanning it.
                 */
                xnsynch_t *synch = link2synch(holder);
                nholder = nextpq(&thread->claimq, holder);
                xnsynch_wakeup_one_sleeper(synch);
                if (synch->cleanup)
                        synch->cleanup(synch);
        }
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        RT_HEAP *heap = xnvfile_priv(it->vfile);

        heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);
        if (heap == NULL)
                return -EIDRM;

        priv->curr = getheadpq(xnsynch_wait_queue(&heap->synch_base));
        priv->mode = heap->mode;
        priv->usable_mem = xnheap_usable_mem(&heap->heap_base);
        priv->used_mem = xnheap_used_mem(&heap->heap_base);
        priv->nrmaps = heap->heap_base.archdep.numaps;

        return xnsynch_nsleepers(&heap->synch_base);
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        struct vrtxmx *mx = xnvfile_priv(it->vfile);
        struct xnthread *owner;

        priv->curr = getheadpq(xnsynch_wait_queue(&mx->synchbase));
        owner = xnsynch_owner(&mx->synchbase);
        if (owner)
                strncpy(priv->owner, xnthread_name(owner),
                        sizeof(priv->owner));
        else
                *priv->owner = 0;

        return xnsynch_nsleepers(&mx->synchbase);
}
static int msgq_read_proc(char *page,
                          char **start,
                          off_t off, int count, int *eof, void *data)
{
        wind_msgq_t *queue = (wind_msgq_t *)data;
        char *p = page;
        int len;
        spl_t s;

        p += sprintf(p, "porder=%s:mlength=%u:mcount=%d\n",
                     xnsynch_test_flags(&queue->synchbase,
                                        XNSYNCH_PRIO) ? "prio" : "fifo",
                     queue->msg_length, countq(&queue->msgq));

        xnlock_get_irqsave(&nklock, s);

        if (xnsynch_nsleepers(&queue->synchbase) > 0) {
                xnpholder_t *holder;

                /* Pended queue -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&queue->synchbase));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        p += sprintf(p, "+%s\n", xnthread_name(sleeper));
                        holder = nextpq(xnsynch_wait_queue(&queue->synchbase),
                                        holder);
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
void xnsynch_release_all_ownerships(struct xnthread *thread)
{
        struct xnpholder *holder, *nholder;
        struct xnsynch *synch;

        for (holder = getheadpq(&thread->claimq); holder != NULL;
             holder = nholder) {
                /*
                 * Since xnsynch_release() alters the claim queue, we
                 * need to be conservative while scanning it.
                 */
                synch = link2synch(holder);
                nholder = nextpq(&thread->claimq, holder);
                xnsynch_release_thread(synch, thread);
                if (synch->cleanup)
                        synch->cleanup(synch);
        }
}
static int __sem_read_proc(char *page,
                           char **start,
                           off_t off, int count, int *eof, void *data)
{
        RT_SEM *sem = (RT_SEM *)data;
        char *p = page;
        int len;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        if (xnsynch_nsleepers(&sem->synch_base) == 0)
                /* Idle/posted semaphore -- dump count. */
                p += sprintf(p, "=%lu\n", sem->count);
        else {
                xnpholder_t *holder;

                /* Pended semaphore -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&sem->synch_base));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        p += sprintf(p, "+%s\n", xnthread_name(sleeper));
                        holder = nextpq(xnsynch_wait_queue(&sem->synch_base),
                                        holder);
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
{
        struct vfile_priv *priv = xnvfile_iterator_priv(it);
        RT_BUFFER *bf = xnvfile_priv(it->vfile);

        bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
        if (bf == NULL)
                return -EIDRM;

        /* Start collecting records from the input wait side. */
        priv->curr = getheadpq(xnsynch_wait_queue(&bf->isynch_base));
        priv->mode = bf->mode;
        priv->bufsz = bf->bufsz;
        priv->fillsz = bf->fillsz;
        priv->input = 1;

        return xnsynch_nsleepers(&bf->isynch_base) +
                xnsynch_nsleepers(&bf->osynch_base);
}
static int __mbx_read_proc(char *page,
                           char **start,
                           off_t off, int count, int *eof, void *data)
{
        uimbx_t *mbx = (uimbx_t *)data;
        char *p = page;
        int len;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        p += sprintf(p, "%d/%d message(s), attr=%s\n",
                     mbx->mcount, mbx->bufcnt,
                     mbx->mbxatr & TA_TPRI ? "TA_TPRI" : "TA_TFIFO");

        if (xnsynch_pended_p(&mbx->synchbase)) {
                xnpholder_t *holder;

                /* Pended mbx -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&mbx->synchbase));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        p += sprintf(p, "+%s\n", xnthread_name(sleeper));
                        holder = nextpq(xnsynch_wait_queue(&mbx->synchbase),
                                        holder);
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
static int __flag_read_proc(char *page,
                            char **start,
                            off_t off, int count, int *eof, void *data)
{
        uiflag_t *flag = (uiflag_t *)data;
        char *p = page;
        int len;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        p += sprintf(p, "=0x%x, attr=%s\n", flag->flgvalue,
                     flag->flgatr & TA_WMUL ? "TA_WMUL" : "TA_WSGL");

        if (xnsynch_nsleepers(&flag->synchbase) > 0) {
                xnpholder_t *holder;

                /* Pended flag -- dump waiters. */

                holder = getheadpq(xnsynch_wait_queue(&flag->synchbase));

                while (holder) {
                        xnthread_t *sleeper = link2thread(holder, plink);
                        p += sprintf(p, "+%s\n", xnthread_name(sleeper));
                        holder = nextpq(xnsynch_wait_queue(&flag->synchbase),
                                        holder);
                }
        }

        xnlock_put_irqrestore(&nklock, s);

        len = (p - page) - off;
        if (len <= off + count)
                *eof = 1;
        *start = page + off;
        if (len > count)
                len = count;
        if (len < 0)
                len = 0;

        return len;
}
ER ref_mbx(T_RMBX *pk_rmbx, ID mbxid)
{
        uitask_t *sleeper;
        ER err = E_OK;
        uimbx_t *mbx;
        spl_t s;

        if (xnpod_asynch_p())
                return EN_CTXID;

        /* Bound-check against the mailbox ID space, not the flag one. */
        if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
                return E_ID;

        xnlock_get_irqsave(&nklock, s);

        mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

        if (!mbx) {
                err = E_NOEXS;
                goto unlock_and_exit;
        }

        if (xnsynch_pended_p(&mbx->synchbase)) {
                sleeper = thread2uitask(
                        link2thread(getheadpq(xnsynch_wait_queue(&mbx->synchbase)),
                                    plink));
                pk_rmbx->wtsk = sleeper->id;
        } else
                pk_rmbx->wtsk = FALSE;

        pk_rmbx->exinf = mbx->exinf;
        pk_rmbx->pk_msg =
                mbx->mcount > 0 ? mbx->ring[mbx->rdptr] : (T_MSG *)NADR;

unlock_and_exit:

        xnlock_put_irqrestore(&nklock, s);

        return err;
}
static void xnsynch_clear_boost(xnsynch_t *synch, xnthread_t *lastowner)
{
        int downprio;

        removepq(&lastowner->claimq, &synch->link);
        downprio = lastowner->bprio;
        __clrbits(synch->status, XNSYNCH_CLAIMED);

        if (emptypq_p(&lastowner->claimq))
                xnthread_clear_state(lastowner, XNBOOST);
        else {
                /* Find the highest priority needed to enforce the PIP. */
                int rprio = getheadpq(&lastowner->claimq)->prio;

                if (rprio > downprio)
                        downprio = rprio;
        }

        if (lastowner->cprio != downprio)
                xnsynch_renice_thread(lastowner, downprio);
}