ER del_mbx(ID mbxid)
{
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);

	if (!mbx) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_mbx_idmap, mbx->id);
	ui_mark_deleted(mbx);

#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(mbx->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	/*
	 * Destroy the synchronization object before releasing the
	 * descriptor, so the wait queue is not touched after the
	 * memory holding it has been freed.
	 */
	if (xnsynch_destroy(&mbx->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(mbx->ring);
	xnfree(mbx);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
ER del_flg(ID flgid)
{
	uiflag_t *flag;
	spl_t s;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);

	if (!flag) {
		xnlock_put_irqrestore(&nklock, s);
		return E_NOEXS;
	}

	xnmap_remove(ui_flag_idmap, flag->id);
	ui_mark_deleted(flag);
	xnregistry_remove(flag->handle);

	/*
	 * Destroy the synchronization object before freeing the
	 * descriptor that embeds it.
	 */
	if (xnsynch_destroy(&flag->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnfree(flag);

	xnlock_put_irqrestore(&nklock, s);

	return E_OK;
}
static void xnselector_destroy_loop(void *cookie)
{
	struct xnselector *selector;
	xnholder_t *holder;
	int resched;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	while ((holder = getq(&xnselectors))) {
		selector = container_of(holder, struct xnselector, destroy_link);

		while ((holder = getq(&selector->bindings))) {
			struct xnselect_binding *binding;
			struct xnselect *fd;

			binding = link2binding(holder, slink);
			fd = binding->fd;
			removeq(&fd->bindings, &binding->link);
			xnlock_put_irqrestore(&nklock, s);
			xnfree(binding);
			xnlock_get_irqsave(&nklock, s);
		}

		resched = xnsynch_destroy(&selector->synchbase) == XNSYNCH_RESCHED;
		xnlock_put_irqrestore(&nklock, s);

		xnfree(selector);
		if (resched)
			xnpod_schedule();

		xnlock_get_irqsave(&nklock, s);
	}

	xnlock_put_irqrestore(&nklock, s);
}
static void __heap_post_release(struct xnheap *h)
{
	RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	removeq(heap->rqueue, &heap->rlink);

	if (heap->handle)
		xnregistry_remove(heap->handle);

	if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
	if (heap->cpid)
		xnfree(heap);
#endif
}
int mx_destroy_internal(vrtxmx_t *mx)
{
	int s = xnsynch_destroy(&mx->synchbase);

	xnmap_remove(vrtx_mx_idmap, mx->mid);
	removeq(&vrtx_mx_q, &mx->link);
	xnregistry_remove(mx->handle);
	xnfree(mx);

	return s;
}
int rt_buffer_delete(RT_BUFFER *bf)
{
	int ret = 0, resched;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	xnarch_free_host_mem(bf->bufmem, bf->bufsz);
	removeq(bf->rqueue, &bf->rlink);
	resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
	resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

	if (bf->handle)
		xnregistry_remove(bf->handle);

	xeno_mark_deleted(bf);

	if (resched)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
static int sem_destroy_internal(vrtxsem_t *sem)
{
	int s;

	removeq(&vrtx_sem_q, &sem->link);
	xnmap_remove(vrtx_sem_idmap, sem->semid);
	s = xnsynch_destroy(&sem->synchbase);
	xnregistry_remove(sem->handle);
	vrtx_mark_deleted(sem);
	xnfree(sem);

	return s;
}
static void sem_destroy_inner(pse51_sem_t *sem, pse51_kqueues_t *q)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	removeq(&q->semq, &sem->link);
	if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();
	xnlock_put_irqrestore(&nklock, s);

	if (sem->is_named)
		xnfree(sem2named_sem(sem));
	else
		xnfree(sem);
}
static int sem_destroy_internal(vrtxsem_t *sem)
{
	int s;

	removeq(&vrtx_sem_q, &sem->link);
	xnmap_remove(vrtx_sem_idmap, sem->semid);
	s = xnsynch_destroy(&sem->synchbase);

#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(sem->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	vrtx_mark_deleted(sem);
	xnfree(sem);

	return s;
}
static void __heap_post_release(struct xnheap *h) /* nklock held, IRQs off */
{
	RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);

	removeq(heap->rqueue, &heap->rlink);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (heap->handle)
		xnregistry_remove(heap->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();
}
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);
	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

	return err;
}
int rt_event_delete(RT_EVENT *event)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);
	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	removeq(event->rqueue, &event->rlink);

	rc = xnsynch_destroy(&event->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (event->handle)
		xnregistry_remove(event->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xeno_mark_deleted(event);

	if (rc == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static void *__wind_shadow_eventcb(int event, void *data)
{
	struct wind_resource_holder *rh;

	switch (event) {

	case XNSHADOW_CLIENT_ATTACH:

		rh = (struct wind_resource_holder *)
			xnarch_alloc_host_mem(sizeof(*rh));
		if (!rh)
			return ERR_PTR(-ENOMEM);

		initq(&rh->wdq);
		/* A single server thread pends on this. */
		xnsynch_init(&rh->wdsynch, XNSYNCH_FIFO, NULL);
		initq(&rh->wdpending);
		rh->wdcount = 0;
		initq(&rh->msgQq);
		initq(&rh->semq);

		return &rh->ppd;

	case XNSHADOW_CLIENT_DETACH:

		rh = ppd2rholder((xnshadow_ppd_t *)data);
		wind_wd_flush_rq(&rh->wdq);
		xnsynch_destroy(&rh->wdsynch);
		/* No need to reschedule: all our threads have been zapped. */
		wind_msgq_flush_rq(&rh->msgQq);
		wind_sem_flush_rq(&rh->semq);

		xnarch_free_host_mem(rh, sizeof(*rh));

		return NULL;
	}

	return ERR_PTR(-EINVAL);
}
int rt_sem_delete(RT_SEM *sem)
{
	int err = 0, rc;
	spl_t s;

	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	sem = xeno_h2obj_validate(sem, XENO_SEM_MAGIC, RT_SEM);
	if (!sem) {
		err = xeno_handle_error(sem, XENO_SEM_MAGIC, RT_SEM);
		goto unlock_and_exit;
	}

	removeq(sem->rqueue, &sem->rlink);

	rc = xnsynch_destroy(&sem->synch_base);

	if (sem->handle)
		xnregistry_remove(sem->handle);

	xeno_mark_deleted(sem);

	if (rc == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
int xnpipe_disconnect(int minor)
{
	struct xnpipe_state *state;
	int need_sched = 0;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		xnlock_put_irqrestore(&nklock, s);
		return -EBADF;
	}

	__clrbits(state->status, XNPIPE_KERN_CONN);

	state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);

	if (!testbits(state->status, XNPIPE_USER_CONN))
		goto cleanup;

	xnpipe_flushq(state, inq, free_ibuf, s);

	if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	if (testbits(state->status, XNPIPE_USER_WREAD)) {
		/*
		 * Wake up the regular Linux task waiting for some
		 * operation from the Xenomai side (read/write or
		 * poll).
		 */
		__setbits(state->status, XNPIPE_USER_WREAD_READY);
		need_sched = 1;
	}

	if (state->asyncq) {	/* Schedule asynch sig. */
		__setbits(state->status, XNPIPE_USER_SIGIO);
		need_sched = 1;
	}

cleanup:
	/*
	 * If xnpipe_release() has not fully run, enter lingering
	 * close. This will prevent the extra state from being wiped
	 * out until then.
	 */
	if (testbits(state->status, XNPIPE_USER_CONN))
		__setbits(state->status, XNPIPE_KERN_LCLOSE);
	else {
		xnlock_put_irqrestore(&nklock, s);
		state->ops.release(state->xstate);
		xnlock_get_irqsave(&nklock, s);
		xnpipe_minor_free(minor);
	}

	if (need_sched)
		xnpipe_schedule_request();

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
void taskev_destroy(psosevent_t *evgroup)
{
	if (xnsynch_destroy(&evgroup->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();
}
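The routines above share one teardown shape: unlink the descriptor from its container, drop its registry handle, destroy the embedded synchronization object, reschedule if xnsynch_destroy() returns XNSYNCH_RESCHED (meaning a sleeper was readied by the deletion), and only then release the memory. The following is a minimal sketch of that pattern, not taken from any skin; mytype_t, mytype_q and the field names are hypothetical, while the locking and nucleus calls mirror the extracts above.

/*
 * Hypothetical object type illustrating the common deletion pattern;
 * only the nklock/xnsynch_destroy/xnpod_schedule sequence reflects
 * the real call sites.
 */
static void mytype_delete_inner(mytype_t *obj)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	/* Unlink the descriptor so no new waiter can find it. */
	removeq(&mytype_q, &obj->link);

	if (obj->handle)
		xnregistry_remove(obj->handle);

	/*
	 * Destroy the wait queue while the descriptor is still valid;
	 * reschedule if a blocked task was readied by the deletion.
	 */
	if (xnsynch_destroy(&obj->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

	/* Release the memory only after the synch object is gone. */
	xnfree(obj);
}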