static void xnselector_destroy_loop(void *cookie)
{
        struct xnselector *selector;
        xnholder_t *holder;
        int resched;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        while ((holder = getq(&xnselectors))) {
                selector = container_of(holder, struct xnselector, destroy_link);

                while ((holder = getq(&selector->bindings))) {
                        struct xnselect_binding *binding;
                        struct xnselect *fd;

                        binding = link2binding(holder, slink);
                        fd = binding->fd;
                        removeq(&fd->bindings, &binding->link);

                        /* Drop the lock across the deallocation to keep
                           the critical section short. */
                        xnlock_put_irqrestore(&nklock, s);
                        xnfree(binding);
                        xnlock_get_irqsave(&nklock, s);
                }

                resched =
                        xnsynch_destroy(&selector->synchbase) == XNSYNCH_RESCHED;

                xnlock_put_irqrestore(&nklock, s);
                xnfree(selector);
                if (resched)
                        xnpod_schedule();
                xnlock_get_irqsave(&nklock, s);
        }

        xnlock_put_irqrestore(&nklock, s);
}
/**
 * Set name attribute.
 *
 * This service sets the @a name attribute of the attribute object
 * @a attr to @a name.
 *
 * The @a name attribute is the name under which a thread created with the
 * attribute object @a attr will appear under /proc/xenomai/sched.
 *
 * If @a name is @a NULL, a unique default name will be used.
 *
 * This service is a non-portable extension of the POSIX interface.
 *
 * @param attr attribute object;
 *
 * @param name value of the @a name attribute.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EINVAL, @a attr is invalid;
 * - ENOMEM, insufficient memory exists in the system heap to duplicate the
 *   name string, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
 *
 * @par Valid contexts:
 * - kernel module initialization or cleanup routine;
 * - Xenomai kernel-space thread.
 */
int pthread_attr_setname_np(pthread_attr_t *attr, const char *name)
{
        char *old_name, *new_name;
        spl_t s;

        if (name) {
                new_name = xnmalloc(strlen(name) + 1);
                if (!new_name)
                        return ENOMEM;
                strcpy(new_name, name);
        } else
                new_name = NULL;

        xnlock_get_irqsave(&nklock, s);

        if (!pse51_obj_active(attr, PSE51_THREAD_ATTR_MAGIC, pthread_attr_t)) {
                xnlock_put_irqrestore(&nklock, s);
                if (name)
                        xnfree(new_name);
                return EINVAL;
        }

        old_name = attr->name;
        attr->name = new_name;

        xnlock_put_irqrestore(&nklock, s);

        if (old_name)
                xnfree(old_name);

        return 0;
}
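/*
 * Usage sketch (illustrative, not from the original source): naming a
 * kernel-space thread via its creation attributes. The worker() routine
 * and the "sampler" label are hypothetical.
 */
static void *worker(void *arg)
{
        return NULL;    /* runs as a Xenomai kernel-space thread */
}

void spawn_named_thread(void)
{
        pthread_attr_t attr;
        pthread_t tid;

        pthread_attr_init(&attr);
        pthread_attr_setname_np(&attr, "sampler");   /* may return ENOMEM */
        pthread_create(&tid, &attr, worker, NULL);
        pthread_attr_destroy(&attr);
}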
ER del_mbx(ID mbxid)
{
        uimbx_t *mbx;
        spl_t s;

        if (xnpod_asynch_p())
                return EN_CTXID;

        if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
                return E_ID;

        xnlock_get_irqsave(&nklock, s);

        mbx = xnmap_fetch(ui_mbx_idmap, mbxid);
        if (!mbx) {
                xnlock_put_irqrestore(&nklock, s);
                return E_NOEXS;
        }

        xnmap_remove(ui_mbx_idmap, mbx->id);
        ui_mark_deleted(mbx);
#ifdef CONFIG_XENO_OPT_REGISTRY
        xnregistry_remove(mbx->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

        /* Destroy the synch object before releasing the control block
           which embeds it. */
        if (xnsynch_destroy(&mbx->synchbase) == XNSYNCH_RESCHED)
                xnpod_schedule();

        xnfree(mbx->ring);
        xnfree(mbx);

        xnlock_put_irqrestore(&nklock, s);

        return E_OK;
}
static void buffer_finalize(struct syncobj *sobj)
{
        struct alchemy_buffer *bcb;

        bcb = container_of(sobj, struct alchemy_buffer, sobj);
        xnfree(bcb->buf);
        xnfree(bcb);
}
ER cre_mbx(ID mbxid, T_CMBX *pk_cmbx)
{
        uimbx_t *mbx;
        T_MSG **ring;

        if (xnpod_asynch_p())
                return EN_CTXID;

        if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
                return E_ID;

        if (pk_cmbx->bufcnt <= 0)
                return E_PAR;

        if (pk_cmbx->mbxatr & TA_MPRI)
                return E_RSATR;

        mbx = xnmalloc(sizeof(*mbx));
        if (!mbx)
                return E_NOMEM;

        ring = xnmalloc(sizeof(T_MSG *) * pk_cmbx->bufcnt);
        if (!ring) {
                xnfree(mbx);
                return E_NOMEM;
        }

        mbxid = xnmap_enter(ui_mbx_idmap, mbxid, mbx);
        if (mbxid <= 0) {
                xnfree(ring);
                xnfree(mbx);
                return E_OBJ;
        }

        xnsynch_init(&mbx->synchbase,
                     (pk_cmbx->mbxatr & TA_TPRI) ? XNSYNCH_PRIO : XNSYNCH_FIFO);

        mbx->id = mbxid;
        mbx->exinf = pk_cmbx->exinf;
        mbx->mbxatr = pk_cmbx->mbxatr;
        mbx->bufcnt = pk_cmbx->bufcnt;
        mbx->rdptr = 0;
        mbx->wrptr = 0;
        mbx->mcount = 0;
        mbx->ring = ring;

#ifdef CONFIG_XENO_OPT_REGISTRY
        sprintf(mbx->name, "mbx%d", mbxid);
        xnregistry_enter(mbx->name, mbx, &mbx->handle, &__mbx_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */

        xnarch_memory_barrier();
        mbx->magic = uITRON_MBX_MAGIC;

        return E_OK;
}
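/*
 * Caller-side sketch of the mailbox lifecycle (illustrative, not from
 * the original source; snd_msg()/rcv_msg() are assumed to follow the
 * standard uITRON 3.x prototypes).
 */
static T_MSG hdr;               /* normally embedded in a larger payload */

void mbx_example(void)
{
        T_CMBX cmbx = { .exinf = 0, .mbxatr = TA_TFIFO, .bufcnt = 16 };
        T_MSG *msg;

        if (cre_mbx(1, &cmbx) != E_OK)
                return;

        snd_msg(1, &hdr);       /* post a message */
        rcv_msg(&msg, 1);       /* take it back, blocking when empty */
        del_mbx(1);
}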
static void sem_destroy_inner(pse51_sem_t *sem, pse51_kqueues_t *q)
{
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        removeq(&q->semq, &sem->link);
        if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED)
                xnpod_schedule();
        xnlock_put_irqrestore(&nklock, s);

        if (sem->is_named)
                xnfree(sem2named_sem(sem));
        else
                xnfree(sem);
}
/* Must be called nklock locked, irq off. */
static void pse51_shm_destroy(pse51_shm_t *shm, int force)
{
        spl_t ignored;

        removeq(&pse51_shmq, &shm->link);
        xnlock_clear_irqon(&nklock);

        down(&shm->maplock);

        if (shm->addr) {
                xnheap_free(&shm->heapbase, shm->addr);
                xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);
                shm->addr = NULL;
                shm->size = 0;
        }

        if (force) {
                xnholder_t *holder;

                while ((holder = getq(&shm->mappings))) {
                        up(&shm->maplock);
                        xnfree(link2map(holder));
                        down(&shm->maplock);
                }
        }

        up(&shm->maplock);
        xnlock_get_irqsave(&nklock, ignored);
}
int rt_cond_delete(RT_COND *cond)
{
        struct alchemy_cond *ccb;
        struct service svc;
        int ret = 0;

        if (threadobj_async_p())
                return -EPERM;

        COPPERPLATE_PROTECT(svc);

        ccb = get_alchemy_cond(cond, &ret);
        if (ccb == NULL)
                goto out;

        ret = -__RT(pthread_cond_destroy(&ccb->cond));
        if (ret) {
                if (ret == -EBUSY)
                        put_alchemy_cond(ccb);
                goto out;
        }

        ccb->magic = ~cond_magic;
        put_alchemy_cond(ccb);
        cluster_delobj(&alchemy_cond_table, &ccb->cobj);
        __RT(pthread_mutex_destroy(&ccb->safe));
        xnfree(ccb);
out:
        COPPERPLATE_UNPROTECT(svc);

        return ret;
}
ER cre_flg(ID flgid, T_CFLG *pk_cflg)
{
        uiflag_t *flag;

        if (xnpod_asynch_p())
                return EN_CTXID;

        if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
                return E_ID;

        flag = xnmalloc(sizeof(*flag));
        if (!flag)
                return E_NOMEM;

        flgid = xnmap_enter(ui_flag_idmap, flgid, flag);
        if (flgid <= 0) {
                xnfree(flag);
                return E_OBJ;
        }

        xnsynch_init(&flag->synchbase, XNSYNCH_FIFO, NULL);

        flag->id = flgid;
        flag->exinf = pk_cflg->exinf;
        flag->flgatr = pk_cflg->flgatr;
        flag->flgvalue = pk_cflg->iflgptn;

        sprintf(flag->name, "flg%d", flgid);
        xnregistry_enter(flag->name, flag, &flag->handle, &__flag_pnode.node);

        xnarch_memory_barrier();
        flag->magic = uITRON_FLAG_MAGIC;

        return E_OK;
}
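/*
 * Event flag usage sketch (illustrative, not from the original source;
 * set_flg()/wai_flg() are assumed per the uITRON 3.x prototypes).
 */
void flg_example(void)
{
        T_CFLG cflg = { .exinf = 0, .flgatr = TA_WMUL, .iflgptn = 0 };
        UINT ptn;

        if (cre_flg(1, &cflg) != E_OK)
                return;

        set_flg(1, 0x1);                        /* raise bit 0 */
        wai_flg(&ptn, 1, 0x1, TWF_ORW);         /* satisfied at once */
        del_flg(1);
}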
static SEM_ID alloc_xsem(int options, int initval, int maxval)
{
        int sobj_flags = 0, ret;
        struct wind_sem *sem;

        if (options & ~SEM_Q_PRIORITY) {
                errno = S_semLib_INVALID_OPTION;
                return (SEM_ID)0;
        }

        sem = alloc_sem(options, &xsem_ops);
        if (sem == NULL) {
                errno = S_memLib_NOT_ENOUGH_MEMORY;
                return (SEM_ID)0;
        }

        if (options & SEM_Q_PRIORITY)
                sobj_flags = SYNCOBJ_PRIO;

        sem->u.xsem.value = initval;
        sem->u.xsem.maxvalue = maxval;

        ret = syncobj_init(&sem->u.xsem.sobj, CLOCK_COPPERPLATE, sobj_flags,
                           fnref_put(libvxworks, sem_finalize));
        if (ret) {
                xnfree(sem);
                errno = S_memLib_NOT_ENOUGH_MEMORY;
                return (SEM_ID)0;
        }

        return mainheap_ref(sem, SEM_ID);
}
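/*
 * alloc_xsem() backs the counting/binary semaphore constructors; from
 * the application side the usual path is the classic semLib API
 * (illustrative sketch, assuming semCCreate() funnels into this
 * allocator).
 */
void xsem_example(void)
{
        SEM_ID sid = semCCreate(SEM_Q_PRIORITY, 1);   /* counting, initial 1 */

        if (sid == (SEM_ID)0)
                return;                 /* errno already set by the skin */

        semTake(sid, WAIT_FOREVER);
        semGive(sid);
        semDelete(sid);
}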
ER del_flg(ID flgid)
{
        uiflag_t *flag;
        spl_t s;

        if (xnpod_asynch_p())
                return EN_CTXID;

        if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
                return E_ID;

        xnlock_get_irqsave(&nklock, s);

        flag = xnmap_fetch(ui_flag_idmap, flgid);
        if (!flag) {
                xnlock_put_irqrestore(&nklock, s);
                return E_NOEXS;
        }

        xnmap_remove(ui_flag_idmap, flag->id);
        ui_mark_deleted(flag);
        xnregistry_remove(flag->handle);

        /* Destroy the synch object before releasing the control block
           which embeds it. */
        if (xnsynch_destroy(&flag->synchbase) == XNSYNCH_RESCHED)
                xnpod_schedule();

        xnfree(flag);

        xnlock_put_irqrestore(&nklock, s);

        return E_OK;
}
/**
 * Destroy the @a xnselect structure associated with a file descriptor.
 *
 * Any binding to an @a xnselector block is destroyed.
 *
 * @param select_block pointer to the @a xnselect structure associated
 * with a file descriptor
 */
void xnselect_destroy(struct xnselect *select_block)
{
        xnholder_t *holder;
        int resched = 0;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        while ((holder = getq(&select_block->bindings))) {
                struct xnselect_binding *binding;
                struct xnselector *selector;

                binding = link2binding(holder, link);
                selector = binding->selector;

                __FD_CLR__(binding->bit_index,
                           &selector->fds[binding->type].expected);
                if (!__FD_ISSET__(binding->bit_index,
                                  &selector->fds[binding->type].pending)) {
                        __FD_SET__(binding->bit_index,
                                   &selector->fds[binding->type].pending);
                        if (xnselect_wakeup(selector))
                                resched = 1;
                }
                removeq(&selector->bindings, &binding->slink);

                /* Drop the lock across the deallocation. */
                xnlock_put_irqrestore(&nklock, s);
                xnfree(binding);
                xnlock_get_irqsave(&nklock, s);
        }

        if (resched)
                xnpod_schedule();

        xnlock_put_irqrestore(&nklock, s);
}
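/*
 * Typical lifecycle from a driver's perspective (illustrative sketch,
 * not from the original source: "my_device" is hypothetical, and
 * xnselect_init() is assumed to take the select block as its only
 * argument).
 */
struct my_device {
        struct xnselect read_block;     /* pollable "readable" state */
        /* ... */
};

static void my_device_setup(struct my_device *dev)
{
        xnselect_init(&dev->read_block);
}

static void my_device_discard(struct my_device *dev)
{
        /* Unbinds every selector still attached to this descriptor,
           waking up waiters as needed. */
        xnselect_destroy(&dev->read_block);
}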
static void __heap_post_release(struct xnheap *h)
{
        RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        removeq(heap->rqueue, &heap->rlink);

        if (heap->handle)
                xnregistry_remove(heap->handle);

        if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
                /*
                 * Some task has been woken up as a result of the
                 * deletion: reschedule now.
                 */
                xnpod_schedule();

        xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
        if (heap->cpid)
                xnfree(heap);
#endif
}
int mx_destroy_internal(vrtxmx_t *mx)
{
        int s = xnsynch_destroy(&mx->synchbase);

        xnmap_remove(vrtx_mx_idmap, mx->mid);
        removeq(&vrtx_mx_q, &mx->link);
        xnregistry_remove(mx->handle);
        xnfree(mx);

        return s;
}
static int __sc_tecreate(struct task_struct *curr, struct pt_regs *regs)
{
        xncompletion_t __user *u_completion;
        struct vrtx_arg_bulk bulk;
        int prio, mode, tid, err;
        vrtxtask_t *task;

        if (!__xn_access_ok(curr, VERIFY_READ, __xn_reg_arg1(regs),
                            sizeof(bulk)))
                return -EFAULT;

        if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs),
                            sizeof(tid)))
                return -EFAULT;

        __xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
                            sizeof(bulk));

        /* Suggested task id. */
        tid = bulk.a1;
        /* Task priority. */
        prio = bulk.a2;
        /* Task mode. */
        mode = bulk.a3 | 0x100;

        /* Completion descriptor our parent thread is pending on. */
        u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

        task = xnmalloc(sizeof(*task));
        if (!task) {
                err = ER_TCB;
                goto done;
        }

        xnthread_clear_state(&task->threadbase, XNZOMBIE);

        tid = sc_tecreate_inner(task, NULL, tid, prio, mode, 0, 0, NULL, 0, &err);
        if (tid < 0) {
                if (u_completion)
                        xnshadow_signal_completion(u_completion, err);
        } else {
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                                  &tid, sizeof(tid));
                err = xnshadow_map(&task->threadbase, u_completion);
        }

        if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
                xnfree(task);

done:
        return err;
}
static void ufd_cleanup(pse51_assoc_t *assoc)
{
        pse51_ufd_t *ufd = assoc2ufd(assoc);

#if XENO_DEBUG(POSIX)
        xnprintf("Posix: closing shared memory descriptor %lu.\n",
                 pse51_assoc_key(assoc));
#endif /* XENO_DEBUG(POSIX) */

        pse51_shm_close(ufd->kfd);
        xnfree(ufd);
}

static void umap_cleanup(pse51_assoc_t *assoc)
{
        pse51_umap_t *umap = assoc2umap(assoc);

#if XENO_DEBUG(POSIX)
        xnprintf("Posix: unmapping shared memory 0x%08lx.\n",
                 pse51_assoc_key(assoc));
#endif /* XENO_DEBUG(POSIX) */

        munmap(umap->kaddr, umap->len);
        xnfree(umap);
}

static void usem_cleanup(pse51_assoc_t *assoc)
{
        struct pse51_sem *sem = (struct pse51_sem *)pse51_assoc_key(assoc);
        pse51_usem_t *usem = assoc2usem(assoc);
        nsem_t *nsem = sem2named_sem(sem);

#if XENO_DEBUG(POSIX)
        xnprintf("Posix: closing semaphore \"%s\".\n", nsem->nodebase.name);
#endif /* XENO_DEBUG(POSIX) */

        sem_close(&nsem->descriptor.native_sem);
        xnfree(usem);
}
static void task_finalizer(struct threadobj *thobj)
{
        struct wind_task *task = container_of(thobj, struct wind_task, thobj);

        task->tcb->status |= WIND_DEAD;
        cluster_delobj(&wind_task_table, &task->cobj);
        registry_destroy_file(&task->fsobj);
        __RT(pthread_mutex_destroy(&task->safelock));
        threadobj_destroy(&task->thobj);
        xnfree(task);
}
static int sem_destroy_internal(vrtxsem_t *sem)
{
        int s;

        removeq(&vrtx_sem_q, &sem->link);
        xnmap_remove(vrtx_sem_idmap, sem->semid);
        s = xnsynch_destroy(&sem->synchbase);
        xnregistry_remove(sem->handle);
        vrtx_mark_deleted(sem);
        xnfree(sem);

        return s;
}
int sc_screate(unsigned initval, int opt, int *errp)
{
        int bflags = 0, semid;
        vrtxsem_t *sem;
        spl_t s;

        if (opt & ~1) {
                *errp = ER_IIP;
                return -1;
        }

        sem = (vrtxsem_t *)xnmalloc(sizeof(*sem));
        if (!sem) {
                *errp = ER_NOCB;
                return -1;
        }

        semid = xnmap_enter(vrtx_sem_idmap, -1, sem);
        if (semid < 0) {
                *errp = ER_NOCB;
                xnfree(sem);
                return -1;
        }

        if (opt == 0)
                bflags = XNSYNCH_PRIO;
        else
                bflags = XNSYNCH_FIFO;

        xnsynch_init(&sem->synchbase, bflags | XNSYNCH_DREORD);

        inith(&sem->link);
        sem->semid = semid;
        sem->magic = VRTX_SEM_MAGIC;
        sem->count = initval;

        xnlock_get_irqsave(&nklock, s);
        appendq(&vrtx_sem_q, &sem->link);
        xnlock_put_irqrestore(&nklock, s);

#ifdef CONFIG_XENO_OPT_REGISTRY
        sprintf(sem->name, "sem%d", semid);
        xnregistry_enter(sem->name, sem, &sem->handle, &__sem_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */

        *errp = RET_OK;

        return semid;
}
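/*
 * Caller-side sketch of the VRTX counted semaphore services
 * (illustrative, not from the original source; sc_spend()/sc_spost()/
 * sc_sdelete() are assumed per the VRTX API, with a zero timeout taken
 * to mean an unbounded wait).
 */
void vrtx_sem_example(void)
{
        int err, sid;

        sid = sc_screate(1, 0, &err);   /* priority-ordered, count 1 */
        if (err != RET_OK)
                return;

        sc_spend(sid, 0L, &err);        /* P operation */
        sc_spost(sid, &err);            /* V operation */
        sc_sdelete(sid, 0, &err);
}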
static int __wind_msgq_receive(struct task_struct *curr, struct pt_regs *regs)
{
        xnhandle_t handle = __xn_reg_arg1(regs);
        char tmp_buf[128], *msgbuf;
        wind_msgq_t *msgq;
        int timeout, err;
        unsigned nbytes;

        nbytes = __xn_reg_arg3(regs);
        timeout = __xn_reg_arg4(regs);

        if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg2(regs), nbytes))
                return -EFAULT;

        if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg5(regs),
                            sizeof(nbytes)))
                return -EFAULT;

        msgq = (wind_msgq_t *)xnregistry_fetch(handle);
        if (!msgq)
                return S_objLib_OBJ_ID_ERROR;

        if (nbytes <= sizeof(tmp_buf))
                msgbuf = tmp_buf;
        else {
                msgbuf = (char *)xnmalloc(nbytes);
                if (!msgbuf)
                        return S_memLib_NOT_ENOUGH_MEMORY;
        }

        /* This is sub-optimal since we end up copying the data twice. */
        err = msgQReceive((MSG_Q_ID)msgq, msgbuf, nbytes, timeout);
        if (err != ERROR) {
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
                                  msgbuf, err);
                __xn_copy_to_user(curr, (void __user *)__xn_reg_arg5(regs),
                                  &err, sizeof(err));
                err = 0;
        } else
                err = wind_errnoget();

        if (msgbuf != tmp_buf)
                xnfree(msgbuf);

        return err;
}
static int __wind_msgq_send(struct pt_regs *regs)
{
        xnhandle_t handle = __xn_reg_arg1(regs);
        char tmp_buf[128], *msgbuf;
        wind_msgq_t *msgq;
        int timeout, prio;
        unsigned nbytes;
        STATUS err;

        nbytes = __xn_reg_arg3(regs);
        timeout = __xn_reg_arg4(regs);
        prio = __xn_reg_arg5(regs);

        if (timeout != NO_WAIT && !xnpod_primary_p())
                return -EPERM;

        msgq = (wind_msgq_t *)xnregistry_fetch(handle);
        if (!msgq)
                return S_objLib_OBJ_ID_ERROR;

        if (nbytes > msgq->msg_length)
                return S_msgQLib_INVALID_MSG_LENGTH;

        if (nbytes <= sizeof(tmp_buf))
                msgbuf = tmp_buf;
        else {
                msgbuf = (char *)xnmalloc(nbytes);
                if (!msgbuf)
                        return S_memLib_NOT_ENOUGH_MEMORY;
        }

        /* This is sub-optimal since we end up copying the data twice. */
        if (__xn_safe_copy_from_user(msgbuf, (void __user *)__xn_reg_arg2(regs),
                                     nbytes))
                err = -EFAULT;
        else {
                if (msgQSend((MSG_Q_ID)msgq, msgbuf, nbytes, timeout, prio)
                    == ERROR)
                        err = wind_errnoget();
                else
                        err = 0;
        }

        if (msgbuf != tmp_buf)
                xnfree(msgbuf);

        return err;
}
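/*
 * Application-side round trip through the two wrappers above
 * (illustrative sketch using the documented msgQLib prototypes).
 */
void msgq_example(void)
{
        MSG_Q_ID q = msgQCreate(8, 64, MSG_Q_FIFO);   /* 8 msgs, <= 64 bytes */
        char out[] = "ping", in[64];
        int n;

        if (q == (MSG_Q_ID)0)
                return;

        msgQSend(q, out, sizeof(out), NO_WAIT, MSG_PRI_NORMAL);
        n = msgQReceive(q, in, sizeof(in), WAIT_FOREVER);   /* n = byte count */
        if (n == ERROR)
                /* inspect errno for the cause */;

        msgQDelete(q);
}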
int sc_mcreate(unsigned int opt, int *errp)
{
        int bflags, mid;
        vrtxmx_t *mx;
        spl_t s;

        switch (opt) {
        case 0:
                bflags = XNSYNCH_PRIO;
                break;
        case 1:
                bflags = XNSYNCH_FIFO;
                break;
        case 2:
                bflags = XNSYNCH_PRIO | XNSYNCH_PIP;
                break;
        default:
                *errp = ER_IIP;
                return 0;
        }

        mx = xnmalloc(sizeof(*mx));
        if (mx == NULL) {
                *errp = ER_NOCB;
                return -1;
        }

        mid = xnmap_enter(vrtx_mx_idmap, -1, mx);
        if (mid < 0) {
                *errp = ER_NOCB;
                xnfree(mx);
                return -1;
        }

        inith(&mx->link);
        mx->mid = mid;

        xnsynch_init(&mx->synchbase,
                     bflags | XNSYNCH_DREORD | XNSYNCH_OWNER, NULL);

        xnlock_get_irqsave(&nklock, s);
        appendq(&vrtx_mx_q, &mx->link);
        xnlock_put_irqrestore(&nklock, s);

        sprintf(mx->name, "mx%d", mid);
        xnregistry_enter(mx->name, mx, &mx->handle, &__mutex_pnode.node);

        *errp = RET_OK;

        return mid;
}
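/*
 * Matching caller-side sketch (illustrative, not from the original
 * source; sc_mpend()/sc_mpost()/sc_mdelete() are assumed per the VRTX
 * API).
 */
void vrtx_mx_example(void)
{
        int err, mid;

        mid = sc_mcreate(2, &err);      /* priority inheritance variant */
        if (err != RET_OK)
                return;

        sc_mpend(mid, 0L, &err);        /* lock */
        sc_mpost(mid, &err);            /* unlock */
        sc_mdelete(mid, 0, &err);
}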
/**
 * Initialize an unnamed semaphore.
 *
 * This service initializes the semaphore @a sm, with the value @a value.
 *
 * This service fails if @a sm is already initialized or is a named semaphore.
 *
 * @param sm the semaphore to be initialized;
 *
 * @param pshared if zero, means that the new semaphore may only be used by
 * threads in the same process as the thread calling sem_init(); if non-zero,
 * means that the new semaphore may be used by any thread that has access to
 * the memory where the semaphore is allocated.
 *
 * @param value the semaphore initial value.
 *
 * @retval 0 on success,
 * @retval -1 with @a errno set if:
 * - EBUSY, the semaphore @a sm was already initialized;
 * - ENOSPC, insufficient memory exists in the system heap to initialize the
 *   semaphore, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
 * - EINVAL, the @a value argument exceeds @a SEM_VALUE_MAX.
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_init.html">
 * Specification.</a>
 *
 */
int sem_init(sem_t *sm, int pshared, unsigned value)
{
        struct __shadow_sem *shadow = &((union __xeno_sem *)sm)->shadow_sem;
        pse51_sem_t *sem;
        xnqueue_t *semq;
        int err;
        spl_t s;

        sem = (pse51_sem_t *)xnmalloc(sizeof(pse51_sem_t));
        if (!sem) {
                err = ENOSPC;
                goto error;
        }

        xnlock_get_irqsave(&nklock, s);

        semq = &pse51_kqueues(pshared)->semq;

        /* If the shadow looks initialized, check whether the semaphore
           it points at is still queued, i.e. really alive. */
        if (shadow->magic == PSE51_SEM_MAGIC
            || shadow->magic == PSE51_NAMED_SEM_MAGIC
            || shadow->magic == ~PSE51_NAMED_SEM_MAGIC) {
                xnholder_t *holder;

                for (holder = getheadq(semq); holder;
                     holder = nextq(semq, holder))
                        if (holder == &shadow->sem->link) {
                                err = EBUSY;
                                goto err_lock_put;
                        }
        }

        err = pse51_sem_init_inner(sem, pshared, value);
        if (err)
                goto err_lock_put;

        shadow->magic = PSE51_SEM_MAGIC;
        shadow->sem = sem;
        xnlock_put_irqrestore(&nklock, s);

        return 0;

err_lock_put:
        xnlock_put_irqrestore(&nklock, s);
        xnfree(sem);
error:
        thread_set_errno(err);
        return -1;
}
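/*
 * Usage sketch (illustrative, not from the original source): a
 * process-private unnamed semaphore used as a binary lock.
 */
static sem_t sem;

void sem_example(void)
{
        if (sem_init(&sem, 0, 1) == -1)
                return;                 /* errno holds the cause */

        sem_wait(&sem);
        /* ... critical section ... */
        sem_post(&sem);

        sem_destroy(&sem);
}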
static void pse51_shm_put(pse51_shm_t *shm, unsigned dec)
{
        spl_t s;

        xnlock_get_irqsave(&nklock, s);

        while (dec--)
                pse51_node_put(&shm->nodebase);

        if (pse51_node_removed_p(&shm->nodebase)) {
                /* pse51_shm_destroy() must be called nklock locked. */
                pse51_shm_destroy(shm, 0);
                xnlock_put_irqrestore(&nklock, s);
                xnfree(shm);
        } else
                xnlock_put_irqrestore(&nklock, s);
}
static void flush_backtrace(struct backtrace_data *btd)
{
        struct error_frame *ef, *nef;

        /* Locking order must be __printlock, then btlock. */
        write_lock(&btd->lock);

        for (ef = btd->inner; ef; ef = nef) {
                nef = ef->next;
                xnfree(ef);
        }

        btd->inner = NULL;
        write_unlock(&btd->lock);
}
int rt_cond_create(RT_COND *cond, const char *name)
{
        pthread_mutexattr_t mattr;
        struct alchemy_cond *ccb;
        pthread_condattr_t cattr;
        struct service svc;

        if (threadobj_async_p())
                return -EPERM;

        COPPERPLATE_PROTECT(svc);

        ccb = xnmalloc(sizeof(*ccb));
        if (ccb == NULL) {
                COPPERPLATE_UNPROTECT(svc);
                return -ENOMEM;
        }

        strncpy(ccb->name, name, sizeof(ccb->name));
        ccb->name[sizeof(ccb->name) - 1] = '\0';
        ccb->nwaiters = 0;

        if (cluster_addobj(&alchemy_cond_table, ccb->name, &ccb->cobj)) {
                xnfree(ccb);
                COPPERPLATE_UNPROTECT(svc);
                return -EEXIST;
        }

        __RT(pthread_mutexattr_init(&mattr));
        __RT(pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT));
        __RT(pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute));
        __RT(pthread_mutex_init(&ccb->safe, &mattr));
        __RT(pthread_mutexattr_destroy(&mattr));

        __RT(pthread_condattr_init(&cattr));
        __RT(pthread_condattr_setpshared(&cattr, mutex_scope_attribute));
        __RT(pthread_condattr_setclock(&cattr, CLOCK_COPPERPLATE));
        __RT(pthread_cond_init(&ccb->cond, &cattr));
        __RT(pthread_condattr_destroy(&cattr));

        ccb->magic = cond_magic;
        cond->handle = mainheap_ref(ccb, uintptr_t);

        COPPERPLATE_UNPROTECT(svc);

        return 0;
}
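/*
 * Caller-side sketch (illustrative, not from the original source),
 * using the documented Alchemy services; with no signaller around, the
 * wait below simply times out after 1 ms.
 */
static RT_COND cond;
static RT_MUTEX lock;

void cond_example(void)
{
        rt_mutex_create(&lock, "demo-lock");
        rt_cond_create(&cond, "demo-cond");

        rt_mutex_acquire(&lock, TM_INFINITE);
        /* Atomically releases the mutex while sleeping on the condvar. */
        rt_cond_wait(&cond, &lock, 1000000ULL);   /* 1 ms, in ns */
        rt_mutex_release(&lock);

        rt_cond_delete(&cond);
        rt_mutex_delete(&lock);
}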
int rt_alarm_create(RT_ALARM *alarm, const char *name)
{
        struct trank_alarm_wait *aw;
        pthread_mutexattr_t mattr;
        pthread_condattr_t cattr;
        int ret;

        aw = xnmalloc(sizeof(*aw));
        if (aw == NULL)
                return -ENOMEM;

        aw->alarm_pulses = 0;

        pthread_mutexattr_init(&mattr);
        pthread_mutexattr_settype(&mattr, mutex_type_attribute);
        pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
        pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
        ret = __bt(-__RT(pthread_mutex_init(&aw->lock, &mattr)));
        pthread_mutexattr_destroy(&mattr);
        if (ret)
                goto fail_lock;

        pthread_condattr_init(&cattr);
        pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
        ret = __bt(-pthread_cond_init(&aw->event, &cattr));
        pthread_condattr_destroy(&cattr);
        if (ret)
                goto fail_cond;

        ret = __CURRENT(rt_alarm_create(alarm, name, trank_alarm_handler, aw));
        if (ret)
                goto fail_alarm;

        return 0;
fail_alarm:
        __RT(pthread_cond_destroy(&aw->event));
fail_cond:
        __RT(pthread_mutex_destroy(&aw->lock));
fail_lock:
        xnfree(aw);
        return ret;
}
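/*
 * Usage sketch (illustrative, not from the original source): the
 * cond/mutex pair built above is what lets the Transition Kit emulate
 * the Xenomai 2.x rt_alarm_wait() service on top of Alchemy alarms.
 */
static RT_ALARM alarm;

void alarm_example(void)
{
        if (rt_alarm_create(&alarm, "tick"))
                return;

        /* First shot after 1 ms, then one pulse every 1 ms (ns units). */
        rt_alarm_start(&alarm, 1000000ULL, 1000000ULL);

        rt_alarm_wait(&alarm);          /* blocks until the next pulse */
        rt_alarm_delete(&alarm);
}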