/*
 * cre_flg - create a uITRON event flag object.
 *
 * @flgid:   requested flag ID (must be in ]0, uITRON_MAX_FLAGID]).
 * @pk_cflg: creation packet carrying exinf, flgatr and the initial
 *           flag pattern (iflgptn).
 *
 * Returns E_OK on success, EN_CTXID when called from an asynchronous
 * (interrupt) context, E_ID for an out-of-range ID, E_NOMEM when the
 * control block cannot be allocated, or E_OBJ when the ID slot is
 * already in use.
 */
ER cre_flg(ID flgid, T_CFLG *pk_cflg)
{
	uiflag_t *flag;

	if (xnpod_asynch_p())
		return EN_CTXID;

	/*
	 * BUGFIX: validate against the event-flag ID limit. The check
	 * previously used uITRON_MAX_MBXID (the mailbox limit), an
	 * obvious copy-paste slip from cre_mbx().
	 */
	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	flag = xnmalloc(sizeof(*flag));
	if (!flag)
		return E_NOMEM;

	flgid = xnmap_enter(ui_flag_idmap, flgid, flag);
	if (flgid <= 0) {
		/* Slot already taken (or map full); undo the allocation. */
		xnfree(flag);
		return E_OBJ;
	}

	xnsynch_init(&flag->synchbase, XNSYNCH_FIFO, NULL);
	flag->id = flgid;
	flag->exinf = pk_cflg->exinf;
	flag->flgatr = pk_cflg->flgatr;
	flag->flgvalue = pk_cflg->iflgptn;
	sprintf(flag->name, "flg%d", flgid);
	xnregistry_enter(flag->name, flag, &flag->handle, &__flag_pnode.node);

	/*
	 * Publish the magic last, after a memory barrier, so other CPUs
	 * cannot observe a valid magic on a partially-initialized object.
	 */
	xnarch_memory_barrier();
	flag->magic = uITRON_FLAG_MAGIC;

	return E_OK;
}
/*
 * cre_mbx - create a uITRON mailbox object.
 *
 * @mbxid:   requested mailbox ID (must be in ]0, uITRON_MAX_MBXID]).
 * @pk_cmbx: creation packet carrying exinf, mbxatr and the ring
 *           buffer depth (bufcnt).
 *
 * Returns E_OK on success, EN_CTXID from asynchronous context, E_ID
 * for an out-of-range ID, E_PAR for a non-positive buffer count,
 * E_RSATR when the unsupported TA_MPRI attribute is requested,
 * E_NOMEM on allocation failure, or E_OBJ when the ID is in use.
 */
ER cre_mbx(ID mbxid, T_CMBX *pk_cmbx)
{
	uimbx_t *mbx;
	T_MSG **ring;

	if (xnpod_asynch_p())
		return EN_CTXID;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	if (pk_cmbx->bufcnt <= 0)
		return E_PAR;

	/* Message-priority ordering (TA_MPRI) is not supported. */
	if (pk_cmbx->mbxatr & TA_MPRI)
		return E_RSATR;

	mbx = xnmalloc(sizeof(*mbx));
	if (!mbx)
		return E_NOMEM;

	ring = xnmalloc(sizeof(T_MSG *) * pk_cmbx->bufcnt);
	if (!ring) {
		xnfree(mbx);
		return E_NOMEM;
	}

	mbxid = xnmap_enter(ui_mbx_idmap, mbxid, mbx);
	if (mbxid <= 0) {
		/*
		 * BUGFIX: release the ring buffer as well; it was
		 * previously leaked on this error path (only the
		 * control block was freed).
		 */
		xnfree(ring);
		xnfree(mbx);
		return E_OBJ;
	}

	/* TA_TPRI selects priority-ordered waiters, FIFO otherwise. */
	xnsynch_init(&mbx->synchbase,
		     (pk_cmbx->mbxatr & TA_TPRI) ? XNSYNCH_PRIO : XNSYNCH_FIFO);
	mbx->id = mbxid;
	mbx->exinf = pk_cmbx->exinf;
	mbx->mbxatr = pk_cmbx->mbxatr;
	mbx->bufcnt = pk_cmbx->bufcnt;
	mbx->rdptr = 0;
	mbx->wrptr = 0;
	mbx->mcount = 0;
	mbx->ring = ring;

#ifdef CONFIG_XENO_OPT_REGISTRY
	sprintf(mbx->name, "mbx%d", mbxid);
	xnregistry_enter(mbx->name, mbx, &mbx->handle, &__mbx_pnode);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	/*
	 * Publish the magic last, after a memory barrier, so the object
	 * only becomes discoverable once fully initialized.
	 */
	xnarch_memory_barrier();
	mbx->magic = uITRON_MBX_MAGIC;

	return E_OK;
}
int sc_screate(unsigned initval, int opt, int *errp) { int bflags = 0, semid; vrtxsem_t *sem; spl_t s; if (opt & ~1) { *errp = ER_IIP; return -1; } sem = (vrtxsem_t *)xnmalloc(sizeof(*sem)); if (!sem) { *errp = ER_NOCB; return -1; } semid = xnmap_enter(vrtx_sem_idmap, -1, sem); if (semid < 0) { *errp = ER_NOCB; xnfree(sem); return -1; } if (opt == 0) bflags = XNSYNCH_PRIO; else bflags = XNSYNCH_FIFO; xnsynch_init(&sem->synchbase, bflags | XNSYNCH_DREORD); inith(&sem->link); sem->semid = semid; sem->magic = VRTX_SEM_MAGIC; sem->count = initval; xnlock_get_irqsave(&nklock, s); appendq(&vrtx_sem_q, &sem->link); xnlock_put_irqrestore(&nklock, s); #ifdef CONFIG_XENO_OPT_REGISTRY sprintf(sem->name, "sem%d", semid); xnregistry_enter(sem->name, sem, &sem->handle, &__sem_pnode); #endif /* CONFIG_XENO_OPT_REGISTRY */ *errp = RET_OK; return semid; }
int sc_mcreate(unsigned int opt, int *errp) { int bflags, mid; vrtxmx_t *mx; spl_t s; switch (opt) { case 0: bflags = XNSYNCH_PRIO; break; case 1: bflags = XNSYNCH_FIFO; break; case 2: bflags = XNSYNCH_PRIO | XNSYNCH_PIP; break; default: *errp = ER_IIP; return 0; } mx = xnmalloc(sizeof(*mx)); if (mx == NULL) { *errp = ER_NOCB; return -1; } mid = xnmap_enter(vrtx_mx_idmap, -1, mx); if (mid < 0) { xnfree(mx); return -1; } inith(&mx->link); mx->mid = mid; xnsynch_init(&mx->synchbase, bflags | XNSYNCH_DREORD | XNSYNCH_OWNER, NULL); xnlock_get_irqsave(&nklock, s); appendq(&vrtx_mx_q, &mx->link); xnlock_put_irqrestore(&nklock, s); sprintf(mx->name, "mx%d", mid); xnregistry_enter(mx->name, mx, &mx->handle, &__mutex_pnode.node); *errp = RET_OK; return mid; }