u_long ev_receive(u_long events, u_long flags, u_long timeout, u_long *events_r)
{
	u_long err = SUCCESS;
	psosevent_t *evgroup;
	psostask_t *task;
	spl_t s;

	if (xnpod_unblockable_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	task = psos_current_task();
	evgroup = &task->evgroup;

	if (!events) {
		*events_r = evgroup->events;
		goto unlock_and_exit;
	}

	if (flags & EV_NOWAIT) {
		u_long bits = (evgroup->events & events);
		evgroup->events &= ~events;
		*events_r = bits;

		if (flags & EV_ANY) {
			if (!bits)
				err = ERR_NOEVS;
		} else if (bits != events)
			err = ERR_NOEVS;

		goto unlock_and_exit;
	}

	if (((flags & EV_ANY) && (events & evgroup->events) != 0) ||
	    (!(flags & EV_ANY) && ((events & evgroup->events) == events))) {
		*events_r = (evgroup->events & events);
		evgroup->events &= ~events;
		goto unlock_and_exit;
	}

	task->waitargs.evgroup.flags = flags;
	task->waitargs.evgroup.events = events;
	xnsynch_sleep_on(&evgroup->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = -EINTR;
	else if (xnthread_test_info(&task->threadbase, XNTIMEO)) {
		err = ERR_TIMEOUT;
		*events_r = evgroup->events;
	} else
		*events_r = task->waitargs.evgroup.events;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
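/*
 * Usage sketch (not part of the original source): a pSOS task waiting
 * for either of two events with a finite timeout. The event bit names
 * below are illustrative, not taken from the pSOS API.
 */
#define EV_WORK	0x1	/* hypothetical event bits */
#define EV_STOP	0x2

static u_long wait_for_work(void)
{
	u_long got, err;

	/* EV_ANY: return as soon as ev_send() posts one of the bits;
	   100 is the timeout expressed in clock ticks. */
	err = ev_receive(EV_WORK | EV_STOP, EV_ANY, 100, &got);
	if (err == ERR_TIMEOUT)
		got = 0;	/* nothing arrived in time */

	return err;
}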
static int __wind_wd_wait(struct task_struct *curr, struct pt_regs *regs)
{
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	if (!__xn_access_ok(curr, VERIFY_WRITE, __xn_reg_arg1(regs), sizeof(wd->wdt)))
		return -EFAULT;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(curr);

	if (xnthread_base_priority(&pTcb->threadbase) != XNCORE_IRQ_PRIO)
		/* Renice the waiter above all regular tasks if needed. */
		xnpod_renice_thread(&pTcb->threadbase, XNCORE_IRQ_PRIO);

	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}

	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg1(regs),
				  &wd->wdt, sizeof(wd->wdt));
		return 0;
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __wind_wd_wait(struct pt_regs *regs)
{
	union xnsched_policy_param param;
	xnholder_t *holder;
	wind_rholder_t *rh;
	WIND_TCB *pTcb;
	wind_wd_t *wd;
	int err = 0;
	spl_t s;

	rh = wind_get_rholder();

	xnlock_get_irqsave(&nklock, s);

	pTcb = __wind_task_current(current);

	if (xnthread_base_priority(&pTcb->threadbase) != XNSCHED_IRQ_PRIO) {
		/* Boost the waiter above all regular tasks if needed. */
		param.rt.prio = XNSCHED_IRQ_PRIO;
		xnpod_set_thread_schedparam(&pTcb->threadbase,
					    &xnsched_class_rt, &param);
	}

	if (!emptyq_p(&rh->wdpending))
		goto pull_event;

	xnsynch_sleep_on(&rh->wdsynch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(&pTcb->threadbase, XNBREAK)) {
		err = -EINTR;	/* Unblocked. */
		goto unlock_and_exit;
	}

	if (xnthread_test_info(&pTcb->threadbase, XNRMID)) {
		err = -EIDRM;	/* Watchdog deleted while pending. */
		goto unlock_and_exit;
	}

pull_event:

	holder = getq(&rh->wdpending);

	if (holder) {
		wd = link2wind_wd(holder);
		/* We need the following to mark the watchdog as unqueued. */
		inith(holder);
		xnlock_put_irqrestore(&nklock, s);
		return __xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					      &wd->wdt, sizeof(wd->wdt));
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
static int __ui_twai_flg(struct pt_regs *regs)
{
	UINT flgptn, waiptn, wfmode;
	TMO tmout;
	ID flgid;
	ER err;

	flgid = __xn_reg_arg2(regs);
	waiptn = __xn_reg_arg3(regs);
	wfmode = __xn_reg_arg4(regs);
	tmout = __xn_reg_arg5(regs);

	err = twai_flg(&flgptn, flgid, waiptn, wfmode, tmout);

	if (err == E_OK) {
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					   &flgptn, sizeof(flgptn)))
			return -EFAULT;
	} else if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
			   xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread();
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else /* i.e. priority-sorted */
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
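/*
 * Usage sketch (not part of the original source): the bits returned by
 * xnsynch_sleep_on() tell the caller why the sleep ended, and the skins
 * above translate them into their own error codes. The wait object type
 * below is hypothetical; its synch must be created without
 * XNSYNCH_OWNER, since ownership-tracking objects go through
 * xnsynch_acquire() instead. Note that the skins call this helper with
 * nklock already held, which works because the nucleus lock nests on
 * the same CPU.
 */
struct my_waitobj {
	struct xnsynch synchbase;	/* non-owner synch object */
};

static int my_wait(struct my_waitobj *obj, xnticks_t timeout)
{
	xnflags_t info;

	info = xnsynch_sleep_on(&obj->synchbase, timeout, XN_RELATIVE);

	if (info & XNBREAK)
		return -EINTR;		/* forcibly unblocked */
	if (info & XNRMID)
		return -EIDRM;		/* wait object deleted */
	if (info & XNTIMEO)
		return -ETIMEDOUT;	/* timeout elapsed */

	return 0;			/* normal wakeup */
}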
int __rtai_task_suspend(RT_TASK *task)
{
	int err = 0;
	spl_t s;

	if (!task) {
		if (!xnpod_primary_p())
			return -EINVAL;

		task = rtai_current_task();
	}

	xnlock_get_irqsave(&nklock, s);

	task = rtai_h2obj_validate(task, RTAI_TASK_MAGIC, RT_TASK);

	if (!task) {
		err = -EINVAL;
		goto unlock_and_exit;
	}

	if (task->suspend_depth++ == 0) {
		xnpod_suspend_thread(&task->thread_base, XNSUSP,
				     XN_INFINITE, XN_RELATIVE, NULL);
		if (xnthread_test_info(&task->thread_base, XNBREAK))
			err = -EINTR;
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
void sc_mpend(int mid, unsigned long timeout, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxtask_t *task;
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnthread_try_grab(cur, &mx->synchbase))
		goto unlock_and_exit;

	if (xnsynch_owner(&mx->synchbase) == cur)
		goto unlock_and_exit;

	task = thread2vrtxtask(cur);
	task->vrtxtcb.TCBSTAT = TBSMUTEX;

	if (timeout)
		task->vrtxtcb.TCBSTAT |= TBSDELAY;

	xnsynch_acquire(&mx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(cur, XNBREAK))
		*errp = -EINTR;
	else if (xnthread_test_info(cur, XNRMID))
		*errp = ER_DEL;	/* Mutex deleted while pending. */
	else if (xnthread_test_info(cur, XNTIMEO))
		*errp = ER_TMO;	/* Timeout. */

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
static int __sc_delay(struct task_struct *curr, struct pt_regs *regs)
{
	vrtxtask_t *task = vrtx_current_task();

	sc_delay(__xn_reg_arg1(regs));

	if (xnthread_test_info(&task->threadbase, XNBREAK))
		return -EINTR;

	return 0;
}
void sc_spend(int semid, long timeout, int *errp)
{
	vrtxtask_t *task;
	vrtxsem_t *sem;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	sem = xnmap_fetch(vrtx_sem_idmap, semid);
	if (sem == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (sem->count > 0)
		sem->count--;
	else {
		if (xnpod_unblockable_p()) {
			*errp = -EPERM;
			goto unlock_and_exit;
		}

		task = vrtx_current_task();
		task->vrtxtcb.TCBSTAT = TBSSEMA;

		if (timeout)
			task->vrtxtcb.TCBSTAT |= TBSDELAY;

		xnsynch_sleep_on(&sem->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(&task->threadbase, XNBREAK))
			*errp = -EINTR;
		else if (xnthread_test_info(&task->threadbase, XNRMID))
			*errp = ER_DEL;	/* Semaphore deleted while pending. */
		else if (xnthread_test_info(&task->threadbase, XNTIMEO))
			*errp = ER_TMO;	/* Timeout. */
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
u_long t_suspend(u_long tid)
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	if (tid == 0) {
		if (xnpod_unblockable_p())
			return -EPERM;

		xnpod_suspend_self();

		if (xnthread_test_info(&psos_current_task()->threadbase, XNBREAK))
			return -EINTR;

		return SUCCESS;
	}

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);
	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	if (xnthread_test_state(&task->threadbase, XNSUSP)) {
		err = ERR_SUSP;	/* Task already suspended. */
		goto unlock_and_exit;
	}

	xnpod_suspend_thread(&task->threadbase, XNSUSP,
			     XN_INFINITE, XN_RELATIVE, NULL);

	if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = -EINTR;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/**
 * Sleep some amount of time.
 *
 * This service suspends the calling thread until either the wakeup time
 * specified by @a rqtp is reached or a signal is delivered to the caller.
 * If the flag TIMER_ABSTIME is set in the @a flags argument, the wakeup
 * time is specified as an absolute value of the clock @a clock_id. If the
 * flag TIMER_ABSTIME is not set, the wakeup time is specified as a time
 * interval.
 *
 * If this service is interrupted by a signal, the flag TIMER_ABSTIME is
 * not set, and @a rmtp is not @a NULL, the time remaining until the
 * specified wakeup time is returned at the address @a rmtp.
 *
 * The resolution of this service is one system clock tick.
 *
 * @param clock_id clock identifier, either CLOCK_REALTIME,
 * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW.
 *
 * @param flags one of:
 * - 0, meaning that the wakeup time @a rqtp is a time interval;
 * - TIMER_ABSTIME, meaning that the wakeup time is an absolute value of
 *   the clock @a clock_id.
 *
 * @param rqtp address of the wakeup time.
 *
 * @param rmtp address where the remaining time before wakeup will be
 * stored if the service is interrupted by a signal.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, the specified clock is unsupported;
 * - EINVAL, the specified wakeup time is invalid;
 * - EINTR, this service was interrupted by a signal.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_nanosleep.html">
 * Specification.</a>
 *
 */
int clock_nanosleep(clockid_t clock_id, int flags,
		    const struct timespec *rqtp, struct timespec *rmtp)
{
	xnthread_t *cur;
	spl_t s;
	int err = 0;

	if (xnpod_unblockable_p())
		return EPERM;

	if (clock_id != CLOCK_MONOTONIC &&
	    clock_id != CLOCK_MONOTONIC_RAW &&
	    clock_id != CLOCK_REALTIME)
		return ENOTSUP;

	if ((unsigned long)rqtp->tv_nsec >= ONE_BILLION)
		return EINVAL;

	if (flags & ~TIMER_ABSTIME)
		return EINVAL;

	cur = xnpod_current_thread();

	xnlock_get_irqsave(&nklock, s);

	thread_cancellation_point(cur);

	xnpod_suspend_thread(cur, XNDELAY, ts2ticks_ceil(rqtp) + 1,
			     clock_flag(flags, clock_id), NULL);

	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNBREAK)) {
		if (flags == 0 && rmtp) {
			xnticks_t now, expiry;
			xnsticks_t rem;

			now = clock_get_ticks(clock_id);
			expiry = xntimer_get_date(&cur->rtimer);
			xnlock_put_irqrestore(&nklock, s);
			rem = expiry - now;

			ticks2ts(rmtp, rem > 0 ? rem : 0);
		} else
			xnlock_put_irqrestore(&nklock, s);

		return EINTR;
	}

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
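/*
 * Usage sketch (not part of the original source): a relative 500 ms
 * sleep on CLOCK_MONOTONIC that resumes after signal interruptions,
 * relying only on the documented semantics above. Note that
 * clock_nanosleep() returns the error number directly rather than
 * setting errno.
 */
#include <time.h>
#include <errno.h>

static void sleep_half_second(void)
{
	struct timespec rqt = { .tv_sec = 0, .tv_nsec = 500000000 };
	struct timespec rmt;
	int err;

	do {
		err = clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, &rmt);
		if (err == EINTR)
			rqt = rmt;	/* continue with the remaining time */
	} while (err == EINTR);
}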
/*
 * Detect when a thread is about to sleep on a synchronization
 * object currently owned by someone running in secondary mode.
 */
void xnsynch_detect_relaxed_owner(struct xnsynch *synch, struct xnthread *sleeper)
{
	if (xnthread_test_state(sleeper, XNTRAPSW) &&
	    !xnthread_test_info(sleeper, XNSWREP) &&
	    xnthread_test_state(synch->owner, XNRELAX)) {
		xnthread_set_info(sleeper, XNSWREP);
		xnshadow_send_sig(sleeper, SIGDEBUG,
				  SIGDEBUG_MIGRATE_PRIOINV, 1);
	} else
		xnthread_clear_info(sleeper, XNSWREP);
}
static int __ui_slp_tsk(struct pt_regs *regs)
{
	ER err = slp_tsk();

	if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
static int __ui_wai_sem(struct pt_regs *regs)
{
	ID semid = __xn_reg_arg1(regs);
	ER err;

	err = wai_sem(semid);

	if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
static int __ui_tslp_tsk(struct pt_regs *regs)
{
	TMO tmout = __xn_reg_arg1(regs);
	ER err;

	err = tslp_tsk(tmout);

	if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
static inline int sem_timedwait_internal(struct __shadow_sem *shadow,
					 int timed, xnticks_t to)
{
	pse51_sem_t *sem = shadow->sem;
	xnthread_t *cur;
	int err;

	if (xnpod_unblockable_p())
		return EPERM;

	cur = xnpod_current_thread();

	if ((err = sem_trywait_internal(shadow)) != EAGAIN)
		return err;

	thread_cancellation_point(cur);

	if (timed)
		xnsynch_sleep_on(&sem->synchbase, to, XN_REALTIME);
	else
		xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);

	/* Handle cancellation requests. */
	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNRMID))
		return EINVAL;

	if (xnthread_test_info(cur, XNBREAK))
		return EINTR;

	if (xnthread_test_info(cur, XNTIMEO))
		return ETIMEDOUT;

	return 0;
}
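/*
 * Usage sketch (not part of the original source): the helper above
 * backs the standard sem_wait()/sem_timedwait() pair, so a caller sees
 * plain POSIX semantics. Nothing Xenomai-specific is assumed here; the
 * timeout is an absolute CLOCK_REALTIME date, matching the XN_REALTIME
 * mode used in the timed branch.
 */
#include <semaphore.h>
#include <time.h>
#include <errno.h>

static int take_within(sem_t *sem, time_t secs)
{
	struct timespec abs_timeout;

	clock_gettime(CLOCK_REALTIME, &abs_timeout);
	abs_timeout.tv_sec += secs;	/* absolute CLOCK_REALTIME deadline */

	if (sem_timedwait(sem, &abs_timeout) == 0)
		return 0;

	return errno;	/* EINTR, ETIMEDOUT, EINVAL, ... */
}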
int msgQReceive(MSG_Q_ID qid, char *buf, UINT bytes, int to)
{
	xnticks_t timeout;
	wind_msgq_t *queue;
	wind_msg_t *msg;
	xnthread_t *thread;
	wind_task_t *task;
	spl_t s;

	error_check(buf == NULL, 0, return ERROR);

	check_NOT_ISR_CALLABLE(return ERROR);

	xnlock_get_irqsave(&nklock, s);

	check_OBJ_ID_ERROR(qid, wind_msgq_t, queue, WIND_MSGQ_MAGIC, goto error);

	if ((msg = unqueue_msg(queue)) == NULL) {
		/* message queue is empty */
		error_check(to == NO_WAIT || xnpod_unblockable_p(),
			    S_objLib_OBJ_UNAVAILABLE, goto error);

		if (to == WAIT_FOREVER)
			timeout = XN_INFINITE;
		else
			timeout = to;

		task = wind_current_task();
		thread = &task->threadbase;
		task->rcv_buf = buf;
		task->rcv_bytes = bytes;

		xnsynch_sleep_on(&queue->synchbase, timeout, XN_RELATIVE);

		error_check(xnthread_test_info(thread, XNBREAK),
			    -EINTR, goto error);
		error_check(xnthread_test_info(thread, XNRMID),
			    S_objLib_OBJ_DELETED, goto error);
		error_check(xnthread_test_info(thread, XNTIMEO),
			    S_objLib_OBJ_TIMEOUT, goto error);

		bytes = task->rcv_bytes;
	} else {
static int __ui_rcv_msg(struct pt_regs *regs)
{
	ID mbxid = __xn_reg_arg2(regs);
	T_MSG *pk_msg;
	ER err;

	err = rcv_msg(&pk_msg, mbxid);

	if (err == E_OK) {
		if (__xn_safe_copy_to_user((void __user *)__xn_reg_arg1(regs),
					   &pk_msg, sizeof(pk_msg)))
			return -EFAULT;
	} else if (err == E_RLWAI) {
		uitask_t *task = ui_current_task();
		if (!xnthread_test_info(&task->threadbase, uITRON_TASK_RLWAIT))
			err = -EINTR;
	}

	return err;
}
/* Must be called with nklock locked, interrupts off. */
static STATUS semm_take(wind_sem_t *sem, xnticks_t to)
{
	xnthread_t *cur = xnpod_current_thread();

	if (xnsynch_owner(&sem->synchbase) == NULL) {
		xnsynch_set_owner(&sem->synchbase, cur);
		goto grab_sem;
	}

	if (xnsynch_owner(&sem->synchbase) == cur) {
		sem->count++;
		return OK;
	}

	error_check(to == XN_NONBLOCK, S_objLib_OBJ_UNAVAILABLE, return ERROR);

	xnsynch_acquire(&sem->synchbase, to, XN_RELATIVE);

	error_check(xnthread_test_info(cur, XNBREAK), -EINTR, return ERROR);
	error_check(xnthread_test_info(cur, XNRMID),
		    S_objLib_OBJ_DELETED, return ERROR);
	error_check(xnthread_test_info(cur, XNTIMEO),
		    S_objLib_OBJ_TIMEOUT, return ERROR);

grab_sem:
	/*
	 * xnsynch_acquire() might have stolen the resource, so we
	 * need to put our internal data in sync.
	 */
	sem->count = 1;

	if (xnsynch_test_flags(&sem->synchbase, WIND_SEM_DEL_SAFE))
		taskSafeInner(cur);

	return OK;
}
/* Must be called with nklock locked, interrupts off. */
static STATUS semb_take(wind_sem_t *sem, xnticks_t to)
{
	xnthread_t *thread = xnpod_current_thread();

	if (sem->count > 0)
		--sem->count;
	else {
		error_check(to == XN_NONBLOCK,
			    S_objLib_OBJ_UNAVAILABLE, return ERROR);

		xnsynch_sleep_on(&sem->synchbase, to, XN_RELATIVE);

		error_check(xnthread_test_info(thread, XNBREAK),
			    -EINTR, return ERROR);
		error_check(xnthread_test_info(thread, XNRMID),
			    S_objLib_OBJ_DELETED, return ERROR);
		error_check(xnthread_test_info(thread, XNTIMEO),
			    S_objLib_OBJ_TIMEOUT, return ERROR);
	}

	return OK;
}
static ER wai_flg_helper(UINT *p_flgptn, ID flgid, UINT waiptn,
			 UINT wfmode, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	uiflag_t *flag;
	ER err = E_OK;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (flgid <= 0 || flgid > uITRON_MAX_FLAGID)
		return E_ID;

	if (waiptn == 0)
		return E_PAR;

	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	xnlock_get_irqsave(&nklock, s);

	flag = xnmap_fetch(ui_flag_idmap, flgid);
	if (!flag) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (((wfmode & TWF_ORW) && (waiptn & flag->flgvalue) != 0) ||
	    (!(wfmode & TWF_ORW) && ((waiptn & flag->flgvalue) == waiptn))) {
		*p_flgptn = flag->flgvalue;

		if (wfmode & TWF_CLR)
			flag->flgvalue = 0;

		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	} else if (xnsynch_pended_p(&flag->synchbase) &&
		   !(flag->flgatr & TA_WMUL)) {
		err = E_OBJ;
		goto unlock_and_exit;
	}

	task = ui_current_task();
	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);
	task->wargs.flag.wfmode = wfmode;
	task->wargs.flag.waiptn = waiptn;

	xnsynch_sleep_on(&flag->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Flag deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		*p_flgptn = task->wargs.flag.waiptn;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
{
	struct xnpipe_state *state;
	struct xnholder *h;
	xnthread_t *thread;
	ssize_t ret;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (xnpod_asynch_p())
		return -EPERM;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		ret = -EBADF;
		goto unlock_and_exit;
	}

	thread = xnpod_current_thread();

	while ((h = getq(&state->inq)) == NULL) {
		if (timeout == XN_NONBLOCK) {
			ret = -EWOULDBLOCK;
			goto unlock_and_exit;
		}

		xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(thread, XNTIMEO)) {
			ret = -ETIMEDOUT;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(thread, XNBREAK)) {
			ret = -EINTR;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(thread, XNRMID)) {
			ret = -EIDRM;
			goto unlock_and_exit;
		}

		/* remaining timeout */
		timeout = xnthread_timeout(thread);
	}

	*pmh = link2mh(h);

	ret = (ssize_t)xnpipe_m_size(*pmh);

	if (testbits(state->status, XNPIPE_USER_WSYNC)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
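/*
 * Usage sketch (not part of the original source): a kernel-side
 * consumer pulling one message from an already-connected pipe minor.
 * Only names visible in the function above are used; how the message
 * header is processed and disposed of is skin-specific and elided here.
 */
static ssize_t drain_one(int minor)
{
	struct xnpipe_mh *mh;
	ssize_t n;

	/* Block up to 1000 ticks for a message from user space. */
	n = xnpipe_recv(minor, &mh, 1000);
	if (n < 0)
		return n;	/* -ETIMEDOUT, -EINTR, -EIDRM, -EBADF, ... */

	/* n == xnpipe_m_size(mh): payload length in bytes. */
	/* ... process mh, then release it as the skin requires ... */
	return n;
}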
void xnsynch_sleep_on(xnsynch_t *synch, xnticks_t timeout,
		      xntmode_t timeout_mode)
{
	xnthread_t *thread = xnpod_current_thread(), *owner;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus_synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) { /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

	if (!testbits(synch->status, XNSYNCH_PIP)) { /* i.e. no ownership */
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

redo:
	owner = synch->owner;

	if (!owner) {
		synch->owner = thread;
		xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
		goto unlock_and_exit;
	}

	if (thread->cprio > owner->cprio) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto unlock_and_exit;
		}

		if (!xnthread_test_state(owner, XNBOOST)) {
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			removepq(&owner->claimq, &synch->link);
		else
			__setbits(synch->status, XNSYNCH_CLAIMED);

		insertpqf(&owner->claimq, &synch->link, thread->cprio);
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnsynch_renice_thread(owner, thread->cprio);
	} else
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/*
		 * Somebody stole the ownership from us while we were
		 * ready to run, waiting for the CPU: we need to wait
		 * again for the resource.
		 */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE)
			goto redo;

		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) /* Otherwise, it's too late. */
			goto redo;

		xnthread_set_info(thread, XNTIMEO);
	}

unlock_and_exit:

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	xnlock_put_irqrestore(&nklock, s);
}
int rt_event_wait(RT_EVENT *event, unsigned long mask, unsigned long *mask_r,
		  int mode, RTIME timeout)
{
	RT_TASK *task;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	if (!mask) {
		*mask_r = event->value;
		goto unlock_and_exit;
	}

	if (timeout == TM_NONBLOCK) {
		unsigned long bits = (event->value & mask);
		*mask_r = bits;

		if (mode & EV_ANY) {
			if (!bits)
				err = -EWOULDBLOCK;
		} else if (bits != mask)
			err = -EWOULDBLOCK;

		goto unlock_and_exit;
	}

	if (((mode & EV_ANY) && (mask & event->value) != 0) ||
	    (!(mode & EV_ANY) && ((mask & event->value) == mask))) {
		*mask_r = (event->value & mask);
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	task = xeno_current_task();
	task->wait_args.event.mode = mode;
	task->wait_args.event.mask = mask;

	xnsynch_sleep_on(&event->synch_base, timeout, XN_RELATIVE);

	/*
	 * The returned mask is only significant if the operation has
	 * succeeded, but we always write it back anyway.
	 */
	*mask_r = task->wait_args.event.mask;

	if (xnthread_test_info(&task->thread_base, XNRMID))
		err = -EIDRM;	/* Event group deleted while pending. */
	else if (xnthread_test_info(&task->thread_base, XNTIMEO))
		err = -ETIMEDOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->thread_base, XNBREAK))
		err = -EINTR;	/* Unblocked. */

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
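/*
 * Usage sketch (not part of the original source): a native-skin task
 * blocking until two event bits are both set. The bit values are
 * illustrative; EV_ALL requires every requested bit to be present
 * before the call returns.
 */
static int wait_until_ready(RT_EVENT *event)
{
	unsigned long mask;
	int err;

	/* Wait indefinitely for bits 0 and 1 to be set together. */
	err = rt_event_wait(event, 0x3, &mask, EV_ALL, TM_INFINITE);
	if (err)
		return err;	/* -EIDRM, -EINTR, -EPERM, ... */

	/* On success, mask echoes the satisfied bits. */
	return 0;
}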
/**
 * Check the state of a number of file descriptors, wait for a state
 * change if no descriptor is ready.
 *
 * @param selector structure to check for pending events;
 * @param out_fds the sets of descriptors with pending events if a
 * strictly positive number is returned, or the sets of descriptors not
 * yet bound if -ECHRNG is returned;
 * @param in_fds the sets of descriptors whose events should be checked;
 * @param nfds the highest-numbered descriptor in any of the @a in_fds
 * sets, plus 1;
 * @param timeout the timeout, whose meaning depends on @a timeout_mode.
 * Note that xnselect() passes @a timeout and @a timeout_mode unchanged
 * to xnsynch_sleep_on(), so passing a relative value different from
 * XN_INFINITE with @a timeout_mode set to XN_RELATIVE will cause a
 * longer sleep than expected if the sleep is interrupted;
 * @param timeout_mode the mode of @a timeout.
 *
 * @retval -EINVAL if @a nfds is negative or exceeds __FD_SETSIZE;
 * @retval -ECHRNG if some of the descriptors passed in @a in_fds have
 * not yet been registered with xnselect_bind(); @a out_fds then
 * contains the set of such descriptors;
 * @retval -EINTR if xnselect() was interrupted while waiting;
 * @retval 0 in case of timeout;
 * @retval the number of file descriptors having received an event.
 */
int xnselect(struct xnselector *selector,
	     fd_set *out_fds[XNSELECT_MAX_TYPES],
	     fd_set *in_fds[XNSELECT_MAX_TYPES],
	     int nfds, xnticks_t timeout, xntmode_t timeout_mode)
{
	unsigned i, not_empty = 0;
	xnthread_t *thread;
	spl_t s;

	if ((unsigned)nfds > __FD_SETSIZE)
		return -EINVAL;

	thread = xnpod_current_thread();

	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i])
			fd_set_zeropad(out_fds[i], nfds);

	xnlock_get_irqsave(&nklock, s);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i] &&
		    fd_set_andnot(out_fds[i], in_fds[i],
				  &selector->fds[i].expected, nfds))
			not_empty = 1;
	xnlock_put_irqrestore(&nklock, s);

	if (not_empty)
		return -ECHRNG;

	xnlock_get_irqsave(&nklock, s);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i] &&
		    fd_set_and(out_fds[i], in_fds[i],
			       &selector->fds[i].pending, nfds))
			not_empty = 1;

	while (!not_empty) {
		xnsynch_sleep_on(&selector->synchbase, timeout, timeout_mode);

		for (i = 0; i < XNSELECT_MAX_TYPES; i++)
			if (out_fds[i] &&
			    fd_set_and(out_fds[i], in_fds[i],
				       &selector->fds[i].pending, nfds))
				not_empty = 1;

		if (xnthread_test_info(thread, XNBREAK | XNTIMEO))
			break;
	}
	xnlock_put_irqrestore(&nklock, s);

	if (not_empty) {
		unsigned count;

		for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
			if (out_fds[i])
				count += fd_set_popcount(out_fds[i], nfds);

		return count;
	}

	if (xnthread_test_info(thread, XNBREAK))
		return -EINTR;

	return 0;	/* Timeout */
}
int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
{
	void *block = NULL;
	RT_TASK *task;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	/*
	 * In single-block mode, there is only a single allocation
	 * returning the whole addressable heap space to the user. All
	 * users referring to this heap are then returned the same
	 * block.
	 */
	if (heap->mode & H_SINGLE) {
		block = heap->sba;

		if (!block) {
			/*
			 * It's ok to pass zero for size here, since the
			 * requested size is implicitly the whole heap
			 * space; but if non-zero is given, it must
			 * match the original heap size.
			 */
			if (size > 0 && size != heap->csize) {
				err = -EINVAL;
				goto unlock_and_exit;
			}

			block = heap->sba =
				xnheap_alloc(&heap->heap_base,
					     xnheap_max_contiguous(&heap->heap_base));
		}

		if (block)
			goto unlock_and_exit;

		err = -ENOMEM;	/* This should never happen. Paranoid. */
		goto unlock_and_exit;
	}

	block = xnheap_alloc(&heap->heap_base, size);
	if (block)
		goto unlock_and_exit;

	if (timeout == TM_NONBLOCK) {
		err = -EWOULDBLOCK;
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	task = xeno_current_task();
	task->wait_args.heap.size = size;
	task->wait_args.heap.block = NULL;

	xnsynch_sleep_on(&heap->synch_base, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->thread_base, XNRMID))
		err = -EIDRM;	/* Heap deleted while pending. */
	else if (xnthread_test_info(&task->thread_base, XNTIMEO))
		err = -ETIMEDOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->thread_base, XNBREAK))
		err = -EINTR;	/* Unblocked. */
	else
		block = task->wait_args.heap.block;

unlock_and_exit:

	*blockp = block;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
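/*
 * Usage sketch (not part of the original source): carving a block out
 * of a conventional (non-H_SINGLE) heap and releasing it afterwards
 * with the matching rt_heap_free() service.
 */
static int use_heap(RT_HEAP *heap)
{
	void *block;
	int err;

	/* Block until 256 bytes become available. */
	err = rt_heap_alloc(heap, 256, TM_INFINITE, &block);
	if (err)
		return err;	/* -EIDRM, -EINTR, -EINVAL, ... */

	/* ... use the block ... */

	return rt_heap_free(heap, block);
}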
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

redo:
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/*
		 * Set the claimed bit. In case it appears to be set
		 * already, re-read its state under nklock so that we
		 * don't miss any change between the lock-less read and
		 * here. But also try to avoid cmpxchg where possible.
		 * Only if it appears not to be set, start with cmpxchg
		 * directly.
		 */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}

		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/*
			 * The handle is broken, therefore pretend that
			 * the synch object was deleted to signal an
			 * error.
			 */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/*
		 * Somebody stole the ownership from us while we were
		 * ready to run, waiting for the CPU: we need to wait
		 * again for the resource.
		 */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}

		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {
grab_and_exit:
		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

			/* We are the new owner, update the fastlock accordingly. */
			if (xnsynch_pended_p(synch))
				threadh = xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
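/*
 * Usage sketch (not part of the original source): the ownership-aware
 * counterpart of the xnsynch_sleep_on() pattern. The mutex wrapper
 * type is hypothetical; its synch must be created with XNSYNCH_OWNER
 * (plus XNSYNCH_PIP for priority inheritance), matching the BUGON
 * assertion above.
 */
struct my_mutex {
	struct xnsynch synchbase;
};

static int my_lock(struct my_mutex *mx, xnticks_t timeout)
{
	xnflags_t info;

	info = xnsynch_acquire(&mx->synchbase, timeout, XN_RELATIVE);

	if (info & XNBREAK)
		return -EINTR;
	if (info & XNRMID)
		return -EIDRM;
	if (info & XNTIMEO)
		return -ETIMEDOUT;

	return 0;	/* the current thread now owns mx */
}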
static ER rcv_msg_helper(T_MSG **ppk_msg, ID mbxid, TMO tmout)
{
	xnticks_t timeout;
	uitask_t *task;
	ER err = E_OK;
	uimbx_t *mbx;
	spl_t s;

	if (xnpod_unblockable_p())
		return E_CTX;

	if (tmout == TMO_FEVR)
		timeout = XN_INFINITE;
	else if (tmout == 0)
		timeout = XN_NONBLOCK;
	else if (tmout < TMO_FEVR)
		return E_PAR;
	else
		timeout = (xnticks_t)tmout;

	if (mbxid <= 0 || mbxid > uITRON_MAX_MBXID)
		return E_ID;

	xnlock_get_irqsave(&nklock, s);

	mbx = xnmap_fetch(ui_mbx_idmap, mbxid);
	if (!mbx) {
		err = E_NOEXS;
		goto unlock_and_exit;
	}

	if (mbx->mcount > 0) {
		*ppk_msg = mbx->ring[mbx->rdptr];
		mbx->rdptr = (mbx->rdptr + 1) % mbx->bufcnt;
		mbx->mcount--;
		goto unlock_and_exit;
	}

	if (timeout == XN_NONBLOCK) {
		err = E_TMOUT;
		goto unlock_and_exit;
	}

	task = ui_current_task();
	xnthread_clear_info(&task->threadbase, uITRON_TASK_RLWAIT);

	xnsynch_sleep_on(&mbx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(&task->threadbase, XNRMID))
		err = E_DLT;	/* Mailbox deleted while pending. */
	else if (xnthread_test_info(&task->threadbase, XNTIMEO))
		err = E_TMOUT;	/* Timeout. */
	else if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = E_RLWAI;	/* rel_wai() or signal received while waiting. */
	else
		*ppk_msg = task->wargs.msg;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}