/*
 * Put the current thread to sleep on @synch until it is woken up or
 * the timeout elapses.
 *
 * The waiter is queued FIFO by default, or by priority when the
 * synchronization object carries XNSYNCH_PRIO.
 *
 * Returns the XNRMID|XNTIMEO|XNBREAK information bits left on the
 * thread by the wakeup path (0 denotes a regular wakeup).
 *
 * Must not be used on ownership-tracking objects (XNSYNCH_OWNER);
 * those are acquired through xnsynch_acquire() instead.
 */
xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
			   xntmode_t timeout_mode)
{
	struct xnthread *curr = xnpod_current_thread();
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   curr, xnthread_name(curr), synch);

	if (testbits(synch->status, XNSYNCH_PRIO))
		/* Priority-ordered wait queue. */
		insertpqf(&synch->pendq, &curr->plink, w_cprio(curr));
	else
		/* FIFO-ordered wait queue. */
		appendpq(&synch->pendq, &curr->plink);

	xnpod_suspend_thread(curr, XNPEND, timeout, timeout_mode, synch);

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK);
}
/*
 * Suspend a RTAI task, with nesting support.
 *
 * @task is the task descriptor, or NULL to suspend the calling task
 * (valid from primary mode only).
 *
 * Only the outermost call — i.e. when the suspension depth rises from
 * zero — actually blocks the underlying thread; deeper calls merely
 * bump the nesting count.
 *
 * Returns 0 on success, -EINVAL on a stale/invalid descriptor or
 * invalid calling context, -EINTR if the suspended thread was
 * forcibly unblocked (XNBREAK).
 */
int __rtai_task_suspend(RT_TASK *task)
{
	int ret = 0;
	spl_t s;

	if (task == NULL) {
		/* NULL means "self"; only meaningful for a primary-mode
		   caller that actually is a RTAI task. */
		if (!xnpod_primary_p())
			return -EINVAL;

		task = rtai_current_task();
	}

	xnlock_get_irqsave(&nklock, s);

	task = rtai_h2obj_validate(task, RTAI_TASK_MAGIC, RT_TASK);
	if (task == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Only the first level of suspension blocks the thread. */
	if (task->suspend_depth++ == 0) {
		xnpod_suspend_thread(&task->thread_base, XNSUSP,
				     XN_INFINITE, XN_RELATIVE, NULL);
		/* Woken up by a forcible unblock, not a resume. */
		if (xnthread_test_info(&task->thread_base, XNBREAK))
			ret = -EINTR;
	}

out:
	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/*
 * Delay the calling task for @delay clock ticks (relative timeout).
 *
 * A zero or negative delay is a no-op: the caller returns immediately
 * without blocking.
 */
void rt_sleep(RTIME delay)
{
	if (delay > 0)
		xnpod_suspend_thread(&rtai_current_task()->thread_base,
				     XNDELAY, delay, XN_RELATIVE, NULL);
}
/**
 * Sleep some amount of time.
 *
 * This service suspends the calling thread until the wakeup time specified by
 * @a rqtp, or a signal is delivered to the caller. If the flag TIMER_ABSTIME is
 * set in the @a flags argument, the wakeup time is specified as an absolute
 * value of the clock @a clock_id. If the flag TIMER_ABSTIME is not set, the
 * wakeup time is specified as a time interval.
 *
 * If this service is interrupted by a signal, the flag TIMER_ABSTIME is not
 * set, and @a rmtp is not @a NULL, the time remaining until the specified
 * wakeup time is returned at the address @a rmtp.
 *
 * The resolution of this service is one system clock tick.
 *
 * @param clock_id clock identifier, either CLOCK_REALTIME,
 * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW.
 *
 * @param flags one of:
 * - 0 meaning that the wakeup time @a rqtp is a time interval;
 * - TIMER_ABSTIME, meaning that the wakeup time is an absolute value of the
 * clock @a clock_id.
 *
 * @param rqtp address of the wakeup time.
 *
 * @param rmtp address where the remaining time before wakeup will be stored if
 * the service is interrupted by a signal.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, the specified clock is unsupported;
 * - EINVAL, the specified wakeup time is invalid;
 * - EINTR, this service was interrupted by a signal.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_nanosleep.html">
 * Specification.</a>
 *
 */
int clock_nanosleep(clockid_t clock_id, int flags,
		    const struct timespec *rqtp, struct timespec *rmtp)
{
	xnthread_t *cur;
	spl_t s;
	int err = 0;

	/* Cannot sleep from a context that may not block (returns the
	   error code positively, per this skin's convention). */
	if (xnpod_unblockable_p())
		return EPERM;

	if (clock_id != CLOCK_MONOTONIC &&
	    clock_id != CLOCK_MONOTONIC_RAW &&
	    clock_id != CLOCK_REALTIME)
		return ENOTSUP;

	/* The unsigned cast also rejects negative tv_nsec values.
	   NOTE(review): rqtp is dereferenced without a NULL check —
	   presumably callers guarantee a valid pointer; verify. */
	if ((unsigned long)rqtp->tv_nsec >= ONE_BILLION)
		return EINVAL;

	if (flags & ~TIMER_ABSTIME)
		return EINVAL;

	cur = xnpod_current_thread();

	xnlock_get_irqsave(&nklock, s);

	thread_cancellation_point(cur);

	/* One extra tick so we never wake up before the requested date. */
	xnpod_suspend_thread(cur, XNDELAY, ts2ticks_ceil(rqtp) + 1,
			     clock_flag(flags, clock_id), NULL);

	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNBREAK)) {
		/* Interrupted: for a relative sleep, report the time
		   still to go if the caller provided rmtp. The expiry
		   date is sampled under nklock, then the lock is
		   dropped before converting/storing to user memory. */
		if (flags == 0 && rmtp) {
			xnticks_t now, expiry;
			xnsticks_t rem;

			now = clock_get_ticks(clock_id);
			expiry = xntimer_get_date(&cur->rtimer);
			xnlock_put_irqrestore(&nklock, s);
			rem = expiry - now;

			/* Clamp to zero if the deadline already passed. */
			ticks2ts(rmtp, rem > 0 ? rem : 0);
		} else
			xnlock_put_irqrestore(&nklock, s);

		return EINTR;
	}

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * t_suspend() - suspend a pSOS task.
 *
 * @tid is the task identifier, or 0 to suspend the calling task.
 *
 * Returns SUCCESS, ERR_SUSP if the target is already suspended, a
 * pSOS error code from psos_handle_error() on a stale identifier, or
 * -EPERM/-EINTR for invalid context / forcible unblock.
 *
 * NOTE(review): -EPERM and -EINTR are negative errno values folded
 * into a u_long alongside positive pSOS codes — confirm callers
 * expect this mixed convention.
 */
u_long t_suspend(u_long tid)
{
	u_long err = SUCCESS;
	psostask_t *task;
	spl_t s;

	if (tid == 0) {
		/* tid 0 means "suspend myself". */
		if (xnpod_unblockable_p())
			return -EPERM;

		xnpod_suspend_self();

		/* Woken up by a forcible unblock rather than t_resume. */
		if (xnthread_test_info(&psos_current_task()->threadbase,
				       XNBREAK))
			return -EINTR;

		return SUCCESS;
	}

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	if (xnthread_test_state(&task->threadbase, XNSUSP)) {
		err = ERR_SUSP;	/* Task already suspended. */
		goto unlock_and_exit;
	}

	xnpod_suspend_thread(&task->threadbase, XNSUSP,
			     XN_INFINITE, XN_RELATIVE, NULL);

	if (xnthread_test_info(&task->threadbase, XNBREAK))
		err = -EINTR;

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Acquire an ownership-tracking synchronization object, sleeping
 * until it is granted or the timeout elapses.
 *
 * When the object carries a fastlock word, an uncontended acquisition
 * is attempted first with a lock-less cmpxchg; only on contention is
 * nklock taken and the slow path entered. The slow path supports
 * ownership stealing from a not-yet-running wakee (XNROBBED) and
 * priority inheritance (XNSYNCH_PIP) by boosting the current owner.
 *
 * Returns the XNRMID|XNTIMEO|XNBREAK information bits (0 on a normal
 * grant). Must only be called on XNSYNCH_OWNER objects.
 */
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

      redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		/* Fast path: grab the lock word if nobody owns it. */
		fastlock = xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/* Set claimed bit.  In case it appears to be set already,
		   re-read its state under nklock so that we don't miss any
		   change between the lock-less read and here. But also try
		   to avoid cmpxchg where possible. Only if it appears not
		   to be set, start with cmpxchg directly. */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;
		  test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/* The handle is broken, therefore pretend that the
			   synch object was deleted to signal an error. */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			/* Boost the owner up to the waiter's priority. */
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			/* Re-queue this object on the owner's claim queue
			   at the (possibly raised) waiter priority. */
			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole us the ownership while we were ready
		   to run, waiting for the CPU: we need to wait again
		   for the resource. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		/* Retry with whatever time was left on the timer. */
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

	      grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
/*
 * Sleep on a synchronization object (legacy void-returning variant
 * combining wait and ownership handling).
 *
 * Three queueing disciplines are visible here: plain FIFO
 * (!XNSYNCH_PRIO), priority-ordered without ownership (!XNSYNCH_PIP),
 * and priority inheritance (XNSYNCH_PIP) where the caller may take
 * ownership directly, steal it from a not-yet-running wakee
 * (XNROBBED), or boost the current owner before blocking.
 *
 * Wakeup cause is left in the caller's XNRMID/XNTIMEO/XNBREAK info
 * bits for the caller to inspect.
 *
 * NOTE(review): the trace_mark() call passes the channel as a single
 * fused identifier (xn_nucleus_synch_sleepon) unlike the two-argument
 * form used elsewhere — confirm which trace API revision this file
 * targets.
 */
void xnsynch_sleep_on(xnsynch_t *synch, xnticks_t timeout,
		      xntmode_t timeout_mode)
{
	xnthread_t *thread = xnpod_current_thread(), *owner;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus_synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) { /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
		xnpod_suspend_thread(thread, XNPEND, timeout,
				     timeout_mode, synch);
		goto unlock_and_exit;
	}

	if (!testbits(synch->status, XNSYNCH_PIP)) { /* i.e. no ownership */
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnpod_suspend_thread(thread, XNPEND, timeout,
				     timeout_mode, synch);
		goto unlock_and_exit;
	}

      redo:
	owner = synch->owner;

	if (!owner) {
		/* Free object: take ownership immediately, no sleep. */
		synch->owner = thread;
		xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
		goto unlock_and_exit;
	}

	if (thread->cprio > owner->cprio) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto unlock_and_exit;
		}

		/* Priority inheritance: boost the lower-priority owner. */
		if (!xnthread_test_state(owner, XNBOOST)) {
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			removepq(&owner->claimq, &synch->link);
		else
			__setbits(synch->status, XNSYNCH_CLAIMED);

		insertpqf(&owner->claimq, &synch->link, thread->cprio);
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnsynch_renice_thread(owner, thread->cprio);
	} else
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole us the ownership while we were ready
		   to run, waiting for the CPU: we need to wait again
		   for the resource. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE)
			goto redo;
		/* Retry with whatever time remains on the watchdog. */
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) /* Otherwise, it's too late. */
			goto redo;
		xnthread_set_info(thread, XNTIMEO);
	}

      unlock_and_exit:

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	xnlock_put_irqrestore(&nklock, s);
}