Example 1
/* Must be called with nklock locked, interrupts off. */
static STATUS semm_give(wind_sem_t *sem)
{
	xnthread_t *cur = xnpod_current_thread();
	int resched = 0;

	check_NOT_ISR_CALLABLE(return ERROR);

	if (cur != xnsynch_owner(&sem->synchbase)) {
		wind_errnoset(S_semLib_INVALID_OPERATION);
		return ERROR;
	}

	if (--sem->count > 0)
		return OK;

	if (xnsynch_release(&sem->synchbase)) {
		sem->count = 1;
		resched = 1;
	}

	if (xnsynch_test_flags(&sem->synchbase, WIND_SEM_DEL_SAFE))
		if (taskUnsafeInner(cur))
			resched = 1;

	if (resched)
		xnpod_schedule();

	return OK;
}
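The "must be called with nklock locked" convention above is shared by the whole wind semaphore family: the operation itself never touches the lock. A minimal sketch of the wrapper a caller is expected to provide (the real semLib entry point dispatches through the semaphore's operation table; the direct call here is a simplification):

static STATUS my_sem_give(wind_sem_t *sem)
{
	STATUS ret;
	spl_t s;

	/* Enter the calling convention: nklock held, interrupts off. */
	xnlock_get_irqsave(&nklock, s);
	ret = semm_give(sem);
	xnlock_put_irqrestore(&nklock, s);

	return ret;
}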
Example 2
void sc_mdelete(int mid, int opt, int *errp)
{
	xnthread_t *owner;
	vrtxmx_t *mx;
	spl_t s;

	if (opt & ~1) {
		*errp = ER_IIP;
		return;
	}

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	owner = xnsynch_owner(&mx->synchbase);
	if (owner && (opt == 0 || xnpod_current_thread() != owner)) {
		*errp = ER_PND;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (mx_destroy_internal(mx) == XNSYNCH_RESCHED)
		xnpod_schedule();

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example 3
void sc_maccept(int mid, int *errp)
{
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	if (xnthread_try_grab(xnpod_current_thread(), &mx->synchbase))
		*errp = RET_OK;
	else
		*errp = ER_PND;

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example 4
xnflags_t xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
			   xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread();
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else /* i.e. priority-sorted */
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
Example 5
/* NOTE: caller must provide locking */
void xnthread_prepare_wait(struct xnthread_wait_context *wc)
{
	struct xnthread *curr = xnpod_current_thread();

	curr->wcontext = wc;
	wc->oldstate = xnthread_test_state(curr, XNDEFCAN);
	xnthread_set_state(curr, XNDEFCAN);
}
Example 6
void __xntimer_init(xntimer_t *timer, xntbase_t *base,
		    void (*handler) (xntimer_t *timer))
{
	/* CAUTION: Setup from xntimer_init() must not depend on the
	   periodic/aperiodic timing mode. */

	xntimerh_init(&timer->aplink);
	xntimerh_date(&timer->aplink) = XN_INFINITE;
#ifdef CONFIG_XENO_OPT_TIMING_PERIODIC
	timer->base = base;
	xntlholder_init(&timer->plink);
	xntlholder_date(&timer->plink) = XN_INFINITE;
#endif /* CONFIG_XENO_OPT_TIMING_PERIODIC */
	xntimer_set_priority(timer, XNTIMER_STDPRIO);
	timer->status = XNTIMER_DEQUEUED;
	timer->handler = handler;
	timer->interval = 0;
	timer->sched = xnpod_current_sched();

#ifdef CONFIG_XENO_OPT_STATS
	{
		spl_t s;

		if (!xnpod_current_thread() || xnpod_shadow_p())
			snprintf(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
				 current->pid, current->comm);
		else
			xnobject_copy_name(timer->name,
					   xnpod_current_thread()->name);

		inith(&timer->tblink);
		xnstat_counter_set(&timer->scheduled, 0);
		xnstat_counter_set(&timer->fired, 0);

		xnlock_get_irqsave(&nklock, s);
		appendq(&base->timerq, &timer->tblink);
		base->timerq_rev++;
		xnlock_put_irqrestore(&nklock, s);
	}
#endif /* CONFIG_XENO_OPT_STATS */

	xnarch_init_display_context(timer);
}
Example 7
/**
 * Sleep some amount of time.
 *
 * This service suspends the calling thread until the wakeup time specified by
 * @a rqtp, or a signal is delivered to the caller. If the flag TIMER_ABSTIME is
 * set in the @a flags argument, the wakeup time is specified as an absolute
 * value of the clock @a clock_id. If the flag TIMER_ABSTIME is not set, the
 * wakeup time is specified as a time interval.
 *
 * If this service is interrupted by a signal, the flag TIMER_ABSTIME is not
 * set, and @a rmtp is not @a NULL, the time remaining until the specified
 * wakeup time is returned at the address @a rmtp.
 *
 * The resolution of this service is one system clock tick.
 *
 * @param clock_id clock identifier, either CLOCK_REALTIME,
 * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW.
 *
 * @param flags one of:
 * - 0 meaning that the wakeup time @a rqtp is a time interval;
 * - TIMER_ABSTIME, meaning that the wakeup time is an absolute value of the
 *   clock @a clock_id.
 *
 * @param rqtp address of the wakeup time.
 *
 * @param rmtp address where the remaining time before wakeup will be stored if
 * the service is interrupted by a signal.
 *
 * @return 0 on success;
 * @return an error number if:
 * - EPERM, the caller context is invalid;
 * - ENOTSUP, the specified clock is unsupported;
 * - EINVAL, the specified wakeup time is invalid;
 * - EINTR, this service was interrupted by a signal.
 *
 * @par Valid contexts:
 * - Xenomai kernel-space thread,
 * - Xenomai user-space thread (switches to primary mode).
 *
 * @see
 * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_nanosleep.html">
 * Specification.</a>
 *
 */
int clock_nanosleep(clockid_t clock_id,
		    int flags,
		    const struct timespec *rqtp, struct timespec *rmtp)
{
	xnthread_t *cur;
	spl_t s;
	int err = 0;

	if (xnpod_unblockable_p())
		return EPERM;

	if (clock_id != CLOCK_MONOTONIC &&
	    clock_id != CLOCK_MONOTONIC_RAW &&
	    clock_id != CLOCK_REALTIME)
		return ENOTSUP;

	if ((unsigned long)rqtp->tv_nsec >= ONE_BILLION)
		return EINVAL;

	if (flags & ~TIMER_ABSTIME)
		return EINVAL;

	cur = xnpod_current_thread();

	xnlock_get_irqsave(&nklock, s);

	thread_cancellation_point(cur);

	xnpod_suspend_thread(cur, XNDELAY, ts2ticks_ceil(rqtp) + 1,
			     clock_flag(flags, clock_id), NULL);

	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNBREAK)) {

		if (flags == 0 && rmtp) {
			xnticks_t now, expiry;
			xnsticks_t rem;

			now = clock_get_ticks(clock_id);
			expiry = xntimer_get_date(&cur->rtimer);
			xnlock_put_irqrestore(&nklock, s);
			rem = expiry - now;

			ticks2ts(rmtp, rem > 0 ? rem : 0);
		} else
			xnlock_put_irqrestore(&nklock, s);

		return EINTR;
	}

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
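For reference, a minimal caller-side sketch against the signature documented above; note that this service returns the error number directly instead of setting errno. sleep_until() is a hypothetical helper, not part of the skin:

#include <time.h>
#include <errno.h>

/* Sleep until an absolute deadline, restarting after signals. With
   TIMER_ABSTIME the deadline does not drift, so a plain retry is safe. */
static int sleep_until(const struct timespec *deadline)
{
	int err;

	do
		err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
				      deadline, NULL);
	while (err == EINTR);

	return err;
}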
Example 8
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
Example 9
/* NOTE: caller must provide locking */
void xnthread_finish_wait(struct xnthread_wait_context *wc,
			  void (*cleanup)(struct xnthread_wait_context *wc))
{
	struct xnthread *curr = xnpod_current_thread();

	curr->wcontext = NULL;
	if ((wc->oldstate & XNDEFCAN) == 0)
		xnthread_clear_state(curr, XNDEFCAN);

	if (xnthread_test_state(curr, XNCANPND)) {
		if (cleanup)
			cleanup(wc);
		xnpod_delete_self();
	}
}
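Examples 5 and 9 are the two halves of one protocol. A sketch of how a caller pairs them around a sleep, with nklock held throughout as both "caller must provide locking" notes require (some_synch is a placeholder; holding nklock across xnsynch_sleep_on() follows the pattern of xnpipe_recv() in Example 25):

	struct xnthread_wait_context wc;

	/* nklock already held, interrupts off. */
	xnthread_prepare_wait(&wc);	/* defer cancellation over the sleep */
	xnsynch_sleep_on(&some_synch, XN_INFINITE, XN_RELATIVE);
	xnthread_finish_wait(&wc, NULL); /* restore XNDEFCAN, act on pending cancel */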
Example 10
static int __wind_task_setmode(struct task_struct *curr, struct pt_regs *regs)
{
	int setmask, clrmask, mode_r;

	clrmask = __xn_reg_arg1(regs);
	setmask = __xn_reg_arg2(regs);

	/* Primary mode required: current thread must be valid. */
	mode_r = xnpod_set_thread_mode(xnpod_current_thread(),
				       clrmask, setmask);
	if (__xn_reg_arg3(regs))
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg3(regs),
				  &mode_r, sizeof(mode_r));
	return 0;
}
Example 11
void sc_mpend(int mid, unsigned long timeout, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxtask_t *task;
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	if (xnpod_unblockable_p()) {
		*errp = -EPERM;
		goto unlock_and_exit;
	}

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	if (mx == NULL) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnthread_try_grab(cur, &mx->synchbase))
		goto unlock_and_exit;

	if (xnsynch_owner(&mx->synchbase) == cur)
		goto unlock_and_exit;

	task = thread2vrtxtask(cur);
	task->vrtxtcb.TCBSTAT = TBSMUTEX;

	if (timeout)
		task->vrtxtcb.TCBSTAT |= TBSDELAY;

	xnsynch_acquire(&mx->synchbase, timeout, XN_RELATIVE);

	if (xnthread_test_info(cur, XNBREAK))
		*errp = -EINTR;
	else if (xnthread_test_info(cur, XNRMID))
		*errp = ER_DEL;	/* Mutex deleted while pending. */
	else if (xnthread_test_info(cur, XNTIMEO))
		*errp = ER_TMO;	/* Timeout. */

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example 12
static void xnsynch_renice_thread(xnthread_t *thread, int prio)
{
	thread->cprio = prio;

	if (thread->wchan)
		/* Ignoring the XNSYNCH_DREORD flag on purpose here. */
		xnsynch_renice_sleeper(thread);
	else if (thread != xnpod_current_thread() &&
		 xnthread_test_state(thread, XNREADY))
		/* xnpod_resume_thread() must be called for runnable
		   threads but the running one. */
		xnpod_resume_thread(thread, 0);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNRELAX))
		xnshadow_renice(thread);
#endif /* CONFIG_XENO_OPT_PERVASIVE */
}
Example 13
/* Must be called with nklock locked, interrupts off. */
static STATUS semm_take(wind_sem_t *sem, xnticks_t to)
{
	xnthread_t *cur = xnpod_current_thread();

	if (xnsynch_owner(&sem->synchbase) == NULL) {
		xnsynch_set_owner(&sem->synchbase, cur);
		goto grab_sem;
	}

	if (xnsynch_owner(&sem->synchbase) == cur) {
		sem->count++;
		return OK;
	}

	error_check(to == XN_NONBLOCK, S_objLib_OBJ_UNAVAILABLE,
		    return ERROR);

	xnsynch_acquire(&sem->synchbase, to, XN_RELATIVE);

	error_check(xnthread_test_info(cur, XNBREAK),
		    -EINTR, return ERROR);

	error_check(xnthread_test_info(cur, XNRMID),
		    S_objLib_OBJ_DELETED, return ERROR);

	error_check(xnthread_test_info(cur, XNTIMEO),
		    S_objLib_OBJ_TIMEOUT, return ERROR);
 grab_sem:
	/*
	 * xnsynch_sleep_on() might have stolen the resource, so we
	 * need to put our internal data in sync.
	 */
	sem->count = 1;

	if (xnsynch_test_flags(&sem->synchbase, WIND_SEM_DEL_SAFE))
		taskSafeInner(cur);

	return OK;
}
Example 14
static inline int sem_timedwait_internal(struct __shadow_sem *shadow,
					 int timed, xnticks_t to)
{
	pse51_sem_t *sem = shadow->sem;
	xnthread_t *cur;
	int err;

	if (xnpod_unblockable_p())
		return EPERM;

	cur = xnpod_current_thread();

	if ((err = sem_trywait_internal(shadow)) != EAGAIN)
		return err;

	thread_cancellation_point(cur);

	if (timed)
		xnsynch_sleep_on(&sem->synchbase, to, XN_REALTIME);
	else
		xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);

	/* Handle cancellation requests. */
	thread_cancellation_point(cur);

	if (xnthread_test_info(cur, XNRMID))
		return EINVAL;

	if (xnthread_test_info(cur, XNBREAK))
		return EINTR;

	if (xnthread_test_info(cur, XNTIMEO))
		return ETIMEDOUT;

	return 0;
}
Example 15
/* Must be called with nklock locked, interrupts off. */
static STATUS semb_take(wind_sem_t *sem, xnticks_t to)
{
	xnthread_t *thread = xnpod_current_thread();

	if (sem->count > 0)
		--sem->count;
	else {
		error_check(to == XN_NONBLOCK, S_objLib_OBJ_UNAVAILABLE,
			    return ERROR);

		xnsynch_sleep_on(&sem->synchbase, to, XN_RELATIVE);

		error_check(xnthread_test_info(thread, XNBREAK), -EINTR,
			    return ERROR);

		error_check(xnthread_test_info(thread, XNRMID),
			    S_objLib_OBJ_DELETED, return ERROR);

		error_check(xnthread_test_info(thread, XNTIMEO),
			    S_objLib_OBJ_TIMEOUT, return ERROR);
	}

	return OK;
}
Example 16
void sc_mpost(int mid, int *errp)
{
	xnthread_t *cur = xnpod_current_thread();
	vrtxmx_t *mx;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	mx = xnmap_fetch(vrtx_mx_idmap, mid);
	/* Return ER_ID if the poster does not own the mutex. */
	if (mx == NULL || xnsynch_owner(&mx->synchbase) != cur) {
		*errp = ER_ID;
		goto unlock_and_exit;
	}

	*errp = RET_OK;

	if (xnsynch_release(&mx->synchbase))
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);
}
Example 17
int _rtapi_trap_handler(unsigned event, unsigned domid, void *data) {
    struct pt_regs *regs = data;
    xnthread_t *thread = xnpod_current_thread();

    int task_id = _rtapi_task_self_hook();

    rtapi_exception_detail_t detail = {0};

    detail.task_id = task_id;
    detail.error_code = thread->errcode;

    detail.flavor.xeno.event = event;
    detail.flavor.xeno.domid = domid;
    detail.flavor.xeno.ip = (exc_register_t) regs->ip;
    detail.flavor.xeno.sp = (exc_register_t) regs->sp;

    if (rt_exception_handler)
	rt_exception_handler(XK_TRAP, &detail,
			     (task_id > -1) ?
			     &global_data->thread_status[task_id] : NULL);

    // forward to default Xenomai trap handler
    return ((rthal_trap_handler_t) old_trap_handler)(event, domid, data);
}
Example 18
/*!
 * \fn struct xnthread *xnsynch_release(struct xnsynch *synch);
 * \brief Give the resource ownership to the next waiting thread.
 *
 * This service releases the ownership of the given synchronization
 * object. The thread which is currently leading the object's pending
 * list, if any, is unblocked from its pending state. However, no
 * reschedule is performed.
 *
 * This service must be used only with synchronization objects that
 * track ownership (XNSYNCH_OWNER set).
 *
 * @param synch The descriptor address of the synchronization object
 * whose ownership is changed.
 *
 * @return The descriptor address of the unblocked thread.
 *
 * Side-effects:
 *
 * - The effective priority of the previous resource owner might be
 * lowered to its base priority value as a consequence of the priority
 * inheritance boost being cleared.
 *
 * - The synchronization object ownership is transferred to the
 * unblocked thread.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Interrupt service routine
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: never.
 */
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	return xnsynch_release_thread(synch, xnpod_current_thread());
}
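Because the service never reschedules, callers follow the release-then-schedule pattern also visible in sc_mpost() (Example 16). A minimal sketch:

static void my_unlock(struct xnsynch *synch)
{
	/* If a waiter was readied, give the scheduler a chance to
	   switch to it; xnpod_schedule() detects worthless calls
	   inline and is a no-op otherwise. */
	if (xnsynch_release(synch) != NULL)
		xnpod_schedule();
}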
Example 19
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

      redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		fastlock = xnarch_atomic_cmpxchg(lockp,
						 XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/* Set claimed bit.
		   In case it appears to be set already, re-read its state
		   under nklock so that we don't miss any change between the
		   lock-less read and here. But also try to avoid cmpxchg
		   where possible. Only if it appears not to be set, start
		   with cmpxchg directly. */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

		  test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/* The handle is broken, therefore pretend that the synch
			   object was deleted to signal an error. */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were
		   ready to run, waiting for the CPU: we need to wait
		   again for the resource. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

	      grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
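The fast path above relies on a small compare-and-swap protocol over the lock word. A standalone sketch of that handshake, with hypothetical helper names (the real code inlines these operations, as shown above and in xnsynch_release() in Example 8):

static inline int fastlock_try_acquire(xnarch_atomic_t *lockp,
				       xnhandle_t threadh)
{
	/* Uncontended acquire: swap XN_NO_HANDLE for our handle. */
	return xnarch_atomic_cmpxchg(lockp, XN_NO_HANDLE, threadh)
		== XN_NO_HANDLE;
}

static inline int fastlock_try_release(xnarch_atomic_t *lockp,
				       xnhandle_t threadh)
{
	/* Fails once the claimed bit is set, i.e. waiters queued up
	   and the slow path of xnsynch_release() must run. */
	return xnarch_atomic_cmpxchg(lockp, threadh, XN_NO_HANDLE)
		== threadh;
}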
Example 20
int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
{
	void *block = NULL;
	xnthread_t *thread;
	xnflags_t info;
	int err = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

	if (!heap) {
		err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
		goto unlock_and_exit;
	}

	/* In single-block mode, there is only a single allocation
	   returning the whole addressable heap space to the user. All
	   users referring to this heap are then returned the same
	   block. */

	if (heap->mode & H_SINGLE) {
		block = heap->sba;

		if (!block) {
			/* It's ok to pass zero for size here, since the requested
			   size is implicitly the whole heap space; but if
			   non-zero is given, it must match the original heap
			   size. */

			if (size > 0 && size != heap->csize) {
				err = -EINVAL;
				goto unlock_and_exit;
			}

			block = heap->sba = xnheap_alloc(&heap->heap_base,
							 xnheap_max_contiguous
							 (&heap->heap_base));
		}

		if (block)
			goto unlock_and_exit;

		err = -ENOMEM;	/* This should never happen. Paranoid. */
		goto unlock_and_exit;
	}

	block = xnheap_alloc(&heap->heap_base, size);

	if (block)
		goto unlock_and_exit;

	if (timeout == TM_NONBLOCK) {
		err = -EWOULDBLOCK;
		goto unlock_and_exit;
	}

	if (xnpod_unblockable_p()) {
		err = -EPERM;
		goto unlock_and_exit;
	}

	thread = xnpod_current_thread();
	thread->wait_u.buffer.size = size;
	thread->wait_u.buffer.ptr = NULL;
	info = xnsynch_sleep_on(&heap->synch_base, timeout, XN_RELATIVE);
	if (info & XNRMID)
		err = -EIDRM;	/* Heap deleted while pending. */
	else if (info & XNTIMEO)
		err = -ETIMEDOUT;	/* Timeout. */
	else if (info & XNBREAK)
		err = -EINTR;	/* Unblocked. */
	else
		block = thread->wait_u.buffer.ptr;

      unlock_and_exit:

	*blockp = block;

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
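A caller-side sketch of the blocking allocation path above; my_heap is assumed to have been created with rt_heap_create(), and the timeout counts in ticks of the skin's time base:

	void *block;
	int err;

	/* Wait up to 1000 ticks for a 256-byte block. */
	err = rt_heap_alloc(&my_heap, 256, 1000, &block);
	if (err == 0) {
		/* ... use the block ... */
		rt_heap_free(&my_heap, block);
	}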
Example 21
int rt_cond_wait_prologue(RT_COND *cond, RT_MUTEX *mutex, unsigned *plockcnt,
                          xntmode_t timeout_mode, RTIME timeout)
{
    xnthread_t *thread;
    xnflags_t info;
    spl_t s;
    int err;

    if (timeout == TM_NONBLOCK)
        return -EWOULDBLOCK;

    if (xnpod_unblockable_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock, s);

    cond = xeno_h2obj_validate(cond, XENO_COND_MAGIC, RT_COND);

    if (!cond) {
        err = xeno_handle_error(cond, XENO_COND_MAGIC, RT_COND);
        goto unlock_and_exit;
    }

    mutex = xeno_h2obj_validate(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);

    if (!mutex) {
        err = xeno_handle_error(mutex, XENO_MUTEX_MAGIC, RT_MUTEX);
        goto unlock_and_exit;
    }

    thread = xnpod_current_thread();

    err = xnsynch_owner_check(&mutex->synch_base, thread);

    if (err)
        goto unlock_and_exit;

    /*
     * We can't use rt_mutex_release since that might reschedule
     * before entering xnsynch_sleep_on().
     */
    *plockcnt = mutex->lockcnt; /* Leave even if mutex is nested */

    mutex->lockcnt = 0;

    xnsynch_release(&mutex->synch_base);
    /* Scheduling deferred */

    info = xnsynch_sleep_on(&cond->synch_base,
                            timeout, timeout_mode);
    if (info & XNRMID)
        err = -EIDRM;	/* Condvar deleted while pending. */
    else if (info & XNTIMEO)
        err = -ETIMEDOUT;	/* Timeout. */
    else if (info & XNBREAK) {
        err = -EINTR;	/* Unblocked. */
    }

unlock_and_exit:

    xnlock_put_irqrestore(&nklock, s);

    return err;
}
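A hedged sketch of a complete wait built on this prologue, assuming a matching rt_cond_wait_epilogue(mutex, lockcnt) exists to re-acquire the mutex and restore the saved nesting count; the prologue/epilogue split exists precisely so the mutex can be retaken after the wakeup:

int my_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
{
    unsigned lockcnt;
    int err;

    err = rt_cond_wait_prologue(cond, mutex, &lockcnt,
                                XN_RELATIVE, timeout);

    /* Condvar semantics: re-acquire the mutex even when the wait
       was interrupted or timed out (assumed epilogue behavior). */
    rt_cond_wait_epilogue(mutex, lockcnt);

    return err;
}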
Example 22
File: select.c Project: ArcEye/RTAI
/**
 * Check the state of a number of file descriptors, wait for a state change if
 * no descriptor is ready.
 *
 * @param selector structure to check for pending events
 * @param out_fds The set of descriptors with pending events if a strictly positive number is returned, or the set of descriptors not yet bound if -ECHRNG is returned;
 * @param in_fds the set of descriptors which events should be checked
 * @param nfds the highest-numbered descriptor in any of the @a in_fds sets, plus 1;
 * @param timeout the timeout, whose meaning depends on @a timeout_mode; note
 * that xnselect() passes @a timeout and @a timeout_mode unchanged to
 * xnsynch_sleep_on, so passing a relative value different from XN_INFINITE
 * as a timeout with @a timeout_mode set to XN_RELATIVE will cause a longer
 * sleep than expected if the sleep is interrupted.
 * @param timeout_mode the mode of @a timeout.
 *
 * @retval -EINVAL if @a nfds is negative;
 * @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet
 * been registered with xnselect_bind(), @a out_fds contains the set of such
 * descriptors;
 * @retval -EINTR if @a xnselect was interrupted while waiting;
 * @retval 0 in case of timeout.
 * @retval the number of file descriptors having received an event.
 */
int xnselect(struct xnselector *selector,
	     fd_set *out_fds[XNSELECT_MAX_TYPES],
	     fd_set *in_fds[XNSELECT_MAX_TYPES],
	     int nfds,
	     xnticks_t timeout, xntmode_t timeout_mode)
{
	unsigned i, not_empty = 0;
	xnthread_t *thread;
	spl_t s;

	if ((unsigned) nfds > __FD_SETSIZE)
		return -EINVAL;

	thread = xnpod_current_thread();

	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i])
			fd_set_zeropad(out_fds[i], nfds);

	xnlock_get_irqsave(&nklock, s);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i]
		    && fd_set_andnot(out_fds[i], in_fds[i],
				     &selector->fds[i].expected, nfds))
			not_empty = 1;
	xnlock_put_irqrestore(&nklock, s);

	if (not_empty)
		return -ECHRNG;

	xnlock_get_irqsave(&nklock, s);
	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
		if (out_fds[i]
		    && fd_set_and(out_fds[i], in_fds[i],
				  &selector->fds[i].pending, nfds))
			not_empty = 1;

	while (!not_empty) {
		xnsynch_sleep_on(&selector->synchbase, timeout, timeout_mode);

		for (i = 0; i < XNSELECT_MAX_TYPES; i++)
			if (out_fds[i]
			    && fd_set_and(out_fds[i], in_fds[i],
					  &selector->fds[i].pending, nfds))
				not_empty = 1;

		if (xnthread_test_info(thread, XNBREAK | XNTIMEO))
			break;
	}
	xnlock_put_irqrestore(&nklock, s);

	if (not_empty) {
		unsigned count;

		for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
			if (out_fds[i])
				count += fd_set_popcount(out_fds[i], nfds);

		return count;
	}

	if (xnthread_test_info(thread, XNBREAK))
		return -EINTR;

	return 0; /* Timeout */
}
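A hedged caller sketch, assuming the conventional XNSELECT_READ slot of the per-type vectors and a descriptor already registered with xnselect_bind():

	fd_set rfds_in, rfds_out;
	fd_set *in_fds[XNSELECT_MAX_TYPES] = { NULL };
	fd_set *out_fds[XNSELECT_MAX_TYPES] = { NULL };
	int ret;

	FD_ZERO(&rfds_in);
	FD_SET(fd, &rfds_in);
	in_fds[XNSELECT_READ] = &rfds_in;
	out_fds[XNSELECT_READ] = &rfds_out;

	ret = xnselect(selector, out_fds, in_fds, fd + 1,
		       XN_INFINITE, XN_RELATIVE);
	if (ret > 0 && FD_ISSET(fd, &rfds_out)) {
		/* fd has a pending read event. */
	}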
Example 23
ssize_t rt_buffer_read_inner(RT_BUFFER *bf,
			     struct xnbufd *bufd,
			     xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long rdtoken;
	off_t rdoff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only return complete messages to readers, so there
	 * is no point in waiting for messages which are larger than
	 * what the buffer can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before receiving the
		 * data, so let's always use an absolute time spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to read a complete message of the
		 * requested length, or block.
		 */
		if (bf->fillsz < len)
			goto wait;

		/*
		 * Draw the next read token so that we can later
		 * detect preemption.
		 */
		rdtoken = ++bf->rdtoken;

		/* Read from the buffer in a circular way. */
		rdoff = bf->rdoff;
		rbytes = len;

		do {
			if (rdoff + rbytes > bf->bufsz)
				n = bf->bufsz - rdoff;
			else
				n = rbytes;
			/*
			 * Release the nklock while retrieving the
			 * data to keep latency low.
			 */

			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_from_kmem(bufd, bf->bufmem + rdoff, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while retrieving
			 * the message, we have to re-read the whole
			 * thing.
			 */
			if (bf->rdtoken != rdtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			rdoff = (rdoff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz -= len;
		bf->rdoff = rdoff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the output wait
		 * queue, if we freed enough room for the leading one
		 * to post its message.
		 */
		waiter = xnsynch_peek_pendq(&bf->osynch_base);
		if (waiter && waiter->wait_u.size + bf->fillsz <= bf->bufsz) {
			if (xnsynch_flush(&bf->osynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		/*
		 * Check whether writers are already waiting for
		 * sending data, while we are about to wait for
		 * receiving some. In such a case, we have a
		 * pathological use of the buffer. We must allow for a
		 * short read to prevent a deadlock.
		 */
		if (bf->fillsz > 0 &&
		    xnsynch_nsleepers(&bf->osynch_base) > 0) {
			len = bf->fillsz;
			goto redo;
		}

		thread = xnpod_current_thread();
		thread->wait_u.bufd =  bufd;
		info = xnsynch_sleep_on(&bf->isynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
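The chunked copy loop above is plain ring arithmetic; a standalone sketch of the wrap computation both the read and the write loops perform (hypothetical helper, for illustration only):

static size_t ring_chunk(size_t off, size_t rbytes, size_t bufsz)
{
	/* A transfer of rbytes starting at off in a bufsz-byte ring
	   splits into at most two contiguous chunks; return the size
	   of the next one. */
	if (off + rbytes > bufsz)
		return bufsz - off;	/* first chunk, up to the end */
	return rbytes;			/* fits contiguously */
}

After each chunk, the offset advances as (off + n) % bufsz, exactly as bf->rdoff and bf->wroff do above.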
Example 24
ssize_t rt_buffer_write_inner(RT_BUFFER *bf,
			      struct xnbufd *bufd,
			      xntmode_t timeout_mode, RTIME timeout)
{
	xnthread_t *thread, *waiter;
	size_t len, rbytes, n;
	xnflags_t info;
	u_long wrtoken;
	off_t wroff;
	ssize_t ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * We may only send complete messages, so there is no point in
	 * accepting messages which are larger than what the buffer
	 * can hold.
	 */
	len = bufd->b_len;
	if (len > bf->bufsz) {
		ret = -EINVAL;
		goto unlock_and_exit;
	}

	if (len == 0) {
		ret = 0;
		goto unlock_and_exit;
	}

	if (timeout_mode == XN_RELATIVE &&
	    timeout != TM_NONBLOCK && timeout != TM_INFINITE) {
		/*
		 * We may sleep several times before being able to
		 * send the data, so let's always use an absolute time
		 * spec.
		 */
		timeout_mode = XN_REALTIME;
		timeout += xntbase_get_time(__native_tbase);
	}

redo:
	for (;;) {
		/*
		 * We should be able to write the entire message at
		 * once, or block.
		 */
		if (bf->fillsz + len > bf->bufsz)
			goto wait;

		/*
		 * Draw the next write token so that we can later
		 * detect preemption.
		 */
		wrtoken = ++bf->wrtoken;

		/* Write to the buffer in a circular way. */
		wroff = bf->wroff;
		rbytes = len;

		do {
			if (wroff + rbytes > bf->bufsz)
				n = bf->bufsz - wroff;
			else
				n = rbytes;
			/*
			 * Release the nklock while copying the source
			 * data to keep latency low.
			 */
			xnlock_put_irqrestore(&nklock, s);

			ret = xnbufd_copy_to_kmem(bf->bufmem + wroff, bufd, n);
			if (ret < 0)
				return ret;

			xnlock_get_irqsave(&nklock, s);
			/*
			 * In case we were preempted while writing
			 * the message, we have to resend the whole
			 * thing.
			 */
			if (bf->wrtoken != wrtoken) {
				xnbufd_reset(bufd);
				goto redo;
			}

			wroff = (wroff + n) % bf->bufsz;
			rbytes -= n;
		} while (rbytes > 0);

		bf->fillsz += len;
		bf->wroff = wroff;
		ret = (ssize_t)len;

		/*
		 * Wake up all threads pending on the input wait
		 * queue, if we accumulated enough data to feed the
		 * leading one.
		 */
		waiter = xnsynch_peek_pendq(&bf->isynch_base);
		if (waiter && waiter->wait_u.bufd->b_len <= bf->fillsz) {
			if (xnsynch_flush(&bf->isynch_base, 0) == XNSYNCH_RESCHED)
				xnpod_schedule();
		}

		/*
		 * We cannot fail anymore once some data has been
		 * copied via the buffer descriptor, so no need to
		 * check for any reason to invalidate the latter.
		 */
		goto unlock_and_exit;

	wait:
		if (timeout_mode == XN_RELATIVE && timeout == TM_NONBLOCK) {
			ret = -EWOULDBLOCK;
			break;
		}

		if (xnpod_unblockable_p()) {
			ret = -EPERM;
			break;
		}

		thread = xnpod_current_thread();
		thread->wait_u.size = len;
		info = xnsynch_sleep_on(&bf->osynch_base,
					timeout, timeout_mode);
		if (info & XNRMID) {
			ret = -EIDRM;	/* Buffer deleted while pending. */
			break;
		} else if (info & XNTIMEO) {
			ret = -ETIMEDOUT;	/* Timeout. */
			break;
		} else if (info & XNBREAK) {
			ret = -EINTR;	/* Unblocked. */
			break;
		}
	}

      unlock_and_exit:

	/*
	 * xnpod_schedule() is smarter than us; it will detect any
	 * worthless call inline and won't branch to the rescheduling
	 * code in such a case.
	 */
	xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example 25
ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
{
	struct xnpipe_state *state;
	struct xnholder *h;
	xnthread_t *thread;
	ssize_t ret;
	spl_t s;

	if (minor < 0 || minor >= XNPIPE_NDEVS)
		return -ENODEV;

	if (xnpod_asynch_p())
		return -EPERM;

	state = &xnpipe_states[minor];

	xnlock_get_irqsave(&nklock, s);

	if (!testbits(state->status, XNPIPE_KERN_CONN)) {
		ret = -EBADF;
		goto unlock_and_exit;
	}

	thread = xnpod_current_thread();

	while ((h = getq(&state->inq)) == NULL) {
		if (timeout == XN_NONBLOCK) {
			ret = -EWOULDBLOCK;
			goto unlock_and_exit;
		}

		xnsynch_sleep_on(&state->synchbase, timeout, XN_RELATIVE);

		if (xnthread_test_info(thread, XNTIMEO)) {
			ret = -ETIMEDOUT;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(thread, XNBREAK)) {
			ret = -EINTR;
			goto unlock_and_exit;
		}
		if (xnthread_test_info(thread, XNRMID)) {
			ret = -EIDRM;
			goto unlock_and_exit;
		}

		/* remaining timeout */
		timeout = xnthread_timeout(thread);
	}

	*pmh = link2mh(h);

	ret = (ssize_t) xnpipe_m_size(*pmh);

	if (testbits(state->status, XNPIPE_USER_WSYNC)) {
		__setbits(state->status, XNPIPE_USER_WSYNC_READY);
		xnpipe_schedule_request();
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
Example 26
void xnsynch_sleep_on(xnsynch_t *synch, xnticks_t timeout,
		      xntmode_t timeout_mode)
{
	xnthread_t *thread = xnpod_current_thread(), *owner;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus_synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) { /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

	if (!testbits(synch->status, XNSYNCH_PIP)) { /* i.e. no ownership */
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

redo:
	owner = synch->owner;

	if (!owner) {
		synch->owner = thread;
		xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
		goto unlock_and_exit;
	}

	if (thread->cprio > owner->cprio) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto unlock_and_exit;
		}

		if (!xnthread_test_state(owner, XNBOOST)) {
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			removepq(&owner->claimq, &synch->link);
		else
			__setbits(synch->status, XNSYNCH_CLAIMED);

		insertpqf(&owner->claimq, &synch->link, thread->cprio);
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnsynch_renice_thread(owner, thread->cprio);
	} else
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were
		   ready to run, waiting for the CPU: we need to wait
		   again for the resource. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE)
			goto redo;
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) /* Otherwise, it's too late. */
			goto redo;
		xnthread_set_info(thread, XNTIMEO);
	}

      unlock_and_exit:

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	xnlock_put_irqrestore(&nklock, s);
}