Example #1
int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
{
	struct xnpholder *holder;
	int status;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
		   synch, reason);

	status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;

	while ((holder = getpq(&synch->pendq)) != NULL) {
		struct xnthread *sleeper = link2thread(holder, plink);
		xnthread_set_info(sleeper, reason);
		sleeper->wchan = NULL;
		xnpod_resume_thread(sleeper, XNPEND);
	}

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		xnsynch_clear_boost(synch, synch->owner);
		status = XNSYNCH_RESCHED;
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return status;
}
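
A minimal caller sketch, not taken from the original tree: flushing every waiter when a synchronization object is being deleted. XNRMID tells each sleeper the object went away, and a reschedule is only needed when the return status says at least one thread was readied. The helper name example_delete_synch is hypothetical.

/* Hypothetical helper: flush on deletion, reschedule only if
 * xnsynch_flush() actually unblocked a sleeper. */
static void example_delete_synch(struct xnsynch *synch)
{
	if (xnsynch_flush(synch, XNRMID) == XNSYNCH_RESCHED)
		xnpod_schedule();
}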
Example #2
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);
	trace_mark(xn_nucleus, synch_wakeup_this,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);
	xnpod_resume_thread(thread, XNPEND);

	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
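
Since xnsynch_wakeup_this_sleeper() returns the link to the next sleeper, it lends itself to selective wakeups while walking the pend queue. A hedged sketch, assuming the caller already holds nklock; should_wake() and example_selective_wakeup() are made-up names for illustration.

/* Hypothetical sketch: wake only the sleepers matching a
 * caller-supplied predicate, walking synch->pendq under nklock. */
static void example_selective_wakeup(xnsynch_t *synch,
				     int (*should_wake)(xnthread_t *sleeper))
{
	xnpholder_t *holder = getheadpq(&synch->pendq);

	while (holder) {
		xnthread_t *sleeper = link2thread(holder, plink);

		if (should_wake(sleeper))
			/* Removes this sleeper and yields the next link. */
			holder = xnsynch_wakeup_this_sleeper(synch, holder);
		else
			holder = nextpq(&synch->pendq, holder);
	}
}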
Example #3
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
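
A hedged note on how this static helper is plausibly driven: the public release path would pass the current thread as the last owner, along these lines.

/* Hypothetical public wrapper over xnsynch_release_thread(). */
struct xnthread *example_release(struct xnsynch *synch)
{
	return xnsynch_release_thread(synch, xnpod_current_thread());
}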
Example #4
static void xnsched_watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnpod_current_sched();
	struct xnthread *thread = sched->curr;

	if (likely(xnthread_test_state(thread, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNSHADOW) &&
	    !xnthread_amok_p(thread)) {
		trace_mark(xn_nucleus, watchdog_signal,
			   "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- signaling runaway thread "
			 "'%s'\n", xnthread_name(thread));
		xnthread_set_info(thread, XNAMOK | XNKICKED);
		xnshadow_send_sig(thread, SIGDEBUG, SIGDEBUG_WATCHDOG, 1);
	} else
#endif /* CONFIG_XENO_OPT_PERVASIVE */
	{
		trace_mark(xn_nucleus, watchdog, "thread %p thread_name %s",
			   thread, xnthread_name(thread));
		xnprintf("watchdog triggered -- killing runaway thread '%s'\n",
			 xnthread_name(thread));
		xnpod_delete_thread(thread);
	}
	xnsched_reset_watchdog(sched);
}
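
For context, a hedged sketch of how such a per-scheduler watchdog timer could be registered; xntimer_init() and xntimer_start() follow the Xenomai 2.x signatures, but the 1 Hz period and the nanosecond-resolution master time base (nktbase) are assumptions, as is CONFIG_XENO_OPT_WATCHDOG providing sched->wdtimer.

/* Hypothetical registration sketch (Xenomai 2.x timer API). */
static void example_setup_watchdog(struct xnsched *sched)
{
	xntimer_init(&sched->wdtimer, &nktbase, xnsched_watchdog_handler);
	/* Assumed 1 s tick, so wd_timeout_arg counts seconds. */
	xntimer_start(&sched->wdtimer, 1000000000UL, 1000000000UL,
		      XN_RELATIVE);
}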
Example #5
/*
 * Detect when a thread is about to sleep on a synchronization
 * object currently owned by someone running in secondary mode.
 */
void xnsynch_detect_relaxed_owner(struct xnsynch *synch, struct xnthread *sleeper)
{
	if (xnthread_test_state(sleeper, XNTRAPSW) &&
	    !xnthread_test_info(sleeper, XNSWREP) &&
	    xnthread_test_state(synch->owner, XNRELAX)) {
		xnthread_set_info(sleeper, XNSWREP);
		xnshadow_send_sig(sleeper, SIGDEBUG,
				  SIGDEBUG_MIGRATE_PRIOINV, 1);
	} else
		xnthread_clear_info(sleeper, XNSWREP);
}
Example #6
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
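
A minimal caller sketch: reschedule only when the slow path handed the ownership to a waiter; a NULL return means the fast path released an uncontended lock. example_unlock() is a hypothetical wrapper.

/* Hypothetical wrapper around xnsynch_release(). */
static void example_unlock(struct xnsynch *synch)
{
	if (xnsynch_release(synch) != NULL)
		/* A sleeper was readied; give it a chance to run. */
		xnpod_schedule();
}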
Example #7
static void xnsynch_renice_thread(struct xnthread *thread,
				  struct xnthread *target)
{
	/* Apply the scheduling policy of "target" to "thread" */
	xnsched_track_policy(thread, target);

	if (thread->wchan)
		xnsynch_requeue_sleeper(thread);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(thread, XNRELAX))
		xnshadow_renice(thread);
	else if (xnthread_test_state(thread, XNSHADOW))
		xnthread_set_info(thread, XNPRIOSET);
#endif /* CONFIG_XENO_OPT_PERVASIVE */
}
Example #8
/*
 * Detect when a thread is about to relax while holding a
 * synchronization object currently claimed by another thread, which
 * bears the TWARNSW bit (thus advertising a concern about potential
 * spurious relaxes and priority inversion). By relying on the claim
 * queue, we restrict the checks to PIP-enabled objects, but that
 * already covers most of the use cases anyway.
 */
void xnsynch_detect_claimed_relax(struct xnthread *owner)
{
	struct xnpholder *hs, *ht;
	struct xnthread *sleeper;
	struct xnsynch *synch;

	for (hs = getheadpq(&owner->claimq); hs != NULL;
	     hs = nextpq(&owner->claimq, hs)) {
		synch = link2synch(hs);
		for (ht = getheadpq(&synch->pendq); ht != NULL;
		     ht = nextpq(&synch->pendq, ht)) {
			sleeper = link2thread(ht, plink);
			if (xnthread_test_state(sleeper, XNTRAPSW)) {
				xnthread_set_info(sleeper, XNSWREP);
				xnshadow_send_sig(sleeper, SIGDEBUG,
						  SIGDEBUG_MIGRATE_PRIOINV, 1);
			}
		}
	}
}
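
A hedged sketch of the call-site shape: the check only makes sense when the relaxing owner actually holds claimed objects, so a caller would plausibly gate it on the claim queue. example_before_relax() is illustrative only.

/* Hypothetical call site: run the check when the current owner
 * is about to relax (migrate to secondary mode). */
static void example_before_relax(struct xnthread *curr)
{
	if (!emptypq_p(&curr->claimq))
		xnsynch_detect_claimed_relax(curr);
}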
Example #9
/**
 * @internal
 * @fn void watchdog_handler(struct xntimer *timer)
 * @brief Process watchdog ticks.
 *
 * This internal routine handles incoming watchdog ticks to detect
 * software lockups. It kills any offending thread found to be
 * monopolizing the CPU long enough to starve the Linux kernel.
 *
 * @coretags{coreirq-only, atomic-entry}
 */
static void watchdog_handler(struct xntimer *timer)
{
	struct xnsched *sched = xnsched_current();
	struct xnthread *curr = sched->curr;

	if (likely(xnthread_test_state(curr, XNROOT))) {
		xnsched_reset_watchdog(sched);
		return;
	}

	if (likely(++sched->wdcount < wd_timeout_arg))
		return;

	trace_cobalt_watchdog_signal(curr);

	if (xnthread_test_state(curr, XNUSER)) {
		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
		       "'%s' signaled\n", xnsched_cpu(sched), curr->name);
		xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
	} else {
		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
		       "'%s' canceled\n", xnsched_cpu(sched), curr->name);
		/*
		 * Running on behalf of an IRQ handler, xnthread_cancel()
		 * would only go halfway through cancelling the preempted
		 * thread. Therefore we manually raise XNKICKED to cause
		 * the next call to xnthread_suspend() to return early in
		 * XNBREAK condition, and XNCANCELD so that @thread exits
		 * next time it invokes xnthread_test_cancel().
		 */
		xnthread_set_info(curr, XNKICKED|XNCANCELD);
	}

	xnsched_reset_watchdog(sched);
}
Example #10
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

      redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		fastlock = xnarch_atomic_cmpxchg(lockp,
						 XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/* Set the claimed bit.
		   If it already appears to be set, re-read the lock
		   state under nklock so that we don't miss any change
		   between the lock-less read and here, while still
		   avoiding cmpxchg where possible. Start with cmpxchg
		   directly only when the bit appears to be clear. */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

		  test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another cpu) */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/* The handle is broken, therefore pretend that the synch
			   object was deleted to signal an error. */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were
		   ready to run, waiting for the CPU: we need to wait
		   for the resource again. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

	      grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
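
A hedged caller sketch decoding the information flags returned by xnsynch_acquire(). The errno mapping mirrors common skin-level conventions, and the one-millisecond value assumes a nanosecond-based time base; example_lock() is a made-up name.

#include <linux/errno.h>

/* Hypothetical caller: acquire with a relative timeout, then
 * map the wait-break reasons to errno codes. */
static int example_lock(struct xnsynch *synch)
{
	xnflags_t info;

	info = xnsynch_acquire(synch, 1000000, XN_RELATIVE); /* ~1 ms */
	if (info & XNRMID)
		return -EIDRM;	/* object deleted while pending */
	if (info & XNTIMEO)
		return -ETIMEDOUT;
	if (info & XNBREAK)
		return -EINTR;	/* forcibly unblocked */
	return 0;
}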
Example #11
void xnsynch_sleep_on(xnsynch_t *synch, xnticks_t timeout,
		      xntmode_t timeout_mode)
{
	xnthread_t *thread = xnpod_current_thread(), *owner;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) { /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

	if (!testbits(synch->status, XNSYNCH_PIP)) { /* i.e. no ownership */
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

redo:
	owner = synch->owner;

	if (!owner) {
		synch->owner = thread;
		xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
		goto unlock_and_exit;
	}

	if (thread->cprio > owner->cprio) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto unlock_and_exit;
		}

		if (!xnthread_test_state(owner, XNBOOST)) {
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			removepq(&owner->claimq, &synch->link);
		else
			__setbits(synch->status, XNSYNCH_CLAIMED);

		insertpqf(&owner->claimq, &synch->link, thread->cprio);
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnsynch_renice_thread(owner, thread);
	} else
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were
		   ready to run, waiting for the CPU: we need to wait
		   for the resource again. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE)
			goto redo;
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) /* Otherwise, it's too late. */
			goto redo;
		xnthread_set_info(thread, XNTIMEO);
	}

      unlock_and_exit:

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	xnlock_put_irqrestore(&nklock, s);
}
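
For the non-ownership case, a hedged sketch of a waiter built on xnsynch_sleep_on(), checking afterwards why the wait ended. The helper name and errno mapping are illustrative.

#include <linux/errno.h>

/* Hypothetical waiter: block until woken, deleted or broken. */
static int example_wait(xnsynch_t *synch)
{
	xnthread_t *curr = xnpod_current_thread();

	xnsynch_sleep_on(synch, XN_INFINITE, XN_RELATIVE);

	if (xnthread_test_info(curr, XNRMID))
		return -EIDRM;	/* synch object was deleted */
	if (xnthread_test_info(curr, XNBREAK))
		return -EINTR;	/* wait forcibly interrupted */
	return 0;
}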
Example #12
static void xnthread_timeout_handler(xntimer_t *timer)
{
	xnthread_t *thread = container_of(timer, xnthread_t, rtimer);
	xnthread_set_info(thread, XNTIMEO);	/* Interrupts are off. */
	xnpod_resume_thread(thread, XNDELAY);
}
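
For context, a hedged sketch of wiring this handler to a thread's resource timer; xntimer_init() with a time base and handler matches the Xenomai 2.x API, but the exact call site during thread setup is an assumption.

/* Hypothetical wiring of the per-thread resource timer. */
static void example_init_rtimer(xnthread_t *thread, xntbase_t *tbase)
{
	xntimer_init(&thread->rtimer, tbase, xnthread_timeout_handler);
	xntimer_set_name(&thread->rtimer, xnthread_name(thread));
}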