Example #1
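/*
 * Requeue a sleeper whose priority has changed on the pend queue of
 * the object it waits for, updating the owner's priority-inheritance
 * state as needed.
 */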
void xnsynch_renice_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;
	xnthread_t *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, thread->cprio);
	owner = synch->owner;

	if (owner != NULL && thread->cprio > owner->cprio) {
		/* The new priority of the sleeping thread is higher
		 * than the priority of the current owner of the
		 * resource: we need to update the PI state. */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/* The resource is already claimed, just
			   reorder the claim queue. */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link, thread->cprio);
		} else {
			/* The resource was NOT claimed, claim it now
			 * and boost the owner. */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link, thread->cprio);
			/* Do not overwrite the saved base priority if
			 * the owner is already boosted via another
			 * claimed object. */
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}
		}
		/* Renice the owner thread, progressing in the PI
		   chain as needed. */
		xnsynch_renice_thread(owner, thread->cprio);
	}
}
Example #2
void xnsched_weak_setparam(struct xnthread *thread,
			   const union xnsched_policy_param *p)
{
	thread->cprio = p->weak.prio;
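	/* Only assert XNWEAK when no PI boost (XNBOOST) is in effect. */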
	if (!xnthread_test_state(thread, XNBOOST))
		xnthread_set_state(thread, XNWEAK);
}
Example #3
/* Must be called with nklock locked, interrupts off. thread must be
 * runnable. */
void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (xnthread_test_state(thread, XNREADY)) {
		xnsched_dequeue(thread);
		xnthread_clear_state(thread, XNREADY);
	}

	if (sched_class->sched_migrate)
		sched_class->sched_migrate(thread, sched);
	/*
	 * WARNING: the scheduling class may have just changed as a
	 * result of calling the per-class migration hook.
	 */
	xnsched_set_resched(thread->sched);
	thread->sched = sched;

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
	/*
	 * Mark the thread in flight, xnsched_finish_unlocked_switch()
	 * will put the thread on the remote runqueue.
	 */
	xnthread_set_state(thread, XNMIGRATE);
#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
	/* Move thread to the remote runnable queue. */
	xnsched_putback(thread);
#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
}
Example #4
/* NOTE: caller must provide locking */
void xnthread_prepare_wait(struct xnthread_wait_context *wc)
{
	struct xnthread *curr = xnpod_current_thread();

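	/*
	 * Attach the wait context to the current thread and record
	 * whether the XNDEFCAN state bit was already set, so that the
	 * previous state can be restored once the wait ends.
	 */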
	curr->wcontext = wc;
	wc->oldstate = xnthread_test_state(curr, XNDEFCAN);
	xnthread_set_state(curr, XNDEFCAN);
}
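The saved state is meant to be restored once the wait completes. A minimal, hypothetical counterpart is sketched below; the name sample_finish_wait and its exact behavior are assumptions, not the upstream nucleus API:

/* Hypothetical sketch: undo xnthread_prepare_wait(). Caller must
 * provide locking, as above. */
static void sample_finish_wait(struct xnthread_wait_context *wc)
{
	struct xnthread *curr = xnpod_current_thread();

	curr->wcontext = NULL;
	if (!wc->oldstate)
		xnthread_clear_state(curr, XNDEFCAN);
}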
Example #5
/*
 * Must be called with nklock locked, interrupts off. Thread may be
 * blocked.
 */
void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
{
	migrate_thread(thread, sched);

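	/* Unless the thread is blocked, link it back to a runqueue on
	 * its new scheduler and mark it ready to run. */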
	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
		xnsched_requeue(thread);
		xnthread_set_state(thread, XNREADY);
	}
}
Example #6
void ___xnsched_lock(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;

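	/*
	 * On the outermost acquisition only, flag the scheduler as
	 * locked and mark the current thread as holding the scheduler
	 * lock; nested calls just bump the recursion count.
	 */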
	if (curr->lock_count++ == 0) {
		sched->lflags |= XNINLOCK;
		xnthread_set_state(curr, XNLOCK);
	}
}
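The matching unlock path is not shown on this page. A minimal sketch of what it could look like, assuming the same fields; the name sample_sched_unlock and the final xnpod_schedule() call are assumptions, not the upstream ___xnsched_unlock:

void sample_sched_unlock(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;

	/* Hypothetical mirror of ___xnsched_lock() above: lift the
	 * scheduler lock on the outermost release only. */
	if (--curr->lock_count == 0) {
		sched->lflags &= ~XNINLOCK;
		xnthread_clear_state(curr, XNLOCK);
		xnpod_schedule(); /* assumed rescheduling point */
	}
}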
Example #7
/*
 * Detect when a thread is about to sleep on a synchronization
 * object currently owned by someone running in secondary mode.
 */
void xnsynch_detect_relaxed_owner(struct xnsynch *synch, struct xnthread *sleeper)
{
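	/*
	 * Send the warning only once per wait: XNTRAPSW must be set
	 * on the sleeper while XNSWREP (report already sent) is still
	 * clear.
	 */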
	if (xnthread_test_state(sleeper, XNTRAPSW|XNSWREP) == XNTRAPSW &&
	    xnthread_test_state(synch->owner, XNRELAX)) {
		xnthread_set_state(sleeper, XNSWREP);
		xnshadow_send_sig(sleeper, SIGDEBUG,
				  SIGDEBUG_MIGRATE_PRIOINV, 1);
	} else {
		xnthread_clear_state(sleeper, XNSWREP);
	}
}
Example #8
/* Must be called with nklock locked, interrupts off. */
void xnsched_putback(struct xnthread *thread)
{
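	/* Unlink the thread first if it is already queued, otherwise
	 * just mark it ready; it is then (re)queued below. */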
	if (xnthread_test_state(thread, XNREADY))
		xnsched_dequeue(thread);
	else
		xnthread_set_state(thread, XNREADY);

	xnsched_enqueue(thread);
	xnsched_set_resched(thread->sched);
}
Example #9
/* Must be called with nklock locked, interrupts off. */
struct xnthread *xnsched_pick_next(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *p;
	struct xnthread *thread;

	if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) {
		/*
		 * Do not preempt the current thread if it holds the
		 * scheduler lock.
		 */
		if (xnthread_test_state(curr, XNLOCK)) {
			xnsched_set_self_resched(sched);
			return curr;
		}
		/*
		 * Push the current thread back to the runnable queue
		 * of the scheduling class it belongs to, if not yet
		 * linked to it (XNREADY tells us if it is).
		 */
		if (!xnthread_test_state(curr, XNREADY)) {
			xnsched_requeue(curr);
			xnthread_set_state(curr, XNREADY);
		}
#ifdef __XENO_SIM__
		if (nkpod->schedhook)
			nkpod->schedhook(curr, XNREADY);
#endif /* __XENO_SIM__ */
	}

	/*
	 * Find the runnable thread having the highest priority among
	 * all scheduling classes, scanned by decreasing priority.
	 */
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
	for_each_xnsched_class(p) {
		thread = p->sched_pick(sched);
		if (thread) {
			xnthread_clear_state(thread, XNREADY);
			return thread;
		}
	}

	return NULL; /* Never executed because of the idle class. */
#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
	thread = __xnsched_rt_pick(sched);
	(void)p; /* silence unused-variable warning */
	if (unlikely(thread == NULL))
		thread = &sched->rootcb;

	xnthread_clear_state(thread, XNREADY);

	return thread;
#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
}
Example #10
/*
 * Must be called with nklock locked, interrupts off. thread must be
 * runnable.
 */
void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
{
	migrate_thread(thread, sched);

#ifdef CONFIG_XENO_ARCH_UNLOCKED_SWITCH
	/*
	 * Mark the thread in flight, xnsched_finish_unlocked_switch()
	 * will put the thread on the remote runqueue.
	 */
	xnthread_set_state(thread, XNMIGRATE);
#else /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
	/* Move thread to the remote runnable queue. */
	xnsched_putback(thread);
#endif /* !CONFIG_XENO_ARCH_UNLOCKED_SWITCH */
}
Example #11
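/*
 * Update the position of a sleeper on the pend queue of the object it
 * waits for after a (weighted) priority change, propagating the boost
 * along the PI chain when needed.
 */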
void xnsynch_requeue_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner;

	if (!testbits(synch->status, XNSYNCH_PRIO))
		return;

	removepq(&synch->pendq, &thread->plink);
	insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));
	owner = synch->owner;

	if (owner != NULL && w_cprio(thread) > w_cprio(owner)) {
		/*
		 * The new (weighted) priority of the sleeping thread
		 * is higher than the priority of the current owner of
		 * the resource: we need to update the PI state.
		 */
		if (testbits(synch->status, XNSYNCH_CLAIMED)) {
			/*
			 * The resource is already claimed, just
			 * reorder the claim queue.
			 */
			removepq(&owner->claimq, &synch->link);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
		} else {
			/*
			 * The resource was NOT claimed, claim it now
			 * and boost the owner.
			 */
			__setbits(synch->status, XNSYNCH_CLAIMED);
			insertpqf(&owner->claimq, &synch->link,
				  w_cprio(thread));
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}
		}
		/*
		 * Renice the owner thread, progressing in the PI
		 * chain as needed.
		 */
		xnsynch_renice_thread(owner, thread);
	}
}
Example #12
/*
 * Detect when a thread is about to relax while holding a
 * synchronization object currently claimed by another thread which
 * bears the TWARNSW bit, i.e. XNTRAPSW is set (thus advertising a
 * concern about potential spurious relaxes and priority inversion).
 * By relying on the claim
 * queue, we restrict the checks to PIP-enabled objects, but that
 * already covers most of the use cases anyway.
 */
void xnsynch_detect_claimed_relax(struct xnthread *owner)
{
	struct xnpholder *hs, *ht;
	struct xnthread *sleeper;
	struct xnsynch *synch;

	for (hs = getheadpq(&owner->claimq); hs != NULL;
	     hs = nextpq(&owner->claimq, hs)) {
		synch = link2synch(hs);
		for (ht = getheadpq(&synch->pendq); ht != NULL;
		     ht = nextpq(&synch->pendq, ht)) {
			sleeper = link2thread(ht, plink);
			if (xnthread_test_state(sleeper, XNTRAPSW)) {
				xnthread_set_state(sleeper, XNSWREP);
				xnshadow_send_sig(sleeper, SIGDEBUG,
						  SIGDEBUG_MIGRATE_PRIOINV, 1);
			}
		}
	}
}
Example #13
/* Must be called with nklock locked, interrupts off. thread may be
 * blocked. */
void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (xnthread_test_state(thread, XNREADY)) {
		xnsched_dequeue(thread);
		xnthread_clear_state(thread, XNREADY);
	}

	if (sched_class->sched_migrate)
		sched_class->sched_migrate(thread, sched);
	/*
	 * WARNING: the scheduling class may have just changed as a
	 * result of calling the per-class migration hook.
	 */
	xnsched_set_resched(thread->sched);
	thread->sched = sched;

	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
		xnsched_requeue(thread);
		xnthread_set_state(thread, XNREADY);
	}
}
Example #14
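/*
 * Acquire the ownership of a synchronization object, sleeping on it if
 * need be. Supports fast locking, ownership stealing and priority
 * inheritance. Returns zero on success, or the XNRMID, XNTIMEO or
 * XNBREAK information bits left on the current thread on failure.
 */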
xnflags_t xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
			  xntmode_t timeout_mode)
{
	struct xnthread *thread = xnpod_current_thread(), *owner;
	xnhandle_t threadh = xnthread_handle(thread), fastlock, old;
	const int use_fastlock = xnsynch_fastlock_p(synch);
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	trace_mark(xn_nucleus, synch_acquire, "synch %p", synch);

redo:

	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);

		fastlock = xnarch_atomic_cmpxchg(lockp,
						 XN_NO_HANDLE, threadh);

		if (likely(fastlock == XN_NO_HANDLE)) {
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			return 0;
		}

		xnlock_get_irqsave(&nklock, s);

		/*
		 * Set the claimed bit. If it already appears to be
		 * set, re-read the lock word under nklock so that we
		 * do not miss any change between the lock-less read
		 * and here, while still avoiding cmpxchg where
		 * possible. Only when the bit appears to be clear do
		 * we start with cmpxchg directly.
		 */
		if (xnsynch_fast_is_claimed(fastlock)) {
			old = xnarch_atomic_get(lockp);
			goto test_no_owner;
		}
		do {
			old = xnarch_atomic_cmpxchg(lockp, fastlock,
					xnsynch_fast_set_claimed(fastlock, 1));
			if (likely(old == fastlock))
				break;

test_no_owner:
			if (old == XN_NO_HANDLE) {
				/* Owner called xnsynch_release
				   (on another CPU). */
				xnlock_put_irqrestore(&nklock, s);
				goto redo;
			}
			fastlock = old;
		} while (!xnsynch_fast_is_claimed(fastlock));

		owner = xnthread_lookup(xnsynch_fast_mask_claimed(fastlock));

		if (!owner) {
			/* The handle is broken, therefore pretend that the synch
			   object was deleted to signal an error. */
			xnthread_set_info(thread, XNRMID);
			goto unlock_and_exit;
		}

		xnsynch_set_owner(synch, owner);
	} else {
		xnlock_get_irqsave(&nklock, s);

		owner = synch->owner;

		if (!owner) {
			synch->owner = thread;
			if (xnthread_test_state(thread, XNOTHER))
				xnthread_inc_rescnt(thread);
			xnthread_clear_info(thread,
					    XNRMID | XNTIMEO | XNBREAK);
			goto unlock_and_exit;
		}
	}

	xnsynch_detect_relaxed_owner(synch, thread);

	if (!testbits(synch->status, XNSYNCH_PRIO)) /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
	else if (w_cprio(thread) > w_cprio(owner)) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto grab_and_exit;
		}

		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

		if (testbits(synch->status, XNSYNCH_PIP)) {
			if (!xnthread_test_state(owner, XNBOOST)) {
				owner->bprio = owner->cprio;
				xnthread_set_state(owner, XNBOOST);
			}

			if (testbits(synch->status, XNSYNCH_CLAIMED))
				removepq(&owner->claimq, &synch->link);
			else
				__setbits(synch->status, XNSYNCH_CLAIMED);

			insertpqf(&owner->claimq, &synch->link, w_cprio(thread));
			xnsynch_renice_thread(owner, thread);
		}
	} else
		insertpqf(&synch->pendq, &thread->plink, w_cprio(thread));

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were
		   ready to run, waiting for the CPU: we need to wait
		   for the resource again. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) { /* Otherwise, it's too late. */
			xnlock_put_irqrestore(&nklock, s);
			goto redo;
		}
		xnthread_set_info(thread, XNTIMEO);
	} else {

grab_and_exit:

		if (xnthread_test_state(thread, XNOTHER))
			xnthread_inc_rescnt(thread);

		if (use_fastlock) {
			xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
			/* We are the new owner, update the fastlock
			   accordingly. */
			if (xnsynch_pended_p(synch))
				threadh =
				    xnsynch_fast_set_claimed(threadh, 1);
			xnarch_atomic_set(lockp, threadh);
		}
	}

unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
}
Example #15
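/*
 * Make the calling thread sleep on the given synchronization object,
 * queuing it in FIFO or priority order as the object requires, and
 * applying ownership stealing and priority inheritance to PIP-enabled
 * objects.
 */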
void xnsynch_sleep_on(xnsynch_t *synch, xnticks_t timeout,
		      xntmode_t timeout_mode)
{
	xnthread_t *thread = xnpod_current_thread(), *owner;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_sleepon,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	if (!testbits(synch->status, XNSYNCH_PRIO)) { /* i.e. FIFO */
		appendpq(&synch->pendq, &thread->plink);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

	if (!testbits(synch->status, XNSYNCH_PIP)) { /* i.e. no ownership */
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);
		goto unlock_and_exit;
	}

redo:
	owner = synch->owner;

	if (!owner) {
		synch->owner = thread;
		xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
		goto unlock_and_exit;
	}

	if (thread->cprio > owner->cprio) {
		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
			/* Ownership is still pending, steal the resource. */
			synch->owner = thread;
			xnthread_clear_info(thread, XNRMID | XNTIMEO | XNBREAK);
			xnthread_set_info(owner, XNROBBED);
			goto unlock_and_exit;
		}

		if (!xnthread_test_state(owner, XNBOOST)) {
			owner->bprio = owner->cprio;
			xnthread_set_state(owner, XNBOOST);
		}

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			removepq(&owner->claimq, &synch->link);
		else
			__setbits(synch->status, XNSYNCH_CLAIMED);

		insertpqf(&owner->claimq, &synch->link, thread->cprio);
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);
		xnsynch_renice_thread(owner, thread->cprio);
	} else
		insertpqf(&synch->pendq, &thread->plink, thread->cprio);

	xnpod_suspend_thread(thread, XNPEND, timeout, timeout_mode, synch);

	if (xnthread_test_info(thread, XNRMID | XNTIMEO | XNBREAK))
		goto unlock_and_exit;

	if (xnthread_test_info(thread, XNROBBED)) {
		/* Somebody stole the ownership from us while we were
		   ready to run, waiting for the CPU: we need to wait
		   for the resource again. */
		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE)
			goto redo;
		timeout = xntimer_get_timeout_stopped(&thread->rtimer);
		if (timeout > 1) /* Otherwise, it's too late. */
			goto redo;
		xnthread_set_info(thread, XNTIMEO);
	}

unlock_and_exit:

	thread->wwake = NULL;
	xnthread_clear_info(thread, XNWAKEN);

	xnlock_put_irqrestore(&nklock, s);
}