Example #1
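/*
 * Flush a synchronization object: every sleeper on the pend queue is
 * readied with @reason set in its info bits, and any priority boost
 * claimed through this object is dropped. Returns XNSYNCH_RESCHED if
 * the caller should reschedule, XNSYNCH_DONE otherwise.
 */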
int xnsynch_flush(struct xnsynch *synch, xnflags_t reason)
{
	struct xnpholder *holder;
	int status;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_flush, "synch %p reason %lu",
		   synch, reason);

	status = emptypq_p(&synch->pendq) ? XNSYNCH_DONE : XNSYNCH_RESCHED;

	while ((holder = getpq(&synch->pendq)) != NULL) {
		struct xnthread *sleeper = link2thread(holder, plink);
		xnthread_set_info(sleeper, reason);
		sleeper->wchan = NULL;
		xnpod_resume_thread(sleeper, XNPEND);
	}

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		xnsynch_clear_boost(synch, synch->owner);
		status = XNSYNCH_RESCHED;
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return status;
}
Example #2
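/*
 * Release the object on behalf of @lastowner. On the contended path,
 * ownership is transferred to the highest-priority waiter, which is
 * readied and returned; NULL is returned when the fastlock was
 * released uncontended or no thread was pending.
 */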
static struct xnthread *
xnsynch_release_thread(struct xnsynch *synch, struct xnthread *lastowner)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	xnhandle_t lastownerh, newownerh;
	struct xnthread *newowner;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

#ifdef CONFIG_XENO_OPT_PERVASIVE
	if (xnthread_test_state(lastowner, XNOTHER)) {
		if (xnthread_get_rescnt(lastowner) == 0)
			xnshadow_send_sig(lastowner, SIGDEBUG,
					  SIGDEBUG_MIGRATE_PRIOINV, 1);
		else
			xnthread_dec_rescnt(lastowner);
	}
#endif
	lastownerh = xnthread_handle(lastowner);

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
Example #3
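/*
 * Ready the thread at the head of the pend queue of an ownerless
 * object. Returns the awakened thread, or NULL if no thread was
 * pending.
 */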
struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
{
	struct xnthread *thread = NULL;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, testbits(synch->status, XNSYNCH_OWNER));

	xnlock_get_irqsave(&nklock, s);

	holder = getpq(&synch->pendq);
	if (holder) {
		thread = link2thread(holder, plink);
		thread->wchan = NULL;
		trace_mark(xn_nucleus, synch_wakeup_one,
			   "thread %p thread_name %s synch %p",
			   thread, xnthread_name(thread), synch);
		xnpod_resume_thread(thread, XNPEND);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return thread;
}
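For context, a minimal sketch of the usual caller pattern for a non-owned (event/semaphore-style) object, modeled on the ev_send() path in Example #9 below; demo_post() is an illustrative name, not part of the nucleus API:

static void demo_post(struct xnsynch *synchbase)
{
	spl_t s;

	/* nklock nests on the same CPU, so the wakeup call below may
	   re-acquire it safely. */
	xnlock_get_irqsave(&nklock, s);

	if (xnsynch_wakeup_one_sleeper(synchbase))
		/* A sleeper was readied; let the nucleus reschedule. */
		xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);
}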
Example #4
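/*
 * Drop the boost @owner inherited through @synch. If the owner still
 * claims other boosting objects, the highest-priority waiter across
 * them dictates the priority to fall back to (PIP enforcement);
 * otherwise XNBOOST is cleared and the base priority applies again.
 */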
static void xnsynch_clear_boost(struct xnsynch *synch,
				struct xnthread *owner)
{
	struct xnthread *target;
	struct xnsynch *hsynch;
	struct xnpholder *h;
	int wprio;

	removepq(&owner->claimq, &synch->link);
	__clrbits(synch->status, XNSYNCH_CLAIMED);
	wprio = w_bprio(owner);

	if (emptypq_p(&owner->claimq)) {
		xnthread_clear_state(owner, XNBOOST);
		target = owner;
	} else {
		/* Find the highest priority needed to enforce the PIP. */
		hsynch = link2synch(getheadpq(&owner->claimq));
		h = getheadpq(&hsynch->pendq);
		XENO_BUGON(NUCLEUS, h == NULL);
		target = link2thread(h, plink);
		if (w_cprio(target) > wprio)
			wprio = w_cprio(target);
		else
			target = owner;
	}

	if (w_cprio(owner) != wprio &&
	    !xnthread_test_state(owner, XNZOMBIE))
		xnsynch_renice_thread(owner, target);
}
Example #5
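/*
 * Ready one specific waiter (@holder) wherever it sits in the pend
 * queue and grant it ownership of the object. Returns the next queue
 * element, so callers can walk the remaining sleepers.
 */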
xnpholder_t *xnsynch_wakeup_this_sleeper(xnsynch_t *synch, xnpholder_t *holder)
{
	xnthread_t *thread, *lastowner;
	xnpholder_t *nholder;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	lastowner = synch->owner;
	nholder = poppq(&synch->pendq, holder);

	thread = link2thread(holder, plink);
	thread->wchan = NULL;
	thread->wwake = synch;
	synch->owner = thread;
	xnthread_set_info(thread, XNWAKEN);
	trace_mark(xn_nucleus_synch_wakeup_all,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);
	xnpod_resume_thread(thread, XNPEND);

	if (testbits(synch->status, XNSYNCH_CLAIMED))
		xnsynch_clear_boost(synch, lastowner);

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return nholder;
}
Example #6
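/*
 * Unlink @thread from the object it pends on (e.g. upon timeout or
 * deletion). If the object boosts its owner, either clear the boost
 * when no sleeper remains, or re-sort the owner's claim queue and
 * adjust its priority to track the new highest-priority waiter.
 */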
void xnsynch_forget_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner, *target;
	struct xnpholder *h;

	trace_mark(xn_nucleus, synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		owner = synch->owner;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else {
			target = link2thread(getheadpq(&synch->pendq), plink);
			h = getheadpq(&owner->claimq);
			if (w_cprio(target) != h->prio) {
				/*
				 * Reorder the claim queue, and lower
				 * the priority to the required
				 * minimum needed to prevent priority
				 * inversion.
				 */
				removepq(&owner->claimq, &synch->link);
				insertpqf(&owner->claimq, &synch->link,
					  w_cprio(target));

				h = getheadpq(&owner->claimq);
				if (h->prio < w_cprio(owner))
					xnsynch_renice_thread(owner, target);
			}
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
Example #7
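/*
 * A variant revision of the same routine; here the claim queue is
 * keyed on raw queue priorities, and xnsynch_renice_thread() takes a
 * priority value rather than a target thread.
 */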
void xnsynch_forget_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;

	trace_mark(xn_nucleus_synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		xnthread_t *owner = synch->owner;
		int rprio;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else if (getheadpq(&synch->pendq)->prio !=
			 getheadpq(&owner->claimq)->prio) {
			/* Reorder the claim queue, and lower the priority to the
			   required minimum needed to prevent priority
			   inversion. */
			removepq(&owner->claimq, &synch->link);

			insertpqf(&owner->claimq,
				  &synch->link, getheadpq(&synch->pendq)->prio);

			rprio = getheadpq(&owner->claimq)->prio;

			if (rprio < owner->cprio)
				xnsynch_renice_thread(owner, rprio);
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
Example #8
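/*
 * Release the object owned by the current thread. Semantics match
 * xnsynch_release_thread() in Example #2: the highest-priority
 * waiter, if any, becomes the new owner and is returned; NULL means
 * an uncontended fast-path release or an empty pend queue.
 */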
struct xnthread *xnsynch_release(struct xnsynch *synch)
{
	const int use_fastlock = xnsynch_fastlock_p(synch);
	struct xnthread *newowner, *lastowner;
	xnhandle_t lastownerh, newownerh;
	struct xnpholder *holder;
	spl_t s;

	XENO_BUGON(NUCLEUS, !testbits(synch->status, XNSYNCH_OWNER));

	lastownerh = xnthread_handle(xnpod_current_thread());

	if (use_fastlock &&
	    likely(xnsynch_fast_release(xnsynch_fastlock(synch), lastownerh)))
		return NULL;

	xnlock_get_irqsave(&nklock, s);

	trace_mark(xn_nucleus, synch_release, "synch %p", synch);

	holder = getpq(&synch->pendq);
	if (holder) {
		newowner = link2thread(holder, plink);
		newowner->wchan = NULL;
		newowner->wwake = synch;
		lastowner = synch->owner;
		synch->owner = newowner;
		xnthread_set_info(newowner, XNWAKEN);
		xnpod_resume_thread(newowner, XNPEND);

		if (testbits(synch->status, XNSYNCH_CLAIMED))
			xnsynch_clear_boost(synch, lastowner);

		newownerh = xnsynch_fast_set_claimed(xnthread_handle(newowner),
						     xnsynch_pended_p(synch));
	} else {
		newowner = NULL;
		synch->owner = NULL;
		newownerh = XN_NO_HANDLE;
	}
	if (use_fastlock) {
		xnarch_atomic_t *lockp = xnsynch_fastlock(synch);
		xnarch_atomic_set(lockp, newownerh);
	}

	xnlock_put_irqrestore(&nklock, s);

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));

	return newowner;
}
Example #9
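/*
 * pSOS skin: post @events to the event group of task @tid. Since
 * only the owning task may pend on its own group, at most one
 * sleeper exists; it is woken and a reschedule is requested once its
 * EV_ANY/all-bits wait condition is satisfied.
 */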
u_long ev_send(u_long tid, u_long events)
{
	u_long err = SUCCESS;
	psosevent_t *evgroup;
	psostask_t *task;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	task = psos_h2obj_active(tid, PSOS_TASK_MAGIC, psostask_t);

	if (!task) {
		err = psos_handle_error(tid, PSOS_TASK_MAGIC, psostask_t);
		goto unlock_and_exit;
	}

	evgroup = &task->evgroup;
	evgroup->events |= events;

	/* Only the task to which the event group pertains can
	   pend on it. */

	if (!emptypq_p(xnsynch_wait_queue(&evgroup->synchbase))) {
		u_long flags = task->waitargs.evgroup.flags;
		u_long bits = task->waitargs.evgroup.events;

		if (((flags & EV_ANY) && (bits & evgroup->events) != 0) ||
		    (!(flags & EV_ANY) && ((bits & evgroup->events) == bits))) {
			xnsynch_wakeup_one_sleeper(&evgroup->synchbase);
			task->waitargs.evgroup.events =
			    (bits & evgroup->events);
			evgroup->events &= ~bits;
			xnpod_schedule();
		}
	}

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
Example #10
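/*
 * A variant revision of xnsynch_clear_boost(); the de-boosted
 * priority is computed as a raw value (base priority vs. head of the
 * claim queue) instead of selecting a target thread to inherit from.
 */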
static void xnsynch_clear_boost(xnsynch_t *synch, xnthread_t *lastowner)
{
	int downprio;

	removepq(&lastowner->claimq, &synch->link);
	downprio = lastowner->bprio;
	__clrbits(synch->status, XNSYNCH_CLAIMED);

	if (emptypq_p(&lastowner->claimq))
		xnthread_clear_state(lastowner, XNBOOST);
	else {
		/* Find the highest priority needed to enforce the PIP. */
		int rprio = getheadpq(&lastowner->claimq)->prio;

		if (rprio > downprio)
			downprio = rprio;
	}

	if (lastowner->cprio != downprio)
		xnsynch_renice_thread(lastowner, downprio);
}