Example #1
/* Must be called with nklock locked, interrupts off. */
struct xnthread *xnsched_pick_next(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *p;
	struct xnthread *thread;

	if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) {
		/*
		 * Do not preempt the current thread if it holds the
		 * scheduler lock.
		 */
		if (xnthread_test_state(curr, XNLOCK)) {
			xnsched_set_self_resched(sched);
			return curr;
		}
		/*
		 * Push the current thread back to the runnable queue
		 * of the scheduling class it belongs to, if not yet
		 * linked to it (XNREADY tells us if it is).
		 */
		if (!xnthread_test_state(curr, XNREADY)) {
			xnsched_requeue(curr);
			xnthread_set_state(curr, XNREADY);
		}
#ifdef __XENO_SIM__
		if (nkpod->schedhook)
			nkpod->schedhook(curr, XNREADY);
#endif /* __XENO_SIM__ */
	}

	/*
	 * Find the runnable thread having the highest priority among
	 * all scheduling classes, scanned by decreasing priority.
	 */
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
	for_each_xnsched_class(p) {
		thread = p->sched_pick(sched);
		if (thread) {
			xnthread_clear_state(thread, XNREADY);
			return thread;
		}
	}

	return NULL; /* Never executed because of the idle class. */
#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
	thread = __xnsched_rt_pick(sched); (void)p;
	if (unlikely(thread == NULL))
		thread = &sched->rootcb;

	xnthread_clear_state(thread, XNREADY);

	return thread;
#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
}
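All of the examples in this listing revolve around the nucleus thread-state helpers. As a rough mental model they behave like plain bitmask operations on the state word of struct xnthread; the sketch below is illustrative only, not the verbatim Xenomai definitions.

/* Minimal sketch of the state helpers assumed throughout this listing;
 * illustrative, not the actual nucleus implementation. */
#define xnthread_test_state(thread, flags)  ((thread)->state & (flags))
#define xnthread_set_state(thread, flags)   ((thread)->state |= (flags))
#define xnthread_clear_state(thread, flags) ((thread)->state &= ~(flags))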
Example #2
static void xnsynch_clear_boost(struct xnsynch *synch,
				struct xnthread *owner)
{
	struct xnthread *target;
	struct xnsynch *hsynch;
	struct xnpholder *h;
	int wprio;

	removepq(&owner->claimq, &synch->link);
	__clrbits(synch->status, XNSYNCH_CLAIMED);
	wprio = w_bprio(owner);

	if (emptypq_p(&owner->claimq)) {
		xnthread_clear_state(owner, XNBOOST);
		target = owner;
	} else {
		/* Find the highest priority needed to enforce the PIP. */
		hsynch = link2synch(getheadpq(&owner->claimq));
		h = getheadpq(&hsynch->pendq);
		XENO_BUGON(NUCLEUS, h == NULL);
		target = link2thread(h, plink);
		if (w_cprio(target) > wprio)
			wprio = w_cprio(target);
		else
			target = owner;
	}

	if (w_cprio(owner) != wprio &&
	    !xnthread_test_state(owner, XNZOMBIE))
		xnsynch_renice_thread(owner, target);
}
Example #3
/* Must be called with nklock locked, interrupts off. thread must be
 * runnable. */
void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (xnthread_test_state(thread, XNREADY)) {
		xnsched_dequeue(thread);
		xnthread_clear_state(thread, XNREADY);
	}

	if (sched_class->sched_migrate)
		sched_class->sched_migrate(thread, sched);
	/*
	 * WARNING: the scheduling class may have just changed as a
	 * result of calling the per-class migration hook.
	 */
	xnsched_set_resched(thread->sched);
	thread->sched = sched;

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
	/*
	 * Mark the thread in flight, xnsched_finish_unlocked_switch()
	 * will put the thread on the remote runqueue.
	 */
	xnthread_set_state(thread, XNMIGRATE);
#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
	/* Move thread to the remote runnable queue. */
	xnsched_putback(thread);
#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */
}
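Note that with CONFIG_XENO_HW_UNLOCKED_SWITCH enabled, XNMIGRATE is only an in-flight marker here: the thread is actually placed on its new runqueue by xnsched_finish_unlocked_switch() once the context switch completes, where the bit is cleared again (see Examples #14 and #15).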
Example #4
static int __sc_tecreate(struct task_struct *curr, struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	struct vrtx_arg_bulk bulk;
	int prio, mode, tid, err;
	vrtxtask_t *task;

	if (!__xn_access_ok
	    (curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
		return -EFAULT;

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(tid)))
		return -EFAULT;

	__xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
			    sizeof(bulk));

	/* Suggested task id. */
	tid = bulk.a1;
	/* Task priority. */
	prio = bulk.a2;
	/* Task mode. */
	mode = bulk.a3 | 0x100;

	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	task = xnmalloc(sizeof(*task));

	if (!task) {
		err = ER_TCB;
		goto done;
	}

	xnthread_clear_state(&task->threadbase, XNZOMBIE);

	tid = sc_tecreate_inner(task, NULL, tid, prio, mode, 0, 0, NULL, 0, &err);

	if (tid < 0) {
		if (u_completion)
			xnshadow_signal_completion(u_completion, err);
	} else {
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs),
				  &tid, sizeof(tid));
		err = xnshadow_map(&task->threadbase, u_completion);
	}

	if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
		xnfree(task);

done:

	return err;
}
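Clearing XNZOMBIE right after xnmalloc() may look redundant, but the freshly allocated TCB is uninitialized: resetting the bit up front makes the final err && !XNZOMBIE check reliable, so the TCB is freed here only when the deletion machinery (which marks the thread XNZOMBIE) is not going to reclaim it. Example #16 follows the same pattern.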
Example #5
File: synch.c Project: chrmorais/miniemc2
/*
 * Detect when a thread is about to sleep on a synchronization
 * object currently owned by someone running in secondary mode.
 */
void xnsynch_detect_relaxed_owner(struct xnsynch *synch, struct xnthread *sleeper)
{
	if (xnthread_test_state(sleeper, XNTRAPSW|XNSWREP) == XNTRAPSW &&
	    xnthread_test_state(synch->owner, XNRELAX)) {
		xnthread_set_state(sleeper, XNSWREP);
		xnshadow_send_sig(sleeper, SIGDEBUG,
				  SIGDEBUG_MIGRATE_PRIOINV, 1);
	} else
		xnthread_clear_state(sleeper, XNSWREP);
}
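The SIGDEBUG notification sent above is meant to be caught in user space, so the application can log that it blocked on a mutex whose owner is running relaxed. A minimal handler sketch follows, assuming the reason code is delivered in si_value.sival_int as in the Xenomai sigdebug demo; that decoding is an assumption here, not shown by this listing.

#include <signal.h>
#include <unistd.h>

/* Hypothetical user-space handler for the SIGDEBUG sent by
 * xnsynch_detect_relaxed_owner(); reason decoding is assumed. */
static void sigdebug_handler(int sig, siginfo_t *si, void *context)
{
	if (si->si_value.sival_int == SIGDEBUG_MIGRATE_PRIOINV) {
		static const char msg[] = "blocked by a relaxed mutex owner\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	}
}

/* Installed with SA_SIGINFO, e.g. in main():
 *	struct sigaction sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_sigaction = sigdebug_handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigaction(SIGDEBUG, &sa, NULL);
 */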
Example #6
File: sched.c Project: ChunHungLiu/xenomai
void ___xnsched_unlock_fully(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;

	curr->lock_count = 0;
	xnthread_clear_state(curr, XNLOCK);
	xnthread_clear_info(curr, XNLBALERT);
	sched->lflags &= ~XNINLOCK;
	xnsched_run();
}
Example #7
File: sched.c Project: ChunHungLiu/xenomai
static inline void set_thread_running(struct xnsched *sched,
				      struct xnthread *thread)
{
	xnthread_clear_state(thread, XNREADY);
	if (xnthread_test_state(thread, XNRRB))
		xntimer_start(&sched->rrbtimer,
			      thread->rrperiod, XN_INFINITE, XN_RELATIVE);
	else
		xntimer_stop(&sched->rrbtimer);
}
Example #8
File: sched.c Project: ChunHungLiu/xenomai
void ___xnsched_unlock(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;

	if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
		return;

	if (--curr->lock_count == 0) {
		xnthread_clear_state(curr, XNLOCK);
		xnthread_clear_info(curr, XNLBALERT);
		sched->lflags &= ~XNINLOCK;
		xnsched_run();
	}
}
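Examples #6 and #8 only show the release side of the scheduler lock; the acquire side is not part of this listing. Inverting ___xnsched_unlock() suggests a counterpart along the lines of the sketch below, which is an illustration, not the actual Xenomai lock routine.

/* Hypothetical inverse of ___xnsched_unlock() above, shown only to make
 * the lock_count / XNLOCK / XNINLOCK pairing explicit. */
static void sketch_sched_lock(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;

	if (curr->lock_count++ == 0) {
		xnthread_set_state(curr, XNLOCK);
		sched->lflags |= XNINLOCK;
	}
}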
Example #9
/* NOTE: caller must provide locking */
void xnthread_finish_wait(struct xnthread_wait_context *wc,
			  void (*cleanup)(struct xnthread_wait_context *wc))
{
	struct xnthread *curr = xnpod_current_thread();

	curr->wcontext = NULL;
	if ((wc->oldstate & XNDEFCAN) == 0)
		xnthread_clear_state(curr, XNDEFCAN);

	if (xnthread_test_state(curr, XNCANPND)) {
		if (cleanup)
			cleanup(wc);
		xnpod_delete_self();
	}
}
Example #10
File: sched.c Project: ChunHungLiu/xenomai
static void migrate_thread(struct xnthread *thread, struct xnsched *sched)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (xnthread_test_state(thread, XNREADY)) {
		xnsched_dequeue(thread);
		xnthread_clear_state(thread, XNREADY);
	}

	if (sched_class->sched_migrate)
		sched_class->sched_migrate(thread, sched);
	/*
	 * WARNING: the scheduling class may have just changed as a
	 * result of calling the per-class migration hook.
	 */
	xnsched_set_resched(thread->sched);
	thread->sched = sched;
}
Example #11
void xnsynch_forget_sleeper(struct xnthread *thread)
{
	struct xnsynch *synch = thread->wchan;
	struct xnthread *owner, *target;
	struct xnpholder *h;

	trace_mark(xn_nucleus, synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		owner = synch->owner;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else {
			target = link2thread(getheadpq(&synch->pendq), plink);
			h = getheadpq(&owner->claimq);
			if (w_cprio(target) != h->prio) {
				/*
				 * Reorder the claim queue, and lower
				 * the priority to the required
				 * minimum needed to prevent priority
				 * inversion.
				 */
				removepq(&owner->claimq, &synch->link);
				insertpqf(&owner->claimq, &synch->link,
					  w_cprio(target));

				h = getheadpq(&owner->claimq);
				if (h->prio < w_cprio(owner))
					xnsynch_renice_thread(owner, target);
			}
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
Example #12
static void xnsynch_clear_boost(xnsynch_t *synch, xnthread_t *lastowner)
{
	int downprio;

	removepq(&lastowner->claimq, &synch->link);
	downprio = lastowner->bprio;
	__clrbits(synch->status, XNSYNCH_CLAIMED);

	if (emptypq_p(&lastowner->claimq))
		xnthread_clear_state(lastowner, XNBOOST);
	else {
		/* Find the highest priority needed to enforce the PIP. */
		int rprio = getheadpq(&lastowner->claimq)->prio;

		if (rprio > downprio)
			downprio = rprio;
	}

	if (lastowner->cprio != downprio)
		xnsynch_renice_thread(lastowner, downprio);
}
Example #13
void xnsynch_forget_sleeper(xnthread_t *thread)
{
	xnsynch_t *synch = thread->wchan;

	trace_mark(xn_nucleus_synch_forget,
		   "thread %p thread_name %s synch %p",
		   thread, xnthread_name(thread), synch);

	xnthread_clear_state(thread, XNPEND);
	thread->wchan = NULL;
	removepq(&synch->pendq, &thread->plink);

	if (testbits(synch->status, XNSYNCH_CLAIMED)) {
		/* Find the highest priority needed to enforce the PIP. */
		xnthread_t *owner = synch->owner;
		int rprio;

		if (emptypq_p(&synch->pendq))
			/* No more sleepers: clear the boost. */
			xnsynch_clear_boost(synch, owner);
		else if (getheadpq(&synch->pendq)->prio !=
			 getheadpq(&owner->claimq)->prio) {
			/* Reorder the claim queue, and lower the priority to the
			   required minimum needed to prevent priority
			   inversion. */
			removepq(&owner->claimq, &synch->link);

			insertpqf(&owner->claimq,
				  &synch->link, getheadpq(&synch->pendq)->prio);

			rprio = getheadpq(&owner->claimq)->prio;

			if (rprio < owner->cprio)
				xnsynch_renice_thread(owner, rprio);
		}
	}

	xnarch_post_graph_if(synch, 0, emptypq_p(&synch->pendq));
}
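Examples #12 and #13 appear to be an earlier revision of the PIP machinery shown in Examples #2 and #11: the boost is expressed directly as a priority value handed to xnsynch_renice_thread(), whereas the later code computes weighted priorities via w_bprio()/w_cprio() and passes the thread the owner should inherit from.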
Example #14
File: sched.c Project: ChunHungLiu/xenomai
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	struct xnthread *last;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_SMP
	/* If current thread migrated while suspended */
	sched = xnsched_current();
#endif /* CONFIG_SMP */

	last = sched->last;
	sched->status &= ~XNINSW;

	/* Detect a thread which called xnthread_migrate() */
	if (last->sched != sched) {
		xnsched_putback(last);
		xnthread_clear_state(last, XNMIGRATE);
	}

	return sched;
}
Example #15
struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	struct xnthread *last;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

#ifdef CONFIG_SMP
	/* If current thread migrated while suspended */
	sched = xnpod_current_sched();
#endif /* CONFIG_SMP */

	last = sched->last;
	__clrbits(sched->status, XNSWLOCK);

	/* Detect a thread which called xnpod_migrate_thread */
	if (last->sched != sched) {
		xnsched_putback(last);
		xnthread_clear_state(last, XNMIGRATE);
	}

	if (xnthread_test_state(last, XNZOMBIE)) {
		/*
		 * There are two cases where sched->last has the zombie
		 * bit:
		 * - either it had it before the context switch, the hooks
		 * have been executed and sched->zombie is last;
		 * - or it has been killed while the nklock was unlocked
		 * during the context switch, in which case we must run the
		 * hooks, and we do it now.
		 */
		if (sched->zombie != last)
			xnsched_zombie_hooks(last);
	}

	return sched;
}
Example #16
static int __wind_task_init(struct task_struct *curr, struct pt_regs *regs)
{
	xncompletion_t __user *u_completion;
	char name[XNOBJECT_NAME_LEN];
	struct wind_arg_bulk bulk;
	int err = 0, prio, flags;
	WIND_TCB_PLACEHOLDER ph;
	WIND_TCB *task;

	if (!__xn_access_ok
	    (curr, VERIFY_READ, __xn_reg_arg1(regs), sizeof(bulk)))
		return -EFAULT;

	if (!__xn_access_ok
	    (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(ph)))
		return -EFAULT;

	__xn_copy_from_user(curr, &bulk, (void __user *)__xn_reg_arg1(regs),
			    sizeof(bulk));

	if (bulk.a1) {
		if (!__xn_access_ok(curr, VERIFY_READ, bulk.a1, sizeof(name)))
			return -EFAULT;

		__xn_strncpy_from_user(curr, name, (const char __user *)bulk.a1,
				       sizeof(name) - 1);
		name[sizeof(name) - 1] = '\0';
		strncpy(curr->comm, name, sizeof(curr->comm));
		curr->comm[sizeof(curr->comm) - 1] = '\0';
	} else
		*name = '\0';

	/* Task priority. */
	prio = bulk.a2;
	/* Task flags. */
	flags = bulk.a3 | VX_SHADOW;
	/* Completion descriptor our parent thread is pending on. */
	u_completion = (xncompletion_t __user *)__xn_reg_arg3(regs);

	task = (WIND_TCB *)xnmalloc(sizeof(*task));

	if (!task) {
		if (u_completion)
			xnshadow_signal_completion(u_completion, -ENOMEM);

		return -ENOMEM;
	}

	xnthread_clear_state(&task->threadbase, XNZOMBIE);

	/* Force FPU support in user-space. This will lead to a no-op if
	   the platform does not support it. */

	if (taskInit(task, name, prio, flags, NULL, 0, NULL,
		     0, 0, 0, 0, 0, 0, 0, 0, 0, 0) == OK) {
		/* Let the skin discard the TCB memory upon exit. */
		task->auto_delete = 1;
		task->ptid = bulk.a4;
		/* Copy back the registry handle to the ph struct. */
		ph.handle = xnthread_handle(&task->threadbase);
		__xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &ph,
				  sizeof(ph));
		err = xnshadow_map(&task->threadbase, u_completion);
	} else {
		/* Unblock and pass back error code. */

		err = wind_errnoget();

		if (u_completion)
			xnshadow_signal_completion(u_completion, err);
	}

	if (err && !xnthread_test_state(&task->threadbase, XNZOMBIE))
		xnfree(task);

	return err;
}