Example #1
/*
 * Generic function to trigger preemption on either local or remote cpu
 * from scheduler plugins. The key feature is that this function is
 * non-preemptive section aware and does not invoke the scheduler / send
 * IPIs if the to-be-preempted task is actually non-preemptive.
 */
void preempt_if_preemptable(struct task_struct* t, int cpu)
{
	/* t is the real-time task executing on CPU 'cpu'. If t is NULL, then
	 * 'cpu' is currently scheduling background work.
	 */

	int reschedule = 0;

	if (!t)
		/* move non-real-time task out of the way */
		reschedule = 1;
	else {
		if (smp_processor_id() == cpu) {
			/* local CPU case */
			/* check if we need to poke userspace */
			if (is_user_np(t))
				/* Yes, poke it. This doesn't have to be atomic since
				 * the task is definitely not executing. */
				request_exit_np(t);
			else if (!is_kernel_np(t))
				/* only if we are allowed to preempt the
				 * currently-executing task */
				reschedule = 1;
		} else {
			/* Remote CPU case.  Only notify if it's not a kernel
			 * NP section and if we didn't set the userspace
			 * flag. */
			reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t));
		}
	}
	if (likely(reschedule))
		litmus_reschedule(cpu);
}
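
The helper above is usually reached through a thin per-partition wrapper. The sketch below assumes a psnedf_domain_t with the `scheduled` and `cpu` fields used in Example #3; the wrapper name itself is illustrative rather than taken from the source. It shows how a plugin can hand the currently scheduled task (possibly NULL) and the partition's CPU to preempt_if_preemptable() whenever a higher-priority job arrives, and let the helper decide between a local reschedule, an IPI, and a deferred exit-np request.

/* Illustrative wrapper, assuming a psnedf_domain_t with the fields used
 * in Example #3. All preemption logic is deferred to
 * preempt_if_preemptable(). */
static void check_for_preemption(psnedf_domain_t *pedf)
{
	/* pedf->scheduled may be NULL, meaning the partition is currently
	 * running background (non-real-time) work. */
	preempt_if_preemptable(pedf->scheduled, pedf->cpu);
}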
Example #2
static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t, int in_schedule)
{
	/* Assumption: t is scheduled on the CPU executing this callback */

	if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		/* signal exhaustion */
		send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
	}

	if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) {
		if (!is_np(t)) {
			/* np tasks will be preempted when they become
			 * preemptable again
			 */
			litmus_reschedule_local();
			TRACE("%d is preemptable => FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			TRACE("%d is non-preemptable, preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}

	return HRTIMER_NORESTART;
}
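
The callback above is meant to be driven by per-task budget tracking, the same machinery that budget_state_machine() dispatches through in Example #3. The sketch below assumes an ops table named struct budget_tracker_ops with an on_exhausted field; both names are assumptions based on that interface and may differ between LITMUS^RT versions.

/* Illustrative ops table (names assumed, see note above): the budget
 * enforcement timer invokes .on_exhausted when a job overruns its
 * budget, while the scheduler-side hooks drive the rest of the
 * per-task budget state machine. */
static struct budget_tracker_ops psnedf_simple_budget_ops = {
	.on_exhausted	= psnedf_simple_on_exhausted,
	/* .on_blocked, .on_sleep, .on_preempt, ... omitted in this sketch */
};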
Example #3
static struct task_struct* psnedf_schedule(struct task_struct * prev)
{
	psnedf_domain_t* 	pedf = local_pedf;
	rt_domain_t*		edf  = &pedf->domain;
	struct task_struct*	next;

	int 			out_of_time, sleep, preempt,
				np, exists, blocks, resched;

	raw_readyq_lock(&pedf->slock);

	/* sanity checking:
	 * unlike gedf, when a task exits (is dead),
	 * pedf->scheduled may be NULL while prev _is_ a real-time task
	 */
	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
	BUG_ON(pedf->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists      = pedf->scheduled != NULL;
	blocks      = exists && !is_running(pedf->scheduled);
	out_of_time = exists &&
				  budget_enforced(pedf->scheduled) &&
				  bt_flag_is_set(pedf->scheduled, BTF_BUDGET_EXHAUSTED);
	np 	    = exists && is_np(pedf->scheduled);
	sleep	    = exists && is_completed(pedf->scheduled);
	preempt     = edf_preemption_needed(edf, prev);

	/* If we need to preempt do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* Advance prev's budget-tracking state machine based on why it is
	 * leaving the CPU. */
	if (blocks)
		budget_state_machine(prev, on_blocked);
	else if (sleep)
		budget_state_machine(prev, on_sleep);
	else if (preempt)
		budget_state_machine(prev, on_preempt);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(pedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep) && !blocks) {
		job_completion(pedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block,
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (pedf->scheduled && !blocks)
			requeue(pedf->scheduled, edf);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	pedf->scheduled = next;
	sched_state_task_picked();
	raw_readyq_unlock(&pedf->slock);

	return next;
}
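
To close the picture, the sketch below shows how the schedule callback would be exposed through the LITMUS^RT plugin interface. Only the .plugin_name and .schedule fields are assumed here; a real plugin registers several more callbacks (wake-up, block, task exit, admission), and exact field names may vary between versions.

/* Illustrative registration (see note above). Returning NULL from
 * psnedf_schedule() means "become idle" and lets the stock Linux
 * scheduler pick background work for this CPU. */
static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = {
	.plugin_name	= "PSN-EDF",
	.schedule	= psnedf_schedule,
	/* other callbacks omitted in this sketch */
};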