Example #1
inline static void arm_enforcement_timer(struct task_struct* t, int force)
{
	struct enforcement_timer* et;
	lt_t when_to_fire, remaining_budget;
	lt_t now;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;
	if (et->armed) {
		TRACE_TASK(t, "timer already armed!\n");
		return;
	}

	if (!force) {
		if ( (!budget_enforced(t) ||
				(budget_enforced(t) &&
					bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
				&&
			(!budget_signalled(t) ||
				(budget_signalled(t) &&
					bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) {
			TRACE_TASK(t,
					"trying to arm timer when budget "
					"has already been exhausted.\n");
			return;
		}
	}

	TRACE_TASK(t, "arming enforcement timer.\n");

	/* __hrtimer_start_range_ns() cancels the timer
	 * anyway, so we don't have to check whether it is still armed */
	raw_spin_lock_irqsave(&et->lock, flags);

	if (et->armed) {
		TRACE_TASK(t, "timer already armed (race)!\n");
		goto out;
	}

	now = litmus_clock();
	remaining_budget = budget_remaining(t);
	when_to_fire = now + remaining_budget;

	TRACE_TASK(t, "budget remaining: %llu, when_to_fire: %llu\n",
					remaining_budget, when_to_fire);

	__hrtimer_start_range_ns(&et->timer,
				 ns_to_ktime(when_to_fire),
				 0 /* delta */,
				 HRTIMER_MODE_ABS_PINNED,  /* TODO: need to use non-pinned? */
				 0 /* no wakeup */);
	et->armed = 1;

out:
	raw_spin_unlock_irqrestore(&et->lock, flags);
}
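
For completeness, a disarm counterpart to arm_enforcement_timer() could look like the following sketch. This is an illustration, not the original source: it assumes the same enforcement_timer fields used above (timer, lock, armed) and relies on the stock kernel helper hrtimer_try_to_cancel(), which returns -1 while the timer callback is executing.

/* Sketch of a disarm routine matching arm_enforcement_timer() above.
 * Assumes the enforcement_timer layout from Example #1; illustrative only.
 */
static void cancel_enforcement_timer(struct task_struct *t)
{
	struct enforcement_timer *et;
	unsigned long flags;
	int ret;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;

	TRACE_TASK(t, "canceling enforcement timer.\n");

	raw_spin_lock_irqsave(&et->lock, flags);
	if (et->armed) {
		/* hrtimer_try_to_cancel() returns -1 if the callback is
		 * currently running; in that case leave 'armed' set and let
		 * the callback clear it.
		 */
		ret = hrtimer_try_to_cancel(&et->timer);
		if (ret != -1)
			et->armed = 0;
	}
	raw_spin_unlock_irqrestore(&et->lock, flags);
}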
Example #2
static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t, int in_schedule)
{
	/* Assumption: t is scheduled on the CPU executing this callback */

	if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		/* signal exhaustion */
		send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
	}

	if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) {
		if (!is_np(t)) {
			/* t is preemptable, so force a reschedule right away */
			litmus_reschedule_local();
			TRACE("%d is preemptable => FORCE_RESCHED\n", t->pid);
		} else if (is_user_np(t)) {
			/* non-preemptive tasks are preempted once they become
			 * preemptable again; just ask the task to leave its
			 * non-preemptive section
			 */
			TRACE("%d is non-preemptable, preemption delayed.\n", t->pid);
			request_exit_np(t);
		}
	}

	return HRTIMER_NORESTART;
}
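
To tie Example #1 and Example #2 together: once the enforcement timer fires, its hrtimer callback has to dispatch to an on_exhausted handler such as psnedf_simple_on_exhausted(). The sketch below shows one plausible shape for that callback; the container_of() recovery of the enforcement_timer and the assumption that current is the budget-depleted task (the timer is pinned to that task's CPU) are illustrative choices, not the actual LITMUS^RT implementation.

/* Sketch of an hrtimer callback that clears the 'armed' flag and hands
 * off to the on_exhausted handler from Example #2. Assumes the timer is
 * pinned to the CPU on which the task runs, so 'current' is the task
 * whose budget was exhausted.
 */
static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
{
	struct enforcement_timer *et =
		container_of(timer, struct enforcement_timer, timer);
	unsigned long flags;

	raw_spin_lock_irqsave(&et->lock, flags);
	et->armed = 0;
	raw_spin_unlock_irqrestore(&et->lock, flags);

	return psnedf_simple_on_exhausted(current, 0 /* not called from schedule() */);
}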
Example #3
static struct task_struct* psnedf_schedule(struct task_struct * prev)
{
	psnedf_domain_t* 	pedf = local_pedf;
	rt_domain_t*		edf  = &pedf->domain;
	struct task_struct*	next;

	int 			out_of_time, sleep, preempt,
				np, exists, blocks, resched;

	raw_readyq_lock(&pedf->slock);

	/* sanity checking
	 * unlike in gedf, when a task exits (dead),
	 * pedf->scheduled may be NULL while prev _is_ a real-time task
	 */
	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
	BUG_ON(pedf->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists      = pedf->scheduled != NULL;
	blocks      = exists && !is_running(pedf->scheduled);
	out_of_time = exists &&
				  budget_enforced(pedf->scheduled) &&
				  bt_flag_is_set(pedf->scheduled, BTF_BUDGET_EXHAUSTED);
	np 	    = exists && is_np(pedf->scheduled);
	sleep	    = exists && is_completed(pedf->scheduled);
	preempt     = edf_preemption_needed(edf, prev);

	/* If we need to preempt do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* Do budget stuff */
	if (blocks)
		budget_state_machine(prev,on_blocked);
	else if (sleep)
		budget_state_machine(prev,on_sleep);
	else if (preempt)
		budget_state_machine(prev,on_preempt);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(pedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep) && !blocks) {
		job_completion(pedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (pedf->scheduled && !blocks)
			requeue(pedf->scheduled, edf);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	pedf->scheduled = next;
	sched_state_task_picked();
	raw_readyq_unlock(&pedf->slock);

	return next;
}
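
The requeue() helper used above decides between the ready queue and the release queue. A minimal sketch, assuming the standard rt_domain_t helpers __add_ready() and add_release() and the is_released() test against litmus_clock():

/* Sketch of requeue(): a job whose release time has already passed goes
 * back onto the ready queue, otherwise it is handed to the release queue
 * until its next release. Illustrative only.
 */
static void requeue(struct task_struct *t, rt_domain_t *edf)
{
	if (t->state != TASK_RUNNING)
		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");

	tsk_rt(t)->completed = 0;
	if (is_released(t, litmus_clock()))
		__add_ready(edf, t);
	else
		add_release(edf, t); /* it has to wait for its release time */
}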
static struct task_struct* demo_schedule(struct task_struct * prev)
{
        struct demo_cpu_state *local_state = local_cpu_state();

        /* next == NULL means "schedule background work". */
        struct task_struct *next = NULL;

        /* prev's task state */
        int exists, out_of_time, job_completed, self_suspends, preempt, resched;

        raw_spin_lock(&local_state->local_queues.ready_lock);

        BUG_ON(local_state->scheduled && local_state->scheduled != prev);
        BUG_ON(local_state->scheduled && !is_realtime(prev));

        exists = local_state->scheduled != NULL;
        self_suspends = exists && !is_current_running();
        out_of_time = exists && budget_enforced(prev) && budget_exhausted(prev);
        job_completed = exists && is_completed(prev);

        /* preempt is true if task `prev` has lower priority than something on
         * the ready queue. */
        preempt = edf_preemption_needed(&local_state->local_queues, prev);

        /* check all conditions that make us reschedule */
        resched = preempt;

        /* if `prev` suspends, it CANNOT be scheduled anymore => reschedule */
        if (self_suspends) {
                resched = 1;
        }

        /* also check for (in-)voluntary job completions */
        if (out_of_time || job_completed) {
                demo_job_completion(prev, out_of_time);
                resched = 1;
        }

        if (resched) {
                /* First check if the previous task goes back onto the ready
                 * queue, which it does if it did not self_suspend.
                 */
                if (exists && !self_suspends) {
                        demo_requeue(prev, local_state);
                }
                next = __take_ready(&local_state->local_queues);
        } else {
                /* No preemption is required. */
                next = local_state->scheduled;
        }

        local_state->scheduled = next;
        if (exists && prev != next) {
                TRACE_TASK(prev, "descheduled.\n");
        }
        if (next) {
                TRACE_TASK(next, "scheduled.\n");
        }

        /* This is mandatory. It triggers a transition in the LITMUS^RT remote
         * preemption state machine. Call this AFTER the plugin has made a
         * local scheduling decision.
         */
        sched_state_task_picked();

        raw_spin_unlock(&local_state->local_queues.ready_lock);
        return next;
}
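
Finally, a schedule callback such as demo_schedule() only takes effect once it is wired into a scheduler plugin. The sketch below follows the usual LITMUS^RT registration pattern; the exact set of callbacks in struct sched_plugin varies between versions, so treat the field list as an illustrative subset.

/* Sketch: hooking demo_schedule() into a LITMUS^RT plugin. Only the
 * fields relevant to this example are shown; a real plugin also fills
 * in task_new, task_exit, task_wake_up, admit_task, and so on.
 */
static struct sched_plugin demo_plugin = {
	.plugin_name = "DEMO",
	.schedule    = demo_schedule,
};

static int __init init_demo(void)
{
	return register_sched_plugin(&demo_plugin);
}

module_init(init_demo);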