Example #1
static void pfair_task_wake_up(struct task_struct *t)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
		   litmus_clock(), cur_release(t), pfair_time);

	raw_spin_lock_irqsave(&pfair_lock, flags);

	/* It is a little unclear how to deal with Pfair
	 * tasks that block for a while and then wake. For now,
	 * if a task blocks and wakes before its next job release,
	 * then it may resume if it is currently linked somewhere
	 * (as if it never blocked at all). Otherwise, we have a
	 * new sporadic job release.
	 */
	if (tsk_pfair(t)->sporadic_release) {
		now = litmus_clock();
		release_at(t, now);
		prepare_release(t, time2quanta(now, CEIL));
		sched_trace_task_release(t);
		/* FIXME: race with pfair_time advancing */
		pfair_add_release(t);
		tsk_pfair(t)->sporadic_release = 0;
	}

	check_preempt(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
}
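
The release computed here is rounded up to a quantum boundary by time2quanta(now, CEIL). Below is a minimal userspace sketch of that ceiling conversion, assuming quanta have a fixed length in nanoseconds; the quantum length, function name, and driver are illustrative, not LITMUS code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical quantum length: 1 ms in nanoseconds. */
#define QUANTUM_NS 1000000ULL

/* Round a time up to the next quantum boundary (CEIL semantics). */
static uint64_t time2quanta_ceil(uint64_t time_ns)
{
	return (time_ns + QUANTUM_NS - 1) / QUANTUM_NS;
}

int main(void)
{
	/* A wake-up 2.3 ms after time zero lands in quantum 3 under CEIL. */
	printf("quantum = %llu\n",
	       (unsigned long long) time2quanta_ceil(2300000ULL));
	return 0;
}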
Example #2
void sobliv_on_blocked(struct task_struct* t)
{
	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
		/* There is a short window during which the time tracked by the
		 * runqueue and the suspension time are double-counted.
		 * TODO: Do this recording closer to suspension time. */
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

		if (!tsk_rt(t)->budget.timer.armed) {
			/* The budget exhaustion timer fired as t was waking up, so the
			 * budget routine thought t was running. We need to re-trigger the
			 * budget exhaustion routine via the timer. Schedulers do not call
			 * job_completion() when a task blocks, even if t's budget has been
			 * exhausted. Unfortunately, we cannot rerun the exhaustion routine
			 * here due to spinlock ordering issues. Just re-arm the timer with
			 * the already-exhausted time so the timer routine re-runs
			 * immediately once interrupts have been re-enabled. */

			/* Clear the exhausted flag so the handler will re-run. This will
			 * not trigger another exhaustion signal since signals are
			 * controlled by BTF_SIG_BUDGET_SENT. */
			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);

			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
				TRACE_TASK(t, "budget timer not armed. "
						   "Raced with exhaustion-resched? Re-arming.\n");
				arm_enforcement_timer(t, 1);
			}
			else {
				TRACE_TASK(t, "not arming timer because task is waiting "
								"for release.\n");
			}
		}
	}
}
Example #3
int cancel_enforcement_timer(struct task_struct* t)
{
	struct enforcement_timer* et;
	int ret = 0;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;

	TRACE_TASK(t, "canceling enforcement timer.\n");

	if (et->armed) {
		raw_spin_lock_irqsave(&et->lock, flags);
		if (et->armed) {
			ret = hrtimer_try_to_cancel(&et->timer);
			if (ret < 0)
				TRACE_TASK(t, "timer already running. failed to cancel.\n");
			else {
				TRACE_TASK(t, "canceled timer with %lld ns remaining.\n",
					ktime_to_ns(hrtimer_expires_remaining(&et->timer)));
				et->armed = 0;
			}
		}
		else
			TRACE_TASK(t, "timer was not armed (race).\n");
		raw_spin_unlock_irqrestore(&et->lock, flags);
	}
	else
		TRACE_TASK(t, "timer was not armed.\n");

	return ret;
}
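
Note that the armed flag is tested twice: once without the lock as a cheap fast path, and again under the lock because another CPU may have disarmed the timer in between. A small pthread-based sketch of that check/lock/re-check pattern follows; the types and names are toys, not the kernel's.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for struct enforcement_timer. */
struct toy_timer {
	pthread_mutex_t lock;
	int armed;
};

/* Cheap unlocked check first; re-check under the lock because another
 * thread may disarm the timer between the first test and lock acquisition. */
static int toy_cancel(struct toy_timer *t)
{
	int cancelled = 0;

	if (t->armed) {
		pthread_mutex_lock(&t->lock);
		if (t->armed) {		/* still armed: we own the cancel */
			t->armed = 0;
			cancelled = 1;
		}			/* else: lost the race, nothing to do */
		pthread_mutex_unlock(&t->lock);
	}
	return cancelled;
}

int main(void)
{
	static struct toy_timer t = { PTHREAD_MUTEX_INITIALIZER, 1 };
	printf("cancelled: %d\n", toy_cancel(&t));
	return 0;
}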
Example #4
/* returns 1 if the task needs to go to the release queue */
static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
{
	struct pfair_param* p = tsk_pfair(t);
	int to_relq;
	p->cur = (p->cur + 1) % p->quanta;
	if (!p->cur) {
		sched_trace_task_completion(t, 1);
		if (tsk_rt(t)->present) {
			/* we start a new job */
			prepare_for_next_period(t);
			sched_trace_task_release(t);
			get_rt_flags(t) = RT_F_RUNNING;
			p->release += p->period;
		} else {
			/* remove task from system until it wakes */
			drop_all_references(t);
			tsk_pfair(t)->sporadic_release = 1;
			TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n",
				   cpu, p->cur);
			return 0;
		}
	}
	to_relq = time_after(cur_release(t), time);
	TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n",
		   cpu, p->cur, to_relq);
	return to_relq;
}
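
A userspace model of the subtask bookkeeping above, assuming cur_release() evaluates to the job's release quantum plus the current subtask's relative release; the struct layout and the sample data are illustrative.

#include <stdio.h>

/* Toy model of the per-task Pfair bookkeeping used above. */
struct toy_pfair {
	unsigned long cur;		/* current subtask index */
	unsigned long quanta;		/* subtasks per job */
	unsigned long release;		/* job release (in quanta) */
	unsigned long period;		/* period (in quanta) */
	unsigned long sub_release[4];	/* relative subtask releases */
};

/* Returns 1 if the task must wait on the release queue, i.e. the next
 * subtask's release lies strictly in the future. */
static int toy_advance(struct toy_pfair *p, unsigned long now)
{
	p->cur = (p->cur + 1) % p->quanta;
	if (!p->cur)
		p->release += p->period;	/* wrapped: next job starts */
	return p->release + p->sub_release[p->cur] > now;
}

int main(void)
{
	struct toy_pfair p = { 0, 4, 0, 10, { 0, 2, 5, 7 } };
	/* Subtask 1 releases at quantum 2, so at quantum 1 it must wait. */
	printf("to_relq=%d\n", toy_advance(&p, 1));
	return 0;
}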
Example #5
inline static void arm_enforcement_timer(struct task_struct* t, int force)
{
	struct enforcement_timer* et;
	lt_t when_to_fire, remaining_budget;
	lt_t now;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;
	if (et->armed) {
		TRACE_TASK(t, "timer already armed!\n");
		return;
	}

	if (!force) {
		if ((!budget_enforced(t) ||
		     bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) &&
		    (!budget_signalled(t) ||
		     bt_flag_is_set(t, BTF_SIG_BUDGET_SENT))) {
			TRACE_TASK(t,
					"trying to arm timer when budget "
					"has already been exhausted.\n");
			return;
		}
	}

	TRACE_TASK(t, "arming enforcement timer.\n");

	/* __hrtimer_start_range_ns() cancels the timer
	 * anyway, so we don't have to check whether it is still armed */
	raw_spin_lock_irqsave(&et->lock, flags);

	if (et->armed) {
		TRACE_TASK(t, "timer already armed (race)!\n");
		goto out;
	}

	now = litmus_clock();
	remaining_budget = budget_remaining(t);
	when_to_fire = now + remaining_budget;

	TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n",
					remaining_budget, when_to_fire);

	__hrtimer_start_range_ns(&et->timer,
				 ns_to_ktime(when_to_fire),
				 0 /* delta */,
				 HRTIMER_MODE_ABS_PINNED,  /* TODO: need to use non-pinned? */
				 0 /* no wakeup */);
	et->armed = 1;

out:
	raw_spin_unlock_irqrestore(&et->lock, flags);
}
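
The arming logic reduces to "fire an absolute one-shot timer at now + remaining budget". Below is a userspace sketch of the same arithmetic using timerfd rather than the kernel hrtimer API; the budget value is made up.

#include <stdint.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	uint64_t remaining_budget_ns = 250000000ULL;	/* 250 ms of budget left */
	struct timespec now;
	struct itimerspec when;
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0)
		return 1;

	/* when_to_fire = now + remaining_budget, armed in absolute mode,
	 * mirroring the hrtimer arming above. */
	clock_gettime(CLOCK_MONOTONIC, &now);
	when.it_interval.tv_sec = 0;		/* one-shot, no period */
	when.it_interval.tv_nsec = 0;
	when.it_value.tv_sec = now.tv_sec +
		(now.tv_nsec + remaining_budget_ns) / 1000000000ULL;
	when.it_value.tv_nsec =
		(now.tv_nsec + remaining_budget_ns) % 1000000000ULL;
	timerfd_settime(fd, TFD_TIMER_ABSTIME, &when, NULL);

	if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
		printf("budget exhausted\n");	/* fires ~250 ms later */
	close(fd);
	return 0;
}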
Example #6
void simple_io_on_wakeup(struct task_struct* t)
{
	/* we're waking up from an io-based suspension */
	if (tsk_rt(t)->budget.suspend_timestamp) {
		lt_t suspend_cost = litmus_clock() -
				tsk_rt(t)->budget.suspend_timestamp;
		tsk_rt(t)->budget.suspend_timestamp = 0;
		TRACE_TASK(t, "budget consumed while io-suspended: %llu\n",
						suspend_cost);
		get_exec_time(t) += suspend_cost;
	}
	else {
		TRACE_TASK(t, "waking from non-io blocking\n");
	}
}
Example #7
/* Called when the state of tsk changes back to TASK_RUNNING.
 * We need to requeue the task.
 *
 * NOTE: If a sporadic task is suspended for a long time,
 * this might actually be an event-driven release of a new job.
 */
static void demo_task_resume(struct task_struct  *tsk)
{
        unsigned long flags;
        struct demo_cpu_state *state = cpu_state_for(get_partition(tsk));
        lt_t now;
        TRACE_TASK(tsk, "wake_up at %llu\n", litmus_clock());
        raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags);

        now = litmus_clock();

        if (is_sporadic(tsk) && is_tardy(tsk, now)) {
                /* This sporadic task was gone for a "long" time and woke up past
                 * its deadline. Give it a new budget by triggering a job
                 * release. */
                release_at(tsk, now);
        }

        /* This check is required to avoid races with tasks that resume before
         * the scheduler "noticed" that it resumed. That is, the wake up may
         * race with the call to schedule(). */
        if (state->scheduled != tsk) {
                demo_requeue(tsk, state);
                if (edf_preemption_needed(&state->local_queues, state->scheduled)) {
                        preempt_if_preemptable(state->scheduled, state->cpu);
                }
        }

        raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags);
}
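
The tardy-sporadic branch is plain job-parameter arithmetic. Here is a hedged sketch, assuming is_tardy() compares the current time against the deadline and release_at() starts a fresh job at the given time with the deadline one relative deadline later; the field and helper names are illustrative.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

struct toy_job {
	lt_t release;
	lt_t deadline;
	lt_t rel_deadline;	/* relative deadline */
};

static int toy_is_tardy(const struct toy_job *j, lt_t now)
{
	return now >= j->deadline;
}

/* Start a fresh job at 'now', as release_at() does above. */
static void toy_release_at(struct toy_job *j, lt_t now)
{
	j->release  = now;
	j->deadline = now + j->rel_deadline;
}

int main(void)
{
	struct toy_job j = { 0, 10, 10 };

	if (toy_is_tardy(&j, 42))	/* woke up long past its deadline */
		toy_release_at(&j, 42);
	printf("release=%llu deadline=%llu\n",
	       (unsigned long long) j.release,
	       (unsigned long long) j.deadline);
	return 0;
}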
Example #8
static void demo_task_new(struct task_struct *tsk, int on_runqueue,
                          int is_running)
{
        /* We'll use this to store IRQ flags. */
        unsigned long flags;
        struct demo_cpu_state *state = cpu_state_for(get_partition(tsk));
        lt_t now;

        TRACE_TASK(tsk, "is a new RT task %llu (on runqueue:%d, running:%d)\n",
                   litmus_clock(), on_runqueue, is_running);

        /* Acquire the lock protecting the state and disable interrupts. */
        raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags);

        now = litmus_clock();

        /* Release the first job now. */
        release_at(tsk, now);

        if (is_running) {
                /* If tsk is running, then no other task can be running
                 * on the local CPU. */
                BUG_ON(state->scheduled != NULL);
                state->scheduled = tsk;
        } else if (on_runqueue) {
                demo_requeue(tsk, state);
        }

        if (edf_preemption_needed(&state->local_queues, state->scheduled))
                preempt_if_preemptable(state->scheduled, state->cpu);

        raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags);
}
Example #9
static void boost_priority(struct task_struct* t)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(t);
	lt_t			now;

	raw_readyq_lock_irqsave(&pedf->slock, flags);
	now = litmus_clock();

	TRACE_TASK(t, "priority boosted at %llu\n", now);

	tsk_rt(t)->priority_boosted = 1;
	tsk_rt(t)->boost_start_time = now;

	if (pedf->scheduled != t) {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&pedf->domain.release_lock);
		if (is_queued(t) &&
		    /* If it is queued, then we need to re-order. */
		    bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
		    /* If we bubbled to the top, then we need to check for preemptions. */
		    edf_preemption_needed(&pedf->domain, pedf->scheduled))
				preempt(pedf);
		raw_spin_unlock(&pedf->domain.release_lock);
	} /* else: nothing to do since the job is not queued while scheduled */

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
Example #10
static void pfair_release_at(struct task_struct* task, lt_t start)
{
	unsigned long flags;
	quanta_t release;

	BUG_ON(!is_realtime(task));

	raw_spin_lock_irqsave(&pfair_lock, flags);
	release_at(task, start);
	release = time2quanta(start, CEIL);

	if (release - pfair_time >= PFAIR_MAX_PERIOD)
		release = pfair_time + PFAIR_MAX_PERIOD;

	TRACE_TASK(task, "sys release at %lu\n", release);

	drop_all_references(task);
	prepare_release(task, release);
	pfair_add_release(task);

	/* Clear sporadic release flag, since this release subsumes any
	 * sporadic release on wake.
	 */
	tsk_pfair(task)->sporadic_release = 0;

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
}
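
The clamp on the release quantum relies on unsigned arithmetic: a release too far in the future, or one that lies behind pfair_time and therefore wraps to a huge difference, is pulled back to at most PFAIR_MAX_PERIOD quanta ahead. A small sketch of that clamp follows; the constant is illustrative.

#include <stdio.h>

typedef unsigned long quanta_t;

#define TOY_MAX_PERIOD 2000UL

/* Clamp a requested release to at most TOY_MAX_PERIOD quanta beyond 'now'.
 * Because quanta_t is unsigned, a 'release' behind 'now' wraps around to a
 * huge difference and is clamped as well. */
static quanta_t clamp_release(quanta_t now, quanta_t release)
{
	if (release - now >= TOY_MAX_PERIOD)
		release = now + TOY_MAX_PERIOD;
	return release;
}

int main(void)
{
	printf("%lu\n", clamp_release(100, 5000));	/* too far ahead -> 2100 */
	printf("%lu\n", clamp_release(100, 50));	/* behind 'now'  -> 2100 */
	return 0;
}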
Example #11
static void unboost_priority(struct task_struct* t)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(t);
	lt_t			now;

	raw_readyq_lock_irqsave(&pedf->slock, flags);
	now = litmus_clock();

	/* Priority-boosted jobs must be scheduled, so this should only
	 * happen while the job is scheduled. */
	BUG_ON(pedf->scheduled != t);

	TRACE_TASK(t, "priority restored at %llu\n", now);

	tsk_rt(t)->priority_boosted = 0;
	tsk_rt(t)->boost_start_time = 0;

	/* check if this changes anything */
	if (edf_preemption_needed(&pedf->domain, pedf->scheduled))
		preempt(pedf);

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
Example #12
void sched_state_will_schedule(struct task_struct* tsk)
{
	/* Litmus hack: we only care about processor-local invocations of
	 * set_tsk_need_resched(). We can't reliably set the flag remotely
	 * since it might race with other updates to the scheduling state.  We
	 * can't rely on the runqueue lock protecting updates to the sched
	 * state since processors do not acquire the runqueue locks for all
	 * updates to the sched state (to avoid acquiring two runqueue locks at
	 * the same time). Further, if tsk is residing on a remote processor,
	 * then that processor doesn't actually know yet that it is going to
	 * reschedule; it still must receive an IPI (unless a local invocation
	 * races).
	 */
	if (likely(task_cpu(tsk) == smp_processor_id())) {
		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
			set_sched_state(PICKED_WRONG_TASK);
		else
			set_sched_state(WILL_SCHEDULE);
	} else
		/* Litmus tasks should never be subject to a remote
		 * set_tsk_need_resched(). */
		BUG_ON(is_realtime(tsk));
	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
		   __builtin_return_address(0));
}
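
The scheduling state is treated as a bit mask so that a set of acceptable states can be checked in a single test, as VERIFY_SCHED_STATE() and is_in_sched_state() do above. A minimal sketch of that idiom follows; the state names are taken from the snippet, but the helpers and the single global variable are simplifications.

#include <stdio.h>

/* One bit per scheduling state, mirroring the names used above. */
enum toy_sched_state {
	TASK_SCHEDULED    = (1 << 0),
	SHOULD_SCHEDULE   = (1 << 1),
	TASK_PICKED       = (1 << 2),
	WILL_SCHEDULE     = (1 << 3),
	PICKED_WRONG_TASK = (1 << 4),
};

static int cpu_state = TASK_SCHEDULED;

/* True if the current state is any of the states in 'mask'. */
static int is_in_sched_state(int mask)
{
	return cpu_state & mask;
}

static void set_sched_state(int s)
{
	cpu_state = s;
}

int main(void)
{
	if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
		set_sched_state(PICKED_WRONG_TASK);
	else
		set_sched_state(WILL_SCHEDULE);
	printf("state=%d\n", cpu_state);
	return 0;
}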
Example #13
static struct task_struct* pfair_schedule(struct task_struct * prev)
{
	struct pfair_state* state = &__get_cpu_var(pfair_state);
	int blocks;
	struct task_struct* next = NULL;

	raw_spin_lock(&pfair_lock);

	blocks  = is_realtime(prev) && !is_running(prev);

	if (state->local && safe_to_schedule(state->local, state->cpu))
		next = state->local;

	if (prev != next) {
		tsk_rt(prev)->scheduled_on = NO_CPU;
		if (next)
			tsk_rt(next)->scheduled_on = state->cpu;
	}

	raw_spin_unlock(&pfair_lock);

	if (next)
		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
			   tsk_pfair(next)->release, pfair_time, litmus_clock());
	else if (is_realtime(prev))
		TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock());

	return next;
}
Example #14
/*	Prepare a task for running in RT mode
 */
static void psnedf_task_new(struct task_struct * t, int on_rq, int is_scheduled)
{
	rt_domain_t* 		edf  = task_edf(t);
	psnedf_domain_t* 	pedf = task_pedf(t);
	unsigned long		flags;

	TRACE_TASK(t, "psn edf: task new, cpu = %d\n",
		   t->rt_param.task_params.cpu);

	/* setup job parameters */
	release_at(t, litmus_clock());

	/* The task should be running in the queue, otherwise signal
	 * code will try to wake it up with fatal consequences.
	 */
	raw_readyq_lock_irqsave(&pedf->slock, flags);
	if (is_scheduled) {
		/* there shouldn't be anything else scheduled at the time */
		BUG_ON(pedf->scheduled);
		pedf->scheduled = t;
	} else {
		/* !is_scheduled means it is not scheduled right now, but it
		 * does not mean that it is suspended. If it is not suspended,
		 * it still needs to be requeued. If it is suspended, there is
		 * nothing that we need to do as it will be handled by the
		 * wake_up() handler. */
		if (is_running(t)) {
			requeue(t, edf);
			/* maybe we have to reschedule */
			psnedf_preempt_check(pedf);
		}
	}
	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
Example #15
static void psnedf_task_block(struct task_struct *t)
{
	/* only running tasks can block, thus t is in no queue */
	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);

	BUG_ON(!is_realtime(t));
	BUG_ON(is_queued(t));
}
Example #16
static void job_completion(struct task_struct* t, int forced)
{
	sched_trace_task_completion(t,forced);
	TRACE_TASK(t, "job_completion().\n");

	tsk_rt(t)->completed = 0;
	prepare_for_next_period(t);
}
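
prepare_for_next_period() is where the job parameters advance. A hedged sketch of the usual periodic update, assuming the deadline is the release plus the relative deadline and a job counter is kept; the field names are illustrative.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

struct toy_job {
	lt_t release;
	lt_t period;
	lt_t rel_deadline;
	lt_t deadline;
	unsigned int job_no;
};

/* Advance to the next periodic job: shift the release by one period,
 * recompute the deadline, and bump the job counter. */
static void toy_prepare_for_next_period(struct toy_job *j)
{
	j->release += j->period;
	j->deadline = j->release + j->rel_deadline;
	j->job_no++;
}

int main(void)
{
	struct toy_job j = { 0, 100, 100, 100, 1 };
	toy_prepare_for_next_period(&j);
	printf("release=%llu deadline=%llu job=%u\n",
	       (unsigned long long) j.release,
	       (unsigned long long) j.deadline, j.job_no);
	return 0;
}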
Example #17
static void psnedf_task_wake_up(struct task_struct *task)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(task);
	rt_domain_t* 		edf  = task_edf(task);
	lt_t			now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
	raw_readyq_lock_irqsave(&pedf->slock, flags);

	set_task_state(task, TASK_RUNNING);

	BUG_ON(is_queued(task));
	now = litmus_clock();
	if (is_sporadic(task) && is_tardy(task, now)
#ifdef CONFIG_LITMUS_LOCKING
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	    && !is_priority_boosted(task)
#endif
		) {
		/* new sporadic release */
		release_at(task, now);
		sched_trace_task_release(task);
	}

	budget_state_machine(task,on_wakeup);

	/* Only add to ready queue if it is not the currently-scheduled
	 * task. This could be the case if a task was woken up concurrently
	 * on a remote CPU before the executing CPU got around to actually
	 * de-scheduling the task, i.e., wake_up() raced with schedule()
	 * and won.
	 */
	if (pedf->scheduled != task) {
		requeue(task, edf);
		psnedf_preempt_check(pedf);
	}

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
	TRACE_TASK(task, "wake up done\n");
}
Example #18
void simple_io_on_blocked(struct task_struct* t)
{
	/* Hiding is turned on by locking protocols, so if there isn't any
	 * hiding, then we're blocking for some other reason. Assume it's I/O. */
	int for_io = 0;
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	for_io |= !tsk_rt(t)->blocked_lock;
#endif
#ifdef CONFIG_REALTIME_AUX_TASKS
	for_io |= tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks;
#endif
#ifdef CONFIG_LITMUS_NVIDIA
	for_io |= tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu;
#endif

	/* we drain budget for io-based suspensions */
	if (for_io) {
		/* There is a short window during which the time tracked by the
		 * runqueue and the suspension time are double-counted.
		 * TODO: Do this recording closer to suspension time. */
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

		TRACE_TASK(t, "blocking for I/O.\n");

		if (!tsk_rt(t)->budget.timer.armed) {
			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);

			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
				TRACE_TASK(t, "budget timer not armed. "
						   "Raced with exhaustion-resched? Re-arming.\n");
				arm_enforcement_timer(t, 1);
			}
			else {
				TRACE_TASK(t, "not arming timer because task is waiting "
						   "for release.\n");
			}
		}
	}
	else {
		TRACE_TASK(t, "blocking for litmus lock. stop draining.\n");
		simple_on_blocked(t);
	}
}
Example #19
static int safe_to_schedule(struct task_struct* t, int cpu)
{
	int where = tsk_rt(t)->scheduled_on;
	if (where != NO_CPU && where != cpu) {
		TRACE_TASK(t, "BAD: can't be scheduled on %d, "
			   "scheduled already on %d.\n", cpu, where);
		return 0;
	} else
		return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING;
}
Example #20
void sobliv_on_exit_top_m(struct task_struct* t)
{
	if (budget_precisely_tracked(t)) {
		if (tsk_rt(t)->budget.timer.armed) {

			if (!is_running(t)) {
				/* The time at which we started draining budget while
				 * suspended is recorded in suspend_timestamp. It was set
				 * either when 't' entered the top-m while suspended or
				 * when 't' blocked. */
				lt_t suspend_cost;
				BUG_ON(!tsk_rt(t)->budget.suspend_timestamp);
				suspend_cost = litmus_clock() -
						tsk_rt(t)->budget.suspend_timestamp;
				TRACE_TASK(t, "budget consumed while suspended: %llu\n",
								suspend_cost);
				get_exec_time(t) += suspend_cost;

				/* timer should have fired before now */
				if (get_exec_time(t) + 1000000/10 > get_exec_cost(t)) {
					TRACE_TASK(t,
						"budget overrun while suspended by over 1/10 "
						"millisecond! timer should have already fired!\n");
					WARN_ON(1);
				}
			}

			TRACE_TASK(t, "stops draining budget\n");
			/* the callback will handle it if it is executing */
			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
				/* TODO: record a timestamp if the task isn't running */
				cancel_enforcement_timer(t);
			}
			else {
				TRACE_TASK(t,
					"within callback context. skipping operation.\n");
			}
		}
		else {
			TRACE_TASK(t, "was not draining budget\n");
		}
	}
}
Example #21
void send_sigbudget(struct task_struct* t)
{
	if (!bt_flag_test_and_set(t, BTF_SIG_BUDGET_SENT)) {
		/* signal has not yet been sent and we are responsible for sending
		 * since we just set the sent-bit when it was previously 0. */

		TRACE_TASK(t, "SIG_BUDGET being sent!\n");
		send_sig(SIG_BUDGET, t, 1); /* '1' denotes signal sent from kernel */
	}
}
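
The "send at most once" guarantee hinges on the atomic test-and-set: only the caller that flips the bit from 0 to 1 delivers the signal. Below is a userspace sketch with a C11 atomic flag standing in for the bt_flag helpers.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sig_budget_sent = ATOMIC_FLAG_INIT;

/* Returns 1 if this caller won the race and should deliver the signal. */
static int claim_signal_delivery(void)
{
	/* test_and_set returns the previous value: 0 means we set it first. */
	return !atomic_flag_test_and_set(&sig_budget_sent);
}

int main(void)
{
	if (claim_signal_delivery())
		printf("SIG_BUDGET being sent!\n");
	if (claim_signal_delivery())
		printf("this never prints: signal already sent\n");
	return 0;
}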
Example #22
static void requeue(struct task_struct* t, rt_domain_t *edf)
{
	if (t->state != TASK_RUNNING)
		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");

	tsk_rt(t)->completed = 0;
	if (is_early_releasing(t) || is_released(t, litmus_clock()))
		__add_ready(edf, t);
	else
		add_release(edf, t); /* it has got to wait */
}
Example #23
static void dump_subtasks(struct task_struct* t)
{
	unsigned long i;
	for (i = 0; i < t->rt_param.pfair->quanta; i++)
		TRACE_TASK(t, "SUBTASK %lu: rel=%lu dl=%lu bbit:%lu gdl:%lu\n",
			   i + 1,
			   t->rt_param.pfair->subtasks[i].release,
			   t->rt_param.pfair->subtasks[i].deadline,
			   t->rt_param.pfair->subtasks[i].overlap,
			   t->rt_param.pfair->subtasks[i].group_deadline);
}
Example #24
void reevaluate_inheritance(struct task_struct* t)
{
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	struct litmus_lock *blocked_lock = NULL;

	TRACE_TASK(t, "reevaluating locks in light of budget exhaustion.\n");

	/* do we need to inherit from any tasks now that our own
	 * priority has decreased? */
	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
	if (holds_locks(t)) {
		struct task_struct* hp_blocked =
				top_priority(&tsk_rt(t)->hp_blocked_tasks);

		if (litmus->compare(hp_blocked, t))
			litmus->increase_prio(t, effective_priority(hp_blocked));
	}
	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);

	/* do we need to tell the lock we're blocked on about our
	 * changed priority? */
	blocked_lock = tsk_rt(t)->blocked_lock;
	if(blocked_lock) {
		if(blocked_lock->ops->supports_budget_exhaustion) {
			TRACE_TASK(t, "Lock %d supports budget exhaustion.\n",
					   blocked_lock->ident);
			blocked_lock->ops->budget_exhausted(blocked_lock, t);
		}
	}
	else {
		TRACE_TASK(t,
			"Budget exhausted while task not blocked on Litmus lock.\n");
	}
#else
	/* prio-reeval currently relies upon nested locking infrastructure */
	TRACE_TASK(t,
		"Unable to check if sleeping task is blocked "
		"on Litmus lock without "
		"CONFIG_LITMUS_NESTED_LOCKING enabled.\n");
#endif
}
Example #25
static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
{
	enum hrtimer_restart restart = HRTIMER_NORESTART;
	unsigned long flags;

	struct budget_tracker* bt =
		container_of(
			container_of(timer,
				struct enforcement_timer,
				timer),
			struct budget_tracker,
			timer);

	struct task_struct* t =
		container_of(
			container_of(bt, struct rt_param, budget),
			struct task_struct,
			rt_param);

	TRACE_TASK(t, "budget timer interrupt fired at time %lu\n",
					litmus_clock());

	raw_spin_lock_irqsave(&bt->timer.lock, flags);
	tsk_rt(t)->budget.timer.armed = 0;
	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);

	if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
		TRACE_TASK(t,
			"spurious exhastion while waiting for release. dropping.\n");
		goto out;
	}

	restart = bt->ops->on_exhausted(t,!IN_SCHEDULE);

	raw_spin_lock_irqsave(&bt->timer.lock, flags);
	tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART);
	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);

out:
	return restart;
}
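
The nested container_of() calls walk from the embedded hrtimer back out to the owning task_struct. Here is a standalone sketch of that pointer arithmetic with simplified structs; only the container_of idiom matches the kernel, the layout is illustrative.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified nesting: timer inside budget tracker inside task. */
struct toy_enforcement_timer { int armed; };
struct toy_budget_tracker    { struct toy_enforcement_timer timer; };
struct toy_task              { const char *name; struct toy_budget_tracker budget; };

int main(void)
{
	struct toy_task task = { "rt-task", { { 1 } } };
	struct toy_enforcement_timer *et = &task.budget.timer;

	/* Recover the enclosing structures from the member pointer,
	 * just as the timer callback above recovers its task. */
	struct toy_budget_tracker *bt =
		container_of(et, struct toy_budget_tracker, timer);
	struct toy_task *t = container_of(bt, struct toy_task, budget);

	printf("timer belongs to %s\n", t->name);
	return 0;
}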
Example #26
void sobliv_on_enter_top_m(struct task_struct* t)
{
	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		if (tsk_rt(t)->budget.timer.armed)
			TRACE_TASK(t, "budget timer already armed.\n");
		else {
			/* if we're blocked, then record the time at which we
			   started measuring */
			if (!is_running(t))
				tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

			/* the callback will handle it if it is executing */
			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
				arm_enforcement_timer(t, 0);
			}
			else {
				TRACE_TASK(t,
					"within callback context. deferring timer arm.\n");
			}
		}
	}
}
Example #27
/* allocate t->rt_param.ctrl_page*/
static int alloc_ctrl_page(struct task_struct *t)
{
	int err = 0;

	/* only allocate if the task doesn't have one yet */
	if (!tsk_rt(t)->ctrl_page) {
		tsk_rt(t)->ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
		if (!tsk_rt(t)->ctrl_page)
			err = -ENOMEM;
		/* will get de-allocated in task teardown */
		TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__,
			   tsk_rt(t)->ctrl_page);
	}
	return err;
}
Example #28
static void psnedf_tick(struct task_struct *t)
{
	psnedf_domain_t *pedf = local_pedf;

	/* Check for inconsistency. We don't need the lock for this since
	 * ->scheduled is only changed in schedule, which obviously is not
	 *  executing in parallel on this CPU
	 */
	BUG_ON(is_realtime(t) && t != pedf->scheduled);

	if (is_realtime(t) &&
		tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
		budget_exhausted(t)) {
		TRACE_TASK(t, "budget exhausted\n");
		budget_state_machine2(t,on_exhausted,!IN_SCHEDULE);
	}
}
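
Quantum-based enforcement boils down to comparing consumed execution time against the per-job budget at every tick. A toy model of that check follows, assuming budget_exhausted() is simply exec_time >= exec_cost and that one quantum is charged per tick; the names and charging policy are illustrative, not what psnedf_tick() itself does.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

struct toy_budget {
	lt_t exec_time;	/* consumed so far */
	lt_t exec_cost;	/* per-job budget */
};

static int toy_budget_exhausted(const struct toy_budget *b)
{
	return b->exec_time >= b->exec_cost;
}

/* Called once per scheduler tick for the running task. */
static void toy_tick(struct toy_budget *b, lt_t quantum_ns)
{
	b->exec_time += quantum_ns;
	if (toy_budget_exhausted(b))
		printf("budget exhausted\n");
}

int main(void)
{
	struct toy_budget b = { 900000, 1000000 };	/* 0.9 ms of a 1 ms budget used */
	toy_tick(&b, 1000000);				/* one 1 ms quantum elapses */
	return 0;
}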
Example #29
void sobliv_on_wakeup(struct task_struct* t)
{
	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
		/* we're waking up while in top-m.  record the time spent
		 * suspended while draining in exec_cost. suspend_timestamp was
		 * either set when we entered top-m while asleep, or when we
		 * blocked. */
		if (tsk_rt(t)->budget.suspend_timestamp) {
			lt_t suspend_cost = litmus_clock() -
					tsk_rt(t)->budget.suspend_timestamp;
			tsk_rt(t)->budget.suspend_timestamp = 0;
			TRACE_TASK(t, "budget consumed while suspended: %llu\n",
					suspend_cost);
			get_exec_time(t) += suspend_cost;
		}
		else {
			WARN_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE));
		}
	}
}
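
The suspension charge is timestamp arithmetic: record the clock when draining starts while suspended, and on wake-up add the elapsed time to the consumed budget, as both this function and simple_io_on_wakeup() do. A small sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t lt_t;

struct toy_budget {
	lt_t exec_time;
	lt_t suspend_timestamp;	/* 0 when not suspended-and-draining */
};

static void toy_on_blocked(struct toy_budget *b, lt_t now)
{
	b->suspend_timestamp = now;
}

static void toy_on_wakeup(struct toy_budget *b, lt_t now)
{
	if (b->suspend_timestamp) {
		b->exec_time += now - b->suspend_timestamp;	/* charge the suspension */
		b->suspend_timestamp = 0;
	}
}

int main(void)
{
	struct toy_budget b = { 0, 0 };
	toy_on_blocked(&b, 100);
	toy_on_wakeup(&b, 175);
	printf("charged %llu time units\n", (unsigned long long) b.exec_time);
	return 0;
}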
Example #30
static void pfair_task_exit(struct task_struct * t)
{
	unsigned long flags;

	BUG_ON(!is_realtime(t));

	/* Remove the task from the release or ready queue, and ensure
	 * that it is not the scheduled task for ANY CPU. We do this
	 * blanket check because, occasionally, when tasks exit while
	 * blocked, the task_cpu of the task might not be the same as
	 * the CPU that the PFAIR scheduler has chosen for it.
	 */
	raw_spin_lock_irqsave(&pfair_lock, flags);

	TRACE_TASK(t, "RIP, state:%d\n", t->state);
	drop_all_references(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);

	kfree(t->rt_param.pfair);
	t->rt_param.pfair = NULL;
}