Example 1
static void pfair_task_wake_up(struct task_struct *t)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
		   litmus_clock(), cur_release(t), pfair_time);

	raw_spin_lock_irqsave(&pfair_lock, flags);

	/* It is a little unclear how to deal with Pfair
	 * tasks that block for a while and then wake. For now,
	 * if a task blocks and wakes before its next job release,
	 * then it may resume if it is currently linked somewhere
	 * (as if it never blocked at all). Otherwise, we have a
	 * new sporadic job release.
	 */
	if (tsk_pfair(t)->sporadic_release) {
		now = litmus_clock();
		release_at(t, now);
		prepare_release(t, time2quanta(now, CEIL));
		sched_trace_task_release(t);
		/* FIXME: race with pfair_time advancing */
		pfair_add_release(t);
		tsk_pfair(t)->sporadic_release = 0;
	}

	check_preempt(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
}
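
The sporadic-release path above maps the wall-clock wake-up time onto the Pfair quantum grid via time2quanta(now, CEIL). Below is a minimal sketch of such a conversion, assuming a fixed 1 ms quantum; QUANTUM_NS and example_time2quanta are illustrative names, and the real LITMUS^RT helper derives the quantum length from the scheduler tick period.

typedef unsigned long quanta_t;
typedef unsigned long long lt_t;

enum round { FLOOR, CEIL };

#define QUANTUM_NS 1000000ULL /* assumed: 1 ms quantum, for illustration */

static quanta_t example_time2quanta(lt_t time, enum round round)
{
	quanta_t q = (quanta_t)(time / QUANTUM_NS);

	/* CEIL rounds a mid-quantum timestamp up to the next quantum
	 * boundary, so a fresh sporadic release is never stamped into a
	 * quantum that has already begun. */
	if (round == CEIL && (time % QUANTUM_NS))
		q++;
	return q;
}
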
Example 2
/* returns 1 if the task needs to go to the release queue */
static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
{
	struct pfair_param* p = tsk_pfair(t);
	int to_relq;
	p->cur = (p->cur + 1) % p->quanta;
	if (!p->cur) {
		sched_trace_task_completion(t, 1); /* 1 = forced completion */
		if (tsk_rt(t)->present) {
			/* we start a new job */
			prepare_for_next_period(t);
			sched_trace_task_release(t);
			get_rt_flags(t) = RT_F_RUNNING; /* macro expands to an lvalue */
			p->release += p->period;
		} else {
			/* remove task from system until it wakes */
			drop_all_references(t);
			tsk_pfair(t)->sporadic_release = 1;
			TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n",
				   cpu, p->cur);
			return 0;
		}
	}
	to_relq = time_after(cur_release(t), time);
	TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n",
		   cpu, p->cur, to_relq);
	return to_relq;
}
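
The to_relq decision above hinges on cur_release(t), the quantum in which the task's current subtask becomes eligible. A hedged sketch of that computation follows, assuming each subtask stores its window start as an offset relative to the job release; the struct layout and names below are illustrative, not the exact sched_pfair.c definitions.

typedef unsigned long quanta_t; /* as in the listings above */

struct subtask_sketch {
	quanta_t release;  /* window start, relative to the job release */
	quanta_t deadline; /* window end, relative to the job release */
};

struct pfair_sketch {
	quanta_t quanta;   /* number of subtasks per job */
	quanta_t cur;      /* index of the current subtask */
	quanta_t release;  /* release quantum of the current job */
	struct subtask_sketch *subtasks; /* per-subtask windows */
};

/* Absolute release quantum of the current subtask: the job's release
 * plus the subtask's window offset. advance_subtask() defers the task
 * to the release queue iff this lies strictly after the current time. */
static quanta_t sketch_cur_release(struct pfair_sketch *p)
{
	return p->release + p->subtasks[p->cur].release;
}
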
Example 3
static void psnedf_task_wake_up(struct task_struct *task)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(task);
	rt_domain_t* 		edf  = task_edf(task);
	lt_t			now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
	raw_readyq_lock_irqsave(&pedf->slock, flags);

	set_task_state(task, TASK_RUNNING);

	BUG_ON(is_queued(task));
	now = litmus_clock();
	if (is_sporadic(task) && is_tardy(task, now)
#ifdef CONFIG_LITMUS_LOCKING
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	    && !is_priority_boosted(task)
#endif
		) {
		/* new sporadic release */
		release_at(task, now);
		sched_trace_task_release(task);
	}

	budget_state_machine(task, on_wakeup);

	/* Only add to ready queue if it is not the currently-scheduled
	 * task. This could be the case if a task was woken up concurrently
	 * on a remote CPU before the executing CPU got around to actually
	 * de-scheduling the task, i.e., wake_up() raced with schedule()
	 * and won.
	 */
	if (pedf->scheduled != task) {
		requeue(task, edf);
		psnedf_preempt_check(pedf);
	}

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
	TRACE_TASK(task, "wake up done\n");
}
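
The final step, psnedf_preempt_check(pedf), decides whether the newly ready task should preempt whatever the partition is currently running. A hedged sketch of such a check under partitioned EDF is shown below; edf_preemption_needed() and preempt() mirror the LITMUS^RT helpers, but the exact signatures and the pedf->domain field are assumptions for illustration.

static int sketch_preempt_check(psnedf_domain_t *pedf)
{
	/* Reschedule this partition's CPU iff the head of its EDF ready
	 * queue has an earlier deadline than the currently scheduled
	 * task (or the CPU is idle). */
	if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) {
		preempt(pedf);
		return 1;
	}
	return 0;
}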