Example #1
/* Prepare a task for running in RT mode. */
static void psnedf_task_new(struct task_struct *t, int on_rq, int is_scheduled)
{
	rt_domain_t* 		edf  = task_edf(t);
	psnedf_domain_t* 	pedf = task_pedf(t);
	unsigned long		flags;

	TRACE_TASK(t, "psn edf: task new, cpu = %d\n",
		   t->rt_param.task_params.cpu);

	/* setup job parameters */
	release_at(t, litmus_clock());

	/* The task should be running or sitting in the ready queue; otherwise
	 * the signal code will try to wake it up, with fatal consequences.
	 */
	raw_readyq_lock_irqsave(&pedf->slock, flags);
	if (is_scheduled) {
		/* there shouldn't be anything else scheduled at the time */
		BUG_ON(pedf->scheduled);
		pedf->scheduled = t;
	} else {
		/* !is_scheduled means it is not scheduled right now, but it
		 * does not mean that it is suspended. If it is not suspended,
		 * it still needs to be requeued. If it is suspended, there is
		 * nothing that we need to do as it will be handled by the
		 * wake_up() handler. */
		if (is_running(t)) {
			requeue(t, edf);
			/* maybe we have to reschedule */
			psnedf_preempt_check(pedf);
		}
	}
	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
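
The requeue() helper used above is not shown. Here is a minimal sketch, modeled on the conventions of LITMUS^RT's sched_psn_edf.c (tsk_rt(), is_released(), __add_ready(), and add_release() follow that codebase; the exact implementation in your tree may differ): a task whose current job has already been released goes onto the ready queue, while one whose release time lies in the future is handed to the release queue.

static void requeue(struct task_struct *t, rt_domain_t *edf)
{
	if (t->state != TASK_RUNNING)
		TRACE_TASK(t, "requeue: !TASK_RUNNING\n");

	/* the current job is no longer marked as completed */
	tsk_rt(t)->completed = 0;

	if (is_released(t, litmus_clock()))
		/* job already released: contend for the CPU right away */
		__add_ready(edf, t);
	else
		/* job not released yet: it has got to wait */
		add_release(edf, t);
}

__add_ready() is the lock-free variant, safe because every caller in this example already holds the ready-queue lock; add_release() takes the release lock internally.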
Example #2
/* Called when the state of tsk changes back to TASK_RUNNING.
 * We need to requeue the task.
 *
 * NOTE: If a sporadic task is suspended for a long time,
 * this might actually be an event-driven release of a new job.
 */
static void demo_task_resume(struct task_struct *tsk)
{
        unsigned long flags;
        struct demo_cpu_state *state = cpu_state_for(get_partition(tsk));
        lt_t now;
        TRACE_TASK(tsk, "wake_up at %llu\n", litmus_clock());
        raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags);

        now = litmus_clock();

        if (is_sporadic(tsk) && is_tardy(tsk, now)) {
                /* This sporadic task was gone for a "long" time and woke up past
                 * its deadline. Give it a new budget by triggering a job
                 * release. */
                release_at(tsk, now);
        }

        /* This check is required to avoid races with a task that resumes
         * before the scheduler has "noticed" that it resumed; that is, the
         * wake-up may race with the call to schedule(). */
        if (state->scheduled != tsk) {
                demo_requeue(tsk, state);
                if (edf_preemption_needed(&state->local_queues, state->scheduled)) {
                        preempt_if_preemptable(state->scheduled, state->cpu);
                }
        }

        raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags);
}
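
demo_requeue() is assumed by this example. A sketch consistent with the LITMUS^RT DEMO plugin tutorial (treat it as illustrative, not authoritative): a task whose job has been released is added to the ready queue, otherwise to the release queue.

static void demo_requeue(struct task_struct *tsk, struct demo_cpu_state *cpu_state)
{
        if (is_released(tsk, litmus_clock())) {
                /* Uses __add_ready() instead of add_ready() because the
                 * caller already holds the ready-queue lock. */
                __add_ready(&cpu_state->local_queues, tsk);
        } else {
                /* Uses add_release() because the release lock is NOT held
                 * here; it is taken internally. */
                add_release(&cpu_state->local_queues, tsk);
        }
}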
Example #3
static void demo_task_new(struct task_struct *tsk, int on_runqueue,
                          int is_running)
{
        /* We'll use this to store IRQ flags. */
        unsigned long flags;
        struct demo_cpu_state *state = cpu_state_for(get_partition(tsk));
        lt_t now;

        TRACE_TASK(tsk, "is a new RT task %llu (on runqueue:%d, running:%d)\n",
                   litmus_clock(), on_runqueue, is_running);

        /* Acquire the lock protecting the state and disable interrupts. */
        raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags);

        now = litmus_clock();

        /* Release the first job now. */
        release_at(tsk, now);

        if (is_running) {
                /* If tsk is running, then no other task can be running
                 * on the local CPU. */
                BUG_ON(state->scheduled != NULL);
                state->scheduled = tsk;
        } else if (on_runqueue) {
                demo_requeue(tsk, state);
        }

        if (edf_preemption_needed(&state->local_queues, state->scheduled))
                preempt_if_preemptable(state->scheduled, state->cpu);

        raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags);
}
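
Examples #2 and #3 both reach their per-CPU state through cpu_state_for(). A plausible definition of that state, mirroring the DEMO tutorial plugin (the struct layout and macro names are assumptions if your tree differs):

struct demo_cpu_state {
        rt_domain_t local_queues;       /* this partition's ready/release queues */
        int cpu;                        /* the CPU this state belongs to */
        struct task_struct *scheduled;  /* task currently selected to run */
};

static DEFINE_PER_CPU(struct demo_cpu_state, demo_cpu_state);

#define cpu_state_for(cpu_id)   (&per_cpu(demo_cpu_state, cpu_id))
#define local_cpu_state()       (this_cpu_ptr(&demo_cpu_state))

Keeping one rt_domain_t per CPU is what makes this a partitioned scheduler: each partition's ready queue is protected by its own lock, so no global lock is needed.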
Example #4
static void pfair_release_at(struct task_struct* task, lt_t start)
{
	unsigned long flags;
	quanta_t release;

	BUG_ON(!is_realtime(task));

	raw_spin_lock_irqsave(&pfair_lock, flags);
	release_at(task, start);
	release = time2quanta(start, CEIL);

	if (release - pfair_time >= PFAIR_MAX_PERIOD)
		release = pfair_time + PFAIR_MAX_PERIOD;

	TRACE_TASK(task, "sys release at %lu\n", release);

	drop_all_references(task);
	prepare_release(task, release);
	pfair_add_release(task);

	/* Clear sporadic release flag, since this release subsumes any
	 * sporadic release on wake.
	 */
	tsk_pfair(task)->sporadic_release = 0;

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
}
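
time2quanta() maps a litmus time value (nanoseconds) to a quantum index, rounding up or down. A sketch of the conversion, assuming a global tick_period holds the quantum length, as in LITMUS^RT's litmus.h (names follow that codebase):

typedef enum {
	FLOOR,
	CEIL
} round_t;

static inline quanta_t time2quanta(lt_t time, round_t round)
{
	s64 quantum_length = ktime_to_ns(tick_period);

	/* do_div() divides in place and returns the remainder; a nonzero
	 * remainder with CEIL rounding bumps us to the next quantum. */
	if (do_div(time, quantum_length) && round == CEIL)
		time++;
	return (quanta_t) time;
}

CEIL is used in pfair_release_at() so that a release never lands in a quantum that has already begun.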
Example #5
static void pfair_task_wake_up(struct task_struct *t)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
		   litmus_clock(), cur_release(t), pfair_time);

	raw_spin_lock_irqsave(&pfair_lock, flags);

	/* It is a little unclear how to deal with Pfair
	 * tasks that block for a while and then wake. For now,
	 * if a task blocks and wakes before its next job release,
	 * then it may resume if it is currently linked somewhere
	 * (as if it never blocked at all). Otherwise, we have a
	 * new sporadic job release.
	 */
	if (tsk_pfair(t)->sporadic_release) {
		now = litmus_clock();
		release_at(t, now);
		prepare_release(t, time2quanta(now, CEIL));
		sched_trace_task_release(t);
		/* FIXME: race with pfair_time advancing */
		pfair_add_release(t);
		tsk_pfair(t)->sporadic_release = 0;
	}

	check_preempt(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
}
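
check_preempt() is the piece that turns a wake-up into an actual preemption. A hedged sketch of the idea only (the real Pfair logic also has to respect quantum boundaries; NO_CPU, litmus_reschedule(), and the rt_param fields follow LITMUS^RT conventions):

static void check_preempt(struct task_struct *t)
{
	int cpu = NO_CPU;

	if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on &&
	    tsk_rt(t)->present) {
		/* the task is linked to a CPU on which it is not running */
		cpu = tsk_rt(t)->linked_on;
	}
	if (cpu != NO_CPU)
		/* poke that CPU so it re-runs its scheduling decision */
		litmus_reschedule(cpu);
}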
Example #6
static void psnedf_task_wake_up(struct task_struct *task)
{
	unsigned long		flags;
	psnedf_domain_t* 	pedf = task_pedf(task);
	rt_domain_t* 		edf  = task_edf(task);
	lt_t			now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
	raw_readyq_lock_irqsave(&pedf->slock, flags);

	set_task_state(task, TASK_RUNNING);

	BUG_ON(is_queued(task));
	now = litmus_clock();
	if (is_sporadic(task) && is_tardy(task, now)
#ifdef CONFIG_LITMUS_LOCKING
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	    && !is_priority_boosted(task)
#endif
		) {
		/* new sporadic release */
		release_at(task, now);
		sched_trace_task_release(task);
	}

	budget_state_machine(task, on_wakeup);

	/* Only add to ready queue if it is not the currently-scheduled
	 * task. This could be the case if a task was woken up concurrently
	 * on a remote CPU before the executing CPU got around to actually
	 * de-scheduling the task, i.e., wake_up() raced with schedule()
	 * and won.
	 */
	if (pedf->scheduled != task) {
		requeue(task, edf);
		psnedf_preempt_check(pedf);
	}

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
	TRACE_TASK(task, "wake up done\n");
}
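
Finally, both PSN-EDF examples defer to psnedf_preempt_check(). A sketch of what it plausibly does, consistent with how it is called above (the pedf->domain field name is an assumption based on LITMUS^RT's psnedf_domain_t): compare the head of the local ready queue against the currently scheduled task, and trigger a preemption on this partition's CPU if EDF says the queued task has higher priority.

static void preempt(psnedf_domain_t *pedf)
{
	preempt_if_preemptable(pedf->scheduled, pedf->cpu);
}

static int psnedf_preempt_check(psnedf_domain_t *pedf)
{
	if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) {
		preempt(pedf);
		return 1;
	} else
		return 0;
}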