/* Called when the state of tsk changes back to TASK_RUNNING.
 * We need to requeue the task.
 *
 * NOTE: If a sporadic task is suspended for a long time,
 * this might actually be an event-driven release of a new job.
 */
static void demo_task_resume(struct task_struct *tsk)
{
        unsigned long flags;
        struct demo_cpu_state *state = cpu_state_for(get_partition(tsk));
        lt_t now;

        TRACE_TASK(tsk, "wake_up at %llu\n", litmus_clock());

        raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags);

        now = litmus_clock();

        if (is_sporadic(tsk) && is_tardy(tsk, now)) {
                /* This sporadic task was gone for a "long" time and woke up past
                 * its deadline. Give it a new budget by triggering a job
                 * release. */
                release_at(tsk, now);
        }

        /* This check is required to avoid races with tasks that resume before
         * the scheduler "noticed" that it resumed. That is, the wake up may
         * race with the call to schedule(). */
        if (state->scheduled != tsk) {
                demo_requeue(tsk, state);
                if (edf_preemption_needed(&state->local_queues, state->scheduled)) {
                        preempt_if_preemptable(state->scheduled, state->cpu);
                }
        }

        raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags);
}
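The wake-up handler above (and demo_task_new() below) relies on demo_requeue(), which is not part of this excerpt. The following sketch shows one plausible implementation, assuming the DEMO plugin's per-CPU demo_cpu_state and that the ready-queue lock is already held by the caller; is_released(), __add_ready(), and add_release() are the standard rt_domain_t operations in LITMUS^RT.

/* Sketch of demo_requeue(): place a task either in the ready queue or the
 * release queue, depending on whether its current job has been released.
 * Assumes the caller holds state->local_queues.ready_lock. */
static void demo_requeue(struct task_struct *tsk, struct demo_cpu_state *state)
{
        if (is_released(tsk, litmus_clock())) {
                /* The job is eligible to run now: add it to the ready queue.
                 * __add_ready() is used because the ready lock is already
                 * held by the caller. */
                __add_ready(&state->local_queues, tsk);
        } else {
                /* The next job's release lies in the future: hand the task to
                 * the release queue; add_release() acquires its own lock. */
                add_release(&state->local_queues, tsk);
        }
}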
static void demo_task_new(struct task_struct *tsk, int on_runqueue,
                          int is_running)
{
        /* We'll use this to store IRQ flags. */
        unsigned long flags;
        struct demo_cpu_state *state = cpu_state_for(get_partition(tsk));
        lt_t now;

        TRACE_TASK(tsk, "is a new RT task %llu (on runqueue:%d, running:%d)\n",
                   litmus_clock(), on_runqueue, is_running);

        /* Acquire the lock protecting the state and disable interrupts. */
        raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags);

        now = litmus_clock();

        /* Release the first job now. */
        release_at(tsk, now);

        if (is_running) {
                /* If tsk is running, then no other task can be running
                 * on the local CPU. */
                BUG_ON(state->scheduled != NULL);
                state->scheduled = tsk;
        } else if (on_runqueue) {
                demo_requeue(tsk, state);
        }

        if (edf_preemption_needed(&state->local_queues, state->scheduled))
                preempt_if_preemptable(state->scheduled, state->cpu);

        raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags);
}
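For context, handlers like demo_task_new() and demo_task_resume() only take effect once they are registered with the LITMUS^RT core through a struct sched_plugin. The sketch below shows how that registration would typically look; the remaining callbacks (demo_schedule, demo_task_exit, demo_admit_task, and demo_activate_plugin, which is sketched after the next snippet) are assumed to be defined elsewhere in the plugin.

/* Sketch: registering the DEMO plugin's callbacks with the LITMUS^RT core.
 * Only register_sched_plugin() and the sched_plugin fields are standard API;
 * the callbacks not shown in this excerpt are assumptions. */
static struct sched_plugin demo_plugin = {
        .plugin_name     = "DEMO",
        .schedule        = demo_schedule,
        .task_wake_up    = demo_task_resume,  /* wake-up handler above */
        .task_new        = demo_task_new,     /* new-task handler above */
        .task_exit       = demo_task_exit,
        .admit_task      = demo_admit_task,
        .activate_plugin = demo_activate_plugin,
};

static int __init init_demo(void)
{
        return register_sched_plugin(&demo_plugin);
}

module_init(init_demo);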
static int demo_check_for_preemption_on_release(rt_domain_t *local_queues)
{
        struct demo_cpu_state *state = container_of(local_queues,
                                                    struct demo_cpu_state,
                                                    local_queues);

        /* Because this is a callback from rt_domain_t we already hold
         * the necessary lock for the ready queue. */

        if (edf_preemption_needed(local_queues, state->scheduled)) {
                preempt_if_preemptable(state->scheduled, state->cpu);
                return 1;
        }

        return 0;
}
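This function is only invoked if it is installed as the rt_domain_t's "check resched needed" callback. A minimal sketch of how that wiring could be done during plugin activation follows; the name demo_activate_plugin and the exact initialization of demo_cpu_state are assumptions about the rest of the plugin, while edf_domain_init() and for_each_online_cpu() are existing APIs.

/* Sketch: initialize each per-CPU rt_domain_t and install
 * demo_check_for_preemption_on_release() as the resched-check callback, so it
 * runs whenever the domain moves newly released jobs into the ready queue. */
static long demo_activate_plugin(void)
{
        int cpu;
        struct demo_cpu_state *state;

        for_each_online_cpu(cpu) {
                state = cpu_state_for(cpu);  /* per-CPU plugin state */
                state->cpu = cpu;
                state->scheduled = NULL;
                edf_domain_init(&state->local_queues,
                                demo_check_for_preemption_on_release,
                                NULL);
        }

        return 0;
}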
/* we assume the lock is being held */
static void preempt(psnedf_domain_t *pedf)
{
        preempt_if_preemptable(pedf->scheduled, pedf->cpu);
}