/* Boost the priority of task @t (e.g., while it holds a resource) so it is
 * favored within its partition.  Records the boost start time and, if the
 * task is sitting in the ready queue, re-orders the queue and triggers a
 * preemption check.  Runs with the partition's ready-queue lock held and
 * IRQs disabled. */
static void boost_priority(struct task_struct* t)
{
	unsigned long flags;
	psnedf_domain_t* pedf = task_pedf(t);
	lt_t now;

	raw_readyq_lock_irqsave(&pedf->slock, flags);
	now = litmus_clock();

	TRACE_TASK(t, "priority boosted at %llu\n", now);

	tsk_rt(t)->priority_boosted = 1;
	tsk_rt(t)->boost_start_time = now;

	if (pedf->scheduled != t) {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&pedf->domain.release_lock);
		/* NOTE: this condition chain relies on short-circuit
		 * evaluation; bheap_decrease() is called for its side
		 * effect (re-ordering the heap) only when t is queued. */
		if (is_queued(t) &&
		    /* If it is queued, then we need to re-order. */
		    bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
		    /* If we bubbled to the top, then we need to check for
		     * preemptions. */
		    edf_preemption_needed(&pedf->domain, pedf->scheduled))
			preempt(pedf);
		raw_spin_unlock(&pedf->domain.release_lock);
	} /* else: nothing to do since the job is not queued while scheduled */

	raw_readyq_unlock_irqrestore(&pedf->slock, flags);
}
static void unboost_priority(struct task_struct* t) { unsigned long flags; psnedf_domain_t* pedf = task_pedf(t); lt_t now; raw_readyq_lock_irqsave(&pedf->slock, flags); now = litmus_clock(); /* assumption: this only happens when the job is scheduled */ BUG_ON(pedf->scheduled != t); TRACE_TASK(t, "priority restored at %llu\n", now); /* priority boosted jobs must be scheduled */ BUG_ON(pedf->scheduled != t); tsk_rt(t)->priority_boosted = 0; tsk_rt(t)->boost_start_time = 0; /* check if this changes anything */ if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) preempt(pedf); raw_readyq_unlock_irqrestore(&pedf->slock, flags); }
/* Called when the state of tsk changes back to TASK_RUNNING. * We need to requeue the task. * * NOTE: If a sporadic task is suspended for a long time, * this might actually be an event-driven release of a new job. */ static void demo_task_resume(struct task_struct *tsk) { unsigned long flags; struct demo_cpu_state *state = cpu_state_for(get_partition(tsk)); lt_t now; TRACE_TASK(tsk, "wake_up at %llu\n", litmus_clock()); raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags); now = litmus_clock(); if (is_sporadic(tsk) && is_tardy(tsk, now)) { /* This sporadic task was gone for a "long" time and woke up past * its deadline. Give it a new budget by triggering a job * release. */ release_at(tsk, now); } /* This check is required to avoid races with tasks that resume before * the scheduler "noticed" that it resumed. That is, the wake up may * race with the call to schedule(). */ if (state->scheduled != tsk) { demo_requeue(tsk, state); if (edf_preemption_needed(&state->local_queues, state->scheduled)) { preempt_if_preemptable(state->scheduled, state->cpu); } } raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags); }
static void demo_task_new(struct task_struct *tsk, int on_runqueue, int is_running) { /* We'll use this to store IRQ flags. */ unsigned long flags; struct demo_cpu_state *state = cpu_state_for(get_partition(tsk)); lt_t now; TRACE_TASK(tsk, "is a new RT task %llu (on runqueue:%d, running:%d)\n", litmus_clock(), on_runqueue, is_running); /* Acquire the lock protecting the state and disable interrupts. */ raw_spin_lock_irqsave(&state->local_queues.ready_lock, flags); now = litmus_clock(); /* Release the first job now. */ release_at(tsk, now); if (is_running) { /* If tsk is running, then no other task can be running * on the local CPU. */ BUG_ON(state->scheduled != NULL); state->scheduled = tsk; } else if (on_runqueue) { demo_requeue(tsk, state); } if (edf_preemption_needed(&state->local_queues, state->scheduled)) preempt_if_preemptable(state->scheduled, state->cpu); raw_spin_unlock_irqrestore(&state->local_queues.ready_lock, flags); }
/* Trigger a preemption on @pedf's CPU if its ready queue holds a job with
 * higher priority than the currently-scheduled one.
 * Returns 1 iff a preemption was requested, 0 otherwise. */
static int psnedf_preempt_check(psnedf_domain_t *pedf)
{
	if (!edf_preemption_needed(&pedf->domain, pedf->scheduled))
		return 0;

	preempt(pedf);
	return 1;
}
/* rt_domain_t release callback: decide whether a freshly released job
 * should preempt the task currently scheduled on this partition.
 * Returns 1 iff a preemption was requested, 0 otherwise. */
static int demo_check_for_preemption_on_release(rt_domain_t *local_queues)
{
	struct demo_cpu_state *cpu_state =
		container_of(local_queues, struct demo_cpu_state,
			     local_queues);

	/* The rt_domain_t framework invokes this callback with the
	 * ready-queue lock already held; no additional locking needed. */
	if (!edf_preemption_needed(local_queues, cpu_state->scheduled))
		return 0;

	preempt_if_preemptable(cpu_state->scheduled, cpu_state->cpu);
	return 1;
}
/* Core scheduling decision for the PSN-EDF plugin on the local partition.
 * Examines prev's state (blocked, out of budget, non-preemptive, completed)
 * and the ready queue, and returns the next real-time task to run, or NULL
 * to fall back to background (Linux) work.  Runs with the partition's
 * ready-queue lock held. */
static struct task_struct* psnedf_schedule(struct task_struct * prev)
{
	psnedf_domain_t* pedf = local_pedf;
	rt_domain_t* edf = &pedf->domain;
	struct task_struct* next;

	int out_of_time, sleep, preempt, np, exists, blocks, resched;

	raw_readyq_lock(&pedf->slock);

	/* sanity checking
	 * differently from gedf, when a task exits (dead)
	 * pedf->schedule may be null and prev _is_ realtime
	 */
	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
	BUG_ON(pedf->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists = pedf->scheduled != NULL;
	blocks = exists && !is_running(pedf->scheduled);
	out_of_time = exists && budget_enforced(pedf->scheduled) &&
		bt_flag_is_set(pedf->scheduled, BTF_BUDGET_EXHAUSTED);
	np = exists && is_np(pedf->scheduled);
	sleep = exists && is_completed(pedf->scheduled);
	preempt = edf_preemption_needed(edf, prev);

	/* If we need to preempt do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances. */
	resched = preempt;

	/* Do budget stuff: advance the budget state machine according to
	 * why prev is leaving the CPU (at most one of these applies). */
	if (blocks)
		budget_state_machine(prev,on_blocked);
	else if (sleep)
		budget_state_machine(prev,on_sleep);
	else if (preempt)
		budget_state_machine(prev,on_preempt);

	/* If a task blocks we have no choice but to reschedule. */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt. */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(pedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. */
	if (!np && (out_of_time || sleep) && !blocks) {
		/* !sleep means a forced (budget-exhaustion) completion. */
		job_completion(pedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched. */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue. */
		if (pedf->scheduled && !blocks)
			requeue(pedf->scheduled, edf);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue. */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	pedf->scheduled = next;
	sched_state_task_picked();
	raw_readyq_unlock(&pedf->slock);

	return next;
}
/* Core scheduling decision for the demo plugin on the local CPU.  Inspects
 * why prev is leaving the CPU (self-suspension, budget exhaustion, job
 * completion, preemption) and returns the next real-time task to run, or
 * NULL to schedule background (non-real-time) work. */
static struct task_struct* demo_schedule(struct task_struct * prev)
{
	struct demo_cpu_state *local_state = local_cpu_state();

	/* next == NULL means "schedule background work". */
	struct task_struct *next = NULL;

	/* prev's task state */
	int exists, out_of_time, job_completed, self_suspends, preempt, resched;

	raw_spin_lock(&local_state->local_queues.ready_lock);

	/* Sanity checks: if anything is recorded as scheduled here, it must
	 * be prev, and it must be a real-time task. */
	BUG_ON(local_state->scheduled && local_state->scheduled != prev);
	BUG_ON(local_state->scheduled && !is_realtime(prev));

	exists = local_state->scheduled != NULL;
	self_suspends = exists && !is_current_running();
	out_of_time = exists && budget_enforced(prev) && budget_exhausted(prev);
	job_completed = exists && is_completed(prev);

	/* preempt is true if task `prev` has lower priority than something on
	 * the ready queue. */
	preempt = edf_preemption_needed(&local_state->local_queues, prev);

	/* check all conditions that make us reschedule */
	resched = preempt;

	/* if `prev` suspends, it CANNOT be scheduled anymore => reschedule */
	if (self_suspends) {
		resched = 1;
	}

	/* also check for (in-)voluntary job completions */
	if (out_of_time || job_completed) {
		/* out_of_time => forced (involuntary) completion */
		demo_job_completion(prev, out_of_time);
		resched = 1;
	}

	if (resched) {
		/* First check if the previous task goes back onto the ready
		 * queue, which it does if it did not self_suspend. */
		if (exists && !self_suspends) {
			demo_requeue(prev, local_state);
		}
		next = __take_ready(&local_state->local_queues);
	} else {
		/* No preemption is required. */
		next = local_state->scheduled;
	}

	local_state->scheduled = next;

	if (exists && prev != next) {
		TRACE_TASK(prev, "descheduled.\n");
	}
	if (next) {
		TRACE_TASK(next, "scheduled.\n");
	}

	/* This is mandatory. It triggers a transition in the LITMUS^RT remote
	 * preemption state machine. Call this AFTER the plugin has made a
	 * local scheduling decision. */
	sched_state_task_picked();

	raw_spin_unlock(&local_state->local_queues.ready_lock);

	return next;
}