/* Budget-tracker callback: a SOBLIV-drained task just blocked (suspended).
 *
 * Only tasks currently in the top-m set keep draining budget while
 * suspended, so everything here is gated on BTF_IS_TOP_M.
 */
void sobliv_on_blocked(struct task_struct* t)
{
	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
		/* there is a fraction of time where we're double-counting the
		 * time tracked by the rq and suspension time.
		 * TODO: Do this recording closer to suspension time. */
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

		if (!tsk_rt(t)->budget.timer.armed) {
			/* budget exhaustion timer fired as t was waking up, so budget
			 * routine thought t was running. We need to re-trigger the budget
			 * exhaustion routine via timer. Schedulers do not call
			 * job_completion() when a task blocks, even if t's budget has been
			 * exhausted. Unfortunately, we cannot rerun the exhaustion routine
			 * here due to spinlock ordering issues. Just re-arm the timer with
			 * the exhausted time, re-running the timer routine immediately once
			 * interrupts have been re-enabled. */

			/* clear the exhausted flag so handle will re-run. this will not
			 * trigger another exhaustion signal since signals are controlled by
			 * BTF_SIG_BUDGET_SENT. */
			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);

			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
				TRACE_TASK(t, "budget timer not armed. "
					"Raced with exhaustion-resched? Re-arming.\n");
				/* force == 1: arm even though the budget is
				 * already exhausted, so the handler fires ASAP */
				arm_enforcement_timer(t, 1);
			}
			else {
				TRACE_TASK(t, "not arming timer because task is waiting "
					"for release.\n");
			}
		}
	}
}
inline static void arm_enforcement_timer(struct task_struct* t, int force) { struct enforcement_timer* et; lt_t when_to_fire, remaining_budget; lt_t now; unsigned long flags; BUG_ON(!t); BUG_ON(!is_realtime(t)); et = &tsk_rt(t)->budget.timer; if (et->armed) { TRACE_TASK(t, "timer already armed!\n"); return; } if (!force) { if ( (!budget_enforced(t) || (budget_enforced(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED))) && (!budget_signalled(t) || (budget_signalled(t) && bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) { TRACE_TASK(t, "trying to arm timer when budget " "has already been exhausted.\n"); return; } } TRACE_TASK(t, "arming enforcement timer.\n"); /* __hrtimer_start_range_ns() cancels the timer * anyway, so we don't have to check whether it is still armed */ raw_spin_lock_irqsave(&et->lock, flags); if (et->armed) { TRACE_TASK(t, "timer already armed (race)!\n"); goto out; } now = litmus_clock(); remaining_budget = budget_remaining(t); when_to_fire = now + remaining_budget; TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n", remaining_budget, when_to_fire); __hrtimer_start_range_ns(&et->timer, ns_to_ktime(when_to_fire), 0 /* delta */, HRTIMER_MODE_ABS_PINNED, /* TODO: need to use non-pinned? */ 0 /* no wakeup */); et->armed = 1; out: raw_spin_unlock_irqrestore(&et->lock, flags); }
/* Budget-tracker callback: t was just picked to run. Arm the enforcement
 * timer if budget is precisely tracked and no exhaustion signal has been
 * delivered yet. Non-forced arming (force == 0): arm_enforcement_timer()
 * itself bails out if the budget is already exhausted/signalled. */
void simple_on_scheduled(struct task_struct* t)
{
	BUG_ON(!t);

	if (!budget_precisely_tracked(t))
		return;
	if (bt_flag_is_set(t, BTF_SIG_BUDGET_SENT))
		return;
	if (!tsk_rt(t)->budget.timer.armed)
		arm_enforcement_timer(t, 0);
}
void sobliv_on_wakeup(struct task_struct* t) { if (bt_flag_is_set(t, BTF_IS_TOP_M)) { /* we're waking up while in top-m. record the time spent * suspended while draining in exec_cost. suspend_timestamp was * either set when we entered top-m while asleep, or when we * blocked. */ if (tsk_rt(t)->budget.suspend_timestamp) { lt_t suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp; tsk_rt(t)->budget.suspend_timestamp = 0; TRACE_TASK(t, "budget consumed while suspended: %llu\n", suspend_cost); get_exec_time(t) += suspend_cost; } else { WARN_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE)); } } }
/* Budget-tracker callback (SIMPLE_IO policy): t just blocked.
 *
 * Distinguishes I/O suspensions (budget keeps draining) from suspensions
 * on a litmus lock (draining stops, handled by simple_on_blocked()).
 */
void simple_io_on_blocked(struct task_struct* t)
{
	/* hiding is turned on by locking protocols, so if there isn't any
	   hiding, then we're blocking for some other reason.  assume
	   it's I/O. */
	int for_io = 0;
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	/* not blocked on a nested lock => assume I/O */
	for_io |= !tsk_rt(t)->blocked_lock;
#endif
#ifdef CONFIG_REALTIME_AUX_TASKS
	/* has aux tasks but is not hiding from them => not lock-related */
	for_io |= tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks;
#endif
#ifdef CONFIG_LITMUS_NVIDIA
	/* holds GPUs but is not hiding from the GPU => not lock-related */
	for_io |= tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu;
#endif

	/* we drain budget for io-based suspensions */
	if (for_io) {
		/* there is a fraction of time where we're double-counting the
		 * time tracked by the rq and suspension time.
		 * TODO: Do this recording closer to suspension time. */
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

		TRACE_TASK(t, "blocking for I/O.\n");

		if (!tsk_rt(t)->budget.timer.armed) {
			/* Same race-recovery path as sobliv_on_blocked(): the
			 * exhaustion timer fired while t was waking, so clear
			 * the exhausted flag and force-re-arm so the handler
			 * re-runs once interrupts are re-enabled. */
			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);

			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
				TRACE_TASK(t, "budget timer not armed. "
					"Raced with exhaustion-resched? Re-arming.\n");
				arm_enforcement_timer(t, 1);
			}
			else {
				TRACE_TASK(t, "not arming timer because task is waiting "
					"for release.\n");
			}
		}
	}
	else {
		TRACE_TASK(t, "blocking for litmus lock. stop draining.\n");
		simple_on_blocked(t);
	}
}
static enum hrtimer_restart __on_timeout(struct hrtimer *timer) { enum hrtimer_restart restart = HRTIMER_NORESTART; unsigned long flags; struct budget_tracker* bt = container_of( container_of(timer, struct enforcement_timer, timer), struct budget_tracker, timer); struct task_struct* t = container_of( container_of(bt, struct rt_param, budget), struct task_struct, rt_param); TRACE_TASK(t, "budget timer interrupt fired at time %lu\n", litmus_clock()); raw_spin_lock_irqsave(&bt->timer.lock, flags); tsk_rt(t)->budget.timer.armed = 0; raw_spin_unlock_irqrestore(&bt->timer.lock, flags); if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { TRACE_TASK(t, "spurious exhastion while waiting for release. dropping.\n"); goto out; } restart = bt->ops->on_exhausted(t,!IN_SCHEDULE); raw_spin_lock_irqsave(&bt->timer.lock, flags); tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART); raw_spin_unlock_irqrestore(&bt->timer.lock, flags); out: return restart; }
/* Budget-tracker callback: t just entered the top-m set, so its budget
 * starts draining. Arm the enforcement timer unless the exhaustion signal
 * was already sent, the timer is already armed, or we are inside the timer
 * callback itself (which re-arms on its own). */
void sobliv_on_enter_top_m(struct task_struct* t)
{
	if (bt_flag_is_set(t, BTF_SIG_BUDGET_SENT))
		return;

	if (tsk_rt(t)->budget.timer.armed) {
		TRACE_TASK(t, "budget timer already armed.\n");
		return;
	}

	/* if we're blocked, then record the time at which we
	   started measuring */
	if (!is_running(t))
		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();

	/* the callback will handle it if it is executing */
	if (hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer))
		TRACE_TASK(t, "within callback context. deferring timer arm.\n");
	else
		arm_enforcement_timer(t, 0);
}
static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t, int in_schedule) { /* Assumption: t is scheduled on the CPU executing this callback */ if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { /* signal exhaustion */ send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */ } if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) { if (!is_np(t)) { /* np tasks will be preempted when they become * preemptable again */ litmus_reschedule_local(); TRACE("%d is preemptable => FORCE_RESCHED\n", t->pid); } else if (is_user_np(t)) { TRACE("%d is non-preemptable, preemption delayed.\n", t->pid); request_exit_np(t); } } return HRTIMER_NORESTART; }
/* PSN-EDF (partitioned EDF) scheduling function.
 *
 * @prev: the task that was running on this CPU (may be NULL / non-RT).
 * Returns the next real-time task to run, or NULL to yield to Linux.
 *
 * Runs with the per-partition ready-queue lock held throughout.
 */
static struct task_struct* psnedf_schedule(struct task_struct * prev)
{
	psnedf_domain_t* 	pedf = local_pedf;
	rt_domain_t*		edf  = &pedf->domain;
	struct task_struct*	next;

	int 			out_of_time, sleep, preempt,
				np, exists, blocks, resched;

	raw_readyq_lock(&pedf->slock);

	/* sanity checking
	 * differently from gedf, when a task exits (dead)
	 * pedf->schedule may be null and prev _is_ realtime
	 */
	BUG_ON(pedf->scheduled && pedf->scheduled != prev);
	BUG_ON(pedf->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists      = pedf->scheduled != NULL;
	blocks      = exists && !is_running(pedf->scheduled);
	out_of_time = exists &&
				  budget_enforced(pedf->scheduled) &&
				  bt_flag_is_set(pedf->scheduled, BTF_BUDGET_EXHAUSTED);
	np 	    = exists && is_np(pedf->scheduled);
	sleep	    = exists && is_completed(pedf->scheduled);
	preempt     = edf_preemption_needed(edf, prev);

	/* If we need to preempt do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* Do budget stuff: dispatch to the budget-tracker callback that
	 * matches why prev is leaving the CPU. */
	if (blocks)
		budget_state_machine(prev,on_blocked);
	else if (sleep)
		budget_state_machine(prev,on_sleep);
	else if (preempt)
		budget_state_machine(prev,on_preempt);

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(pedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep) && !blocks) {
		job_completion(pedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (pedf->scheduled && !blocks)
			requeue(pedf->scheduled, edf);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	pedf->scheduled = next;
	sched_state_task_picked();
	raw_readyq_unlock(&pedf->slock);

	return next;
}