/*
 * Mark the runqueue as RT-overloaded when it hosts more than one RT task
 * and at least one of them may migrate; clear the flag otherwise.
 */
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
/*
 * RT period timer handler: replenish each per-CPU rt_rq's runtime,
 * unthrottle queues that have budget again, and return whether the
 * timer can go idle (no pending rt_time and no runnable RT tasks).
 */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	cpumask_t span;

	if (rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu_mask(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}
/*
 * Put a group rt_rq's entity back on its parent queue and reschedule
 * the current task if the queue now holds a higher-priority task
 * (lower prio value) than the one running.
 */
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}
static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	/* Only plain tasks count towards the root rq's migration stats. */
	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	/* Only plain tasks count towards the root rq's migration stats. */
	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}