void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares);
#endif
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
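/*
 * For reference, the "%Ld.%06ld" pairs above come from SPLIT_NS(), which
 * splits a nanosecond value into a millisecond part and the sub-millisecond
 * remainder. A minimal sketch of the helpers, assuming this tree defines
 * them the usual way in kernel/sched_debug.c:
 */
static unsigned long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);
	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;
	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)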
void print_cfs_stats_at_KE(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq_at_KE(m, cpu, cfs_rq);
	rcu_read_unlock();
}
static void print_rtws_stats(struct seq_file *m, int cpu)
{
	struct rtws_rq *rtws_rq;

	rtws_rq = &cpu_rq(cpu)->rtws;

	rcu_read_lock();
	print_rtws_rq(m, cpu, rtws_rq);
	rcu_read_unlock();
}
void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_sched_qs(cpu);
	prev = rq->curr;
	switch_count = &prev->nivcsw;

	release_kernel_lock(prev);
need_resched_nonpreemptible:

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	raw_spin_lock_irq(&rq->lock);
	update_rq_clock(rq);
	clear_tsk_need_resched(prev);

	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev)))
			prev->state = TASK_RUNNING;
		else
			deactivate_task(rq, prev, 1);
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);

	if (likely(prev != next)) {
		sched_info_switch(prev, next);
		perf_event_task_sched_out(prev, next);

		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
/*
 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
 * hold the hotplug lock.
 */
static void
cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *tmp;

	/* Remove the sched domains which do not contribute to scheduling. */
	for (tmp = sd; tmp; ) {
		struct sched_domain *parent = tmp->parent;
		if (!parent)
			break;

		if (sd_parent_degenerate(tmp, parent)) {
			tmp->parent = parent->parent;
			if (parent->parent)
				parent->parent->child = tmp;
			/*
			 * Transfer SD_PREFER_SIBLING down in case of a
			 * degenerate parent; the spans match for this
			 * so the property transfers.
			 */
			if (parent->flags & SD_PREFER_SIBLING)
				tmp->flags |= SD_PREFER_SIBLING;
			destroy_sched_domain(parent);
		} else
			tmp = tmp->parent;
	}

	if (sd && sd_degenerate(sd)) {
		tmp = sd;
		sd = sd->parent;
		destroy_sched_domain(tmp);
		if (sd)
			sd->child = NULL;
	}

	sched_domain_debug(sd, cpu);

	rq_attach_root(rq, rd);
	tmp = rq->sd;
	rcu_assign_pointer(rq->sd, sd);
	dirty_sched_domain_sysctl(cpu);
	destroy_sched_domains(tmp);

	update_top_cache_domain(cpu);
}
static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
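/*
 * The notifier above is registered once at boot so that a dying or cancelled
 * CPU has its pending hrtick timer cleared. A minimal sketch of the SMP
 * registration path, assuming the usual hotcpu_notifier() setup used by
 * kernel/sched.c in this era:
 */
static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}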
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.running_avg_sum);
	P(se->avg.avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.utilization_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err_free_rt_rq;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err_free_rt_se;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err_free_rt_se:
	kfree(tg->rt_se);
err_free_rt_rq:
	kfree(tg->rt_rq);
err:
	return 0;
}
/*
 * Tries to push a -rtws task to a "random" idle rq.
 */
static int push_idle_rtws(struct rq *this_rq, struct task_struct *p)
{
	struct rq *target_rq;
	int ret = 0, target_cpu;
	struct cpudl *cp = &this_rq->rd->rtwsc_cpudl;

retry:
	target_cpu = find_idle_cpu_rtws(cp);

	if (target_cpu == -1)
		return 0;

	printk(KERN_INFO "idle cpu %d\n", target_cpu);

	target_rq = cpu_rq(target_cpu);

	/* We might release rq lock */
	get_task_struct(p);

	double_lock_balance(this_rq, target_rq);

	if (unlikely(target_rq->rtws.nr_running)) {
		double_unlock_balance(this_rq, target_rq);
		put_task_struct(p);
		target_rq = NULL;
		goto retry;
	}

	set_task_cpu(p, target_cpu);
	activate_task(target_rq, p, 0);
	ret = 1;

	resched_task(target_rq->curr);

	double_unlock_balance(this_rq, target_rq);
	put_task_struct(p);

	return ret;
}
static int steal_pjob_rtws(struct rq *this_rq)
{
	int ret = 0, this_cpu = this_rq->cpu, target_cpu;
	struct task_struct *p;
	struct rq *target_rq;
	struct global_rq *global_rq = this_rq->rtws.global_rq;

	if (global_rq->random) {
		/*
		 * Pseudo-random selection of our victim rq, among
		 * the rqs that currently hold stealable pjobs.
		 */
		target_cpu = find_random_stealable_cpu_rtws(&this_rq->rd->rtwss_cpudl, this_rq->cpu);
	} else {
		/*
		 * When not in random mode, we have to find the rq with
		 * the earliest-deadline stealable pjob.
		 */
		target_cpu = find_earliest_stealable_cpu_rtws(&this_rq->rd->rtwss_cpudl);
	}

	if (target_cpu == -1)
		return 0;

	printk(KERN_INFO "stealable cpu %d\n", target_cpu);

	target_rq = cpu_rq(target_cpu);

	/*
	 * We can potentially drop this_rq's lock in
	 * double_lock_balance, and another CPU could alter this_rq
	 */
	double_lock_balance(this_rq, target_rq);

	if (unlikely(target_rq->rtws.nr_running <= 1))
		goto unlock;

	if (unlikely(this_rq->rtws.nr_running))
		goto unlock;

	p = pick_next_stealable_pjob_rtws(&target_rq->rtws);

	if (p) {
		WARN_ON(p == target_rq->curr);
		WARN_ON(!p->se.on_rq);
		WARN_ON(!rtws_task(p));

		deactivate_task(target_rq, p, 0);
		p->rtws.stolen = target_cpu;
		set_task_cpu(p, this_cpu);
		activate_task(this_rq, p, 0);
		this_rq->rtws.tot_steals++;

		printk(KERN_INFO "=task %d STOLEN by cpu %d from cpu %d!\n",
		       p->pid, this_cpu, target_cpu);
		ret = 1;
	}

unlock:
	double_unlock_balance(this_rq, target_rq);

	return ret;
}
/*
 * schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			/*
			 * If a worker is going to sleep, notify and
			 * ask workqueue whether it wants to wake up a
			 * task to maintain concurrency. If so, wake
			 * up the task.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}

			deactivate_task(rq, prev, DEQUEUE_SLEEP);

			/*
			 * If we are going to sleep and we have plugged IO
			 * queued, make sure to submit it to avoid deadlocks.
			 */
			if (blk_needs_flush_plug(prev)) {
				raw_spin_unlock(&rq->lock);
				blk_schedule_flush_plug(prev);
				raw_spin_lock(&rq->lock);
			}
		}
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch has flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}
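/*
 * The definition above is the !CONFIG_FAIR_GROUP_SCHED case, where every CPU
 * has exactly one cfs_rq. With group scheduling enabled the same helper
 * resolves through the task group instead; a sketch of that variant,
 * assuming the usual CONFIG_FAIR_GROUP_SCHED definition:
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}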
static void __sched notrace __schedule(bool preempt)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

	/* ==1== Find this CPU's runqueue rq and save the currently
	 * running task (rq->curr) in prev */
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	prev = rq->curr;

	/*
	 * do_exit() calls schedule() with preemption disabled as an exception;
	 * however we must fix that up, otherwise the next task will see an
	 * inconsistent (higher) preempt count.
	 *
	 * It also avoids the below schedule_debug() test from complaining
	 * about this.
	 */
	if (unlikely(prev->state == TASK_DEAD))
		preempt_enable_no_resched_notrace();

	/* Calling cond_resched() while kernel preemption is disabled is an
	 * error; schedule_debug() is there to catch that kind of mistake */
	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	/* Disable local interrupts */
	local_irq_disable();

	/* Update global RCU state: this CPU is about to go through a
	 * context switch */
	rcu_note_context_switch();

	/*
	 * Make sure that signal_pending_state()->signal_pending() below
	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
	 * done by the caller to avoid the race with signal_wake_up().
	 */
	smp_mb__before_spinlock();
	/* Lock this runqueue */
	raw_spin_lock(&rq->lock);
	lockdep_pin_lock(&rq->lock);

	rq->clock_skip_update <<= 1; /* promote REQ to ACT */

	/* Switch counter: assume an involuntary switch (preemption) by default */
	switch_count = &prev->nivcsw;

	/*
	 * The scheduler inspects prev's state together with the kernel
	 * preemption flag.
	 *
	 * We cannot rely on the preempt count alone here, because a task
	 * (e.g. a network-polling thread) may have called schedule()
	 * directly; without checking prev->state a still-RUNNING task
	 * could wrongly be treated as going to sleep.
	 * We get here either via a voluntary schedule() or via preemption.
	 * Preemption itself has two cases: the time slice has expired or
	 * it has not. When it expires, the tick handler only sets the
	 * task's need_resched flag, and preempt_schedule_irq() reschedules
	 * once the timer interrupt returns.
	 *
	 * Normally prev would simply be removed from the runqueue rq here.
	 * But if prev has a pending, non-blocked signal and its state is
	 * TASK_INTERRUPTIBLE, it must not be dequeued; instead its state is
	 * set back to TASK_RUNNING and it stays on the rq.
	 */
	/* Only dequeue when this was not a preemption and prev is no longer
	 * runnable, i.e. both of the following hold:
	 *  1. the task's state is not TASK_RUNNING
	 *  2. it was not preempted in kernel mode
	 */
	if (!preempt && prev->state) {
		/* prev has a pending signal and is in an interruptible sleep state */
		if (unlikely(signal_pending_state(prev->state, prev))) {
			/* Set it back to TASK_RUNNING and leave it queued */
			prev->state = TASK_RUNNING;
		} else {
			/* Otherwise remove prev from the runqueue */
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
			/* Mark that prev is no longer on a runqueue */
			prev->on_rq = 0;

			/*
			 * If a worker went to sleep, notify and ask workqueue
			 * whether it wants to wake up a task to maintain
			 * concurrency.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
		}
		/* Not a preemption: count this as a voluntary switch */
		switch_count = &prev->nvcsw;
	}

	/* If prev is still queued on the runqueue, update the rq clock */
	if (task_on_rq_queued(prev))
		update_rq_clock(rq);

	/* Pick the highest-priority runnable task to run next */
	next = pick_next_task(rq, prev);
	/* Clear prev's TIF_NEED_RESCHED flag */
	clear_tsk_need_resched(prev);
	/* Clear the preempt-need-resched flag */
	clear_preempt_need_resched();
	rq->clock_skip_update = 0;

	/* If prev and next are different tasks, switch to next */
	if (likely(prev != next)) {
		rq->nr_switches++;	/* update the runqueue's switch count */
		rq->curr = next;	/* mark next as the rq's current task */
		++*switch_count;	/* update the task's context-switch count */

		trace_sched_switch(preempt, prev, next);
		/* Context switch between the two tasks */
		rq = context_switch(rq, prev, next); /* unlocks the rq */
	} else {
		/* prev == next: no task switch, just drop the lock */
		lockdep_unpin_lock(&rq->lock);
		raw_spin_unlock_irq(&rq->lock);
	}

	balance_callback(rq);
}
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_avg",
			SPLIT_NS(cfs_rq->load_avg));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_period",
			SPLIT_NS(cfs_rq->load_period));
	SEQ_printf(m, " .%-30s: %ld\n", "load_contrib",
			cfs_rq->load_contribution);
	SEQ_printf(m, " .%-30s: %d\n", "load_tg",
			atomic_read(&cfs_rq->tg->load_weight));
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
			cfs_rq->tg->cfs_bandwidth.timer_active);
	SEQ_printf(m, " .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x))
#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, " .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
}
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
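/*
 * The helper above is the SMP case: the RT period timer operates over the
 * root-domain span of the current CPU. On !CONFIG_SMP builds the whole
 * online mask is used instead; a sketch of that variant, assuming the
 * pre-cpumask_var_t cpu_online_map API of this kernel generation:
 */
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}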
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
			   "cpu%d %u 0 %u %u %u %u %llu %llu %lu %u",
			   cpu, rq->yld_count,
			   rq->sched_count, rq->sched_goidle,
			   rq->ttwu_count, rq->ttwu_local,
			   rq->rq_cpu_time,
			   rq->rq_sched_info.run_delay,
			   rq->rq_sched_info.pcount,
			   rq->yield_sleep_count);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}
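/*
 * The definition above is the !CONFIG_RT_GROUP_SCHED case: RT bandwidth is
 * global, so the period timer always acts on each CPU's root rt_rq. With RT
 * group scheduling the rt_bandwidth is embedded in a task_group, and the
 * per-group rt_rq is recovered via container_of(); a sketch of that variant,
 * assuming the usual CONFIG_RT_GROUP_SCHED definition:
 */
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}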
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x) \
do { \
	if (sizeof(rq->x) == 4) \
		SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \
	else \
		SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, " .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#ifdef CONFIG_SMP
	P(cpu_power);
#endif
#ifdef CONFIG_SCHED_HMP
	P(mostly_idle_load);
	P(mostly_idle_nr_run);
	P(load_scale_factor);
	P(capacity);
	P(max_possible_capacity);
	P(efficiency);
	P(cur_freq);
	P(max_freq);
#endif
#ifdef CONFIG_SCHED_HMP
	P(hmp_stats.nr_big_tasks);
	P(hmp_stats.nr_small_tasks);
	SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
		   rq->hmp_stats.cumulative_runnable_avg);
#endif
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}
int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	kfree(mask_str);
	return 0;
}