static int
can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
		 struct sched_domain *sd, enum cpu_idle_type idle,
		 int *all_pinned)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.nr_failed_migrations_affine);
		return 0;
	}
	/* this CPU is allowed, so not every candidate task is pinned */
	*all_pinned = 0;

	if (task_running(rq, p)) {
		schedstat_inc(p, se.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */
	tsk_cache_hot = task_hot(p, rq->clock, sd);
	if (!tsk_cache_hot ||
	    sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(sd, lb_hot_gained[idle]);
			schedstat_inc(p, se.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
		schedstat_inc(p, se.nr_failed_migrations_hot);
		return 0;
	}
	return 1;
}
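The cache-hotness decision above is delegated to task_hot(). A minimal sketch of that helper, modeled on same-era fair-class source and assuming the usual meanings of p->se.exec_start (last time the task ran) and the sysctl_sched_migration_cost tunable; real trees add extra cases (buddy tasks, SCHED_IDLE policy) that are omitted here:

static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
	s64 delta;

	/* tunable sentinels: -1 means always hot, 0 means never hot */
	if (sysctl_sched_migration_cost == -1)
		return 1;
	if (sysctl_sched_migration_cost == 0)
		return 0;

	/* hot if the task ran on this CPU within the migration-cost window */
	delta = now - p->se.exec_start;
	return delta < (s64)sysctl_sched_migration_cost;
}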
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev)
{
	put_prev_task(rq, prev);
	schedstat_inc(rq, sched_goidle);
	return rq->idle;
}
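This variant comes from trees where the pick_next_task hook receives the task being switched out, so each class drops prev itself via put_prev_task() before handing back rq->idle. A rough sketch of the caller side, simplified from that era's core pick loop (the fair-class fast path is omitted); for_each_class() iterates classes from highest to lowest priority, and the idle class ends the walk because it always has rq->idle:

static inline struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p)
			return p;
	}

	BUG(); /* unreachable: the idle class never returns NULL */
}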
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	/* adjust the active tasks as we might go into a long sleep */
	calc_load_account_active(rq);
	return rq->idle;
}
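The calc_load_account_active() call samples this runqueue's active task count into the global load-average bookkeeping before the CPU potentially sleeps for a long time, as the inline comment notes. A hedged sketch of that helper as it looked in trees of this vintage, assuming calc_load_tasks is the global accumulator and rq->calc_load_active the per-runqueue snapshot:

static void calc_load_account_active(struct rq *this_rq)
{
	long nr_active, delta;

	/* runnable plus uninterruptible tasks count toward the load average */
	nr_active = this_rq->nr_running;
	nr_active += (long)this_rq->nr_uninterruptible;

	/* publish only the change since the last sample */
	if (nr_active != this_rq->calc_load_active) {
		delta = nr_active - this_rq->calc_load_active;
		this_rq->calc_load_active = nr_active;
		atomic_long_add(delta, &calc_load_tasks);
	}
}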
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
#ifdef CONFIG_SMP
	/* Trigger the post schedule to do an idle_enter for CFS */
	rq->post_schedule = 1;
#endif
	return rq->idle;
}
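Arming rq->post_schedule here defers work until the context switch has fully completed: the core then invokes the current class's post-schedule hook, which lets CFS account the CPU as idle. A hedged sketch of the matching idle-class hook from that era (idle_enter_fair() is the CFS entry point the inline comment refers to):

#ifdef CONFIG_SMP
static void post_schedule_idle(struct rq *rq)
{
	/* tell CFS this CPU has entered idle, for load tracking */
	idle_enter_fair(rq);
}
#endif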
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	calc_load_account_idle(rq);
	return rq->idle;
}
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	return rq->idle;
}
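Whichever variant a tree ships, it is reached the same way: pick_next_task_idle() is installed as the .pick_next_task hook of the lowest-priority scheduling class, consulted only after every other class declined to supply a task. A trimmed sketch of that table; the exact member set and hook signatures vary across the trees above, so treat this as placement rather than a definitive definition:

static const struct sched_class idle_sched_class = {
	/* no .next entry: the idle class ends the priority chain */
	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.task_tick		= task_tick_idle,
	/* remaining hooks omitted in this sketch */
};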