void sys_tick_handler(void)
{
    uint32_t next_timer = 0;
    volatile uint32_t reload = systick_get_reload();
    uint32_t this_timeslice;

    SysTick_Hook();
    jiffies += clock_interval;
    _n_int++;
    next_timer = ktimers_check();

#ifdef CONFIG_LOWPOWER
    if (next_timer < 0 || next_timer > 1000) {
        next_timer = 1000; /* Wake up every second if timer is too long, or if no timers */
    }

    /* Checking deep sleep */
    if (next_timer >= 1000 && scheduler_can_sleep()) {
        systick_interrupt_disable();
        cputimer_start(next_timer);
        return;
    }

#ifdef CONFIG_TICKLESS
    this_timeslice = task_timeslice();
    if (_sched_active && (this_timeslice == 0) && (!task_running())) {
        schedule();
    } else {
        systick_interrupt_disable();
        cputimer_start(this_timeslice);
    }
    return;
#endif
#endif

    if (_sched_active && ((task_timeslice() == 0) || (!task_running()))) {
        schedule();
        (void)next_timer;
    }
}
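In the CONFIG_LOWPOWER and CONFIG_TICKLESS paths the handler switches the SysTick interrupt off and arms a one-shot CPU timer, so some complementary interrupt must hand control back to the scheduler when that timer fires. The fragment below is a minimal sketch of such a wake-up path, assuming a hypothetical expiry ISR cputimer_expired_isr(), a hypothetical helper cputimer_elapsed() that reports how long the tick was suppressed, and a systick_interrupt_enable() counterpart to the systick_interrupt_disable() call used above; none of these names are taken from the original source.

/* Hypothetical wake-up path for the tickless/low-power mode above.
 * cputimer_expired_isr() and cputimer_elapsed() are illustrative names,
 * not part of the original source. */
void cputimer_expired_isr(void)
{
    uint32_t slept = cputimer_elapsed();   /* ms spent with SysTick off (assumed helper) */

    jiffies += slept;                      /* account for the ticks skipped while asleep */
    systick_interrupt_enable();            /* resume the periodic tick */

    if (_sched_active)
        schedule();                        /* let the scheduler pick the next task */
}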
static int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
                            struct sched_domain *sd, enum cpu_idle_type idle,
                            int *all_pinned)
{
    int tsk_cache_hot = 0;
    /*
     * We do not migrate tasks that are:
     * 1) running (obviously), or
     * 2) cannot be migrated to this CPU due to cpus_allowed, or
     * 3) are cache-hot on their current CPU.
     */
    if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
        schedstat_inc(p, se.nr_failed_migrations_affine);
        return 0;
    }
    *all_pinned = 0;

    if (task_running(rq, p)) {
        schedstat_inc(p, se.nr_failed_migrations_running);
        return 0;
    }

    /*
     * Aggressive migration if:
     * 1) task is cache cold, or
     * 2) too many balance attempts have failed.
     */
    tsk_cache_hot = task_hot(p, rq->clock, sd);
    if (!tsk_cache_hot || sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
        if (tsk_cache_hot) {
            schedstat_inc(sd, lb_hot_gained[idle]);
            schedstat_inc(p, se.nr_forced_migrations);
        }
#endif
        return 1;
    }

    if (tsk_cache_hot) {
        schedstat_inc(p, se.nr_failed_migrations_hot);
        return 0;
    }
    return 1;
}
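For context, can_migrate_task() acts as the filter the load balancer applies when it tries to pull tasks from a busier runqueue onto this_cpu. A caller loop might look roughly like the sketch below; pull_one_task_sketch(), next_candidate_task(), and migrate_task_to() are simplifying assumptions for illustration, not the kernel's actual move_tasks() implementation.

/* Illustrative caller sketch (not the kernel's move_tasks()): pull at most
 * one migratable task from the busiest runqueue onto this_cpu.
 * next_candidate_task() and migrate_task_to() are hypothetical helpers. */
static int pull_one_task_sketch(struct rq *busiest, int this_cpu,
                                struct sched_domain *sd,
                                enum cpu_idle_type idle)
{
    struct task_struct *p;
    int all_pinned = 1;

    for (p = next_candidate_task(busiest, NULL); p;
         p = next_candidate_task(busiest, p)) {
        if (!can_migrate_task(p, busiest, this_cpu, sd, idle, &all_pinned))
            continue;
        migrate_task_to(p, this_cpu);   /* dequeue from busiest, enqueue on this_cpu */
        return 1;                       /* moved one task */
    }
    return 0;                           /* nothing suitable to pull */
}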