/*
 * The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	/* Align the sample timer expiry to the next window boundary. */
	u64 expires = round_to_nw_start(pcpu->last_evaluated_jiffy);
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	/* Arm the per-cpu sample timer; safe without load_lock since both
	 * timers are guaranteed inactive by the caller (see header comment). */
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	/*
	 * Arm the slack timer only when slack is enabled (timer_slack_val >= 0)
	 * and the cpu is either above policy->min, or pinned at policy->min by
	 * an active boost pulse (now < boostpulse_endtime).  The slack timer
	 * fires timer_slack_val us after the sample timer.
	 */
	if (timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
	     (pcpu->target_freq == pcpu->policy->min &&
	      now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	/* Reset the load-tracking baseline for the new sampling window. */
	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
static void cpufreq_interactive_timer_resched(unsigned long cpu) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); u64 expires; unsigned long flags; spin_lock_irqsave(&pcpu->load_lock, flags); pcpu->time_in_idle = get_cpu_idle_time(smp_processor_id(), &pcpu->time_in_idle_timestamp, io_is_busy); pcpu->cputime_speedadj = 0; pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; expires = round_to_nw_start(pcpu->last_evaluated_jiffy); del_timer(&pcpu->cpu_timer); pcpu->cpu_timer.expires = expires; add_timer_on(&pcpu->cpu_timer, cpu); if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { expires += usecs_to_jiffies(timer_slack_val); del_timer(&pcpu->cpu_slack_timer); pcpu->cpu_slack_timer.expires = expires; add_timer_on(&pcpu->cpu_slack_timer, cpu); } spin_unlock_irqrestore(&pcpu->load_lock, flags); }
static inline int set_window_helper(void) { return sched_set_window(round_to_nw_start(get_jiffies_64()), usecs_to_jiffies(timer_rate)); }