/* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to increase frequency. Else, we adjust the frequency * proportional to load. */ static void od_check_cpu(int cpu, unsigned int load) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; dbs_info->freq_lo = 0; /* Check for frequency increase */ if (load > od_tuners->up_threshold) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); } else { /* Calculate the next frequency proportional to load */ unsigned int freq_next; freq_next = load * policy->cpuinfo.max_freq / 100; /* No longer fully busy, reset rate_mult */ dbs_info->rate_mult = 1; if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); return; } freq_next = od_ops.powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } }
/* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to increase frequency. Every sampling_rate, we look * for the lowest frequency which can sustain the load while keeping idle time * over 30%. If such a frequency exist, we try to decrease to this frequency. * * Any frequency increase takes it to the maximum frequency. Frequency reduction * happens at minimum steps of 5% (default) of current frequency */ static void od_check_cpu(int cpu, unsigned int load_freq) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; dbs_info->freq_lo = 0; /* Check for frequency increase */ if (load_freq > od_tuners->up_threshold * policy->cur) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); return; } /* Check for frequency decrease */ /* if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; /* * The optimal frequency is the frequency that is the lowest that can * support the current CPU usage without triggering the up policy. To be * safe, we focus 10 points under the threshold. */ if (load_freq < od_tuners->adj_up_threshold * policy->cur) { unsigned int freq_next; freq_next = load_freq / od_tuners->adj_up_threshold; /* No longer fully busy, reset rate_mult */ dbs_info->rate_mult = 1; if (freq_next < policy->min) freq_next = policy->min; if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); return; } freq_next = od_ops.powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } }
/* * Every sampling_rate, we check, if current idle time is less than 37% * (default), then we try to increase frequency. Else, we adjust the frequency * proportional to load. */ static void od_check_cpu(int cpu, unsigned int load) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; bool boosted; u64 now; dbs_info->freq_lo = 0; now = ktime_to_us(ktime_get()); boosted = now < (last_input_time + get_input_boost_duration()); /* Check for frequency increase */ if (load > od_tuners->up_threshold) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); } else { /* Calculate the next frequency proportional to load */ unsigned int freq_next, min_f, max_f; min_f = policy->cpuinfo.min_freq; max_f = policy->cpuinfo.max_freq; freq_next = min_f + load * (max_f - min_f) / 100; /* No longer fully busy, reset rate_mult */ dbs_info->rate_mult = 1; if (boosted && policy->cur < input_boost_freq && freq_next < input_boost_freq) freq_next = input_boost_freq; if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C); return; } if (boosted && policy->cur <= input_boost_freq) return; freq_next = od_ops.powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C); } }
static void od_check_cpu(int cpu, unsigned int load) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; dbs_info->freq_lo = 0; if (load > od_tuners->up_threshold) { if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, load, policy->max); } else { unsigned int freq_next; freq_next = load * policy->cpuinfo.max_freq / 100; dbs_info->rate_mult = 1; if (!od_tuners->powersave_bias) { trace_cpufreq_interactive_target(policy->cpu, load, freq_next, policy->cur, freq_next); __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); trace_cpufreq_interactive_setspeed(policy->cpu, freq_next, policy->cur); return; } freq_next = od_ops.powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); trace_cpufreq_interactive_target(policy->cpu, load, freq_next, policy->cur, freq_next); __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); trace_cpufreq_interactive_setspeed(policy->cpu, freq_next, policy->cur); } }
/* * Every sampling_rate, we check, if current idle time is less than 20% * (default), then we try to increase frequency. Every sampling_rate, we look * for the lowest frequency which can sustain the load while keeping idle time * over 30%. If such a frequency exist, we try to decrease to this frequency. * * Any frequency increase takes it to the maximum frequency. Frequency reduction * happens at minimum steps of 5% (default) of current frequency */ static void od_check_cpu(int cpu, unsigned int load_freq) { struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct dbs_data *dbs_data = policy->governor_data; struct od_dbs_tuners *od_tuners = dbs_data->tuners; char name[]= "BAT0"; int bat_current = 0; int bat_capacity = 0; int bat_percentage =0; int ac_status=0; int bat_status=0; struct power_supply *psy = power_supply_get_by_name(name); union power_supply_propval chargenow, chargecapacity, batstatus; dbs_info->freq_lo = 0; bat_capacity = psy->get_property(psy,POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,&chargecapacity); bat_current = psy->get_property(psy,POWER_SUPPLY_PROP_CHARGE_NOW,&chargenow); bat_status = psy->get_property(psy,POWER_SUPPLY_PROP_STATUS,&batstatus); ac_status = batstatus.intval; bat_percentage=(chargenow.intval*100)/chargecapacity.intval; printk(KERN_INFO "*****************%d*********************",ac_status); if(bat_percentage<DEF_BATTERY_THRESHOLD&&(!(ac_status==4||ac_status==1))){ if(bat_prev-bat_percentage>=2){ bat_prev=bat_percentage; target_freq=max(policy->min,((policy->max-policy->min)*bat_percentage)/100); } printk(KERN_INFO "policy->curr:: %d target_freq:: %d load_freq:: %d \n",policy->cur,target_freq,load_freq); if(bat_flag==0){ bat_flag=1; bat_sampling_rate=od_tuners->sampling_rate; } if(hist_counter==10){ hist_counter=0; } freq_history[hist_counter]=policy->cur; hist_counter=hist_counter+1; his_flag=1; 
his_flag=freq_history[0]^freq_history[1]^freq_history[2]^freq_history[3]^freq_history[4]^freq_history[5]^freq_history[6]^freq_history[7]^freq_history[8]^freq_history[9]; //use a low cost methord ie xor /*for(hist_loop_cntr=0;hist_loop_cntr<10;hist_loop_cntr++){ printk(KERN_INFO "%d_%d\n",hist_loop_cntr,freq_history[hist_loop_cntr]); if(freq_history[hist_loop_cntr]!=freq_history[0]){ if(od_tuners->sampling_rate==bat_sampling_rate) break; printk(KERN_INFO "Reseted sampling to::: %d",od_tuners->sampling_rate); his_flag=1; break; } }*/ if((his_flag==0)&&(od_tuners->sampling_rate==bat_sampling_rate)){ od_tuners->sampling_rate=od_tuners->sampling_rate*11; printk(KERN_INFO "changed sampling to::: %d",od_tuners->sampling_rate); } else if(his_flag!=0){ if(od_tuners->sampling_rate!=bat_sampling_rate){ od_tuners->sampling_rate=bat_sampling_rate; printk(KERN_INFO "Reseted sampling to::: %d",od_tuners->sampling_rate); his_flag=1; } } if (load_freq > od_tuners->up_threshold *policy->cur) { freq_down_counter=1; if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; target_freq=target_freq+(freq_up_counter*10000); printk(KERN_INFO "load is gr8r upcounter %d target %d\n",freq_up_counter,target_freq); freq_up_counter++; } else if(load_freq < od_tuners->adj_up_threshold* policy->cur){ freq_up_counter=0; target_freq=target_freq-((freq_down_counter-1)*10000); printk(KERN_INFO "load is less downcr %d target %d adj_up %d \n",freq_down_counter,target_freq,od_tuners->adj_up_threshold); freq_down_counter=freq_down_counter*2; } printk(KERN_INFO "Battery <95 Target freq %d load freq %d threshold %d\n",target_freq,load_freq,od_tuners->up_threshold); __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_L); if(target_freq<=policy->min){ freq_down_counter=1; target_freq=policy->cur; } if(target_freq>=policy->max){ freq_up_counter=0; target_freq=policy->cur; } return; } if(bat_flag==1){ bat_flag=0; freq_up_counter=0; freq_down_counter=1; bat_prev=105; 
hist_counter=0; od_tuners->sampling_rate=bat_sampling_rate; } /* Check for frequency increase */ if (load_freq > od_tuners->up_threshold * policy->cur) { /* If switching to max speed, apply sampling_down_factor */ if (policy->cur < policy->max) dbs_info->rate_mult = od_tuners->sampling_down_factor; dbs_freq_increase(policy, policy->max); return; } /* Check for frequency decrease */ /* if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; /* * The optimal frequency is the frequency that is the lowest that can * support the current CPU usage without triggering the up policy. To be * safe, we focus 10 points under the threshold. */ if (load_freq < od_tuners->adj_up_threshold * policy->cur) { unsigned int freq_next; freq_next = load_freq / od_tuners->adj_up_threshold; /* No longer fully busy, reset rate_mult */ dbs_info->rate_mult = 1; if (freq_next < policy->min) freq_next = policy->min; if (!od_tuners->powersave_bias) { __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); return; } freq_next = od_ops.powersave_bias_target(policy, freq_next, CPUFREQ_RELATION_L); __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L); } }
/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Every sampling_rate, we look
 * for the lowest frequency which can sustain the load while keeping idle time
 * over 30%. If such a frequency exist, we try to decrease to this frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency reduction
 * happens at minimum steps of 5% (default) of current frequency
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
#ifdef CONFIG_ARCH_HI6XXX
	/*
	 * HI6XXX uses a two-stage ramp: above od_6xxx_up_threshold but below
	 * up_threshold we scale the request by load; above up_threshold we go
	 * straight to policy max.
	 */
	if(load_freq > od_tuners->od_6xxx_up_threshold * policy->cur)
	{
		unsigned int freq_next;

		/* If increase speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult = od_tuners->sampling_down_factor;

		if (load_freq > od_tuners->up_threshold * policy->cur)
			freq_next = policy->max;
		else
			freq_next = load_freq / od_tuners->od_6xxx_up_threshold;

		dbs_freq_increase(policy, freq_next);
		return;
	}
#else
	if (load_freq > od_tuners->up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult = od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}
#endif

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that can
	 * support the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	/*
	 * NOTE: the #ifdef below splices only the if-head and the freq_next
	 * computation; the tail of the block is shared by both configs.
	 */
#ifdef CONFIG_ARCH_HI6XXX
	if (load_freq < od_tuners->od_6xxx_down_threshold * policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->od_6xxx_down_threshold;
#else
	if (load_freq < od_tuners->adj_up_threshold * policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->adj_up_threshold;
#endif
		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}

/*
 * Deferred-work handler for the ondemand governor: either completes the low
 * half of a powersave_bias sub-sample or runs a normal load evaluation, then
 * re-queues itself with the appropriate delay.
 */
static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	/* Too soon since the last evaluation: just re-arm with the max delay */
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		/* Second half of a powersave_bias cycle: drop to freq_lo */
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	/* Never go below the governor's minimum sampling rate */
	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		/* Only CPUs actually governed by ondemand are affected */
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		/*
		 * The pending sample would fire later than the new rate
		 * allows: cancel it (mutex dropped so the work can finish if
		 * already running) and re-queue at the new, shorter delay.
		 */
		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}

/* sysfs store handler: parse an unsigned rate and apply it immediately */
static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}