static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) return; /* If there is an extended block of busy processing, set * frequency to turbo. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = 0; kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO); } else if (priv->idle_dcvs) { idle = priv->bin.total_time - priv->bin.busy_time; idle = (idle > 0) ? idle : 0; val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id); } else { if (pwr->step_mul > 1) val = __secure_tz_entry3(TZ_UPDATE_ID, (pwr->active_pwrlevel + 1)/2, priv->bin.total_time, priv->bin.busy_time); else val = __secure_tz_entry3(TZ_UPDATE_ID, pwr->active_pwrlevel, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* If the decision is to move to a lower level, make sure the GPU * frequency drops. */ if (val > 0) val *= pwr->step_mul; if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); }
/*
 * tz_get_target_freq() - devfreq governor callback: choose the next GPU
 * frequency and a bus vote hint based on accumulated busy statistics.
 *
 * @devfreq: devfreq instance for the GPU.
 * @freq:    out - selected frequency from profile->freq_table.
 * @flag:    out - DEVFREQ_FLAG_FAST_HINT / DEVFREQ_FLAG_SLOW_HINT bus hint.
 *
 * Returns 0 on success or a negative error from get_dev_status() /
 * devfreq_get_freq_level().
 */
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
				u32 *flag)
{
	int result = 0;
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
	struct devfreq_dev_status stats;
	struct xstats b;
	int val, level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;

	/* Only collect extended (bus) stats when bus scaling is in use. */
	if (priv->bus.num)
		stats.private_data = &b;
	else
		stats.private_data = NULL;

	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
	if (result) {
		pr_err(TAG "get_status failed %d\n", result);
		return result;
	}

	*freq = stats.current_frequency;
	*flag = 0;
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;
	if (priv->bus.num) {
		priv->bus.total_time += stats.total_time;
		priv->bus.gpu_time += stats.busy_time;
		priv->bus.ram_time += b.ram_time;
		priv->bus.ram_time += b.ram_wait;
	}

	/*
	 * Do not waste CPU cycles running this algorithm if
	 * the GPU just started, or if less than FLOOR time
	 * has passed since the last run or the gpu hasn't been
	 * busier than MIN_BUSY.
	 */
	if ((stats.total_time == 0) ||
		(priv->bin.total_time < FLOOR) ||
		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
		return 0;
	}

	level = devfreq_get_freq_level(devfreq, stats.current_frequency);
	if (level < 0) {
		/* current_frequency is unsigned long: use %lu, not %ld. */
		pr_err(TAG "bad freq %lu\n", stats.current_frequency);
		return level;
	}

	/*
	 * If there is an extended block of busy processing,
	 * increase frequency. Otherwise run the normal algorithm.
	 */
	if (priv->bin.busy_time > CEILING) {
		/* -level drives the clamp below to index 0 (top speed). */
		val = -1 * level;
	} else {
		val = __secure_tz_entry3(TZ_UPDATE_ID,
					level,
					priv->bin.total_time,
					priv->bin.busy_time);
	}
	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	/*
	 * If the decision is to move to a different level, make sure the GPU
	 * frequency changes.
	 */
	if (val) {
		level += val;
		level = max(level, 0);
		/*
		 * freq_table holds max_state entries, so the highest valid
		 * index is max_state - 1; clamping to max_state allowed an
		 * out-of-bounds read of freq_table below.
		 */
		level = min_t(int, level, devfreq->profile->max_state - 1);
		goto clear;
	}

	/* Bus DCVS below this point: needs a long enough sample window. */
	if (priv->bus.total_time < LONG_FLOOR)
		goto end;
	norm_cycles = (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;

	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint.  Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		*flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/*
		 * Normalize by gpu_time unless it is a small fraction of
		 * the total time interval.
		 */
		norm_cycles = (100 * norm_cycles) / TARGET;
		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
			(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
			gpu_percent > CAP)
			*flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			*flag = DEVFREQ_FLAG_SLOW_HINT;
	}

clear:
	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;

end:
	*freq = devfreq->profile->freq_table[level];
	return 0;
}
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq) { int result = 0; struct devfreq_msm_adreno_tz_data *priv = devfreq->data; struct devfreq_dev_status stats; int val, level = 0; result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats); if (result) { pr_err(TAG "get_status failed %d\n", result); return result; } *freq = stats.current_frequency; priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* * Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) { return 0; } level = devfreq_get_freq_level(devfreq, stats.current_frequency); if (level < 0) { pr_err(TAG "bad freq %ld\n", stats.current_frequency); return level; } /* * If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1; } else { val = __secure_tz_entry3(TZ_UPDATE_ID, level, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* * If the decision is to move to a lower level, make sure the GPU * frequency drops. */ level += val; level = max(level, 0); level = min_t(int, level, devfreq->profile->max_state); *freq = devfreq->profile->freq_table[level]; /* * By setting freq as UINT_MAX we notify the kgsl target function * to go up one power level without considering the freq value */ if (val < 0) *freq = UINT_MAX; return 0; }
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) return; /* If the GPU has stayed in turbo mode for a while, * * stop writing out values. */ if (pwr->active_pwrlevel == 0) { if (priv->no_switch_cnt > SWITCH_OFF) { priv->skip_cnt++; if (priv->skip_cnt > SKIP_COUNTER) { priv->no_switch_cnt -= SWITCH_OFF_RESET_TH; priv->skip_cnt = 0; } return; } priv->no_switch_cnt++; } else { priv->no_switch_cnt = 0; } /* If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1; } else if (priv->idle_dcvs) { idle = priv->bin.total_time - priv->bin.busy_time; idle = (idle > 0) ? idle : 0; val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id); } else { if (pwr->step_mul > 1) val = __secure_tz_entry3(TZ_UPDATE_ID, (pwr->active_pwrlevel + 1)/2, priv->bin.total_time, priv->bin.busy_time); else val = __secure_tz_entry3(TZ_UPDATE_ID, pwr->active_pwrlevel, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* If the decision is to move to a lower level, make sure the GPU * frequency drops. */ if (val > 0) val *= pwr->step_mul; if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); }