/*
 * tz_get_target_freq() - governor callback computing the next GPU power level.
 * @devfreq: the devfreq device being scaled
 * @freq:    out: target frequency taken from the profile's freq_table
 * @flag:    out: DEVFREQ_FLAG_FAST_HINT / DEVFREQ_FLAG_SLOW_HINT bus hint
 *           (0 when no hint is issued)
 *
 * Accumulates busy/total time from get_dev_status().  Once at least FLOOR
 * total time and MIN_BUSY busy time have accumulated, either forces a jump
 * to the top level (busy_time > CEILING) or asks the TZ secure world for a
 * level delta.  When the level is unchanged, derives a bus-speed hint from
 * the normalized RAM cycle count.
 *
 * Returns 0 on success, or the negative error from get_dev_status() /
 * devfreq_get_freq_level().
 */
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
				u32 *flag)
{
	int result = 0;
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
	struct devfreq_dev_status stats;
	struct xstats b;
	int val, level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;

	/* Only request extended bus stats when bus scaling is configured. */
	if (priv->bus.num)
		stats.private_data = &b;
	else
		stats.private_data = NULL;

	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
	if (result) {
		pr_err(TAG "get_status failed %d\n", result);
		return result;
	}

	*freq = stats.current_frequency;
	*flag = 0;
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;
	if (priv->bus.num) {
		priv->bus.total_time += stats.total_time;
		priv->bus.gpu_time += stats.busy_time;
		/* RAM cycles include time spent waiting on the bus. */
		priv->bus.ram_time += b.ram_time;
		priv->bus.ram_time += b.ram_wait;
	}

	/*
	 * Do not waste CPU cycles running this algorithm if
	 * the GPU just started, or if less than FLOOR time
	 * has passed since the last run or the gpu hasn't been
	 * busier than MIN_BUSY.
	 */
	if ((stats.total_time == 0) ||
		(priv->bin.total_time < FLOOR) ||
		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
		return 0;
	}

	level = devfreq_get_freq_level(devfreq, stats.current_frequency);
	if (level < 0) {
		pr_err(TAG "bad freq %ld\n", stats.current_frequency);
		return level;
	}

	/*
	 * If there is an extended block of busy processing,
	 * increase frequency.  Otherwise run the normal algorithm.
	 */
	if (priv->bin.busy_time > CEILING) {
		/* Jump straight to level 0 (the highest power level). */
		val = -1 * level;
	} else {
		val = __secure_tz_entry3(TZ_UPDATE_ID,
				level,
				priv->bin.total_time,
				priv->bin.busy_time);
	}
	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	/*
	 * If the decision is to move to a different level, make sure the GPU
	 * frequency changes.
	 */
	if (val) {
		level += val;
		level = max(level, 0);
		/*
		 * FIX: freq_table holds max_state entries (valid indices
		 * 0..max_state - 1).  Clamping to max_state allowed an
		 * out-of-bounds read of freq_table[] at "end:" below.
		 */
		level = min_t(int, level, devfreq->profile->max_state - 1);
		goto clear;
	}

	/* Not enough bus data accumulated yet to produce a hint. */
	if (priv->bus.total_time < LONG_FLOOR)
		goto end;

	norm_cycles = (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;

	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint.  Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		*flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/*
		 * Normalize by gpu_time unless it is a small fraction of
		 * the total time interval.
		 */
		norm_cycles = (100 * norm_cycles) / TARGET;

		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
			(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
				gpu_percent > CAP)
			*flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			*flag = DEVFREQ_FLAG_SLOW_HINT;
	}

clear:
	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;

end:
	*freq = devfreq->profile->freq_table[level];
	return 0;
}
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq) { int result = 0; struct devfreq_msm_adreno_tz_data *priv = devfreq->data; struct devfreq_dev_status stats; int val, level = 0; result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats); if (result) { pr_err(TAG "get_status failed %d\n", result); return result; } *freq = stats.current_frequency; priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* * Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) { return 0; } level = devfreq_get_freq_level(devfreq, stats.current_frequency); if (level < 0) { pr_err(TAG "bad freq %ld\n", stats.current_frequency); return level; } /* * If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1; } else { val = __secure_tz_entry3(TZ_UPDATE_ID, level, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* * If the decision is to move to a lower level, make sure the GPU * frequency drops. */ level += val; level = max(level, 0); level = min_t(int, level, devfreq->profile->max_state); *freq = devfreq->profile->freq_table[level]; /* * By setting freq as UINT_MAX we notify the kgsl target function * to go up one power level without considering the freq value */ if (val < 0) *freq = UINT_MAX; return 0; }
/*
 * tz_get_target_freq() - TZ governor callback with long-frame detection.
 * @devfreq: the devfreq device being scaled
 * @freq:    out: target frequency taken from the profile's freq_table
 * @flag:    unused in this variant (never written)
 *
 * Accumulates busy/total time; after FLOOR total time and MIN_BUSY busy
 * time, either forces a jump to level 0 (sustained busy work, or an
 * accumulated run of high-utilization long frames) or asks the TZ secure
 * world for a level delta via an SCM buffer call.
 *
 * Returns 0 on success, or the negative error from get_dev_status() /
 * devfreq_get_freq_level().
 */
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
				u32 *flag)
{
	int result = 0;
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
	struct devfreq_dev_status stats;
	int val, level = 0;
	/* {level, total_time, busy_time} marshalled for the SCM call. */
	unsigned int scm_data[3];
	/*
	 * Persist across calls: busy_bin accumulates busy time over
	 * consecutive samples whose utilization exceeds BUSY_BIN;
	 * frame_flag records that one of those samples was also longer
	 * than LONG_FRAME.  Both reset on any low-utilization sample.
	 */
	static int busy_bin, frame_flag;

	/* keeps stats.private_data == NULL */
	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
	if (result) {
		pr_err(TAG "get_status failed %d\n", result);
		return result;
	}

	*freq = stats.current_frequency;
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;

	/*
	 * Do not waste CPU cycles running this algorithm if
	 * the GPU just started, or if less than FLOOR time
	 * has passed since the last run or the gpu hasn't been
	 * busier than MIN_BUSY.
	 */
	if ((stats.total_time == 0) ||
		(priv->bin.total_time < FLOOR) ||
		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
		return 0;
	}

	/* Track sustained high-utilization (long-frame) workloads. */
	if ((stats.busy_time * 100 / stats.total_time) > BUSY_BIN) {
		busy_bin += stats.busy_time;
		if (stats.total_time > LONG_FRAME)
			frame_flag = 1;
	} else {
		busy_bin = 0;
		frame_flag = 0;
	}

	level = devfreq_get_freq_level(devfreq, stats.current_frequency);
	if (level < 0) {
		pr_err(TAG "bad freq %ld\n", stats.current_frequency);
		return level;
	}

	/*
	 * If there is an extended block of busy processing,
	 * increase frequency. Otherwise run the normal algorithm.
	 */
	if (priv->bin.busy_time > CEILING ||
		(busy_bin > CEILING && frame_flag)) {
		/* Jump straight to level 0 and restart long-frame tracking. */
		val = -1 * level;
		busy_bin = 0;
		frame_flag = 0;
	} else {
		scm_data[0] = level;
		scm_data[1] = priv->bin.total_time;
		scm_data[2] = priv->bin.busy_time;
		__secure_tz_update_entry3(scm_data, sizeof(scm_data),
					&val, sizeof(val), priv->is_64);
	}
	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	/*
	 * If the decision is to move to a different level, make sure the GPU
	 * frequency changes.
	 */
	if (val) {
		level += val;
		level = max(level, 0);
		/* Clamp to the last valid freq_table index. */
		level = min_t(int, level, devfreq->profile->max_state - 1);
	}

	*freq = devfreq->profile->freq_table[level];
	return 0;
}
/*
 * NOTE(review): fragment of a governor body — "stat", "max", "level",
 * "priv" and the "clear" label are declared/defined outside this view.
 */
/*
 * Do not waste CPU cycles running this algorithm if
 * the GPU just started, or if less than FLOOR time
 * has passed since the last run.
 */
if (stat.total_time == 0 || priv->bin.total_time < FLOOR)
	return 0;

/* Prevent overflow */
if (stat.busy_time >= 1 << 24 || stat.total_time >= 1 << 24) {
	/*
	 * Shifting both operands by the same amount preserves the
	 * busy/total ratio while keeping later arithmetic in range.
	 */
	stat.busy_time >>= 7;
	stat.total_time >>= 7;
}

/* If current level is unknown, default to max */
level = devfreq_get_freq_level(devfreq, stat.current_frequency);
if (unlikely(level < 0)) {
	*freq = max;
	goto clear;
}

/*
 * If there is an extended block of busy processing,
 * increase frequency. Otherwise run the normal algorithm.
 */
if (priv->bin.busy_time > CEILING) {
	*freq = max;
	goto clear;
}

/* Apply conservativeness factor */
/*
 * devfreq_gpubw_get_target() - derive a GPU bus bandwidth vote hint (and,
 * when the bus width is known, an explicit AB vote in megabytes) from
 * accumulated RAM/GPU busy statistics.
 * @df:   the devfreq device
 * @freq: out: current frequency reported by get_dev_status()
 * @flag: unused here; the hint is written to bus_profile->flag instead
 *
 * NOTE(review): this definition continues past the visible source; the
 * trailing "else if" branch is completed elsewhere.
 */
static int devfreq_gpubw_get_target(struct devfreq *df,
				unsigned long *freq,
				u32 *flag)
{
	struct devfreq_msm_adreno_tz_data *priv = df->data;
	/* The extended profile embeds the devfreq profile we were given. */
	struct msm_busmon_extended_profile *bus_profile = container_of(
					(df->profile),
					struct msm_busmon_extended_profile,
					profile);
	struct devfreq_dev_status stats;
	struct xstats b;
	int result;
	int level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;
	/*
	 * Normalized AB should at max usage be the gpu_bimc frequency in MHz.
	 * Start with a reasonable value and let the system push it up to max.
	 */
	static int norm_ab_max = 300;
	int norm_ab;
	unsigned long ab_mbytes = 0;

	stats.private_data = &b;

	/*
	 * NOTE(review): result is not checked before stats/b are used below;
	 * on failure both are uninitialized — confirm get_dev_status() cannot
	 * fail here, or add an early return.
	 */
	result = df->profile->get_dev_status(df->dev.parent, &stats);

	*freq = stats.current_frequency;

	priv->bus.total_time += stats.total_time;
	priv->bus.gpu_time += stats.busy_time;
	priv->bus.ram_time += b.ram_time;
	priv->bus.ram_wait += b.ram_wait;

	/*
	 * NOTE(review): level can be a negative error code and is later used
	 * to index priv->bus.index[] — verify, or guard with (level < 0).
	 */
	level = devfreq_get_freq_level(df, stats.current_frequency);

	/* Keep accumulating until enough time has passed to decide. */
	if (priv->bus.total_time < LONG_FLOOR)
		return result;

	norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;

	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint. Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/* GPU votes for IB not AB so don't under vote the system */
		norm_cycles = (100 * norm_cycles) / TARGET;

		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
			(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
				gpu_percent > CAP)
			bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
	}

	/* Calculate the AB vote based on bus width if defined */
	if (priv->bus.width) {
		norm_ab = (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
		/* Calculate AB in Mega Bytes and roundup in BW_STEP */
		ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
		bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
	} else if (bus_profile->flag) {
/*
 * devfreq_gpubw_get_target() - derive a GPU bus bandwidth vote hint from
 * accumulated RAM/GPU busy statistics.
 * @df:   the devfreq device
 * @freq: out: current frequency reported by get_dev_status()
 * @flag: unused here; the hint is written to bus_profile->flag instead
 *
 * Accumulates bus statistics each call; once LONG_FLOOR total time has
 * accumulated, compares the normalized RAM cycle count against the per-level
 * up/down cutoffs and sets DEVFREQ_FLAG_FAST_HINT / DEVFREQ_FLAG_SLOW_HINT
 * on the extended bus profile, then resets the accumulators.
 *
 * Returns 0 on success, or the negative error from get_dev_status() /
 * devfreq_get_freq_level().
 */
static int devfreq_gpubw_get_target(struct devfreq *df,
				unsigned long *freq,
				u32 *flag)
{
	struct devfreq_msm_adreno_tz_data *priv = df->data;
	/* The extended profile embeds the devfreq profile we were given. */
	struct msm_busmon_extended_profile *bus_profile = container_of(
					(df->profile),
					struct msm_busmon_extended_profile,
					profile);
	struct devfreq_dev_status stats;
	struct xstats b;
	int result;
	int level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;

	stats.private_data = &b;

	result = df->profile->get_dev_status(df->dev.parent, &stats);
	/*
	 * FIX: stats and b are uninitialized when get_dev_status() fails;
	 * the original read them regardless.
	 */
	if (result)
		return result;

	*freq = stats.current_frequency;

	priv->bus.total_time += stats.total_time;
	priv->bus.gpu_time += stats.busy_time;
	/* RAM cycles include time spent waiting on the bus. */
	priv->bus.ram_time += b.ram_time;
	priv->bus.ram_time += b.ram_wait;

	level = devfreq_get_freq_level(df, stats.current_frequency);
	/*
	 * FIX: devfreq_get_freq_level() returns a negative error for an
	 * unknown frequency; the original used it to index bus.index[].
	 */
	if (level < 0)
		return level;

	/* Keep accumulating until enough time has passed to decide. */
	if (priv->bus.total_time < LONG_FLOOR)
		return result;

	norm_cycles = (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;

	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint. Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/* GPU votes for IB not AB so don't under vote the system */
		norm_cycles = (100 * norm_cycles) / TARGET;

		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
			(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
				gpu_percent > CAP)
			bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
	}

	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;

	return result;
}