static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct tz_priv *priv; int ret; if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() || cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627())) return -EINVAL; priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL); if (pwrscale->priv == NULL) return -ENOMEM; priv->governor = TZ_GOVERNOR_ONDEMAND; spin_lock_init(&tz_lock); kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group); ret = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_WRITE_ALGORITHM); if(ret == 1) pr_info("Using HTC GPU DCVS algorithm\n"); else pr_info("Using QCT GPU DCVS algorithm\n"); return 0; }
/*
 * tz_sleep - notify trustzone that the GPU is going idle.
 *
 * Issues a TZ_RESET_ID call for this device and clears the turbo-mode
 * hold-off counter so the next active period starts fresh.
 */
static void tz_sleep(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *tz = pwrscale->priv;

	__secure_tz_entry(TZ_RESET_ID, 0, device->id);
	tz->no_switch_cnt = 0;
}
/*
 * tz_idle - per-idle-event DCVS tick (variant using file-scope counters
 * and a runtime-selectable algorithm).
 *
 * Accumulates GPU busy/total time into the file-scope dcvs_total_time /
 * dcvs_busy_time counters, then asks either the local percentage-based
 * dcvs_update() helper or trustzone (TZ_UPDATE_ID) for a power-level
 * delta, and applies it via kgsl_pwrctrl_pwrlevel_change().
 *
 * NOTE(review): this function mutates file-scope state with several
 * early returns; the counter resets at the bottom are only reached when
 * a decision was actually made.
 */
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct tz_priv *priv = pwrscale->priv;
	struct kgsl_power_stats stats;
	int val, idle;

	/* In "performance" mode the clock speed always stays the same */
	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
		return;

	device->ftbl->power_stats(device, &stats);
	/* No elapsed time yet: nothing to base a decision on. */
	if (stats.total_time == 0)
		return;

	/*
	 * If the GPU has stayed in turbo mode (pwrlevel 0) for a while,
	 * stop writing out values: count consecutive turbo ticks and,
	 * past SWITCH_OFF, skip SKIP_COUNTER ticks before partially
	 * re-arming via SWITCH_OFF_RESET_TH.
	 */
	if (pwr->active_pwrlevel == 0) {
		if (priv->no_switch_cnt > SWITCH_OFF) {
			priv->skip_cnt++;
			if (priv->skip_cnt > SKIP_COUNTER) {
				priv->no_switch_cnt -= SWITCH_OFF_RESET_TH;
				priv->skip_cnt = 0;
			}
			return;
		}
		priv->no_switch_cnt++;
	} else {
		priv->no_switch_cnt = 0;
	}

	/* Clamp: busy_time may exceed total_time in a sample. */
	idle = stats.total_time - stats.busy_time;
	idle = (idle > 0) ? idle : 0;

	/* Fold this sample into the running window totals. */
	dcvs_total_time += stats.total_time;
	if (idle)
		dcvs_busy_time += stats.busy_time;
	else
		/* Fully busy sample: count the whole window as busy. */
		dcvs_busy_time += stats.total_time;

	if (dcvs_algorithm == 0) {
		/* DCVS algorithm by percentage (HTC) */
		/* Wait for a full FRAME_INTERVAL before deciding. */
		if (dcvs_total_time < FRAME_INTERVAL)
			return;
		val = dcvs_update(dcvs_total_time, dcvs_busy_time);
	} else {
		/* Qualcomm DCVS algorithm: decision made in trustzone. */
		val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id);
	}

	/* val is a signed level delta; 0 means "stay put". */
	if (val)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val);

	dcvs_total_time = 0;
	dcvs_busy_time = 0;
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) return; /* If the GPU has stayed in turbo mode for a while, * * stop writing out values. */ if (pwr->active_pwrlevel == 0) { if (priv->no_switch_cnt > SWITCH_OFF) { priv->skip_cnt++; if (priv->skip_cnt > SKIP_COUNTER) { priv->no_switch_cnt -= SWITCH_OFF_RESET_TH; priv->skip_cnt = 0; } return; } priv->no_switch_cnt++; } else { priv->no_switch_cnt = 0; } /* If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1; } else { idle = priv->bin.total_time - priv->bin.busy_time; idle = (idle > 0) ? idle : 0; val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id); } priv->bin.total_time = 0; priv->bin.busy_time = 0; if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); }
/*
 * dcvs_downthreshold_count_show - sysfs read of the trustzone
 * down-threshold counter parameter.
 */
static ssize_t dcvs_downthreshold_count_show(struct kgsl_device *device,
					struct kgsl_pwrscale *pwrscale,
					char *buf)
{
	int value;

	value = __secure_tz_entry(TZ_CMD_ID, 0,
				  PARAM_INDEX_READ_DOWNTHRESHOLD_COUNT);
	return sprintf(buf, "%d\n", value);
}
/*
 * dcvs_algorithm_show - sysfs read of the active DCVS algorithm
 * selector held in trustzone.
 */
static ssize_t dcvs_algorithm_show(struct kgsl_device *device,
				struct kgsl_pwrscale *pwrscale,
				char *buf)
{
	int value;

	value = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_ALGORITHM);
	return sprintf(buf, "%d\n", value);
}
/*
 * dcvs_init_idle_vector_show - sysfs read of the trustzone
 * initial-idle-vector parameter.
 */
static ssize_t dcvs_init_idle_vector_show(struct kgsl_device *device,
					struct kgsl_pwrscale *pwrscale,
					char *buf)
{
	int value;

	value = __secure_tz_entry(TZ_CMD_ID, 0,
				  PARAM_INDEX_READ_INITIDLEVECTOR);
	return sprintf(buf, "%d\n", value);
}
/*
 * dcvs_numgaps_show - sysfs read of the trustzone num-gaps parameter.
 */
static ssize_t dcvs_numgaps_show(struct kgsl_device *device,
				struct kgsl_pwrscale *pwrscale,
				char *buf)
{
	int value;

	value = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_NUMGAPS);
	return sprintf(buf, "%d\n", value);
}
static ssize_t dcvs_down_count_show(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale, char *buf) { int val, ret; val = __secure_tz_entry(TZ_UPDATE_ID, 0, PARAM_INDEX_READ_MINGAPCOUNT); ret = sprintf(buf, "%d\n", val); return ret; }
static ssize_t dcvs_upthreshold_show(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale, char *buf) { int val, ret; val = __secure_tz_entry(TZ_UPDATE_ID, 0, PARAM_INDEX_READ_UPTHRESHOLD); ret = sprintf(buf, "%d\n", val); return ret; }
/*
 * tz_sleep - notify trustzone that the GPU is going idle (extended
 * variant that also clears the file-scope DCVS window counters).
 */
static void tz_sleep(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *tz = pwrscale->priv;

	__secure_tz_entry(TZ_RESET_ID, 0, device->id);

	/* Start the next active period with a clean slate. */
	tz->no_switch_cnt = 0;
	dcvs_total_time = 0;
	dcvs_busy_time = 0;
	dcvs_up_count = 0;
	dcvs_down_count = 0;
}
/*
 * dcvs_downthreshold_store - sysfs write of the trustzone down-threshold
 * parameter. Returns count on success, -EINVAL on unparseable input.
 */
static ssize_t dcvs_downthreshold_store(struct kgsl_device *device,
				struct kgsl_pwrscale *pwrscale,
				const char *buf, size_t count)
{
	int value;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	__secure_tz_entry(TZ_CMD_ID, value, PARAM_INDEX_WRITE_DOWNTHRESHOLD);
	return count;
}
/*
 * dcvs_algorithm_store - sysfs write of the DCVS algorithm selector.
 * Returns count on success, -EINVAL on unparseable input.
 */
static ssize_t dcvs_algorithm_store(struct kgsl_device *device,
				struct kgsl_pwrscale *pwrscale,
				const char *buf, size_t count)
{
	int value;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	__secure_tz_entry(TZ_CMD_ID, value, PARAM_INDEX_WRITE_ALGORITHM);
	return count;
}
/*
 * dcvs_init_idle_vector_store - sysfs write of the trustzone
 * initial-idle-vector parameter. Returns count on success, -EINVAL on
 * unparseable input.
 */
static ssize_t dcvs_init_idle_vector_store(struct kgsl_device *device,
				struct kgsl_pwrscale *pwrscale,
				const char *buf, size_t count)
{
	int value;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	__secure_tz_entry(TZ_CMD_ID, value, PARAM_INDEX_WRITE_INITIDLEVECTOR);
	return count;
}
/*
 * dcvs_numgaps_store - sysfs write of the trustzone num-gaps parameter.
 * Returns count on success, -EINVAL on unparseable input.
 *
 * FIX: the sibling parameter writes (downthreshold, algorithm,
 * init_idle_vector) all use TZ_CMD_ID with a PARAM_INDEX_WRITE_* index;
 * this one used TZ_UPDATE_ID, whose third argument is a device id /
 * packed sample elsewhere - an apparent copy-paste error.
 */
static ssize_t dcvs_numgaps_store(struct kgsl_device *device,
				struct kgsl_pwrscale *pwrscale,
				const char *buf, size_t count)
{
	int val, ret;

	ret = sscanf(buf, "%d", &val);
	if (ret != 1)
		return -EINVAL;

	__secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_NUMGAPS);
	return count;
}
/*
 * tz_idle - per-idle-event DCVS tick (variant that packs the current
 * power level into the trustzone call argument).
 *
 * Folds the latest power stats into priv->bin and, once enough time has
 * accumulated, obtains a power-level delta - either -1 after an extended
 * busy stretch, or whatever trustzone returns - and applies it through
 * kgsl_pwrctrl_pwrlevel_change().
 */
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct tz_priv *priv = pwrscale->priv;
	struct kgsl_power_stats stats;
	int val, idle, total_time;

	/* The "performance" governor pins the clock; nothing to decide. */
	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
		return;

	device->ftbl->power_stats(device, &stats);
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;

	/*
	 * Skip the algorithm if the GPU just started, or if less than
	 * FLOOR time has accumulated since the last decision.
	 */
	if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR))
		return;

	/*
	 * Turbo hold-off: after staying at pwrlevel 0 for more than
	 * SWITCH_OFF ticks, stop writing out values for SKIP_COUNTER
	 * ticks, then partially re-arm via SWITCH_OFF_RESET_TH.
	 */
	if (pwr->active_pwrlevel == 0) {
		if (priv->no_switch_cnt > SWITCH_OFF) {
			priv->skip_cnt++;
			if (priv->skip_cnt > SKIP_COUNTER) {
				priv->no_switch_cnt -= SWITCH_OFF_RESET_TH;
				priv->skip_cnt = 0;
			}
			return;
		}
		priv->no_switch_cnt++;
	} else {
		priv->no_switch_cnt = 0;
	}

	if (priv->bin.busy_time > CEILING) {
		/*
		 * Extended block of busy processing: step one level down
		 * in index, which raises the frequency (level 0 = turbo).
		 */
		val = -1;
	} else {
		/* Clamp: busy_time may exceed total_time in a sample. */
		idle = priv->bin.total_time - priv->bin.busy_time;
		idle = (idle > 0) ? idle : 0;
		/*
		 * Pack the sample for trustzone: low 28 bits carry
		 * total_time, top 4 bits carry the current power level.
		 */
		total_time = stats.total_time & 0x0FFFFFFF;
		total_time |= (pwr->active_pwrlevel) << 28;
		val = __secure_tz_entry(TZ_UPDATE_ID, idle, total_time);
	}

	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	/* val is a signed level delta; 0 means "stay put". */
	if (val)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val);
}