/*
 * tz_sleep() - prepare the TZ governor for GPU sleep.
 *
 * Resets the secure-world DCVS history and throws away any partially
 * accumulated busy/total time window so that stale statistics do not
 * influence the first decision after wakeup.
 */
static void tz_sleep(struct kgsl_device *device,
			struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *tz_data = pwrscale->priv;

	/* Ask the secure world to reset its DCVS state. */
	__secure_tz_entry2(TZ_RESET_ID, 0, 0);

	/* Discard the in-progress statistics window. */
	tz_data->bin.busy_time = 0;
	tz_data->bin.total_time = 0;
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) return; /* If there is an extended block of busy processing, set * frequency to turbo. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = 0; kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO); } else if (priv->idle_dcvs) { idle = priv->bin.total_time - priv->bin.busy_time; idle = (idle > 0) ? idle : 0; val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id); } else { if (pwr->step_mul > 1) val = __secure_tz_entry3(TZ_UPDATE_ID, (pwr->active_pwrlevel + 1)/2, priv->bin.total_time, priv->bin.busy_time); else val = __secure_tz_entry3(TZ_UPDATE_ID, pwr->active_pwrlevel, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* If the decision is to move to a lower level, make sure the GPU * frequency drops. */ if (val > 0) val *= pwr->step_mul; if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); }
/*
 * tz_suspend() - devfreq suspend hook for the adreno TZ governor.
 *
 * Resets the secure-world DCVS history and clears the partially
 * accumulated statistics window.
 *
 * Return: always 0.
 */
static int tz_suspend(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *tz_data = devfreq->data;

	/* Reset the secure-world DCVS state. */
	__secure_tz_entry2(TZ_RESET_ID, 0, 0);

	/* Discard the in-progress statistics window. */
	tz_data->bin.busy_time = 0;
	tz_data->bin.total_time = 0;

	return 0;
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) return; /* If the GPU has stayed in turbo mode for a while, * * stop writing out values. */ if (pwr->active_pwrlevel == 0) { if (priv->no_switch_cnt > SWITCH_OFF) { priv->skip_cnt++; if (priv->skip_cnt > SKIP_COUNTER) { priv->no_switch_cnt -= SWITCH_OFF_RESET_TH; priv->skip_cnt = 0; } return; } priv->no_switch_cnt++; } else { priv->no_switch_cnt = 0; } /* If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1; } else if (priv->idle_dcvs) { idle = priv->bin.total_time - priv->bin.busy_time; idle = (idle > 0) ? idle : 0; val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id); } else { if (pwr->step_mul > 1) val = __secure_tz_entry3(TZ_UPDATE_ID, (pwr->active_pwrlevel + 1)/2, priv->bin.total_time, priv->bin.busy_time); else val = __secure_tz_entry3(TZ_UPDATE_ID, pwr->active_pwrlevel, priv->bin.total_time, priv->bin.busy_time); } priv->bin.total_time = 0; priv->bin.busy_time = 0; /* If the decision is to move to a lower level, make sure the GPU * frequency drops. */ if (val > 0) val *= pwr->step_mul; if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); }