static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct tz_priv *priv = pwrscale->priv;
	struct kgsl_power_stats stats;
	int val, idle;

	/* In "performance" mode the clock speed always stays the same */
	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
		return;

	device->ftbl->power_stats(device, &stats);
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;

	/*
	 * Do not waste CPU cycles running this algorithm if
	 * the GPU just started, or if less than FLOOR time
	 * has passed since the last run.
	 */
	if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR))
		return;

	/*
	 * If there is an extended block of busy processing, set
	 * frequency to turbo.  Otherwise run the normal algorithm.
	 */
	if (priv->bin.busy_time > CEILING) {
		val = 0;
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	} else if (priv->idle_dcvs) {
		idle = priv->bin.total_time - priv->bin.busy_time;
		idle = (idle > 0) ? idle : 0;
		val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id);
	} else {
		if (pwr->step_mul > 1)
			val = __secure_tz_entry3(TZ_UPDATE_ID,
				(pwr->active_pwrlevel + 1)/2,
				priv->bin.total_time, priv->bin.busy_time);
		else
			val = __secure_tz_entry3(TZ_UPDATE_ID,
				pwr->active_pwrlevel,
				priv->bin.total_time, priv->bin.busy_time);
	}

	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	/*
	 * If the decision is to move to a lower level, make sure the GPU
	 * frequency drops.
	 */
	if (val > 0)
		val *= pwr->step_mul;
	if (val)
		kgsl_pwrctrl_pwrlevel_change(device,
					     pwr->active_pwrlevel + val);
}
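/*
 * Illustrative sketch (not part of the driver above): how the step_mul
 * scaling in tz_idle() turns a TrustZone decision into a power level.
 * A positive return from the secure call means "go slower"; multiplying
 * by step_mul keeps the frequency drop proportional on targets whose
 * level tables are denser.  The helper name and example values here are
 * hypothetical.
 */
static int tz_apply_step_mul(int val, unsigned int step_mul,
			     unsigned int active_pwrlevel)
{
	if (val > 0)		/* "go slower" decision: scale the step */
		val *= step_mul;
	/* e.g. val = 1, step_mul = 2: the level index rises by two */
	return active_pwrlevel + val;
}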
static ssize_t tz_governor_store(struct kgsl_device *device,
				 struct kgsl_pwrscale *pwrscale,
				 const char *buf, size_t count)
{
	char str[20];
	struct tz_priv *priv = pwrscale->priv;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ret;

	/* Scan at most 19 characters so the terminating NUL fits in str */
	ret = sscanf(buf, "%19s", str);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&device->mutex);

	if (!strncmp(str, "ondemand", 8))
		priv->governor = TZ_GOVERNOR_ONDEMAND;
	else if (!strncmp(str, "performance", 11))
		priv->governor = TZ_GOVERNOR_PERFORMANCE;

	if (priv->governor == TZ_GOVERNOR_PERFORMANCE) {
		kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
		pwr->default_pwrlevel = pwr->max_pwrlevel;
	} else {
		pwr->default_pwrlevel = pwr->init_pwrlevel;
	}

	mutex_unlock(&device->mutex);
	return count;
}
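/*
 * Usage note (an assumption about the sysfs wiring, which sits outside
 * this excerpt): tz_governor_store() backs a "governor" attribute under
 * the device's pwrscale policy directory, so the policy is switched from
 * userspace with something like:
 *
 *   echo performance > /sys/class/kgsl/kgsl-3d0/pwrscale/trustzone/governor
 *   echo ondemand    > /sys/class/kgsl/kgsl-3d0/pwrscale/trustzone/governor
 *
 * Unrecognized strings leave the current governor in place; the write
 * still returns count, so the caller sees success either way.
 */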
static int _slumber(struct kgsl_device *device)
{
	switch (device->state) {
	case KGSL_STATE_ACTIVE:
		if (!device->ftbl->isidle(device)) {
			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
			device->pwrctrl.restore_slumber = true;
			return -EBUSY;
		}
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		del_timer_sync(&device->idle_timer);
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->pwrctrl.restore_slumber = true;
		_sleep_accounting(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		if (device->idle_wakelock.name)
			wake_unlock(&device->idle_wakelock);
		break;
	case KGSL_STATE_SLUMBER:
		break;
	default:
		KGSL_PWR_WARN(device, "unhandled state %s\n",
			      kgsl_pwrstate_to_str(device->state));
		break;
	}
	return 0;
}
static void conservative_idle(struct kgsl_device *device,
			      struct kgsl_pwrscale *pwrscale)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_power_stats stats;
	int val = 0;
	unsigned int loadpct;

	if (enable_boost == 1) {
		if (boosted_pwrlevel < pwr->active_pwrlevel)
			kgsl_pwrctrl_pwrlevel_change(device, boosted_pwrlevel);
		return;
	}

	device->ftbl->power_stats(device, &stats);
	if (stats.total_time == 0)
		return;

	walltime_total += (unsigned long)stats.total_time;
	busytime_total += (unsigned long)stats.busy_time;

	if (walltime_total <= g_polling_interval)
		return;

	if (g_show_stats == 1)
		pr_info("%s: walltime_total: %lu, busytime_total: %lu\n",
			KGSL_NAME, walltime_total, busytime_total);

	loadpct = (100 * busytime_total) / walltime_total;
	walltime_total = busytime_total = 0;

	if (loadpct < thresh_tbl[pwr->active_pwrlevel].down_threshold)
		val = 1;
	else if (loadpct > thresh_tbl[pwr->active_pwrlevel].up_threshold)
		val = -1;

	if (g_show_stats == 1)
		pr_info("%s: loadpct: %d, active_pwrlevel: %d, change: %d\n",
			KGSL_NAME, loadpct, pwr->active_pwrlevel, val);

	if (val)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val);
}
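/*
 * Illustrative sketch only: conservative_idle() above indexes thresh_tbl[]
 * by the active power level and compares the measured load percentage
 * against per-level up/down thresholds (below the down threshold the GPU
 * steps slower, above the up threshold it steps faster).  The struct
 * layout follows the field names used above; the table name matches the
 * code, but the threshold values here are hypothetical.
 */
struct gpu_thresh_tbl {
	unsigned int up_threshold;	/* load %: go one level faster above this */
	unsigned int down_threshold;	/* load %: go one level slower below this */
};

static struct gpu_thresh_tbl thresh_tbl[] = {
	{ .up_threshold = 100, .down_threshold = 60 },	/* level 0 (fastest) */
	{ .up_threshold = 90,  .down_threshold = 50 },	/* level 1 */
	{ .up_threshold = 80,  .down_threshold = 40 },	/* level 2 */
	{ .up_threshold = 70,  .down_threshold = 0 },	/* level 3 (slowest) */
};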
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
		    (pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			 (pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
	    (!time_after(jiffies, pwr->constraint.expires)) &&
	    (level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		kgsl_pwrctrl_pwrlevel_change(device, level);
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;
		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *priv = pwrscale->priv;

	if (device->state != KGSL_STATE_NAP &&
	    priv->governor == TZ_GOVERNOR_ONDEMAND)
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.default_pwrlevel);
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); if (stats.total_time == 0) return; /* If the GPU has stayed in turbo mode for a while, * * stop writing out values. */ if (pwr->active_pwrlevel == 0) { if (priv->no_switch_cnt > SWITCH_OFF) { priv->skip_cnt++; if (priv->skip_cnt > SKIP_COUNTER) { priv->no_switch_cnt -= SWITCH_OFF_RESET_TH; priv->skip_cnt = 0; } return; } priv->no_switch_cnt++; } else { priv->no_switch_cnt = 0; } idle = stats.total_time - stats.busy_time; idle = (idle > 0) ? idle : 0; dcvs_total_time += stats.total_time; if (idle) dcvs_busy_time += stats.busy_time; else dcvs_busy_time += stats.total_time; if (dcvs_algorithm == 0) { //DCVS algorithm by percentage if (dcvs_total_time < FRAME_INTERVAL) return; val = dcvs_update(dcvs_total_time, dcvs_busy_time); } else { //Qualcomm DCVS algorithm val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id); } if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); dcvs_total_time = 0; dcvs_busy_time = 0; }
static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
	if (device->pwrscale.policy != NULL) {
		device->pwrscale.policy->close(device, &device->pwrscale);
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.thermal_pwrlevel);
	}
	device->pwrscale.policy = NULL;
}
/*
 * kgsl_pwrscale_disable - temporarily disable the governor
 * @device: The device
 *
 * Temporarily disable the governor, to prevent interference
 * with profiling tools that expect a fixed clock frequency.
 * This function must be called with the device mutex locked.
 */
void kgsl_pwrscale_disable(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));

	if (device->pwrscale.devfreqptr)
		queue_work(device->pwrscale.devfreq_wq,
			   &device->pwrscale.devfreq_suspend_ws);
	device->pwrscale.enabled = false;
	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
}
static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct tz_priv *priv = pwrscale->priv;

	if (device->state != KGSL_STATE_NAP &&
	    priv->governor == TZ_GOVERNOR_ONDEMAND &&
	    device->pwrctrl.restore_slumber == 0)
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.thermal_pwrlevel);
}
static int __gpuclk_store(int max, struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int ret, i, delta = 5000000;
	unsigned long val;
	struct kgsl_device *device = kgsl_device_from_dev(dev);
	struct kgsl_pwrctrl *pwr;

	if (device == NULL)
		return 0;
	pwr = &device->pwrctrl;

	/* val is unsigned long, so scan with %lu rather than %ld */
	ret = sscanf(buf, "%lu", &val);
	if (ret != 1)
		return count;

	mutex_lock(&device->mutex);
	for (i = 0; i < pwr->num_pwrlevels; i++) {
		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
			if (max)
				pwr->thermal_pwrlevel = i;
			break;
		}
	}

	if (i == pwr->num_pwrlevels)
		goto done;

	/*
	 * If the current or requested clock speed is greater than the
	 * thermal limit, bump down immediately.
	 */
	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
	else if (!max)
		kgsl_pwrctrl_pwrlevel_change(device, i);

done:
	mutex_unlock(&device->mutex);
	return count;
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; struct tz_priv *priv = pwrscale->priv; struct kgsl_power_stats stats; int val, idle; /* In "performance" mode the clock speed always stays the same */ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) return; device->ftbl->power_stats(device, &stats); priv->bin.total_time += stats.total_time; priv->bin.busy_time += stats.busy_time; /* Do not waste CPU cycles running this algorithm if * the GPU just started, or if less than FLOOR time * has passed since the last run. */ if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR)) return; /* If the GPU has stayed in turbo mode for a while, * * stop writing out values. */ if (pwr->active_pwrlevel == 0) { if (priv->no_switch_cnt > SWITCH_OFF) { priv->skip_cnt++; if (priv->skip_cnt > SKIP_COUNTER) { priv->no_switch_cnt -= SWITCH_OFF_RESET_TH; priv->skip_cnt = 0; } return; } priv->no_switch_cnt++; } else { priv->no_switch_cnt = 0; } /* If there is an extended block of busy processing, * increase frequency. Otherwise run the normal algorithm. */ if (priv->bin.busy_time > CEILING) { val = -1; } else { idle = priv->bin.total_time - priv->bin.busy_time; idle = (idle > 0) ? idle : 0; val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id); } priv->bin.total_time = 0; priv->bin.busy_time = 0; if (val) kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val); }
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/*
		 * Signal for faster or slower bus.  If KGSL isn't already
		 * running at the desired speed for the given level, modify
		 * its vote.
		 */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
		    (pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			 (pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	kgsl_pwrctrl_pwrlevel_change(device, level);
	*freq = kgsl_pwrctrl_active_freq(pwr);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	struct kgsl_pwrlevel *pwr_level;
	int level, i;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;
	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up.
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	mutex_lock(&device->mutex);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;
	pwr_level = &pwr->pwrlevels[level];

	/* If the governor recommends a new frequency, update it here */
	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				if (pwr->thermal_cycle == CYCLE_ACTIVE)
					level = _thermal_adjust(pwr, i);
				else
					level = i;
				break;
			}
		if (level != pwr->active_pwrlevel)
			kgsl_pwrctrl_pwrlevel_change(device, level);
	}

	*freq = kgsl_pwrctrl_active_freq(pwr);

	mutex_unlock(&device->mutex);
	return 0;
}
static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
	if (device->pwrscale.policy != NULL) {
		device->pwrscale.policy->close(device, &device->pwrscale);
		/*
		 * Try to set max pwrlevel, which will be limited to thermal
		 * by kgsl_pwrctrl_pwrlevel_change if thermal is indeed lower.
		 */
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.max_pwrlevel);
	}
	device->pwrscale.policy = NULL;
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct tz_priv *priv = pwrscale->priv;
	struct kgsl_power_stats stats;
	int val, idle, total_time;

	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
		return;

	device->ftbl->power_stats(device, &stats);
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;

	if ((stats.total_time == 0) || (priv->bin.total_time < FLOOR))
		return;

	if (pwr->active_pwrlevel == 0) {
		if (priv->no_switch_cnt > SWITCH_OFF) {
			priv->skip_cnt++;
			if (priv->skip_cnt > SKIP_COUNTER) {
				priv->no_switch_cnt -= SWITCH_OFF_RESET_TH;
				priv->skip_cnt = 0;
			}
			return;
		}
		priv->no_switch_cnt++;
	} else {
		priv->no_switch_cnt = 0;
	}

	if (priv->bin.busy_time > CEILING) {
		val = -1;
	} else {
		idle = priv->bin.total_time - priv->bin.busy_time;
		idle = (idle > 0) ? idle : 0;
		/*
		 * Pack the active power level into the top four bits of
		 * the 28-bit total_time payload passed to TrustZone.
		 */
		total_time = stats.total_time & 0x0FFFFFFF;
		total_time |= (pwr->active_pwrlevel) << 28;
		val = __secure_tz_entry(TZ_UPDATE_ID, idle, total_time);
	}

	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	if (val)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel + val);
}
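/*
 * Worked example of the payload packing in the tz_idle() variant above:
 * the low 28 bits of the argument carry stats.total_time and the high
 * 4 bits carry the active power level, so a single scm argument carries
 * both.  The decode helpers below illustrate the encoding; they are not
 * code from the driver, and the names are hypothetical.
 */
static inline unsigned int tz_payload_time(int total_time)
{
	return total_time & 0x0FFFFFFF;		/* low 28 bits: window time */
}

static inline unsigned int tz_payload_level(int total_time)
{
	return ((unsigned int)total_time >> 28) & 0xF;	/* high 4 bits: pwrlevel */
}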
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	mutex_lock(&device->mutex);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
		    (pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			 (pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	kgsl_pwrctrl_pwrlevel_change(device, level);
	*freq = kgsl_pwrctrl_active_freq(pwr);

	mutex_unlock(&device->mutex);
	return 0;
}
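/*
 * Context sketch (an assumption, not shown in this excerpt): the
 * kgsl_devfreq_target() variants above are installed as the .target hook
 * of a devfreq_dev_profile, so the devfreq governor calls them with its
 * recommended frequency.  The field values below are hypothetical
 * placeholders, and kgsl_devfreq_get_dev_status is the assumed companion
 * statistics hook.
 */
#include <linux/devfreq.h>

static struct devfreq_dev_profile kgsl_devfreq_profile = {
	.initial_freq	= 200000000,	/* hypothetical 200 MHz starting point */
	.polling_ms	= 10,		/* hypothetical sampling interval */
	.target		= kgsl_devfreq_target,
	.get_dev_status	= kgsl_devfreq_get_dev_status,
};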
static void conservative_wake(struct kgsl_device *device,
			      struct kgsl_pwrscale *pwrscale)
{
	struct kgsl_power_stats stats;

	if (g_show_stats == 1)
		pr_info("%s: GPU waking up\n", KGSL_NAME);

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.default_pwrlevel);

		/* reset the power stats counters */
		device->ftbl->power_stats(device, &stats);
		walltime_total = 0;
		busytime_total = 0;
	}
}
/*
 * kgsl_pwrscale_enable - re-enable the governor
 * @device: The device
 *
 * Re-enable the governor after a kgsl_pwrscale_disable() call.
 * This function must be called with the device mutex locked.
 */
void kgsl_pwrscale_enable(struct kgsl_device *device)
{
	BUG_ON(!mutex_is_locked(&device->mutex));

	if (device->pwrscale.devfreqptr) {
		queue_work(device->pwrscale.devfreq_wq,
			   &device->pwrscale.devfreq_resume_ws);
		device->pwrscale.enabled = true;
	} else {
		/*
		 * Don't enable it if devfreq is not set, and let the device
		 * run at the default level.
		 */
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.default_pwrlevel);
		device->pwrscale.enabled = false;
	}
}
static ssize_t tz_governor_store(struct kgsl_device *device,
				 struct kgsl_pwrscale *pwrscale,
				 const char *buf, size_t count)
{
	struct tz_priv *priv = pwrscale->priv;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	mutex_lock(&device->mutex);

	if (!strncmp(buf, "ondemand", 8))
		priv->governor = TZ_GOVERNOR_ONDEMAND;
	else if (!strncmp(buf, "performance", 11))
		priv->governor = TZ_GOVERNOR_PERFORMANCE;

	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
		kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);

	mutex_unlock(&device->mutex);
	return count;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;
	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up.
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/*
		 * Signal for faster or slower bus.  If KGSL isn't already
		 * running at the desired speed for the given level, modify
		 * its vote.
		 */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
		    (pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
				0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
			 (pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
				0 : SLOW_BUS;
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	/*
	 * The power constraints need an entire interval to do their magic,
	 * so skip changing the powerlevel if the time hasn't expired yet
	 * and the new level is less than the constraint.
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
	    (!time_after(jiffies, pwr->constraint.expires)) &&
	    (level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		/* Change the power level */
		kgsl_pwrctrl_pwrlevel_change(device, level);

		/* Invalidate the constraint set */
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;

		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
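/*
 * Sketch of the producer side of the constraint check above (an
 * assumption: the helper name is hypothetical, and the constraint type
 * value is assumed to follow the kgsl constraint fields used in the
 * function).  A submission path can pin the minimum power level for a
 * short window; kgsl_devfreq_target() then refuses to drop below it
 * until the expiry passes, after which the constraint is invalidated.
 */
static void kgsl_example_set_pwrlevel_constraint(struct kgsl_pwrctrl *pwr,
						 unsigned int level)
{
	pwr->constraint.type = KGSL_CONSTRAINT_PWRLEVEL;	/* assumed type */
	pwr->constraint.hint.pwrlevel.level = level;
	pwr->constraint.expires = jiffies + HZ / 10;	/* hypothetical 100 ms */
}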
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	struct kgsl_pwrlevel *pwr_level;
	int level, i;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;
	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up.
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;
	pwr_level = &pwr->pwrlevels[level];

	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	}

	/*
	 * The power constraints need an entire interval to do their magic,
	 * so skip changing the powerlevel if the time hasn't expired yet
	 * and the new level is less than the constraint.
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
	    (!time_after(jiffies, pwr->constraint.expires)) &&
	    (level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		/* Change the power level */
		kgsl_pwrctrl_pwrlevel_change(device, level);

		/* Invalidate the constraint set */
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;

		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
static void conservative_wake(struct kgsl_device *device,
			      struct kgsl_pwrscale *pwrscale)
{
	if (device->state != KGSL_STATE_NAP)
		kgsl_pwrctrl_pwrlevel_change(device,
					     device->pwrctrl.default_pwrlevel);
}