/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: devfreq device handle
 * @freq: in: requested frequency; out: frequency actually in effect
 * @flags: devfreq hint flags (DEVFREQ_FLAG_FAST_HINT / DEVFREQ_FLAG_SLOW_HINT)
 *
 * Expects the device mutex to be unlocked on entry; takes and releases it.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	/* Nothing to do while power scaling is disabled */
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;
	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/*
		 * Map the requested frequency to a power level: scan from
		 * min_pwrlevel toward max_pwrlevel and take the first level
		 * whose gpu_freq satisfies *freq.
		 */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/*
		 * Same GPU frequency but the governor hinted at the bus:
		 * step bus_mod toward FAST_BUS/SLOW_BUS and push a new bus
		 * vote only if the value actually changed.
		 */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
				(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
					0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
				(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
					0 : SLOW_BUS;
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	/*
	 * While an unexpired power constraint is set and the new level does
	 * not exceed it, keep the current frequency; otherwise apply the new
	 * level and invalidate the constraint.
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(!time_after(jiffies, pwr->constraint.expires)) &&
		(level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		kgsl_pwrctrl_pwrlevel_change(device, level);
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;
		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
	return 0;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: see devfreq.h
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned long cur_freq;
	int level, idx, prev_bus_mod;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/* Scan levels and take the first one fast enough for *freq */
		level = pwr->max_pwrlevel;
		for (idx = pwr->min_pwrlevel; idx >= pwr->max_pwrlevel;
				idx--) {
			if (*freq <= pwr->pwrlevels[idx].gpu_freq) {
				level = idx;
				break;
			}
		}
	} else if (flags && pwr->bus_control) {
		/*
		 * Signal for a faster or slower bus. If KGSL is not already
		 * running at the desired bus speed for this level, modify
		 * its vote by one step in the hinted direction.
		 */
		prev_bus_mod = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
				(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
					0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
				(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
					0 : SLOW_BUS;
		/* Only re-vote when the bus modifier actually changed */
		if (pwr->bus_mod != prev_bus_mod)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	kgsl_pwrctrl_pwrlevel_change(device, level);
	*freq = kgsl_pwrctrl_active_freq(pwr);

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: in: requested frequency; out: frequency actually in effect
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 *
 * Fix: removed the local "pwr_level", which was declared and assigned but
 * never read (dead code).
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up.
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	mutex_lock(&device->mutex);
	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	/* If the governor recommends a new frequency, update it here */
	if (*freq != cur_freq) {
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				/*
				 * Let the thermal-cycling logic adjust the
				 * chosen level while a cycle is active.
				 */
				if (pwr->thermal_cycle == CYCLE_ACTIVE)
					level = _thermal_adjust(pwr, i);
				else
					level = i;
				break;
			}
		if (level != pwr->active_pwrlevel)
			kgsl_pwrctrl_pwrlevel_change(device, level);
	}

	*freq = kgsl_pwrctrl_active_freq(pwr);

	mutex_unlock(&device->mutex);
	return 0;
}
/* devfreq_dev_profile.target callback: apply the governor's frequency and
 * bus hints to the GPU. Expects the device mutex to be unlocked on entry.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	unsigned long active_freq;
	int new_level, lvl, old_bus_mod;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;
	mutex_lock(&device->mutex);

	active_freq = kgsl_pwrctrl_active_freq(pwr);
	new_level = pwr->active_pwrlevel;

	if (*freq != active_freq) {
		/* Pick the first level fast enough for the requested freq */
		new_level = pwr->max_pwrlevel;
		for (lvl = pwr->min_pwrlevel; lvl >= pwr->max_pwrlevel;
				lvl--) {
			if (*freq <= pwr->pwrlevels[lvl].gpu_freq) {
				new_level = lvl;
				break;
			}
		}
	} else if (flags && pwr->bus_control) {
		/*
		 * Frequency unchanged but the governor hinted at the bus:
		 * nudge bus_mod one step and re-vote only on a real change.
		 */
		old_bus_mod = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
				(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
					0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
				(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
					0 : SLOW_BUS;
		if (pwr->bus_mod != old_bus_mod)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	kgsl_pwrctrl_pwrlevel_change(device, new_level);
	*freq = kgsl_pwrctrl_active_freq(pwr);

	mutex_unlock(&device->mutex);
	return 0;
}
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: devfreq device handle
 * @stat: output device status (total/busy time, current frequency)
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwrctrl;
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;
	pwrctrl = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/*
	 * If the GPU clock is on grab the latest power counter
	 * values. Otherwise the most recent ACTIVE values will
	 * already be stored in accum_stats.
	 */
	kgsl_pwrscale_update_stats(device);

	/* Interval since the previous status read, in microseconds */
	tmp = ktime_to_us(ktime_get());
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	/*
	 * keep the latest devfreq_dev_status values
	 * and vbif counters data
	 * to be (re)used by kgsl_busmon_get_dev_status()
	 */
	if (pwrctrl->bus_control) {
		struct xstats *last_b =
			(struct xstats *)last_status.private_data;

		last_status.total_time = stat->total_time;
		last_status.busy_time = stat->busy_time;
		last_status.current_frequency = stat->current_frequency;

		last_b->ram_time = device->pwrscale.accum_stats.ram_time;
		last_b->ram_wait = device->pwrscale.accum_stats.ram_wait;
		last_b->mod = device->pwrctrl.bus_mod;
	}
	kgsl_pwrctrl_busy_time(device, stat->total_time, stat->busy_time);
	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);
	/* Accumulated stats are consumed per interval; restart from zero */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: devfreq device handle
 * @stat: output device status (total/busy time, current frequency)
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/* make sure we don't turn on clocks just to read stats */
	if (device->state == KGSL_STATE_ACTIVE) {
		struct kgsl_power_stats extra;

		device->ftbl->power_stats(device, &extra);
		device->pwrscale.accum_stats.busy_time += extra.busy_time;
		device->pwrscale.accum_stats.ram_time += extra.ram_time;
		device->pwrscale.accum_stats.ram_wait += extra.ram_wait;
	}

	/* Interval since the previous status read, in microseconds */
	tmp = ktime_to_us(ktime_get());
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	/* Optionally export extended bus statistics to the caller */
	if (stat->private_data) {
		struct xstats *b = (struct xstats *)stat->private_data;

		b->ram_time = device->pwrscale.accum_stats.ram_time;
		b->ram_wait = device->pwrscale.accum_stats.ram_wait;
		b->mod = device->pwrctrl.bus_mod;
	}

#if defined (CONFIG_SYSTEM_LOAD_ANALYZER)
	{
		/* Report GPU utilization (busy/total, scaled by 1000) */
		unsigned long long busy_time_x1000;

		if (stat->total_time != 0) {
			busy_time_x1000 =
				pwrscale->accum_stats.busy_time * 1000;
			do_div(busy_time_x1000, stat->total_time);
			store_external_load_factor(GPU_UTILIZATION,
						busy_time_x1000);
		}
	}
#endif

	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);
	/* Accumulated stats are consumed per interval; restart from zero */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/* * kgsl_devfreq_get_cur_freq - devfreq_dev_profile.get_cur_freq callback * @dev: see devfreq.h * @freq: see devfreq.h * @flags: see devfreq.h * * This function expects the device mutex to be unlocked. */ int kgsl_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) { struct kgsl_device *device = dev_get_drvdata(dev); if (device == NULL) return -ENODEV; if (freq == NULL) return -EINVAL; kgsl_mutex_lock(&device->mutex, &device->mutex_owner); *freq = kgsl_pwrctrl_active_freq(&device->pwrctrl); kgsl_mutex_unlock(&device->mutex, &device->mutex_owner); return 0; }
/* * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback * @dev: see devfreq.h * @freq: see devfreq.h * @flags: see devfreq.h * * This function expects the device mutex to be unlocked. */ int kgsl_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrscale *pwrscale; s64 tmp; if (device == NULL) return -ENODEV; if (stat == NULL) return -EINVAL; pwrscale = &device->pwrscale; kgsl_mutex_lock(&device->mutex, &device->mutex_owner); /* make sure we don't turn on clocks just to read stats */ if (device->state == KGSL_STATE_ACTIVE) { struct kgsl_power_stats extra; device->ftbl->power_stats(device, &extra); device->pwrscale.accum_stats.busy_time += extra.busy_time; device->pwrscale.accum_stats.ram_time += extra.ram_time; device->pwrscale.accum_stats.ram_wait += extra.ram_wait; } tmp = ktime_to_us(ktime_get()); stat->total_time = tmp - pwrscale->time; pwrscale->time = tmp; stat->busy_time = pwrscale->accum_stats.busy_time; stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl); if (stat->private_data) { struct xstats *b = (struct xstats *)stat->private_data; b->ram_time = device->pwrscale.accum_stats.ram_time; b->ram_wait = device->pwrscale.accum_stats.ram_wait; b->mod = device->pwrctrl.bus_mod; } trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats); memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats)); kgsl_mutex_unlock(&device->mutex, &device->mutex_owner); return 0; }
/* * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback * @dev: see devfreq.h * @freq: see devfreq.h * @flags: see devfreq.h * * This function expects the device mutex to be unlocked. */ int kgsl_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) { struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrscale *pwrscale; s64 tmp; if (device == NULL) return -ENODEV; if (stat == NULL) return -EINVAL; pwrscale = &device->pwrscale; memset(stat, 0, sizeof(*stat)); kgsl_mutex_lock(&device->mutex, &device->mutex_owner); /* * If the GPU clock is on grab the latest power counter * values. Otherwise the most recent ACTIVE values will * already be stored in accum_stats. */ kgsl_pwrscale_update_stats(device); tmp = ktime_to_us(ktime_get()); stat->total_time = tmp - pwrscale->time; pwrscale->time = tmp; stat->busy_time = pwrscale->accum_stats.busy_time; stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl); trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats); memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats)); kgsl_mutex_unlock(&device->mutex, &device->mutex_owner); return 0; }
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: devfreq device handle
 * @freq: in: requested frequency; out: frequency actually in effect
 * @flags: devfreq hint flags
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i, b;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	/* Nothing to do while power scaling is disabled */
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/* Pick the first power level fast enough for *freq */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	} else if (flags && pwr->bus_control) {
		/*
		 * Signal for faster or slower bus. If KGSL isn't already
		 * running at the desired speed for the given level, modify
		 * its vote.
		 */
		b = pwr->bus_mod;
		if ((flags & DEVFREQ_FLAG_FAST_HINT) &&
				(pwr->bus_mod != FAST_BUS))
			pwr->bus_mod = (pwr->bus_mod == SLOW_BUS) ?
					0 : FAST_BUS;
		else if ((flags & DEVFREQ_FLAG_SLOW_HINT) &&
				(pwr->bus_mod != SLOW_BUS))
			pwr->bus_mod = (pwr->bus_mod == FAST_BUS) ?
					0 : SLOW_BUS;
		if (pwr->bus_mod != b)
			kgsl_pwrctrl_buslevel_update(device, true);
	}

	/*
	 * The power constraints need an entire interval to do their magic, so
	 * skip changing the powerlevel if the time hasn't expired yet and the
	 * new level is less than the constraint
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(!time_after(jiffies, pwr->constraint.expires)) &&
		(level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		/* Change the power level */
		kgsl_pwrctrl_pwrlevel_change(device, level);

		/* Invalidate the constraint set */
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;

		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
/*
 * kgsl_devfreq_target - devfreq_dev_profile.target callback
 * @dev: see devfreq.h
 * @freq: in: requested frequency; out: frequency actually in effect
 * @flags: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 *
 * Fix: removed the local "pwr_level", which was declared and assigned but
 * never read (dead code).
 */
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwr;
	int level, i;
	unsigned long cur_freq;

	if (device == NULL)
		return -ENODEV;
	if (freq == NULL)
		return -EINVAL;
	if (!device->pwrscale.enabled)
		return 0;

	pwr = &device->pwrctrl;

	if (flags & DEVFREQ_FLAG_WAKEUP_MAXFREQ) {
		/*
		 * The GPU is about to get suspended,
		 * but it needs to be at the max power level when waking up
		 */
		pwr->wakeup_maxpwrlevel = 1;
		return 0;
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	cur_freq = kgsl_pwrctrl_active_freq(pwr);
	level = pwr->active_pwrlevel;

	if (*freq != cur_freq) {
		/* Pick the first power level fast enough for *freq */
		level = pwr->max_pwrlevel;
		for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--)
			if (*freq <= pwr->pwrlevels[i].gpu_freq) {
				level = i;
				break;
			}
	}

	/*
	 * The power constraints need an entire interval to do their magic, so
	 * skip changing the powerlevel if the time hasn't expired yet and the
	 * new level is less than the constraint
	 */
	if ((pwr->constraint.type != KGSL_CONSTRAINT_NONE) &&
		(!time_after(jiffies, pwr->constraint.expires)) &&
		(level >= pwr->constraint.hint.pwrlevel.level))
		*freq = cur_freq;
	else {
		/* Change the power level */
		kgsl_pwrctrl_pwrlevel_change(device, level);

		/* Invalidate the constraint set */
		pwr->constraint.type = KGSL_CONSTRAINT_NONE;
		pwr->constraint.expires = 0;

		*freq = kgsl_pwrctrl_active_freq(pwr);
	}

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}