/*
 * kgsl_idle_check() - work handler fired by the GPU idle timer.
 * @work: work item embedded in struct kgsl_device as idle_check_ws
 *
 * Attempts to move an ACTIVE/NAP device into a lower power state; if the
 * GPU refuses to sleep, re-arms the idle timer and maintains the busy
 * statistics so utilization numbers stay meaningful.
 */
void kgsl_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
							idle_check_ws);
	/* Defensive: warn loudly and bail rather than dereference NULL. */
	WARN_ON(device == NULL);
	if (device == NULL)
		return;

	mutex_lock(&device->mutex);
	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
		/*
		 * Only run the pwrscale idle heuristics when no deeper
		 * sleep state has already been requested.
		 */
		if ((device->requested_state != KGSL_STATE_SLEEP) &&
			(device->requested_state != KGSL_STATE_SLUMBER))
			kgsl_pwrscale_idle(device);

		if (kgsl_pwrctrl_sleep(device) != 0) {
			/* Still busy - try again after another interval. */
			mod_timer(&device->idle_timer,
					jiffies +
					device->pwrctrl.interval_timeout);
			/*
			 * If the GPU has been too busy to sleep, make sure
			 * that is accurately reflected in the % busy numbers.
			 */
			device->pwrctrl.busy.no_nap_cnt++;
			if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
				kgsl_pwrctrl_busy_time(device, true);
				device->pwrctrl.busy.no_nap_cnt = 0;
			}
		}
	} else if (device->state & (KGSL_STATE_HUNG |
					KGSL_STATE_DUMP_AND_RECOVER)) {
		/* Device is wedged or recovering: drop any pending request. */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	}

	mutex_unlock(&device->mutex);
}
static void _sleep_accounting(struct kgsl_device *device) { kgsl_pwrctrl_busy_time(device, false); device->pwrctrl.busy.start.tv_sec = 0; device->pwrctrl.time = 0; kgsl_pwrscale_sleep(device); }
/*
 * kgsl_devfreq_get_dev_status - devfreq_dev_profile.get_dev_status callback
 * @dev: see devfreq.h
 * @stat: see devfreq.h
 *
 * This function expects the device mutex to be unlocked.
 */
int kgsl_devfreq_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	struct kgsl_pwrctrl *pwrctrl;
	struct kgsl_pwrscale *pwrscale;
	s64 tmp;

	if (device == NULL)
		return -ENODEV;
	if (stat == NULL)
		return -EINVAL;

	pwrscale = &device->pwrscale;
	pwrctrl = &device->pwrctrl;

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
	/*
	 * If the GPU clock is on grab the latest power counter
	 * values.  Otherwise the most recent ACTIVE values will
	 * already be stored in accum_stats.
	 */
	kgsl_pwrscale_update_stats(device);

	/* total_time is the wall-clock span since the previous query. */
	tmp = ktime_to_us(ktime_get());
	stat->total_time = tmp - pwrscale->time;
	pwrscale->time = tmp;

	stat->busy_time = pwrscale->accum_stats.busy_time;

	stat->current_frequency = kgsl_pwrctrl_active_freq(&device->pwrctrl);

	/*
	 * keep the latest devfreq_dev_status values
	 * and vbif counters data
	 * to be (re)used by kgsl_busmon_get_dev_status()
	 */
	if (pwrctrl->bus_control) {
		/* NOTE(review): last_status.private_data is assumed to have
		 * been set up as a struct xstats elsewhere - not visible
		 * here; confirm it cannot be NULL on this path. */
		struct xstats *last_b =
			(struct xstats *)last_status.private_data;

		last_status.total_time = stat->total_time;
		last_status.busy_time = stat->busy_time;
		last_status.current_frequency = stat->current_frequency;

		last_b->ram_time = device->pwrscale.accum_stats.ram_time;
		last_b->ram_wait = device->pwrscale.accum_stats.ram_wait;
		last_b->mod = device->pwrctrl.bus_mod;
	}

	kgsl_pwrctrl_busy_time(device, stat->total_time, stat->busy_time);
	trace_kgsl_pwrstats(device, stat->total_time, &pwrscale->accum_stats);

	/* Stats are consumed; start accumulating fresh for the next window. */
	memset(&pwrscale->accum_stats, 0, sizeof(pwrscale->accum_stats));

	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	return 0;
}
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state, int requested_state) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; int i = 0; if (state == KGSL_PWRFLAGS_OFF) { if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) { KGSL_PWR_INFO(device, "clocks off, device %d\n", device->id); for (i = KGSL_MAX_CLKS - 1; i > 0; i--) if (pwr->grp_clks[i]) clk_disable(pwr->grp_clks[i]); if ((pwr->pwrlevels[0].gpu_freq > 0) && (requested_state != KGSL_STATE_NAP)) clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->num_pwrlevels - 1]. gpu_freq); kgsl_pwrctrl_busy_time(device, true); } } else if (state == KGSL_PWRFLAGS_ON) { if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) { KGSL_PWR_INFO(device, "clocks on, device %d\n", device->id); if ((pwr->pwrlevels[0].gpu_freq > 0) && (device->state != KGSL_STATE_NAP)) clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->active_pwrlevel]. gpu_freq); /* as last step, enable grp_clk this is to let GPU interrupt to come */ for (i = KGSL_MAX_CLKS - 1; i > 0; i--) if (pwr->grp_clks[i]) clk_enable(pwr->grp_clks[i]); kgsl_pwrctrl_busy_time(device, false); } } }
/* Caller must hold the device mutex. */ int kgsl_pwrctrl_sleep(struct kgsl_device *device) { struct kgsl_pwrctrl *pwr = &device->pwrctrl; KGSL_PWR_INFO(device, "sleep device %d\n", device->id); /* Work through the legal state transitions */ if (device->requested_state == KGSL_STATE_NAP) { if (device->ftbl->isidle(device)) goto nap; } else if (device->requested_state == KGSL_STATE_SLEEP) { if (device->state == KGSL_STATE_NAP || device->ftbl->isidle(device)) goto sleep; } device->requested_state = KGSL_STATE_NONE; return -EBUSY; sleep: kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF); if (pwr->pwrlevels[0].gpu_freq > 0) clk_set_rate(pwr->grp_clks[0], pwr->pwrlevels[pwr->num_pwrlevels - 1]. gpu_freq); kgsl_pwrctrl_busy_time(device, false); pwr->busy.start.tv_sec = 0; device->pwrctrl.time = 0; kgsl_pwrscale_sleep(device); goto clk_off; nap: kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF); clk_off: kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF); device->state = device->requested_state; device->requested_state = KGSL_STATE_NONE; wake_unlock(&device->idle_wakelock); // pm_qos_update_request(PM_QOS_CPU_DMA_LATENCY, // PM_QOS_DEFAULT_VALUE); KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n", device->state, device->id); return 0; }