static void nvhost_scale_notify(struct platform_device *pdev, bool busy)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_device_profile *profile = pdata->power_profile;
	struct devfreq *devfreq = pdata->power_manager;

	/* Is the device profile initialised? */
	if (!profile)
		return;

	/* inform edp about new constraint */
	if (pdata->gpu_edp_device) {
		u32 avg = 0;
		actmon_op().read_avg_norm(profile->actmon, &avg);
		/* pass the current device clock rate, not a placeholder */
		tegra_edp_notify_gpu_load(avg, clk_get_rate(profile->clk));
	}

	/* If devfreq is disabled, set the freq to max or min */
	if (!devfreq) {
		unsigned long freq = busy ? UINT_MAX : 0;
		nvhost_scale_target(&pdev->dev, &freq, 0);
		return;
	}

	mutex_lock(&devfreq->lock);
	if (!profile->actmon)
		update_load_estimate(profile, busy);
	profile->dev_stat.busy = busy;
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}
static void nvhost_scale_notify(struct platform_device *pdev, bool busy)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_device_profile *profile = pdata->power_profile;
	struct devfreq *devfreq = pdata->power_manager;

	/* Is the device profile initialised? */
	if (!profile)
		return;

	/* If devfreq is disabled, set the freq to max or min */
	if (!devfreq) {
		unsigned long freq = busy ? UINT_MAX : 0;
		nvhost_scale_target(&pdev->dev, &freq, 0);
		return;
	}

	mutex_lock(&devfreq->lock);
	if (!profile->actmon)
		update_load_estimate(profile, busy);
	profile->last_event_type = busy ? DEVICE_BUSY : DEVICE_IDLE;
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}
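/*
 * A minimal usage sketch, assuming the notifier above is driven from the
 * job submission/completion path: thin busy/idle wrappers are the usual
 * entry points. The wrapper names below are illustrative assumptions,
 * not taken from the code above.
 */
void nvhost_scale_notify_busy(struct platform_device *pdev)
{
	nvhost_scale_notify(pdev, true);
}

void nvhost_scale_notify_idle(struct platform_device *pdev)
{
	nvhost_scale_notify(pdev, false);
}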
static int nvhost_pod_estimate_freq(struct devfreq *df,
				    unsigned long *freq)
{
	struct podgov_info_rec *podgov = df->data;
	struct devfreq_dev_status dev_stat;
	struct nvhost_devfreq_ext_stat *ext_stat;
	long delay;
	int current_event;
	int stat;
	ktime_t now;

	stat = df->profile->get_dev_status(df->dev.parent, &dev_stat);
	if (stat < 0)
		return stat;

	/* Ensure maximal clock when scaling is disabled */
	if (!podgov->enable) {
		*freq = df->max_freq;
		return 0;
	}

	if (podgov->p_user) {
		*freq = podgov->p_freq_request;
		return 0;
	}

	current_event = DEVICE_IDLE;
	stat = 0;
	now = ktime_get();

	/* Local adjustments (i.e. requests from kernel threads) are
	 * handled here */
	if (podgov->adjustment_type == ADJUSTMENT_LOCAL) {
		podgov->adjustment_type = ADJUSTMENT_DEVICE_REQ;

		/* Do not do unnecessary scaling */
		scaling_limit(df, &podgov->adjustment_frequency);
		if (df->previous_freq == podgov->adjustment_frequency)
			return GET_TARGET_FREQ_DONTSCALE;

		trace_podgov_estimate_freq(df->previous_freq,
					   podgov->adjustment_frequency);

		*freq = podgov->adjustment_frequency;
		return 0;
	}

	/* Retrieve extended data */
	ext_stat = dev_stat.private_data;
	if (!ext_stat)
		return -EINVAL;

	current_event = ext_stat->busy;
	*freq = dev_stat.current_frequency;
	df->min_freq = ext_stat->min_freq;
	df->max_freq = ext_stat->max_freq;

	/* Sustain local variables */
	podgov->last_event_type = current_event;
	podgov->total_idle += (dev_stat.total_time - dev_stat.busy_time);
	podgov->last_total_idle += (dev_stat.total_time - dev_stat.busy_time);

	/* update the load estimate based on idle time */
	update_load_estimate(df);

	/* if throughput hint enabled, and last hint is recent enough, return */
	if (podgov->p_use_throughput_hint &&
	    ktime_us_delta(now, podgov->last_throughput_hint) < 1000000)
		return GET_TARGET_FREQ_DONTSCALE;

	switch (current_event) {
	case DEVICE_IDLE:
		/* delay idle_max % of 2 * fast_response time (given in
		 * microseconds) */
		*freq = scaling_state_check(df, now);
		delay = (podgov->idle_max * podgov->p_estimation_window)
			/ 500000;
		schedule_delayed_work(&podgov->idle_timer,
				      msecs_to_jiffies(delay));
		break;
	case DEVICE_BUSY:
		cancel_delayed_work(&podgov->idle_timer);
		*freq = scaling_state_check(df, now);
		break;
	case DEVICE_UNKNOWN:
		*freq = scaling_state_check(df, now);
		break;
	}

	if (!(*freq) || (*freq == df->previous_freq))
		return GET_TARGET_FREQ_DONTSCALE;

	trace_podgov_estimate_freq(df->previous_freq, *freq);
	return 0;
}
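/*
 * A minimal sketch of how an estimator like nvhost_pod_estimate_freq is
 * typically exposed to the devfreq core as a governor. The event handler
 * (nvhost_pod_event_handler) and the init function below are assumptions
 * added for illustration and are not defined in the code above;
 * devfreq_add_governor() is the standard in-kernel registration call.
 */
static struct devfreq_governor nvhost_podgov = {
	.name			= "nvhost_podgov",
	.get_target_freq	= nvhost_pod_estimate_freq,
	.event_handler		= nvhost_pod_event_handler,	/* assumed handler */
};

static int __init nvhost_podgov_init(void)
{
	return devfreq_add_governor(&nvhost_podgov);
}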