int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct devfreq *df;
	struct hwmon_node *node;
	int ret;

	if (!hwmon)
		return -EINVAL;
	df = hwmon->df;
	if (!df)
		return -ENODEV;
	node = df->data;
	if (!node)
		return -ENODEV;

	if (!node->mon_started)
		return -EBUSY;

	dev_dbg(df->dev.parent, "Got update request\n");
	devfreq_monitor_stop(df);

	mutex_lock(&df->lock);
	ret = update_devfreq(df);
	if (ret)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");
	mutex_unlock(&df->lock);

	devfreq_monitor_start(df);

	return 0;
}
static ssize_t store_request(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int wanted;
	int req_freq = 0;
	int err = 0;

	/* reject input that does not parse; 'wanted' would be used uninitialized */
	if (sscanf(buf, "%d", &wanted) != 1)
		return -EINVAL;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data) {
		data->req_bw += wanted;
		pr_debug("*** %s, request:%d, total request:%d ***\n",
			 __func__, wanted, data->req_bw);
		if (data->req_bw < 0)
			data->req_bw = 0;
		if (data->convert_bw_to_freq)
			req_freq = data->convert_bw_to_freq(wanted);
	}

	user_requests.req_sum += req_freq;
	if (user_requests.req_sum < 0)
		user_requests.req_sum = 0;

	err = update_devfreq(devfreq);
	if (err == 0)
		err = count;
	mutex_unlock(&devfreq->lock);

	return err;
}
/*
 * Add a new DDR bandwidth request.
 * @req_bw: bandwidth in KB; >= 0 adds to the total request,
 *          < 0 subtracts from it.
 */
void dfs_request_bw(int req_bw)
{
	u32 req_freq = 0;
	int add = 1;
	struct userspace_data *user_data;

	if (req_bw < 0) {
		req_bw = -req_bw;
		add = -1;
	}

	if (g_devfreq && g_devfreq->data) {
		user_data = (struct userspace_data *)(g_devfreq->data);
		if (user_data->convert_bw_to_freq)
			req_freq = user_data->convert_bw_to_freq(req_bw);
	}

	pr_debug("*** %s, pid:%u, %creq_bw:%u, req_freq:%u ***\n",
		 __func__, current->pid, add >= 0 ? '+' : '-',
		 req_bw, req_freq);

	if (req_freq) {
		mutex_lock(&g_devfreq->lock);
		if (add >= 0)
			user_requests.req_sum += req_freq;
		else
			user_requests.req_sum -= req_freq;
		if (user_requests.req_sum < 0)
			user_requests.req_sum = 0;
		update_devfreq(g_devfreq);
		mutex_unlock(&g_devfreq->lock);
	}
}
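/*
 * A hypothetical usage sketch, for illustration only (the caller and the
 * request size below are assumptions, not part of this driver): each
 * positive request is paired with a matching negative one once the
 * bandwidth-hungry work is done, so req_sum returns to its prior value.
 */
static void example_ddr_burst(void)
{
	dfs_request_bw(10240);		/* add a 10 MB/s DDR bandwidth request */
	/* ... do the bandwidth-hungry work ... */
	dfs_request_bw(-10240);		/* drop the request when done */
}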
static void podgov_set_freq_request(struct device *dev, int freq_request)
{
	struct platform_device *d = to_platform_device(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(d);
	struct devfreq *df = pdata->power_manager;
	struct podgov_info_rec *podgov;

	if (!df)
		return;

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(d);

	mutex_lock(&df->lock);

	podgov = df->data;

	trace_podgov_set_freq_request(freq_request);

	podgov->p_freq_request = freq_request;

	/*
	 * Update the request only if podgov is enabled, the device is
	 * turned on, and the scaling is in user mode.
	 */
	if (podgov->enable && podgov->p_user &&
	    pm_runtime_active(&d->dev)) {
		podgov->adjustment_frequency = freq_request;
		podgov->adjustment_type = ADJUSTMENT_LOCAL;
		update_devfreq(df);
	}

	mutex_unlock(&df->lock);
	nvhost_module_idle(d);
}
static void nvhost_scale_notify(struct platform_device *pdev, bool busy)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_device_profile *profile = pdata->power_profile;
	struct devfreq *devfreq = pdata->power_manager;

	/* Is the device profile initialised? */
	if (!profile)
		return;

	/* inform edp about new constraint */
	if (pdata->gpu_edp_device) {
		u32 avg = 0;

		actmon_op().read_avg_norm(profile->actmon, &avg);
		/* report the actual clock rate rather than a bogus frequency */
		tegra_edp_notify_gpu_load(avg, clk_get_rate(profile->clk));
	}

	/* If devfreq is disabled, set the freq to max or min */
	if (!devfreq) {
		unsigned long freq = busy ? UINT_MAX : 0;
		nvhost_scale_target(&pdev->dev, &freq, 0);
		return;
	}

	mutex_lock(&devfreq->lock);
	if (!profile->actmon)
		update_load_estimate(profile, busy);
	profile->dev_stat.busy = busy;
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}
static int gov_suspend(struct devfreq *df)
{
	struct hwmon_node *node = df->data;
	unsigned long resume_freq = df->previous_freq;
	unsigned long resume_ab = *node->dev_ab;

	if (!node->hw->suspend_hwmon)
		return -ENOSYS;

	if (node->resume_freq) {
		dev_warn(df->dev.parent, "Governor already suspended!\n");
		return -EBUSY;
	}

	stop_monitor(df, false);

	mutex_lock(&df->lock);
	update_devfreq(df);
	mutex_unlock(&df->lock);

	node->resume_freq = resume_freq;
	node->resume_ab = resume_ab;

	return 0;
}
static void mali_dvfs_event_proc(struct work_struct *w)
{
#ifdef CONFIG_MALI_DEVFREQ
	if (mali_devfreq) {
		mutex_lock(&mali_devfreq->lock);
		update_devfreq(mali_devfreq);
		mutex_unlock(&mali_devfreq->lock);
	}
#else
	mali_dvfs_status *dvfs_status;

	mutex_lock(&mali_enable_clock_lock);
	dvfs_status = &mali_dvfs_status_current;

	mali_dvfs_decide_next_level(dvfs_status);

	/* clamp the chosen step into the allowed [min, max) range */
	if (dvfs_status->step >= dvfs_step_max)
		dvfs_status->step = dvfs_step_max - 1;
	if (dvfs_status->step < dvfs_step_min)
		dvfs_status->step = dvfs_step_min;

	if (!pm_runtime_status_suspended(dvfs_status->kbdev->osdev.dev))
		kbase_platform_dvfs_set_level(dvfs_status->kbdev,
					      dvfs_status->step);

	mutex_unlock(&mali_enable_clock_lock);
#endif
}
static int devfreq_performance_handler(struct devfreq *devfreq,
				       unsigned int event, void *data)
{
	int ret = 0;

	if (event == DEVFREQ_GOV_START) {
		mutex_lock(&devfreq->lock);
		ret = update_devfreq(devfreq);
		mutex_unlock(&devfreq->lock);
	}

	return ret;
}
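/*
 * For context, a minimal sketch of how a handler like the one above is
 * wired up: a governor bundles an event handler with a get_target_freq()
 * callback in a struct devfreq_governor and registers it with
 * devfreq_add_governor(). This mirrors the in-kernel "performance"
 * governor; exact fields vary across kernel versions, so treat it as an
 * illustrative assumption rather than this file's actual code.
 */
static int devfreq_performance_func(struct devfreq *df, unsigned long *freq)
{
	/* always request the highest frequency; devfreq clamps to max_freq */
	*freq = UINT_MAX;
	return 0;
}

static struct devfreq_governor devfreq_performance = {
	.name = "performance",
	.get_target_freq = devfreq_performance_func,
	.event_handler = devfreq_performance_handler,
};

static int __init devfreq_performance_init(void)
{
	return devfreq_add_governor(&devfreq_performance);
}
subsys_initcall(devfreq_performance_init);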
int devfreq_vbif_update_bw(void)
{
	int ret = 0;

	mutex_lock(&df_lock);
	if (df) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		mutex_unlock(&df->lock);
	}
	mutex_unlock(&df_lock);

	return ret;
}
static int devfreq_simple_exynos_notifier(struct notifier_block *nb,
					  unsigned long val, void *v)
{
	struct devfreq_notifier_block *devfreq_nb;

	devfreq_nb = container_of(nb, struct devfreq_notifier_block, nb);

	mutex_lock(&devfreq_nb->df->lock);
	update_devfreq(devfreq_nb->df);
	mutex_unlock(&devfreq_nb->df->lock);

	return NOTIFY_OK;
}
static int devfreq_watermark_event_handler(struct devfreq *df,
					   unsigned int event,
					   void *wmark_type)
{
	int ret = 0;
	struct wmark_gov_info *wmarkinfo = df->data;
	enum watermark_type *type = wmark_type;

	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_watermark_start(df);
		wmarkinfo = df->data;
		if (df->profile->set_low_wmark)
			df->profile->set_low_wmark(df->dev.parent,
						   wmarkinfo->p_low_wmark);
		if (df->profile->set_high_wmark)
			df->profile->set_high_wmark(df->dev.parent,
						    wmarkinfo->p_high_wmark);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_watermark_debug_stop(df);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(df);
		break;
	case DEVFREQ_GOV_RESUME:
		if (df->profile->set_low_wmark)
			df->profile->set_low_wmark(df->dev.parent,
						   wmarkinfo->p_low_wmark);
		if (df->profile->set_high_wmark)
			df->profile->set_high_wmark(df->dev.parent,
						    wmarkinfo->p_high_wmark);
		devfreq_monitor_resume(df);
		break;
	case DEVFREQ_GOV_WMARK:
		/* set watermark interrupt type */
		wmarkinfo->event = *type;
		mutex_lock(&df->lock);
		update_devfreq(df);
		mutex_unlock(&df->lock);
		break;
	default:
		break;
	}

	return ret;
}
static int exynos5_devfreq_isp_reboot_notifier(struct notifier_block *nb,
					       unsigned long val, void *v)
{
	unsigned long freq = exynos5433_qos_isp.default_qos;
	struct devfreq_data_isp *data = dev_get_drvdata(isp_dev);
	struct devfreq *devfreq_isp = data->devfreq;

	devfreq_isp->max_freq = freq;

	mutex_lock(&devfreq_isp->lock);
	update_devfreq(devfreq_isp);
	mutex_unlock(&devfreq_isp->lock);

	return NOTIFY_DONE;
}
static int exynos8890_devfreq_int_reboot(struct device *dev,
					 struct exynos_devfreq_data *data)
{
	u32 freq = DEVFREQ_INT_REBOOT_FREQ;

	data->max_freq = freq;
	data->devfreq->max_freq = data->max_freq;

	mutex_lock(&data->devfreq->lock);
	update_devfreq(data->devfreq);
	mutex_unlock(&data->devfreq->lock);

	return 0;
}
static int devfreq_watermark_event_handler(struct devfreq *df,
					   unsigned int event,
					   void *wmark_type)
{
	int ret = 0;

	switch (event) {
	case DEVFREQ_GOV_START:
	{
		struct devfreq_dev_status dev_stat;

		ret = df->profile->get_dev_status(df->dev.parent, &dev_stat);
		if (ret < 0)
			break;

		ret = devfreq_watermark_start(df);
		if (ret < 0)
			break;

		update_watermarks(df, dev_stat.current_frequency,
				  dev_stat.current_frequency);
		break;
	}
	case DEVFREQ_GOV_STOP:
		devfreq_watermark_debug_stop(df);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(df);
		break;
	case DEVFREQ_GOV_RESUME:
	{
		struct devfreq_dev_status dev_stat;

		ret = df->profile->get_dev_status(df->dev.parent, &dev_stat);
		if (ret < 0)
			break;

		update_watermarks(df, dev_stat.current_frequency,
				  dev_stat.current_frequency);
		devfreq_monitor_resume(df);
		break;
	}
	case DEVFREQ_GOV_WMARK:
		mutex_lock(&df->lock);
		update_devfreq(df);
		mutex_unlock(&df->lock);
		break;
	default:
		break;
	}

	return ret;
}
int devfreq_vbif_update_bw(unsigned long ib, unsigned long ab)
{
	int ret = 0;

	mutex_lock(&df_lock);
	if (df) {
		mutex_lock(&df->lock);
		dev_ib = ib;
		*dev_ab = ab;
		ret = update_devfreq(df);
		mutex_unlock(&df->lock);
	}
	mutex_unlock(&df_lock);

	return ret;
}
static int devfreq_simple_usage_notifier(struct notifier_block *nb,
					 unsigned long val, void *data)
{
	struct devfreq_notifier_block *devfreq_nb;

	devfreq_nb = container_of(nb, struct devfreq_notifier_block, nb);

	mutex_lock(&devfreq_nb->df->lock);
	update_devfreq(devfreq_nb->df);
	mutex_unlock(&devfreq_nb->df->lock);

	return NOTIFY_OK;
}
static void podgov_enable(struct device *dev, int enable)
{
	struct platform_device *d = to_platform_device(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(d);
	struct devfreq *df = pdata->power_manager;
	struct podgov_info_rec *podgov;

	if (!df)
		return;

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(d);

	mutex_lock(&df->lock);

	podgov = df->data;

	trace_podgov_enabled(enable);

	/* bad configuration. quit. */
	if (df->min_freq == df->max_freq)
		goto exit_unlock;

	/* store the enable information */
	podgov->enable = enable;

	/*
	 * Skip the local adjustment if we are enabling, or if the
	 * device is suspended.
	 */
	if (enable || !pm_runtime_active(&d->dev))
		goto exit_unlock;

	/* full speed */
	podgov->adjustment_frequency = df->max_freq;
	podgov->adjustment_type = ADJUSTMENT_LOCAL;
	update_devfreq(df);

	mutex_unlock(&df->lock);
	nvhost_module_idle(d);

	stop_podgov_workers(podgov);

	return;

exit_unlock:
	mutex_unlock(&df->lock);
	nvhost_module_idle(d);
}
static void podgov_set_user_ctl(struct device *dev, int user)
{
	struct platform_device *d = to_platform_device(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(d);
	struct devfreq *df = pdata->power_manager;
	struct podgov_info_rec *podgov;
	int old_user;

	if (!df)
		return;

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(d);

	mutex_lock(&df->lock);

	podgov = df->data;

	trace_podgov_set_user_ctl(user);

	/* store the new user value */
	old_user = podgov->p_user;
	podgov->p_user = user;

	/*
	 * Skip scaling if scaling (or the whole device) is turned off,
	 * or if the scaling was already in user mode.
	 */
	if (!pm_runtime_active(&d->dev) || !podgov->enable ||
	    !(user && !old_user))
		goto exit_unlock;

	/* write request */
	podgov->adjustment_frequency = podgov->p_freq_request;
	podgov->adjustment_type = ADJUSTMENT_LOCAL;
	update_devfreq(df);

	mutex_unlock(&df->lock);
	nvhost_module_idle(d);

	stop_podgov_workers(podgov);

	return;

exit_unlock:
	mutex_unlock(&df->lock);
	nvhost_module_idle(d);
}
static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp)
{
	int result = 0;
	struct devfreq *devfreq = devp;

	switch (type) {
	case ADRENO_DEVFREQ_NOTIFY_IDLE:
	case ADRENO_DEVFREQ_NOTIFY_RETIRE:
		mutex_lock(&devfreq->lock);
		result = update_devfreq(devfreq);
		mutex_unlock(&devfreq->lock);
		break;
	/* ignored by this governor */
	case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
	default:
		break;
	}

	return notifier_from_errno(result);
}
static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;
	int err = 0;

	/* reject input that does not parse; 'wanted' would be used uninitialized */
	if (sscanf(buf, "%lu", &wanted) != 1)
		return -EINVAL;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	data->user_frequency = wanted;
	data->valid = true;
	err = update_devfreq(devfreq);
	if (err == 0)
		err = count;
	mutex_unlock(&devfreq->lock);

	return err;
}
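/*
 * Sketch of how a store handler like this is typically exposed, modeled on
 * the in-kernel userspace governor: a DEVICE_ATTR plus an attribute group
 * that the governor creates at DEVFREQ_GOV_START with sysfs_create_group().
 * show_freq and the group/file names here are illustrative assumptions,
 * not necessarily this driver's code.
 */
static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;
	if (data->valid)
		err = sprintf(buf, "%lu\n", data->user_frequency);
	else
		err = sprintf(buf, "undefined\n");
	mutex_unlock(&devfreq->lock);

	return err;
}

static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);

static struct attribute *dev_entries[] = {
	&dev_attr_set_freq.attr,
	NULL,
};

static const struct attribute_group dev_attr_group = {
	.name = "userspace",
	.attrs = dev_entries,
};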
/*
 * Raise the DDR frequency temporarily.
 * @req_bw: bandwidth in KB
 */
void dfs_freq_raise_quirk(unsigned int req_bw)
{
	if (req_bw == 0)
		return;

	spin_lock(&dfs_req_lock);
	if (user_requests.req_quirk || devfreq_request_ignore()) {
		spin_unlock(&dfs_req_lock);
		return;
	}
	user_requests.req_quirk = req_bw;
	spin_unlock(&dfs_req_lock);

	mutex_lock(&g_devfreq->lock);
	devfreq_min_freq_cnt_reset(-1, 1);
	update_devfreq(g_devfreq);
	devfreq_min_freq_cnt_reset(-1, 0);
	user_requests.req_quirk = 0;
	mutex_unlock(&g_devfreq->lock);
}
void dfs_request_bw_timeout(unsigned int req_bw)
{
	struct userspace_data *user_data;
	unsigned int req_freq = 0;	/* must be initialized: the branch below may not set it */

	if (req_bw == 0)
		return;

	spin_lock(&dfs_req_lock);
	if (user_requests.req_timeout || devfreq_request_ignore()) {
		spin_unlock(&dfs_req_lock);
		return;
	}
	spin_unlock(&dfs_req_lock);

	if (g_devfreq && g_devfreq->data) {
		user_data = (struct userspace_data *)(g_devfreq->data);
		if (user_data->convert_bw_to_freq) {
			req_freq = user_data->convert_bw_to_freq(req_bw);
			printk("*** %s, req_freq:%u ***\n", __func__, req_freq);
		}
	}

	spin_lock(&dfs_req_lock);
	user_requests.req_timeout = req_freq;
	spin_unlock(&dfs_req_lock);

#if 0
	if (req_bw)
		mod_timer(&dfs_req_timer, jiffies + dfs_req_timeout);
	else
		del_timer_sync(&dfs_req_timer);
#endif

	if (req_freq) {
		mutex_lock(&g_devfreq->lock);
		devfreq_min_freq_cnt_reset(-1, 1);
		update_devfreq(g_devfreq);
		devfreq_min_freq_cnt_reset(-1, 0);
		dfs_req_timer_timeout(1);
		mutex_unlock(&g_devfreq->lock);
	}
}
static int gov_resume(struct devfreq *df)
{
	struct hwmon_node *node = df->data;

	if (!node->hw->resume_hwmon)
		return -ENOSYS;

	if (!node->resume_freq) {
		dev_warn(df->dev.parent, "Governor already resumed!\n");
		return -EBUSY;
	}

	mutex_lock(&df->lock);
	update_devfreq(df);
	mutex_unlock(&df->lock);

	node->resume_freq = 0;
	node->resume_ab = 0;

	return start_monitor(df, false);
}
static void mali_dvfs_event_proc(struct work_struct *w)
{
#ifdef CONFIG_MALI_DEVFREQ
	if (mali_devfreq) {
		mutex_lock(&mali_devfreq->lock);
		update_devfreq(mali_devfreq);
		mutex_unlock(&mali_devfreq->lock);
	}
#else
	mali_dvfs_status *dvfs_status;

	mutex_lock(&mali_enable_clock_lock);
	dvfs_status = &mali_dvfs_status_current;

	mali_dvfs_decide_next_level(dvfs_status);

	kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
	mutex_unlock(&mali_enable_clock_lock);
#endif
}
/*
 * Add a new DDR bandwidth request; when the time is up, the request is
 * cleared automatically.
 * @req_bw: bandwidth in KB
 */
void dfs_request_bw_timeout(unsigned int req_bw)
{
	struct userspace_data *user_data;
	unsigned int req_freq = 0;

	if (req_bw == 0)
		return;

	spin_lock(&dfs_req_lock);
	if (user_requests.req_timeout) {
		spin_unlock(&dfs_req_lock);
		pr_debug("*** %s, ignore, req_timeout:%d ***\n",
			 __func__, user_requests.req_timeout);
		return;
	}
	spin_unlock(&dfs_req_lock);

	if (g_devfreq && g_devfreq->data) {
		user_data = (struct userspace_data *)(g_devfreq->data);
		if (user_data->convert_bw_to_freq) {
			req_freq = user_data->convert_bw_to_freq(req_bw);
			printk("*** %s, req_freq:%u ***\n", __func__, req_freq);
		}
	}

	spin_lock(&dfs_req_lock);
	user_requests.req_timeout = req_freq;
	spin_unlock(&dfs_req_lock);

	if (req_freq)
		mod_timer(&dfs_req_timer, jiffies + dfs_req_timeout);
	else
		del_timer_sync(&dfs_req_timer);

	if (req_freq) {
		mutex_lock(&g_devfreq->lock);
		update_devfreq(g_devfreq);
		mutex_unlock(&g_devfreq->lock);
	}
}
static void nvhost_scale3d_notify(struct platform_device *dev, int busy)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	struct devfreq *df = pdata->power_manager;

	/* If devfreq is disabled, do nothing */
	if (!df) {
		/* Ok.. make sure the 3d gets the highest frequency always */
		if (busy) {
			unsigned long freq = power_profile.max_rate_3d;
			nvhost_scale3d_target(&dev->dev, &freq, 0);
		}
		return;
	}

	mutex_lock(&df->lock);
	power_profile.last_event_type = busy ? DEVICE_BUSY : DEVICE_IDLE;
	update_devfreq(df);
	mutex_unlock(&df->lock);
}
int dfs_set_freq(int freq)
{
	struct userspace_data *user_data;
	int err;

	if (freq < 0) {
		err = -1;
		pr_debug("*** %s, freq < 0\n", __func__);
		goto done;
	}

	user_data = (struct userspace_data *)(g_devfreq->data);

	mutex_lock(&g_devfreq->lock);
	if (user_data) {
		if (freq > 0) {
			/* pin the frequency: disable normal devfreq scaling */
			user_data->set_count++;
			user_data->devfreq_enable = false;
			if (freq > user_data->set_freq)
				user_data->set_freq = freq;
		} else {
			/* freq == 0 releases one pin request */
			if (user_data->set_count > 0) {
				user_data->set_count--;
				if (user_data->set_count == 0) {
					user_data->set_freq = 0;
					user_data->devfreq_enable = true;
				}
			}
		}
		pr_debug("*** %s, set freq:%d KHz, set_count:%lu ***\n",
			 __func__, freq, user_data->set_count);
	} else {
		pr_debug("*** %s, user_data == 0\n", __func__);
	}
	err = update_devfreq(g_devfreq);
	mutex_unlock(&g_devfreq->lock);

done:
	return err;
}
static void gk20a_scale_notify(struct platform_device *pdev, bool busy)
{
	struct gk20a_platform *platform = platform_get_drvdata(pdev);
	struct gk20a *g = get_gk20a(pdev);
	struct gk20a_scale_profile *profile = g->scale_profile;
	struct devfreq *devfreq = g->devfreq;

	/* update the software shadow */
	gk20a_pmu_load_update(g);

	/* inform edp about new constraint */
	if (platform->prescale)
		platform->prescale(pdev);

	/* Is the device profile initialised? */
	if (!(profile && devfreq))
		return;

	mutex_lock(&devfreq->lock);
	profile->dev_stat.busy = busy;
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}
static irqreturn_t mon_intr_handler(int irq, void *dev)
{
	struct devfreq *df = dev;
	ktime_t ts;
	unsigned int us;
	u32 regval;
	int ret;

	regval = get_l2_indirect_reg(L2PMOVSR);
	pr_debug("Got interrupt: %x\n", regval);

	devfreq_monitor_stop(df);

	/*
	 * Don't recalc bandwidth if the interrupt comes right after a
	 * previous bandwidth calculation. This is done for two reasons:
	 *
	 * 1. Sampling the BW during a very short duration can result in a
	 *    very inaccurate measurement due to very short bursts.
	 * 2. This can only happen if the limit was hit very close to the end
	 *    of the previous sample period. Which means the current BW
	 *    estimate is not very off and doesn't need to be readjusted.
	 */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, prev_ts));
	if (us > TOO_SOON_US) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		if (ret)
			pr_err("Unable to update freq on IRQ!\n");
		mutex_unlock(&df->lock);
	}

	devfreq_monitor_start(df);

	return IRQ_HANDLED;
}
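/*
 * The handler above sleeps (mutex_lock), so it must run in thread context.
 * A hypothetical registration, shown only to illustrate that assumption;
 * the helper name, IRQ number, and "bw_hwmon" label are placeholders:
 */
static int mon_irq_setup(int irq, struct devfreq *df)
{
	/* NULL hard handler + IRQF_ONESHOT runs mon_intr_handler in a kernel thread */
	return request_threaded_irq(irq, NULL, mon_intr_handler,
				    IRQF_ONESHOT, "bw_hwmon", df);
}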
static void nvhost_scale_notify(struct platform_device *pdev, bool busy)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_device_profile *profile = pdata->power_profile;
	struct devfreq *devfreq = pdata->power_manager;

	/* Is the device profile initialised? */
	if (!profile)
		return;

	/* If devfreq is disabled, set the freq to max or min */
	if (!devfreq) {
		unsigned long freq = busy ? UINT_MAX : 0;
		nvhost_scale_target(&pdev->dev, &freq, 0);
		return;
	}

	mutex_lock(&devfreq->lock);
	if (!profile->actmon)
		update_load_estimate(profile, busy);
	profile->last_event_type = busy ? DEVICE_BUSY : DEVICE_IDLE;
	update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);
}