static int mali_devfreq_add(struct device *dev)
{
	mali_devfreq = devfreq_add_device(dev, &exynos4_g3d_devfreq_profile,
					  &exynos4_g3d_abs_governor, NULL);
	/* devfreq_add_device() reports failure with an ERR_PTR(), not a
	 * negative integer, so test the result with IS_ERR(). */
	if (IS_ERR(mali_devfreq)) {
		mali_devfreq = NULL;
		return MALI_FALSE;
	}

	return MALI_TRUE;
}
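devfreq_add_device() hands back a struct devfreq pointer and signals failure through ERR_PTR(), which is why the snippet above cannot test the result with a signed comparison. Below is a minimal sketch of that check, using hypothetical example_profile/example_governor stand-ins rather than the Exynos4 symbols above; newer kernels take a governor name string as the third argument, and the same IS_ERR()/PTR_ERR() handling applies there.

#include <linux/devfreq.h>
#include <linux/err.h>

/* Hypothetical profile and governor, stand-ins for a driver's own objects. */
static struct devfreq_dev_profile example_profile;
static struct devfreq_governor example_governor;

static struct devfreq *example_devfreq;

static int example_devfreq_add(struct device *dev)
{
	struct devfreq *df;

	df = devfreq_add_device(dev, &example_profile,
				&example_governor, NULL);
	if (IS_ERR(df))			/* failure is an ERR_PTR(), never < 0 */
		return PTR_ERR(df);	/* propagate the negative errno */

	example_devfreq = df;
	return 0;
}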
void gk20a_scale_init(struct platform_device *pdev)
{
	struct gk20a_platform *platform = platform_get_drvdata(pdev);
	struct gk20a *g = platform->g;
	struct gk20a_scale_profile *profile;
	int err;

	if (g->scale_profile)
		return;

	profile = kzalloc(sizeof(*profile), GFP_KERNEL);
	if (!profile)	/* added: kzalloc() can fail */
		return;

	profile->pdev = pdev;
	profile->dev_stat.busy = false;

	/* Create frequency table */
	err = gk20a_scale_make_freq_table(profile);
	if (err || !profile->devfreq_profile.max_state)
		goto err_get_freqs;

	/* Store device profile so we can access it if devfreq governor
	 * init needs that */
	g->scale_profile = profile;

	if (platform->devfreq_governor) {
		struct devfreq *devfreq;

		profile->devfreq_profile.initial_freq =
			profile->devfreq_profile.freq_table[0];
		profile->devfreq_profile.target = gk20a_scale_target;
		profile->devfreq_profile.get_dev_status =
			gk20a_scale_get_dev_status;

		devfreq = devfreq_add_device(&pdev->dev,
					     &profile->devfreq_profile,
					     platform->devfreq_governor, NULL);
		if (IS_ERR(devfreq))
			devfreq = NULL;

		g->devfreq = devfreq;
	}

	/* Should we register QoS callback for this device? */
	if (platform->qos_id < PM_QOS_NUM_CLASSES &&
	    platform->qos_id != PM_QOS_RESERVED &&
	    platform->postscale) {
		profile->qos_notify_block.notifier_call =
			&gk20a_scale_qos_notify;
		pm_qos_add_notifier(platform->qos_id,
				    &profile->qos_notify_block);
	}

	return;

err_get_freqs:
	/* g->scale_profile has not been assigned yet on this path, so free
	 * the local allocation instead of the (still NULL) global pointer. */
	kfree(profile);
	g->scale_profile = NULL;
}
int mali_devfreq_add(struct kbase_device *kbdev)
{
	struct device *dev = kbdev->osdev.dev;

	mali_devfreq = devfreq_add_device(dev, &exynos5_g3d_devfreq_profile,
					  &exynos5_g3d_abs_governor, NULL);
	/* devfreq_add_device() returns an ERR_PTR() on failure. */
	if (IS_ERR(mali_devfreq)) {
		mali_devfreq = NULL;
		return MALI_FALSE;
	}

	DEBUG_PRINT_INFO("\n[mali_devfreq]mali_devfreq_add");

	return MALI_TRUE;
}
static mali_bool kbase_platform_init(struct kbase_device *kbdev)
{
	struct device *dev = kbdev->dev;

	dev->platform_data = kbdev;

#ifdef CONFIG_REPORT_VSYNC
	kbase_dev = kbdev;
#endif

	kbdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(kbdev->clk)) {
		printk("[mali-midgard] Failed to get clk\n");
		return MALI_FALSE;
	}

	kbdev->regulator = devm_regulator_get(dev,
			KBASE_HI3635_PLATFORM_GPU_REGULATOR_NAME);
	if (IS_ERR(kbdev->regulator)) {
		printk("[mali-midgard] Failed to get regulator\n");
		return MALI_FALSE;
	}

#ifdef CONFIG_PM_DEVFREQ
	if (of_init_opp_table(dev) ||
	    opp_init_devfreq_table(dev,
				   &mali_kbase_devfreq_profile.freq_table)) {
		printk("[mali-midgard] Failed to init devfreq_table\n");
		kbdev->devfreq = NULL;
	} else {
		mali_kbase_devfreq_profile.initial_freq =
			clk_get_rate(kbdev->clk);

		rcu_read_lock();
		mali_kbase_devfreq_profile.max_state = opp_get_opp_count(dev);
		rcu_read_unlock();

		kbdev->devfreq = devfreq_add_device(dev,
						    &mali_kbase_devfreq_profile,
						    "mali_ondemand", NULL);
	}

	if (IS_ERR(kbdev->devfreq)) {
		printk("[mali-midgard] NULL pointer [kbdev->devFreq]\n");
		return MALI_FALSE;
	}

	/* make devfreq function */
	//mali_kbase_devfreq_profile.polling_ms = DEFAULT_POLLING_MS;

#if KBASE_HI3635_GPU_IRDROP_ISSUE
	/* init update work */
	sw_policy.kbdev = kbdev;
	INIT_WORK(&sw_policy.update, handle_switch_policy);
#endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */
#endif

	return MALI_TRUE;
}
static int __init cpubw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct devfreq_dev_profile *p = &cpubw_profile;
	struct devfreq *df;
	u32 *data, ports[MAX_PATHS * 2];
	int ret, len, i;

	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
		len /= sizeof(ports[0]);
		if (len % 2 || len > ARRAY_SIZE(ports)) {
			dev_err(dev, "Unexpected number of ports\n");
			return -EINVAL;
		}

		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
						 ports, len);
		if (ret)
			return ret;

		num_paths = len / 2;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < num_paths; i++) {
		bw_levels[0].vectors[i].src = ports[2 * i];
		bw_levels[0].vectors[i].dst = ports[2 * i + 1];
		bw_levels[1].vectors[i].src = ports[2 * i];
		bw_levels[1].vectors[i].dst = ports[2 * i + 1];
	}
	bw_levels[0].num_paths = num_paths;
	bw_levels[1].num_paths = num_paths;

	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
		len /= sizeof(*data);
		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		p->freq_table = devm_kzalloc(dev,
					     len * sizeof(*p->freq_table),
					     GFP_KERNEL);
		if (!p->freq_table)
			return -ENOMEM;

		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
						 data, len);
		if (ret)
			return ret;

		for (i = 0; i < len; i++)
			p->freq_table[i] = data[i];
		p->max_state = len;
		p->initial_freq = data[len - 1];
	}

	bus_client = msm_bus_scale_register_client(&bw_data);
	if (!bus_client) {
		dev_err(dev, "Unable to register bus client\n");
		return -ENODEV;
	}

	df = devfreq_add_device(dev, &cpubw_profile, "msm_cpufreq", NULL);
	if (IS_ERR(df)) {
		msm_bus_scale_unregister_client(bus_client);
		return PTR_ERR(df);
	}

	return 0;
}
static int devfreq_clock_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dev_data *d;
	struct devfreq_dev_profile *p;
	u32 *data, poll;
	const char *gov_name;
	const char *clk_name;
	int ret, len, i, j;
	unsigned long f;

	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	platform_set_drvdata(pdev, d);

	if (of_property_read_string(dev->of_node, "clock-name", &clk_name)) {
		pr_err("%s can't find clock-name attribute!", __func__);
		return -EINVAL;
	}

	d->clk = devm_clk_get(dev, clk_name);
	if (IS_ERR(d->clk)) {
		pr_err("%s clock lookup failed! %s", __func__, clk_name);
		return PTR_ERR(d->clk);
	}

	if (!of_find_property(dev->of_node, PROP_TBL, &len)) {
		pr_err("%s prop table lookup failed!", __func__);
		return -EINVAL;
	}

	len /= sizeof(*data);
	data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	p = &d->profile;
	p->freq_table = devm_kzalloc(dev, len * sizeof(*p->freq_table),
				     GFP_KERNEL);
	if (!p->freq_table) {
		pr_err("%s no freq table!", __func__);
		return -ENOMEM;
	}

	ret = of_property_read_u32_array(dev->of_node, PROP_TBL, data, len);
	if (ret)
		return ret;

	j = 0;
	for (i = 0; i < len; i++) {
		f = clk_round_rate(d->clk, data[i] * 1000);
		if (IS_ERR_VALUE(f))
			dev_warn(dev, "Unable to find dev rate for %d KHz",
				 data[i]);
		else
			p->freq_table[j++] = f / 1000;
	}
	p->max_state = j;
	devm_kfree(dev, data);

	if (p->max_state == 0) {
		dev_err(dev, "Error parsing property %s!\n", PROP_TBL);
		return -EINVAL;
	}

	p->target = dev_target;
	p->get_cur_freq = dev_get_cur_freq;
	ret = dev_get_cur_freq(dev, &p->initial_freq);
	if (ret)
		return ret;

	p->polling_ms = 50;
	if (!of_property_read_u32(dev->of_node, "polling-ms", &poll))
		p->polling_ms = poll;

	if (of_property_read_string(dev->of_node, "governor", &gov_name))
		gov_name = "performance";

	d->df = devfreq_add_device(dev, p, gov_name, NULL);
	if (IS_ERR(d->df))
		return PTR_ERR(d->df);

	return 0;
}
static int exynos7_devfreq_disp_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct devfreq_data_disp *data;
	struct exynos_devfreq_platdata *plat_data;

	data = kzalloc(sizeof(struct devfreq_data_disp), GFP_KERNEL);
	if (data == NULL) {
		pr_err("DEVFREQ(DISP) : Failed to allocate private data\n");
		ret = -ENOMEM;
		goto err_data;
	}

	ret = exynos7420_devfreq_disp_init(data);
	if (ret) {
		pr_err("DEVFREQ(DISP) : Failed to initialize data\n");
		goto err_freqtable;
	}

	exynos7_devfreq_disp_profile.max_state = data->max_state;
	exynos7_devfreq_disp_profile.freq_table =
		kzalloc(sizeof(int) * data->max_state, GFP_KERNEL);
	if (exynos7_devfreq_disp_profile.freq_table == NULL) {
		pr_err("DEVFREQ(DISP) : Failed to allocate freq table\n");
		ret = -ENOMEM;
		goto err_freqtable;
	}

	ret = exynos7_init_disp_table(&pdev->dev, data);
	if (ret)
		goto err_inittable;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	data->volt_offset = 0;
	data->dev = &pdev->dev;
	data->vdd_disp_cam0 = regulator_get(NULL, "vdd_disp_cam0");
	/* regulator_get() returns an ERR_PTR() on failure, so a plain
	 * truthiness test is not sufficient here. */
	if (!IS_ERR_OR_NULL(data->vdd_disp_cam0))
		data->old_volt = regulator_get_voltage(data->vdd_disp_cam0);

	data->devfreq = devfreq_add_device(data->dev,
					   &exynos7_devfreq_disp_profile,
					   "simple_ondemand",
					   &exynos7_devfreq_disp_governor_data);
	/* added: check the ERR_PTR() return before dereferencing it below */
	if (IS_ERR(data->devfreq)) {
		pr_err("DEVFREQ(DISP) : Failed to add devfreq device\n");
		ret = PTR_ERR(data->devfreq);
		data->devfreq = NULL;
		goto err_inittable;
	}

	plat_data = data->dev->platform_data;
	data->devfreq->min_freq = plat_data->default_qos;
	data->devfreq->max_freq =
		exynos7_devfreq_disp_governor_data.cal_qos_max;
	data->cur_freq = exynos7_devfreq_disp_profile.initial_freq;

	register_reboot_notifier(&exynos7_disp_reboot_notifier);

#ifdef CONFIG_EXYNOS_THERMAL
	exynos_tmu_add_notifier(&data->tmu_notifier);
#endif
	data->use_dvfs = true;

	return ret;

err_inittable:
	devfreq_remove_device(data->devfreq);
	kfree(exynos7_devfreq_disp_profile.freq_table);
err_freqtable:
	kfree(data);
err_data:
	return ret;
}
static int ddr_devfreq_probe(struct platform_device *pdev)
{
	struct ddr_devfreq_device *ddev = NULL;
	struct device_node *np = pdev->dev.of_node;
	struct devfreq_pm_qos_data *ddata = NULL;
	const char *type = NULL;
	int ret = 0;
#ifdef CONFIG_INPUT_PULSE_SUPPORT
	int rc = 0;
	static int inited = 0;
	struct device_node *root = NULL;

	if (inited == 0) {
		root = of_find_compatible_node(NULL, NULL,
					       "hisilicon,ddrfreq_boost");
		if (!root) {
			pr_err("%s hisilicon,ddrfreq_boost no root node\n",
			       __func__);
		} else {
			of_property_read_u32_array(root, "switch-value",
						   &boost_ddrfreq_switch, 0x1);
			pr_err("switch-value: %d", boost_ddrfreq_switch);
			if (boost_ddrfreq_switch != 0x0) {
				of_property_read_u32_array(root,
						"ddrfreq_dfs_value",
						&boost_ddr_dfs_band, 0x1);
				of_property_read_u32_array(root,
						"ddrfreq_dfs_last",
						&boost_ddr_dfs_last, 0x1);
				pr_err("boost_ddr_dfs_band: %d, boost_ddr_dfs_last: %d\n",
				       boost_ddr_dfs_band, boost_ddr_dfs_last);

				rc = input_register_handler(&ddrfreq_input_handler);
				if (rc)
					pr_warn("%s: failed to register input handler\n",
						__func__);

				down_wq = alloc_workqueue("ddrfreq_down", 0, 1);
				if (!down_wq)
					return -ENOMEM;

				INIT_WORK(&inputopen.inputopen_work,
					  ddrfreq_input_open);
				ddrfreq_min_req.pm_qos_class = 0;
				atomic_set(&flag, 0x0);
				INIT_DELAYED_WORK(&ddrfreq_begin,
						  (work_func_t)ddrfreq_begin_work);
				INIT_DELAYED_WORK(&ddrfreq_end,
						  (work_func_t)ddrfreq_end_work);
			}
		}
		inited = 1;
	}
#endif /* CONFIG_INPUT_PULSE_SUPPORT */

	if (!np) {
		pr_err("%s: %s %d, no device node\n",
		       MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto out;
	}

	ret = of_property_read_string(np, "pm_qos_class", &type);
	if (ret) {
		pr_err("%s: %s %d, no type\n", MODULE_NAME, __func__, __LINE__);
		ret = -EINVAL;
		goto no_type;
	}

	if (!strcmp("memory_tput", type)) {
		ret = of_property_read_u32_array(np, "pm_qos_data_reg",
				(u32 *)&ddr_devfreq_pm_qos_data, 0x2);
		if (ret)
			pr_err("%s: %s %d, no type\n",
			       MODULE_NAME, __func__, __LINE__);

		pr_err("%s: %s %d, per_hz %d utilization %d\n",
		       MODULE_NAME, __func__, __LINE__,
		       ddr_devfreq_pm_qos_data.bytes_per_sec_per_hz,
		       ddr_devfreq_pm_qos_data.bd_utilization);
		ddata = &ddr_devfreq_pm_qos_data;
		dev_set_name(&pdev->dev, "ddrfreq");
	} else if (!strcmp("memory_tput_up_threshold", type)) {
		ret = of_property_read_u32_array(np, "pm_qos_data_reg",
				(u32 *)&ddr_devfreq_up_th_pm_qos_data, 0x2);
		if (ret)
			pr_err("%s: %s %d, no type\n",
			       MODULE_NAME, __func__, __LINE__);

		pr_err("%s: %s %d, per_hz %d utilization %d\n",
		       MODULE_NAME, __func__, __LINE__,
		       ddr_devfreq_up_th_pm_qos_data.bytes_per_sec_per_hz,
		       ddr_devfreq_up_th_pm_qos_data.bd_utilization);
		ddata = &ddr_devfreq_up_th_pm_qos_data;
		dev_set_name(&pdev->dev, "ddrfreq_up_threshold");
	} else {
		pr_err("%s: %s %d, err type\n",
		       MODULE_NAME, __func__, __LINE__);
		ret = -EINVAL;
		goto err_type;
	}

	ddev = kmalloc(sizeof(struct ddr_devfreq_device), GFP_KERNEL);
	if (!ddev) {
		pr_err("%s: %s %d, no mem\n", MODULE_NAME, __func__, __LINE__);
		ret = -ENOMEM;
		goto no_men;
	}

	ddev->set = of_clk_get(np, 0);
	if (IS_ERR(ddev->set)) {
		pr_err("%s: %s %d, Failed to get set-clk\n",
		       MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto no_clk1;
	}

	ddev->get = of_clk_get(np, 1);
	if (IS_ERR(ddev->get)) {
		pr_err("%s: %s %d, Failed to get get-clk\n",
		       MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto no_clk2;
	}

	if (of_init_opp_table(&pdev->dev) ||
	    opp_init_devfreq_table(&pdev->dev,
				   &ddr_devfreq_dev_profile.freq_table)) {
		ddev->devfreq = NULL;
	} else {
		ddr_devfreq_dev_profile.initial_freq = clk_get_rate(ddev->get);

		rcu_read_lock();
		ddr_devfreq_dev_profile.max_state =
			opp_get_opp_count(&pdev->dev);
		rcu_read_unlock();

		ddev->devfreq = devfreq_add_device(&pdev->dev,
						   &ddr_devfreq_dev_profile,
						   "pm_qos", ddata);
	}

	if (IS_ERR(ddev->devfreq)) {
		pr_err("%s: %s %d, <%s>, Failed to init ddr devfreq_table\n",
		       MODULE_NAME, __func__, __LINE__, type);
		ret = -ENODEV;
		goto no_devfreq;
	}

	/*
	 * cache value.
	 * It does not mean actual ddr clk currently,
	 * but a frequency cache of every clk setting in the module.
	 * Because, it is not obligatory that setting value is equal to
	 * the currently actual ddr clk frequency.
	 */
	ddev->freq = 0;

	if (ddev->devfreq) {
		ddev->devfreq->max_freq =
			ddr_devfreq_dev_profile.freq_table[ddr_devfreq_dev_profile.max_state - 1];
		ddev->devfreq->min_freq =
			ddr_devfreq_dev_profile.freq_table[ddr_devfreq_dev_profile.max_state - 1];
	}

	platform_set_drvdata(pdev, ddev);
	pr_info("%s: <%s> ready\n", MODULE_NAME, type);
	return ret;

no_devfreq:
	clk_put(ddev->get);
	opp_free_devfreq_table(&pdev->dev, &ddr_devfreq_dev_profile.freq_table);
no_clk2:
	clk_put(ddev->set);
no_clk1:
	kfree(ddev);
no_men:
err_type:
no_type:
out:
	return ret;
}
static int probe(struct platform_device *pdev)
{
	struct spdm_data *data = 0;
	int ret = -EINVAL;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->action = SPDM_DOWN;

	platform_set_drvdata(pdev, data);

	ret = populate_spdm_data(data, pdev);
	if (ret)
		goto bad_of;

	data->bus_scale_client_id = msm_bus_scale_register_client(data->pdata);
	if (!data->bus_scale_client_id) {
		ret = -EINVAL;
		goto no_bus_scaling;
	}

	data->cci_clk = clk_get(&pdev->dev, "cci_clk");
	if (IS_ERR(data->cci_clk)) {
		ret = PTR_ERR(data->cci_clk);
		goto no_clock;
	}

	data->profile = devm_kzalloc(&pdev->dev, sizeof(*(data->profile)),
				     GFP_KERNEL);
	if (!data->profile) {
		ret = -ENOMEM;
		goto no_profile;
	}
	data->profile->target = change_bw;
	data->profile->get_dev_status = get_dev_status;
	data->profile->get_cur_freq = get_cur_bw;
	data->profile->polling_ms = data->window;

	data->devfreq = devfreq_add_device(&pdev->dev, data->profile,
					   "spdm_bw_hyp", data);
	if (IS_ERR(data->devfreq)) {
		ret = PTR_ERR(data->devfreq);
		goto no_spdm_device;
	}

	spdm_init_debugfs(&pdev->dev);
	spdm_ipc_log_ctxt = ipc_log_context_create(SPDM_IPC_LOG_PAGES,
						   "devfreq_spdm", 0);
	if (IS_ERR_OR_NULL(spdm_ipc_log_ctxt)) {
		pr_err("%s: Failed to create IPC log context\n", __func__);
		spdm_ipc_log_ctxt = NULL;
	}

	return 0;

no_spdm_device:
	devm_kfree(&pdev->dev, data->profile);
no_profile:
no_clock:
	msm_bus_scale_unregister_client(data->bus_scale_client_id);
no_bus_scaling:
	devm_kfree(&pdev->dev, data->config_data.ports);
bad_of:
	devm_kfree(&pdev->dev, data);
	platform_set_drvdata(pdev, NULL);
	return ret;
}
static int exynos5_devfreq_int_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct devfreq_data_int *data;
	struct devfreq_notifier_block *devfreq_nb;
	struct exynos_devfreq_platdata *plat_data;

	data = kzalloc(sizeof(struct devfreq_data_int), GFP_KERNEL);
	if (data == NULL) {
		pr_err("DEVFREQ(INT) : Failed to allocate private data\n");
		ret = -ENOMEM;
		goto err_data;
	}

	exynos5433_devfreq_int_init(data);

	data->initial_freq = exynos5_devfreq_int_profile.initial_freq;
	exynos5_devfreq_int_profile.freq_table =
		kzalloc(sizeof(int) * data->max_state, GFP_KERNEL);
	if (exynos5_devfreq_int_profile.freq_table == NULL) {
		pr_err("DEVFREQ(INT) : Failed to allocate freq table\n");
		ret = -ENOMEM;
		goto err_freqtable;
	}

	ret = exynos5_init_int_table(&pdev->dev, data);
	if (ret)
		goto err_inittable;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	data->target_volt = get_match_volt(ID_INT, DEVFREQ_INITIAL_FREQ);
	data->volt_constraint_isp = 0;
	data->volt_offset = 0;
	int_dev = data->dev = &pdev->dev;
	data->vdd_int = regulator_get(NULL, "vdd_int");

	data->devfreq = devfreq_add_device(data->dev,
					   &exynos5_devfreq_int_profile,
					   "simple_ondemand",
					   &exynos5_devfreq_int_governor_data);
	/* added: the return value is an ERR_PTR() on failure and must not
	 * be dereferenced below */
	if (IS_ERR(data->devfreq)) {
		pr_err("DEVFREQ(INT) : Failed to add devfreq device\n");
		ret = PTR_ERR(data->devfreq);
		goto err_inittable;
	}

	devfreq_nb = kzalloc(sizeof(struct devfreq_notifier_block), GFP_KERNEL);
	if (devfreq_nb == NULL) {
		pr_err("DEVFREQ(INT) : Failed to allocate notifier block\n");
		ret = -ENOMEM;
		goto err_nb;
	}

	devfreq_nb->df = data->devfreq;
	devfreq_nb->nb.notifier_call = exynos5_devfreq_int_notifier;

	exynos5433_devfreq_register(&devfreq_int_exynos);
	exynos5433_ppmu_register_notifier(INT, &devfreq_nb->nb);

	plat_data = data->dev->platform_data;
	data->default_qos = plat_data->default_qos;
	data->devfreq->min_freq = plat_data->default_qos;
	data->devfreq->max_freq = devfreq_int_opp_list[0].freq;

	register_reboot_notifier(&exynos5_int_reboot_notifier);

#ifdef CONFIG_EXYNOS_THERMAL
	data->tmu_notifier.notifier_call = exynos5_devfreq_int_tmu_notifier;
	exynos_tmu_add_notifier(&data->tmu_notifier);
#endif
	data->use_dvfs = true;

	return ret;

err_nb:
	devfreq_remove_device(data->devfreq);
err_inittable:
	kfree(exynos5_devfreq_int_profile.freq_table);
err_freqtable:
	kfree(data);
err_data:
	return ret;
}
void nvhost_scale_init(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_device_profile *profile;

	if (pdata->power_profile)
		return;

	profile = kzalloc(sizeof(struct nvhost_device_profile), GFP_KERNEL);
	if (!profile)
		return;
	pdata->power_profile = profile;
	profile->pdev = pdev;
	profile->clk = pdata->clk[0];
	profile->last_event_type = DEVICE_UNKNOWN;

	/* Initialize devfreq related structures */
	profile->dev_stat.private_data = &profile->ext_stat;
	profile->ext_stat.min_freq =
		clk_round_rate(clk_get_parent(profile->clk), 0);
	profile->ext_stat.max_freq =
		clk_round_rate(clk_get_parent(profile->clk), UINT_MAX);
	profile->ext_stat.busy = DEVICE_UNKNOWN;

	if (profile->ext_stat.min_freq == profile->ext_stat.max_freq) {
		dev_warn(&pdev->dev,
			 "max rate = min rate (%lu), disabling scaling\n",
			 profile->ext_stat.min_freq);
		goto err_fetch_clocks;
	}

	/* Initialize actmon */
	if (pdata->actmon_enabled) {
		if (device_create_file(&pdev->dev, &dev_attr_load))
			goto err_create_sysfs_entry;

		profile->actmon = kzalloc(sizeof(struct host1x_actmon),
					  GFP_KERNEL);
		if (!profile->actmon)
			goto err_allocate_actmon;

		profile->actmon->host = nvhost_get_host(pdev);
		profile->actmon->regs = nvhost_get_host(pdev)->aperture +
			pdata->actmon_regs;

		actmon_op().init(profile->actmon);
		actmon_op().debug_init(profile->actmon, pdata->debugfs);
		actmon_op().deinit(profile->actmon);
	}

	if (pdata->devfreq_governor) {
		struct devfreq *devfreq =
			devfreq_add_device(&pdev->dev,
					   &nvhost_scale_devfreq_profile,
					   pdata->devfreq_governor, NULL);
		if (IS_ERR(devfreq))
			devfreq = NULL;

		pdata->power_manager = devfreq;
	}

	return;

err_allocate_actmon:
	device_remove_file(&pdev->dev, &dev_attr_load);
err_create_sysfs_entry:
err_fetch_clocks:
	kfree(pdata->power_profile);
	pdata->power_profile = NULL;
}
void nvhost_scale3d_actmon_init(struct platform_device *dev)
{
	struct nvhost_devfreq_ext_stat *ext_stat;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	if (power_profile.init)
		return;

	/* Get clocks */
	power_profile.dev = dev;
	power_profile.clk_3d = pdata->clk[0];
	if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3) {
		power_profile.clk_3d2 = pdata->clk[1];
		power_profile.clk_3d_emc = pdata->clk[2];
	} else
		power_profile.clk_3d_emc = pdata->clk[1];

	/* Get frequency settings */
	power_profile.max_rate_3d =
		clk_round_rate(power_profile.clk_3d, UINT_MAX);
	power_profile.min_rate_3d =
		clk_round_rate(power_profile.clk_3d, 0);
	nvhost_scale3d_devfreq_profile.initial_freq = power_profile.max_rate_3d;

	if (power_profile.max_rate_3d == power_profile.min_rate_3d) {
		pr_warn("scale3d: 3d max rate = min rate (%lu), disabling\n",
			power_profile.max_rate_3d);
		goto err_bad_power_profile;
	}

	/* Reserve space for devfreq structures (dev_stat and ext_dev_stat) */
	power_profile.dev_stat =
		kzalloc(sizeof(struct power_profile_gr3d), GFP_KERNEL);
	if (!power_profile.dev_stat)
		goto err_devfreq_alloc;
	ext_stat = kzalloc(sizeof(struct nvhost_devfreq_ext_stat), GFP_KERNEL);
	if (!ext_stat)
		goto err_devfreq_ext_stat_alloc;

	/* Initialise the dev_stat and ext_stat structures */
	power_profile.dev_stat->private_data = ext_stat;
	power_profile.last_event_type = DEVICE_UNKNOWN;
	ext_stat->min_freq = power_profile.min_rate_3d;
	ext_stat->max_freq = power_profile.max_rate_3d;
	power_profile.last_request_time = ktime_get();

	nvhost_scale3d_calibrate_emc();

	/* Start using devfreq */
	pdata->power_manager = devfreq_add_device(&dev->dev,
						  &nvhost_scale3d_devfreq_profile,
						  &nvhost_podgov, NULL);

	power_profile.init = 1;
	return;

err_devfreq_ext_stat_alloc:
	kfree(power_profile.dev_stat);
err_devfreq_alloc:
err_bad_power_profile:
	return;
}
int devfreq_add_devbw(struct device *dev)
{
	struct dev_data *d;
	struct devfreq_dev_profile *p;
	u32 *data, ports[MAX_PATHS * 2];
	const char *gov_name;
	int ret, len, i, num_paths;

	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	dev_set_drvdata(dev, d);

	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
		len /= sizeof(ports[0]);
		if (len % 2 || len > ARRAY_SIZE(ports)) {
			dev_err(dev, "Unexpected number of ports\n");
			return -EINVAL;
		}

		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
						 ports, len);
		if (ret)
			return ret;

		num_paths = len / 2;
	} else {
		return -EINVAL;
	}

	d->bw_levels[0].vectors = &d->vectors[0];
	d->bw_levels[1].vectors = &d->vectors[MAX_PATHS];
	d->bw_data.usecase = d->bw_levels;
	d->bw_data.num_usecases = ARRAY_SIZE(d->bw_levels);
	d->bw_data.name = dev_name(dev);
	d->bw_data.active_only = of_property_read_bool(dev->of_node,
						       PROP_ACTIVE);

	for (i = 0; i < num_paths; i++) {
		d->bw_levels[0].vectors[i].src = ports[2 * i];
		d->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
		d->bw_levels[1].vectors[i].src = ports[2 * i];
		d->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
	}
	d->bw_levels[0].num_paths = num_paths;
	d->bw_levels[1].num_paths = num_paths;
	d->num_paths = num_paths;

	p = &d->dp;
	p->polling_ms = 50;
	p->target = devbw_target;
	p->get_dev_status = devbw_get_dev_status;

	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
		len /= sizeof(*data);
		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		p->freq_table = devm_kzalloc(dev,
					     len * sizeof(*p->freq_table),
					     GFP_KERNEL);
		if (!p->freq_table)
			return -ENOMEM;

		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
						 data, len);
		if (ret)
			return ret;

		for (i = 0; i < len; i++)
			p->freq_table[i] = data[i];
		p->max_state = len;
	}

	if (of_find_property(dev->of_node, PROP_AB_TBL, &len)) {
		len /= sizeof(*data);
		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		d->freq_ab_table = devm_kzalloc(dev,
						len * sizeof(*d->freq_ab_table),
						GFP_KERNEL);
		if (!d->freq_ab_table)
			return -ENOMEM;

		ret = of_property_read_u32_array(dev->of_node, PROP_AB_TBL,
						 data, len);
		if (ret)
			return ret;

		for (i = 0; i < len; i++)
			d->freq_ab_table[i] = data[i];
	}

	d->bus_client = msm_bus_scale_register_client(&d->bw_data);
	if (!d->bus_client) {
		dev_err(dev, "Unable to register bus client\n");
		return -ENODEV;
	}

	if (of_property_read_string(dev->of_node, "governor", &gov_name))
		gov_name = "performance";

	d->df = devfreq_add_device(dev, p, gov_name, NULL);
	if (IS_ERR(d->df)) {
		msm_bus_scale_unregister_client(d->bus_client);
		return PTR_ERR(d->df);
	}

	return 0;
}
int kbase_devfreq_init(struct kbase_device *kbdev)
{
	struct devfreq_dev_profile *dp;
	int err;

	dev_dbg(kbdev->dev, "Init Mali devfreq\n");

	if (!kbdev->clock)
		return -ENODEV;

	dp = &kbdev->devfreq_profile;

	dp->initial_freq = clk_get_rate(kbdev->clock);
	dp->polling_ms = 1000;
	dp->target = kbase_devfreq_target;
	dp->get_dev_status = kbase_devfreq_status;
	dp->get_cur_freq = kbase_devfreq_cur_freq;
	dp->exit = kbase_devfreq_exit;

	if (kbase_devfreq_init_freq_table(kbdev, dp))
		return -EFAULT;

	kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
					    "simple_ondemand", NULL);
	if (IS_ERR_OR_NULL(kbdev->devfreq)) {
		kbase_devfreq_term_freq_table(kbdev);
		return PTR_ERR(kbdev->devfreq);
	}

	err = devfreq_register_opp_notifier(kbdev->dev, kbdev->devfreq);
	if (err) {
		dev_err(kbdev->dev,
			"Failed to register OPP notifier (%d)\n", err);
		goto opp_notifier_failed;
	}

#ifdef CONFIG_DEVFREQ_THERMAL
	kbdev->devfreq_cooling = of_devfreq_cooling_register(
			kbdev->dev->of_node,
			kbdev->devfreq);
	if (IS_ERR_OR_NULL(kbdev->devfreq_cooling)) {
		err = PTR_ERR(kbdev->devfreq_cooling);
		dev_err(kbdev->dev,
			"Failed to register cooling device (%d)\n", err);
		goto cooling_failed;
	}

#ifdef CONFIG_MALI_POWER_ACTOR
	err = mali_pa_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Failed to init power actor\n");
		goto pa_failed;
	}
#endif
#endif

	return 0;

#ifdef CONFIG_DEVFREQ_THERMAL
#ifdef CONFIG_MALI_POWER_ACTOR
pa_failed:
	devfreq_cooling_unregister(kbdev->devfreq_cooling);
#endif /* CONFIG_MALI_POWER_ACTOR */
cooling_failed:
	devfreq_unregister_opp_notifier(kbdev->dev, kbdev->devfreq);
#endif /* CONFIG_DEVFREQ_THERMAL */
opp_notifier_failed:
	err = devfreq_remove_device(kbdev->devfreq);
	if (err)
		dev_err(kbdev->dev, "Failed to terminate devfreq (%d)\n", err);
	else
		kbdev->devfreq = NULL;

	return err;
}
int g3_display_probe(struct platform_device *pdev)
{
	struct g3_display_data *data;
	struct device *dev = &pdev->dev;
	int ret = 0;
	struct opp *opp;
	int i;

	data = kzalloc(sizeof(struct g3_display_data), GFP_KERNEL);
	if (!data) {
		dev_err(dev, "cannot allocate memory.\n");
		return -ENOMEM;
	}
	data->dev = dev;
	mutex_init(&data->lock);

	/* register opp entries */
	for (i = 0; i < _LV_END_; i++) {
		ret = opp_add(dev, g3_display_opp_table[i].freq,
			      g3_display_opp_table[i].volt);
		if (ret) {
			dev_err(dev, "cannot add opp entries.\n");
			goto err_alloc_mem;
		}
	}

	/* find opp entry with init frequency */
	opp = opp_find_freq_floor(dev, &g3_display_profile.initial_freq);
	if (IS_ERR(opp)) {
		dev_err(dev, "invalid initial frequency %lu.\n",
			g3_display_profile.initial_freq);
		ret = PTR_ERR(opp);
		goto err_alloc_mem;
	}
	data->curr_opp = opp;

	/* initialize qos */
	/* TODO */

	/* register g3_display to devfreq framework */
	data->devfreq = devfreq_add_device(dev, &g3_display_profile,
					   "simple_ondemand",
					   &g3_display_ondemand_data);
	if (IS_ERR(data->devfreq)) {
		ret = PTR_ERR(data->devfreq);
		dev_err(dev, "failed to add devfreq: %d\n", ret);
		goto err_alloc_mem;
	}
	devfreq_register_opp_notifier(dev, data->devfreq);

	/* register g3_display as client to pm notifier */
	memset(&data->nb_pm, 0, sizeof(data->nb_pm));
	data->nb_pm.notifier_call = g3_display_pm_notifier_callback;
	ret = register_pm_notifier(&data->nb_pm);
	if (ret < 0) {
		dev_err(dev, "failed to get pm notifier: %d\n", ret);
		goto err_add_devfreq;
	}

	platform_set_drvdata(pdev, data);

	return 0;

err_add_devfreq:
	devfreq_remove_device(data->devfreq);
err_alloc_mem:
	kfree(data);

	return ret;
}
static int exynos5_devfreq_isp_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct devfreq_data_isp *data;
	struct exynos_devfreq_platdata *plat_data;
	struct opp *target_opp;
	unsigned long freq;
	unsigned long volt;

	data = kzalloc(sizeof(struct devfreq_data_isp), GFP_KERNEL);
	if (data == NULL) {
		pr_err("DEVFREQ(ISP) : Failed to allocate private data\n");
		ret = -ENOMEM;
		goto err_data;
	}

	exynos5433_devfreq_isp_init(data);

	exynos5_devfreq_isp_profile.max_state = data->max_state;
	exynos5_devfreq_isp_profile.freq_table =
		kzalloc(sizeof(int) * data->max_state, GFP_KERNEL);
	if (exynos5_devfreq_isp_profile.freq_table == NULL) {
		pr_err("DEVFREQ(ISP) : Failed to allocate freq table\n");
		ret = -ENOMEM;
		goto err_freqtable;
	}

	ret = exynos5_init_isp_table(&pdev->dev, data);
	if (ret)
		goto err_inittable;

	platform_set_drvdata(pdev, data);
	mutex_init(&data->lock);

	data->initial_freq = exynos5_devfreq_isp_profile.initial_freq;
	data->volt_offset = 0;
	isp_dev = data->dev = &pdev->dev;
	data->vdd_isp = regulator_get(NULL, "vdd_disp_cam0");
	if (IS_ERR_OR_NULL(data->vdd_isp)) {
		pr_err("DEVFREQ(ISP) : Failed to get regulator\n");
		goto err_inittable;
	}

	freq = DEVFREQ_INITIAL_FREQ;
	rcu_read_lock();
	target_opp = devfreq_recommended_opp(data->dev, &freq, 0);
	if (IS_ERR(target_opp)) {
		rcu_read_unlock();
		dev_err(data->dev, "DEVFREQ(ISP) : Invalid OPP to set voltage");
		ret = PTR_ERR(target_opp);
		goto err_opp;
	}
	volt = opp_get_voltage(target_opp);
#ifdef CONFIG_EXYNOS_THERMAL
	volt = get_limit_voltage(volt, data->volt_offset);
#endif
	rcu_read_unlock();

	if (data->isp_set_volt)
		data->isp_set_volt(data, volt, volt + VOLT_STEP, false);

	data->devfreq = devfreq_add_device(data->dev,
					   &exynos5_devfreq_isp_profile,
					   "simple_ondemand",
					   &exynos5_devfreq_isp_governor_data);
	/* added: check the ERR_PTR() return before dereferencing it below;
	 * clear the pointer so the error path can call
	 * devfreq_remove_device() safely */
	if (IS_ERR(data->devfreq)) {
		pr_err("DEVFREQ(ISP) : Failed to add devfreq device\n");
		ret = PTR_ERR(data->devfreq);
		data->devfreq = NULL;
		goto err_opp;
	}

	plat_data = data->dev->platform_data;
	data->devfreq->min_freq = plat_data->default_qos;
	data->devfreq->max_freq = exynos5_devfreq_isp_governor_data.cal_qos_max;

	register_reboot_notifier(&exynos5_isp_reboot_notifier);

#ifdef CONFIG_EXYNOS_THERMAL
	data->tmu_notifier.notifier_call = exynos5_devfreq_isp_tmu_notifier;
	exynos_tmu_add_notifier(&data->tmu_notifier);
#endif
	data->use_dvfs = true;

	return ret;

err_opp:
	regulator_put(data->vdd_isp);
err_inittable:
	devfreq_remove_device(data->devfreq);
	kfree(exynos5_devfreq_isp_profile.freq_table);
err_freqtable:
	kfree(data);
err_data:
	return ret;
}
void nvhost_scale_init(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_device_profile *profile;
	int err;

	if (pdata->power_profile)
		return;

	profile = kzalloc(sizeof(struct nvhost_device_profile), GFP_KERNEL);
	if (!profile)
		return;
	pdata->power_profile = profile;
	profile->pdev = pdev;
	profile->clk = pdata->clk[0];
	profile->dev_stat.busy = false;

	/* Create frequency table */
	err = nvhost_scale_make_freq_table(profile);
	if (err || !profile->devfreq_profile.max_state)
		goto err_get_freqs;

	/* Initialize actmon */
	if (pdata->actmon_enabled) {
		if (device_create_file(&pdev->dev, &dev_attr_load))
			goto err_create_sysfs_entry;

		profile->actmon = kzalloc(sizeof(struct host1x_actmon),
					  GFP_KERNEL);
		if (!profile->actmon)
			goto err_allocate_actmon;

		profile->actmon->host = nvhost_get_host(pdev);
		profile->actmon->regs = nvhost_get_host(pdev)->aperture +
			pdata->actmon_regs;

		actmon_op().init(profile->actmon);
		actmon_op().debug_init(profile->actmon, pdata->debugfs);
		actmon_op().deinit(profile->actmon);
	}

	if (pdata->devfreq_governor) {
		struct devfreq *devfreq;

		profile->devfreq_profile.initial_freq =
			profile->devfreq_profile.freq_table[0];
		profile->devfreq_profile.target = nvhost_scale_target;
		profile->devfreq_profile.get_dev_status =
			nvhost_scale_get_dev_status;

		devfreq = devfreq_add_device(&pdev->dev,
					     &profile->devfreq_profile,
					     pdata->devfreq_governor, NULL);
		if (IS_ERR(devfreq))
			devfreq = NULL;

		pdata->power_manager = devfreq;
	}

	/* Should we register QoS callback for this device? */
	if (pdata->qos_id < PM_QOS_NUM_CLASSES &&
	    pdata->qos_id != PM_QOS_RESERVED) {
		profile->qos_notify_block.notifier_call =
			&nvhost_scale_qos_notify;
		pm_qos_add_notifier(pdata->qos_id,
				    &profile->qos_notify_block);
	}

	return;

err_get_freqs:
err_allocate_actmon:
	device_remove_file(&pdev->dev, &dev_attr_load);
err_create_sysfs_entry:
	kfree(pdata->power_profile);
	pdata->power_profile = NULL;
}
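Across these call sites the registration pattern is the same: fill a struct devfreq_dev_profile with an initial frequency and the target/get_dev_status callbacks, pass it to devfreq_add_device() with a governor, and check the ERR_PTR() result before touching the returned struct devfreq. The following is a condensed, hedged sketch of that shared pattern, not any one of the drivers above; the callbacks and names are hypothetical placeholders, and it uses the string-based governor argument of more recent kernels.

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical callbacks; a real driver would program clocks or bus votes
 * in target() and report real load figures in get_dev_status(). */
static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Accept the requested frequency unchanged in this sketch. */
	return 0;
}

static int example_get_dev_status(struct device *dev,
				  struct devfreq_dev_status *stat)
{
	/* Report an idle device so the governor has something to chew on. */
	stat->busy_time = 0;
	stat->total_time = 0;
	stat->current_frequency = 400000000;
	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 400000000,	/* Hz, placeholder value */
	.polling_ms	= 50,
	.target		= example_target,
	.get_dev_status	= example_get_dev_status,
};

static int example_register(struct device *dev)
{
	struct devfreq *df;

	/* "simple_ondemand" is the stock governor several of the probes
	 * above use; the last argument carries governor-specific data. */
	df = devfreq_add_device(dev, &example_profile, "simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	dev_set_drvdata(dev, df);
	return 0;
}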