/*
 * SysDvfsInitialize
 *
 * Query the OPP layer once for the set of available SGX frequencies and
 * cache them in psSysSpecificData. The list gets one extra slot holding
 * the frequency assumed while the actual DVFS state is unknown.
 *
 * Returns PVRSRV_OK on success, PVRSRV_ERROR_NOT_SUPPORTED if no OPPs are
 * available, PVRSRV_ERROR_OUT_OF_MEMORY on allocation failure.
 */
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
#if !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	PVR_UNREFERENCED_PARAMETER(psSysSpecificData);
#else
	IMG_UINT32 ui32Idx, *pui32Freqs;
	IMG_INT32 i32NumOpps;
	unsigned long ulFreq;
	struct opp *psOpp;

	/* The OPP table must be walked inside an RCU read-side section. */
	rcu_read_lock();

	i32NumOpps = opp_get_opp_count(&gpsPVRLDMDev->dev);
	if (i32NumOpps < 1)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	/*
	 * One slot per OPP plus one for the "unknown state" frequency.
	 * GFP_ATOMIC because we are still inside the RCU read-side section.
	 */
	pui32Freqs = kmalloc((i32NumOpps + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
	if (!pui32Freqs)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* Walk the OPPs from lowest to highest via successive ceil lookups. */
	ulFreq = 0;
	for (ui32Idx = 0; ui32Idx < i32NumOpps; ui32Idx++)
	{
		psOpp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &ulFreq);
		if (IS_ERR_OR_NULL(psOpp))
		{
			rcu_read_unlock();
			PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", ui32Idx));
			kfree(pui32Freqs);
			return PVRSRV_ERROR_NOT_SUPPORTED;
		}
		pui32Freqs[ui32Idx] = (IMG_UINT32)ulFreq;
		/* Step past this OPP so the next ceil lookup advances. */
		ulFreq++;
	}

	rcu_read_unlock();

	/* Assume the highest available frequency when in an unknown state. */
	pui32Freqs[i32NumOpps] = pui32Freqs[i32NumOpps - 1];

	psSysSpecificData->ui32SGXFreqListSize = i32NumOpps + 1;
	psSysSpecificData->pui32SGXFreqList = pui32Freqs;

	/* Start at the "unknown" index: no frequency request made yet. */
	psSysSpecificData->ui32SGXFreqListIndex = i32NumOpps;
#endif
	return PVRSRV_OK;
}
static mali_bool kbase_platform_init(struct kbase_device *kbdev) { struct device *dev = kbdev->dev; dev->platform_data = kbdev; #ifdef CONFIG_REPORT_VSYNC kbase_dev = kbdev; #endif kbdev->clk = devm_clk_get(dev, NULL); if (IS_ERR(kbdev->clk)) { printk("[mali-midgard] Failed to get clk\n"); return MALI_FALSE; } kbdev->regulator = devm_regulator_get(dev, KBASE_HI3635_PLATFORM_GPU_REGULATOR_NAME); if (IS_ERR(kbdev->regulator)) { printk("[mali-midgard] Failed to get regulator\n"); return MALI_FALSE; } #ifdef CONFIG_PM_DEVFREQ if (of_init_opp_table(dev) || opp_init_devfreq_table(dev, &mali_kbase_devfreq_profile.freq_table)) { printk("[mali-midgard] Failed to init devfreq_table\n"); kbdev->devfreq = NULL; } else { mali_kbase_devfreq_profile.initial_freq = clk_get_rate(kbdev->clk); rcu_read_lock(); mali_kbase_devfreq_profile.max_state = opp_get_opp_count(dev); rcu_read_unlock(); kbdev->devfreq = devfreq_add_device(dev, &mali_kbase_devfreq_profile, "mali_ondemand", NULL); } if (IS_ERR(kbdev->devfreq)) { printk("[mali-midgard] NULL pointer [kbdev->devFreq]\n"); return MALI_FALSE; } /* make devfreq function */ //mali_kbase_devfreq_profile.polling_ms = DEFAULT_POLLING_MS; #if KBASE_HI3635_GPU_IRDROP_ISSUE /* init update work */ sw_policy.kbdev = kbdev; INIT_WORK(&sw_policy.update, handle_switch_policy); #endif /* KBASE_HI3635_GPU_IRDROP_ISSUE */ #endif return MALI_TRUE; }
/*
 * SysDvfsInitialize
 *
 * Build and cache the list of SGX operating frequencies from the OPP layer.
 *
 * Returns PVRSRV_OK on success; PVRSRV_ERROR_NOT_SUPPORTED if the OPP table
 * is empty or a lookup fails; PVRSRV_ERROR_OUT_OF_MEMORY on kmalloc failure.
 */
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
#if !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	PVR_UNREFERENCED_PARAMETER(psSysSpecificData);
#else /* !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK) */
	IMG_UINT32 i, *freq_list;
	IMG_INT32 opp_count;
	unsigned long freq;
	struct opp *opp;

	/*
	 * We query and store the list of SGX frequencies just this once under the
	 * assumption that they are unchanging, e.g. no disabling of high frequency
	 * option for thermal management. This is currently valid for 4430 and 4460.
	 */
	/* The OPP API requires an RCU read-side section around the lookups. */
	rcu_read_lock();
	opp_count = opp_get_opp_count(&gpsPVRLDMDev->dev);
	if (opp_count < 1)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	/*
	 * Allocate the frequency list with a slot for each available frequency plus
	 * one additional slot to hold a designated frequency value to assume when in
	 * an unknown frequency state. GFP_ATOMIC: we are inside the RCU section,
	 * so the allocation must not sleep.
	 */
	freq_list = kmalloc((opp_count + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
	if (!freq_list)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/*
	 * Fill in frequency list from lowest to highest then finally the "unknown"
	 * frequency value. We use the highest available frequency as our assumed value
	 * when in an unknown state, because it is safer for APM and hardware recovery
	 * timers to be longer than intended rather than shorter.
	 */
	freq = 0;
	for (i = 0; i < opp_count; i++)
	{
		opp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &freq);
		if (IS_ERR_OR_NULL(opp))
		{
			rcu_read_unlock();
			PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", i));
			kfree(freq_list);
			return PVRSRV_ERROR_NOT_SUPPORTED;
		}
		freq_list[i] = (IMG_UINT32)freq;
		/* Bump past the matched OPP so the next ceil lookup finds the next one. */
		freq++;
	}
	rcu_read_unlock();
	freq_list[opp_count] = freq_list[opp_count - 1];

	psSysSpecificData->ui32SGXFreqListSize = opp_count + 1;
	psSysSpecificData->pui32SGXFreqList = freq_list;

	/* Start in unknown state - no frequency request to DVFS yet made */
	psSysSpecificData->ui32SGXFreqListIndex = opp_count;
#endif /* !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK) */

	return PVRSRV_OK;
}
/*
 * ddr_devfreq_probe - probe one DDR devfreq platform device.
 *
 * Reads the "pm_qos_class" type from the device node to select which of the
 * two PM-QoS data sets to use ("memory_tput" or "memory_tput_up_threshold"),
 * acquires the set/get clocks, builds a devfreq table from the OPP table and
 * registers a devfreq device using the "pm_qos" governor.
 *
 * Returns 0 on success, negative errno on failure. Resources are unwound
 * through the goto-chain labels at the bottom.
 */
static int ddr_devfreq_probe(struct platform_device *pdev)
{
	struct ddr_devfreq_device *ddev = NULL;
	struct device_node *np = pdev->dev.of_node;
	struct devfreq_pm_qos_data *ddata = NULL;
	const char *type = NULL;
	int ret = 0;
#ifdef CONFIG_INPUT_PULSE_SUPPORT
	int rc = 0;
	/* One-time setup of the input-driven DDR-frequency boost, shared
	 * across the two probes of this driver. */
	static int inited = 0;
	struct device_node *root = NULL;

	if (inited == 0) {
		root = of_find_compatible_node(NULL, NULL, "hisilicon,ddrfreq_boost");
		if (!root) {
			pr_err("%s hisilicon,ddrfreq_boost no root node\n",__func__);
		} else {
			of_property_read_u32_array(root, "switch-value", &boost_ddrfreq_switch, 0x1);
			pr_err("switch-value: %d", boost_ddrfreq_switch);
			if (boost_ddrfreq_switch != 0x0) {
				of_property_read_u32_array(root, "ddrfreq_dfs_value", &boost_ddr_dfs_band, 0x1);
				of_property_read_u32_array(root, "ddrfreq_dfs_last", &boost_ddr_dfs_last, 0x1);
				pr_err("boost_ddr_dfs_band: %d, boost_ddr_dfs_last: %d\n", boost_ddr_dfs_band, boost_ddr_dfs_last);
				/* Boost on input events; registration failure is non-fatal. */
				rc = input_register_handler(&ddrfreq_input_handler);
				if (rc)
					pr_warn("%s: failed to register input handler\n", __func__);
				down_wq = alloc_workqueue("ddrfreq_down", 0, 1);
				if (!down_wq)
					return -ENOMEM;
				INIT_WORK(&inputopen.inputopen_work, ddrfreq_input_open);
				ddrfreq_min_req.pm_qos_class = 0;
				atomic_set(&flag, 0x0);
				INIT_DELAYED_WORK(&ddrfreq_begin, (work_func_t)ddrfreq_begin_work);
				INIT_DELAYED_WORK(&ddrfreq_end, (work_func_t)ddrfreq_end_work);
			}
		}
		inited = 1;
	}
#endif /*#ifdef CONFIG_INPUT_PULSE_SUPPORT*/

	if (!np) {
		pr_err("%s: %s %d, no device node\n", MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto out;
	}

	/* Select the PM-QoS data set based on the node's "pm_qos_class". */
	ret = of_property_read_string(np, "pm_qos_class", &type);
	if (ret) {
		pr_err("%s: %s %d, no type\n", MODULE_NAME, __func__, __LINE__);
		ret = -EINVAL;
		goto no_type;
	}

	if (!strcmp("memory_tput", type)) {
		ret=of_property_read_u32_array(np, "pm_qos_data_reg", (u32 *)&ddr_devfreq_pm_qos_data, 0x2);
		if (ret) {
			/* Missing tuning data is non-fatal: keep the built-in defaults. */
			pr_err("%s: %s %d, no type\n", MODULE_NAME, __func__, __LINE__);
		}
		pr_err("%s: %s %d, per_hz %d utilization %d\n", MODULE_NAME, __func__,
			__LINE__,ddr_devfreq_pm_qos_data.bytes_per_sec_per_hz,ddr_devfreq_pm_qos_data.bd_utilization);
		ddata = &ddr_devfreq_pm_qos_data;
		dev_set_name(&pdev->dev, "ddrfreq");
	} else if (!strcmp("memory_tput_up_threshold", type)) {
		ret = of_property_read_u32_array(np, "pm_qos_data_reg", (u32 *) &ddr_devfreq_up_th_pm_qos_data, 0x2);
		if (ret) {
			pr_err("%s: %s %d, no type\n", MODULE_NAME, __func__, __LINE__);
		}
		pr_err("%s: %s %d, per_hz %d utilization %d\n", MODULE_NAME, __func__,
			__LINE__,ddr_devfreq_up_th_pm_qos_data.bytes_per_sec_per_hz,ddr_devfreq_up_th_pm_qos_data.bd_utilization);
		ddata = &ddr_devfreq_up_th_pm_qos_data;
		dev_set_name(&pdev->dev, "ddrfreq_up_threshold");
	} else {
		pr_err("%s: %s %d, err type\n", MODULE_NAME, __func__, __LINE__);
		ret = -EINVAL;
		goto err_type;
	}

	ddev = kmalloc(sizeof(struct ddr_devfreq_device), GFP_KERNEL);
	if (!ddev) {
		pr_err("%s: %s %d, no mem\n", MODULE_NAME, __func__, __LINE__);
		ret = -ENOMEM;
		goto no_men;
	}

	/* clk 0 requests a new frequency; clk 1 reports the current one. */
	ddev->set = of_clk_get(np, 0);
	if (IS_ERR(ddev->set)) {
		pr_err("%s: %s %d, Failed to get set-clk\n", MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto no_clk1;
	}

	ddev->get = of_clk_get(np, 1);
	if (IS_ERR(ddev->get)) {
		pr_err("%s: %s %d, Failed to get get-clk\n", MODULE_NAME, __func__, __LINE__);
		ret = -ENODEV;
		goto no_clk2;
	}

	/* A missing OPP table is tolerated: continue with devfreq disabled. */
	if (of_init_opp_table(&pdev->dev) ||
		opp_init_devfreq_table(&pdev->dev, &ddr_devfreq_dev_profile.freq_table)) {
		ddev->devfreq = NULL;
	} else {
		ddr_devfreq_dev_profile.initial_freq = clk_get_rate(ddev->get);

		/* opp_get_opp_count() must be called under the RCU read lock. */
		rcu_read_lock();
		ddr_devfreq_dev_profile.max_state = opp_get_opp_count(&pdev->dev);
		rcu_read_unlock();

		ddev->devfreq = devfreq_add_device(&pdev->dev,
					&ddr_devfreq_dev_profile,
					"pm_qos",
					ddata);
	}

	if (IS_ERR(ddev->devfreq)) {
		pr_err("%s: %s %d, <%s>, Failed to init ddr devfreq_table\n", MODULE_NAME, __func__, __LINE__, type);
		ret = -ENODEV;
		goto no_devfreq;
	}

	/*
	 * cache value.
	 * It does not mean actual ddr clk currently,
	 * but a frequency cache of every clk setting in the module.
	 * Because, it is not obligatory that setting value is equal to
	 * the currently actual ddr clk frequency.
	 */
	ddev->freq = 0;

	if(ddev->devfreq) {
		ddev->devfreq->max_freq = ddr_devfreq_dev_profile.freq_table[ddr_devfreq_dev_profile.max_state - 1];
		/* NOTE(review): min_freq is also set to the HIGHEST table entry,
		 * pinning the device at max frequency until something lowers
		 * min_freq. Looks intentional (boot-time pin) — confirm; if a
		 * real floor was intended this should be freq_table[0]. */
		ddev->devfreq->min_freq = ddr_devfreq_dev_profile.freq_table[ddr_devfreq_dev_profile.max_state - 1];
	}

	platform_set_drvdata(pdev, ddev);
	pr_info("%s: <%s> ready\n", MODULE_NAME, type);
	return ret;

	/* Error unwind: each label releases what was acquired before it. */
no_devfreq:
	clk_put(ddev->get);
	opp_free_devfreq_table(&pdev->dev, &ddr_devfreq_dev_profile.freq_table);
no_clk2:
	clk_put(ddev->set);
no_clk1:
	kfree(ddev);
no_men:
err_type:
no_type:
out:
	return ret;
}