/*
 * omap2_set_init_voltage() - set a voltage domain to its boot-up OPP voltage
 * @vdd_name:	voltage domain name ("mpu", "core", ...)
 * @clk_name:	clock whose current rate selects the OPP entry
 * @oh_name:	hwmod name used to find the device owning the OPP table
 *
 * This API is to be called during init to set the various voltage
 * domains to the voltage as per the opp table. Typically we boot up
 * at the nominal voltage. So this function finds out the rate of
 * the clock associated with the voltage domain, finds out the correct
 * opp entry and sets the voltage domain to the voltage specified
 * in the opp entry.
 *
 * Returns 0 on success, -EINVAL on any failure.
 */
static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
					 const char *oh_name)
{
	struct voltagedomain *voltdm;
	struct clk *clk;
	struct opp *opp;
	unsigned long freq, bootup_volt;
	struct device *dev;

	if (!vdd_name || !clk_name || !oh_name) {
		pr_err("%s: invalid parameters\n", __func__);
		/*
		 * Return directly: the exit label formats vdd_name with %s,
		 * which may be NULL on this path.
		 */
		return -EINVAL;
	}

	dev = omap_device_get_by_hwmod_name(oh_name);
	if (IS_ERR(dev)) {
		pr_err("%s: Unable to get dev pointer for hwmod %s\n",
		       __func__, oh_name);
		goto exit;
	}

	voltdm = voltdm_lookup(vdd_name);
	if (IS_ERR(voltdm)) {
		pr_err("%s: unable to get vdd pointer for vdd_%s\n",
		       __func__, vdd_name);
		goto exit;
	}

	clk = clk_get(NULL, clk_name);
	if (IS_ERR(clk)) {
		pr_err("%s: unable to get clk %s\n", __func__, clk_name);
		goto exit;
	}

	/* Use the public clk API instead of peeking at clk->rate directly */
	freq = clk_get_rate(clk);
	clk_put(clk);

	opp = opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		pr_err("%s: unable to find boot up OPP for vdd_%s\n",
		       __func__, vdd_name);
		goto exit;
	}

	bootup_volt = opp_get_voltage(opp);
	if (!bootup_volt) {
		pr_err("%s: unable to find voltage corresponding "
		       "to the bootup OPP for vdd_%s\n", __func__, vdd_name);
		goto exit;
	}

	/* The scale itself can fail; don't report success if it did */
	if (voltdm_scale(voltdm, bootup_volt))
		goto exit;

	return 0;

exit:
	pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
	return -EINVAL;
}
/*
 * SysDvfsInitialize - snapshot the SGX OPP frequencies into a lookup table.
 *
 * Builds a list of every available SGX frequency (lowest to highest) plus one
 * trailing slot that mirrors the highest frequency; that last slot is the
 * value assumed while the frequency state is unknown, and the list index is
 * initialised to it.
 */
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
#if !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	PVR_UNREFERENCED_PARAMETER(psSysSpecificData);
#else
	IMG_UINT32 ui32Idx, *pui32FreqTable;
	IMG_INT32 i32NumOpp;
	unsigned long ulFreq = 0;
	struct opp *psOpp;

	/* The opp_* accessors must be called under the RCU read lock */
	rcu_read_lock();

	i32NumOpp = opp_get_opp_count(&gpsPVRLDMDev->dev);
	if (i32NumOpp < 1)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	/* One extra slot for the frequency assumed in the "unknown" state */
	pui32FreqTable = kmalloc((i32NumOpp + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
	if (!pui32FreqTable)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* Walk the OPPs lowest to highest; bump ulFreq past each match */
	for (ui32Idx = 0; ui32Idx < i32NumOpp; ui32Idx++)
	{
		psOpp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &ulFreq);
		if (IS_ERR_OR_NULL(psOpp))
		{
			rcu_read_unlock();
			PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", ui32Idx));
			kfree(pui32FreqTable);
			return PVRSRV_ERROR_NOT_SUPPORTED;
		}
		pui32FreqTable[ui32Idx] = (IMG_UINT32)ulFreq;
		ulFreq++;
	}

	rcu_read_unlock();

	/* Unknown state defaults to the highest available frequency */
	pui32FreqTable[i32NumOpp] = pui32FreqTable[i32NumOpp - 1];

	psSysSpecificData->ui32SGXFreqListSize = i32NumOpp + 1;
	psSysSpecificData->pui32SGXFreqList = pui32FreqTable;
	psSysSpecificData->ui32SGXFreqListIndex = i32NumOpp;
#endif

	return PVRSRV_OK;
}
/*
 * omap2_set_init_voltage() - put a voltage domain at its boot-up OPP voltage
 * @vdd_name:	voltage domain name ("mpu", "core", ...)
 * @clk_name:	clock whose current rate selects the OPP entry
 * @dev:	device owning the OPP table
 *
 * This API is to be called during init to put the various voltage
 * domains to the voltage as per the opp table. Typically we boot up
 * at the nominal voltage. So this function finds out the rate of
 * the clock associated with the voltage domain, finds out the correct
 * opp entry and puts the voltage domain to the voltage specified
 * in the opp entry.
 *
 * Returns 0 on success, -EINVAL on any failure.
 */
static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
					 struct device *dev)
{
	struct voltagedomain *voltdm;
	struct clk *clk;
	struct opp *opp;
	unsigned long freq, bootup_volt;

	if (!vdd_name || !clk_name || !dev) {
		printk(KERN_ERR "%s: Invalid parameters!\n", __func__);
		goto exit;
	}

	voltdm = omap_voltage_domain_lookup(vdd_name);
	if (IS_ERR(voltdm)) {
		printk(KERN_ERR "%s: Unable to get vdd pointer for vdd_%s\n",
			__func__, vdd_name);
		goto exit;
	}

	clk = clk_get(NULL, clk_name);
	if (IS_ERR(clk)) {
		printk(KERN_ERR "%s: unable to get clk %s\n",
			__func__, clk_name);
		goto exit;
	}

	freq = clk->rate;
	clk_put(clk);

	opp = opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp)) {
		printk(KERN_ERR "%s: unable to find boot up OPP for vdd_%s\n",
			__func__, vdd_name);
		goto exit;
	}

	bootup_volt = opp_get_voltage(opp);
	if (!bootup_volt) {
		/*
		 * Fixed: the two adjacent string literals previously
		 * concatenated to "correspondingto" (missing space).
		 */
		printk(KERN_ERR "%s: unable to find voltage corresponding "
			"to the bootup OPP for vdd_%s\n", __func__, vdd_name);
		goto exit;
	}

	omap_voltage_scale_vdd(voltdm, bootup_volt);
	return 0;

exit:
	printk(KERN_ERR "%s: Unable to put vdd_%s to its init voltage\n\n",
		__func__, vdd_name);
	return -EINVAL;
}
/*
 * omap_device_scale_gpu() - scale the GPU, managing the DPLL-cascading blocker
 * @req_dev:	device requesting the scaling
 * @target_dev:	GPU device to be scaled
 * @rate:	requested rate
 *
 * A request above the device's lowest OPP frequency blocks low-power DPLL
 * cascading; a request at the lowest frequency releases the blocker.  The
 * actual scaling is delegated to omap_device_scale().
 */
int omap_device_scale_gpu(struct device *req_dev, struct device *target_dev,
			  unsigned long rate)
{
	unsigned long freq = 0;
	struct opp *opp;

	/*
	 * Find the lowest OPP frequency.  Previously the error pointer was
	 * ignored, leaving freq == 0 so any non-zero rate held the blocker;
	 * on lookup failure we now fall through to the release path.
	 */
	opp = opp_find_freq_ceil(target_dev, &freq);
	if (!IS_ERR(opp) && rate > freq)
		omap4_dpll_cascading_blocker_hold(target_dev);
	else
		omap4_dpll_cascading_blocker_release(target_dev);

	return omap_device_scale(req_dev, target_dev, rate);
}
/*
 * step_up() - return the OPP @step levels above the current one
 * @data:	busfreq state holding the current and maximum OPPs
 * @step:	number of levels to climb
 *
 * Walks the OPP table upwards one entry at a time, stopping early at the
 * maximum OPP.  If a lookup fails (table exhausted or entries disabled)
 * the last valid OPP is returned instead of an ERR_PTR, so callers never
 * receive an error pointer from here.
 */
static struct opp __maybe_unused *step_up(struct busfreq_data *data, int step)
{
	int i;
	struct opp *opp = data->curr_opp;
	struct opp *next;
	unsigned long newfreq;

	if (data->max_opp == data->curr_opp)
		return data->curr_opp;

	for (i = 0; i < step; i++) {
		/* One past the current frequency selects the next OPP up */
		newfreq = opp_get_freq(opp) + 1;
		next = opp_find_freq_ceil(data->dev, &newfreq);
		if (IS_ERR(next))
			break;	/* keep the last good OPP; don't leak ERR_PTR */
		opp = next;
		if (opp == data->max_opp)
			break;
	}

	return opp;
}
int exynos5250_init(struct device *dev, struct busfreq_data *data) { unsigned int i, tmp; unsigned long maxfreq = ULONG_MAX; unsigned long minfreq = 0; unsigned long cdrexfreq; unsigned long lrbusfreq; struct clk *clk; int ret; /* Enable pause function for DREX2 DVFS */ drex2_pause_ctrl = __raw_readl(EXYNOS5_DREX2_PAUSE); drex2_pause_ctrl |= DMC_PAUSE_ENABLE; __raw_writel(drex2_pause_ctrl, EXYNOS5_DREX2_PAUSE); clk = clk_get(NULL, "mclk_cdrex"); if (IS_ERR(clk)) { dev_err(dev, "Fail to get mclk_cdrex clock"); ret = PTR_ERR(clk); return ret; } cdrexfreq = clk_get_rate(clk) / 1000; clk_put(clk); clk = clk_get(NULL, "aclk_266"); if (IS_ERR(clk)) { dev_err(dev, "Fail to get aclk_266 clock"); ret = PTR_ERR(clk); return ret; } lrbusfreq = clk_get_rate(clk) / 1000; clk_put(clk); if (cdrexfreq == 800000) { clkdiv_cdrex = clkdiv_cdrex_for800; exynos5_busfreq_table_mif = exynos5_busfreq_table_for800; exynos5_mif_volt = exynos5_mif_volt_for800; } else if (cdrexfreq == 666857) { clkdiv_cdrex = clkdiv_cdrex_for667; exynos5_busfreq_table_mif = exynos5_busfreq_table_for667; exynos5_mif_volt = exynos5_mif_volt_for667; } else if (cdrexfreq == 533000) { clkdiv_cdrex = clkdiv_cdrex_for533; exynos5_busfreq_table_mif = exynos5_busfreq_table_for533; exynos5_mif_volt = exynos5_mif_volt_for533; } else if (cdrexfreq == 400000) { clkdiv_cdrex = clkdiv_cdrex_for400; exynos5_busfreq_table_mif = exynos5_busfreq_table_for400; exynos5_mif_volt = exynos5_mif_volt_for400; } else { dev_err(dev, "Don't support cdrex table\n"); return -EINVAL; } tmp = __raw_readl(EXYNOS5_CLKDIV_LEX); for (i = LV_0; i < LV_INT_END; i++) { tmp &= ~(EXYNOS5_CLKDIV_LEX_ATCLK_LEX_MASK | EXYNOS5_CLKDIV_LEX_PCLK_LEX_MASK); tmp |= ((clkdiv_lex[i][0] << EXYNOS5_CLKDIV_LEX_ATCLK_LEX_SHIFT) | (clkdiv_lex[i][1] << EXYNOS5_CLKDIV_LEX_PCLK_LEX_SHIFT)); data->lex_divtable[i] = tmp; } tmp = __raw_readl(EXYNOS5_CLKDIV_R0X); for (i = LV_0; i < LV_INT_END; i++) { tmp &= ~EXYNOS5_CLKDIV_R0X_PCLK_R0X_MASK; tmp |= (clkdiv_r0x[i][0] << 
EXYNOS5_CLKDIV_R0X_PCLK_R0X_SHIFT); data->r0x_divtable[i] = tmp; } tmp = __raw_readl(EXYNOS5_CLKDIV_R1X); for (i = LV_0; i < LV_INT_END; i++) { tmp &= ~EXYNOS5_CLKDIV_R1X_PCLK_R1X_MASK; tmp |= (clkdiv_r1x[i][0] << EXYNOS5_CLKDIV_R1X_PCLK_R1X_SHIFT); data->r1x_divtable[i] = tmp; } tmp = __raw_readl(EXYNOS5_CLKDIV_CDREX); if (samsung_rev() < EXYNOS5250_REV_1_0) { for (i = LV_0; i < LV_MIF_END; i++) { tmp &= ~(EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_MASK | EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_MASK | EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_MASK | EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_MASK | EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_MASK | EXYNOS5_CLKDIV_CDREX_ACLK_CLK400_MASK | EXYNOS5_CLKDIV_CDREX_ACLK_C2C200_MASK | EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_MASK); tmp |= ((clkdiv_cdrex[i][0] << EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_SHIFT) | (clkdiv_cdrex[i][1] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_SHIFT) | (clkdiv_cdrex[i][2] << EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_SHIFT) | (clkdiv_cdrex[i][3] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_SHIFT) | (clkdiv_cdrex[i][4] << EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_SHIFT) | (clkdiv_cdrex[i][5] << EXYNOS5_CLKDIV_CDREX_ACLK_CLK400_SHIFT) | (clkdiv_cdrex[i][6] << EXYNOS5_CLKDIV_CDREX_ACLK_C2C200_SHIFT) | (clkdiv_cdrex[i][8] << EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_SHIFT)); data->cdrex_divtable[i] = tmp; } } else { for (i = LV_0; i < LV_MIF_END; i++) { tmp &= ~(EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_MASK | EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_MASK | EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_MASK | EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_MASK | EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_MASK | EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_MASK); tmp |= ((clkdiv_cdrex[i][0] << EXYNOS5_CLKDIV_CDREX_MCLK_DPHY_SHIFT) | (clkdiv_cdrex[i][1] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX2_SHIFT) | (clkdiv_cdrex[i][2] << EXYNOS5_CLKDIV_CDREX_ACLK_CDREX_SHIFT) | (clkdiv_cdrex[i][3] << EXYNOS5_CLKDIV_CDREX_MCLK_CDREX_SHIFT) | (clkdiv_cdrex[i][4] << EXYNOS5_CLKDIV_CDREX_PCLK_CDREX_SHIFT) | (clkdiv_cdrex[i][8] << EXYNOS5_CLKDIV_CDREX_ACLK_EFCON_SHIFT)); data->cdrex_divtable[i] = tmp; } } if 
(samsung_rev() < EXYNOS5250_REV_1_0) { tmp = __raw_readl(EXYNOS5_CLKDIV_CDREX2); for (i = LV_0; i < LV_MIF_END; i++) { tmp &= ~EXYNOS5_CLKDIV_CDREX2_MCLK_EFPHY_MASK; tmp |= clkdiv_cdrex[i][7] << EXYNOS5_CLKDIV_CDREX2_MCLK_EFPHY_SHIFT; data->cdrex2_divtable[i] = tmp; } } exynos5250_set_bus_volt(); data->dev[PPMU_MIF] = dev; data->dev[PPMU_INT] = &busfreq_for_int; for (i = LV_0; i < LV_MIF_END; i++) { ret = opp_add(data->dev[PPMU_MIF], exynos5_busfreq_table_mif[i].mem_clk, exynos5_busfreq_table_mif[i].volt); if (ret) { dev_err(dev, "Fail to add opp entries.\n"); return ret; } } #if defined(CONFIG_DP_60HZ_P11) || defined(CONFIG_DP_60HZ_P10) if (cdrexfreq == 666857) { opp_disable(data->dev[PPMU_MIF], 334000); opp_disable(data->dev[PPMU_MIF], 110000); } else if (cdrexfreq == 533000) { opp_disable(data->dev[PPMU_MIF], 267000); opp_disable(data->dev[PPMU_MIF], 107000); } else if (cdrexfreq == 400000) { opp_disable(data->dev[PPMU_MIF], 267000); opp_disable(data->dev[PPMU_MIF], 100000); } #endif for (i = LV_0; i < LV_INT_END; i++) { ret = opp_add(data->dev[PPMU_INT], exynos5_busfreq_table_int[i].mem_clk, exynos5_busfreq_table_int[i].volt); if (ret) { dev_err(dev, "Fail to add opp entries.\n"); return ret; } } data->target = exynos5250_target; data->get_table_index = exynos5250_get_table_index; data->monitor = exynos5250_monitor; data->busfreq_suspend = exynos5250_suspend; data->busfreq_resume = exynos5250_resume; data->sampling_rate = usecs_to_jiffies(100000); data->table[PPMU_MIF] = exynos5_busfreq_table_mif; data->table[PPMU_INT] = exynos5_busfreq_table_int; /* Find max frequency for mif */ data->max_freq[PPMU_MIF] = opp_get_freq(opp_find_freq_floor(data->dev[PPMU_MIF], &maxfreq)); data->min_freq[PPMU_MIF] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &minfreq)); data->curr_freq[PPMU_MIF] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &cdrexfreq)); /* Find max frequency for int */ maxfreq = ULONG_MAX; minfreq = 0; data->max_freq[PPMU_INT] = 
opp_get_freq(opp_find_freq_floor(data->dev[PPMU_INT], &maxfreq)); data->min_freq[PPMU_INT] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &minfreq)); data->curr_freq[PPMU_INT] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &lrbusfreq)); data->vdd_reg[PPMU_INT] = regulator_get(NULL, "vdd_int"); if (IS_ERR(data->vdd_reg[PPMU_INT])) { pr_err("failed to get resource %s\n", "vdd_int"); return -ENODEV; } data->vdd_reg[PPMU_MIF] = regulator_get(NULL, "vdd_mif"); if (IS_ERR(data->vdd_reg[PPMU_MIF])) { pr_err("failed to get resource %s\n", "vdd_mif"); regulator_put(data->vdd_reg[PPMU_INT]); return -ENODEV; } data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend; data->busfreq_early_suspend_handler.resume = &busfreq_late_resume; data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend; data->busfreq_early_suspend_handler.resume = &busfreq_late_resume; /* Request min 300MHz for MIF and 150MHz for INT*/ dev_lock(dev, dev, 300150); register_early_suspend(&data->busfreq_early_suspend_handler); tmp = __raw_readl(EXYNOS5_ABBG_INT_CONTROL); tmp &= ~(0x1f | (1 << 31) | (1 << 7)); tmp |= ((8 + INT_RBB) | (1 << 31) | (1 << 7)); __raw_writel(tmp, EXYNOS5_ABBG_INT_CONTROL); return 0; }
/*
 * exynos5250_monitor() - pick the next MIF and INT OPPs from PPMU load data.
 * @data:    busfreq state (current/max frequencies, load history)
 * @mif_opp: out: chosen MIF OPP
 * @int_opp: out: chosen INT OPP
 *
 * Reads the PPMU counters, normalises each load to the maximum frequency,
 * updates a circular load history, and derives target MIF/INT frequencies
 * from thresholds on the instantaneous and averaged loads.  The targets are
 * then raised to any externally locked frequency and rounded up to real OPPs.
 */
static void exynos5250_monitor(struct busfreq_data *data,
			struct opp **mif_opp, struct opp **int_opp)
{
	int i;
	unsigned int cpu_load_average = 0;
	unsigned int ddr_c_load_average = 0;
	unsigned int ddr_l_load_average = 0;
	unsigned int ddr_r1_load_average = 0;
	unsigned int right0_load_average = 0;
	unsigned int ddr_load_average;
	/* NOTE(review): cpufreq is initialised but never used in this variant */
	unsigned long cpufreq = 0;
	unsigned long freq_int_right0 = 0;
	unsigned long lockfreq[PPMU_TYPE_END];
	unsigned long freq[PPMU_TYPE_END];
	unsigned long cpu_load;
	unsigned long ddr_load=0;
	unsigned long ddr_load_int=0;
	unsigned long ddr_c_load;
	unsigned long ddr_r1_load;
	unsigned long ddr_l_load;
	unsigned long right0_load;
	struct opp *opp[PPMU_TYPE_END];
	unsigned long newfreq[PPMU_TYPE_END];

	ppmu_update(data->dev[PPMU_MIF], 3);

	/* Convert from base xxx to base maxfreq */
	cpu_load = div64_u64(ppmu_load[PPMU_CPU] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	ddr_c_load = div64_u64(ppmu_load[PPMU_DDR_C] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	ddr_r1_load = div64_u64(ppmu_load[PPMU_DDR_R1] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	ddr_l_load = div64_u64(ppmu_load[PPMU_DDR_L] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	right0_load = div64_u64(ppmu_load[PPMU_RIGHT0_BUS] * data->curr_freq[PPMU_INT], data->max_freq[PPMU_INT]);

	/* Record this sample in the circular history (data->index wraps) */
	data->load_history[PPMU_CPU][data->index] = cpu_load;
	data->load_history[PPMU_DDR_C][data->index] = ddr_c_load;
	data->load_history[PPMU_DDR_R1][data->index] = ddr_r1_load;
	data->load_history[PPMU_DDR_L][data->index] = ddr_l_load;
	data->load_history[PPMU_RIGHT0_BUS][data->index++] = right0_load;

	if (data->index >= LOAD_HISTORY_SIZE)
		data->index = 0;

	for (i = 0; i < LOAD_HISTORY_SIZE; i++) {
		cpu_load_average += data->load_history[PPMU_CPU][i];
		ddr_c_load_average += data->load_history[PPMU_DDR_C][i];
		ddr_r1_load_average += data->load_history[PPMU_DDR_R1][i];
		ddr_l_load_average += data->load_history[PPMU_DDR_L][i];
		right0_load_average += data->load_history[PPMU_RIGHT0_BUS][i];
	}

	/* Calculate average Load */
	cpu_load_average /= LOAD_HISTORY_SIZE;
	ddr_c_load_average /= LOAD_HISTORY_SIZE;
	ddr_r1_load_average /= LOAD_HISTORY_SIZE;
	ddr_l_load_average /= LOAD_HISTORY_SIZE;
	right0_load_average /= LOAD_HISTORY_SIZE;

	/* ddr_load tracks the busier of the DDR_C and DDR_L channels */
	if (ddr_c_load >= ddr_l_load) {
		ddr_load = ddr_c_load;
		ddr_load_average = ddr_c_load_average;
	} else {
		ddr_load = ddr_l_load;
		ddr_load_average = ddr_l_load_average;
	}

	ddr_load_int = ddr_load;

	/* Calculate MIF/INT frequency level */
	if (ddr_r1_load >= MIF_R1_THRESHOLD) {
		/* R1 saturated: force MIF to max; maybe INT too */
		freq[PPMU_MIF] = data->max_freq[PPMU_MIF];
		if (right0_load >= INT_RIGHT0_THRESHOLD) {
			freq[PPMU_INT] = data->max_freq[PPMU_INT];
			goto go_max;
		} else {
			freq_int_right0 = div64_u64(data->max_freq[PPMU_INT] * right0_load, INT_RIGHT0_THRESHOLD);
		}
	} else {
		/* Calculate next MIF frequency */
		if (ddr_load >= MIF_MAX_THRESHOLD) {
			freq[PPMU_MIF] = data->max_freq[PPMU_MIF];
		} else if ( ddr_load < IDLE_THRESHOLD) {
			/* Only step down when the average confirms the idle */
			if (ddr_load_average < IDLE_THRESHOLD)
				freq[PPMU_MIF] = step_down(data, PPMU_MIF, 1);
			else
				freq[PPMU_MIF] = data->curr_freq[PPMU_MIF];
		} else {
			/* Use the (clamped) average when it exceeds the sample */
			if (ddr_load < ddr_load_average) {
				ddr_load = ddr_load_average;
				if (ddr_load >= MIF_MAX_THRESHOLD)
					ddr_load = MIF_MAX_THRESHOLD;
			}
			freq[PPMU_MIF] = div64_u64(data->max_freq[PPMU_MIF] * ddr_load, MIF_MAX_THRESHOLD);
		}
		freq_int_right0 = div64_u64(data->max_freq[PPMU_INT] * right0_load, INT_RIGHT0_THRESHOLD);
	}

	/* Calculate next INT frequency (same shape as the MIF heuristic) */
	if (ddr_load_int >= INT_MAX_THRESHOLD) {
		freq[PPMU_INT] = data->max_freq[PPMU_INT];
	} else if ( ddr_load_int < IDLE_THRESHOLD) {
		if (ddr_load_average < IDLE_THRESHOLD)
			freq[PPMU_INT] = step_down(data, PPMU_INT, 1);
		else
			freq[PPMU_INT] = data->curr_freq[PPMU_INT];
	} else {
		if (ddr_load_int < ddr_load_average) {
			ddr_load_int = ddr_load_average;
			if (ddr_load_int >= INT_MAX_THRESHOLD)
				ddr_load_int = INT_MAX_THRESHOLD;
		}
		freq[PPMU_INT] = div64_u64(data->max_freq[PPMU_INT] * ddr_load_int, INT_MAX_THRESHOLD);
	}

	/* The right0-bus demand can further raise INT; max INT drags MIF up */
	freq[PPMU_INT] = max(freq[PPMU_INT], freq_int_right0);

	if (freq[PPMU_INT] == data->max_freq[PPMU_INT])
		freq[PPMU_MIF] = data->max_freq[PPMU_MIF];

go_max:
#ifdef BUSFREQ_PROFILE_DEBUG
	printk(KERN_DEBUG "cpu[%ld] l[%ld] c[%ld] r1[%ld] rt[%ld] m_load[%ld] i_load[%ld]\n",
			cpu_load, ddr_l_load, ddr_c_load, ddr_r1_load, right0_load, ddr_load, ddr_load_int);
#endif
	/*
	 * NOTE(review): both lock values come from the MIF device; the locked
	 * value appears to pack MIF and INT frequencies as MMMIII (cf. the
	 * dev_lock(dev, dev, 300150) request at init) -- confirm against
	 * dev_max_freq()'s contract.
	 */
	lockfreq[PPMU_MIF] = (dev_max_freq(data->dev[PPMU_MIF])/1000)*1000;
	lockfreq[PPMU_INT] = (dev_max_freq(data->dev[PPMU_MIF])%1000)*1000;
#ifdef BUSFREQ_PROFILE_DEBUG
	printk(KERN_DEBUG "i_cf[%ld] m_cf[%ld] i_nf[%ld] m_nf[%ld] lock_Mfreq[%ld] lock_Ifreq[%ld]\n",
			data->curr_freq[PPMU_INT],data->curr_freq[PPMU_MIF],freq[PPMU_INT], freq[PPMU_MIF],
			lockfreq[PPMU_MIF], lockfreq[PPMU_INT]);
#endif
	newfreq[PPMU_MIF] = max(lockfreq[PPMU_MIF], freq[PPMU_MIF]);
	newfreq[PPMU_INT] = max(lockfreq[PPMU_INT], freq[PPMU_INT]);

	/* Round the targets up to actual OPP entries */
	opp[PPMU_MIF] = opp_find_freq_ceil(data->dev[PPMU_MIF], &newfreq[PPMU_MIF]);
	opp[PPMU_INT] = opp_find_freq_ceil(data->dev[PPMU_INT], &newfreq[PPMU_INT]);

	*mif_opp = opp[PPMU_MIF];
	*int_opp = opp[PPMU_INT];
}
int exynos5250_init(struct device *dev, struct busfreq_data *data) { unsigned int i; unsigned long maxfreq = ULONG_MAX; unsigned long minfreq = 0; unsigned long cdrexfreq; unsigned long lrbusfreq; struct clk *clk; int ret; /* Enable pause function for DREX2 DVFS */ dmc_pause_ctrl = __raw_readl(EXYNOS5_DMC_PAUSE_CTRL); dmc_pause_ctrl |= DMC_PAUSE_ENABLE; __raw_writel(dmc_pause_ctrl, EXYNOS5_DMC_PAUSE_CTRL); clk = clk_get(NULL, "mout_cdrex"); if (IS_ERR(clk)) { dev_err(dev, "Fail to get mclk_cdrex clock"); ret = PTR_ERR(clk); return ret; } cdrexfreq = clk_get_rate(clk) / 1000; clk_put(clk); clk = clk_get(NULL, "aclk_266"); if (IS_ERR(clk)) { dev_err(dev, "Fail to get aclk_266 clock"); ret = PTR_ERR(clk); return ret; } lrbusfreq = clk_get_rate(clk) / 1000; clk_put(clk); if (cdrexfreq == 800000) { clkdiv_cdrex = clkdiv_cdrex_for800; exynos5_busfreq_table_mif = exynos5_busfreq_table_for800; exynos5_mif_volt = exynos5_mif_volt_for800; } else if (cdrexfreq == 666857) { clkdiv_cdrex = clkdiv_cdrex_for667; exynos5_busfreq_table_mif = exynos5_busfreq_table_for667; exynos5_mif_volt = exynos5_mif_volt_for667; } else if (cdrexfreq == 533000) { clkdiv_cdrex = clkdiv_cdrex_for533; exynos5_busfreq_table_mif = exynos5_busfreq_table_for533; exynos5_mif_volt = exynos5_mif_volt_for533; } else if (cdrexfreq == 400000) { clkdiv_cdrex = clkdiv_cdrex_for400; exynos5_busfreq_table_mif = exynos5_busfreq_table_for400; exynos5_mif_volt = exynos5_mif_volt_for400; } else { dev_err(dev, "Don't support cdrex table\n"); return -EINVAL; } exynos5250_set_bus_volt(); data->dev[PPMU_MIF] = dev; data->dev[PPMU_INT] = &busfreq_for_int; for (i = LV_0; i < LV_MIF_END; i++) { ret = opp_add(data->dev[PPMU_MIF], exynos5_busfreq_table_mif[i].mem_clk, exynos5_busfreq_table_mif[i].volt); if (ret) { dev_err(dev, "Fail to add opp entries.\n"); return ret; } } opp_disable(data->dev[PPMU_MIF], 107000); for (i = LV_0; i < LV_INT_END; i++) { ret = opp_add(data->dev[PPMU_INT], exynos5_busfreq_table_int[i].mem_clk, 
exynos5_busfreq_table_int[i].volt); if (ret) { dev_err(dev, "Fail to add opp entries.\n"); return ret; } } data->target = exynos5250_target; data->get_table_index = exynos5250_get_table_index; data->monitor = exynos5250_monitor; data->busfreq_suspend = exynos5250_suspend; data->busfreq_resume = exynos5250_resume; data->sampling_rate = usecs_to_jiffies(100000); data->table[PPMU_MIF] = exynos5_busfreq_table_mif; data->table[PPMU_INT] = exynos5_busfreq_table_int; /* Find max frequency for mif */ data->max_freq[PPMU_MIF] = opp_get_freq(opp_find_freq_floor(data->dev[PPMU_MIF], &maxfreq)); data->min_freq[PPMU_MIF] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &minfreq)); data->curr_freq[PPMU_MIF] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_MIF], &cdrexfreq)); /* Find max frequency for int */ maxfreq = ULONG_MAX; minfreq = 0; data->max_freq[PPMU_INT] = opp_get_freq(opp_find_freq_floor(data->dev[PPMU_INT], &maxfreq)); data->min_freq[PPMU_INT] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &minfreq)); data->curr_freq[PPMU_INT] = opp_get_freq(opp_find_freq_ceil(data->dev[PPMU_INT], &lrbusfreq)); data->vdd_reg[PPMU_INT] = regulator_get(NULL, "vdd_int"); if (IS_ERR(data->vdd_reg[PPMU_INT])) { pr_err("failed to get resource %s\n", "vdd_int"); return -ENODEV; } data->vdd_reg[PPMU_MIF] = regulator_get(NULL, "vdd_mif"); if (IS_ERR(data->vdd_reg[PPMU_MIF])) { pr_err("failed to get resource %s\n", "vdd_mif"); regulator_put(data->vdd_reg[PPMU_INT]); return -ENODEV; } data->busfreq_early_suspend_handler.suspend = &busfreq_early_suspend; data->busfreq_early_suspend_handler.resume = &busfreq_late_resume; /* Request min 300MHz */ dev_lock(dev, dev, 300000); register_early_suspend(&data->busfreq_early_suspend_handler); tmp = __raw_readl(EXYNOS5_ABBG_INT_CONTROL); tmp &= ~(0x1f | (1 << 31) | (1 << 7)); tmp |= ((8 + INT_RBB) | (1 << 31) | (1 << 7)); __raw_writel(tmp, EXYNOS5_ABBG_INT_CONTROL); return 0; }
/*
 * exynos5250_monitor() - pick the next MIF and INT OPPs from PPMU load data.
 * @data:    busfreq state (current/max frequencies, load history)
 * @mif_opp: out: chosen MIF OPP
 * @int_opp: out: chosen INT OPP
 *
 * Reads the PPMU counters, normalises each DMC load to the maximum MIF
 * frequency, updates a circular load history, and derives the target MIF
 * frequency from thresholds on the worst DMC channel load.  INT is pinned
 * at its maximum frequency (marked "temporary" below).
 */
static void exynos5250_monitor(struct busfreq_data *data,
			struct opp **mif_opp, struct opp **int_opp)
{
	int i;
	unsigned int cpu_load_average = 0;
	unsigned int dmc_c_load_average = 0;
	unsigned int dmc_l_load_average = 0;
	unsigned int dmc_r1_load_average = 0;
	unsigned int dmc_load_average;
	/* NOTE(review): cpufreq stays 0, so it never wins the max3() below */
	unsigned long cpufreq = 0;
	unsigned long lockfreq;
	unsigned long dmcfreq;
	unsigned long cpu_load;
	unsigned long dmc_load;
	unsigned long dmc_c_load;
	unsigned long dmc_r1_load;
	unsigned long dmc_l_load;
	struct opp *opp[PPMU_TYPE_END];
	unsigned long newfreq[PPMU_TYPE_END];

	ppmu_update(data->dev[PPMU_MIF], 3);

	/* Convert from base xxx to base maxfreq */
	cpu_load = div64_u64(ppmu_load[PPMU_CPU] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	dmc_c_load = div64_u64(ppmu_load[PPMU_DDR_C] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	dmc_r1_load = div64_u64(ppmu_load[PPMU_DDR_R1] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);
	dmc_l_load = div64_u64(ppmu_load[PPMU_DDR_L] * data->curr_freq[PPMU_MIF], data->max_freq[PPMU_MIF]);

	/* Record this sample in the circular history (data->index wraps) */
	data->load_history[PPMU_CPU][data->index] = cpu_load;
	data->load_history[PPMU_DDR_C][data->index] = dmc_c_load;
	data->load_history[PPMU_DDR_R1][data->index] = dmc_r1_load;
	data->load_history[PPMU_DDR_L][data->index++] = dmc_l_load;

	if (data->index >= LOAD_HISTORY_SIZE)
		data->index = 0;

	for (i = 0; i < LOAD_HISTORY_SIZE; i++) {
		cpu_load_average += data->load_history[PPMU_CPU][i];
		dmc_c_load_average += data->load_history[PPMU_DDR_C][i];
		dmc_r1_load_average += data->load_history[PPMU_DDR_R1][i];
		dmc_l_load_average += data->load_history[PPMU_DDR_L][i];
	}

	/* Calculate average Load */
	cpu_load_average /= LOAD_HISTORY_SIZE;
	dmc_c_load_average /= LOAD_HISTORY_SIZE;
	dmc_r1_load_average /= LOAD_HISTORY_SIZE;
	dmc_l_load_average /= LOAD_HISTORY_SIZE;

	/* dmc_load tracks the busiest of the C, R1 and L channels */
	if (dmc_c_load >= dmc_r1_load) {
		dmc_load = dmc_c_load;
		dmc_load_average = dmc_c_load_average;
	} else {
		dmc_load = dmc_r1_load;
		dmc_load_average = dmc_r1_load_average;
	}

	if (dmc_l_load >= dmc_load) {
		dmc_load = dmc_l_load;
		dmc_load_average = dmc_l_load_average;
	}

	/* Threshold heuristic: max out, step down when idle, else proportional */
	if (dmc_load >= DMC_MAX_THRESHOLD) {
		dmcfreq = data->max_freq[PPMU_MIF];
	} else if (dmc_load < IDLE_THRESHOLD) {
		/* Only step down when the average confirms the idle */
		if (dmc_load_average < IDLE_THRESHOLD)
			dmcfreq = step_down(data, PPMU_MIF, 1);
		else
			dmcfreq = data->curr_freq[PPMU_MIF];
	} else {
		/* Use the (clamped) average when it exceeds the sample */
		if (dmc_load < dmc_load_average) {
			dmc_load = dmc_load_average;
			if (dmc_load >= DMC_MAX_THRESHOLD)
				dmc_load = DMC_MAX_THRESHOLD;
		}
		dmcfreq = div64_u64(data->max_freq[PPMU_MIF] * dmc_load, DMC_MAX_THRESHOLD);
	}

	/* An external lock can raise the target above the heuristic's choice */
	lockfreq = dev_max_freq(data->dev[PPMU_MIF]);

	newfreq[PPMU_MIF] = max3(lockfreq, dmcfreq, cpufreq);

	opp[PPMU_MIF] = opp_find_freq_ceil(data->dev[PPMU_MIF], &newfreq[PPMU_MIF]);
	/*
	 * NOTE(review): opp_find_freq_ceil() writes the matched frequency back
	 * through its pointer argument, and here it is handed
	 * &data->max_freq[PPMU_INT] directly -- confirm the in-place update
	 * is intended.
	 */
	opp[PPMU_INT] = opp_find_freq_ceil(data->dev[PPMU_INT], &data->max_freq[PPMU_INT]);

	*mif_opp = opp[PPMU_MIF];
	/* temporary */
	*int_opp = opp[PPMU_INT];
}
/*
 * SysDvfsInitialize - cache the available SGX frequencies for DVFS.
 *
 * The frequency list is queried once, on the assumption it never changes
 * afterwards (e.g. no thermal disabling of high OPPs) - valid for 4430/4460.
 * The list holds every OPP frequency, lowest first, plus one extra slot:
 * the frequency assumed while the SGX frequency state is unknown.  The
 * highest frequency is used for that slot because over-long APM/hardware-
 * recovery timeouts are safer than too-short ones, and the list index is
 * initialised to it ("unknown" - no DVFS request made yet).
 */
PVRSRV_ERROR SysDvfsInitialize(SYS_SPECIFIC_DATA *psSysSpecificData)
{
#if !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	PVR_UNREFERENCED_PARAMETER(psSysSpecificData);
#else /* !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK) */
	IMG_UINT32 ui32Level, *pui32List;
	IMG_INT32 i32Count;
	unsigned long ulCeil = 0;
	struct opp *psOpp;

	/* opp_* accessors must run under the RCU read lock */
	rcu_read_lock();

	i32Count = opp_get_opp_count(&gpsPVRLDMDev->dev);
	if (i32Count < 1)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp count"));
		return PVRSRV_ERROR_NOT_SUPPORTED;
	}

	pui32List = kmalloc((i32Count + 1) * sizeof(IMG_UINT32), GFP_ATOMIC);
	if (!pui32List)
	{
		rcu_read_unlock();
		PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not allocate frequency list"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	ui32Level = 0;
	while (ui32Level < (IMG_UINT32)i32Count)
	{
		psOpp = opp_find_freq_ceil(&gpsPVRLDMDev->dev, &ulCeil);
		if (IS_ERR_OR_NULL(psOpp))
		{
			rcu_read_unlock();
			PVR_DPF((PVR_DBG_ERROR, "SysDvfsInitialize: Could not retrieve opp level %d", ui32Level));
			kfree(pui32List);
			return PVRSRV_ERROR_NOT_SUPPORTED;
		}
		pui32List[ui32Level] = (IMG_UINT32)ulCeil;
		ulCeil++;	/* move past this OPP so the next ceil advances */
		ui32Level++;
	}

	rcu_read_unlock();

	/* "Unknown" slot mirrors the highest discovered frequency */
	pui32List[i32Count] = pui32List[i32Count - 1];

	psSysSpecificData->ui32SGXFreqListSize = i32Count + 1;
	psSysSpecificData->pui32SGXFreqList = pui32List;

	/* Start in unknown state - no frequency request to DVFS yet made */
	psSysSpecificData->ui32SGXFreqListIndex = i32Count;
#endif /* !defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK) */

	return PVRSRV_OK;
}
/*
 * omap2_set_init_voltage() - set a voltage domain to its boot-up OPP voltage
 * @vdd_name:	voltage domain name ("mpu", "core", ...)
 * @clk_name:	clock whose current rate selects the OPP entry
 * @oh_name:	hwmod name used to find the device owning the OPP table
 *
 * This API is to be called during init to set the various voltage
 * domains to the voltage as per the opp table. Typically we boot up
 * at the nominal voltage. So this function finds out the rate of
 * the clock associated with the voltage domain, finds out the correct
 * opp entry and sets the voltage domain to the voltage specified
 * in the opp entry.
 *
 * Returns 0 on success, a negative errno otherwise (the specific error
 * code is now propagated instead of being collapsed to -EINVAL).
 */
static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
					 const char *oh_name)
{
	struct voltagedomain *voltdm;
	struct clk *clk;
	struct opp *opp;
	struct device *dev;
	unsigned long freq_cur, freq_valid, bootup_volt;
	int ret = -EINVAL;

	dev = omap_device_get_by_hwmod_name(oh_name);
	if (IS_ERR(dev)) {
		pr_err("%s: Unable to get dev pointer for hwmod %s\n",
		       __func__, oh_name);
		goto exit;
	}

	voltdm = voltdm_lookup(vdd_name);
	if (IS_ERR(voltdm)) {
		pr_err("%s: unable to get vdd pointer for vdd_%s\n",
		       __func__, vdd_name);
		goto exit;
	}

	clk = clk_get(NULL, clk_name);
	if (IS_ERR(clk)) {
		pr_err("%s: unable to get clk %s\n", __func__, clk_name);
		goto exit;
	}

	freq_cur = clk->rate;
	freq_valid = freq_cur;

	/* OPP lookups and opp_get_voltage() must run under RCU */
	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, &freq_valid);
	if (IS_ERR(opp)) {
		/* no OPP at or above the boot rate; try the one below it */
		opp = opp_find_freq_floor(dev, &freq_valid);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			pr_err("%s: no boot OPP match for %ld on vdd_%s\n",
			       __func__, freq_cur, vdd_name);
			ret = -ENOENT;
			goto exit_ck;
		}
	}

	bootup_volt = opp_get_voltage(opp);
	rcu_read_unlock();
	if (!bootup_volt) {
		pr_err("%s: unable to find voltage corresponding "
		       "to the bootup OPP for vdd_%s\n", __func__, vdd_name);
		ret = -ENOENT;
		goto exit_ck;
	}

	/*
	 * Frequency and Voltage have to be sequenced: if we move from
	 * a lower frequency to higher frequency, raise voltage, followed by
	 * frequency, and vice versa. we assume that the voltage at boot
	 * is the required voltage for the frequency it was set for.
	 * NOTE:
	 * we can check the frequency, but there is numerous ways to set
	 * voltage. We play the safe path and just set the voltage.
	 */
	if (freq_cur < freq_valid) {
		ret = voltdm_scale(voltdm, bootup_volt);
		if (ret) {
			pr_err("%s: Fail set voltage-%s(f=%ld v=%ld)on vdd%s\n",
			       __func__, vdd_name, freq_valid, bootup_volt,
			       vdd_name);
			goto exit_ck;
		}
	}

	/* Set freq only if there is a difference in freq */
	if (freq_valid != freq_cur) {
		ret = clk_set_rate(clk, freq_valid);
		if (ret) {
			pr_err("%s: Fail set clk-%s(f=%ld v=%ld)on vdd%s\n",
			       __func__, clk_name, freq_valid, bootup_volt,
			       vdd_name);
			goto exit_ck;
		}
	}

	if (freq_cur >= freq_valid) {
		ret = voltdm_scale(voltdm, bootup_volt);
		if (ret) {
			pr_err("%s: Fail set voltage-%s(f=%ld v=%ld)on vdd%s\n",
			       __func__, clk_name, freq_valid, bootup_volt,
			       vdd_name);
			goto exit_ck;
		}
	}

	ret = 0;

exit_ck:
	clk_put(clk);

	if (!ret)
		return 0;

exit:
	pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
	/* Fixed: propagate the real error instead of always -EINVAL */
	return ret;
}
/** * omap_device_set_rate - Set a new rate at which the device is to operate * @req_dev : pointer to the device requesting the scaling. * @dev : pointer to the device that is to be scaled * @rate : the rnew rate for the device. * * This API gets the device opp table associated with this device and * tries putting the device to the requested rate and the voltage domain * associated with the device to the voltage corresponding to the * requested rate. Since multiple devices can be assocciated with a * voltage domain this API finds out the possible voltage the * voltage domain can enter and then decides on the final device * rate. Return 0 on success else the error value */ int omap_device_set_rate(struct device *req_dev, struct device *dev, unsigned long rate) { struct omap_opp *opp; unsigned long volt, freq, min_freq, max_freq, flags; struct voltagedomain *voltdm; struct platform_device *pdev; struct omap_device *od; int ret; pdev = container_of(dev, struct platform_device, dev); od = _find_by_pdev(pdev); /* if in low power DPLL cascading mode, bail out early */ if (cpu_is_omap44xx()) { read_lock_irqsave(&dpll_cascading_lock, flags); if (in_dpll_cascading) { ret = -EINVAL; goto out; } } /* * Figure out if the desired frquency lies between the * maximum and minimum possible for the particular device */ min_freq = 0; if (IS_ERR(opp_find_freq_ceil(dev, &min_freq))) { dev_err(dev, "%s: Unable to find lowest opp\n", __func__); ret = -ENODEV; goto out; } max_freq = ULONG_MAX; if (IS_ERR(opp_find_freq_floor(dev, &max_freq))) { dev_err(dev, "%s: Unable to find highest opp\n", __func__); ret = -ENODEV; goto out; } if (rate < min_freq) freq = min_freq; else if (rate > max_freq) freq = max_freq; else freq = rate; /* Get the possible rate from the opp layer */ opp = opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) { dev_dbg(dev, "%s: Unable to find OPP for freq%ld\n", __func__, rate); ret = -ENODEV; goto out; } if (unlikely(freq != rate)) dev_dbg(dev, "%s: Available freq %ld 
!= dpll freq %ld.\n", __func__, freq, rate); /* Get the voltage corresponding to the requested frequency */ volt = opp_get_voltage(opp); /* * Call into the voltage layer to get the final voltage possible * for the voltage domain associated with the device. */ voltdm = od->hwmods[0]->voltdm; ret = omap_voltage_add_userreq(voltdm, req_dev, &volt); if (ret) { dev_err(dev, "%s: Unable to get the final volt for scaling\n", __func__); goto out; } /* Do the actual scaling */ ret = omap_voltage_scale(voltdm); out: if (cpu_is_omap44xx()) read_unlock_irqrestore(&dpll_cascading_lock, flags); return ret; }
/** * omap_device_scale() - Set a new rate at which the device is to operate * @req_dev: pointer to the device requesting the scaling. * @dev: pointer to the device that is to be scaled * @rate: the rnew rate for the device. * * This API gets the device opp table associated with this device and * tries putting the device to the requested rate and the voltage domain * associated with the device to the voltage corresponding to the * requested rate. Since multiple devices can be assocciated with a * voltage domain this API finds out the possible voltage the * voltage domain can enter and then decides on the final device * rate. Return 0 on success else the error value */ int omap_device_scale(struct device *req_dev, struct device *dev, unsigned long rate) { struct opp *opp; unsigned long volt, freq, min_freq, max_freq; struct voltagedomain *voltdm; struct platform_device *pdev; struct omap_device *od; int ret; pdev = container_of(dev, struct platform_device, dev); od = _find_by_pdev(pdev); /* * Figure out if the desired frquency lies between the * maximum and minimum possible for the particular device */ min_freq = 0; if (IS_ERR(opp_find_freq_ceil(dev, &min_freq))) { dev_err(dev, "%s: Unable to find lowest opp\n", __func__); return -ENODEV; } max_freq = ULONG_MAX; if (IS_ERR(opp_find_freq_floor(dev, &max_freq))) { dev_err(dev, "%s: Unable to find highest opp\n", __func__); return -ENODEV; } if (rate < min_freq) freq = min_freq; else if (rate > max_freq) freq = max_freq; else freq = rate; opp = opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) { dev_err(dev, "%s: Unable to find OPP for freq%ld\n", __func__, rate); return -ENODEV; } /* Get the voltage corresponding to the requested frequency */ volt = opp_get_voltage(opp); /* * Call into the voltage layer to get the final voltage possible * for the voltage domain associated with the device. 
*/ voltdm = od->hwmods[0]->voltdm; ret = omap_voltage_add_request(voltdm, req_dev, &volt); if (ret) { dev_err(dev, "%s: Unable to get the final volt for scaling\n", __func__); return ret; } /* Do the actual scaling */ return omap_voltage_scale(voltdm, volt); }