static int sec_gpu_clock_enable(void) { int err = 0; /* adonis must be set parent function after runtime pm resume */ err = gpu_clock_set_parent(); if (err) { return err; } /* if setting wakeup lock clock, resume clock using that*/ /* if different with current clock and default cleck, need to set clock*/ if (gpu_clock_get() != sec_gpu_setting_clock) gpu_clock_set(sec_gpu_setting_clock); if (gpu_voltage_get() != sec_gpu_setting_voltage) gpu_voltage_set(sec_gpu_setting_voltage); if (sec_wakeup_lock_state) { if (gpu_voltage_get() < WAKEUP_LOCK_VOLTAGE + gpu_voltage_marin) gpu_voltage_set(WAKEUP_LOCK_VOLTAGE + gpu_voltage_marin); if (gpu_clock_get() < WAKEUP_LOCK_CLOCK) gpu_clock_set(WAKEUP_LOCK_CLOCK); } err = gpu_clock_enable(); if (err) { return err; } /* wait for more than 10 clocks to proper reset SGX core */ OSWaitus(1); return err; }
void sec_gpu_dvfs_handler(int utilization_value) { /*utilization_value is zero mean is gpu going to idle*/ if (utilization_value == 0) return; sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get()); /* this check for current clock must be find in dvfs table */ if (sgx_dvfs_level < 0) { PVR_LOG(("WARN: current clock: %d MHz not found in DVFS table. so set to max clock", gpu_clock_get())); sec_gpu_vol_clk_change(gdata[BASE_START_LEVEL].clock, gdata[BASE_START_LEVEL].voltage); return; } PVR_DPF((PVR_DBG_MESSAGE, "INFO: AUTO DVFS [%d MHz] <%d, %d>, utilization [%d]", gpu_clock_get(), gdata[sgx_dvfs_level].min_threadhold, gdata[sgx_dvfs_level].max_threadhold, utilization_value)); /* check current level's threadhold value */ if (gdata[sgx_dvfs_level].min_threadhold > utilization_value) { /* need to down current clock */ sgx_dvfs_level = sec_clock_change_down(sgx_dvfs_level, BASE_DOWN_STEP_LEVEL); } else if (gdata[sgx_dvfs_level].max_threadhold < utilization_value) { /* need to up current clock */ sgx_dvfs_level = sec_clock_change_up(sgx_dvfs_level, BASE_UP_STEP_LEVEL); } else sgx_dvfs_down_requirement = gdata[sgx_dvfs_level].stay_total_count; g_g3dfreq = gdata[sgx_dvfs_level].clock; }
void sec_gpu_dvfs_down_requirement_reset() { int level; level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get()); sgx_dvfs_down_requirement = gdata[level].stay_total_count; }
/* gpu clock setting*/ void sec_gpu_vol_clk_change(int sgx_clock, int sgx_voltage) { int cur_sgx_clock; mutex_lock(&lock); cur_sgx_clock = gpu_clock_get(); sgx_voltage += gpu_voltage_marin; #if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ) if (sec_gpu_power_on) { if (sgx_clock >= sec_gpu_top_clock) { #ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ pm_qos_update_request(&exynos5_g3d_cpu_qos, 600000); #else pm_qos_update_request(&exynos5_g3d_cpu_qos, 800000); #endif } if (sgx_clock < MIF_THRESHHOLD_VALUE_CLK) pm_qos_update_request(&exynos5_g3d_mif_qos, 267000); else pm_qos_update_request(&exynos5_g3d_mif_qos, 800000); } else { pm_qos_update_request(&exynos5_g3d_cpu_qos, 0); pm_qos_update_request(&exynos5_g3d_int_qos, 0); pm_qos_update_request(&exynos5_g3d_mif_qos, 0); } #endif if (sec_gpu_power_on) { if (cur_sgx_clock > sgx_clock) { gpu_clock_set(sgx_clock); gpu_voltage_set(sgx_voltage); } else if (cur_sgx_clock < sgx_clock) { gpu_voltage_set(sgx_voltage); gpu_clock_set(sgx_clock); } sec_gpu_setting_clock = gpu_clock_get(); sec_gpu_setting_voltage = gpu_voltage_get(); } else { sec_gpu_setting_clock = sgx_clock; sec_gpu_setting_voltage = sgx_voltage; // PVR_LOG(("SGX keep DVFS info sgx_clock:%d MHz, sgx_voltage:%d mV ", sgx_clock, sgx_voltage)); } mutex_unlock(&lock); }
/*
 * Re-arm the scale-down hold counter from the DVFS level matching the
 * current GPU clock; fall back to the default threshold when the clock is
 * not found in the DVFS table (negative lookup result).
 */
void sec_gpu_dvfs_down_requirement_reset()
{
	int lvl = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get());

	sgx_dvfs_down_requirement = (lvl >= 0)
		? g_gpu_dvfs_data[lvl].stay_total_count
		: DOWN_REQUIREMENT_THRESHOLD;
}
/*
 * Populate the runtime DVFS table (gdata) from default_dvfs_data, resolve
 * per-level ASV voltages, create the DVFS sysfs attributes, and build the
 * human-readable frequency table string.
 *
 * NOTE(review): memset() is bounded by MAX_DVFS_LEVEL while the fill loop
 * runs to GPU_DVFS_MAX_LEVEL — confirm the two macros agree (or that
 * GPU_DVFS_MAX_LEVEL <= MAX_DVFS_LEVEL) to avoid uninitialized tail entries.
 */
void sec_gpu_dvfs_init(void)
{
	struct platform_device *pdev;
	int i = 0;
	ssize_t total = 0, offset = 0;

	memset(gdata, 0x00, sizeof(struct gpu_dvfs_data)*MAX_DVFS_LEVEL);

	/* Copy each default level, resolving its ASV voltage (clock given in kHz). */
	for (i = 0; i < GPU_DVFS_MAX_LEVEL; i++) {
		gdata[i].level = default_dvfs_data[i].level;
		gdata[i].clock = default_dvfs_data[i].clock;
		gdata[i].voltage = get_match_volt(ID_G3D, default_dvfs_data[i].clock * 1000);
		gdata[i].clock_source = default_dvfs_data[i].clock_source;
		gdata[i].min_threadhold = default_dvfs_data[i].min_threadhold;
		gdata[i].max_threadhold = default_dvfs_data[i].max_threadhold;
		gdata[i].quick_down_threadhold = default_dvfs_data[i].quick_down_threadhold;
		gdata[i].quick_up_threadhold = default_dvfs_data[i].quick_up_threadhold;
		gdata[i].stay_total_count = default_dvfs_data[i].stay_total_count;
		gdata[i].mask = setmask(default_dvfs_data[i].level, default_dvfs_data[i].clock);
		PVR_LOG(("G3D DVFS Info: Level:%d, Clock:%d MHz, Voltage:%d uV", gdata[i].level, gdata[i].clock, gdata[i].voltage));
	}

	/* default dvfs level depend on default clock setting */
	/* NOTE(review): the lookup can return a negative level if the boot
	 * clock is not in the table — callers of sgx_dvfs_level should guard. */
	sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get());
	sgx_dvfs_down_requirement = DOWN_REQUIREMENT_THRESHOLD;

	pdev = gpsPVRLDMDev;

	/* Required name attribute */
	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_min_lock) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_min_lock fail"));

	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_max_lock) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_max_lock fail"));

	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_volt_table) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_volt_table fail"));

	/* Generate DVFS table list*/
	/* NOTE(review): unbounded sprintf into sgx_dvfs_table_string — assumes
	 * the buffer is sized for GPU_DVFS_MAX_LEVEL entries; verify. */
	for( i = 0; i < GPU_DVFS_MAX_LEVEL ; i++) {
		offset = sprintf(sgx_dvfs_table_string+total, "%d\n", gdata[i].clock);
		total += offset;
	}
	sgx_dvfs_table = sgx_dvfs_table_string;

	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_table) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_table fail"));
}
/*
 * Handle a min- or max-lock request coming from the sysfs store handlers.
 *
 * bmax  - nonzero to operate on the max lock, zero for the min lock.
 * value - requested lock clock in MHz; a value not present in the DVFS
 *         table (lookup < 0) clears the corresponding lock, and value == 0
 *         is the explicit "unlock" request that is reported as success.
 * count - the sysfs write length, returned on success.
 *
 * Returns count on success, -EINVAL otherwise.
 *
 * NOTE(review): retval is int while count is size_t — a write larger than
 * INT_MAX would truncate; harmless for sysfs-sized writes but worth noting.
 */
static int sec_gpu_lock_control_proc(int bmax, long value, size_t count)
{
	int lock_level = sec_gpu_dvfs_level_from_clk_get(value);
	int retval = -EINVAL;

	sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get());

	if (lock_level < 0) { /* unlock something */
		/* Clear the requested lock (level 0 == no lock). */
		if (bmax)
			sgx_dvfs_max_lock = custom_max_lock_level = 0;
		else
			sgx_dvfs_min_lock = custom_min_lock_level = 0;

		/* Re-apply whichever lock is still active after the clear. */
		if (sgx_dvfs_min_lock && (sgx_dvfs_level > custom_min_lock_level)) /* min lock only - likely */
			sec_gpu_vol_clk_change(gdata[custom_min_lock_level].clock, gdata[custom_min_lock_level].voltage);
		else if (sgx_dvfs_max_lock && (sgx_dvfs_level < custom_max_lock_level)) /* max lock only - unlikely */
			sec_gpu_vol_clk_change(gdata[custom_max_lock_level].clock, gdata[custom_max_lock_level].voltage);

		/* Only an explicit "0" write is a valid unlock; any other
		 * unknown clock value still returns -EINVAL. */
		if (value == 0)
			retval = count;
	} else{ /* lock something */
		if (bmax) {
			sgx_dvfs_max_lock = value;
			custom_max_lock_level = lock_level;
		} else {
			sgx_dvfs_min_lock = value;
			custom_min_lock_level = lock_level;
		}

		if ((sgx_dvfs_max_lock) && (sgx_dvfs_min_lock) && (sgx_dvfs_max_lock < sgx_dvfs_min_lock)){ /* abnormal status */
			/* Contradictory locks (max below min): max lock wins. */
			if (sgx_dvfs_max_lock) /* max lock */
				sec_gpu_vol_clk_change(gdata[custom_max_lock_level].clock, gdata[custom_max_lock_level].voltage);
		} else { /* normal status */
			/* Snap the current clock into the newly-locked range.
			 * NOTE(review): lower DVFS level index appears to mean
			 * higher clock here (level < max_lock_level means the
			 * clock is above the cap) — confirm table ordering. */
			if ((bmax) && sgx_dvfs_max_lock && (sgx_dvfs_level < custom_max_lock_level)) /* max lock */
				sec_gpu_vol_clk_change(gdata[custom_max_lock_level].clock, gdata[custom_max_lock_level].voltage);
			if ((!bmax) && sgx_dvfs_min_lock && (sgx_dvfs_level > custom_min_lock_level)) /* min lock */
				sec_gpu_vol_clk_change(gdata[custom_min_lock_level].clock, gdata[custom_min_lock_level].voltage);
		}
		retval = count;
	}
	return retval;
}
void sec_gpu_dvfs_handler(int utilization_value) { if (custom_threshold_change) sec_custom_threshold_set(); /*utilization_value is zero mean is gpu going to idle*/ if (utilization_value == 0) return; #ifdef CONFIG_ASV_MARGIN_TEST sgx_dvfs_custom_clock = set_g3d_freq; #endif /* this check for custom dvfs setting - 0:auto, others: custom lock clock*/ if (sgx_dvfs_custom_clock) { if (sgx_dvfs_custom_clock != gpu_clock_get()) { sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(sgx_dvfs_custom_clock); /* this check for current clock must be find in dvfs table */ if (sgx_dvfs_level < 0) { PVR_LOG(("WARN: custom clock: %d MHz not found in DVFS table", sgx_dvfs_custom_clock)); return; } if (sgx_dvfs_level < MAX_DVFS_LEVEL && sgx_dvfs_level >= 0) { sec_gpu_vol_clk_change(g_gpu_dvfs_data[sgx_dvfs_level].clock, g_gpu_dvfs_data[sgx_dvfs_level].voltage); PVR_LOG(("INFO: CUSTOM DVFS [%d MHz] (%d, %d), utilization [%d] -(%d MHz)", gpu_clock_get(), g_gpu_dvfs_data[sgx_dvfs_level].min_threadhold, g_gpu_dvfs_data[sgx_dvfs_level].max_threadhold, utilization_value, sgx_dvfs_custom_clock )); } else { PVR_LOG(("INFO: CUSTOM DVFS [%d MHz] invalid clock - restore auto mode", sgx_dvfs_custom_clock)); sgx_dvfs_custom_clock = 0; } } } else { sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get()); /* this check for current clock must be find in dvfs table */ if (sgx_dvfs_level < 0) { PVR_LOG(("WARN: current clock: %d MHz not found in DVFS table. 
so set to max clock", gpu_clock_get())); sec_gpu_vol_clk_change(g_gpu_dvfs_data[BASE_START_LEVEL].clock, g_gpu_dvfs_data[BASE_START_LEVEL].voltage); return; } PVR_DPF((PVR_DBG_MESSAGE, "INFO: AUTO DVFS [%d MHz] <%d, %d>, utilization [%d]", gpu_clock_get(), g_gpu_dvfs_data[sgx_dvfs_level].min_threadhold, g_gpu_dvfs_data[sgx_dvfs_level].max_threadhold, utilization_value)); /* check current level's threadhold value */ if (g_gpu_dvfs_data[sgx_dvfs_level].min_threadhold > utilization_value) { #if defined(USING_BOOST_DOWN_MODE) /* check need Quick up/down change */ if (g_gpu_dvfs_data[sgx_dvfs_level].quick_down_threadhold >= utilization_value) sgx_dvfs_level = sec_clock_change_down(sgx_dvfs_level, BASE_QUICK_DOWN_LEVEL); else #endif /* need to down current clock */ sgx_dvfs_level = sec_clock_change_down(sgx_dvfs_level, BASE_DWON_STEP_LEVEL); } else if (g_gpu_dvfs_data[sgx_dvfs_level].max_threadhold < utilization_value) { #if defined(USING_BOOST_UP_MODE) if (g_gpu_dvfs_data[sgx_dvfs_level].quick_up_threadhold <= utilization_value) sgx_dvfs_level = sec_clock_change_up(sgx_dvfs_level, BASE_QUICK_UP_LEVEL); else #endif /* need to up current clock */ sgx_dvfs_level = sec_clock_change_up(sgx_dvfs_level, BASE_UP_STEP_LEVEL); } else sgx_dvfs_down_requirement = g_gpu_dvfs_data[sgx_dvfs_level].stay_total_count; } g_g3dfreq = g_gpu_dvfs_data[sgx_dvfs_level].clock; }