/*
 * ged_dvfs_system_init - one-time initialization of the GED DVFS subsystem.
 *
 * Initializes the DVFS/vsync-offset locks, seeds the skip counter and the
 * vsync period, and (when GED_DVFS_ENABLE is set) primes the bottom /
 * custom-boost / custom-upbound frequency indices from the platform DVFS
 * table, then mounts the GPU-HAL function pointers so the MTK HAL can drive
 * this module.
 *
 * Index convention (established by the assignments below): table index 0 is
 * the highest frequency, table_num - 1 the lowest.
 *
 * Returns GED_OK unconditionally.
 */
GED_ERROR ged_dvfs_system_init()
{
	mutex_init(&gsDVFSLock);
	mutex_init(&gsVSyncOffsetLock);

	// initial as locked, signal when vsync_sw_notify
	g_iSkipCount = MTK_DEFER_DVFS_WORK_MS / MTK_DVFS_SWITCH_INTERVAL_MS;

	/* default vsync period assumes a 60 fps panel */
	g_ulvsync_period = get_ns_period_from_fps(60);

#ifdef GED_DVFS_ENABLE
	gpu_dvfs_enable = 1;
#else
	gpu_dvfs_enable = 0;
#endif

	g_dvfs_skip_round = 0;

#ifdef GED_DVFS_ENABLE
	/* bottom (floor) defaults to the lowest frequency in the table */
	g_bottom_freq_id = mt_gpufreq_get_dvfs_table_num() - 1;
	gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id);

	/* custom boost floor also defaults to the lowest frequency */
	g_cust_boost_freq_id = mt_gpufreq_get_dvfs_table_num() - 1;
	gpu_cust_boost_freq = mt_gpufreq_get_freq_by_idx(g_cust_boost_freq_id);

	/* custom upbound (ceiling) defaults to the highest frequency */
	g_cust_upbound_freq_id = 0;
	gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id);

	// GPU HAL fp mount
	//mt_gpufreq_input_boost_notify_registerCB(ged_dvfs_freq_input_boostCB); // MTKFreqInputBoostCB
	mt_gpufreq_power_limit_notify_registerCB(ged_dvfs_freq_thermal_limitCB); // MTKFreqPowerLimitCB
	mtk_boost_gpu_freq_fp = ged_dvfs_boost_gpu_freq;
	mtk_set_bottom_gpu_freq_fp = ged_dvfs_set_bottom_gpu_freq;
	mtk_get_bottom_gpu_freq_fp = ged_dvfs_get_bottom_gpu_freq;
	mtk_custom_get_gpu_freq_level_count_fp = ged_dvfs_get_gpu_freq_level_count;
	mtk_custom_boost_gpu_freq_fp = ged_dvfs_custom_boost_gpu_freq;
	mtk_custom_upbound_gpu_freq_fp = ged_dvfs_custom_ceiling_gpu_freq;
	mtk_get_custom_boost_gpu_freq_fp = ged_dvfs_get_custom_boost_gpu_freq;
	mtk_get_custom_upbound_gpu_freq_fp = ged_dvfs_get_custom_ceiling_gpu_freq;
	mtk_get_gpu_loading_fp = ged_dvfs_get_gpu_loading;
	mtk_get_gpu_block_fp = ged_dvfs_get_gpu_blocking;
	mtk_get_gpu_idle_fp = ged_dvfs_get_gpu_idle;
	mtk_do_gpu_dvfs_fp = ged_dvfs_run;
	mtk_gpu_dvfs_set_mode_fp = ged_dvfs_set_tuning_mode_wrap;
#endif

	return GED_OK;
}
void _mtk_gpu_dvfs_init(void) { int i; unsigned int iCurrentFreqCount; printk(KERN_EMERG "[MALI] _mtk_gpu_dvfs_init\n"); iCurrentFreqCount = mt_gpufreq_get_dvfs_table_num(); // get curent platform index for(i=0 ; i<MTK_MT6735_GPU_LIMIT_COUNT ; i++) { if(iCurrentFreqCount == mt6735_gpu_freq_limit_data[i].actual_freq_index_count) { g_current_gpu_platform_id = i; break; } } // init g_custom_gpu_boost_id and g_ged_gpu_boost_id as 0 mtk_kbase_custom_boost_gpu_freq(0); mtk_kbase_ged_bottom_gpu_freq(0); g_power_off_gpu_freq_idx = 0;//mt6735_gpu_freq_limit_data[g_current_gpu_platform_id].virtual_freq_index_count-1; }
/*
 * Report the current custom ceiling as a caller-facing level, where 0 is the
 * lowest frequency (table index 0 internally is the highest frequency, so
 * the stored index is mirrored against the top of the table).
 */
static unsigned int ged_dvfs_get_custom_ceiling_gpu_freq(void)
{
	unsigned int top_level;

	top_level = mt_gpufreq_get_dvfs_table_num() - 1;
	return top_level - g_cust_upbound_freq_id;
}
static void ged_dvfs_custom_ceiling_gpu_freq(unsigned int ui32FreqLevel) { unsigned int ui32MaxLevel; if (gpu_debug_enable) { GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel); } ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1; if (ui32MaxLevel < ui32FreqLevel) { ui32FreqLevel = ui32MaxLevel; } mutex_lock(&gsDVFSLock); // 0 => The highest frequency // table_num - 1 => The lowest frequency g_cust_upbound_freq_id = ui32MaxLevel - ui32FreqLevel; gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id); if (g_cust_upbound_freq_id > mt_gpufreq_get_cur_freq_index()) { ged_dvfs_gpu_freq_commit(g_cust_upbound_freq_id, GED_DVFS_CUSTOM_CEIL_COMMIT); } mutex_unlock(&gsDVFSLock); }
static void ged_dvfs_set_bottom_gpu_freq(unsigned int ui32FreqLevel) { unsigned int ui32MaxLevel; if (gpu_debug_enable) { GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel); } ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1; if (ui32MaxLevel < ui32FreqLevel) { ui32FreqLevel = ui32MaxLevel; } mutex_lock(&gsDVFSLock); // 0 => The highest frequency // table_num - 1 => The lowest frequency g_bottom_freq_id = ui32MaxLevel - ui32FreqLevel; gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id); //if current id is larger, ie lower freq, we need to reflect immedately if(g_bottom_freq_id < mt_gpufreq_get_cur_freq_index()) ged_dvfs_gpu_freq_commit(g_bottom_freq_id, GED_DVFS_SET_BOTTOM_COMMIT); mutex_unlock(&gsDVFSLock); }
/*
 * ged_query_info - read back one piece of GED DVFS state selected by eType.
 *
 * Pure dispatch table over module globals and mt_gpufreq accessors; the one
 * exception is GED_SRV_SUICIDE, which also raises a probe signal as a side
 * effect before returning the probed pid.
 *
 * Index convention: 0 is the highest-frequency table index, so MAX_FREQ_IDX
 * maps to table_num-1 (lowest frequency) and MIN_FREQ_IDX to 0 (highest).
 *
 * Returns 0 for any unknown eType, or for every eType when GED_DVFS_ENABLE
 * is not defined.
 */
unsigned long ged_query_info( GED_INFO eType)
{
	switch(eType)
	{
#ifdef GED_DVFS_ENABLE
		case GED_LOADING:
			return gpu_loading;
		case GED_IDLE:
			return gpu_idle;
		case GED_BLOCKING:
			return gpu_block;
		case GED_PRE_FREQ:
			return mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
		case GED_PRE_FREQ_IDX:
			return g_ui32PreFreqID;
		case GED_CUR_FREQ:
			return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_cur_freq_index());
		case GED_CUR_FREQ_IDX:
			return mt_gpufreq_get_cur_freq_index();
		case GED_MAX_FREQ_IDX:
			return mt_gpufreq_get_dvfs_table_num()-1;
		case GED_MAX_FREQ_IDX_FREQ:
			return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_dvfs_table_num()-1);
		case GED_MIN_FREQ_IDX:
			return 0;
		case GED_MIN_FREQ_IDX_FREQ:
			return mt_gpufreq_get_freq_by_idx(0);
		case GED_3D_FENCE_DONE_TIME:
			return ged_monitor_3D_fence_done_time();
		case GED_VSYNC_OFFSET:
			return ged_dvfs_vsync_offset_level_get();
		case GED_EVENT_STATUS:
			return g_ui32EventStatus;
		case GED_EVENT_DEBUG_STATUS:
			return g_ui32EventDebugStatus;
		case GED_SRV_SUICIDE:
			/* side effect: asks ged_srv to terminate itself */
			ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
			return g_probe_pid;
		case GED_PRE_HALF_PERIOD:
			return g_ulWorkingPeriod_us;
		case GED_LATEST_START:
			return g_ulPreCalResetTS_us;
#endif
		default:
			return 0;
	}
}
/*
 * Report the current bottom (floor) frequency as a caller-facing level,
 * where 0 is the lowest frequency (the stored index is mirrored, since
 * table index 0 internally is the highest frequency).
 */
static unsigned int ged_dvfs_get_bottom_gpu_freq(void)
{
	unsigned int top_level;

	top_level = mt_gpufreq_get_dvfs_table_num() - 1;
	return top_level - g_bottom_freq_id;
}
unsigned int ged_dvfs_get_custom_boost_gpu_freq(void) { #ifdef GED_DVFS_ENABLE unsigned int ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1; #else unsigned int ui32MaxLevel = 0; #endif return ui32MaxLevel - g_cust_boost_freq_id; }
/*
 * _mtk_gpu_dvfs_index_to_frequency - map a virtual frequency index to a
 * frequency value using a per-platform hard-coded table; the platform is
 * inferred from the size of the real DVFS table.
 *
 * FIX: the original function could fall off the end without returning a
 * value (unknown table size, or iFreq outside the listed cases), which is
 * undefined behavior for a value-returning function. Return 0 for any
 * unmapped platform/index combination instead.
 */
static unsigned int _mtk_gpu_dvfs_index_to_frequency(int iFreq)
{
	unsigned int iCurrentFreqCount;

	iCurrentFreqCount = mt_gpufreq_get_dvfs_table_num();

	if(iCurrentFreqCount == 6) // MT6752
	{
		switch(iFreq)
		{
			case 0: return 7280000;
			case 1: return 6500000;
			case 2: return 5980000;
			case 3: return 5200000;
			case 4: return 4160000;
			case 5: return 3120000;
		}
	}
	else if(iCurrentFreqCount == 4) // MT6752M, 6751, 6733
	{
		switch(iFreq)
		{
			case 0: return 5980000;
			case 1: return 5200000;
			case 2: return 4160000;
			case 3: return 3120000;
		}
	}
	else if(iCurrentFreqCount == 3) // MT6732, 6732M
	{
		switch(iFreq)
		{
			case 0: return 4940000;
			case 1: return 4160000;
			case 2: return 3120000;
		}
	}

	/* unknown table size or out-of-range index */
	return 0;
}
/* MTK set boost callback used by GED. The incoming level uses 0 for the
 * lowest frequency; it is clamped to the table size and mirrored into the
 * internal index space (table index 0 = highest frequency) before being
 * recorded and, if different from the current index, committed. */
static void mtk_ged_hal_callback(unsigned int level)
{
	unsigned int num_levels;
	unsigned int targetID;

	num_levels = mt_gpufreq_get_dvfs_table_num();
	if (level >= num_levels)
		level = num_levels - 1;

	/* mirror caller level into internal table index */
	targetID = num_levels - level - 1;

	mtk_set_ged_hal_frequency_id(targetID);
	MALI_DEBUG_PRINT(4, ("[MALI] mtk_ged_hal_callback() level=%d, boost ID=%d", level, targetID));

	if (mt_gpufreq_get_cur_freq_index() != targetID)
		mt_gpufreq_target(targetID);
}
/* this function will be called periodically with sampling period 200ms~1000ms */
/*
 * DVFS decision handler driven by the Mali utilization sampler.
 *
 * Converts the 0..256 utilization into a percentage, decides between
 * CLOCK_UP / CLOCK_DOWN against the configured thresholds, scales the
 * current frequency proportionally to pick a target index, applies the
 * thermal limit, honors the input-boost window and the deferral counter,
 * and finally queues mali_dvfs_work to perform the actual switch.
 *
 * Index convention: a SMALLER index is a HIGHER frequency (the thermal
 * limit is a minimum index, boost compares with '<').
 */
void mali_pmm_utilization_handler(struct mali_gpu_utilization_data *data)
{
	int utilization;
	mali_dvfs_action action;
	int frequency;
	int duration;
	int currentID;
	int targetID;
	int deferred;

	mali_utilization = data->utilization_gpu;

	/* nothing to do while the GPU clock is gated */
	if (0 == atomic_read(&g_is_power_enabled))
	{
		MALI_DEBUG_PRINT(4, ("GPU clock is in off state\n"));
		return;
	}

	/* raw utilization is 0..256; scale to a percentage */
	utilization = (mali_utilization * 100) / 256;
	MALI_DEBUG_PRINT(4, ("%s GPU utilization=%d\n", __FUNCTION__, utilization));

	if (utilization <= mtk_get_dvfs_threshold_min())
	{
		action = MALI_DVFS_CLOCK_DOWN;
	}
	else if (utilization >= mtk_get_dvfs_threshold_max())
	{
		action = MALI_DVFS_CLOCK_UP;
	}
	else
	{
		/* inside the hysteresis band: keep the current frequency */
		MALI_DEBUG_PRINT(4, ("No need to adjust GPU frequency!\n"));
		return;
	}

	// Get current frequency id
	currentID = mt_gpufreq_get_cur_freq_index();
	// Get current deferred count
	deferred = mtk_get_current_deferred_count();

	switch(action)
	{
	case MALI_DVFS_CLOCK_UP:
		/* scale current frequency by load relative to the low threshold,
		 * then map back to the nearest table index */
		frequency = mapIndexToFrequency(currentID) * utilization / mtk_get_dvfs_threshold_min();
		targetID = mapFrequencyToIndex(frequency);
		deferred += 1;
		break;
	case MALI_DVFS_CLOCK_DOWN:
		frequency = mapIndexToFrequency(currentID) * utilization / mtk_get_dvfs_threshold_max();
		targetID = mapFrequencyToIndex(frequency);
		deferred += 1;
		break;
	default:
		MALI_DEBUG_PRINT(4, ("Unknown GPU DVFS operation!\n"));
		return;
	}

	// Thermal power limit
	/* the thermal limit is a minimum index (i.e. a frequency ceiling) */
	if (targetID < mt_gpufreq_get_thermal_limit_index())
	{
		targetID = mt_gpufreq_get_thermal_limit_index();
	}

	duration = mtk_get_input_boost_duration();
	if((duration > 0) && (mtk_get_input_boost_enabled() != 0))
	{
		/* within an input-boost window: refuse to drop below the boost
		 * frequency, but keep counting down the window and deferrals */
		MALI_DEBUG_PRINT(4, ("Still in the boost duration!\n"));
		mtk_set_input_boost_duration(duration - 1);
		if (targetID >= mtk_get_boost_frequency_id())
		{
			mtk_set_current_deferred_count(deferred);
			return;
		}
	}
	else if (deferred < mtk_get_dvfs_deferred_count())
	{
		/* debounce: require several consecutive decisions before acting */
		MALI_DEBUG_PRINT(4, ("Defer DVFS frequency operation!\n"));
		mtk_set_current_deferred_count(deferred);
		return;
	}

	if(currentID == targetID)
	{
		MALI_DEBUG_PRINT(4, ("Target GPU frequency is the same!\n"));
		return;
	}

	/* clamp the target into the valid table range */
	if (targetID < 0)
	{
		targetID = 0;
	}
	else if (targetID >= mt_gpufreq_get_dvfs_table_num())
	{
		targetID = mt_gpufreq_get_dvfs_table_num() - 1;
	}

	mtk_set_current_frequency_id(targetID);

	/* raising above the recorded boost id updates the boost id too */
	if (targetID < mtk_get_boost_frequency_id())
	{
		mtk_set_boost_frequency_id(targetID);
	}

	mtk_set_current_deferred_count(0);

#if MALI_LICENSE_IS_GPL
	/* defer the actual frequency switch to the workqueue */
	if (mali_dvfs_queue && (1 == atomic_read(&g_is_power_enabled)))
	{
		queue_work(mali_dvfs_queue, &mali_dvfs_work);
	}
#endif // MALI_LICENSE_IS_GPL
}
/* Number of DVFS frequency levels supported by the platform table. */
static unsigned int mtk_get_freq_level_count(void)
{
	unsigned int level_count;

	level_count = mt_gpufreq_get_dvfs_table_num();
	return level_count;
}
/*
 * pm_callback_power_off - kbase power-off hook (chip-code variant).
 *
 * Sequence: short busy-wait settle delay, poll the MFG debug register until
 * the GPU bus is idle, optionally hard-reset the GPU, poll for idle again,
 * record state, drop the GPU to the lowest frequency, disable the clocks
 * for the detected chip, and finally cut the GPU voltage.
 *
 * NOTE(review): polling_count is NOT reset before the second idle-poll
 * loop; if the first loop exhausted it to -1, `while (polling_count--)`
 * keeps decrementing past zero and the second loop can only exit via the
 * idle bit — confirm this is acceptable.
 */
static void pm_callback_power_off(struct kbase_device *kbdev)
{
	unsigned int uiCurrentFreqCount;
	volatile int polling_count = 100000;
	volatile int i = 0;
	unsigned int code;

	/// 1. Delay 0.01ms before power off
	for (i=0; i < DELAY_LOOP_COUNT;i++);
	if (DELAY_LOOP_COUNT != i)
	{
		pr_debug("[MALI] power off delay error!\n");
	}

	/// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// pr_debug("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);
	if (polling_count <=0)
	{
		pr_debug("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
	}

#if HARD_RESET_AT_POWER_OFF
	/* Cause a GPU hard reset to test whether we have actually idled the GPU
	 * and that we properly reconfigure the GPU on power up.
	 * Usually this would be dangerous, but if the GPU is working correctly it should
	 * be completely safe as the GPU should not be active at this point.
	 * However this is disabled normally because it will most likely interfere with
	 * bus logging etc.
	 */
	//KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
	kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);
#endif

	/// Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// pr_debug("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);
	if (polling_count <=0)
	{
		pr_debug("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
	}

	g_power_status = 0; // the power status is "power off".

	g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index();	// record current freq. index.
	//pr_debug("MALI: GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx );
#if 1
	uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num();	// get freq. table size
	mt_gpufreq_target(uiCurrentFreqCount-1); // set gpu to lowest freq.
#endif

	code = mt_get_chip_hw_code();
	/* MTK clock modified */
	if (0x321 == code) {
		// do something for Denali-1(6735)
#ifdef CONFIG_MTK_CLKMGR
		disable_clock( MT_CG_MFG_BG3D, "GPU");
		disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
#else
		/* common-clock framework path: tear down in reverse of power-on */
		clk_disable_unprepare(kbdev->clk_mfg);
		clk_disable_unprepare(kbdev->clk_mfg_scp);
		clk_disable_unprepare(kbdev->clk_smi_common);
		clk_disable_unprepare(kbdev->clk_display_scp);
#endif
	} else if (0x335 == code) {
		// do something for Denali-2(6735M)
#ifdef CONFIG_MTK_CLKMGR
		disable_clock( MT_CG_MFG_BG3D, "GPU");
		disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	} else if (0x337 == code) {
		// do something for Denali-3(6753)
#ifdef CONFIG_MTK_CLKMGR
		disable_clock( MT_CG_MFG_BG3D, "GPU");
		disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	} else {
		// unknown chip ID, error !!
#ifdef CONFIG_MTK_CLKMGR
		disable_clock( MT_CG_MFG_BG3D, "GPU");
		disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	}

	mt_gpufreq_voltage_enable_set(0);
}
/*
 * pm_callback_power_off - kbase power-off hook (mtk_config variant).
 *
 * Sequence mirrors the other variant: settle delay, poll the MFG debug
 * register for bus idle, hard reset (when enabled), poll again, record the
 * current frequency index, drop to the lowest frequency, disable clocks,
 * cut voltage on new-enough PMICs, notify GED, and mark the power flag off.
 *
 * NOTE(review): kbdev/config are NULL-checked but the function does not
 * return on failure and dereferences kbdev anyway — confirm intent.
 * NOTE(review): the final `#endif` before the closing brace appears to pair
 * with `#if HARD_RESET_AT_POWER_OFF`, which would make the entire tail
 * (second idle poll, freq save, clock/voltage teardown, GED notify, power
 * flag) compile only when HARD_RESET_AT_POWER_OFF is set — verify this
 * preprocessor scoping is intentional.
 * NOTE(review): polling_count is not reset before the second poll loop.
 */
static void pm_callback_power_off(struct kbase_device *kbdev)
{
	unsigned int uiCurrentFreqCount;
	volatile int polling_count = 100000;
	volatile int i = 0;
	struct mtk_config *config;

	if (!kbdev)
	{
		pr_alert("MALI: input parameter is NULL \n");
	}

	config = (struct mtk_config *)kbdev->mtk_config;
	if (!config)
	{
		pr_alert("MALI: mtk_config is NULL \n");
	}

	/// 1. Delay 0.01ms before power off
	for (i=0; i < DELAY_LOOP_COUNT;i++);
	if (DELAY_LOOP_COUNT != i)
	{
		pr_warn("[MALI] power off delay error!\n");
	}

	/// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);
	if (polling_count <=0)
	{
		pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
	}

#if HARD_RESET_AT_POWER_OFF
	/* Cause a GPU hard reset to test whether we have actually idled the GPU
	 * and that we properly reconfigure the GPU on power up.
	 * Usually this would be dangerous, but if the GPU is working correctly it should
	 * be completely safe as the GPU should not be active at this point.
	 * However this is disabled normally because it will most likely interfere with
	 * bus logging etc.
	 */
	//KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
	kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);

	/// Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);
	if (polling_count <=0)
	{
		printk("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
	}

	g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index();	// record current freq. index.
	//printk("MALI: GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx );
#if 1
	uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num();	// get freq. table size
	mt_gpufreq_target(uiCurrentFreqCount-1); // set gpu to lowest freq.
#endif

	/* MTK clock modified */
#ifdef CONFIG_MTK_CLKMGR
	disable_clock( MT_CG_MFG_BG3D, "GPU");
	disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
#endif

	/* only drop the GPU rail on E3-or-later PMIC silicon */
	if(mt6325_upmu_get_swcid() >= PMIC6325_E3_CID_CODE)
	{
		mt_gpufreq_voltage_enable_set(0);
	}

#ifdef ENABLE_COMMON_DVFS
	/* tell GED the GPU clock is now off */
	ged_dvfs_gpu_clock_switch_notify(0);
#endif

	mtk_set_vgpu_power_on_flag(MTK_VGPU_POWER_OFF); // the power status is "power off".
#endif
}
/*
 * ged_dvfs_policy - fallback loading-based DVFS policy.
 *
 * Picks a new frequency index from the current GPU loading using a stepped
 * hysteresis table (99%/85%/70% step up, 50%/30%/1% step down; remember a
 * smaller index is a higher frequency), applies an extra +-1 step when the
 * loading trend is strong, then clamps into [0, i32MaxLevel].
 *
 * When bRefreshed is false the loading is first re-derived from the working
 * period timestamps and the averaged-loading globals are updated.
 *
 * Returns GED_TRUE when *pui32NewFreqID differs from the current index
 * (i.e. a commit is needed); GED_FALSE otherwise, and always GED_FALSE when
 * GED_DVFS_ENABLE is not defined.
 *
 * FIX: the trend comparisons `i32NewFreqID < ui32GPUFreq` mixed a signed
 * int (which can be negative at that point, e.g. index 0 under high load
 * minus 2) with an unsigned int; the usual arithmetic conversions promote
 * the negative value to a huge unsigned number, taking the wrong branch.
 * Cast the current index to int so the comparison is done in signed space.
 */
static bool ged_dvfs_policy(
	unsigned int ui32GPULoading, unsigned int* pui32NewFreqID,
	unsigned long t, long phase, unsigned long ul3DFenceDoneTime, bool bRefreshed)
{
#ifdef GED_DVFS_ENABLE
	int i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
	unsigned int ui32GPUFreq = mt_gpufreq_get_cur_freq_index();
	int i32NewFreqID = (int)ui32GPUFreq;

	if(false==bRefreshed)
	{
		/* derive loading over the elapsed window since the last DVFS pass */
		if(g_ulCalResetTS_us - g_ulPreDVFS_TS_us !=0)
			gpu_loading = (( gpu_loading * (g_ulCalResetTS_us - g_ulPreCalResetTS_us) ) + 100*g_ulWorkingPeriod_us ) / (g_ulCalResetTS_us - g_ulPreDVFS_TS_us);
		else
			gpu_loading = 0;
		g_ulPreDVFS_TS_us = g_ulCalResetTS_us;

		gpu_pre_loading = gpu_av_loading;
		ui32GPULoading = gpu_loading;
		gpu_av_loading = gpu_loading;
	}

	/* stepped hysteresis: smaller index == higher frequency */
	if (ui32GPULoading >= 99)
	{
		i32NewFreqID = 0;
	}
	else if (ui32GPULoading <= 1)
	{
		i32NewFreqID = i32MaxLevel;
	}
	else if (ui32GPULoading >= 85)
	{
		i32NewFreqID -= 2;
	}
	else if (ui32GPULoading <= 30)
	{
		i32NewFreqID += 2;
	}
	else if (ui32GPULoading >= 70)
	{
		i32NewFreqID -= 1;
	}
	else if (ui32GPULoading <= 50)
	{
		i32NewFreqID += 1;
	}

	/* FIX: compare in signed space — i32NewFreqID may be negative here */
	if (i32NewFreqID < (int)ui32GPUFreq) {
		/* loading rising sharply (>1.7x): step up one more level */
		if (gpu_pre_loading * 17 / 10 < ui32GPULoading)
		{
			i32NewFreqID -= 1;
		}
	} else if (i32NewFreqID > (int)ui32GPUFreq) {
		/* loading falling sharply: step down one more level */
		if (ui32GPULoading * 17 / 10 < gpu_pre_loading)
		{
			i32NewFreqID += 1;
		}
	}

	/* clamp into the valid index range */
	if (i32NewFreqID > i32MaxLevel)
	{
		i32NewFreqID = i32MaxLevel;
	}
	else if (i32NewFreqID < 0)
	{
		i32NewFreqID = 0;
	}

	*pui32NewFreqID = (unsigned int)i32NewFreqID;
	g_ulWorkingPeriod_us = 0;

	return *pui32NewFreqID != ui32GPUFreq ? GED_TRUE : GED_FALSE;
#else
	return GED_FALSE;
#endif
}
/*
 * ged_dvfs_um_commit - commit a user-mode-calculated target GPU frequency.
 *
 * gpu_tar_freq is the desired frequency (its lowest bit doubles as a magic
 * flag that kills ged_srv); bFallback selects the built-in policy instead
 * of the frequency search. The function refreshes the loading averages,
 * maps gpu_tar_freq onto the closest table index (index 0 is the highest
 * frequency), applies the low-power tuning-mode adjustment, and commits.
 *
 * Returns GED_INTENTIONAL_BLOCK while timer-based emulation is active,
 * otherwise GED_OK.
 *
 * FIX: gpu_freq was only ever assigned inside "#ifdef GED_DVFS_ENABLE";
 * with that config off, the search loop read an uninitialized variable
 * (undefined behavior). Initialize it to 0 so the comparison is always
 * defined (a zero frequency makes the loop pick the highest level below
 * the target, matching the enabled-path intent).
 */
GED_ERROR ged_dvfs_um_commit( unsigned long gpu_tar_freq, bool bFallback)
{
#ifdef ENABLE_COMMON_DVFS
	int i32MaxLevel = 0;
	unsigned int ui32NewFreqID;
	int i;
	unsigned long gpu_freq = 0;	/* FIX: defined value when GED_DVFS_ENABLE is off */

	if(g_gpu_timer_based_emu)
	{
		return GED_INTENTIONAL_BLOCK;
	}

#ifdef GED_DVFS_ENABLE
	unsigned int ui32CurFreqID;
	i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
	ui32CurFreqID = mt_gpufreq_get_cur_freq_index();
#endif

#ifdef GED_DVFS_UM_CAL
	mutex_lock(&gsDVFSLock);

	/* refresh the loading averages over the elapsed window */
	if(g_ulCalResetTS_us - g_ulPreDVFS_TS_us !=0)
		gpu_loading = (( gpu_loading * (g_ulCalResetTS_us - g_ulPreCalResetTS_us) ) + 100*g_ulWorkingPeriod_us ) / (g_ulCalResetTS_us - g_ulPreDVFS_TS_us);
	else
		gpu_loading =0 ;
	gpu_pre_loading = gpu_av_loading;
	gpu_av_loading = gpu_loading;
	g_ulPreDVFS_TS_us = g_ulCalResetTS_us;

	if(gpu_tar_freq&0x1) // Magic to kill ged_srv
	{
		ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
	}

	if(bFallback==true) // in the fallback mode, gpu_tar_freq taking as freq index
	{
		ged_dvfs_policy(gpu_loading, &ui32NewFreqID, 0, 0, 0, true);
	}
	else
	{
		// Search suitable frequency level
		ui32NewFreqID = i32MaxLevel;
		for (i = 0; i <= i32MaxLevel; i++)
		{
#ifdef GED_DVFS_ENABLE
			gpu_freq = mt_gpufreq_get_freq_by_idx(i);
#endif
			if (gpu_tar_freq > gpu_freq)
			{
				/* first level below the target: take the one above it */
				if(i==0)
					ui32NewFreqID = 0;
				else
					ui32NewFreqID = i-1;
				break;
			}
		}
	}

	if(g_eTuningMode==GED_DVFS_LP)
	{
		/* low-power tuning: run one level slower (unless already lowest) */
		if(ui32NewFreqID!=i32MaxLevel && bFallback==GED_FALSE)
		{
			ui32NewFreqID++;
		}
		ged_monitor_3D_fence_set_disable(GED_TRUE);
	}
	else
		ged_monitor_3D_fence_set_disable(GED_FALSE);

	ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] rdy to commit (%u)",ui32NewFreqID);

	g_computed_freq_id = ui32NewFreqID;
	ged_dvfs_gpu_freq_commit(ui32NewFreqID, GED_DVFS_DEFAULT_COMMIT);

	g_ulWorkingPeriod_us = 0;

	mutex_unlock(&gsDVFSLock);
#endif
#else
	gpu_pre_loading = 0;
#endif

	return GED_OK;
}