static void ged_dvfs_custom_ceiling_gpu_freq(unsigned int ui32FreqLevel) { unsigned int ui32MaxLevel; if (gpu_debug_enable) { GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel); } ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1; if (ui32MaxLevel < ui32FreqLevel) { ui32FreqLevel = ui32MaxLevel; } mutex_lock(&gsDVFSLock); // 0 => The highest frequency // table_num - 1 => The lowest frequency g_cust_upbound_freq_id = ui32MaxLevel - ui32FreqLevel; gpu_cust_upbound_freq = mt_gpufreq_get_freq_by_idx(g_cust_upbound_freq_id); if (g_cust_upbound_freq_id > mt_gpufreq_get_cur_freq_index()) { ged_dvfs_gpu_freq_commit(g_cust_upbound_freq_id, GED_DVFS_CUSTOM_CEIL_COMMIT); } mutex_unlock(&gsDVFSLock); }
/*
 * pm_callback_power_on - Mali power-on callback (common-DVFS variant).
 * Re-enables the GPU voltage rail, notifies GED that the GPU clock is back,
 * enables clocks (legacy clkmgr builds), and restores the frequency that
 * was in use at power-off. Always returns 1 — GPU state may have been lost
 * while powered down.
 */
static int pm_callback_power_on(struct kbase_device *kbdev)
{
	unsigned int current_gpu_freq_idx;
#ifndef CONFIG_MTK_CLKMGR
	int ret;	/* NOTE(review): unused in this variant — compiler warning */
#endif

	/* Turn the GPU rail back on before touching clocks/frequency. */
	mt_gpufreq_voltage_enable_set(1);
	mtk_set_vgpu_power_on_flag(MTK_VGPU_POWER_ON); // the power status is "power on".

#ifdef ENABLE_COMMON_DVFS
	/* Tell the GED DVFS core the GPU clock is available again. */
	ged_dvfs_gpu_clock_switch_notify(1);
#endif

#ifdef CONFIG_MTK_CLKMGR
	enable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
	enable_clock( MT_CG_MFG_BG3D, "GPU");
#endif

	/* Restore the frequency index recorded at power-off. */
	mt_gpufreq_target(g_power_off_gpu_freq_idx);
	current_gpu_freq_idx = mt_gpufreq_get_cur_freq_index();
	/* Larger index = lower frequency: warn if the restore fell short. */
	if( current_gpu_freq_idx > g_power_off_gpu_freq_idx)
		pr_debug("MALI: GPU freq. can't switch to idx=%d\n", g_power_off_gpu_freq_idx );

	/* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
	return 1;
}
static void ged_dvfs_freq_thermal_limitCB(unsigned int ui32LimitFreqID) { if (0 < g_iSkipCount) { return; } if(ui32LimitFreqID == 0) // thermal event disable ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT , GED_FALSE); else ged_dvfs_vsync_offset_event_switch(GED_DVFS_VSYNC_OFFSET_THERMAL_EVENT , GED_TRUE); mutex_lock(&gsDVFSLock); if (ui32LimitFreqID > mt_gpufreq_get_cur_freq_index()) { if (ged_dvfs_gpu_freq_commit(ui32LimitFreqID, GED_DVFS_SET_LIMIT_COMMIT)) { g_dvfs_skip_round = GED_DVFS_SKIP_ROUNDS; // of course this must be fixed } } mutex_unlock(&gsDVFSLock); }
static void ged_dvfs_set_bottom_gpu_freq(unsigned int ui32FreqLevel) { unsigned int ui32MaxLevel; if (gpu_debug_enable) { GED_LOGE("%s: freq = %d", __func__,ui32FreqLevel); } ui32MaxLevel = mt_gpufreq_get_dvfs_table_num() - 1; if (ui32MaxLevel < ui32FreqLevel) { ui32FreqLevel = ui32MaxLevel; } mutex_lock(&gsDVFSLock); // 0 => The highest frequency // table_num - 1 => The lowest frequency g_bottom_freq_id = ui32MaxLevel - ui32FreqLevel; gpu_bottom_freq = mt_gpufreq_get_freq_by_idx(g_bottom_freq_id); //if current id is larger, ie lower freq, we need to reflect immedately if(g_bottom_freq_id < mt_gpufreq_get_cur_freq_index()) ged_dvfs_gpu_freq_commit(g_bottom_freq_id, GED_DVFS_SET_BOTTOM_COMMIT); mutex_unlock(&gsDVFSLock); }
/*
 * ged_dvfs_freq_input_boostCB - input-event (touch) boost callback.
 * @ui32BoostFreqID: DVFS index to boost to (smaller index = higher freq).
 *
 * Ignored during start-up skip rounds or when boost is disabled. Only
 * boosts upward; a successful commit makes the DVFS loop skip a few rounds.
 */
static void ged_dvfs_freq_input_boostCB(unsigned int ui32BoostFreqID)
{
#ifdef GED_DVFS_ENABLE
	if (g_iSkipCount > 0)
		return;

	if (boost_gpu_enable == 0)
		return;

	mutex_lock(&gsDVFSLock);
	/* Only act when the request is faster than the current frequency. */
	if (ui32BoostFreqID < mt_gpufreq_get_cur_freq_index() &&
	    ged_dvfs_gpu_freq_commit(ui32BoostFreqID, GED_DVFS_INPUT_BOOST_COMMIT))
		g_dvfs_skip_round = GED_DVFS_SKIP_ROUNDS; // of course this must be fixed
	mutex_unlock(&gsDVFSLock);
#endif
}
/*
 * ged_dvfs_get_gpu_cur_freq - fill @psData with the current DVFS table
 * index and the corresponding frequency value.
 * Leaves the fields untouched when GED_DVFS_ENABLE is not built in.
 *
 * Robustness fix: a NULL @psData is now rejected instead of dereferenced.
 */
void ged_dvfs_get_gpu_cur_freq(GED_DVFS_FREQ_DATA* psData)
{
#ifdef GED_DVFS_ENABLE
	if (NULL == psData)
		return;

	psData->ui32Idx = mt_gpufreq_get_cur_freq_index();
	psData->ulFreq = mt_gpufreq_get_freq_by_idx(psData->ui32Idx);
#endif
}
/*
 * ged_query_info - single query entry point for GED/DVFS status values.
 * @eType: selector for the datum to return.
 *
 * Returns the requested value, or 0 for unrecognized selectors (and for
 * everything when GED_DVFS_ENABLE is not built in). Note that the
 * GED_SRV_SUICIDE case also signals the suicide event as a side effect
 * before returning g_probe_pid.
 */
unsigned long ged_query_info( GED_INFO eType)
{
	switch(eType)
	{
#ifdef GED_DVFS_ENABLE
	case GED_LOADING:
		return gpu_loading;
	case GED_IDLE:
		return gpu_idle;
	case GED_BLOCKING:
		return gpu_block;
	case GED_PRE_FREQ:
		/* frequency value of the previously committed index */
		return mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID);
	case GED_PRE_FREQ_IDX:
		return g_ui32PreFreqID;
	case GED_CUR_FREQ:
		return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_cur_freq_index());
	case GED_CUR_FREQ_IDX:
		return mt_gpufreq_get_cur_freq_index();
	case GED_MAX_FREQ_IDX:
		/* largest table index (= lowest frequency in the table) */
		return mt_gpufreq_get_dvfs_table_num()-1;
	case GED_MAX_FREQ_IDX_FREQ:
		return mt_gpufreq_get_freq_by_idx(mt_gpufreq_get_dvfs_table_num()-1);
	case GED_MIN_FREQ_IDX:
		return 0;
	case GED_MIN_FREQ_IDX_FREQ:
		return mt_gpufreq_get_freq_by_idx(0);
	case GED_3D_FENCE_DONE_TIME:
		return ged_monitor_3D_fence_done_time();
	case GED_VSYNC_OFFSET:
		return ged_dvfs_vsync_offset_level_get();
	case GED_EVENT_STATUS:
		return g_ui32EventStatus;
	case GED_EVENT_DEBUG_STATUS:
		return g_ui32EventDebugStatus;
	case GED_SRV_SUICIDE:
		/* side effect: raises the suicide event before returning */
		ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
		return g_probe_pid;
	case GED_PRE_HALF_PERIOD:
		return g_ulWorkingPeriod_us;
	case GED_LATEST_START:
		return g_ulPreCalResetTS_us;
#endif
	default:
		return 0;
	}
}
/*
 * mali_dvfs_handler - workqueue handler that applies the DVFS decision.
 *
 * Picks the effective target index: an active input boost wins outright;
 * otherwise the stored target is clamped by the thermal limit and then by
 * the stronger of the perf-HAL and GED-HAL boost requests (smaller index
 * = higher frequency). Commits only when the target differs from the
 * current index. Does nothing while the GPU is powered down.
 */
static void mali_dvfs_handler(struct work_struct *work)
{
	/*unsigned long flags;*/
	int boost_enabled;
	int boost_duration;
	int boost_id;
	int perf_id;
	int ged_id;
	int hal_id;
	int target_id;

	if (atomic_read(&g_is_power_enabled) == 0) {
		MALI_DEBUG_PRINT(4, ("GPU clock is switching down\n"));
		return;
	}

	/* Snapshot all boost/HAL settings up front. */
	boost_enabled = mtk_get_input_boost_enabled();
	boost_duration = mtk_get_input_boost_duration();
	boost_id = mtk_get_boost_frequency_id();
	perf_id = mtk_get_perf_hal_frequency_id();
	ged_id = mtk_get_ged_hal_frequency_id();
	target_id = mtk_get_current_frequency_id();

	if (boost_enabled != 0 && boost_duration > 0) {
		/* Input boost overrides everything while it lasts. */
		target_id = boost_id;
	} else {
		/* Respect the thermal ceiling first. */
		if (target_id < mt_gpufreq_get_thermal_limit_index())
			target_id = mt_gpufreq_get_thermal_limit_index();

		/* Calculate higher boost frequency (e.g. lower index id) */
		hal_id = (perf_id < ged_id) ? perf_id : ged_id;

		if (target_id > hal_id) {
			MALI_DEBUG_PRINT(4, ("Use GPU boost frequency %d as target!\n", hal_id));
			target_id = hal_id;
		}
	}

	if (target_id != mt_gpufreq_get_cur_freq_index())
		mt_gpufreq_target(target_id);
}
/*
 * get_mali_gpu_frequency - current GPU frequency.
 * Translates the current DVFS table index into its frequency value.
 */
static int get_mali_gpu_frequency(void)
{
	return _mtk_gpu_dvfs_index_to_frequency(mt_gpufreq_get_cur_freq_index());
}
/// 3. For query GPU frequency index
/*
 * proc_gpu_frequency_show - /proc handler printing the current DVFS table
 * index. Returns 0 per seq_file convention.
 */
static int proc_gpu_frequency_show(struct seq_file *m, void *v)
{
	seq_printf(m, "GPU Frequency Index: %u\n", mt_gpufreq_get_cur_freq_index());
	return 0;
}
/*
 * ged_dvfs_sw_vsync_query_data - fill a user-mode query packet with the
 * current DVFS snapshot: loading, current freq index and value, previous
 * freq id, vsync offset level, and the working-period timestamps.
 */
void ged_dvfs_sw_vsync_query_data(GED_DVFS_UM_QUERY_PACK* psQueryData)
{
	psQueryData->ui32GPULoading = gpu_loading;
	psQueryData->ui32GPUFreqID = mt_gpufreq_get_cur_freq_index();
	psQueryData->gpu_cur_freq = mt_gpufreq_get_freq_by_idx(psQueryData->ui32GPUFreqID) ;
	/* NOTE(review): gpu_pre_freq is assigned a frequency *index* while
	 * gpu_cur_freq above holds an actual frequency value — looks
	 * inconsistent; confirm whether this should be
	 * mt_gpufreq_get_freq_by_idx(g_ui32PreFreqID). */
	psQueryData->gpu_pre_freq = g_ui32PreFreqID;
	psQueryData->nsOffset = ged_dvfs_vsync_offset_level_get();
	psQueryData->ulWorkingPeriod_us = g_ulWorkingPeriod_us;
	psQueryData->ulPreCalResetTS_us = g_ulPreCalResetTS_us;
}
/*
 * ged_dvfs_gpu_freq_commit - clamp a requested frequency index against all
 * active constraints and commit it to the platform DVFS driver.
 * @ui32NewFreqID: requested DVFS table index (0 = highest frequency).
 * @eCommitType:   reason tag passed through to the commit callback.
 *
 * Clamp order: bottom floor, custom boost floor, custom ceiling, then the
 * thermal limit (which therefore wins over all the others). Returns true
 * only when the platform callback reports the change as committed.
 * Callers in this file invoke it with gsDVFSLock held.
 */
bool ged_dvfs_gpu_freq_commit(unsigned long ui32NewFreqID, GED_DVFS_COMMIT_TYPE eCommitType)
{
	int bCommited=false;
#ifdef GED_DVFS_ENABLE
	unsigned long ui32CurFreqID;

	ui32CurFreqID = mt_gpufreq_get_cur_freq_index();

	if (NULL != ged_dvfs_gpu_freq_commit_fp)
	{
		/* Floors: a larger index means a lower frequency, so cap the
		 * index at the configured bottom / custom-boost ids. */
		if (ui32NewFreqID > g_bottom_freq_id)
		{
			ui32NewFreqID = g_bottom_freq_id;
		}
		if (ui32NewFreqID > g_cust_boost_freq_id)
		{
			ui32NewFreqID = g_cust_boost_freq_id;
		}

		// up bound
		if (ui32NewFreqID < g_cust_upbound_freq_id)
		{
			ui32NewFreqID = g_cust_upbound_freq_id;
		}

		// thermal power limit
		if (ui32NewFreqID < mt_gpufreq_get_thermal_limit_index())
		{
			ui32NewFreqID = mt_gpufreq_get_thermal_limit_index();
		}

		// do change
		if (ui32NewFreqID != ui32CurFreqID)
		{
			// call to DVFS module
			ged_dvfs_gpu_freq_commit_fp(ui32NewFreqID, eCommitType, &bCommited);
			/*
			 * To-Do: refine previous freq contributions,
			 * since it is possible to have multiple freq settings in previous execution period
			 * Does this fatal for precision?
			 */
			ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] new freq ID commited: idx=%lu type=%u",ui32NewFreqID, eCommitType);
			if(true==bCommited)
			{
				ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] commited true");
				/* Remember where we came from for GED_PRE_FREQ queries. */
				g_ui32PreFreqID = ui32CurFreqID;
			}
		}
	}
#endif
	return bCommited;
}
/// 2. For GL/CL utilization
/*
 * proc_gpu_utilization_show - /proc handler reporting GL/CL job-slot
 * utilization, the current DVFS table index, and the power state.
 * Returns 0 per seq_file convention.
 *
 * Fixes: %u for the unsigned frequency index (was %d), and the "1:0n"
 * typo in the power legend (now "1:on").
 */
static int proc_gpu_utilization_show(struct seq_file *m, void *v)
{
	unsigned long gl, cl0, cl1;
	unsigned int iCurrentFreq;

	iCurrentFreq = mt_gpufreq_get_cur_freq_index();
	gl = kbasep_get_gl_utilization();
	cl0 = kbasep_get_cl_js0_utilization();
	cl1 = kbasep_get_cl_js1_utilization();

	seq_printf(m, "gpu/cljs0/cljs1=%lu/%lu/%lu, frequency index=%u power(0:off, 1:on):%d\n",
			gl, cl0, cl1, iCurrentFreq, mtk_get_vgpu_power_on_flag());
	return 0;
}
/*
 * mtk_gpu_power_limit_callback - apply a power/thermal frequency cap.
 * @limitID: DVFS index to cap at (larger index = lower frequency).
 *
 * Acts only while the GPU is powered and currently running faster than the
 * cap; cancels any input boost and switches immediately.
 */
void mtk_gpu_power_limit_callback(unsigned int limitID)
{
	unsigned int cur_id;

	MALI_DEBUG_PRINT(4, ("[MALI] mtk_gpu_power_limit_callback() set to freq id=%d\n", limitID));

	cur_id = mt_gpufreq_get_cur_freq_index();
	if (atomic_read(&g_is_power_enabled) == 1 && cur_id < limitID) {
		mtk_set_current_frequency_id(limitID);
		/* Cancel boost state so it cannot override the limit. */
		mtk_set_input_boost_duration(0);
		mtk_set_current_deferred_count(0);
		mt_gpufreq_target(limitID);
	}
}
/*
 * PVRGpuTraceClientWork - record a client kick job and, when GPU tracing
 * is enabled, emit the corresponding ftrace enqueue event (plus the current
 * GPU frequency on MTK DVFS builds).
 * @ui32Pid:       client process id.
 * @ui32ExtJobRef: external job reference.
 * @ui32IntJobRef: internal job reference.
 * @pszKickType:   label describing the kick (asserted non-NULL).
 */
void PVRGpuTraceClientWork(
		const IMG_UINT32 ui32Pid,
		const IMG_UINT32 ui32ExtJobRef,
		const IMG_UINT32 ui32IntJobRef,
		const IMG_CHAR* pszKickType)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	PVRSRV_FTRACE_GPU_JOB* psJob;
	IMG_UINT32 ui32CtxId = 0;

	PVR_ASSERT(pszKickType);

	PVR_DPF((PVR_DBG_VERBOSE, "PVRGpuTraceClientKick(%s): PID %u, extJobRef %u, intJobRef %u", pszKickType, ui32Pid, ui32ExtJobRef, ui32IntJobRef));

	CreateJob(ui32Pid, ui32ExtJobRef, ui32IntJobRef);

	/* Always create jobs for client work above but only emit the enqueue
	   trace if the feature is enabled. This keeps the lookup tables up
	   to date when the gpu_tracing_on is disabled so that when it is
	   re-enabled the packets that might be in the HWPerf buffer can be
	   decoded in the switch event processing below. */
	if (PVRGpuTraceEnabled())
	{
		eError = GetCtxAndJobID(ui32Pid, ui32ExtJobRef, ui32IntJobRef, &ui32CtxId, &psJob);
		PVR_LOGRN_IF_ERROR(eError, "GetCtxAndJobID");

#if defined(CONFIG_TRACING) && defined(CONFIG_MTK_SCHED_TRACERS) && defined(MTK_GPU_DVFS)
		{
			/* Emit the current GPU frequency alongside the job event. */
			unsigned int ui32CurFreqID = mt_gpufreq_get_cur_freq_index();
			unsigned int ui32GPUFreq = mt_gpufreq_get_frequency_by_level(ui32CurFreqID);
			trace_gpu_freq(ui32GPUFreq);
		}
#endif

		trace_gpu_job_enqueue(ui32CtxId, PVRSRV_FTRACE_JOB_GET_ID(psJob), pszKickType);
		PVRSRV_FTRACE_JOB_SET_FLAGS(psJob, PVRSRV_FTRACE_JOB_FLAG_ENQUEUED);
	}
}
static int pm_callback_power_on(struct kbase_device *kbdev) { #ifdef CONFIG_MALI_MIDGARD_DVFS int touch_boost_flag, touch_boost_id; #endif /* CONFIG_MALI_MIDGARD_DVFS */ unsigned int current_gpu_freq_idx; #ifndef CONFIG_MTK_CLKMGR int ret; #endif unsigned int code; code = mt_get_chip_hw_code(); mt_gpufreq_voltage_enable_set(1); #ifdef ENABLE_COMMON_DVFS ged_dvfs_gpu_clock_switch_notify(1); #endif #ifdef CONFIG_MTK_CLKMGR enable_clock( MT_CG_DISP0_SMI_COMMON, "GPU"); enable_clock( MT_CG_MFG_BG3D, "GPU"); #endif g_power_status = 1; // the power status is "power on". mt_gpufreq_target(g_power_off_gpu_freq_idx); current_gpu_freq_idx = mt_gpufreq_get_cur_freq_index(); if( current_gpu_freq_idx > g_power_off_gpu_freq_idx) pr_debug("MALI: GPU freq. can't switch to idx=%d\n", g_power_off_gpu_freq_idx ); mtk_get_touch_boost_flag( &touch_boost_flag, &touch_boost_id); if(touch_boost_flag > 0) { mt_gpufreq_target(touch_boost_id); mtk_clear_touch_boost_flag(); } /* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */ return 1; }
/* MTK set boost. 0 is the lowest frequency index. The function is used for GED boost currently.*/
/*
 * mtk_ged_hal_callback - translate a GED boost level into a DVFS index.
 * @level: 0 = lowest frequency .. table_num-1 = highest (inverted vs. the
 *         table, where index 0 is the highest frequency).
 * Stores the resulting index as the GED-HAL request and switches at once
 * if it differs from the current frequency.
 */
static void mtk_ged_hal_callback(unsigned int level)
{
	unsigned int num_levels;
	unsigned int target_id;

	num_levels = mt_gpufreq_get_dvfs_table_num();

	/* Clamp the level to the table size. */
	if (level >= num_levels)
		level = num_levels - 1;

	/* Invert: boost level counts up, table index counts down. */
	target_id = num_levels - level - 1;
	mtk_set_ged_hal_frequency_id(target_id);

	MALI_DEBUG_PRINT(4, ("[MALI] mtk_ged_hal_callback() level=%d, boost ID=%d", level, target_id));

	if (target_id != mt_gpufreq_get_cur_freq_index())
		mt_gpufreq_target(target_id);
}
void mtk_gpu_input_boost_callback(unsigned int boostID) { unsigned int currentID; if(mtk_get_input_boost_enabled() == 0) { // Boost is disabled, so return directly. return; } MALI_DEBUG_PRINT(4, ("[MALI] mtk_gpu_input_boost_callback() set to freq id=%d\n", boostID)); currentID = mt_gpufreq_get_cur_freq_index(); if ((1 == atomic_read(&g_is_power_enabled)) && (boostID < currentID)) { mtk_set_boost_frequency_id(boostID); mtk_set_input_boost_duration(3); mtk_set_current_deferred_count(0); mt_gpufreq_target(boostID); } }
/* this function will be called periodically with sampling period 200ms~1000ms */
/*
 * mali_pmm_utilization_handler - periodic DVFS policy for Mali (Utgard).
 * Scales the current frequency by the measured utilization to pick a new
 * DVFS index, honoring the thermal limit, an active input boost, and a
 * defer/debounce count, then queues the actual switch on the workqueue.
 */
void mali_pmm_utilization_handler(struct mali_gpu_utilization_data *data)
{
	int utilization;
	mali_dvfs_action action;
	int frequency;
	int duration;
	int currentID;
	int targetID;
	int deferred;

	mali_utilization = data->utilization_gpu;

	if (0 == atomic_read(&g_is_power_enabled))
	{
		MALI_DEBUG_PRINT(4, ("GPU clock is in off state\n"));
		return;
	}

	/* The driver reports utilization in 0..256; convert to percent. */
	utilization = (mali_utilization * 100) / 256;

	MALI_DEBUG_PRINT(4, ("%s GPU utilization=%d\n", __FUNCTION__, utilization));

	/* Decide direction from the configured thresholds; inside the band
	 * no change is needed. */
	if (utilization <= mtk_get_dvfs_threshold_min())
	{
		action = MALI_DVFS_CLOCK_DOWN;
	}
	else if (utilization >= mtk_get_dvfs_threshold_max())
	{
		action = MALI_DVFS_CLOCK_UP;
	}
	else
	{
		MALI_DEBUG_PRINT(4, ("No need to adjust GPU frequency!\n"));
		return;
	}

	// Get current frequency id
	currentID = mt_gpufreq_get_cur_freq_index();
	// Get current deferred count
	deferred = mtk_get_current_deferred_count();

	/* Scale the current frequency proportionally to utilization and map
	 * the result back to a table index. */
	switch(action)
	{
	case MALI_DVFS_CLOCK_UP:
		frequency = mapIndexToFrequency(currentID) * utilization / mtk_get_dvfs_threshold_min();
		targetID = mapFrequencyToIndex(frequency);
		deferred += 1;
		break;
	case MALI_DVFS_CLOCK_DOWN:
		frequency = mapIndexToFrequency(currentID) * utilization / mtk_get_dvfs_threshold_max();
		targetID = mapFrequencyToIndex(frequency);
		deferred += 1;
		break;
	default:
		MALI_DEBUG_PRINT(4, ("Unknown GPU DVFS operation!\n"));
		return;
	}

	// Thermal power limit
	if (targetID < mt_gpufreq_get_thermal_limit_index())
	{
		targetID = mt_gpufreq_get_thermal_limit_index();
	}

	duration = mtk_get_input_boost_duration();
	if((duration > 0) && (mtk_get_input_boost_enabled() != 0))
	{
		/* Input boost still active: tick it down and keep the boost
		 * frequency unless the new target is even faster. */
		MALI_DEBUG_PRINT(4, ("Still in the boost duration!\n"));
		mtk_set_input_boost_duration(duration - 1);
		if (targetID >= mtk_get_boost_frequency_id())
		{
			mtk_set_current_deferred_count(deferred);
			return;
		}
	}
	else if (deferred < mtk_get_dvfs_deferred_count())
	{
		/* Debounce: wait until enough consecutive samples agree. */
		MALI_DEBUG_PRINT(4, ("Defer DVFS frequency operation!\n"));
		mtk_set_current_deferred_count(deferred);
		return;
	}

	if(currentID == targetID)
	{
		MALI_DEBUG_PRINT(4, ("Target GPU frequency is the same!\n"));
		return;
	}

	/* Clamp the target into the valid table range. */
	if (targetID < 0)
	{
		targetID = 0;
	}
	else if (targetID >= mt_gpufreq_get_dvfs_table_num())
	{
		targetID = mt_gpufreq_get_dvfs_table_num() - 1;
	}

	mtk_set_current_frequency_id(targetID);
	if (targetID < mtk_get_boost_frequency_id())
	{
		mtk_set_boost_frequency_id(targetID);
	}
	mtk_set_current_deferred_count(0);

#if MALI_LICENSE_IS_GPL
	/* Defer the actual frequency switch to the DVFS workqueue. */
	if (mali_dvfs_queue && (1 == atomic_read(&g_is_power_enabled)))
	{
		queue_work(mali_dvfs_queue, &mali_dvfs_work);
	}
#endif // MALI_LICENSE_IS_GPL
}
/*
 * ged_dvfs_um_commit - user-mode assisted DVFS commit.
 * @gpu_tar_freq: target frequency value (bit 0 set is a magic flag that
 *                signals the ged_srv suicide event).
 * @bFallback:    when true, use the kernel timer-based policy instead of
 *                searching the table for gpu_tar_freq.
 *
 * Refreshes the averaged GPU loading, picks a frequency index (policy or
 * table search), applies the low-power tuning bias, and commits.
 * Returns GED_OK, or GED_INTENTIONAL_BLOCK while timer-based emulation
 * is active.
 *
 * Fixes vs. original:
 *  - gpu_freq is zero-initialized: when GED_DVFS_ENABLE is not defined it
 *    was never assigned, yet the search loop still read it (uninitialized
 *    read / UB).
 *  - dropped the unused ui32CurFreqID local.
 */
GED_ERROR ged_dvfs_um_commit( unsigned long gpu_tar_freq, bool bFallback)
{
#ifdef ENABLE_COMMON_DVFS
	int i32MaxLevel = 0;
	unsigned int ui32NewFreqID;
	int i ;
	unsigned long gpu_freq = 0;

	if(g_gpu_timer_based_emu)
	{
		return GED_INTENTIONAL_BLOCK;
	}

#ifdef GED_DVFS_ENABLE
	i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
#endif

#ifdef GED_DVFS_UM_CAL
	mutex_lock(&gsDVFSLock);

	/* Fold the working period into the loading average since the last
	 * DVFS decision. */
	if(g_ulCalResetTS_us - g_ulPreDVFS_TS_us !=0)
		gpu_loading = (( gpu_loading * (g_ulCalResetTS_us - g_ulPreCalResetTS_us) ) + 100*g_ulWorkingPeriod_us ) / (g_ulCalResetTS_us - g_ulPreDVFS_TS_us);
	else
		gpu_loading =0 ;

	gpu_pre_loading = gpu_av_loading;
	gpu_av_loading = gpu_loading;

	g_ulPreDVFS_TS_us = g_ulCalResetTS_us;

	if(gpu_tar_freq&0x1) // Magic to kill ged_srv
	{
		ged_dvfs_probe_signal(GED_SRV_SUICIDE_EVENT);
	}

	if(bFallback==true) // in the fallback mode, gpu_tar_freq taking as freq index
	{
		ged_dvfs_policy(gpu_loading, &ui32NewFreqID, 0, 0, 0, true);
	}
	else
	{
		// Search suitable frequency level
		ui32NewFreqID = i32MaxLevel;
		for (i = 0; i <= i32MaxLevel; i++)
		{
#ifdef GED_DVFS_ENABLE
			gpu_freq = mt_gpufreq_get_freq_by_idx(i);
#endif
			if (gpu_tar_freq > gpu_freq)
			{
				if(i==0)
					ui32NewFreqID = 0;
				else
					ui32NewFreqID = i-1;
				break;
			}
		}
	}

	/* Low-power tuning: bias one level slower and disable the 3D-fence
	 * boost while in LP mode. */
	if(g_eTuningMode==GED_DVFS_LP)
	{
		if(ui32NewFreqID!=i32MaxLevel && bFallback==GED_FALSE)
		{
			ui32NewFreqID++;
		}
		ged_monitor_3D_fence_set_disable(GED_TRUE);
	}
	else
		ged_monitor_3D_fence_set_disable(GED_FALSE);

	ged_log_buf_print(ghLogBuf_DVFS, "[GED_K] rdy to commit (%u)",ui32NewFreqID);

	g_computed_freq_id = ui32NewFreqID;
	ged_dvfs_gpu_freq_commit(ui32NewFreqID, GED_DVFS_DEFAULT_COMMIT);

	g_ulWorkingPeriod_us = 0;

	mutex_unlock(&gsDVFSLock);
#endif
#else
	gpu_pre_loading = 0;
#endif

	return GED_OK;
}
/*
 * pm_callback_power_on - Mali power-on callback (multi-chip variant).
 * Enables the GPU voltage rail and the chip-specific clock tree (selected
 * by HW code: Denali-1/2/3), restores the pre-power-off frequency, and
 * services any pending touch boost. Always returns 1 — GPU state may have
 * been lost while powered down.
 *
 * NOTE(review): on the g_type_T path the touch boost targets the fixed
 * index 1 instead of touch_boost_id — confirm this special case is
 * intentional.
 */
static int pm_callback_power_on(struct kbase_device *kbdev)
{
	int touch_boost_flag, touch_boost_id;
	unsigned int current_gpu_freq_idx;
#ifndef CONFIG_MTK_CLKMGR
	int ret;
#endif
	unsigned int code = mt_get_chip_hw_code();

	/* Turn the GPU rail back on before touching clocks/frequency. */
	mt_gpufreq_voltage_enable_set(1);

	if (0x321 == code) {
		// do something for Denali-1(6735)
#ifdef CONFIG_MTK_CLKMGR
		enable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
		enable_clock( MT_CG_MFG_BG3D, "GPU");
#else
		/* Common-clock framework: display MTCMOS + smi first, then MFG. */
		ret = clk_prepare_enable(kbdev->clk_display_scp);
		if (ret)
		{
			pr_debug("MALI: clk_prepare_enable failed when enabling display MTCMOS");
		}
		ret = clk_prepare_enable(kbdev->clk_smi_common);
		if (ret)
		{
			pr_debug("MALI: clk_prepare_enable failed when enabling display smi_common clock");
		}
		ret = clk_prepare_enable(kbdev->clk_mfg_scp);
		if (ret)
		{
			pr_debug("MALI: clk_prepare_enable failed when enabling mfg MTCMOS");
		}
		ret = clk_prepare_enable(kbdev->clk_mfg);
		if (ret)
		{
			pr_debug("MALI: clk_prepare_enable failed when enabling mfg clock");
		}
#endif
	} else if (0x335 == code) {
		// do something for Denali-2(6735M)
#ifdef CONFIG_MTK_CLKMGR
		enable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
		enable_clock( MT_CG_MFG_BG3D, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	} else if (0x337 == code) {
		// do something for Denali-3(6753)
#ifdef CONFIG_MTK_CLKMGR
		enable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
		enable_clock( MT_CG_MFG_BG3D, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	} else {
		// unknown chip ID, error !!
#ifdef CONFIG_MTK_CLKMGR
		enable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
		enable_clock( MT_CG_MFG_BG3D, "GPU");
#endif /* CONFIG_MTK_CLKMGR */
	}

	g_power_status = 1; // the power status is "power on".

	/* Restore the frequency recorded at power-off (larger index = lower freq). */
	mt_gpufreq_target(g_power_off_gpu_freq_idx);
	current_gpu_freq_idx = mt_gpufreq_get_cur_freq_index();
	if( current_gpu_freq_idx > g_power_off_gpu_freq_idx)
		pr_debug("MALI: GPU freq. can't switch to idx=%d\n", g_power_off_gpu_freq_idx );

	/* Service a pending touch boost, then clear the request. */
	mtk_get_touch_boost_flag( &touch_boost_flag, &touch_boost_id);
	if(g_type_T==1)
	{
		if(touch_boost_flag > 0)
		{
			mt_gpufreq_target(1);
			mtk_clear_touch_boost_flag();
		}
	}
	else
	{
		if(touch_boost_flag > 0)
		{
			mt_gpufreq_target(touch_boost_id);
			mtk_clear_touch_boost_flag();
		}
	}

	/* Nothing is needed on VExpress, but we may have destroyed GPU state (if the below HARD_RESET code is active) */
	return 1;
}
static void pm_callback_power_off(struct kbase_device *kbdev) { unsigned int uiCurrentFreqCount; volatile int polling_count = 100000; volatile int i = 0; unsigned int code; /// 1. Delay 0.01ms before power off for (i=0; i < DELAY_LOOP_COUNT;i++); if (DELAY_LOOP_COUNT != i) { pr_debug("[MALI] power off delay error!\n"); } /// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms) MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG); do { /// 0x13000184[2] /// 1'b1: bus idle /// 1'b0: bus busy if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT) { /// pr_debug("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count); break; } } while (polling_count--); if (polling_count <=0) { pr_debug("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count); } #if HARD_RESET_AT_POWER_OFF /* Cause a GPU hard reset to test whether we have actually idled the GPU * and that we properly reconfigure the GPU on power up. * Usually this would be dangerous, but if the GPU is working correctly it should * be completely safe as the GPU should not be active at this point. * However this is disabled normally because it will most likely interfere with * bus logging etc. */ //KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0); kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET); #endif /// Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms) MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG); do { /// 0x13000184[2] /// 1'b1: bus idle /// 1'b0: bus busy if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT) { /// pr_debug("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count); break; } } while (polling_count--); if (polling_count <=0) { pr_debug("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count); } g_power_status = 0; // the power status is "power off". g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index(); // record current freq. index. 
//pr_debug("MALI: GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx ); #if 1 uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num(); // get freq. table size mt_gpufreq_target(uiCurrentFreqCount-1); // set gpu to lowest freq. #endif code = mt_get_chip_hw_code(); /* MTK clock modified */ if (0x321 == code) { // do something for Denali-1(6735) #ifdef CONFIG_MTK_CLKMGR disable_clock( MT_CG_MFG_BG3D, "GPU"); disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU"); #else clk_disable_unprepare(kbdev->clk_mfg); clk_disable_unprepare(kbdev->clk_mfg_scp); clk_disable_unprepare(kbdev->clk_smi_common); clk_disable_unprepare(kbdev->clk_display_scp); #endif } else if (0x335 == code) { // do something for Denali-2(6735M) #ifdef CONFIG_MTK_CLKMGR disable_clock( MT_CG_MFG_BG3D, "GPU"); disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU"); #endif /* CONFIG_MTK_CLKMGR */ } else if (0x337 == code) { // do something for Denali-3(6753) #ifdef CONFIG_MTK_CLKMGR disable_clock( MT_CG_MFG_BG3D, "GPU"); disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU"); #endif /* CONFIG_MTK_CLKMGR */ } else { // unknown chip ID, error !! #ifdef CONFIG_MTK_CLKMGR disable_clock( MT_CG_MFG_BG3D, "GPU"); disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU"); #endif /* CONFIG_MTK_CLKMGR */ } mt_gpufreq_voltage_enable_set(0); }
/*
 * ged_dvfs_policy - timer-based fallback DVFS policy.
 * Picks a new frequency index from the (optionally refreshed) GPU loading
 * using banded thresholds plus a loading-trend hysteresis step, clamped to
 * [0, table_num-1]. Writes the chosen index to *pui32NewFreqID.
 * Returns GED_TRUE when the chosen index differs from the current one.
 * t / phase / ul3DFenceDoneTime are not used by this policy.
 * Callers invoke this with gsDVFSLock held (loading globals are updated).
 *
 * Fix: the trend comparisons now cast the current index to int. The
 * candidate index may be negative at that point; comparing a negative int
 * against an unsigned promoted it to a huge value and pushed the
 * adjustment in the wrong direction.
 */
static bool ged_dvfs_policy(
	unsigned int ui32GPULoading, unsigned int* pui32NewFreqID,
	unsigned long t, long phase, unsigned long ul3DFenceDoneTime, bool bRefreshed)
{
#ifdef GED_DVFS_ENABLE
	int i32MaxLevel = (int)(mt_gpufreq_get_dvfs_table_num() - 1);
	unsigned int ui32GPUFreq = mt_gpufreq_get_cur_freq_index();
	int i32NewFreqID = (int)ui32GPUFreq;

	if(false==bRefreshed)
	{
		/* Fold the working period into the loading average since the
		 * last DVFS decision. */
		if(g_ulCalResetTS_us - g_ulPreDVFS_TS_us !=0)
			gpu_loading = (( gpu_loading * (g_ulCalResetTS_us - g_ulPreCalResetTS_us) ) + 100*g_ulWorkingPeriod_us ) / (g_ulCalResetTS_us - g_ulPreDVFS_TS_us);
		else
			gpu_loading = 0;
		g_ulPreDVFS_TS_us = g_ulCalResetTS_us;

		gpu_pre_loading = gpu_av_loading;
		ui32GPULoading = gpu_loading;
		gpu_av_loading = gpu_loading;
	}

	/* Banded step: the busier the GPU, the bigger the jump toward
	 * index 0 (highest frequency). */
	if (ui32GPULoading >= 99)
	{
		i32NewFreqID = 0;
	}
	else if (ui32GPULoading <= 1)
	{
		i32NewFreqID = i32MaxLevel;
	}
	else if (ui32GPULoading >= 85)
	{
		i32NewFreqID -= 2;
	}
	else if (ui32GPULoading <= 30)
	{
		i32NewFreqID += 2;
	}
	else if (ui32GPULoading >= 70)
	{
		i32NewFreqID -= 1;
	}
	else if (ui32GPULoading <= 50)
	{
		i32NewFreqID += 1;
	}

	/* Trend hysteresis: push one extra step on a sharp loading change.
	 * Cast to int — i32NewFreqID may be negative here, and a signed/
	 * unsigned comparison would promote it to a huge unsigned value. */
	if (i32NewFreqID < (int)ui32GPUFreq) {
		if (gpu_pre_loading * 17 / 10 < ui32GPULoading)
		{
			i32NewFreqID -= 1;
		}
	} else if (i32NewFreqID > (int)ui32GPUFreq) {
		if (ui32GPULoading * 17 / 10 < gpu_pre_loading)
		{
			i32NewFreqID += 1;
		}
	}

	/* Clamp into the valid table range. */
	if (i32NewFreqID > i32MaxLevel)
	{
		i32NewFreqID = i32MaxLevel;
	}
	else if (i32NewFreqID < 0)
	{
		i32NewFreqID = 0;
	}

	*pui32NewFreqID = (unsigned int)i32NewFreqID;
	g_ulWorkingPeriod_us = 0;

	return *pui32NewFreqID != ui32GPUFreq ? GED_TRUE : GED_FALSE;
#else
	return GED_FALSE;
#endif
}
/*
 * ged_dvfs_run - periodic DVFS main entry point.
 * @t:                  current timestamp (us) used as the calc-reset time.
 * @phase:              DVFS phase (GED_DVFS_FALLBACK selects the
 *                      timer-based policy when GED_DVFS_UM_CAL is built).
 * @ul3DFenceDoneTime:  3D fence completion time passed to the policy.
 *
 * Honors the enable switch, boost-hold skip rounds, and start-up skip
 * count; otherwise refreshes utilization and, via ged_dvfs_policy,
 * commits a new frequency if one is chosen. All under gsDVFSLock.
 */
void ged_dvfs_run(unsigned long t, long phase, unsigned long ul3DFenceDoneTime)
{
	bool bError;

	//ged_profile_dvfs_record_SW_vsync(t, phase, ul3DFenceDoneTime);

	mutex_lock(&gsDVFSLock);
	//gpu_pre_loading = gpu_loading;

	/* DVFS disabled: zero the published stats and bail out. */
	if (0 == gpu_dvfs_enable)
	{
		gpu_power = 0;
		gpu_loading = 0;
		gpu_block= 0;
		gpu_idle = 0;
		goto EXIT_ged_dvfs_run;
	}

	// SKIP for keeping boost freq
	if(g_dvfs_skip_round>0)
	{
		g_dvfs_skip_round--;
		goto EXIT_ged_dvfs_run;
	}

	if (g_iSkipCount > 0)
	{
		/* Start-up grace rounds: report zeros and count down. */
		gpu_power = 0;
		gpu_loading = 0;
		gpu_block= 0;
		gpu_idle = 0;
		g_iSkipCount -= 1;
	}
	else
	{
		/* Advance the measurement window and refresh utilization. */
		g_ulPreCalResetTS_us = g_ulCalResetTS_us;
		g_ulCalResetTS_us = t;
		bError=ged_dvfs_cal_gpu_utilization(&gpu_loading, &gpu_block, &gpu_idle);
#ifdef GED_DVFS_UM_CAL
		if(GED_DVFS_FALLBACK==phase) // timer-based DVFS use only
#endif
		{
			if (ged_dvfs_policy(gpu_loading, &g_ui32FreqIDFromPolicy, t, phase, ul3DFenceDoneTime, false))
			{
				g_computed_freq_id = g_ui32FreqIDFromPolicy;
				ged_dvfs_gpu_freq_commit(g_ui32FreqIDFromPolicy, GED_DVFS_DEFAULT_COMMIT);
			}
		}
	}

	if(gpu_debug_enable)
	{
#ifdef GED_DVFS_ENABLE
		GED_LOGE("%s:gpu_loading=%d %d, g_iSkipCount=%d",__func__, gpu_loading, mt_gpufreq_get_cur_freq_index(), g_iSkipCount);
#endif
	}

EXIT_ged_dvfs_run:
	mutex_unlock(&gsDVFSLock);
}
/*
 * pm_callback_power_off - Mali power-off callback (mtk_config variant).
 * Waits for the MFG (GPU) bus to go idle, hard-resets the GPU, records the
 * current frequency index for restore at power-on, drops to the lowest
 * frequency, gates clocks/voltage, and notifies GED of the clock-off.
 *
 * NOTE(review): the final #endif pairs with HARD_RESET_AT_POWER_OFF, so
 * everything from the hard reset through the power-off notification is
 * compiled out when that macro is 0 — confirm this is intended.
 * NOTE(review): polling_count is not re-armed before the second poll; if
 * the first poll timed out, the second do/while starts from a negative
 * count and loops for a very long time — consider resetting it to 100000.
 * NOTE(review): kbdev is dereferenced right after the NULL diagnostic —
 * the check only logs, it does not bail out.
 */
static void pm_callback_power_off(struct kbase_device *kbdev)
{
	unsigned int uiCurrentFreqCount;
	volatile int polling_count = 100000;
	volatile int i = 0;
	struct mtk_config *config;

	/* Diagnostic-only NULL checks: execution continues regardless. */
	if (!kbdev)
	{
		pr_alert("MALI: input parameter is NULL \n");
	}
	config = (struct mtk_config *)kbdev->mtk_config;
	if (!config)
	{
		pr_alert("MALI: mtk_config is NULL \n");
	}

	/// 1. Delay 0.01ms before power off
	for (i=0; i < DELAY_LOOP_COUNT;i++);
	if (DELAY_LOOP_COUNT != i)
	{
		pr_warn("[MALI] power off delay error!\n");
	}

	/// 2. Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);

	if (polling_count <=0)
	{
		pr_warn("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
	}

#if HARD_RESET_AT_POWER_OFF
	/* Cause a GPU hard reset to test whether we have actually idled the GPU
	 * and that we properly reconfigure the GPU on power up.
	 * Usually this would be dangerous, but if the GPU is working correctly it should
	 * be completely safe as the GPU should not be active at this point.
	 * However this is disabled normally because it will most likely interfere with
	 * bus logging etc.
	 */
	//KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
	kbase_os_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND), GPU_COMMAND_HARD_RESET);

	/// Polling the MFG_DEBUG_REG for checking GPU IDLE before MTCMOS power off (0.1ms)
	MFG_WRITE32(0x3, MFG_DEBUG_CTRL_REG);
	do {
		/// 0x13000184[2]
		/// 1'b1: bus idle
		/// 1'b0: bus busy
		if (MFG_READ32(MFG_DEBUG_STAT_REG) & MFG_BUS_IDLE_BIT)
		{
			/// printk("[MALI]MFG BUS already IDLE! Ready to power off, %d\n", polling_count);
			break;
		}
	} while (polling_count--);

	if (polling_count <=0)
	{
		printk("[MALI]!!!!MFG(GPU) subsys is still BUSY!!!!!, polling_count=%d\n", polling_count);
	}

	g_power_off_gpu_freq_idx = mt_gpufreq_get_cur_freq_index(); // record current freq. index.
	//printk("MALI: GPU power off freq idx : %d\n",g_power_off_gpu_freq_idx );
#if 1
	uiCurrentFreqCount = mt_gpufreq_get_dvfs_table_num();	// get freq. table size
	mt_gpufreq_target(uiCurrentFreqCount-1);		// set gpu to lowest freq.
#endif

	/* MTK clock modified */
#ifdef CONFIG_MTK_CLKMGR
	disable_clock( MT_CG_MFG_BG3D, "GPU");
	disable_clock( MT_CG_DISP0_SMI_COMMON, "GPU");
#endif

	/* Only drop the rail on PMIC E3-or-later silicon. */
	if(mt6325_upmu_get_swcid() >= PMIC6325_E3_CID_CODE)
	{
		mt_gpufreq_voltage_enable_set(0);
	}

#ifdef ENABLE_COMMON_DVFS
	ged_dvfs_gpu_clock_switch_notify(0);
#endif

	mtk_set_vgpu_power_on_flag(MTK_VGPU_POWER_OFF); // the power status is "power off".
#endif
}