/*
 * Deferred work handler: applies the PP-core count and clock step that the
 * scaling decision logic selected.  Runs from the shared workqueue via
 * wq_work (armed with INIT_WORK in mali_core_scaling_init).
 */
static void do_scaling(struct work_struct *work)
{
	mali_dvfs_threshold_table *pdvfs = pmali_plat->dvfs_table;
	/* Apply the previously decided PP (pixel-processor) core count. */
	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
	scalingdbg(1, "set pp cores to %d\n", num_cores_enabled);
	MALI_DEBUG_ASSERT(0 == err);
	MALI_IGNORE(err); /* err is only consumed by the debug assert above */
	scalingdbg(1, "pdvfs[%d].freq_index=%d, pdvfs[%d].freq_index=%d\n",
		currentStep, pdvfs[currentStep].freq_index,
		lastStep, pdvfs[lastStep].freq_index);
	/* Commit the pending clock change ("exected" sic in the driver API). */
	mali_clk_exected();
#ifdef CONFIG_MALI400_PROFILING
	/* Report the new frequency/voltage state to the Mali profiler. */
	_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
		MALI_PROFILING_EVENT_CHANNEL_GPU |
		MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
		get_current_frequency(),
		0, 0, 0, 0);
#endif
}
/*
 * Initialize the (non-DVFS) core-scaling state from the platform data:
 * cache the platform descriptor, start at the default PP-core count and
 * clock step, and arm the deferred-scaling work item.
 *
 * NOTE(review): a second, longer definition of mali_core_scaling_init
 * (with gp_pll registration) appears later in this file; unless the two
 * sit in mutually exclusive preprocessor regions not visible here, this
 * is a duplicate symbol -- confirm against the full file.
 *
 * Returns 0 on success, -1 if @mali_plat is NULL.
 */
int mali_core_scaling_init(mali_plat_info_t *mali_plat)
{
#ifndef CONFIG_MALI_DVFS
	if (mali_plat == NULL) {
		scalingdbg(2, " Mali platform data is NULL!!!\n");
		return -1;
	}

	pmali_plat = mali_plat;
	num_cores_enabled = pmali_plat->sc_mpp; /* start at the default PP count */
	currentStep = pmali_plat->def_clock;
	lastStep = currentStep;
	INIT_WORK(&wq_work, do_scaling);
#endif
	return 0;
	/* NOTE: Mali is not fully initialized at this point. */
}
/*
 * Emit a one-line debug trace of a scaling decision: direction glyph,
 * current -> next frequency, GPU utilization, the current step's
 * down/up thresholds, and current -> next PP-core count.
 */
void trace_utilization(struct mali_gpu_utilization_data *data, u32 current_idx, u32 next, u32 current_pp, u32 next_pp)
{
	/* Default to '~' (holding); overwrite when the step moves. */
	char direction = '~';

	if (next > current_idx) {
		direction = '>';
	} else if ((next < current_idx) &&
		   (current_idx > pmali_plat->scale_info.minpp)) {
		/* NOTE(review): this compares a clock-step index against
		 * minpp (a PP-core bound), not minclk -- looks suspicious;
		 * confirm intent before changing. */
		direction = '<';
	}

	scalingdbg(2, "[SCALING]%c (%3d-->%3d)@%3d{%3d - %3d}. pp:(%d-->%d)\n",
		   direction,
		   get_mali_freq(current_idx), get_mali_freq(next),
		   data->utilization_gpu,
		   pmali_plat->dvfs_table[current_idx].downthreshold,
		   pmali_plat->dvfs_table[current_idx].upthreshold,
		   current_pp, next_pp);
}
/*
 * Initialize core-scaling state from the platform data and register this
 * driver as a user of the gp_pll clock source.
 *
 * NOTE(review): an earlier, shorter definition of mali_core_scaling_init
 * exists in this file; unless the two sit in mutually exclusive
 * preprocessor regions not visible here, this is a duplicate symbol --
 * confirm against the full file.
 *
 * Returns 0 on success, -1 if @mali_plat is NULL.  A failed gp_pll
 * registration is logged but not treated as fatal.
 */
int mali_core_scaling_init(mali_plat_info_t *mali_plat)
{
#ifndef CONFIG_MALI_DVFS
	if (mali_plat == NULL) {
		scalingdbg(2, " Mali platform data is NULL!!!\n");
		return -1;
	}

	pmali_plat = mali_plat;
	num_cores_enabled = pmali_plat->sc_mpp; /* start at the default PP count */

	gp_pll_user_gpu = gp_pll_user_register("gpu", 1, gp_pll_user_cb_gpu);
	/* PLL not acquired yet, so there is nothing to put back. */
	is_gp_pll_get = 0;
	is_gp_pll_put = 0;
	if (gp_pll_user_gpu == NULL)
		/* NOTE(review): printk without a KERN_ level; consider pr_err. */
		printk("register gp pll user for gpu failed\n");

	currentStep = pmali_plat->def_clock;
	lastStep = currentStep;
	INIT_WORK(&wq_work, do_scaling);
#endif
	return 0;
	/* NOTE: Mali is not fully initialized at this point. */
}
static void mali_decide_next_status(struct mali_gpu_utilization_data *data, int* next_fs_idx, int* pp_change_flag) { u32 utilization, mali_up_limit, decided_fs_idx; u32 ld_left, ld_right; u32 ld_up, ld_down; u32 change_mode; *pp_change_flag = 0; change_mode = 0; utilization = 255; mali_up_limit = (scaling_mode == MALI_TURBO_MODE) ? pmali_plat->turbo_clock : pmali_plat->scale_info.maxclk; decided_fs_idx = currentStep; ld_up = pmali_plat->dvfs_table[currentStep].upthreshold; ld_down = pmali_plat->dvfs_table[currentStep].downthreshold; scalingdbg(2, "utilization=%d, ld_up=%d\n ", utilization, ld_up); if (utilization >= ld_up) { /* go up */ scalingdbg(2, "currentStep=%d, mali_up_limit=%d\n ", currentStep, mali_up_limit); if (currentStep < mali_up_limit) { change_mode = 1; if ((currentStep < pmali_plat->def_clock) && (utilization > pmali_plat->bst_gpu)) decided_fs_idx = pmali_plat->def_clock; else decided_fs_idx++; } if ((data->utilization_pp >= ld_up) && (num_cores_enabled < pmali_plat->scale_info.maxpp)) { if ((num_cores_enabled < pmali_plat->sc_mpp) && (data->utilization_pp >= pmali_plat->bst_pp)) { *pp_change_flag = 1; change_mode = 1; } else if (change_mode == 0) { *pp_change_flag = 2; change_mode = 1; } } #if LOG_MALI_SCALING scalingdbg(2, "[nexting..] 
[LD:%d]-> FS[CRNT:%d LMT:%d NEXT:%d] PP[NUM:%d LMT:%d MD:%d][F:%d]\n", data->utilization_pp, currentStep, mali_up_limit, decided_fs_idx, num_cores_enabled, pmali_plat->scale_info.maxpp, *pp_change_flag, change_mode); #endif } else if (utilization <= ld_down) { /* go down */ if (mali_stay_count > 0) { *next_fs_idx = decided_fs_idx; mali_stay_count--; return; } if (num_cores_enabled > pmali_plat->sc_mpp) { change_mode = 1; if (data->utilization_pp <= ld_down) { ld_left = data->utilization_pp * num_cores_enabled; ld_right = (pmali_plat->dvfs_table[currentStep].upthreshold) * (num_cores_enabled - 1); if (ld_left < ld_right) { change_mode = 2; } } } else if (currentStep > pmali_plat->scale_info.minclk) { change_mode = 1; } else if (num_cores_enabled > 1) { /* decrease PPS */ if (data->utilization_pp <= ld_down) { ld_left = data->utilization_pp * num_cores_enabled; ld_right = (pmali_plat->dvfs_table[currentStep].upthreshold) * (num_cores_enabled - 1); scalingdbg(2, "ld_left=%d, ld_right=%d\n", ld_left, ld_right); if (ld_left < ld_right) { change_mode = 2; } } } if (change_mode == 1) { decided_fs_idx--; } else if (change_mode == 2) { /* decrease PPS */ *pp_change_flag = -1; } } if (change_mode) mali_stay_count = pmali_plat->dvfs_table[decided_fs_idx].keep_count; *next_fs_idx = decided_fs_idx; }
/*
 * Bring the number of enabled PP cores up to @num_cores while keeping the
 * current clock step.  Returns the result of set_mali_rt_clkpp().
 */
static u32 enable_pp_cores(u32 num_cores)
{
	u32 ret;

	scalingdbg(2, "meson: enable %d pp cores\n", num_cores);
	ret = set_mali_rt_clkpp(currentStep, num_cores, 0);

	return ret;
}
/*
 * Disable a single PP core while keeping the current clock step.
 * Returns the result of set_mali_rt_clkpp().
 */
static u32 disable_one_core(void)
{
	u32 remaining = num_cores_enabled - 1;

	scalingdbg(2, "meson: disable one pp, current has %d pp cores\n",
		   remaining);

	return set_mali_rt_clkpp(currentStep, remaining, 0);
}