int gpu_dvfs_governor_setting(struct exynos_context *platform, int governor_type) { #ifdef CONFIG_MALI_DVFS int i; #endif /* CONFIG_MALI_DVFS */ unsigned long flags; DVFS_ASSERT(platform); if ((governor_type < 0) || (governor_type >= G3D_MAX_GOVERNOR_NUM)) { GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid governor type (%d)\n", __func__, governor_type); return -1; } spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags); #ifdef CONFIG_MALI_DVFS platform->table = governor_info[governor_type].table; platform->table_size = governor_info[governor_type].table_size; platform->step = gpu_dvfs_get_level(governor_info[governor_type].start_clk); gpu_dvfs_get_next_level = (GET_NEXT_LEVEL)(governor_info[governor_type].governor); platform->env_data.utilization = 80; platform->max_lock = 0; platform->min_lock = 0; for (i = 0; i < NUMBER_LOCK; i++) { platform->user_max_lock[i] = 0; platform->user_min_lock[i] = 0; } platform->down_requirement = 1; platform->governor_type = governor_type; gpu_dvfs_init_time_in_state(); #else /* CONFIG_MALI_DVFS */ platform->table = (gpu_dvfs_info *)gpu_get_attrib_data(platform->attrib, GPU_GOVERNOR_TABLE_DEFAULT); platform->table_size = (u32)gpu_get_attrib_data(platform->attrib, GPU_GOVERNOR_TABLE_SIZE_DEFAULT); platform->step = gpu_dvfs_get_level(platform->gpu_dvfs_start_clock); #endif /* CONFIG_MALI_DVFS */ platform->cur_clock = platform->table[platform->step].clock; spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags); return 0; }
int gpu_dvfs_governor_init(struct kbase_device *kbdev, int governor_type) { unsigned long flags; #ifdef CONFIG_MALI_T6XX_DVFS int i, total = 0; #endif /* CONFIG_MALI_T6XX_DVFS */ struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; if (!platform) return -ENODEV; spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags); #ifdef CONFIG_MALI_T6XX_DVFS switch (governor_type) { case G3D_DVFS_GOVERNOR_DEFAULT: gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_default; platform->table = gpu_dvfs_infotbl_default; platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default); #if SOC_NAME == 5260 platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default; #endif /* SOC_NAME */ platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_DEFAULT); break; case G3D_DVFS_GOVERNOR_STATIC: gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_static; platform->table = gpu_dvfs_infotbl_default; platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default); #if SOC_NAME == 5260 platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default; #endif /* SOC_NAME */ platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_STATIC); break; case G3D_DVFS_GOVERNOR_BOOSTER: gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_booster; platform->table = gpu_dvfs_infotbl_default; platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default); #if SOC_NAME == 5260 platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default; #endif /* SOC_NAME */ platform->step = gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_BOOSTER); break; default: GPU_LOG(DVFS_WARNING, "[gpu_dvfs_governor_init] invalid governor type\n"); gpu_dvfs_get_next_freq = (GET_NEXT_FREQ)&gpu_dvfs_governor_default; platform->table = gpu_dvfs_infotbl_default; platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default); #if SOC_NAME == 5260 platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default; #endif /* SOC_NAME */ platform->step = 
gpu_dvfs_get_level(platform, G3D_GOVERNOR_DEFAULT_CLOCK_DEFAULT); break; } platform->utilization = 100; platform->target_lock_type = -1; platform->max_lock = 0; platform->min_lock = 0; #ifdef CONFIG_CPU_THERMAL_IPA gpu_ipa_dvfs_calc_norm_utilisation(kbdev); #endif /* CONFIG_CPU_THERMAL_IPA */ for (i = 0; i < NUMBER_LOCK; i++) { platform->user_max_lock[i] = 0; platform->user_min_lock[i] = 0; } platform->down_requirement = 1; platform->wakeup_lock = 0; platform->governor_type = governor_type; platform->governor_num = G3D_MAX_GOVERNOR_NUM; for (i = 0; i < G3D_MAX_GOVERNOR_NUM; i++) total += snprintf(platform->governor_list+total, sizeof(platform->governor_list), "[%d] %s\n", i, governor_list[i]); gpu_dvfs_init_time_in_state(platform); #else platform->table = gpu_dvfs_infotbl_default; platform->table_size = GPU_DVFS_TABLE_SIZE(gpu_dvfs_infotbl_default); #if SOC_NAME == 5260 platform->devfreq_g3d_asv_abb = gpu_abb_infobl_default; #endif /* SOC_NAME */ platform->step = gpu_dvfs_get_level(platform, MALI_DVFS_START_FREQ); #endif /* CONFIG_MALI_T6XX_DVFS */ platform->cur_clock = platform->table[platform->step].clock; /* asv info update */ gpu_dvfs_update_asv_table(platform, governor_type); spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags); return 1; }
/*
 * sysfs store handler: any write to the attribute resets the DVFS
 * time-in-state counters.  The written bytes themselves are ignored;
 * the whole input is consumed by reporting 'count' back.
 */
static ssize_t set_time_in_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	gpu_dvfs_init_time_in_state();

	return count;
}
/*
 * Central dispatcher for GPU DVFS control requests: on/off, governor
 * change, per-caller min/max frequency locks and their removal,
 * time-in-state accounting, and table lookups.
 *
 * Lock requests use platform->target_lock_type (set by the caller
 * beforehand) to select the user lock slot; it is reset to -1 here.
 * The effective max_lock is the MIN over all user max locks, the
 * effective min_lock the MAX over all user min locks.
 *
 * Returns 0 (or the lookup result for GET_LEVEL/GET_VOLTAGE) on
 * success, -ENODEV when the platform context is missing, and -1 for
 * invalid lock requests.
 */
int gpu_dvfs_handler_control(struct kbase_device *kbdev, gpu_dvfs_handler_command command, int param)
{
	int ret = 0;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	int i;
	bool dirty = false;
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (command) {
#ifdef CONFIG_MALI_MIDGARD_DVFS
	case GPU_HANDLER_DVFS_ON:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_OFF:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_GOVERNOR_CHANGE:
		/* Quiesce DVFS while the governor tables are swapped. */
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		gpu_dvfs_governor_init(kbdev, param);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		/* A max lock below the current effective min lock is invalid. */
		if ((platform->min_lock >= 0) && (param < platform->min_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] max lock Error: lock is smaller than min lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = param;
		platform->max_lock = param;

		/* Effective max lock = lowest of all active user max locks. */
		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		/* Clamp immediately if the GPU is currently running too fast. */
		if ((platform->max_lock > 0) && (platform->cur_clock > platform->max_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->max_lock);

		/* BUGFIX: log the max-lock slot; the original printed user_min_lock here. */
		GPU_LOG(DVFS_DEBUG, "[G3D] Lock max clk[%d], user lock[%d], current clk[%d]\n",
				platform->max_lock, platform->user_max_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		/* A min lock above the current effective max lock is invalid. */
		if ((platform->max_lock > 0) && (param > platform->max_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "min lock Error: the lock is larger than max lock\n");
			return -1;
		}

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = param;
		platform->min_lock = param;

		/* Effective min lock = highest of all active user min locks. */
		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = param;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		/* Raise immediately if the GPU is currently running too slow. */
		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->min_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock min clk[%d], user lock[%d], current clk[%d]\n",
				platform->min_lock, platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_max_lock[platform->target_lock_type] = 0;
		platform->max_lock = platform->table[platform->table_size-1].clock;

		/* Recompute the effective max lock from the remaining slots. */
		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0; /* no max locks remain */

		platform->target_lock_type = -1;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock max clk\n");
		break;
	case GPU_HANDLER_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}

		platform->user_min_lock[platform->target_lock_type] = 0;
		platform->min_lock = platform->table[0].clock;

		/* Recompute the effective min lock from the remaining slots. */
		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0; /* no min locks remain */

		platform->target_lock_type = -1;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock min clk\n");
		break;
	case GPU_HANDLER_INIT_TIME_IN_STATE:
		gpu_dvfs_init_time_in_state(platform);
		break;
	case GPU_HANDLER_UPDATE_TIME_IN_STATE:
		gpu_dvfs_update_time_in_state(platform, param);
		break;
	case GPU_HANDLER_DVFS_GET_LEVEL:
		ret = gpu_dvfs_get_level(platform, param);
		break;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	case GPU_HANDLER_DVFS_GET_VOLTAGE:
		ret = gpu_dvfs_get_voltage(platform, param);
		break;
	default:
		break;
	}

	return ret;
}