int gpu_set_target_clk_vol_pending(int clk) { int ret = 0, target_clk = 0, target_vol = 0; int prev_clk = 0; struct kbase_device *kbdev = pkbdev; struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; DVFS_ASSERT(platform); target_clk = gpu_check_target_clock(platform, clk); if (target_clk < 0) { GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk); return -1; } target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol); prev_clk = gpu_get_cur_clock(platform); GPU_SET_CLK_VOL(kbdev, platform->cur_clock, target_clk, target_vol); GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "pending clk[%d -> %d], vol[%d (margin : %d)]\n", prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin); ret = gpu_update_cur_level(platform); return ret; }
static int gpu_tmu_notifier(struct notifier_block *notifier, unsigned long event, void *v) { struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context; if (!platform) return -ENODEV; if (!platform->tmu_status) return NOTIFY_OK; platform->voltage_margin = 0; if (event == GPU_COLD) { platform->voltage_margin = platform->gpu_default_vol_margin; } else if (event == GPU_NORMAL) { gpu_tmu_normal_work(pkbdev); } else if (event >= GPU_THROTTLING1 && event <= GPU_TRIPPING) { if (gpu_tmu_hot_check_and_work(pkbdev, event)) GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to open device", __func__); } GPU_LOG(DVFS_DEBUG, LSI_TMU_VALUE, 0u, event, "tmu event %ld\n", event); gpu_control_set_voltage_locked(pkbdev, MAX(gpu_dvfs_get_voltage(platform->cur_clock) + platform->voltage_margin, platform->cold_min_vol)); return NOTIFY_OK; }
/*
 * Set the GPU clock and voltage to the level matching @clk.
 *
 * @clk: requested clock in the DVFS table domain.
 * @pending_is_allowed: when DVS is enabled, a *down*-clock request is not
 *	applied immediately but recorded in platform->dvfs_pending; an
 *	up-clock request cancels any pending change.
 *
 * Returns the result of gpu_update_cur_level(), 0 when the request was
 * pended, or -1 on power-off / clock mismatch.
 *
 * Fix: the "pending to change the clock" log string was missing its
 * closing ']' ("[%d -> %d\n"), unlike the matching final log line.
 */
int gpu_set_target_clk_vol(int clk, bool pending_is_allowed)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	/* Clock/voltage registers must not be touched with the GPU powered down. */
	if (!gpu_control_is_power_on(pkbdev)) {
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock and voltage in the power-off state!\n", __func__);
		return -1;
	}

	mutex_lock(&platform->gpu_clock_lock);
#ifdef CONFIG_MALI_DVFS
	if (pending_is_allowed && platform->dvs_is_enabled) {
		/* Defer down-clocking while DVS is active; apply it later. */
		if (!platform->dvfs_pending && clk < platform->cur_clock) {
			platform->dvfs_pending = clk;
			GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "pending to change the clock [%d -> %d]\n",
				platform->cur_clock, platform->dvfs_pending);
		} else if (clk > platform->cur_clock) {
			/* An up-clock request supersedes any pending down-clock. */
			platform->dvfs_pending = 0;
		}
		mutex_unlock(&platform->gpu_clock_lock);
		return 0;
	} else {
		platform->dvfs_pending = 0;
	}
#endif /* CONFIG_MALI_DVFS */

	/* Snap the request onto a valid table entry; negative means no match. */
	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		mutex_unlock(&platform->gpu_clock_lock);
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

	/* Voltage for the target clock plus TMU margin, floored at the cold-boot minimum. */
	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);

	prev_clk = gpu_get_cur_clock(platform);

	GPU_SET_CLK_VOL(kbdev, prev_clk, target_clk, target_vol);
	ret = gpu_update_cur_level(platform);

	mutex_unlock(&platform->gpu_clock_lock);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "clk[%d -> %d], vol[%d (margin : %d)]\n",
		prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	return ret;
}
/*
 * Apply a previously pended clock change (CL-DVFS-aware variant).
 *
 * @clk: requested clock in the DVFS table domain.
 * Returns the result of gpu_update_cur_level(), or -1 on a clock mismatch.
 *
 * NOTE(review): unlike gpu_set_target_clk_vol() this path takes no
 * gpu_clock_lock — presumably the caller already holds it or the context
 * makes it safe; confirm against call sites.
 */
int gpu_set_target_clk_vol_pending(int clk)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	int level = 0;
#endif

	DVFS_ASSERT(platform);

	/* Snap the request onto a valid table entry; negative means no match. */
	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

	/* Voltage for the target clock plus TMU margin, floored at the cold minimum... */
	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);
	/* ...and capped at table[0].voltage (presumably the highest-level entry — confirm table ordering). */
	target_vol = target_vol < (int) platform->table[0].voltage ? target_vol : (int) platform->table[0].voltage;

	prev_clk = gpu_get_cur_clock(platform);

#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	/* Closed-loop DVFS must be stopped before the clock/voltage transition. */
	level = gpu_dvfs_get_level(clk);
	exynos7420_cl_dvfs_stop(ID_G3D, level);
#endif

	GPU_SET_CLK_VOL(kbdev, platform->cur_clock, target_clk, target_vol);
	ret = gpu_update_cur_level(platform);

#ifdef CONFIG_EXYNOS_CL_DVFS_G3D
	/* Restart closed-loop DVFS only with no thermal margin applied and the
	 * new clock at/above the configured CL-DVFS start threshold. */
	if (!platform->voltage_margin && platform->cl_dvfs_start_base && platform->cur_clock >= platform->cl_dvfs_start_base)
		exynos7420_cl_dvfs_start(ID_G3D);
#endif

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "pending clk[%d -> %d], vol[%d (margin : %d)]\n",
		prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	return ret;
}
/*
 * sysfs store handler toggling GPU TMU (thermal) control.
 *
 * "1" enables TMU handling; "0" disables it, drops any active voltage
 * margin (re-programming the voltage for the current clock, floored at
 * the cold minimum) and releases the TMU max-clock lock. Any other
 * input is rejected with a warning. Always consumes @count bytes.
 */
static ssize_t set_tmu_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;
	int vol = 0;

	if (!platform)
		return -ENODEV;

	if (sysfs_streq("1", buf)) {
		platform->tmu_status = true;
	} else if (sysfs_streq("0", buf)) {
		/* Undo the thermal voltage margin before turning TMU handling off. */
		if (platform->voltage_margin != 0) {
			platform->voltage_margin = 0;
			vol = MAX(gpu_dvfs_get_voltage(platform->cur_clock), platform->cold_min_vol);
			gpu_control_set_voltage_locked(pkbdev, vol);
			GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "we set the voltage: %d\n", vol);
		}
		gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, TMU_LOCK, 0);
		platform->tmu_status = false;
	} else {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value - only [0 or 1] is available\n", __func__);
	}

	return count;
}
/*
 * Central dispatcher for GPU DVFS handler commands.
 *
 * @kbdev:   device; its platform_context must be a valid exynos_context.
 * @command: operation selector (on/off, governor change, lock/unlock, ...).
 * @param:   command argument (clock for locks, governor id, level, ...).
 *
 * Returns 0 for most commands, -1 on invalid lock requests, -ENODEV with
 * no platform context, or the queried value for the GET_* commands.
 *
 * Locking: on/off and governor changes serialize on gpu_dvfs_handler_lock;
 * lock/unlock bookkeeping is under gpu_dvfs_spinlock.
 * NOTE(review): the post-lock GPU_LOG calls read max_lock/user_*_lock and
 * target_lock_type after the spinlock is dropped — racy but log-only.
 *
 * Fixes: rejoined the "Lock max clk..." format string that had been split
 * by a stray newline, and the MAX_LOCK log now reports user_max_lock
 * (it mistakenly printed user_min_lock, a copy-paste from the MIN path).
 */
int gpu_dvfs_handler_control(struct kbase_device *kbdev, gpu_dvfs_handler_command command, int param)
{
	int ret = 0;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	int i;
	bool dirty = false;
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	switch (command) {
#ifdef CONFIG_MALI_MIDGARD_DVFS
	case GPU_HANDLER_DVFS_ON:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_OFF:
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_GOVERNOR_CHANGE:
		/* Governor swap must happen with DVFS quiesced. */
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_on_off(kbdev, false);
		gpu_dvfs_governor_init(kbdev, param);
		gpu_dvfs_on_off(kbdev, true);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
		break;
	case GPU_HANDLER_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		/* A max lock below the effective min lock is contradictory. */
		if ((platform->min_lock >= 0) && (param < platform->min_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "[G3D] max lock Error: lock is smaller than min lock\n");
			return -1;
		}
		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}
		platform->user_max_lock[platform->target_lock_type] = param;
		platform->max_lock = param;
		/* Effective max lock is the smallest positive per-owner request. */
		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = param;
		}
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		/* Clamp the running clock down immediately if it now exceeds the lock. */
		if ((platform->max_lock > 0) && (platform->cur_clock > platform->max_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->max_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock max clk[%d], user lock[%d], current clk[%d]\n",
			platform->max_lock, platform->user_max_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		/* A min lock above the effective max lock is contradictory. */
		if ((platform->max_lock > 0) && (param > platform->max_lock)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, "min lock Error: the lock is larger than max lock\n");
			return -1;
		}
		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}
		platform->user_min_lock[platform->target_lock_type] = param;
		platform->min_lock = param;
		/* Effective min lock is the largest positive per-owner request. */
		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = param;
		}
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		/* Raise the running clock immediately if it is now below the lock. */
		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock))
			gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->min_lock);

		GPU_LOG(DVFS_DEBUG, "[G3D] Lock min clk[%d], user lock[%d], current clk[%d]\n",
			platform->min_lock, platform->user_min_lock[platform->target_lock_type], platform->cur_clock);

		platform->target_lock_type = -1;
		break;
	case GPU_HANDLER_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}
		platform->user_max_lock[platform->target_lock_type] = 0;
		/* Recompute the effective max from the remaining owners;
		 * 0 means "no max lock active". */
		platform->max_lock = platform->table[platform->table_size-1].clock;
		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}
		if (!dirty)
			platform->max_lock = 0;
		platform->target_lock_type = -1;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock max clk\n");
		break;
	case GPU_HANDLER_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if ((platform->target_lock_type < TMU_LOCK) || (platform->target_lock_type >= NUMBER_LOCK)) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			return -1;
		}
		platform->user_min_lock[platform->target_lock_type] = 0;
		/* Recompute the effective min from the remaining owners;
		 * 0 means "no min lock active". */
		platform->min_lock = platform->table[0].clock;
		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}
		if (!dirty)
			platform->min_lock = 0;
		platform->target_lock_type = -1;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
		GPU_LOG(DVFS_DEBUG, "[G3D] Unlock min clk\n");
		break;
	case GPU_HANDLER_INIT_TIME_IN_STATE:
		gpu_dvfs_init_time_in_state(platform);
		break;
	case GPU_HANDLER_UPDATE_TIME_IN_STATE:
		gpu_dvfs_update_time_in_state(platform, param);
		break;
	case GPU_HANDLER_DVFS_GET_LEVEL:
		ret = gpu_dvfs_get_level(platform, param);
		break;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	case GPU_HANDLER_DVFS_GET_VOLTAGE:
		ret = gpu_dvfs_get_voltage(platform, param);
		break;
	default:
		break;
	}

	return ret;
}