int gpu_dvfs_on_off(bool enable)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (enable && !platform->dvfs_status) {
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_set_target_clk_vol(platform->cur_clock, false);
		gpu_dvfs_handler_init(kbdev);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);

		gpu_dvfs_timer_control(true);
	} else if (!enable && platform->dvfs_status) {
		gpu_dvfs_timer_control(false);

		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_handler_deinit(kbdev);
		gpu_set_target_clk_vol(platform->gpu_dvfs_config_clock, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
	} else {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u,
				"%s: impossible state to change dvfs status (current: %d, request: %d)\n",
				__func__, platform->dvfs_status, enable);
		return -1;
	}

	return 0;
}
static int gpu_tmu_notifier(struct notifier_block *notifier, unsigned long event, void *v)
{
	unsigned long index;
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return NOTIFY_BAD;	/* notifier callbacks must return NOTIFY_* codes, not an errno */

	if (!platform->tmu_status)
		return NOTIFY_OK;

	platform->voltage_margin = 0;
	index = *(unsigned long *)v;

	if (event == GPU_COLD) {
		platform->voltage_margin = platform->gpu_default_vol_margin;
	} else if (event == GPU_NORMAL) {
		gpu_tmu_normal_work(pkbdev);
	} else if (event == GPU_THROTTLING || event == GPU_TRIPPING) {
		if (gpu_tmu_hot_check_and_work(pkbdev, event, index))
			GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: failed to handle tmu hot event\n", __func__);
	}

	GPU_LOG(DVFS_DEBUG, LSI_TMU_VALUE, 0u, event, "tmu event %ld\n", event);

	gpu_set_target_clk_vol(platform->cur_clock, false);

	return NOTIFY_OK;
}
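/*
 * A minimal registration sketch for the handler above, assuming the TMU
 * driver exposes a blocking notifier chain. The chain head name
 * (gpu_tmu_notifier_list) and the init hook are hypothetical; the real
 * chain is whatever the platform TMU code exports.
 */
static struct notifier_block gpu_tmu_nb = {
	.notifier_call = gpu_tmu_notifier,
};

static int gpu_tmu_notifier_register(void)
{
	/* hypothetical chain head exported by the TMU driver */
	extern struct blocking_notifier_head gpu_tmu_notifier_list;

	return blocking_notifier_chain_register(&gpu_tmu_notifier_list, &gpu_tmu_nb);
}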
static int pm_callback_change_dvfs_level(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	mali_bool enabledebug = MALI_FALSE;

	if (kbdev->vendor_callbacks->get_poweron_dbg)
		enabledebug = kbdev->vendor_callbacks->get_poweron_dbg();

	if (enabledebug)
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
#ifdef CONFIG_EXYNOS_ASV
				"asv table[%u] "
#endif
				"clk[%d to %d]MHz, vol[%d (margin : %d) real: %d]mV\n",
#ifdef CONFIG_EXYNOS_ASV
				exynos_get_table_ver(),
#endif
				gpu_get_cur_clock(platform), platform->gpu_dvfs_start_clock,
				gpu_get_cur_voltage(platform), platform->voltage_margin,
				platform->cur_voltage);

	gpu_set_target_clk_vol(platform->gpu_dvfs_start_clock, false);
	gpu_dvfs_reset_env_data(kbdev);

	return 0;
}
static ssize_t set_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int clk = 0;	/* kstrtoint() takes an int *, so this must not be unsigned */
	int ret, i, policy_count;
	static bool cur_state;
	const struct kbase_pm_policy *const *policy_list;
	static const struct kbase_pm_policy *prev_policy;
	static bool prev_tmu_status = true;
#ifdef CONFIG_MALI_DVFS
	static bool prev_dvfs_status = true;
#endif /* CONFIG_MALI_DVFS */
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	ret = kstrtoint(buf, 0, &clk);
	if (ret) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value\n", __func__);
		return -EINVAL;	/* bad input, not a missing entry (-ENOENT) */
	}

	if (!cur_state) {
		prev_tmu_status = platform->tmu_status;
#ifdef CONFIG_MALI_DVFS
		prev_dvfs_status = platform->dvfs_status;
#endif /* CONFIG_MALI_DVFS */
		prev_policy = kbase_pm_get_policy(pkbdev);
	}

	if (clk == 0) {
		/* writing 0 restores the policy and the DVFS/TMU state saved on entry */
		kbase_pm_set_policy(pkbdev, prev_policy);
		platform->tmu_status = prev_tmu_status;
#ifdef CONFIG_MALI_DVFS
		if (prev_dvfs_status && !platform->dvfs_status)
			gpu_dvfs_on_off(true);
#endif /* CONFIG_MALI_DVFS */
		cur_state = false;
	} else {
		/* pin the power policy to always_on while a fixed clock is requested */
		policy_count = kbase_pm_list_policies(&policy_list);
		for (i = 0; i < policy_count; i++) {
			if (sysfs_streq(policy_list[i]->name, "always_on")) {
				kbase_pm_set_policy(pkbdev, policy_list[i]);
				break;
			}
		}

		platform->tmu_status = false;
#ifdef CONFIG_MALI_DVFS
		if (platform->dvfs_status)
			gpu_dvfs_on_off(false);
#endif /* CONFIG_MALI_DVFS */
		gpu_set_target_clk_vol(clk, false);
		cur_state = true;
	}

	return count;
}
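/*
 * Wiring sketch for the store handler above, assuming a matching show_clock()
 * getter exists elsewhere in this file. The sysfs node name and permissions
 * are illustrative only; the real attribute table may differ.
 */
static ssize_t show_clock(struct device *dev, struct device_attribute *attr, char *buf);	/* assumed */

static DEVICE_ATTR(clock, S_IRUGO | S_IWUSR, show_clock, set_clock);

/* typically called from the platform init path, e.g. with kbdev->dev */
static int gpu_create_clock_sysfs_file(struct device *dev)
{
	return device_create_file(dev, &dev_attr_clock);
}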
static int pm_callback_runtime_on(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	if (!platform)
		return -ENODEV;

	GPU_LOG(DVFS_INFO, LSI_GPU_ON, 0u, 0u, "runtime on callback\n");

	gpu_control_enable_clock(kbdev);
#ifdef CONFIG_MALI_DVFS
	gpu_dvfs_start_env_data_gathering(kbdev);
	if (platform->dvfs_status && platform->wakeup_lock)
		gpu_set_target_clk_vol(platform->gpu_dvfs_start_clock, false);
	else
#endif /* CONFIG_MALI_DVFS */
		gpu_set_target_clk_vol(platform->cur_clock, false);

	return 0;
}
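/*
 * A minimal sketch of how this callback would be plugged into the Midgard
 * power-management callback table. Only pm_callback_runtime_on is taken from
 * this file; the remaining members of struct kbase_pm_callback_conf are
 * elided here and would be filled in by the real platform config.
 */
static struct kbase_pm_callback_conf pm_callbacks = {
	.power_runtime_on_callback = pm_callback_runtime_on,
	/* .power_on_callback, .power_off_callback, etc. elided */
};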
int gpu_dvfs_handler_init(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_wq)
		platform->dvfs_wq = create_singlethread_workqueue("g3d_dvfs");

	if (!platform->dvfs_status)
		platform->dvfs_status = true;

	gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_INIT);

	/* the second argument was missing in the original; false is assumed
	 * here for consistency with the other call sites in this file */
	gpu_set_target_clk_vol(platform->table[platform->step].clock, false);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "dvfs handler initialized\n");

	return 0;
}
static void gpu_dvfs_event_proc(struct work_struct *q)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform;

	platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	gpu_dvfs_calculate_env_data(kbdev);

#ifdef CONFIG_MALI_DVFS
	mutex_lock(&platform->gpu_dvfs_handler_lock);
	if (gpu_control_is_power_on(kbdev)) {
		int clk = 0;

		clk = gpu_dvfs_decide_next_freq(kbdev, platform->env_data.utilization);
		/* the second argument was missing in the original; false is
		 * assumed, as at the other call sites in this file */
		gpu_set_target_clk_vol(clk, false);
	}
	mutex_unlock(&platform->gpu_dvfs_handler_lock);
#endif /* CONFIG_MALI_DVFS */
}
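/*
 * Sketch of how the DVFS work above is typically kicked periodically.
 * DECLARE_WORK binds gpu_dvfs_event_proc to a work item; a timer path
 * (the timer itself and gpu_dvfs_timer_control() live elsewhere) would
 * requeue it on the single-threaded workqueue created in
 * gpu_dvfs_handler_init(). Names other than gpu_dvfs_event_proc and
 * platform->dvfs_wq are illustrative.
 */
static DECLARE_WORK(gpu_dvfs_work, gpu_dvfs_event_proc);

static void gpu_dvfs_timer_tick(struct exynos_context *platform)
{
	if (platform->dvfs_wq)
		queue_work(platform->dvfs_wq, &gpu_dvfs_work);
}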
static ssize_t set_tmu_control(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context;

	if (!platform)
		return -ENODEV;

	if (sysfs_streq("0", buf)) {
		if (platform->voltage_margin != 0) {
			platform->voltage_margin = 0;
			gpu_set_target_clk_vol(platform->cur_clock, false);
		}
		gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, TMU_LOCK, 0);
		platform->tmu_status = false;
	} else if (sysfs_streq("1", buf)) {
		platform->tmu_status = true;
	} else {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid value - only [0 or 1] is available\n", __func__);
	}

	return count;
}
int gpu_dvfs_clock_lock(gpu_dvfs_lock_command lock_command, gpu_dvfs_lock_type lock_type, int clock)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	int i;
	bool dirty = false;
	unsigned long flags;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_status)
		return 0;

	if ((lock_type < TMU_LOCK) || (lock_type >= NUMBER_LOCK)) {
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "%s: invalid lock type is called (%d)\n", __func__, lock_type);
		return -1;
	}

	switch (lock_command) {
	case GPU_DVFS_MAX_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (gpu_dvfs_get_level(clock) < 0) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "max lock error: invalid clock value %d\n", clock);
			return -1;
		}

		platform->user_max_lock[lock_type] = clock;
		platform->max_lock = clock;

		/* the effective max lock is the lowest of all per-type max locks */
		if (platform->max_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_max_lock[i] > 0)
					platform->max_lock = MIN(platform->max_lock, platform->user_max_lock[i]);
			}
		} else {
			platform->max_lock = clock;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->max_lock > 0) && (platform->cur_clock >= platform->max_lock))
			gpu_set_target_clk_vol(platform->max_lock, false);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MAX_LOCK, lock_type, clock,
				"lock max clk[%d], user lock[%d], current clk[%d]\n",
				platform->max_lock, platform->user_max_lock[lock_type], platform->cur_clock);
		break;
	case GPU_DVFS_MIN_LOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (gpu_dvfs_get_level(clock) < 0) {
			spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u, "min lock error: invalid clock value %d\n", clock);
			return -1;
		}

		platform->user_min_lock[lock_type] = clock;
		platform->min_lock = clock;

		/* the effective min lock is the highest of all per-type min locks */
		if (platform->min_lock > 0) {
			for (i = 0; i < NUMBER_LOCK; i++) {
				if (platform->user_min_lock[i] > 0)
					platform->min_lock = MAX(platform->min_lock, platform->user_min_lock[i]);
			}
		} else {
			platform->min_lock = clock;
		}

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		if ((platform->min_lock > 0) && (platform->cur_clock < platform->min_lock)
				&& (platform->min_lock <= platform->max_lock))
			gpu_set_target_clk_vol(platform->min_lock, false);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MIN_LOCK, lock_type, clock,
				"lock min clk[%d], user lock[%d], current clk[%d]\n",
				platform->min_lock, platform->user_min_lock[lock_type], platform->cur_clock);
		break;
	case GPU_DVFS_MAX_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		platform->user_max_lock[lock_type] = 0;
		platform->max_lock = platform->gpu_max_clock;

		/* recompute the effective max lock from the remaining holders */
		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_max_lock[i] > 0) {
				dirty = true;
				platform->max_lock = MIN(platform->user_max_lock[i], platform->max_lock);
			}
		}

		if (!dirty)
			platform->max_lock = 0;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MAX_LOCK, lock_type, clock, "unlock max clk\n");
		break;
	case GPU_DVFS_MIN_UNLOCK:
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);

		platform->user_min_lock[lock_type] = 0;
		platform->min_lock = platform->gpu_min_clock;

		/* recompute the effective min lock from the remaining holders */
		for (i = 0; i < NUMBER_LOCK; i++) {
			if (platform->user_min_lock[i] > 0) {
				dirty = true;
				platform->min_lock = MAX(platform->user_min_lock[i], platform->min_lock);
			}
		}

		if (!dirty)
			platform->min_lock = 0;

		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

		GPU_LOG(DVFS_DEBUG, LSI_GPU_MIN_LOCK, lock_type, clock, "unlock min clk\n");
		break;
	default:
		break;
	}

	return 0;
}
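/*
 * Usage sketch: how a caller (e.g. the TMU throttling path) might clamp and
 * later release the maximum GPU clock. The 266 MHz step is an arbitrary
 * illustrative value; it must be a clock that gpu_dvfs_get_level() accepts.
 */
static void example_thermal_clamp(void)
{
	/* cap the GPU at 266 MHz on behalf of the thermal unit */
	gpu_dvfs_clock_lock(GPU_DVFS_MAX_LOCK, TMU_LOCK, 266);

	/* ... temperature drops back to normal ... */

	/* drop the TMU's cap; caps held by other lock types remain in force */
	gpu_dvfs_clock_lock(GPU_DVFS_MAX_UNLOCK, TMU_LOCK, 0);
}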