/* Enable or disable DVFS at runtime. Enabling re-applies the current
 * clock/voltage and (re)initializes the handler before starting the
 * timer; disabling stops the timer first, then tears the handler down
 * and drops back to the configured static clock. */
int gpu_dvfs_on_off(bool enable)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (enable && !platform->dvfs_status) {
		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_set_target_clk_vol(platform->cur_clock, false);
		gpu_dvfs_handler_init(kbdev);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);

		gpu_dvfs_timer_control(true);
	} else if (!enable && platform->dvfs_status) {
		gpu_dvfs_timer_control(false);

		mutex_lock(&platform->gpu_dvfs_handler_lock);
		gpu_dvfs_handler_deinit(kbdev);
		gpu_set_target_clk_vol(platform->gpu_dvfs_config_clock, false);
		mutex_unlock(&platform->gpu_dvfs_handler_lock);
	} else {
		/* Requested state matches the current state; nothing to do. */
		GPU_LOG(DVFS_WARNING, DUMMY, 0u, 0u,
				"%s: impossible state to change dvfs status (current: %d, request: %d)\n",
				__func__, platform->dvfs_status, enable);
		return -1;
	}

	return 0;
}
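/*
 * Illustrative only: gpu_dvfs_on_off() is the kind of entry point that is
 * typically driven from a sysfs toggle. The store handler below is a
 * minimal sketch under that assumption; the attribute name and the
 * surrounding device-attribute plumbing are hypothetical, not from this
 * driver.
 */
static ssize_t set_dvfs(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	if (sysfs_streq(buf, "0"))
		gpu_dvfs_on_off(false);
	else if (sysfs_streq(buf, "1"))
		gpu_dvfs_on_off(true);
	else
		return -EINVAL;

	return count;
}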
/* PM callback: re-arm the DVFS timer when the GPU powers back on and
 * clear any DVFS request left pending while it was off. */
static int pm_callback_dvfs_on(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	gpu_dvfs_timer_control(true);

	if (platform->dvfs_pending)
		platform->dvfs_pending = 0;

	return 0;
}
/* Locked wrapper around gpu_dvfs_timer_control(), for callers that do
 * not already hold gpu_dvfs_handler_lock. */
void gpu_dvfs_timer_control_locked(bool enable)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	DVFS_ASSERT(platform);

	mutex_lock(&platform->gpu_dvfs_handler_lock);
	gpu_dvfs_timer_control(enable);
	mutex_unlock(&platform->gpu_dvfs_handler_lock);
}
/* Runtime-PM suspend callback: stop utilization-data gathering and the
 * DVFS timer, then disable the clock here unless early clock gating is
 * active (in which case the clock is gated elsewhere). */
static void pm_callback_runtime_off(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;

	if (!platform)
		return;

	GPU_LOG(DVFS_INFO, LSI_GPU_OFF, 0u, 0u, "runtime off callback\n");

	gpu_dvfs_stop_env_data_gathering(kbdev);
#ifdef CONFIG_MALI_DVFS
	gpu_dvfs_timer_control(false);
	if (platform->dvfs_pending)
		platform->dvfs_pending = 0;
#endif /* CONFIG_MALI_DVFS */
	if (!platform->early_clk_gating_status)
		gpu_control_disable_clock(kbdev);
}
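/*
 * A minimal sketch of how pm_callback_runtime_off() would be registered
 * with the Midgard kbase PM layer. The field names follow the upstream
 * struct kbase_pm_callback_conf; the companion callbacks named here
 * (pm_callback_power_on, pm_callback_power_off, pm_callback_runtime_on)
 * are assumed from context rather than shown above.
 */
static struct kbase_pm_callback_conf pm_callbacks = {
	.power_on_callback = pm_callback_power_on,
	.power_off_callback = pm_callback_power_off,
#ifdef CONFIG_PM_RUNTIME
	.power_runtime_on_callback = pm_callback_runtime_on,
	.power_runtime_off_callback = pm_callback_runtime_off,
#endif /* CONFIG_PM_RUNTIME */
};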