static ssize_t show_time_in_state(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret = 0; int i; struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context; if (!platform) return -ENODEV; gpu_dvfs_update_time_in_state(gpu_control_is_power_on(pkbdev) * platform->cur_clock); for (i = gpu_dvfs_get_level(platform->gpu_min_clock); i >= gpu_dvfs_get_level(platform->gpu_max_clock); i--) { ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d %llu\n", platform->table[i].clock, platform->table[i].time); } if (ret >= PAGE_SIZE - 1) { buf[PAGE_SIZE-2] = '\n'; buf[PAGE_SIZE-1] = '\0'; ret = PAGE_SIZE-1; } return ret; }
/*
 * Refresh the hardware-counter based environment data used by DVFS.
 *
 * Skips silently (returns 0) when HW counter gathering is disabled or the
 * GPU is powered off. Returns 0 in all cases.
 */
static int gpu_dvfs_update_hwc(struct kbase_device *kbdev)
{
	unsigned long flags;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->hwcnt_gathering_status)
		return 0;

	if (!gpu_control_is_power_on(kbdev))
		return 0;

	/* Snapshot the HW counters under the hwcnt mutex. */
	mutex_lock(&kbdev->hwcnt.mlock);
	exynos_gpu_hwcnt_update(kbdev);
	mutex_unlock(&kbdev->hwcnt.mlock);

	/* NOTE(review): env_data.hwcnt is unconditionally cleared here and
	 * then immediately logged, so the log below always prints 0. It looks
	 * like this was meant to store a value derived from the update above
	 * — confirm against the original driver source. */
	spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
	platform->env_data.hwcnt = 0;
	spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "Current HWC: %d\n", platform->env_data.hwcnt);

	return 0;
}
static int gpu_dvfs_update_perf(struct kbase_device *kbdev) { unsigned long flags; unsigned int pmcnt; u64 perfmon; struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context; DVFS_ASSERT(platform); if (!platform->perf_gathering_status) return 0; if (!gpu_control_is_power_on(kbdev)) return 0; exynos_gpu_perf_update(&pmcnt); exynos_gpu_perf_reset(); perfmon = div_u64((u64)pmcnt*1000, platform->cur_clock); spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags); platform->env_data.perf = perfmon; spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags); GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "Current PERF: %d\n", platform->env_data.perf); return 0; }
/*
 * Periodic DVFS environment-data update, called from the DVFS worker.
 *
 * Always refreshes the utilization figure; the HW-counter path below is
 * additionally throttled by a countdown so it runs only once every
 * hwcnt_polling_speed worth of polling intervals. Returns 0 in all cases.
 */
int gpu_dvfs_calculate_env_data(struct kbase_device *kbdev)
{
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	/* Countdown (in ms of polling time) until the next HWC sample.
	 * static: persists across calls; not per-device — assumes a single
	 * GPU device, as does the global pkbdev. */
	static int polling_period = 0;

	DVFS_ASSERT(platform);

	gpu_dvfs_update_utilization(kbdev);

	polling_period -= platform->polling_speed;
	if (polling_period > 0)
		return 0; /* not yet time for a HWC sample */

	/* Skip counter access entirely while dynamic voltage scaling mode
	 * is active. */
	if (platform->dvs_is_enabled == true)
		return 0;

#ifdef MALI_SEC_HWCNT
	if (kbdev->hwcnt.is_hwcnt_attach == true && kbdev->hwcnt.is_hwcnt_enable == true
			&& kbdev->hwcnt.is_hwcnt_gpr_enable == false) {
		/* Re-arm the countdown. NOTE(review): when MALI_SEC_HWCNT is
		 * not defined (or this condition is false) the countdown is
		 * never re-armed and just decrements past zero each call. */
		polling_period = platform->hwcnt_polling_speed;
		if (!gpu_control_is_power_on(kbdev))
			return 0;
		mutex_lock(&kbdev->hwcnt.mlock);
		if (kbdev->vendor_callbacks->hwcnt_update) {
			kbdev->vendor_callbacks->hwcnt_update(kbdev);
			dvfs_hwcnt_get_resource(kbdev);
			dvfs_hwcnt_utilization_equation(kbdev);
		}
		mutex_unlock(&kbdev->hwcnt.mlock);
	}
#endif

	return 0;
}
/*
 * Set the GPU to the requested clock (validated against the DVFS table)
 * and the matching voltage (with margin, floored at the cold minimum).
 *
 * @clk:                requested clock in the DVFS table's units
 * @pending_is_allowed: when true and DVS mode is active, a down-clock is
 *                      deferred (recorded in dvfs_pending) instead of
 *                      being applied immediately.
 *
 * Returns 0 on success, -1 if the GPU is powered off or @clk does not map
 * to a valid table entry; otherwise the result of gpu_update_cur_level().
 */
int gpu_set_target_clk_vol(int clk, bool pending_is_allowed)
{
	int ret = 0, target_clk = 0, target_vol = 0;
	int prev_clk = 0;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!gpu_control_is_power_on(pkbdev)) {
		GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "%s: can't set clock and voltage in the power-off state!\n", __func__);
		return -1;
	}

	mutex_lock(&platform->gpu_clock_lock);
#ifdef CONFIG_MALI_DVFS
	if (pending_is_allowed && platform->dvs_is_enabled) {
		/* In DVS mode, defer a down-clock; an up-clock request
		 * cancels any pending deferral. */
		if (!platform->dvfs_pending && clk < platform->cur_clock) {
			platform->dvfs_pending = clk;
			/* Fixed broken log message: closing ']' was missing. */
			GPU_LOG(DVFS_DEBUG, DUMMY, 0u, 0u, "pending to change the clock [%d -> %d]\n",
					platform->cur_clock, platform->dvfs_pending);
		} else if (clk > platform->cur_clock) {
			platform->dvfs_pending = 0;
		}
		mutex_unlock(&platform->gpu_clock_lock);
		return 0;
	} else {
		platform->dvfs_pending = 0;
	}
#endif /* CONFIG_MALI_DVFS */

	target_clk = gpu_check_target_clock(platform, clk);
	if (target_clk < 0) {
		mutex_unlock(&platform->gpu_clock_lock);
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u,
				"%s: mismatch clock error (source %d, target %d)\n", __func__, clk, target_clk);
		return -1;
	}

	/* Apply the voltage margin but never drop below the cold-boot
	 * minimum voltage. */
	target_vol = MAX(gpu_dvfs_get_voltage(target_clk) + platform->voltage_margin, platform->cold_min_vol);

	prev_clk = gpu_get_cur_clock(platform);

	GPU_SET_CLK_VOL(kbdev, prev_clk, target_clk, target_vol);

	ret = gpu_update_cur_level(platform);

	mutex_unlock(&platform->gpu_clock_lock);

	GPU_LOG(DVFS_INFO, DUMMY, 0u, 0u, "clk[%d -> %d], vol[%d (margin : %d)]\n",
			prev_clk, gpu_get_cur_clock(platform), gpu_get_cur_voltage(platform), platform->voltage_margin);

	return ret;
}
static ssize_t show_clock(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret = 0; struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context; if (!platform) return -ENODEV; if (platform->dvs_is_enabled) ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * platform->cur_clock); else ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * gpu_get_cur_clock(platform)); if (ret < PAGE_SIZE - 1) { ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n"); } else { buf[PAGE_SIZE-2] = '\n'; buf[PAGE_SIZE-1] = '\0'; ret = PAGE_SIZE-1; } return ret; }
static ssize_t show_perf(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret = 0; struct exynos_context *platform = (struct exynos_context *)pkbdev->platform_context; if (!platform) return -ENODEV; ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d", gpu_control_is_power_on(pkbdev) * platform->env_data.perf); if (ret < PAGE_SIZE - 1) { ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n"); } else { buf[PAGE_SIZE-2] = '\n'; buf[PAGE_SIZE-1] = '\0'; ret = PAGE_SIZE-1; } return ret; }
/*
 * DVFS worker: recompute the environment data, then (when DVFS is built
 * in and the GPU is powered) decide and apply the next frequency under
 * the DVFS handler lock.
 */
static void gpu_dvfs_event_proc(struct work_struct *q)
{
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform;

	platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	gpu_dvfs_calculate_env_data(kbdev);

#ifdef CONFIG_MALI_DVFS
	mutex_lock(&platform->gpu_dvfs_handler_lock);
	if (gpu_control_is_power_on(kbdev)) {
		int clk = 0;
		clk = gpu_dvfs_decide_next_freq(kbdev, platform->env_data.utilization);
		/* Fixed: gpu_set_target_clk_vol() takes (clk, pending_is_allowed);
		 * the call was missing the second argument. The periodic worker
		 * applies the clock immediately, so pending is not allowed. */
		gpu_set_target_clk_vol(clk, false);
	}
	mutex_unlock(&platform->gpu_dvfs_handler_lock);
#endif /* CONFIG_MALI_DVFS */
}