/*
 * Decide the next DVFS step for the GPU from the current utilisation
 * sample, under mali_dvfs_spinlock.  Only selects dvfs_status->step; the
 * caller is responsible for actually programming the new clock level.
 */
static void mali_dvfs_decide_next_level(mali_dvfs_status *dvfs_status)
{
	unsigned long flags;
	struct exynos_context *platform;

	platform = (struct exynos_context *)dvfs_status->kbdev->platform_context;

#ifdef MALI_DVFS_ASV_ENABLE
	/* Apply any pending ASV (adaptive supply voltage) enable/disable
	 * request before choosing a level. */
	if (dvfs_status->asv_status == ASV_STATUS_DISABLE_REQ) {
		dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_DISABLE);
	} else if (dvfs_status->asv_status == ASV_STATUS_NOT_INIT) {
		dvfs_status->asv_status = mali_dvfs_update_asv(ASV_CMD_ENABLE);
	}
#endif
	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
#ifdef CONFIG_EXYNOS_THERMAL
	/* At the maximum clock, unconditionally step down one level and skip
	 * the utilisation comparison — presumably a thermal precaution (TODO
	 * confirm).  The min/max lock clamping below still runs via "skip". */
	if (dvfs_status->step == kbase_platform_dvfs_get_level(GPU_MAX_CLK)) {
		dvfs_status->step--;
		goto skip;
	}
#endif
	if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
#ifdef PLATFORM_UTILIZATION
		/* At the "500" level, additionally require the platform-wide
		 * utilisation to exceed the threshold before stepping up. */
		if (dvfs_status->step == kbase_platform_dvfs_get_level(500)) {
			if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
				dvfs_status->step++;
				DVFS_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
			}
		} else {
#endif
			/* Busy: step up one level. */
			dvfs_status->step++;
			DVFS_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
#ifdef PLATFORM_UTILIZATION
		}
#endif
	} else if ((dvfs_status->step > 0) &&
		   (platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
		   (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
		/* Idle: step down one level.  NOTE(review): this assert is
		 * redundant — the condition above already guarantees step > 0. */
		DVFS_ASSERT(dvfs_status->step > 0);
		dvfs_status->step--;
	}
#ifdef CONFIG_EXYNOS_THERMAL
skip:
#endif
#ifdef CONFIG_MALI_T6XX_FREQ_LOCK
	/* Clamp the chosen step into the currently held [min_lock, max_lock]
	 * range; a negative lock value means "no lock held". */
	if (dvfs_status->min_lock > 0) {
		if (dvfs_status->step < dvfs_status->min_lock)
			dvfs_status->step = dvfs_status->min_lock;
	}
	if ((dvfs_status->max_lock >= 0) && (dvfs_status->step > dvfs_status->max_lock)) {
		dvfs_status->step = dvfs_status->max_lock;
	}
#endif
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
}
/*
 * Release the minimum-frequency lock held by @user_lock and recompute the
 * effective min_lock from the remaining owners.  If no owner still holds a
 * request, min_lock is cleared to -1.  The trailing printk runs regardless
 * of CONFIG_MALI_T6XX_FREQ_LOCK, matching the lock/unlock log pairing.
 */
void mali_dvfs_freq_min_unlock(gpu_lock_type user_lock)
{
#ifdef CONFIG_MALI_T6XX_FREQ_LOCK
	unsigned long flags;
	int idx;
	int active = 0;

	spin_lock_irqsave(&mali_dvfs_spinlock, flags);

	/* Reject lock owners outside the known range. */
	if (user_lock < TMU_LOCK || user_lock >= NUMBER_LOCK) {
		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
		return;
	}

	/* Drop this owner's request, then rebuild the aggregate floor by
	 * scanning every remaining per-owner request. */
	mali_dvfs_status_current.user_min_lock[user_lock] = -1;
	mali_dvfs_status_current.min_lock = kbase_platform_dvfs_get_level(GPU_MAX_CLK);

	for (idx = 0; idx < NUMBER_LOCK; idx++) {
		int req = mali_dvfs_status_current.user_min_lock[idx];

		if (req == -1)
			continue;
		active++;
		mali_dvfs_status_current.min_lock =
			MIN(mali_dvfs_status_current.min_lock, req);
	}

	/* No owner left: no floor at all. */
	if (active == 0)
		mali_dvfs_status_current.min_lock = -1;

	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);
#endif
	printk("[G3D] Unlock min clk\n");
}
static void mali_dvfs_event_proc(struct work_struct *w) { unsigned long flags; mali_dvfs_status *dvfs_status; struct rk_context *platform; mutex_lock(&mali_enable_clock_lock); dvfs_status = &mali_dvfs_status_current; if (!kbase_platform_dvfs_get_enable_status()) { mutex_unlock(&mali_enable_clock_lock); return; } platform = (struct rk_context *)dvfs_status->kbdev->platform_context; spin_lock_irqsave(&mali_dvfs_spinlock, flags); if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) { #if 0 if (dvfs_status->step==kbase_platform_dvfs_get_level(450)) { if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) dvfs_status->step++; BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } else { dvfs_status->step++; BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } #endif dvfs_status->step++; BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } else if((dvfs_status->step > 0) && (dvfs_status->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) //else if((dvfs_status->step > 0) && (platform->time_tick == MALI_DVFS_TIME_INTERVAL) && (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) { BUG_ON(dvfs_status->step <= 0); dvfs_status->step--; } #ifdef CONFIG_MALI_T6XX_FREQ_LOCK if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock)) { dvfs_status->step = dvfs_status->upper_lock; } if (dvfs_status->under_lock > 0) { if (dvfs_status->step < dvfs_status->under_lock) dvfs_status->step = dvfs_status->under_lock; } #endif spin_unlock_irqrestore(&mali_dvfs_spinlock, flags); kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step); mutex_unlock(&mali_enable_clock_lock); }
/*
 * DVFS worker (Exynos variant): process any pending ASV request, then
 * re-evaluate the GPU clock step from the latest utilisation sample and
 * program the new level.
 */
static void mali_dvfs_event_proc(struct work_struct *w)
{
	unsigned long flags;
	mali_dvfs_status *dvfs_status;
	struct exynos_context *platform;

	mutex_lock(&mali_enable_clock_lock);
	dvfs_status = &mali_dvfs_status_current;

	/* DVFS may have been disabled while this work item was queued. */
	if (!kbase_platform_dvfs_get_enable_status()) {
		mutex_unlock(&mali_enable_clock_lock);
		return;
	}

	platform = (struct exynos_context *)dvfs_status->kbdev->platform_context;
#ifdef MALI_DVFS_ASV_ENABLE
	/* Apply any pending ASV (adaptive supply voltage) enable/disable
	 * request before choosing a level. */
	if (dvfs_status->asv_status==ASV_STATUS_DISABLE_REQ) {
		dvfs_status->asv_status=mali_dvfs_update_asv(ASV_CMD_DISABLE);
	} else if (dvfs_status->asv_status==ASV_STATUS_NOT_INIT) {
		dvfs_status->asv_status=mali_dvfs_update_asv(ASV_CMD_ENABLE);
	}
#endif
	spin_lock_irqsave(&mali_dvfs_spinlock, flags);
	if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) {
		/* At the "450" level, additionally require the platform-wide
		 * utilisation to exceed the threshold before stepping up.
		 * NOTE(review): the inner if has no braces, so the assert runs
		 * unconditionally — the sibling implementation brackets both
		 * statements inside the if; confirm which is intended. */
		if (dvfs_status->step==kbase_platform_dvfs_get_level(450)) {
			if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold)
				dvfs_status->step++;
			OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
		} else {
			/* Busy: step up one level. */
			dvfs_status->step++;
			OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP);
		}
	} else if ((dvfs_status->step>0) &&
		   (platform->time_tick == MALI_DVFS_TIME_INTERVAL) &&
		   (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) {
		/* Idle: step down one level (assert is redundant — the
		 * condition above already guarantees step > 0). */
		OSK_ASSERT(dvfs_status->step > 0);
		dvfs_status->step--;
	}
#ifdef CONFIG_MALI_T6XX_FREQ_LOCK
	/* Clamp into the currently held [under_lock, upper_lock] range;
	 * a negative upper_lock means "no ceiling". */
	if ((dvfs_status->upper_lock >= 0)&&(dvfs_status->step > dvfs_status->upper_lock)) {
		dvfs_status->step = dvfs_status->upper_lock;
	}
	if (dvfs_status->under_lock > 0) {
		if (dvfs_status->step < dvfs_status->under_lock)
			dvfs_status->step = dvfs_status->under_lock;
	}
#endif
	spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

	/* Program the clock outside the spinlock but under the enable mutex. */
	kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
	mutex_unlock(&mali_enable_clock_lock);
}
/*
 * Enable or disable the DVFS metrics timer (and the matching memory-bus
 * bandwidth request), optionally forcing an immediate GPU frequency when
 * @freq is not MALI_DVFS_CURRENT_FREQ.  Always returns MALI_TRUE.
 */
int kbase_platform_dvfs_enable(bool enable, int freq)
{
	unsigned long flags;
	int mem_freq;
	mali_dvfs_status *dvfs_status = &mali_dvfs_status_current;
	struct kbase_device *kbdev = mali_dvfs_status_current.kbdev;
	struct exynos_context *platform;

	BUG_ON(kbdev == NULL);
	platform = (struct exynos_context *)kbdev->platform_context;

	mutex_lock(&mali_enable_clock_lock);

	/* Toggle the metrics timer only on an actual state change. */
	if (enable != kbdev->pm.metrics.timer_active) {
		if (!enable) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_FALSE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_cancel(&kbdev->pm.metrics.timer);
			/* Drop the memory-bus bandwidth request. */
			exynos5_bus_mif_update(mem_freq_req, 0);
		} else {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_TRUE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_start(&kbdev->pm.metrics.timer,
				      HR_TIMER_DELAY_MSEC(KBASE_PM_DVFS_FREQUENCY),
				      HRTIMER_MODE_REL);
			/* Restore the bus request for the current step. */
			mem_freq = mali_dvfs_infotbl[dvfs_status->step].mem_freq;
			exynos5_bus_mif_update(mem_freq_req, mem_freq);
		}
	}

	/* A concrete frequency was requested: reset the utilisation
	 * accounting and jump straight to that level. */
	if (freq != MALI_DVFS_CURRENT_FREQ) {
		spin_lock_irqsave(&mali_dvfs_spinlock, flags);
		platform->time_tick = 0;
		platform->time_busy = 0;
		platform->time_idle = 0;
		platform->utilisation = 0;
		dvfs_status->step = kbase_platform_dvfs_get_level(freq);
		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

		kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
	}

	mutex_unlock(&mali_enable_clock_lock);
	return MALI_TRUE;
}
/*
 * TMU (thermal management unit) event handler: translate a GPU throttling
 * or tripping event into a DVFS maximum-frequency lock.
 *
 * Bug fix: the GPU_TRIPPING case was missing a "break" and fell through
 * into "default: return 0;", so the most severe thermal event never
 * applied its frequency lock.  Also removed the clk_get_rate() result,
 * which was stored but never used, and moved the declarations under the
 * #ifdef that uses them.
 *
 * Returns 0 on success or for unknown events, -ENODEV if the platform
 * context or its aclk_g3d clock is missing.
 */
int kbase_tmu_hot_check_and_work(unsigned long event)
{
#ifdef CONFIG_MALI_T6XX_DVFS
	struct kbase_device *kbdev;
	mali_dvfs_status *dvfs_status;
	struct exynos_context *platform;
	int lock_level;

	dvfs_status = &mali_dvfs_status_current;
	kbdev = dvfs_status->kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;
	if (!platform->aclk_g3d)
		return -ENODEV;

	switch (event) {
	case GPU_THROTTLING1:
		lock_level = GPU_THROTTLING_90_95;
		printk("[G3D] GPU_THROTTLING_90_95\n");
		break;
	case GPU_THROTTLING2:
		lock_level = GPU_THROTTLING_95_100;
		printk("[G3D] GPU_THROTTLING_95_100\n");
		break;
	case GPU_THROTTLING3:
		lock_level = GPU_THROTTLING_100_105;
		printk("[G3D] GPU_THROTTLING_100_105\n");
		break;
	case GPU_THROTTLING4:
		lock_level = GPU_THROTTLING_105_110;
		printk("[G3D] GPU_THROTTLING_105_110\n");
		break;
	case GPU_TRIPPING:
		lock_level = GPU_TRIPPING_110;
		printk("[G3D] GPU_THROTTLING_110\n");
		break;	/* was missing: fell through and skipped the lock */
	default:
		/* Unknown event: nothing to do. */
		return 0;
	}

	mali_dvfs_freq_max_lock(kbase_platform_dvfs_get_level(lock_level), TMU_LOCK);
#endif
	return 0;
}
/*
 * Enable or disable the DVFS metrics timer (and the matching PM-QoS
 * requests on the MIF/INT buses and CPU), optionally forcing an immediate
 * GPU frequency when @freq is not MALI_DVFS_CURRENT_FREQ.  Unlike the
 * sibling exynos5_bus variant, the frequency change is applied BEFORE the
 * timer toggle here.  Always returns MALI_TRUE.
 */
int kbase_platform_dvfs_enable(bool enable, int freq)
{
	mali_dvfs_status *dvfs_status;
	struct kbase_device *kbdev;
	unsigned long flags;
	struct exynos_context *platform;
	int mif_qos, int_qos, cpu_qos;

	dvfs_status = &mali_dvfs_status_current;
	kbdev = mali_dvfs_status_current.kbdev;
	KBASE_DEBUG_ASSERT(kbdev != NULL);
	platform = (struct exynos_context *)kbdev->platform_context;

	mutex_lock(&mali_enable_clock_lock);

	/* A concrete frequency was requested: reset the utilisation
	 * accounting and jump straight to that level. */
	if (freq != MALI_DVFS_CURRENT_FREQ) {
		spin_lock_irqsave(&mali_dvfs_spinlock, flags);
		platform->time_tick = 0;
		platform->time_busy = 0;
		platform->time_idle = 0;
		platform->utilisation = 0;
		dvfs_status->step = kbase_platform_dvfs_get_level(freq);
		spin_unlock_irqrestore(&mali_dvfs_spinlock, flags);

		/* Only the start frequency is clamped into the held
		 * [min_lock, max_lock] range (-1 means "no lock"). */
		if (freq == MALI_DVFS_START_FREQ) {
			if (dvfs_status->min_lock != -1)
				dvfs_status->step = MAX(dvfs_status->min_lock, dvfs_status->step);
			if (dvfs_status->max_lock != -1)
				dvfs_status->step = MIN(dvfs_status->max_lock, dvfs_status->step);
		}
		kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step);
	}

	/* Toggle the metrics timer only on an actual state change. */
	if (enable != kbdev->pm.metrics.timer_active) {
		if (enable) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_TRUE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(KBASE_PM_DVFS_FREQUENCY), HRTIMER_MODE_REL);

			DVFS_ASSERT(dvfs_status->step >= 0);
			/* Re-assert the QoS requests for the current step. */
			mif_qos = mali_dvfs_infotbl[dvfs_status->step].mem_freq;
			int_qos = mali_dvfs_infotbl[dvfs_status->step].int_freq;
			cpu_qos = mali_dvfs_infotbl[dvfs_status->step].cpu_freq;
#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
			pm_qos_update_request(&exynos5_g3d_mif_qos, mif_qos);
			pm_qos_update_request(&exynos5_g3d_int_qos, int_qos);
			pm_qos_update_request(&exynos5_g3d_cpu_qos, cpu_qos);
#endif
		} else {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = MALI_FALSE;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
			hrtimer_cancel(&kbdev->pm.metrics.timer);
			/* Drop all QoS requests while DVFS is disabled. */
#if defined(CONFIG_ARM_EXYNOS5420_BUS_DEVFREQ)
			pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
			pm_qos_update_request(&exynos5_g3d_int_qos, 0);
			pm_qos_update_request(&exynos5_g3d_cpu_qos, 0);
#endif
		}
	}

	mutex_unlock(&mali_enable_clock_lock);
	return MALI_TRUE;
}