static void mali_dvfs_event_proc(struct work_struct *w) { unsigned long flags; mali_dvfs_status *dvfs_status; struct rk_context *platform; mutex_lock(&mali_enable_clock_lock); dvfs_status = &mali_dvfs_status_current; if (!kbase_platform_dvfs_get_enable_status()) { mutex_unlock(&mali_enable_clock_lock); return; } platform = (struct rk_context *)dvfs_status->kbdev->platform_context; spin_lock_irqsave(&mali_dvfs_spinlock, flags); if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) { #if 0 if (dvfs_status->step==kbase_platform_dvfs_get_level(450)) { if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) dvfs_status->step++; BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } else { dvfs_status->step++; BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } #endif dvfs_status->step++; BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } else if((dvfs_status->step > 0) && (dvfs_status->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) //else if((dvfs_status->step > 0) && (platform->time_tick == MALI_DVFS_TIME_INTERVAL) && (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) { BUG_ON(dvfs_status->step <= 0); dvfs_status->step--; } #ifdef CONFIG_MALI_T6XX_FREQ_LOCK if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock)) { dvfs_status->step = dvfs_status->upper_lock; } if (dvfs_status->under_lock > 0) { if (dvfs_status->step < dvfs_status->under_lock) dvfs_status->step = dvfs_status->under_lock; } #endif spin_unlock_irqrestore(&mali_dvfs_spinlock, flags); kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step); mutex_unlock(&mali_enable_clock_lock); }
static void mali_dvfs_event_proc(struct work_struct *w) { unsigned long flags; mali_dvfs_status *dvfs_status; struct exynos_context *platform; mutex_lock(&mali_enable_clock_lock); dvfs_status = &mali_dvfs_status_current; if (!kbase_platform_dvfs_get_enable_status()) { mutex_unlock(&mali_enable_clock_lock); return; } platform = (struct exynos_context *)dvfs_status->kbdev->platform_context; #ifdef MALI_DVFS_ASV_ENABLE if (dvfs_status->asv_status==ASV_STATUS_DISABLE_REQ) { dvfs_status->asv_status=mali_dvfs_update_asv(ASV_CMD_DISABLE); } else if (dvfs_status->asv_status==ASV_STATUS_NOT_INIT) { dvfs_status->asv_status=mali_dvfs_update_asv(ASV_CMD_ENABLE); } #endif spin_lock_irqsave(&mali_dvfs_spinlock, flags); if (dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) { if (dvfs_status->step==kbase_platform_dvfs_get_level(450)) { if (platform->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) dvfs_status->step++; OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP); } else { dvfs_status->step++; OSK_ASSERT(dvfs_status->step < MALI_DVFS_STEP); } }else if ((dvfs_status->step>0) && (platform->time_tick == MALI_DVFS_TIME_INTERVAL) && (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) { OSK_ASSERT(dvfs_status->step > 0); dvfs_status->step--; } #ifdef CONFIG_MALI_T6XX_FREQ_LOCK if ((dvfs_status->upper_lock >= 0)&&(dvfs_status->step > dvfs_status->upper_lock)) { dvfs_status->step = dvfs_status->upper_lock; } if (dvfs_status->under_lock > 0) { if (dvfs_status->step < dvfs_status->under_lock) dvfs_status->step = dvfs_status->under_lock; } #endif spin_unlock_irqrestore(&mali_dvfs_spinlock, flags); kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step); mutex_unlock(&mali_enable_clock_lock); }
/*
 * update_time_in_state() - accumulate the time spent at @level.
 *
 * Adds the jiffies elapsed since the previous call into
 * mali_dvfs_infotbl[level].time. Fix vs. previous revision: the stale
 * timestamp is now dropped while DVFS is disabled, so the whole disabled
 * period is no longer charged to whichever level happens to be active
 * when DVFS is re-enabled.
 *
 * NOTE(review): not reentrant (static prev_time); presumably only called
 * from the serialised DVFS worker context -- confirm against callers.
 */
static void update_time_in_state(int level)
{
	u64 current_time;
	static u64 prev_time = 0;

	if (!kbase_platform_dvfs_get_enable_status()) {
		/* Forget the old timestamp so accounting restarts cleanly
		 * when DVFS is turned back on. */
		prev_time = 0;
		return;
	}

	if (prev_time == 0)
		prev_time = get_jiffies_64();

	current_time = get_jiffies_64();
	mali_dvfs_infotbl[level].time += current_time - prev_time;
	prev_time = current_time;
}
static void mali_dvfs_event_proc(struct work_struct *w) { unsigned long flags; mali_dvfs_status *dvfs_status; static int level_down_time = 0; static int level_up_time = 0; struct rk_context *platform; u32 fps=0; u32 fps_limit; u32 policy; mutex_lock(&mali_enable_clock_lock); dvfs_status = &mali_dvfs_status_current; if (!kbase_platform_dvfs_get_enable_status()) { mutex_unlock(&mali_enable_clock_lock); return; } platform = (struct rk_context *)dvfs_status->kbdev->platform_context; fps = 60;//rk_get_real_fps(0); spin_lock_irqsave(&mali_dvfs_spinlock, flags); /* policy = rockchip_pm_get_policy(); */ policy = ROCKCHIP_PM_POLICY_NORMAL; if(ROCKCHIP_PM_POLICY_PERFORMANCE == policy) { /* printk("policy : %d\n",policy); */ dvfs_status->step = MALI_DVFS_STEP - 1; /*Highest level when performance mode*/ } else { fps_limit = (ROCKCHIP_PM_POLICY_NORMAL == policy)?LIMIT_FPS : LIMIT_FPS_POWER_SAVE; /* printk("policy : %d , fps_limit = %d\n",policy,fps_limit); */ if ((dvfs_status->utilisation > mali_dvfs_infotbl[dvfs_status->step].max_threshold) && (dvfs_status->step < MALI_DVFS_STEP-1) && fps < fps_limit) { level_up_time++; if(level_up_time == MALI_DVFS_TIME_INTERVAL) { /* printk("up,utilisation=%d,current clock=%d,fps = %d",dvfs_status->utilisation,mali_dvfs_infotbl[dvfs_status->step].clock,fps); */ dvfs_status->step++; level_up_time = 0; /* printk(" next clock=%d\n",mali_dvfs_infotbl[dvfs_status->step].clock); */ BUG_ON(dvfs_status->step >= MALI_DVFS_STEP); } level_down_time = 0; } else if((dvfs_status->step > 0) && (dvfs_status->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) /*else if((dvfs_status->step > 0) && (platform->time_tick == MALI_DVFS_TIME_INTERVAL) && (platform->utilisation < mali_dvfs_infotbl[dvfs_status->step].min_threshold)) */ { level_down_time++; if(level_down_time==MALI_DVFS_TIME_INTERVAL) { /* printk("down,utilisation=%d,current clock=%d,fps = %d",dvfs_status->utilisation,mali_dvfs_infotbl[dvfs_status->step].clock,fps); */ 
BUG_ON(dvfs_status->step <= 0); dvfs_status->step--; level_down_time = 0; /* printk(" next clock=%d\n",mali_dvfs_infotbl[dvfs_status->step].clock); */ } level_up_time = 0; } else { level_down_time = 0; level_up_time = 0; /* printk("keep,utilisation=%d,current clock=%d,fps = %d\n",dvfs_status->utilisation,mali_dvfs_infotbl[dvfs_status->step].clock,fps); */ } } #ifdef CONFIG_MALI_MIDGARD_FREQ_LOCK if ((dvfs_status->upper_lock >= 0) && (dvfs_status->step > dvfs_status->upper_lock)) { dvfs_status->step = dvfs_status->upper_lock; } if (dvfs_status->under_lock > 0) { if (dvfs_status->step < dvfs_status->under_lock) dvfs_status->step = dvfs_status->under_lock; } #endif spin_unlock_irqrestore(&mali_dvfs_spinlock, flags); kbase_platform_dvfs_set_level(dvfs_status->kbdev, dvfs_status->step); mutex_unlock(&mali_enable_clock_lock); }