static void mxr_vp_stream_set(struct mxr_layer *layer, int en) { exynos_cpufreq_lock_freq(en, MAX_CPU_FREQ); #ifdef CONFIG_ARM_EXYNOS4_BUS_DEVFREQ exynos4_busfreq_lock(!en); #endif mxr_reg_vp_layer_stream(layer->mdev, en); }
/*
 * Enable or disable streaming on the graphics layer, applying the
 * matching CPU/bus frequency locks (en != 0 -> start, en == 0 -> stop).
 */
static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
{
	struct mxr_device *mdev = layer->mdev;

	/* Pin the CPU frequency while the stream is active. */
	exynos_cpufreq_lock_freq(en, MAX_CPU_FREQ);
#ifdef CONFIG_BUSFREQ_OPP
	if (en)
		dev_lock(mdev->bus_dev, mdev->dev, BUSFREQ_400MHZ);
	else
		dev_unlock(mdev->bus_dev, mdev->dev);
#endif
#ifdef CONFIG_ARM_EXYNOS4_BUS_DEVFREQ
	/* NOTE(review): argument is inverted vs. 'en' -- confirm polarity. */
	exynos4_busfreq_lock(!en);
#endif
	/*
	 * Use the cached mdev here too: the original only touched it inside
	 * the CONFIG_BUSFREQ_OPP block, leaving an unused-variable warning
	 * on configs without it.
	 */
	mxr_reg_graph_layer_stream(mdev, layer->idx, en);
}
/*
 * Reference-counted bus-frequency lock for the framebuffer device.
 *
 * lock != 0: take a reference; the first locker raises the bus to L1.
 * lock == 0: drop a reference (only if one is pending, see busfreq_flag);
 *            the last unlocker releases the bus frequency lock.
 *
 * NOTE(review): atomic_read() followed by atomic_inc()/atomic_dec() is
 * not atomic as a pair -- concurrent callers could double-lock or
 * double-free; confirm all callers are serialized.
 * NOTE(review): busfreq_flag gates only ONE pending unlock, so nested
 * lock/lock/unlock/unlock sequences leave busfreq_lock_cnt at 1 and the
 * bus lock held forever -- verify callers never nest.
 */
void s3cfb_busfreq_lock(struct s3cfb_global *fbdev, unsigned int lock)
{
	if (lock) {
		/* First reference: raise the bus frequency to L1. */
		if (atomic_read(&fbdev->busfreq_lock_cnt) == 0) {
			exynos4_busfreq_lock(DVFS_LOCK_ID_LCD, BUS_L1);
			dev_info(fbdev->dev,
				"[%s] Bus Freq Locked L1\n", __func__);
		}
		atomic_inc(&fbdev->busfreq_lock_cnt);
		/* Mark that an unlock is now expected. */
		fbdev->busfreq_flag = true;
	} else {
		/* Only honour an unlock if a lock is actually pending. */
		if (fbdev->busfreq_flag == true) {
			atomic_dec(&fbdev->busfreq_lock_cnt);
			fbdev->busfreq_flag = false;
			if (atomic_read(&fbdev->busfreq_lock_cnt) == 0) {
				/* release Freq lock back to normal */
				exynos4_busfreq_lock_free(DVFS_LOCK_ID_LCD);
				dev_info(fbdev->dev,
					"[%s] Bus Freq lock Released Normal !!\n",
					__func__);
			}
		}
	}
}
/*
 * Hold a DVFS performance lock while the pen is in use.
 *
 * on  -> cancel any pending release and, if not already held, take the
 *        bus (config-dependent) and CPU frequency locks.
 * off -> if the lock is held, schedule a deferred release via dvfs_work
 *        instead of dropping it immediately.
 */
static void set_dvfs_lock(struct wacom_i2c *wac_i2c, bool on)
{
	if (!on) {
		/* Defer the release so bursts of pen events don't thrash. */
		if (wac_i2c->dvfs_lock_status)
			schedule_delayed_work(&wac_i2c->dvfs_work,
					      SEC_DVFS_LOCK_TIMEOUT * HZ);
		return;
	}

	/* Pen active again: abort any pending deferred release. */
	cancel_delayed_work(&wac_i2c->dvfs_work);

	if (wac_i2c->dvfs_lock_status)
		return;

#ifdef SEC_BUS_LOCK
#if defined(CONFIG_MACH_P4NOTE)
	dev_lock(wac_i2c->bus_dev, wac_i2c->dev, BUS_LOCK_FREQ);
#else
	exynos4_busfreq_lock(DVFS_LOCK_ID_PEN, BUS_L1);
#endif
#endif /* SEC_BUS_LOCK */
	exynos_cpufreq_lock(DVFS_LOCK_ID_PEN, wac_i2c->cpufreq_level);
	wac_i2c->dvfs_lock_status = true;
}
/*
 * Take CPU (level L4), bus (BUS_L0) and GPU frequency locks under the
 * rotation-booster DVFS id.
 *
 * NOTE(review): presumably paired with a rotation_booster_off() that
 * releases these three locks in reverse -- confirm the counterpart
 * exists and matches.
 */
static inline void rotation_booster_on(void)
{
	exynos_cpufreq_lock(DVFS_LOCK_ID_ROTATION_BOOSTER, L4);
	exynos4_busfreq_lock(DVFS_LOCK_ID_ROTATION_BOOSTER, BUS_L0);
	exynos_gpufreq_lock();
}