/*
 * cpufreq_max_limit_store() - sysfs store handler for the user max-freq limit.
 *
 * Parses a decimal frequency (kHz) from @buf, or -1 to release the user
 * limit.  Per the Yank555.lu change, "unlock" does not drop the limit but
 * re-applies it at the current policy's scaling max, so powersave mode
 * keeps working.  Any other value is validated via get_cpufreq_level() and
 * installed as a DVFS upper limit.
 *
 * Returns @n (bytes consumed) on success, -EINVAL on a malformed value.
 * All state changes are serialized by cpufreq_limit_mutex.
 */
static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	int lock_ret;
	ssize_t ret = -EINVAL;
	struct cpufreq_policy *policy;

	mutex_lock(&cpufreq_limit_mutex);

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_max_limit_val != -1) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_USER);
			/* Yank555.lu - unlock now means set lock to scaling max
			   to support powersave mode properly */
			/* cpufreq_max_limit_val = -1; */
			policy = cpufreq_cpu_get(0);
			if (policy) {
				if (get_cpufreq_level(policy->max,
						&cpufreq_level)
						== VALID_LEVEL) {
					lock_ret = exynos_cpufreq_upper_limit(
							DVFS_LOCK_ID_USER,
							cpufreq_level);
					cpufreq_max_limit_val = policy->max;
					cpufreq_max_limit_coupled =
							SCALING_MAX_COUPLED;
				}
				/* Balance the reference taken by
				   cpufreq_cpu_get(); it was previously
				   leaked on every unlock. */
				cpufreq_cpu_put(policy);
			} else {
				/* No policy available (was dereferenced
				   unchecked before): nothing to re-apply,
				   report the limit as fully released. */
				cpufreq_max_limit_val = -1;
			}
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val, &cpufreq_level)
				== VALID_LEVEL) {
			if (cpufreq_max_limit_val != -1) {
				/* Unlock the previous lock */
				exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_USER);
				/* if a limit existed, uncouple */
				cpufreq_max_limit_coupled =
						SCALING_MAX_UNCOUPLED;
			} else {
				/* if no limit existed, we're booting, couple */
				cpufreq_max_limit_coupled =
						SCALING_MAX_COUPLED;
			}
			lock_ret = exynos_cpufreq_upper_limit(
					DVFS_LOCK_ID_USER, cpufreq_level);
			/* ret of exynos_cpufreq_upper_limit is meaningless.
			   0 is fail? success? */
			cpufreq_max_limit_val = val;
		} else /* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
	}

	ret = n;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}
/*
 * cpufreq_min_limit_store() - sysfs store handler for the user min-freq limit.
 *
 * Parses a decimal frequency (kHz) from @buf, or -1 to release the user
 * minimum.  The limit is applied through the PRCMU QOS framework rather
 * than a DVFS lock.  A max lock takes priority: if the requested minimum
 * exceeds the active maximum, the QOS value is clamped to the maximum and
 * min_replacement is set so the min can be restored later.
 *
 * Returns @n (bytes consumed) on success, -EINVAL on a malformed value.
 *
 * NOTE(review): unlike the sibling exynos handlers, this path takes no
 * mutex around the cpufreq_min/max_limit_val updates — presumably the
 * prcmu_qos layer serializes internally; confirm before relying on it.
 */
static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	ssize_t ret = -EINVAL;

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_min_limit_val != -1) {
			/* Reset lock value to default */
			cpufreq_min_limit_val = -1;

			/* Update PRCMU QOS value to default */
			prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
				"power", PRCMU_QOS_DEFAULT_VALUE);

			/* Clear replacement flag */
			min_replacement = 0;
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val, &cpufreq_level,
				DVFS_MIN_LOCK_REQ) == VALID_LEVEL) {
			cpufreq_min_limit_val = val;

			/* Max lock has higher priority than Min lock */
			if (cpufreq_max_limit_val != -1 &&
			    cpufreq_min_limit_val > cpufreq_max_limit_val) {
				printk(KERN_ERR "%s: Min lock forced to %d"
					" because of Max lock\n",
					__func__, cpufreq_max_limit_val);

				/* Update PRCMU QOS value to max value */
				prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					"power", cpufreq_max_limit_val);

				/* Set replacement flag */
				min_replacement = 1;
			} else {
				/* Update PRCMU QOS value to new value */
				prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ,
					"power", cpufreq_min_limit_val);
			}
		} else /* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
	}

	ret = n;
out:
	return ret;
}
/*
 * cpufreq_max_limit_store() - sysfs store handler for the user max-freq limit.
 *
 * Parses a decimal frequency (kHz) from @buf, or -1 to release the user
 * limit via exynos_cpufreq_upper_limit_free().
 *
 * NOTE(review): lock requests are only honored when val < 1400000, and any
 * such request is then forced to 1000000 kHz before validation — presumably
 * a deliberate clamp (anti-overclock / thermal cap); confirm intent.
 * Requests >= 1400000 are silently ignored yet still return success (@n),
 * which callers cannot distinguish from an applied limit.
 *
 * Returns @n on success (including the silently-ignored case above),
 * -EINVAL on a malformed value.  Serialized by cpufreq_limit_mutex.
 */
static ssize_t cpufreq_max_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	int lock_ret;
	ssize_t ret = -EINVAL;
	struct cpufreq_policy *policy;

	mutex_lock(&cpufreq_limit_mutex);

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_max_limit_val != -1) {
			exynos_cpufreq_upper_limit_free(DVFS_LOCK_ID_USER);
			cpufreq_max_limit_val = -1;
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		/* See NOTE(review) above: clamp + silent ignore of >= 1400000 */
		if (val < 1400000) {
			val = 1000000;
			if (get_cpufreq_level((unsigned int)val,
					&cpufreq_level) == VALID_LEVEL) {
				if (cpufreq_max_limit_val != -1)
					/* Unlock the previous lock */
					exynos_cpufreq_upper_limit_free(
						DVFS_LOCK_ID_USER);
				lock_ret = exynos_cpufreq_upper_limit(
						DVFS_LOCK_ID_USER,
						cpufreq_level);
				/* ret of exynos_cpufreq_upper_limit is
				   meaningless. 0 is fail? success? */
				cpufreq_max_limit_val = val;
			} else /* Invalid lock request --> No action */
				printk(KERN_ERR "%s: Lock request is invalid\n",
					__func__);
		}
	}

	ret = n;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}
/*
 * cpufreq_min_limit_store() - sysfs store handler for the user min-freq limit.
 *
 * Parses a decimal frequency (kHz) from @buf, or -1 to release the user
 * minimum.  The actual DVFS min-lock calls are deliberately disabled
 * (commented out below); the handler only records the requested value in
 * cpufreq_min_limit_val after validating it with get_cpufreq_level(), and
 * warns when the min would exceed an active max lock.
 *
 * Returns @n (bytes consumed) on success, -EINVAL on a malformed value.
 * Serialized by cpufreq_limit_mutex.
 *
 * (Removed the unused `int lock_ret;` local — its only uses were inside
 * the commented-out lock calls, leaving an unused-variable warning.)
 */
static ssize_t cpufreq_min_limit_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	int val;
	unsigned int cpufreq_level;
	ssize_t ret = -EINVAL;

	mutex_lock(&cpufreq_limit_mutex);

	if (sscanf(buf, "%d", &val) != 1) {
		printk(KERN_ERR "%s: Invalid cpufreq format\n", __func__);
		goto out;
	}

	if (val == -1) { /* Unlock request */
		if (cpufreq_min_limit_val != -1) {
			/* DVFS unlock intentionally disabled: */
			// exynos_cpufreq_lock_free(DVFS_LOCK_ID_USER);
			cpufreq_min_limit_val = -1;
		} else /* Already unlocked */
			printk(KERN_ERR "%s: Unlock request is ignored\n",
				__func__);
	} else { /* Lock request */
		if (get_cpufreq_level((unsigned int)val, &cpufreq_level)
				== VALID_LEVEL) {
			/* DVFS min lock intentionally disabled: */
			// if (cpufreq_min_limit_val != -1)
			//	/* Unlock the previous lock */
			//	exynos_cpufreq_lock_free(DVFS_LOCK_ID_USER);
			// lock_ret = exynos_cpufreq_lock(
			//		DVFS_LOCK_ID_USER, cpufreq_level);
			/* ret of exynos_cpufreq_lock is meaningless.
			   0 is fail? success? */
			cpufreq_min_limit_val = val;
			if ((cpufreq_max_limit_val != -1) &&
			    (cpufreq_min_limit_val > cpufreq_max_limit_val))
				printk(KERN_ERR "%s: Min lock may not work well"
					" because of Max lock\n", __func__);
		} else /* Invalid lock request --> No action */
			printk(KERN_ERR "%s: Lock request is invalid\n",
				__func__);
	}

	ret = n;
out:
	mutex_unlock(&cpufreq_limit_mutex);
	return ret;
}