/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
				 unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;
	int ret;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
		    (freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
			   (freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);

	if (!ret) {
		arch_set_freq_scale(policy->related_cpus, freqs_new,
				    policy->cpuinfo.max_freq);
	}

	return ret;
}
static unsigned int sunxi_clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu), rate;
#ifdef CONFIG_DEBUG_FS
	ktime_t calltime = ktime_set(0, 0), delta, rettime;
#endif

	mutex_lock(&cluster_lock[cur_cluster]);

#ifdef CONFIG_DEBUG_FS
	calltime = ktime_get();
#endif

	if (cur_cluster == A7_CLUSTER)
		clk_get_rate(clk_pll1);
	else if (cur_cluster == A15_CLUSTER)
		clk_get_rate(clk_pll2);

	rate = clk_get_rate(cluster_clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A15 clock rates */
	if (is_bL_switching_enabled()) {
		rate = VIRT_FREQ(cur_cluster, rate);
	}

#ifdef CONFIG_DEBUG_FS
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	if (cur_cluster == A7_CLUSTER)
		c0_get_time_usecs = ktime_to_ns(delta) >> 10;
	else if (cur_cluster == A15_CLUSTER)
		/* assumed per-cluster counter, mirroring c0_get_time_usecs above */
		c1_get_time_usecs = ktime_to_ns(delta) >> 10;
#endif

	mutex_unlock(&cluster_lock[cur_cluster]);

	return rate;
}
static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__,
			 per_cpu(cpu_last_req_freq, cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
		 cur_cluster, rate);

	return rate;
}
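Both rate-query paths above fold the physical clock rate into a "virtual" rate when the big.LITTLE switcher is active, via VIRT_FREQ(); the set-rate path further below uses ACTUAL_FREQ() for the inverse conversion. Those macros are not shown in this section. A minimal sketch of the usual convention (the A7 cluster presented as a half-speed A15), assuming the standard cluster numbering, could look like this:

/*
 * Sketch only, not taken from this source: assumed cluster ids and the
 * virtual/actual frequency helpers. In the virtual (switcher) scale the
 * A7 cluster is treated as a half-speed A15, so its rates are halved
 * when reported and doubled when programmed into the clock.
 */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#define ACTUAL_FREQ(cluster, freq)	((cluster) == A7_CLUSTER ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)	((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))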
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
			policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev);
		return ret;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
			clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
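When the switcher is enabled, cpu_to_cluster() collapses every CPU into the single virtual cluster id MAX_CLUSTERS; otherwise it defers to the physical topology through raw_cpu_to_cluster(), which this section does not show. A plausible one-line implementation, assumed here rather than quoted from this source, reads the package id from the CPU topology:

/* Assumed helper: resolve a CPU to its physical cluster via the topology. */
static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}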
static unsigned int bL_cpufreq_set_rate(u32 cpu, u32 old_cluster,
					u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
		 __func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (!ret) {
		/*
		 * FIXME: clk_set_rate hasn't returned an error here however it
		 * may be that clk_change_rate failed due to hardware or
		 * firmware issues and wasn't able to report that due to the
		 * current design of the clk core layer. To work around this
		 * problem we will read back the clock rate and check it is
		 * correct. This needs to be removed once clk core is fixed.
		 */
		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
			ret = -EIO;
	}

	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
		       new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
			 __func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
				 __func__, old_cluster, new_rate);

			if (clk_set_rate(clk[old_cluster], new_rate * 1000))
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
				       __func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}
static unsigned int bL_cpufreq_set_rate(u32 cpu, u32 old_cluster,
					u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
		 __func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
		       new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
			 __func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
				 __func__, old_cluster, new_rate);

			if (clk_set_rate(clk[old_cluster], new_rate * 1000))
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
				       __func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}
static unsigned int bL_cpufreq_set_rate(u32 cpu, u32 old_cluster,
					u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
		 __func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
		       new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
			 __func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
				 __func__, old_cluster, new_rate);

			if (clk_set_rate(clk[old_cluster], new_rate * 1000))
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
				       __func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	/*
	 * FIXME: clk_set_rate has to handle the case where clk_change_rate
	 * can fail due to hardware or firmware issues. Until the clk core
	 * layer is fixed, we can check here. In most of the cases we will
	 * be reading only the cached value anyway. This needs to be removed
	 * once clk core is fixed.
	 */
	if (bL_cpufreq_get_rate(cpu) != new_rate)
		return -EIO;

	return 0;
}
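Every variant of bL_cpufreq_set_rate() above programs the physical cluster to the highest frequency requested by any CPU currently mapped to it, via find_cluster_maxfreq(), which this section does not show. A minimal sketch, assuming the per-CPU cpu_last_req_freq and physical_cluster bookkeeping used above, could be:

/*
 * Sketch, not quoted from this source: pick the largest last-requested
 * frequency among online CPUs that currently sit on the given physical
 * cluster, so the shared cluster clock never undercuts any of them.
 */
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
		    (max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	return max_freq;
}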