static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n",
			       __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}
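/*
 * Both put_cluster_clk_and_freq_table() above and its get_ counterpart
 * below key off cpu_to_cluster() and a per-cluster refcount. For
 * reference, the upstream arm_big_little driver lays out the cluster
 * constants and the mapping roughly as below: with the in-kernel
 * switcher (IKS) enabled, every CPU maps to one virtual cluster
 * numbered MAX_CLUSTERS, which is why "cluster < MAX_CLUSTERS"
 * distinguishes the physical from the virtual case. Treat this as a
 * sketch of that layout, not a verbatim copy.
 */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

/* physical cluster of a CPU, straight from the topology code */
static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

/* fold everything into the virtual cluster when the switcher is on */
static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}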
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
					  const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n",
			       __func__, i);
			return -ENODEV;
		}

		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming two clusters, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
		 __func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n",
			       __func__, i);
			return -ENODEV;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}
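/*
 * get_cluster_clk_and_freq_table() picks the switcher thresholds via
 * get_table_min()/get_table_max(). A minimal sketch of those helpers,
 * assuming the per-cluster tables are not sorted (so the whole table
 * must be scanned) and that the cpufreq_for_each_entry() iterator is
 * available:
 */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;

	return min_freq;
}

static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	u32 max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;

	return max_freq;
}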
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
				 unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;
	int ret;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	/* with IKS, migrate to the cluster that can serve the request */
	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
		    (freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
			   (freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);

	if (!ret) {
		arch_set_freq_scale(policy->related_cpus, freqs_new,
				    policy->cpuinfo.max_freq);
	}

	return ret;
}
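/*
 * The clk_big_min/clk_little_max comparisons above, like the VIRT_FREQ()
 * use in get_cluster_clk_and_freq_table(), are done in the "virtual"
 * frequency scale used while the switcher is enabled: A7 frequencies are
 * halved so both clusters fit in one monotonic table. The upstream
 * driver expresses the conversion with a pair of macros along these
 * lines:
 */
#define ACTUAL_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq >> 1 : freq)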
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
	int cur_cluster = cpu_to_cluster(policy->cpu);

	/* Do not register a cpu_cooling device if we are in IKS mode */
	if (cur_cluster >= MAX_CLUSTERS)
		return;

	cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	/* populate policy->cpus before it is passed down as the cpumask */
	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
			policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
		return ret;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
			clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
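/*
 * For context, the callbacks in this file are tied together through a
 * cpufreq_driver instance registered with cpufreq_register_driver(). A
 * sketch of that wiring, assuming the generic verify/attr helpers and a
 * bL_cpufreq_get_rate() getter defined elsewhere in the driver:
 */
static struct cpufreq_driver bL_cpufreq_driver = {
	.name		= "arm-big-little",
	.flags		= CPUFREQ_STICKY |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= bL_cpufreq_set_target,
	.get		= bL_cpufreq_get_rate,
	.init		= bL_cpufreq_init,
	.exit		= bL_cpufreq_exit,
	.ready		= bL_cpufreq_ready,
	.attr		= cpufreq_generic_attr,
};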
static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int cur_cluster = cpu_to_cluster(policy->cpu);

	if (cur_cluster < MAX_CLUSTERS) {
		cpufreq_cooling_unregister(cdev[cur_cluster]);
		cdev[cur_cluster] = NULL;
	}

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}
int store_data(double time, int state, int cpu, struct cpuidle_datas *datas)
{
	struct cpuidle_cstates *cstates = &datas->cstates[cpu];
	struct cpufreq_pstate *pstate = datas->pstates[cpu].pstate;
	struct cpu_core *aff_core;
	struct cpu_physical *aff_cluster;

	/* ignore when we got a "closing" state first */
	if (state == -1 && cstates->cstate_max == -1)
		return 0;

	if (record_cstate_event(cstates, time, state) == -1)
		return -1;

	/* Update P-state stats if supported */
	if (pstate) {
		if (state == -1)
			cpu_pstate_running(datas, cpu, time);
		else
			cpu_pstate_idle(datas, cpu, time);
	}

	/* Update core and cluster */
	aff_core = cpu_to_core(cpu, datas->topo);
	state = core_get_least_cstate(aff_core);
	if (record_cstate_event(aff_core->cstates, time, state) == -1)
		return -1;

	aff_cluster = cpu_to_cluster(cpu, datas->topo);
	state = cluster_get_least_cstate(aff_cluster);
	if (record_cstate_event(aff_cluster->cstates, time, state) == -1)
		return -1;

	return 0;
}
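/*
 * store_data() folds per-CPU C-state changes up into core- and
 * cluster-level events: a group is only as idle as its shallowest
 * member. A hypothetical sketch of that reduction; the cpu_head,
 * list_cpu, and current_cstate names are illustrative stand-ins, not
 * idlestat's exact fields:
 */
static int core_get_least_cstate(struct cpu_core *core)
{
	struct cpu_cpu *cpu;
	int least = MAXCSTATE;

	/* the core is in state N only if every CPU is at least in N */
	list_for_each_entry(cpu, &core->cpu_head, list_cpu)
		if (cpu->cstates->current_cstate < least)
			least = cpu->cstates->current_cstate;

	return least;
}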
int check_pstate_composite(struct cpuidle_datas *datas, int cpu, double time)
{
	struct cpu_core *aff_core;
	struct cpu_physical *aff_cluster;
	unsigned int freq;

	aff_core = cpu_to_core(cpu, datas->topo);
	aff_cluster = cpu_to_cluster(cpu, datas->topo);

	freq = core_get_highest_freq(aff_core);
	if (aff_core->is_ht) {
		verbose_fprintf(stderr, 5, "Core %c%d: freq %9u, time %f\n",
				aff_cluster->physical_id + 'A',
				aff_core->core_id, freq, time);
	}

	if (record_group_freq(aff_core->pstates, time, freq) == -1)
		return -1;

	freq = cluster_get_highest_freq(aff_cluster);
	verbose_fprintf(stderr, 5, "Cluster %c: freq %9u, time %f\n",
			aff_cluster->physical_id + 'A', freq, time);

	return record_group_freq(aff_cluster->pstates, time, freq);
}
static int vexpress_init_opp_table(struct device *cpu_dev)
{
	int i, count, cluster = cpu_to_cluster(cpu_dev->id);
	u32 *table;
	int ret;

	count = vexpress_spc_get_freq_table(cluster, &table);
	if (!table || !count) {
		pr_err("SPC controller returned invalid freq table\n");
		return -EINVAL;
	}

	for (i = 0; i < count; i++) {
		/* FIXME: Voltage value */
		ret = opp_add(cpu_dev, table[i] * 1000, 900000);
		if (ret) {
			dev_warn(cpu_dev, "%s: Failed to add OPP %d, err: %d\n",
				 __func__, table[i] * 1000, ret);
			return ret;
		}
	}

	return 0;
}
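/*
 * Once vexpress_init_opp_table() has registered the OPPs, a caller can
 * derive the per-cluster cpufreq table from them. A sketch using the
 * same OPP API generation as opp_add() above; the wrapper name
 * vexpress_get_freq_table() is hypothetical:
 */
static int vexpress_get_freq_table(struct device *cpu_dev,
				   struct cpufreq_frequency_table **table)
{
	int ret;

	ret = vexpress_init_opp_table(cpu_dev);
	if (ret)
		return ret;

	/* build a cpufreq frequency table from the registered OPPs */
	return opp_init_cpufreq_table(cpu_dev, table);
}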