static int get_cluster_clk_and_freq_table(struct device *cpu_dev, const struct cpumask *cpumask) { u32 cluster = cpu_to_cluster(cpu_dev->id); int i, ret; if (atomic_inc_return(&cluster_usage[cluster]) != 1) return 0; if (cluster < MAX_CLUSTERS) { ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask); if (ret) atomic_dec(&cluster_usage[cluster]); return ret; } /* * Get data for all clusters and fill virtual cluster with a merge of * both */ for_each_present_cpu(i) { struct device *cdev = get_cpu_device(i); if (!cdev) { pr_err("%s: failed to get cpu%d device\n", __func__, i); return -ENODEV; } ret = _get_cluster_clk_and_freq_table(cdev, cpumask); if (ret) goto put_clusters; } ret = merge_cluster_tables(); if (ret) goto put_clusters; /* Assuming 2 cluster, set clk_big_min and clk_little_max */ clk_big_min = get_table_min(freq_table[0]); clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1])); pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n", __func__, cluster, clk_big_min, clk_little_max); return 0; put_clusters: for_each_present_cpu(i) { struct device *cdev = get_cpu_device(i); if (!cdev) { pr_err("%s: failed to get cpu%d device\n", __func__, i); return -ENODEV; } _put_cluster_clk_and_freq_table(cdev, cpumask); } atomic_dec(&cluster_usage[cluster]); return ret; }
static int merge_cluster_tables(void) { int i, j, k = 0, count = 1; struct cpufreq_frequency_table *table; for (i = 0; i < MAX_CLUSTERS; i++) count += get_table_count(freq_table[i]); table = kzalloc(sizeof(*table) * count, GFP_KERNEL); if (!table) return -ENOMEM; freq_table[MAX_CLUSTERS] = table; /* Add in reverse order to get freqs in increasing order */ for (i = MAX_CLUSTERS - 1; i >= 0; i--) { for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END; j++) { table[k].frequency = VIRT_FREQ(i, freq_table[i][j].frequency); pr_debug("%s: index: %d, freq: %d\n", __func__, k, table[k].frequency); k++; } } table[k].driver_data = k; table[k].frequency = CPUFREQ_TABLE_END; pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k); return 0; }
/*
 * Read the current clock rate (in kHz) of the physical cluster that @cpu
 * currently lives on, under the per-cluster mutex. With DEBUG_FS enabled,
 * the time spent is recorded into per-cluster counters.
 *
 * NOTE(review): this function is truncated in the visible source — it ends
 * at a dangling "else if"; the A15 timing assignment, mutex_unlock() and
 * the final return are not visible here. Code left byte-identical.
 */
static unsigned int sunxi_clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu), rate;
#ifdef CONFIG_DEBUG_FS
	ktime_t calltime = ktime_set(0, 0), delta, rettime;
#endif

	/* Serialize against concurrent rate changes on this cluster. */
	mutex_lock(&cluster_lock[cur_cluster]);

#ifdef CONFIG_DEBUG_FS
	calltime = ktime_get();
#endif

	/*
	 * Return values deliberately discarded; presumably these calls
	 * refresh the PLLs' cached rates before the read below —
	 * TODO(review): confirm against the clk driver.
	 */
	if (cur_cluster == A7_CLUSTER)
		clk_get_rate(clk_pll1);
	else if (cur_cluster == A15_CLUSTER)
		clk_get_rate(clk_pll2);

	/* Hz -> kHz. */
	rate = clk_get_rate(cluster_clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A15 clock rates */
	if (is_bL_switching_enabled()) {
		rate = VIRT_FREQ(cur_cluster, rate);
	}

#ifdef CONFIG_DEBUG_FS
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	/* ">> 10" is a cheap approximate ns-to-us conversion (/1024). */
	if (cur_cluster == A7_CLUSTER)
		c0_get_time_usecs = ktime_to_ns(delta) >> 10;
	else if (cur_cluster == A15_CLUSTER)
	/* NOTE(review): source cut off here — remainder not in this view. */
/*
 * Return the current frequency (kHz) of the physical cluster that @cpu is
 * mapped to. When the big.LITTLE switcher is active, the raw cluster rate
 * is translated to the virtual (A7-relative) frequency space.
 */
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cluster = per_cpu(physical_cluster, cpu);
	u32 freq;

	/* Hz -> kHz. */
	freq = clk_get_rate(clk[cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		freq = VIRT_FREQ(cluster, freq);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cluster, freq);

	return freq;
}