/*
 * Build the per-CPU cpufreq frequency tables from acpu_freq_tbl and
 * register each one with the CPUFreq core.
 *
 * Only rows flagged use_for_scaling for the given CPU are copied.
 * One slot of freq_table[cpu] is reserved for the CPUFREQ_TABLE_END
 * terminator: the original loop bound allowed freq_cnt to reach
 * ARRAY_SIZE(*freq_table), making the terminator write below run one
 * entry past the end of the row.
 */
static void __init cpufreq_table_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i, freq_cnt = 0;

		/*
		 * Construct the freq_table tables from acpu_freq_tbl.
		 * Stop one entry early to leave room for the terminator.
		 */
		for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0
				&& freq_cnt < ARRAY_SIZE(*freq_table) - 1; i++) {
			if (acpu_freq_tbl[i].use_for_scaling[cpu]) {
				freq_table[cpu][freq_cnt].index = freq_cnt;
				freq_table[cpu][freq_cnt].frequency
					= acpu_freq_tbl[i].acpuclk_khz;
				freq_cnt++;
			}
		}
		/* freq_table not big enough to store all usable freqs. */
		BUG_ON(acpu_freq_tbl[i].acpuclk_khz != 0);

		freq_table[cpu][freq_cnt].index = freq_cnt;
		freq_table[cpu][freq_cnt].frequency = CPUFREQ_TABLE_END;

		pr_info("CPU%d: %d scaling frequencies supported.\n",
			cpu, freq_cnt);

		/* Register table with CPUFreq. */
		cpufreq_frequency_table_get_attr(freq_table[cpu], cpu);
	}
}
/*
 * Board-level ACPU clock driver initialization.
 *
 * Caches the platform-supplied switch timing / voltage parameters,
 * moves the CPU clock onto a known-good source, and ramps straight to
 * the fixed-up maximum rate before handing the tables to CPUFreq.
 */
void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
{
	unsigned int boot_rate_khz;

	mutex_init(&drv_state.lock);

	/* Cache platform data before anything can consult drv_state. */
	drv_state.acpu_set_vdd        = clkdata->acpu_set_vdd;
	drv_state.max_vdd             = clkdata->max_vdd;
	drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz;
	drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us;

	boot_rate_khz = msm_acpu_clock_fixup();

	/* Configure hardware. */
	move_off_scpll();
	scpll_init();
	lpj_init();

	/* Jump straight to the fastest usable rate to improve boot time. */
	acpuclk_set_rate(smp_processor_id(), boot_rate_khz, SETRATE_CPUFREQ);

#ifdef CONFIG_CPU_FREQ_MSM
	cpufreq_table_init();
	cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
#endif
}
/*
 * Build the shared cpufreq frequency table from
 * acpuclk_init_data->freq_tbl and register it for every possible CPU.
 *
 * One slot of freq_table is reserved for the CPUFREQ_TABLE_END
 * terminator: the original loop bound allowed freq_cnt to reach
 * ARRAY_SIZE(freq_table), so the terminator write below could land one
 * entry past the end of the array.
 */
static void __init cpufreq_table_init(void)
{
	int i, freq_cnt = 0;

	/*
	 * Construct the freq_table from acpuclk_init_data->freq_tbl.
	 * Stop one entry early to leave room for the terminator.
	 */
	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0
			&& freq_cnt < ARRAY_SIZE(freq_table) - 1; i++) {
		if (!acpuclk_init_data->freq_tbl[i].use_for_scaling)
			continue;
		freq_table[freq_cnt].index = freq_cnt;
		freq_table[freq_cnt].frequency =
			acpuclk_init_data->freq_tbl[i].khz;
		freq_cnt++;
	}
	/* freq_table not big enough to store all usable freqs. */
	BUG_ON(acpuclk_init_data->freq_tbl[i].khz != 0);

	freq_table[freq_cnt].index = freq_cnt;
	freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;

	pr_info("CPU: %d scaling frequencies supported.\n", freq_cnt);

	/* Register table with CPUFreq. */
	for_each_possible_cpu(i)
		cpufreq_frequency_table_get_attr(freq_table, i);
}
static void fix_freq_table(void) { struct bcm_cpu_info *cpu_info; struct cpufreq_policy *policy; int i; /* table */ if (!cpufreq_frequency_get_table(0)) { cpu_info = (struct bcm_cpu_info*)kallsyms_lookup_name( "bcm215xx_cpu_info"); if (cpu_info) { cpufreq_fix_table = kmalloc(sizeof(struct cpufreq_frequency_table)* (cpu_info[0].num_freqs+1),GFP_KERNEL); for (i=0;i<cpu_info[0].num_freqs;i++) { cpufreq_fix_table[i].index = i; cpufreq_fix_table[i].frequency = cpu_info[0].freq_tbl[i].cpu_freq*1000; } i = cpu_info[0].num_freqs; cpufreq_fix_table[i].index = i; cpufreq_fix_table[i].frequency = CPUFREQ_TABLE_END; cpufreq_frequency_table_get_attr(cpufreq_fix_table,0); } } /* latency */ policy = cpufreq_cpu_get(0); if (policy) { if (policy->cpuinfo.transition_latency > 10000000) policy->cpuinfo.transition_latency = 1000000; cpufreq_cpu_put(policy); } }
/*
 * Validate @table against @policy's cpuinfo limits and, on success,
 * expose it through the per-CPU CPUFreq sysfs attribute.
 *
 * Returns 0 on success or the error from the cpuinfo validation; the
 * table is not registered when validation fails.
 */
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	int err;

	err = cpufreq_frequency_table_cpuinfo(policy, table);
	if (err)
		return err;

	cpufreq_frequency_table_get_attr(table, policy->cpu);
	return 0;
}
static int set_freq_table(struct cpufreq_policy *policy, int end_index) { int ret = 0; int i; int zero_no = 0; for (i = 0; i < end_index; i++) { if (profiles[i].cpu == 0) zero_no++; } end_index -= zero_no; cpu_freq_khz_min = profiles[0].cpu; cpu_freq_khz_max = profiles[0].cpu; for (i = 0; i < end_index; i++) { imx_freq_table[end_index - 1 - i].index = end_index - i; imx_freq_table[end_index - 1 - i].frequency = profiles[i].cpu; if ((profiles[i].cpu) < cpu_freq_khz_min) cpu_freq_khz_min = profiles[i].cpu; if ((profiles[i].cpu) > cpu_freq_khz_max) cpu_freq_khz_max = profiles[i].cpu; } imx_freq_table[i].index = 0; imx_freq_table[i].frequency = CPUFREQ_TABLE_END; policy->cur = clk_get_rate(cpu_clk) / 1000; policy->governor = CPUFREQ_DEFAULT_GOVERNOR; policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min; policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max; /* Manual states, that PLL stabilizes in two CLK32 periods */ policy->cpuinfo.transition_latency = 1000; ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table); if (ret < 0) { printk(KERN_ERR "%s: failed to register i.MXC CPUfreq\n", __func__); return ret; } cpufreq_frequency_table_get_attr(imx_freq_table, policy->cpu); return ret; }
/*
 * Module init: raise CPU0's reported max frequency, patch the cpufreq
 * frequency table with the overclocked rate and rebind it on every CPU,
 * then patch all three (nominal/slow/fast bin) acpu_freq_tbl variants.
 *
 * cpufreq_cpu_get is resolved via kallsyms at load time (presumably not
 * exported on this kernel); if the lookup — or the policy itself — is
 * unavailable, bail out without touching anything.
 */
static int __init overclock_init(void)
{
	struct cpufreq_policy *policy;
	ulong cpufreq_cpu_get_addr;
	uint cpu;

	printk(KERN_INFO "krait_oc: %s version %s\n",
	       DRIVER_DESCRIPTION, DRIVER_VERSION);
	printk(KERN_INFO "krait_oc: by %s\n", DRIVER_AUTHOR);
	printk(KERN_INFO "krait_oc: overclocking to %u at %u uV\n",
	       pll_l_val*HFPLL_FREQ_KHZ, vdd_uv);

	printk(KERN_INFO "krait_oc: updating cpufreq policy\n");
	cpufreq_cpu_get_addr = kallsyms_lookup_name("cpufreq_cpu_get");
	if (cpufreq_cpu_get_addr == 0) {
		printk(KERN_WARNING "krait_oc: symbol not found\n");
		printk(KERN_WARNING "krait_oc: not attempting overclock\n");
		return 0;
	}
	cpufreq_cpu_get_new = (cpufreq_cpu_get_type) cpufreq_cpu_get_addr;
	policy = cpufreq_cpu_get_new(0);
	/* Was dereferenced unchecked; cpufreq_cpu_get returns NULL when
	 * cpu0 has no policy. */
	if (!policy) {
		printk(KERN_WARNING "krait_oc: no policy for cpu0\n");
		printk(KERN_WARNING "krait_oc: not attempting overclock\n");
		return 0;
	}
	policy->cpuinfo.max_freq = pll_l_val*HFPLL_FREQ_KHZ;

	printk(KERN_INFO "krait_oc: updating cpufreq tables\n");
	freq_table[FREQ_TABLE_LAST].frequency = pll_l_val*HFPLL_FREQ_KHZ;
	/* Save a pointer to the freq original table to restore if unloaded */
	orig_table = cpufreq_frequency_get_table(0);
	for_each_possible_cpu(cpu) {
		cpufreq_frequency_table_put_attr(cpu);
		cpufreq_frequency_table_get_attr(freq_table, cpu);
	}

	/* Index 20 is not used for scaling in the acpu_freq_tbl, so fill it
	 * with our new freq. Change all three tables to account for all
	 * possible bins. */
	printk(KERN_INFO "krait_oc: updating nominal acpu_freq_tbl\n");
	acpu_freq_row_update(ACPU_FREQ_TBL_NOM_NAME);
	printk(KERN_INFO "krait_oc: updating slow acpu_freq_tbl\n");
	acpu_freq_row_update(ACPU_FREQ_TBL_SLOW_NAME);
	printk(KERN_INFO "krait_oc: updating fast acpu_freq_tbl\n");
	acpu_freq_row_update(ACPU_FREQ_TBL_FAST_NAME);

	return 0;
}
/*
 * Module exit: undo what overclock_init() changed — restore the stock
 * max frequency on CPU0's policy and rebind the saved original
 * frequency table on every CPU.  Mirrors the init-time kallsyms guard.
 *
 * Fixes two defects: the function's closing brace was missing (the
 * original text ended after the if-block), and the policy returned by
 * cpufreq_cpu_get_new(0) was dereferenced without a NULL check.
 */
static void __exit overclock_exit(void)
{
	struct cpufreq_policy *policy;
	uint cpu;

	if (kallsyms_lookup_name("cpufreq_cpu_get") != 0) {
		printk(KERN_INFO "krait_oc: reverting cpufreq policy\n");
		policy = cpufreq_cpu_get_new(0);
		if (policy)
			policy->cpuinfo.max_freq = 1512000;
		printk(KERN_INFO "krait_oc: reverting cpufreq tables\n");
		for_each_possible_cpu(cpu) {
			cpufreq_frequency_table_put_attr(cpu);
			cpufreq_frequency_table_get_attr(orig_table, cpu);
		}
	}
}
static s32 balong_cpufreq_cpu_init(struct cpufreq_policy *policy) { /*cpu_online 这里的作用是 ? */ if (!cpu_online(policy->cpu)) return -ENODEV; pr_info("cpufreq: balong_cpufreq_cpu_init.\n"); cpufreq_table_init(); policy->governor = &cpufreq_balong_ondemand; policy->max = policy->cpuinfo.max_freq = BALONG_CPUFREQUENCY_666; policy->min = policy->cpuinfo.min_freq = BALONG_CPUFREQUENCY_100; policy->cur = BALONG_CPUFREQUENCY_666; g_cur_freq = policy->cur; cpufreq_frequency_table_get_attr(&balong_clockrate_table[0], policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, &balong_clockrate_table[0]); }