static void switch_sc_speed(int cpu, struct clkctl_acpu_speed *tgt_s)
{
	struct clkctl_acpu_speed *strt_s = drv_state.current_speed[cpu];

	if (strt_s->pll != ACPU_SCPLL && tgt_s->pll != ACPU_SCPLL) {
		select_clk_source_div(cpu, tgt_s);
		/* Select core source because target may be AFAB. */
		select_core_source(cpu, tgt_s->core_src_sel);
	} else if (strt_s->pll != ACPU_SCPLL && tgt_s->pll == ACPU_SCPLL) {
		scpll_enable(cpu, tgt_s->sc_l_val);
		mb();
		select_core_source(cpu, tgt_s->core_src_sel);
	} else if (strt_s->pll == ACPU_SCPLL && tgt_s->pll != ACPU_SCPLL) {
		select_clk_source_div(cpu, tgt_s);
		select_core_source(cpu, tgt_s->core_src_sel);
		mb();
		scpll_disable(cpu);
	} else {
		scpll_change_freq(cpu, tgt_s->sc_l_val);
	}

	/* Update the driver state with the new clock freq */
	drv_state.current_speed[cpu] = tgt_s;

	/* Adjust lpj for the new clock speed. */
	/*
	 * XXX Temporary hack until udelay() is fixed to not care about lpj:
	 * Update the global loops_per_jiffy variable used by udelay() to be
	 * the max of the two CPUs' values.
	 */
#ifdef CONFIG_SMP
	per_cpu(cpu_data, cpu).loops_per_jiffy = tgt_s->lpj;
#endif
	loops_per_jiffy = tgt_s->lpj;
	for_each_online_cpu(cpu)
		if (drv_state.current_speed[cpu]->lpj > loops_per_jiffy)
			loops_per_jiffy = drv_state.current_speed[cpu]->lpj;
}
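/*
 * Illustrative sketch only (not part of the original driver): the lpj
 * update above factored into a helper, to make the reasoning explicit.
 * udelay() busy-waits using the single global loops_per_jiffy, so after a
 * per-CPU frequency change it must track the fastest online CPU; a smaller
 * value would make delays on that CPU shorter than requested.
 * example_update_global_lpj() is a hypothetical name.
 */
static void example_update_global_lpj(void)
{
	unsigned long max_lpj = 0;
	int cpu;

	for_each_online_cpu(cpu)
		if (drv_state.current_speed[cpu]->lpj > max_lpj)
			max_lpj = drv_state.current_speed[cpu]->lpj;

	loops_per_jiffy = max_lpj;
}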
/* Force ACPU core and L2 cache clocks to their AFAB sources. */
static void __init force_all_to_afab(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		select_clk_source_div(cpu, &acpu_freq_tbl[AFAB_IDX]);
		select_core_source(cpu, 0);
		drv_state.current_speed[cpu] = &acpu_freq_tbl[AFAB_IDX];
		l2_vote[cpu] = &acpu_freq_tbl[AFAB_IDX];
	}

	select_core_source(L2, 0);
	drv_state.current_l2_speed = &acpu_freq_tbl[AFAB_IDX];

	/* Both cores are assumed to have the same lpj values when on AFAB. */
	calibrate_delay();
}
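/*
 * Illustrative sketch only (not part of the original driver): the init
 * ordering force_all_to_afab() is written for.  Every core is parked on
 * AFAB first so the SCPLLs can be (re)programmed safely, and only then is
 * each core switched to a boot frequency.  scpll_init() and BOOT_IDX are
 * hypothetical names used for illustration.
 */
static void __init example_acpuclk_hw_init(void)
{
	int cpu;

	/* Run everything from AFAB while the SCPLLs are reprogrammed. */
	force_all_to_afab();

	/* Hypothetical per-SCPLL setup/calibration. */
	scpll_init();

	/* Move each core off AFAB to its boot speed. */
	for_each_online_cpu(cpu)
		switch_sc_speed(cpu, &acpu_freq_tbl[BOOT_IDX]);
}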
static void l2_set_speed(struct clkctl_acpu_speed *tgt_s)
{
	if (drv_state.current_l2_speed->l2_src_sel == 1
				&& tgt_s->l2_src_sel == 1) {
		scpll_change_freq(L2, tgt_s->l2_l_val);
	} else if (tgt_s->l2_src_sel == 1) {
		scpll_enable(L2, tgt_s->l2_l_val);
		mb();
		select_core_source(L2, tgt_s->l2_src_sel);
	} else {
		select_core_source(L2, tgt_s->l2_src_sel);
		mb();
		scpll_disable(L2);
	}
	drv_state.current_l2_speed = tgt_s;
}
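/*
 * Illustrative sketch only (not part of the original driver): one way the
 * per-CPU votes recorded in l2_vote[] might be folded into a single L2
 * frequency.  Each CPU votes for the L2 setting tied to its own
 * frequency-table entry, and the L2 runs at the fastest outstanding vote.
 * This assumes acpu_freq_tbl[] is ordered by ascending speed, so votes can
 * be compared by table position; example_vote_l2_speed() is a hypothetical
 * name.
 */
static void example_vote_l2_speed(int cpu, struct clkctl_acpu_speed *vote)
{
	struct clkctl_acpu_speed *max_s = vote;
	int i;

	l2_vote[cpu] = vote;

	/* The L2 must be fast enough for the most demanding CPU. */
	for_each_online_cpu(i)
		if (l2_vote[i] > max_s)
			max_s = l2_vote[i];

	if (max_s != drv_state.current_l2_speed)
		l2_set_speed(max_s);
}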
/* Make sure ACPU clock is not PLL3, so PLL3 can be re-programmed. */
static void __init move_off_scpll(void)
{
	struct clkctl_acpu_speed *tgt_s = &acpu_freq_tbl[PLL3_CALIBRATION_IDX];

	BUG_ON(tgt_s->pll == ACPU_PLL_3);
	select_clk_source(tgt_s);
	select_core_source(tgt_s->core_src_sel);
	drv_state.current_speed = tgt_s;
	calibrate_delay();
}
int acpuclk_set_rate(int cpu, unsigned long rate, enum setrate_reason reason)
{
	struct clkctl_acpu_speed *tgt_s, *strt_s;
	int res, rc = 0;

	if (reason == SETRATE_CPUFREQ)
		mutex_lock(&drv_state.lock);

	strt_s = drv_state.current_speed;

	if (rate == strt_s->acpuclk_khz)
		goto out;

	for (tgt_s = acpu_freq_tbl; tgt_s->acpuclk_khz != 0; tgt_s++)
		if (tgt_s->acpuclk_khz == rate)
			break;
	if (tgt_s->acpuclk_khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	if (reason == SETRATE_CPUFREQ) {
		/* Increase VDD if needed. */
		if (tgt_s->vdd > strt_s->vdd) {
			rc = acpuclk_set_vdd_level(tgt_s->vdd);
			if (rc) {
				pr_err("Unable to increase ACPU vdd (%d)\n",
					rc);
				goto out;
			}
		}
	}

	dprintk("Switching from ACPU rate %u KHz -> %u KHz\n",
		strt_s->acpuclk_khz, tgt_s->acpuclk_khz);

	if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
		select_clk_source(tgt_s);
		/* Select core source because target may be AXI. */
		select_core_source(tgt_s->core_src_sel);
	} else if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll == ACPU_PLL_3) {
		scpll_enable(1, tgt_s);
		mb();
		select_core_source(tgt_s->core_src_sel);
	} else if (strt_s->pll == ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
		select_clk_source(tgt_s);
		select_core_source(tgt_s->core_src_sel);
		mb();
		scpll_enable(0, NULL);
	} else {
		scpll_change_freq(tgt_s->l_value);
	}

	/* Update the driver state with the new clock freq */
	drv_state.current_speed = tgt_s;

	/* Re-adjust lpj for the new clock speed. */
	loops_per_jiffy = tgt_s->lpj;

	/* Nothing else to do for SWFI. */
	if (reason == SETRATE_SWFI)
		goto out;

	if (strt_s->ebi1clk_khz != tgt_s->ebi1clk_khz) {
		res = ebi1_clk_set_min_rate(CLKVOTE_ACPUCLK,
						tgt_s->ebi1clk_khz * 1000);
		if (res < 0)
			pr_warning("Setting EBI1/AXI min rate failed (%d)\n",
					res);
	}

	/* Nothing else to do for power collapse */
	if (reason == SETRATE_PC)
		goto out;

	/* Drop VDD level if we can. */
	if (tgt_s->vdd < strt_s->vdd) {
		res = acpuclk_set_vdd_level(tgt_s->vdd);
		if (res)
			pr_warning("Unable to drop ACPU vdd (%d)\n", res);
	}

	dprintk("ACPU speed change complete\n");

out:
	if (reason == SETRATE_CPUFREQ)
		mutex_unlock(&drv_state.lock);
	return rc;
}
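/*
 * Illustrative sketch only (not part of the original driver): how a
 * cpufreq ->target hook might drive acpuclk_set_rate().  Rates are in kHz
 * to match the acpuclk_khz comparison above; frequency-table validation
 * and CPUFREQ_RELATION_* handling are left out.  Assumes
 * <linux/cpufreq.h>; example_cpufreq_target() is a hypothetical name.
 */
static int example_cpufreq_target(struct cpufreq_policy *policy,
				  unsigned int target_freq,
				  unsigned int relation)
{
	/* acpuclk_set_rate() does its own table lookup and VDD sequencing. */
	return acpuclk_set_rate(policy->cpu, target_freq, SETRATE_CPUFREQ);
}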