int __init am33xx_clk_init(void)
{
	struct omap_clk *c;
	u32 cpu_clkflg;

	if (soc_is_am33xx()) {
		cpu_mask = RATE_IN_AM33XX;
		cpu_clkflg = CK_AM33XX;
	}

	clk_init(&omap2_clk_functions);

	for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++)
		clk_preinit(c->lk.clk);

	for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++) {
		if (c->cpu & cpu_clkflg) {
			clkdev_add(&c->lk);
			clk_register(c->lk.clk);
			omap2_init_clk_clkdm(c->lk.clk);
		}
	}

	recalculate_root_clocks();

	/*
	 * Only enable those clocks we will need, let the drivers
	 * enable other clocks as necessary
	 */
	clk_enable_init_clocks();

	return 0;
}
static int __init omap_hsi_init(void)
{
	int err;
	struct clk *hsi_clk = &hsi_clock.clk;

	hsi_clk_init(&hsi_clock);
	clk_preinit(hsi_clk);
#ifdef OMAP_HSI_EXAMPLE_PWR_CODE
	clkdev_add(&hsi_lk);
#endif
	clk_register(hsi_clk);
#ifdef OMAP_HSI_EXAMPLE_PWR_CODE
	omap2_init_clk_clkdm(hsi_clk);
#endif

	err = platform_device_register(&hsi_pdev);
	if (err < 0) {
		pr_err("Unable to register HSI platform device: %d\n", err);
		return err;
	}

	omap_hsi_mux_setup();

	pr_info("HSI: device registered\n");

	return 0;
}
static int __init omap_ssi_init(void)
{
	int err;
	struct clk *hsi_clk = &ssi_clock.clk;

	ssi_clk_init(&ssi_clock);
	clk_preinit(hsi_clk);
	clkdev_add(&hsi_lk);
	clk_register(hsi_clk);
	omap2_init_clk_clkdm(hsi_clk);

	err = platform_device_register(&ssi_pdev);
	if (err < 0) {
		pr_err("Unable to register SSI platform device: %d\n", err);
		return err;
	}

	omap_ssi_mux_setup();

	pr_info("SSI: device registered\n");

	return 0;
}
int __init omap1_clk_init(void)
{
	struct omap_clk *c;
	int crystal_type = 0; /* Default 12 MHz */
	u32 reg;

#ifdef CONFIG_DEBUG_LL
	/*
	 * Resets some clocks that may be left on from bootloader,
	 * but leaves serial clocks on.
	 */
	omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
#endif

	/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
	reg = omap_readw(SOFT_REQ_REG) & (1 << 4);
	omap_writew(reg, SOFT_REQ_REG);
	if (!cpu_is_omap15xx())
		omap_writew(0, SOFT_REQ_REG2);

	/* By default all idlect1 clocks are allowed to idle */
	arm_idlect1_mask = ~0;

	for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
		clk_preinit(c->lk.clk);

	cpu_mask = 0;
	if (cpu_is_omap1710())
		cpu_mask |= CK_1710;
	if (cpu_is_omap16xx())
		cpu_mask |= CK_16XX;
	if (cpu_is_omap1510())
		cpu_mask |= CK_1510;
	if (cpu_is_omap7xx())
		cpu_mask |= CK_7XX;
	if (cpu_is_omap310())
		cpu_mask |= CK_310;

	for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
		if (c->cpu & cpu_mask) {
			clkdev_add(&c->lk);
			clk_register(c->lk.clk);
		}

	/* Pointers to these clocks are needed by code in clock.c */
	api_ck_p = clk_get(NULL, "api_ck");
	ck_dpll1_p = clk_get(NULL, "ck_dpll1");
	ck_ref_p = clk_get(NULL, "ck_ref");

	if (cpu_is_omap7xx())
		ck_ref.rate = 13000000;
	if (cpu_is_omap16xx() && crystal_type == 2)
		ck_ref.rate = 19200000;

	pr_info("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
		omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
		omap_readw(ARM_CKCTL));

	/* We want to be in synchronous scalable mode */
	omap_writew(0x1000, ARM_SYSST);

	/*
	 * Initially use the values set by the bootloader. Determine the PLL
	 * rate and recalculate dependent clocks as if the kernel had changed
	 * the PLL or divisors. See also omap1_clk_late_init() that can
	 * reprogram dpll1 after the SRAM is initialized.
	 */
	{
		unsigned pll_ctl_val = omap_readw(DPLL_CTL);

		ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
		if (pll_ctl_val & 0x10) {
			/* PLL enabled, apply multiplier and divisor */
			if (pll_ctl_val & 0xf80)
				ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
			ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
		} else {
			/* PLL disabled, apply bypass divisor */
			switch (pll_ctl_val & 0xc) {
			case 0:
				break;
			case 0x4:
				ck_dpll1.rate /= 2;
				break;
			default:
				ck_dpll1.rate /= 4;
				break;
			}
		}
	}
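/*
 * Worked example of the DPLL_CTL rate math above, as a standalone
 * sketch: dpll1_rate_example() and the sample register value are
 * hypothetical, for illustration only, not part of the kernel code.
 * With a 12 MHz reference, PLL enable bit 4 set, multiplier 7 in
 * bits 11:7 and divisor field 0 in bits 6:5, the computed rate is
 * 12 MHz * 7 / (0 + 1) = 84 MHz.
 */
static unsigned long dpll1_rate_example(unsigned long ref_rate,
					unsigned int pll_ctl_val)
{
	unsigned long rate = ref_rate;

	if (pll_ctl_val & 0x10) {
		/* PLL enabled: multiplier in bits 11:7, divisor in bits 6:5 */
		if (pll_ctl_val & 0xf80)
			rate *= (pll_ctl_val & 0xf80) >> 7;
		rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
	} else {
		/* PLL bypassed: bypass divisor encoded in bits 3:2 */
		switch (pll_ctl_val & 0xc) {
		case 0x0:
			break;
		case 0x4:
			rate /= 2;
			break;
		default:
			rate /= 4;
			break;
		}
	}
	return rate;
}

/* dpll1_rate_example(12000000, (7 << 7) | 0x10) returns 84000000 */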
int __init omap2_clk_init(void)
{
	struct prcm_config *prcm;
	struct omap_clk *c;
	u32 clkrate;

	if (cpu_is_omap242x()) {
		prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
		cpu_mask = RATE_IN_242X;
	} else if (cpu_is_omap2430()) {
		prcm_clksrc_ctrl = OMAP2430_PRCM_CLKSRC_CTRL;
		cpu_mask = RATE_IN_243X;
	}

	clk_init(&omap2_clk_functions);

	for (c = omap24xx_clks; c < omap24xx_clks + ARRAY_SIZE(omap24xx_clks); c++)
		clk_preinit(c->lk.clk);

	osc_ck.rate = omap2_osc_clk_recalc(&osc_ck);
	propagate_rate(&osc_ck);
	sys_ck.rate = omap2_sys_clk_recalc(&sys_ck);
	propagate_rate(&sys_ck);

	for (c = omap24xx_clks; c < omap24xx_clks + ARRAY_SIZE(omap24xx_clks); c++)
		if (c->cpu & cpu_mask) {
			clkdev_add(&c->lk);
			clk_register(c->lk.clk);
			omap2_init_clk_clkdm(c->lk.clk);
		}

	/* Check the MPU rate set by bootloader */
	clkrate = omap2xxx_clk_get_core_rate(&dpll_ck);
	for (prcm = rate_table; prcm->mpu_speed; prcm++) {
		if (!(prcm->flags & cpu_mask))
			continue;
		if (prcm->xtal_speed != sys_ck.rate)
			continue;
		if (prcm->dpll_speed <= clkrate)
			break;
	}
	curr_prcm_set = prcm;

	recalculate_root_clocks();

	pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
		sys_ck.rate / 1000000, (sys_ck.rate / 100000) % 10,
		dpll_ck.rate / 1000000, mpu_ck.rate / 1000000);

	/*
	 * Only enable those clocks we will need, let the drivers
	 * enable other clocks as necessary
	 */
	clk_enable_init_clocks();

	/* Avoid sleeping during omap2_clk_prepare_for_reboot() */
	vclk = clk_get(NULL, "virt_prcm_set");
	sclk = clk_get(NULL, "sys_ck");

	return 0;
}
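/*
 * Sketch of the rate-table scan above, with a hypothetical two-entry
 * table. The field and flag semantics mirror the loop above, but the
 * struct name, EX_RATE_IN_242X, the sample values, and
 * pick_prcm_example() are made up for illustration. Entries are
 * ordered fastest-first and terminated by mpu_speed == 0, so the scan
 * returns the first supported entry whose dpll_speed does not exceed
 * the core rate the bootloader programmed, or the terminator if none
 * matches.
 */
#define EX_RATE_IN_242X		(1 << 0)

struct prcm_config_example {
	unsigned long xtal_speed;	/* crystal rate, Hz */
	unsigned long dpll_speed;	/* DPLL output rate, Hz */
	unsigned long mpu_speed;	/* MPU rate, Hz; 0 terminates */
	unsigned int flags;
};

static const struct prcm_config_example example_rate_table[] = {
	{ 12000000, 660000000, 330000000, EX_RATE_IN_242X },
	{ 12000000, 600000000, 300000000, EX_RATE_IN_242X },
	{ 0, 0, 0, 0 },
};

static const struct prcm_config_example *
pick_prcm_example(unsigned long sys_rate, unsigned long core_rate,
		  unsigned int mask)
{
	const struct prcm_config_example *prcm;

	for (prcm = example_rate_table; prcm->mpu_speed; prcm++) {
		if (!(prcm->flags & mask))
			continue;
		if (prcm->xtal_speed != sys_rate)
			continue;
		if (prcm->dpll_speed <= core_rate)
			break;
	}
	return prcm;
}

/*
 * pick_prcm_example(12000000, 600000000, EX_RATE_IN_242X) selects the
 * 600 MHz entry: the 660 MHz entry is skipped because its dpll_speed
 * exceeds the 600 MHz core rate read back from the hardware.
 */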
int __init omap1_clk_init(void)
{
	struct omap_clk *c;
	const struct omap_clock_config *info;
	int crystal_type = 0; /* Default 12 MHz */
	u32 reg, cpu_mask;

#ifdef CONFIG_DEBUG_LL
	/*
	 * Resets some clocks that may be left on from bootloader,
	 * but leaves serial clocks on.
	 */
	omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
#endif

	/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
	reg = omap_readw(SOFT_REQ_REG) & (1 << 4);
	omap_writew(reg, SOFT_REQ_REG);
	if (!cpu_is_omap15xx())
		omap_writew(0, SOFT_REQ_REG2);

	clk_init(&omap1_clk_functions);

	/* By default all idlect1 clocks are allowed to idle */
	arm_idlect1_mask = ~0;

	for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
		clk_preinit(c->lk.clk);

	cpu_mask = 0;
	if (cpu_is_omap16xx())
		cpu_mask |= CK_16XX;
	if (cpu_is_omap1510())
		cpu_mask |= CK_1510;
	if (cpu_is_omap7xx())
		cpu_mask |= CK_7XX;
	if (cpu_is_omap310())
		cpu_mask |= CK_310;

	for (c = omap_clks; c < omap_clks + ARRAY_SIZE(omap_clks); c++)
		if (c->cpu & cpu_mask) {
			clkdev_add(&c->lk);
			clk_register(c->lk.clk);
		}

	/* Pointers to these clocks are needed by code in clock.c */
	api_ck_p = clk_get(NULL, "api_ck");
	ck_dpll1_p = clk_get(NULL, "ck_dpll1");
	ck_ref_p = clk_get(NULL, "ck_ref");

	info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
	if (info != NULL) {
		if (!cpu_is_omap15xx())
			crystal_type = info->system_clock_type;
	}

#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
	ck_ref.rate = 13000000;
#elif defined(CONFIG_ARCH_OMAP16XX)
	if (crystal_type == 2)
		ck_ref.rate = 19200000;
#endif

	pr_info("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
		omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
		omap_readw(ARM_CKCTL));

	/* We want to be in synchronous scalable mode */
	omap_writew(0x1000, ARM_SYSST);

#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
	/*
	 * Use values set by bootloader. Determine PLL rate and recalculate
	 * dependent clocks as if kernel had changed PLL or divisors.
	 */
	{
		unsigned pll_ctl_val = omap_readw(DPLL_CTL);

		ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
		if (pll_ctl_val & 0x10) {
			/* PLL enabled, apply multiplier and divisor */
			if (pll_ctl_val & 0xf80)
				ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
			ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
		} else {
			/* PLL disabled, apply bypass divisor */
			switch (pll_ctl_val & 0xc) {
			case 0:
				break;
			case 0x4:
				ck_dpll1.rate /= 2;
				break;
			default:
				ck_dpll1.rate /= 4;
				break;
			}
		}
	}