/*
 * Register all AT91SAM9G45 clocks and their clkdev lookups.
 *
 * Registers every peripheral clock, installs the peripheral and USART
 * lookup tables, binds a set of clocks to their controllers' physical
 * base addresses, and finally registers the programmable clocks.  The
 * video decoder clock is only present on the 9M10/9M11 variants.
 */
static void __init at91sam9g45_register_clocks(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(periph_clocks); idx++)
		clk_register(periph_clocks[idx]);

	clkdev_add_table(periph_clocks_lookups,
			 ARRAY_SIZE(periph_clocks_lookups));
	clkdev_add_table(usart_clocks_lookups,
			 ARRAY_SIZE(usart_clocks_lookups));

	clkdev_add_physbase(&twi0_clk, AT91SAM9G45_BASE_TWI0, NULL);
	clkdev_add_physbase(&twi1_clk, AT91SAM9G45_BASE_TWI1, NULL);
	clkdev_add_physbase(&pioA_clk, AT91SAM9G45_BASE_PIOA, NULL);
	clkdev_add_physbase(&pioB_clk, AT91SAM9G45_BASE_PIOB, NULL);
	clkdev_add_physbase(&pioC_clk, AT91SAM9G45_BASE_PIOC, NULL);
	/* PIOD and PIOE share one clock */
	clkdev_add_physbase(&pioDE_clk, AT91SAM9G45_BASE_PIOD, NULL);
	clkdev_add_physbase(&pioDE_clk, AT91SAM9G45_BASE_PIOE, NULL);

	/* The video decoder exists on AT91SAM9M10/M11 only */
	if (cpu_is_at91sam9m10() || cpu_is_at91sam9m11())
		clk_register(&vdec_clk);

	clk_register(&pck0);
	clk_register(&pck1);
}
/*
 * pxa300_init - probe-time setup shared by PXA300 and PXA310.
 *
 * Initializes the MFP register base and the common pad mappings, and
 * registers the common clock lookups.  On PXA310, the additional pad
 * mappings and clocks for that part are registered as well.
 *
 * Returns 0 unconditionally.
 */
static int __init pxa300_init(void)
{
	if (cpu_is_pxa300() || cpu_is_pxa310()) {
		mfp_init_base(io_p2v(MFPR_BASE));
		mfp_init_addr(pxa300_mfp_addr_map);
		clkdev_add_table(ARRAY_AND_SIZE(common_clkregs));
	}

	/* PXA310 carries extra pads and clocks on top of the common set */
	if (cpu_is_pxa310()) {
		mfp_init_addr(pxa310_mfp_addr_map);
		clkdev_add_table(ARRAY_AND_SIZE(pxa310_clkregs));
	}

	return 0;
}
/*
 * Register all AT91SAM9RL clocks: the peripheral clocks, the clkdev
 * lookup tables for peripherals and USARTs, and the two programmable
 * clocks.
 */
static void __init at91sam9rl_register_clocks(void)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(periph_clocks); n++)
		clk_register(periph_clocks[n]);

	clkdev_add_table(periph_clocks_lookups,
			 ARRAY_SIZE(periph_clocks_lookups));
	clkdev_add_table(usart_clocks_lookups,
			 ARRAY_SIZE(usart_clocks_lookups));

	clk_register(&pck0);
	clk_register(&pck1);
}
/*
 * arch_clk_init - register the SoC clock tree.
 *
 * Derives the PLL1 divider from the mode pins, registers the main
 * clocks, installs the clkdev lookup table, and then registers the
 * DIV4 and MSTP clocks.  Returns 0 on success or the first failing
 * registration's error code.
 */
int __init arch_clk_init(void)
{
	int idx, ret = 0;

	/* pll1_div is selected by the MODE_PIN0/MODE_PIN1 strapping */
	if (!test_mode_pin(MODE_PIN0))
		pll1_div = 1;
	else if (test_mode_pin(MODE_PIN1))
		pll1_div = 3;
	else
		pll1_div = 4;

	for (idx = 0; !ret && (idx < ARRAY_SIZE(main_clks)); idx++)
		ret = clk_register(main_clks[idx]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	return ret;
}
/*
 * v2m_dt_init_early - early init for the DT-based Versatile Express.
 *
 * Maps the system register block, cross-checks the board HBI number
 * from the device tree against the value read from hardware, registers
 * the clkdev lookups, and starts sched_clock off the 24MHz counter.
 */
void __init v2m_dt_init_early(void)
{
	struct device_node *node;
	u32 dt_hbi;

	node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg");
	v2m_sysreg_base = of_iomap(node, 0);
	if (WARN_ON(!v2m_sysreg_base))
		return;

	/* Confirm board type against DT property, if available */
	if (of_property_read_u32(allnodes, "arm,hbi", &dt_hbi) == 0) {
		u32 misc = readl(v2m_sysreg_base + V2M_SYS_MISC);
		/* MASTERSITE selects which PROCID register describes us */
		u32 id = readl(v2m_sysreg_base + (misc & SYS_MISC_MASTERSITE ?
				V2M_SYS_PROCID1 : V2M_SYS_PROCID0));
		u32 hbi = id & SYS_PROCIDx_HBI_MASK;

		/* Mismatch is reported but not fatal */
		if (WARN_ON(dt_hbi != hbi))
			pr_warning("vexpress: DT HBI (%x) is not matching "
					"hardware (%x)!\n", dt_hbi, hbi);
	}

	clkdev_add_table(v2m_dt_lookups, ARRAY_SIZE(v2m_dt_lookups));
	versatile_sched_clock_init(v2m_sysreg_base + V2M_SYS_24MHZ, 24000000);
}
/**
 * s3c64xx_register_clocks - register clocks for s3c6400 and s3c6410
 * @xtal: The rate for the clock crystal feeding the PLLs.
 * @armclk_divlimit: Divisor mask for ARMCLK.
 *
 * Register the clocks for the S3C6400 and S3C6410 SoC range, such
 * as ARMCLK as well as the necessary parent clocks.
 *
 * This call does not setup the clocks, which is left to the
 * s3c64xx_setup_clocks() call which may be needed by the cpufreq
 * or resume code to re-set the clocks if the bootloader has changed
 * them.
 */
void __init s3c64xx_register_clocks(unsigned long xtal,
				    unsigned armclk_divlimit)
{
	unsigned int i;

	armclk_mask = armclk_divlimit;

	s3c24xx_register_baseclocks(xtal);
	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));

	/* Register the gate-off set and immediately disable it */
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
	for (i = 0; i < ARRAY_SIZE(clk_cdev); i++)
		s3c_disable_clocks(clk_cdev[i], 1);

	s3c24xx_register_clocks(clks1, ARRAY_SIZE(clks1));

	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
	for (i = 0; i < ARRAY_SIZE(clksrc_cdev); i++)
		s3c_register_clksrc(clksrc_cdev[i], 1);

	clkdev_add_table(s3c64xx_clk_lookup, ARRAY_SIZE(s3c64xx_clk_lookup));

	s3c_pwmclk_init();
}
/*
 * Register the S5P6440 clock tree: fixed clocks, system clock sources,
 * per-device clocks (registered then gated off), the clkdev lookup
 * table and finally the dummy APB pclk.
 */
void __init s5p6440_register_clocks(void)
{
	unsigned int i;

	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	for (i = 0; i < ARRAY_SIZE(sysclks); i++)
		s3c_register_clksrc(sysclks[i], 1);

	/* per-device gate clocks start out disabled */
	s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
	for (i = 0; i < ARRAY_SIZE(clk_cdev); i++)
		s3c_disable_clocks(clk_cdev[i], 1);

	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));

	for (i = 0; i < ARRAY_SIZE(clksrc_cdev); i++)
		s3c_register_clksrc(clksrc_cdev[i], 1);

	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	clkdev_add_table(s5p6440_clk_lookup, ARRAY_SIZE(s5p6440_clk_lookup));

	s3c24xx_register_clock(&dummy_apb_pclk);
}
/*
 * pxa95x_init - probe-time setup for PXA95x SoCs.
 *
 * Initializes the MFP block, latches the reset status, clears the RDH
 * bit, registers the clock lookups, sets up the DMA controller, hooks
 * the syscore suspend/resume ops and registers the on-chip devices.
 *
 * Returns 0 on success or a negative error code.
 *
 * Fixes over the previous version: the unused local 'i' is removed
 * (it triggered a -Wunused-variable warning) and the assignment is
 * hoisted out of the if-condition for clarity.
 */
static int __init pxa95x_init(void)
{
	int ret = 0;

	if (cpu_is_pxa95x()) {
		mfp_init_base(io_p2v(MFPR_BASE));
		mfp_init_addr(pxa95x_mfp_addr_map);

		reset_status = ARSR;

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
		 * preserve them here in case they will be referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		clkdev_add_table(pxa95x_clkregs, ARRAY_SIZE(pxa95x_clkregs));

		ret = pxa_init_dma(IRQ_DMA, 32);
		if (ret)
			return ret;

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa_gpio_syscore_ops);
		register_syscore_ops(&pxa3xx_clock_syscore_ops);

		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	}

	return ret;
}
/*
 * Register the S5PV210 clock tree: fixed clocks, system/TV/per-device
 * clock sources, init clocks (the "off" set is gated immediately),
 * the clkdev lookup table, the dummy APB pclk and the PWM clock.
 */
void __init s5pv210_register_clocks(void)
{
	int i;

	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	for (i = 0; i < ARRAY_SIZE(sysclks); i++)
		s3c_register_clksrc(sysclks[i], 1);

	for (i = 0; i < ARRAY_SIZE(sclk_tv); i++)
		s3c_register_clksrc(sclk_tv[i], 1);

	for (i = 0; i < ARRAY_SIZE(clksrc_cdev); i++)
		s3c_register_clksrc(clksrc_cdev[i], 1);

	s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
	s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));

	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	clkdev_add_table(s5pv210_clk_lookup, ARRAY_SIZE(s5pv210_clk_lookup));

	/* per-device gate clocks start out disabled */
	s3c24xx_register_clocks(clk_cdev, ARRAY_SIZE(clk_cdev));
	for (i = 0; i < ARRAY_SIZE(clk_cdev); i++)
		s3c_disable_clocks(clk_cdev[i], 1);

	s3c24xx_register_clock(&dummy_apb_pclk);
	s3c_pwmclk_init();
}
/*
 * msm_clock_init - initialize the MSM clock framework from board data.
 * @data: board-supplied table of clock lookups plus optional pre/post
 *        init hooks.
 *
 * Links each clock into its parent's children list, performs rate
 * handoff for clocks left running by the bootloader (keeping them
 * enabled so they are not turned off mid-use), and registers the
 * lookup table with clkdev.
 */
void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->pre_init)
		clk_init_data->pre_init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);

		/* only link into the parent once */
		if (parent && list_empty(&clk->siblings))
			list_add(&clk->siblings, &parent->children);

		/*
		 * Hand off clocks the bootloader left configured: mark
		 * them and keep them prepared/enabled.
		 */
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk)) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_prepare_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);

	if (clk_init_data->post_init)
		clk_init_data->post_init();
}
/*
 * arch_clk_init - register the SoC clock tree.
 *
 * Autodetects whether the PLL runs off EXTAL or the FLL, registers the
 * main clocks, installs the clkdev lookups, then registers the DIV4,
 * DIV6 (reparentable) and MSTP clocks.  Returns 0 on success or the
 * first failing registration's error code.
 */
int __init arch_clk_init(void)
{
	int idx, ret = 0;

	/* autodetect extal or fll configuration */
	pll_clk.parent = (__raw_readl(PLLCR) & 0x1000) ?
		&fll_clk : &extal_clk;

	for (idx = 0; !ret && (idx < ARRAY_SIZE(main_clks)); idx++)
		ret = clk_register(main_clks[idx]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR);

	return ret;
}
/*
 * Register the EXYNOS5 clock tree: fixed clocks, system/TV/per-device
 * clock sources, always-on init clocks, gated-off device and init
 * clocks, the clkdev lookup table, the clock syscore ops and the PWM
 * clock.
 */
void __init exynos5_register_clocks(void)
{
	int i;

	s3c24xx_register_clocks(exynos5_clks, ARRAY_SIZE(exynos5_clks));

	for (i = 0; i < ARRAY_SIZE(exynos5_sysclks); i++)
		s3c_register_clksrc(exynos5_sysclks[i], 1);

	for (i = 0; i < ARRAY_SIZE(exynos5_sclk_tv); i++)
		s3c_register_clksrc(exynos5_sclk_tv[i], 1);

	for (i = 0; i < ARRAY_SIZE(exynos5_clksrc_cdev); i++)
		s3c_register_clksrc(exynos5_clksrc_cdev[i], 1);

	s3c_register_clksrc(exynos5_clksrcs, ARRAY_SIZE(exynos5_clksrcs));
	s3c_register_clocks(exynos5_init_clocks_on,
			    ARRAY_SIZE(exynos5_init_clocks_on));

	/* per-device gate clocks start out disabled */
	s3c24xx_register_clocks(exynos5_clk_cdev,
				ARRAY_SIZE(exynos5_clk_cdev));
	for (i = 0; i < ARRAY_SIZE(exynos5_clk_cdev); i++)
		s3c_disable_clocks(exynos5_clk_cdev[i], 1);

	s3c_register_clocks(exynos5_init_clocks_off,
			    ARRAY_SIZE(exynos5_init_clocks_off));
	s3c_disable_clocks(exynos5_init_clocks_off,
			   ARRAY_SIZE(exynos5_init_clocks_off));

	clkdev_add_table(exynos5_clk_lookup, ARRAY_SIZE(exynos5_clk_lookup));

	register_syscore_ops(&exynos5_clock_syscore_ops);
	s3c_pwmclk_init();
}
/*
 * msm_clock_init - initialize the MSM clock framework from board data.
 * @data: board-supplied table of clock lookups plus an optional init
 *        hook.
 *
 * Re-applies each clock's current parent, performs rate handoff for
 * clocks left running by the bootloader (keeping them enabled so they
 * are not turned off mid-use), and registers the lookup table with
 * clkdev.
 */
void __init msm_clock_init(struct clock_init_data *data)
{
	unsigned n;
	struct clk_lookup *clock_tbl;
	size_t num_clocks;

	clk_init_data = data;
	if (clk_init_data->init)
		clk_init_data->init();

	clock_tbl = data->table;
	num_clocks = data->size;

	for (n = 0; n < num_clocks; n++) {
		struct clk *clk = clock_tbl[n].clk;
		struct clk *parent = clk_get_parent(clk);

		/* re-assert the current parent so bookkeeping is set up */
		clk_set_parent(clk, parent);

		/*
		 * Hand off clocks the bootloader left configured: mark
		 * them and keep them enabled.
		 */
		if (clk->ops->handoff && !(clk->flags & CLKFLAG_HANDOFF_RATE)) {
			if (clk->ops->handoff(clk)) {
				clk->flags |= CLKFLAG_HANDOFF_RATE;
				clk_enable(clk);
			}
		}
	}

	clkdev_add_table(clock_tbl, num_clocks);
}
/*
 * s3c2443_init_clocks - set up the S3C2443 clock tree.
 * @xtal: crystal rate feeding the PLLs.
 *
 * Computes the EPLL rate, runs the common S3C2443 clock init, then
 * registers the fixed clocks, clock sources, the gated-off init
 * clocks, the clkdev lookup table and the PWM clock.
 */
void __init s3c2443_init_clocks(int xtal)
{
	unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
	int i;

	clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
	clk_epll.parent = &clk_epllref.clk;

	s3c2443_common_init_clocks(xtal, s3c2443_get_mpll,
				   armdiv, ARRAY_SIZE(armdiv),
				   S3C2443_CLKDIV0_ARMDIV_MASK);

	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	for (i = 0; i < ARRAY_SIZE(clksrcs); i++)
		s3c_register_clksrc(clksrcs[i], 1);

	/* We must be careful disabling the clocks we are not intending to
	 * be using at boot time, as subsystems such as the LCD which do
	 * their own DMA requests to the bus can cause the system to lockup
	 * if they where in the middle of requesting bus access.
	 *
	 * Disabling the LCD clock if the LCD is active is very dangerous,
	 * and therefore the bootloader should be careful to not enable
	 * the LCD clock if it is not needed.
	 */

	/* install (and disable) the clocks we do not need immediately */
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));

	s3c_pwmclk_init();
}
/*
 * arch_clk_init - register the SoC clock tree.
 *
 * Selects the PLL parent (DLL vs EXTAL) from the PLLCR register,
 * registers the main clocks, installs the clkdev lookups, then
 * registers the DIV4, DIV6 and MSTP32 clocks.  Returns 0 on success
 * or the first failing registration's error code.
 */
int __init arch_clk_init(void)
{
	int idx, ret = 0;

	/* PLLCR bit 12 selects the DLL as PLL input */
	pll_clk.parent = (__raw_readl(PLLCR) & 0x1000) ?
		&dll_clk : &extal_clk;

	for (idx = 0; !ret && (idx < ARRAY_SIZE(main_clks)); idx++)
		ret = clk_register(main_clks[idx]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);

	return ret;
}
/*
 * pxa95x_init - probe-time setup for PXA95x SoCs.
 *
 * Initializes the MFP block, latches the reset status, clears the RDH
 * bit, registers the clock lookups, sets up the DMA controller, hooks
 * the syscore suspend/resume ops and registers the on-chip devices.
 *
 * Returns 0 on success or a negative error code.
 *
 * Fixes over the previous version: the unused local 'i' is removed
 * (it triggered a -Wunused-variable warning), the empty comment block
 * is filled in with the rationale for the ASCR write, and the
 * assignment is hoisted out of the if-condition for clarity.
 */
static int __init pxa95x_init(void)
{
	int ret = 0;

	if (cpu_is_pxa95x()) {
		mfp_init_base(io_p2v(MFPR_BASE));
		mfp_init_addr(pxa95x_mfp_addr_map);

		reset_status = ARSR;

		/*
		 * Clear the RDH bit after reset while preserving the DxS
		 * bits, which are write-1-to-clear.
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		clkdev_add_table(pxa95x_clkregs, ARRAY_SIZE(pxa95x_clkregs));

		ret = pxa_init_dma(IRQ_DMA, 32);
		if (ret)
			return ret;

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_clock_syscore_ops);

		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	}

	return ret;
}
/*
 * r8a7779_clock_init - set up the R8A7779 clock tree.
 *
 * Selects the PLLA rate (1.5GHz or 1.6GHz) and the per-clock divider
 * ratios from the MD(1)/MD(2) mode pins, then registers the main
 * clocks, the MSTP gate clocks and the clkdev lookups.  Panics if any
 * registration fails, since the system cannot run without clocks.
 */
void __init r8a7779_clock_init(void)
{
	u32 mode = r8a7779_read_mode_pins();
	int k, ret = 0;

	if (mode & MD(1)) {
		/* MD1 set: PLLA runs at 1.5 GHz */
		plla_clk.rate = 1500000000;

		SH_CLK_SET_RATIO(&clkz_clk_ratio, 2, 3);
		SH_CLK_SET_RATIO(&clkzs_clk_ratio, 1, 6);
		SH_CLK_SET_RATIO(&clki_clk_ratio, 1, 2);
		SH_CLK_SET_RATIO(&clks_clk_ratio, 1, 6);
		SH_CLK_SET_RATIO(&clks1_clk_ratio, 1, 12);
		SH_CLK_SET_RATIO(&clks3_clk_ratio, 1, 8);
		SH_CLK_SET_RATIO(&clks4_clk_ratio, 1, 16);
		SH_CLK_SET_RATIO(&clkp_clk_ratio, 1, 24);
		SH_CLK_SET_RATIO(&clkg_clk_ratio, 1, 24);

		/* MD2 further selects the B/OUT clock ratios */
		if (mode & MD(2)) {
			SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 36);
			SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 36);
		} else {
			SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 24);
			SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 24);
		}
	} else {
		/* MD1 clear: PLLA runs at 1.6 GHz */
		plla_clk.rate = 1600000000;

		SH_CLK_SET_RATIO(&clkz_clk_ratio, 1, 2);
		SH_CLK_SET_RATIO(&clkzs_clk_ratio, 1, 8);
		SH_CLK_SET_RATIO(&clki_clk_ratio, 1, 2);
		SH_CLK_SET_RATIO(&clks_clk_ratio, 1, 8);
		SH_CLK_SET_RATIO(&clks1_clk_ratio, 1, 16);
		SH_CLK_SET_RATIO(&clks3_clk_ratio, 1, 8);
		SH_CLK_SET_RATIO(&clks4_clk_ratio, 1, 16);
		SH_CLK_SET_RATIO(&clkp_clk_ratio, 1, 32);
		SH_CLK_SET_RATIO(&clkg_clk_ratio, 1, 24);

		/* MD2 further selects the B/OUT clock ratios */
		if (mode & MD(2)) {
			SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 32);
			SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 32);
		} else {
			SH_CLK_SET_RATIO(&clkb_clk_ratio, 1, 24);
			SH_CLK_SET_RATIO(&clkout_clk_ratio, 1, 24);
		}
	}

	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		shmobile_clk_init();
	else
		panic("failed to setup r8a7779 clocks\n");
}
/*
 * versatile_init_early - early initializations for Versatile boards.
 *
 * Points the OSC4 clock at its VCO register in the system controller,
 * registers the clkdev lookups, and starts sched_clock off the 24MHz
 * counter.
 */
void __init versatile_init_early(void)
{
	void __iomem *sysbase = __io_address(VERSATILE_SYS_BASE);

	osc4_clk.vcoreg = sysbase + VERSATILE_SYS_OSCCLCD_OFFSET;
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	versatile_sched_clock_init(sysbase + VERSATILE_SYS_24MHz_OFFSET,
				   24000000);
}
/*
 * Register all AT91SAM9G45 clocks: the peripheral clocks, the clkdev
 * lookup tables for peripherals and USARTs, the 9M10/9M11-only video
 * decoder clock, and the programmable clocks.
 */
static void __init at91sam9g45_register_clocks(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(periph_clocks); idx++)
		clk_register(periph_clocks[idx]);

	clkdev_add_table(periph_clocks_lookups,
			 ARRAY_SIZE(periph_clocks_lookups));
	clkdev_add_table(usart_clocks_lookups,
			 ARRAY_SIZE(usart_clocks_lookups));

	/* The video decoder exists on AT91SAM9M10/M11 only */
	if (cpu_is_at91sam9m10() || cpu_is_at91sam9m11())
		clk_register(&vdec_clk);

	clk_register(&pck0);
	clk_register(&pck1);
}
/*
 * s3c2410_init_clocks - set up the S3C2410 clock tree.
 * @xtal: crystal rate feeding the PLLs.
 *
 * Registers the base clocks, computes their rates, adds the S3C2410
 * base clocks and ARMCLK, installs the clkdev lookup table and
 * initializes the watchdog-based reset.
 */
void __init s3c2410_init_clocks(int xtal)
{
	s3c24xx_register_baseclocks(xtal);
	s3c2410_setup_clocks();
	s3c2410_baseclk_add();
	s3c24xx_register_clock(&s3c2410_armclk);
	clkdev_add_table(s3c2410_clk_lookup, ARRAY_SIZE(s3c2410_clk_lookup));
	samsung_wdt_reset_init(S3C24XX_VA_WATCHDOG);
}
/*
 * intcp_init_early - early init for the Integrator/CP.
 *
 * Registers the CP clock lookups, runs the common Integrator early
 * init, and (when the platform sched_clock support is built in)
 * starts sched_clock off the 24MHz reference counter.
 */
static void __init intcp_init_early(void)
{
	clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups));

	integrator_init_early();

#ifdef CONFIG_PLAT_VERSATILE_SCHED_CLOCK
	versatile_sched_clock_init(REFCOUNTER, 24000000);
#endif
}
/*
 * mx28_clocks_init - set up the i.MX28 clock tree.
 *
 * Runs the miscellaneous clock fixups, registers the clkdev lookup
 * table and initializes the system timer off the 32kHz clock.
 *
 * Returns 0 unconditionally.
 */
int __init mx28_clocks_init(void)
{
	clk_misc_init();

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);

	return 0;
}
/*
 * Register all AT91SAM9x5 clocks.  The common peripheral clocks and
 * lookup tables are registered first; the remaining clocks depend on
 * which family member (9G15/9G25/9G35/9X25/9X35) is running, since
 * each variant provides a different subset of controllers.
 */
static void __init at91sam9x5_register_clocks(void)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(periph_clocks); idx++)
		clk_register(periph_clocks[idx]);

	clkdev_add_table(periph_clocks_lookups,
			 ARRAY_SIZE(periph_clocks_lookups));
	clkdev_add_table(usart_clocks_lookups,
			 ARRAY_SIZE(usart_clocks_lookups));

	/* USART3 on 9G25/9X25 only */
	if (cpu_is_at91sam9g25() || cpu_is_at91sam9x25())
		clk_register(&usart3_clk);

	/* first Ethernet MAC on 9G25/9X25/9G35/9X35 */
	if (cpu_is_at91sam9g25() || cpu_is_at91sam9x25() ||
	    cpu_is_at91sam9g35() || cpu_is_at91sam9x35())
		clk_register(&macb0_clk);

	/* LCD controller on 9G15/9G35/9X35 */
	if (cpu_is_at91sam9g15() || cpu_is_at91sam9g35() ||
	    cpu_is_at91sam9x35())
		clk_register(&lcdc_clk);

	/* image sensor interface on 9G25 only */
	if (cpu_is_at91sam9g25())
		clk_register(&isi_clk);

	/* second Ethernet MAC on 9X25 only */
	if (cpu_is_at91sam9x25())
		clk_register(&macb1_clk);

	/* CAN controllers on 9X25/9X35 */
	if (cpu_is_at91sam9x25() || cpu_is_at91sam9x35()) {
		clk_register(&can0_clk);
		clk_register(&can1_clk);
	}

	clk_register(&pck0);
	clk_register(&pck1);
}
/*
 * clk_init - register the RealView clocks.
 *
 * Points the oscillator VCO clock at the correct system-controller
 * register for the board in use (PB1176 uses OSC0, all others OSC4)
 * and installs the clkdev lookup table.
 *
 * Returns 0 unconditionally.
 */
static int __init clk_init(void)
{
	oscvco_clk.vcoreg = __io_address(REALVIEW_SYS_BASE) +
		(machine_is_realview_pb1176() ?
			REALVIEW_SYS_OSC0_OFFSET : REALVIEW_SYS_OSC4_OFFSET);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	return 0;
}
static void hi3716xv100_cpu_init(struct cpu_info *info) { get_hi3716xv100_clock(&info->clk_cpu, &info->clk_timer); sp804_clk.rate = info->clk_timer; info->cpuversion = ""; clkdev_add_table(hi3716xv100_lookups, ARRAY_SIZE(hi3716xv100_lookups));
/*
 * pxa168_init - probe-time setup for the PXA168.
 *
 * Initializes the MFP block, sets up the DMA controller and registers
 * the clock lookups.
 *
 * Returns 0 unconditionally.
 */
static int __init pxa168_init(void)
{
	if (cpu_is_pxa168()) {
		mfp_init_base(MFPR_VIRT_BASE);
		mfp_init_addr(pxa168_mfp_addr_map);
		pxa_init_dma(IRQ_PXA168_DMA_INT0, 32);
		clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs));
	}

	return 0;
}
/*
 * intcp_init - machine init for the Integrator/CP.
 *
 * Registers the CP clock lookups, adds the platform devices and then
 * registers each AMBA device against the MMIO resource tree.
 */
static void __init intcp_init(void)
{
	int n;

	clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups));
	platform_add_devices(intcp_devs, ARRAY_SIZE(intcp_devs));

	for (n = 0; n < ARRAY_SIZE(amba_devs); n++)
		amba_device_register(amba_devs[n], &iomem_resource);
}
/*
 * r8a7740_clock_init - set up the R8A7740 clock tree.
 * @md_ck: mode-pin clock configuration bits (MD_CK1/MD_CK2).
 *
 * Picks the system clock and RCLK parents from the mode pins, then
 * registers main clocks, DIV4/DIV6 dividers, MSTP gates, the late
 * main clocks and FSI dividers, followed by the clkdev lookups.
 * Panics if any registration fails, since the system cannot run
 * without clocks.
 */
void __init r8a7740_clock_init(u8 md_ck)
{
	int k, ret = 0;

	/* detect system clock parent */
	if (md_ck & MD_CK1)
		system_clk.parent = &extal1_div2_clk;
	else
		system_clk.parent = &extal1_clk;

	/* detect RCLK parent */
	switch (md_ck & (MD_CK2 | MD_CK1)) {
	case MD_CK2 | MD_CK1:
		r_clk.parent = &extal1_div2048_clk;
		break;
	case MD_CK2:
		r_clk.parent = &extal1_div1024_clk;
		break;
	case MD_CK1:
	default:
		r_clk.parent = &extalr_clk;
		break;
	}

	/* stop at the first failed registration */
	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
		ret = clk_register(main_clks[k]);

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);

	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);

	if (!ret)
		ret = sh_clk_div6_reparent_register(div6_reparent_clks,
						    DIV6_REPARENT_NR);

	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	/* late main clocks depend on the dividers registered above */
	for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
		ret = clk_register(late_main_clks[k]);

	if (!ret)
		ret = sh_clk_fsidiv_register(fsidivs, FSIDIV_REPARENT_NR);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		shmobile_clk_init();
	else
		panic("failed to setup r8a7740 clocks\n");
}
/*
 * mx31_clocks_init - set up the i.MX31 clock tree.
 * @fref: CKIH reference clock rate in Hz.
 *
 * Registers the clkdev lookups, fixes up the CSI clock parent, gates
 * every clock that is not needed at boot, disables the USB PLL (after
 * moving ipg_per off it), enables the always-needed clocks and starts
 * the GPT-based system timer.
 *
 * Returns 0 unconditionally.
 */
int __init mx31_clocks_init(unsigned long fref)
{
	u32 reg;

	ckih_rate = fref;

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	/* change the csi_clk parent if necessary */
	reg = __raw_readl(MXC_CCM_CCMR);
	if (!(reg & MXC_CCM_CCMR_CSCS))
		if (clk_set_parent(&csi_clk, &usb_pll_clk))
			pr_err("%s: error changing csi_clk parent\n", __func__);

	/* Turn off all possible clocks */
	__raw_writel((3 << 4), MXC_CCM_CGR0);
	__raw_writel(0, MXC_CCM_CGR1);
	__raw_writel((3 << 8) | (3 << 14) | (3 << 16)|
		     1 << 27 | 1 << 28, /* Bit 27 and 28 are not defined for
					   MX32, but still required to be set */
		     MXC_CCM_CGR2);

	/*
	 * Before turning off usb_pll make sure ipg_per_clk is generated
	 * by ipg_clk and not usb_pll.
	 */
	__raw_writel(__raw_readl(MXC_CCM_CCMR) | (1 << 24), MXC_CCM_CCMR);

	usb_pll_disable(&usb_pll_clk);

	pr_info("Clock input source is %ld\n", clk_get_rate(&ckih_clk));

	/* keep the core set of clocks running */
	clk_enable(&gpt_clk);
	clk_enable(&emi_clk);
	clk_enable(&iim_clk);
	mx31_read_cpu_rev();

	clk_enable(&serial_pll_clk);

	if (mx31_revision() >= MX31_CHIP_REV_2_0) {
		reg = __raw_readl(MXC_CCM_PMCR1);
		/* No PLL restart on DVFS switch; enable auto EMI handshake */
		reg |= MXC_CCM_PMCR1_PLLRDIS | MXC_CCM_PMCR1_EMIRQ_EN;
		__raw_writel(reg, MXC_CCM_PMCR1);
	}

	mxc_timer_init(&ipg_clk, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
			MX31_INT_GPT);

	return 0;
}
/*
 * clk_init - register the BF609 clocks.
 *
 * Primes the rate of every clock flagged NEEDS_INITIALIZATION by
 * querying it once, then installs the clkdev lookup table.
 *
 * Returns 0 unconditionally.
 */
int __init clk_init(void)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(bf609_clks); n++) {
		struct clk *c = bf609_clks[n].clk;

		/* a rate query initializes clocks flagged as such */
		if (c->flags & NEEDS_INITIALIZATION)
			clk_get_rate(c);
	}

	clkdev_add_table(bf609_clks, ARRAY_SIZE(bf609_clks));

	return 0;
}