static void __init cpg_mstp_clocks_init(struct device_node *np)
{
        struct mstp_clock_group *group;
        const char *idxname;
        struct clk **clks;
        unsigned int i;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        clks = kmalloc(MSTP_MAX_CLOCKS * sizeof(*clks), GFP_KERNEL);
        if (group == NULL || clks == NULL) {
                kfree(group);
                kfree(clks);
                pr_err("%s: failed to allocate group\n", __func__);
                return;
        }

        spin_lock_init(&group->lock);
        group->data.clks = clks;

        group->smstpcr = of_iomap(np, 0);
        group->mstpsr = of_iomap(np, 1);
        if (group->smstpcr == NULL) {
                pr_err("%s: failed to remap SMSTPCR\n", __func__);
                kfree(group);
                kfree(clks);
                return;
        }

        for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
                clks[i] = ERR_PTR(-ENOENT);

        if (of_find_property(np, "clock-indices", &i))
                idxname = "clock-indices";
        else
                idxname = "renesas,clock-indices";

        for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
                const char *parent_name;
                const char *name;
                u32 clkidx;
                int ret;

                /* Skip clocks with no name. */
                ret = of_property_read_string_index(np, "clock-output-names",
                                                    i, &name);
                if (ret < 0 || strlen(name) == 0)
                        continue;

                parent_name = of_clk_get_parent_name(np, i);
                ret = of_property_read_u32_index(np, idxname, i, &clkidx);
                if (parent_name == NULL || ret < 0)
                        break;

                if (clkidx >= MSTP_MAX_CLOCKS) {
                        pr_err("%s: invalid clock %s %s index %u\n",
                               __func__, np->name, name, clkidx);
                        continue;
                }

                clks[clkidx] = cpg_mstp_clock_register(name, parent_name,
                                                       clkidx, group);
                if (!IS_ERR(clks[clkidx])) {
                        group->data.clk_num = max(group->data.clk_num,
                                                  clkidx + 1);
                        /*
                         * Register a clkdev to let board code retrieve the
                         * clock by name and register aliases for non-DT
                         * devices.
                         *
                         * FIXME: Remove this when all devices that require a
                         * clock will be instantiated from DT.
                         */
                        clk_register_clkdev(clks[clkidx], name, NULL);
                } else {
                        pr_err("%s: failed to register %s %s clock (%ld)\n",
                               __func__, np->name, name,
                               PTR_ERR(clks[clkidx]));
                }
        }

        of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
}
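/*
 * The function above installs a standard "onecell" clock provider, so a
 * consumer can resolve an individual MSTP gate through its "clocks" phandle
 * and index. A minimal, illustrative sketch; the consumer node and helper
 * name are hypothetical and not part of the driver above.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static int example_enable_first_clock(struct device_node *consumer_np)
{
        struct clk *clk;
        int ret;

        /* Resolve entry 0 of the consumer's "clocks" property. */
        clk = of_clk_get(consumer_np, 0);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                clk_put(clk);

        return ret;
}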
/*****************************************************************************
 * FUNCTION
 *      mjc_probe
 * DESCRIPTION
 *      1. Register the MJC device number.
 *      2. Allocate and initialize the MJC cdev struct.
 *      3. Add the MJC device to the kernel (cdev_add).
 *      4. Register the MJC interrupt.
 * PARAMETERS
 *      None.
 * RETURNS
 *      0 on success, or a negative error code if class creation fails.
 ****************************************************************************/
static int mjc_probe(struct platform_device *pDev)
{
        struct device_node *node = pDev->dev.of_node;
        struct resource res;
        int ret;
        unsigned long ulFlags;

        MJCDBG("mjc_probe()");

        /* Note: failures of register_chrdev_region()/cdev_add() are only logged. */
        ret = register_chrdev_region(mjc_devno, 1, MJC_DEVNAME);
        if (ret)
                MJCMSG("[ERROR] Can't get major number for MJC device\n");

        g_mjc_cdev = cdev_alloc();
        g_mjc_cdev->owner = THIS_MODULE;
        g_mjc_cdev->ops = &g_mjc_fops;

        ret = cdev_add(g_mjc_cdev, mjc_devno, 1);

        /* Create /dev/mjc automatically. */
        pMjcClass = class_create(THIS_MODULE, MJC_DEVNAME);
        if (IS_ERR(pMjcClass)) {
                ret = PTR_ERR(pMjcClass);
                MJCMSG("Unable to create class, err = %d\n", ret);
                return ret;
        }

        mjcDevice = device_create(pMjcClass, NULL, mjc_devno, NULL, MJC_DEVNAME);

        gulRegister = (unsigned long)of_iomap(node, 0);
        gi4IrqID = irq_of_parse_and_map(node, 0);

        of_address_to_resource(node, 0, &res);
        gu1PaReg = res.start;
        gu1PaSize = resource_size(&res);

        spin_lock_irqsave(&ContextLock, ulFlags);
        grContext.rEvent.u4TimeoutMs = 0xFFFFFFFF;
        _mjc_CreateEvent(&(grContext.rEvent));
        spin_unlock_irqrestore(&ContextLock, ulFlags);

        spin_lock_irqsave(&HWLock, ulFlags);
        grHWLockContext.rEvent.u4TimeoutMs = 0xFFFFFFFF;
        _mjc_CreateEvent(&(grHWLockContext.rEvent));
        spin_unlock_irqrestore(&HWLock, ulFlags);

        /* Register the interrupt (level triggered, active low). */
        /* Gary todo */
        if (request_irq(gi4IrqID, (irq_handler_t)mjc_intr_dlr, IRQF_TRIGGER_LOW,
                        MJC_DEVNAME, NULL) < 0)
                MJCMSG("[ERROR] mjc_probe() failed to request dec irq\n");
        else
                MJCDBG("mjc_probe() successfully requested dec irq\n");

        disable_irq(gi4IrqID);

        return 0;
}
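/*
 * mjc_probe() above only logs failures of register_chrdev_region() and
 * ignores the return value of cdev_add(). The sketch below shows how that
 * early section could unwind on failure. It is illustrative only, not the
 * driver's actual error handling; it reuses the driver's globals
 * (mjc_devno, g_mjc_cdev, g_mjc_fops, MJC_DEVNAME).
 */
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kobject.h>

static int mjc_register_chrdev_sketch(void)
{
        int ret;

        ret = register_chrdev_region(mjc_devno, 1, MJC_DEVNAME);
        if (ret)
                return ret;

        g_mjc_cdev = cdev_alloc();
        if (!g_mjc_cdev) {
                ret = -ENOMEM;
                goto err_unregister;
        }
        g_mjc_cdev->owner = THIS_MODULE;
        g_mjc_cdev->ops = &g_mjc_fops;

        ret = cdev_add(g_mjc_cdev, mjc_devno, 1);
        if (ret)
                goto err_cdev;

        return 0;

err_cdev:
        /* The cdev was never added, so drop its kobject reference. */
        kobject_put(&g_mjc_cdev->kobj);
err_unregister:
        unregister_chrdev_region(mjc_devno, 1);
        return ret;
}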
static int __init b15_rac_init(void) { struct device_node *dn, *cpu_dn; int ret = 0, cpu; u32 reg, en_mask = 0; dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl"); if (!dn) return -ENODEV; if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n")) goto out; b15_rac_base = of_iomap(dn, 0); if (!b15_rac_base) { pr_err("failed to remap BIU control base\n"); ret = -ENOMEM; goto out; } cpu_dn = of_get_cpu_node(0, NULL); if (!cpu_dn) { ret = -ENODEV; goto out; } if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15")) rac_flush_offset = B15_RAC_FLUSH_REG; else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53")) rac_flush_offset = B53_RAC_FLUSH_REG; else { pr_err("Unsupported CPU\n"); of_node_put(cpu_dn); ret = -EINVAL; goto out; } of_node_put(cpu_dn); ret = register_reboot_notifier(&b15_rac_reboot_nb); if (ret) { pr_err("failed to register reboot notifier\n"); iounmap(b15_rac_base); goto out; } if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, "arm/cache-b15-rac:dead", NULL, b15_rac_dead_cpu); if (ret) goto out_unmap; ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING, "arm/cache-b15-rac:dying", NULL, b15_rac_dying_cpu); if (ret) goto out_cpu_dead; } if (IS_ENABLED(CONFIG_PM_SLEEP)) register_syscore_ops(&b15_rac_syscore_ops); spin_lock(&rac_lock); reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG); for_each_possible_cpu(cpu) en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT)); WARN(reg & en_mask, "Read-ahead cache not previously disabled\n"); b15_rac_enable(); set_bit(RAC_ENABLED, &b15_rac_flags); spin_unlock(&rac_lock); pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n", b15_rac_base + RAC_CONFIG0_REG); goto out; out_cpu_dead: cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING); out_unmap: unregister_reboot_notifier(&b15_rac_reboot_nb); iounmap(b15_rac_base); out: of_node_put(dn); return ret; }
static void __init imx6q_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; int i, irq; int ret; clk[dummy] = imx_clk_fixed("dummy", 0); clk[ckil] = imx_obtain_fixed_clock("ckil", 0); clk[ckih] = imx_obtain_fixed_clock("ckih1", 0); clk[osc] = imx_obtain_fixed_clock("osc", 0); np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); base = of_iomap(np, 0); WARN_ON(!base); /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */ if (cpu_is_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { post_div_table[1].div = 1; post_div_table[2].div = 1; video_div_table[1].div = 1; video_div_table[2].div = 1; }; /* type name parent_name base div_mask */ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f); clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1); clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3); clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f); clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f); clk[pll6_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3); clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host","osc", base + 0x20, 0x3); /* * Bit 20 is the reserved and read-only bit, we do this only for: * - Do nothing for usbphy clk_enable/disable * - Keep refcount when do usbphy clk_enable/disable, in that case, * the clk framework may need to enable/disable usbphy's parent */ clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore. */ clk[usbphy1_gate] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); clk[usbphy2_gate] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); clk[sata_ref] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5); clk[pcie_ref] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4); clk[sata_ref_100m] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20); clk[pcie_ref_125m] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); clk[enet_ref] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); clk[lvds1_sel] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); clk[lvds2_sel] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); /* * lvds1_gate and lvds2_gate are pseudo-gates. Both can be * independently configured as clock inputs or outputs. We treat * the "output_enable" bit as a gate, even though it's really just * enabling clock output. 
*/ clk[lvds1_gate] = imx_clk_gate("lvds1_gate", "dummy", base + 0x160, 10); clk[lvds2_gate] = imx_clk_gate("lvds2_gate", "dummy", base + 0x160, 11); /* name parent_name reg idx */ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2); clk[pll4_post_div] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); clk[pll4_audio_div] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); clk[pll5_post_div] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); clk[pll5_video_div] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); imx6q_pm_set_ccm_base(base); /* name reg shift width parent_names num_parents */ clk[step] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[spdif_sel] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); clk[gpu2d_axi] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); clk[gpu3d_axi] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); clk[gpu2d_core_sel] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); clk[gpu3d_core_sel] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); clk[ipu1_sel] = 
imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels)); clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels)); clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels)); clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels)); clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels)); clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels)); clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); clk[ssi1_sel] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clk[ssi2_sel] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clk[ssi3_sel] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clk[usdhc1_sel] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[usdhc2_sel] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[usdhc3_sel] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[usdhc4_sel] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); clk[emi_sel] = imx_clk_fixup_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels), imx_cscmr1_fixup); clk[emi_slow_sel] = imx_clk_fixup_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels), imx_cscmr1_fixup); clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); clk[cko2_sel] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); clk[cko] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); /* name reg shift width busy: reg, shift parent_names num_parents */ clk[periph] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, 
ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width */ clk[periph_clk2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); clk[periph2_clk2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); clk[ipg] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); clk[ipg_per] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); clk[esai_pred] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); clk[esai_podf] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); clk[asrc_pred] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3); clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3); clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6); clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3); clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); clk[ldb_di0_podf] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0); clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); clk[ldb_di1_podf] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0); clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); clk[ipu2_di1_pre] = imx_clk_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3); clk[hsi_tx_podf] = imx_clk_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3); clk[ssi1_pred] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); clk[ssi1_podf] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); clk[ssi2_pred] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); clk[ssi2_podf] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); clk[ssi3_pred] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); clk[ssi3_podf] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); clk[usdhc1_podf] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); clk[usdhc2_podf] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); clk[usdhc3_podf] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); clk[usdhc4_podf] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); clk[enfc_pred] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); clk[enfc_podf] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); clk[emi_podf] = imx_clk_fixup_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); clk[emi_slow_podf] = imx_clk_fixup_divider("emi_slow_podf", "emi_slow_sel", 
base + 0x1c, 23, 3, imx_cscmr1_fixup); clk[vpu_axi_podf] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3); clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); /* name parent_name reg shift width busy: reg, shift */ clk[axi] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4); clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); clk[arm] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); clk[ahb] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); /* name parent_name reg shift */ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4); clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6); clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16); clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); clk[can2_serial] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20); clk[ecspi1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); clk[ecspi2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); clk[ecspi3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8); clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10); clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16); clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20); clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22); if (cpu_is_imx6dl()) /* * The multiplexer and divider of imx6q clock gpu3d_shader get * redefined/reused as gpu2d_core_sel and gpu2d_core_podf on imx6dl. 
*/ clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu3d_shader", base + 0x6c, 24); else clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); clk[gpu3d_core] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); clk[hdmi_iahb] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); clk[hdmi_isfr] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4); clk[i2c1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); clk[i2c2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12); clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14); clk[vdoa] = imx_clk_gate2("vdoa", "vdo_axi", base + 0x70, 26); clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0); clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2); clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4); clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6); clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8); clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12); clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16); if (cpu_is_imx6dl()) /* * The multiplexer and divider of the imx6q clock gpu2d get * redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl. */ clk[mlb] = imx_clk_gate2("mlb", "gpu2d_core_podf", base + 0x74, 18); else clk[mlb] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20); clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); clk[openvg_axi] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30); clk[pcie_axi] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0); clk[per1_bch] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12); clk[pwm1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16); clk[pwm2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18); clk[pwm3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20); clk[pwm4] = imx_clk_gate2("pwm4", "ipg_per", base + 0x78, 22); clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28); clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0); clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4); clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); clk[spdif] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14); clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18); clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20); clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22); clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24); clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26); clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); clk[usdhc1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); clk[usdhc2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); 
clk[usdhc3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); clk[usdhc4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); clk[eim_slow] = imx_clk_gate2("eim_slow", "emi_slow_podf", base + 0x80, 10); clk[vdo_axi] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12); clk[vpu_axi] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14); clk[cko1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); clk[cko2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24); for (i = 0; i < ARRAY_SIZE(clk); i++) if (IS_ERR(clk[i])) pr_err("i.MX6q clk %d: register failed with %ld\n", i, PTR_ERR(clk[i])); clk_data.clks = clk; clk_data.clk_num = ARRAY_SIZE(clk); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); clk_register_clkdev(clk[ahb], "ahb", NULL); clk_register_clkdev(clk[cko1], "cko1", NULL); clk_register_clkdev(clk[arm], NULL, "cpu0"); clk_register_clkdev(clk[pll4_post_div], "pll4_post_div", NULL); clk_register_clkdev(clk[pll4_audio], "pll4_audio", NULL); if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) || cpu_is_imx6dl()) { clk_set_parent(clk[ldb_di0_sel], clk[pll5_video_div]); clk_set_parent(clk[ldb_di1_sel], clk[pll5_video_div]); } /* * The gpmi needs 100MHz frequency in the EDO/Sync mode, * We can not get the 100MHz from the pll2_pfd0_352m. * So choose pll2_pfd2_396m as enfc_sel's parent. */ clk_set_parent(clk[enfc_sel], clk[pll2_pfd2_396m]); for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clk[clks_init_on[i]]); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { clk_prepare_enable(clk[usbphy1_gate]); clk_prepare_enable(clk[usbphy2_gate]); } /* * Let's initially set up CLKO with OSC24M, since this configuration * is widely used by imx6q board designs to clock audio codec. */ ret = clk_set_parent(clk[cko2_sel], clk[osc]); if (!ret) ret = clk_set_parent(clk[cko], clk[cko2]); if (ret) pr_warn("failed to set up CLKO: %d\n", ret); /* Audio-related clocks configuration */ clk_set_parent(clk[spdif_sel], clk[pll3_pfd3_454m]); /* All existing boards with PCIe use LVDS1 */ if (IS_ENABLED(CONFIG_PCI_IMX6)) clk_set_parent(clk[lvds1_sel], clk[sata_ref]); /* Set initial power mode */ imx6q_set_lpm(WAIT_CLOCKED); np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); base = of_iomap(np, 0); WARN_ON(!base); irq = irq_of_parse_and_map(np, 0); mxc_timer_init(base, irq); }
static int __init keystone_timer_init(struct device_node *np) { struct clock_event_device *event_dev = &timer.event_dev; unsigned long rate; struct clk *clk; int irq, error; irq = irq_of_parse_and_map(np, 0); if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return -EINVAL; } timer.base = of_iomap(np, 0); if (!timer.base) { pr_err("%s: failed to map registers\n", __func__); return -ENXIO; } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { pr_err("%s: failed to get clock\n", __func__); iounmap(timer.base); return PTR_ERR(clk); } error = clk_prepare_enable(clk); if (error) { pr_err("%s: failed to enable clock\n", __func__); goto err; } rate = clk_get_rate(clk); /* disable, use internal clock source */ keystone_timer_writel(0, TCR); /* here we have to be sure the timer has been disabled */ keystone_timer_barrier(); /* reset timer as 64-bit, no pre-scaler, plus features are disabled */ keystone_timer_writel(0, TGCR); /* unreset timer */ keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR); /* init counter to zero */ keystone_timer_writel(0, TIM12); keystone_timer_writel(0, TIM34); timer.hz_period = DIV_ROUND_UP(rate, HZ); /* enable timer interrupts */ keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT); error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER, TIMER_NAME, event_dev); if (error) { pr_err("%s: failed to setup irq\n", __func__); goto err; } /* setup clockevent */ event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; event_dev->set_next_event = keystone_set_next_event; event_dev->set_state_shutdown = keystone_shutdown; event_dev->set_state_periodic = keystone_set_periodic; event_dev->set_state_oneshot = keystone_shutdown; event_dev->cpumask = cpu_possible_mask; event_dev->owner = THIS_MODULE; event_dev->name = TIMER_NAME; event_dev->irq = irq; clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX); pr_info("keystone timer clock @%lu Hz\n", rate); return 0; err: clk_put(clk); iounmap(timer.base); return error; }
static int __devinit tdm_fsl_starlite_probe(struct of_device *ofdev,
                                            const struct of_device_id *match)
{
        int ret = 0;
        struct tdm_priv *priv;
        struct resource res;

        priv = kmalloc(sizeof(struct tdm_priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        dev_set_drvdata(&ofdev->dev, priv);
        priv->device = &ofdev->dev;

        ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
        if (ret) {
                ret = -EINVAL;
                goto err_resource;
        }
        priv->ptdm_base = res.start;

        priv->tdm_regs = of_iomap(ofdev->dev.of_node, 0);
        if (!priv->tdm_regs) {
                ret = -ENOMEM;
                goto err_tdmregs;
        }

        priv->dmac_regs = of_iomap(ofdev->dev.of_node, 1);
        if (!priv->dmac_regs) {
                ret = -ENOMEM;
                goto err_dmacreg;
        }

        /* tdmrd/tdmtd at immrbar + 0x16100 */
        priv->data_regs = (struct tdm_data *)(TDM_DATAREG_OFFSET +
                                              (u8 *)priv->tdm_regs);
        /* TDMCLK_DIV_VAL_RX/TX at TDMBASE + 0x180 */
        priv->clk_regs = (struct tdm_clock *)(TDM_CLKREG_OFFSET +
                                              (u8 *)priv->tdm_regs);

        /* IRQ mapping for tdm err/dmac err, dmac done */
        priv->tdm_err_intr = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        if (priv->tdm_err_intr == NO_IRQ) {
                ret = -EINVAL;
                goto err_tdmerr_irqmap;
        }

        priv->dmac_done_intr = irq_of_parse_and_map(ofdev->dev.of_node, 1);
        if (priv->dmac_done_intr == NO_IRQ) {
                ret = -EINVAL;
                goto err_dmacdone_irqmap;
        }

        ret = request_irq(priv->tdm_err_intr, tdm_err_isr, 0, "tdm_err_isr",
                          priv);
        if (ret)
                goto err_tdmerrisr;

        ret = request_irq(priv->dmac_done_intr, dmac_done_isr, 0,
                          "dmac_done_isr", priv);
        if (ret)
                goto err_dmacdoneisr;

        priv->cfg.loopback = e_TDM_PROCESS_NORMAL;
        priv->cfg.num_ch = TDM_ACTIVE_CHANNELS;
        priv->cfg.ch_type = TDM_CHANNEL_TYPE;
        priv->cfg.ch_width = TDM_SLOT_WIDTH;
        priv->cfg.num_frames = NUM_OF_FRAMES;

        priv->adap = &tdm_fsl_starlite_ops;

        /* Wait queue initialization */
        priv->adap->tdm_rx_flag = 0;

        /* TODO: these should be configured via the DTS or at init time */
        priv->adap->adap_mode = e_TDM_ADAPTER_MODE_NONE;
        priv->adap->tdm_mode = priv->cfg.loopback;
        priv->adap->max_num_ports = priv->cfg.num_ch;
        tdm_set_adapdata(priv->adap, priv);
        priv->adap->parent = &ofdev->dev;

        ret = tdm_add_adapter(priv->adap);
        if (ret < 0) {
                dev_err(priv->device, "failed to add adapter\n");
                goto fail_adapter;
        }

        ret = init_tdm(priv);
        if (ret)
                goto err_tdminit;

        ret = tdm_fsl_starlite_reg_init(priv);
        if (ret)
                goto err_tdminit;

        spin_lock_init(&priv->tdmlock);
        spin_lock(&priv->tdmlock);
        priv->tdm_active = 0;
        spin_unlock(&priv->tdmlock);

        if (tdmen) {
                /*
                 * tdm_fsl_starlite_enable() is assumed to follow the usual
                 * 0-on-success convention; bail out only on failure.
                 */
                ret = tdm_fsl_starlite_enable(priv->adap);
                if (ret)
                        goto err_tdminit;
        }

        return 0;

err_tdminit:
fail_adapter:
        free_irq(priv->dmac_done_intr, priv);
err_dmacdoneisr:
        free_irq(priv->tdm_err_intr, priv);
err_tdmerrisr:
        irq_dispose_mapping(priv->dmac_done_intr);
err_dmacdone_irqmap:
        irq_dispose_mapping(priv->tdm_err_intr);
err_tdmerr_irqmap:
        iounmap(priv->dmac_regs);
err_dmacreg:
        iounmap(priv->tdm_regs);
err_tdmregs:
err_resource:
        dev_set_drvdata(&ofdev->dev, NULL);
        kfree(priv);
err_alloc:
        return ret;
}
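/*
 * Several probes in this collection (the one above, mpc52xx_lpbfifo_probe and
 * spear_adc_probe further down) follow the same pattern: of_iomap() the
 * registers, map and request the interrupt, then unwind in reverse order on
 * failure. A condensed, hypothetical sketch of that idiom; the names here
 * are illustrative and do not belong to any of the drivers in this file.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        void __iomem *regs;
        unsigned int irq;
        int ret;

        /* Map register window 0 described by the device node. */
        regs = of_iomap(np, 0);
        if (!regs)
                return -ENOMEM;

        /* Map and request the first interrupt of the node. */
        irq = irq_of_parse_and_map(np, 0);
        if (!irq) {
                ret = -EINVAL;
                goto err_unmap;
        }

        ret = request_irq(irq, example_isr, 0, dev_name(&pdev->dev), pdev);
        if (ret)
                goto err_dispose;

        return 0;

err_dispose:
        irq_dispose_mapping(irq);
err_unmap:
        iounmap(regs);
        return ret;
}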
static void __init global_timer_of_register(struct device_node *np)
{
        struct clk *gt_clk;
        int err = 0;

        /*
         * In r2p0 the comparators for each processor with the global timer
         * fire when the timer value is greater than or equal to. In previous
         * revisions the comparators fired when the timer value was equal to.
         */
        if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
                pr_warn("global-timer: no support for this cpu version.\n");
                return;
        }

        gt_ppi = irq_of_parse_and_map(np, 0);
        if (!gt_ppi) {
                pr_warn("global-timer: unable to parse irq\n");
                return;
        }

        gt_base = of_iomap(np, 0);
        if (!gt_base) {
                pr_warn("global-timer: invalid base address\n");
                return;
        }

        gt_clk = of_clk_get(np, 0);
        if (!IS_ERR(gt_clk)) {
                err = clk_prepare_enable(gt_clk);
                if (err)
                        goto out_unmap;
        } else {
                pr_warn("global-timer: clk not found\n");
                err = -EINVAL;
                goto out_unmap;
        }

        gt_clk_rate = clk_get_rate(gt_clk);

        gt_evt = alloc_percpu(struct clock_event_device);
        if (!gt_evt) {
                pr_warn("global-timer: can't allocate memory\n");
                err = -ENOMEM;
                goto out_clk;
        }

        err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
                                 "gt", gt_evt);
        if (err) {
                pr_warn("global-timer: can't register interrupt %d (%d)\n",
                        gt_ppi, err);
                goto out_free;
        }

        err = register_cpu_notifier(&gt_cpu_nb);
        if (err) {
                pr_warn("global-timer: unable to register cpu notifier.\n");
                goto out_irq;
        }

        /* Immediately configure the timer on the boot CPU */
        gt_clocksource_init();
        gt_clockevents_init(this_cpu_ptr(gt_evt));

        return;

out_irq:
        free_percpu_irq(gt_ppi, gt_evt);
out_free:
        free_percpu(gt_evt);
out_clk:
        clk_disable_unprepare(gt_clk);
out_unmap:
        iounmap(gt_base);
        WARN(err, "ARM Global timer register failed (%d)\n", err);
}
static void __init imx6sl_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; int irq; int i; clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0); clks[IMX6SL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0); clks[IMX6SL_CLK_OSC] = imx_obtain_fixed_clock("osc", 0); np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop"); base = of_iomap(np, 0); WARN_ON(!base); /* type name parent base div_mask */ clks[IMX6SL_CLK_PLL1_SYS] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x7f); clks[IMX6SL_CLK_PLL2_BUS] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x1); clks[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x3); clks[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x7f); clks[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x7f); clks[IMX6SL_CLK_PLL6_ENET] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6_enet", "osc", base + 0xe0, 0x3); clks[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x3); /* * usbphy1 and usbphy2 are implemented as dummy gates using reserve * bit 20. They are used by phy driver to keep the refcount of * parent PLL correct. usbphy1_gate and usbphy2_gate only needs to be * turned on during boot, and software will not need to control it * anymore after that. */ clks[IMX6SL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); clks[IMX6SL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); clks[IMX6SL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); clks[IMX6SL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); /* dev name parent_name flags reg shift width div: flags, div_table lock */ clks[IMX6SL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); clks[IMX6SL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); clks[IMX6SL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); clks[IMX6SL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); /* name parent_name reg idx */ clks[IMX6SL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0", "pll2_bus", base + 0x100, 0); clks[IMX6SL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", base + 0x100, 1); clks[IMX6SL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", base + 0x100, 2); clks[IMX6SL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0", "pll3_usb_otg", base + 0xf0, 0); clks[IMX6SL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", base + 0xf0, 1); clks[IMX6SL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", base + 0xf0, 2); clks[IMX6SL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ clks[IMX6SL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2); clks[IMX6SL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); clks[IMX6SL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); clks[IMX6SL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); np = ccm_node; base = of_iomap(np, 0); 
WARN_ON(!base); /* Reuse imx6q pm code */ imx6q_pm_set_ccm_base(base); /* name reg shift width parent_names num_parents */ clks[IMX6SL_CLK_STEP] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); clks[IMX6SL_CLK_PLL1_SW] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); clks[IMX6SL_CLK_OCRAM_ALT_SEL] = imx_clk_mux("ocram_alt_sel", base + 0x14, 7, 1, ocram_alt_sels, ARRAY_SIZE(ocram_alt_sels)); clks[IMX6SL_CLK_OCRAM_SEL] = imx_clk_mux("ocram_sel", base + 0x14, 6, 1, ocram_sels, ARRAY_SIZE(ocram_sels)); clks[IMX6SL_CLK_PRE_PERIPH2_SEL] = imx_clk_mux("pre_periph2_sel", base + 0x18, 21, 2, pre_periph_sels, ARRAY_SIZE(pre_periph_sels)); clks[IMX6SL_CLK_PRE_PERIPH_SEL] = imx_clk_mux("pre_periph_sel", base + 0x18, 18, 2, pre_periph_sels, ARRAY_SIZE(pre_periph_sels)); clks[IMX6SL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); clks[IMX6SL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); clks[IMX6SL_CLK_CSI_SEL] = imx_clk_mux("csi_sel", base + 0x3c, 9, 2, csi_lcdif_sels, ARRAY_SIZE(csi_lcdif_sels)); clks[IMX6SL_CLK_LCDIF_AXI_SEL] = imx_clk_mux("lcdif_axi_sel", base + 0x3c, 14, 2, csi_lcdif_sels, ARRAY_SIZE(csi_lcdif_sels)); clks[IMX6SL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_PERCLK_SEL] = imx_clk_fixup_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels), imx_cscmr1_fixup); clks[IMX6SL_CLK_PXP_AXI_SEL] = imx_clk_mux("pxp_axi_sel", base + 0x34, 6, 3, epdc_pxp_sels, ARRAY_SIZE(epdc_pxp_sels)); clks[IMX6SL_CLK_EPDC_AXI_SEL] = imx_clk_mux("epdc_axi_sel", base + 0x34, 15, 3, epdc_pxp_sels, ARRAY_SIZE(epdc_pxp_sels)); clks[IMX6SL_CLK_GPU2D_OVG_SEL] = imx_clk_mux("gpu2d_ovg_sel", base + 0x18, 4, 2, gpu2d_ovg_sels, ARRAY_SIZE(gpu2d_ovg_sels)); clks[IMX6SL_CLK_GPU2D_SEL] = imx_clk_mux("gpu2d_sel", base + 0x18, 8, 2, gpu2d_sels, ARRAY_SIZE(gpu2d_sels)); clks[IMX6SL_CLK_LCDIF_PIX_SEL] = imx_clk_mux("lcdif_pix_sel", base + 0x38, 6, 3, lcdif_pix_sels, ARRAY_SIZE(lcdif_pix_sels)); clks[IMX6SL_CLK_EPDC_PIX_SEL] = imx_clk_mux("epdc_pix_sel", base + 0x38, 15, 3, epdc_pix_sels, ARRAY_SIZE(epdc_pix_sels)); clks[IMX6SL_CLK_SPDIF0_SEL] = imx_clk_mux("spdif0_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); clks[IMX6SL_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); clks[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); clks[IMX6SL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base 
+ 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); clks[IMX6SL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); /* name reg shift width busy: reg, shift parent_names num_parents */ clks[IMX6SL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); clks[IMX6SL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width */ clks[IMX6SL_CLK_OCRAM_PODF] = imx_clk_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3); clks[IMX6SL_CLK_PERIPH_CLK2_PODF] = imx_clk_divider("periph_clk2_podf", "periph_clk2_sel", base + 0x14, 27, 3); clks[IMX6SL_CLK_PERIPH2_CLK2_PODF] = imx_clk_divider("periph2_clk2_podf", "periph2_clk2_sel", base + 0x14, 0, 3); clks[IMX6SL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); clks[IMX6SL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); clks[IMX6SL_CLK_LCDIF_AXI_PODF] = imx_clk_divider("lcdif_axi_podf", "lcdif_axi_sel", base + 0x3c, 16, 3); clks[IMX6SL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); clks[IMX6SL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); clks[IMX6SL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); clks[IMX6SL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); clks[IMX6SL_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); clks[IMX6SL_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); clks[IMX6SL_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); clks[IMX6SL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); clks[IMX6SL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); clks[IMX6SL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); clks[IMX6SL_CLK_PERCLK] = imx_clk_fixup_divider("perclk", "perclk_sel", base + 0x1c, 0, 6, imx_cscmr1_fixup); clks[IMX6SL_CLK_PXP_AXI_PODF] = imx_clk_divider("pxp_axi_podf", "pxp_axi_sel", base + 0x34, 3, 3); clks[IMX6SL_CLK_EPDC_AXI_PODF] = imx_clk_divider("epdc_axi_podf", "epdc_axi_sel", base + 0x34, 12, 3); clks[IMX6SL_CLK_GPU2D_OVG_PODF] = imx_clk_divider("gpu2d_ovg_podf", "gpu2d_ovg_sel", base + 0x18, 26, 3); clks[IMX6SL_CLK_GPU2D_PODF] = imx_clk_divider("gpu2d_podf", "gpu2d_sel", base + 0x18, 29, 3); clks[IMX6SL_CLK_LCDIF_PIX_PRED] = imx_clk_divider("lcdif_pix_pred", "lcdif_pix_sel", base + 0x38, 3, 3); clks[IMX6SL_CLK_EPDC_PIX_PRED] = imx_clk_divider("epdc_pix_pred", "epdc_pix_sel", base + 0x38, 12, 3); clks[IMX6SL_CLK_LCDIF_PIX_PODF] = imx_clk_fixup_divider("lcdif_pix_podf", "lcdif_pix_pred", base + 0x1c, 20, 3, imx_cscmr1_fixup); clks[IMX6SL_CLK_EPDC_PIX_PODF] = imx_clk_divider("epdc_pix_podf", "epdc_pix_pred", base + 0x18, 23, 3); clks[IMX6SL_CLK_SPDIF0_PRED] = imx_clk_divider("spdif0_pred", "spdif0_sel", base + 0x30, 25, 3); clks[IMX6SL_CLK_SPDIF0_PODF] = imx_clk_divider("spdif0_podf", "spdif0_pred", base + 0x30, 22, 3); clks[IMX6SL_CLK_SPDIF1_PRED] = imx_clk_divider("spdif1_pred", "spdif1_sel", base + 0x30, 12, 3); clks[IMX6SL_CLK_SPDIF1_PODF] = imx_clk_divider("spdif1_podf", "spdif1_pred", base + 0x30, 9, 3); clks[IMX6SL_CLK_EXTERN_AUDIO_PRED] = imx_clk_divider("extern_audio_pred", "extern_audio_sel", base + 0x28, 9, 3); clks[IMX6SL_CLK_EXTERN_AUDIO_PODF] = 
imx_clk_divider("extern_audio_podf", "extern_audio_pred", base + 0x28, 25, 3); clks[IMX6SL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6); clks[IMX6SL_CLK_UART_ROOT] = imx_clk_divider("uart_root", "uart_sel", base + 0x24, 0, 6); /* name parent_name reg shift width busy: reg, shift */ clks[IMX6SL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); clks[IMX6SL_CLK_MMDC_ROOT] = imx_clk_busy_divider("mmdc", "periph2", base + 0x14, 3, 3, base + 0x48, 2); clks[IMX6SL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); /* name parent_name reg shift */ clks[IMX6SL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); clks[IMX6SL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); clks[IMX6SL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); clks[IMX6SL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); clks[IMX6SL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); clks[IMX6SL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16); clks[IMX6SL_CLK_GPT] = imx_clk_gate2("gpt", "perclk", base + 0x6c, 20); clks[IMX6SL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22); clks[IMX6SL_CLK_GPU2D_OVG] = imx_clk_gate2("gpu2d_ovg", "gpu2d_ovg_podf", base + 0x6c, 26); clks[IMX6SL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); clks[IMX6SL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); clks[IMX6SL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); clks[IMX6SL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); clks[IMX6SL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x74, 0); clks[IMX6SL_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "pxp_axi_podf", base + 0x74, 2); clks[IMX6SL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_axi", "epdc_axi_podf", base + 0x74, 4); clks[IMX6SL_CLK_LCDIF_AXI] = imx_clk_gate2("lcdif_axi", "lcdif_axi_podf", base + 0x74, 6); clks[IMX6SL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_pix_podf", base + 0x74, 8); clks[IMX6SL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_pix_podf", base + 0x74, 10); clks[IMX6SL_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28); clks[IMX6SL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); clks[IMX6SL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); clks[IMX6SL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); clks[IMX6SL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); clks[IMX6SL_CLK_SDMA] = imx_clk_gate2("sdma", "ipg", base + 0x7c, 6); clks[IMX6SL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif0_podf", base + 0x7c, 14); clks[IMX6SL_CLK_SSI1] = imx_clk_gate2("ssi1", "ssi1_podf", base + 0x7c, 18); clks[IMX6SL_CLK_SSI2] = imx_clk_gate2("ssi2", "ssi2_podf", base + 0x7c, 20); clks[IMX6SL_CLK_SSI3] = imx_clk_gate2("ssi3", "ssi3_podf", base + 0x7c, 22); clks[IMX6SL_CLK_UART] = imx_clk_gate2("uart", "ipg", base + 0x7c, 24); clks[IMX6SL_CLK_UART_SERIAL] = imx_clk_gate2("uart_serial", "uart_root", base + 0x7c, 26); clks[IMX6SL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); clks[IMX6SL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); clks[IMX6SL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); clks[IMX6SL_CLK_USDHC4] 
= imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); for (i = 0; i < ARRAY_SIZE(clks); i++) if (IS_ERR(clks[i])) pr_err("i.MX6SL clk %d: register failed with %ld\n", i, PTR_ERR(clks[i])); clk_data.clks = clks; clk_data.clk_num = ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); clk_register_clkdev(clks[IMX6SL_CLK_GPT], "ipg", "imx-gpt.0"); clk_register_clkdev(clks[IMX6SL_CLK_GPT_SERIAL], "per", "imx-gpt.0"); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]); clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]); } /* Set initial power mode */ imx6q_set_lpm(WAIT_CLOCKED); np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt"); base = of_iomap(np, 0); WARN_ON(!base); irq = irq_of_parse_and_map(np, 0); mxc_timer_init(base, irq); }
/* register exynos_audss clocks */
void __init exynos_audss_clk_init(struct device_node *np)
{
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
                pr_err("%s: failed to map audss registers\n", __func__);
                return;
        }

        clk_table = kzalloc(sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
                            GFP_KERNEL);
        if (!clk_table) {
                pr_err("%s: could not allocate clk lookup table\n", __func__);
                iounmap(reg_base);
                return;
        }

        clk_data.clks = clk_table;
        clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
        of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

        clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
                                mout_audss_p, ARRAY_SIZE(mout_audss_p), 0,
                                reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
        clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
                                mout_i2s_p, ARRAY_SIZE(mout_i2s_p), 0,
                                reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);

        clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
                                "mout_audss", 0, reg_base + ASS_CLK_DIV,
                                0, 4, 0, &lock);
        clk_table[EXYNOS_DOUT_AUD_BUS] = clk_register_divider(NULL,
                                "dout_aud_bus", "dout_srp", 0,
                                reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
        clk_table[EXYNOS_DOUT_I2S] = clk_register_divider(NULL, "dout_i2s",
                                "mout_i2s", 0, reg_base + ASS_CLK_DIV,
                                8, 4, 0, &lock);

        clk_table[EXYNOS_SRP_CLK] = clk_register_gate(NULL, "srp_clk",
                                "dout_srp", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 0, 0, &lock);
        clk_table[EXYNOS_I2S_BUS] = clk_register_gate(NULL, "i2s_bus",
                                "dout_aud_bus", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 2, 0, &lock);
        clk_table[EXYNOS_SCLK_I2S] = clk_register_gate(NULL, "sclk_i2s",
                                "dout_i2s", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 3, 0, &lock);
        clk_table[EXYNOS_PCM_BUS] = clk_register_gate(NULL, "pcm_bus",
                                "sclk_pcm", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 4, 0, &lock);
        clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
                                "div_pcm0", CLK_SET_RATE_PARENT,
                                reg_base + ASS_CLK_GATE, 5, 0, &lock);

#ifdef CONFIG_PM_SLEEP
        register_syscore_ops(&exynos_audss_clk_syscore_ops);
#endif

        pr_info("Exynos: Audss: clock setup completed\n");
}
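/*
 * The two clk_register_mux() calls above originally carried three trailing
 * zero arguments that do not match any clk_register_mux() variant I am aware
 * of, so they were trimmed to the common-clock prototype sketched below.
 * This declaration is reproduced from memory for kernels of this vintage and
 * should be treated as a reference sketch, not the authoritative header.
 */
/* include/linux/clk-provider.h (approximate, pre-3.13 era) */
struct clk *clk_register_mux(struct device *dev, const char *name,
                const char **parent_names, u8 num_parents,
                unsigned long flags, void __iomem *reg, u8 shift, u8 width,
                u8 clk_mux_flags, spinlock_t *lock);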
static int __devinit mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match) { struct resource res; int rc = -ENOMEM; if (lpbfifo.dev != NULL) return -ENOSPC; lpbfifo.irq = irq_of_parse_and_map(op->node, 0); if (!lpbfifo.irq) return -ENODEV; if (of_address_to_resource(op->node, 0, &res)) return -ENODEV; lpbfifo.regs_phys = res.start; lpbfifo.regs = of_iomap(op->node, 0); if (!lpbfifo.regs) return -ENOMEM; spin_lock_init(&lpbfifo.lock); /* Put FIFO into reset */ out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000); /* Register the interrupt handler */ rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0, "mpc52xx-lpbfifo", &lpbfifo); if (rc) goto err_irq; /* Request the Bestcomm receive (fifo --> memory) task and IRQ */ lpbfifo.bcom_rx_task = bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC, 16*1024*1024); if (!lpbfifo.bcom_rx_task) goto err_bcom_rx; rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), mpc52xx_lpbfifo_bcom_irq, 0, "mpc52xx-lpbfifo-rx", &lpbfifo); if (rc) goto err_bcom_rx_irq; lpbfifo.dma_irqs_enabled = 1; /* Request the Bestcomm transmit (memory --> fifo) task and IRQ */ lpbfifo.bcom_tx_task = bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA, BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC); if (!lpbfifo.bcom_tx_task) goto err_bcom_tx; lpbfifo.dev = &op->dev; return 0; err_bcom_tx: free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo); err_bcom_rx_irq: bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task); err_bcom_rx: err_irq: iounmap(lpbfifo.regs); lpbfifo.regs = NULL; dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n"); return -ENODEV; }
                        of_node_put(best_frame);
                        of_node_put(frame);
                        return;
                }

                if (cnttidr & CNTTIDR_VIRT(n)) {
                        of_node_put(best_frame);
                        best_frame = frame;
                        arch_timer_mem_use_virtual = true;
                        break;
                }
                of_node_put(best_frame);
                best_frame = of_node_get(frame);
        }

        base = arch_counter_base = of_iomap(best_frame, 0);
        if (!base) {
                pr_err("arch_timer: Can't map frame's registers\n");
                of_node_put(best_frame);
                return;
        }

        if (arch_timer_mem_use_virtual)
                irq = irq_of_parse_and_map(best_frame, 1);
        else
                irq = irq_of_parse_and_map(best_frame, 0);

        of_node_put(best_frame);
        if (!irq) {
                pr_err("arch_timer: Frame missing %s irq",
                       arch_timer_mem_use_virtual ? "virt" : "phys");
                return;
static int kpd_pdrv_probe(struct platform_device *pdev) { int i, r; int err = 0; #ifdef CONFIG_OF kp_base = of_iomap(pdev->dev.of_node, 0); if (!kp_base) { pr_warn(KPD_SAY "KP iomap failed\n"); return -ENODEV; }; kp_irqnr = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!kp_irqnr) { pr_warn(KPD_SAY "KP get irqnr failed\n"); return -ENODEV; } pr_warn(KPD_SAY "kp base: 0x%p, addr:0x%p, kp irq: %d\n", kp_base,&kp_base, kp_irqnr); #endif kpd_ldvt_test_init(); /* API 2 for kpd LFVT test enviroment settings */ /* initialize and register input device (/dev/input/eventX) */ kpd_input_dev = input_allocate_device(); if (!kpd_input_dev) return -ENOMEM; kpd_input_dev->name = KPD_NAME; kpd_input_dev->id.bustype = BUS_HOST; kpd_input_dev->id.vendor = 0x2454; kpd_input_dev->id.product = 0x6500; kpd_input_dev->id.version = 0x0010; kpd_input_dev->open = kpd_open; /* fulfill custom settings */ kpd_memory_setting(); __set_bit(EV_KEY, kpd_input_dev->evbit); #if (KPD_PWRKEY_USE_EINT || KPD_PWRKEY_USE_PMIC) __set_bit(KPD_PWRKEY_MAP, kpd_input_dev->keybit); kpd_keymap[8] = 0; #endif #if !KPD_USE_EXTEND_TYPE for (i = 17; i < KPD_NUM_KEYS; i += 9) /* only [8] works for Power key */ kpd_keymap[i] = 0; #endif for (i = 0; i < KPD_NUM_KEYS; i++) { if (kpd_keymap[i] != 0) __set_bit(kpd_keymap[i], kpd_input_dev->keybit); } #if KPD_AUTOTEST for (i = 0; i < ARRAY_SIZE(kpd_auto_keymap); i++) __set_bit(kpd_auto_keymap[i], kpd_input_dev->keybit); #endif #if KPD_HAS_SLIDE_QWERTY __set_bit(EV_SW, kpd_input_dev->evbit); __set_bit(SW_LID, kpd_input_dev->swbit); #endif #ifdef KPD_PMIC_RSTKEY_MAP __set_bit(KPD_PMIC_RSTKEY_MAP, kpd_input_dev->keybit); #endif #ifdef KPD_KEY_MAP __set_bit(KPD_KEY_MAP, kpd_input_dev->keybit); #endif kpd_input_dev->dev.parent = &pdev->dev; r = input_register_device(kpd_input_dev); if (r) { printk(KPD_SAY "register input device failed (%d)\n", r); input_free_device(kpd_input_dev); return r; } /* register device (/dev/mt6575-kpd) */ kpd_dev.parent = &pdev->dev; r = misc_register(&kpd_dev); if (r) { printk(KPD_SAY "register device failed (%d)\n", r); input_unregister_device(kpd_input_dev); return r; } /* register IRQ and EINT */ kpd_set_debounce(KPD_KEY_DEBOUNCE); #ifdef CONFIG_OF r = request_irq(kp_irqnr, kpd_irq_handler, IRQF_TRIGGER_NONE, KPD_NAME, NULL); #else r = request_irq(MT_KP_IRQ_ID, kpd_irq_handler, IRQF_TRIGGER_FALLING, KPD_NAME, NULL); #endif if (r) { printk(KPD_SAY "register IRQ failed (%d)\n", r); misc_deregister(&kpd_dev); input_unregister_device(kpd_input_dev); return r; } mt_eint_register(); #ifndef KPD_EARLY_PORTING /*add for avoid early porting build err the macro is defined in custom file */ long_press_reboot_function_setting(); /* /API 4 for kpd long press reboot function setting */ #endif hrtimer_init(&aee_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); aee_timer.function = aee_timer_func; #if AEE_ENABLE_5_15 hrtimer_init(&aee_timer_5s, CLOCK_MONOTONIC, HRTIMER_MODE_REL); aee_timer_5s.function = aee_timer_5s_func; #endif if ((err = kpd_create_attr(&kpd_pdrv.driver))) { kpd_print("create attr file fail\n"); kpd_delete_attr(&kpd_pdrv.driver); return err; } pr_warn(KPD_SAY "%s Done\n", __FUNCTION__); return 0; }
static int spear_adc_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct spear_adc_state *st; struct iio_dev *indio_dev = NULL; int ret = -ENODEV; int irq; indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state)); if (!indio_dev) { dev_err(dev, "failed allocating iio device\n"); return -ENOMEM; } st = iio_priv(indio_dev); st->np = np; /* * SPEAr600 has a different register layout than other SPEAr SoC's * (e.g. SPEAr3xx). Let's provide two register base addresses * to support multi-arch kernels. */ st->adc_base_spear6xx = of_iomap(np, 0); if (!st->adc_base_spear6xx) { dev_err(dev, "failed mapping memory\n"); return -ENOMEM; } st->adc_base_spear3xx = (struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx; st->clk = clk_get(dev, NULL); if (IS_ERR(st->clk)) { dev_err(dev, "failed getting clock\n"); goto errout1; } ret = clk_prepare_enable(st->clk); if (ret) { dev_err(dev, "failed enabling clock\n"); goto errout2; } irq = platform_get_irq(pdev, 0); if (irq <= 0) { dev_err(dev, "failed getting interrupt resource\n"); ret = -EINVAL; goto errout3; } ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME, st); if (ret < 0) { dev_err(dev, "failed requesting interrupt\n"); goto errout3; } if (of_property_read_u32(np, "sampling-frequency", &st->sampling_freq)) { dev_err(dev, "sampling-frequency missing in DT\n"); ret = -EINVAL; goto errout3; } /* * Optional avg_samples defaults to 0, resulting in single data * conversion */ of_property_read_u32(np, "average-samples", &st->avg_samples); /* * Optional vref_external defaults to 0, resulting in internal vref * selection */ of_property_read_u32(np, "vref-external", &st->vref_external); spear_adc_configure(st); platform_set_drvdata(pdev, indio_dev); init_completion(&st->completion); indio_dev->name = SPEAR_ADC_MOD_NAME; indio_dev->dev.parent = dev; indio_dev->info = &spear_adc_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = spear_adc_iio_channels; indio_dev->num_channels = ARRAY_SIZE(spear_adc_iio_channels); ret = iio_device_register(indio_dev); if (ret) goto errout3; dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq); return 0; errout3: clk_disable_unprepare(st->clk); errout2: clk_put(st->clk); errout1: iounmap(st->adc_base_spear6xx); return ret; }
/* register exynos4 clocks */ static void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc exynos4_soc, void __iomem *reg_base, unsigned long xom) { reg_base = of_iomap(np, 0); if (!reg_base) panic("%s: failed to map registers\n", __func__); if (exynos4_soc == EXYNOS4210) samsung_clk_init(np, reg_base, nr_clks, exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save)); else samsung_clk_init(np, reg_base, nr_clks, exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save)); samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks, ARRAY_SIZE(exynos4_fixed_rate_ext_clks), ext_clk_match); exynos4_clk_register_finpll(xom); if (exynos4_soc == EXYNOS4210) { samsung_clk_register_mux(exynos4210_mux_early, ARRAY_SIZE(exynos4210_mux_early)); if (_get_rate("fin_pll") == 24000000) { exynos4210_plls[apll].rate_table = exynos4210_apll_rates; exynos4210_plls[epll].rate_table = exynos4210_epll_rates; } if (_get_rate("mout_vpllsrc") == 24000000) exynos4210_plls[vpll].rate_table = exynos4210_vpll_rates; samsung_clk_register_pll(exynos4210_plls, ARRAY_SIZE(exynos4210_plls), reg_base); } else { if (_get_rate("fin_pll") == 24000000) { exynos4x12_plls[apll].rate_table = exynos4x12_apll_rates; exynos4x12_plls[epll].rate_table = exynos4x12_epll_rates; exynos4x12_plls[vpll].rate_table = exynos4x12_vpll_rates; } samsung_clk_register_pll(exynos4x12_plls, ARRAY_SIZE(exynos4x12_plls), reg_base); } samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks, ARRAY_SIZE(exynos4_fixed_rate_clks)); samsung_clk_register_mux(exynos4_mux_clks, ARRAY_SIZE(exynos4_mux_clks)); samsung_clk_register_div(exynos4_div_clks, ARRAY_SIZE(exynos4_div_clks)); samsung_clk_register_gate(exynos4_gate_clks, ARRAY_SIZE(exynos4_gate_clks)); if (exynos4_soc == EXYNOS4210) { samsung_clk_register_fixed_rate(exynos4210_fixed_rate_clks, ARRAY_SIZE(exynos4210_fixed_rate_clks)); samsung_clk_register_mux(exynos4210_mux_clks, ARRAY_SIZE(exynos4210_mux_clks)); samsung_clk_register_div(exynos4210_div_clks, ARRAY_SIZE(exynos4210_div_clks)); samsung_clk_register_gate(exynos4210_gate_clks, ARRAY_SIZE(exynos4210_gate_clks)); samsung_clk_register_alias(exynos4210_aliases, ARRAY_SIZE(exynos4210_aliases)); } else { samsung_clk_register_mux(exynos4x12_mux_clks, ARRAY_SIZE(exynos4x12_mux_clks)); samsung_clk_register_div(exynos4x12_div_clks, ARRAY_SIZE(exynos4x12_div_clks)); samsung_clk_register_gate(exynos4x12_gate_clks, ARRAY_SIZE(exynos4x12_gate_clks)); samsung_clk_register_alias(exynos4x12_aliases, ARRAY_SIZE(exynos4x12_aliases)); } samsung_clk_register_alias(exynos4_aliases, ARRAY_SIZE(exynos4_aliases)); pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n" "\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n", exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12", _get_rate("sclk_apll"), _get_rate("sclk_mpll"), _get_rate("sclk_epll"), _get_rate("sclk_vpll"), _get_rate("arm_clk")); }
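/*
 * The common init above is typically hooked to the device tree through thin
 * per-SoC wrappers registered with CLK_OF_DECLARE.  A sketch, assuming the
 * usual Samsung compatible strings and an EXYNOS4X12 enumerator for the
 * non-4210 case; the wrapper names and the NULL/0 arguments (which the common
 * init overrides anyway) are illustrative:
 */
static void __init exynos4210_clk_init_dt(struct device_node *np)
{
	exynos4_clk_init(np, EXYNOS4210, NULL, 0);
}
CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init_dt);

static void __init exynos4412_clk_init_dt(struct device_node *np)
{
	exynos4_clk_init(np, EXYNOS4X12, NULL, 0);
}
CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init_dt);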
static int __devinit pasemi_nand_probe(struct platform_device *ofdev,
				       const struct of_device_id *match)
{
	struct pci_dev *pdev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource res;
	struct nand_chip *chip;
	int err = 0;

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;

	/* We only support one device at the moment */
	if (pasemi_nand_mtd)
		return -ENODEV;

	pr_debug("pasemi_nand at %llx-%llx\n", res.start, res.end);

	/* Allocate memory for MTD device structure and private data */
	pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
				  sizeof(struct nand_chip), GFP_KERNEL);
	if (!pasemi_nand_mtd) {
		printk(KERN_WARNING
		       "Unable to allocate PASEMI NAND MTD device structure\n");
		err = -ENOMEM;
		goto out;
	}

	/* Get pointer to private data */
	chip = (struct nand_chip *)&pasemi_nand_mtd[1];

	/* Link the private data with the MTD structure */
	pasemi_nand_mtd->priv = chip;
	pasemi_nand_mtd->owner = THIS_MODULE;

	chip->IO_ADDR_R = of_iomap(np, 0);
	chip->IO_ADDR_W = chip->IO_ADDR_R;

	if (!chip->IO_ADDR_R) {
		err = -EIO;
		goto out_mtd;
	}

	pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
	if (!pdev) {
		err = -ENODEV;
		goto out_ior;
	}

	lpcctl = pci_resource_start(pdev, 0);
	pci_dev_put(pdev);

	if (!request_region(lpcctl, 4, driver_name)) {
		err = -EBUSY;
		goto out_ior;
	}

	chip->cmd_ctrl = pasemi_hwcontrol;
	chip->dev_ready = pasemi_device_ready;
	chip->read_buf = pasemi_read_buf;
	chip->write_buf = pasemi_write_buf;
	chip->chip_delay = 0;
	chip->ecc.mode = NAND_ECC_SOFT;

	/* Enable the following for a flash based bad block table */
	chip->options = NAND_NO_AUTOINCR;
	chip->bbt_options = NAND_BBT_USE_FLASH;

	/* Scan to find existence of the device */
	if (nand_scan(pasemi_nand_mtd, 1)) {
		err = -ENXIO;
		goto out_lpc;
	}

	if (add_mtd_device(pasemi_nand_mtd)) {
		printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
		err = -ENODEV;
		goto out_lpc;
	}

	printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n",
	       res.start, lpcctl);
	return 0;

 out_lpc:
	release_region(lpcctl, 4);
 out_ior:
	iounmap(chip->IO_ADDR_R);
 out_mtd:
	kfree(pasemi_nand_mtd);
 out:
	return err;
}
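/*
 * Sketch of the matching teardown, releasing resources in the reverse order of
 * the out_* labels above; shown as an illustration of the cleanup ordering
 * rather than the driver's verbatim remove function.
 */
static int pasemi_nand_remove(struct platform_device *ofdev)
{
	struct nand_chip *chip;

	if (!pasemi_nand_mtd)
		return 0;

	chip = pasemi_nand_mtd->priv;

	/* Unregister the MTD device and free the NAND internals */
	nand_release(pasemi_nand_mtd);

	release_region(lpcctl, 4);
	iounmap(chip->IO_ADDR_R);

	kfree(pasemi_nand_mtd);
	pasemi_nand_mtd = NULL;

	return 0;
}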
static void __init imx6sx_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; int i; clks[IMX6SX_CLK_DUMMY] = imx_clk_fixed("dummy", 0); clks[IMX6SX_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); clks[IMX6SX_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc"); /* ipp_di clock is external input */ clks[IMX6SX_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0"); clks[IMX6SX_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1"); /* Clock source from external clock via CLK1 PAD */ clks[IMX6SX_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0); np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop"); base = of_iomap(np, 0); WARN_ON(!base); clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6SX_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6SX_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6SX_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6SX_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clks[IMX6SX_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ clks[IMX6SX_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); clks[IMX6SX_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); clks[IMX6SX_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); clks[IMX6SX_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); clks[IMX6SX_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); clks[IMX6SX_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); clks[IMX6SX_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); clks[IMX6SX_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); /* Do not bypass PLLs initially */ clk_set_parent(clks[IMX6SX_PLL1_BYPASS], clks[IMX6SX_CLK_PLL1]); 
clk_set_parent(clks[IMX6SX_PLL2_BYPASS], clks[IMX6SX_CLK_PLL2]); clk_set_parent(clks[IMX6SX_PLL3_BYPASS], clks[IMX6SX_CLK_PLL3]); clk_set_parent(clks[IMX6SX_PLL4_BYPASS], clks[IMX6SX_CLK_PLL4]); clk_set_parent(clks[IMX6SX_PLL5_BYPASS], clks[IMX6SX_CLK_PLL5]); clk_set_parent(clks[IMX6SX_PLL6_BYPASS], clks[IMX6SX_CLK_PLL6]); clk_set_parent(clks[IMX6SX_PLL7_BYPASS], clks[IMX6SX_CLK_PLL7]); clks[IMX6SX_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); clks[IMX6SX_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); clks[IMX6SX_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); clks[IMX6SX_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); clks[IMX6SX_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); clks[IMX6SX_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); clks[IMX6SX_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); /* * Bit 20 is the reserved and read-only bit, we do this only for: * - Do nothing for usbphy clk_enable/disable * - Keep refcount when do usbphy clk_enable/disable, in that case, * the clk framework may need to enable/disable usbphy's parent */ clks[IMX6SX_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); clks[IMX6SX_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore. */ clks[IMX6SX_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); clks[IMX6SX_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); /* FIXME 100Mhz is used for pcie ref for all imx6 pcie, excepted imx6q */ clks[IMX6SX_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 5); clks[IMX6SX_CLK_PCIE_REF_125M] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); clks[IMX6SX_CLK_LVDS1_OUT] = imx_clk_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12)); clks[IMX6SX_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); clks[IMX6SX_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); clks[IMX6SX_CLK_ENET2_REF] = clk_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0, base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock); clks[IMX6SX_CLK_ENET2_REF_125M] = imx_clk_gate("enet2_ref_125m", "enet2_ref", base + 0xe0, 20); clks[IMX6SX_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20); clks[IMX6SX_CLK_ENET_PTP] = imx_clk_gate("enet_ptp_25m", "enet_ptp_ref", base + 0xe0, 21); /* name parent_name reg idx */ clks[IMX6SX_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); clks[IMX6SX_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); clks[IMX6SX_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); clks[IMX6SX_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); clks[IMX6SX_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); clks[IMX6SX_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); clks[IMX6SX_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); clks[IMX6SX_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ 
clks[IMX6SX_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); clks[IMX6SX_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); clks[IMX6SX_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); clks[IMX6SX_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); clks[IMX6SX_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2); clks[IMX6SX_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); clks[IMX6SX_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); clks[IMX6SX_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); clks[IMX6SX_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); clks[IMX6SX_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); /* name reg shift width parent_names num_parents */ clks[IMX6SX_CLK_LVDS1_SEL] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); imx6q_pm_set_ccm_base(base); /* name reg shift width parent_names num_parents */ clks[IMX6SX_CLK_STEP] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); clks[IMX6SX_CLK_PLL1_SW] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); clks[IMX6SX_CLK_OCRAM_SEL] = imx_clk_mux("ocram_sel", base + 0x14, 6, 2, ocram_sels, ARRAY_SIZE(ocram_sels)); clks[IMX6SX_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); clks[IMX6SX_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); clks[IMX6SX_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); clks[IMX6SX_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); clks[IMX6SX_CLK_PCIE_AXI_SEL] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); clks[IMX6SX_CLK_GPU_AXI_SEL] = imx_clk_mux("gpu_axi_sel", base + 0x18, 8, 2, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); clks[IMX6SX_CLK_GPU_CORE_SEL] = imx_clk_mux("gpu_core_sel", base + 0x18, 4, 2, gpu_core_sels, ARRAY_SIZE(gpu_core_sels)); clks[IMX6SX_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); clks[IMX6SX_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); clks[IMX6SX_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); clks[IMX6SX_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); clks[IMX6SX_CLK_USDHC4_SEL] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); clks[IMX6SX_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); clks[IMX6SX_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); clks[IMX6SX_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); 
clks[IMX6SX_CLK_QSPI1_SEL] = imx_clk_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); clks[IMX6SX_CLK_VID_SEL] = imx_clk_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); clks[IMX6SX_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); clks[IMX6SX_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); clks[IMX6SX_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); clks[IMX6SX_CLK_QSPI2_SEL] = imx_clk_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); clks[IMX6SX_CLK_AUDIO_SEL] = imx_clk_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); clks[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); clks[IMX6SX_CLK_ENET_SEL] = imx_clk_mux("enet_sel", base + 0x34, 9, 3, enet_sels, ARRAY_SIZE(enet_sels)); clks[IMX6SX_CLK_M4_PRE_SEL] = imx_clk_mux("m4_pre_sel", base + 0x34, 6, 3, m4_pre_sels, ARRAY_SIZE(m4_pre_sels)); clks[IMX6SX_CLK_M4_SEL] = imx_clk_mux("m4_sel", base + 0x34, 0, 3, m4_sels, ARRAY_SIZE(m4_sels)); clks[IMX6SX_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); clks[IMX6SX_CLK_LCDIF2_PRE_SEL] = imx_clk_mux("lcdif2_pre_sel", base + 0x38, 6, 3, lcdif2_pre_sels, ARRAY_SIZE(lcdif2_pre_sels)); clks[IMX6SX_CLK_LCDIF2_SEL] = imx_clk_mux("lcdif2_sel", base + 0x38, 0, 3, lcdif2_sels, ARRAY_SIZE(lcdif2_sels)); clks[IMX6SX_CLK_DISPLAY_SEL] = imx_clk_mux("display_sel", base + 0x3c, 14, 2, display_sels, ARRAY_SIZE(display_sels)); clks[IMX6SX_CLK_CSI_SEL] = imx_clk_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels)); clks[IMX6SX_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); clks[IMX6SX_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); clks[IMX6SX_CLK_CKO] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); clks[IMX6SX_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_LCDIF1_PRE_SEL] = imx_clk_mux_flags("lcdif1_pre_sel", base + 0x38, 15, 3, lcdif1_pre_sels, ARRAY_SIZE(lcdif1_pre_sels), CLK_SET_RATE_PARENT); clks[IMX6SX_CLK_LCDIF1_SEL] = imx_clk_mux_flags("lcdif1_sel", base + 0x38, 9, 3, lcdif1_sels, ARRAY_SIZE(lcdif1_sels), CLK_SET_RATE_PARENT); /* name parent_name reg shift width */ clks[IMX6SX_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); clks[IMX6SX_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); clks[IMX6SX_CLK_IPG] = 
imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); clks[IMX6SX_CLK_GPU_CORE_PODF] = imx_clk_divider("gpu_core_podf", "gpu_core_sel", base + 0x18, 29, 3); clks[IMX6SX_CLK_GPU_AXI_PODF] = imx_clk_divider("gpu_axi_podf", "gpu_axi_sel", base + 0x18, 26, 3); clks[IMX6SX_CLK_LCDIF1_PODF] = imx_clk_divider("lcdif1_podf", "lcdif1_pred", base + 0x18, 23, 3); clks[IMX6SX_CLK_QSPI1_PODF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3); clks[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); clks[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3); clks[IMX6SX_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6); clks[IMX6SX_CLK_VID_PODF] = imx_clk_divider("vid_podf", "vid_sel", base + 0x20, 24, 2); clks[IMX6SX_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6); clks[IMX6SX_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); clks[IMX6SX_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); clks[IMX6SX_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); clks[IMX6SX_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); clks[IMX6SX_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); clks[IMX6SX_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); clks[IMX6SX_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); clks[IMX6SX_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); clks[IMX6SX_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); clks[IMX6SX_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); clks[IMX6SX_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); clks[IMX6SX_CLK_QSPI2_PRED] = imx_clk_divider("qspi2_pred", "qspi2_sel", base + 0x2c, 18, 3); clks[IMX6SX_CLK_QSPI2_PODF] = imx_clk_divider("qspi2_podf", "qspi2_pred", base + 0x2c, 21, 6); clks[IMX6SX_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); clks[IMX6SX_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); clks[IMX6SX_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); clks[IMX6SX_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); clks[IMX6SX_CLK_AUDIO_PRED] = imx_clk_divider("audio_pred", "audio_sel", base + 0x30, 12, 3); clks[IMX6SX_CLK_AUDIO_PODF] = imx_clk_divider("audio_podf", "audio_pred", base + 0x30, 9, 3); clks[IMX6SX_CLK_ENET_PODF] = imx_clk_divider("enet_podf", "enet_pre_sel", base + 0x34, 12, 3); clks[IMX6SX_CLK_M4_PODF] = imx_clk_divider("m4_podf", "m4_sel", base + 0x34, 3, 3); clks[IMX6SX_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); clks[IMX6SX_CLK_LCDIF1_PRED] = imx_clk_divider("lcdif1_pred", "lcdif1_pre_sel", base + 0x38, 12, 3); clks[IMX6SX_CLK_LCDIF2_PRED] = imx_clk_divider("lcdif2_pred", "lcdif2_pre_sel", base + 0x38, 3, 3); clks[IMX6SX_CLK_DISPLAY_PODF] = imx_clk_divider("display_podf", "display_sel", base + 0x3c, 16, 3); clks[IMX6SX_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); clks[IMX6SX_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); clks[IMX6SX_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); clks[IMX6SX_CLK_LDB_DI0_DIV_3_5] 
= imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); clks[IMX6SX_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); clks[IMX6SX_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); clks[IMX6SX_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7); /* name reg shift width busy: reg, shift parent_names num_parents */ clks[IMX6SX_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); clks[IMX6SX_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width busy: reg, shift */ clks[IMX6SX_CLK_OCRAM_PODF] = imx_clk_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0); clks[IMX6SX_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); clks[IMX6SX_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); clks[IMX6SX_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); /* name parent_name reg shift */ /* CCGR0 */ clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0); clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2); clks[IMX6SX_CLK_APBH_DMA] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4); clks[IMX6SX_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); clks[IMX6SX_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); clks[IMX6SX_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); clks[IMX6SX_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10); clks[IMX6SX_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); clks[IMX6SX_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); clks[IMX6SX_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16); clks[IMX6SX_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); clks[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20); clks[IMX6SX_CLK_DCIC1] = imx_clk_gate2("dcic1", "display_podf", base + 0x68, 24); clks[IMX6SX_CLK_DCIC2] = imx_clk_gate2("dcic2", "display_podf", base + 0x68, 26); clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x68, 30); /* CCGR1 */ clks[IMX6SX_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); clks[IMX6SX_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); clks[IMX6SX_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); clks[IMX6SX_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); clks[IMX6SX_CLK_ECSPI5] = imx_clk_gate2("ecspi5", "ecspi_podf", base + 0x6c, 8); clks[IMX6SX_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); clks[IMX6SX_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); clks[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai); clks[IMX6SX_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai); clks[IMX6SX_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai); clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2("wakeup", "ipg", base + 0x6c, 18); clks[IMX6SX_CLK_GPT_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x6c, 20); clks[IMX6SX_CLK_GPT_SERIAL] = 
imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22); clks[IMX6SX_CLK_GPU] = imx_clk_gate2("gpu", "gpu_core_podf", base + 0x6c, 26); clks[IMX6SX_CLK_CANFD] = imx_clk_gate2("canfd", "can_podf", base + 0x6c, 30); /* CCGR2 */ clks[IMX6SX_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2); clks[IMX6SX_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); clks[IMX6SX_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); clks[IMX6SX_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); clks[IMX6SX_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); clks[IMX6SX_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14); clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2("ipmux1", "ahb", base + 0x70, 16); clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2("ipmux2", "ahb", base + 0x70, 18); clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2("ipmux3", "ahb", base + 0x70, 20); clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2("tzasc1", "mmdc_podf", base + 0x70, 22); clks[IMX6SX_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "display_podf", base + 0x70, 28); clks[IMX6SX_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "display_podf", base + 0x70, 30); /* CCGR3 */ clks[IMX6SX_CLK_M4] = imx_clk_gate2("m4", "m4_podf", base + 0x74, 2); clks[IMX6SX_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4); clks[IMX6SX_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "enet_sel", base + 0x74, 4); clks[IMX6SX_CLK_DISPLAY_AXI] = imx_clk_gate2("display_axi", "display_podf", base + 0x74, 6); clks[IMX6SX_CLK_LCDIF2_PIX] = imx_clk_gate2("lcdif2_pix", "lcdif2_sel", base + 0x74, 8); clks[IMX6SX_CLK_LCDIF1_PIX] = imx_clk_gate2("lcdif1_pix", "lcdif1_sel", base + 0x74, 10); clks[IMX6SX_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12); clks[IMX6SX_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14); clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18); clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20); clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24); clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28); /* CCGR4 */ clks[IMX6SX_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "display_podf", base + 0x78, 0); clks[IMX6SX_CLK_QSPI2] = imx_clk_gate2("qspi2", "qspi2_podf", base + 0x78, 10); clks[IMX6SX_CLK_PER1_BCH] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12); clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2("per2_main", "ahb", base + 0x78, 14); clks[IMX6SX_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); clks[IMX6SX_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); clks[IMX6SX_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); clks[IMX6SX_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); clks[IMX6SX_CLK_GPMI_BCH_APB] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); clks[IMX6SX_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); clks[IMX6SX_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "qspi2_podf", base + 0x78, 28); clks[IMX6SX_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); /* CCGR5 */ clks[IMX6SX_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0); clks[IMX6SX_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); clks[IMX6SX_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); clks[IMX6SX_CLK_AUDIO] = imx_clk_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio); clks[IMX6SX_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, 
&share_count_audio); clks[IMX6SX_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); clks[IMX6SX_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); clks[IMX6SX_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); clks[IMX6SX_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); clks[IMX6SX_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); clks[IMX6SX_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); clks[IMX6SX_CLK_UART_IPG] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24); clks[IMX6SX_CLK_UART_SERIAL] = imx_clk_gate2("uart_serial", "uart_podf", base + 0x7c, 26); clks[IMX6SX_CLK_SAI1_IPG] = imx_clk_gate2("sai1_ipg", "ipg", base + 0x7c, 28); clks[IMX6SX_CLK_SAI2_IPG] = imx_clk_gate2("sai2_ipg", "ipg", base + 0x7c, 30); clks[IMX6SX_CLK_SAI1] = imx_clk_gate2("sai1", "ssi1_podf", base + 0x7c, 28); clks[IMX6SX_CLK_SAI2] = imx_clk_gate2("sai2", "ssi2_podf", base + 0x7c, 30); /* CCGR6 */ clks[IMX6SX_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); clks[IMX6SX_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); clks[IMX6SX_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); clks[IMX6SX_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); clks[IMX6SX_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); clks[IMX6SX_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10); clks[IMX6SX_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16); clks[IMX6SX_CLK_VADC] = imx_clk_gate2("vadc", "vid_podf", base + 0x80, 20); clks[IMX6SX_CLK_GIS] = imx_clk_gate2("gis", "display_podf", base + 0x80, 22); clks[IMX6SX_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24); clks[IMX6SX_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26); clks[IMX6SX_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28); clks[IMX6SX_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 0x80, 30); clks[IMX6SX_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); clks[IMX6SX_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24); /* mask handshake of mmdc */ writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR); imx_check_clocks(clks, ARRAY_SIZE(clks)); clk_data.clks = clks; clk_data.clk_num = ARRAY_SIZE(clks); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clks[clks_init_on[i]]); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { clk_prepare_enable(clks[IMX6SX_CLK_USBPHY1_GATE]); clk_prepare_enable(clks[IMX6SX_CLK_USBPHY2_GATE]); } /* Set the default 132MHz for EIM module */ clk_set_parent(clks[IMX6SX_CLK_EIM_SLOW_SEL], clks[IMX6SX_CLK_PLL2_PFD2]); clk_set_rate(clks[IMX6SX_CLK_EIM_SLOW], 132000000); /* set parent clock for LCDIF1 pixel clock */ clk_set_parent(clks[IMX6SX_CLK_LCDIF1_PRE_SEL], clks[IMX6SX_CLK_PLL5_VIDEO_DIV]); clk_set_parent(clks[IMX6SX_CLK_LCDIF1_SEL], clks[IMX6SX_CLK_LCDIF1_PODF]); /* Set the parent clks of PCIe lvds1 and pcie_axi to be pcie ref, axi */ if (clk_set_parent(clks[IMX6SX_CLK_LVDS1_SEL], clks[IMX6SX_CLK_PCIE_REF_125M])) pr_err("Failed to set pcie bus parent clk.\n"); if (clk_set_parent(clks[IMX6SX_CLK_PCIE_AXI_SEL], clks[IMX6SX_CLK_AXI])) pr_err("Failed to set pcie parent clk.\n"); /* * Init enet system AHB clock, set to 200Mhz * pll2_pfd2_396m-> ENET_PODF-> 
ENET_AHB */ clk_set_parent(clks[IMX6SX_CLK_ENET_PRE_SEL], clks[IMX6SX_CLK_PLL2_PFD2]); clk_set_parent(clks[IMX6SX_CLK_ENET_SEL], clks[IMX6SX_CLK_ENET_PODF]); clk_set_rate(clks[IMX6SX_CLK_ENET_PODF], 200000000); clk_set_rate(clks[IMX6SX_CLK_ENET_REF], 125000000); clk_set_rate(clks[IMX6SX_CLK_ENET2_REF], 125000000); /* Audio clocks */ clk_set_rate(clks[IMX6SX_CLK_PLL4_AUDIO_DIV], 393216000); clk_set_parent(clks[IMX6SX_CLK_SPDIF_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); clk_set_rate(clks[IMX6SX_CLK_SPDIF_PODF], 98304000); clk_set_parent(clks[IMX6SX_CLK_AUDIO_SEL], clks[IMX6SX_CLK_PLL3_USB_OTG]); clk_set_rate(clks[IMX6SX_CLK_AUDIO_PODF], 24000000); clk_set_parent(clks[IMX6SX_CLK_SSI1_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); clk_set_parent(clks[IMX6SX_CLK_SSI2_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); clk_set_parent(clks[IMX6SX_CLK_SSI3_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); clk_set_rate(clks[IMX6SX_CLK_SSI1_PODF], 24576000); clk_set_rate(clks[IMX6SX_CLK_SSI2_PODF], 24576000); clk_set_rate(clks[IMX6SX_CLK_SSI3_PODF], 24576000); clk_set_parent(clks[IMX6SX_CLK_ESAI_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); clk_set_rate(clks[IMX6SX_CLK_ESAI_PODF], 24576000); /* Set parent clock for vadc */ clk_set_parent(clks[IMX6SX_CLK_VID_SEL], clks[IMX6SX_CLK_PLL3_USB_OTG]); /* default parent of can_sel clock is invalid, manually set it here */ clk_set_parent(clks[IMX6SX_CLK_CAN_SEL], clks[IMX6SX_CLK_PLL3_60M]); /* Update gpu clock from default 528M to 720M */ clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); /* Set initial power mode */ imx6q_set_lpm(WAIT_CLOCKED); }
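/*
 * With of_clk_add_provider() and of_clk_src_onecell_get() in place, consumers
 * reference these clocks by index from the device tree (e.g.
 * clocks = <&clks IMX6SX_CLK_UART_SERIAL>) and obtain them through the usual
 * clk API.  A consumer-side sketch; the device, function and clock names are
 * illustrative:
 */
static int foo_uart_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, "per");	/* matches a clock-names entry */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "per clock runs at %lu Hz\n", clk_get_rate(clk));
	return 0;
}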
static int __init armada_370_xp_timer_common_init(struct device_node *np) { u32 clr = 0, set = 0; int res; timer_base = of_iomap(np, 0); if (!timer_base) { pr_err("Failed to iomap\n"); return -ENXIO; } local_base = of_iomap(np, 1); if (!local_base) { pr_err("Failed to iomap\n"); return -ENXIO; } if (timer25Mhz) { set = TIMER0_25MHZ; enable_mask = TIMER0_EN; } else { clr = TIMER0_25MHZ; enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT); } atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set); local_timer_ctrl_clrset(clr, set); /* * We use timer 0 as clocksource, and private(local) timer 0 * for clockevents */ armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4); ticks_per_jiffy = (timer_clk + HZ / 2) / HZ; /* * Setup free-running clocksource timer (interrupts * disabled). */ writel(0xffffffff, timer_base + TIMER0_VAL_OFF); writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); atomic_io_modify(timer_base + TIMER_CTRL_OFF, TIMER0_RELOAD_EN | enable_mask, TIMER0_RELOAD_EN | enable_mask); armada_370_delay_timer.freq = timer_clk; register_current_timer_delay(&armada_370_delay_timer); /* * Set scale and timer for sched_clock. */ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk); res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, "armada_370_xp_clocksource", timer_clk, 300, 32, clocksource_mmio_readl_down); if (res) { pr_err("Failed to initialize clocksource mmio\n"); return res; } armada_370_xp_evt = alloc_percpu(struct clock_event_device); if (!armada_370_xp_evt) return -ENOMEM; /* * Setup clockevent timer (interrupt-driven). */ res = request_percpu_irq(armada_370_xp_clkevt_irq, armada_370_xp_timer_interrupt, "armada_370_xp_per_cpu_tick", armada_370_xp_evt); /* Immediately configure the timer on the boot CPU */ if (res) { pr_err("Failed to request percpu irq\n"); return res; } res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, "clockevents/armada:starting", armada_370_xp_timer_starting_cpu, armada_370_xp_timer_dying_cpu); if (res) { pr_err("Failed to setup hotplug state and timer\n"); return res; } register_syscore_ops(&armada_370_xp_timer_syscore_ops); return 0; }
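/*
 * Sketch of how the common init above is typically wired up: a per-SoC wrapper
 * derives timer_clk from the DT clock, decides timer25Mhz, and registers
 * itself with CLOCKSOURCE_OF_DECLARE.  The compatible string, clock name and
 * the always-25 MHz assumption are illustrative, not taken from the source.
 */
static int __init armada_xp_timer_init(struct device_node *np)
{
	struct clk *clk = of_clk_get_by_name(np, "fixed");
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	timer_clk = clk_get_rate(clk);
	timer25Mhz = true;	/* assume the fixed 25 MHz reference is present */

	return armada_370_xp_timer_common_init(np);
}
CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", armada_xp_timer_init);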
static void __init mx53_clocks_init(struct device_node *np) { int i, irq; unsigned long r; void __iomem *base; clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE); clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE); clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE); clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE); clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); clk[ldb_di1_div] = imx_clk_divider_flags("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1, 0); clk[ldb_di1_sel] = imx_clk_mux_flags("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1, mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel), CLK_SET_RATE_PARENT); clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3); clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); clk[ldb_di0_div] = imx_clk_divider_flags("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1, 0); clk[ldb_di0_sel] = imx_clk_mux_flags("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1, mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel), CLK_SET_RATE_PARENT); clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28); clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30); clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3, mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel)); clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3, mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel)); clk[tve_ext_sel] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1, mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT); clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30); clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3); clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2); clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6); clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10); clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14); clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10); clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12); clk[can_sel] = imx_clk_mux("can_sel", MXC_CCM_CSCMR2, 6, 2, mx53_can_sel, ARRAY_SIZE(mx53_can_sel)); clk[can1_serial_gate] = imx_clk_gate2("can1_serial_gate", "can_sel", MXC_CCM_CCGR6, 22); clk[can1_ipg_gate] = imx_clk_gate2("can1_ipg_gate", "ipg", MXC_CCM_CCGR6, 20); clk[ocram] = imx_clk_gate2("ocram", "ahb", MXC_CCM_CCGR6, 2); clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "can_sel", MXC_CCM_CCGR4, 8); clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 6); clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22); clk[sata_gate] = imx_clk_gate2("sata_gate", "ipg", MXC_CCM_CCGR4, 2); clk[cko1_sel] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4, mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel)); clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3); clk[cko1] = imx_clk_gate2("cko1", "cko1_podf", MXC_CCM_CCOSR, 7); clk[cko2_sel] = imx_clk_mux("cko2_sel", MXC_CCM_CCOSR, 16, 5, mx53_cko2_sel, ARRAY_SIZE(mx53_cko2_sel)); clk[cko2_podf] = imx_clk_divider("cko2_podf", "cko2_sel", MXC_CCM_CCOSR, 21, 3); clk[cko2] = imx_clk_gate2("cko2", "cko2_podf", MXC_CCM_CCOSR, 24); clk[spdif_xtal_sel] = 
imx_clk_mux("spdif_xtal_sel", MXC_CCM_CSCMR1, 2, 2, mx53_spdif_xtal_sel, ARRAY_SIZE(mx53_spdif_xtal_sel)); for (i = 0; i < ARRAY_SIZE(clk); i++) if (IS_ERR(clk[i])) pr_err("i.MX53 clk %d: register failed with %ld\n", i, PTR_ERR(clk[i])); clk_data.clks = clk; clk_data.clk_num = ARRAY_SIZE(clk); of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); mx5_clocks_common_init(0, 0, 0, 0); clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0"); clk_register_clkdev(clk[i2c3_gate], NULL, "imx21-i2c.2"); clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0"); clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0"); clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0"); clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0"); clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0"); clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1"); clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1"); clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1"); clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2"); clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2"); clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2"); clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3"); clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3"); clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3"); /* set SDHC root clock to 200MHZ*/ clk_set_rate(clk[esdhc_a_podf], 200000000); clk_set_rate(clk[esdhc_b_podf], 200000000); clk_prepare_enable(clk[iim_gate]); imx_print_silicon_rev("i.MX53", mx53_revision()); clk_disable_unprepare(clk[iim_gate]); r = clk_round_rate(clk[usboh3_per_gate], 54000000); clk_set_rate(clk[usboh3_per_gate], r); np = of_find_compatible_node(NULL, NULL, "fsl,imx53-gpt"); base = of_iomap(np, 0); WARN_ON(!base); irq = irq_of_parse_and_map(np, 0); mxc_timer_init(base, irq); }
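/*
 * The clk_register_clkdev() calls above exist so that legacy (non-DT) board
 * code and platform drivers can still look clocks up by connection id and
 * device name.  A small usage sketch, assuming a platform device whose
 * dev_name() is "sdhci-esdhc-imx53.0"; the function name is illustrative:
 */
static int esdhc_board_clk_example(struct device *dev)
{
	/* dev_name(dev) == "sdhci-esdhc-imx53.0", so this resolves to the
	 * "per" clkdev entry registered above. */
	struct clk *per = clk_get(dev, "per");

	if (IS_ERR(per))
		return PTR_ERR(per);

	return clk_prepare_enable(per);
}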
static void __init pistachio_clksrc_of_init(struct device_node *node) { struct clk *sys_clk, *fast_clk; struct regmap *periph_regs; unsigned long rate; int ret; pcs_gpt.base = of_iomap(node, 0); if (!pcs_gpt.base) { pr_err("cannot iomap\n"); return; } periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph"); if (IS_ERR(periph_regs)) { pr_err("cannot get peripheral regmap (%lu)\n", PTR_ERR(periph_regs)); return; } /* Switch to using the fast counter clock */ ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL, 0xf, 0x0); if (ret) return; sys_clk = of_clk_get_by_name(node, "sys"); if (IS_ERR(sys_clk)) { pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk)); return; } fast_clk = of_clk_get_by_name(node, "fast"); if (IS_ERR(fast_clk)) { pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk)); return; } ret = clk_prepare_enable(sys_clk); if (ret < 0) { pr_err("failed to enable clock (%d)\n", ret); return; } ret = clk_prepare_enable(fast_clk); if (ret < 0) { pr_err("failed to enable clock (%d)\n", ret); clk_disable_unprepare(sys_clk); return; } rate = clk_get_rate(fast_clk); /* Disable irq's for clocksource usage */ gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0); gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1); gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2); gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3); /* Enable timer block */ writel(TIMER_ME_GLOBAL, pcs_gpt.base); raw_spin_lock_init(&pcs_gpt.lock); sched_clock_register(pistachio_read_sched_clock, 32, rate); clocksource_register_hz(&pcs_gpt.cs, rate); }
static void hal_dma_set_default_setting(ENUM_DMA_DIR dma_dir)
{
	struct device_node *node = NULL;
	unsigned int irq_info[3] = {0, 0, 0};
	unsigned int phy_base;

	if (DMA_DIR_RX == dma_dir) {
		node = of_find_compatible_node(NULL, NULL, "mediatek,AP_DMA_BTIF_RX");
		if (node) {
			mtk_btif_rx_dma.p_irq->irq_id = irq_of_parse_and_map(node, 0);
			/* FIXME: make this compatible with 64-bit architectures */
			mtk_btif_rx_dma.base = (unsigned long)of_iomap(node, 0);
			BTIF_INFO_FUNC("get rx_dma irq(%d),register base(0x%lx)\n",
				       mtk_btif_rx_dma.p_irq->irq_id, mtk_btif_rx_dma.base);
		} else {
			BTIF_ERR_FUNC("get rx_dma device node fail\n");
		}
		/* get the interrupt line behaviour */
		if (of_property_read_u32_array(node, "interrupts",
					       irq_info, ARRAY_SIZE(irq_info))) {
			BTIF_ERR_FUNC("get interrupt flag from DTS fail\n");
		} else {
			mtk_btif_rx_dma.p_irq->irq_flags = irq_info[2];
			BTIF_INFO_FUNC("get interrupt flag(0x%x)\n",
				       mtk_btif_rx_dma.p_irq->irq_flags);
		}
		if (of_property_read_u32_index(node, "reg", 0, &phy_base)) {
			BTIF_ERR_FUNC("get register phy base from DTS fail,dma_dir(%d)\n",
				      dma_dir);
		} else {
			BTIF_INFO_FUNC("get register phy base dma_dir(%d)(0x%x)\n",
				       dma_dir, (unsigned int)phy_base);
		}
	} else if (DMA_DIR_TX == dma_dir) {
		node = of_find_compatible_node(NULL, NULL, "mediatek,AP_DMA_BTIF_TX");
		if (node) {
			mtk_btif_tx_dma.p_irq->irq_id = irq_of_parse_and_map(node, 0);
			/* FIXME: make this compatible with 64-bit architectures */
			mtk_btif_tx_dma.base = (unsigned long)of_iomap(node, 0);
			BTIF_INFO_FUNC("get tx_dma irq(%d),register base(0x%lx)\n",
				       mtk_btif_tx_dma.p_irq->irq_id, mtk_btif_tx_dma.base);
		} else {
			BTIF_ERR_FUNC("get tx_dma device node fail\n");
		}
		/* get the interrupt line behaviour */
		if (of_property_read_u32_array(node, "interrupts",
					       irq_info, ARRAY_SIZE(irq_info))) {
			BTIF_ERR_FUNC("get interrupt flag from DTS fail\n");
		} else {
			mtk_btif_tx_dma.p_irq->irq_flags = irq_info[2];
			BTIF_INFO_FUNC("get interrupt flag(0x%x)\n",
				       mtk_btif_tx_dma.p_irq->irq_flags);
		}
		if (of_property_read_u32_index(node, "reg", 0, &phy_base)) {
			BTIF_ERR_FUNC("get register phy base from DTS fail,dma_dir(%d)\n",
				      dma_dir);
		} else {
			BTIF_INFO_FUNC("get register phy base dma_dir(%d)(0x%x)\n",
				       dma_dir, (unsigned int)phy_base);
		}
	}
}
/* * OF Platform Bus Binding */ static int mpc52xx_spi_probe(struct platform_device *op) { struct spi_master *master; struct mpc52xx_spi *ms; void __iomem *regs; u8 ctrl1; int rc, i = 0; int gpio_cs; /* MMIO registers */ dev_dbg(&op->dev, "probing mpc5200 SPI device\n"); regs = of_iomap(op->dev.of_node, 0); if (!regs) return -ENODEV; /* initialize the device */ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR; out_8(regs + SPI_CTRL1, ctrl1); out_8(regs + SPI_CTRL2, 0x0); out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */ out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */ /* Clear the status register and re-read it to check for a MODF * failure. This driver cannot currently handle multiple masters * on the SPI bus. This fault will also occur if the SPI signals * are not connected to any pins (port_config setting) */ in_8(regs + SPI_STATUS); out_8(regs + SPI_CTRL1, ctrl1); in_8(regs + SPI_DATA); if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) { dev_err(&op->dev, "mode fault; is port_config correct?\n"); rc = -EIO; goto err_init; } dev_dbg(&op->dev, "allocating spi_master struct\n"); master = spi_alloc_master(&op->dev, sizeof *ms); if (!master) { rc = -ENOMEM; goto err_alloc; } master->transfer = mpc52xx_spi_transfer; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8); master->dev.of_node = op->dev.of_node; platform_set_drvdata(op, master); ms = spi_master_get_devdata(master); ms->master = master; ms->regs = regs; ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0); ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1); ms->state = mpc52xx_spi_fsmstate_idle; ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node); ms->gpio_cs_count = of_gpio_count(op->dev.of_node); if (ms->gpio_cs_count > 0) { master->num_chipselect = ms->gpio_cs_count; ms->gpio_cs = kmalloc_array(ms->gpio_cs_count, sizeof(*ms->gpio_cs), GFP_KERNEL); if (!ms->gpio_cs) { rc = -ENOMEM; goto err_alloc_gpio; } for (i = 0; i < ms->gpio_cs_count; i++) { gpio_cs = of_get_gpio(op->dev.of_node, i); if (!gpio_is_valid(gpio_cs)) { dev_err(&op->dev, "could not parse the gpio field in oftree\n"); rc = -ENODEV; goto err_gpio; } rc = gpio_request(gpio_cs, dev_name(&op->dev)); if (rc) { dev_err(&op->dev, "can't request spi cs gpio #%d on gpio line %d\n", i, gpio_cs); goto err_gpio; } gpio_direction_output(gpio_cs, 1); ms->gpio_cs[i] = gpio_cs; } } spin_lock_init(&ms->lock); INIT_LIST_HEAD(&ms->queue); INIT_WORK(&ms->work, mpc52xx_spi_wq); /* Decide if interrupts can be used */ if (ms->irq0 && ms->irq1) { rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0, "mpc5200-spi-modf", ms); rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0, "mpc5200-spi-spif", ms); if (rc) { free_irq(ms->irq0, ms); free_irq(ms->irq1, ms); ms->irq0 = ms->irq1 = 0; } } else { /* operate in polled mode */ ms->irq0 = ms->irq1 = 0; } if (!ms->irq0) dev_info(&op->dev, "using polled mode\n"); dev_dbg(&op->dev, "registering spi_master struct\n"); rc = spi_register_master(master); if (rc) goto err_register; dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); return rc; err_register: dev_err(&ms->master->dev, "initialization failed\n"); err_gpio: while (i-- > 0) gpio_free(ms->gpio_cs[i]); kfree(ms->gpio_cs); err_alloc_gpio: spi_master_put(master); err_alloc: err_init: iounmap(regs); return rc; }
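/*
 * Sketch of the corresponding teardown, mirroring the probe's error labels;
 * an illustration of the unwind order rather than the driver's verbatim
 * remove function.
 */
static int mpc52xx_spi_remove(struct platform_device *op)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(op));
	struct mpc52xx_spi *ms = spi_master_get_devdata(master);
	int i;

	free_irq(ms->irq0, ms);
	free_irq(ms->irq1, ms);

	for (i = 0; i < ms->gpio_cs_count; i++)
		gpio_free(ms->gpio_cs[i]);
	kfree(ms->gpio_cs);

	spi_unregister_master(master);
	iounmap(ms->regs);
	spi_master_put(master);

	return 0;
}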
static int bcm_sf2_sw_setup(struct dsa_switch *ds) { const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct bcm_sf2_priv *priv = ds_to_priv(ds); struct device_node *dn; void __iomem **base; unsigned int port; unsigned int i; u32 reg, rev; int ret; spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); /* All the interesting properties are at the parent device_node * level */ dn = ds->cd->of_node->parent; bcm_sf2_identify_ports(priv, ds->cd->of_node); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { *base = of_iomap(dn, i); if (*base == NULL) { pr_err("unable to find register: %s\n", reg_names[i]); ret = -ENOMEM; goto out_unmap; } base++; } ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); goto out_unmap; } /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_unmap; } ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, "switch_1", priv); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); goto out_free_irq0; } /* Reset the MIB counters */ reg = core_readl(priv, CORE_GMNCFGCFG); reg |= RST_MIB_CNT; core_writel(priv, reg, CORE_GMNCFGCFG); reg &= ~RST_MIB_CNT; core_writel(priv, reg, CORE_GMNCFGCFG); /* Get the maximum number of ports for this switch */ priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; if (priv->hw_params.num_ports > DSA_MAX_PORTS) priv->hw_params.num_ports = DSA_MAX_PORTS; /* Assume a single GPHY setup if we can't read that property */ if (of_property_read_u32(dn, "brcm,num-gphy", &priv->hw_params.num_gphy)) priv->hw_params.num_gphy = 1; /* Enable all valid ports and disable those unused */ for (port = 0; port < priv->hw_params.num_ports; port++) { /* IMP port receives special treatment */ if ((1 << port) & ds->enabled_port_mask) bcm_sf2_port_setup(ds, port, NULL); else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); else bcm_sf2_port_disable(ds, port, NULL); } /* Include the pseudo-PHY address and the broadcast PHY address to * divert reads towards our workaround. This is only required for * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such * that we can use the regular SWITCH_MDIO master controller instead. * * By default, DSA initializes ds->phys_mii_mask to * ds->enabled_port_mask to have a 1:1 mapping between Port address * and PHY address in order to utilize the slave_mii_bus instance to * read from Port PHYs. This is not what we want here, so we * initialize phys_mii_mask 0 to always utilize the "master" MDIO * bus backed by the "mdio-unimac" driver. 
*/ if (of_machine_is_compatible("brcm,bcm7445d0")) ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); else ds->phys_mii_mask = 0; rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & SWITCH_TOP_REV_MASK; priv->hw_params.core_rev = (rev & SF2_REV_MASK); rev = reg_readl(priv, REG_PHY_REVISION); priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, priv->core, priv->irq0, priv->irq1); return 0; out_free_irq0: free_irq(priv->irq0, priv); out_unmap: base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { if (*base) iounmap(*base); base++; } return ret; }
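/*
 * core_readl()/core_writel() used above are thin MMIO accessors over the first
 * register window mapped by the of_iomap() loop (priv->core).  A hypothetical
 * sketch of what such accessors might look like; the real driver may generate
 * these helpers differently (e.g. via macros covering every register window).
 */
static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
{
	return readl_relaxed(priv->core + off);
}

static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
{
	writel_relaxed(val, priv->core + off);
}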
/** * of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API * @np: device node to get GPIO from * @propname: property name containing gpio specifier(s) * @index: index of the GPIO * @flags: a flags pointer to fill in * * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. If @flags is not NULL the function also fills * in flags for the GPIO. */ int of_get_named_gpio_flags(struct device_node *np, const char *propname, int index, enum of_gpio_flags *flags) { /* Return -EPROBE_DEFER to support probe() functions to be called * later when the GPIO actually becomes available */ struct gg_data gg_data = { .flags = flags, .out_gpio = -EPROBE_DEFER }; int ret; /* .of_xlate might decide to not fill in the flags, so clear it. */ if (flags) *flags = 0; ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index, &gg_data.gpiospec); if (ret) { pr_debug("%s: can't parse gpios property\n", __func__); return ret; } gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); of_node_put(gg_data.gpiospec.np); pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio); return gg_data.out_gpio; } EXPORT_SYMBOL(of_get_named_gpio_flags); /** * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags * @gc: pointer to the gpio_chip structure * @np: device node of the GPIO chip * @gpio_spec: gpio specifier as found in the device tree * @flags: a flags pointer to fill in * * This is simple translation function, suitable for the most 1:1 mapped * gpio chips. This function performs only one sanity check: whether gpio * is less than ngpios (that is specified in the gpio_chip). */ int of_gpio_simple_xlate(struct gpio_chip *gc, const struct of_phandle_args *gpiospec, u32 *flags) { /* * We're discouraging gpio_cells < 2, since that way you'll have to * write your own xlate function (that will have to retrive the GPIO * number and the flags from a single gpio cell -- this is possible, * but not recommended). */ if (gc->of_gpio_n_cells < 2) { WARN_ON(1); return -EINVAL; } if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells)) return -EINVAL; if (gpiospec->args[0] >= gc->ngpio) return -EINVAL; if (flags) *flags = gpiospec->args[1]; return gpiospec->args[0]; } EXPORT_SYMBOL(of_gpio_simple_xlate); /** * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) * @np: device node of the GPIO chip * @mm_gc: pointer to the of_mm_gpio_chip allocated structure * * To use this function you should allocate and fill mm_gc with: * * 1) In the gpio_chip structure: * - all the callbacks * - of_gpio_n_cells * - of_xlate callback (optional) * * 3) In the of_mm_gpio_chip structure: * - save_regs callback (optional) * * If succeeded, this function will map bank's memory and will * do all necessary work for you. Then you'll able to use .regs * to manage GPIOs from the callbacks. */ int of_mm_gpiochip_add(struct device_node *np, struct of_mm_gpio_chip *mm_gc) { int ret = -ENOMEM; struct gpio_chip *gc = &mm_gc->gc; gc->label = kstrdup(np->full_name, GFP_KERNEL); if (!gc->label) goto err0; mm_gc->regs = of_iomap(np, 0); if (!mm_gc->regs) goto err1; gc->base = -1; if (mm_gc->save_regs) mm_gc->save_regs(mm_gc); mm_gc->gc.of_node = np; ret = gpiochip_add(gc); if (ret) goto err2; return 0; err2: iounmap(mm_gc->regs); err1: kfree(gc->label); err0: pr_err("%s: GPIO chip registration failed with status %d\n", np->full_name, ret); return ret; }
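/*
 * Consumer-side sketch of of_get_named_gpio_flags(): a probe path resolving a
 * reset GPIO from its node, honouring -EPROBE_DEFER and the active-low flag.
 * The property name "reset-gpios", the label and the function name are
 * illustrative.
 */
static int foo_request_reset_gpio(struct device *dev)
{
	enum of_gpio_flags flags;
	int gpio;

	gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, &flags);
	if (gpio == -EPROBE_DEFER)
		return gpio;	/* GPIO controller not probed yet, retry later */
	if (!gpio_is_valid(gpio))
		return gpio;

	/* Keep the device in reset initially, respecting the polarity flag */
	return gpio_request_one(gpio,
				(flags & OF_GPIO_ACTIVE_LOW) ?
					GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
				"foo-reset");
}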
/* map system registers */
static int map_sysregs(void)
{
	unsigned int uart_idx = 0;
	struct device_node *np = NULL;

	uart_idx = get_console_index();
	switch (uart_idx) {
	case 0:
		np = of_find_compatible_node(NULL, NULL, "arm,pl011");
		break;
	case 6:
		np = of_find_compatible_node(NULL, NULL, "hisilicon,lowpm_test");
		break;
	default:
		break;
	}
	if (NULL != np) {
		sysreg_base.uart_base = of_iomap(np, 0);
		if (!sysreg_base.uart_base)
			goto err;
	}

	np = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
	sysreg_base.sysctrl_base = of_iomap(np, 0);
	if (!sysreg_base.sysctrl_base)
		goto err;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,pctrl");
	sysreg_base.pctrl_base = of_iomap(np, 0);
	if (!sysreg_base.pctrl_base)
		goto err;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,pmctrl");
	sysreg_base.pmctrl_base = of_iomap(np, 0);
	/* check the register just mapped, not pctrl_base again */
	if (!sysreg_base.pmctrl_base)
		goto err;

	np = of_find_compatible_node(NULL, NULL, "hisilicon,crgctrl");
	sysreg_base.crg_base = of_iomap(np, 0);
	if (!sysreg_base.crg_base)
		goto err;

	np = of_find_node_by_name(NULL, "pmic");
	sysreg_base.pmic_base = of_iomap(np, 0);
	if (!sysreg_base.pmic_base)
		goto err;

	sysreg_base.reserved_base = ioremap((unsigned long)hisi_reserved_pm_phymem, PM_BUFFER_SIZE);
	if (!sysreg_base.reserved_base)
		goto err;

	if (!map_io_regs())
		return 0;
err:
	printk("hisi_lpregs:map_sysregs failed.\n");
	sysreg_base.uart_base = NULL;
	sysreg_base.sysctrl_base = NULL;
	sysreg_base.pctrl_base = NULL;
	sysreg_base.pmctrl_base = NULL;
	sysreg_base.crg_base = NULL;
	sysreg_base.pmic_base = NULL;
	sysreg_base.reserved_base = NULL;
	return -ENODEV;
}
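/*
 * The repeated find-compatible/of_iomap sequence above (which also never calls
 * of_node_put() on the nodes it looks up) could be factored into a small
 * helper.  A sketch; the helper name is illustrative:
 */
static void __iomem *iomap_by_compatible(const char *compat)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return NULL;

	base = of_iomap(np, 0);
	of_node_put(np);	/* the mapping stays valid; drop the node reference */

	return base;
}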
static int __init mps2_clockevent_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk = NULL;
	struct clockevent_mps2 *ce;
	u32 rate;
	int irq, ret;
	const char *name = "mps2-clkevt";

	ret = of_property_read_u32(np, "clock-frequency", &rate);
	if (ret) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("failed to get clock for clockevent: %d\n", ret);
			goto out;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			pr_err("failed to enable clock for clockevent: %d\n", ret);
			goto out_clk_put;
		}

		rate = clk_get_rate(clk);
	}

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map register for clockevent: %d\n", ret);
		goto out_clk_disable;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		ret = -ENOENT;
		pr_err("failed to get irq for clockevent: %d\n", ret);
		goto out_iounmap;
	}

	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
	if (!ce) {
		ret = -ENOMEM;
		goto out_iounmap;
	}

	ce->reg = base;
	ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
	ce->clkevt.irq = irq;
	ce->clkevt.name = name;
	ce->clkevt.rating = 200;
	ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ce->clkevt.cpumask = cpu_possible_mask;
	ce->clkevt.set_state_shutdown = mps2_timer_shutdown;
	ce->clkevt.set_state_periodic = mps2_timer_set_periodic;
	ce->clkevt.set_state_oneshot = mps2_timer_shutdown;
	ce->clkevt.set_next_event = mps2_timer_set_next_event;

	/* Ensure timer is disabled */
	writel_relaxed(0, base + TIMER_CTRL);

	ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
	if (ret) {
		pr_err("failed to request irq for clockevent: %d\n", ret);
		goto out_kfree;
	}

	clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);

	return 0;

out_kfree:
	kfree(ce);
out_iounmap:
	iounmap(base);
out_clk_disable:
	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
	clk_disable_unprepare(clk);
out_clk_put:
	clk_put(clk);
out:
	return ret;
}
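/*
 * The set_state/set_next_event callbacks wired up above are defined elsewhere
 * in the driver; the sketch below only illustrates, under stated assumptions,
 * what such handlers typically do for this down-counting timer. The
 * TIMER_CTRL_IRQEN bit name is assumed, and the struct clockevent_mps2 layout
 * (reg, clock_count_per_tick, clkevt) is inferred from the init function
 * above; treat the bodies as illustrative, not the driver's authoritative
 * implementation.
 */
static struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c)
{
	return container_of(c, struct clockevent_mps2, clkevt);
}

static int example_mps2_timer_shutdown(struct clock_event_device *c)
{
	struct clockevent_mps2 *ce = to_mps2_clkevt(c);

	/* Stop the counter and clear the reload value. */
	writel_relaxed(0, ce->reg + TIMER_RELOAD);
	writel_relaxed(0, ce->reg + TIMER_CTRL);
	return 0;
}

static int example_mps2_timer_set_next_event(unsigned long next,
					     struct clock_event_device *c)
{
	struct clockevent_mps2 *ce = to_mps2_clkevt(c);

	/* Program a one-shot expiry and re-enable the timer with IRQs on. */
	writel_relaxed(next, ce->reg + TIMER_VALUE);
	writel_relaxed(TIMER_CTRL_IRQEN | TIMER_CTRL_ENABLE, ce->reg + TIMER_CTRL);
	return 0;
}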
static int __devinit sdhci_of_probe(struct of_device *ofdev,
				    const struct of_device_id *match)
{
	struct device_node *np = ofdev->node;
	struct sdhci_of_data *sdhci_of_data = match->data;
	struct sdhci_host *host;
	struct sdhci_of_host *of_host;
	const u32 *clk;
	int size;
	int ret;

	if (!of_device_is_available(np))
		return -ENODEV;

	host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host));
	if (IS_ERR(host))
		return -ENOMEM;

	of_host = sdhci_priv(host);
	dev_set_drvdata(&ofdev->dev, host);

	host->ioaddr = of_iomap(np, 0);
	if (!host->ioaddr) {
		ret = -ENOMEM;
		goto err_addr_map;
	}

	host->irq = irq_of_parse_and_map(np, 0);
	if (!host->irq) {
		ret = -EINVAL;
		goto err_no_irq;
	}

	host->hw_name = dev_name(&ofdev->dev);
	if (sdhci_of_data) {
		host->quirks = sdhci_of_data->quirks;
		host->ops = &sdhci_of_data->ops;
	}

	if (of_get_property(np, "sdhci,1-bit-only", NULL))
		host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;

	if (sdhci_of_wp_inverted(np))
		host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;

	clk = of_get_property(np, "clock-frequency", &size);
	if (clk && size == sizeof(*clk) && *clk)
		of_host->clock = *clk;

	ret = sdhci_add_host(host);
	if (ret)
		goto err_add_host;

	return 0;

err_add_host:
	irq_dispose_mapping(host->irq);
err_no_irq:
	iounmap(host->ioaddr);
err_addr_map:
	sdhci_free_host(host);
	return ret;
}
static int __init mps2_clocksource_init(struct device_node *np)
{
	void __iomem *base;
	struct clk *clk = NULL;
	u32 rate;
	int ret;
	const char *name = "mps2-clksrc";

	ret = of_property_read_u32(np, "clock-frequency", &rate);
	if (ret) {
		clk = of_clk_get(np, 0);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			pr_err("failed to get clock for clocksource: %d\n", ret);
			goto out;
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			pr_err("failed to enable clock for clocksource: %d\n", ret);
			goto out_clk_put;
		}

		rate = clk_get_rate(clk);
	}

	base = of_iomap(np, 0);
	if (!base) {
		ret = -EADDRNOTAVAIL;
		pr_err("failed to map register for clocksource: %d\n", ret);
		goto out_clk_disable;
	}

	/* Ensure timer is disabled */
	writel_relaxed(0, base + TIMER_CTRL);

	/* ... and set it up as free-running clocksource */
	writel_relaxed(0xffffffff, base + TIMER_VALUE);
	writel_relaxed(0xffffffff, base + TIMER_RELOAD);
	writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);

	ret = clocksource_mmio_init(base + TIMER_VALUE, name, rate, 200, 32,
				    clocksource_mmio_readl_down);
	if (ret) {
		pr_err("failed to init clocksource: %d\n", ret);
		goto out_iounmap;
	}

	sched_clock_base = base;
	sched_clock_register(mps2_sched_read, 32, rate);

	return 0;

out_iounmap:
	iounmap(base);
out_clk_disable:
	/* clk_{disable, unprepare, put}() can handle NULL as a parameter */
	clk_disable_unprepare(clk);
out_clk_put:
	clk_put(clk);
out:
	return ret;
}
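/*
 * mps2_sched_read() is registered above but defined elsewhere in the driver.
 * A minimal sketch, assuming only what this init sets up (a free-running,
 * down-counting 32-bit TIMER_VALUE register behind sched_clock_base): invert
 * the counter so sched_clock() sees a monotonically increasing value. The
 * example_ prefix marks this as illustrative, not the driver's own code.
 */
static u64 notrace example_mps2_sched_read(void)
{
	return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
}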
static int __devinit fsl_ssi_probe(struct platform_device *pdev)
{
	struct fsl_ssi_private *ssi_private;
	int ret = 0;
	struct device_attribute *dev_attr = NULL;
	struct device_node *np = pdev->dev.of_node;
	const char *p, *sprop;
	const uint32_t *iprop;
	struct resource res;
	char name[64];

	/* SSIs that are not connected on the board should have a
	 *	status = "disabled"
	 * property in their device tree nodes.
	 */
	if (!of_device_is_available(np))
		return -ENODEV;

	/* Check for a codec-handle property. */
	if (!of_get_property(np, "codec-handle", NULL)) {
		dev_err(&pdev->dev, "missing codec-handle property\n");
		return -ENODEV;
	}

	/* We only support the SSI in "I2S Slave" mode */
	sprop = of_get_property(np, "fsl,mode", NULL);
	if (!sprop || strcmp(sprop, "i2s-slave")) {
		dev_notice(&pdev->dev, "mode %s is unsupported\n", sprop);
		return -ENODEV;
	}

	/* The DAI name is the last part of the full name of the node. */
	p = strrchr(np->full_name, '/') + 1;
	ssi_private = kzalloc(sizeof(struct fsl_ssi_private) + strlen(p),
			      GFP_KERNEL);
	if (!ssi_private) {
		dev_err(&pdev->dev, "could not allocate DAI object\n");
		return -ENOMEM;
	}

	strcpy(ssi_private->name, p);

	/* Initialize this copy of the CPU DAI driver structure */
	memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
	       sizeof(fsl_ssi_dai_template));
	ssi_private->cpu_dai_drv.name = ssi_private->name;

	/* Get the addresses and IRQ */
	ret = of_address_to_resource(np, 0, &res);
	if (ret) {
		dev_err(&pdev->dev, "could not determine device resources\n");
		goto error_kmalloc;
	}
	ssi_private->ssi = of_iomap(np, 0);
	if (!ssi_private->ssi) {
		dev_err(&pdev->dev, "could not map device resources\n");
		ret = -ENOMEM;
		goto error_kmalloc;
	}
	ssi_private->ssi_phys = res.start;

	ssi_private->irq = irq_of_parse_and_map(np, 0);
	if (ssi_private->irq == NO_IRQ) {
		dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
		ret = -ENXIO;
		goto error_iomap;
	}

	/* The 'name' should not have any slashes in it. */
	ret = request_irq(ssi_private->irq, fsl_ssi_isr, 0, ssi_private->name,
			  ssi_private);
	if (ret < 0) {
		dev_err(&pdev->dev, "could not claim irq %u\n",
			ssi_private->irq);
		goto error_irqmap;
	}

	/* Are the RX and the TX clocks locked? */
	if (!of_find_property(np, "fsl,ssi-asynchronous", NULL))
		ssi_private->cpu_dai_drv.symmetric_rates = 1;

	/* Determine the FIFO depth. */
	iprop = of_get_property(np, "fsl,fifo-depth", NULL);
	if (iprop)
		ssi_private->fifo_depth = be32_to_cpup(iprop);
	else
		/* Older 8610 DTs didn't have the fifo-depth property */
		ssi_private->fifo_depth = 8;

	/* Initialize the device_attribute structure */
	dev_attr = &ssi_private->dev_attr;
	sysfs_attr_init(&dev_attr->attr);
	dev_attr->attr.name = "statistics";
	dev_attr->attr.mode = S_IRUGO;
	dev_attr->show = fsl_sysfs_ssi_show;

	ret = device_create_file(&pdev->dev, dev_attr);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs %s file\n",
			ssi_private->dev_attr.attr.name);
		goto error_irq;
	}

	/* Register with ASoC */
	dev_set_drvdata(&pdev->dev, ssi_private);

	ret = snd_soc_register_dai(&pdev->dev, &ssi_private->cpu_dai_drv);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
		goto error_dev;
	}

	/* Trigger the machine driver's probe function.  The platform driver
	 * name of the machine driver is taken from the /compatible property of
	 * the device tree.  We also pass the address of the CPU DAI driver
	 * structure.
	 */
	sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL);

	/* Sometimes the compatible name has a "fsl," prefix, so we strip it.
	 */
	p = strrchr(sprop, ',');
	if (p)
		sprop = p + 1;
	snprintf(name, sizeof(name), "snd-soc-%s", sprop);
	make_lowercase(name);

	ssi_private->pdev =
		platform_device_register_data(&pdev->dev, name, 0, NULL, 0);
	if (IS_ERR(ssi_private->pdev)) {
		ret = PTR_ERR(ssi_private->pdev);
		dev_err(&pdev->dev, "failed to register platform: %d\n", ret);
		goto error_dai;
	}

	return 0;

error_dai:
	snd_soc_unregister_dai(&pdev->dev);

error_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	device_remove_file(&pdev->dev, dev_attr);

error_irq:
	free_irq(ssi_private->irq, ssi_private);

error_irqmap:
	irq_dispose_mapping(ssi_private->irq);

error_iomap:
	iounmap(ssi_private->ssi);

error_kmalloc:
	kfree(ssi_private);

	return ret;
}
static int __init pikawdt_init(void)
{
	struct device_node *np;
	void __iomem *fpga;
	static u32 post1;
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pika,fpga");
	if (np == NULL) {
		printk(KERN_ERR PFX "Unable to find fpga.\n");
		return -ENOENT;
	}

	pikawdt_private.fpga = of_iomap(np, 0);
	of_node_put(np);
	if (pikawdt_private.fpga == NULL) {
		printk(KERN_ERR PFX "Unable to map fpga.\n");
		return -ENOMEM;
	}

	ident.firmware_version = in_be32(pikawdt_private.fpga + 0x1c) & 0xffff;

	/* POST information is in the sd area. */
	np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd");
	if (np == NULL) {
		printk(KERN_ERR PFX "Unable to find fpga-sd.\n");
		ret = -ENOENT;
		goto out;
	}

	fpga = of_iomap(np, 0);
	of_node_put(np);
	if (fpga == NULL) {
		printk(KERN_ERR PFX "Unable to map fpga-sd.\n");
		ret = -ENOMEM;
		goto out;
	}

	/* -- FPGA: POST Test Results Register 1 (32bit R/W) (Offset: 0x4040) --
	 * Bit 31, WDOG: Set to 1 when the last reset was caused by a watchdog
	 * timeout.
	 */
	post1 = in_be32(fpga + 0x40);
	if (post1 & 0x80000000)
		pikawdt_private.bootstatus = WDIOF_CARDRESET;

	iounmap(fpga);

	setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);

	ret = misc_register(&pikawdt_miscdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register miscdev.\n");
		goto out;
	}

	printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
	       heartbeat, nowayout);
	return 0;

out:
	iounmap(pikawdt_private.fpga);
	return ret;
}