/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
{
	void __iomem *fixed = IO_ADDRESS(p);

	/* Fast path: the address lies inside a statically mapped window. */
	if (fixed != NULL)
		return fixed;

	/* Otherwise fall back to building a dynamic mapping. */
	return __arm_ioremap(p, size, type);
}
/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
{
	/* Anything outside the static I/O window needs a real mapping. */
	if (!BETWEEN(p, IO_PHYS, IO_SIZE))
		return __arm_ioremap(p, size, type);

	/* Statically mapped I/O: translate phys -> virt directly. */
	return XLATE(p, IO_PHYS, IO_VIRT);
}
/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
{
	void __iomem *v = IO_ADDRESS(p);

	/*
	 * __arm_ioremap fails to set the domain of ioremapped memory
	 * correctly, only use it on physical memory.
	 */
	if (v == NULL) {
		int in_dram = p >= TEGRA_DRAM_BASE &&
			(p + size) <= (TEGRA_DRAM_BASE + TEGRA_DRAM_SIZE);
		int in_nor = p >= TEGRA_NOR_FLASH_BASE &&
			(p + size) <= (TEGRA_NOR_FLASH_BASE +
				       TEGRA_NOR_FLASH_SIZE);
		int in_pcie = p >= TEGRA_PCIE_BASE &&
			(p + size) <= (TEGRA_PCIE_BASE + TEGRA_PCIE_SIZE);

		if (in_dram || in_nor || in_pcie)
			v = __arm_ioremap(p, size, type);
	}

	/*
	 * If the physical address was not physical memory or statically
	 * mapped, there's nothing we can do to map it safely.
	 */
	BUG_ON(v == NULL);

	return v;
}
/*
 * pxa_cpu_reset_handler_init() - install the warm-reset/resume entry points.
 *
 * Maps one reserved page (strongly ordered, so stores reach DDR without
 * cache maintenance) to hold the per-CPU reset handler table, publishes the
 * reset entry either through the TrustZone hypervisor or directly via
 * CIU_WARM_RESET_VECTOR, then registers resume/secondary-boot handlers.
 * Panics if the reserved page cannot be mapped.
 */
void __init pxa_cpu_reset_handler_init(void)
{
	int cpu;
#ifdef CONFIG_TZ_HYPERVISOR
	tzlc_cmd_desc cmd_desc;
	tzlc_handle tzlc_hdl;
#endif

	/* Assign the address for saving reset handler */
	reset_handler_pa = pm_reserve_pa + PAGE_SIZE;
	/* MT_MEMORY_SO: strongly-ordered mapping for the handler table. */
	reset_handler = (u32 *)__arm_ioremap(reset_handler_pa,
			PAGE_SIZE, MT_MEMORY_SO);
	if (reset_handler == NULL)
		panic("failed to remap memory for reset handler!\n");
	memset(reset_handler, 0x0, PAGE_SIZE);

	/* Flush the addr to DDR */
	__cpuc_flush_dcache_area((void *)&reset_handler_pa,
			sizeof(reset_handler_pa));
	/* Push the L1-cleaned line out through the outer (L2) cache too. */
	outer_clean_range(__pa(&reset_handler_pa),
			__pa(&reset_handler_pa + 1));

	/*
	 * with TrustZone enabled, CIU_WARM_RESET_VECTOR is used by TrustZone
	 * software, and kernel use CIU_SW_SCRATCH_REG to save the cpu reset
	 * entry.
	 */
#ifdef CONFIG_TZ_HYPERVISOR
	tzlc_hdl = pxa_tzlc_create_handle();
	cmd_desc.op = TZLC_CMD_SET_WARM_RESET_ENTRY;
	cmd_desc.args[0] = __pa(pxa988_cpu_reset_entry);
	pxa_tzlc_cmd_op(tzlc_hdl, &cmd_desc);
	pxa_tzlc_destroy_handle(tzlc_hdl);
#else
	/* We will reset from DDR directly by default */
	writel(__pa(pxa988_cpu_reset_entry), CIU_WARM_RESET_VECTOR);
#endif

#ifdef CONFIG_PM
	/* Setup the resume handler for the first core */
	pxa988_set_reset_handler(__pa(pxa988_cpu_resume_handler), 0);
#endif

	/* Setup the handler for secondary cores */
	for (cpu = 1; cpu < CONFIG_NR_CPUS; cpu++)
		pxa988_set_reset_handler(__pa(pxa988_secondary_startup), cpu);

#ifdef CONFIG_HOTPLUG_CPU
	/* Setup the handler for Hotplug cores */
	writel(__pa(pxa988_secondary_startup), &secondary_cpu_handler);
	__cpuc_flush_dcache_area((void *)&secondary_cpu_handler,
			sizeof(secondary_cpu_handler));
	outer_clean_range(__pa(&secondary_cpu_handler),
			__pa(&secondary_cpu_handler + 1));
#endif
}
/*
 * Translate a physical address into an I/O virtual address, preferring the
 * statically configured ATU/PMMR windows; only PBI and unknown regions go
 * through a dynamic __arm_ioremap().
 */
void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
	unsigned int mtype)
{
	switch (cookie) {
	case IOP13XX_PCIX_LOWER_MEM_RA ... IOP13XX_PCIX_UPPER_MEM_RA:
		/* ATU-X window must have been configured first. */
		if (unlikely(!iop13xx_atux_mem_base))
			return NULL;
		return (void *)(iop13xx_atux_mem_base +
				(cookie - IOP13XX_PCIX_LOWER_MEM_RA));
	case IOP13XX_PCIE_LOWER_MEM_RA ... IOP13XX_PCIE_UPPER_MEM_RA:
		/* ATU-E window must have been configured first. */
		if (unlikely(!iop13xx_atue_mem_base))
			return NULL;
		return (void *)(iop13xx_atue_mem_base +
				(cookie - IOP13XX_PCIE_LOWER_MEM_RA));
	case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA:
		/* PBI has no fixed mapping: translate then ioremap. */
		return __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA +
				     (cookie - IOP13XX_PBI_LOWER_MEM_RA),
				     size, mtype);
	case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA:
		return (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie);
	case IOP13XX_PCIX_LOWER_IO_PA ... IOP13XX_PCIX_UPPER_IO_PA:
		return (void *) IOP13XX_PCIX_IO_PHYS_TO_VIRT(cookie);
	case IOP13XX_PMMR_PHYS_MEM_BASE ... IOP13XX_PMMR_UPPER_MEM_PA:
		return (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie);
	default:
		return __arm_ioremap(cookie, size, mtype);
	}
}
/*
 * bcm_dma_chan_alloc() - DDE stub that hands out BCM2708 DMA channel 0.
 *
 * @a:            requested channel capability mask (unused by this stub).
 * @out_dma_base: out: virtual base of the channel's register window.
 * @out_dma_irq:  out: interrupt number for the channel.
 *
 * Returns the allocated channel number (always 0 here).
 *
 * Fix: the original fell off after "return rc;" into an unreachable
 * dde_printf("not implemented")/"return 0;" pair — dead code removed.
 *
 * NOTE(review): the __arm_ioremap() result is not NULL-checked before use;
 * confirm the mapping cannot fail in this environment.
 */
DDE_WEAK int bcm_dma_chan_alloc(unsigned int a, void __iomem **out_dma_base,
	int *out_dma_irq)
{
	int rc = 0;	/* stub: channel 0 is always the one allocated */
	resource_size_t DMA_SIZE = SZ_4K;
	void __iomem *dma_base = __arm_ioremap(DMA_BASE, DMA_SIZE, 0);

	*out_dma_base = BCM2708_DMA_CHANIO(dma_base, rc);
	*out_dma_irq = bcm_dma_irqs[rc];
	return rc;
}
/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
{
	void __iomem *v = IO_ADDRESS(p);

	/* Already covered by a static mapping. */
	if (v != NULL)
		return v;

	/*
	 * __arm_ioremap fails to set the domain of ioremapped memory
	 * correctly, only use it on physical memory (below 1 GiB here).
	 */
	if (p < SZ_1G)
		return __arm_ioremap(p, size, type);

	return NULL;
}
/**
 * zynq_pm_remap_ocm() - Remap OCM
 * Returns a pointer to the mapped memory or NULL.
 *
 * Allocates a chunk of the OCM gen_pool large enough for the suspend code
 * and remaps it executable (MT_MEMORY_RWX).
 *
 * Fixes over the original:
 *  - of_node_put() was called before the node's last use
 *    (of_find_device_by_node(np)); the reference is now dropped after.
 *  - of_find_device_by_node() result is NULL-checked before dereference.
 *  - the gen_pool allocation is released on the later failure paths
 *    instead of being leaked.
 */
static void __iomem *zynq_pm_remap_ocm(void)
{
	struct device_node *np;
	const char *comp = "xlnx,zynq-ocmc-1.0";
	void __iomem *base = NULL;

	np = of_find_compatible_node(NULL, NULL, comp);
	if (np) {
		struct device *dev;
		struct platform_device *pdev;
		unsigned long pool_addr;
		unsigned long pool_addr_virt;
		struct gen_pool *pool;

		pdev = of_find_device_by_node(np);
		/* Drop the node reference only after its last use. */
		of_node_put(np);
		if (!pdev) {
			pr_warn("%s: no device for OCM node\n", __func__);
			return NULL;
		}
		dev = &pdev->dev;

		/* Get OCM pool from device tree or platform data */
		pool = dev_get_gen_pool(dev);
		if (!pool) {
			pr_warn("%s: OCM pool is not available\n", __func__);
			return NULL;
		}

		pool_addr_virt = gen_pool_alloc(pool, zynq_sys_suspend_sz);
		if (!pool_addr_virt) {
			pr_warn("%s: Can't get OCM pool\n", __func__);
			return NULL;
		}

		pool_addr = gen_pool_virt_to_phys(pool, pool_addr_virt);
		if (!pool_addr) {
			pr_warn("%s: Can't get physical address of OCM pool\n",
				__func__);
			gen_pool_free(pool, pool_addr_virt,
				      zynq_sys_suspend_sz);
			return NULL;
		}

		/* Executable mapping: suspend code will run from here. */
		base = __arm_ioremap(pool_addr, zynq_sys_suspend_sz,
				     MT_MEMORY_RWX);
		if (!base) {
			pr_warn("%s: IOremap OCM pool failed\n", __func__);
			gen_pool_free(pool, pool_addr_virt,
				      zynq_sys_suspend_sz);
			return NULL;
		}
		pr_debug("%s: Remap OCM %s from %lx to %lx\n", __func__, comp,
			 pool_addr_virt, (unsigned long)base);
	} else {
		pr_warn("%s: no compatible node found for '%s'\n", __func__,
			comp);
	}

	return base;
}
/*
 * Translate a physical address through the IOP3xx static PCI-I/O and
 * peripheral windows; anything else gets a dynamic mapping.
 */
void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
	unsigned int mtype)
{
	switch (cookie) {
	case IOP3XX_PCI_LOWER_IO_PA ... IOP3XX_PCI_UPPER_IO_PA:
		return (void *) IOP3XX_PCI_IO_PHYS_TO_VIRT(cookie);
	case IOP3XX_PERIPHERAL_PHYS_BASE ... IOP3XX_PERIPHERAL_UPPER_PA:
		return (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
	default:
		return __arm_ioremap(cookie, size, mtype);
	}
}
/*
 * pm_init() - set up static power management for i.MX6.
 *
 * Resolves the statically mapped controller bases, registers the PM
 * platform driver and suspend ops, copies the suspend routine into
 * on-chip iRAM (remapped non-cached so it is executable with DDR in
 * low-power mode), and grabs the CPU clock.
 *
 * Returns 0 on success, -ENODEV if the driver cannot register, or a
 * PTR_ERR code if cpu_clk is unavailable.
 */
static int __init pm_init(void)
{
	/* All of these are fixed static mappings, not dynamic ioremaps. */
	scu_base = IO_ADDRESS(SCU_BASE_ADDR);
	gpc_base = IO_ADDRESS(GPC_BASE_ADDR);
	src_base = IO_ADDRESS(SRC_BASE_ADDR);
	gic_dist_base = IO_ADDRESS(IC_DISTRIBUTOR_BASE_ADDR);
	gic_cpu_base = IO_ADDRESS(IC_INTERFACES_BASE_ADDR);
	local_twd_base = IO_ADDRESS(LOCAL_TWD_ADDR);
	anatop_base = IO_ADDRESS(ANATOP_BASE_ADDR);

	pr_info("Static Power Management for Freescale i.MX6\n");
	if (platform_driver_register(&mx6_pm_driver) != 0) {
		printk(KERN_ERR "mx6_pm_driver register failed\n");
		return -ENODEV;
	}
	suspend_set_ops(&mx6_suspend_ops);
	/* Move suspend routine into iRAM */
	cpaddr = (unsigned long)iram_alloc(SZ_4K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	 to be executable. */
	/* NOTE(review): iram_alloc()/__arm_ioremap() results are not
	 * checked — a failure here would crash in the memcpy below.
	 * Confirm iRAM allocation cannot fail at this init stage. */
	suspend_iram_base = __arm_ioremap(iram_paddr, SZ_4K,
					  MT_MEMORY_NONCACHED);
	pr_info("cpaddr = %x suspend_iram_base=%x\n",
		(unsigned int)cpaddr, (unsigned int)suspend_iram_base);
	/*
	 * Need to run the suspend code from IRAM as the DDR needs
	 * to be put into low power mode manually.
	 */
	memcpy((void *)cpaddr, mx6q_suspend, SZ_4K);
	suspend_in_iram = (void *)suspend_iram_base;

	cpu_clk = clk_get(NULL, "cpu_clk");
	if (IS_ERR(cpu_clk)) {
		printk(KERN_DEBUG "%s: failed to get cpu_clk\n", __func__);
		return PTR_ERR(cpu_clk);
	}
	printk(KERN_INFO "PM driver module loaded\n");
	return 0;
}
/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *tegra_ioremap(unsigned long p, size_t size, unsigned int type)
{
	void __iomem *mapping = IO_ADDRESS(p);

	/*
	 * __arm_ioremap fails to set the domain of ioremapped memory
	 * correctly, only use it on physical memory.
	 */
	if (mapping == NULL && p < SZ_1G)
		mapping = __arm_ioremap(p, size, type);

	/*
	 * If the physical address was not physical memory or statically
	 * mapped, there's nothing we can do to map it safely.
	 */
	BUG_ON(mapping == NULL);

	return mapping;
}
void configure_boot_globals(void) { static boot_globals_t bg; if (boot_globals_start != 0) { boot_globals=__arm_ioremap(boot_globals_start, BOOT_GLOBALS_SIZE, 0); } if(boot_globals) { /* Do nothing */ } else { boot_globals = &bg; } if(!check_boot_globals()) { unsigned long kernel_boot_flag_tmp; /* preserve the boot_flag even in a cold start case */ kernel_boot_flag_tmp=get_kernel_boot_flag(); memset(boot_globals, 0, BOOT_GLOBALS_SIZE); /* preserve the boot_flag even in a cold start case */ set_kernel_boot_flag(kernel_boot_flag_tmp); // Flag that we booted dirty (or just cold). // set_dirty_boot_flag(1); } else { // Flag that we didn't boot dirty (warm). // set_dirty_boot_flag(0); } // We're not warm until we've rebooted again cleanly, which our reboot notifier // callback will say. // set_warm_restart_flag(BOOT_GLOBALS_COLD_FLAG); install_reboot_notifier(); }
/*
 * pm_init() - set up static power management for i.MX6 (wait-mode variant).
 *
 * Resolves statically mapped controller bases, registers the PM driver and
 * suspend ops, copies the suspend routine into executable iRAM, and acquires
 * the clocks and the VDD3P0 regulator the suspend path needs.
 *
 * Returns 0 on success, -ENODEV if driver registration fails, or a PTR_ERR
 * code when a clock/regulator is unavailable.
 *
 * Fix: the regulator_get() failure message printed 'ret' (still 0 at that
 * point) instead of the actual error; it now reports PTR_ERR().
 */
static int __init pm_init(void)
{
	int ret = 0;

	/* Fixed static mappings — no dynamic ioremap needed. */
	scu_base = IO_ADDRESS(SCU_BASE_ADDR);
	gpc_base = IO_ADDRESS(GPC_BASE_ADDR);
	src_base = IO_ADDRESS(SRC_BASE_ADDR);
	gic_dist_base = IO_ADDRESS(IC_DISTRIBUTOR_BASE_ADDR);
	gic_cpu_base = IO_ADDRESS(IC_INTERFACES_BASE_ADDR);
	local_twd_base = IO_ADDRESS(LOCAL_TWD_ADDR);
	anatop_base = IO_ADDRESS(ANATOP_BASE_ADDR);

	pr_info("Static Power Management for Freescale i.MX6\n");
	pr_info("wait mode is %s for i.MX6\n", enable_wait_mode ?
		"enabled" : "disabled");
	if (platform_driver_register(&mx6_pm_driver) != 0) {
		printk(KERN_ERR "mx6_pm_driver register failed\n");
		return -ENODEV;
	}
	suspend_set_ops(&mx6_suspend_ops);
	/* Move suspend routine into iRAM */
	cpaddr = (unsigned long)iram_alloc(SZ_8K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	 to be executable. */
	suspend_iram_base = __arm_ioremap(iram_paddr, SZ_8K,
					  MT_MEMORY_NONCACHED);
	pr_info("cpaddr = %x suspend_iram_base=%x\n",
		(unsigned int)cpaddr, (unsigned int)suspend_iram_base);
	/*
	 * Need to run the suspend code from IRAM as the DDR needs
	 * to be put into low power mode manually.
	 */
	memcpy((void *)cpaddr, mx6_suspend, SZ_8K);
	suspend_in_iram = (void *)suspend_iram_base;

	cpu_clk = clk_get(NULL, "cpu_clk");
	if (IS_ERR(cpu_clk)) {
		printk(KERN_DEBUG "%s: failed to get cpu_clk\n", __func__);
		return PTR_ERR(cpu_clk);
	}
	axi_clk = clk_get(NULL, "axi_clk");
	if (IS_ERR(axi_clk)) {
		printk(KERN_DEBUG "%s: failed to get axi_clk\n", __func__);
		return PTR_ERR(axi_clk);
	}
	periph_clk = clk_get(NULL, "periph_clk");
	if (IS_ERR(periph_clk)) {
		printk(KERN_DEBUG "%s: failed to get periph_clk\n", __func__);
		return PTR_ERR(periph_clk);
	}
	pll3_usb_otg_main_clk = clk_get(NULL, "pll3_main_clk");
	if (IS_ERR(pll3_usb_otg_main_clk)) {
		printk(KERN_DEBUG "%s: failed to get pll3_main_clk\n",
			__func__);
		return PTR_ERR(pll3_usb_otg_main_clk);
	}

	vdd3p0_regulator = regulator_get(NULL, "cpu_vdd3p0");
	if (IS_ERR(vdd3p0_regulator)) {
		/* Report the real error, not the still-zero 'ret'. */
		printk(KERN_ERR "%s: failed to get 3p0 regulator Err: %d\n",
			__func__, (int)PTR_ERR(vdd3p0_regulator));
		return PTR_ERR(vdd3p0_regulator);
	}
	ret = regulator_set_voltage(vdd3p0_regulator, VDD3P0_VOLTAGE,
				    VDD3P0_VOLTAGE);
	if (ret) {
		printk(KERN_ERR
			"%s: failed to set 3p0 regulator voltage Err: %d\n",
			__func__, ret);
	}
	ret = regulator_enable(vdd3p0_regulator);
	if (ret) {
		printk(KERN_ERR
			"%s: failed to enable 3p0 regulator Err: %d\n",
			__func__, ret);
	}

	printk(KERN_INFO "PM driver module loaded\n");
	return 0;
}
int init_mmdc_settings(struct platform_device *busfreq_pdev) { struct device *dev = &busfreq_pdev->dev; struct platform_device *ocram_dev; unsigned int iram_paddr; int i, err; u32 cpu; struct device_node *node; struct gen_pool *iram_pool; node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-mmdc-combine"); if (!node) { printk(KERN_ERR "failed to find imx6q-mmdc device tree data!\n"); return -EINVAL; } mmdc_base = of_iomap(node, 0); WARN(!mmdc_base, "unable to map mmdc registers\n"); node = NULL; if (cpu_is_imx6q()) node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc"); if (cpu_is_imx6dl()) node = of_find_compatible_node(NULL, NULL, "fsl,imx6dl-iomuxc"); if (!node) { printk(KERN_ERR "failed to find imx6q-iomux device tree data!\n"); return -EINVAL; } iomux_base = of_iomap(node, 0); WARN(!iomux_base, "unable to map iomux registers\n"); node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm"); if (!node) { printk(KERN_ERR "failed to find imx6q-ccm device tree data!\n"); return -EINVAL; } ccm_base = of_iomap(node, 0); WARN(!mmdc_base, "unable to map mmdc registers\n"); node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache"); if (!node) { printk(KERN_ERR "failed to find imx6q-pl310-cache device tree data!\n"); return -EINVAL; } l2_base = of_iomap(node, 0); WARN(!mmdc_base, "unable to map mmdc registers\n"); node = NULL; node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic"); if (!node) { printk(KERN_ERR "failed to find imx6q-a9-gic device tree data!\n"); return -EINVAL; } gic_dist_base = of_iomap(node, 0); WARN(!gic_dist_base, "unable to map gic dist registers\n"); if (cpu_is_imx6q()) ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q) + ARRAY_SIZE(ddr3_calibration); if (cpu_is_imx6dl()) ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl) + ARRAY_SIZE(ddr3_calibration); normal_mmdc_settings = kmalloc((ddr_settings_size * 8), GFP_KERNEL); if (cpu_is_imx6q()) { memcpy(normal_mmdc_settings, ddr3_dll_mx6q, sizeof(ddr3_dll_mx6q)); memcpy(((char 
*)normal_mmdc_settings + sizeof(ddr3_dll_mx6q)), ddr3_calibration, sizeof(ddr3_calibration)); } if (cpu_is_imx6dl()) { memcpy(normal_mmdc_settings, ddr3_dll_mx6dl, sizeof(ddr3_dll_mx6dl)); memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6dl)), ddr3_calibration, sizeof(ddr3_calibration)); } /* store the original DDR settings at boot. */ for (i = 0; i < ddr_settings_size; i++) { /* * writes via command mode register cannot be read back. * hence hardcode them in the initial static array. * this may require modification on a per customer basis. */ if (normal_mmdc_settings[i][0] != 0x1C) normal_mmdc_settings[i][1] = readl_relaxed(mmdc_base + normal_mmdc_settings[i][0]); } irqs_used = devm_kzalloc(dev, sizeof(u32) * num_present_cpus(), GFP_KERNEL); for_each_present_cpu(cpu) { int irq; /* * set up a reserved interrupt to get all * the active cores into a WFE state * before changing the DDR frequency. */ irq = platform_get_irq(busfreq_pdev, cpu); err = request_irq(irq, wait_in_wfe_irq, IRQF_PERCPU, "mmdc_1", NULL); if (err) { dev_err(dev, "Busfreq:request_irq failed %d, err = %d\n", irq, err); return err; } err = irq_set_affinity(irq, cpumask_of(cpu)); if (err) { dev_err(dev, "Busfreq: Cannot set irq affinity irq=%d,\n", irq); return err; } irqs_used[cpu] = irq; } node = NULL; node = of_find_compatible_node(NULL, NULL, "mmio-sram"); if (!node) { dev_err(dev, "%s: failed to find ocram node\n", __func__); return -EINVAL; } ocram_dev = of_find_device_by_node(node); if (!ocram_dev) { dev_err(dev, "failed to find ocram device!\n"); return -EINVAL; } iram_pool = dev_get_gen_pool(&ocram_dev->dev); if (!iram_pool) { dev_err(dev, "iram pool unavailable!\n"); return -EINVAL; } iomux_settings_size = ARRAY_SIZE(iomux_offsets_mx6q); iram_iomux_settings = gen_pool_alloc(iram_pool, (iomux_settings_size * 8) + 8); if (!iram_iomux_settings) { dev_err(dev, "unable to alloc iram for IOMUX settings!\n"); return -ENOMEM; } /* * Allocate extra space to store the number of entries in 
the * ddr_settings plus 4 extra regsiter information that needs * to be passed to the frequency change code. * sizeof(iram_ddr_settings) = sizeof(ddr_settings) + * entries in ddr_settings + 16. * The last 4 enties store the addresses of the registers: * CCM_BASE_ADDR * MMDC_BASE_ADDR * IOMUX_BASE_ADDR * L2X0_BASE_ADDR */ iram_ddr_settings = gen_pool_alloc(iram_pool, (ddr_settings_size * 8) + 8 + 32); if (!iram_ddr_settings) { dev_err(dev, "unable to alloc iram for ddr settings!\n"); return -ENOMEM; } i = ddr_settings_size + 1; iram_ddr_settings[i][0] = (unsigned long)mmdc_base; iram_ddr_settings[i+1][0] = (unsigned long)ccm_base; iram_ddr_settings[i+2][0] = (unsigned long)iomux_base; iram_ddr_settings[i+3][0] = (unsigned long)l2_base; if (cpu_is_imx6q()) { /* store the IOMUX settings at boot. */ for (i = 0; i < iomux_settings_size; i++) { iomux_offsets_mx6q[i][1] = readl_relaxed(iomux_base + iomux_offsets_mx6q[i][0]); iram_iomux_settings[i+1][0] = iomux_offsets_mx6q[i][0]; iram_iomux_settings[i+1][1] = iomux_offsets_mx6q[i][1]; } } if (cpu_is_imx6dl()) { for (i = 0; i < iomux_settings_size; i++) { iomux_offsets_mx6dl[i][1] = readl_relaxed(iomux_base + iomux_offsets_mx6dl[i][0]); iram_iomux_settings[i+1][0] = iomux_offsets_mx6dl[i][0]; iram_iomux_settings[i+1][1] = iomux_offsets_mx6dl[i][1]; } } ddr_freq_change_iram_base = gen_pool_alloc(iram_pool, DDR_FREQ_CHANGE_SIZE); if (!ddr_freq_change_iram_base) { dev_err(dev, "Cannot alloc iram for ddr freq change code!\n"); return -ENOMEM; } iram_paddr = gen_pool_virt_to_phys(iram_pool, (unsigned long)ddr_freq_change_iram_base); /* * need to remap the area here since we want * the memory region to be executable. */ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr, DDR_FREQ_CHANGE_SIZE, MT_MEMORY_RWX_NONCACHED); mx6_change_ddr_freq = (void *)fncpy(ddr_freq_change_iram_base, &mx6_ddr3_freq_change, DDR_FREQ_CHANGE_SIZE); curr_ddr_rate = ddr_normal_rate; return 0; }
/*
 * busfreq_probe() - probe the i.MX6 bus-frequency scaling driver.
 *
 * Looks up every clock the scaling code manipulates, creates the sysfs
 * "enable" attribute, initialises the per-SoC bus/DDR rate bookkeeping and
 * stages the SoC-specific frequency-change code (iRAM copies on MX6SL).
 *
 * Returns 0 on success or a negative errno from the first failing lookup.
 *
 * Fixes: removed a duplicated IS_ERR(pll2) re-check that could only repeat
 * an already-performed test, and corrected the pll3_540 failure message
 * (it reported "periph_clk" by copy-paste).
 */
static int __devinit busfreq_probe(struct platform_device *pdev)
{
	u32 err;

	busfreq_dev = &pdev->dev;

	pll2_400 = clk_get(NULL, "pll2_pfd_400M");
	if (IS_ERR(pll2_400)) {
		printk(KERN_DEBUG "%s: failed to get pll2_pfd_400M\n",
			__func__);
		return PTR_ERR(pll2_400);
	}
	pll2_200 = clk_get(NULL, "pll2_200M");
	if (IS_ERR(pll2_200)) {
		printk(KERN_DEBUG "%s: failed to get pll2_200M\n", __func__);
		return PTR_ERR(pll2_200);
	}
	pll2 = clk_get(NULL, "pll2");
	if (IS_ERR(pll2)) {
		printk(KERN_DEBUG "%s: failed to get pll2\n", __func__);
		return PTR_ERR(pll2);
	}
	pll1 = clk_get(NULL, "pll1_main_clk");
	if (IS_ERR(pll1)) {
		printk(KERN_DEBUG "%s: failed to get pll1\n", __func__);
		return PTR_ERR(pll1);
	}
	pll1_sw_clk = clk_get(NULL, "pll1_sw_clk");
	if (IS_ERR(pll1_sw_clk)) {
		printk(KERN_DEBUG "%s: failed to get pll1_sw_clk\n", __func__);
		return PTR_ERR(pll1_sw_clk);
	}
	cpu_clk = clk_get(NULL, "cpu_clk");
	if (IS_ERR(cpu_clk)) {
		printk(KERN_DEBUG "%s: failed to get cpu_clk\n", __func__);
		return PTR_ERR(cpu_clk);
	}
	pll3 = clk_get(NULL, "pll3_main_clk");
	if (IS_ERR(pll3)) {
		printk(KERN_DEBUG "%s: failed to get pll3\n", __func__);
		return PTR_ERR(pll3);
	}
	pll3_540 = clk_get(NULL, "pll3_pfd_540M");
	if (IS_ERR(pll3_540)) {
		printk(KERN_DEBUG "%s: failed to get pll3_pfd_540M\n",
			__func__);
		return PTR_ERR(pll3_540);
	}
	pll3_sw_clk = clk_get(NULL, "pll3_sw_clk");
	if (IS_ERR(pll3_sw_clk)) {
		printk(KERN_DEBUG "%s: failed to get pll3_sw_clk\n", __func__);
		return PTR_ERR(pll3_sw_clk);
	}
	axi_clk = clk_get(NULL, "axi_clk");
	if (IS_ERR(axi_clk)) {
		printk(KERN_DEBUG "%s: failed to get axi_clk\n", __func__);
		return PTR_ERR(axi_clk);
	}
	ahb_clk = clk_get(NULL, "ahb");
	if (IS_ERR(ahb_clk)) {
		printk(KERN_DEBUG "%s: failed to get ahb_clk\n", __func__);
		return PTR_ERR(ahb_clk);
	}
	periph_clk = clk_get(NULL, "periph_clk");
	if (IS_ERR(periph_clk)) {
		printk(KERN_DEBUG "%s: failed to get periph_clk\n", __func__);
		return PTR_ERR(periph_clk);
	}
	osc_clk = clk_get(NULL, "osc");
	if (IS_ERR(osc_clk)) {
		printk(KERN_DEBUG "%s: failed to get osc_clk\n", __func__);
		return PTR_ERR(osc_clk);
	}
	mmdc_ch0_axi = clk_get(NULL, "mmdc_ch0_axi");
	if (IS_ERR(mmdc_ch0_axi)) {
		printk(KERN_DEBUG "%s: failed to get mmdc_ch0_axi\n",
			__func__);
		return PTR_ERR(mmdc_ch0_axi);
	}

	err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
	if (err) {
		printk(KERN_ERR "Unable to register sysdev entry for BUSFREQ");
		return err;
	}

	cpu_op_tbl = get_cpu_op(&cpu_op_nr);
	low_bus_freq_mode = 0;
	if (cpu_is_mx6dl()) {
		high_bus_freq_mode = 0;
		med_bus_freq_mode = 1;
		/* To make pll2_400 use count right, as when system enter
		   24M, it will disable pll2_400 */
		clk_enable(pll2_400);
	} else if (cpu_is_mx6sl()) {
		/* Set med_bus_freq_mode to 1 since med_bus_freq_mode
		   is not supported as yet for MX6SL */
		high_bus_freq_mode = 1;
		med_bus_freq_mode = 1;
	} else {
		high_bus_freq_mode = 1;
		med_bus_freq_mode = 0;
	}
	bus_freq_scaling_is_active = 0;
	bus_freq_scaling_initialized = 1;

	if (cpu_is_mx6q()) {
		ddr_low_rate = LPAPM_CLK;
		ddr_med_rate = DDR_MED_CLK;
		ddr_normal_rate = DDR3_NORMAL_CLK;
	}
	if (cpu_is_mx6dl() || cpu_is_mx6sl()) {
		ddr_low_rate = LPAPM_CLK;
		ddr_normal_rate = ddr_med_rate = DDR_MED_CLK;
	}

	INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
	register_pm_notifier(&imx_bus_freq_pm_notifier);

	if (!cpu_is_mx6sl())
		init_mmdc_settings();
	else {
		unsigned long iram_paddr;

		/* Allocate IRAM for WFI code when system is
		 * in low freq mode.
		 */
		iram_alloc(SZ_4K, &iram_paddr);
		/* Need to remap the area here since we want
		 * the memory region to be executable.
		 */
		mx6sl_wfi_iram_base = __arm_ioremap(iram_paddr, SZ_4K,
			MT_MEMORY_NONCACHED);
		memcpy(mx6sl_wfi_iram_base, mx6sl_wait, SZ_4K);
		mx6sl_wfi_iram = (void *)mx6sl_wfi_iram_base;

		/* Allocate IRAM for the DDR frequency-change code. */
		iram_alloc(SZ_4K, &iram_paddr);
		/* Need to remap the area here since we want the memory
		 * region to be executable.
		 */
		mx6sl_ddr_freq_base = __arm_ioremap(iram_paddr, SZ_4K,
			MT_MEMORY_NONCACHED);
		memcpy(mx6sl_ddr_freq_base, mx6sl_ddr_iram, SZ_4K);
		mx6sl_ddr_freq_change_iram = (void *)mx6sl_ddr_freq_base;
	}

	return 0;
}
/*
 * omap_ioremap() - intercept ioremap() requests for addresses inside the
 * OMAP static mapping windows; everything else falls through to
 * __arm_ioremap().
 *
 * Fix: the OMAP2420 DSP_IPI window translated with DSP_IPI_24XX_SIZE as
 * the virtual base (copy-paste); it now uses DSP_IPI_24XX_VIRT, matching
 * every other XLATE() call in this function.
 */
void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
{
#ifdef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap1()) {
		if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
			return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
	}
	if (cpu_is_omap730()) {
		if (BETWEEN(p, OMAP730_DSP_BASE, OMAP730_DSP_SIZE))
			return XLATE(p, OMAP730_DSP_BASE, OMAP730_DSP_START);
		if (BETWEEN(p, OMAP730_DSPREG_BASE, OMAP730_DSPREG_SIZE))
			return XLATE(p, OMAP730_DSPREG_BASE,
					OMAP730_DSPREG_START);
	}
	if (cpu_is_omap15xx()) {
		if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
			return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);
		if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
			return XLATE(p, OMAP1510_DSPREG_BASE,
					OMAP1510_DSPREG_START);
	}
	if (cpu_is_omap16xx()) {
		if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
			return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);
		if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
			return XLATE(p, OMAP16XX_DSPREG_BASE,
					OMAP16XX_DSPREG_START);
	}
#endif
#ifdef CONFIG_ARCH_OMAP2
	if (cpu_is_omap24xx()) {
		if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
			return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
		if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
			return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
	}
	if (cpu_is_omap2420()) {
		if (BETWEEN(p, DSP_MEM_24XX_PHYS, DSP_MEM_24XX_SIZE))
			return XLATE(p, DSP_MEM_24XX_PHYS, DSP_MEM_24XX_VIRT);
		/* Fixed: third XLATE argument was DSP_IPI_24XX_SIZE. */
		if (BETWEEN(p, DSP_IPI_24XX_PHYS, DSP_IPI_24XX_SIZE))
			return XLATE(p, DSP_IPI_24XX_PHYS, DSP_IPI_24XX_VIRT);
		if (BETWEEN(p, DSP_MMU_24XX_PHYS, DSP_MMU_24XX_SIZE))
			return XLATE(p, DSP_MMU_24XX_PHYS, DSP_MMU_24XX_VIRT);
	}
	if (cpu_is_omap2430()) {
		if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
			return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
		if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
			return XLATE(p, OMAP243X_GPMC_PHYS,
					OMAP243X_GPMC_VIRT);
		if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
			return XLATE(p, OMAP243X_SDRC_PHYS,
					OMAP243X_SDRC_VIRT);
		if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
			return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP3
	if (cpu_is_omap34xx()) {
		if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
			return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
		if (BETWEEN(p, L4_WK_34XX_PHYS, L4_WK_34XX_SIZE))
			return XLATE(p, L4_WK_34XX_PHYS, L4_WK_34XX_VIRT);
		if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
			return XLATE(p, OMAP34XX_GPMC_PHYS,
					OMAP34XX_GPMC_VIRT);
		if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
			return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
		if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
			return XLATE(p, OMAP343X_SDRC_PHYS,
					OMAP343X_SDRC_VIRT);
		if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
			return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
		if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
			return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP4
	if (cpu_is_omap44xx()) {
		if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
			return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
		if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
			return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
		if (BETWEEN(p, L4_WK_44XX_PHYS, L4_WK_44XX_SIZE))
			return XLATE(p, L4_WK_44XX_PHYS, L4_WK_44XX_VIRT);
		if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
			return XLATE(p, OMAP44XX_GPMC_PHYS,
					OMAP44XX_GPMC_VIRT);
		if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
			return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
		if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
	}
#endif
	return __arm_ioremap(p, size, type);
}
int init_mmdc_lpddr2_settings(struct platform_device *busfreq_pdev) { struct platform_device *ocram_dev; unsigned int iram_paddr; struct device_node *node; struct gen_pool *iram_pool; busfreq_dev = &busfreq_pdev->dev; node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-mmdc"); if (!node) { printk(KERN_ERR "failed to find imx6sl-mmdc device tree data!\n"); return -EINVAL; } mmdc_base = of_iomap(node, 0); WARN(!mmdc_base, "unable to map mmdc registers\n"); node = NULL; node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-ccm"); if (!node) { printk(KERN_ERR "failed to find imx6sl-ccm device tree data!\n"); return -EINVAL; } ccm_base = of_iomap(node, 0); WARN(!ccm_base, "unable to map ccm registers\n"); node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache"); if (!node) { printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n"); return -EINVAL; } l2_base = of_iomap(node, 0); WARN(!l2_base, "unable to map PL310 registers\n"); node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop"); if (!node) { printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n"); return -EINVAL; } anatop_base = of_iomap(node, 0); WARN(!anatop_base, "unable to map anatop registers\n"); node = NULL; node = of_find_compatible_node(NULL, NULL, "mmio-sram"); if (!node) { dev_err(busfreq_dev, "%s: failed to find ocram node\n", __func__); return -EINVAL; } ocram_dev = of_find_device_by_node(node); if (!ocram_dev) { dev_err(busfreq_dev, "failed to find ocram device!\n"); return -EINVAL; } iram_pool = dev_get_gen_pool(&ocram_dev->dev); if (!iram_pool) { dev_err(busfreq_dev, "iram pool unavailable!\n"); return -EINVAL; } reg_addrs[0] = (unsigned long)anatop_base; reg_addrs[1] = (unsigned long)ccm_base; reg_addrs[2] = (unsigned long)mmdc_base; reg_addrs[3] = (unsigned long)l2_base; ddr_freq_change_iram_base = (void *)gen_pool_alloc(iram_pool, LPDDR2_FREQ_CHANGE_SIZE); if (!ddr_freq_change_iram_base) { dev_err(busfreq_dev, "Cannot alloc iram for ddr freq 
change code!\n"); return -ENOMEM; } iram_paddr = gen_pool_virt_to_phys(iram_pool, (unsigned long)ddr_freq_change_iram_base); /* * Need to remap the area here since we want * the memory region to be executable. */ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr, LPDDR2_FREQ_CHANGE_SIZE, MT_MEMORY_NONCACHED); mx6_change_lpddr2_freq = (void *)fncpy(ddr_freq_change_iram_base, &mx6_lpddr2_freq_change, LPDDR2_FREQ_CHANGE_SIZE); curr_ddr_rate = ddr_normal_rate; return 0; }
/*
 * init_ddr_settings() - capture MX50 databahn settings and stage the DDR
 * frequency-change and WFI routines in executable iRAM.
 *
 * Detects the memory type from the databahn controller, snapshots its
 * register settings, copies the frequency-change and wait-in-iram code
 * into remapped iRAM, enables the QoSC, and programs automatic low-power
 * mode 4 entry. Returns early (void) on any failure.
 */
void init_ddr_settings(void)
{
	unsigned long iram_paddr;
	unsigned int reg;
	int i;
	struct clk *ddr_clk = clk_get(NULL, "ddr_clk");

	databahn_base = ioremap(MX50_DATABAHN_BASE_ADDR, SZ_16K);

	/* Find the memory type, LPDDR2 or mddr. */
	mx50_ddr_type = __raw_readl(databahn_base) & 0xF00;
	if (mx50_ddr_type == MX50_LPDDR2) {
		normal_databahn_settings = lpddr2_databhan_regs_offsets;
		ddr_settings_size = ARRAY_SIZE(lpddr2_databhan_regs_offsets);
	} else if (mx50_ddr_type == MX50_MDDR) {
		normal_databahn_settings = mddr_databhan_regs_offsets;
		ddr_settings_size = ARRAY_SIZE(mddr_databhan_regs_offsets);
	} else if (mx50_ddr_type == MX50_DDR2) {
		normal_databahn_settings = ddr2_databhan_regs_offsets;
		ddr_settings_size = ARRAY_SIZE(ddr2_databhan_regs_offsets);
	} else {
		printk(KERN_DEBUG "%s: Unsupported memory type\n", __func__);
		return;
	}
	/* Copy the databhan settings into the iram location. */
	for (i = 0; i < ddr_settings_size; i++) {
		normal_databahn_settings[i][1] =
			__raw_readl(databahn_base +
			normal_databahn_settings[i][0]);
	}
	/* Store the size of the array in iRAM also,
	 * increase the size by 8 bytes.
	 */
	/* NOTE(review): allocation is ddr_settings_size + 8 BYTES, although
	 * each setting appears to occupy two words — confirm the intended
	 * size (likely ddr_settings_size * 8 + 8). */
	iram_ddr_settings = iram_alloc(ddr_settings_size + 8, &iram_paddr);
	if (iram_ddr_settings == NULL) {
		printk(KERN_DEBUG
			"%s: failed to allocate iRAM memory for ddr settings\n",
			__func__);
		return;
	}
	/* Allocate IRAM for the DDR freq change code. */
	iram_alloc(SZ_8K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	 to be executable. */
	ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
		SZ_8K, MT_HIGH_VECTORS);
	memcpy(ddr_freq_change_iram_base, mx50_ddr_freq_change, SZ_8K);
	change_ddr_freq = (void *)ddr_freq_change_iram_base;

	qosc_base = ioremap(QOSC_BASE_ADDR, SZ_4K);
	/* Enable the QoSC */
	reg = __raw_readl(qosc_base);
	reg &= ~0xC0000000;
	__raw_writel(reg, qosc_base);

	/* Allocate IRAM to run the WFI code from iram, since
	 * we can turn off the DDR clocks when ARM is in WFI.
	 */
	iram_alloc(SZ_4K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	 to be executable. */
	wait_in_iram_base = __arm_ioremap(iram_paddr,
			SZ_4K, MT_HIGH_VECTORS);
	memcpy(wait_in_iram_base, mx50_wait, SZ_4K);
	wait_in_iram = (void *)wait_in_iram_base;

	clk_enable(ddr_clk);
	/* Set the DDR to enter automatic self-refresh. */
	/* Set the DDR to automatically enter lower power mode 4. */
	reg = __raw_readl(databahn_base + DATABAHN_CTL_REG22);
	reg &= ~LOWPOWER_AUTOENABLE_MASK;
	reg |= 1 << 1;
	__raw_writel(reg, databahn_base + DATABAHN_CTL_REG22);

	/* set the counter for entering mode 4. */
	reg = __raw_readl(databahn_base + DATABAHN_CTL_REG21);
	reg &= ~LOWPOWER_EXTERNAL_CNT_MASK;
	/* NOTE(review): plain '=' below discards the value read and masked
	 * just above; '|=' looks intended — confirm against the databahn
	 * register spec before changing. */
	reg = 128 << LOWPOWER_EXTERNAL_CNT_OFFSET;
	__raw_writel(reg, databahn_base + DATABAHN_CTL_REG21);

	/* Enable low power mode 4 */
	reg = __raw_readl(databahn_base + DATABAHN_CTL_REG20);
	reg &= ~LOWPOWER_CONTROL_MASK;
	reg |= 1 << 1;
	__raw_writel(reg, databahn_base + DATABAHN_CTL_REG20);
	clk_disable(ddr_clk);

	epdc_clk = clk_get(NULL, "epdc_axi");
	if (IS_ERR(epdc_clk)) {
		printk(KERN_DEBUG "%s: failed to get epdc_axi_clk\n",
			__func__);
		return;
	}
}
/*
 * post_cpu_init() - late CPU init for i.MX6: open up AIPS bridges, tweak
 * SCU/SRC behavior, and stage the WFI routine in executable iRAM.
 *
 * Always returns 0.
 */
static int __init post_cpu_init(void)
{
	unsigned int reg;
	void __iomem *base;
	unsigned long iram_paddr, cpaddr;

	iram_init(MX6Q_IRAM_BASE_ADDR, MX6Q_IRAM_SIZE);

	/* Clear the AIPS-1 master/peripheral access restriction registers. */
	base = ioremap(AIPS1_ON_BASE_ADDR, PAGE_SIZE);
	__raw_writel(0x0, base + 0x40);
	__raw_writel(0x0, base + 0x44);
	__raw_writel(0x0, base + 0x48);
	__raw_writel(0x0, base + 0x4C);
	reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
	__raw_writel(reg, base + 0x50);
	iounmap(base);

	/* Same treatment for the AIPS-2 bridge. */
	base = ioremap(AIPS2_ON_BASE_ADDR, PAGE_SIZE);
	__raw_writel(0x0, base + 0x40);
	__raw_writel(0x0, base + 0x44);
	__raw_writel(0x0, base + 0x48);
	__raw_writel(0x0, base + 0x4C);
	reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
	__raw_writel(reg, base + 0x50);
	iounmap(base);

	if (enable_wait_mode) {
		/* Allow SCU_CLK to be disabled when all cores are in WFI*/
		base = IO_ADDRESS(SCU_BASE_ADDR);
		reg = __raw_readl(base);
		reg |= 0x20;
		__raw_writel(reg, base);
	}

	/* Disable SRC warm reset to work aound system reboot issue */
	base = IO_ADDRESS(SRC_BASE_ADDR);
	reg = __raw_readl(base);
	reg &= ~0x1;
	__raw_writel(reg, base);

	/* Allocate IRAM for WAIT code. */
	/* Move wait routine into iRAM */
	cpaddr = (unsigned long)iram_alloc(SZ_4K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	 to be executable. */
	/* NOTE(review): iram_alloc()/__arm_ioremap() results are unchecked —
	 * a failure here would fault in the memcpy below. */
	mx6_wait_in_iram_base = __arm_ioremap(iram_paddr, SZ_4K,
					      MT_MEMORY_NONCACHED);
	pr_info("cpaddr = %x wait_iram_base=%x\n",
		(unsigned int)cpaddr, (unsigned int)mx6_wait_in_iram_base);
	/*
	 * Need to run the suspend code from IRAM as the DDR needs
	 * to be put into low power mode manually.
	 */
	memcpy((void *)cpaddr, mx6_wait, SZ_4K);
	mx6_wait_in_iram = (void *)mx6_wait_in_iram_base;

	gpc_base = MX6_IO_ADDRESS(GPC_BASE_ADDR);
	ccm_base = MX6_IO_ADDRESS(CCM_BASE_ADDR);
	return 0;
}