/*
 * mx6_usb_h2_init - register the USB host2 (HSIC) controller and its
 * wakeup helper device, then apply host2-specific ANATOP settings.
 *
 * NOTE(review): the pdev/pdev_wakeup results are dereferenced without
 * IS_ERR() checks -- confirm the imx6*_add_* helpers cannot fail here.
 */
void __init mx6_usb_h2_init(void)
{
	struct platform_device *pdev, *pdev_wakeup;
	static void __iomem *anatop_base_addr = MX6_IO_ADDRESS(ANATOP_BASE_ADDR);

	usbh2_config.wakeup_pdata = &usbh2_wakeup_config;
	/* Select the HSIC STROBE pad setting for the detected SoC (0 if none). */
	hsic_strobe_start_pad = cpu_is_mx6q() ?
		MX6Q_PAD_RGMII_TX_CTL__USBOH3_H2_STROBE_START :
		(cpu_is_mx6dl() ?
			MX6DL_PAD_RGMII_TX_CTL__USBOH3_H2_STROBE_START :
			(cpu_is_mx6sl() ?
				MX6SL_PAD_HSIC_STROBE__USB_H_STROBE_START : 0));
	/* Register the EHCI host2 device for the right SoC family. */
	if (cpu_is_mx6sl())
		pdev = imx6sl_add_fsl_ehci_hs(2, &usbh2_config);
	else
		pdev = imx6q_add_fsl_ehci_hs(2, &usbh2_config);
	/* Cross-link host platform data and wakeup platform data. */
	usbh2_wakeup_config.usb_pdata[0] = pdev->dev.platform_data;
	if (cpu_is_mx6sl())
		pdev_wakeup = imx6sl_add_fsl_usb2_hs_wakeup(2,
				&usbh2_wakeup_config);
	else
		pdev_wakeup = imx6q_add_fsl_usb2_hs_wakeup(2,
				&usbh2_wakeup_config);
	platform_device_add(pdev);
	((struct fsl_usb2_platform_data *)(pdev->dev.platform_data))
		->wakeup_pdata = pdev_wakeup->dev.platform_data;
	/* Some phy and power's special controls for host2
	 * 1. Its 480M is from OTG's 480M
	 * 2. EN_USB_CLKS should always be opened
	 */
	__raw_writel(BM_ANADIG_USB1_PLL_480_CTRL_EN_USB_CLKS,
			anatop_base_addr + HW_ANADIG_USB1_PLL_480_CTRL_SET);
	/* must change the clkgate delay to 2 or 3 to avoid
	 * 24M OSCI clock not stable issue
	 */
	__raw_writel(BF_ANADIG_ANA_MISC0_CLKGATE_DELAY(3),
			anatop_base_addr + HW_ANADIG_ANA_MISC0);
}
/*
 * mx6_usb_h1_init - register the USB host1 (HS1) controller and its
 * wakeup helper device; powers host1 VBUS and enables the PHY PLL.
 *
 * NOTE(review): pdev/pdev_wakeup are not declared in this function, so
 * they must be file-scope variables -- confirm in the full file.
 */
static int __init mx6_usb_h1_init(void)
{
	static void __iomem *anatop_base_addr = MX6_IO_ADDRESS(ANATOP_BASE_ADDR);
	/* Per-SoC device tables; presumably referenced by the
	 * imx6*_add_* helper macros below -- TODO confirm. */
	struct imx_fsl_usb2_wakeup_data imx6q_fsl_hs_wakeup_data[] = {
		imx_fsl_usb2_wakeup_data_entry_single(MX6Q, 1, HS1)
	};
	struct imx_fsl_usb2_wakeup_data imx6sl_fsl_hs_wakeup_data[] = {
		imx_fsl_usb2_wakeup_data_entry_single(MX6SL, 1, HS1)
	};
	struct imx_mxc_ehci_data imx6q_mxc_ehci_hs_data[] = {
		imx_mxc_ehci_data_entry_single(MX6Q, 1, HS1)
	};
	struct imx_mxc_ehci_data imx6sl_mxc_ehci_hs_data[] = {
		imx_mxc_ehci_data_entry_single(MX6SL, 1, HS1)
	};

	/* Fetch the board-specific VBUS switch and turn host1 VBUS on now. */
	mx6_get_host1_vbus_func(&mx6_set_usb_host1_vbus);
	if (mx6_set_usb_host1_vbus)
		mx6_set_usb_host1_vbus(true);
	/* Some phy and power's special controls for host1
	 * 1. The external charger detector needs to be disabled
	 * or the signal at DP will be poor
	 * 2. The PLL's power and output to usb for host 1
	 * is totally controlled by IC, so the Software only needs
	 * to enable them at initializtion.
	 */
	__raw_writel(BM_ANADIG_USB2_CHRG_DETECT_EN_B
			| BM_ANADIG_USB2_CHRG_DETECT_CHK_CHRG_B,
			anatop_base_addr + HW_ANADIG_USB2_CHRG_DETECT);
	/* Take the PLL out of bypass, then power/enable it with USB clocks. */
	__raw_writel(BM_ANADIG_USB2_PLL_480_CTRL_BYPASS,
			anatop_base_addr + HW_ANADIG_USB2_PLL_480_CTRL_CLR);
	__raw_writel(BM_ANADIG_USB2_PLL_480_CTRL_ENABLE
			| BM_ANADIG_USB2_PLL_480_CTRL_POWER
			| BM_ANADIG_USB2_PLL_480_CTRL_EN_USB_CLKS,
			anatop_base_addr + HW_ANADIG_USB2_PLL_480_CTRL_SET);
	usbh1_config.wakeup_pdata = &usbh1_wakeup_config;
	/* Pick root-hub suspend/resume hooks: the _swfix variants carry the
	 * software workaround for the USB IC bug on affected revisions. */
	if (usb_icbug_swfix_need()) {
		usbh1_config.platform_rh_suspend = usbh1_platform_rh_suspend_swfix;
		usbh1_config.platform_rh_resume = usbh1_platform_rh_resume_swfix;
	} else {
		usbh1_config.platform_rh_suspend = usbh1_platform_rh_suspend;
		usbh1_config.platform_rh_resume = usbh1_platform_rh_resume;
	}
	/* Register the EHCI host1 and its wakeup device for the right SoC. */
	if (cpu_is_mx6sl())
		pdev = imx6sl_add_fsl_ehci_hs(1, &usbh1_config);
	else
		pdev = imx6q_add_fsl_ehci_hs(1, &usbh1_config);
	usbh1_wakeup_config.usb_pdata[0] = pdev->dev.platform_data;
	if (cpu_is_mx6sl())
		pdev_wakeup = imx6sl_add_fsl_usb2_hs_wakeup(1,
				&usbh1_wakeup_config);
	else
		pdev_wakeup = imx6q_add_fsl_usb2_hs_wakeup(1,
				&usbh1_wakeup_config);
	platform_device_add(pdev);
	/* Cross-link the host pdata to its wakeup pdata. */
	((struct fsl_usb2_platform_data *)(pdev->dev.platform_data))
		->wakeup_pdata = (struct fsl_usb2_wakeup_platform_data *)
			(pdev_wakeup->dev.platform_data);
	return 0;
}
/*
 * mx6_usb_h1_init - register the USB host1 (HS1) controller and its
 * wakeup helper device.  This variant hands the VBUS switch to the
 * driver (platform_driver_vbus) and parks the PHY PLL off until the
 * USB host driver loads.
 *
 * NOTE(review): pdev/pdev_wakeup are not declared in this function, so
 * they must be file-scope variables -- confirm in the full file.
 */
static int __init mx6_usb_h1_init(void)
{
	static void __iomem *anatop_base_addr = MX6_IO_ADDRESS(ANATOP_BASE_ADDR);
	/* Per-SoC device tables; presumably referenced by the
	 * imx6*_add_* helper macros below -- TODO confirm. */
	struct imx_fsl_usb2_wakeup_data imx6q_fsl_hs_wakeup_data[] = {
		imx_fsl_usb2_wakeup_data_entry_single(MX6Q, 1, HS1)};
	struct imx_fsl_usb2_wakeup_data imx6sl_fsl_hs_wakeup_data[] = {
		imx_fsl_usb2_wakeup_data_entry_single(MX6SL, 1, HS1)};
	struct imx_mxc_ehci_data imx6q_mxc_ehci_hs_data[] = {
		imx_mxc_ehci_data_entry_single(MX6Q, 1, HS1)};
	struct imx_mxc_ehci_data imx6sl_mxc_ehci_hs_data[] = {
		imx_mxc_ehci_data_entry_single(MX6SL, 1, HS1)};

	/* Let the USB driver toggle VBUS itself instead of forcing it on. */
	mx6_get_host1_vbus_func(&mx6_set_usb_host1_vbus);
	usbh1_config.platform_driver_vbus = mx6_set_usb_host1_vbus;
	/* The external charger detector needs to be disabled
	 * or the signal at DP will be poor
	 */
	__raw_writel(BM_ANADIG_USB2_CHRG_DETECT_EN_B
			| BM_ANADIG_USB2_CHRG_DETECT_CHK_CHRG_B,
			anatop_base_addr + HW_ANADIG_USB2_CHRG_DETECT);
	/* Turn off PHY PLL until USB host driver is loaded */
	__raw_writel(BM_ANADIG_USB2_PLL_480_CTRL_BYPASS,
			anatop_base_addr + HW_ANADIG_USB2_PLL_480_CTRL_SET);
	__raw_writel(BM_ANADIG_USB2_PLL_480_CTRL_ENABLE
			| BM_ANADIG_USB2_PLL_480_CTRL_POWER
			| BM_ANADIG_USB2_PLL_480_CTRL_EN_USB_CLKS,
			anatop_base_addr + HW_ANADIG_USB2_PLL_480_CTRL_CLR);
	usbh1_config.wakeup_pdata = &usbh1_wakeup_config;
	/* Pick root-hub suspend/resume hooks: the _swfix variants carry the
	 * software workaround for the USB IC bug on affected revisions. */
	if (usb_icbug_swfix_need()) {
		usbh1_config.platform_rh_suspend = usbh1_platform_rh_suspend_swfix;
		usbh1_config.platform_rh_resume = usbh1_platform_rh_resume_swfix;
	} else {
		usbh1_config.platform_rh_suspend = usbh1_platform_rh_suspend;
		usbh1_config.platform_rh_resume = usbh1_platform_rh_resume;
	}
	/* Register the EHCI host1 and its wakeup device for the right SoC. */
	if (cpu_is_mx6sl())
		pdev = imx6sl_add_fsl_ehci_hs(1, &usbh1_config);
	else
		pdev = imx6q_add_fsl_ehci_hs(1, &usbh1_config);
	usbh1_wakeup_config.usb_pdata[0] = pdev->dev.platform_data;
	if (cpu_is_mx6sl())
		pdev_wakeup = imx6sl_add_fsl_usb2_hs_wakeup(1,
				&usbh1_wakeup_config);
	else
		pdev_wakeup = imx6q_add_fsl_usb2_hs_wakeup(1,
				&usbh1_wakeup_config);
	platform_device_add(pdev);
	/* Cross-link the host pdata to its wakeup pdata. */
	((struct fsl_usb2_platform_data *)(pdev->dev.platform_data))
		->wakeup_pdata = (struct fsl_usb2_wakeup_platform_data *)
			(pdev_wakeup->dev.platform_data);
	return 0;
}
/*
 * mx6_suspend_store - snapshot CCM/ANATOP/SCU/GPC registers into
 * file-scope variables before entering suspend so that
 * mx6_suspend_restore() can put them back on resume.
 */
static void mx6_suspend_store(void)
{
	/* save some settings before suspend */
	ccm_ccr = __raw_readl(MXC_CCM_CCR);
	ccm_clpcr = __raw_readl(MXC_CCM_CLPCR);
	ccm_analog_pfd528 = __raw_readl(PFD_528_BASE_ADDR);
	ccm_analog_pll3_480 = __raw_readl(PLL3_480_USB1_BASE_ADDR);
	ccm_anadig_ana_misc2 = __raw_readl(MXC_PLL_BASE + HW_ANADIG_ANA_MISC2);
	/* Clock gating registers for the domains touched during suspend. */
	ccgr1 = __raw_readl(MXC_CCM_CCGR1);
	ccgr2 = __raw_readl(MXC_CCM_CCGR2);
	ccgr3 = __raw_readl(MXC_CCM_CCGR3);
	ccgr6 = __raw_readl(MXC_CCM_CCGR6);
	scu_ctrl = __raw_readl(scu_base + SCU_CTRL_OFFSET);
	/* GPC interrupt masks and CPU power-gating controller state. */
	gpc_imr[0] = __raw_readl(gpc_base + GPC_IMR1_OFFSET);
	gpc_imr[1] = __raw_readl(gpc_base + GPC_IMR2_OFFSET);
	gpc_imr[2] = __raw_readl(gpc_base + GPC_IMR3_OFFSET);
	gpc_imr[3] = __raw_readl(gpc_base + GPC_IMR4_OFFSET);
	gpc_cpu_pup = __raw_readl(gpc_base + GPC_PGC_CPU_PUPSCR_OFFSET);
	gpc_cpu_pdn = __raw_readl(gpc_base + GPC_PGC_CPU_PDNSCR_OFFSET);
	gpc_cpu = __raw_readl(gpc_base + GPC_PGC_CPU_PDN_OFFSET);
	gpc_ctr = __raw_readl(gpc_base + GPC_CNTR_OFFSET);
	/* Only MX6SL has the separate display power-gating domain. */
	if (cpu_is_mx6sl())
		gpc_disp = __raw_readl(gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
	anatop[0] = __raw_readl(anatop_base + ANATOP_REG_2P5_OFFSET);
	anatop[1] = __raw_readl(anatop_base + ANATOP_REG_CORE_OFFSET);
}
static int mx6_get_srev(void) { void __iomem *anatop = MX6_IO_ADDRESS(ANATOP_BASE_ADDR); u32 rev; if (cpu_is_mx6sl()) rev = __raw_readl(anatop + MX6SL_USB_ANALOG_DIGPROG); else rev = __raw_readl(anatop + MX6_USB_ANALOG_DIGPROG); rev &= 0xff; if (rev == 0) return IMX_CHIP_REVISION_1_0; else if (rev == 1) return IMX_CHIP_REVISION_1_1; else if (rev == 2) return IMX_CHIP_REVISION_1_2; else if (rev == 3) return IMX_CHIP_REVISION_1_3; else if (rev == 4) return IMX_CHIP_REVISION_1_4; else if (rev == 5) return IMX_CHIP_REVISION_1_5; return IMX_CHIP_REVISION_UNKNOWN; }
/*
 * mx6_suspend_restore - write back the register snapshot taken by
 * mx6_suspend_store() after resuming from suspend.
 */
static void mx6_suspend_restore(void)
{
	/* restore settings after suspend */
	__raw_writel(anatop[0], anatop_base + ANATOP_REG_2P5_OFFSET);
	__raw_writel(anatop[1], anatop_base + ANATOP_REG_CORE_OFFSET);

	/* Per spec, the count needs to be zeroed and reconfigured on exit from
	 * low power mode.
	 */
	__raw_writel(ccm_ccr & ~MXC_CCM_CCR_REG_BYPASS_CNT_MASK &
		~MXC_CCM_CCR_WB_COUNT_MASK, MXC_CCM_CCR);
	udelay(50);
	__raw_writel(ccm_ccr, MXC_CCM_CCR);

	__raw_writel(ccm_clpcr, MXC_CCM_CLPCR);
	__raw_writel(scu_ctrl, scu_base + SCU_CTRL_OFFSET);
	/* GPC interrupt masks and CPU power-gating controller state. */
	__raw_writel(gpc_imr[0], gpc_base + GPC_IMR1_OFFSET);
	__raw_writel(gpc_imr[1], gpc_base + GPC_IMR2_OFFSET);
	__raw_writel(gpc_imr[2], gpc_base + GPC_IMR3_OFFSET);
	__raw_writel(gpc_imr[3], gpc_base + GPC_IMR4_OFFSET);
	__raw_writel(gpc_cpu_pup, gpc_base + GPC_PGC_CPU_PUPSCR_OFFSET);
	__raw_writel(gpc_cpu_pdn, gpc_base + GPC_PGC_CPU_PDNSCR_OFFSET);
	__raw_writel(gpc_cpu, gpc_base + GPC_PGC_CPU_PDN_OFFSET);
	/* Only MX6SL has the separate display power-gating domain. */
	if (cpu_is_mx6sl())
		__raw_writel(gpc_disp, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
	__raw_writel(ccgr1, MXC_CCM_CCGR1);
	__raw_writel(ccgr2, MXC_CCM_CCGR2);
	__raw_writel(ccgr3, MXC_CCM_CCGR3);
	__raw_writel(ccgr6, MXC_CCM_CCGR6);
	__raw_writel(ccm_analog_pfd528, PFD_528_BASE_ADDR);
	__raw_writel(ccm_analog_pll3_480, PLL3_480_USB1_BASE_ADDR);
	__raw_writel(ccm_anadig_ana_misc2, MXC_PLL_BASE + HW_ANADIG_ANA_MISC2);
}
int mxc_init_l2x0(void) { unsigned int val; #define IOMUXC_GPR11_L2CACHE_AS_OCRAM 0x00000002 val = readl(IOMUXC_GPR11); if (cpu_is_mx6sl() && (val & IOMUXC_GPR11_L2CACHE_AS_OCRAM)) { /* L2 cache configured as OCRAM, reset it */ val &= ~IOMUXC_GPR11_L2CACHE_AS_OCRAM; writel(val, IOMUXC_GPR11); } writel(0x132, IO_ADDRESS(L2_BASE_ADDR + L2X0_TAG_LATENCY_CTRL)); writel(0x132, IO_ADDRESS(L2_BASE_ADDR + L2X0_DATA_LATENCY_CTRL)); val = readl(IO_ADDRESS(L2_BASE_ADDR + L2X0_PREFETCH_CTRL)); val |= 0x40800000; writel(val, IO_ADDRESS(L2_BASE_ADDR + L2X0_PREFETCH_CTRL)); val = readl(IO_ADDRESS(L2_BASE_ADDR + L2X0_POWER_CTRL)); val |= L2X0_DYNAMIC_CLK_GATING_EN; val |= L2X0_STNDBY_MODE_EN; writel(val, IO_ADDRESS(L2_BASE_ADDR + L2X0_POWER_CTRL)); l2x0_init(IO_ADDRESS(L2_BASE_ADDR), 0x0, ~0x00000000); return 0; }
static void usbh1_platform_rh_suspend(struct fsl_usb2_platform_data *pdata) { /*for mx6sl ,we do not need any sw fix*/ if (cpu_is_mx6sl()) return ; __raw_writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT, MX6_IO_ADDRESS(pdata->phy_regs) + HW_USBPHY_CTRL_CLR); }
/*
 * usb_icbug_swfix_need - report whether this silicon still needs the
 * software workaround for the USB IC bug.
 *
 * Returns false on MX6SL, on MX6Q newer than rev 1.1, and on MX6DL
 * newer than rev 1.0; true otherwise.
 */
bool usb_icbug_swfix_need(void)
{
	if (cpu_is_mx6sl())
		return false;

	/* Same short-circuit order as before: check the Q revision first. */
	return (mx6q_revision() <= IMX_CHIP_REVISION_1_1) &&
	       (mx6dl_revision() <= IMX_CHIP_REVISION_1_0);
}
/*
 * mx6sl_revision - return the silicon revision of an i.MX6SL cpu.
 *
 * Returns -EINVAL when not running on an i.MX6SL; otherwise the
 * revision is read once via mx6_get_srev() and cached in
 * cpu_silicon_rev for later calls.
 */
int mx6sl_revision(void)
{
	if (!cpu_is_mx6sl())
		return -EINVAL;

	if (cpu_silicon_rev == -1) {
		/* First call: latch the revision from the hardware. */
		cpu_silicon_rev = mx6_get_srev();
	}

	return cpu_silicon_rev;
}
void mx6_cpu_op_init(void) { unsigned int reg; void __iomem *base; if (!cpu_is_mx6sl()) { /*read fuse bit to know the max cpu freq : offset 0x440 * bit[17:16]:SPEED_GRADING[1:0],for mx6dq/dl*/ base = IO_ADDRESS(OCOTP_BASE_ADDR); reg = __raw_readl(base + 0x440); reg &= (0x3 << OCOTP_SPEED_BIT_OFFSET); reg >>= OCOTP_SPEED_BIT_OFFSET; /*choose the little value to run lower max cpufreq*/ arm_max_freq = (reg > arm_max_freq) ? arm_max_freq : reg; } else {
/*
 * usb_phy_enable - bring the host1 USB PHY out of reset and power it up.
 *
 * Sequence: stop+reset the EHCI controller, force the PHY out of low
 * power mode if needed, soft-reset the PHY, release clock gate and
 * reset, power up, and enable full/low-speed UTMI levels.
 *
 * Always returns 0.
 *
 * NOTE(review): the two RUN_STOP/RESET polls below are unbounded
 * busy-waits; if the controller hangs this spins forever.  Consider a
 * timeout -- left as-is because the sequence is timing-critical.
 */
static int usb_phy_enable(struct fsl_usb2_platform_data *pdata)
{
	u32 tmp;
	void __iomem *phy_reg = MX6_IO_ADDRESS(USB_PHY1_BASE_ADDR);
	void __iomem *phy_ctrl;

	/* Stop then Reset */
	UH1_USBCMD &= ~UCMD_RUN_STOP;
	while (UH1_USBCMD & UCMD_RUN_STOP)
		;
	UH1_USBCMD |= UCMD_RESET;
	while ((UH1_USBCMD) & (UCMD_RESET))
		;
	/*
	 * If the controller reset does not put the PHY be out of
	 * low power mode, do it manually.
	 */
	if (UH1_PORTSC1 & PORTSC_PHCD) {
		UH1_PORTSC1 &= ~PORTSC_PHCD;
		mdelay(1);
	}
	/* Reset USBPHY module */
	phy_ctrl = phy_reg + HW_USBPHY_CTRL;
	tmp = __raw_readl(phy_ctrl);
	tmp |= BM_USBPHY_CTRL_SFTRST;
	__raw_writel(tmp, phy_ctrl);
	udelay(10);
	/* Remove CLKGATE and SFTRST */
	tmp = __raw_readl(phy_ctrl);
	tmp &= ~(BM_USBPHY_CTRL_CLKGATE | BM_USBPHY_CTRL_SFTRST);
	__raw_writel(tmp, phy_ctrl);
	udelay(10);
	/* Power up the PHY (clear all power-down bits). */
	__raw_writel(0, phy_reg + HW_USBPHY_PWD);
	/* enable FS/LS device */
	tmp = __raw_readl(phy_reg + HW_USBPHY_CTRL);
	tmp |= (BM_USBPHY_CTRL_ENUTMILEVEL2 | BM_USBPHY_CTRL_ENUTMILEVEL3);
	__raw_writel(tmp, phy_reg + HW_USBPHY_CTRL);
	/* Undocumented IP bits: 17 on silicon with the HW IC-bug fix,
	 * 18 additionally on MX6SL -- meaning per reference manual,
	 * TODO confirm. */
	if (!usb_icbug_swfix_need())
		__raw_writel((1 << 17), phy_reg + HW_USBPHY_IP_SET);
	if (cpu_is_mx6sl())
		__raw_writel((1 << 18), phy_reg + HW_USBPHY_IP_SET);
	return 0;
}
/*
 * mxc_timer_init - set up the MXC GPT as clocksource/clockevent.
 *
 * @timer_clk: timer input clock to enable.
 * @base:      virtual base address of the GPT block.
 * @irq:       timer interrupt number.
 *
 * Chooses the timer input clock per SoC/revision: older/SL parts use
 * the peripheral clock, newer MX6 parts use OSC/8, with the extra
 * 24MHz prescaler path enabled on non-MX6Q parts.
 */
void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
{
	uint32_t tctl_val;
	u32 reg;

	clk_enable(timer_clk);
	timer_base = base;

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */
	__raw_writel(0, timer_base + MXC_TCTL);
	__raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */

	if (timer_is_v2()) {
		if (cpu_is_mx5() || cpu_is_mx6sl() ||
			mx6q_revision() == IMX_CHIP_REVISION_1_0)
			/* Free-running, keep counting in WAIT, enabled. */
			tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR |
				V2_TCTL_WAITEN | MXC_TCTL_TEN;
		else {
			/* Newer MX6 parts clock the GPT from OSC/8. */
			tctl_val = V2_TCTL_CLK_OSC_DIV8 | V2_TCTL_FRR |
				V2_TCTL_WAITEN | MXC_TCTL_TEN;
			if (!cpu_is_mx6q()) {
				/* Divide the 24MHz input by 8 in the
				 * prescaler before enabling it. */
				reg = __raw_readl(timer_base + MXC_TPRER);
				reg |= (V2_TPRER_PRE24M_DIV8 <<
						V2_TPRER_PRE24M_OFFSET);
				__raw_writel(reg, timer_base + MXC_TPRER);
				/* Enable the 24MHz input clock. */
				tctl_val |= V2_TCTL_ENABLE24M;
			}
		}
	} else
		tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;

	__raw_writel(tctl_val, timer_base + MXC_TCTL);

	/* init and register the timer to the framework */
	mxc_clocksource_init(timer_clk);
	mxc_clockevent_init(timer_clk);

	/* Make irqs happen */
	setup_irq(irq, &mxc_timer_irq);
}
static int anatop_thermal_counting_ratio(unsigned int fuse_data) { int ret = -EINVAL; pr_info("Thermal calibration data is 0x%x\n", fuse_data); if (fuse_data == 0 || fuse_data == 0xffffffff || (fuse_data & 0xfff00000) == 0) { pr_info("%s: invalid calibration data, disable cooling!!!\n", __func__); cooling_device_disable = true; ratio = DEFAULT_RATIO; disable_irq(thermal_irq); return ret; } ret = 0; /* Fuse data layout: * [31:20] sensor value @ 25C * [19:8] sensor value of hot * [7:0] hot temperature value */ raw_25c = fuse_data >> 20; raw_hot = (fuse_data & 0xfff00) >> 8; hot_temp = fuse_data & 0xff; if (!calibration_valid && !cpu_is_mx6sl()) /* * The universal equation for thermal sensor * is slope = 0.4297157 - (0.0015976 * 25C fuse), * here we convert them to integer to make them * easy for counting, FACTOR1 is 15976, * FACTOR2 is 4297157. Our ratio = -100 * slope. */ ratio = ((FACTOR1 * raw_25c - FACTOR2) + 50000) / 100000; else ratio = ((raw_25c - raw_hot) * 100) / (hot_temp - 25); pr_info("Thermal sensor with ratio = %d\n", ratio); raw_n40c = raw_25c + (13 * ratio) / 20; raw_125c = raw_25c - ratio; /* Init default critical temp to set alarm */ raw_critical = raw_25c - ratio * (KELVIN_TO_CEL(TEMP_CRITICAL, KELVIN_OFFSET) - 25) / 100; clk_enable(pll3_clk); anatop_update_alarm(raw_critical); return ret; }
/*
 * set_mclk_rate - select and program the camera sensor master clock
 * (mclk) for the given CSI port.
 *
 * @p_mclk_freq: in: requested mclk frequency; out: the rate actually
 *               achievable per clk_round_rate().
 * @csi:         CSI port number (valid values depend on the SoC).
 *
 * Fixes: use %u for the unsigned csi argument, drop a stray ';' after
 * the mx6q/mx6dl else-block, and check clk_get() with IS_ERR() before
 * using the clock.
 */
void set_mclk_rate(uint32_t *p_mclk_freq, uint32_t csi)
{
	struct clk *clk;
	uint32_t freq = 0;
	char *mclk;

	if (cpu_is_mx53()) {
		if (csi == 0) {
			mclk = "ssi_ext1_clk";
		} else {
			pr_err("invalid csi num %u\n", csi);
			return;
		}
	} else if (cpu_is_mx6q() || cpu_is_mx6dl()) {
		if (csi == 0) {
			mclk = "clko2_clk";
		} else {
			pr_err("invalid csi num %u\n", csi);
			return;
		}
	} else if (cpu_is_mx25() || cpu_is_mx6sl()) {
		/* only has CSI0 */
		mclk = "csi_clk";
	} else {
		if (csi == 0) {
			mclk = "csi_mclk1";
		} else if (csi == 1) {
			mclk = "csi_mclk2";
		} else {
			pr_err("invalid csi num %u\n", csi);
			return;
		}
	}

	clk = clk_get(NULL, mclk);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to get %s\n", __func__, mclk);
		return;
	}
	freq = clk_round_rate(clk, *p_mclk_freq);
	clk_set_rate(clk, freq);

	*p_mclk_freq = freq;

	clk_put(clk);
	pr_debug("%s frequency = %u\n", mclk, *p_mclk_freq);
}
/*
 * disp_power_up - power the display (EPDC/LCDIF/PXP) domain back up.
 * Only acts on i.MX6SL rev 1.2+, which gates this domain separately.
 */
static void disp_power_up(void)
{
	if (cpu_is_mx6sl() && (mx6sl_revision() >= IMX_CHIP_REVISION_1_2)) {
		/*
		 * Need to enable EPDC/LCDIF pix clock, and
		 * EPDC/LCDIF/PXP axi clock before power up.
		 */
		__raw_writel(ccgr3 | MXC_CCM_CCGRx_CG5_MASK |
			MXC_CCM_CCGRx_CG4_MASK | MXC_CCM_CCGRx_CG3_MASK |
			MXC_CCM_CCGRx_CG2_MASK | MXC_CCM_CCGRx_CG1_MASK,
			MXC_CCM_CCGR3);

		/* Ungate the display PGC, then kick the GPC power-up
		 * request (0x20) and clear the status register --
		 * exact bit meanings per the GPC chapter of the RM,
		 * TODO confirm. */
		__raw_writel(0x0, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
		__raw_writel(0x20, gpc_base + GPC_CNTR_OFFSET);
		__raw_writel(0x1, gpc_base + GPC_PGC_DISP_SR_OFFSET);
	}
}
/*
 * mxc_init_l2x0 - configure and enable the PL310 (L2C-310) L2 cache
 * controller: tag/data latency, prefetch, low-power features, then
 * hand over to l2x0_init().  Always returns 0.
 */
int mxc_init_l2x0(void)
{
	unsigned int val;

#define IOMUXC_GPR11_L2CACHE_AS_OCRAM 0x00000002
	val = readl(IOMUXC_GPR11);
	if (cpu_is_mx6sl() && (val & IOMUXC_GPR11_L2CACHE_AS_OCRAM)) {
		/* L2 cache configured as OCRAM, reset it */
		val &= ~IOMUXC_GPR11_L2CACHE_AS_OCRAM;
		writel(val, IOMUXC_GPR11);
	}

	writel(0x132, IO_ADDRESS(L2_BASE_ADDR + L2X0_TAG_LATENCY_CTRL));
	writel(0x132, IO_ADDRESS(L2_BASE_ADDR + L2X0_DATA_LATENCY_CTRL));

	val = readl(IO_ADDRESS(L2_BASE_ADDR + L2X0_PREFETCH_CTRL));

	/* Turn on the L2 I/D prefetch */
	val |= 0x30000000;

	/*
	 * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
	 * The L2 cache controller(PL310) version on the i.MX6DL/SOLO/SL is r3p2
	 * But according to ARM PL310 errata: 752271
	 * ID: 752271: Double linefill feature can cause data corruption
	 * Fault Status: Present in: r3p0, r3p1, r3p1-50rel0. Fixed in r3p2
	 * Workaround: The only workaround to this erratum is to disable the
	 * double linefill feature. This is the default behavior.
	 */
	if (!cpu_is_mx6q())
		val |= 0x40800000;
	writel(val, IO_ADDRESS(L2_BASE_ADDR + L2X0_PREFETCH_CTRL));

	val = readl(IO_ADDRESS(L2_BASE_ADDR + L2X0_POWER_CTRL));
	val |= L2X0_DYNAMIC_CLK_GATING_EN;
	val |= L2X0_STNDBY_MODE_EN;
	writel(val, IO_ADDRESS(L2_BASE_ADDR + L2X0_POWER_CTRL));

	l2x0_init(IO_ADDRESS(L2_BASE_ADDR), 0x0, ~0x00000000);
	return 0;
}
/*
 * disp_power_down - power off the display (EPDC/LCDIF/PXP) domain.
 * Only acts on i.MX6SL rev 1.2+, which gates this domain separately.
 */
static void disp_power_down(void)
{
	if (cpu_is_mx6sl() && (mx6sl_revision() >= IMX_CHIP_REVISION_1_2)) {
		/* Max out the domain power up/down timing counters, gate
		 * the display PGC, then kick the GPC power-down request
		 * (0x10) -- exact bit meanings per the GPC chapter of
		 * the RM, TODO confirm. */
		__raw_writel(0xFFFFFFFF, gpc_base + GPC_PGC_DISP_PUPSCR_OFFSET);
		__raw_writel(0xFFFFFFFF, gpc_base + GPC_PGC_DISP_PDNSCR_OFFSET);

		__raw_writel(0x1, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
		__raw_writel(0x10, gpc_base + GPC_CNTR_OFFSET);

		/* Disable EPDC/LCDIF pix clock, and EPDC/LCDIF/PXP axi clock */
		__raw_writel(ccgr3 & ~MXC_CCM_CCGRx_CG5_MASK &
			~MXC_CCM_CCGRx_CG4_MASK & ~MXC_CCM_CCGRx_CG3_MASK &
			~MXC_CCM_CCGRx_CG2_MASK & ~MXC_CCM_CCGRx_CG1_MASK,
			MXC_CCM_CCGR3);
	}
}
/*
 * disp_power_down - power off the display (EPDC/LCDIF/PXP) domain on
 * i.MX6SL.  Compiled out when the ELCDIF framebuffer driver is built
 * in or as a module, since that driver then owns the display domain.
 */
static void disp_power_down(void)
{
#if !defined(CONFIG_FB_MXC_ELCDIF_FB) && \
	!defined(CONFIG_FB_MXC_ELCDIF_FB_MODULE)
	if (cpu_is_mx6sl()) {
		/* Max out the domain power up/down timing counters, gate
		 * the display PGC, then kick the GPC power-down request
		 * (0x10) -- exact bit meanings per the GPC chapter of
		 * the RM, TODO confirm. */
		__raw_writel(0xFFFFFFFF, gpc_base + GPC_PGC_DISP_PUPSCR_OFFSET);
		__raw_writel(0xFFFFFFFF, gpc_base + GPC_PGC_DISP_PDNSCR_OFFSET);

		__raw_writel(0x1, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
		__raw_writel(0x10, gpc_base + GPC_CNTR_OFFSET);

		/* Disable EPDC/LCDIF pix clock, and EPDC/LCDIF/PXP axi clock */
		__raw_writel(ccgr3 & ~MXC_CCM_CCGRx_CG5_MASK &
			~MXC_CCM_CCGRx_CG4_MASK & ~MXC_CCM_CCGRx_CG3_MASK &
			~MXC_CCM_CCGRx_CG2_MASK & ~MXC_CCM_CCGRx_CG1_MASK,
			MXC_CCM_CCGR3);
	}
#endif
}
/*
 * disp_power_up - power the display (EPDC/LCDIF/PXP) domain back up on
 * i.MX6SL.  Compiled out when the ELCDIF framebuffer driver is built
 * in or as a module, since that driver then owns the display domain.
 */
static void disp_power_up(void)
{
#if !defined(CONFIG_FB_MXC_ELCDIF_FB) && \
	!defined(CONFIG_FB_MXC_ELCDIF_FB_MODULE)
	if (cpu_is_mx6sl()) {
		/*
		 * Need to enable EPDC/LCDIF pix clock, and
		 * EPDC/LCDIF/PXP axi clock before power up.
		 */
		__raw_writel(ccgr3 | MXC_CCM_CCGRx_CG5_MASK |
			MXC_CCM_CCGRx_CG4_MASK | MXC_CCM_CCGRx_CG3_MASK |
			MXC_CCM_CCGRx_CG2_MASK | MXC_CCM_CCGRx_CG1_MASK,
			MXC_CCM_CCGR3);

		/* Ungate the display PGC, then kick the GPC power-up
		 * request (0x20) and clear the status register --
		 * exact bit meanings per the GPC chapter of the RM,
		 * TODO confirm. */
		__raw_writel(0x0, gpc_base + GPC_PGC_DISP_PGCR_OFFSET);
		__raw_writel(0x20, gpc_base + GPC_CNTR_OFFSET);
		__raw_writel(0x1, gpc_base + GPC_PGC_DISP_SR_OFFSET);
	}
#endif
}
/*
 * usbh1_platform_rh_resume - host1 root-hub resume hook: once the
 * forced-resume signalling on the port finishes, re-enable the PHY's
 * high-speed disconnect detector.
 *
 * Polls for up to 1000 * 500us (0.5s) for FORCE_RESUME to clear; on
 * timeout it only logs an error and continues.
 */
static void usbh1_platform_rh_resume(struct fsl_usb2_platform_data *pdata)
{
	u32 index = 0;

	/* No software fix needed on MX6SL. */
	if (cpu_is_mx6sl())
		return ;
	/* Only high-speed connections need the disconnect detector. */
	if ((UH1_PORTSC1 & (PORTSC_PORT_SPEED_MASK)) != PORTSC_PORT_SPEED_HIGH)
		return ;

	/* Wait for the controller to finish driving resume signalling. */
	while ((UH1_PORTSC1 & PORTSC_PORT_FORCE_RESUME)
			&& (index < 1000)) {
		udelay(500);
		index++;
	}

	if (index >= 1000)
		printk(KERN_ERR "failed to wait for the resume finished in %s() line:%d\n",
		__func__, __LINE__);
	/* We should add some delay to wait for the device switch to
	 * High-Speed 45ohm termination resistors mode. */
	udelay(500);
	__raw_writel(BM_USBPHY_CTRL_ENHOSTDISCONDETECT,
		MX6_IO_ADDRESS(pdata->phy_regs) + HW_USBPHY_CTRL_SET);
}
/*
 * mx6_suspend_enter - platform suspend entry point for i.MX6.
 *
 * @state: PM_SUSPEND_MEM or PM_SUSPEND_STANDBY.
 *
 * Aborts early (returns 0) if a configured wakeup interrupt is already
 * pending.  Otherwise saves register state, programs the requested low
 * power mode, runs the IRAM suspend code, and restores everything on
 * the way out.  Returns -EINVAL for unsupported states.
 */
static int mx6_suspend_enter(suspend_state_t state)
{
	unsigned int wake_irq_isr[4];
	unsigned int cpu_type;
	struct gic_dist_state gds;
	struct gic_cpu_state gcs;
	bool arm_pg = false;

	if (cpu_is_mx6q())
		cpu_type = MXC_CPU_MX6Q;
	else if (cpu_is_mx6dl())
		cpu_type = MXC_CPU_MX6DL;
	else
		cpu_type = MXC_CPU_MX6SL;

	/* Check for wakeup interrupts that are already asserted. */
	wake_irq_isr[0] = __raw_readl(gpc_base +
			GPC_ISR1_OFFSET) & gpc_wake_irq[0];
	wake_irq_isr[1] = __raw_readl(gpc_base +
			GPC_ISR2_OFFSET) & gpc_wake_irq[1];
	wake_irq_isr[2] = __raw_readl(gpc_base +
			GPC_ISR3_OFFSET) & gpc_wake_irq[2];
	wake_irq_isr[3] = __raw_readl(gpc_base +
			GPC_ISR4_OFFSET) & gpc_wake_irq[3];
	if (wake_irq_isr[0] | wake_irq_isr[1] |
			wake_irq_isr[2] | wake_irq_isr[3]) {
		printk(KERN_INFO "There are wakeup irq pending,system resume!\n");
		printk(KERN_INFO "wake_irq_isr[0-3]: 0x%x, 0x%x, 0x%x, 0x%x\n",
				wake_irq_isr[0], wake_irq_isr[1],
				wake_irq_isr[2], wake_irq_isr[3]);
		return 0;
	}
	mx6_suspend_store();

	/*
	 * i.MX6dl TO1.0/i.MX6dq TO1.1/1.0 TKT094231: can't support
	 * ARM_POWER_OFF mode.
	 */
	if (state == PM_SUSPEND_MEM &&
		((mx6dl_revision() == IMX_CHIP_REVISION_1_0) ||
		(cpu_is_mx6q() && mx6q_revision() <= IMX_CHIP_REVISION_1_1))) {
		state = PM_SUSPEND_STANDBY;
	}

	switch (state) {
	case PM_SUSPEND_MEM:
		disp_power_down();
		usb_power_down_handler();
		mxc_cpu_lp_set(ARM_POWER_OFF);
		/* ARM core is power gated: GIC state must be preserved. */
		arm_pg = true;
		break;
	case PM_SUSPEND_STANDBY:
		if (cpu_is_mx6sl()) {
			disp_power_down();
			usb_power_down_handler();
			mxc_cpu_lp_set(STOP_XTAL_ON);
			arm_pg = true;
		} else
			mxc_cpu_lp_set(STOP_POWER_OFF);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * L2 can exit by 'reset' or Inband beacon (from remote EP)
	 * toggling phy_powerdown has same effect as 'inband beacon'
	 * So, toggle bit18 of GPR1, to fix errata
	 * "PCIe PCIe does not support L2 Power Down"
	 */
	__raw_writel(__raw_readl(IOMUXC_GPR1) | (1 << 18), IOMUXC_GPR1);

	if (state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY) {
		local_flush_tlb_all();
		flush_cache_all();

		if (arm_pg) {
			/* preserve gic state */
			save_gic_dist_state(0, &gds);
			save_gic_cpu_state(0, &gcs);
		}
		if (pm_data && pm_data->suspend_enter)
			pm_data->suspend_enter();

		/* The actual WFI/power-off sequence runs from IRAM. */
		suspend_in_iram(state, (unsigned long)iram_paddr,
			(unsigned long)suspend_iram_base, cpu_type);

		if (pm_data && pm_data->suspend_exit)
			pm_data->suspend_exit();

		/* Reset the RBC counter. */
		/* All interrupts should be masked before the
		 * RBC counter is reset.
		 */
		/* Mask all interrupts. These will be unmasked by
		 * the mx6_suspend_restore routine below.
		 */
		__raw_writel(0xffffffff, gpc_base + 0x08);
		__raw_writel(0xffffffff, gpc_base + 0x0c);
		__raw_writel(0xffffffff, gpc_base + 0x10);
		__raw_writel(0xffffffff, gpc_base + 0x14);

		/* Clear the RBC counter and RBC_EN bit. */
		/* Disable the REG_BYPASS_COUNTER. */
		__raw_writel(__raw_readl(MXC_CCM_CCR) & ~MXC_CCM_CCR_RBC_EN,
			MXC_CCM_CCR);
		/* Make sure we clear REG_BYPASS_COUNT*/
		__raw_writel(__raw_readl(MXC_CCM_CCR) &
			(~MXC_CCM_CCR_REG_BYPASS_CNT_MASK), MXC_CCM_CCR);
		/* Need to wait for a minimum of 2 CLKILS (32KHz) for the
		 * counter to clear and reset.
		 */
		udelay(80);

		if (arm_pg) {
			/* restore gic registers */
			restore_gic_dist_state(0, &gds);
			restore_gic_cpu_state(0, &gcs);
		}
		if (state == PM_SUSPEND_MEM || (cpu_is_mx6sl())) {
			usb_power_up_handler();
			disp_power_up();
		}

		mx6_suspend_restore();

		__raw_writel(BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG,
			anatop_base + HW_ANADIG_ANA_MISC0_CLR);
	} else {
		cpu_do_idle();
	}

	/*
	 * L2 can exit by 'reset' or Inband beacon (from remote EP)
	 * toggling phy_powerdown has same effect as 'inband beacon'
	 * So, toggle bit18 of GPR1, to fix errata
	 * "PCIe PCIe does not support L2 Power Down"
	 */
	__raw_writel(__raw_readl(IOMUXC_GPR1) & (~(1 << 18)), IOMUXC_GPR1);

	return 0;
}
/*
 * arch_idle_single_core - enter WFI on a single-core configuration,
 * applying the chip-specific WAIT-mode workarounds first.
 *
 * On MX6DL >= TO1.1 the hardware fix is enabled via CGPR bit 17 and a
 * plain WFI suffices.  On other parts, the clock tree is temporarily
 * rearranged (ARM on 24MHz STEP_CLK or bypassed PLL1, or an ARM_PODF
 * divider) around the WFI to dodge the WAIT-mode errata.
 */
void arch_idle_single_core(void)
{
	u32 reg;

	if (cpu_is_mx6dl() && chip_rev > IMX_CHIP_REVISION_1_0) {
		/*
		 * MX6DLS TO1.1 has the HW fix for the WAIT mode issue.
		 * Ensure that the CGPR bit 17 is set to enable the fix.
		 */
		reg = __raw_readl(MXC_CCM_CGPR);
		reg |= MXC_CCM_CGPR_WAIT_MODE_FIX;
		__raw_writel(reg, MXC_CCM_CGPR);

		ca9_do_idle();
	} else {
		if (low_bus_freq_mode || audio_bus_freq_mode) {
			int ddr_usecount = 0;

			if ((mmdc_ch0_axi != NULL))
				ddr_usecount = clk_get_usecount(mmdc_ch0_axi);

			if (cpu_is_mx6sl() && low_bus_freq_mode
				&& ddr_usecount == 1) {
				/* In this mode PLL2 i already in bypass,
				 * ARM is sourced from PLL1. The code in IRAM
				 * will set ARM to be sourced from STEP_CLK
				 * at 24MHz. It will also set DDR to 1MHz to
				 * reduce power.
				 */
				u32 org_arm_podf = __raw_readl(MXC_CCM_CACRR);

				/* Need to run WFI code from IRAM so that
				 * we can lower DDR freq.
				 */
				mx6sl_wfi_iram(org_arm_podf,
					(unsigned long)mx6sl_wfi_iram_base);
			} else {
				/* Need to set ARM to run at 24MHz since IPG
				 * is at 12MHz. This is valid for audio mode on
				 * MX6SL, and all low power modes on MX6DLS.
				 */
				if (cpu_is_mx6sl() && low_bus_freq_mode) {
					/* ARM is from PLL1, need to switch to
					 * STEP_CLK sourced from 24MHz.
					 */
					/* Swtich STEP_CLK to 24MHz. */
					reg = __raw_readl(MXC_CCM_CCSR);
					reg &= ~MXC_CCM_CCSR_STEP_SEL;
					__raw_writel(reg, MXC_CCM_CCSR);
					/* Set PLL1_SW_CLK to be from
					 *STEP_CLK. */
					reg = __raw_readl(MXC_CCM_CCSR);
					reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
					__raw_writel(reg, MXC_CCM_CCSR);
				} else {
					/* PLL1_SW_CLK is sourced from
					 * PLL2_PFD2_400MHz at this point.
					 * Move it to bypassed PLL1.
					 */
					reg = __raw_readl(MXC_CCM_CCSR);
					reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
					__raw_writel(reg, MXC_CCM_CCSR);
				}

				ca9_do_idle();

				/* Undo the clock rerouting after wakeup. */
				if (cpu_is_mx6sl() && low_bus_freq_mode) {
					/* Set PLL1_SW_CLK to be from PLL1 */
					reg = __raw_readl(MXC_CCM_CCSR);
					reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
					__raw_writel(reg, MXC_CCM_CCSR);
				} else {
					/* NOTE(review): 'reg' still holds the
					 * pre-WFI CCSR value read above; the
					 * SW_CLK_SEL bit is simply set back. */
					reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
					__raw_writel(reg, MXC_CCM_CCSR);
				}
			}
		} else {
			/*
			 * Implement the 12:5 ARM:IPG_CLK ratio
			 * workaround for the WAIT mode issue.
			 * We can directly use the divider to drop the ARM
			 * core freq in a single core environment.
			 * Set the ARM_PODF to get the max freq possible
			 * to avoid the WAIT mode issue when IPG is at 66MHz.
			 */
			__raw_writel(wait_mode_arm_podf, MXC_CCM_CACRR);
			/* Wait for the new divider to take effect. */
			while (__raw_readl(MXC_CCM_CDHIPR))
				;
			ca9_do_idle();
			__raw_writel(cur_arm_podf - 1, MXC_CCM_CACRR);
		}
	}
}
/*
 * post_cpu_init - late i.MX6 platform setup: IRAM pool, AIPS bridge
 * configuration, SCU/SRC tweaks, AXI cache/QoS settings, idle-lock
 * mask, WAIT-mode mem-clock policy, chip revision caching and PCIe
 * PHY power-down on parts that have no PCIe.  Always returns 0.
 */
static int __init post_cpu_init(void)
{
	unsigned int reg;
	void __iomem *base;
	u32 iram_size;

	if (cpu_is_mx6q())
		iram_size = MX6Q_IRAM_SIZE;
	else
		iram_size = MX6DL_MX6SL_IRAM_SIZE;

	iram_init(MX6Q_IRAM_BASE_ADDR, iram_size);

	/* Open up the AIPS1/AIPS2 bridge access controls -- presumably
	 * grants unrestricted peripheral access; TODO confirm the 0x40-
	 * 0x50 register meanings against the AIPSTZ chapter of the RM. */
	base = ioremap(AIPS1_ON_BASE_ADDR, PAGE_SIZE);
	__raw_writel(0x0, base + 0x40);
	__raw_writel(0x0, base + 0x44);
	__raw_writel(0x0, base + 0x48);
	__raw_writel(0x0, base + 0x4C);
	reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
	__raw_writel(reg, base + 0x50);
	iounmap(base);

	base = ioremap(AIPS2_ON_BASE_ADDR, PAGE_SIZE);
	__raw_writel(0x0, base + 0x40);
	__raw_writel(0x0, base + 0x44);
	__raw_writel(0x0, base + 0x48);
	__raw_writel(0x0, base + 0x4C);
	reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
	__raw_writel(reg, base + 0x50);
	iounmap(base);

	/* Allow SCU_CLK to be disabled when all cores are in WFI*/
	base = IO_ADDRESS(SCU_BASE_ADDR);
	reg = __raw_readl(base);
	reg |= 0x20;
	__raw_writel(reg, base);

	/* Disable SRC warm reset to work aound system reboot issue */
	base = IO_ADDRESS(SRC_BASE_ADDR);
	reg = __raw_readl(base);
	reg &= ~0x1;
	__raw_writel(reg, base);

	gpc_base = MX6_IO_ADDRESS(GPC_BASE_ADDR);
	ccm_base = MX6_IO_ADDRESS(CCM_BASE_ADDR);

	/* enable AXI cache for VDOA/VPU/IPU
	 * set IPU AXI-id0 Qos=0xf(bypass) AXI-id1 Qos=0x7
	 * clear OCRAM_CTL bits to disable pipeline control
	 */
	reg = __raw_readl(IOMUXC_GPR3);
	reg &= ~IOMUXC_GPR3_OCRAM_CTL_EN;
	__raw_writel(reg, IOMUXC_GPR3);
	reg = __raw_readl(IOMUXC_GPR4);
	reg |= IOMUXC_GPR4_VDOA_CACHE_EN | IOMUXC_GPR4_VPU_CACHE_EN |
		IOMUXC_GPR4_IPU_CACHE_EN;
	__raw_writel(reg, IOMUXC_GPR4);
	__raw_writel(IOMUXC_GPR6_IPU1_QOS, IOMUXC_GPR6);
	__raw_writel(IOMUXC_GPR7_IPU2_QOS, IOMUXC_GPR7);

	/* Bitmask used by the idle code; value depends on how many CPUs
	 * are actually brought up. */
	num_cpu_idle_lock = 0x0;
	if (cpu_is_mx6dl())
		num_cpu_idle_lock = 0xffff0000;

#ifdef CONFIG_SMP
	switch (setup_max_cpus) {
	case 3:
		num_cpu_idle_lock = 0xff000000;
		break;
	case 2:
		num_cpu_idle_lock = 0xffff0000;
		break;
	case 1:
	case 0:
		num_cpu_idle_lock = 0xffffff00;
		break;
	}
#endif
	/*
	 * The option to keep ARM memory clocks enabled during WAIT
	 * is only available on MX6SL, MX6DQ TO1.2 (or later) and
	 * MX6DL TO1.1 (or later)
	 * So if the user specifies "mem_clk_on" on any other chip,
	 * ensure that it is disabled.
	 */
	if (!cpu_is_mx6sl() && (mx6q_revision() < IMX_CHIP_REVISION_1_2) &&
		(mx6dl_revision() < IMX_CHIP_REVISION_1_1))
		mem_clk_on_in_wait = false;

	/* Cache the silicon revision for later WAIT-mode checks. */
	if (cpu_is_mx6q())
		chip_rev = mx6q_revision();
	else if (cpu_is_mx6dl())
		chip_rev = mx6dl_revision();
	else
		chip_rev = mx6sl_revision();

	/* mx6sl doesn't have pcie. save power, disable PCIe PHY */
	if (!cpu_is_mx6sl()) {
		reg = __raw_readl(IOMUXC_GPR1);
		reg = reg & (~IOMUXC_GPR1_PCIE_REF_CLK_EN);
		reg |= IOMUXC_GPR1_TEST_POWERDOWN;
		__raw_writel(reg, IOMUXC_GPR1);
	}

	return 0;
}
/*
 * mxc_cpu_lp_set - program the CCM/GPC/ANATOP for the requested cpu
 * low power mode before the WFI instruction is executed.
 *
 * @mode: one of the mxc_cpu_pwr_mode values; unknown values are
 *        rejected with a warning and no register changes.
 *
 * stop_mode encodes how deep the stop is: 0 = wait-unclocked-poweroff,
 * 1 = stop-power-off, 2 = ARM power off (dormant), 3 = stop with the
 * crystal kept running.
 */
void mxc_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
{
	int stop_mode = 0;
	void __iomem *anatop_base = IO_ADDRESS(ANATOP_BASE_ADDR);
	u32 ccm_clpcr, anatop_val;

	ccm_clpcr = __raw_readl(MXC_CCM_CLPCR) & ~(MXC_CCM_CLPCR_LPM_MASK);

	switch (mode) {
	case WAIT_CLOCKED:
		break;
	case WAIT_UNCLOCKED:
		ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
		break;
	case WAIT_UNCLOCKED_POWER_OFF:
	case STOP_POWER_OFF:
	case ARM_POWER_OFF:
		if (mode == WAIT_UNCLOCKED_POWER_OFF) {
			ccm_clpcr &= ~MXC_CCM_CLPCR_VSTBY;
			ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS;
			ccm_clpcr |= 0x1 << MXC_CCM_CLPCR_LPM_OFFSET;
			/* MX6SL uses MMDC channel 0; others channel 1. */
			if (cpu_is_mx6sl()) {
				ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH0_LPM_HS;
				ccm_clpcr |= MXC_CCM_CLPCR_BYPASS_PMIC_VFUNC_READY;
			} else
				ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH1_LPM_HS;
			stop_mode = 0;
		} else if (mode == STOP_POWER_OFF) {
			ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
			ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET;
			ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
			ccm_clpcr |= MXC_CCM_CLPCR_SBYOS;
			if (cpu_is_mx6sl()) {
				ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH0_LPM_HS;
				ccm_clpcr |= MXC_CCM_CLPCR_BYPASS_PMIC_VFUNC_READY;
			} else
				ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH1_LPM_HS;
			stop_mode = 1;
		} else {
			/* ARM_POWER_OFF: same CLPCR setup, deeper stop. */
			ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
			ccm_clpcr |= 0x3 << MXC_CCM_CLPCR_STBY_COUNT_OFFSET;
			ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
			ccm_clpcr |= MXC_CCM_CLPCR_SBYOS;
			if (cpu_is_mx6sl()) {
				ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH0_LPM_HS;
				ccm_clpcr |= MXC_CCM_CLPCR_BYPASS_PMIC_VFUNC_READY;
			} else
				ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH1_LPM_HS;
			stop_mode = 2;
		}
		break;
	case STOP_XTAL_ON:
		ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
		ccm_clpcr |= MXC_CCM_CLPCR_VSTBY;
		/* Keep the oscillator running in stop mode. */
		ccm_clpcr &= ~MXC_CCM_CLPCR_SBYOS;
		if (cpu_is_mx6sl()) {
			ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH0_LPM_HS;
			ccm_clpcr |= MXC_CCM_CLPCR_BYPASS_PMIC_VFUNC_READY;
		} else
			ccm_clpcr |= MXC_CCM_CLPCR_BYP_MMDC_CH1_LPM_HS;
		stop_mode = 3;
		break;
	default:
		printk(KERN_WARNING "UNKNOWN cpu power mode: %d\n", mode);
		return;
	}

	if (stop_mode > 0) {
		gpc_set_wakeup(gpc_wake_irq);
		/* Power down and power up sequence */
		/* The PUPSCR counter counts in terms of CLKIL (32KHz) cycles.
		 * The PUPSCR should include the time it takes for the ARM LDO to
		 * ramp up.
		 */
		__raw_writel(0x202, gpc_base + GPC_PGC_CPU_PUPSCR_OFFSET);
		/* The PDNSCR is a counter that counts in IPG_CLK cycles. This counter
		 * can be set to minimum values to power down faster.
		 */
		__raw_writel(0x101, gpc_base + GPC_PGC_CPU_PDNSCR_OFFSET);

		if (stop_mode >= 2) {
			/* dormant mode, need to power off the arm core */
			__raw_writel(0x1, gpc_base + GPC_PGC_CPU_PDN_OFFSET);
			if (cpu_is_mx6q() || cpu_is_mx6dl()) {
				/* If stop_mode_config is clear, then 2P5 will be off,
				need to enable weak 2P5, as DDR IO need 2P5 as
				pre-driver */
				if ((__raw_readl(anatop_base + HW_ANADIG_ANA_MISC0)
					& BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG) == 0) {
					/* Enable weak 2P5 linear regulator */
					anatop_val = __raw_readl(anatop_base +
						HW_ANADIG_REG_2P5);
					anatop_val |= BM_ANADIG_REG_2P5_ENABLE_WEAK_LINREG;
					__raw_writel(anatop_val, anatop_base +
						HW_ANADIG_REG_2P5);
				}
				if (mx6q_revision() != IMX_CHIP_REVISION_1_0) {
					/* Enable fet_odrive */
					anatop_val = __raw_readl(anatop_base +
						HW_ANADIG_REG_CORE);
					anatop_val |= BM_ANADIG_REG_CORE_FET_ODRIVE;
					__raw_writel(anatop_val, anatop_base +
						HW_ANADIG_REG_CORE);
				}
			} else {
				if (stop_mode == 2) {
					/* Disable VDDHIGH_IN to VDDSNVS_IN
					 * power path, only used when VDDSNVS_IN
					 * is powered by dedicated
					 * power rail */
					anatop_val = __raw_readl(anatop_base +
						HW_ANADIG_ANA_MISC0);
					anatop_val |= BM_ANADIG_ANA_MISC0_RTC_RINGOSC_EN;
					__raw_writel(anatop_val, anatop_base +
						HW_ANADIG_ANA_MISC0);
					/* Need to enable pull down if 2P5 is disabled */
					anatop_val = __raw_readl(anatop_base +
						HW_ANADIG_REG_2P5);
					anatop_val |= BM_ANADIG_REG_2P5_ENABLE_PULLDOWN;
					__raw_writel(anatop_val, anatop_base +
						HW_ANADIG_REG_2P5);
				}
			}
			if (stop_mode != 3) {
				/* Make sure we clear WB_COUNT
				 * and re-config it.
				 */
				__raw_writel(__raw_readl(MXC_CCM_CCR) &
					(~MXC_CCM_CCR_WB_COUNT_MASK), MXC_CCM_CCR);
				/* Reconfigure WB, need to set WB counter
				 * to 0x7 to make sure it work normally */
				__raw_writel(__raw_readl(MXC_CCM_CCR) |
					(0x7 << MXC_CCM_CCR_WB_COUNT_OFFSET), MXC_CCM_CCR);
				/* Set WB_PER enable */
				ccm_clpcr |= MXC_CCM_CLPCR_WB_PER_AT_LPM;
			}
		}
		if (cpu_is_mx6sl() ||
			(mx6q_revision() > IMX_CHIP_REVISION_1_1) ||
			(mx6dl_revision() > IMX_CHIP_REVISION_1_0)) {
			u32 reg;
			/* We need to allow the memories to be clock gated
			 * in STOP mode, else the power consumption will
			 * be very high.
			 */
			reg = __raw_readl(MXC_CCM_CGPR);
			reg |= MXC_CCM_CGPR_MEM_IPG_STOP_MASK;
			if (!cpu_is_mx6sl()) {
				/*
				 * For MX6QTO1.2 or later and MX6DLTO1.1 or later,
				 * ensure that the CCM_CGPR bit 17 is cleared before
				 * dormant mode is entered.
				 */
				reg &= ~MXC_CCM_CGPR_WAIT_MODE_FIX;
			}
			__raw_writel(reg, MXC_CCM_CGPR);
		}
	}
	__raw_writel(ccm_clpcr, MXC_CCM_CLPCR);
}
/*
 * busfreq_probe - set up the i.MX6 bus-frequency scaling driver.
 *
 * Looks up every clock the scaling code needs, registers the sysfs
 * "enable" control file, initializes the per-SoC bus/DDR mode state and,
 * on MX6SL, copies the WFI and DDR-frequency-change routines into
 * freshly allocated (and executable) IRAM.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): clocks acquired before a failing clk_get() are not
 * clk_put() on the error paths — pre-existing behavior, kept as-is
 * since probe failure here leaves the system without busfreq anyway.
 */
static int __devinit busfreq_probe(struct platform_device *pdev)
{
	/* Table of all clocks the driver needs, fetched in one loop to
	 * avoid 14 copy-pasted clk_get()/IS_ERR() stanzas. */
	static const struct {
		struct clk **clkp;
		const char *id;
	} bus_clks[] = {
		{ &pll2_400,	"pll2_pfd_400M" },
		{ &pll2_200,	"pll2_200M" },
		{ &pll2,	"pll2" },
		{ &pll1,	"pll1_main_clk" },
		{ &pll1_sw_clk,	"pll1_sw_clk" },
		{ &cpu_clk,	"cpu_clk" },
		{ &pll3,	"pll3_main_clk" },
		{ &pll3_540,	"pll3_pfd_540M" },
		{ &pll3_sw_clk,	"pll3_sw_clk" },
		{ &axi_clk,	"axi_clk" },
		{ &ahb_clk,	"ahb" },
		{ &periph_clk,	"periph_clk" },
		{ &osc_clk,	"osc" },
		{ &mmdc_ch0_axi, "mmdc_ch0_axi" },
	};
	int i;
	int err;	/* signed: sysfs_create_file() returns a negative errno */

	busfreq_dev = &pdev->dev;

	for (i = 0; i < ARRAY_SIZE(bus_clks); i++) {
		struct clk *clk = clk_get(NULL, bus_clks[i].id);
		if (IS_ERR(clk)) {
			printk(KERN_DEBUG "%s: failed to get %s\n",
			       __func__, bus_clks[i].id);
			return PTR_ERR(clk);
		}
		*bus_clks[i].clkp = clk;
	}

	err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
	if (err) {
		printk(KERN_ERR "Unable to register sysdev entry for BUSFREQ\n");
		return err;
	}

	cpu_op_tbl = get_cpu_op(&cpu_op_nr);

	low_bus_freq_mode = 0;
	if (cpu_is_mx6dl()) {
		high_bus_freq_mode = 0;
		med_bus_freq_mode = 1;
		/* To make pll2_400 use count right, as when system
		 * enter 24M, it will disable pll2_400. */
		clk_enable(pll2_400);
	} else if (cpu_is_mx6sl()) {
		/* Set med_bus_freq_mode to 1 since med_bus_freq_mode
		 * is not supported as yet for MX6SL. */
		high_bus_freq_mode = 1;
		med_bus_freq_mode = 1;
	} else {
		high_bus_freq_mode = 1;
		med_bus_freq_mode = 0;
	}
	bus_freq_scaling_is_active = 0;
	bus_freq_scaling_initialized = 1;

	if (cpu_is_mx6q()) {
		ddr_low_rate = LPAPM_CLK;
		ddr_med_rate = DDR_MED_CLK;
		ddr_normal_rate = DDR3_NORMAL_CLK;
	}
	if (cpu_is_mx6dl() || cpu_is_mx6sl()) {
		ddr_low_rate = LPAPM_CLK;
		ddr_normal_rate = ddr_med_rate = DDR_MED_CLK;
	}

	INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
	register_pm_notifier(&imx_bus_freq_pm_notifier);

	if (!cpu_is_mx6sl())
		init_mmdc_settings();
	else {
		unsigned long iram_paddr;

		/* Allocate IRAM for the WFI code used when the system
		 * is in low-frequency mode. */
		iram_alloc(SZ_4K, &iram_paddr);
		/* Remap the area here since we want the memory region
		 * to be executable. */
		mx6sl_wfi_iram_base = __arm_ioremap(iram_paddr, SZ_4K,
						    MT_MEMORY_NONCACHED);
		memcpy(mx6sl_wfi_iram_base, mx6sl_wait, SZ_4K);
		mx6sl_wfi_iram = (void *)mx6sl_wfi_iram_base;

		/* Same for the DDR frequency-change code: allocate IRAM
		 * and remap it executable. */
		iram_alloc(SZ_4K, &iram_paddr);
		mx6sl_ddr_freq_base = __arm_ioremap(iram_paddr, SZ_4K,
						    MT_MEMORY_NONCACHED);
		memcpy(mx6sl_ddr_freq_base, mx6sl_ddr_iram, SZ_4K);
		mx6sl_ddr_freq_change_iram = (void *)mx6sl_ddr_freq_base;
	}

	return 0;
}
void reduce_bus_freq(void) { if (!cpu_is_mx6sl()) { if (cpu_is_mx6dl() && (clk_get_parent(axi_clk) != periph_clk)) /* Set the axi_clk to be sourced from the periph_clk. * So that its frequency can be lowered down to 50MHz * or 24MHz as the case may be. */ clk_set_parent(axi_clk, periph_clk); clk_enable(pll3); if (lp_audio_freq) { /* Need to ensure that PLL2_PFD_400M is kept ON. */ clk_enable(pll2_400); update_ddr_freq(DDR_AUDIO_CLK); /* Make sure periph clk's parent also got updated */ clk_set_parent(periph_clk, pll2_200); audio_bus_freq_mode = 1; low_bus_freq_mode = 0; } else { update_ddr_freq(LPAPM_CLK); /* Make sure periph clk's parent also got updated */ clk_set_parent(periph_clk, osc_clk); if (audio_bus_freq_mode) clk_disable(pll2_400); low_bus_freq_mode = 1; audio_bus_freq_mode = 0; } if (med_bus_freq_mode) clk_disable(pll2_400); clk_disable(pll3); med_bus_freq_mode = 0; } else { u32 reg; u32 div; unsigned long flags; if (high_bus_freq_mode) { /* Set periph_clk to be sourced from OSC_CLK */ /* Set AXI to 24MHz. */ clk_set_parent(periph_clk, osc_clk); clk_set_rate(axi_clk, clk_round_rate(axi_clk, LPAPM_CLK)); /* Set AHB to 24MHz. */ clk_set_rate(ahb_clk, clk_round_rate(ahb_clk, LPAPM_CLK)); } if (lp_audio_freq) { u32 ttbr1; /* PLL2 is on in this mode, as DDR is at 100MHz. */ /* Now change DDR freq while running from IRAM. */ /* Set AHB to 24MHz. */ clk_set_rate(ahb_clk, clk_round_rate(ahb_clk, LPAPM_CLK / 3)); spin_lock_irqsave(&freq_lock, flags); /* sync the outer cache. */ outer_sync(); /* Save TTBR1 */ ttbr1 = save_ttbr1(); mx6sl_ddr_freq_change_iram(DDR_AUDIO_CLK, low_bus_freq_mode); restore_ttbr1(ttbr1); spin_unlock_irqrestore(&freq_lock, flags); if (low_bus_freq_mode) { /* Swtich ARM to run off PLL2_PFD2_400MHz * since DDR is anyway at 100MHz. */ clk_set_parent(pll1_sw_clk, pll2_400); /* Ensure that the clock will be * at original speed. 
*/ reg = __raw_writel(org_arm_podf, MXC_CCM_CACRR); while (__raw_readl(MXC_CCM_CDHIPR)) ; /* We have enabled PLL1 in the code below when * ARM is from PLL1, so disable it here. */ clk_disable(pll1); } low_bus_freq_mode = 0; audio_bus_freq_mode = 1; } else { u32 ttbr1; /* Set MMDC clk to 24MHz. */ /* Since we are going to set PLL2 in bypass mode, * move the CPU clock off PLL2. */ /* Ensure that the clock will be at * lowest possible freq. */ org_arm_podf = __raw_readl(MXC_CCM_CACRR); /* Need to enable PLL1 before setting its rate. */ clk_enable(pll1); clk_set_rate(pll1, cpu_op_tbl[cpu_op_nr - 1].pll_lpm_rate); div = clk_get_rate(pll1) / cpu_op_tbl[cpu_op_nr - 1].cpu_rate; reg = __raw_writel(div - 1, MXC_CCM_CACRR); while (__raw_readl(MXC_CCM_CDHIPR)) ; clk_set_parent(pll1_sw_clk, pll1); spin_lock_irqsave(&freq_lock, flags); /* sync the outer cache. */ outer_sync(); ttbr1 = save_ttbr1(); /* Now change DDR freq while running from IRAM. */ mx6sl_ddr_freq_change_iram(LPAPM_CLK, low_bus_freq_mode); restore_ttbr1(ttbr1); spin_unlock_irqrestore(&freq_lock, flags); low_bus_freq_mode = 1; audio_bus_freq_mode = 0; } } high_bus_freq_mode = 0; }
/*
 * busfreq_probe - set up the i.MX6 bus-frequency scaling driver.
 *
 * Looks up every clock the scaling code needs, registers the sysfs
 * "enable" control file, initializes the per-SoC bus/DDR mode state and,
 * on MX6SL, points the WFI and DDR-frequency-change routines at their
 * preallocated IRAM code regions (no ioremap needed: the IRAM is already
 * mapped at IRAM_BASE_ADDR_VIRT).
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): clocks acquired before a failing clk_get() are not
 * clk_put() on the error paths — pre-existing behavior, kept as-is
 * since probe failure here leaves the system without busfreq anyway.
 */
static int __devinit busfreq_probe(struct platform_device *pdev)
{
	/* Table of all clocks the driver needs, fetched in one loop to
	 * avoid 14 copy-pasted clk_get()/IS_ERR() stanzas. */
	static const struct {
		struct clk **clkp;
		const char *id;
	} bus_clks[] = {
		{ &pll2_400,	"pll2_pfd_400M" },
		{ &pll2_200,	"pll2_200M" },
		{ &pll2,	"pll2" },
		{ &pll1,	"pll1_main_clk" },
		{ &pll1_sw_clk,	"pll1_sw_clk" },
		{ &cpu_clk,	"cpu_clk" },
		{ &pll3,	"pll3_main_clk" },
		{ &pll3_540,	"pll3_pfd_540M" },
		{ &pll3_sw_clk,	"pll3_sw_clk" },
		{ &axi_clk,	"axi_clk" },
		{ &ahb_clk,	"ahb" },
		{ &periph_clk,	"periph_clk" },
		{ &osc_clk,	"osc" },
		{ &mmdc_ch0_axi, "mmdc_ch0_axi" },
	};
	int i;
	int err;	/* signed: sysfs_create_file() returns a negative errno */

	busfreq_dev = &pdev->dev;

	for (i = 0; i < ARRAY_SIZE(bus_clks); i++) {
		struct clk *clk = clk_get(NULL, bus_clks[i].id);
		if (IS_ERR(clk)) {
			printk(KERN_DEBUG "%s: failed to get %s\n",
			       __func__, bus_clks[i].id);
			return PTR_ERR(clk);
		}
		*bus_clks[i].clkp = clk;
	}

	err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
	if (err) {
		printk(KERN_ERR "Unable to register sysdev entry for BUSFREQ\n");
		return err;
	}

	cpu_op_tbl = get_cpu_op(&cpu_op_nr);

	low_bus_freq_mode = 0;
	if (cpu_is_mx6dl()) {
		high_bus_freq_mode = 0;
		med_bus_freq_mode = 1;
		/* To make pll2_400 use count right, as when system
		 * enter 24M, it will disable pll2_400. */
		clk_enable(pll2_400);
	} else if (cpu_is_mx6sl()) {
		/* Set med_bus_freq_mode to 1 since med_bus_freq_mode
		 * is not supported as yet for MX6SL. */
		high_bus_freq_mode = 1;
		med_bus_freq_mode = 1;
	} else {
		high_bus_freq_mode = 1;
		med_bus_freq_mode = 0;
	}
	bus_freq_scaling_is_active = 0;
	bus_freq_scaling_initialized = 1;

	if (cpu_is_mx6q()) {
		ddr_low_rate = LPAPM_CLK;
		ddr_med_rate = DDR_MED_CLK;
		ddr_normal_rate = DDR3_NORMAL_CLK;
	}
	if (cpu_is_mx6dl() || cpu_is_mx6sl()) {
		ddr_low_rate = LPAPM_CLK;
		ddr_normal_rate = ddr_med_rate = DDR_MED_CLK;
	}

	INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
	register_pm_notifier(&imx_bus_freq_pm_notifier);

	if (!cpu_is_mx6sl())
		init_mmdc_settings();
	else {
		/* Use preallocated memory for the WFI code.
		 * Don't ioremap the address: the IRAM is permanently
		 * mapped at IRAM_BASE_ADDR_VIRT, so just offset into
		 * that fixed mapping. */
		mx6sl_wfi_iram_phys_addr = MX6SL_WFI_IRAM_CODE;
		mx6sl_wfi_iram_base = (void *)IRAM_BASE_ADDR_VIRT +
			(mx6sl_wfi_iram_phys_addr - IRAM_BASE_ADDR);
		memcpy(mx6sl_wfi_iram_base, mx6sl_wait,
		       MX6SL_WFI_IRAM_CODE_SIZE);
		mx6sl_wfi_iram = (void *)mx6sl_wfi_iram_base;

		/* Same for the DDR frequency-change code: preallocated,
		 * reachable through the fixed IRAM mapping. */
		mx6sl_ddr_freq_phys_addr = MX6_DDR_FREQ_IRAM_CODE;
		mx6sl_ddr_freq_base = (void *)IRAM_BASE_ADDR_VIRT +
			(mx6sl_ddr_freq_phys_addr - IRAM_BASE_ADDR);
		memcpy(mx6sl_ddr_freq_base, mx6sl_ddr_iram,
		       MX6SL_DDR_FREQ_CODE_SIZE);
		mx6sl_ddr_freq_change_iram = (void *)mx6sl_ddr_freq_base;
	}

	return 0;
}
/* Set the DDR to either 528MHz or 400MHz for MX6q * or 400MHz for MX6DL. */ int set_high_bus_freq(int high_bus_freq) { if (bus_freq_scaling_initialized && bus_freq_scaling_is_active) cancel_delayed_work_sync(&low_bus_freq_handler); if (busfreq_suspended) return 0; if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active) return 0; if (cpu_is_mx6sl()) high_bus_freq = 1; if (high_bus_freq_mode && high_bus_freq) return 0; /* medium bus freq is only supported for MX6DQ */ if (cpu_is_mx6q() && med_bus_freq_mode && !high_bus_freq) return 0; if (cpu_is_mx6dl() && high_bus_freq) high_bus_freq = 0; if (cpu_is_mx6dl() && med_bus_freq_mode) return 0; if ((high_bus_freq_mode && (high_bus_freq || lp_high_freq)) || (med_bus_freq_mode && !high_bus_freq && lp_med_freq && !lp_high_freq)) return 0; if (cpu_is_mx6sl()) { u32 reg; unsigned long flags; u32 ttbr1; spin_lock_irqsave(&freq_lock, flags); /* sync the outer cache. */ outer_sync(); ttbr1 = save_ttbr1(); /* Change DDR freq in IRAM. */ mx6sl_ddr_freq_change_iram(ddr_normal_rate, low_bus_freq_mode); restore_ttbr1(ttbr1); spin_unlock_irqrestore(&freq_lock, flags); /* Set periph_clk to be sourced from pll2_pfd2_400M */ /* First need to set the divider before changing the */ /* parent if parent clock is larger than previous one */ clk_set_rate(ahb_clk, clk_round_rate(ahb_clk, LPAPM_CLK / 3)); clk_set_rate(axi_clk, clk_round_rate(axi_clk, LPAPM_CLK / 2)); clk_set_parent(periph_clk, pll2_400); if (low_bus_freq_mode) { /* Now move ARM to be sourced from PLL2_400 too. */ clk_set_parent(pll1_sw_clk, pll2_400); /* Ensure that the clock will be at original speed. 
*/ reg = __raw_writel(org_arm_podf, MXC_CCM_CACRR); while (__raw_readl(MXC_CCM_CDHIPR)) ; clk_disable(pll1); } high_bus_freq_mode = 1; low_bus_freq_mode = 0; audio_bus_freq_mode = 0; } else { clk_enable(pll3); if (high_bus_freq) { update_ddr_freq(ddr_normal_rate); /* Make sure periph clk's parent also got updated */ clk_set_parent(periph_clk, pll2); if (med_bus_freq_mode) clk_disable(pll2_400); high_bus_freq_mode = 1; med_bus_freq_mode = 0; } else { clk_enable(pll2_400); update_ddr_freq(ddr_med_rate); /* Make sure periph clk's parent also got updated */ clk_set_parent(periph_clk, pll2_400); high_bus_freq_mode = 0; med_bus_freq_mode = 1; } if (audio_bus_freq_mode) clk_disable(pll2_400); /* AXI_CLK is sourced from PLL3_PFD_540 on MX6DL */ if (cpu_is_mx6dl() && clk_get_parent(axi_clk) != pll3_540) clk_set_parent(axi_clk, pll3_540); low_bus_freq_mode = 0; audio_bus_freq_mode = 0; clk_disable(pll3); } return 0; }