/** * omap_vp_disable() - API to disable a particular VP * @voltdm: pointer to the VDD whose VP is to be disabled. * * This API disables a particular voltage processor. Needed by the smartreflex * class drivers. */ void omap_vp_disable(struct voltagedomain *voltdm) { struct omap_vp_instance *vp; u32 vpconfig; int timeout; if (!voltdm || IS_ERR(voltdm)) { pr_warning("%s: VDD specified does not exist!\n", __func__); return; } vp = voltdm->vp; if (!voltdm->read || !voltdm->write) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return; } /* If VP is already disabled, do nothing. Return */ if (!vp->enabled) { pr_warning("%s: Trying to disable VP for vdd_%s when" "it is already disabled\n", __func__, voltdm->name); return; } /* * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us * Depending on if we catch VP in the middle of an SR operation. */ omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle), VP_IDLE_TIMEOUT, timeout); if (timeout >= VP_IDLE_TIMEOUT) pr_warning("%s: vdd_%s idle timedout before disable\n", __func__, voltdm->name); /* Disable VP */ vpconfig = voltdm->read(vp->vpconfig); vpconfig &= ~vp->common->vpconfig_vpenable; voltdm->write(vpconfig, vp->vpconfig); /* * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us */ omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle), VP_IDLE_TIMEOUT, timeout); if (timeout >= VP_IDLE_TIMEOUT) pr_warning("%s: vdd_%s idle timedout after disable\n", __func__, voltdm->name); vp->enabled = false; return; }
/**
 * omap2_cm_wait_module_ready - wait for a module to leave idle or standby
 * @prcm_mod: PRCM module offset
 * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Poll the selected CM_IDLEST bit until it reports "ready" for this SoC
 * family (24xx: bit set; 34xx: bit clear), up to MAX_MODULE_READY_TIME.
 * Returns 0 when the module became ready, -EINVAL if @idlest_id is out of
 * range, or -EBUSY on timeout.
 */
int omap2_cm_wait_module_ready(s16 prcm_mod, u8 idlest_id, u8 idlest_shift)
{
	u32 mask;
	u8 idlest_offs;
	int ready_val = 0;
	int elapsed = 0;

	if (!idlest_id || (idlest_id > ARRAY_SIZE(cm_idlest_offs)))
		return -EINVAL;

	idlest_offs = cm_idlest_offs[idlest_id - 1];
	mask = 1 << idlest_shift;

	/* 24xx signals "ready" with the bit set; 34xx with it cleared */
	if (cpu_is_omap24xx())
		ready_val = mask;
	else if (cpu_is_omap34xx())
		ready_val = 0;
	else
		BUG();

	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, idlest_offs) &
			    mask) == ready_val),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * omap2_prm_deassert_hardreset - deassert a submodule hardreset line and wait
 * @prm_mod: PRM submodule base (e.g. CORE_MOD)
 * @rst_shift: register bit shift corresponding to the reset line to deassert
 * @st_shift: register bit shift for the status of the deasserted submodule
 *
 * Some IPs like dsp or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  Take the submodule out of
 * reset and wait until the PRCM indicates that the reset has completed.
 *
 * Returns 0 upon success, -EINVAL upon an argument error, -EEXIST if the
 * submodule was already out of reset, or -EBUSY if the submodule did not
 * exit reset promptly.
 */
int omap2_prm_deassert_hardreset(s16 prm_mod, u8 rst_shift, u8 st_shift)
{
	u32 rst_mask, st_mask;
	int loops;

	if (!(cpu_is_omap24xx() || cpu_is_omap34xx()))
		return -EINVAL;

	rst_mask = 1 << rst_shift;
	st_mask = 1 << st_shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (omap2_prm_read_mod_bits_shift(prm_mod, OMAP2_RM_RSTCTRL,
					  rst_mask) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	omap2_prm_rmw_mod_reg_bits(0xffffffff, st_mask, prm_mod,
				   OMAP2_RM_RSTST);
	/* de-assert the reset control line */
	omap2_prm_rmw_mod_reg_bits(rst_mask, 0, prm_mod, OMAP2_RM_RSTCTRL);
	/* wait the status to be set */
	omap_test_timeout(omap2_prm_read_mod_bits_shift(prm_mod,
							OMAP2_RM_RSTST,
							st_mask),
			  MAX_MODULE_HARDRESET_WAIT, loops);

	if (loops == MAX_MODULE_HARDRESET_WAIT)
		return -EBUSY;

	return 0;
}
/**
 * omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
 * @reg: physical address of module IDLEST register
 * @mask: value to mask against to determine if the module is active
 * @name: name of the clock (for printk)
 *
 * Returns 1 if the module indicated readiness in time, or 0 if it
 * failed to enable in roughly MAX_MODULE_ENABLE_WAIT microseconds.
 */
int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name)
{
	int i = 0;
	int ena = 0;

	/*
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes
	 */
	if (cpu_is_omap24xx())
		ena = mask;
	else if (cpu_is_omap34xx())
		ena = 0;
	else
		BUG();

	/* Wait for lock */
	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
			  MAX_MODULE_ENABLE_WAIT, i);

	if (i < MAX_MODULE_ENABLE_WAIT)
		pr_debug("cm: Module associated with clock %s ready after %d "
			 "loops\n", name, i);
	else
		pr_err("cm: Module associated with clock %s didn't enable in "
		       "%d tries\n", name, MAX_MODULE_ENABLE_WAIT);

	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
	/* Fix: removed stray ';' after the closing brace */
}
/**
 * am33xx_prm_deassert_hardreset - deassert a submodule hardreset line and
 * wait
 * @shift: register bit shift corresponding to the reset line to deassert
 * @st_shift: register bit shift of the reset-complete status bit to poll
 * @inst: PRM instance register offset (*_INST macro)
 * @rstctrl_offs: RM_RSTCTRL register offset for this module
 * @rstst_offs: RM_RSTST register offset for this module
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success,
 * -EEXIST if the submodule was already out of reset, or -EBUSY if the
 * submodule did not exit reset promptly.
 */
int am33xx_prm_deassert_hardreset(u8 shift, u8 st_shift, s16 inst,
		u16 rstctrl_offs, u16 rstst_offs)
{
	int c;
	/* mask first selects the *status* bit, used to clear stale RSTST */
	u32 mask = 1 << st_shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (am33xx_prm_is_hardreset_asserted(shift, inst, rstctrl_offs) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	am33xx_prm_rmw_reg_bits(0xffffffff, mask, inst, rstst_offs);

	/* de-assert the reset control line (mask now = control bit) */
	mask = 1 << shift;

	am33xx_prm_rmw_reg_bits(mask, 0, inst, rstctrl_offs);

	/* wait the status to be set */
	omap_test_timeout(am33xx_prm_is_hardreset_asserted(st_shift, inst,
							   rstst_offs),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}
/**
 * omap4_prm_deassert_hardreset - deassert a submodule hardreset line and wait
 * @rstctrl_reg: RM_RSTCTRL register address for this module
 * @shift: register bit shift corresponding to the reset line to deassert
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success or
 * -EINVAL upon an argument error, -EEXIST if the submodule was already out
 * of reset, or -EBUSY if the submodule did not exit reset promptly.
 */
int omap4_prm_deassert_hardreset(void __iomem *rstctrl_reg, u8 shift)
{
	u32 mask;
	void __iomem *rstst_reg;
	int c;

	if (!cpu_is_omap44xx() || !rstctrl_reg)
		return -EINVAL;

	/* On OMAP4 the RSTST register sits at a fixed offset from RSTCTRL */
	rstst_reg = rstctrl_reg + OMAP4_RST_CTRL_ST_OFFSET;

	mask = 1 << shift;

	/* Check the current status to avoid de-asserting the line twice */
	if (omap4_prm_read_bits_shift(rstctrl_reg, mask) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	omap4_prm_rmw_reg_bits(0xffffffff, mask, rstst_reg);
	/* de-assert the reset control line */
	omap4_prm_rmw_reg_bits(mask, 0, rstctrl_reg);
	/* wait the status to be set */
	omap_test_timeout(omap4_prm_read_bits_shift(rstst_reg, mask),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update CORE DPLL M2 divider. M2 divider
 * is used to clock external DDR and its reconfiguration on frequency change
 * is managed through a hardware sequencer. This is managed by the PRCM with
 * EMIF using shadow registers. If rate specified matches DPLL_CORE's bypass
 * clock rate then put it in Low-Power Bypass.
 * Returns negative int on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;

	if (!clk || !rate)
		return -EINVAL;

	/* Only accept rates that the clksel divider can hit exactly */
	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");

	/* put MEMIF domain in SW_WKUP & increment usecount for clks */
	omap2_clkdm_wakeup(l3_emif_clkdm);

	/*
	 * maybe program core m5 divider here
	 * definitely program m3, m6 & m7 dividers here
	 */

	/*
	 * DDR clock = DPLL_CORE_M2_CK / 2. Program EMIF timing
	 * parameters in EMIF shadow registers for validrate divided
	 * by 2.
	 */
	omap_emif_setup_registers(validrate / 2, LPDDR2_VOLTAGE_STABLE);

	/*
	 * program DPLL_CORE_M2_DIV with same value as the one already
	 * in direct register and lock DPLL_CORE.  Setting FREQ_UPDATE
	 * kicks off the hardware sequencer.
	 */
	shadow_freq_cfg1 =
		(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
		(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
		(1 << OMAP4430_DLL_RESET_SHIFT) |
		(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied (HW clears FREQ_UPDATE) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			    & OMAP4430_FREQ_UPDATE_MASK) == 0),
			  MAX_FREQ_UPDATE_TIMEOUT, i);

	/* put MEMIF clkdm back to HW_AUTO & decrement usecount for clks */
	omap2_clkdm_allow_idle(l3_emif_clkdm);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
		       __func__);
		return -1;
	}

	return 0;
}
/**
 * am33xx_cm_wait_module_ready - wait for a module to be in 'func' state
 * @inst: CM instance register offset (*_INST macro)
 * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 *
 * Poll the module's IDLEST field until it reports the functional state.
 * While the module is in any non-functional state (trans, idle or
 * disabled) the sysconfig register cannot be accessed and touching it
 * would likely cause an "imprecise external abort".  Returns 0 once the
 * module is ready, or -EBUSY on timeout.
 */
int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
	int elapsed = 0;

	omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * omap4_prcm_freq_update - set freq_update bit
 *
 * Programs the CM shadow registers to update EMIF
 * parameters.  For a few use cases only a few registers need to
 * be updated using the PRCM freq update sequence.
 * EMIF read-idle control and zq-config need to be
 * updated for temperature alerts and voltage change.
 * Returns -EINVAL if the EMIF clockdomain was never looked up,
 * -1 on a sequencer timeout, and 0 on success.
 */
int omap4_prcm_freq_update(void)
{
	u32 shadow_freq_cfg1;
	int i = 0;
	unsigned long flags;

	if (!l3_emif_clkdm) {
		pr_err("%s: clockdomain lookup failed\n", __func__);
		return -EINVAL;
	}

	/* Serialize shadow-register access against concurrent updaters */
	spin_lock_irqsave(&l3_emif_lock, flags);
	/* Configures MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/* Disable DDR self refresh (Errata ID: i728) */
	omap_emif_frequency_pre_notify();

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
			    (1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied (HW clears FREQ_UPDATE) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			    & OMAP4430_FREQ_UPDATE_MASK) == 0),
			  MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Re-enable DDR self refresh */
	omap_emif_frequency_post_notify();

	/* Let the MEMIF clockdomain idle again */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		/* Dump EMIF/DMM state to help diagnose the stuck sequencer */
		pr_err("%s: Frequency update failed (call from %pF)\n",
		       __func__, (void *)_RET_IP_);
		pr_err("CLKCTRL: EMIF_1=0x%x EMIF_2=0x%x DMM=0x%x\n",
		       __raw_readl(OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL),
		       __raw_readl(OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL),
		       __raw_readl(OMAP4430_CM_MEMIF_DMM_CLKCTRL));
		emif_dump(0);
		emif_dump(1);
		return -1;
	}

	return 0;
}
/**
 * am33xx_cm_wait_module_ready - wait for a module to be in 'func' state
 * @inst: Offset of CM instance associated with
 * @clkctrl_reg: CLKCTRL offset from CM instance base
 *
 * Poll the module's CLKCTRL IDLEST field until it reads 0x0 (func), or
 * give up after MAX_MODULE_READY_TIME.  While the module is in any other
 * state its sysconfig register cannot be accessed and touching it would
 * likely cause an "imprecise external abort".
 *
 * Module idle state:
 *   0x0 func:     fully functional, including OCP
 *   0x1 trans:    performing a wakeup/sleep transition or sleep abortion
 *   0x2 idle:     only the OCP part is idle; functional with separate fclk
 *   0x3 disabled: disabled and cannot be accessed
 *
 * Returns 0 once ready, -EBUSY on timeout.
 */
int am33xx_cm_wait_module_ready(u16 inst, u16 clkctrl_reg)
{
	int elapsed = 0;

	omap_test_timeout(((__raw_readl(AM33XX_CM_REGADDR(inst, clkctrl_reg)) &
			    AM33XX_IDLEST_MASK) == 0),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * omap4_cminst_wait_module_ready - wait for a module to be in 'func' state
 * @part: PRCM partition ID that the CM_CLKCTRL register exists in
 * @inst: CM instance register offset (*_INST macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 * @bit_shift: bit shift for the register, ignored for OMAP4+
 *
 * Poll until the module's IDLEST reports the functional state.  While the
 * module is in a non-functional state (trans, idle or disabled) its
 * sysconfig cannot be accessed and would likely cause an "imprecise
 * external abort".  Returns 0 once ready, -EBUSY on timeout.
 */
static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
					  u8 bit_shift)
{
	int elapsed = 0;

	omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * am33xx_cm_wait_module_idle - wait for a module to be in 'disabled'
 * state
 * @part: CM partition, ignored for AM33xx
 * @inst: CM instance register offset (*_INST macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 * @bit_shift: bit shift for the register, ignored for AM33xx
 *
 * Poll until the module's IDLEST reads 'disabled'.  Some PRCM
 * transitions, like reset assertion or parent clock de-activation, must
 * wait for the module to be fully disabled first.  Returns 0 once
 * disabled, -EBUSY on timeout.
 */
static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
				      u8 bit_shift)
{
	int elapsed = 0;

	omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) ==
			   CLKCTRL_IDLEST_DISABLED),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * omap4_cm_wait_module_ready - wait for a module to be in 'func' state
 * @clkctrl_reg: CLKCTRL module address
 *
 * Poll the CLKCTRL IDLEST field until it reads 0x0 (func).  While the
 * module is in any other state the sysconfig register cannot be accessed
 * and touching it would likely cause an "imprecise external abort".
 *
 * Module idle state:
 *   0x0 func:     fully functional, including OCP
 *   0x1 trans:    performing a wakeup/sleep transition or sleep abortion
 *   0x2 idle:     only the OCP part is idle; functional with separate fclk
 *   0x3 disabled: disabled and cannot be accessed
 *
 * A NULL @clkctrl_reg is treated as "nothing to wait for" and returns 0.
 * Returns 0 once ready, -EBUSY on timeout.
 *
 * TODO: Need to handle module accessible in idle state
 */
int omap4_cm_wait_module_ready(void __iomem *clkctrl_reg)
{
	int elapsed = 0;

	if (!clkctrl_reg)
		return 0;

	omap_test_timeout(((__raw_readl(clkctrl_reg) &
			    OMAP4430_IDLEST_MASK) == 0),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * omap4_cminst_wait_module_ready - wait for a module to be in 'func' state
 * @part: PRCM partition ID that the CM_CLKCTRL register exists in
 * @inst: CM instance register offset (*_INST macro)
 * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 *
 * Poll until the module's IDLEST reports the functional state.  While the
 * module is in a non-functional state (trans, idle or disabled) its
 * sysconfig cannot be accessed and would likely cause an "imprecise
 * external abort".  A zero @clkctrl_offs means there is nothing to wait
 * for.  Returns 0 once ready, -EBUSY on timeout.
 */
int omap4_cminst_wait_module_ready(u8 part, u16 inst, u16 cdoffs,
				   u16 clkctrl_offs)
{
	int elapsed = 0;

	if (!clkctrl_offs)
		return 0;

	omap_test_timeout(_is_module_ready(part, inst, cdoffs, clkctrl_offs),
			  MAX_MODULE_READY_TIME, elapsed);

	if (elapsed >= MAX_MODULE_READY_TIME)
		return -EBUSY;

	return 0;
}
/**
 * omap4_cminst_wait_module_idle - wait for a module to be in 'disabled'
 * state
 * @part: PRCM partition ID that the CM_CLKCTRL register exists in
 * @inst: CM instance register offset (*_INST macro)
 * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 *
 * Poll until the module's IDLEST reads 'disabled'.  Some PRCM
 * transitions, like reset assertion or parent clock de-activation, must
 * wait for the module to be fully disabled first.  A zero @clkctrl_offs
 * means there is nothing to wait for.  Returns 0 once disabled, -EBUSY
 * on timeout.
 */
int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
				  u16 clkctrl_offs)
{
	int elapsed = 0;

	if (!clkctrl_offs)
		return 0;

	omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
			   CLKCTRL_IDLEST_DISABLED),
			  MAX_MODULE_DISABLE_TIME, elapsed);

	if (elapsed >= MAX_MODULE_DISABLE_TIME)
		return -EBUSY;

	return 0;
}
/** * omap_vp_disable() - API to disable a particular VP * @voltdm: pointer to the VDD whose VP is to be disabled. * * This API disables a particular voltage processor. Needed by the smartreflex * class drivers. */ void omap_vp_disable(struct voltagedomain *voltdm) { struct omap_vdd_info *vdd; u32 vpconfig; u16 mod; int timeout; if (!voltdm || IS_ERR(voltdm)) { pr_warning("%s: VDD specified does not exist!\n", __func__); return; } vdd = container_of(voltdm, struct omap_vdd_info, voltdm); if (!vdd->read_reg || !vdd->write_reg) { pr_err("%s: No read/write API for accessing vdd_%s regs\n", __func__, voltdm->name); return; } mod = vdd->vp_reg.prm_mod; /* If VP is already disabled, do nothing. Return */ if (!vdd->vp_enabled) { pr_warning("%s: Trying to disable VP for vdd_%s when" "it is already disabled\n", __func__, voltdm->name); return; } /* Disable VP */ vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig); vpconfig &= ~vdd->vp_reg.vpconfig_vpenable; vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig); /* * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us */ omap_test_timeout((vdd->read_reg(mod, vdd->vp_offs.vstatus)), VP_IDLE_TIMEOUT, timeout); if (timeout >= VP_IDLE_TIMEOUT) pr_warning("%s: vdd_%s idle timedout\n", __func__, voltdm->name); vdd->vp_enabled = false; return; }
/**
 * _wait_idlest_generic - wait for a module to leave the idle state
 * @clk: module clock to wait for (needed for register offsets)
 * @reg: virtual address of module IDLEST register
 * @mask: value to mask against to determine if the module is active
 * @idlest: idle state indicator (0 or 1) for the clock
 * @name: name of the clock (for printk)
 *
 * Wait for a module to leave idle, where its idle-status register is
 * not inside the CM module. Returns 1 if the module left idle
 * promptly, or 0 if the module did not leave idle before the timeout
 * elapsed. XXX Deprecated - should be moved into drivers for the
 * individual IP block that the IDLEST register exists in.
 */
static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg,
				u32 mask, u8 idlest, const char *name)
{
	int i = 0, ena = 0;

	/* "ready" is the masked bits clear when @idlest is 1, set when 0 */
	ena = (idlest) ? 0 : mask;

	omap_test_timeout(((omap2_clk_readl(clk, reg) & mask) == ena),
			  MAX_MODULE_ENABLE_WAIT, i);

	if (i < MAX_MODULE_ENABLE_WAIT)
		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
			 name, i);
	else
		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
		       name, MAX_MODULE_ENABLE_WAIT);

	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
	/* Fix: removed stray ';' after the closing brace */
}
/** * _vp_wait_for_idle() - wait for voltage processor to idle * @voltdm: voltage domain * @vp: voltage processor instance * * In some conditions, it is important to ensure that Voltage Processor * is idle before performing operations on the Voltage Processor(VP). * This is primarily to ensure that VP state machine does not enter into * invalid state. * * Returns -ETIMEDOUT if timeout occurs - This could be critical failure * as it indicates that Voltage processor might have it's state machine * stuck up without recovering out(theoretically should never happen * ofcourse). Returns 0 if idle state is detected. * * Note: callers are expected to ensure requisite checks are performed * on the pointers passed. */ static inline int _vp_wait_for_idle(struct voltagedomain *voltdm, struct omap_vp_instance *vp) { int timeout; omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle), VP_IDLE_TIMEOUT, timeout); if (timeout >= VP_IDLE_TIMEOUT) { /* Dont spam the console but ensure we catch attention */ pr_warn_ratelimited("%s: vdd_%s idle timedout\n", __func__, voltdm->name); WARN_ONCE("vdd_%s idle timedout\n", voltdm->name); return -ETIMEDOUT; } return 0; }
/**
 * omap3xxx_cm_wait_module_ready - wait for a module to leave idle or standby
 * @part: PRCM partition, ignored for OMAP3
 * @prcm_mod: PRCM module offset
 * @idlest_id: CM_IDLESTx register ID (i.e., x = 1, 2, 3)
 * @idlest_shift: shift of the bit in the CM_IDLEST* register to check
 *
 * Wait for the PRCM to indicate that the module identified by
 * (@prcm_mod, @idlest_id, @idlest_shift) is clocked.  Return 0 upon
 * success, -EINVAL if @idlest_id is out of range, or -EBUSY if the
 * module doesn't enable in time.
 */
static int omap3xxx_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_id,
					 u8 idlest_shift)
{
	/*
	 * On OMAP3 "ready" is the IDLEST bit cleared, so the expected
	 * value is always 0.  (Fix: removed a redundant dead store that
	 * re-assigned ena = 0 after its initializer.)
	 */
	int ena = 0, i = 0;
	u8 cm_idlest_reg;
	u32 mask;

	if (!idlest_id || (idlest_id > ARRAY_SIZE(omap3xxx_cm_idlest_offs)))
		return -EINVAL;

	cm_idlest_reg = omap3xxx_cm_idlest_offs[idlest_id - 1];

	mask = 1 << idlest_shift;

	omap_test_timeout(((omap2_cm_read_mod_reg(prcm_mod, cm_idlest_reg) &
			    mask) == ena), MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
/**
 * omap4_set_freq_update - set freq_update bit
 *
 * Programs the CM shadow registers to update EMIF
 * parameters.  For a few use cases only a few registers need to
 * be updated using the PRCM freq update sequence.
 * EMIF read-idle control and zq-config need to be
 * updated for temperature alerts and voltage change.
 * Returns -1 on error and 0 on success.
 */
int omap4_set_freq_update(void)
{
	u32 shadow_freq_cfg1;
	int i = 0;

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");

	/* Configures MEMIF domain in SW_WKUP */
	omap2_clkdm_wakeup(l3_emif_clkdm);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
			    (1 << OMAP4430_FREQ_UPDATE_SHIFT);
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied (HW clears FREQ_UPDATE) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			    & OMAP4430_FREQ_UPDATE_MASK) == 0),
			  MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Let the MEMIF clockdomain idle again */
	omap2_clkdm_allow_idle(l3_emif_clkdm);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update failed\n", __func__);
		return -1;
	}

	return 0;
}
/**
 * omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
 * @reg: physical address of module IDLEST register
 * @mask: value to mask against to determine if the module is active
 * @idlest: idle state indicator (0 or 1) for the clock
 * @name: name of the clock (for printk)
 *
 * Returns 1 if the module indicated readiness in time, or 0 if it
 * failed to enable in roughly MAX_MODULE_ENABLE_WAIT microseconds.
 *
 * XXX This function is deprecated.  It should be removed once the
 * hwmod conversion is complete.
 */
int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest,
			 const char *name)
{
	int i = 0;
	int ena = 0;

	/* @idlest selects polarity: 1 means "ready" reads as 0 */
	if (idlest)
		ena = 0;
	else
		ena = mask;

	/* Wait for lock */
	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
			  MAX_MODULE_ENABLE_WAIT, i);

	if (i < MAX_MODULE_ENABLE_WAIT)
		pr_debug("cm: Module associated with clock %s ready after %d "
			 "loops\n", name, i);
	else
		pr_err("cm: Module associated with clock %s didn't enable in "
		       "%d tries\n", name, MAX_MODULE_ENABLE_WAIT);

	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
	/* Fix: removed stray ';' after the closing brace */
}
/**
 * omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and
 * wait
 * @shift: register bit shift corresponding to the reset line to deassert
 * @part: PRCM partition ID
 * @inst: PRM instance register offset
 * @rstctrl_offs: RM_RSTCTRL register offset for this module
 *
 * Some IPs like dsp, ipu or iva contain processors that require an HW
 * reset line to be asserted / deasserted in order to fully enable the
 * IP.  These modules may have multiple hard-reset lines that reset
 * different 'submodules' inside the IP block.  This function will
 * take the submodule out of reset and wait until the PRCM indicates
 * that the reset has completed before returning.  Returns 0 upon success,
 * -EEXIST if the submodule was already out of reset, or -EBUSY if the
 * submodule did not exit reset promptly.
 */
int omap4_prminst_deassert_hardreset(u8 shift, u8 part, s16 inst,
				     u16 rstctrl_offs)
{
	int c;
	u32 mask = 1 << shift;
	/* RSTST sits at a fixed offset from RSTCTRL on OMAP4 */
	u16 rstst_offs = rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET;

	/* Check the current status to avoid de-asserting the line twice */
	if (omap4_prminst_is_hardreset_asserted(shift, part, inst,
						rstctrl_offs) == 0)
		return -EEXIST;

	/* Clear the reset status by writing 1 to the status bit */
	omap4_prminst_rmw_inst_reg_bits(0xffffffff, mask, part, inst,
					rstst_offs);
	/* de-assert the reset control line */
	omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs);
	/* wait the status to be set */
	omap_test_timeout(omap4_prminst_is_hardreset_asserted(shift, part,
							      inst,
							      rstst_offs),
			  MAX_MODULE_HARDRESET_WAIT, c);

	return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
}
/* VP force update method of voltage scaling */ static int vp_forceupdate_scale_voltage(struct omap_vdd_info *vdd, unsigned long target_volt) { u32 vpconfig; u16 mod, ocp_mod; u8 target_vsel, current_vsel, prm_irqst_reg; int ret, timeout = 0; ret = _pre_volt_scale(vdd, target_volt, &target_vsel, ¤t_vsel); if (ret) return ret; mod = vdd->vp_reg.prm_mod; ocp_mod = vdd->ocp_mod; prm_irqst_reg = vdd->prm_irqst_reg; /* * Clear all pending TransactionDone interrupt/status. Typical latency * is <3us */ while (timeout++ < VP_TRANXDONE_TIMEOUT) { vdd->write_reg(vdd->vp_reg.tranxdone_status, ocp_mod, prm_irqst_reg); if (!(vdd->read_reg(ocp_mod, prm_irqst_reg) & vdd->vp_reg.tranxdone_status)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) { pr_warning("%s: vdd_%s TRANXDONE timeout exceeded." "Voltage change aborted", __func__, vdd->voltdm.name); return -ETIMEDOUT; } /* Configure for VP-Force Update */ vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig); vpconfig &= ~(vdd->vp_reg.vpconfig_initvdd | vdd->vp_reg.vpconfig_forceupdate | vdd->vp_reg.vpconfig_initvoltage_mask); vpconfig |= ((target_vsel << vdd->vp_reg.vpconfig_initvoltage_shift)); vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig); /* Trigger initVDD value copy to voltage processor */ vpconfig |= vdd->vp_reg.vpconfig_initvdd; vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig); /* Force update of voltage */ vpconfig |= vdd->vp_reg.vpconfig_forceupdate; vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig); /* * Wait for TransactionDone. Typical latency is <200us. * Depends on SMPSWAITTIMEMIN/MAX and voltage change */ timeout = 0; omap_test_timeout((vdd->read_reg(ocp_mod, prm_irqst_reg) & vdd->vp_reg.tranxdone_status), VP_TRANXDONE_TIMEOUT, timeout); if (timeout >= VP_TRANXDONE_TIMEOUT) pr_err("%s: vdd_%s TRANXDONE timeout exceeded." 
"TRANXDONE never got set after the voltage update\n", __func__, vdd->voltdm.name); _post_volt_scale(vdd, target_volt, target_vsel, current_vsel); /* * Disable TransactionDone interrupt , clear all status, clear * control registers */ timeout = 0; while (timeout++ < VP_TRANXDONE_TIMEOUT) { vdd->write_reg(vdd->vp_reg.tranxdone_status, ocp_mod, prm_irqst_reg); if (!(vdd->read_reg(ocp_mod, prm_irqst_reg) & vdd->vp_reg.tranxdone_status)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) pr_warning("%s: vdd_%s TRANXDONE timeout exceeded while trying" "to clear the TRANXDONE status\n", __func__, vdd->voltdm.name); vpconfig = vdd->read_reg(mod, vdd->vp_offs.vpconfig); /* Clear initVDD copy trigger bit */ vpconfig &= ~vdd->vp_reg.vpconfig_initvdd;; vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig); /* Clear force bit */ vpconfig &= ~vdd->vp_reg.vpconfig_forceupdate; vdd->write_reg(vpconfig, mod, vdd->vp_offs.vpconfig); return 0; }
/**
 * omap4_core_dpll_set_rate - set the rate for the CORE DPLL
 * @clk: struct clk * of the DPLL to set
 * @rate: rounded target rate
 *
 * Program the CORE DPLL, including handling of EMIF frequency changes on M2
 * divider.  The actual switch is performed by the PRCM hardware sequencer
 * via the CM_SHADOW_FREQ_CONFIG1/2 registers; this function only fills the
 * shadow registers and waits for FREQ_UPDATE to self-clear.
 * Returns 0 on success, otherwise a negative error code.
 */
int omap4_core_dpll_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0, m2_div, m5_div;
	u32 mask, reg;
	u32 shadow_freq_cfg1 = 0, shadow_freq_cfg2 = 0;
	struct clk *new_parent;
	struct dpll_data *dd;

	if (!clk || !rate)
		return -EINVAL;

	if (!clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (rate == clk->rate)
		return 0;

	/* enable reference and bypass clocks */
	omap2_clk_enable(dd->clk_bypass);
	omap2_clk_enable(dd->clk_ref);

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm)
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
	if (!dpll_core_m2_ck)
		dpll_core_m2_ck = clk_get(NULL, "dpll_core_m2_ck");
	if (!dpll_core_m5x2_ck)
		dpll_core_m5x2_ck = clk_get(NULL, "dpll_core_m5x2_ck");
	if (!gpmc_ick)
		gpmc_ick = clk_get(NULL, "gpmc_ick");

	/* Make sure MEMIF clkdm is in SW_WKUP & GPMC clocks are active */
	omap2_clkdm_wakeup(l3_emif_clkdm);
	omap2_clk_enable(gpmc_ick);

	/* FIXME set m3, m6 & m7 rates here? */

	/* check for bypass rate */
	if (rate == dd->clk_bypass->rate &&
	    clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)) {
		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2. Program EMIF timing
		 * parameters in EMIF shadow registers for bypass clock rate
		 * divided by 2
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 2 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(
				dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program CM_DIV_M2_DPLL_CORE.DPLL_CLKOUT_DIV for divide by
		 * two and put DPLL_CORE into LP Bypass
		 */
		m2_div = omap4_prm_read_bits_shift(
				dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOW_POWER_BYPASS <<
			 OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_bypass;
	} else {
		if (dd->last_rounded_rate != rate)
			rate = clk->round_rate(clk, rate);

		if (dd->last_rounded_rate == 0)
			return -EINVAL;

		/*
		 * DDR clock = DPLL_CORE_M2_CK / 2. Program EMIF timing
		 * parameters in EMIF shadow registers for rate divided
		 * by 2.
		 */
		omap_emif_setup_registers(rate / 2, LPDDR2_VOLTAGE_STABLE);

		/*
		 * FIXME skipping bypass part of omap3_noncore_dpll_program.
		 * also x-loader's configure_core_dpll_no_lock bypasses
		 * DPLL_CORE directly through CM_CLKMODE_DPLL_CORE via MN
		 * bypass; no shadow register necessary!
		 */
		mask = (dd->mult_mask | dd->div1_mask);
		reg  = (dd->last_rounded_m << __ffs(dd->mult_mask)) |
		       ((dd->last_rounded_n - 1) << __ffs(dd->div1_mask));

		/* program mn divider values */
		omap4_prm_rmw_reg_bits(mask, reg, dd->mult_div1_reg);

		/*
		 * program CM_DIV_M5_DPLL_CORE.DPLL_CLKOUT_DIV into shadow
		 * register as well as L3_CLK freq and update GPMC frequency
		 *
		 * HACK: hardcode L3_CLK = CORE_CLK / 2 for DPLL cascading
		 * HACK: hardcode CORE_CLK = CORE_X2_CLK / 1 for DPLL
		 * cascading
		 */
		m5_div = omap4_prm_read_bits_shift(
				dpll_core_m5x2_ck->clksel_reg,
				dpll_core_m5x2_ck->clksel_mask);

		shadow_freq_cfg2 =
			(m5_div << OMAP4430_DPLL_CORE_M5_DIV_SHIFT) |
			(1 << OMAP4430_CLKSEL_L3_SHADOW_SHIFT) |
			(0 << OMAP4430_CLKSEL_CORE_1_1_SHIFT) |
			(1 << OMAP4430_GPMC_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

		/*
		 * program DPLL_CORE_M2_DIV with same value as the one already
		 * in direct register and lock DPLL_CORE
		 */
		m2_div = omap4_prm_read_bits_shift(
				dpll_core_m2_ck->clksel_reg,
				dpll_core_m2_ck->clksel_mask);

		shadow_freq_cfg1 =
			(m2_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_DLL_RESET_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
		__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

		new_parent = dd->clk_ref;
	}

	/* wait for the configuration to be applied (HW clears FREQ_UPDATE) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
			    & OMAP4430_FREQ_UPDATE_MASK) == 0),
			  MAX_FREQ_UPDATE_TIMEOUT, i);

	/* clear GPMC_FREQ_UPDATE bit */
	shadow_freq_cfg2 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG2);
	shadow_freq_cfg2 &= ~1;	/* NOTE(review): clears bit 0 - presumably
				 * GPMC_FREQ_UPDATE; confirm against mask */
	__raw_writel(shadow_freq_cfg2, OMAP4430_CM_SHADOW_FREQ_CONFIG2);

	/*
	 * Switch the parent clock in the heirarchy, and make sure that the
	 * new parent's usecount is correct. Note: we enable the new parent
	 * before disabling the old to avoid any unnecessary hardware
	 * disable->enable transitions.
	 */
	if (clk->usecount) {
		omap2_clk_enable(new_parent);
		omap2_clk_disable(clk->parent);
	}
	clk_reparent(clk, new_parent);
	clk->rate = rate;

	/* disable reference and bypass clocks */
	omap2_clk_disable(dd->clk_bypass);
	omap2_clk_disable(dd->clk_ref);

	/* Configures MEMIF domain back to HW_WKUP & let GPMC clocks to idle */
	omap2_clkdm_allow_idle(l3_emif_clkdm);
	omap2_clk_disable(gpmc_ick);

	/*
	 * FIXME PRCM functional spec says we should set GPMC_FREQ_UPDATE bit
	 * here, but we're not even handling CM_SHADOW_FREQ_CONFIG2 at all.
	 */
	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
		       __func__);
		return -1;
	}

	return 0;
}
/* VP force update method of voltage scaling */ int omap_vp_forceupdate_scale(struct voltagedomain *voltdm, unsigned long target_volt) { struct omap_vp_instance *vp = voltdm->vp; u32 vpconfig; u8 target_vsel, current_vsel; int ret, timeout = 0; ret = omap_vc_pre_scale(voltdm, target_volt, &target_vsel, ¤t_vsel); if (ret) return ret; /* * Clear all pending TransactionDone interrupt/status. Typical latency * is <3us */ while (timeout++ < VP_TRANXDONE_TIMEOUT) { vp->common->ops->clear_txdone(vp->id); if (!vp->common->ops->check_txdone(vp->id)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) { pr_warning("%s: vdd_%s TRANXDONE timeout exceeded." "Voltage change aborted", __func__, voltdm->name); return -ETIMEDOUT; } vpconfig = _vp_set_init_voltage(voltdm, target_volt); /* Force update of voltage */ voltdm->write(vpconfig | vp->common->vpconfig_forceupdate, voltdm->vp->vpconfig); /* * Wait for TransactionDone. Typical latency is <200us. * Depends on SMPSWAITTIMEMIN/MAX and voltage change */ timeout = 0; omap_test_timeout(vp->common->ops->check_txdone(vp->id), VP_TRANXDONE_TIMEOUT, timeout); if (timeout >= VP_TRANXDONE_TIMEOUT) pr_err("%s: vdd_%s TRANXDONE timeout exceeded." "TRANXDONE never got set after the voltage update\n", __func__, voltdm->name); omap_vc_post_scale(voltdm, target_volt, target_vsel, current_vsel); /* * Disable TransactionDone interrupt , clear all status, clear * control registers */ timeout = 0; while (timeout++ < VP_TRANXDONE_TIMEOUT) { vp->common->ops->clear_txdone(vp->id); if (!vp->common->ops->check_txdone(vp->id)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) pr_warning("%s: vdd_%s TRANXDONE timeout exceeded while trying" "to clear the TRANXDONE status\n", __func__, voltdm->name); /* Clear force bit */ voltdm->write(vpconfig, vp->vpconfig); return 0; }
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2 divider.
 * The M2 divider clocks external DDR, and its reconfiguration on frequency
 * change is managed through a hardware sequencer coordinated by the PRCM
 * and EMIF using shadow registers.  Includes the workaround for errata
 * i728 (DDR self-refresh disabled around the frequency change).
 * Returns -EINVAL on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
	unsigned long flags;

	if (!clk || !rate)
		return -EINVAL;

	/* Only exact divider-achievable rates are accepted */
	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm) {
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
		if (!l3_emif_clkdm) {
			pr_err("%s: clockdomain lookup failed\n", __func__);
			return -EINVAL;
		}
	}

	/* Serialize against other users of the shadow registers */
	spin_lock_irqsave(&l3_emif_lock, flags);

	/*
	 * Errata ID: i728
	 *
	 * DESCRIPTION:
	 *
	 * If during a small window the following three events occur:
	 *
	 * 1) The EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter expires
	 * 2) Frequency change update is requested CM_SHADOW_FREQ_CONFIG1
	 *    FREQ_UPDATE set to 1
	 * 3) OCP access is requested
	 *
	 * There will be an unstable clock on the DDR interface.
	 *
	 * WORKAROUND:
	 *
	 * Prevent event 1) while event 2) is happening.
	 *
	 * Disable the self-refresh when requesting a frequency change.
	 * Before requesting a frequency change, program
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
	 * (omap_emif_frequency_pre_notify)
	 *
	 * When the frequency change is completed, reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
	 * (omap_emif_frequency_post_notify)
	 */
	omap_emif_frequency_pre_notify();

	/* Configures MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/*
	 * Program EMIF timing parameters in EMIF shadow registers
	 * for the targeted DDR clock.
	 * DDR Clock = core_dpll_m2 / 2
	 */
	omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - DLL_RESET=1 (DLL must be reset upon frequency change)
	 * - DPLL_CORE_M2_DIV with same value as the one already
	 *	in direct register
	 * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
			(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied (FREQ_UPDATE self-clears) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Configures MEMIF domain back to HW_WKUP */
	clkdm_allow_idle(l3_emif_clkdm);

	/* Re-enable DDR self refresh */
	omap_emif_frequency_post_notify();

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	/* Update the clock change */
	clk->rate = validrate;

	return 0;
}
/**
 * omap4_prcm_freq_update - set the FREQ_UPDATE bit
 *
 * Programs the CM shadow registers to update EMIF parameters.  For a few
 * use cases only a few registers need updating via the PRCM freq-update
 * sequence: EMIF read-idle control and zq-config need to be updated for
 * temperature alerts and voltage changes.
 * Returns -EINVAL if the clockdomain was never looked up, -1 if the
 * hardware sequence times out, and 0 on success.
 */
int omap4_prcm_freq_update(void)
{
	u32 shadow_freq_cfg1;
	int i = 0;
	unsigned long flags;

	/*
	 * l3_emif_clkdm is cached by the set_rate paths; if it is still
	 * NULL, no one has initialized it and we cannot proceed.
	 */
	if (!l3_emif_clkdm) {
		pr_err("%s: clockdomain lookup failed\n", __func__);
		return -EINVAL;
	}

	/* Serialize against other users of the shadow registers */
	spin_lock_irqsave(&l3_emif_lock, flags);

	/*
	 * Errata ID: i728
	 *
	 * DESCRIPTION:
	 *
	 * If during a small window the following three events occur:
	 *
	 * 1) The EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter expires
	 * 2) Frequency change update is requested CM_SHADOW_FREQ_CONFIG1
	 *    FREQ_UPDATE set to 1
	 * 3) OCP access is requested
	 *
	 * There will be an unstable clock on the DDR interface.
	 *
	 * WORKAROUND:
	 *
	 * Prevent event 1) while event 2) is happening.
	 *
	 * Disable the self-refresh when requesting a frequency change.
	 * Before requesting a frequency change, program
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
	 * (omap_emif_frequency_pre_notify)
	 *
	 * When the frequency change is completed, reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
	 * (omap_emif_frequency_post_notify)
	 */
	omap_emif_frequency_pre_notify();

	/* Configures MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
			   (1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied (FREQ_UPDATE self-clears) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Configures MEMIF domain back to HW_WKUP */
	clkdm_allow_idle(l3_emif_clkdm);

	/* Re-enable DDR self refresh */
	omap_emif_frequency_post_notify();

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		/* Dump CLKCTRL state and EMIF registers to aid debugging */
		pr_err("%s: Frequency update failed (call from %pF)\n",
				__func__, (void *)_RET_IP_);
		pr_err("CLKCTRL: EMIF_1=0x%x EMIF_2=0x%x DMM=0x%x\n",
				__raw_readl(OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL),
				__raw_readl(OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL),
				__raw_readl(OMAP4430_CM_MEMIF_DMM_CLKCTRL));
		emif_dump(0);
		emif_dump(1);
		return -1;
	}

	return 0;
}
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2 divider.
 * The M2 divider clocks external DDR, and its reconfiguration on frequency
 * change is managed through a hardware sequencer coordinated by the PRCM
 * and EMIF using shadow registers.
 * Returns -EINVAL on error and 0 on success.
 *
 * NOTE(review): unlike the other copy of this function in this file, this
 * variant does not apply the errata i728 workaround
 * (omap_emif_frequency_pre/post_notify around the FREQ_UPDATE) — confirm
 * whether this tree needs it.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
	unsigned long flags;

	if (!clk || !rate)
		return -EINVAL;

	/* Only exact divider-achievable rates are accepted */
	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm) {
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
		if (!l3_emif_clkdm) {
			pr_err("%s: clockdomain lookup failed\n", __func__);
			return -EINVAL;
		}
	}

	/* Serialize against other users of the shadow registers */
	spin_lock_irqsave(&l3_emif_lock, flags);

	/* Configures MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/*
	 * Program EMIF timing parameters in EMIF shadow registers
	 * for the targeted DDR clock.
	 * DDR Clock = core_dpll_m2 / 2
	 */
	omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - DLL_RESET=1 (DLL must be reset upon frequency change)
	 * - DPLL_CORE_M2_DIV with same value as the one already
	 *	in direct register
	 * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
			(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied (FREQ_UPDATE self-clears) */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Configures MEMIF domain back to HW_WKUP */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	/* Update the clock change */
	clk->rate = validrate;

	return 0;
}
/* VP force update method of voltage scaling */ int omap_vp_forceupdate_scale(struct voltagedomain *voltdm, struct omap_volt_data *target_v) { struct omap_vp_instance *vp; u32 vpconfig; u8 target_vsel, current_vsel; int ret, timeout = 0; unsigned long target_volt; if (IS_ERR_OR_NULL(voltdm)) { pr_err("%s: VDD specified does not exist!\n", __func__); return -EINVAL; } if (IS_ERR_OR_NULL(voltdm->write)) { pr_err("%s: No write API for writing vdd_%s regs\n", __func__, voltdm->name); return -EINVAL; } if (IS_ERR_OR_NULL(target_v)) { pr_err("%s: No target_v info to scale vdd_%s\n", __func__, voltdm->name); return -EINVAL; } vp = voltdm->vp; if (IS_ERR_OR_NULL(vp)) { pr_err("%s: No VP info for vdd_%s\n", __func__, voltdm->name); return -EINVAL; } target_volt = omap_get_operation_voltage(target_v); ret = _vp_wait_for_idle(voltdm, vp); if (ret) { _vp_controlled_err(vp, voltdm, "%s: vdd_%s idle timedout (v=%ld)\n", __func__, voltdm->name, target_volt); return ret; } ret = omap_vc_pre_scale(voltdm, target_volt, target_v, &target_vsel, ¤t_vsel); if (ret) return ret; /* * Clear all pending TransactionDone interrupt/status. Typical latency * is <3us */ while (timeout++ < VP_TRANXDONE_TIMEOUT) { vp->common->ops->clear_txdone(vp->id); if (!vp->common->ops->check_txdone(vp->id)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) { _vp_controlled_err(vp, voltdm, "%s: vdd_%s TRANXDONE timeout exceeded." "Voltage change aborted target volt=%ld," "target vsel=0x%02x, current_vsel=0x%02x\n", __func__, voltdm->name, target_volt, target_vsel, current_vsel); return -ETIMEDOUT; } vpconfig = _vp_set_init_voltage(voltdm, target_volt); /* Force update of voltage */ voltdm->write(vpconfig | vp->common->vpconfig_forceupdate, voltdm->vp->vpconfig); /* * Wait for TransactionDone. Typical latency is <200us. 
* Depends on SMPSWAITTIMEMIN/MAX and voltage change */ timeout = 0; omap_test_timeout(vp->common->ops->check_txdone(vp->id), VP_TRANXDONE_TIMEOUT, timeout); if (timeout >= VP_TRANXDONE_TIMEOUT) _vp_controlled_err(vp, voltdm, "%s: vdd_%s TRANXDONE timeout exceeded. " "TRANXDONE never got set after the voltage update. " "target volt=%ld, target vsel=0x%02x, " "current_vsel=0x%02x\n", __func__, voltdm->name, target_volt, target_vsel, current_vsel); omap_vc_post_scale(voltdm, target_volt, target_v, target_vsel, current_vsel); /* * Disable TransactionDone interrupt , clear all status, clear * control registers */ timeout = 0; while (timeout++ < VP_TRANXDONE_TIMEOUT) { vp->common->ops->clear_txdone(vp->id); if (!vp->common->ops->check_txdone(vp->id)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) _vp_controlled_err(vp, voltdm, "%s: vdd_%s TRANXDONE timeout exceeded while" "trying to clear the TRANXDONE status. target volt=%ld," "target vsel=0x%02x, current_vsel=0x%02x\n", __func__, voltdm->name, target_volt, target_vsel, current_vsel); /* Clear force bit */ voltdm->write(vpconfig, vp->vpconfig); return 0; }
/* VP force update method of voltage scaling */ int omap_vp_forceupdate_scale(struct voltagedomain *voltdm, struct omap_volt_data *target_v) { struct omap_vp_instance *vp = voltdm->vp; u32 vpconfig; u8 target_vsel, current_vsel; int ret, timeout = 0; unsigned long target_volt = omap_get_operation_voltage(target_v); /* * Wait for VP idle Typical latency is <2us. Maximum latency is ~100us * This is an additional allowance to ensure we are in proper state * to enter into forceupdate state transition. */ omap_test_timeout((voltdm->read(vp->vstatus) & vp->common->vstatus_vpidle), VP_IDLE_TIMEOUT, timeout); if (timeout >= VP_IDLE_TIMEOUT) _vp_controlled_err(vp, voltdm, "%s:vdd_%s idletimdout forceupdate(v=%ld)\n", __func__, voltdm->name, target_volt); ret = omap_vc_pre_scale(voltdm, target_volt, target_v, &target_vsel, ¤t_vsel); if (ret) return ret; /* * Clear all pending TransactionDone interrupt/status. Typical latency * is <3us */ while (timeout++ < VP_TRANXDONE_TIMEOUT) { vp->common->ops->clear_txdone(vp->id); if (!vp->common->ops->check_txdone(vp->id)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) { _vp_controlled_err(vp, voltdm, "%s: vdd_%s TRANXDONE timeout exceeded." "Voltage change aborted target volt=%ld," "target vsel=0x%02x, current_vsel=0x%02x\n", __func__, voltdm->name, target_volt, target_vsel, current_vsel); return -ETIMEDOUT; } /* Configure for VP-Force Update */ vpconfig = voltdm->read(vp->vpconfig); vpconfig &= ~(vp->common->vpconfig_initvdd | vp->common->vpconfig_forceupdate | vp->common->vpconfig_initvoltage_mask); vpconfig |= ((target_vsel << __ffs(vp->common->vpconfig_initvoltage_mask))); voltdm->write(vpconfig, vp->vpconfig); /* Trigger initVDD value copy to voltage processor */ vpconfig |= vp->common->vpconfig_initvdd; voltdm->write(vpconfig, vp->vpconfig); /* Force update of voltage */ vpconfig |= vp->common->vpconfig_forceupdate; voltdm->write(vpconfig, vp->vpconfig); /* * Wait for TransactionDone. Typical latency is <200us. 
* Depends on SMPSWAITTIMEMIN/MAX and voltage change */ timeout = 0; omap_test_timeout(vp->common->ops->check_txdone(vp->id), VP_TRANXDONE_TIMEOUT, timeout); if (timeout >= VP_TRANXDONE_TIMEOUT) _vp_controlled_err(vp, voltdm, "%s: vdd_%s TRANXDONE timeout exceeded. " "TRANXDONE never got set after the voltage update. " "target volt=%ld, target vsel=0x%02x, " "current_vsel=0x%02x\n", __func__, voltdm->name, target_volt, target_vsel, current_vsel); omap_vc_post_scale(voltdm, target_volt, target_v, target_vsel, current_vsel); /* * Disable TransactionDone interrupt , clear all status, clear * control registers */ timeout = 0; while (timeout++ < VP_TRANXDONE_TIMEOUT) { vp->common->ops->clear_txdone(vp->id); if (!vp->common->ops->check_txdone(vp->id)) break; udelay(1); } if (timeout >= VP_TRANXDONE_TIMEOUT) _vp_controlled_err(vp, voltdm, "%s: vdd_%s TRANXDONE timeout exceeded while" "trying to clear the TRANXDONE status. target volt=%ld," "target vsel=0x%02x, current_vsel=0x%02x\n", __func__, voltdm->name, target_volt, target_vsel, current_vsel); vpconfig = voltdm->read(vp->vpconfig); /* Clear initVDD copy trigger bit */ vpconfig &= ~vp->common->vpconfig_initvdd; voltdm->write(vpconfig, vp->vpconfig); /* Clear force bit */ vpconfig &= ~vp->common->vpconfig_forceupdate; voltdm->write(vpconfig, vp->vpconfig); return 0; }