/*
 * tegra_emc_read_mrr - read an LPDDR2 mode register (MRR) from the DRAM
 * @addr: mode register address to read (placed in bits [23:16] of the
 *        EMC MRR command word)
 *
 * Serialized by tegra_emc_mrr_lock.  Returns the low 16 bits of the MRR
 * data on success; calls BUG() if the controller never signals data valid
 * (TEGRA_MRR_DIVLD) within the poll budget.
 */
static int tegra_emc_read_mrr(unsigned long addr)
{
	u32 value;
	int count = 100;

	mutex_lock(&tegra_emc_mrr_lock);
	/*
	 * Drain any stale MRR result: keep reading the MRR data register
	 * until the data-valid bit clears (or we run out of attempts).
	 */
	do {
		emc_readl(TEGRA_EMC_MRR);
	} while (--count && (emc_readl(TEGRA_EMC_STATUS) & TEGRA_MRR_DIVLD));
	if (count == 0) {
		pr_err("%s: Failed to read memory type\n", __func__);
		BUG();
	}

	/*
	 * Issue the MRR command.  Bit 30 is presumably the device-select /
	 * command trigger and bits [23:16] carry the mode register address
	 * -- NOTE(review): confirm against the EMC register spec.
	 */
	value = (1 << 30) | (addr << 16);
	emc_writel(value, TEGRA_EMC_MRR);

	/* Wait for the controller to latch the result (DIVLD set). */
	count = 100;
	while (--count && !(emc_readl(TEGRA_EMC_STATUS) & TEGRA_MRR_DIVLD));
	if (count == 0) {
		pr_err("%s: Failed to read memory type\n", __func__);
		BUG();
	}

	/* Only the low 16 bits hold mode register data. */
	value = emc_readl(TEGRA_EMC_MRR) & 0xFFFF;
	mutex_unlock(&tegra_emc_mrr_lock);

	return value;
}
static struct tegra_emc_pdata *tegra_emc_fill_pdata(struct platform_device *pdev) { struct clk *c = clk_get_sys(NULL, "emc"); struct tegra_emc_pdata *pdata; unsigned long khz; int i; WARN_ON(pdev->dev.platform_data); BUG_ON(IS_ERR(c)); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); pdata->tables = devm_kzalloc(&pdev->dev, sizeof(*pdata->tables), GFP_KERNEL); pdata->tables[0].rate = clk_get_rate(c) / 2 / 1000; for (i = 0; i < TEGRA_EMC_NUM_REGS; i++) pdata->tables[0].regs[i] = emc_readl(emc_reg_addr[i]); pdata->num_tables = 1; khz = pdata->tables[0].rate; dev_info(&pdev->dev, "no tables provided, using %ld kHz emc, " "%ld kHz mem\n", khz * 2, khz); return pdata; }
/* * The EMC registers have shadow registers. When the EMC clock is updated * in the clock controller, the shadow registers are copied to the active * registers, allowing glitchless memory bus frequency changes. * This function updates the shadow registers for a new clock frequency, * and relies on the clock lock on the emc clock to avoid races between * multiple frequency changes */ int tegra_emc_set_rate(unsigned long rate) { struct tegra_emc_pdata *pdata; int i; int j; if (!emc_pdev) return -EINVAL; pdata = emc_pdev->dev.platform_data; /* * The EMC clock rate is twice the bus rate, and the bus rate is * measured in kHz */ rate = rate / 2 / 1000; for (i = 0; i < pdata->num_tables; i++) if (pdata->tables[i].rate == rate) break; if (i >= pdata->num_tables) return -EINVAL; pr_debug("%s: setting to %lu\n", __func__, rate); for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) emc_writel(pdata->tables[i].regs[j], emc_reg_addr[j]); emc_readl(pdata->tables[i].regs[TEGRA_EMC_NUM_REGS - 1]); return 0; }
/* * The EMC registers have shadow registers. When the EMC clock is updated * in the clock controller, the shadow registers are copied to the active * registers, allowing glitchless memory bus frequency changes. * This function updates the shadow registers for a new clock frequency, * and relies on the clock lock on the emc clock to avoid races between * multiple frequency changes */ int tegra_emc_set_rate(unsigned long rate) { int i; int j; if (!tegra_emc_table) return -EINVAL; /* * The EMC clock rate is twice the bus rate, and the bus rate is * measured in kHz */ rate = rate / 2 / 1000; for (i = tegra_emc_table_size - 1; i >= 0; i--) if (tegra_emc_table[i].rate == rate) break; if (i < 0) return -EINVAL; pr_debug("%s: setting to %lu\n", __func__, rate); for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]); emc_readl(tegra_emc_table[i].regs[TEGRA_EMC_NUM_REGS - 1]); return 0; }
static inline void emc_get_timing(struct tegra11_emc_table *timing) { int i; /* burst and trimmers updates depends on previous state; burst_up_down are stateless */ for (i = 0; i < timing->burst_regs_num; i++) { if (burst_reg_addr[i]) timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]); else timing->burst_regs[i] = 0; } for (i = 0; i < timing->emc_trimmers_num; i++) { timing->emc_trimmers_0[i] = __raw_readl(emc0_base + emc_trimmer_offs[i]); timing->emc_trimmers_1[i] = __raw_readl(emc1_base + emc_trimmer_offs[i]); } timing->emc_acal_interval = 0; timing->emc_zcal_cnt_long = 0; timing->emc_mode_reset = 0; timing->emc_mode_1 = 0; timing->emc_mode_2 = 0; timing->emc_mode_4 = 0; timing->emc_cfg = emc_readl(EMC_CFG); timing->rate = clk_get_rate_locked(emc) / 1000; }
/* * The EMC registers have shadow registers. When the EMC clock is updated * in the clock controller, the shadow registers are copied to the active * registers, allowing glitchless memory bus frequency changes. * This function updates the shadow registers for a new clock frequency, * and relies on the clock lock on the emc clock to avoid races between * multiple frequency changes */ int tegra_emc_set_rate(unsigned long rate) { int i; int j; if (!tegra_emc_table){ printk("faile! 1 no tegra_emc_table+\n"); return -EINVAL; } /* * The EMC clock rate is twice the bus rate, and the bus rate is * measured in kHz */ rate = rate / 2 / 1000; for (i = 0; i < tegra_emc_table_size; i++) if (tegra_emc_table[i].rate == rate) break; if (i >= tegra_emc_table_size){ printk("faile! 2 tegra_emc_set_rateemc rate=%u +\n",rate); return -EINVAL; } pr_debug("%s: setting to %lu\n", __func__, rate); for (j = 0; j < TEGRA_EMC_NUM_REGS; j++) emc_writel(tegra_emc_table[i].regs[j], emc_reg_addr[j]); emc_readl(tegra_emc_table[i].regs[TEGRA_EMC_NUM_REGS - 1]); return 0; }
/*
 * wait_for_update - poll a status bit until it reaches the wanted state
 * @status_reg: EMC status register offset to poll
 * @bit_mask: bit(s) to test in the status register
 * @updated_state: desired logical state of the masked bit(s)
 *
 * Polls once per microsecond for up to EMC_STATUS_UPDATE_TIMEOUT
 * iterations.  Returns 0 once the state matches, -ETIMEDOUT otherwise.
 */
static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
{
	int tries = 0;

	while (tries++ < EMC_STATUS_UPDATE_TIMEOUT) {
		bool state = emc_readl(status_reg) & bit_mask;

		if (state == updated_state)
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}
/*
 * emc_set_clock - execute the ordered EMC DFS clock-change sequence
 * @next_timing: timing table entry to switch to
 * @last_timing: timing table entry currently programmed
 * @clk_setting: clock-source register value passed to do_clock_change()
 *
 * Programs the shadow/CCFIFO registers and triggers the actual clock
 * switch.  The numbered steps below follow the hardware-mandated DFS
 * procedure; their ORDER IS SIGNIFICANT -- do not reorder statements.
 * NOTE(review): callers presumably hold the EMC clock lock -- confirm.
 */
static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
				   const struct tegra11_emc_table *last_timing,
				   u32 clk_setting)
{
#ifndef EMULATE_CLOCK_SWITCH
	int i, dll_change, pre_wait;
	bool dyn_sref_enabled, zcal_long;
	u32 emc_cfg_reg = emc_readl(EMC_CFG);

	dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
	dll_change = get_dll_change(next_timing, last_timing);
	/* zcal_long: ZQ calibration is being turned ON by this switch */
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* FIXME: remove steps enumeration below? */

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (dyn_sref_enabled) {
		emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;	/* 5us+ for self-refresh entry/exit */
	}

	/* 2.5 check dq/dqs vref delay */
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 3)
			pre_wait = 3;	/* 3us+ for dqs vref settled */
	}
	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}

	/* 3. disable auto-cal if vref mode is switching - removed */

	/* 4. program burst shadow registers */
	for (i = 0; i < next_timing->burst_regs_num; i++) {
		/* entries without a backing register are skipped */
		if (!burst_reg_addr[i])
			continue;
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	}
	/* trimmers are per-channel: write both EMC0 and EMC1 apertures */
	for (i = 0; i < next_timing->emc_trimmers_num; i++) {
		__raw_writel(next_timing->emc_trimmers_0[i],
			emc0_base + emc_trimmer_offs[i]);
		__raw_writel(next_timing->emc_trimmers_1[i],
			emc1_base + emc_trimmer_offs[i]);
	}
	if ((dram_type == DRAM_TYPE_LPDDR2) &&
	    (dram_over_temp_state != DRAM_OVER_TEMP_NONE))
		set_over_temp_timing(next_timing, dram_over_temp_state);

	/* merge the update-controlled bits of the next CFG into the live one */
	emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
	emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
	emc_writel(emc_cfg_reg, EMC_CFG);
	wmb();
	barrier();

	/* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* 5.2 disable auto-refresh to save time after clock change */
	ccfifo_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 6. turn Off dll and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
			      EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}

	/* 7. flow control marker 2 */
	ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* 8. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

	/* 8.1 re-enable auto-refresh */
	ccfifo_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 9. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 10. issue zcal command if turning zcal On */
	if (zcal_long) {
		ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 10.1 dummy write to RO register to remove stall after change */
	ccfifo_writel(0, EMC_CCFIFO_STATUS);

	/* 11.5 program burst_up_down registers if emc rate is going down */
	if (next_timing->rate < last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 12-14. read any MC register to ensure the programming is done
	   change EMC clock source register wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.1 re-enable auto-refresh - moved to ccfifo in 8.1 */

	/* 14.2 program burst_up_down registers if emc rate is going up */
	if (next_timing->rate > last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 15. set auto-cal interval (table rev 0x42+ only) */
	if (next_timing->rev >= 0x42)
		emc_writel(next_timing->emc_acal_interval,
			   EMC_AUTO_CAL_INTERVAL);

	/* 16. restore dynamic self-refresh */
	if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
		emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 18. update restored timing */
	udelay(2);
	emc_timing_update();
#else
	/* FIXME: implement */
	pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
		next_timing->rate, clk_setting);
#endif
}