/* ddr_init() - initializes ddr */ void __ddr_init_samsung(void) { u32 ddr_size; const struct ddr_regs *ddr_regs = 0; ddr_size = ( get_hwid() & 0x18 ) >> 3; switch (ddr_size) { case DDR_SIZE_512MB: ddr_regs = &ddr_regs_samsung2G_400_mhz; break; case DDR_SIZE_1GB: default: ddr_regs = &ddr_regs_samsung4G_400_mhz; } /* * DMM Configuration: * 1GB - 128 byte interleaved * We also need to make sure the whole 2GB of the DMM are mapped * to workaround i614. */ __raw_writel(0x80640300, DMM_BASE + DMM_LISA_MAP_1); __raw_writel(0x80760300, DMM_BASE + DMM_LISA_MAP_0); /* same memory part on both EMIFs */ do_ddr_init(ddr_regs, ddr_regs); }
/* ddr_init() - initializes ddr (Samsung parts, fixed configuration).
 *
 * This variant supports a single memory configuration: Samsung 4Gb
 * parts at 400 MHz. It programs the DMM LISA sections and then runs
 * do_ddr_init() with the same timing table on both EMIFs.
 *
 * Fix: removed the unused local `u32 rev;` and replaced the dead
 * `= 0` initializer (immediately overwritten) with a direct init.
 */
void __ddr_init_samsung(void)
{
	const struct ddr_regs *ddr_regs = &ddr_regs_samsung4G_400_mhz;

	/*
	 * DMM Configuration:
	 * 1GB - 128 byte interleaved
	 * We also need to make sure the whole 2GB of the DMM are mapped
	 * to workaround i614.
	 */
	__raw_writel(0x80640300, DMM_BASE + DMM_LISA_MAP_1);
	__raw_writel(0x80760300, DMM_BASE + DMM_LISA_MAP_0);

	/* same memory part on both EMIFs */
	do_ddr_init(ddr_regs, ddr_regs);
}
/* ddr_init() - initializes ddr */ void __ddr_init_elpida(void) { u32 rev; const struct ddr_regs *ddr_regs = 0; rev = omap_revision(); if (rev == OMAP4430_ES1_0) ddr_regs = &ddr_regs_elpida2G_380_mhz; else if (rev == OMAP4430_ES2_0) ddr_regs = &ddr_regs_elpida2G_200_mhz_2cs; else if (rev >= OMAP4430_ES2_1) ddr_regs = &ddr_regs_elpida2G_400_mhz_2cs; /* * DMM Configuration: * 1GB - 128 byte interleaved * We also need to make sure the whole 2GB of the DMM are mapped * to workaround i614. */ __raw_writel(0x80640300, DMM_BASE + DMM_LISA_MAP_1); __raw_writel(0x80760300, DMM_BASE + DMM_LISA_MAP_0); /* same memory part on both EMIFs */ do_ddr_init(ddr_regs, ddr_regs); }
/* ddr_init() - initializes ddr */ void __ddr_init_elpida(void) { u32 rev,ddr_size; const struct ddr_regs *ddr_regs = 0; rev = omap_revision(); ddr_size = ( get_hwid() & 0x18 ) >> 3; if (rev == OMAP4430_ES1_0) ddr_regs = &ddr_regs_elpida2G_380_mhz; else if (rev == OMAP4430_ES2_0) ddr_regs = &ddr_regs_elpida2G_200_mhz_2cs; else if (rev >= OMAP4430_ES2_1 && rev < OMAP4470_ES1_0) ddr_regs = &ddr_regs_elpida2G_400_mhz_2cs; else if (rev >= OMAP4470_ES1_0) if (ddr_size == DDR_SIZE_2GB) { #ifdef CORE_233MHZ ddr_regs = &ddr_regs_elpida4G_466_mhz_2cs; #else ddr_regs = &ddr_regs_elpida4G_400_mhz_2cs; #endif } else { #ifdef CORE_233MHZ ddr_regs = &ddr_regs_elpida4G_466_mhz_1cs; #else ddr_regs = &ddr_regs_elpida4G_400_mhz_1cs; #endif } /* * DMM DMM_LISA_MAP_i registers fields description * [31:24] SYS_ADDR * [22:20] SYS_SIZE * [19:18] SDRC_INTLDMM * [17:16] SDRC_ADDRSPC * [9:8] SDRC_MAP * [7:0] SDRC_ADDR */ /* TI Errata i614 - DMM Hang Issue During Unmapped Accesses * CRITICALITY: Medium * REVISIONS IMPACTED: OMAP4430 all * DESCRIPTION * DMM replies with an OCP error response in case of unmapped access. * DMM can generate unmapped access at the end of a mapped section. * A hang occurs if an unmapped access is issued after a mapped access. * * WORKAROUND * Define LISA section to have response error when unmapped addresses. * Define LISA section in such a way that all transactions reach the * EMIF, which will return an error response. * * For W/A configure DMM Section 0 size equal to 2048 MB and map it on * SDRC_ADDRSPC=0, and move original SDRAM configuration * to section 1 with higher priority. * * This W/A has been applied for all OMAP4 CPUs - It doesn't any harm * on 4460, and even desirable than hitting an un-mapped area in DMM. * This makes the code simpler and easier to understand and * the settings will be more uniform. 
**/ /* TRAP for catching accesses to the umapped memory */ __raw_writel(0x80720100, DMM_BASE + DMM_LISA_MAP_0); __raw_writel(0x00000000, DMM_BASE + DMM_LISA_MAP_2); /* TRAP for catching accesses to the memory actually used by TILER */ __raw_writel(0xFF020100, DMM_BASE + DMM_LISA_MAP_1); if (rev == OMAP4430_ES1_0) /* original DMM configuration * - 512 MB, 128 byte interleaved, EMIF1&2, SDRC_ADDRSPC=0 */ __raw_writel(0x80540300, DMM_BASE + DMM_LISA_MAP_3); else if (rev < OMAP4460_ES1_0) /* original DMM configuration * - 1024 MB, 128 byte interleaved, EMIF1&2, SDRC_ADDRSPC=0 */ __raw_writel(0x80640300, DMM_BASE + DMM_LISA_MAP_3); else { /* OMAP4460 and higher: original DMM configuration * - 1024 MB, 128 byte interleaved, EMIF1&2, SDRC_ADDRSPC=0 */ if (ddr_size == DDR_SIZE_2GB) __raw_writel(0x80740300, DMM_BASE + DMM_LISA_MAP_3); else __raw_writel(0x80640300, DMM_BASE + DMM_LISA_MAP_3); __raw_writel(0x80720100, MA_BASE + DMM_LISA_MAP_0); __raw_writel(0xFF020100, MA_BASE + DMM_LISA_MAP_1); __raw_writel(0x00000000, MA_BASE + DMM_LISA_MAP_2); if (ddr_size == DDR_SIZE_2GB) __raw_writel(0x80740300, MA_BASE + DMM_LISA_MAP_3); else __raw_writel(0x80640300, MA_BASE + DMM_LISA_MAP_3); } /* same memory part on both EMIFs */ do_ddr_init(ddr_regs, ddr_regs); /* Pull Dn enabled for "Weak driver control" on LPDDR * Interface. 
*/ if (rev >= OMAP4460_ES1_0) { __raw_writel(0x9c9c9c9c, CONTROL_LPDDR2IO1_0); __raw_writel(0x9c9c9c9c, CONTROL_LPDDR2IO1_1); __raw_writel(0x9c989c00, CONTROL_LPDDR2IO1_2); __raw_writel(0xa0888c03, CONTROL_LPDDR2IO1_3); __raw_writel(0x9c9c9c9c, CONTROL_LPDDR2IO2_0); __raw_writel(0x9c9c9c9c, CONTROL_LPDDR2IO2_1); __raw_writel(0x9c989c00, CONTROL_LPDDR2IO2_2); __raw_writel(0xa0888c03, CONTROL_LPDDR2IO2_3); } #ifdef CORE_233MHZ /* * Slew Rate should be set to “FASTEST” and * Impedance Control to “Drv12”: * CONTROL_LPDDR2IOx_2[LPDDR2IO1_GR10_SR] = 0 * CONTROL_LPDDR2IOx_2[LPDDR2IO1_GR10_I] = 7 * where x=[1-2] */ if (rev >= OMAP4470_ES1_0) { __raw_writel(0x9c3c9c00, CONTROL_LPDDR2IO1_2); __raw_writel(0x9c3c9c00, CONTROL_LPDDR2IO2_2); } #endif }