Example #1
static void __init
mpc85xx_setup_pci1(struct pci_controller *hose)
{
	volatile struct ccsr_pci *pci;
	volatile struct ccsr_guts *guts;
	unsigned short temps;
	bd_t *binfo = (bd_t *) __res;

	pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI1_OFFSET,
		    MPC85xx_PCI1_SIZE);

	guts = ioremap(binfo->bi_immr_base + MPC85xx_GUTS_OFFSET,
		    MPC85xx_GUTS_SIZE);

	early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps);
	temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	early_write_config_word(hose, 0, 0, PCI_COMMAND, temps);

#define PORDEVSR_PCI	(0x00800000)	/* PCI Mode */
	if (guts->pordevsr & PORDEVSR_PCI) {
		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
	} else {
		/* PCI-X init */
		temps = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
		early_write_config_word(hose, 0, 0, PCIX_COMMAND, temps);
	}

	/* Disable all windows (except powar0 since it's ignored) */
	pci->powar1 = 0;
	pci->powar2 = 0;
	pci->powar3 = 0;
	pci->powar4 = 0;
	pci->piwar1 = 0;
	pci->piwar2 = 0;
	pci->piwar3 = 0;

	/* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI1_LOWER_MEM */
	pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
	pci->potear1 = 0x00000000;
	pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff;
	/* Enable, Mem R/W */
	pci->powar1 = 0x80044000 |
	   (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1);

	/* Setup outbound IO windows @ MPC85XX_PCI1_IO_BASE */
	pci->potar2 = (MPC85XX_PCI1_LOWER_IO >> 12) & 0x000fffff;
	pci->potear2 = 0x00000000;
	pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff;
	/* Enable, IO R/W */
	pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI1_IO_SIZE) - 1);

	/* Setup 2G inbound Memory Window @ 0 */
	pci->pitar1 = 0x00000000;
	pci->piwbar1 = 0x00000000;
	pci->piwar1 = 0xa0f5501e;	/* Enable, Prefetch, Local
					   Mem, Snoop R/W, 2G */
}
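The window-size encoding used throughout these examples packs __ilog2(size) - 1 into the low bits of the POWAR/PIWAR value; that is how the 2G inbound window above ends up as 0xa0f5501e. A minimal standalone sketch of the encoding (the ilog2_u32 helper and the program around it are illustrative, not kernel code):

#include <stdio.h>

/* Illustrative stand-in for the kernel's __ilog2(): floor(log2(x)),
 * i.e. the index of the highest set bit. */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int window_size = 0x80000000u;		/* 2 GiB window */

	/* 0xa0f55000 = Enable | Prefetch | Local Mem | Snoop R/W; the
	 * low bits carry __ilog2(size) - 1, as in the piwar1 write above. */
	unsigned int piwar = 0xa0f55000u | (ilog2_u32(window_size) - 1);

	printf("piwar = 0x%08x\n", piwar);		/* prints 0xa0f5501e */
	return 0;
}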
Example #2
/* mv64360_get_irq()
 *
 * This function returns the lowest interrupt number of all interrupts that
 * are currently asserted.
 *
 * Output Variable(s):
 *  None.
 *
 * Returns:
 *  int	<interrupt number> or -2 (bogus interrupt)
 *
 */
int
mv64360_get_irq(void)
{
	int irq;
	int irq_gpp;

#ifdef CONFIG_SMP
	/*
	 * Second CPU gets only doorbell (message) interrupts.
	 * The doorbell interrupt is BIT28 in the main interrupt low cause reg.
	 */
	int cpu_nr = smp_processor_id();
	if (cpu_nr == 1) {
		if (!(mv64x60_read(&bh, MV64360_IC_MAIN_CAUSE_LO) &
		      (1 << MV64x60_IRQ_DOORBELL)))
			return -1;
		return mv64360_irq_base + MV64x60_IRQ_DOORBELL;
	}
#endif

	irq = mv64x60_read(&bh, MV64360_IC_MAIN_CAUSE_LO);
	irq = __ilog2((irq & 0x3dfffffe) & ppc_cached_irq_mask[0]);

	if (irq == -1) {
		irq = mv64x60_read(&bh, MV64360_IC_MAIN_CAUSE_HI);
		irq = __ilog2((irq & 0x1f0003f7) & ppc_cached_irq_mask[1]);

		if (irq == -1)
			irq = -2; /* bogus interrupt, should never happen */
		else {
			if ((irq >= 24) && (irq < MV64x60_IRQ_DOORBELL)) {
				irq_gpp = mv64x60_read(&bh,
					MV64x60_GPP_INTR_CAUSE);
				irq_gpp = __ilog2(irq_gpp &
					ppc_cached_irq_mask[2]);

				if (irq_gpp == -1)
					irq = -2;
				else {
					irq = irq_gpp + 64;
					mv64x60_write(&bh,
						MV64x60_GPP_INTR_CAUSE,
						~(1 << (irq - 64)));
				}
			}
			else
				irq += 32;
		}
	}

	(void)mv64x60_read(&bh, MV64x60_GPP_INTR_CAUSE);

	if (irq < 0)
		return (irq);
	else
		return (mv64360_irq_base + irq);
}
Example #3
void __init
adjust_total_lowmem(void)
{
	unsigned long max_low_mem = MAX_LOW_MEM;
	
#ifdef HAVE_BATS
	unsigned long bat_max = 0x10000000;
	unsigned long align;
	unsigned long ram = total_lowmem;
	int is601 = 0;
	
	/* 601s have smaller BATs */
	if (PVR_VER(mfspr(PVR)) == 1) {
		bat_max = 0x00800000;
		is601 = 1;
	}

	/* Make sure we don't map a block larger than the
	   smallest alignment of the physical address. */
	/* alignment of ram_phys_base */
	align = ~(ram_phys_base-1) & ram_phys_base;
	/* set BAT block size to MIN(max_size, align) */
	if (align && align < bat_max)
		bat_max = align;

	/* Calculate BAT values */	
	__bat2 = 1UL << __ilog2(ram);
	if (__bat2 > bat_max)
		__bat2 = bat_max;
	ram -= __bat2;
	if (ram) {
		__bat3 = 1UL << __ilog2(ram);
		if (__bat3 > bat_max)
			__bat3 = bat_max;
		ram -= __bat3;
	}

	printk(KERN_INFO "Memory BAT mapping: BAT2=%ldMb, BAT3=%ldMb, residual: %ldMb\n",
		__bat2 >> 20, __bat3 >> 20, ram >> 20);

	/* On SMP, we limit the lowmem to the area mapped with BATs.
	 * We also assume nobody will do SMP with 601s
	 */
#ifdef CONFIG_SMP
	if (!is601)
		max_low_mem = __bat2 + __bat3;
#endif /* CONFIG_SMP */

#endif /* HAVE_BATS */
	if (total_lowmem > max_low_mem) {
		total_lowmem = max_low_mem;
#ifndef CONFIG_HIGHMEM
		printk(KERN_INFO "Warning, memory limited to %ld Mb, use CONFIG_HIGHMEM"
			" to reach %ld Mb\n", max_low_mem >> 20, total_lowmem >> 20);
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
}
Example #4
static void __init
mpc86xx_setup_pcie(struct pci_controller *hose, u32 pcie_offset, u32 lower_mem,
		   u32 upper_mem, u32 io_base)
{
	volatile struct ccsr_pcie *pcie;
	u16 cmd;
	bd_t *binfo = (bd_t *) __res;

	pcie = ioremap(binfo->bi_immr_base + pcie_offset,
		MPC86xx_PCIE_SIZE);

	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
	cmd |=  PCI_COMMAND_SERR   | PCI_COMMAND_MASTER |
		PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
	early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);

	/* Disable all windows (except pcieowar0 since it's ignored) */
	pcie->pcieowar1 = 0;
	pcie->pcieowar2 = 0;
	pcie->pcieowar3 = 0;
	pcie->pcieowar4 = 0;
	pcie->pcieiwar1 = 0;
	pcie->pcieiwar2 = 0;
	pcie->pcieiwar3 = 0;

	/* Setup Phys:PCIE 1:1 outbound mem window @ MPC86XX_PCIEn_LOWER_MEM */
	pcie->pcieotar1 = (lower_mem >> 12) & 0x000fffff;
	pcie->pcieotear1 = 0x00000000;
	pcie->pcieowbar1 = (lower_mem >> 12) & 0x000fffff;
	/* Enable, Mem R/W */
	pcie->pcieowar1 = 0x80044000 |
		(__ilog2(upper_mem - lower_mem + 1) - 1);

	/* Setup outbound IO windows @ MPC86XX_PCIEn_IO_BASE */
	pcie->pcieotar2 = (MPC86XX_PCIE_LOWER_IO >> 12) & 0x000fffff;
	pcie->pcieotear2 = 0x00000000;
	pcie->pcieowbar2 = (io_base >> 12) & 0x000fffff;
	/* Enable, IO R/W */
	pcie->pcieowar2 = 0x80088000 | (__ilog2(MPC86XX_PCIE_IO_SIZE) - 1);

	/* Setup 2G inbound Memory Window @ 0 */
	pcie->pcieitar1 = 0x00000000;
	pcie->pcieiwbar1 = 0x00000000;
	/* Enable, Prefetch, Local Mem, Snoop R/W, 2G */
	pcie->pcieiwar1 = 0xa0f5501e;
}
Example #5
unsigned int mv64x60_get_irq(void)
{
	u32 cause;
	int level1;
	irq_hw_number_t hwirq;
	int virq = 0;

	cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
	if (cause & MV64X60_SELECT_CAUSE_HIGH) {
		cause &= mv64x60_cached_high_mask;
		level1 = MV64x60_LEVEL1_HIGH;
		if (cause & MV64X60_HIGH_GPP_GROUPS) {
			cause = in_le32(mv64x60_gpp_reg_base +
					MV64x60_GPP_INTR_CAUSE);
			cause &= mv64x60_cached_gpp_mask;
			level1 = MV64x60_LEVEL1_GPP;
		}
	} else {
		cause &= mv64x60_cached_low_mask;
		level1 = MV64x60_LEVEL1_LOW;
	}
	if (cause) {
		hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
		virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
	}

	return virq;
}
Example #6
/* atmu setup for fsl pci/pcie controller */
void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
{
	struct ccsr_pci __iomem *pci;
	int i;

	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
		    (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1);
	pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1);

	/* Disable all windows (except powar0 since it's ignored) */
	for(i = 1; i < 5; i++)
		out_be32(&pci->pow[i].powar, 0);
	for(i = 0; i < 3; i++)
		out_be32(&pci->piw[i].piwar, 0);

	/* Setup outbound MEM window */
	for(i = 0; i < 3; i++)
		if (hose->mem_resources[i].flags & IORESOURCE_MEM){
			resource_size_t pci_addr_start =
				 hose->mem_resources[i].start -
				 hose->pci_mem_offset;
			pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
				(u64)hose->mem_resources[i].start,
				(u64)hose->mem_resources[i].end
				  - (u64)hose->mem_resources[i].start + 1);
			out_be32(&pci->pow[i+1].potar, (pci_addr_start >> 12));
			out_be32(&pci->pow[i+1].potear, 0);
			out_be32(&pci->pow[i+1].powbar,
				(hose->mem_resources[i].start >> 12));
			/* Enable, Mem R/W */
			out_be32(&pci->pow[i+1].powar, 0x80044000
				| (__ilog2(hose->mem_resources[i].end
				- hose->mem_resources[i].start + 1) - 1));
		}
}
Example #7
/*************************************************************************
 *  fixed sdram init -- doesn't use serial presence detect.
 ************************************************************************/
int fixed_sdram(void)
{
	immap_t *im = (immap_t *) CFG_IMMR;
	u32 msize = CFG_DDR_SIZE * 1024 * 1024;
	u32 msize_log2 = __ilog2(msize);

	im->sysconf.ddrlaw[0].bar = CFG_DDR_SDRAM_BASE >> 12;
	im->sysconf.ddrlaw[0].ar = LBLAWAR_EN | (msize_log2 - 1);

	im->sysconf.ddrcdr = CFG_DDRCDR_VALUE;
	udelay(50000);

	im->ddr.sdram_clk_cntl = CFG_DDR_SDRAM_CLK_CNTL;
	udelay(1000);

	im->ddr.csbnds[0].csbnds = CFG_DDR_CS0_BNDS;
	im->ddr.cs_config[0] = CFG_DDR_CS0_CONFIG;
	udelay(1000);

	im->ddr.timing_cfg_0 = CFG_DDR_TIMING_0;
	im->ddr.timing_cfg_1 = CFG_DDR_TIMING_1;
	im->ddr.timing_cfg_2 = CFG_DDR_TIMING_2;
	im->ddr.timing_cfg_3 = CFG_DDR_TIMING_3;
	im->ddr.sdram_cfg = CFG_DDR_SDRAM_CFG;
	im->ddr.sdram_cfg2 = CFG_DDR_SDRAM_CFG2;
	im->ddr.sdram_mode = CFG_DDR_MODE;
	im->ddr.sdram_mode2 = CFG_DDR_MODE2;
	im->ddr.sdram_interval = CFG_DDR_INTERVAL;
	sync();
	udelay(1000);

	im->ddr.sdram_cfg |= SDRAM_CFG_MEM_EN;
	udelay(2000);
	return CFG_DDR_SIZE;
}
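The LAW attribute write, LBLAWAR_EN | (msize_log2 - 1), uses the same convention: the size field selects a window of 2^(field + 1) bytes. A quick standalone check (illustrative only, reusing the floor-log2 stand-in sketched after Example #1):

#include <assert.h>

/* Illustrative floor(log2) stand-in for __ilog2(). */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int msize = 256u * 1024 * 1024;	/* CFG_DDR_SIZE = 256 */

	/* With msize = 2^28, the size field is 28 - 1 = 27 and the LAW
	 * covers 2^(27 + 1) bytes, i.e. exactly msize. */
	assert(ilog2_u32(msize) - 1 == 27);
	return 0;
}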
Example #8
static int sun4i_spi_set_speed(struct udevice *dev, uint speed)
{
	struct sun4i_spi_platdata *plat = dev_get_platdata(dev);
	struct sun4i_spi_priv *priv = dev_get_priv(dev);
	unsigned int div;
	u32 reg;

	if (speed > plat->max_hz)
		speed = plat->max_hz;

	if (speed < SUN4I_SPI_MIN_RATE)
		speed = SUN4I_SPI_MIN_RATE;
	/*
	 * Setup clock divider.
	 *
	 * We have two choices there. Either we can use the clock
	 * divide rate 1, which is calculated thanks to this formula:
	 * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
	 * Or we can use CDR2, which is calculated with the formula:
	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
	 * Whether we use the former or the latter is set through the
	 * DRS bit.
	 *
	 * First try CDR2, and if we can't reach the expected
	 * frequency, fall back to CDR1.
	 */

	div = SUN4I_SPI_MAX_RATE / (2 * speed);
	reg = readl(&priv->regs->cctl);

	if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
		if (div > 0)
			div--;

		reg &= ~(SUN4I_CLK_CTL_CDR2_MASK | SUN4I_CLK_CTL_DRS);
		reg |= SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
	} else {
		div = __ilog2(SUN4I_SPI_MAX_RATE) - __ilog2(speed);
		reg &= ~((SUN4I_CLK_CTL_CDR1_MASK << 8) | SUN4I_CLK_CTL_DRS);
		reg |= SUN4I_CLK_CTL_CDR1(div);
	}

	priv->freq = speed;
	writel(reg, &priv->regs->cctl);

	return 0;
}
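The divider selection above can be sanity-checked with plain integer math. A standalone sketch of both formulas from the comment (the 24 MHz module clock is an assumed, illustrative value):

#include <stdio.h>

/* Illustrative floor(log2) stand-in for __ilog2(). */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int mod_clk = 24000000;	/* assumed module clock */
	unsigned int speed, div;

	/* Fast request, CDR2 path: SPI_CLK = MOD_CLK / (2 * (cdr + 1)) */
	speed = 1000000;
	div = mod_clk / (2 * speed);		/* 12 */
	if (div > 0)
		div--;				/* cdr2 = 11 */
	printf("CDR2=%u -> %u Hz\n", div, mod_clk / (2 * (div + 1)));

	/* Slow request, CDR1 path: SPI_CLK = MOD_CLK / 2^(cdr + 1) */
	speed = 3000;
	div = ilog2_u32(mod_clk) - ilog2_u32(speed);	/* 24 - 11 = 13 */
	printf("CDR1=%u -> %u Hz\n", div, mod_clk >> (div + 1));
	return 0;
}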
Example #9
/* Chip Select Configuration (CSn_CONFIG) */
static void set_csn_config(int i, fsl_ddr_cfg_regs_t *ddr,
			       const memctl_options_t *popts,
			       const dimm_params_t *dimm_params)
{
	unsigned int cs_n_en = 0; /* Chip Select enable */
	unsigned int intlv_en = 0; /* Memory controller interleave enable */
	unsigned int intlv_ctl = 0; /* Interleaving control */
	unsigned int ap_n_en = 0; /* Chip select n auto-precharge enable */
	unsigned int odt_rd_cfg = 0; /* ODT for reads configuration */
	unsigned int odt_wr_cfg = 0; /* ODT for writes configuration */
	unsigned int ba_bits_cs_n = 0; /* Num of bank bits for SDRAM on CSn */
	unsigned int row_bits_cs_n = 0; /* Num of row bits for SDRAM on CSn */
	unsigned int col_bits_cs_n = 0; /* Num of col bits for SDRAM on CSn */

	/* Compute CS_CONFIG only for existing ranks of each DIMM.  */
	if ((((i&1) == 0)
	    && (dimm_params[i/2].n_ranks == 1))
	    || (dimm_params[i/2].n_ranks == 2)) {
		unsigned int n_banks_per_sdram_device;
		cs_n_en = 1;
		if (i == 0) {
			/* These fields only available in CS0_CONFIG */
			intlv_en = popts->memctl_interleaving;
			intlv_ctl = popts->memctl_interleaving_mode;
		}
		ap_n_en = popts->cs_local_opts[i].auto_precharge;
		odt_rd_cfg = popts->cs_local_opts[i].odt_rd_cfg;
		odt_wr_cfg = popts->cs_local_opts[i].odt_wr_cfg;
		n_banks_per_sdram_device
			= dimm_params[i/2].n_banks_per_sdram_device;
		ba_bits_cs_n = __ilog2(n_banks_per_sdram_device) - 2;
		row_bits_cs_n = dimm_params[i/2].n_row_addr - 12;
		col_bits_cs_n = dimm_params[i/2].n_col_addr - 8;
	}

	/* FIXME: intlv_en, intlv_ctl only on CS0_CONFIG */
	if (i != 0) {
		intlv_en = 0;
		intlv_ctl = 0;
	}

	ddr->cs[i].config = (0
		| ((cs_n_en & 0x1) << 31)
		| ((intlv_en & 0x3) << 29)
		| ((intlv_ctl & 0xf) << 24)
		| ((ap_n_en & 0x1) << 23)

		/* XXX: some implementation only have 1 bit starting at left */
		| ((odt_rd_cfg & 0x7) << 20)

		/* XXX: Some implementation only have 1 bit starting at left */
		| ((odt_wr_cfg & 0x7) << 16)

		| ((ba_bits_cs_n & 0x3) << 14)
		| ((row_bits_cs_n & 0x7) << 8)
		| ((col_bits_cs_n & 0x7) << 0)
		);
}
Example #10
static void __init
mpc85xx_setup_pci2(struct pci_controller *hose)
{
	volatile struct ccsr_pci *pci;
	unsigned short temps;
	bd_t *binfo = (bd_t *) __res;

	pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET,
		    MPC85xx_PCI2_SIZE);

	early_read_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, &temps);
	temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	early_write_config_word(hose, hose->bus_offset, 0, PCI_COMMAND, temps);
	early_write_config_byte(hose, hose->bus_offset, 0, PCI_LATENCY_TIMER, 0x80);

	/* Disable all windows (except powar0 since it's ignored) */
	pci->powar1 = 0;
	pci->powar2 = 0;
	pci->powar3 = 0;
	pci->powar4 = 0;
	pci->piwar1 = 0;
	pci->piwar2 = 0;
	pci->piwar3 = 0;

	/* Setup Phys:PCI 1:1 outbound mem window @ MPC85XX_PCI2_LOWER_MEM */
	pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
	pci->potear1 = 0x00000000;
	pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff;
	/* Enable, Mem R/W */
	pci->powar1 = 0x80044000 |
	   (__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1);

	/* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */
	pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;
	pci->potear2 = 0x00000000;
	pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
	/* Enable, IO R/W */
	pci->powar2 = 0x80088000 | (__ilog2(MPC85XX_PCI2_IO_SIZE) - 1);

	/* Setup 2G inbound Memory Window @ 0 */
	pci->pitar1 = 0x00000000;
	pci->piwbar1 = 0x00000000;
	pci->piwar1 = 0xa0f5501e;	/* Enable, Prefetch, Local
					   Mem, Snoop R/W, 2G */
}
Example #11
const char * step_to_string(unsigned int step) {

	unsigned int s = __ilog2(step);

	if ((1 << s) != step)
		return step_string_tbl[7];

	return step_string_tbl[s];
}
Example #12
/*
 * gt64260_get_irq()
 *
 *  This function returns the lowest interrupt number of all interrupts that
 *  are currently asserted.
 *
 * Input Variable(s):
 *  struct pt_regs*	not used
 *
 * Output Variable(s):
 *  None.
 *
 * Returns:
 *  int	<interrupt number> or -2 (bogus interrupt)
 */
int
gt64260_get_irq(struct pt_regs *regs)
{
	int irq;
	int irq_gpp;

	irq = mv64x60_read(&bh, GT64260_IC_MAIN_CAUSE_LO);
	irq = __ilog2((irq & 0x3dfffffe) & ppc_cached_irq_mask[0]);

	if (irq == -1) {
		irq = mv64x60_read(&bh, GT64260_IC_MAIN_CAUSE_HI);
		irq = __ilog2((irq & 0x0f000db7) & ppc_cached_irq_mask[1]);

		if (irq == -1)
			irq = -2; /* bogus interrupt, should never happen */
		else {
			if (irq >= 24) {
				irq_gpp = mv64x60_read(&bh,
					MV64x60_GPP_INTR_CAUSE);
				irq_gpp = __ilog2(irq_gpp &
					ppc_cached_irq_mask[2]);

				if (irq_gpp == -1)
					irq = -2;
				else {
					irq = irq_gpp + 64;
					mv64x60_write(&bh,
						MV64x60_GPP_INTR_CAUSE,
						~(1 << (irq - 64)));
				}
			} else
				irq += 32;
		}
	}

	(void)mv64x60_read(&bh, MV64x60_GPP_INTR_CAUSE);

	if (irq < 0)
		return (irq);
	else
		return (gt64260_irq_base + irq);
}
Example #13
void __init ps3_hpte_init(unsigned long htab_size)
{
	ppc_md.hpte_invalidate = ps3_hpte_invalidate;
	ppc_md.hpte_updatepp = ps3_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
	ppc_md.hpte_insert = ps3_hpte_insert;
	ppc_md.hpte_remove = ps3_hpte_remove;
	ppc_md.hpte_clear_all = ps3_hpte_clear;

	ppc64_pft_size = __ilog2(htab_size);
}
Example #14
phys_size_t initdram(int board_type)
{
    u32 msize;

    volatile immap_t *immr = (immap_t *)CONFIG_SYS_IMMR;
    volatile clk83xx_t *clk = (clk83xx_t *)&immr->clk;

    /* Enable PCI_CLK[0:1] */
    clk->occr |= 0xc0000000;
    udelay(2000);

#if defined(CONFIG_SPD_EEPROM)
    msize = spd_sdram();
#else
    immap_t *im = (immap_t *) CONFIG_SYS_IMMR;
    u32 msize_log2;

    msize = CONFIG_SYS_DDR_SIZE;
    msize_log2 = __ilog2(msize);

    im->sysconf.ddrlaw[0].bar = CONFIG_SYS_DDR_SDRAM_BASE & 0xfffff000;
    im->sysconf.ddrlaw[0].ar = LBLAWAR_EN | (msize_log2 - 1);

    im->sysconf.ddrcdr = CONFIG_SYS_DDRCDR_VALUE;
    udelay(50000);

    im->ddr.sdram_clk_cntl = CONFIG_SYS_DDR_SDRAM_CLK_CNTL;
    udelay(1000);

    im->ddr.csbnds[0].csbnds = CONFIG_SYS_DDR_CS0_BNDS;
    im->ddr.cs_config[0] = CONFIG_SYS_DDR_CS0_CONFIG;
    udelay(1000);

    im->ddr.timing_cfg_0 = CONFIG_SYS_DDR_TIMING_0;
    im->ddr.timing_cfg_1 = CONFIG_SYS_DDR_TIMING_1;
    im->ddr.timing_cfg_2 = CONFIG_SYS_DDR_TIMING_2;
    im->ddr.timing_cfg_3 = CONFIG_SYS_DDR_TIMING_3;
    im->ddr.sdram_cfg = CONFIG_SYS_DDR_SDRAM_CFG;
    im->ddr.sdram_cfg2 = CONFIG_SYS_DDR_SDRAM_CFG2;
    im->ddr.sdram_mode = CONFIG_SYS_DDR_MODE;
    im->ddr.sdram_mode2 = CONFIG_SYS_DDR_MODE2;
    im->ddr.sdram_interval = CONFIG_SYS_DDR_INTERVAL;
    __asm__ __volatile__("sync");
    udelay(1000);

    im->ddr.sdram_cfg |= SDRAM_CFG_MEM_EN;
    udelay(2000);
#endif
    setup_serdes();

    return msize << 20;
}
Example #15
unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
			  phys_addr_t phys)
{
	unsigned int camsize = __ilog2(ram);
	unsigned int align = __ffs(virt | phys);
	unsigned long max_cam;

	if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		/* Convert (4^max) kB to (2^max) bytes */
		max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
		camsize &= ~1U;
		align &= ~1U;
	} else {
		/* Convert (2^max) kB to (2^max) bytes */
		max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
	}

	if (camsize > align)
		camsize = align;
	if (camsize > max_cam)
		camsize = max_cam;

	return 1UL << camsize;
}
Example #16
const char * step_to_string(unsigned int step) {

	unsigned int s = __ilog2(step);

	if ((1 << s) != step)
		return step_string_tbl[7];

	if (s >= ARRAY_SIZE(step_string_tbl)) {
		printf("Error for the step in %s\n", __func__);
		s = 0;
	}

	return step_string_tbl[s];
}
Example #17
/* Fixed sdram init -- doesn't use serial presence detect.
 *
 * This is useful for faster booting in configs where the RAM is unlikely
 * to be changed, or for things like NAND booting where space is tight.
 */
static long fixed_sdram(void)
{
	volatile immap_t *im = (volatile immap_t *)CFG_IMMR;
	u32 msize = CFG_DDR_SIZE * 1024 * 1024;
	u32 msize_log2 = __ilog2(msize);

	im->sysconf.ddrlaw[0].bar = CFG_DDR_SDRAM_BASE >> 12;
	im->sysconf.ddrlaw[0].ar = LBLAWAR_EN | (msize_log2 - 1);
	im->sysconf.ddrcdr = CFG_DDRCDR_VALUE;

	/*
	 * Erratum DDR3 requires a 50ms delay after clearing DDRCDR[DDR_cfg],
	 * or the DDR2 controller may fail to initialize correctly.
	 */
	udelay(50000);

	im->ddr.csbnds[0].csbnds = (msize - 1) >> 24;
	im->ddr.cs_config[0] = CFG_DDR_CONFIG;

	/* Currently we use only one CS, so disable the other bank. */
	im->ddr.cs_config[1] = 0;

	im->ddr.sdram_clk_cntl = CFG_DDR_CLK_CNTL;
	im->ddr.timing_cfg_3 = CFG_DDR_TIMING_3;
	im->ddr.timing_cfg_1 = CFG_DDR_TIMING_1;
	im->ddr.timing_cfg_2 = CFG_DDR_TIMING_2;
	im->ddr.timing_cfg_0 = CFG_DDR_TIMING_0;

#ifndef CFG_8313ERDB_BROKEN_PMC
	if (im->pmc.pmccr1 & PMCCR1_POWER_OFF)
		im->ddr.sdram_cfg = CFG_SDRAM_CFG | SDRAM_CFG_BI;
	else
#endif
		im->ddr.sdram_cfg = CFG_SDRAM_CFG;

	im->ddr.sdram_cfg2 = CFG_SDRAM_CFG2;
	im->ddr.sdram_mode = CFG_DDR_MODE;
	im->ddr.sdram_mode2 = CFG_DDR_MODE_2;

	im->ddr.sdram_interval = CFG_DDR_INTERVAL;
	sync();

	/* enable DDR controller */
	im->ddr.sdram_cfg |= SDRAM_CFG_MEM_EN;

	return msize;
}
Example #18
/* Depending on whether this is called from iSeries or pSeries setup
 * code, the location of the msChunks struct may or may not have
 * to be reloc'd, so we force the caller to do that for us by passing
 * in a pointer to the structure.
 */
unsigned long
msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
{
	unsigned long offset = reloc_offset();
	struct msChunks *_msChunks = PTRRELOC(&msChunks);

	_msChunks->num_chunks  = num_chunks;
	_msChunks->chunk_size  = chunk_size;
	_msChunks->chunk_shift = __ilog2(chunk_size);
	_msChunks->chunk_mask  = (1UL<<_msChunks->chunk_shift)-1;

	mem = _ALIGN(mem, sizeof(msChunks_entry));
	_msChunks->abs = (msChunks_entry *)(mem + offset);
	mem += num_chunks * sizeof(msChunks_entry);

	return mem;
}
Example #19
static void iommu_buses_init(void)
{
	struct pci_controller* phb;
	struct device_node *dn, *first_dn;
	int num_slots, num_slots_ilog2;
	int first_phb = 1;
	unsigned long tcetable_ilog2;

	/*
	 * We default to a TCE table that maps 2GB (4MB table, 22 bits),
	 * however some machines have a 3GB IO hole and for these we
	 * create a table that maps 1GB (2MB table, 21 bits)
	 */
	if (io_hole_start < 0x80000000UL)
		tcetable_ilog2 = 21;
	else
		tcetable_ilog2 = 22;

	/* XXX Should we be using pci_root_buses instead?  -ojn */

	for (phb=hose_head; phb; phb=phb->next) {
		first_dn = ((struct device_node *)phb->arch_data)->child;

		/* Carve 2GB into the largest dma_window_size possible */
		for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling)
			num_slots++;
		num_slots_ilog2 = __ilog2(num_slots);

		if ((1<<num_slots_ilog2) != num_slots)
			num_slots_ilog2++;

		phb->dma_window_size = 1 << (tcetable_ilog2 - num_slots_ilog2);

		/* Reserve 16MB of DMA space on the first PHB.
		 * We should probably be more careful and use firmware props.
		 * In reality this space is remapped, not lost.  But we don't
		 * want to get that smart to handle it -- too much work.
		 */
		phb->dma_window_base_cur = first_phb ? (1 << 12) : 0;
		first_phb = 0;

		for (dn = first_dn; dn != NULL; dn = dn->sibling)
			iommu_devnode_init(dn);
	}
}
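The num_slots_ilog2 adjustment above is the usual round-up-to-a-power-of-two idiom built on __ilog2: take the floor log2, then bump it by one if the input was not an exact power of two. A standalone sketch (slot count illustrative):

#include <assert.h>

/* Illustrative floor(log2) stand-in for __ilog2(). */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	int num_slots = 10;			/* illustrative slot count */
	unsigned int n = ilog2_u32(num_slots);	/* floor log2: 3 */

	if ((1 << n) != num_slots)
		n++;				/* round up: 4, i.e. 16 >= 10 */

	assert(n == 4);
	return 0;
}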
Example #20
unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
			  phys_addr_t phys)
{
	unsigned int camsize = __ilog2(ram) & ~1U;
	unsigned int align = __ffs(virt | phys) & ~1U;
	unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf;

	/* Convert (4^max) kB to (2^max) bytes */
	max_cam = max_cam * 2 + 10;

	if (camsize > align)
		camsize = align;
	if (camsize > max_cam)
		camsize = max_cam;

	return 1UL << camsize;
}
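The "(4^max) kB to (2^max) bytes" conversion above is exponent arithmetic: 4^n KiB = 2^(2n) * 2^10 bytes, hence max_cam = n * 2 + 10. A worked instance (the TLB1CFG field value is illustrative):

#include <assert.h>

int main(void)
{
	unsigned long maxsize_field = 9;	/* illustrative TLB1CFG value: 4^9 KiB */

	/* 4^n KiB = 2^(2n) * 2^10 bytes, so max_cam = n * 2 + 10 */
	unsigned long max_cam = maxsize_field * 2 + 10;

	assert(max_cam == 28);			/* 1UL << 28 == 256 MiB */
	return 0;
}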
Example #21
int set_last_law(phys_addr_t addr, enum law_size sz, enum law_trgt_if id)
{
	u32 idx;

	/* we have no LAWs free */
	if (gd->used_laws == -1)
		return -1;

	/* grab the last free law */
	idx = __ilog2(~(gd->used_laws));

	if (idx >= FSL_HW_NUM_LAWS)
		return -1;

	set_law(idx, addr, sz, id);

	return idx;
}
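The __ilog2(~(gd->used_laws)) trick above finds the highest-numbered free LAW: complementing the in-use bitmap turns free slots into set bits, and __ilog2 picks the most significant one. A standalone sketch (bitmap value illustrative):

#include <assert.h>

/* Illustrative floor(log2) stand-in for __ilog2(). */
static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int used_laws = 0xffffffdf;	/* every LAW in use except #5 */

	/* Complementing the bitmap turns free slots into set bits;
	 * __ilog2 then returns the highest-numbered free index. */
	assert(ilog2_u32(~used_laws) == 5);
	return 0;
}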
Example #22
static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
Example #23
static void stm32_sdmmc2_start_data(struct stm32_sdmmc2_priv *priv,
				    struct mmc_data *data,
				    struct stm32_sdmmc2_ctx *ctx)
{
	u32 data_ctrl, idmabase0;

	/* Configure the SDMMC DPSM (Data Path State Machine) */
	data_ctrl = (__ilog2(data->blocksize) <<
		     SDMMC_DCTRL_DBLOCKSIZE_SHIFT) &
		    SDMMC_DCTRL_DBLOCKSIZE;

	if (data->flags & MMC_DATA_READ) {
		data_ctrl |= SDMMC_DCTRL_DTDIR;
		idmabase0 = (u32)data->dest;
	} else {
		idmabase0 = (u32)data->src;
	}

	/* Set the SDMMC Data TimeOut value */
	writel(SDMMC_CMD_TIMEOUT, priv->base + SDMMC_DTIMER);

	/* Set the SDMMC DataLength value */
	writel(ctx->data_length, priv->base + SDMMC_DLEN);

	/* Write to SDMMC DCTRL */
	writel(data_ctrl, priv->base + SDMMC_DCTRL);

	/* Cache align */
	ctx->cache_start = rounddown(idmabase0, ARCH_DMA_MINALIGN);
	ctx->cache_end = roundup(idmabase0 + ctx->data_length,
				 ARCH_DMA_MINALIGN);

	/*
	 * Flush data cache before DMA start (clean and invalidate)
	 * Clean also needed for read
	 * Avoid issue on buffer not cached-aligned
	 */
	flush_dcache_range(ctx->cache_start, ctx->cache_end);

	/* Enable internal DMA */
	writel(idmabase0, priv->base + SDMMC_IDMABASE0);
	writel(SDMMC_IDMACTRL_IDMAEN, priv->base + SDMMC_IDMACTRL);
}
Example #24
/*
 * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
 * in particular size must be a power of 4 between 4k and the max supported by
 * an implementation; max may further be limited by what can be represented in
 * an unsigned long (for example, 32-bit implementations cannot support a 4GB
 * size).
 */
void settlbcam(int index, unsigned long virt, phys_addr_t phys,
		unsigned long size, unsigned long flags, unsigned int pid)
{
	unsigned int tsize;

	tsize = __ilog2(size) - 10;

#ifdef CONFIG_SMP
	if ((flags & _PAGE_NO_CACHE) == 0)
		flags |= _PAGE_COHERENT;
#endif

	TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
	TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
	TLBCAM[index].MAS2 = virt & PAGE_MASK;

	TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
	TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;

	TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR;
	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		TLBCAM[index].MAS7 = (u64)phys >> 32;

	/* Below is unlikely -- only for large user pages or similar */
	if (pte_user(flags)) {
	   TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
	   TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
	}

	tlbcam_addrs[index].start = virt;
	tlbcam_addrs[index].limit = virt + size - 1;
	tlbcam_addrs[index].phys = phys;

	loadcam_entry(index);
}
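The tsize computed above is simply log2 of the mapping size in KiB, hence the "- 10" (2^10 bytes = 1 KiB). A worked instance (the 64 MiB size is illustrative):

#include <assert.h>

int main(void)
{
	unsigned long size = 64UL * 1024 * 1024;	/* a 64 MiB entry */

	/* __ilog2(size) - 10 == log2(size in KiB): 26 - 10 = 16,
	 * and 2^16 KiB is exactly 64 MiB. */
	unsigned int tsize = 26 - 10;

	assert((1UL << (tsize + 10)) == size);
	return 0;
}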
Example #25
/*************************************************************************
 *  fixed sdram init -- doesn't use serial presence detect.
 ************************************************************************/
int fixed_sdram(void)
{
	volatile immap_t *im = (immap_t *) CONFIG_SYS_IMMR;
	u32 msize = CONFIG_SYS_DDR_SIZE * 1024 * 1024;
	u32 msize_log2 = __ilog2(msize);

	im->sysconf.ddrlaw[0].bar = CONFIG_SYS_DDR_SDRAM_BASE & 0xfffff000;
	im->sysconf.ddrlaw[0].ar = LBLAWAR_EN | (msize_log2 - 1);

#if (CONFIG_SYS_DDR_SIZE != 512)
#warning Currently any DDR size other than 512 is not supported
#endif
	im->sysconf.ddrcdr = CONFIG_SYS_DDRCDR_VALUE;
	udelay(50000);

	im->ddr.sdram_clk_cntl = CONFIG_SYS_DDR_SDRAM_CLK_CNTL;
	udelay(1000);

	im->ddr.csbnds[0].csbnds = CONFIG_SYS_DDR_CS0_BNDS;
	im->ddr.cs_config[0] = CONFIG_SYS_DDR_CS0_CONFIG;
	udelay(1000);

	im->ddr.timing_cfg_0 = CONFIG_SYS_DDR_TIMING_0;
	im->ddr.timing_cfg_1 = CONFIG_SYS_DDR_TIMING_1;
	im->ddr.timing_cfg_2 = CONFIG_SYS_DDR_TIMING_2;
	im->ddr.timing_cfg_3 = CONFIG_SYS_DDR_TIMING_3;
	im->ddr.sdram_cfg = CONFIG_SYS_DDR_SDRAM_CFG;
	im->ddr.sdram_cfg2 = CONFIG_SYS_DDR_SDRAM_CFG2;
	im->ddr.sdram_mode = CONFIG_SYS_DDR_MODE;
	im->ddr.sdram_mode2 = CONFIG_SYS_DDR_MODE2;
	im->ddr.sdram_interval = CONFIG_SYS_DDR_INTERVAL;
	__asm__ __volatile__("sync");
	udelay(1000);

	im->ddr.sdram_cfg |= SDRAM_CFG_MEM_EN;
	udelay(2000);
	return CONFIG_SYS_DDR_SIZE;
}
Example #26
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
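The sizing rule above rounds RAM up to a power of two, allocates one PTEG per two 4 KiB pages with a floor of 2^11 PTEGs, and uses 128 bytes per PTEG. A worked instance (assumes a 64-bit unsigned long; the 3 GiB RAM size is illustrative):

#include <assert.h>

int main(void)
{
	unsigned long mem_size = 3UL << 30;		/* e.g. 3 GiB of RAM */

	/* round up to the next power of two: 1 << __ilog2(mem_size) = 2 GiB */
	unsigned long rnd_mem_size = 1UL << 31;
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;			/* -> 4 GiB */

	/* one PTEG per two 4 KiB pages, floor of 2^11 PTEGs */
	unsigned long pteg_count = rnd_mem_size >> (12 + 1);
	if (pteg_count < (1UL << 11))
		pteg_count = 1UL << 11;

	/* 128 bytes per PTEG -> a 64 MiB hash table for 4 GiB rounded RAM */
	assert((pteg_count << 7) == (1UL << 26));
	return 0;
}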
Example #27
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly cache information about the CPU that will be used by the cache
 * flush routines and/or provided to userland.
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
		num_cpus += 1;

		/* We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */

		if (num_cpus == 1) {
			const u32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = of_get_property(np, "d-cache-block-size", NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np, "d-cache-line-size", NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = of_get_property(np, "i-cache-block-size", NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np, "i-cache-line-size", NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	DBG(" <- initialize_cache_info()\n");
}
Example #28
static void pci_init_bus(int bus, struct pci_region *reg)
{
    volatile immap_t *immr = (volatile immap_t *)CONFIG_SYS_IMMR;
    volatile pot83xx_t *pot = immr->ios.pot;
    volatile pcictrl83xx_t *pci_ctrl = &immr->pci_ctrl[bus];
    struct pci_controller *hose = &pci_hose[bus];
    u32 dev;
    u16 reg16;
    int i;

    if (bus == 1)
        pot += 3;

    /* Setup outbound translation windows */
    for (i = 0; i < 3; i++, reg++, pot++) {
        if (reg->size == 0)
            break;

        hose->regions[i] = *reg;
        hose->region_count++;

        pot->potar = reg->bus_start >> 12;
        pot->pobar = reg->phys_start >> 12;
        pot->pocmr = ~(reg->size - 1) >> 12;

        if (reg->flags & PCI_REGION_IO)
            pot->pocmr |= POCMR_IO;
#ifdef CONFIG_83XX_PCI_STREAMING
        else if (reg->flags & PCI_REGION_PREFETCH)
            pot->pocmr |= POCMR_SE;
#endif

        if (bus == 1)
            pot->pocmr |= POCMR_DST;

        pot->pocmr |= POCMR_EN;
    }

    /* Point inbound translation at RAM */
    pci_ctrl->pitar1 = 0;
    pci_ctrl->pibar1 = 0;
    pci_ctrl->piebar1 = 0;
    pci_ctrl->piwar1 = PIWAR_EN | PIWAR_PF | PIWAR_RTT_SNOOP |
                       PIWAR_WTT_SNOOP | (__ilog2(gd->ram_size) - 1);

    i = hose->region_count++;
    hose->regions[i].bus_start = 0;
    hose->regions[i].phys_start = 0;
    hose->regions[i].size = gd->ram_size;
    hose->regions[i].flags = PCI_REGION_MEM | PCI_REGION_SYS_MEMORY;

    hose->first_busno = pci_last_busno() + 1;
    hose->last_busno = 0xff;

    pci_setup_indirect(hose, CONFIG_SYS_IMMR + 0x8300 + bus * 0x80,
                       CONFIG_SYS_IMMR + 0x8304 + bus * 0x80);

    pci_register_hose(hose);

    /*
     * Write to Command register
     */
    reg16 = 0xff;
    dev = PCI_BDF(hose->first_busno, 0, 0);
    pci_hose_read_config_word(hose, dev, PCI_COMMAND, &reg16);
    reg16 |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
    pci_hose_write_config_word(hose, dev, PCI_COMMAND, reg16);

    /*
     * Clear non-reserved bits in status register.
     */
    pci_hose_write_config_word(hose, dev, PCI_STATUS, 0xffff);
    pci_hose_write_config_byte(hose, dev, PCI_LATENCY_TIMER, 0x80);
    pci_hose_write_config_byte(hose, dev, PCI_CACHE_LINE_SIZE, 0x08);

#ifdef CONFIG_PCI_SCAN_SHOW
    printf("PCI:   Bus Dev VenId DevId Class Int\n");
#endif
#ifndef CONFIG_PCISLAVE
    /*
     * Hose scan.
     */
    hose->last_busno = pci_hose_scan(hose);
#endif
}
Example #29
/*
 * fixed sdram init:
 * The board doesn't use memory modules that have serial presence
 * detect or similar mechanism for discovery of the DRAM settings
 */
long int fixed_sdram(ddr512x_config_t *mddrc_config,
			u32 *dram_init_seq, int seq_sz)
{
	volatile immap_t *im = (immap_t *)CONFIG_SYS_IMMR;
	u32 msize = CONFIG_SYS_MAX_RAM_SIZE;
	u32 msize_log2 = __ilog2(msize);
	u32 i;

	/* take default settings and init sequence if necessary */
	if (mddrc_config == NULL)
		mddrc_config = &default_mddrc_config;
	if (dram_init_seq == NULL) {
		dram_init_seq = default_init_seq;
		seq_sz = sizeof(default_init_seq)/sizeof(u32);
	}

	/* Initialize IO Control */
	out_be32(&im->io_ctrl.io_control_mem, CONFIG_SYS_IOCTRL_MUX_DDR);

	/* Initialize DDR Local Window */
	out_be32(&im->sysconf.ddrlaw.bar, CONFIG_SYS_DDR_BASE & 0xFFFFF000);
	out_be32(&im->sysconf.ddrlaw.ar, msize_log2 - 1);
	sync_law(&im->sysconf.ddrlaw.ar);

	/* DDR Enable */
	out_be32(&im->mddrc.ddr_sys_config, MDDRC_SYS_CFG_EN);

	/* Initialize DDR Priority Manager */
	out_be32(&im->mddrc.prioman_config1, CONFIG_SYS_MDDRCGRP_PM_CFG1);
	out_be32(&im->mddrc.prioman_config2, CONFIG_SYS_MDDRCGRP_PM_CFG2);
	out_be32(&im->mddrc.hiprio_config, CONFIG_SYS_MDDRCGRP_HIPRIO_CFG);
	out_be32(&im->mddrc.lut_table0_main_upper, CONFIG_SYS_MDDRCGRP_LUT0_MU);
	out_be32(&im->mddrc.lut_table0_main_lower, CONFIG_SYS_MDDRCGRP_LUT0_ML);
	out_be32(&im->mddrc.lut_table1_main_upper, CONFIG_SYS_MDDRCGRP_LUT1_MU);
	out_be32(&im->mddrc.lut_table1_main_lower, CONFIG_SYS_MDDRCGRP_LUT1_ML);
	out_be32(&im->mddrc.lut_table2_main_upper, CONFIG_SYS_MDDRCGRP_LUT2_MU);
	out_be32(&im->mddrc.lut_table2_main_lower, CONFIG_SYS_MDDRCGRP_LUT2_ML);
	out_be32(&im->mddrc.lut_table3_main_upper, CONFIG_SYS_MDDRCGRP_LUT3_MU);
	out_be32(&im->mddrc.lut_table3_main_lower, CONFIG_SYS_MDDRCGRP_LUT3_ML);
	out_be32(&im->mddrc.lut_table4_main_upper, CONFIG_SYS_MDDRCGRP_LUT4_MU);
	out_be32(&im->mddrc.lut_table4_main_lower, CONFIG_SYS_MDDRCGRP_LUT4_ML);
	out_be32(&im->mddrc.lut_table0_alternate_upper, CONFIG_SYS_MDDRCGRP_LUT0_AU);
	out_be32(&im->mddrc.lut_table0_alternate_lower, CONFIG_SYS_MDDRCGRP_LUT0_AL);
	out_be32(&im->mddrc.lut_table1_alternate_upper, CONFIG_SYS_MDDRCGRP_LUT1_AU);
	out_be32(&im->mddrc.lut_table1_alternate_lower, CONFIG_SYS_MDDRCGRP_LUT1_AL);
	out_be32(&im->mddrc.lut_table2_alternate_upper, CONFIG_SYS_MDDRCGRP_LUT2_AU);
	out_be32(&im->mddrc.lut_table2_alternate_lower, CONFIG_SYS_MDDRCGRP_LUT2_AL);
	out_be32(&im->mddrc.lut_table3_alternate_upper, CONFIG_SYS_MDDRCGRP_LUT3_AU);
	out_be32(&im->mddrc.lut_table3_alternate_lower, CONFIG_SYS_MDDRCGRP_LUT3_AL);
	out_be32(&im->mddrc.lut_table4_alternate_upper, CONFIG_SYS_MDDRCGRP_LUT4_AU);
	out_be32(&im->mddrc.lut_table4_alternate_lower, CONFIG_SYS_MDDRCGRP_LUT4_AL);

	/*
	 * Initialize MDDRC
	 *  put MDDRC in CMD mode and
	 *  set the max time between refreshes to 0 during init process
	 */
	out_be32(&im->mddrc.ddr_sys_config,
		mddrc_config->ddr_sys_config | MDDRC_SYS_CFG_CMD_MASK);
	out_be32(&im->mddrc.ddr_time_config0,
		mddrc_config->ddr_time_config0 & MDDRC_REFRESH_ZERO_MASK);
	out_be32(&im->mddrc.ddr_time_config1,
		mddrc_config->ddr_time_config1);
	out_be32(&im->mddrc.ddr_time_config2,
		mddrc_config->ddr_time_config2);

	/* Initialize DDR with either default or supplied init sequence */
	for (i = 0; i < seq_sz; i++)
		out_be32(&im->mddrc.ddr_command, dram_init_seq[i]);

	/* Start MDDRC */
	out_be32(&im->mddrc.ddr_time_config0, mddrc_config->ddr_time_config0);
	out_be32(&im->mddrc.ddr_sys_config, mddrc_config->ddr_sys_config);

	msize = get_ram_size(CONFIG_SYS_DDR_BASE, CONFIG_SYS_MAX_RAM_SIZE);
	/* Fix DDR Local Window for new size */
	out_be32(&im->sysconf.ddrlaw.ar, __ilog2(msize) - 1);
	sync_law(&im->sysconf.ddrlaw.ar);

	return msize;
}
Example #30
/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params.
 */
unsigned int
compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
				      common_timing_params_t *outpdimm,
				      unsigned int number_of_dimms)
{
	unsigned int i, j;

	unsigned int tCKmin_X_ps = 0;
	unsigned int tCKmax_ps = 0xFFFFFFFF;
	unsigned int tCKmax_max_ps = 0;
	unsigned int tRCD_ps = 0;
	unsigned int tRP_ps = 0;
	unsigned int tRAS_ps = 0;
	unsigned int tWR_ps = 0;
	unsigned int tWTR_ps = 0;
	unsigned int tRFC_ps = 0;
	unsigned int tRRD_ps = 0;
	unsigned int tRC_ps = 0;
	unsigned int refresh_rate_ps = 0;
	unsigned int tIS_ps = 0;
	unsigned int tIH_ps = 0;
	unsigned int tDS_ps = 0;
	unsigned int tDH_ps = 0;
	unsigned int tRTP_ps = 0;
	unsigned int tDQSQ_max_ps = 0;
	unsigned int tQHS_ps = 0;

	unsigned int temp1, temp2;
	unsigned int additive_latency = 0;
#if !defined(CONFIG_FSL_DDR3)
	const unsigned int mclk_ps = get_memory_clk_period_ps();
	unsigned int lowest_good_caslat;
	unsigned int not_ok;

	debug("using mclk_ps = %u\n", mclk_ps);
#endif

	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		/*
		 * If there are no ranks on this DIMM,
		 * it probably doesn't exist, so skip it.
		 */
		if (dimm_params[i].n_ranks == 0) {
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 && i != 0) {
			printf("Found Quad-rank DIMM in wrong bank, ignored."
				" Software may not run as expected.\n");
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 &&
		    CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) {
			printf("Found Quad-rank DIMM, not able to support.");
			temp1++;
			continue;
		}

		/*
		 * Find minimum tCKmax_ps to find fastest slow speed,
		 * i.e., this is the slowest the whole system can go.
		 */
		tCKmax_ps = min(tCKmax_ps, dimm_params[i].tCKmax_ps);

		/* Find the maximum value to determine the slowest
		 * speed, delay, time, period, etc. */
		tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
		tCKmax_max_ps = max(tCKmax_max_ps, dimm_params[i].tCKmax_ps);
		tRCD_ps = max(tRCD_ps, dimm_params[i].tRCD_ps);
		tRP_ps = max(tRP_ps, dimm_params[i].tRP_ps);
		tRAS_ps = max(tRAS_ps, dimm_params[i].tRAS_ps);
		tWR_ps = max(tWR_ps, dimm_params[i].tWR_ps);
		tWTR_ps = max(tWTR_ps, dimm_params[i].tWTR_ps);
		tRFC_ps = max(tRFC_ps, dimm_params[i].tRFC_ps);
		tRRD_ps = max(tRRD_ps, dimm_params[i].tRRD_ps);
		tRC_ps = max(tRC_ps, dimm_params[i].tRC_ps);
		tIS_ps = max(tIS_ps, dimm_params[i].tIS_ps);
		tIH_ps = max(tIH_ps, dimm_params[i].tIH_ps);
		tDS_ps = max(tDS_ps, dimm_params[i].tDS_ps);
		tDH_ps = max(tDH_ps, dimm_params[i].tDH_ps);
		tRTP_ps = max(tRTP_ps, dimm_params[i].tRTP_ps);
		tQHS_ps = max(tQHS_ps, dimm_params[i].tQHS_ps);
		refresh_rate_ps = max(refresh_rate_ps,
				      dimm_params[i].refresh_rate_ps);

		/*
		 * Find maximum tDQSQ_max_ps to find slowest.
		 *
		 * FIXME: is finding the slowest value the correct
		 * strategy for this parameter?
		 */
		tDQSQ_max_ps = max(tDQSQ_max_ps, dimm_params[i].tDQSQ_max_ps);
	}

	outpdimm->ndimms_present = number_of_dimms - temp1;

	if (temp1 == number_of_dimms) {
		debug("no dimms this memory controller\n");
		return 0;
	}

	outpdimm->tCKmin_X_ps = tCKmin_X_ps;
	outpdimm->tCKmax_ps = tCKmax_ps;
	outpdimm->tCKmax_max_ps = tCKmax_max_ps;
	outpdimm->tRCD_ps = tRCD_ps;
	outpdimm->tRP_ps = tRP_ps;
	outpdimm->tRAS_ps = tRAS_ps;
	outpdimm->tWR_ps = tWR_ps;
	outpdimm->tWTR_ps = tWTR_ps;
	outpdimm->tRFC_ps = tRFC_ps;
	outpdimm->tRRD_ps = tRRD_ps;
	outpdimm->tRC_ps = tRC_ps;
	outpdimm->refresh_rate_ps = refresh_rate_ps;
	outpdimm->tIS_ps = tIS_ps;
	outpdimm->tIH_ps = tIH_ps;
	outpdimm->tDS_ps = tDS_ps;
	outpdimm->tDH_ps = tDH_ps;
	outpdimm->tRTP_ps = tRTP_ps;
	outpdimm->tDQSQ_max_ps = tDQSQ_max_ps;
	outpdimm->tQHS_ps = tQHS_ps;

	/* Determine common burst length for all DIMMs. */
	temp1 = 0xff;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp1 &= dimm_params[i].burst_lengths_bitmask;
		}
	}
	outpdimm->all_DIMMs_burst_lengths_bitmask = temp1;

	/* Determine if all DIMMs are registered (buffered). */
	temp1 = temp2 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			if (dimm_params[i].registered_dimm) {
				temp1 = 1;
				printf("Detected RDIMM %s\n",
					dimm_params[i].mpart);
			} else {
				temp2 = 1;
				printf("Detected UDIMM %s\n",
					dimm_params[i].mpart);
			}
		}
	}

	outpdimm->all_DIMMs_registered = 0;
	outpdimm->all_DIMMs_unbuffered = 0;
	if (temp1 && !temp2) {
		outpdimm->all_DIMMs_registered = 1;
	} else if (!temp1 && temp2) {
		outpdimm->all_DIMMs_unbuffered = 1;
	} else {
		printf("ERROR:  Mix of registered buffered and unbuffered "
				"DIMMs detected!\n");
	}

	temp1 = 0;
	if (outpdimm->all_DIMMs_registered)
		for (j = 0; j < 16; j++) {
			outpdimm->rcw[j] = dimm_params[0].rcw[j];
			for (i = 1; i < number_of_dimms; i++)
				if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
					temp1 = 1;
					break;
				}
		}

	if (temp1 != 0)
		printf("ERROR: Mix different RDIMM detected!\n");

#if defined(CONFIG_FSL_DDR3)
	if (compute_cas_latency_ddr3(dimm_params, outpdimm, number_of_dimms))
		return 1;
#else
	/*
	 * Compute a CAS latency suitable for all DIMMs
	 *
	 * Strategy for SPD-defined latencies: compute only
	 * CAS latency defined by all DIMMs.
	 */

	/*
	 * Step 1: find CAS latency common to all DIMMs using bitwise
	 * operation.
	 */
	temp1 = 0xFF;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp2 = 0;
			temp2 |= 1 << dimm_params[i].caslat_X;
			temp2 |= 1 << dimm_params[i].caslat_X_minus_1;
			temp2 |= 1 << dimm_params[i].caslat_X_minus_2;
			/*
			 * FIXME: If there was no entry for X-2 (X-1) in
			 * the SPD, then caslat_X_minus_2
			 * (caslat_X_minus_1) contains either 255 or
			 * 0xFFFFFFFF because that's what the glorious
			 * __ilog2 function returns for an input of 0.
			 * On 32-bit PowerPC, left shift counts with bit
			 * 26 set (that the value of 255 or 0xFFFFFFFF
			 * will have), cause the destination register to
			 * be 0.  That is why this works.
			 */
			temp1 &= temp2;
		}
	}

	/*
	 * Step 2: check each common CAS latency against tCK of each
	 * DIMM's SPD.
	 */
	lowest_good_caslat = 0;
	temp2 = 0;
	while (temp1) {
		not_ok = 0;
		temp2 =  __ilog2(temp1);
		debug("checking common caslat = %u\n", temp2);

		/* Check if this CAS latency will work on all DIMMs at tCK. */
		for (i = 0; i < number_of_dimms; i++) {
			if (!dimm_params[i].n_ranks) {
				continue;
			}
			if (dimm_params[i].caslat_X == temp2) {
				if (mclk_ps >= dimm_params[i].tCKmin_X_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u"
					    " ps with its tCKmin_X_ps of %u\n",
					       temp2, i, mclk_ps,
					       dimm_params[i].tCKmin_X_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_X_minus_1 == temp2) {
				unsigned int tCKmin_X_minus_1_ps
					= dimm_params[i].tCKmin_X_minus_1_ps;
				if (mclk_ps >= tCKmin_X_minus_1_ps) {
					debug("CL = %u ok on DIMM %u at "
						"tCK=%u ps with its "
						"tCKmin_X_minus_1_ps of %u\n",
					       temp2, i, mclk_ps,
					       tCKmin_X_minus_1_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_X_minus_2 == temp2) {
				unsigned int tCKmin_X_minus_2_ps
					= dimm_params[i].tCKmin_X_minus_2_ps;
				if (mclk_ps >= tCKmin_X_minus_2_ps) {
					debug("CL = %u ok on DIMM %u at "
						"tCK=%u ps with its "
						"tCKmin_X_minus_2_ps of %u\n",
					       temp2, i, mclk_ps,
					       tCKmin_X_minus_2_ps);
					continue;
				} else {
					not_ok++;
				}
			}
		}

		if (!not_ok) {
			lowest_good_caslat = temp2;
		}

		temp1 &= ~(1 << temp2);
	}

	debug("lowest common SPD-defined CAS latency = %u\n",
	       lowest_good_caslat);
	outpdimm->lowest_common_SPD_caslat = lowest_good_caslat;


	/*
	 * Compute a common 'de-rated' CAS latency.
	 *
	 * The strategy here is to find the *highest* derated CAS latency
	 * with the assumption that all of the DIMMs will support a derated
	 * CAS latency higher than or equal to their lowest derated value.
	 */
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);
	}
	outpdimm->highest_common_derated_caslat = temp1;
	debug("highest common dereated CAS latency = %u\n", temp1);
#endif /* #if defined(CONFIG_FSL_DDR3) */

	/* Determine if all DIMMs ECC capable. */
	temp1 = 1;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks &&
			!(dimm_params[i].edc_config & EDC_ECC)) {
			temp1 = 0;
			break;
		}
	}
	if (temp1) {
		debug("all DIMMs ECC capable\n");
	} else {
		debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
	}
	outpdimm->all_DIMMs_ECC_capable = temp1;

#ifndef CONFIG_FSL_DDR3
	/* FIXME: move to somewhere else to validate. */
	if (mclk_ps > tCKmax_max_ps) {
		printf("Warning: some of the installed DIMMs "
				"can not operate this slowly.\n");
		return 1;
	}
#endif
	/*
	 * Compute additive latency.
	 *
	 * For DDR1, additive latency should be 0.
	 *
	 * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
	 *	which comes from Trcd, and also note that:
	 *	    add_lat + caslat must be >= 4
	 *
	 * For DDR3, we use the AL=0
	 *
	 * When to use additive latency for DDR2:
	 *
	 * I. Because you are using CL=3 and need to do ODT on writes and
	 *    want functionality.
	 *    1. Are you going to use ODT? (Does your board not have
	 *      additional termination circuitry for DQ, DQS, DQS_,
	 *      DM, RDQS, RDQS_ for x4/x8 configs?)
	 *    2. If so, is your lowest supported CL going to be 3?
	 *    3. If so, then you must set AL=1 because
	 *
	 *       WL >= 3 for ODT on writes
	 *       RL = AL + CL
	 *       WL = RL - 1
	 *       ->
	 *       WL = AL + CL - 1
	 *       AL + CL - 1 >= 3
	 *       AL + CL >= 4
	 *  QED
	 *
	 *  RL >= 3 for ODT on reads
	 *  RL = AL + CL
	 *
	 *  Since CLs aren't usually less than 2, AL=0 is a minimum,
	 *  so the WL-derived AL should be the  -- FIXME?
	 *
	 * II. Because you are using auto-precharge globally and want to
	 *     use additive latency (posted CAS) to get more bandwidth.
	 *     1. Are you going to use auto-precharge mode globally?
	 *
	 *        Use additive latency and compute AL to be 1 cycle less than
	 *        tRCD, i.e. the READ or WRITE command is in the cycle
	 *        immediately following the ACTIVATE command.
	 *
	 * III. Because you feel like it or want to do some sort of
	 *      degraded-performance experiment.
	 *     1.  Do you just want to use additive latency because you feel
	 *         like it?
	 *
	 * Validation:  AL is less than tRCD, and within the other
	 * read-to-precharge constraints.
	 */

	additive_latency = 0;

#if defined(CONFIG_FSL_DDR2)
	if (lowest_good_caslat < 4) {
		additive_latency = picos_to_mclk(tRCD_ps) - lowest_good_caslat;
		if (mclk_to_picos(additive_latency) > tRCD_ps) {
			additive_latency = picos_to_mclk(tRCD_ps);
			debug("setting additive_latency to %u because it was "
				" greater than tRCD_ps\n", additive_latency);
		}
	}

#elif defined(CONFIG_FSL_DDR3)
	/*
	 * The system will not use the global auto-precharge mode.
	 * However, it uses the page mode, so we set AL=0
	 */
	additive_latency = 0;
#endif

	/*
	 * Validate additive latency
	 * FIXME: move to somewhere else to validate
	 *
	 * AL <= tRCD(min)
	 */
	if (mclk_to_picos(additive_latency) > tRCD_ps) {
		printf("Error: invalid additive latency exceeds tRCD(min).\n");
		return 1;
	}

	/*
	 * RL = CL + AL;  RL >= 3 for ODT_RD_CFG to be enabled
	 * WL = RL - 1;  WL >= 3 for ODT_WL_CFG to be enabled
	 * ADD_LAT (the register) must be set to a value less
	 * than ACTTORW if WL = 1, then AL must be set to 1
	 * RD_TO_PRE (the register) must be set to a minimum
	 * tRTP + AL if AL is nonzero
	 */

	/*
	 * Additive latency will be applied only if the memctl option to
	 * use it is set.
	 */
	outpdimm->additive_latency = additive_latency;

	return 0;
}