/*
 * Register the SPI controller platform device.
 *
 * Returns 0 on success, -ENODEV when the SoC has no SPI block,
 * or the error from platform_device_register().
 */
int __init bcm63xx_spi_register(void)
{
	/* the BCM6328 and BCM6345 do not have this SPI block */
	if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
		return -ENODEV;

	spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
	spi_resources[0].end = spi_resources[0].start;
	spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);

	/* fill in register-window size and FIFO/message layout per SoC */
	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
		spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
		spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
		spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
		spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
	}

	if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
		spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
		spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
		spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
		spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
	}

	/* select the per-SoC SPI register layout table */
	bcm63xx_spi_regs_init();

	return platform_device_register(&bcm63xx_spi_device);
}
static unsigned int detect_cpu_clock(void) { unsigned int tmp, n1 = 0, n2 = 0, m1 = 0; if (BCMCPU_IS_6338()) { return 240000000; } /* * frequency depends on PLL configuration: */ if (BCMCPU_IS_6348()) { /* 16MHz * (N1 + 1) * (N2 + 2) / (M1_CPU + 1) */ tmp = bcm_perf_readl(PERF_MIPSPLLCTL_REG); n1 = (tmp & MIPSPLLCTL_N1_MASK) >> MIPSPLLCTL_N1_SHIFT; n2 = (tmp & MIPSPLLCTL_N2_MASK) >> MIPSPLLCTL_N2_SHIFT; m1 = (tmp & MIPSPLLCTL_M1CPU_MASK) >> MIPSPLLCTL_M1CPU_SHIFT; n1 += 1; n2 += 2; m1 += 1; } if (BCMCPU_IS_6358()) { /* 16MHz * N1 * N2 / M1_CPU */ tmp = bcm_ddr_readl(DDR_DMIPSPLLCFG_REG); n1 = (tmp & DMIPSPLLCFG_N1_MASK) >> DMIPSPLLCFG_N1_SHIFT; n2 = (tmp & DMIPSPLLCFG_N2_MASK) >> DMIPSPLLCFG_N2_SHIFT; m1 = (tmp & DMIPSPLLCFG_M1_MASK) >> DMIPSPLLCFG_M1_SHIFT; } return (16 * 1000000 * n1 * n2) / m1; }
/*
 * Early boot initialization: identify the CPU, quiesce the watchdog,
 * gate unused peripheral clocks, and run GPIO/board-level setup.
 */
void __init prom_init(void)
{
	u32 reg, mask;

	bcm63xx_cpu_init();

	/* stop any running watchdog */
	bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
	bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);

	/* disable all hardware blocks clock for now */
	if (BCMCPU_IS_6338())
		mask = CKCTL_6338_ALL_SAFE_EN;
	else if (BCMCPU_IS_6345())
		mask = CKCTL_6345_ALL_SAFE_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_ALL_SAFE_EN;
	else if (BCMCPU_IS_6358())
		mask = CKCTL_6358_ALL_SAFE_EN;
	else if (BCMCPU_IS_6368())
		mask = CKCTL_6368_ALL_SAFE_EN;
	else
		mask = 0;

	reg = bcm_perf_readl(PERF_CKCTL_REG);
	reg &= ~mask;
	bcm_perf_writel(reg, PERF_CKCTL_REG);

	/* register gpiochip */
	bcm63xx_gpio_init();

	/* do low level board init */
	board_prom_init();
}
/*
 * Point the global SPI register-layout table at the variant matching
 * the detected SoC.  The BCMCPU_IS_* tests are mutually exclusive, so
 * at most one assignment takes effect.
 */
static __init void bcm63xx_spi_regs_init(void)
{
	if (BCMCPU_IS_6338())
		bcm63xx_regs_spi = bcm6338_regs_spi;
	if (BCMCPU_IS_6348())
		bcm63xx_regs_spi = bcm6348_regs_spi;
	if (BCMCPU_IS_6358())
		bcm63xx_regs_spi = bcm6358_regs_spi;
	if (BCMCPU_IS_6368())
		bcm63xx_regs_spi = bcm6368_regs_spi;
}
/*
 * attempt to detect the amount of memory installed
 */
static unsigned int detect_memory_size(void)
{
	unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
	u32 val;

	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
		/* SDRAM geometry (rows/cols/width/banks) is encoded in
		 * the SDRAM config register */
		val = bcm_sdram_readl(SDRAM_CFG_REG);
		rows = (val & SDRAM_CFG_ROW_MASK) >> SDRAM_CFG_ROW_SHIFT;
		cols = (val & SDRAM_CFG_COL_MASK) >> SDRAM_CFG_COL_SHIFT;
		is_32bits = (val & SDRAM_CFG_32B_MASK) ? 1 : 0;
		banks = (val & SDRAM_CFG_BANK_MASK) ? 2 : 1;
	}
	/* NOTE(review): the remainder of this function (size computation
	 * from rows/cols/banks and the return) is truncated in this chunk */
/*
 * Clock callback: gate/ungate the SPI block clock for the running SoC.
 * Any CPU that is not a 6338 or 6348 falls through to the 6358 mask.
 */
static void spi_set(struct clk *clk, int enable)
{
	u32 mask;

	if (BCMCPU_IS_6338())
		mask = CKCTL_6338_SPI_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_SPI_EN;
	else
		mask = CKCTL_6358_SPI_EN;

	bcm_hwclock_set(mask, enable);
}
/*
 * Ethernet MAC "misc" clock: dma clocks and main clock on 6348
 */
static void enet_misc_set(struct clk *clk, int enable)
{
	u32 mask;

	if (BCMCPU_IS_6338())
		mask = CKCTL_6338_ENET_EN;
	else if (BCMCPU_IS_6345())
		mask = CKCTL_6345_ENET_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_ENET_EN;
	else
		/* BCMCPU_IS_6358 */
		mask = CKCTL_6358_EMUSB_EN;

	bcm_hwclock_set(mask, enable);
}
/*
 * attempt to detect the amount of memory installed
 */
static unsigned int detect_memory_size(void)
{
	unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
	u32 val;

	/* 6328/6362: DDR controller exposes the end-of-chip-select
	 * address directly, in 16MB units (<< 24) */
	if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
		return bcm_ddr_readl(DDR_CSEND_REG) << 24;

	/* 6345: SDRAM base register holds the size in 8MB units */
	if (BCMCPU_IS_6345()) {
		val = bcm_sdram_readl(SDRAM_MBASE_REG);
		return val * 8 * 1024 * 1024;
	}

	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
		/* SDRAM geometry (rows/cols/width/banks) is encoded in
		 * the SDRAM config register */
		val = bcm_sdram_readl(SDRAM_CFG_REG);
		rows = (val & SDRAM_CFG_ROW_MASK) >> SDRAM_CFG_ROW_SHIFT;
		cols = (val & SDRAM_CFG_COL_MASK) >> SDRAM_CFG_COL_SHIFT;
		is_32bits = (val & SDRAM_CFG_32B_MASK) ? 1 : 0;
		banks = (val & SDRAM_CFG_BANK_MASK) ? 2 : 1;
	}
	/* NOTE(review): the remainder of this function (size computation
	 * from rows/cols/banks and the return) is truncated in this chunk */
/*
 * Register an Ethernet MAC platform device.
 *
 * @unit: MAC index, 0 or 1 (unit 1 does not exist on the BCM6338)
 * @pd:   platform data copied into the device's own storage
 *
 * The shared DMA engine device is registered lazily on the first call.
 * Returns 0 on success or a negative errno.
 */
int __init bcm63xx_enet_register(int unit,
				 const struct bcm63xx_enet_platform_data *pd)
{
	struct platform_device *pdev;
	struct bcm63xx_enet_platform_data *dpd;
	int ret;

	if (unit > 1)
		return -ENODEV;

	if (unit == 1 && BCMCPU_IS_6338())
		return -ENODEV;

	/* register the shared DMA engine once, on first use */
	if (!shared_device_registered) {
		shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
		shared_res[0].end = shared_res[0].start;
		/* the 6338 only has half of the DMA register window */
		if (BCMCPU_IS_6338())
			shared_res[0].end += (RSET_ENETDMA_SIZE / 2) - 1;
		else
			shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;

		ret = platform_device_register(&bcm63xx_enet_shared_device);
		if (ret)
			return ret;
		shared_device_registered = 1;
	}

	/* fill in per-unit register window and interrupt lines */
	if (unit == 0) {
		enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
		enet0_res[0].end = enet0_res[0].start;
		enet0_res[0].end += RSET_ENET_SIZE - 1;
		enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
		enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
		enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
		pdev = &bcm63xx_enet0_device;
	} else {
		enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
		enet1_res[0].end = enet1_res[0].start;
		enet1_res[0].end += RSET_ENET_SIZE - 1;
		enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
		enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
		enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
		pdev = &bcm63xx_enet1_device;
	}

	/* copy caller's platform data into the device's own copy */
	dpd = pdev->dev.platform_data;
	memcpy(dpd, pd, sizeof(*pd));

	/* the internal PHY only exists on enet0 */
	if (dpd->use_internal_phy) {
		if (unit == 1)
			return -ENODEV;
		dpd->phy_id = 1;
		dpd->has_phy_interrupt = 1;
		dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
	}

	ret = platform_device_register(pdev);
	if (ret)
		return ret;
	return 0;
}
/*
 * Register an Ethernet MAC platform device.
 *
 * @unit: MAC index, 0 or 1 (unit 1 is absent on BCM6338 and BCM6345)
 * @pd:   platform data copied into the device's own storage
 *
 * Also fills in the DMA channel enable/interrupt masks and geometry,
 * which differ on the BCM6345. Returns 0 on success or negative errno.
 */
int __init bcm63xx_enet_register(int unit,
				 const struct bcm63xx_enet_platform_data *pd)
{
	struct platform_device *pdev;
	struct bcm63xx_enet_platform_data *dpd;
	int ret;

	if (unit > 1)
		return -ENODEV;

	if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
		return -ENODEV;

	ret = register_shared();
	if (ret)
		return ret;

	/* fill in per-unit register window and interrupt lines */
	if (unit == 0) {
		enet0_res[0].start = bcm63xx_regset_address(RSET_ENET0);
		enet0_res[0].end = enet0_res[0].start;
		enet0_res[0].end += RSET_ENET_SIZE - 1;
		enet0_res[1].start = bcm63xx_get_irq_number(IRQ_ENET0);
		enet0_res[2].start = bcm63xx_get_irq_number(IRQ_ENET0_RXDMA);
		enet0_res[3].start = bcm63xx_get_irq_number(IRQ_ENET0_TXDMA);
		pdev = &bcm63xx_enet0_device;
	} else {
		enet1_res[0].start = bcm63xx_regset_address(RSET_ENET1);
		enet1_res[0].end = enet1_res[0].start;
		enet1_res[0].end += RSET_ENET_SIZE - 1;
		enet1_res[1].start = bcm63xx_get_irq_number(IRQ_ENET1);
		enet1_res[2].start = bcm63xx_get_irq_number(IRQ_ENET1_RXDMA);
		enet1_res[3].start = bcm63xx_get_irq_number(IRQ_ENET1_TXDMA);
		pdev = &bcm63xx_enet1_device;
	}

	/* copy given platform data */
	dpd = pdev->dev.platform_data;
	memcpy(dpd, pd, sizeof(*pd));

	/* adjust them in case internal phy is used */
	if (dpd->use_internal_phy) {
		/* internal phy only exists for enet0 */
		if (unit == 1)
			return -ENODEV;

		dpd->phy_id = 1;
		dpd->has_phy_interrupt = 1;
		dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
	}

	dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
	dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
	if (BCMCPU_IS_6345()) {
		/* the 6345 DMA engine needs extra channel-config bits,
		 * extra interrupt sources, and its own channel geometry */
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
		dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
		dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
		dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
		dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
		dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
	} else {
		dpd->dma_has_sram = true;
		dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
	}

	ret = platform_device_register(pdev);
	if (ret)
		return ret;
	return 0;
}
static int bcm63xx_external_irq_set_type(struct irq_data *d, unsigned int flow_type) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; int levelsense, sense, bothedge; flow_type &= IRQ_TYPE_SENSE_MASK; if (flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_LEVEL_LOW; levelsense = sense = bothedge = 0; switch (flow_type) { case IRQ_TYPE_EDGE_BOTH: bothedge = 1; break; case IRQ_TYPE_EDGE_RISING: sense = 1; break; case IRQ_TYPE_EDGE_FALLING: break; case IRQ_TYPE_LEVEL_HIGH: levelsense = 1; sense = 1; break; case IRQ_TYPE_LEVEL_LOW: levelsense = 1; break; default: printk(KERN_ERR "bogus flow type combination given !\n"); return -EINVAL; } regaddr = get_ext_irq_perf_reg(irq); reg = bcm_perf_readl(regaddr); irq %= 4; if (BCMCPU_IS_6348()) { if (levelsense) reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq); else reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq); if (sense) reg |= EXTIRQ_CFG_SENSE_6348(irq); else reg &= ~EXTIRQ_CFG_SENSE_6348(irq); if (bothedge) reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq); else reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq); } if (BCMCPU_IS_6338() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) { if (levelsense) reg |= EXTIRQ_CFG_LEVELSENSE(irq); else reg &= ~EXTIRQ_CFG_LEVELSENSE(irq); if (sense) reg |= EXTIRQ_CFG_SENSE(irq); else reg &= ~EXTIRQ_CFG_SENSE(irq); if (bothedge) reg |= EXTIRQ_CFG_BOTHEDGE(irq); else reg &= ~EXTIRQ_CFG_BOTHEDGE(irq); } bcm_perf_writel(reg, regaddr); irqd_set_trigger_type(d, flow_type); if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) __irq_set_handler_locked(d->irq, handle_level_irq); else __irq_set_handler_locked(d->irq, handle_edge_irq); return IRQ_SET_MASK_OK_NOCOPY; }
/*
 * Early boot initialization: identify the CPU, quiesce the watchdog,
 * gate unused peripheral clocks, run GPIO/board setup, and bring up
 * the second CPU for BMIPS SMP where supported.
 */
void __init prom_init(void)
{
	u32 reg, mask;

	bcm63xx_cpu_init();

	/* stop any running watchdog */
	bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
	bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);

	/* disable all hardware blocks clock for now */
	if (BCMCPU_IS_3368())
		mask = CKCTL_3368_ALL_SAFE_EN;
	else if (BCMCPU_IS_6328())
		mask = CKCTL_6328_ALL_SAFE_EN;
	else if (BCMCPU_IS_6338())
		mask = CKCTL_6338_ALL_SAFE_EN;
	else if (BCMCPU_IS_6345())
		mask = CKCTL_6345_ALL_SAFE_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_ALL_SAFE_EN;
	else if (BCMCPU_IS_6358())
		mask = CKCTL_6358_ALL_SAFE_EN;
	else if (BCMCPU_IS_6362())
		mask = CKCTL_6362_ALL_SAFE_EN;
	else if (BCMCPU_IS_6368())
		mask = CKCTL_6368_ALL_SAFE_EN;
	else
		mask = 0;

	reg = bcm_perf_readl(PERF_CKCTL_REG);
	reg &= ~mask;
	bcm_perf_writel(reg, PERF_CKCTL_REG);

	/* register gpiochip */
	bcm63xx_gpio_init();

	/* do low level board init */
	board_prom_init();

	/* set up SMP */
	if (!register_bmips_smp_ops()) {
		/*
		 * BCM6328 might not have its second CPU enabled, while BCM3368
		 * and BCM6358 need special handling for their shared TLB, so
		 * disable SMP for now.
		 */
		if (BCMCPU_IS_6328()) {
			/* OTP user bits tell us whether TP1 was fused off */
			reg = bcm_readl(BCM_6328_OTP_BASE +
					OTP_USER_BITS_6328_REG(3));

			if (reg & OTP_6328_REG3_TP1_DISABLED)
				bmips_smp_enabled = 0;
		} else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
			bmips_smp_enabled = 0;
		}

		if (!bmips_smp_enabled)
			return;

		/*
		 * The bootloader has set up the CPU1 reset vector at
		 * 0xa000_0200.
		 * This conflicts with the special interrupt vector (IV).
		 * The bootloader has also set up CPU1 to respond to the wrong
		 * IPI interrupt.
		 * Here we will start up CPU1 in the background and ask it to
		 * reconfigure itself then go back to sleep.
		 */
		memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
		__sync();
		/* raise software interrupt 0 to kick CPU1 */
		set_c0_cause(C_SW0);
		cpumask_set_cpu(1, &bmips_booted_mask);

		/*
		 * FIXME: we really should have some sort of hazard barrier here
		 */
	}
}