struct clk *clk_get(struct device *dev, const char *id) { if (!strcmp(id, "enet0")) return &clk_enet0; if (!strcmp(id, "enet1")) return &clk_enet1; if (!strcmp(id, "enetsw")) return &clk_enetsw; if (!strcmp(id, "ephy")) return &clk_ephy; if (!strcmp(id, "usbh")) return &clk_usbh; if (!strcmp(id, "usbd")) return &clk_usbd; if (!strcmp(id, "spi")) return &clk_spi; if (!strcmp(id, "hsspi")) return &clk_hsspi; if (!strcmp(id, "xtm")) return &clk_xtm; if (!strcmp(id, "periph")) return &clk_periph; if ((BCMCPU_IS_3368() || BCMCPU_IS_6358()) && !strcmp(id, "pcm")) return &clk_pcm; if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec")) return &clk_ipsec; if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie")) return &clk_pcie; return ERR_PTR(-ENOENT); }
/*
 * Early boot entry: identify the CPU, quiesce the watchdog, gate the
 * "safe to disable" peripheral clocks, then hand off to GPIO and
 * board-level initialization.
 */
void __init prom_init(void)
{
	u32 ckctl, safe_mask;

	bcm63xx_cpu_init();

	/* stop any running watchdog */
	bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
	bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);

	/* pick the per-SoC mask of clocks that are safe to gate */
	if (BCMCPU_IS_6338())
		safe_mask = CKCTL_6338_ALL_SAFE_EN;
	else if (BCMCPU_IS_6345())
		safe_mask = CKCTL_6345_ALL_SAFE_EN;
	else if (BCMCPU_IS_6348())
		safe_mask = CKCTL_6348_ALL_SAFE_EN;
	else if (BCMCPU_IS_6358())
		safe_mask = CKCTL_6358_ALL_SAFE_EN;
	else if (BCMCPU_IS_6368())
		safe_mask = CKCTL_6368_ALL_SAFE_EN;
	else
		safe_mask = 0;

	/* disable all hardware block clocks covered by the mask for now */
	ckctl = bcm_perf_readl(PERF_CKCTL_REG);
	ckctl &= ~safe_mask;
	bcm_perf_writel(ckctl, PERF_CKCTL_REG);

	/* register gpiochip */
	bcm63xx_gpio_init();

	/* do low level board init */
	board_prom_init();
}
/* * PCM clock */ static void pcm_set(struct clk *clk, int enable) { if (BCMCPU_IS_3368()) bcm_hwclock_set(CKCTL_3368_PCM_EN, enable); if (BCMCPU_IS_6358()) bcm_hwclock_set(CKCTL_6358_PCM_EN, enable); }
/*
 * Register the legacy SPI controller platform device, filling in the
 * per-SoC register window size and message layout first.
 */
int __init bcm63xx_spi_register(void)
{
	/* no legacy SPI platform device is registered for these SoCs */
	if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
		return -ENODEV;

	spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
	spi_resources[0].end = spi_resources[0].start;
	spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);

	if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
		spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
		spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
		spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
		spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
	} else if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
		spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
		spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
		spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
		spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
	}

	bcm63xx_spi_regs_init();

	return platform_device_register(&bcm63xx_spi_device);
}
/* select the SPI register layout matching the running SoC */
static __init void bcm63xx_spi_regs_init(void)
{
	if (BCMCPU_IS_6338())
		bcm63xx_regs_spi = bcm6338_regs_spi;
	else if (BCMCPU_IS_6348())
		bcm63xx_regs_spi = bcm6348_regs_spi;
	else if (BCMCPU_IS_6358())
		bcm63xx_regs_spi = bcm6358_regs_spi;
	else if (BCMCPU_IS_6368())
		bcm63xx_regs_spi = bcm6368_regs_spi;
}
/*
 * SPI clock
 */
static void spi_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6338())
		bcm_hwclock_set(CKCTL_6338_SPI_EN, enable);
	else if (BCMCPU_IS_6348())
		bcm_hwclock_set(CKCTL_6348_SPI_EN, enable);
	else if (BCMCPU_IS_6358())
		bcm_hwclock_set(CKCTL_6358_SPI_EN, enable);
	else /* BCMCPU_IS_6368 */
		bcm_hwclock_set(CKCTL_6368_SPI_EN, enable);
}
/*
 * Ethernet MAC clocks: only relevant on 3368/6358; silently enable
 * the shared misc clock in all cases.
 */
static void enetx_set(struct clk *clk, int enable)
{
	/* the MAC clocks piggy-back on the shared enet misc clock */
	if (enable)
		clk_enable_unlocked(&clk_enet_misc);
	else
		clk_disable_unlocked(&clk_enet_misc);

	if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
		u32 mask;

		mask = clk->id ? CKCTL_6358_ENET1_EN : CKCTL_6358_ENET0_EN;
		bcm_hwclock_set(mask, enable);
	}
}
struct clk *clk_get(struct device *dev, const char *id) { if (!strcmp(id, "enet0")) return &clk_enet0; if (!strcmp(id, "enet1")) return &clk_enet1; if (!strcmp(id, "ephy")) return &clk_ephy; if (!strcmp(id, "usbh")) return &clk_usbh; if (!strcmp(id, "spi")) return &clk_spi; if (!strcmp(id, "periph")) return &clk_periph; if (BCMCPU_IS_6358() && !strcmp(id, "pcm")) return &clk_pcm; return ERR_PTR(-ENOENT); }
/*
 * Register the PCMCIA platform device on 6348/6358: pick the ready
 * GPIO, fill in the register/IRQ resources and program the chip
 * selects.  Returns 0 (without registering) on unsupported CPUs.
 */
int __init bcm63xx_pcmcia_register(void)
{
	int ret, i;

	if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
		return 0;

	/* use correct pcmcia ready gpio depending on processor */
	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
	case BCM6358_CPU_ID:
		/* both SoCs use the same ready GPIO */
		pd.ready_gpio = 22;
		break;

	default:
		return -ENODEV;
	}

	pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA);
	pcmcia_resources[0].end = pcmcia_resources[0].start;
	pcmcia_resources[0].end += RSET_PCMCIA_SIZE - 1;
	pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA);

	/* configure pcmcia chip selects */
	for (i = 0; i < 3; i++) {
		ret = config_pcmcia_cs(pcmcia_cs[i][0],
				       pcmcia_cs[i][1],
				       pcmcia_cs[i][2]);
		if (ret)
			goto out_err;
	}

	return platform_device_register(&bcm63xx_pcmcia_device);

out_err:
	/* fix: message was missing its terminating newline */
	printk(KERN_ERR "unable to set pcmcia chip select\n");
	return ret;
}
/*
 * Enable the extra (>= 2) SPI chip-select pins on 6358/6368 by
 * switching the relevant GPIOs into their SPI slave-select function.
 * Code adapted from http://pastebin.com/g0bQGPRj
 */
static void bcm_mpi_enable_extra_CSs(u16 cs)
{
	u32 val;

	/* CS 0 and 1 need no pin muxing */
	if (cs < 2)
		return;

	if (BCMCPU_IS_6358()) {
		/* Enable Overlay for SPI SS Pins */
		val = bcm_gpio_readl(GPIO_MODE_REG);
		val |= GPIO_MODE_6358_EXTRA_SPI_SS;
		bcm_gpio_writel(val, GPIO_MODE_REG);
		/* Enable SPI Slave Select as Output Pins */
		/* GPIO 32 is SS2, GPIO 33 is SS3 */
		val = bcm_gpio_readl(GPIO_CTL_HI_REG);
		val |= 0x0003;
		bcm_gpio_writel(val, GPIO_CTL_HI_REG);
	} else if (BCMCPU_IS_6368()) {
		/* Enable Extra SPI CS */
		val = bcm_gpio_readl(GPIO_MODE_REG);
		val |= (GPIO_MODE_6368_SPI_SSN2 << (cs - 2));
		bcm_gpio_writel(val, GPIO_MODE_REG);
		/* Enable SPI Slave Select as Output Pins */
		/* GPIO 28 is SS2, GPIO 29 is SS3, GPIO 30 is SS4,
		   GPIO 31 is SS5 */
		val = bcm_gpio_readl(GPIO_CTL_LO_REG);
		val |= (GPIO_MODE_6368_SPI_SSN2 << (cs - 2));
		bcm_gpio_writel(val, GPIO_CTL_LO_REG);
	}
}
static int bcm63xx_spi_probe(struct platform_device *pdev) { struct device *dev = &(pdev->dev); int ret; struct resource *r; int irq; bcm_mpi_dev_data_t *bs; bcm_pr_debug("%s()\n", __func__); if ( #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) (!pdev->id_entry->driver_data) #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ (!BCMCPU_IS_6358()) #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ ) { return -EINVAL; } bs = &(bcm_mpi_dev_data); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) bcm_mpi_dev_data_init(bs, (const unsigned long *)pdev->id_entry->driver_data); #else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ bcm_mpi_dev_data_init(bs, bcm6358_spi_reg_offsets); #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ bs->ref_count = 1; bs->pdev = pdev; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(dev, "no iomem\n"); ret = -ENXIO; goto fail_get_res; } bs->res_start = r->start; bs->res_size = resource_size(r); irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(dev, "no irq\n"); ret = -ENXIO; goto fail_get_irq; } bs->irq = irq; bs->clk = devm_clk_get(dev, "spi"); if (IS_ERR(bs->clk)) { dev_err(dev, "no clock for device\n"); ret = PTR_ERR(bs->clk); goto fail_get_clk; } platform_set_drvdata(pdev, bs); if (!devm_request_mem_region(&(pdev->dev), bs->res_start, bs->res_size, driver_name)) { dev_err(dev, "iomem request failed\n"); ret = -ENXIO; goto fail_req_reg; } bs->regs = devm_ioremap_nocache(&(pdev->dev), bs->res_start, bs->res_size); if (!bs->regs) { dev_err(dev, "unable to ioremap regs\n"); ret = -ENOMEM; goto fail_io_remap; } bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]); bs->rx_io = (const u8 *)(bs->regs + bs->reg_offsets[SPI_RX_DATA]); ret = devm_request_irq(&(pdev->dev), irq, bcm63xx_spi_interrupt, 0, pdev->name, bs); if (ret) { dev_err(dev, "unable to request irq\n"); goto fail_req_irq; } /* Initialize hardware */ clk_enable(bs->clk); /* Read interupts and clear them immediately */ 
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); bcm_spi_writeb(bs, 0, SPI_INT_MASK); bcm_mpi_set_clk_cfg(bs, SPI_CLK_0_391MHZ); bcm_mpi_set_fill_byte(bs, 0); dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", bs->res_start, bs->irq, bs->fifo_size); return 0; clk_disable(bs->clk); devm_free_irq(&(pdev->dev), irq, bs); fail_req_irq: devm_iounmap(&(pdev->dev), bs->regs); fail_io_remap: devm_release_mem_region(&(pdev->dev), bs->res_start, bs->res_size); fail_req_reg: platform_set_drvdata(pdev, NULL); clk_put(bs->clk); fail_get_clk: fail_get_irq: fail_get_res: bs->ref_count = 0; return ret; }
/*
 * Early boot entry: identify the CPU, stop the watchdog, gate the
 * per-SoC "safe to disable" peripheral clocks, run GPIO/board init,
 * then configure BMIPS SMP where it is known to work.
 */
void __init prom_init(void)
{
	u32 reg, mask;

	bcm63xx_cpu_init();

	/* stop any running watchdog */
	bcm_wdt_writel(WDT_STOP_1, WDT_CTL_REG);
	bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);

	/* disable all hardware blocks clock for now */
	if (BCMCPU_IS_3368())
		mask = CKCTL_3368_ALL_SAFE_EN;
	else if (BCMCPU_IS_6328())
		mask = CKCTL_6328_ALL_SAFE_EN;
	else if (BCMCPU_IS_6338())
		mask = CKCTL_6338_ALL_SAFE_EN;
	else if (BCMCPU_IS_6345())
		mask = CKCTL_6345_ALL_SAFE_EN;
	else if (BCMCPU_IS_6348())
		mask = CKCTL_6348_ALL_SAFE_EN;
	else if (BCMCPU_IS_6358())
		mask = CKCTL_6358_ALL_SAFE_EN;
	else if (BCMCPU_IS_6362())
		mask = CKCTL_6362_ALL_SAFE_EN;
	else if (BCMCPU_IS_6368())
		mask = CKCTL_6368_ALL_SAFE_EN;
	else
		mask = 0;

	/* read-modify-write: clear only the bits covered by the mask */
	reg = bcm_perf_readl(PERF_CKCTL_REG);
	reg &= ~mask;
	bcm_perf_writel(reg, PERF_CKCTL_REG);

	/* register gpiochip */
	bcm63xx_gpio_init();

	/* do low level board init */
	board_prom_init();

	/* set up SMP */
	if (!register_bmips_smp_ops()) {
		/*
		 * BCM6328 might not have its second CPU enabled, while BCM3368
		 * and BCM6358 need special handling for their shared TLB, so
		 * disable SMP for now.
		 */
		if (BCMCPU_IS_6328()) {
			/* second thread presence is an OTP fuse bit */
			reg = bcm_readl(BCM_6328_OTP_BASE +
					OTP_USER_BITS_6328_REG(3));
			if (reg & OTP_6328_REG3_TP1_DISABLED)
				bmips_smp_enabled = 0;
		} else if (BCMCPU_IS_3368() || BCMCPU_IS_6358()) {
			bmips_smp_enabled = 0;
		}

		if (!bmips_smp_enabled)
			return;

		/*
		 * The bootloader has set up the CPU1 reset vector at
		 * 0xa000_0200.
		 * This conflicts with the special interrupt vector (IV).
		 * The bootloader has also set up CPU1 to respond to the wrong
		 * IPI interrupt.
		 * Here we will start up CPU1 in the background and ask it to
		 * reconfigure itself then go back to sleep.
		 */
		memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
		__sync();
		set_c0_cause(C_SW0);
		cpumask_set_cpu(1, &bmips_booted_mask);

		/*
		 * FIXME: we really should have some sort of hazard barrier here
		 */
	}
}
/*
 * Initialize the MPI-based PCI bridge on 6348/6358/6368: set up the
 * local-bus-to-PCI windows (memory, optional cardbus, IO), the
 * PCI-to-local-RAM window for bus mastering, then register the PCI
 * controller(s).
 */
static int __init bcm63xx_pci_init(void)
{
	unsigned int mem_size;
	u32 val;

	if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358() && !BCMCPU_IS_6368())
		return -ENODEV;

	if (!bcm63xx_pci_enabled)
		return -ENODEV;

	/* configuration access goes through IO space; map its first
	 * 4 bytes so the CPU can reach it */
	pci_iospace_start = ioremap_nocache(BCM_PCI_IO_BASE_PA, 4);
	if (!pci_iospace_start)
		return -ENOMEM;

	/* local bus to PCI access window (PCI memory) */
	val = BCM_PCI_MEM_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMBASE1_REG);
	bcm_mpi_writel(~(BCM_PCI_MEM_SIZE - 1), MPI_L2PMEMRANGE1_REG);
	bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PMEMREMAP1_REG);

	/* route the cardbus IDSEL */
	val = bcm_pcmcia_readl(PCMCIA_C1_REG);
	val &= ~PCMCIA_C1_CBIDSEL_MASK;
	val |= (CARDBUS_PCI_IDSEL << PCMCIA_C1_CBIDSEL_SHIFT);
	bcm_pcmcia_writel(val, PCMCIA_C1_REG);

#ifdef CONFIG_CARDBUS
	/* local bus to PCI access window (Cardbus memory) */
	val = BCM_CB_MEM_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMBASE2_REG);
	bcm_mpi_writel(~(BCM_CB_MEM_SIZE - 1), MPI_L2PMEMRANGE2_REG);
	val |= MPI_L2PREMAP_ENABLED_MASK | MPI_L2PREMAP_IS_CARDBUS_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMREMAP2_REG);
#else
	/* disable second access window */
	bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
#endif

	/* local bus to PCI access window (IO memory) */
	val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PIOBASE_REG);
	bcm_mpi_writel(~(BCM_PCI_IO_SIZE - 1), MPI_L2PIORANGE_REG);
	bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PIOREMAP_REG);

	/* enable PCI related GPIO pins */
	bcm_mpi_writel(MPI_LOCBUSCTL_EN_PCI_GPIO_MASK, MPI_LOCBUSCTL_REG);

	/* PCI to local bus access (device bus mastering into RAM) */
	bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3);
	if (BCMCPU_IS_6358() || BCMCPU_IS_6368())
		val = MPI_SP0_REMAP_ENABLE_MASK;
	else
		val = 0;
	bcm_mpi_writel(val, MPI_SP0_REMAP_REG);

	bcm63xx_int_cfg_writel(0x0, PCI_BASE_ADDRESS_4);
	bcm_mpi_writel(0, MPI_SP1_REMAP_REG);

	mem_size = bcm63xx_get_memory_size();

	/* 6348 rev a exposes only 16 MB of RAM through PCI */
	if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() & 0xf0) == 0xa0) {
		if (mem_size > (16 * 1024 * 1024))
			printk(KERN_WARNING "bcm63xx: this CPU "
			       "revision cannot handle more than 16MB "
			       "of RAM for PCI bus mastering\n");
	} else {
		/* setup sp0 range to local RAM size */
		bcm_mpi_writel(~(mem_size - 1), MPI_SP0_RANGE_REG);
		bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
	}

	/* clear host bridge retry counter field */
	val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
	val &= ~REG_TIMER_RETRY_MASK;
	bcm63xx_int_cfg_writel(val, BCMPCI_REG_TIMERS);

	/* enable memory decoder and bus mastering */
	val = bcm63xx_int_cfg_readl(PCI_COMMAND);
	val |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	bcm63xx_int_cfg_writel(val, PCI_COMMAND);

	/* enable read prefetching, disable byte swapping for bus
	 * mastering transfers */
	val = bcm_mpi_readl(MPI_PCIMODESEL_REG);
	val &= ~MPI_PCIMODESEL_BAR1_NOSWAP_MASK;
	val &= ~MPI_PCIMODESEL_BAR2_NOSWAP_MASK;
	val &= ~MPI_PCIMODESEL_PREFETCH_MASK;
	val |= (8 << MPI_PCIMODESEL_PREFETCH_SHIFT);
	bcm_mpi_writel(val, MPI_PCIMODESEL_REG);

	/* enable pci interrupt */
	val = bcm_mpi_readl(MPI_LOCINT_REG);
	val |= MPI_LOCINT_MASK(MPI_LOCINT_EXT_PCI_INT);
	bcm_mpi_writel(val, MPI_LOCINT_REG);

	register_pci_controller(&bcm63xx_controller);

#ifdef CONFIG_CARDBUS
	register_pci_controller(&bcm63xx_cb_controller);
#endif

	/* mark memory space used for IO mapping as reserved */
	request_mem_region(BCM_PCI_IO_BASE_PA, BCM_PCI_IO_SIZE,
			   "bcm63xx PCI IO space");
	return 0;
}
/* Ethernet PHY clock: only present on the 6358 */
static void ephy_set(struct clk *clk, int enable)
{
	if (BCMCPU_IS_6358())
		bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable);
}
/*
 * Initialize the MPI-based PCI bridge on 6348/6358: set up the
 * local-bus-to-PCI windows (memory, optional cardbus, IO), the
 * PCI-to-local-RAM window for bus mastering, then register the PCI
 * controller(s).
 */
static int __init bcm63xx_pci_init(void)
{
	unsigned int mem_size;
	u32 val;

	if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
		return -ENODEV;

	if (!bcm63xx_pci_enabled)
		return -ENODEV;

	/*
	 * configuration access are done through IO space, remap 4
	 * first bytes to access it from CPU.
	 *
	 * this means that no io access from CPU should happen while
	 * we do a configuration cycle, but there's no way we can add
	 * a spinlock for each io access, so this is currently kind of
	 * broken on SMP.
	 */
	pci_iospace_start = ioremap_nocache(BCM_PCI_IO_BASE_PA, 4);
	if (!pci_iospace_start)
		return -ENOMEM;

	/* setup local bus to PCI access (PCI memory) */
	val = BCM_PCI_MEM_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMBASE1_REG);
	bcm_mpi_writel(~(BCM_PCI_MEM_SIZE - 1), MPI_L2PMEMRANGE1_REG);
	bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK, MPI_L2PMEMREMAP1_REG);

	/* set Cardbus IDSEL (type 0 cfg access on primary bus for
	 * this IDSEL will be done on Cardbus instead) */
	val = bcm_pcmcia_readl(PCMCIA_C1_REG);
	val &= ~PCMCIA_C1_CBIDSEL_MASK;
	val |= (CARDBUS_PCI_IDSEL << PCMCIA_C1_CBIDSEL_SHIFT);
	bcm_pcmcia_writel(val, PCMCIA_C1_REG);

#ifdef CONFIG_CARDBUS
	/* setup local bus to PCI access (Cardbus memory) */
	val = BCM_CB_MEM_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMBASE2_REG);
	bcm_mpi_writel(~(BCM_CB_MEM_SIZE - 1), MPI_L2PMEMRANGE2_REG);
	val |= MPI_L2PREMAP_ENABLED_MASK | MPI_L2PREMAP_IS_CARDBUS_MASK;
	bcm_mpi_writel(val, MPI_L2PMEMREMAP2_REG);
#else
	/* disable second access windows */
	bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
#endif

	/* setup local bus to PCI access (IO memory), we have only 1
	 * IO window for both PCI and cardbus, but it cannot handle
	 * both at the same time, assume standard PCI for now, if
	 * cardbus card has IO zone, PCI fixup will change window to
	 * cardbus */
	val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
	bcm_mpi_writel(val, MPI_L2PIOBASE_REG);
	bcm_mpi_writel(~(BCM_PCI_IO_SIZE - 1), MPI_L2PIORANGE_REG);
	bcm_mpi_writel(val | MPI_L2PREMAP_ENABLED_MASK,
		       MPI_L2PIOREMAP_REG);

	/* enable PCI related GPIO pins */
	bcm_mpi_writel(MPI_LOCBUSCTL_EN_PCI_GPIO_MASK, MPI_LOCBUSCTL_REG);

	/* setup PCI to local bus access, used by PCI device to target
	 * local RAM while bus mastering */
	bcm63xx_int_cfg_writel(0, PCI_BASE_ADDRESS_3);
	if (BCMCPU_IS_6358())
		val = MPI_SP0_REMAP_ENABLE_MASK;
	else
		val = 0;
	bcm_mpi_writel(val, MPI_SP0_REMAP_REG);

	bcm63xx_int_cfg_writel(0x0, PCI_BASE_ADDRESS_4);
	bcm_mpi_writel(0, MPI_SP1_REMAP_REG);

	mem_size = bcm63xx_get_memory_size();

	/* 6348 before rev b0 exposes only 16 MB of RAM memory through
	 * PCI, throw a warning if we have more memory */
	if (BCMCPU_IS_6348() && (bcm63xx_get_cpu_rev() & 0xf0) == 0xa0) {
		if (mem_size > (16 * 1024 * 1024))
			printk(KERN_WARNING "bcm63xx: this CPU "
			       "revision cannot handle more than 16MB "
			       "of RAM for PCI bus mastering\n");
	} else {
		/* setup sp0 range to local RAM size */
		bcm_mpi_writel(~(mem_size - 1), MPI_SP0_RANGE_REG);
		bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
	}

	/* change host bridge retry counter to infinite number of
	 * retry, needed for some broadcom wifi cards with Silicon
	 * Backplane bus where access to srom seems very slow */
	val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
	val &= ~REG_TIMER_RETRY_MASK;
	bcm63xx_int_cfg_writel(val, BCMPCI_REG_TIMERS);

	/* enable memory decoder and bus mastering */
	val = bcm63xx_int_cfg_readl(PCI_COMMAND);
	val |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	bcm63xx_int_cfg_writel(val, PCI_COMMAND);

	/* enable read prefetching & disable byte swapping for bus
	 * mastering transfers */
	val = bcm_mpi_readl(MPI_PCIMODESEL_REG);
	val &= ~MPI_PCIMODESEL_BAR1_NOSWAP_MASK;
	val &= ~MPI_PCIMODESEL_BAR2_NOSWAP_MASK;
	val &= ~MPI_PCIMODESEL_PREFETCH_MASK;
	val |= (8 << MPI_PCIMODESEL_PREFETCH_SHIFT);
	bcm_mpi_writel(val, MPI_PCIMODESEL_REG);

	/* enable pci interrupt */
	val = bcm_mpi_readl(MPI_LOCINT_REG);
	val |= MPI_LOCINT_MASK(MPI_LOCINT_EXT_PCI_INT);
	bcm_mpi_writel(val, MPI_LOCINT_REG);

	register_pci_controller(&bcm63xx_controller);

#ifdef CONFIG_CARDBUS
	register_pci_controller(&bcm63xx_cb_controller);
#endif

	/* mark memory space used for IO mapping as reserved */
	request_mem_region(BCM_PCI_IO_BASE_PA, BCM_PCI_IO_SIZE,
			   "bcm63xx PCI IO space");
	return 0;
}
/*
 * irq_chip .irq_set_type handler for the external interrupt lines:
 * translate a generic IRQ trigger type into the level/sense/bothedge
 * bits of the per-SoC EXTIRQ configuration register, then install the
 * matching flow handler (level vs edge).
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
	unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	/* default to active-low level trigger when none requested */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		/* all three flags stay 0 */
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);
	/* each register holds the config of up to 4 external irqs */
	irq %= 4;

	/* the 6348 uses a different bit layout than the other SoCs */
	if (BCMCPU_IS_6348()) {
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
	}

	if (BCMCPU_IS_6338() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
	}

	bcm_perf_writel(reg, regaddr);

	/* record the type and pick the matching flow handler */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
/*
 * Ethernet PHY clock
 */
static void ephy_set(struct clk *clk, int enable)
{
	/* only present on 3368 and 6358 */
	if (!BCMCPU_IS_3368() && !BCMCPU_IS_6358())
		return;

	bcm_hwclock_set(CKCTL_6358_EPHY_EN, enable);
}
/*
 * Probe the BCM63xx OHCI controller: enable the USB host clock, apply
 * the per-SoC USBH priv register setup, then create and register the
 * HCD.
 *
 * Fix: on every failure path after clk_enable() the original left the
 * USB host clock running and usb_host_clock pointing at it; errors
 * now unwind through out_clk.
 */
static int __devinit ohci_hcd_bcm63xx_drv_probe(struct platform_device *pdev)
{
	struct resource *res_mem;
	struct usb_hcd *hcd;
	struct ohci_hcd *ohci;
	struct clk *clk;
	u32 reg;
	int ret, irq;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res_mem || irq < 0)
		return -ENODEV;

	/* enable USB host clock */
	clk = clk_get(&pdev->dev, "usbh");
	if (IS_ERR(clk))
		return -ENODEV;

	clk_enable(clk);
	usb_host_clock = clk;
	msleep(100);

	if (BCMCPU_IS_6348())
		bcm_rset_writel(RSET_OHCI_PRIV, 0, OHCI_PRIV_REG);
	else if (BCMCPU_IS_6358()) {
		reg = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6358_REG);
		reg &= ~USBH_PRIV_SWAP_OHCI_ENDN_MASK;
		reg |= USBH_PRIV_SWAP_OHCI_DATA_MASK;
		bcm_rset_writel(RSET_USBH_PRIV, reg, USBH_PRIV_SWAP_6358_REG);
		/*
		 * The magic value comes for the original vendor BSP
		 * and is needed for USB to work. Datasheet does not
		 * help, so the magic value is used as-is.
		 */
		bcm_rset_writel(RSET_USBH_PRIV, 0x1c0020,
				USBH_PRIV_TEST_6358_REG);
	} else if (BCMCPU_IS_6368()) {
		reg = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
		reg &= ~USBH_PRIV_SWAP_OHCI_ENDN_MASK;
		reg |= USBH_PRIV_SWAP_OHCI_DATA_MASK;
		bcm_rset_writel(RSET_USBH_PRIV, reg, USBH_PRIV_SWAP_6368_REG);
		reg = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SETUP_6368_REG);
		reg |= USBH_PRIV_SETUP_IOC_MASK;
		bcm_rset_writel(RSET_USBH_PRIV, reg, USBH_PRIV_SETUP_6368_REG);
	}

	hcd = usb_create_hcd(&ohci_bcm63xx_hc_driver, &pdev->dev, "bcm63xx");
	if (!hcd) {
		ret = -ENOMEM;
		goto out_clk;
	}
	hcd->rsrc_start = res_mem->start;
	hcd->rsrc_len = resource_size(res_mem);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		pr_debug("request_mem_region failed\n");
		ret = -EBUSY;
		goto out;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		pr_debug("ioremap failed\n");
		ret = -EIO;
		goto out1;
	}

	ohci = hcd_to_ohci(hcd);
	ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC |
		       OHCI_QUIRK_FRAME_NO;
	ohci_hcd_init(ohci);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (ret)
		goto out2;

	platform_set_drvdata(pdev, hcd);
	return 0;

out2:
	iounmap(hcd->regs);
out1:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
out:
	usb_put_hcd(hcd);
out_clk:
	/* don't leave the host clock running after a failed probe */
	clk_disable(clk);
	usb_host_clock = NULL;
	return ret;
}